Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
- /**
- * Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
- * OSTATECZNY KOD
- * Please refer to the NVIDIA end user license agreement (EULA) associated
- * with this source code for terms and conditions that govern your use of
- * this software. Any use, reproduction, disclosure, or distribution of
- * this software and related documentation outside the terms of the EULA
- * is strictly prohibited.
- *
- */
- /**
- * Matrix multiplication: C = A * B.
- * Host code.
- *
- * This sample implements matrix multiplication as described in Chapter 3
- * of the programming guide.
- * It has been written for clarity of exposition to illustrate various CUDA
- * programming principles, not with the goal of providing the most
- * performant generic kernel for matrix multiplication.
- *
- * See also:
- * V. Volkov and J. Demmel, "Benchmarking GPUs to tune dense linear algebra,"
- * in Proc. 2008 ACM/IEEE Conf. on Superconducting (SC '08),
- * Piscataway, NJ: IEEE Press, 2008, pp. Art. 31:1-11.
- */
- // System includes
- #define WIN32
- #include <stdio.h>
- #include <assert.h>
- #include "stdio.h"
- // CUDA runtime
- #include <cuda_runtime.h>
- // Helper functions and utilities to work with CUDA
- #include "helper_functions.h"
- /**
- * Matrix multiplication (CUDA Kernel) on the device: C = A * B
- * wA is A's width and wB is B's width
- */
- template <int BLOCK_SIZE> __global__ void
- matrixMulCUDA(float *C, float *A, float *B, int wA, int wB)
- {
- // Block index
- int bx = blockIdx.x;
- int by = blockIdx.y;
- // Thread index
- int tx = threadIdx.x;
- int ty = threadIdx.y;
- // Index of the first sub-matrix of A processed by the block
- int aBegin = wA * BLOCK_SIZE * by * 2;
- // Index of the last sub-matrix of A processed by the block
- int aEnd = aBegin + wA - 1;
- // Step size used to iterate through the sub-matrices of A
- int aStep = BLOCK_SIZE;
- // Index of the first sub-matrix of B processed by the block
- int bBegin = BLOCK_SIZE * bx;
- // Step size used to iterate through the sub-matrices of B
- int bStep = BLOCK_SIZE * wB;
- // Csub is used to store the element of the block sub-matrix
- // that is computed by the thread
- float Csub = 0;
- __syncthreads();
- // Loop over all the sub-matrices of A and B
- // required to compute the block sub-matrix
- for (int a = aBegin, b = bBegin;
- a <= aEnd;
- a += aStep, b += bStep)
- {
- // Declaration of the shared memory array As used to
- // store the sub-matrix of A
- // Load the matrices from device memory
- // to shared memory; each thread loads
- // one element of each matrix
- // Synchronize to make sure the matrices are loaded
- // Multiply the two matrices together;
- // each thread computes one element
- // of the block sub-matrix
- #pragma unroll
- for (int k = 0; k < BLOCK_SIZE; ++k)
- {
- Csub += A[aBegin + wA * ty + tx] * B[bBegin + wB * ty + tx];
- }
- // Synchronize to make sure that the preceding
- // computation is done before loading two new
- // sub-matrices of A and B in the next iteration
- }
- __syncthreads();
- // Write the block sub-matrix to device memory;
- // each thread writes one element
- int c = wB * BLOCK_SIZE * by * 2 + BLOCK_SIZE * bx;
- C[c + wB * ty + tx] = Csub;
- //printf(" %f, %i, %i \n", Csub, c + wB * ty + tx, c + wB * ty + 10240 + tx);
- }
- void constantInit(float *data, int size, float val)
- {
- for (int i = 0; i < size; ++i)
- {
- data[i] = (float)rand() / RAND_MAX;
- }
- }
- /**
- * Run a simple test of matrix multiplication using CUDA
- */
- int matrixMultiply(int argc, char **argv, int block_size, dim3 &dimsA, dim3 &dimsB)
- {
- // Allocate host memory for matrices A and B and C
- unsigned int size_A = dimsA.x * dimsA.y;
- unsigned int mem_size_A = sizeof(float) * size_A;
- float *h_A = (float *)malloc(mem_size_A);
- unsigned int size_B = dimsB.x * dimsB.y;
- unsigned int mem_size_B = sizeof(float) * size_B;
- float *h_B = (float *)malloc(mem_size_B);
- dim3 dimsC(dimsB.x, dimsA.y, 1);
- unsigned int mem_size_C = dimsC.x * dimsC.y * sizeof(float);
- float *h_C = (float *)malloc(mem_size_C);
- // Allocate device memory
- float *d_A, *d_B, *d_C;
- const float valB = 0.01f;
- constantInit(h_A, size_A, 1.0f);
- constantInit(h_B, size_B, valB);
- cudaError_t error;
- error = cudaMalloc((void **)&d_A, mem_size_A);
- error = cudaMalloc((void **)&d_B, mem_size_B);
- error = cudaMalloc((void **)&d_C, mem_size_C);
- // copy host memory to device
- error = cudaMemcpy(d_A, h_A, mem_size_A, cudaMemcpyHostToDevice);
- error = cudaMemcpy(d_B, h_B, mem_size_B, cudaMemcpyHostToDevice);
- // Setup execution parameters
- dim3 threads(block_size, block_size);
- dim3 grid(dimsB.x / threads.x, dimsA.y / threads.y / 2);//Zmniejszam gridy o połowę
- // Create and start timer
- printf("Computing result using CUDA Kernel..%i \n", grid.y);
- // Performs warmup operation using matrixMul CUDA kernel
- matrixMulCUDA<32> << < grid, threads >> > (d_C, d_A, d_B, dimsA.x, dimsB.x);
- printf("done\n");
- cudaDeviceSynchronize();
- // Allocate CUDA events that we'll use for timing
- cudaEvent_t start;
- error = cudaEventCreate(&start);
- // Record the start event
- error = cudaEventRecord(start, NULL);
- // Execute the kernel
- int nIter = 100;
- for (int j = 0; j < nIter; j++)
- {
- if (block_size == 16)
- {
- matrixMulCUDA<16> << < grid, threads >> > (d_C, d_A, d_B, dimsA.x, dimsB.x);
- }
- else
- {
- matrixMulCUDA<32> << < grid, threads >> > (d_C, d_A, d_B, dimsA.x, dimsB.x);
- }
- }
- cudaEvent_t stop;
- error = cudaEventCreate(&stop);
- // Record the stop event
- error = cudaEventRecord(stop, NULL);
- // Wait for the stop event to complete
- error = cudaEventSynchronize(stop);
- float msecTotal = 0.0f;
- error = cudaEventElapsedTime(&msecTotal, start, stop);
- // Compute and print the performance
- float msecPerMatrixMul = msecTotal / nIter;
- double flopsPerMatrixMul = 2.0 * (double)dimsA.x * (double)dimsA.y * (double)dimsB.x;
- double gigaFlops = (flopsPerMatrixMul * 1.0e-9f) / (msecPerMatrixMul / 1000.0f);
- printf(
- "Performance= %.2f GFlop/s, Time= %.3f msec, Size= %.0f Ops, WorkgroupSize= %u threads/block\n",
- gigaFlops,
- msecPerMatrixMul,
- flopsPerMatrixMul,
- threads.x * threads.y);
- // Copy result from device to host
- error = cudaMemcpy(h_C, d_C, mem_size_C, cudaMemcpyDeviceToHost);
- if (error != cudaSuccess)
- {
- printf("cudaMemcpy (h_C,d_C) returned error code %d, line(%d)\n", error, __LINE__);
- exit(EXIT_FAILURE);
- }
- printf("Checking computed result for correctness: ");
- bool correct = true;
- //for (int i = 0; i < (int)(dimsC.x * dimsC.y); i++)
- //{
- // if (fabs(h_C[i] - (dimsA.x * valB)) > 1e-3)
- // {
- // //printf("Error! Matrix[%05d]=%.8f, ref=%.8f error term is > 1e-3\n", i, h_C[i], dimsA.x*valB);
- // correct = false;
- // }
- //}
- //printf("%s\n", correct ? "OK" : "FAIL");
- // Clean up memory
- free(h_A);
- free(h_B);
- free(h_C);
- cudaFree(d_A);
- cudaFree(d_B);
- cudaFree(d_C);
- cudaDeviceReset();
- }
- /**
- * Program main
- */
- int main(int argc, char **argv)
- {
- printf("[Matrix Multiply Using CUDA] - Starting...\n");
- if (checkCmdLineFlag(argc, (const char **)argv, "help") ||
- checkCmdLineFlag(argc, (const char **)argv, "?"))
- {
- printf("Usage -device=n (n >= 0 for deviceID)\n");
- printf(" -wA=WidthA -hA=HeightA (Width x Height of Matrix A)\n");
- printf(" -wB=WidthB -hB=HeightB (Width x Height of Matrix B)\n");
- printf(" Note: Outer matrix dimensions of A & B matrices must be equal.\n");
- exit(EXIT_SUCCESS);
- }
- // By default, we use device 0, otherwise we override the device ID based on what is provided at the command line
- int devID = 0;
- cudaError_t error;
- cudaDeviceProp deviceProp;
- error = cudaGetDevice(&devID);
- error = cudaGetDeviceProperties(&deviceProp, devID);
- // Use a larger block size for Fermi and above
- int block_size = (deviceProp.major < 2) ? 16 : 32;
- int i = 512;
- dim3 dimsA(i, i, 1);
- dim3 dimsB(i, i, 1);
- printf("MatrixA(%d,%d), MatrixB(%d,%d)\n", dimsA.x, dimsA.y, dimsB.x, dimsB.y);
- int matrix_result = matrixMultiply(argc, argv, block_size, dimsA, dimsB);
- std::cin >> i;
- exit(matrix_result);
- return 0;
- }
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement