/**
 * Copyright 1993-2012 NVIDIA Corporation.  All rights reserved.
 *
 * Please refer to the NVIDIA end user license agreement (EULA) associated
 * with this source code for terms and conditions that govern your use of
 * this software. Any use, reproduction, disclosure, or distribution of
 * this software and related documentation outside the terms of the EULA
 * is strictly prohibited.
 *
 */

/**
 * Matrix multiplication: C = A * B.
 * Host code.
 *
 * This sample implements matrix multiplication as described in Chapter 3
 * of the programming guide.
 * It has been written for clarity of exposition to illustrate various CUDA
 * programming principles, not with the goal of providing the most
 * performant generic kernel for matrix multiplication.
 *
 * See also:
 * V. Volkov and J. Demmel, "Benchmarking GPUs to tune dense linear algebra,"
 * in Proc. 2008 ACM/IEEE Conf. on Supercomputing (SC '08),
 * Piscataway, NJ: IEEE Press, 2008, Art. 31:1-11.
 */

// System includes
#define WIN32
#include <stdio.h>
#include <assert.h>

// CUDA runtime
#include <cuda_runtime.h>

// Helper functions and utilities to work with CUDA
#include <helper_functions.h>

/**
 * Matrix multiplication (CUDA Kernel) on the device: C = A * B
 * wA is A's width and wB is B's width
 */
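// Both kernels below evaluate each output element as an inner product over
// A's width:
//
//     C[row][col] = sum_{k = 0 .. wA-1} A[row][k] * B[k][col]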

template <int BLOCK_SIZE> __global__ void
matrixMulFirst(float *C, float *A, float *B, int width)
{
    int tx = threadIdx.x;
    int ty = threadIdx.y;

    for (int i = tx; i < width; i += BLOCK_SIZE) {
        for (int j = ty; j < width; j += BLOCK_SIZE) {
            float sum = 0;

            for (int k = 0; k < width; ++k)
                sum += A[j * width + k] * B[k * width + i];

            C[j * width + i] = sum;
        }
    }
}
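
// matrixMulFirst assumes square matrices of the given width and is launched
// below from a single block: one BLOCK_SIZE x BLOCK_SIZE block of threads
// strides over the whole output. A minimal launch sketch (assuming a host
// variable `width` holds the square matrix dimension) would be:
//
//     dim3 threads(16, 16);
//     dim3 grid(1, 1);
//     matrixMulFirst<16><<<grid, threads>>>(d_C, d_A, d_B, width);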

template <int BLOCK_SIZE> __global__ void
matrixMulCUDA(float *C, float *A, float *B, int wA, int wB)
{
    // Block index
    int bx = blockIdx.x;
    int by = blockIdx.y;

    // Thread index
    int tx = threadIdx.x;
    int ty = threadIdx.y;

    // Index of the first sub-matrix of A processed by the block
    int aBegin = wA * BLOCK_SIZE * by;

    // Index of the last sub-matrix of A processed by the block
    int aEnd = aBegin + wA - 1;

    // Step size used to iterate through the sub-matrices of A
    int aStep = BLOCK_SIZE;

    // Index of the first sub-matrix of B processed by the block
    int bBegin = BLOCK_SIZE * bx;

    // Step size used to iterate through the sub-matrices of B
    int bStep = BLOCK_SIZE * wB;

    // Csub is used to store the element of the block sub-matrix
    // that is computed by the thread
    float Csub = 0;

    // Loop over all the sub-matrices of A and B
    // required to compute the block sub-matrix
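    // Each iteration advances a by one tile to the right along A's row of
    // tiles (aStep = BLOCK_SIZE columns) and b by one tile down B's column
    // of tiles (bStep = BLOCK_SIZE rows, i.e. BLOCK_SIZE * wB elements).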
    for (int a = aBegin, b = bBegin;
         a <= aEnd;
         a += aStep, b += bStep)
    {
        // Declaration of the shared memory array As used to
        // store the sub-matrix of A
        __shared__ float As[BLOCK_SIZE][BLOCK_SIZE];

        // Declaration of the shared memory array Bs used to
        // store the sub-matrix of B
        __shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];

        // Load the matrices from device memory
        // to shared memory; each thread loads
        // one element of each matrix
        As[ty][tx] = A[a + wA * ty + tx];
        Bs[ty][tx] = B[b + wB * ty + tx];

        // Synchronize to make sure the matrices are loaded
        __syncthreads();

        // Multiply the two matrices together;
        // each thread computes one element
        // of the block sub-matrix
#pragma unroll
        for (int k = 0; k < BLOCK_SIZE; ++k)
        {
            Csub += As[ty][k] * Bs[k][tx];
        }

        // Synchronize to make sure that the preceding
        // computation is done before loading two new
        // sub-matrices of A and B in the next iteration
        __syncthreads();
    }

    // Write the block sub-matrix to device memory;
    // each thread writes one element
    int c = wB * BLOCK_SIZE * by + BLOCK_SIZE * bx;
    C[c + wB * ty + tx] = Csub;
}
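
// The host code below times matrixMulFirst; the tiled matrixMulCUDA kernel
// instead computes exactly one output element per thread, so it expects a
// grid that tiles the whole C matrix. A launch sketch (assuming wA, wB and
// hA are host-side ints holding the matrix dimensions, with hA and wB
// multiples of 16) would be:
//
//     dim3 threads(16, 16);
//     dim3 grid(wB / 16, hA / 16);
//     matrixMulCUDA<16><<<grid, threads>>>(d_C, d_A, d_B, wA, wB);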

void constantInit(float *data, int size, float val)
{
    for (int i = 0; i < size; ++i)
    {
        data[i] = val;
    }
}

/**
 * Run a simple test of matrix multiplication using CUDA
 */
int matrixMultiply(int argc, char **argv, int block_size, dim3 &dimsA, dim3 &dimsB)
{
    // Allocate host memory for matrices A and B
    unsigned int size_A = dimsA.x * dimsA.y;
    unsigned int mem_size_A = sizeof(float) * size_A;
    float *h_A = (float *)malloc(mem_size_A);
    unsigned int size_B = dimsB.x * dimsB.y;
    unsigned int mem_size_B = sizeof(float) * size_B;
    float *h_B = (float *)malloc(mem_size_B);

    // Initialize host memory
    const float valB = 0.01f;
    constantInit(h_A, size_A, 1.0f);
    constantInit(h_B, size_B, valB);

    // Allocate device memory
    float *d_A, *d_B, *d_C;

    // Allocate host matrix C
    dim3 dimsC(dimsB.x, dimsA.y, 1);
    unsigned int mem_size_C = dimsC.x * dimsC.y * sizeof(float);
    float *h_C = (float *)malloc(mem_size_C);

    if (h_C == NULL)
    {
        fprintf(stderr, "Failed to allocate host matrix C!\n");
        exit(EXIT_FAILURE);
    }

    cudaError_t error;

    error = cudaMalloc((void **)&d_A, mem_size_A);

    if (error != cudaSuccess)
    {
        printf("cudaMalloc d_A returned error code %d, line(%d)\n", error, __LINE__);
        exit(EXIT_FAILURE);
    }

    error = cudaMalloc((void **)&d_B, mem_size_B);

    if (error != cudaSuccess)
    {
        printf("cudaMalloc d_B returned error code %d, line(%d)\n", error, __LINE__);
        exit(EXIT_FAILURE);
    }

    error = cudaMalloc((void **)&d_C, mem_size_C);

    if (error != cudaSuccess)
    {
        printf("cudaMalloc d_C returned error code %d, line(%d)\n", error, __LINE__);
        exit(EXIT_FAILURE);
    }

    // Copy host memory to device
    error = cudaMemcpy(d_A, h_A, mem_size_A, cudaMemcpyHostToDevice);

    if (error != cudaSuccess)
    {
        printf("cudaMemcpy (d_A,h_A) returned error code %d, line(%d)\n", error, __LINE__);
        exit(EXIT_FAILURE);
    }

    error = cudaMemcpy(d_B, h_B, mem_size_B, cudaMemcpyHostToDevice);

    if (error != cudaSuccess)
    {
        printf("cudaMemcpy (d_B,h_B) returned error code %d, line(%d)\n", error, __LINE__);
        exit(EXIT_FAILURE);
    }

    // Setup execution parameters
    dim3 threads(block_size, block_size);
    dim3 grid(1, 1);
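    // Note: a single-block grid is enough here because matrixMulFirst
    // strides one BLOCK_SIZE x BLOCK_SIZE block over the entire output.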

    // Create and start timer
    printf("Computing result using CUDA Kernel...\n");

    // Perform a warmup launch so one-time initialization cost is not
    // included in the timed runs below
    if (block_size == 16)
    {
        matrixMulFirst<16><<<grid, threads>>>(d_C, d_A, d_B, dimsA.x);
    }
    else
    {
        matrixMulFirst<8><<<grid, threads>>>(d_C, d_A, d_B, dimsA.x);
    }

    printf("done\n");

    cudaDeviceSynchronize();

    // Allocate CUDA events that we'll use for timing
    cudaEvent_t start;
    error = cudaEventCreate(&start);

    if (error != cudaSuccess)
    {
        fprintf(stderr, "Failed to create start event (error code %s)!\n", cudaGetErrorString(error));
        exit(EXIT_FAILURE);
    }

    cudaEvent_t stop;
    error = cudaEventCreate(&stop);

    if (error != cudaSuccess)
    {
        fprintf(stderr, "Failed to create stop event (error code %s)!\n", cudaGetErrorString(error));
        exit(EXIT_FAILURE);
    }

    // Record the start event
    error = cudaEventRecord(start, NULL);

    if (error != cudaSuccess)
    {
        fprintf(stderr, "Failed to record start event (error code %s)!\n", cudaGetErrorString(error));
        exit(EXIT_FAILURE);
    }

    // Execute the kernel
    int nIter = 30;

    for (int j = 0; j < nIter; j++)
    {
        if (block_size == 8)
        {
            matrixMulFirst<8><<<grid, threads>>>(d_C, d_A, d_B, dimsA.x);
        }
        else if (block_size == 16)
        {
            matrixMulFirst<16><<<grid, threads>>>(d_C, d_A, d_B, dimsA.x);
        }
        else
        {
            printf("Error: unsupported block size %d\n", block_size);
        }
    }

    // Record the stop event
    error = cudaEventRecord(stop, NULL);

    if (error != cudaSuccess)
    {
        fprintf(stderr, "Failed to record stop event (error code %s)!\n", cudaGetErrorString(error));
        exit(EXIT_FAILURE);
    }

    // Wait for the stop event to complete
    error = cudaEventSynchronize(stop);

    if (error != cudaSuccess)
    {
        fprintf(stderr, "Failed to synchronize on the stop event (error code %s)!\n", cudaGetErrorString(error));
        exit(EXIT_FAILURE);
    }

    float msecTotal = 0.0f;
    error = cudaEventElapsedTime(&msecTotal, start, stop);

    if (error != cudaSuccess)
    {
        fprintf(stderr, "Failed to get time elapsed between events (error code %s)!\n", cudaGetErrorString(error));
        exit(EXIT_FAILURE);
    }

    // Compute and print the performance
    float msecPerMatrixMul = msecTotal / nIter;
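    // Each of the dimsA.y x dimsB.x output elements needs dimsA.x multiplies
    // and dimsA.x adds, hence the factor of 2 in the flop count below.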
    double flopsPerMatrixMul = 2.0 * (double)dimsA.x * (double)dimsA.y * (double)dimsB.x;
    double gigaFlops = (flopsPerMatrixMul * 1.0e-9f) / (msecPerMatrixMul / 1000.0f);
    printf(
        "Performance= %.2f GFlop/s, Time= %.3f msec, Size= %.0f Ops, WorkgroupSize= %u threads/block\n",
        gigaFlops,
        msecPerMatrixMul,
        flopsPerMatrixMul,
        threads.x * threads.y);

    // Copy result from device to host
    error = cudaMemcpy(h_C, d_C, mem_size_C, cudaMemcpyDeviceToHost);

    if (error != cudaSuccess)
    {
        printf("cudaMemcpy (h_C,d_C) returned error code %d, line(%d)\n", error, __LINE__);
        exit(EXIT_FAILURE);
    }

    printf("Checking computed result for correctness: ");
    bool correct = true;

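    // A is all 1.0f and B is all valB, so every element of C should equal
    // dimsA.x * valB; compare against an absolute tolerance of 1e-5.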
    for (int i = 0; i < (int)(dimsC.x * dimsC.y); i++)
    {
        if (fabs(h_C[i] - (dimsA.x * valB)) > 1e-5)
        {
            printf("Error! Matrix[%05d]=%.8f, ref=%.8f error term is > 1e-5\n", i, h_C[i], dimsA.x * valB);
            correct = false;
        }
    }

    printf("%s\n", correct ? "OK" : "FAIL");

    // Clean up memory
    free(h_A);
    free(h_B);
    free(h_C);
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);

    printf("\nNote: For peak performance, please refer to the matrixMulCUBLAS example.\n");

    cudaDeviceReset();

    if (correct)
    {
        return EXIT_SUCCESS;
    }
    else
    {
        return EXIT_FAILURE;
    }
}


/**
 * Program main
 */
int main(int argc, char **argv)
{
    printf("[Matrix Multiply Using CUDA] - Starting...\n");

    if (checkCmdLineFlag(argc, (const char **)argv, "help") ||
        checkCmdLineFlag(argc, (const char **)argv, "?"))
    {
        printf("Usage -device=n (n >= 0 for deviceID)\n");
        printf("      -wA=WidthA -hA=HeightA (Width x Height of Matrix A)\n");
        printf("      -wB=WidthB -hB=HeightB (Width x Height of Matrix B)\n");
        printf("  Note: The width of Matrix A must equal the height of Matrix B.\n");

        exit(EXIT_SUCCESS);
    }

    // By default, we use device 0; otherwise we override the device ID based on what is provided at the command line
    int devID = 0;

    if (checkCmdLineFlag(argc, (const char **)argv, "device"))
    {
        devID = getCmdLineArgumentInt(argc, (const char **)argv, "device");
        cudaSetDevice(devID);
    }

    cudaError_t error;
    cudaDeviceProp deviceProp;
    error = cudaGetDevice(&devID);

    if (error != cudaSuccess)
    {
        printf("cudaGetDevice returned error code %d, line(%d)\n", error, __LINE__);
    }

    error = cudaGetDeviceProperties(&deviceProp, devID);

    if (deviceProp.computeMode == cudaComputeModeProhibited)
    {
        fprintf(stderr, "Error: device is running in <Compute Mode Prohibited>, no threads can use ::cudaSetDevice().\n");
        exit(EXIT_SUCCESS);
    }

    if (error != cudaSuccess)
    {
        printf("cudaGetDeviceProperties returned error code %d, line(%d)\n", error, __LINE__);
    }
    else
    {
        printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n", devID, deviceProp.name, deviceProp.major, deviceProp.minor);
    }

    // The original sample picks a larger block size for Fermi and above;
    // here the block size is fixed at 8.
    //int block_size = (deviceProp.major < 2) ? 16 : 32;
    int block_size = 8;

    printf("\nCompute capability (major): %d\n\n", deviceProp.major);
    int constant_size = 16;

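    // With the defaults above (constant_size = 16, block_size = 8), A and B
    // are square 128 x 128 matrices unless overridden on the command line.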
    dim3 dimsA(constant_size * block_size, constant_size * block_size, 1);
    dim3 dimsB(constant_size * block_size, constant_size * block_size, 1);

    // width of Matrix A
    if (checkCmdLineFlag(argc, (const char **)argv, "wA"))
    {
        dimsA.x = getCmdLineArgumentInt(argc, (const char **)argv, "wA");
    }

    // height of Matrix A
    if (checkCmdLineFlag(argc, (const char **)argv, "hA"))
    {
        dimsA.y = getCmdLineArgumentInt(argc, (const char **)argv, "hA");
    }

    // width of Matrix B
    if (checkCmdLineFlag(argc, (const char **)argv, "wB"))
    {
        dimsB.x = getCmdLineArgumentInt(argc, (const char **)argv, "wB");
    }

    // height of Matrix B
    if (checkCmdLineFlag(argc, (const char **)argv, "hB"))
    {
        dimsB.y = getCmdLineArgumentInt(argc, (const char **)argv, "hB");
    }

    if (dimsA.x != dimsB.y)
    {
        printf("Error: the width of Matrix A must equal the height of Matrix B. (%d != %d)\n",
            dimsA.x, dimsB.y);
        exit(EXIT_FAILURE);
    }

    printf("MatrixA(%d,%d), MatrixB(%d,%d)\n", dimsA.x, dimsA.y, dimsB.x, dimsB.y);

    int matrix_result = matrixMultiply(argc, argv, block_size, dimsA, dimsB);

    exit(matrix_result);
}