Advertisement
Guest User

Untitled

a guest
Jul 28th, 2017
80
0
Never
Not a member of Pastebin yet? Sign Up, it unlocks many cool features!
C 2.95 KB | None | 0 0
  1. #include <mpi.h>
  2. #include <stdio.h>
  3.  
  4.  
  5. extern void compDotProduct(int, int, double*, double*, double*);
  6.  
  7. void distribute(int my_rank, int p, int *rows, int n, double *A, double *b)
  8.  
  9. /*
  10.   Distribute an m x n matrix A and and an n vector b on p processes.
  11.   Input
  12.      my_rank: rank of the current process
  13.      p      : number of processes
  14.      rows:    array of size p. p[i] contains the number of rows on process i
  15.      A      : an array of size m*n = (sum_i rows[i])*n
  16.      n      : number of columns in A
  17.      b      : vector of size n
  18.  
  19.   Process 0 distributes the matrix A by rows, where process i (i=0,...,p) gets
  20.   rows[i] rows in order, and broadcast the b vector.
  21. */
  22. {
  23.   int tag    = 0;
  24.   int offset, dest;
  25. //------
  26.     int prev, next;
  27.         MPI_Request req[2];
  28.         MPI_Status stat[2];
  29.  
  30.         prev = my_rank - 1;
  31.         next = my_rank + 1;
  32.  
  33.         if(my_rank == 0) prev = p - 1;
  34.         if(my_rank == p - 1) next = 0;
  35. //------
  36.   MPI_Status status;
  37.  
  38.   if (my_rank ==0)
  39.     {
  40.       offset = 0;
  41.       for (dest = 1; dest < p; dest++)
  42.     {
  43.       // offset of the begining of data that is sent to dest
  44.       offset += rows[dest-1];
  45.       MPI_Isend(A + offset*n, rows[dest]*n, MPI_DOUBLE, next, tag, MPI_COMM_WORLD, &req[0]);
  46.     }
  47.     }
  48.   else
  49.     MPI_Irecv(A, rows[my_rank]*n, MPI_DOUBLE, prev, tag, MPI_COMM_WORLD, &req[1]);
  50.  
  51.   // Broadcast b
  52.   MPI_Bcast(b, n, MPI_DOUBLE, 0, MPI_COMM_WORLD);
  53. //-----
  54.     MPI_Waitall(1, req, stat);
  55. //-----
  56. }
  57.  
  58.  
  59. void collect(int my_rank, int p, int *rows, double *y)
  60. {
  61.   /*
  62.      Process i contains rows[i] elements of vector y.
  63.      Collect all the elements of y on process 0. The ordering of the blocks is 0,...,p-1
  64.  
  65.      Input
  66.      my_rank     rank of current process
  67.      p           number of processes
  68.      rows        an array of size p, where rows[i] is the number of elements of y on process i
  69.  
  70.      Output
  71.      y           an array of size m = sum_i rows[i]
  72.   */
  73. //------
  74.         int prev, next;
  75.     MPI_Request req[2];
  76.         MPI_Status stat[2];
  77.    
  78.     prev = my_rank - 1;
  79.     next = my_rank + 1;
  80.  
  81.     if(my_rank == 0) prev = p - 1;
  82.     if(my_rank == p - 1) next = 0;
  83. //------
  84.   int tag=0, source, offset;
  85.   MPI_Status status;
  86.  
  87.   if (my_rank !=0)
  88.     MPI_Isend(y, rows[my_rank], MPI_DOUBLE, next, tag, MPI_COMM_WORLD, &req[0]);
  89.   else
  90.     {
  91.       offset = 0;
  92.       for (source = 1; source < p; source++)
  93.     {
  94.       // offset of the beginning of data that is received fro process source
  95.       offset += rows[source-1];
  96.       MPI_Irecv(y+offset, rows[source], MPI_DOUBLE, prev, tag, MPI_COMM_WORLD,  &req[1]);
  97.     }
  98.     }
  99. //-----
  100.     MPI_Waitall(1,req,stat);
  101. //-----
  102. }
  103.  
/*
  Compute the matrix-vector product y = A*b in parallel on p processes.

  my_rank: rank of the current process
  p      : number of processes
  rows   : array of size p; rows[i] is the number of rows of A (and elements
           of y) owned by process i
  n      : number of columns of A (and size of b)
  A      : matrix data (full matrix on process 0; local block elsewhere)
  b      : input vector of size n (valid on process 0 on entry)
  y      : output vector; on exit, process 0 holds all sum_i rows[i] elements

  compDotProduct is declared extern; presumably it computes the local
  rows[my_rank] x n block of A times b into y — TODO confirm against its
  definition.
*/
void parallelMatrixVectorProduct(int my_rank, int p, int *rows, int n, double *A, double *b, double *y)
{
  // Distribute A and b
  distribute(my_rank, p, rows, n, A, b);

  // Compute product on process my_rank
  compDotProduct(rows[my_rank], n, A, b, y);

  // Collect the vector y
  collect(my_rank, p, rows, y);
}
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement