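/*
 * Master-slave numerical integration of f(x) = sin(x)*sin(x)/x over [1,100] with MPI.
 * Process 0 (the master) hands out subranges of width RANGESIZE to the slave processes,
 * overlaps communication with computation using nonblocking sends and receives,
 * and accumulates the partial results. The DEBUG define enables progress messages.
 */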
#include <stdio.h>
#include <mpi.h>
#include <math.h>
#include <stdlib.h>

#define PRECISION 0.000001
#define RANGESIZE 1
#define DATA 0
#define RESULT 1
#define FINISH 2 // note: unused below; termination is signaled by an empty range instead
#define DEBUG

double f(double x)
{
    return sin(x) * sin(x) / x;
}
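
// Left-endpoint rectangle rule over [a,b) with step PRECISION.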
double SimpleIntegration(double a, double b)
{
    double i;
    double sum = 0;
    for (i = a; i < b; i += PRECISION)
        sum += f(i) * PRECISION;
    return sum;
}
int main(int argc, char** argv)
{
    MPI_Request* requests;
    int requestcount = 0;
    int requestcompleted;
    int myrank, proccount;
    double a = 1, b = 100;
    double* ranges;
    double range[2];
    double result = 0;
    double* resulttemp;
    int sentcount = 0;
    int recvcount = 0;
    int i;
    MPI_Status status;
    // Initialize MPI
    MPI_Init(&argc, &argv);
    // find out my rank
    MPI_Comm_rank(MPI_COMM_WORLD, &myrank);
    // find out the number of processes in MPI_COMM_WORLD
    MPI_Comm_size(MPI_COMM_WORLD, &proccount);
    if (proccount < 2) {
        printf("\nRun with at least 2 processes");
        MPI_Finalize();
        return -1;
    }
    if (((b - a) / RANGESIZE) < 2 * (proccount - 1)) {
        printf("\nMore subranges needed");
        MPI_Finalize();
        return -1;
    }
    // now the master will distribute the data and slave processes will perform computations
    if (myrank == 0) {
        requests = (MPI_Request*)malloc(3 * (proccount - 1) * sizeof(MPI_Request));
        if (!requests) {
            printf("\nNot enough memory");
            MPI_Finalize();
            return -1;
        }
        ranges = (double*)malloc(4 * (proccount - 1) * sizeof(double));
        if (!ranges) {
            printf("\nNot enough memory");
            MPI_Finalize();
            return -1;
        }
        resulttemp = (double*)malloc((proccount - 1) * sizeof(double));
        if (!resulttemp) {
            printf("\nNot enough memory");
            MPI_Finalize();
            return -1;
        }
        range[0] = a;
        // first distribute some ranges to all slaves
        for (i = 1; i < proccount; i++) {
            range[1] = range[0] + RANGESIZE;
#ifdef DEBUG
            printf("\nMaster sending range %f,%f to process %d", range[0], range[1], i);
            fflush(stdout);
#endif
            // send it to process i
            MPI_Send(range, 2, MPI_DOUBLE, i, DATA, MPI_COMM_WORLD);
            sentcount++;
            range[0] = range[1];
        }
        // requests[0..proccount-2] are for receiving results, requests[proccount-1..2*proccount-3] for sending data
        // (the last proccount-1 slots are used later for the finishing sends)
        for (i = 0; i < 2 * (proccount - 1); i++)
            requests[i] = MPI_REQUEST_NULL; // none active at this point
        // start receiving results from the slaves
        for (i = 1; i < proccount; i++)
            MPI_Irecv(&(resulttemp[i - 1]), 1, MPI_DOUBLE, i, RESULT, MPI_COMM_WORLD, &(requests[i - 1]));
        // start sending new data parts to the slaves
        for (i = 1; i < proccount; i++) {
            range[1] = range[0] + RANGESIZE;
#ifdef DEBUG
            printf("\nMaster sending range %f,%f to process %d", range[0], range[1], i);
            fflush(stdout);
#endif
            ranges[2 * i - 2] = range[0];
            ranges[2 * i - 1] = range[1];
            // send it to process i
            MPI_Isend(&(ranges[2 * i - 2]), 2, MPI_DOUBLE, i, DATA, MPI_COMM_WORLD, &(requests[proccount - 2 + i]));
            sentcount++;
            range[0] = range[1];
        }
        while (range[1] < b) {
#ifdef DEBUG
            printf("\nMaster waiting for completion of requests");
            fflush(stdout);
#endif
            // wait for completion of any of the requests
            MPI_Waitany(2 * proccount - 2, requests, &requestcompleted, MPI_STATUS_IGNORE);
            // if it is a result then send new data to the process
            // and add the result
            if (requestcompleted < (proccount - 1)) {
                result += resulttemp[requestcompleted];
                recvcount++;
#ifdef DEBUG
                printf("\nMaster received %d result %f from process %d", recvcount, resulttemp[requestcompleted], requestcompleted + 1);
                fflush(stdout);
#endif
                // first check if the send has terminated
                MPI_Wait(&(requests[proccount - 1 + requestcompleted]), MPI_STATUS_IGNORE);
                // now send some new data portion to this process
                range[1] = range[0] + RANGESIZE;
                if (range[1] > b)
                    range[1] = b;
#ifdef DEBUG
                printf("\nMaster sending range %f,%f to process %d", range[0], range[1], requestcompleted + 1);
                fflush(stdout);
#endif
                ranges[2 * requestcompleted] = range[0];
                ranges[2 * requestcompleted + 1] = range[1];
                MPI_Isend(&(ranges[2 * requestcompleted]), 2, MPI_DOUBLE, requestcompleted + 1, DATA, MPI_COMM_WORLD, &(requests[proccount - 1 + requestcompleted]));
                sentcount++;
                range[0] = range[1];
                // now issue a corresponding recv
                MPI_Irecv(&(resulttemp[requestcompleted]), 1, MPI_DOUBLE, requestcompleted + 1, RESULT, MPI_COMM_WORLD, &(requests[requestcompleted]));
            }
        }
        // now send the FINISHING ranges to the slaves
        // shut down the slaves
        range[0] = range[1];
        for (i = 1; i < proccount; i++) {
#ifdef DEBUG
            printf("\nMaster sending FINISHING range %f,%f to process %d", range[0], range[1], i);
            fflush(stdout);
#endif
            ranges[2 * i - 4 + 2 * proccount] = range[0];
            ranges[2 * i - 3 + 2 * proccount] = range[1];
            MPI_Isend(&(ranges[2 * i - 4 + 2 * proccount]), 2, MPI_DOUBLE, i, DATA, MPI_COMM_WORLD, &(requests[2 * proccount - 3 + i]));
        }
#ifdef DEBUG
        printf("\nMaster before MPI_Waitall with total proccount=%d", proccount);
        fflush(stdout);
#endif
        // now receive results from the processes - that is finalize the pending requests
        MPI_Waitall(3 * proccount - 3, requests, MPI_STATUSES_IGNORE);
#ifdef DEBUG
        printf("\nMaster after MPI_Waitall with total proccount=%d", proccount);
        fflush(stdout);
#endif
        // now simply add the results
        for (i = 0; i < (proccount - 1); i++) {
            result += resulttemp[i];
        }
        // now receive results for the initial sends
        for (i = 0; i < (proccount - 1); i++) {
#ifdef DEBUG
            printf("\nMaster receiving result from process %d", i + 1);
            fflush(stdout);
#endif
            MPI_Recv(&(resulttemp[i]), 1, MPI_DOUBLE, i + 1, RESULT, MPI_COMM_WORLD, &status);
            result += resulttemp[i];
            recvcount++;
#ifdef DEBUG
            printf("\nMaster received %d result %f from process %d", recvcount, resulttemp[i], i + 1);
            fflush(stdout);
#endif
        }
        // now display the result
        printf("\nHi, I am process 0, the result is %f\n", result);
    }
    else { // slave
        requests = (MPI_Request*)malloc(2 * sizeof(MPI_Request));
        if (!requests) {
            printf("\nNot enough memory");
            MPI_Finalize();
            return -1;
        }
        requests[0] = requests[1] = MPI_REQUEST_NULL;
        ranges = (double*)malloc(2 * sizeof(double));
        if (!ranges) {
            printf("\nNot enough memory");
            MPI_Finalize();
            return -1;
        }
        resulttemp = (double*)malloc(2 * sizeof(double));
        if (!resulttemp) {
            printf("\nNot enough memory");
            MPI_Finalize();
            return -1;
        }
        // first receive the initial data
        MPI_Recv(range, 2, MPI_DOUBLE, 0, DATA, MPI_COMM_WORLD, &status);
#ifdef DEBUG
        printf("\nSlave received range %f,%f", range[0], range[1]);
        fflush(stdout);
#endif
        while (range[0] < range[1]) { // if there is some data to process
            // before computing the next part start receiving a new data part
            MPI_Irecv(ranges, 2, MPI_DOUBLE, 0, DATA, MPI_COMM_WORLD, &(requests[0]));
            // compute my part
            resulttemp[1] = SimpleIntegration(range[0], range[1]);
#ifdef DEBUG
            printf("\nSlave just computed range %f,%f", range[0], range[1]);
            fflush(stdout);
#endif
            // now finish receiving the new part
            // and finish sending the previous results back to the master
            MPI_Waitall(2, requests, MPI_STATUSES_IGNORE);
#ifdef DEBUG
            printf("\nSlave just received range %f,%f", ranges[0], ranges[1]);
            fflush(stdout);
#endif
            range[0] = ranges[0];
            range[1] = ranges[1];
            resulttemp[0] = resulttemp[1];
            // and start sending the results back
            MPI_Isend(&resulttemp[0], 1, MPI_DOUBLE, 0, RESULT, MPI_COMM_WORLD, &(requests[1]));
#ifdef DEBUG
            printf("\nSlave just initiated send to master with result %f", resulttemp[0]);
            fflush(stdout);
#endif
        }
        // now finish sending the last results to the master
        MPI_Wait(&(requests[1]), MPI_STATUS_IGNORE);
    }
    // Shut down MPI
    MPI_Finalize();
#ifdef DEBUG
    printf("\nProcess %d finished", myrank);
    fflush(stdout);
#endif
    return 0;
}
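
A possible way to build and run this listing, assuming it is saved as integration.c (the file name is an assumption, not part of the paste):

mpicc integration.c -o integration -lm
mpirun -np 4 ./integration

At least two MPI processes are required, and -lm may be needed for sin() depending on the toolchain.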