#include <stdio.h>
#include <mpi.h>
#include <math.h>

#define PRECISION 0.000001   // integration step
#define RANGESIZE 1          // width of one subrange handed out at a time
#define DATA 0               // message tag: master -> slave, a [start,end] range
#define RESULT 1             // message tag: slave -> master, a partial result
#define FINISH 2             // message tag: master -> slave, shut down

#define DEBUG
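// integrand: sin(x)^2 / x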
double f(double x) {
    return sin(x)*sin(x)/x;
}
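// left-endpoint rectangle rule with step PRECISION over [a,b)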
double SimpleIntegration(double a,double b) {
    double i;
    double sum=0;
    for (i=a; i<b; i+=PRECISION)
        sum+=f(i)*PRECISION;
    return sum;
}
int main(int argc, char **argv) {
    int myrank,proccount;
    double a=1,b=100;
    double range[2];
    double result=0,resulttemp;
    int sentcount=0;
    int i;
    int flag;
    double time;
    MPI_Status status;
    // Initialize MPI
    MPI_Init(&argc, &argv);
    // find out my rank
    MPI_Comm_rank(MPI_COMM_WORLD, &myrank);
    // find out the number of processes in MPI_COMM_WORLD
    MPI_Comm_size(MPI_COMM_WORLD, &proccount);
    if (proccount<2) {
        printf("Run with at least 2 processes\n");
        MPI_Finalize();
        return -1;
    }
    // require at least four subranges per slave: two are sent up front and the rest
    // are handed out dynamically as results come back
    if (((b-a)/RANGESIZE)<4*(proccount-1)) {
        printf("More subranges needed\n");
        MPI_Finalize();
        return -1;
    }
    // now the master will distribute the data and slave processes will perform computations
    if (myrank==0) {
        range[0]=a;
        // first distribute some ranges to all slaves
        // (i runs from 2 to 2*proccount-1, so each slave rank i/2 gets two initial ranges)
        for(i=2; i<proccount*2; i++) {
            range[1]=range[0]+RANGESIZE;
#ifdef DEBUG
            printf("\nMaster sending range %f,%f to process %d",range[0],range[1],i/2);
            fflush(stdout);
#endif
            // send it to process i/2
            MPI_Send(range,2,MPI_DOUBLE,i/2,DATA,MPI_COMM_WORLD);
            sentcount++;
            range[0]=range[1];
        }

        do {
            // distribute remaining subranges to the processes which have completed their parts
            //***************************************************************************
            // receive everything that is currently available
            for (;;) {
                MPI_Iprobe(MPI_ANY_SOURCE,RESULT,MPI_COMM_WORLD,&flag,&status);
                if (flag == 0) {
                    break;
                }
                MPI_Recv(&resulttemp,1,MPI_DOUBLE,MPI_ANY_SOURCE,RESULT,MPI_COMM_WORLD,&status);
                result+=resulttemp;
#ifdef DEBUG
                printf("\nMaster received result %f from process %d",resulttemp,status.MPI_SOURCE);
                fflush(stdout);
#endif
                // check the sender and send some more data
                range[1]=range[0]+RANGESIZE;
                if (range[1]>b) range[1]=b;
#ifdef DEBUG
                printf("\nMaster sending range %f,%f to process %d",range[0],range[1],status.MPI_SOURCE);
                fflush(stdout);
#endif
                MPI_Send(range,2,MPI_DOUBLE,status.MPI_SOURCE,DATA,MPI_COMM_WORLD);
                range[0]=range[1];
            }
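            // each result drained above triggered exactly one new send, so the number of
            // results still outstanding stays at 2*(proccount-1) for the final collection loop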
            //***************************************************************************
            // the master computes its own portion here
            range[1]=range[0]+RANGESIZE;
            if (range[1]>b) range[1]=b;

            result+=SimpleIntegration(range[0],range[1]);

            range[0]=range[1];

        } while (range[1]<b);
        // now receive results from the processes
        for(i=2; i<proccount*2; i++) {
            time = MPI_Wtime();

            MPI_Recv(&resulttemp,1,MPI_DOUBLE,MPI_ANY_SOURCE,RESULT,MPI_COMM_WORLD,&status);
#ifdef DEBUG
            printf("\nMaster received result %f from process %d, in time: %f",resulttemp,status.MPI_SOURCE,MPI_Wtime()-time);
            fflush(stdout);
#endif
            result+=resulttemp;
        }
        // shut down the slaves
        for(i=1; i<proccount; i++) {
            MPI_Send(NULL,0,MPI_DOUBLE,i,FINISH,MPI_COMM_WORLD);
        }
        // now display the result
        printf("\nHi, I am process 0, the result is %f\n",result);
    }
    // ***************************************
    else {
        // slave
        // this is easy - just receive data and do the work
        do {
            time = MPI_Wtime();
            // busy-wait until a message from the master is available
            do {
                MPI_Iprobe(0,MPI_ANY_TAG,MPI_COMM_WORLD,&flag,&status);
            } while (flag == 0);
            printf("\nSlave %d waited %f for message",myrank,MPI_Wtime()-time);
            if (status.MPI_TAG==DATA) {
                MPI_Recv(range,2,MPI_DOUBLE,0,DATA,MPI_COMM_WORLD,&status);
                printf("\nSlave received range: %f %f\n", range[0], range[1]);
                // compute my part
                resulttemp=SimpleIntegration(range[0],range[1]);
                // send the result back
                MPI_Send(&resulttemp,1,MPI_DOUBLE,0,RESULT,MPI_COMM_WORLD);
            } else {
                // consume the (empty) FINISH message so no receive is left pending at finalize
                MPI_Recv(NULL,0,MPI_DOUBLE,0,FINISH,MPI_COMM_WORLD,&status);
            }
        } while (status.MPI_TAG!=FINISH);
    }
    // Shut down MPI
    MPI_Finalize();
    return 0;
}