// Master-slave MPI program: numerically integrates f(x) = sin(x)*sin(x)/x
// over [a,b] by handing out subranges of width RANGESIZE to slave processes.

#include <stdio.h>
#include <mpi.h>
#include <math.h>
#include <stdlib.h>

#define PRECISION 0.000001
#define RANGESIZE 1
#define DATA 0
#define RESULT 1
#define FINISH 2
//#define DEBUG

// the integrand
double f( double x ) {
    return sin( x )*sin( x )/x;
}

// rectangle-rule integration of f over [a,b) with step PRECISION
double SimpleIntegration( double a, double b ) {
    double i;
    double sum = 0;
    for ( i = a; i<b; i += PRECISION )
        sum += f( i )*PRECISION;
    return sum;
}

int main( int argc, char **argv ) {
    MPI_Request *requests;
    int requestcount = 0;
    int requestcompleted;
    int myrank, proccount;
    double a = 1, b = 100;
    double *ranges;
    double range[ 2 ];
    double result = 0;
    double *resulttemp;
    int sentcount = 0;
    int recvcount = 0;
    int i;
    MPI_Status status;

    // Initialize MPI
    MPI_Init( &argc, &argv );

    // find out my rank
    MPI_Comm_rank( MPI_COMM_WORLD, &myrank );

    // find out the number of processes in MPI_COMM_WORLD
    MPI_Comm_size( MPI_COMM_WORLD, &proccount );

    if ( proccount<2 ) {
        printf( "Run with at least 2 processes\n" );
        MPI_Finalize();
        return -1;
    }
    if ( ( ( b-a )/RANGESIZE )<2*( proccount-1 ) ) {
        printf( "More subranges needed\n" );
        MPI_Finalize();
        return -1;
    }

    // now the master will distribute the data and slave processes will perform computations
    if ( myrank==0 ) {
        requests = ( MPI_Request * ) malloc( 3*( proccount-1 )*sizeof( MPI_Request ) );
        if ( !requests ) {
            printf( "\nNot enough memory" );
            MPI_Finalize();
            return -1;
        }

        ranges = ( double * ) malloc( 4*( proccount-1 )*sizeof( double ) );
        if ( !ranges ) {
            printf( "\nNot enough memory" );
            MPI_Finalize();
            return -1;
        }

        resulttemp = ( double * ) malloc( ( proccount-1 )*sizeof( double ) );
        if ( !resulttemp ) {
            printf( "\nNot enough memory" );
            MPI_Finalize();
            return -1;
        }

        range[ 0 ] = a;
        // first distribute some ranges to all slaves
        for ( i = 1; i<proccount; i++ ) {
            range[ 1 ] = range[ 0 ]+RANGESIZE;
#ifdef DEBUG
            printf( "\nMaster sending range %f,%f to process %d", range[ 0 ], range[ 1 ], i );
            fflush( stdout );
#endif
            // send it to process i
            MPI_Send( range, 2, MPI_DOUBLE, i, DATA, MPI_COMM_WORLD );
            sentcount++;
            range[ 0 ] = range[ 1 ];
        }

        // the first proccount-1 requests will be for receiving, the latter ones for sending
        for ( i = 0; i<2*( proccount-1 ); i++ )
            requests[ i ] = MPI_REQUEST_NULL; // none active at this point

        // start receiving results from the slaves
        for ( i = 1; i<proccount; i++ )
            MPI_Irecv( &( resulttemp[ i-1 ] ), 1, MPI_DOUBLE, i, RESULT, MPI_COMM_WORLD, &( requests[ i-1 ] ) );

        // start sending new data parts to the slaves
        for ( i = 1; i<proccount; i++ ) {
            range[ 1 ] = range[ 0 ]+RANGESIZE;
#ifdef DEBUG
            printf( "\nMaster sending range %f,%f to process %d", range[ 0 ], range[ 1 ], i );
            fflush( stdout );
#endif
            ranges[ 2*i-2 ] = range[ 0 ];
            ranges[ 2*i-1 ] = range[ 1 ];

            // send it to process i
            MPI_Isend( &( ranges[ 2*i-2 ] ), 2, MPI_DOUBLE, i, DATA, MPI_COMM_WORLD, &( requests[ proccount-2+i ] ) );
            sentcount++;
            range[ 0 ] = range[ 1 ];
        }

        while ( range[ 1 ]<b ) {
#ifdef DEBUG
            printf( "\nMaster waiting for completion of requests" );
            fflush( stdout );
#endif
            // wait for completion of any of the requests
            MPI_Waitany( 2*proccount-2, requests, &requestcompleted, MPI_STATUS_IGNORE );

            // if it is a result then send new data to the process and add the result
            if ( requestcompleted<( proccount-1 ) ) {
                result += resulttemp[ requestcompleted ];
                recvcount++;
#ifdef DEBUG
                printf( "\nMaster received %d result %f from process %d", recvcount, resulttemp[ requestcompleted ], requestcompleted+1 );
                fflush( stdout );
#endif
                // first check if the send has terminated
                MPI_Wait( &( requests[ proccount-1+requestcompleted ] ), MPI_STATUS_IGNORE );

                // now send some new data portion to this process
                range[ 1 ] = range[ 0 ]+RANGESIZE;
                if ( range[ 1 ]>b ) range[ 1 ] = b;
#ifdef DEBUG
                printf( "\nMaster sending range %f,%f to process %d", range[ 0 ], range[ 1 ], requestcompleted+1 );
                fflush( stdout );
#endif
                ranges[ 2*requestcompleted ] = range[ 0 ];
                ranges[ 2*requestcompleted+1 ] = range[ 1 ];
                MPI_Isend( &( ranges[ 2*requestcompleted ] ), 2, MPI_DOUBLE, requestcompleted+1, DATA, MPI_COMM_WORLD, &( requests[ proccount-1+requestcompleted ] ) );
                sentcount++;
                range[ 0 ] = range[ 1 ];
                // now issue a corresponding recv
                MPI_Irecv( &( resulttemp[ requestcompleted ] ), 1, MPI_DOUBLE, requestcompleted+1, RESULT, MPI_COMM_WORLD, &( requests[ requestcompleted ] ) );
            }
        }

        // now send the FINISHING (empty) ranges to shut down the slaves
        range[ 0 ] = range[ 1 ];
        for ( i = 1; i<proccount; i++ ) {
#ifdef DEBUG
            printf( "\nMaster sending FINISHING range %f,%f to process %d", range[ 0 ], range[ 1 ], i );
            fflush( stdout );
#endif
            ranges[ 2*i-4+2*proccount ] = range[ 0 ];
            ranges[ 2*i-3+2*proccount ] = range[ 1 ];
            // send the stored copy so the buffer stays valid until the Isend completes
            MPI_Isend( &( ranges[ 2*i-4+2*proccount ] ), 2, MPI_DOUBLE, i, DATA, MPI_COMM_WORLD, &( requests[ 2*proccount-3+i ] ) );
        }
#ifdef DEBUG
        printf( "\nMaster before MPI_Waitall with total proccount=%d", proccount );
        fflush( stdout );
#endif

        // now receive results from the processes - that is finalize the pending requests
        MPI_Waitall( 3*proccount-3, requests, MPI_STATUSES_IGNORE );
#ifdef DEBUG
        printf( "\nMaster after MPI_Waitall with total proccount=%d", proccount );
        fflush( stdout );
#endif

        // now simply add the results
        for ( i = 0; i<( proccount-1 ); i++ ) {
            result += resulttemp[ i ];
        }

        // now receive results for the initial sends
        for ( i = 0; i<( proccount-1 ); i++ ) {
#ifdef DEBUG
            printf( "\nMaster receiving result from process %d", i+1 );
            fflush( stdout );
#endif
            MPI_Recv( &( resulttemp[ i ] ), 1, MPI_DOUBLE, i+1, RESULT, MPI_COMM_WORLD, &status );
            result += resulttemp[ i ];
            recvcount++;
#ifdef DEBUG
            printf( "\nMaster received %d result %f from process %d", recvcount, resulttemp[ i ], i+1 );
            fflush( stdout );
#endif
        }

        // now display the result
        printf( "\nHi, I am process 0, the result is %f\n", result );
    }
    else { // slave
        requests = ( MPI_Request * ) malloc( 2*sizeof( MPI_Request ) );
        if ( !requests ) {
            printf( "\nNot enough memory" );
            MPI_Finalize();
            return -1;
        }

        requests[ 0 ] = requests[ 1 ] = MPI_REQUEST_NULL;
        ranges = ( double * ) malloc( 2*sizeof( double ) );
        if ( !ranges ) {
            printf( "\nNot enough memory" );
            MPI_Finalize();
            return -1;
        }

        resulttemp = ( double * ) malloc( 2*sizeof( double ) );
        if ( !resulttemp ) {
            printf( "\nNot enough memory" );
            MPI_Finalize();
            return -1;
        }

        // first receive the initial data
        MPI_Recv( range, 2, MPI_DOUBLE, 0, DATA, MPI_COMM_WORLD, &status );
#ifdef DEBUG
        printf( "\nSlave received range %f,%f", range[ 0 ], range[ 1 ] );
        fflush( stdout );
#endif

        while ( range[ 0 ]<range[ 1 ] ) { // if there is some data to process
            // before computing the next part start receiving a new data part
            MPI_Irecv( ranges, 2, MPI_DOUBLE, 0, DATA, MPI_COMM_WORLD, &( requests[ 0 ] ) );
            // compute my part
            resulttemp[ 1 ] = SimpleIntegration( range[ 0 ], range[ 1 ] );
#ifdef DEBUG
            printf( "\nSlave just computed range %f,%f", range[ 0 ], range[ 1 ] );
            fflush( stdout );
#endif

            // now finish receiving the new part
            // and finish sending the previous results back to the master
            MPI_Waitall( 2, requests, MPI_STATUSES_IGNORE );
#ifdef DEBUG
            printf( "\nSlave just received range %f,%f", ranges[ 0 ], ranges[ 1 ] );
            fflush( stdout );
#endif

            range[ 0 ] = ranges[ 0 ];
            range[ 1 ] = ranges[ 1 ];
            resulttemp[ 0 ] = resulttemp[ 1 ];

            // and start sending the results back
            MPI_Isend( &resulttemp[ 0 ], 1, MPI_DOUBLE, 0, RESULT, MPI_COMM_WORLD, &( requests[ 1 ] ) );
#ifdef DEBUG
            printf( "\nSlave just initiated send to master with result %f", resulttemp[ 0 ] );
            fflush( stdout );
#endif
        }

        // now finish sending the last results to the master
        MPI_Wait( &( requests[ 1 ] ), MPI_STATUS_IGNORE );
    }

    // Shut down MPI
    MPI_Finalize();

#ifdef DEBUG
    printf( "\nProcess %d finished", myrank );
    fflush( stdout );
#endif

    return 0;
}
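
A minimal sketch of how this program might be compiled and run, assuming an MPI installation that provides the usual mpicc wrapper and mpirun launcher; the source file name integrate.c and the process count 4 are placeholders (at least 2 processes are required, one master plus one or more slaves), and -lm links the math library for sin():

    mpicc -O2 -o integrate integrate.c -lm
    mpirun -np 4 ./integrate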