Advertisement
desdemona

soww_lab2

Mar 24th, 2015
730
0
Never
Not a member of Pastebin yet? Sign Up, it unlocks many cool features!
C 5.12 KB | None | 0 0
  1.  
  2. #include <stdio.h>
  3. #include <mpi.h>
  4. #include <math.h>
  5. #include <stdlib.h>
  6. #include <time.h>
  7.  
  8. #define PRECISION 0.000001
  9. //#define RANGESIZE 1
  10. #define DATA 0
  11. #define RESULT 1
  12. #define FINISH 2
  13.  
  14. //#define DEBUG
  15. double f (double x)
  16. {
  17.   return x;//sin (x) * sin (x) / x;
  18. }
  19.  
  20. double SimpleIntegration (double a, double b)
  21. {
  22.   return a;
  23. }
  24.  
  25. int main (int argc, char **argv)
  26. {
  27.   int messagecnt =0;
  28.   int myrank, proccount;
  29.   double a = 1, b = 100;
  30.   int n=512;
  31.  
  32.   char input[512];
  33.   int zarodek;
  34.   zarodek= time(NULL);
  35.   srand(zarodek);   // za zarodek wstawiamy pobrany czas w sekundach
  36.   int i=0;
  37.   for(i=0;i<n;i++)
  38.   {
  39.     int x = rand()%3;
  40.     char xx;
  41.     if(x==0)
  42.     { xx = 'a';}
  43.     else if(x==1)
  44.     { xx='b';}
  45.     else
  46.     { xx='c';}
  47.     input[i] = xx;
  48.   }
  49.  
  50.   double result = 0, resulttemp;
  51.   int sentcount = 0;
  52.   MPI_Status status;
  53.  
  54.   // Initialize MPI
  55.   MPI_Init (&argc, &argv);
  56. // find out my rank
  57.   MPI_Comm_rank (MPI_COMM_WORLD, &myrank);
  58. // find out the number of processes in MPI_COMM_WORLD
  59.   MPI_Comm_size (MPI_COMM_WORLD, &proccount);
  60.  
  61.   if (proccount < 2)
  62.     {
  63.       printf ("Run with at least 2 processes");
  64.       MPI_Finalize ();
  65.       return -1;
  66.  
  67.     }
  68.    
  69.     if(n % (proccount-1) != 0)
  70.     {
  71.       printf ("Can't devide task evenly, try running with 9");
  72.       MPI_Finalize ();
  73.       return -1;    
  74.     }
  75.    
  76.     int package_size = n/(proccount-1);
  77.     /*
  78.   if (((b - a) / RANGESIZE) < 2 * (proccount - 1))
  79.     {
  80.       printf ("More subranges needed");
  81.       MPI_Finalize ();
  82.       return -1;
  83.     }*/
  84.    
  85. int
  86. // now the master will distribute the data and slave processes will perform computations
  87.   if (myrank == 0)
  88.     {
  89.       int a = 0;
  90.       int b = a + package_size-1;
  91.  
  92. // first distribute some ranges to all slaves
  93.       for (i = 1; i < proccount; i++)
  94.     {
  95.       printf ("\nMaster sending chars from %f,%f to process %d", a,
  96.           b, i);
  97.       fflush (stdout);
  98.      
  99.       int nums[2];
  100.       nums[0] = sentcount;
  101.       nums[1] = package_size;
  102. // send it to process i
  103.       MPI_Send(nums,2,MPI_INT,i,DATA,MPI_COMM_WORLD);
  104.       MPI_Send(input[(i-1)*package_size], package_size, MPI_CHAR, i, DATA, MPI_COMM_WORLD);
  105.       sentcount++;
  106.       a = b + 1;
  107.       b = a + package_size - 1;
  108.     }
  109.  
  110.       do
  111.     {
  112. // distribute remaining subranges to the processes which have completed their parts
  113.       MPI_Probe (0, MPI_ANY_TAG, MPI_COMM_WORLD, &status);
  114.  
  115.       if (status.MPI_TAG == DATA)
  116.       {}
  117.      
  118.       if(recived_result < 0)
  119.       {
  120.         printf ("\nMaster received finished message form process %d", status.MPI_SOURCE);
  121.         fflush (stdout);       
  122.       }
  123.       else
  124.       {
  125.         printf ("\nMaster received result %f from process %d", resulttemp,
  126.           status.MPI_SOURCE);
  127.       fflush (stdout);
  128.       }
  129.  
  130.       // check the sender and send some more data
  131.       range[1] = range[0] + RANGESIZE;
  132.       if (range[1] > b)
  133.         range[1] = b;
  134. #ifdef DEBUG
  135.       printf ("\nMaster sending range %f,%f to process %d", range[0],
  136.           range[1], status.MPI_SOURCE);
  137.       fflush (stdout);
  138. #endif
  139.       MPI_Send (range, 2, MPI_DOUBLE, status.MPI_SOURCE, DATA,
  140.             MPI_COMM_WORLD);
  141.       range[0] = range[1];
  142.     }
  143.       while (range[1] < b);
  144.       // now receive results from the processes
  145.       for (i = 0; i < (proccount - 1); i++)
  146.     {
  147.       MPI_Recv (&resulttemp, 1, MPI_DOUBLE, MPI_ANY_SOURCE, RESULT,
  148.             MPI_COMM_WORLD, &status);
  149. #ifdef DEBUG
  150.       printf ("\nMaster received result %f from process %d", resulttemp,
  151.           status.MPI_SOURCE);
  152.       fflush (stdout);
  153. #endif
  154.  
  155.       result += resulttemp;
  156.     }
  157. // shut down the slaves
  158.       for (i = 1; i < proccount; i++)
  159.     {
  160.       MPI_Send (NULL, 0, MPI_DOUBLE, i, FINISH, MPI_COMM_WORLD);
  161.     }
  162.  
  163.       // now display the result
  164.       printf ("\nHi, I am process 0, the result is %f\n", result);
  165.     }
  166.   else
  167.     {
  168.       // slave
  169.       // this is easy - just receive data and do the work
  170.       do
  171.     {
  172.      
  173.       MPI_Probe (0, MPI_ANY_TAG, MPI_COMM_WORLD, &status);
  174.       char data_recived[512];
  175.       int numbers[2];
  176.       int pnumber;
  177.       int sizep;
  178.       if (status.MPI_TAG == DATA)
  179.         {
  180.           if(messagecnt % 2 == 0)
  181.           {
  182.         MPI_Recv (numbers, package_size, MPI_CHAR, 0, DATA, MPI_COMM_WORLD,
  183.               &status);
  184.         sizep = numbers[1];
  185.         pnumber= numbers[0];
  186.           }
  187.           else
  188.           {
  189.           MPI_Recv (data_recived, numbers[1], MPI_CHAR, 0, DATA, MPI_COMM_WORLD,
  190.               &status);    
  191.  
  192.   // compute my part
  193.         int last = -1;
  194.         int found[512];
  195.         int howmany = 0;
  196.         int index = 0;
  197.         for(i=0; i<sizep; i++)
  198.         {
  199.           if(last == -1 && data_recived[i] == 'a')
  200.           {
  201.             last++;
  202.             index = i;
  203.           }
  204.           else if(last == 0 && data_recived[i] == 'b')
  205.           {
  206.             last++;
  207.           }
  208.           else if(last == 1 && data_recived[i] == 'c')
  209.           { last=-1;
  210.             index = index + (pnumber-1)*sizep;
  211.             found[howmany] = index;
  212.             howmany++;
  213.           }
  214.           else
  215.           { last=-1;}
  216.          
  217.           if(i==sizep-1)
  218.           {
  219.             if(howmany == 0)
  220.             {found[0] = -666;}
  221.             MPI_Send(found, howmany+1, MPI_INT,0,RESULT, MPI_COMM_WORLD);
  222.           }
  223.         }
  224.           }
  225.           messagecnt++;
  226.           }
  227. // send the result back
  228.  
  229.         }
  230.     }
  231.       while (status.MPI_TAG != FINISH);
  232.     }
  233. // Shut down MPI
  234.   MPI_Finalize ();
  235.   return 0;
  236. }
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement