Helena357

Untitled

May 11th, 2022
#include "utility.h"
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <mpi.h>
#include <omp.h>

#include <math.h>
#define PRECISION 0.000001      // integration step of the rectangle rule
#define RANGESIZE 1             // width of one subrange handed out by the master
#define DATA 0                  // message tag: master -> worker, subrange to integrate
#define RESULT 1                // message tag: worker -> master, partial result
#define FINISH 2                // message tag: master -> worker, terminate

// integrand: sin(x)*sin(x)/x (undefined at x == 0, so the integration range should not include 0)
double
f (double x)
{
    return sin (x) * sin (x) / x;
}

// left-endpoint rectangle rule over [a,b] with step PRECISION, parallelized
// with OpenMP; reduction(+:sum) gives each thread a private partial sum that
// is combined after the loop
double
SimpleIntegration (double a, double b)
{
    double sum = 0;
    double len = b - a;
    // number of rectangles; rounding avoids dropping the last step when
    // len/PRECISION is truncated by the conversion to int
    int parts = (int) round (len / PRECISION);

    #pragma omp parallel for reduction(+:sum)
    for (int i = 0; i < parts; i++) {
        sum += f (a + i * PRECISION) * PRECISION;
    }
    return sum;
}

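/*
 * SerialIntegrationCheck is a hypothetical helper, included only as a minimal
 * sketch: a serial version of the same rectangle rule that can be used to
 * sanity-check the OpenMP result on a small interval.
 */
static double
SerialIntegrationCheck (double a, double b)
{
    double sum = 0;
    for (double x = a; x < b; x += PRECISION)
        sum += f (x) * PRECISION;
    return sum;
}
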
int main(int argc, char **argv) {

    Args ins__args;
    parseArgs(&ins__args, &argc, argv);

    // set the number of OpenMP threads
    omp_set_num_threads(ins__args.n_thr);

    // program input arguments: integration bounds
    int INITIAL_NUMBER = ins__args.start;
    int FINAL_NUMBER = ins__args.stop;

    struct timeval ins__tstart, ins__tstop;

    int threadsupport;
    int myrank, nproc;

    // initialize MPI with the desired level of multithreading support
    MPI_Init_thread(&argc, &argv, MPI_THREAD_FUNNELED, &threadsupport);

    if (threadsupport < MPI_THREAD_FUNNELED) {
        printf("\nThe implementation does not support MPI_THREAD_FUNNELED, it supports level %d\n", threadsupport);
        MPI_Finalize();
        return -1;
    }
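
    /*
     * MPI_THREAD_FUNNELED means the process may be multithreaded, but only
     * the thread that called MPI_Init_thread (here, the main thread) makes
     * MPI calls. That is sufficient for this program, because all MPI
     * communication happens outside the OpenMP parallel region used in
     * SimpleIntegration.
     */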

    // obtain my rank
    MPI_Comm_rank(MPI_COMM_WORLD, &myrank);
    // and the number of processes
    MPI_Comm_size(MPI_COMM_WORLD, &nproc);

    if (!myrank)
        gettimeofday(&ins__tstart, NULL);

    // run the computations: MPI communication between processes, OpenMP inside SimpleIntegration

    int a = INITIAL_NUMBER, b = FINAL_NUMBER;
    double range[2];
    double result = 0, resulttemp;
    int sentcount = 0;
    int i;
    MPI_Status status;

    // the master/worker scheme needs at least one worker besides the master
    if (nproc < 2)
    {
        printf ("Run with at least 2 processes\n");
        MPI_Finalize ();
        return -1;
    }

    // make sure there are enough subranges to keep every worker busy
    if (((b - a) / RANGESIZE) < 2 * (nproc - 1))
    {
        printf ("More subranges needed\n");
        MPI_Finalize ();
        return -1;
    }

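    /*
     * Master/worker division of work: rank 0 cuts [a,b] into subranges of
     * width RANGESIZE and sends them with the DATA tag; the workers integrate
     * each subrange and reply with the RESULT tag; once the whole interval is
     * covered, rank 0 sends an empty FINISH message to every worker so it can
     * terminate.
     */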
    // now the master will distribute the data and slave processes will perform computations
    if (myrank == 0)
    {
        range[0] = a;

        // first distribute some ranges to all slaves
        for (i = 1; i < nproc; i++)
        {
            range[1] = range[0] + RANGESIZE;
#ifdef DEBUG
            printf ("\nMaster sending range %f,%f to process %d",
                    range[0], range[1], i);
            fflush (stdout);
#endif
            // send it to process i
            MPI_Send (range, 2, MPI_DOUBLE, i, DATA, MPI_COMM_WORLD);
            sentcount++;
            range[0] = range[1];
        }
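
        /*
         * Dynamic load balancing: instead of assigning all remaining
         * subranges up front, the loop below waits for any worker to return
         * a RESULT and immediately hands that worker the next subrange, so
         * faster processes automatically receive more work.
         */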
        do
        {
            // distribute remaining subranges to the processes which have completed their parts
            MPI_Recv (&resulttemp, 1, MPI_DOUBLE, MPI_ANY_SOURCE, RESULT,
                      MPI_COMM_WORLD, &status);
            result += resulttemp;
#ifdef DEBUG
            printf ("\nMaster received result %f from process %d",
                    resulttemp, status.MPI_SOURCE);
            fflush (stdout);
#endif
            // check the sender and send some more data
            range[1] = range[0] + RANGESIZE;
            if (range[1] > b)
                range[1] = b;
#ifdef DEBUG
            printf ("\nMaster sending range %f,%f to process %d",
                    range[0], range[1], status.MPI_SOURCE);
            fflush (stdout);
#endif
            MPI_Send (range, 2, MPI_DOUBLE, status.MPI_SOURCE, DATA,
                      MPI_COMM_WORLD);
            range[0] = range[1];
        }
        while (range[1] < b);

        // now receive the remaining results from the processes
        for (i = 0; i < (nproc - 1); i++)
        {
            MPI_Recv (&resulttemp, 1, MPI_DOUBLE, MPI_ANY_SOURCE, RESULT,
                      MPI_COMM_WORLD, &status);
#ifdef DEBUG
            printf ("\nMaster received result %f from process %d",
                    resulttemp, status.MPI_SOURCE);
            fflush (stdout);
#endif
            result += resulttemp;
        }

        // shut down the slaves
        for (i = 1; i < nproc; i++)
        {
            MPI_Send (NULL, 0, MPI_DOUBLE, i, FINISH, MPI_COMM_WORLD);
        }

        // now display the result
        printf ("\nHi, I am process 0, the result is %f\n", result);
    }
    else
    {                           // slave
        // this is easy - just receive data and do the work
        do
        {
            // peek at the incoming message to learn its tag before deciding how to receive it
            MPI_Probe (0, MPI_ANY_TAG, MPI_COMM_WORLD, &status);

            if (status.MPI_TAG == DATA)
            {
                MPI_Recv (range, 2, MPI_DOUBLE, 0, DATA, MPI_COMM_WORLD,
                          &status);
                // compute my part
                resulttemp = SimpleIntegration (range[0], range[1]);
                // send the result back
                MPI_Send (&resulttemp, 1, MPI_DOUBLE, 0, RESULT,
                          MPI_COMM_WORLD);
            }
        }
        while (status.MPI_TAG != FINISH);

        // receive the empty FINISH message so it is not left pending at MPI_Finalize
        MPI_Recv (NULL, 0, MPI_DOUBLE, 0, FINISH, MPI_COMM_WORLD, &status);
    }

    // synchronize/finalize the computations
    if (!myrank) {
        gettimeofday(&ins__tstop, NULL);
        ins__printtime(&ins__tstart, &ins__tstop, ins__args.marker);
    }

    MPI_Finalize();

    return 0;
}
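
/*
 * Build and run (a sketch; the file names and program arguments here are
 * assumptions: the paste saved as main.c next to the utility.c/utility.h
 * that provide Args, parseArgs and ins__printtime, with the arguments being
 * whatever parseArgs expects, e.g. the integration bounds and the OpenMP
 * thread count):
 *
 *   mpicc -fopenmp -O2 main.c utility.c -o integrate -lm
 *   mpirun -np 4 ./integrate ...
 */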