Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
#include <stdio.h>
#include <mpi.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#define PRECISION 0.000001
#define RANGESIZE 0.00001
#define DATA 0
#define RESULT 1
#define FINISH 2
//#define DEBUG
- double f(double x) {
- return sin(exp(x))*cos(x);
- }
- double SimpleIntegration(double a,double b, double* arr) {
- double result = 0;
- double i;
- for (i=a;i<b;i+=PRECISION) {
- double first = f(i);
- double second = f(i + PRECISION);
- double sign1 = first >= 0 ? 1 : -1;
- double sign2 = second >= 0 ? 1 : -1;
- if (sign1 != sign2) {
- result++;
- double zeroPlace = (first + second) / 2;
- int size = sizeof(arr)/sizeof(arr[0]);
- arr = realloc(arr, (size+1) * sizeof(double));
- arr[size + 1] = zeroPlace;
- }
- }
- return result;
- }
- int main(int argc, char **argv) {
- int myrank,proccount;
- double a=0,b=5;
- double range[2];
- double result=0,resulttemp;
- int sentcount=0;
- int i;
- double *arr = malloc (sizeof (double) * 1);
- double *arrTemp;
- MPI_Status status;
- // Initialize MPI
- MPI_Init(&argc, &argv);
- // find out my rank
- MPI_Comm_rank(MPI_COMM_WORLD, &myrank);
- // find out the number of processes in MPI_COMM_WORLD
- MPI_Comm_size(MPI_COMM_WORLD, &proccount);
- if (proccount<2) {
- printf("Run with at least 2 processes");
- MPI_Finalize();
- return -1;
- }
- if (((b-a)/RANGESIZE)<2*(proccount-1)) {
- printf("More subranges needed");
- MPI_Finalize();
- return -1;
- }
- // now the master will distribute the data and slave processes will perform computations
- if (myrank==0) {
- range[0]=a;
- // first distribute some ranges to all slaves
- for(i=1;i<proccount;i++) {
- range[1]=range[0]+RANGESIZE;
- // send it to process i
- MPI_Send(range,2,MPI_DOUBLE,i,DATA,MPI_COMM_WORLD);
- MPI_Send(arr, sizeof(arr)/sizeof(arr[0]), MPI_DOUBLE,0,RESULT,MPI_COMM_WORLD);
- sentcount++;
- range[0]=range[1];
- }
- do {
- // distribute remaining subranges to the processes which have completed their parts
- MPI_Recv(&resulttemp,1,MPI_DOUBLE,MPI_ANY_SOURCE,RESULT,MPI_COMM_WORLD,&status);
- MPI_Recv(arr, sizeof(arr)/sizeof(arr[0]), MPI_DOUBLE,MPI_ANY_SOURCE,RESULT,MPI_COMM_WORLD,&status);
- result+=resulttemp;
- // check the sender and send some more data
- range[1]=range[0]+RANGESIZE;
- if (range[1]>b) range[1]=b;
- MPI_Send(range,2,MPI_DOUBLE,status.MPI_SOURCE,DATA,MPI_COMM_WORLD);
- MPI_Send(arr, sizeof(arr)/sizeof(arr[0]), MPI_DOUBLE,0,RESULT,MPI_COMM_WORLD);
- range[0]=range[1];
- } while (range[1]<b);
- // now receive results from the processes
- for(i=0;i<(proccount-1);i++) {
- MPI_Recv(&resulttemp,1,MPI_DOUBLE,MPI_ANY_SOURCE,RESULT,MPI_COMM_WORLD,&status);
- MPI_Recv(arr, sizeof(arr)/sizeof(arr[0]), MPI_DOUBLE,MPI_ANY_SOURCE,RESULT,MPI_COMM_WORLD,&status);
- result+=resulttemp;
- }
- // shut down the slaves
- for(i=1;i<proccount;i++) {
- MPI_Send(NULL,0,MPI_DOUBLE,i,FINISH,MPI_COMM_WORLD);
- }
- // now display the result
- printf("\nHi, I am process 0, the result is %f\n%d",result, sizeof(arr)/sizeof(arr[0]));
- } else { // slave
- // this is easy - just receive data and do the work
- do {
- MPI_Probe(0,MPI_ANY_TAG,MPI_COMM_WORLD,&status);
- if (status.MPI_TAG==DATA) {
- MPI_Recv(range,2,MPI_DOUBLE,0,DATA,MPI_COMM_WORLD,&status);
- MPI_Recv(arr, sizeof(arr)/sizeof(arr[0]), MPI_DOUBLE,MPI_ANY_SOURCE,RESULT,MPI_COMM_WORLD,&status);
- // compute my part
- resulttemp=SimpleIntegration(range[0],range[1], arr);
- // send the result back
- MPI_Send(&resulttemp,1,MPI_DOUBLE,0,RESULT,MPI_COMM_WORLD);
- MPI_Send(arr, sizeof(arr)/sizeof(arr[0]), MPI_DOUBLE,0,RESULT,MPI_COMM_WORLD);
- }
- } while (status.MPI_TAG!=FINISH);
- }
- free(arr);
- // Shut down MPI
- MPI_Finalize();
- return 0;
- }
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement