#include <stdio.h>
#include <mpi.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>

#define PRECISION 0.000001
#define RANGESIZE 0.00001
#define DATA 0
#define RESULT 1
#define FINISH 2
//#define DEBUG

double f(double x) {
    return sin(exp(x)) * cos(x);
}

// Scan [a,b) in steps of PRECISION, count sign changes of f and append the
// approximate location of each zero crossing to *arr (the buffer is grown
// with realloc, so the caller passes the address of its pointer).
// Returns the number of zero crossings found.
double SimpleIntegration(double a, double b, double **arr) {
    double result = 0;
    double i;
    for (i = a; i < b; i += PRECISION) {
        double first = f(i);
        double second = f(i + PRECISION);
        double sign1 = first >= 0 ? 1 : -1;
        double sign2 = second >= 0 ? 1 : -1;
        if (sign1 != sign2) {
            // a sign change means f has a zero inside [i, i+PRECISION];
            // store the midpoint of that subinterval as its approximate location
            int size = (int) result;
            *arr = realloc(*arr, (size + 1) * sizeof(double));
            (*arr)[size] = i + PRECISION / 2;
            result++;
        }
    }
    return result;
}

int main(int argc, char **argv) {
    int myrank, proccount;
    double a = 0, b = 5;
    double range[2];
    double result = 0, resulttemp;
    int sentcount = 0;
    int zerocount = 0; // how many zero places the master has collected so far
    int count;         // number of zero places in the message currently handled
    int i;

    double *arr = malloc(sizeof(double) * 1);

    MPI_Status status;
    // Initialize MPI
    MPI_Init(&argc, &argv);
    // find out my rank
    MPI_Comm_rank(MPI_COMM_WORLD, &myrank);
    // find out the number of processes in MPI_COMM_WORLD
    MPI_Comm_size(MPI_COMM_WORLD, &proccount);
    if (proccount < 2) {
        printf("Run with at least 2 processes\n");
        MPI_Finalize();
        return -1;
    }
    if (((b - a) / RANGESIZE) < 2 * (proccount - 1)) {
        printf("More subranges needed\n");
        MPI_Finalize();
        return -1;
    }
    // now the master will distribute the data and slave processes will perform computations
    if (myrank == 0) {
        range[0] = a;
        // first distribute some ranges to all slaves
        for (i = 1; i < proccount; i++) {
            range[1] = range[0] + RANGESIZE;
            // send it to process i
            MPI_Send(range, 2, MPI_DOUBLE, i, DATA, MPI_COMM_WORLD);
            sentcount++;
            range[0] = range[1];
        }
        do {
            // distribute remaining subranges to the processes which have completed their parts
            MPI_Recv(&resulttemp, 1, MPI_DOUBLE, MPI_ANY_SOURCE, RESULT,
                     MPI_COMM_WORLD, &status);
            result += resulttemp;
            // the same slave follows up with the zero places it found;
            // append them to the master's array
            count = (int) resulttemp;
            if (count > 0) {
                arr = realloc(arr, (zerocount + count) * sizeof(double));
                MPI_Recv(arr + zerocount, count, MPI_DOUBLE, status.MPI_SOURCE,
                         RESULT, MPI_COMM_WORLD, &status);
                zerocount += count;
            }
            // check the sender and send some more data
            range[1] = range[0] + RANGESIZE;
            if (range[1] > b) range[1] = b;
            MPI_Send(range, 2, MPI_DOUBLE, status.MPI_SOURCE, DATA, MPI_COMM_WORLD);
            range[0] = range[1];
        } while (range[1] < b);
        // now receive the remaining results from the processes
        for (i = 0; i < (proccount - 1); i++) {
            MPI_Recv(&resulttemp, 1, MPI_DOUBLE, MPI_ANY_SOURCE, RESULT,
                     MPI_COMM_WORLD, &status);
            result += resulttemp;
            count = (int) resulttemp;
            if (count > 0) {
                arr = realloc(arr, (zerocount + count) * sizeof(double));
                MPI_Recv(arr + zerocount, count, MPI_DOUBLE, status.MPI_SOURCE,
                         RESULT, MPI_COMM_WORLD, &status);
                zerocount += count;
            }
        }
        // shut down the slaves
        for (i = 1; i < proccount; i++) {
            MPI_Send(NULL, 0, MPI_DOUBLE, i, FINISH, MPI_COMM_WORLD);
        }
        // now display the result
        printf("\nHi, I am process 0, the result is %f (%d zero places collected)\n",
               result, zerocount);
    } else { // slave
        // this is easy - just receive data and do the work
        do {
            MPI_Probe(0, MPI_ANY_TAG, MPI_COMM_WORLD, &status);
            if (status.MPI_TAG == DATA) {
                MPI_Recv(range, 2, MPI_DOUBLE, 0, DATA, MPI_COMM_WORLD, &status);
                // compute my part
                resulttemp = SimpleIntegration(range[0], range[1], &arr);
                // send the count back, followed by the zero places themselves
                MPI_Send(&resulttemp, 1, MPI_DOUBLE, 0, RESULT, MPI_COMM_WORLD);
                if ((int) resulttemp > 0) {
                    MPI_Send(arr, (int) resulttemp, MPI_DOUBLE, 0, RESULT, MPI_COMM_WORLD);
                }
            }
        } while (status.MPI_TAG != FINISH);
        // consume the FINISH message so nothing is left pending at MPI_Finalize
        MPI_Recv(NULL, 0, MPI_DOUBLE, 0, FINISH, MPI_COMM_WORLD, &status);
    }
    free(arr);
    // Shut down MPI
    MPI_Finalize();
    return 0;
}
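A minimal way to build and run the program, assuming an MPI implementation such as MPICH or Open MPI that provides the mpicc compiler wrapper and the mpirun launcher (the output name and process count below are only illustrative):

mpicc -O2 -o zeroplaces zeroplaces.c -lm
mpirun -np 4 ./zeroplaces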