/*
Nattawut Khuadplod
.
.
.
HeatTransfer using MPI Collective Communication
*/
#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char *argv[]){

    int rank, size;
    MPI_Init(&argc,&argv);
    MPI_Comm_rank(MPI_COMM_WORLD,&rank);
    MPI_Comm_size(MPI_COMM_WORLD,&size);

    int row;   // number of nodes in x direction
    int col;   // number of nodes in y direction
    int iters; // number of iterations
    int i, j, k;
    float *heatMatrix = NULL;            // full grid, allocated on rank 0 only
    float *buff_heatMatrix, *buff_temp;  // local partition and scratch buffer

    int partrow, frac_data;

    int *sendcounts = (int*)malloc(sizeof(int)*size);
    int *displs = (int*)malloc(sizeof(int)*size);

    FILE *fp;

    if(rank==0){
        // Read the grid dimensions from the input file: first line is "<rows> <cols>"
        fp = fopen(argv[1],"r");
        if(fp){
            fscanf(fp,"%d %d \n",&row,&col);
            fclose(fp);
        }else{
            fprintf(stderr,"Cannot open input file %s\n",argv[1]);
            MPI_Abort(MPI_COMM_WORLD,1);
        }
        iters = atoi(argv[3]);

        partrow = row/size;    // base number of rows per process
        frac_data = row%size;  // leftover rows, handed out one per rank below

        heatMatrix = (float*)calloc(row*col,sizeof(float));

        // Fixed boundary condition: all four edges of the grid are held at 255
        //top
        for(i=0;i<col;i++){
            heatMatrix[i] = 255;
        }
        //left
        for(i=1;i<row;i++){
            heatMatrix[i*col] = 255;
        }
        //bottom
        for(i=0;i<col;i++){
            heatMatrix[(row-1)*col+i] = 255;
        }
        //right
        for(i=1;i<row;i++){
            heatMatrix[i*col+(col-1)] = 255;
        }
    }
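
    /*
     * After this initialization the grid looks as sketched below (an illustrative
     * 5x5 example; the real dimensions come from the input file):
     *
     *   255 255 255 255 255
     *   255   0   0   0 255
     *   255   0   0   0 255
     *   255   0   0   0 255
     *   255 255 255 255 255
     */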

    // Share the problem size and leftover-row count with every rank
    MPI_Bcast(&row,1,MPI_INT,0,MPI_COMM_WORLD);
    MPI_Bcast(&col,1,MPI_INT,0,MPI_COMM_WORLD);
    MPI_Bcast(&iters,1,MPI_INT,0,MPI_COMM_WORLD);
    MPI_Bcast(&frac_data,1,MPI_INT,0,MPI_COMM_WORLD);
    MPI_Barrier(MPI_COMM_WORLD);

    int *displs_top = (int*)calloc(size,sizeof(int));
    int *displs_bottom = (int*)calloc(size,sizeof(int));

    int sumDispls=0;
    // Calculate send counts and displacements: every rank gets row/size rows,
    // and the first row%size ranks get one extra row each
    for (int i = 0; i < size; i++) {
        sendcounts[i] = (row/size)*col;

        if (frac_data > 0) {
            sendcounts[i] += col;
            frac_data--;
        }
        displs[i] = sumDispls;
        sumDispls += sendcounts[i];
    }

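    /*
     * Worked example (illustrative numbers, not taken from an actual input file):
     * with row = 10, col = 8 and size = 3, row/size = 3 and row%size = 1, so rank 0
     * receives the one extra row:
     *
     *   sendcounts = {4*8, 3*8, 3*8} = {32, 24, 24}
     *   displs     = {0, 32, 56}
     */
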
    buff_heatMatrix = (float*)calloc(row*col,sizeof(float));
    buff_temp = (float*)calloc(row*col,sizeof(float));

    // Scatter the global grid to all processes; each rank receives exactly sendcounts[rank] floats
    MPI_Scatterv(heatMatrix, sendcounts, displs, MPI_FLOAT, buff_heatMatrix, sendcounts[rank], MPI_FLOAT, 0, MPI_COMM_WORLD);

    for(k=0;k<iters;k++){
        if(rank==0){
            // Rank 0: update its own partition directly from the full matrix and
            // send the halo rows that the other ranks need for this iteration
            partrow = sendcounts[rank]/col;

            for(i=1;i<size;i++){
                if (i==(size-1)){
                    // The last rank only needs the row just above its partition
                    displs_top[i]=displs[i]-col;
                    MPI_Send(&heatMatrix[displs_top[i]],col, MPI_FLOAT, i, 0, MPI_COMM_WORLD);
                }else{
                    // Middle ranks need the row above and the row below their partition
                    displs_top[i]=displs[i]-col;
                    displs_bottom[i]=displs[i+1];
                    MPI_Send(&heatMatrix[displs_top[i]],col, MPI_FLOAT, i, 0, MPI_COMM_WORLD);
                    MPI_Send(&heatMatrix[displs_bottom[i]],col, MPI_FLOAT, i, 1, MPI_COMM_WORLD);
                }
            }

            // Jacobi update: each interior node becomes the average of its four neighbours
            for(i=1; i<partrow; i++){
                for(j=1; j<col-1; j++){
                    buff_temp[(i*col)+j] = .25*(heatMatrix[(i*col)+j-1]      // left
                                               +heatMatrix[(i*col)+j+1]      // right
                                               +heatMatrix[((i-1)*col)+j]    // top
                                               +heatMatrix[((i+1)*col)+j]);  // bottom
                }
            }

            // Set result: boundary nodes stay at 255, interior nodes take the new value
            for(i=0;i<partrow;i++){
                for(j=0;j<col;j++){
                    if (heatMatrix[(i*col)+j] >= 255){
                        buff_heatMatrix[(i*col)+j] = 255;
                    }else{
                        buff_heatMatrix[(i*col)+j] = buff_temp[(i*col)+j];
                    }
                }
            }

        }else{
            // Other ranks: work on the scattered partition, using halo rows
            // received from rank 0 for the nodes on the partition edges
            partrow = sendcounts[rank]/col;
            float *buff_top = (float*)calloc(col,sizeof(float));
            float *buff_bottom = (float*)calloc(col,sizeof(float));

            if(rank==size-1){
                // Last rank: only a top halo row; its last row is the fixed bottom boundary
                MPI_Recv(&buff_top[0],col, MPI_FLOAT, 0, 0, MPI_COMM_WORLD,MPI_STATUS_IGNORE);

                for(i=0; i<partrow-1; i++){
                    for(j=1; j<col-1; j++){
                        if(i==0){
                            // First local row: the "top" neighbour comes from the halo buffer
                            buff_temp[(i*col)+j] = .25*(buff_heatMatrix[(i*col)+j-1]+buff_heatMatrix[(i*col)+j+1]+buff_top[j]+buff_heatMatrix[((i+1)*col)+j]);
                        }else{
                            buff_temp[(i*col)+j] = .25*(buff_heatMatrix[(i*col)+j-1]+buff_heatMatrix[(i*col)+j+1]+buff_heatMatrix[((i-1)*col)+j]+buff_heatMatrix[((i+1)*col)+j]);
                        }
                    }
                }

                // Set result: boundary nodes stay at 255, interior nodes take the new value
                for(i=0;i<partrow;i++){
                    for(j=0;j<col;j++){
                        if (buff_heatMatrix[(i*col)+j] >= 255){
                            buff_heatMatrix[(i*col)+j] = 255;
                        }else{
                            buff_heatMatrix[(i*col)+j] = buff_temp[(i*col)+j];
                        }
                    }
                }
            }else{
                // Middle ranks: receive both the top and the bottom halo rows
                MPI_Recv(&buff_top[0],col, MPI_FLOAT, 0, 0, MPI_COMM_WORLD,MPI_STATUS_IGNORE);
                MPI_Recv(&buff_bottom[0],col, MPI_FLOAT, 0, 1, MPI_COMM_WORLD,MPI_STATUS_IGNORE);

                for(i=0; i<partrow; i++){
                    for(j=1; j<col-1; j++){
                        if(i==0){
                            if(i==partrow-1){
                                // Single-row partition: both vertical neighbours come from the halos
                                buff_temp[(i*col)+j] = .25*(buff_heatMatrix[(i*col)+j-1]+buff_heatMatrix[(i*col)+j+1]+buff_top[j]+buff_bottom[j]);
                            }else{
                                buff_temp[(i*col)+j] = .25*(buff_heatMatrix[(i*col)+j-1]+buff_heatMatrix[(i*col)+j+1]+buff_top[j]+buff_heatMatrix[((i+1)*col)+j]);
                            }
                        }else{
                            if(i==partrow-1){
                                buff_temp[(i*col)+j] = .25*(buff_heatMatrix[(i*col)+j-1]+buff_heatMatrix[(i*col)+j+1]+buff_heatMatrix[((i-1)*col)+j]+buff_bottom[j]);
                            }else{
                                buff_temp[(i*col)+j] = .25*(buff_heatMatrix[(i*col)+j-1]+buff_heatMatrix[(i*col)+j+1]+buff_heatMatrix[((i-1)*col)+j]+buff_heatMatrix[((i+1)*col)+j]);
                            }
                        }
                    }
                }

                // Set result: boundary nodes stay at 255, interior nodes take the new value
                for(i=0;i<partrow;i++){
                    for(j=0;j<col;j++){
                        if (buff_heatMatrix[(i*col)+j] >= 255){
                            buff_heatMatrix[(i*col)+j] = 255;
                        }else{
                            buff_heatMatrix[(i*col)+j] = buff_temp[(i*col)+j];
                        }
                    }
                }
            }
            free(buff_top);
            free(buff_bottom);
        }

        // Gather every rank's updated partition back into the full matrix on rank 0,
        // so the halo rows sent next iteration come from the freshly updated grid
        MPI_Barrier(MPI_COMM_WORLD);
        MPI_Gatherv(buff_heatMatrix,sendcounts[rank],MPI_FLOAT,heatMatrix,sendcounts,displs,MPI_FLOAT,0,MPI_COMM_WORLD);

    }

    if(rank==0){
        // Write the result: first line is "<rows> <cols>", then one line per grid row
        fp = fopen(argv[2],"w");
        if(fp){
            fprintf(fp,"%d %d\n",row,col);
            for(i=0;i<row;i++){
                for(j=0;j<col;j++){
                    fprintf(fp, "%.0f ",heatMatrix[(i*col)+j]);
                }
                fprintf(fp, "\n");
            }
            fclose(fp);
        }
        printf("Done. See the result file for the final grid.\n");

        free(heatMatrix);
    }

    MPI_Finalize();

    free(buff_heatMatrix);
    free(buff_temp);
    free(sendcounts);
    free(displs);
    free(displs_top);
    free(displs_bottom);

    return 0;
}
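
/*
 * A minimal usage sketch, assuming an MPI toolchain that provides mpicc and mpirun;
 * the source file name, process count, and iteration count below are illustrative
 * assumptions, not taken from the paste:
 *
 *   mpicc heat_mpi.c -o heat_mpi
 *   mpirun -np 4 ./heat_mpi input.txt output.txt 500
 *
 * argv[1]: input file whose first line is "<rows> <cols>"
 * argv[2]: output file that receives the final rows-by-cols grid, one row per line
 * argv[3]: number of Jacobi iterations
 */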