#include <stdio.h>
#include <stdlib.h>
#include "mpi.h"

int main(int argc, char *argv[]) {
    int numtasks, rank, N;
    int result = 0;

    // arrays for the two full vectors (allocated on the master rank only)
    int* a = NULL;
    int* b = NULL;

    double start, end, tot_time;
    start = 0;
    end = 0;

    // initialize MPI
    MPI_Init(&argc, &argv);
    // get the number of processes
    MPI_Comm_size(MPI_COMM_WORLD, &numtasks);
    // get this process's rank
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    if (rank == 0) {
        N = 100; // simulated reading in user input
    }
    // broadcast N so every rank knows the vector length
    MPI_Bcast(&N, 1, MPI_INT, 0, MPI_COMM_WORLD);

    if (rank == 0) {
        // dynamically allocate memory for the two vectors on the master rank
        a = (int*)malloc(N * sizeof(int));
        b = (int*)malloc(N * sizeof(int));
        // hardcode each element of both vectors to 2
        for (int i = 0; i < N; i++) {
            a[i] = b[i] = 2;
        }
    }

    // new_N: number of elements assigned to this rank
    int new_N;
    if (rank < (N % numtasks)) {
        new_N = N / numtasks + 1;
    } else {
        new_N = N / numtasks;
    }

    // receive buffers for this rank's chunk of each vector
    int* recA = (int*)malloc(new_N * sizeof(int));
    int* recB = (int*)malloc(new_N * sizeof(int));

    // sendcount[i]: number of elements sent to rank i
    int* sendcount = (int*)malloc(numtasks * sizeof(int));
    // displs[i]: offset into the send buffer where rank i's chunk starts
    int* displs = (int*)malloc(numtasks * sizeof(int));

    // the first N % numtasks ranks take one extra element; displacements are cumulative
    int offset = 0;
    for (int i = 0; i < numtasks; i++) {
        sendcount[i] = (i < N % numtasks) ? N / numtasks + 1 : N / numtasks;
        displs[i] = offset;
        offset += sendcount[i];
    }

    start = MPI_Wtime();
    MPI_Scatterv(a, sendcount, displs, MPI_INT, recA, new_N, MPI_INT, 0, MPI_COMM_WORLD);
    MPI_Scatterv(b, sendcount, displs, MPI_INT, recB, new_N, MPI_INT, 0, MPI_COMM_WORLD);
    end = MPI_Wtime();
    tot_time = end - start;
    printf("Scatterv total time elapsed on %d: %lf\n", rank, tot_time);

    // compute the partial dot product over this rank's chunk
    for (int i = 0; i < new_N; i++) {
        result += recA[i] * recB[i];
    }
    // sum the partial results from every rank so all ranks hold the final dot product
    MPI_Allreduce(MPI_IN_PLACE, &result, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
    printf("Rank: %d: Result: %d\n", rank, result);

    // clean up
    free(recA);
    free(recB);
    free(sendcount);
    free(displs);
    if (rank == 0) {
        free(a);
        free(b);
    }

    // finished with MPI
    MPI_Finalize();
    return 0;
}
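Usage note: assuming an MPI implementation such as MPICH or Open MPI is installed, the program can typically be compiled with the MPI wrapper compiler and launched with the MPI process launcher, e.g. `mpicc dotproduct.c -o dotproduct` followed by `mpirun -np 4 ./dotproduct` (the file name and process count here are placeholders, not from the original paste). With every element of both vectors hardcoded to 2 and N = 100, each rank should report a final result of 400.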