#include <stdio.h>
#include <stdlib.h> /* malloc/free, srand, rand */
#include <mpi.h>

const int NIM = 13517078;
const int INF = 1000000000;

void randomGraph(int node, int *local_graph, int local_node, MPI_Datatype block_column, int my_rank, MPI_Comm comm){
    int *graph = NULL;
    int i, j;

    if (my_rank == 0){
        graph = (int *) malloc(node * node * sizeof(int));

        srand(NIM);

        /* Build a symmetric adjacency matrix: 0 on the diagonal,
           a random weight in [1, 999] elsewhere (INF when rand() draws 0). */
        for (i = 0; i < node; ++i){
            for (j = i; j < node; ++j){
                if (i == j){
                    graph[i * node + j] = 0;
                } else {
                    int bil = rand() % 1000;
                    graph[i * node + j] = (bil == 0 ? INF : bil);
                    graph[j * node + i] = graph[i * node + j];
                }
            }
        }
    }

    /* Hand each process one block column (local_node columns) of the matrix. */
    MPI_Scatter(graph, 1, block_column, local_graph, node * local_node, MPI_INT, 0, comm);

    if (my_rank == 0) free(graph);
}

int readNode(int my_rank, MPI_Comm comm){
    int node;
    if (my_rank == 0){
        scanf("%d", &node);
    }
    MPI_Bcast(&node, 1, MPI_INT, 0, comm);
    return node;
}

MPI_Datatype BlockType(int node, int local_node){
    MPI_Aint lb, extent;
    MPI_Datatype block_mpi_t;
    MPI_Datatype first_bc_mpi_t;
    MPI_Datatype blk_col_mpi_t;

    MPI_Type_contiguous(local_node, MPI_INT, &block_mpi_t);
    MPI_Type_get_extent(block_mpi_t, &lb, &extent);

    /* MPI_Type_vector(numblocks, elts_per_block, stride, oldtype, *newtype) */
    MPI_Type_vector(node, local_node, node, MPI_INT, &first_bc_mpi_t);

    /* This call is needed to get the right extent of the new datatype */
    MPI_Type_create_resized(first_bc_mpi_t, lb, extent, &blk_col_mpi_t);

    MPI_Type_commit(&blk_col_mpi_t);

    MPI_Type_free(&block_mpi_t);
    MPI_Type_free(&first_bc_mpi_t);

    return blk_col_mpi_t;
}

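/* A worked illustration of the layout (an assumed example, not from the
   original paste): with node = 4 and two processes, local_node = 2, so
   rank 0 owns columns 0-1 and rank 1 owns columns 2-3. The vector type
   above picks local_node ints out of every row (stride = node), and
   resizing its extent to local_node ints is what lets MPI_Scatter start
   the second process's block at row 0, column 2 instead of after the
   end of the whole matrix. */
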
void dijkstraInit(int local_graph[], int local_distance[], int visited[], int my_rank, int local_node, int source){
    int i;

    /* The source vertex is assumed to live in rank 0's block, so its
       local index equals its global index; only its owner marks it visited. */
    for (i = 0; i < local_node; ++i){
        visited[i] = (my_rank == 0 && i == source) ? 1 : 0;
    }

    /* Row `source` of the local block holds the initial distances
       from the source to this process's vertices. */
    for (i = 0; i < local_node; ++i){
        local_distance[i] = local_graph[source * local_node + i];
    }
}

int minDistance(int local_distance[], int visited[], int local_node){
    int node = -1, i;
    int distance = INF;

    for (i = 0; i < local_node; ++i){
        if (!visited[i] && local_distance[i] < distance){
            distance = local_distance[i];
            node = i;
        }
    }

    return node;
}

void dijkstra(int local_graph[], int local_distance[], int local_node, int node, MPI_Comm comm, int source){
    int *visited;
    int i, local_now, local_adj, global_now, new_distance, my_rank, distance_global_now;
    int local_min[2], global_min[2];

    MPI_Comm_rank(comm, &my_rank);
    visited = (int *) malloc(local_node * sizeof(int));

    dijkstraInit(local_graph, local_distance, visited, my_rank, local_node, source);

    for (i = 0; i < node - 1; ++i){
        /* Closest unvisited vertex in this process's block (-1 if none). */
        local_now = minDistance(local_distance, visited, local_node);

        local_min[0] = (local_now >= 0) ? local_distance[local_now] : INF;
        local_min[1] = (local_now >= 0) ? local_now + my_rank * local_node : -1;

        /* MPI_MINLOC keeps the smallest distance together with the
           global index of the vertex that achieves it. */
        MPI_Allreduce(local_min, global_min, 1, MPI_2INT, MPI_MINLOC, comm);

        distance_global_now = global_min[0];
        global_now = global_min[1];

        if (global_now < 0) break; /* no reachable unvisited vertex left */

        /* The owner of the chosen vertex marks it visited. */
        if (global_now / local_node == my_rank){
            visited[global_now % local_node] = 1;
        }

        /* Relax every unvisited local vertex through the chosen vertex. */
        for (local_adj = 0; local_adj < local_node; ++local_adj){
            if (!visited[local_adj]){
                new_distance = distance_global_now + local_graph[global_now * local_node + local_adj];
                if (new_distance < local_distance[local_adj]){
                    local_distance[local_adj] = new_distance;
                }
            }
        }
    }

    free(visited);
}

int main(int argc, char **argv){
    MPI_Comm comm;
    int my_rank, node, local_node, num_process, i;
    int *local_graph = NULL;
    int *local_distance = NULL;
    int *global_distance = NULL;
    MPI_Datatype block_column;

    MPI_Init(&argc, &argv);
    comm = MPI_COMM_WORLD;
    MPI_Comm_rank(comm, &my_rank);
    MPI_Comm_size(comm, &num_process);

    node = readNode(my_rank, comm);
    if (node % num_process != 0){
        if (my_rank == 0){
            fprintf(stderr, "Number of processes should divide the number of nodes\n");
        }
        MPI_Finalize();
        return 1;
    }

    // setup for local process variables
    local_node = node / num_process;
    local_graph = (int *) malloc(node * local_node * sizeof(int));
    local_distance = (int *) malloc(local_node * sizeof(int));
    block_column = BlockType(node, local_node);

    if (my_rank == 0){
        global_distance = (int *) malloc(node * sizeof(int));
    }

    randomGraph(node, local_graph, local_node, block_column, my_rank, comm);

    /* Shortest paths from vertex 0, then collect every block of
       distances on rank 0 and print them. */
    dijkstra(local_graph, local_distance, local_node, node, comm, 0);
    MPI_Gather(local_distance, local_node, MPI_INT, global_distance, local_node, MPI_INT, 0, comm);

    if (my_rank == 0){
        for (i = 0; i < node; ++i){
            printf("%d ", global_distance[i]);
        }
        printf("\n");
        free(global_distance);
    }

    free(local_graph);
    free(local_distance);
    MPI_Type_free(&block_column);

    MPI_Finalize();
    return 0;
}
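
A possible way to build and run the paste (a sketch, assuming the code is saved as dijkstra.c and an MPI implementation such as Open MPI or MPICH provides mpicc/mpirun; the node count is read from stdin on rank 0 and must be divisible by the process count):

mpicc dijkstra.c -o dijkstra
echo 8 | mpirun -np 4 ./dijkstra

This runs 4 processes on a random 8-vertex graph and prints the shortest distances from vertex 0 on rank 0.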