Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
#include <stdio.h>
#include <stdlib.h>
#include <mpi.h>
/* Seed for the reproducible random graph (presumably a student ID). */
const int NIM = 13517078;
/* Sentinel edge weight meaning "no edge" / unreachable. */
const int INF = 1000000000;
- void randomGraph(int node, int local_graph, int local_node, MPI_Datatype block_column, int my_rank, MPI_Comm comm){
- int * graph;
- int i, j;
- if (my_rank == 0){
- graph = (int*) malloc(node * node * sizeof(int *));
- srand(NIM);
- for (i = 0; i < node; ++i){
- for (j = i; j < node; ++j){
- if (i == j){
- graph[i * node + j] = 0;
- graph[j * node + i] = 0;
- }else {
- int bil = rand() % 1000;
- graph[i * node + j] = (bil == 0 ? INF : bil);
- graph[j * node + i] = graph[i * node + j];
- }
- }
- }
- }
- MPI_Scatter(graph, 1, BlockType, local_graph, node * local_node, MPI_INT, 0, comm);
- if (my_rank == 0) free(mat);
- }
- int readNode(int my_rank, MPI_COMM comm){
- int node;
- if (my_rank == 0){
- scanf("%d", &node);
- }
- MPI_Bcast(&node, 1, MPI_INT, 0, comm);
- return node;
- }
/*
 * Build and commit an MPI datatype describing one block-column
 * (node rows x local_node columns) of a node x node int matrix stored
 * row-major at the root. The extent is resized to local_node ints so
 * that consecutive blocks in an MPI_Scatter start local_node ints apart
 * in the send buffer. Caller owns the returned type (MPI_Type_free).
 */
MPI_Datatype BlockType(int node, int local_node){
    MPI_Aint lb, extent;
    MPI_Datatype block_mpi_t;
    MPI_Datatype first_bc_mpi_t;
    MPI_Datatype blk_col_mpi_t;
    /* Helper type only used to obtain the extent of local_node ints. */
    MPI_Type_contiguous(local_node, MPI_INT, &block_mpi_t);
    MPI_Type_get_extent(block_mpi_t, &lb, &extent);
    /* MPI_Type_vector(numblocks, elts_per_block, stride, oldtype, *newtype) */
    MPI_Type_vector(node, local_node, node, MPI_INT, &first_bc_mpi_t);
    /* This call is needed to get the right extent of the new datatype */
    MPI_Type_create_resized(first_bc_mpi_t, lb, extent, &blk_col_mpi_t);
    MPI_Type_commit(&blk_col_mpi_t);
    /* Intermediate types are no longer needed once the result is built. */
    MPI_Type_free(&block_mpi_t);
    MPI_Type_free(&first_bc_mpi_t);
    return blk_col_mpi_t;
}
/*
 * Initialize per-rank Dijkstra state.
 *
 * visited[i]        1 only for the source vertex on rank 0, else 0
 *                   (assumes rank 0 owns the source and source < local_node
 *                    — TODO confirm against the partitioning).
 * local_distance[i] seeded from local_graph[i], i.e. row 0 of this rank's
 *                   block — correct only for source vertex 0 (as used here).
 *
 * Fixes vs. original: the outer `int i` was shadowed by the loop-scoped
 * declarations (unused-variable warning), and `visited[source]` was
 * written unconditionally, which is out of bounds when
 * source >= local_node; the rank-conditional collapses to one expression.
 */
void dijkstraInit(int local_graph[], int local_distance[], int visited[], int my_rank, int local_node, int source){
    for (int i = 0; i < local_node; ++i){
        visited[i] = (my_rank == 0 && i == source) ? 1 : 0;
        local_distance[i] = local_graph[i];
    }
}
- int minDistance(int local_distance[], int visited[], int local_node){
- int node = -1, i;
- int distance = INF;
- for (i = 0; i < local_node; ++i){
- if (!visited[i] && local_distance[i] < distance){
- distance = local_distance[i];
- node = i;
- }
- }
- return node;
- }
- void dijkstra(int local_graph[], int local_distance[], int local_node, int node, MPI_Comm comm,int source){
- int *visited;
- int i, local_now, local_adj, global_now, new_distance, my_rank, distance_global_now;
- int minimal_local_dist, minimal_local_node;
- int minimal_global_dist, minimal_global_node;
- MPI_Comm_rank(comm, &my_rank);
- visited = (int *) malloc(local_node * sizeof(int));
- if (my_rank == 0){
- visited[source] = 1;
- }else{
- visited[source] = 0;
- }
- for (i = 0; i < local_node; ++i){
- if (i != source){
- visited[i] = 0;
- }
- }
- for (i = 0; i < local_node; ++i){
- local_distance[i] = local_graph[i];
- }
- for (i = 0; i < n-1; ++i){
- int local_now = minDistance(local_distance, visited, local_node);
- if (local)
- }
- }
- int main(int argc, char **argv){
- MPI_Comm comm;
- int my_rank, node, local_node, num_process;
- int *local_graph = NULL;
- int *local_distance = NULL;
- int *global_distance = NULL;
- MPI_Datatype block_column;
- MPI_Init(NULL, NULL);
- comm = MPI_COMM_WORLD;
- MPI_Comm_rank(comm, &my_rank);
- MPI_Comm_size(comm, &num_process);
- node = readNode(my_rank, comm);
- if (node % num_process != 0){
- fprintf(stderr, "Number of process should be divde the number of node");
- return;
- }
- // setup for local process variable
- local_node = node / num_process;
- local_graph = malloc(node * local_node * sizeof(int));
- local_distance = (int *)malloc(local_node * sizeof(int));
- block_column = BlockType(node, local_node);
- if (my_rank == 0){
- global_distance = (int *)malloc(local_node * sizeof(int));
- }
- randomGraph(node, local_graph, local_node, block_column, my_rank, comm);
- MPI_Finalize();
- }
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement