Advertisement
Guest User

MPI_Waitany causes segmentation fault - sscce

a guest
Jul 20th, 2014
336
0
Never
Not a member of Pastebin yet? Sign up — it unlocks many cool features!
  1. #include <stdlib.h>
  2. #include <stdio.h>
  3. #include <unistd.h>
  4. #include "mpi.h"
  5.  
  6. #define MAXPROC 16    /* Max number of processes */
  7. #define TOTAL_FILES 7
  8.  
  9. int main(int argc, char* argv[]) {
  10.     int i, nprocs, tprocs, me, index;
  11.     const int tag  = 42;    /* Tag value for communication */
  12.  
  13.     MPI_Request recv_req[MAXPROC];  /* Request objects for non-blocking receive */
  14.     MPI_Request send_req[MAXPROC]; /* Request objects for non-blocking send */ 
  15.     MPI_Status status;              /* Status object for non-blocing receive */
  16.    
  17.     char myname[MPI_MAX_PROCESSOR_NAME];             /* Local host name string */
  18.     char hostname[MAXPROC][MPI_MAX_PROCESSOR_NAME];  /* Received host names */
  19.     int namelen;   
  20.    
  21.     MPI_Init(&argc, &argv);                /* Initialize MPI */
  22.     MPI_Comm_size(MPI_COMM_WORLD, &nprocs);    /* Get nr of processes */
  23.     MPI_Comm_rank(MPI_COMM_WORLD, &me);    /* Get own identifier */
  24.    
  25.     MPI_Get_processor_name(myname, &namelen);  /* Get host name */
  26.     myname[namelen++] = (char)0;              /* Terminating null byte */
  27.    
  28.     /* First check that we have at least 2 and at most MAXPROC processes */
  29.     if (nprocs<2 || nprocs>MAXPROC) {
  30.         if (me == 0) {
  31.           printf("You have to use at least 2 and at most %d processes\n", MAXPROC);
  32.         }
  33.         MPI_Finalize(); exit(0);
  34.     }
  35.    
  36.     /* if TOTAL_FILES < nprocs then use only TOTAL_FILES + 1 procs */
  37.     tprocs = (TOTAL_FILES < nprocs) ? TOTAL_FILES + 1 : nprocs;
  38.     int done = -1;
  39.    
  40.     if (me == 0) {    /* Process 0 does this */
  41.        
  42.         int send_counter = 0, received_counter;
  43.    
  44.         for (i=1; i<tprocs; i++) {
  45.             MPI_Isend(&send_counter, 1, MPI_INT, i, tag, MPI_COMM_WORLD, &send_req[i]);
  46.             ++send_counter;
  47.             /* Receive a message from all other processes */
  48.             MPI_Irecv (hostname[i], namelen, MPI_CHAR, MPI_ANY_SOURCE, tag, MPI_COMM_WORLD, &recv_req[i]);
  49.         }  
  50.        
  51.         for (received_counter = 0; received_counter < TOTAL_FILES; received_counter++){
  52.  
  53.             /* Wait until at least one message has been received from any process other than 0*/
  54.             MPI_Waitany(tprocs-1, &recv_req[1], &index, &status);
  55.            
  56.             if (index == MPI_UNDEFINED) perror("Errorrrrrrr");         
  57.             printf("Received a message from process %d on %s\n", status.MPI_SOURCE, hostname[index+1]);
  58.            
  59.             if (send_counter < TOTAL_FILES){ /* si todavia faltan imagenes por procesar */
  60.                 MPI_Isend(&send_counter, 1, MPI_INT, status.MPI_SOURCE, tag, MPI_COMM_WORLD, &send_req[status.MPI_SOURCE]);
  61.                 ++send_counter;
  62.                 MPI_Irecv (hostname[status.MPI_SOURCE], namelen, MPI_CHAR, MPI_ANY_SOURCE, tag, MPI_COMM_WORLD, &recv_req[status.MPI_SOURCE]);
  63.             }  
  64.         }
  65.        
  66. //      for (i=1; i<tprocs; i++) {
  67. //          MPI_Isend(&done, 1, MPI_INT, i, tag, MPI_COMM_WORLD, &send_req[i]);
  68. //      }
  69.    
  70.     } else if (me < tprocs) { /* all other processes do this */
  71.    
  72.         int y;     
  73.         MPI_Recv(&y, 1, MPI_INT, 0,tag,MPI_COMM_WORLD,&status);
  74.        
  75.         while (y != -1) {                  
  76.             printf("Process %d: Received image %d\n", me, y);
  77.             sleep(me%3+1);  /* Let the processes sleep for 1-3 seconds */
  78.            
  79.             /* Send own identifier back to process 0 */
  80.             MPI_Send (myname, namelen, MPI_CHAR, 0, tag, MPI_COMM_WORLD);
  81.             MPI_Recv(&y, 1, MPI_INT, 0,tag,MPI_COMM_WORLD,&status);        
  82.         }  
  83.     }
  84.    
  85.     MPI_Finalize();
  86.     exit(0);
  87. }
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement