Based on the code provided, I see a potential deadlock in your program. The worker sends a request to the receiver and then waits for a response, while the receiver is waiting to receive a request before it can send its response, so both ends end up stuck in blocking calls.

To fix this deadlock, you can introduce a separate MPI tag (RESPONSE_TAG) for the reply that the requesting worker sends back to the receiver. With the request, the transferred work, and that reply travelling on distinct tags, the worker can send its request and immediately wait for the answer, and neither side ever blocks on a message the other one is not going to send.

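To make the tag layout concrete before the full program, here is a minimal two-process sketch of the same handshake. The tag names and values match your program; everything else (the hard-coded four tasks, the fixed ranks) is purely illustrative, and it is meant to be run with exactly two processes.

```c
#include <stdio.h>
#include <mpi.h>

#define TO_RECEIVER   1   /* worker -> receiver: "please give me work"           */
#define SEND_WORK_TAG 2   /* receiver -> worker: the length, then the tasks      */
#define RESPONSE_TAG  3   /* worker -> receiver: acknowledgement, on its own tag */

int main(int argc, char *argv[]) {
    int rank;
    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    if (rank == 1) {                  /* the requesting worker */
        int req = 1, len, work[4], ack = 0;
        MPI_Send(&req, 1, MPI_INT, 0, TO_RECEIVER, MPI_COMM_WORLD);
        MPI_Recv(&len, 1, MPI_INT, 0, SEND_WORK_TAG, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
        if (len > 0)
            MPI_Recv(work, len, MPI_INT, 0, SEND_WORK_TAG, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
        /* the acknowledgement travels on its own tag, so the receiver can
         * never confuse it with a new request */
        MPI_Send(&ack, 1, MPI_INT, 0, RESPONSE_TAG, MPI_COMM_WORLD);
        printf("worker received %d tasks\n", len);
    } else if (rank == 0) {           /* the receiver */
        int req, work[4] = {3, 1, 4, 1}, len = 4, ack;
        MPI_Recv(&req, 1, MPI_INT, 1, TO_RECEIVER, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
        MPI_Send(&len, 1, MPI_INT, 1, SEND_WORK_TAG, MPI_COMM_WORLD);
        MPI_Send(work, len, MPI_INT, 1, SEND_WORK_TAG, MPI_COMM_WORLD);
        MPI_Recv(&ack, 1, MPI_INT, 1, RESPONSE_TAG, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
    }

    MPI_Finalize();
    return 0;
}
```
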
Here's the revised code with the deadlock issue fixed:

```c
#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>
#include <mpi.h>
#include <math.h>

#define MAX_TASKS 100
#define ITERATIONS_COUNT 100
#define L 2
#define REQUEST_TASK 228
#define STOP_CODE 229
#define TO_RECEIVER 1
#define SEND_WORK_TAG 2
#define RESPONSE_TAG 3

int *tasks;              /* task list for the current iteration                 */
int size;                /* number of MPI processes                             */
int rank;                /* rank of this process                                */
int offset;              /* next unprocessed task index (guarded by mutex)      */
int task_count;          /* number of valid entries in tasks (guarded by mutex) */
pthread_mutex_t mutex;

/* Generate the task weights for one iteration. */
void fill_tasks(int iter_count) {
    for (int i = 0; i < size * MAX_TASKS; i++) {
        tasks[i] = abs(50 - i % 100) * abs(rank - iter_count % size) * L;
    }
}

/* Process tasks until the shared offset reaches task_count. */
int do_tasks() {
    int local_res = 0;
    while (1) {
        pthread_mutex_lock(&mutex);
        if (offset >= task_count) {
            pthread_mutex_unlock(&mutex);
            break;
        }
        int current_offset = offset++;
        pthread_mutex_unlock(&mutex);

        int weight = tasks[current_offset];
        for (int j = 0; j < weight; j++) {
            local_res += (int)sqrt(j);
        }
    }
    return local_res;
}

/* Ask every other process for spare tasks and process whatever arrives.
 * Returns the result computed on the borrowed tasks. */
int request_tasks() {
    int extra_res = 0;
    for (int i = 0; i < size; i++) {
        if (i == rank) continue;

        int req_code = REQUEST_TASK;
        int help_length;
        MPI_Send(&req_code, 1, MPI_INT, i, TO_RECEIVER, MPI_COMM_WORLD);
        MPI_Recv(&help_length, 1, MPI_INT, i, SEND_WORK_TAG, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
        if (help_length > 0) {
            MPI_Recv(tasks, help_length, MPI_INT, i, SEND_WORK_TAG, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
            pthread_mutex_lock(&mutex);
            offset = 0;
            task_count = help_length;
            pthread_mutex_unlock(&mutex);
        }

        /* Acknowledge on the dedicated response tag so the receiver on the
         * other side can go back to waiting for new requests. */
        int ack = 0;
        MPI_Send(&ack, 1, MPI_INT, i, RESPONSE_TAG, MPI_COMM_WORLD);

        if (help_length > 0) {
            extra_res += do_tasks();
        }
    }
    return extra_res;
}

void* worker_func(void* arg) {
    for (int i = 0; i < ITERATIONS_COUNT; i++) {
        pthread_mutex_lock(&mutex);
        offset = 0;
        task_count = size * MAX_TASKS;
        fill_tasks(i);
        pthread_mutex_unlock(&mutex);

        int res = do_tasks();
        res += request_tasks();
        if (rank == 0) {
            printf("%d\n", res);
        }
    }

    /* Wait until every worker has finished all iterations, so no rank can
     * still request tasks from a receiver that has already shut down, then
     * stop this process's own receiver thread. */
    MPI_Barrier(MPI_COMM_WORLD);
    int stop_code = STOP_CODE;
    MPI_Send(&stop_code, 1, MPI_INT, rank, TO_RECEIVER, MPI_COMM_WORLD);
    return NULL;
}

void* receiver_func(void* arg) {
    while (1) {
        int req_code_buf;
        MPI_Status status_worker_requester;
        MPI_Recv(&req_code_buf, 1, MPI_INT, MPI_ANY_SOURCE, TO_RECEIVER, MPI_COMM_WORLD, &status_worker_requester);
        if (req_code_buf == STOP_CODE) break;

        /* Under the mutex, decide how many of the remaining tasks to hand
         * off (at most ~30% of the list) and move the offset past them. */
        pthread_mutex_lock(&mutex);
        int remaining = task_count - offset;
        int tasks_length = (int)(size * MAX_TASKS * 0.3);
        if (tasks_length > remaining) tasks_length = remaining;
        int old_offset = offset;
        offset += tasks_length;
        pthread_mutex_unlock(&mutex);

        /* Always send the length; send the tasks only if there are any, so
         * the requesting worker never waits for data that is not coming. */
        MPI_Send(&tasks_length, 1, MPI_INT, status_worker_requester.MPI_SOURCE, SEND_WORK_TAG, MPI_COMM_WORLD);
        if (tasks_length > 0) {
            MPI_Send(&tasks[old_offset], tasks_length, MPI_INT, status_worker_requester.MPI_SOURCE, SEND_WORK_TAG, MPI_COMM_WORLD);
        }

        /* Wait for the acknowledgement on its own tag before serving the
         * next request. */
        int dummy_response;
        MPI_Recv(&dummy_response, 1, MPI_INT, status_worker_requester.MPI_SOURCE, RESPONSE_TAG, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
    }
    return NULL;
}

int main(int argc, char* argv[]) {
    int provided;
    MPI_Init_thread(&argc, &argv, MPI_THREAD_MULTIPLE, &provided);
    if (provided < MPI_THREAD_MULTIPLE) {
        fprintf(stderr, "MPI_THREAD_MULTIPLE is not supported\n");
        MPI_Abort(MPI_COMM_WORLD, 1);
    }
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    /* The buffer must cover the full offset range used by do_tasks. */
    tasks = malloc(size * MAX_TASKS * sizeof(int));

    pthread_t worker, receiver;
    pthread_mutex_init(&mutex, NULL);
    pthread_create(&worker, NULL, worker_func, NULL);
    pthread_create(&receiver, NULL, receiver_func, NULL);

    pthread_join(worker, NULL);
    pthread_join(receiver, NULL);

    pthread_mutex_destroy(&mutex);
    free(tasks);
    MPI_Finalize();
    return 0;
}
```
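
If it helps: with Open MPI or MPICH this should build and run with something like `mpicc -O2 -pthread balance.c -lm -o balance` followed by `mpirun -np 4 ./balance` (the file name and process count are only examples); `-lm` is needed for `sqrt`, and your MPI library must actually provide MPI_THREAD_MULTIPLE.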

In the revised code, after sending the work length (and the tasks, when there are any to share) to the requesting worker, the receiver waits on RESPONSE_TAG for a dummy acknowledgement before serving the next request, and request_tasks() sends that acknowledgement as soon as it has handled the reply. This ensures the worker has received the work before the receiver continues.

Please note that this fix only sends a dummy value back to the receiver. If you want the requesting worker to return real data instead, you can reuse the same RESPONSE_TAG exchange; one possible shape for that is sketched below.
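
For instance (purely a sketch of one possible modification, using only names that already exist in the code above): the worker could send back its partial result for the borrowed tasks in place of the dummy value, and the receiver would read it from the same RESPONSE_TAG message.

```c
/* Sketch: replace the dummy acknowledgement with a real payload.
 * Worker side, in request_tasks(), once the borrowed tasks are processed
 * (a zero is still sent when nothing was borrowed, so the tags stay matched): */
int borrowed_result = (help_length > 0) ? do_tasks() : 0;
MPI_Send(&borrowed_result, 1, MPI_INT, i, RESPONSE_TAG, MPI_COMM_WORLD);

/* Receiver side, in receiver_func(), in place of the dummy MPI_Recv: */
int helper_result;
MPI_Recv(&helper_result, 1, MPI_INT, status_worker_requester.MPI_SOURCE,
         RESPONSE_TAG, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
/* helper_result could then be folded into this rank's totals under the mutex. */
```

The trade-off is that the receiver then stays blocked on RESPONSE_TAG until the borrowed work is finished, which is why the version above sends only a quick dummy acknowledgement instead.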