Apologies for the oversight. In the provided code, the deadlock occurs because the worker thread that asks another rank for extra tasks blocks in `MPI_Recv` after the tasks' length has been exchanged, waiting for a follow-up message that the other side never sends in the current implementation. With both sides sitting in blocking calls, the program deadlocks.

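To see the failure mode in isolation, here is a minimal two-rank sketch (a hypothetical illustration, not taken from your program): one rank announces a length but never sends the promised payload, so the other rank blocks forever in `MPI_Recv` and the job hangs.

```c
/* Hypothetical minimal example of the deadlock pattern: run with 2 ranks.
 * Rank 0 announces a length but never sends the payload itself,
 * so rank 1 blocks forever in its second MPI_Recv. */
#include <mpi.h>

int main(int argc, char *argv[]) {
    int rank;
    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    if (rank == 0) {
        int length = 10;
        MPI_Send(&length, 1, MPI_INT, 1, 0, MPI_COMM_WORLD);
        /* Bug: the payload that was just announced is never sent. */
    } else if (rank == 1) {
        int length;
        int payload[100];
        MPI_Recv(&length, 1, MPI_INT, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
        /* Waits unconditionally for the payload -> never returns. */
        MPI_Recv(payload, length, MPI_INT, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
    }

    MPI_Finalize();
    return 0;
}
```
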
To resolve the deadlock, you can modify the code so that every request is always answered: the receiver thread replies with the tasks' length, sends the tasks themselves only when that length is greater than zero, and finishes with a short response on a separate tag that the requesting worker always receives. Here's the modified code:

```c
#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>
#include <mpi.h>
#include <math.h>

#define MAX_TASKS 100
#define ITERATIONS_COUNT 100
#define L 2
#define REQUEST_TASK 228
#define STOP_CODE 229
#define TO_RECEIVER 1
#define SEND_WORK_TAG 2
#define RESPONSE_TAG 3

int *tasks;        /* local task buffer, MAX_TASKS entries */
int tasks_count;   /* number of valid entries currently in tasks */
int size;
int rank;
int offset;        /* index of the next task to execute */
pthread_mutex_t mutex;

/* Generate the local task weights for one iteration. */
void fill_tasks(int iter_count) {
    for (int i = 0; i < MAX_TASKS; i++) {
        tasks[i] = abs(50 - i % 100) * abs(rank - iter_count % size) * L;
    }
}

/* Execute tasks from the shared buffer until none are left. */
int do_tasks() {
    int local_res = 0;
    while (1) {
        pthread_mutex_lock(&mutex);
        if (offset >= tasks_count) {
            pthread_mutex_unlock(&mutex);
            break;
        }
        int current_offset = offset++;
        pthread_mutex_unlock(&mutex);

        int weight = tasks[current_offset];
        for (int j = 0; j < weight; j++) {
            local_res += (int)sqrt(j);
        }
    }
    return local_res;
}

/* Ask every other rank for spare tasks and execute whatever is received. */
int request_tasks() {
    int stolen_res = 0;
    for (int i = 0; i < size; i++) {
        if (i == rank) continue;

        int req_code = REQUEST_TASK;
        int help_length = 0;
        MPI_Send(&req_code, 1, MPI_INT, i, TO_RECEIVER, MPI_COMM_WORLD);
        MPI_Recv(&help_length, 1, MPI_INT, i, SEND_WORK_TAG, MPI_COMM_WORLD, MPI_STATUS_IGNORE);

        if (help_length > 0) {
            MPI_Recv(tasks, help_length, MPI_INT, i, SEND_WORK_TAG, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
            pthread_mutex_lock(&mutex);
            offset = 0;
            tasks_count = help_length;
            pthread_mutex_unlock(&mutex);
            stolen_res += do_tasks();
        }

        /* Always consume the final response so no message sent by the receiver is left unmatched. */
        int response;
        MPI_Recv(&response, 1, MPI_INT, i, RESPONSE_TAG, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
    }
    return stolen_res;
}

void* worker_func(void* arg) {
    for (int i = 0; i < ITERATIONS_COUNT; i++) {
        pthread_mutex_lock(&mutex);
        offset = 0;
        tasks_count = MAX_TASKS;
        fill_tasks(i);
        pthread_mutex_unlock(&mutex);

        int res = do_tasks();    /* own tasks */
        res += request_tasks();  /* tasks taken over from other ranks */

        if (rank == 0) {
            printf("%d\n", res);
        }
    }

    /* No rank will request tasks after this point, so the receivers may stop. */
    MPI_Barrier(MPI_COMM_WORLD);
    int stop_code = STOP_CODE;
    MPI_Send(&stop_code, 1, MPI_INT, rank, TO_RECEIVER, MPI_COMM_WORLD);
    return NULL;
}

void* receiver_func(void* arg) {
    while (1) {
        int req_code_buf;
        MPI_Status status_worker_requester;
        MPI_Recv(&req_code_buf, 1, MPI_INT, MPI_ANY_SOURCE, TO_RECEIVER, MPI_COMM_WORLD,
                 &status_worker_requester);
        if (req_code_buf == STOP_CODE) break;

        /* Give away roughly 30% of the tasks that are still pending locally. */
        pthread_mutex_lock(&mutex);
        int old_offset = offset;
        int tasks_length = 0;
        if (old_offset < tasks_count) {
            tasks_length = (int)((tasks_count - old_offset) * 0.3);
        }
        offset += tasks_length; /* the donated range will not be executed locally */

        /* Every request gets an answer: the length, then the tasks themselves (if any).
           Keep the lock so the worker cannot refill the array while the range is sent. */
        MPI_Send(&tasks_length, 1, MPI_INT, status_worker_requester.MPI_SOURCE, SEND_WORK_TAG,
                 MPI_COMM_WORLD);
        if (tasks_length > 0) {
            MPI_Send(&tasks[old_offset], tasks_length, MPI_INT, status_worker_requester.MPI_SOURCE,
                     SEND_WORK_TAG, MPI_COMM_WORLD);
        }
        pthread_mutex_unlock(&mutex);

        /* Dummy response so the requester never blocks on a missing message. */
        int dummy_response = 0;
        MPI_Send(&dummy_response, 1, MPI_INT, status_worker_requester.MPI_SOURCE, RESPONSE_TAG,
                 MPI_COMM_WORLD);
    }
    return NULL;
}

int main(int argc, char* argv[]) {
    int provided;
    MPI_Init_thread(&argc, &argv, MPI_THREAD_MULTIPLE, &provided);
    if (provided < MPI_THREAD_MULTIPLE) {
        fprintf(stderr, "MPI_THREAD_MULTIPLE is not supported\n");
        MPI_Abort(MPI_COMM_WORLD, 1);
    }

    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    tasks = malloc(MAX_TASKS * sizeof(int));
    pthread_mutex_init(&mutex, NULL);

    pthread_t worker, receiver;
    pthread_create(&worker, NULL, worker_func, NULL);
    pthread_create(&receiver, NULL, receiver_func, NULL);

    pthread_join(worker, NULL);
    pthread_join(receiver, NULL);

    pthread_mutex_destroy(&mutex);
    free(tasks);
    MPI_Finalize();
    return 0;
}
```

In this modified code, every `REQUEST_TASK` message gets a complete answer: the receiver thread first sends the tasks' length, then the tasks themselves when that length is greater than zero, and finally a dummy response on `RESPONSE_TAG`, which the requesting worker always receives with `MPI_Recv`. Since every blocking receive now has a matching send, neither side waits indefinitely and the deadlock is avoided.

Now the code should run without deadlocking, and you should see the per-iteration results printed by the worker thread on rank 0.
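
If you want to try it out, compiling with an MPI wrapper compiler and linking the math library should be enough, for example something like `mpicc main.c -o balance -lm -pthread` followed by `mpirun -np 4 ./balance` (the source file name, binary name, and process count here are only placeholders; exact flags depend on your MPI installation). Since the program requests `MPI_THREAD_MULTIPLE`, the MPI library must be built with full thread support.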