STEPAN_KODER_228

STEPAN PRIKOL 10

May 23rd, 2023
Certainly! Here's the revised code with the mentioned fixes:

```c
#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>
#include <mpi.h>
#include <math.h>

#define MAX_TASKS 100        // tasks generated by each process per iteration
#define ITERATIONS_COUNT 100 // number of load-balancing iterations
#define L 2                  // weight scaling factor
#define REQUEST_TASK 228     // message code: "please share some of your tasks"
#define STOP_CODE 229        // message code: "shut down the receiver thread"
#define TO_RECEIVER 1        // tag for messages addressed to the receiver thread
#define SEND_WORK_TAG 2      // tag for task-length and task-data replies

int *tasks;            // per-process task buffer (MAX_TASKS elements)
int size;              // number of MPI processes
int rank;              // rank of this process
int offset;            // index of the next unprocessed task, guarded by mutex
pthread_mutex_t mutex;

// Generate a fresh batch of task weights for this iteration. The weight
// depends on the rank and the iteration number, so the amount of work
// differs between processes from one iteration to the next.
void fill_tasks(int iter_count) {
    for (int i = 0; i < MAX_TASKS; i++) {
        tasks[i] = abs(50 - i % 100) * abs(rank - iter_count % size) * L;
    }
}

// Process tasks until the local buffer is exhausted. The shared offset is
// read and advanced under the mutex because the receiver thread may also
// move it when it hands tasks over to another process.
int do_tasks() {
    int local_res = 0;
    while (1) {
        pthread_mutex_lock(&mutex);
        if (offset >= MAX_TASKS) {   // the buffer holds MAX_TASKS elements per process
            pthread_mutex_unlock(&mutex);
            break;
        }
        int current_offset = offset++;
        pthread_mutex_unlock(&mutex);

        int weight = tasks[current_offset];
        for (int j = 0; j < weight; j++) {
            local_res += (int)sqrt(j);
        }
    }
    return local_res;
}

// Ask every other process for spare tasks. The reply is a length followed
// (only if the length is positive) by that many task weights, which
// overwrite the start of the local buffer.
void request_tasks() {
    for (int i = 0; i < size; i++) {
        if (i == rank) continue;
        int req_code = REQUEST_TASK;
        int help_length;
        MPI_Send(&req_code, 1, MPI_INT, i, TO_RECEIVER, MPI_COMM_WORLD);
        MPI_Recv(&help_length, 1, MPI_INT, i, SEND_WORK_TAG, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
        if (help_length > 0) {
            MPI_Recv(tasks, help_length, MPI_INT, i, SEND_WORK_TAG, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
            pthread_mutex_lock(&mutex);
            offset = 0;
            pthread_mutex_unlock(&mutex);
        }
    }
}

// Worker thread: on every iteration it generates a batch of tasks, processes
// them, then tries to pull extra work from the other processes. When all
// iterations are done it tells this process's receiver thread to stop.
void* worker_func(void* arg) {
    (void)arg;
    for (int i = 0; i < ITERATIONS_COUNT; i++) {
        pthread_mutex_lock(&mutex);
        offset = 0;
        fill_tasks(i);
        pthread_mutex_unlock(&mutex);
        int res = do_tasks();
        request_tasks();
        if (rank == 0) {
            printf("%d\n", res);
        }
    }
    // Wait until every worker has finished requesting tasks, then tell this
    // process's own receiver thread to shut down.
    MPI_Barrier(MPI_COMM_WORLD);
    int stop_code = STOP_CODE;
    MPI_Send(&stop_code, 1, MPI_INT, rank, TO_RECEIVER, MPI_COMM_WORLD);
    return NULL;
}

// Receiver thread: answers task requests from other processes until it gets
// the stop code from this process's worker thread.
void* receiver_func(void* arg) {
    (void)arg;
    while (1) {
        int req_code_buf;
        MPI_Status status_worker_requester;
        MPI_Recv(&req_code_buf, 1, MPI_INT, MPI_ANY_SOURCE, TO_RECEIVER,
                 MPI_COMM_WORLD, &status_worker_requester);
        if (req_code_buf == STOP_CODE) break;

        // Hand over roughly 30% of the tasks this process has not started yet.
        // The offset is read and advanced under the mutex so the worker thread
        // never processes the same indices that are being given away.
        pthread_mutex_lock(&mutex);
        int remaining = MAX_TASKS - offset;
        int tasks_length = (int)(remaining * 0.3);
        int old_offset = offset;
        offset += tasks_length;
        pthread_mutex_unlock(&mutex);

        MPI_Send(&tasks_length, 1, MPI_INT, status_worker_requester.MPI_SOURCE,
                 SEND_WORK_TAG, MPI_COMM_WORLD);
        if (tasks_length > 0) {
            MPI_Send(&tasks[old_offset], tasks_length, MPI_INT,
                     status_worker_requester.MPI_SOURCE, SEND_WORK_TAG, MPI_COMM_WORLD);
        }
    }
    return NULL;
}

int main(int argc, char* argv[]) {
    // Both threads make MPI calls, so full MPI_THREAD_MULTIPLE support is required.
    int provided;
    MPI_Init_thread(&argc, &argv, MPI_THREAD_MULTIPLE, &provided);
    if (provided < MPI_THREAD_MULTIPLE) {
        fprintf(stderr, "MPI_THREAD_MULTIPLE is not supported by this MPI library\n");
        MPI_Abort(MPI_COMM_WORLD, 1);
    }
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    tasks = malloc(MAX_TASKS * sizeof(int));
    pthread_t worker, receiver;
    pthread_mutex_init(&mutex, NULL);
    pthread_create(&worker, NULL, worker_func, NULL);
    pthread_create(&receiver, NULL, receiver_func, NULL);

    pthread_join(worker, NULL);
    pthread_join(receiver, NULL);

    pthread_mutex_destroy(&mutex);
    free(tasks);
    MPI_Finalize();
    return 0;
}
```

I've made the following changes:

1. Fixed the loop in the `fill_tasks` function to iterate over `MAX_TASKS` instead of `size * MAX_TASKS`.
2. Modified the condition in the `do_tasks` function so the loop stops once `offset >= MAX_TASKS`, matching the size of the per-process task buffer.
3. The worker thread now synchronizes with a barrier after its last iteration and sends the stop code to its own rank's receiver thread, so every receiver loop terminates.
4. Joined the worker and receiver threads using `pthread_join` before calling `MPI_Finalize`.
5. Changed the allocation size of the `tasks` array to `MAX_TASKS * sizeof(int)` instead of `size * MAX_TASKS * sizeof(int)`, since each process only requires `MAX_TASKS` elements.
6. Updated the thread functions to match the signature expected by `pthread_create` (see the short sketch below).
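
Regarding point 6, here is a minimal sketch of the start-routine shape that `pthread_create` expects; `example_routine` and `run_example` are placeholder names for illustration only, not part of the program above:

```c
#include <pthread.h>
#include <stddef.h>

/* pthread_create expects a start routine that takes a single void* argument
 * and returns a void*. Per-thread data travels through the argument and is
 * cast back inside the function; returning NULL is fine when no result is needed. */
void *example_routine(void *arg) {
    (void)arg;          /* unused in this sketch */
    return NULL;
}

int run_example(void) {
    pthread_t t;
    if (pthread_create(&t, NULL, example_routine, NULL) != 0) {
        return -1;      /* thread creation failed */
    }
    return pthread_join(t, NULL);   /* wait for the thread to finish */
}
```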

Please note that the revised code assumes that you have an appropriate MPI implementation installed and that you are running multiple processes using the `mpirun` command or a similar approach.
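
For example, with an MPI distribution such as Open MPI or MPICH installed, the program would typically be compiled with something like `mpicc -o program main.c -pthread -lm` (the file and binary names here are only placeholders) and launched across, say, four processes with `mpirun -np 4 ./program`. The `-lm` flag is needed because the code calls `sqrt`, and the MPI library must actually provide `MPI_THREAD_MULTIPLE` support for the two-thread design to be valid.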