Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
- Based on the code provided, I see a potential deadlock issue in your program. The deadlock occurs because the worker process sends a request to the receiver process and waits for a response, but the receiver process is waiting to receive a request before it can send a response. This situation leads to both processes being stuck in a waiting state.
- To fix this deadlock issue, you can introduce a separate MPI communication tag for the response from the receiver process to the worker process. This way, the worker process can send a request and immediately start waiting for the response without blocking the receiver process.
- Here's the revised code with the deadlock issue fixed:
- ```c
- #include <stdio.h>
- #include <stdlib.h>
- #include <pthread.h>
- #include <mpi.h>
- #include <math.h>
/* Tunables and MPI message tags for the task-stealing demo. */
#define MAX_TASKS        100   /* tasks held per process                 */
#define ITERATIONS_COUNT 100   /* outer refill/drain iterations          */
#define L                2     /* weight scaling factor                  */

/* Control codes carried on the TO_RECEIVER tag. */
#define REQUEST_TASK 228
#define STOP_CODE    229

/* MPI tags. */
#define TO_RECEIVER   1   /* worker -> receiver control channel         */
#define SEND_WORK_TAG 2   /* receiver -> worker length + task payload   */
#define RESPONSE_TAG  3   /* worker -> receiver acknowledgement         */

/* Shared state; `offset` is the queue cursor, guarded by `mutex`. */
int *tasks;
int size;
int rank;
int offset;
pthread_mutex_t mutex;
- void fill_tasks(int iter_count) {
- for (int i = 0; i < MAX_TASKS; i++) {
- tasks[i] = abs(50 - i % 100) * abs(rank - iter_count % size) * L;
- }
- }
- int do_tasks() {
- int local_res = 0;
- while (1) {
- pthread_mutex_lock(&mutex);
- if (offset >= size * MAX_TASKS) {
- pthread_mutex_unlock(&mutex);
- break;
- }
- int current_offset = offset++;
- pthread_mutex_unlock(&mutex);
- int weight = tasks[current_offset];
- for (int j = 0; j < weight; j++) {
- local_res += (int)sqrt(j);
- }
- }
- return local_res;
- }
- void request_tasks() {
- for (int i = 0; i < size; i++) {
- if (i == rank) continue;
- int req_code = REQUEST_TASK;
- int help_length;
- MPI_Send(&req_code, 1, MPI_INT, i, TO_RECEIVER, MPI_COMM_WORLD);
- MPI_Recv(&help_length, 1, MPI_INT, i, SEND_WORK_TAG, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
- if (help_length > 0) {
- MPI_Recv(tasks, help_length, MPI_INT, i, SEND_WORK_TAG, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
- pthread_mutex_lock(&mutex);
- offset = 0;
- pthread_mutex_unlock(&mutex);
- }
- }
- }
- void* worker_func(void* arg) {
- for (int i = 0; i < ITERATIONS_COUNT; i++) {
- pthread_mutex_lock(&mutex);
- offset = 0;
- fill_tasks(i);
- pthread_mutex_unlock(&mutex);
- int res = do_tasks();
- request_tasks();
- if (rank == 0) {
- printf("%d\n", res);
- }
- }
- int stop_code = STOP_CODE;
- MPI_Send(&stop_code, 1, MPI_INT, 0, TO_RECEIVER, MPI_COMM_WORLD);
- return NULL;
- }
- void* receiver_func(void* arg) {
- while (1) {
- int req_code_buf;
- MPI_Status status_worker_requester;
- MPI_Recv(&req_code_buf, 1, MPI_INT, MPI_ANY_SOURCE, TO_RECEIVER, MPI_COMM_WORLD, &status_worker_requester);
- if (req_code_buf == STOP_CODE) break;
- size_t length = size * MAX_TASKS;
- int new_offset = offset + (int)(length * 0.3);
- int tasks
- _length = new_offset - offset;
- MPI_Send(&tasks_length, 1, MPI_INT, status_worker_requester.MPI_SOURCE, SEND_WORK_TAG, MPI_COMM_WORLD);
- if (new_offset < length - 1) {
- int old_offset = offset;
- pthread_mutex_lock(&mutex);
- offset = new_offset;
- pthread_mutex_unlock(&mutex);
- MPI_Send(&tasks[old_offset], tasks_length, MPI_INT, status_worker_requester.MPI_SOURCE, SEND_WORK_TAG, MPI_COMM_WORLD);
- }
- int dummy_response;
- MPI_Recv(&dummy_response, 1, MPI_INT, status_worker_requester.MPI_SOURCE, RESPONSE_TAG, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
- }
- return NULL;
- }
- int main(int argc, char* argv[]) {
- MPI_Init_thread(NULL, NULL, MPI_THREAD_MULTIPLE, NULL);
- MPI_Comm_size(MPI_COMM_WORLD, &size);
- MPI_Comm_rank(MPI_COMM_WORLD, &rank);
- tasks = malloc(MAX_TASKS * sizeof(int));
- pthread_t worker, receiver;
- pthread_mutex_init(&mutex, NULL);
- pthread_create(&worker, NULL, worker_func, NULL);
- pthread_create(&receiver, NULL, receiver_func, NULL);
- pthread_join(worker, NULL);
- pthread_join(receiver, NULL);
- MPI_Finalize();
- free(tasks);
- return 0;
- }
- ```
- In the revised code, after sending the work length (and, when the length is positive, the task payload), the receiver process blocks on a dummy acknowledgement message tagged RESPONSE_TAG before serving the next request. For this handshake to work, the requesting worker must actually send that acknowledgement on RESPONSE_TAG after it has received the work — otherwise the receiver simply trades one deadlock for another, blocked forever in its MPI_Recv.
- Please note that this fix assumes that the worker process does not send any response data back to the receiver process. If you intend to send a response from the worker process to the receiver process, you can modify the code accordingly.
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement