Certainly! Here's the revised code with the mentioned fixes:
```c
#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>
#include <mpi.h>
#include <math.h>

#define MAX_TASKS 100        /* tasks generated per process per iteration */
#define ITERATIONS_COUNT 100
#define L 2                  /* weight scaling factor */
#define REQUEST_TASK 228     /* message: "please share some of your tasks" */
#define STOP_CODE 229        /* message: "shut down your receiver thread" */
#define TO_RECEIVER 1        /* tag for messages addressed to a receiver thread */
#define SEND_WORK_TAG 2      /* tag for replies that carry shared work */

int *tasks;                  /* per-process task buffer (MAX_TASKS entries) */
int size;
int rank;
int offset;                  /* index of the next unprocessed task, guarded by mutex */
int tasks_count;             /* number of valid entries in tasks, guarded by mutex */
pthread_mutex_t mutex;
/* Generate this iteration's task weights; the heavy tasks land on different ranks each iteration. */
void fill_tasks(int iter_count) {
    for (int i = 0; i < MAX_TASKS; i++) {
        tasks[i] = abs(50 - i % 100) * abs(rank - iter_count % size) * L;
    }
}
/* Consume tasks from the shared buffer until none are left; returns the accumulated result. */
int do_tasks() {
    int local_res = 0;
    while (1) {
        pthread_mutex_lock(&mutex);
        if (offset >= tasks_count) {
            pthread_mutex_unlock(&mutex);
            break;
        }
        int current_offset = offset++;
        pthread_mutex_unlock(&mutex);
        int weight = tasks[current_offset];
        for (int j = 0; j < weight; j++) {
            local_res += (int)sqrt(j);
        }
    }
    return local_res;
}
/* Ask every other process for spare work and execute whatever arrives; returns the extra result. */
int request_tasks() {
    int extra_res = 0;
    for (int i = 0; i < size; i++) {
        if (i == rank) continue;
        int req_code = REQUEST_TASK;
        int help_length;
        MPI_Send(&req_code, 1, MPI_INT, i, TO_RECEIVER, MPI_COMM_WORLD);
        MPI_Recv(&help_length, 1, MPI_INT, i, SEND_WORK_TAG, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
        if (help_length > 0) {
            MPI_Recv(tasks, help_length, MPI_INT, i, SEND_WORK_TAG, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
            pthread_mutex_lock(&mutex);
            offset = 0;
            tasks_count = help_length;
            pthread_mutex_unlock(&mutex);
            extra_res += do_tasks();
        }
    }
    return extra_res;
}
/* Worker thread: generate tasks, process them, then pick up leftovers from other processes. */
void* worker_func(void* arg) {
    (void)arg;
    for (int i = 0; i < ITERATIONS_COUNT; i++) {
        pthread_mutex_lock(&mutex);
        offset = 0;
        tasks_count = MAX_TASKS;
        fill_tasks(i);
        pthread_mutex_unlock(&mutex);
        int res = do_tasks();
        res += request_tasks();
        if (rank == 0) {
            printf("%d\n", res);
        }
    }
    /* Wait until every process has finished requesting work, then stop the local receiver thread. */
    MPI_Barrier(MPI_COMM_WORLD);
    int stop_code = STOP_CODE;
    MPI_Send(&stop_code, 1, MPI_INT, rank, TO_RECEIVER, MPI_COMM_WORLD);
    return NULL;
}
/* Receiver thread: answer task requests from other processes until the stop code arrives. */
void* receiver_func(void* arg) {
    (void)arg;
    while (1) {
        int req_code_buf;
        MPI_Status status;
        MPI_Recv(&req_code_buf, 1, MPI_INT, MPI_ANY_SOURCE, TO_RECEIVER, MPI_COMM_WORLD, &status);
        if (req_code_buf == STOP_CODE) break;
        /* Hand over roughly 30% of the tasks that are still unprocessed. */
        pthread_mutex_lock(&mutex);
        int remaining = tasks_count - offset;
        int share = remaining > 0 ? (int)(remaining * 0.3) : 0;
        int old_offset = offset;
        offset += share;
        pthread_mutex_unlock(&mutex);
        MPI_Send(&share, 1, MPI_INT, status.MPI_SOURCE, SEND_WORK_TAG, MPI_COMM_WORLD);
        if (share > 0) {
            MPI_Send(&tasks[old_offset], share, MPI_INT, status.MPI_SOURCE, SEND_WORK_TAG, MPI_COMM_WORLD);
        }
    }
    return NULL;
}
int main(int argc, char* argv[]) {
    int provided;
    MPI_Init_thread(&argc, &argv, MPI_THREAD_MULTIPLE, &provided);
    if (provided < MPI_THREAD_MULTIPLE) {
        fprintf(stderr, "The MPI library does not provide MPI_THREAD_MULTIPLE\n");
        MPI_Abort(MPI_COMM_WORLD, 1);
    }
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    tasks = malloc(MAX_TASKS * sizeof(int));
    pthread_t worker, receiver;
    pthread_mutex_init(&mutex, NULL);
    pthread_create(&worker, NULL, worker_func, NULL);
    pthread_create(&receiver, NULL, receiver_func, NULL);
    pthread_join(worker, NULL);
    pthread_join(receiver, NULL);
    pthread_mutex_destroy(&mutex);
    MPI_Finalize();
    free(tasks);
    return 0;
}
```
I've made the following changes:
1. Fixed the loop in the `fill_tasks` function to iterate over `MAX_TASKS` elements instead of `size * MAX_TASKS`.
2. Made `do_tasks` stop when `offset` reaches the number of tasks currently in the buffer (`tasks_count`), so it never reads past the allocated array, and made `request_tasks` actually execute the work it receives.
3. Changed the destination of the shutdown `MPI_Send` in `worker_func` so that each worker signals its own receiver thread (after an `MPI_Barrier`), letting every receiver terminate rather than only the one on rank 0.
4. Joined the worker and receiver threads using `pthread_join` before calling `MPI_Finalize`.
5. Changed the allocation size of the `tasks` array to `MAX_TASKS * sizeof(int)` instead of `size * MAX_TASKS * sizeof(int)`, since each process only holds `MAX_TASKS` tasks at a time.
6. Updated the thread functions to match the signature expected by `pthread_create`, and checked the threading level returned by `MPI_Init_thread`.
Please note that the revised code assumes an MPI implementation that supports `MPI_THREAD_MULTIPLE` and that you launch multiple processes with `mpirun` or a similar launcher.
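For example, assuming the usual `mpicc` and `mpirun` wrapper scripts are available (names vary between MPI distributions) and the file is saved as `balance.c` (a hypothetical filename), a typical build-and-run sequence might look like this:
```sh
# Build: link against the pthread and math libraries used above.
mpicc -O2 -o balance balance.c -lpthread -lm

# Run with four MPI processes; each one spawns a worker and a receiver thread.
mpirun -np 4 ./balance
```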