// Parallel dot product of two integer vectors with MPI point-to-point
// messaging: rank 0 distributes chunks, the workers multiply-accumulate,
// and rank 0 sums the partial results.
#include <iostream>
#include "mpi.h"
#include <random>

using namespace std;

#define VECTOR_SIZE 1000

// Uniformly distributed integer in [a, b]. The engine is seeded once per
// thread instead of constructing a fresh random_device on every call.
inline int generateRandomInRange(int a, int b) {
    static thread_local mt19937 engine{random_device{}()};
    uniform_int_distribution<int> dist(a, b);
    return dist(engine);
}

void fill_vector_values(int *vec) {
    for (size_t i = 0; i < VECTOR_SIZE; i++) {
        vec[i] = generateRandomInRange(1, 100);
    }
}

void print_vec(const int *vec, size_t n) {
    for (size_t i = 0; i < n; i++) {
        std::cout << vec[i] << '\n';
    }
    std::cout << "---------\n";
}
int main(int argc, char **argv) {
    int rank, size;
    int VEC_SIZE = VECTOR_SIZE;
    auto *vec1 = new int[VECTOR_SIZE];
    auto *vec2 = new int[VECTOR_SIZE];

    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    // Only the root needs real data; the other ranks just reuse their
    // buffers as receive space.
    if (rank == 0) {
        fill_vector_values(vec1);
        fill_vector_values(vec2);
    }

    size_t sub_size = VEC_SIZE / size;
    size_t rest = VEC_SIZE % size;  // leftover elements after an even split
    int result = 0;

    if (rank == 0) {
        // Chunk i starts at sub_size*i (the original sub_size*i + 1 was off
        // by one and skipped the first element of every chunk). Rank 0 keeps
        // chunk 0 for itself instead of sending a message to itself.
        for (int i = 1; i < size; i++) {
            MPI_Send(&vec1[sub_size * i], (int)sub_size, MPI_INT, i, i, MPI_COMM_WORLD);
            MPI_Send(&vec2[sub_size * i], (int)sub_size, MPI_INT, i, i, MPI_COMM_WORLD);
        }

        // Root's own chunk, plus the `rest` tail elements that do not
        // divide evenly among the ranks (previously dropped).
        for (size_t j = 0; j < sub_size; j++) {
            result += vec1[j] * vec2[j];
        }
        for (size_t j = VEC_SIZE - rest; j < (size_t)VEC_SIZE; j++) {
            result += vec1[j] * vec2[j];
        }

        // One partial result arrives from each of the size-1 workers;
        // waiting for `size` messages here would deadlock, since rank 0
        // never sends a partial result to itself.
        for (int i = 1; i < size; i++) {
            int local_res;
            MPI_Recv(&local_res, 1, MPI_INT, MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
            result += local_res;
        }

        std::cout << result << endl;
    } else {
        MPI_Recv(&vec1[0], (int)sub_size, MPI_INT, 0, rank, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
        MPI_Recv(&vec2[0], (int)sub_size, MPI_INT, 0, rank, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
        int curr_res = 0;
        for (size_t j = 0; j < sub_size; j++) {
            curr_res += vec1[j] * vec2[j];
        }
        MPI_Send(&curr_res, 1, MPI_INT, 0, rank, MPI_COMM_WORLD);
    }

    MPI_Finalize();
    delete[] vec1;
    delete[] vec2;
    return 0;
}
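
// For comparison (not part of the original paste): the same reduction maps
// naturally onto MPI collectives, which replace the hand-written Send/Recv
// bookkeeping above. A minimal sketch, assuming VECTOR_SIZE divides evenly
// by the number of ranks and using placeholder data instead of random fill:

#include <iostream>
#include <vector>
#include "mpi.h"

#define VECTOR_SIZE 1000

int main(int argc, char **argv) {
    MPI_Init(&argc, &argv);
    int rank, size;
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    int sub_size = VECTOR_SIZE / size;  // assumes VECTOR_SIZE % size == 0
    std::vector<int> vec1, vec2;
    if (rank == 0) {
        vec1.assign(VECTOR_SIZE, 1);  // placeholder data; swap in the random fill above
        vec2.assign(VECTOR_SIZE, 2);
    }

    // Every rank, root included, receives one contiguous chunk of each vector.
    std::vector<int> sub1(sub_size), sub2(sub_size);
    MPI_Scatter(vec1.data(), sub_size, MPI_INT, sub1.data(), sub_size, MPI_INT, 0, MPI_COMM_WORLD);
    MPI_Scatter(vec2.data(), sub_size, MPI_INT, sub2.data(), sub_size, MPI_INT, 0, MPI_COMM_WORLD);

    int local = 0;
    for (int j = 0; j < sub_size; j++) local += sub1[j] * sub2[j];

    // Sum all partial dot products onto rank 0.
    int result = 0;
    MPI_Reduce(&local, &result, 1, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD);
    if (rank == 0) std::cout << result << std::endl;

    MPI_Finalize();
    return 0;
}

// Either version builds and runs with the usual MPI toolchain, for example
// (file name assumed): mpic++ dot.cpp -o dot && mpirun -np 4 ./dot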