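Parallel Gauss-Seidel solver over MPI for a linear system Ax = b. As a reference point for the code below, the classical Gauss-Seidel sweep (standard textbook form) updates each component as

\[
x_i^{(k+1)} = \frac{1}{a_{ii}} \Bigl( b_i - \sum_{j < i} a_{ij} x_j^{(k+1)} - \sum_{j > i} a_{ij} x_j^{(k)} \Bigr),
\]

so every row already uses the components updated earlier in the same sweep. In the parallel version each rank holds one block of the unknowns and the matching block of A's columns; for every row the ranks compute partial sums, the root combines them, applies the update, and sends the new component straight back to its owner.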
#include <mpi.h>

#include <cmath>
#include <cstring>
#include <vector>

// Solve Ax = b in parallel, where A is the coefficient matrix and b is the
// right-hand-side (free-members) vector. `matrix` is the caller's dense-matrix
// type; this function only assumes init(rows, cols) and an operator[] that
// returns a pointer to contiguous row storage.
std::vector<double> solveParallelSeidel(matrix &a,
                                        std::vector<double> &b,
                                        double eps,
                                        int approximationsCount) {
    int size;
    int rank;
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Status status;

    int root = 0;
    int tag = 0;
    int procNumber = 0;   // rank that owns the unknown currently being updated
    int indexInProc = 0;  // local index of that unknown inside its owner's block
    int elementsCounter = 0;
    int n = static_cast<int>(b.size());
    int elementsPerBlock = n / size;                   // block size on every rank but the last
    int remainElements = n % size + elementsPerBlock;  // the last rank also takes the remainder
    int elementsCount = (rank == size - 1 ? remainElements : elementsPerBlock);
    double currSum = 0.;    // this rank's partial dot product for the current row
    double globalSum = 0.;  // the partial sums combined on the root
    double oldNorm = 0.;
    double newNorm = 0.;
    double currEps = 0.;
    char endOfProgram = 0;

    std::vector<double> resultX;           // full solution, assembled on the root
    std::vector<double> x(elementsCount);  // this rank's block of the approximation
    matrix currA;                          // this rank's block of A's columns

    // Distribute A by column blocks: the inner loop below multiplies this
    // rank's columns with this rank's block of x, so each rank needs columns
    // [rank * elementsPerBlock, rank * elementsPerBlock + elementsCount) of
    // every row. The root packs each block contiguously, sends it, and keeps
    // its own block as well.
    currA.init(n, elementsCount);
    if (rank == root) {
        resultX.resize(n);
        std::vector<double> block(remainElements * n);
        for (int p = 0; p < size; p++) {
            elementsCounter = (p == size - 1 ? remainElements : elementsPerBlock);
            for (int i = 0; i < n; i++)
                for (int j = 0; j < elementsCounter; j++)
                    block[i * elementsCounter + j] = a[i][p * elementsPerBlock + j];
            if (p == root)
                std::memcpy(currA[0], block.data(),
                            static_cast<size_t>(elementsCounter) * n * sizeof(double));
            else
                MPI_Send(block.data(), elementsCounter * n, MPI_DOUBLE, p, tag, MPI_COMM_WORLD);
        }
    } else {
        MPI_Recv(currA[0], elementsCount * n, MPI_DOUBLE, root, tag, MPI_COMM_WORLD, &status);
    }

    // Main iteration loop: every rank walks over all rows together, so the
    // collective calls stay matched across ranks.
    do {
        newNorm = 0.;

        for (int i = 0; i < n; i++) {
            currSum = 0;
            procNumber = i / elementsPerBlock;
            if (procNumber >= size) procNumber = size - 1;  // remainder rows belong to the last rank
            indexInProc = i - procNumber * elementsPerBlock;

            // Partial dot product of row i with this rank's block of x; the
            // diagonal term is skipped so the update matches the Gauss-Seidel
            // formula x_i = (b_i - sum_{j != i} a_ij * x_j) / a_ii.
            for (int j = 0; j < elementsCount; j++) {
                if (rank == procNumber && j == indexInProc) continue;  // skip a[i][i] * x[i]
                currSum -= currA[i][j] * x[j];
            }
            // Combine the partial row sums on the root.
            MPI_Reduce(&currSum, &globalSum, 1, MPI_DOUBLE, MPI_SUM, root, MPI_COMM_WORLD);

            if (rank == root) {
                newNorm += globalSum * globalSum;
                resultX[i] = (globalSum + b[i]) / a[i][i];
                globalSum = 0.;
                // Return the updated component to its owner right away, so the
                // rows processed later in this sweep already use the new value
                // (the "Seidel" property).
                if (procNumber != root) {
                    MPI_Send(&resultX[i], 1, MPI_DOUBLE, procNumber, tag, MPI_COMM_WORLD);
                } else {
                    x[indexInProc] = resultX[i];
                }
            } else if (rank == procNumber) {
                MPI_Recv(&x[indexInProc], 1, MPI_DOUBLE, root, tag, MPI_COMM_WORLD, &status);
            }
        }

        // The root checks the stopping criteria: the change of the residual
        // norm between sweeps and the remaining iteration budget.
        if (rank == root) {
            currEps = std::fabs(std::sqrt(newNorm) - std::sqrt(oldNorm));
            approximationsCount--;
            oldNorm = newNorm;
            if (currEps < eps || !approximationsCount) endOfProgram = 1;
        }
        // Every rank must take part in broadcasting the verdict, otherwise the
        // non-root ranks would never leave the loop.
        MPI_Bcast(&endOfProgram, 1, MPI_BYTE, root, MPI_COMM_WORLD);
    } while (!endOfProgram);

    return resultX;
}
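
A minimal driver sketch for the solver above. The `matrix` struct here is a hypothetical stand-in (row-major storage with init(rows, cols) and a pointer-returning operator[]) added only to make the example self-contained; in a single file it, together with the prototype, would sit above the solver's definition. The test system is symmetric and diagonally dominant, for which Gauss-Seidel is known to converge.

#include <mpi.h>

#include <cstdio>
#include <vector>

// Hypothetical stand-in for the project's matrix class.
struct matrix {
    std::vector<double> data;
    int cols = 0;
    void init(int rows, int c) { cols = c; data.assign(static_cast<size_t>(rows) * c, 0.); }
    double *operator[](int row) { return data.data() + static_cast<size_t>(row) * cols; }
};

std::vector<double> solveParallelSeidel(matrix &a, std::vector<double> &b,
                                        double eps, int approximationsCount);

int main(int argc, char **argv) {
    MPI_Init(&argc, &argv);
    int rank;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    const int n = 4;
    matrix a;
    a.init(n, n);
    std::vector<double> b(n, 1.);
    // Diagonally dominant symmetric system: 4 on the diagonal, -1 off it;
    // its exact solution is x_i = 1 for every component.
    for (int i = 0; i < n; i++)
        for (int j = 0; j < n; j++)
            a[i][j] = (i == j ? 4. : -1.);

    std::vector<double> x = solveParallelSeidel(a, b, 1e-9, 100);

    if (rank == 0)
        for (int i = 0; i < n; i++)
            std::printf("x[%d] = %f\n", i, x[i]);  // expected: all components near 1.0

    MPI_Finalize();
    return 0;
}

Compile with mpic++ and run with mpirun; keep the process count at or below n, since the block size n / size must stay positive.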