#include <mpi.h>
#include <omp.h>

#include <iomanip>
#include <iostream>
#include <vector>
#include <map>
#include <cmath>
#include <cstdlib>
#include <algorithm>
#include <fstream>
#include <sstream>

using namespace std;

class MPIComputations{
    int M, N;
    int cur_block_size_x, cur_block_size_y, cur_block_global_offset_x, cur_block_global_offset_y;
    int Gx, Gy;
    int cur_block_global_coors_x, cur_block_global_coors_y;
    double h_x, h_y;
    int global_block_size_x, global_block_size_y;
    int my_rank;
    double current_norm;
    MPI_Comm comm;
    double x_left, x_right, y_bottom, y_top;
    ofstream my_file;

    vector<double> internal_data;
    vector<double> old_internal_data;
    vector<double> external_data[4];

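    // Data layout (descriptive note): internal_data holds the current iterate for this
    // process's block in row-major order, indexed as [j + cur_block_size_y * i];
    // old_internal_data keeps the previous iterate. external_data[0..3] are one-node-wide
    // halo strips from the left, right, bottom and top neighbours, in that order.
    // IsNodeInternalCornerOrSide() classifies a global node: 0 = interior,
    // 1/3/5/7 = left/right/bottom/top side, 2/4/6/8 = corners, -1 = outside the grid.
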
    int IsNodeInternalCornerOrSide(int current_node_global_offset_x, int current_node_global_offset_y){

        //corners
        //left bottom corner
        if (current_node_global_offset_x == 0 && current_node_global_offset_y == 0){
            return 2;
        }

        //left top corner
        if (current_node_global_offset_x == 0 && current_node_global_offset_y == N){
            return 4;
        }

        //right bottom corner
        if (current_node_global_offset_x == M && current_node_global_offset_y == 0){
            return 6;
        }

        //right top corner
        if (current_node_global_offset_x == M && current_node_global_offset_y == N){
            return 8;
        }

        //sides
        //left side
        if (current_node_global_offset_y >= 1 && current_node_global_offset_y <= N - 1 &&
            current_node_global_offset_x == 0){
            return 1;
        }

        //right side
        if (current_node_global_offset_y >= 1 && current_node_global_offset_y <= N - 1 &&
            current_node_global_offset_x == M){
            return 3;
        }

        //bottom side
        if (current_node_global_offset_x >= 1 && current_node_global_offset_x <= M - 1 &&
            current_node_global_offset_y == 0){
            return 5;
        }

        //top side
        if (current_node_global_offset_x >= 1 && current_node_global_offset_x <= M - 1 &&
            current_node_global_offset_y == N){
            return 7;
        }

        //internal
        if ((current_node_global_offset_x >= 1 && current_node_global_offset_x <= M - 1) &&
            (current_node_global_offset_y >= 1 && current_node_global_offset_y <= N - 1)){
            return 0;
        }

        return -1;
    }

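    // Test problem (descriptive note): main() uses the rectangle [-2, 3] x [-1, 4]. With
    // k(x, y) = 1 + (x + y)^2, q = 1 and the exact solution u(x, y) = 2 / (1 + x^2 + y^2),
    // psi_L/psi_R/psi_B/psi_T below are the Neumann boundary data k * du/dn on the left,
    // right, bottom and top sides, and F is intended as the corresponding right-hand side.
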
    double k(double x, double y) {
        return 1 + pow(x + y, 2);
    }

    double q(double x, double y) {
        return 1;
    }

    double u(double x, double y) {
        return 2.0 / (1 + pow(x, 2) + pow(y, 2));
    }

    // psi_R(x, y) = k(A2, y) * du/dx(A2, y)
    double psi_R(double y) {
        return (-12) * (pow((y + 3), 2) + 1) / pow((pow(y, 2) + 10), 2);
    }

    // psi_L(x, y) = -k(A1, y) * du/dx(A1, y)
    double psi_L(double y) {
        return (-8) * (pow((y - 2), 2) + 1) / pow((pow(y, 2) + 5), 2);
    }

    // psi_T(x, y) = k(x, B2) * du/dy(x, B2)
    double psi_T(double x) {
        return (-16) * (pow((x + 4), 2) + 1) / pow((pow(x, 2) + 17), 2);
    }

    // psi_B(x, y) = -k(x, B1) * du/dy(x, B1)
    double psi_B(double x) {
        return (-4) * (pow((x - 1), 2) + 1) / pow((pow(x, 2) + 2), 2);
    }

    // right-hand side of the Poisson equation
    double F(double x, double y) {
        return 2 * (pow(x, 4) + pow(y, 4) + 2 * (pow(x, 2) + 3) * pow(y, 2) + 6 * pow(x, 2) + 16 * x * y + 5)
               / pow((1 + pow(x, 2) + pow(y, 2)), 3);
    }

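    // ComputeMagicInnerProductA_iw returns the value of (A*w) at the node (glob_x, glob_y):
    // a five-point stencil with off-diagonal coefficients -k(x -/+ h_x/2, y)/h_x^2 and
    // -k(x, y -/+ h_y/2)/h_y^2. At boundary nodes the missing neighbour is dropped and the
    // opposite coefficient is doubled, which approximates the Neumann condition there.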
    //inner_product(A[i], internal_data)
    double ComputeMagicInnerProductA_iw(int current_node_global_offset_x, int current_node_global_offset_y){

        int glob_x = current_node_global_offset_x;
        int glob_y = current_node_global_offset_y;

        double result = 0.0;

        map<string, bool> neighbours = {
            {"left", true},
            {"right", true},
            {"bottom", true},
            {"top", true}
        };

        double left_neighbour = 0.0, right_neighbour = 0.0, bottom_neighbour = 0.0, top_neighbour = 0.0, this_node = 0.0;
        double left_coeff = 1.0, right_coeff = 1.0, bottom_coeff = 1.0, top_coeff = 1.0, this_coeff = 1.0;

        switch (IsNodeInternalCornerOrSide(glob_x, glob_y)){
            case 2:
                //left bottom corner
                neighbours["left"] = false;
                neighbours["bottom"] = false;
                break;
            case 4:
                //left top corner
                neighbours["left"] = false;
                neighbours["top"] = false;
                break;
            case 6:
                //right bottom corner
                neighbours["right"] = false;
                neighbours["bottom"] = false;
                break;
            case 8:
                //right top corner
                neighbours["right"] = false;
                neighbours["top"] = false;
                break;
            case 1:
                //left side
                neighbours["left"] = false;
                break;
            case 3:
                //right side
                neighbours["right"] = false;
                break;
            case 5:
                //bottom side
                neighbours["bottom"] = false;
                break;
            case 7:
                //top side
                neighbours["top"] = false;
                break;
            case 0:
                //internal
                break;
            default:
                cout << "[ERROR]: Bad global coords compute matrix. Global:" << glob_x << " " << glob_y << endl;
        }

        if (!neighbours["left"]){
            right_coeff = 2.0;
            left_coeff = 0.0;
        }

        if (!neighbours["right"]){
            left_coeff = 2.0;
            right_coeff = 0.0;
        }

        if (!neighbours["bottom"]){
            top_coeff = 2.0;
            bottom_coeff = 0.0;
        }

        if (!neighbours["top"]){
            bottom_coeff = 2.0;
            top_coeff = 0.0;
        }

        if (neighbours["left"]){
            left_coeff *= -k(x_left + (glob_x - 0.5) * h_x, y_bottom + glob_y * h_y) / pow(h_x, 2);
            left_neighbour = Get(glob_x - 1, glob_y);
        }

        if (neighbours["right"]){
            right_coeff *= -k(x_left + (glob_x + 0.5) * h_x, y_bottom + glob_y * h_y) / pow(h_x, 2);
            right_neighbour = Get(glob_x + 1, glob_y);
        }

        if (neighbours["bottom"]){
            bottom_coeff *= -k(x_left + glob_x * h_x, y_bottom + (glob_y - 0.5) * h_y) / pow(h_y, 2);
            bottom_neighbour = Get(glob_x, glob_y - 1);
        }

        if (neighbours["top"]){
            top_coeff *= -k(x_left + glob_x * h_x, y_bottom + (glob_y + 0.5) * h_y) / pow(h_y, 2);
            top_neighbour = Get(glob_x, glob_y + 1);
        }

        this_coeff = q(x_left + glob_x * h_x, y_bottom + glob_y * h_y) - left_coeff - right_coeff - bottom_coeff - top_coeff;
        this_node = Get(glob_x, glob_y);

        result = left_coeff * left_neighbour +
                 right_coeff * right_neighbour +
                 bottom_coeff * bottom_neighbour +
                 top_coeff * top_neighbour +
                 this_coeff * this_node;

        return result;
    }

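    // GetNodeFromB assembles the right-hand side vector B at a global node: F at interior
    // nodes, plus a (2/h_x)*psi_L/psi_R or (2/h_y)*psi_B/psi_T contribution on the sides,
    // and both contributions at the corners.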
    double GetNodeFromB(int current_node_global_offset_x, int current_node_global_offset_y) {

        int glob_x = current_node_global_offset_x;
        int glob_y = current_node_global_offset_y;

        double result = 0.0;

        switch (IsNodeInternalCornerOrSide(glob_x, glob_y)){
            case 2:
                //left bottom corner
                result = F(x_left, y_bottom) + 2.0 / h_x * psi_L(y_bottom) + 2.0 / h_y * psi_B(x_left);
                break;
            case 4:
                //left top corner
                result = F(x_left, y_top) + 2.0 / h_x * psi_L(y_top) + 2.0 / h_y * psi_T(x_left);
                break;
            case 6:
                //right bottom corner
                result = F(x_right, y_bottom) + 2.0 / h_x * psi_R(y_bottom) + 2.0 / h_y * psi_B(x_right);
                break;
            case 8:
                //right top corner
                result = F(x_right, y_top) + 2.0 / h_x * psi_R(y_top) + 2.0 / h_y * psi_T(x_right);
                break;
            case 1:
                //left side
                result = F(x_left, y_bottom + glob_y * h_y) + 2.0 / h_x * psi_L(y_bottom + glob_y * h_y);
                break;
            case 3:
                //right side
                result = F(x_right, y_bottom + glob_y * h_y) + 2.0 / h_x * psi_R(y_bottom + glob_y * h_y);
                break;
            case 5:
                //bottom side
                result = F(x_left + glob_x * h_x, y_bottom) + 2.0 / h_y * psi_B(x_left + glob_x * h_x);
                break;
            case 7:
                //top side
                result = F(x_left + glob_x * h_x, y_top) + 2.0 / h_y * psi_T(x_left + glob_x * h_x);
                break;
            case 0:
                //internal
                result = F(x_left + glob_x * h_x, y_bottom + glob_y * h_y);
                break;
            default:
                cout << "[ERROR]: Bad global coords compute matrix. Global:" << glob_x << " " << glob_y << endl;
        }

        return result;
    }

    double GetNodeFromExact(int current_node_global_offset_x, int current_node_global_offset_y) {

        int glob_x = current_node_global_offset_x;
        int glob_y = current_node_global_offset_y;

        return u(x_left + glob_x * h_x, y_bottom + glob_y * h_y);
    }

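    // ComputeMatrixR: computes the residual r = A*w - B for the current block, saves the
    // previous iterate in old_internal_data, overwrites internal_data with r, and then
    // refreshes the halo strips so neighbouring blocks see the new values.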
    void ComputeMatrixR(){
        /*if(my_rank == 0)
            cout << "[INFO]: Computation of matrix r started" << endl;
        */

        vector<double> r_tmp_matrix(cur_block_size_x * cur_block_size_y, 0.0);

        //#pragma omp parallel for
        for(int i = 0; i < cur_block_size_x; ++i){
            for(int j = 0; j < cur_block_size_y; ++j){

                int current_node_global_offset_x = GetGlobalX(i),
                    current_node_global_offset_y = GetGlobalY(j);

                int glob_x = current_node_global_offset_x,
                    glob_y = current_node_global_offset_y;

                r_tmp_matrix[ j + cur_block_size_y*i ] = ComputeMagicInnerProductA_iw(glob_x, glob_y) - GetNodeFromB(glob_x, glob_y);
            }
        }

        //#pragma omp parallel for
        for(int i = 0; i < cur_block_size_x; ++i){
            for(int j = 0; j < cur_block_size_y; ++j){

                old_internal_data[ j + cur_block_size_y*i ] = internal_data[ j + cur_block_size_y*i ];
                internal_data[ j + cur_block_size_y*i ] = r_tmp_matrix[ j + cur_block_size_y*i ];
            }
        }

        //old_internal_data = internal_data;
        //internal_data = r_tmp_matrix;

        SyncMPI();
    }

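    // ComputeTauAndStopCase: with the residual r in internal_data, computes the step size
    // tau = (A*r, r) / (A*r, A*r) using the weighted grid inner product (weight 1 for
    // interior nodes, 0.5 for sides, 0.25 for corners, scaled by h_x*h_y), and sets
    // should_i_stop once the update norm |tau| * ||r|| drops to 1e-06 or below.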
    double ComputeTauAndStopCase(bool &should_i_stop){

        double local_Ar_r_inner_product_sum = 0.0;
        double local_Ar_Ar_inner_product_sum = 0.0;
        double global_Ar_r_inner_product_sum = 0.0;
        double global_Ar_Ar_inner_product_sum = 0.0;
        double local_r_norm = 0.0;
        double global_r_norm = 0.0;

        //#pragma omp parallel for
        for(int i = 0; i < cur_block_size_x; ++i){
            for(int j = 0; j < cur_block_size_y; ++j){
                double rho = 1.0;

                int current_node_global_offset_x = GetGlobalX(i),
                    current_node_global_offset_y = GetGlobalY(j);

                int glob_x = current_node_global_offset_x,
                    glob_y = current_node_global_offset_y;

                double tmp_Ar_i_j = ComputeMagicInnerProductA_iw(glob_x, glob_y);

                switch (IsNodeInternalCornerOrSide(glob_x, glob_y)){
                    case 2:
                    case 4:
                    case 6:
                    case 8:
                        //corner
                        rho = 0.25;
                        break;
                    case 1:
                    case 3:
                    case 5:
                    case 7:
                        //side
                        rho = 0.5;
                        break;
                    case 0:
                        //internal
                        rho = 1.0;
                        break;
                    default:
                        cout << "[ERROR]: Bad global coords compute tau. Global:" << glob_x << " " << glob_y << endl;
                }

                double tmp_cur_node_value = Get(glob_x, glob_y);

                local_Ar_r_inner_product_sum += rho * tmp_Ar_i_j * tmp_cur_node_value * h_x * h_y;
                local_Ar_Ar_inner_product_sum += rho * pow(tmp_Ar_i_j, 2) * h_x * h_y;
                local_r_norm += rho * pow(tmp_cur_node_value, 2) * h_x * h_y;
            }
        }

        //cout << "[DEBUG]: Local " << local_Ar_r_inner_product_sum << endl;

        MPI_Allreduce(&local_Ar_r_inner_product_sum, &global_Ar_r_inner_product_sum, 1, MPI_DOUBLE, MPI_SUM,
                      comm);

        //cout << "[DEBUG]: " << global_Ar_r_inner_product_sum << endl;

        MPI_Allreduce(&local_Ar_Ar_inner_product_sum, &global_Ar_Ar_inner_product_sum, 1, MPI_DOUBLE, MPI_SUM,
                      MPI_COMM_WORLD);

        //cout << "[DEBUG]: " << global_Ar_Ar_inner_product_sum << endl;

        double global_tau = global_Ar_r_inner_product_sum / global_Ar_Ar_inner_product_sum;

        MPI_Allreduce(&local_r_norm, &global_r_norm, 1, MPI_DOUBLE, MPI_SUM,
                      MPI_COMM_WORLD);

        double eps = 1e-06;

        if (global_r_norm < 0){
            cout << "[ERROR]: bad global r norm" << endl;
        }

        current_norm = fabs(global_tau) * sqrt(global_r_norm);

        //if (my_rank == 0)
        //    cout << "[DEBUG]: solution norm " << current_norm << endl;

        if (current_norm <= eps){
            should_i_stop = true;
        }else{
            should_i_stop = false;
        }

        return global_tau;
    }

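    // ComputeNewW: applies the update w_new = w_old - tau * r in place (internal_data holds
    // r on entry and the new iterate w on exit).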
    void ComputeNewW(double tau){
        //#pragma omp parallel for
        for(int i = 0; i < cur_block_size_x; ++i){
            for(int j = 0; j < cur_block_size_y; ++j){
                internal_data[ j + cur_block_size_y*i ] = old_internal_data[ j + cur_block_size_y*i ] - tau * internal_data[ j + cur_block_size_y*i ];
                //old_internal_data[ j + cur_block_size_y*i ] = 0.0;
            }
        }
    }

    int GetGlobalX(int i){
        return cur_block_global_offset_x + i;
    }

    int GetGlobalY(int j){
        return cur_block_global_offset_y + j;
    }

public:
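    // Constructor (descriptive note): the (M+1) x (N+1) grid nodes are split into a Gx x Gy
    // grid of blocks; each block gets (M+1)/Gx by (N+1)/Gy nodes and the last block in each
    // direction absorbs the remainder. Every process also opens its own output file
    // ./file<bx>_<by>.res for the optional dump of computed vs. exact values.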
    MPIComputations(int inpM, int inpN, int inpGx, int inpGy, double inpx_left, double inpx_right, double inpy_bottom, double inpy_top, int inp_cur_block_global_coors_x, int inp_cur_block_global_coors_y, int inprank, MPI_Comm inpcomm){

        M = inpM;
        N = inpN;

        Gx = inpGx;
        Gy = inpGy;

        x_left = inpx_left;
        x_right = inpx_right;
        y_bottom = inpy_bottom;
        y_top = inpy_top;

        h_x = (x_right - x_left) / M;
        h_y = (y_top - y_bottom) / N;

        my_rank = inprank;
        comm = inpcomm;

        stringstream ss;
        ss << "./file" << inp_cur_block_global_coors_x << "_" << inp_cur_block_global_coors_y << ".res";

        string filename;
        ss >> filename;

        my_file.open(filename.c_str());

        cur_block_global_coors_x = inp_cur_block_global_coors_x;
        cur_block_global_coors_y = inp_cur_block_global_coors_y;

        global_block_size_x = (M + 1) / Gx;
        global_block_size_y = (N + 1) / Gy;

        cur_block_size_x = global_block_size_x;
        cur_block_size_y = global_block_size_y;

        cur_block_global_offset_x = global_block_size_x * cur_block_global_coors_x;
        cur_block_global_offset_y = global_block_size_y * cur_block_global_coors_y;

        //the last block in each direction picks up the remaining nodes
        if (cur_block_global_offset_x + 2 * global_block_size_x > M + 1){
            cur_block_size_x += (M + 1) % Gx;
        }

        if (cur_block_global_offset_y + 2 * global_block_size_y > N + 1){
            cur_block_size_y += (N + 1) % Gy;
        }

        internal_data.resize(cur_block_size_x * cur_block_size_y);
        old_internal_data.resize(cur_block_size_x * cur_block_size_y);

        //OX halos (left, right)
        external_data[0].resize(cur_block_size_y);
        external_data[1].resize(cur_block_size_y);

        //OY halos (bottom, top)
        external_data[2].resize(cur_block_size_x);
        external_data[3].resize(cur_block_size_x);
    }

    double Get(int i, int j) {
        return GetLocalIndex(i - cur_block_global_offset_x, j - cur_block_global_offset_y);
    }

    void Set(int i, int j, double v) {
        SetLocalIndex(i - cur_block_global_offset_x, j - cur_block_global_offset_y, v);
    }

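    // SyncMPI: halo exchange. Copies this block's boundary columns/rows into external_data,
    // resolves the four Cartesian neighbours, and swaps the strips with
    // MPI_Sendrecv_replace. Along each axis the exchange order alternates with the block's
    // coordinate parity so that every send is matched by a receive on the other side.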
    void SyncMPI(){

        /*if(my_rank == 0)
            cout << "[INFO]: Sync started" << endl;
        */

        MPI_Barrier(MPI_COMM_WORLD);

        //left and right sides
        //#pragma omp parallel for
        for(int j = 0; j < cur_block_size_y; ++j){

            external_data[ 0 ][ j ] = GetLocalIndex(0, j);                    //internal_data[ j ];
            external_data[ 1 ][ j ] = GetLocalIndex(cur_block_size_x - 1, j); //internal_data[ j + cur_block_size_y * (cur_block_size_x - 1) ];
        }

        //bottom and top sides
        //#pragma omp parallel for
        for(int i = 0; i < cur_block_size_x; ++i){

            external_data[ 2 ][ i ] = GetLocalIndex(i, 0);                    //internal_data[ cur_block_size_y*i ];
            external_data[ 3 ][ i ] = GetLocalIndex(i, cur_block_size_y - 1); //internal_data[ (cur_block_size_y - 1) + cur_block_size_y*i ];
        }

        int my_coords[2];
        int targets_ranks[4];

        MPI_Cart_coords(comm, my_rank, 2, my_coords);

        int neighbour_offsets[ 4 ][ 2 ] = {
            { -1, 0 }, { 1, 0 },
            { 0, -1 }, { 0, 1 }
        };

        //#pragma omp parallel for
        for(int i = 0; i < 4; i++){

            int target_coords[2];

            target_coords[ 0 ] = my_coords[ 0 ] + neighbour_offsets[ i ][ 0 ];
            target_coords[ 1 ] = my_coords[ 1 ] + neighbour_offsets[ i ][ 1 ];

            if (target_coords[0] >= 0 && target_coords[0] < Gx && target_coords[1] >= 0 && target_coords[1] < Gy){

                MPI_Cart_rank(comm, target_coords, &targets_ranks[ i ]);
            }
            else{
                targets_ranks[i] = -1;
            }
        }

        //Now we have the ranks of all targets

        for(int axis = 0; axis < 2; axis++){

            int parity_bit = (my_coords[ axis ]) % 2;

            //if parity_bit == 0: exchange with the left neighbour first, then with the right
            //if parity_bit == 1: exchange with the right neighbour first, then with the left

            for(int tmp = 0; tmp < 2; tmp++){
                parity_bit = 1 - parity_bit;

                //target index into external_data and targets_ranks
                int target_idx = 2 * axis + (1 - parity_bit);

                if (targets_ranks[target_idx] != -1){

                    //send and receive tags encode the sender's rank, the axis and the direction
                    int send_tag = 100000 + my_rank * 100 + axis * 10 + parity_bit;
                    int recv_tag = 100000 + targets_ranks[ target_idx ] * 100 + axis * 10 + (1 - parity_bit);

                    MPI_Status tmp_status;
                    //only exchange if the target is not this process itself
                    if(my_rank != targets_ranks[ target_idx ]){

                        MPI_Sendrecv_replace(&external_data[ target_idx ][ 0 ], external_data[ target_idx ].size(),
                            MPI_DOUBLE, targets_ranks[ target_idx ], send_tag, targets_ranks[ target_idx ], recv_tag,
                            comm, &tmp_status);
                    }
                }
            }
        }

        MPI_Barrier(MPI_COMM_WORLD);
    }


    void DoIteration(bool &should_i_stop){

        ComputeMatrixR();

        //Now the residual matrix R is in internal_data

        double tau = ComputeTauAndStopCase(should_i_stop);

        //At this point the block holds:
        //  internal_data:     the residual R
        //  old_internal_data: w from the previous iteration
        //and we have tau, so the next w can be computed

        ComputeNewW(tau);

        SyncMPI();
    }

    double GetLocalIndex(int i, int j){
        //internal data
        if ((j >= 0) && (j < cur_block_size_y) && (i >= 0) && (i < cur_block_size_x)){
            return internal_data[ j + cur_block_size_y*i ];
        }

        //external data
        //OX
        if((j >= 0) && (j < cur_block_size_y)){

            if (i == -1)
                return external_data[ 0 ][ j ];

            if (i == cur_block_size_x)
                return external_data[ 1 ][ j ];
        }

        //OY
        if((i >= 0) && (i < cur_block_size_x)){

            if (j == -1)
                return external_data[ 2 ][ i ];
            if (j == cur_block_size_y)
                return external_data[ 3 ][ i ];
        }

        cout << "[ERROR]: bad local index" << endl;

        return nan("");
    }

    void SetLocalIndex(int i, int j, double v){
        if ((j >= 0) && (j < cur_block_size_y) && (i >= 0) && (i < cur_block_size_x)){
            internal_data[ j + cur_block_size_y*i ] = v;
        }else{
            cout << "[ERROR]: trying to set data outside the local area" << endl;
        }
    }

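    // CompareWithExact: returns the weighted L2 grid norm of (w - u_exact) over the whole
    // domain (reduced across all processes); if print_data_to_files is set, each process
    // also writes "computed:exact" pairs for its block to its ./file<bx>_<by>.res file.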
    double CompareWithExact(bool print_data_to_files) {

        double local_diff_norm = 0.0;
        double global_diff_norm = 0.0;
        /*
        if (my_rank == 0)
            cout << "[INFO]: Starting computing compare with exact" << endl;
        */
        vector<double> tmp_elements(cur_block_size_x * cur_block_size_y, 0.0);

        //#pragma omp parallel for
        for (int i = 0; i < cur_block_size_x; ++i){
            for (int j = 0; j < cur_block_size_y; ++j){

                int current_node_global_offset_x = GetGlobalX(i),
                    current_node_global_offset_y = GetGlobalY(j);

                int glob_x = current_node_global_offset_x,
                    glob_y = current_node_global_offset_y;

                double tmp_elem = Get(glob_x, glob_y) - GetNodeFromExact(glob_x, glob_y);

                //local_diff_norm = max( fabs(tmp_elem), local_diff_norm);

                //local_diff_norm += rho * pow(tmp_elem, 2) * h_x * h_y;

                tmp_elements[ j + cur_block_size_y*i ] = tmp_elem;
            }
        }

        //cout << "[INFO]: local max diff in " << cur_block_global_offset_x << " " << cur_block_global_offset_y << " " << local_diff_norm << endl;

        for (int i = 0; i < cur_block_size_x; ++i){
            for (int j = 0; j < cur_block_size_y; ++j){

                double rho = 1.0;

                int current_node_global_offset_x = GetGlobalX(i),
                    current_node_global_offset_y = GetGlobalY(j);

                int glob_x = current_node_global_offset_x,
                    glob_y = current_node_global_offset_y;

                switch (IsNodeInternalCornerOrSide(glob_x, glob_y)){
                    case 2:
                    case 4:
                    case 6:
                    case 8:
                        //corner
                        rho = 0.25;
                        break;
                    case 1:
                    case 3:
                    case 5:
                    case 7:
                        //side
                        rho = 0.5;
                        break;
                    case 0:
                        //internal
                        rho = 1.0;
                        break;
                    default:
                        cout << "[ERROR]: Bad global coords compute exact. Local:" << i << " " << j << ". Global:" << glob_x << " " << glob_y << endl;
                }

                double tmp_elem = tmp_elements[ j + cur_block_size_y*i ];

                //local_diff_norm = max( fabs(tmp_elem), local_diff_norm);
                local_diff_norm += rho * pow(tmp_elem, 2) * h_x * h_y;
            }
        }

        MPI_Allreduce(&local_diff_norm, &global_diff_norm, 1, MPI_DOUBLE, MPI_SUM,
                      MPI_COMM_WORLD);

        global_diff_norm = sqrt(global_diff_norm);

        if (print_data_to_files){
            for (int i = 0; i < cur_block_size_x; ++i){
                for (int j = 0; j < cur_block_size_y; ++j){

                    int current_node_global_offset_x = GetGlobalX(i),
                        current_node_global_offset_y = GetGlobalY(j);

                    int glob_x = current_node_global_offset_x,
                        glob_y = current_node_global_offset_y;

                    my_file << Get(glob_x, glob_y) << ":" << GetNodeFromExact(glob_x, glob_y) << "\t";
                }
                my_file << endl;
            }
        }

        return global_diff_norm;
    }

    double GetCurrentNorm(){
        return current_norm;
    }
};


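// Driver: reads N, Gx, Gy from the command line, builds a Gx x Gy Cartesian communicator,
// and iterates until the update norm reported by ComputeTauAndStopCase falls below the
// tolerance, then prints the difference from the exact solution and the timings.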
int main(int argc, char* argv[]){

    const double x_left = -2, x_right = 3;
    const double y_bottom = -1, y_top = 4;

    double time_start, time_stop;
    int N, Gx, Gy;
    int dim[2], period[2];

    MPI_Comm comm;

    //check the argument count before touching argv
    if (argc < 4){
        cout << "[ERROR]: Usage: mpieval <N> <Gx> <Gy> [<print_data_to_files>]" << endl;
        return 1;
    }

    //N - global grid size
    N = atoi(argv[1]);

    //Gx
    Gx = dim[0] = atoi(argv[2]);

    //Gy
    Gy = dim[1] = atoi(argv[3]);

    //the optional fourth argument enables dumping computed vs. exact values to files
    bool print_data_to_files = false;

    if (argc >= 5){
        print_data_to_files = true;
    }

    period[0] = 0;
    period[1] = 0;

    MPI_Init(&argc, &argv);

    time_start = MPI_Wtime();

    int world_size;
    int my_rank;

    MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
    MPI_Comm_size(MPI_COMM_WORLD, &world_size);

    if(my_rank == 0){
        cout << "[INFO]: N = " << N << endl;
        cout << "[INFO]: Gx = " << dim[0] << endl;
        cout << "[INFO]: Gy = " << dim[1] << endl;
    }

    if(Gx * Gy != world_size){
        if(my_rank == 0)
            cout << "[ERROR]: mpi world size is not equal to " << Gx << "*" << Gy << endl;

        MPI_Abort(MPI_COMM_WORLD, 1);
    }

    MPI_Cart_create(MPI_COMM_WORLD, 2, dim, period, 1, &comm);

    /*if(my_rank == 0)
        cout << "[INFO]: Cart created" << endl;
    */

    MPI_Comm_rank(comm, &my_rank);

    int my_coords[2];

    MPI_Cart_coords(comm, my_rank, 2, my_coords);

    MPIComputations w_func(N, N, Gx, Gy, x_left, x_right, y_bottom, y_top, my_coords[0], my_coords[1], my_rank, comm);

    int iteration_num = 0;

    bool should_i_stop = false;

    while (!should_i_stop){

        w_func.DoIteration(should_i_stop);

        if ( (my_rank == 0) && (iteration_num % 10000 == 0) ){
            cout << "[INFO]: Iteration " << iteration_num << ", difference with previous iterate: " << w_func.GetCurrentNorm() << endl;
        }

        iteration_num++;
    }

    MPI_Barrier(MPI_COMM_WORLD);

    double diff_with_exact = w_func.CompareWithExact(print_data_to_files);

    if (my_rank == 0)
        cout << "[INFO]: Diff with exact solution: " << diff_with_exact << endl;

    time_stop = MPI_Wtime();
    if( my_rank == 0 )
        cout << "Finished!" << endl
             << "Total iterations: " << iteration_num << endl
             << "Elapsed time: " << (time_stop - time_start) << endl
             << "Time per iteration: " << (time_stop - time_start) / double(iteration_num) << endl;

    MPI_Finalize();

    return 0;
}
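
// Build/run sketch (illustrative, assuming the usual MPI compiler wrappers; adjust to your toolchain):
//   mpicxx -std=c++11 -O2 -fopenmp mpieval.cpp -o mpieval
//   mpirun -np 4 ./mpieval 500 2 2
// Gx*Gy (here 2*2) must equal the number of MPI processes. The OpenMP pragmas are currently
// commented out, so -fopenmp is optional. The optional fourth argument enables the per-block
// result files (./file<bx>_<by>.res).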