Advertisement
Guest User

lab2_schedule

a guest
May 20th, 2019
111
0
Never
Not a member of Pastebin yet? Sign Up, it unlocks many cool features!
text 2.68 KB | None | 0 0
  1. #include <stdio.h>
  2. #include <stdlib.h>
  3. #include <math.h>
  4. #include <omp.h>
  5.  
#define N 1024  /* system size: A is N x N, B and x have N elements */

const double t = 1e-5;  /* iteration step size (tau) for x <- x - t*(A*x - B) */
const double e = 1e-9;  /* relative-residual tolerance used as the stop criterion */

/* OpenMP scheduling policy shared by every parallel-for in this file. */
#define schedule_type static
#define schedule_arg 1
  13.  
/* Dense row-major matrix: element (i, j) is data[i * w + j].
 * Column vectors use w == 1 with h holding the length.
 * The data buffer is heap-allocated; whoever creates it frees it. */
typedef struct matrix {
    double* data; /* w*h contiguous doubles */
    int w, h;     /* width (columns) and height (rows) */
} matrix;
  18.  
  19. void setInitData(matrix *A, matrix *B, matrix *x) {
  20. A->data = (double*)malloc(N * N * sizeof(double));
  21. A->w = N;
  22. A->h = N;
  23.  
  24. B->data = (double*)malloc(N * sizeof(double));
  25. B->w = 1;
  26. B->h = N;
  27.  
  28. x->data = (double*)malloc(N * sizeof(double));
  29. x->w = 1;
  30. x->h = N;
  31.  
  32. for (int i = 0; i < A->w; i++) {
  33. for (int j = 0; j < A->h; j++) {
  34. A->data[i * N + j] = 1.0;
  35. if (i == j) A->data[i * N + j] = 2.0;
  36. }
  37. }
  38.  
  39. for (int i = 0; i < B->h; i++) {
  40. B->data[i] = N + 1;
  41. x->data[i] = 0;
  42. }
  43. }
  44.  
  45. void mult(matrix *a, matrix *b, matrix *res) {
  46. double temp = 0;
  47.  
  48. #pragma omp parallel for reduction(+:temp) schedule(schedule_type, schedule_arg)
  49. for (int i = 0; i < a->h; i++) {
  50. temp = 0;
  51.  
  52. for (int k = 0; k < b->h; k++)
  53. temp += a->data[(i * a->w) + k] * b->data[k];
  54.  
  55. res->data[i] = temp;
  56. }
  57. }
  58.  
  59. void sub(matrix *a, matrix *b, matrix *res) {
  60. #pragma omp parallel for schedule(schedule_type, schedule_arg)
  61. for (int i = 0; i < a->h; i++)
  62. res->data[i] = a->data[i] - b->data[i];
  63. }
  64.  
  65. void sum(matrix *a, matrix *b, matrix *res) {
  66. #pragma omp parallel for schedule(schedule_type, schedule_arg)
  67. for (int i = 0; i < a->h; i++)
  68. res->data[i] = a->data[i] + b->data[i];
  69. }
  70.  
  71. void scalarMult(matrix *a, double k, matrix *res) {
  72. #pragma omp parallel for schedule(schedule_type, schedule_arg)
  73. for (int i = 0; i < a->h; i++)
  74. res->data[i] = a->data[i] * k;
  75. }
  76.  
  77. double squaredNorma(matrix *a) {
  78. double res = 0;
  79.  
  80. #pragma omp parallel for reduction(+:res) schedule(schedule_type, schedule_arg)
  81. for (int i = 0; i < a->h; i++)
  82. res += a->data[i] * a->data[i];
  83.  
  84. return res;
  85. }
  86.  
  87.  
  88. int main(int argc, char** argv) {
  89. if (argc > 1) {
  90. omp_set_num_threads(atoi(argv[1]));
  91. }
  92. else {
  93. printf("Error: specify number of threads!\n");
  94. return 0;
  95. }
  96.  
  97. double timeStart, Ax_b_norma = N, b_norma;
  98. int counter = 0;
  99. matrix A, B, x, temp;
  100.  
  101. setInitData(&A, &B, &x);
  102. temp.data = (double*)malloc(N * sizeof(double));
  103. temp.h = N;
  104. temp.w = 1;
  105. b_norma = sqrt(squaredNorma(&B));
  106.  
  107. timeStart = omp_get_wtime();
  108.  
  109. while (sqrt(Ax_b_norma) / b_norma >= e) {
  110. mult(&A, &x, &temp);
  111. sub(&temp, &B, &temp);
  112. Ax_b_norma = squaredNorma(&temp);
  113. scalarMult(&temp, t, &temp);
  114. sub(&x, &temp, &x);
  115. counter++;
  116. }
  117.  
  118. printf("x[0]: %lf\nCounter: %d\nTime: %lf s\n", x.data[0], counter, omp_get_wtime() - timeStart);
  119.  
  120. free(A.data);
  121. free(B.data);
  122. free(x.data);
  123. free(temp.data);
  124.  
  125. return 0;
  126. }
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement