Guest User

Untitled

a guest
Jul 17th, 2018
86
0
Never
Not a member of Pastebin yet? Sign Up, it unlocks many cool features!
C 3.27 KB | None | 0 0
  1. #include <stdio.h>
  2. #include <math.h>
  3. #include "matmul.h"
  4. #include <pthread.h>
  5. #include <semaphore.h>
  6. #include <unistd.h>
  7. #include <fcntl.h>
  8.  
  9. sem_t *availableWork;
  10.  
  11. int rowsDone, _n, currentRow, _itt;
  12. double *_t, *_t1, *_error;
  13. pthread_mutex_t doingWork, mut_RowsDone, mut_CurrentRow;
  14.  
  15. void compute(double *error, int n, double *t, double *t1, int itt_max)
  16. {  
  17.     int i, j, itt;
  18.     double errori, sum;
  19.     double *ttemp;
  20.     pthread_t threads[N];
  21.    
  22.     _n = n;
  23.     _t = t;
  24.     _t1 = t1;
  25.     _error = error;
  26.    
  27.     // initialize mutex
  28.     if(pthread_mutex_init(&doingWork, NULL) != 0) perror("mutex");
  29.     if(pthread_mutex_init(&mut_RowsDone, NULL) != 0) perror("mutex");
  30.     if(pthread_mutex_init(&mut_CurrentRow, NULL) != 0) perror("mutex");
  31.    
  32.     // initialize semaphore
  33.     availableWork = sem_open("sem", O_CREAT, 0644, 0);
  34.    
  35.     // initialize threads.
  36.     for (i = 0; i < NUMTHREADS; ++i) {
  37.         pthread_create(&threads[i], NULL, &doWork, (void *)i);
  38.     }
  39.    
  40.     // print success to the console
  41.     fprintf(stderr, "Threads initialized.\n\n");
  42.    
  43.     // while we're less than or equal to the number of iterations
  44.     for(itt=0; itt<=itt_max; itt++)
  45.     {
  46.         // lock the mutex so the process doesn't end prematurely
  47.         pthread_mutex_lock(&doingWork);
  48.        
  49.         _itt = itt;
  50.        
  51.         // post work to the semaphore
  52.         for (i = 0; i < n; ++i) {
  53.             sem_post(availableWork);
  54.         }
  55.        
  56.         // if there is no more work currently in the queue, the last thread to do work will unlock this
  57.         // and allow continued execution
  58.         pthread_mutex_lock(&doingWork);
  59.        
  60.         // answer into temporary array
  61.         ttemp = t1;
  62.        
  63.         // old x vector into t1 (why?)
  64.         t1 = t;
  65.        
  66.         // new x vector is the answer from the last multiplication
  67.         t = ttemp;
  68.        
  69.         // print, do it all over again
  70.         //printf("%5d %14.6e\n", itt, error[itt]);
  71.     }
  72. }
  73.  
  74. void *doWork(void *arg)
  75. {
  76.     double sum, errori;
  77.     int j, doingWorkOnRow;
  78.     while(1)
  79.     {
  80.         sem_wait(availableWork);
  81.        
  82.         doingWorkOnRow = getCurrentRow();
  83.                
  84.         // columns of the rows
  85.         for(j=0; j< _n; j++)
  86.         {
  87.             // multiply an entry
  88.             sum += a[doingWorkOnRow][j] * _t[j];
  89.         }
  90.        
  91.         _t1[doingWorkOnRow] = sum + b[doingWorkOnRow];
  92.         errori = fabs(_t1[doingWorkOnRow] - _t[doingWorkOnRow]);
  93.         if(errori > _error[_itt])
  94.         {
  95.             _error[_itt] = errori;
  96.         }
  97.        
  98.         fprintf(stderr, "Thread %d did work.  ThreadsDoingWork = %d\n", (int)arg, rowsDone);
  99.        
  100.         incRowsDone();
  101.         // if all the rows in this iteration are done, allow the main thread to continue
  102.         if(rowsDone == _n - 1) pthread_mutex_unlock(&doingWork);
  103.     }
  104. }
  105.  
  106. int incRowsDone()
  107. {
  108.     if(pthread_mutex_lock(&mut_RowsDone) != 0) perror("mutex lock");
  109.     rowsDone = rowsDone + 1;
  110.     pthread_mutex_unlock(&mut_RowsDone);
  111.    
  112.     return rowsDone;
  113. }
  114.  
  115. int getCurrentRow()
  116. {
  117.     if(pthread_mutex_lock(&mut_CurrentRow)!= 0) perror("mutex lock");
  118.     currentRow++;
  119.     pthread_mutex_unlock(&mut_CurrentRow);
  120.    
  121.     return currentRow - 1;
  122. }
Add Comment
Please, Sign In to add comment