SHARE
TWEET

Untitled

a guest Oct 15th, 2019 84 Never
Not a member of Pastebin yet? Sign Up, it unlocks many cool features!
  1. #include <assert.h>
  2. #include <stdlib.h>
  3. #include <ucontext.h>
  4. #include "thread.h"
  5. #include "interrupt.h"
  6. #include <stdbool.h>
  7.  
//Struct
/* Node in a FIFO wait queue of blocked threads.  The list is
 * dummy-headed: the node returned by wait_queue_create() has
 * wait_info == NULL and real entries hang off ->next. */
struct wait_queue {
    struct thread* wait_info;   // control block of the blocked thread
    struct wait_queue* next;    // next waiter (NULL at tail)
};
  14.  
/* This is the thread control block */
struct thread {
    ucontext_t* thread_context; // saved CPU/stack state (heap-allocated)
    Tid thread_id;              // identifier; index into thread_tracker
    bool kill_status;           // set by thread_kill; victim exits at its next switch-in

};
  22.  
/* Node in a dummy-headed FIFO list of thread control blocks.
 * The same node type is used for both the ready queue and the
 * exit queue. */
struct ready_queue {
    struct thread* thread_info; // control block carried by this node
    struct ready_queue* next;   // next node (NULL at tail)
};
  28.  
//Globals
// thread_tracker[i] == 1 when Tid i is in use, 0 when free.
int thread_tracker[THREAD_MAX_THREADS];
struct thread* current_thread;  // control block of the running thread
struct ready_queue* ready_q;    // dummy-headed FIFO of runnable threads
struct ready_queue* exit_q;     // dummy-headed FIFO of dead threads awaiting reclamation

//Functions
void insert_ready_queue(struct ready_queue* new_node);  // append node to ready_q
void exit_queue(struct ready_queue* new_node);          // append node to exit_q
void destroy_exit_q();                                  // free all exited threads' resources
void insert_wait_queue(struct wait_queue* wq,struct wait_queue* new_node); // append node to wq
  41. void
  42. thread_init(void) {
  43.  
  44.     //initialize array of threads that keeps track of all thread id's and kill status in particular
  45.     for (int i = 0; i < THREAD_MAX_THREADS; i++) {
  46.         thread_tracker[i] = 0;
  47.     }
  48.    
  49.     //initialize exit queue
  50.     exit_q = (struct ready_queue*) malloc(sizeof (struct ready_queue));
  51.     exit_q->next = NULL;
  52.     exit_q->thread_info = NULL;
  53.  
  54.     //initialize ready queue
  55.     ready_q = (struct ready_queue*) malloc(sizeof (struct ready_queue));
  56.     ready_q->next = NULL;
  57.     ready_q->thread_info = NULL;
  58.  
  59.     //set the current_thread id to 0
  60.     current_thread = (struct thread*) malloc(sizeof (struct thread));
  61.     current_thread->thread_context = (ucontext_t*) malloc(sizeof (ucontext_t));
  62.     current_thread->thread_id = 0;
  63.     current_thread->kill_status = false;
  64.     thread_tracker[0] = 1;
  65.  
  66. }
  67.  
/*
 * thread_stub: entry point of every thread created by thread_create.
 * Interrupts were disabled during the context switch that started this
 * thread, so re-enable them, run the thread's main function, and exit
 * the thread when it returns.  Never returns to its caller.
 */
void
thread_stub(void (*thread_main)(void *), void *arg) {
    interrupts_set(1);
    thread_main(arg);
    thread_exit();

}
  75.  
/*This function returns the thread identifier of the currently running thread*/
Tid
thread_id() {
    return current_thread->thread_id;
}
  81.  
  82. /*This function creates a thread*/
  83. Tid
  84. thread_create(void (*fn) (void *), void *parg) {
  85.    
  86.     int enabled = interrupts_set(0);   
  87.     assert(!interrupts_enabled());
  88.     //search the array to find an id that is usable
  89.     int free_id = 0;
  90.     while (free_id < THREAD_MAX_THREADS && thread_tracker[free_id] != 0) {
  91.         free_id++;
  92.         if (free_id == THREAD_MAX_THREADS) {
  93.             interrupts_set(enabled);
  94.             return THREAD_NOMORE;
  95.         }
  96.     }
  97.  
  98.     //no space to create more threads
  99.     if (free_id == 0) {
  100.         interrupts_set(enabled);
  101.         return THREAD_NOMORE;
  102.     }
  103.    
  104.     //create the new thread
  105.     struct thread* new_thread = (struct thread*) malloc(sizeof (struct thread));
  106.     if (new_thread == NULL) {
  107.         interrupts_set(enabled);
  108.         return THREAD_NOMEMORY;
  109.     }
  110.  
  111.     new_thread->thread_context = (ucontext_t*) malloc(sizeof (ucontext_t));
  112.     if (new_thread == NULL) {
  113.         interrupts_set(enabled);
  114.         return THREAD_NOMEMORY;
  115.     }
  116.    
  117.     assert(!interrupts_enabled());
  118.     getcontext(new_thread->thread_context);
  119.     new_thread->thread_id = free_id;
  120.     thread_tracker[free_id] = 1;
  121.  
  122.     //initialize the thread context
  123.     new_thread->thread_context->uc_mcontext.gregs[REG_RIP] = (unsigned long) &thread_stub;
  124.     void* new_stack = (void*) malloc(THREAD_MIN_STACK);
  125.     if (new_stack == NULL) {
  126.         interrupts_set(enabled);
  127.         return THREAD_NOMEMORY;
  128.     }
  129.     new_thread->thread_context->uc_stack.ss_sp = new_stack;
  130.     new_thread->thread_context->uc_stack.ss_size = THREAD_MIN_STACK;
  131.     new_thread->thread_context->uc_mcontext.gregs[REG_RSP] = (unsigned long) (new_stack + THREAD_MIN_STACK + 8);
  132.     new_thread->thread_context->uc_mcontext.gregs[REG_RDI] = (unsigned long) fn;
  133.     new_thread->thread_context->uc_mcontext.gregs[REG_RSI] = (unsigned long) parg;
  134.     new_thread->kill_status = false;
  135.  
  136.     //insert it into the ready queue
  137.     struct ready_queue* queue_new_thread;
  138.     queue_new_thread = (struct ready_queue*) malloc(sizeof (struct ready_queue));
  139.     queue_new_thread->next = NULL;
  140.     queue_new_thread->thread_info = new_thread;
  141.     insert_ready_queue(queue_new_thread);
  142.     interrupts_set(enabled);
  143.     return free_id;
  144. }
  145.  
  146. /*This function suspends the caller and activates the thread given by the identifier tid*/
  147. Tid
  148. thread_yield(Tid want_tid) {
  149.    
  150.     int enabled = interrupts_set(0);   
  151.     assert(!interrupts_enabled());
  152.    
  153.     if(exit_q->next!=NULL){
  154.         destroy_exit_q();
  155.     }
  156.  
  157.     if (want_tid == THREAD_SELF) {
  158.         interrupts_set(enabled);
  159.         return current_thread->thread_id;
  160.     }
  161.  
  162.     if (current_thread->thread_id == want_tid) {
  163.         interrupts_set(enabled);
  164.         return current_thread->thread_id;
  165.     }
  166.  
  167.     if (want_tid<-2 || want_tid >= THREAD_MAX_THREADS) {
  168.         interrupts_set(enabled);
  169.         return THREAD_INVALID;
  170.     }
  171.  
  172.     if (thread_tracker[want_tid] == 0 && want_tid != THREAD_ANY) {
  173.         interrupts_set(enabled);
  174.         return THREAD_INVALID;
  175.     }
  176.  
  177.     if (want_tid == THREAD_ANY) {
  178.         if (ready_q->next == NULL) {
  179.             interrupts_set(enabled);
  180.             return THREAD_NONE;
  181.         }
  182.     }
  183.  
  184.     if (want_tid == THREAD_ANY) {
  185.        
  186.         assert(!interrupts_enabled());
  187.         //get the next thread to run
  188.         struct ready_queue* next_node = ready_q->next;
  189.         ready_q->next = ready_q->next->next;
  190.         next_node->next = NULL;
  191.  
  192.  
  193.         //add the current thread to the ready queue
  194.         struct thread* next_thread = next_node->thread_info;
  195.  
  196.         volatile int next_node_id = next_node->thread_info->thread_id;
  197.  
  198.         next_node->thread_info = current_thread;
  199.         current_thread = next_thread;
  200.         insert_ready_queue(next_node);
  201.  
  202.         //setcontext of the next thread
  203.         volatile int setcontext_called = 0;
  204.         getcontext(next_node->thread_info->thread_context);
  205.         if (setcontext_called) {
  206.             int enabled = interrupts_set(0);
  207.             if (exit_q->next != NULL) {
  208.                 destroy_exit_q();
  209.             }
  210.             if (current_thread->kill_status == 1) {
  211.                 thread_exit();
  212.             }
  213.             interrupts_set(enabled);
  214.             return next_node_id;
  215.             //return want_tid;
  216.         }
  217.         setcontext_called = 1;
  218.         setcontext(current_thread->thread_context);
  219.     }
  220.     else {
  221.         //looking for the specific thread to run in ready_queue
  222.         struct ready_queue* next_node = ready_q->next;
  223.         struct ready_queue* prev_node = ready_q;
  224.         while (next_node != NULL && next_node->thread_info->thread_id != want_tid) {
  225.             prev_node = next_node;
  226.             next_node = next_node->next;
  227.         }
  228.         prev_node->next = next_node->next;
  229.         next_node->next = NULL;
  230.  
  231.         //add the current thread to the ready queue
  232.         struct thread* next_thread = next_node->thread_info;
  233.  
  234.         volatile int next_node_id = next_node->thread_info->thread_id;
  235.  
  236.         next_node->thread_info = current_thread;
  237.         current_thread = next_thread;
  238.         insert_ready_queue(next_node);
  239.  
  240.         //setcontext of the next thread
  241.         volatile int setcontext_called = 0;
  242.         getcontext(next_node -> thread_info -> thread_context);
  243.         if (setcontext_called) {
  244.             int enabled = interrupts_set(0);
  245.             if (exit_q->next != NULL) {
  246.                 destroy_exit_q();
  247.             }
  248.             if (current_thread->kill_status == 1) {
  249.                 thread_exit();
  250.             }
  251.             interrupts_set(enabled);
  252.             return next_node_id;
  253.             //return want_tid;
  254.         }
  255.         setcontext_called = 1;
  256.         setcontext(current_thread->thread_context);
  257.     }
  258.     interrupts_set(enabled);
  259.     return THREAD_FAILED;
  260. }
  261.  
/*
 * thread_exit: terminate the calling thread.  If no other thread is
 * runnable the whole process exits.  Otherwise the caller's control
 * block is parked on the exit queue — it cannot be freed here because
 * we are still executing on its stack; destroy_exit_q() reclaims it
 * from a different thread later — and control switches to the next
 * ready thread.  Never returns.
 * NOTE(review): interrupts are not disabled here (the calls are
 * commented out); callers appear to run this with interrupts already
 * off — confirm.
 */
void
thread_exit() {
    //int enabled = interrupts_off();
    //assert(!interrupts_enabled());
    //exit the current thread
    if (ready_q->next == NULL) {
        //interrupts_set(enabled);
        // Last thread alive: terminate the process.
        exit(0);
    } else {
        // Pop the next runnable thread off the ready queue.
        struct ready_queue* next_node;
        next_node = ready_q->next;
        ready_q->next = ready_q->next->next;
        next_node->next = NULL;
        struct thread* next_thread = next_node->thread_info;
        // Reuse the popped node to carry the dying thread to the exit
        // queue, then make the popped thread current.
        next_node->thread_info = current_thread;
        current_thread = next_thread;
        exit_queue(next_node);
        // No getcontext(): the dying thread is never resumed.
        setcontext(current_thread->thread_context);
        //interrupts_set(enabled);
    }
}
  283.  
  284. Tid
  285. thread_kill(Tid tid) {
  286.     int enabled = interrupts_off();
  287.     assert(!interrupts_enabled());
  288.     if (tid == THREAD_SELF) {
  289.         interrupts_set(enabled);
  290.         return THREAD_INVALID;
  291.     }if (current_thread->thread_id == tid) {
  292.         interrupts_set(enabled);
  293.         return THREAD_INVALID;
  294.     }if (tid < 0 || tid >= THREAD_MAX_THREADS) {
  295.         interrupts_set(enabled);
  296.         return THREAD_INVALID;
  297.     }if (thread_tracker[tid] == 0) {
  298.         interrupts_set(enabled);
  299.         return THREAD_INVALID;
  300.     }else{
  301.         struct ready_queue* kill_node = ready_q->next;
  302.         while (kill_node != NULL && kill_node->thread_info->thread_id != tid) {
  303.             kill_node = kill_node->next;
  304.         }
  305.         kill_node->thread_info->kill_status=1;
  306.     }
  307.     interrupts_set(enabled);
  308.     return tid;
  309. }
  310.  
  311. /*******************************************************************
  312.  * Important: The rest of the code should be implemented in Lab 3. *
  313.  *******************************************************************/
  314.  
  315. /* make sure to fill the wait_queue structure defined above */
  316. struct wait_queue *
  317. wait_queue_create() {
  318.     struct wait_queue *wq;
  319.  
  320.     wq = malloc(sizeof (struct wait_queue));
  321.     assert(wq);
  322.     //initialize wait_queue
  323.     wq->wait_info=NULL;
  324.     wq->next=NULL;
  325.  
  326.     return wq;
  327. }
  328.  
  329. void
  330. wait_queue_destroy(struct wait_queue *wq) {
  331.     TBD();
  332.     free(wq);
  333. }
  334.  
  335. Tid
  336. thread_sleep(struct wait_queue *queue) {
  337.     if(queue==NULL){
  338.         return THREAD_INVALID;
  339.     }
  340.  
  341.     if(ready_q->next==NULL){
  342.         return THREAD_NONE;
  343.     }
  344.  
  345.     //get the next thread to run
  346.     struct ready_queue* next_node = ready_q->next;
  347.     ready_q->next = ready_q->next->next;
  348.     next_node->next = NULL;
  349.     struct thread* next_thread=next_node->thread_info;
  350.     free(next_node);
  351.    
  352.     //add the current thread to the wait_queue
  353.     struct wait_queue* new_node=(struct wait_queue*)malloc(sizeof (struct wait_queue));
  354.     new_node->wait_info=next_thread;
  355.     new_node->next=NULL;
  356.     current_thread = next_thread;
  357.     insert_wait_queue(queue,new_node);
  358.  
  359.     //setcontext of the next thread
  360.     volatile int setcontext_called = 0;
  361.     getcontext(next_node -> thread_info -> thread_context);
  362.     if (setcontext_called) {
  363.         return current_thread->thread_id;
  364.     }
  365.     setcontext_called = 1;
  366.     setcontext(current_thread->thread_context);
  367.     return current_thread->thread_id;
  368. }
  369.  
  370. /* when the 'all' parameter is 1, wakeup all threads waiting in the queue.
  371.  * returns whether a thread was woken up on not. */
  372. int
  373. thread_wakeup(struct wait_queue *queue, int all) {
  374.     if(queue==NULL||queue->next==NULL){
  375.         return 0;
  376.     }
  377.  
  378.     if(all==1){
  379.         int threads_awake=0;
  380.         while (queue != NULL){  
  381.             threads_awake+=1;
  382.             struct wait_queue* run_node=queue->next;
  383.             queue->next=queue->next->next;
  384.             struct thread* new_thread=run_node->wait_info;
  385.             run_node->next=NULL;
  386.             free(run_node);
  387.            
  388.             struct ready_queue* new_node=(struct ready_queue*)malloc(sizeof(struct ready_queue));
  389.             new_node->thread_info=new_thread;
  390.             new_node->next=NULL;
  391.             insert_ready_queue(new_node);
  392.         }
  393.         return threads_awake;
  394.  
  395.     }else{
  396.         //get the next thread in the wait queue
  397.         struct wait_queue* next_node = queue->next;
  398.         queue->next = queue->next->next;
  399.         next_node->next = NULL;
  400.         struct thread* new_thread=next_node->wait_info;
  401.         free(next_node);
  402.  
  403.         //add it to the ready queue
  404.         struct ready_queue* new_node=(struct ready_queue*)malloc(sizeof(struct ready_queue));
  405.         new_node->thread_info=new_thread;
  406.         new_node->next=NULL;
  407.         insert_ready_queue(new_node);
  408.         return 1;
  409.     }
  410.     return 0;
  411. }
  412.  
/* suspend current thread until Thread tid exits */
Tid
thread_wait(Tid tid) {
    TBD(); // Lab 3: not yet implemented
    return 0;
}
  419.  
/* Mutual-exclusion lock (Lab 3 placeholder — no fields yet). */
struct lock {
    /* ... Fill this in ... */
};
  423.  
/* Allocate a lock; initialization is still TBD (Lab 3).
 * Aborts on allocation failure. */
struct lock *
lock_create() {
    struct lock *lock;

    lock = malloc(sizeof (struct lock));
    assert(lock);

    TBD(); // Lab 3: initialize lock state here

    return lock;
}
  435.  
/* Release a lock created by lock_create; cleanup of internal state is
 * still TBD (Lab 3). */
void
lock_destroy(struct lock *lock) {
    assert(lock != NULL);

    TBD(); // Lab 3: tear down lock state here

    free(lock);
}
  444.  
/* Acquire the lock, blocking until it is available (Lab 3 stub). */
void
lock_acquire(struct lock *lock) {
    assert(lock != NULL);

    TBD(); // Lab 3: not yet implemented
}
  451.  
/* Release a lock held by the caller (Lab 3 stub). */
void
lock_release(struct lock *lock) {
    assert(lock != NULL);

    TBD(); // Lab 3: not yet implemented
}
  458.  
/* Condition variable (Lab 3 placeholder — no fields yet). */
struct cv {
    /* ... Fill this in ... */
};
  462.  
/* Allocate a condition variable; initialization is still TBD (Lab 3).
 * Aborts on allocation failure. */
struct cv *
cv_create() {
    struct cv *cv;

    cv = malloc(sizeof (struct cv));
    assert(cv);

    TBD(); // Lab 3: initialize cv state here

    return cv;
}
  474.  
/* Release a condition variable created by cv_create; cleanup of
 * internal state is still TBD (Lab 3). */
void
cv_destroy(struct cv *cv) {
    assert(cv != NULL);

    TBD(); // Lab 3: tear down cv state here

    free(cv);
}
  483.  
/* Atomically release lock and wait on cv; reacquire lock before
 * returning (Lab 3 stub). */
void
cv_wait(struct cv *cv, struct lock *lock) {
    assert(cv != NULL);
    assert(lock != NULL);

    TBD(); // Lab 3: not yet implemented
}
  491.  
/* Wake one thread waiting on cv; caller must hold lock (Lab 3 stub). */
void
cv_signal(struct cv *cv, struct lock *lock) {
    assert(cv != NULL);
    assert(lock != NULL);

    TBD(); // Lab 3: not yet implemented
}
  499.  
/* Wake every thread waiting on cv; caller must hold lock (Lab 3 stub). */
void
cv_broadcast(struct cv *cv, struct lock *lock) {
    assert(cv != NULL);
    assert(lock != NULL);

    TBD(); // Lab 3: not yet implemented
}
  507. /*******************************************************************
  508.  * Helper Functions Lab 2 *
  509.  *******************************************************************/
  510. void exit_queue(struct ready_queue* new_node) {
  511.     //if list is empty
  512.     if (exit_q->next == NULL) {
  513.  
  514.         exit_q->next = new_node;
  515.     }        //list has elements
  516.     else {
  517.         struct ready_queue* iterator = exit_q;
  518.         while (iterator->next != NULL) {
  519.             iterator = iterator->next;
  520.         }
  521.         iterator->next = new_node;
  522.     }
  523. }
  524.  
  525. void insert_ready_queue(struct ready_queue* new_node) {
  526.     if (ready_q->next == NULL) {
  527.         ready_q->next = new_node;
  528.     }        //list has elements
  529.     else {
  530.         struct ready_queue* iterator;
  531.         iterator = ready_q;
  532.         while (iterator->next != NULL) {
  533.             iterator = iterator->next;
  534.         }
  535.         iterator->next = new_node;
  536.  
  537.     }
  538. }
  539.  
  540. void destroy_exit_q() {
  541.     struct ready_queue* iterator = exit_q->next;
  542.     while (iterator != NULL) {
  543.         struct ready_queue* kill = iterator;
  544.         iterator = iterator->next;
  545.         exit_q->next = iterator;
  546.         thread_tracker[kill->thread_info->thread_id] = 0;
  547.         free(kill->thread_info->thread_context->uc_stack.ss_sp);
  548.         free(kill->thread_info->thread_context);
  549.         free(kill->thread_info);
  550.         kill->next = NULL;
  551.         free(kill);
  552.         kill = NULL;
  553.     }
  554. }
  555. /*******************************************************************
  556.  * Helper Functions Lab 3 *
  557.  *******************************************************************/
  558. void insert_wait_queue(struct wait_queue* wq,struct wait_queue* new_node) {
  559.     //if list is empty
  560.     if (wq->next == NULL) {
  561.         wq->next = new_node;
  562.     }        //list has elements
  563.     else {
  564.         struct wait_queue* iterator = wq;
  565.         while (iterator->next != NULL) {
  566.             iterator = iterator->next;
  567.         }
  568.         iterator->next = new_node;
  569.     }
  570. }
RAW Paste Data
We use cookies for various purposes including analytics. By continuing to use Pastebin, you agree to our use of cookies as described in the Cookies Policy. OK, I Understand
Not a member of Pastebin yet?
Sign Up, it unlocks many cool features!
 
Top