#include "threads/thread.h"
#include <debug.h>
#include <stddef.h>
#include <random.h>
#include <stdio.h>
#include <string.h>
#include "devices/timer.h"
#include "threads/flags.h"
#include "threads/interrupt.h"
#include "threads/intr-stubs.h"
#include "threads/palloc.h"
#include "threads/switch.h"
#include "threads/synch.h"
#include "threads/vaddr.h"
#include "threads/malloc.h"

#ifdef USERPROG
#include "userprog/process.h"
#endif

/* Random value for struct thread's `magic' member.
   Used to detect stack overflow. See the big comment at the top
   of thread.h for details. */
#define THREAD_MAGIC 0xcd6abf4b

/* List of processes in THREAD_READY state, that is, processes
   that are ready to run but not actually running. */
static struct list ready_list;

/* List of all processes. Processes are added to this list
   when they are first scheduled and removed when they exit. */
static struct list all_list;

/* List of sleeping (blocked) threads, kept ordered by wake-up time. */
static struct list sleep_list;

/* Semaphore used to synchronize access to sleep_list. */
static struct semaphore sleep_semaphore;

/* Idle thread. */
static struct thread *idle_thread;

/* Initial thread, the thread running init.c:main(). */
static struct thread *initial_thread;

/* Lock used by allocate_tid(). */
static struct lock tid_lock;

/* Stack frame for kernel_thread(). */
struct kernel_thread_frame {
    void *eip;             /* Return address. */
    thread_func *function; /* Function to call. */
    void *aux;             /* Auxiliary data for function. */
};

/* Statistics. */
static long long idle_ticks;   /* # of timer ticks spent idle. */
static long long kernel_ticks; /* # of timer ticks in kernel threads. */
static long long user_ticks;   /* # of timer ticks in user programs. */

/* Scheduling. */
#define TIME_SLICE 4           /* # of timer ticks to give each thread. */
static unsigned thread_ticks;  /* # of timer ticks since last yield. */

/* If false (default), use round-robin scheduler.
   If true, use multi-level feedback queue scheduler.
   Controlled by kernel command-line option "-o mlfqs". */
bool thread_mlfqs;

/* Load average for the BSD (MLFQS) scheduler, together with the
   fixed-point constants used to recompute it once per second. */
#define ALPHA 59
#define BETA 60
#define ZETA 1

real load_avg;
real load_avg_const1; /* 59/60 */
real load_avg_const2; /* 1/60 */
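
/* The fixed-point type `real' and the helpers used throughout this
   file (convert_to_fixed(), add_int(), add_fixed(), subtract_fixed(),
   multiply_int(), multiply_fixed(), divide_int(), divide_fixed(),
   convert_to_integer_nearestround()) are assumed to be declared
   elsewhere, e.g. in thread.h or a separate fixed-point header that
   is not part of this paste.  As a rough illustration only -- kept
   inside this comment so it cannot clash with the real definitions --
   a 17.14 fixed-point implementation of those helpers could look like:

       typedef int real;                  // hypothetical: signed 17.14
       #define F (1 << 14)                // scaling factor

       static inline real convert_to_fixed(int n) { return n * F; }
       static inline int convert_to_integer_nearestround(real x) {
           return x >= 0 ? (x + F / 2) / F : (x - F / 2) / F;
       }
       static inline real add_fixed(real x, real y) { return x + y; }
       static inline real subtract_fixed(real x, real y) { return x - y; }
       static inline real add_int(real x, int n) { return x + n * F; }
       static inline real multiply_int(real x, int n) { return x * n; }
       static inline real divide_int(real x, int n) { return x / n; }
       static inline real multiply_fixed(real x, real y) {
           return (real) (((int64_t) x) * y / F);
       }
       static inline real divide_fixed(real x, real y) {
           return (real) (((int64_t) x) * F / y);
       }

   The 17.14 format and these exact bodies are an assumption; only the
   helper names and the way they are called below come from this file. */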

/* An entry in a thread's file descriptor table. */
struct fdt_entry {
    int fd;                /* Descriptor number handed out. */
    const char *file;      /* Name of the open file. */
    struct list_elem elem; /* Element in the per-thread fdt list. */
};

static void kernel_thread(thread_func *, void *aux);
static void idle(void *aux UNUSED);
static struct thread *running_thread(void);
static struct thread *next_thread_to_run(void);
static void init_thread(struct thread *, const char *name, int priority);
static bool is_thread(struct thread *) UNUSED;
static void *alloc_frame(struct thread *, size_t size);
static void schedule(void);
static bool check_yield(void);
void thread_schedule_tail(struct thread *prev);
static tid_t allocate_tid(void);

/* Initializes the threading system by transforming the code
   that's currently running into a thread. This can't work in
   general and it is possible in this case only because loader.S
   was careful to put the bottom of the stack at a page boundary.

   Also initializes the run queue and the tid lock.

   After calling this function, be sure to initialize the page
   allocator before trying to create any threads with
   thread_create().

   It is not safe to call thread_current() until this function
   finishes. */
void
thread_init(void) {
    ASSERT (intr_get_level() == INTR_OFF);

    lock_init(&tid_lock);
    list_init(&ready_list);
    list_init(&all_list);
    list_init(&sleep_list);
    sema_init(&sleep_semaphore, 1);

    /* Set up a thread structure for the running thread. */
    initial_thread = running_thread();
    init_thread(initial_thread, "main", PRI_DEFAULT);
    initial_thread->status = THREAD_RUNNING;
    initial_thread->tid = allocate_tid();

    /* Initialize load_avg_const1 (59/60), load_avg_const2 (1/60) and
       load_avg for the MLFQS scheduler. */
    load_avg_const1 = divide_int(convert_to_fixed(ALPHA), BETA);
    load_avg_const2 = divide_int(convert_to_fixed(ZETA), BETA);
    load_avg = convert_to_fixed(0);
}

/* Starts preemptive thread scheduling by enabling interrupts.
   Also creates the idle thread. */
void
thread_start(void) {
    /* Create the idle thread. */
    struct semaphore idle_started;
    sema_init(&idle_started, 0);
    thread_create("idle", PRI_MIN, idle, &idle_started);

    /* Start preemptive thread scheduling. */
    intr_enable();

    /* Wait for the idle thread to initialize idle_thread. */
    sema_down(&idle_started);
}

/* Called by the timer interrupt handler at each timer tick.
   Thus, this function runs in an external interrupt context. */
void
thread_tick(void) {
    struct thread *t = thread_current();

    /* Update statistics. */
    if (t == idle_thread)
        idle_ticks++;
#ifdef USERPROG
    else if (t->pagedir != NULL)
        user_ticks++;
#endif
    else {
        kernel_ticks++;
        /* Increment the running thread's recent_cpu every tick. */
        t->recent_cpu = add_int(t->recent_cpu, 1);
    }

    /* load_avg and every thread's recent_cpu are recomputed once per
       second. */
    if (thread_mlfqs && timer_ticks() % TIMER_FREQ == 0) {
        /* Recalculate the system load_avg. */
        mlfq_update_load_avg();
        /* Recalculate recent_cpu for all threads. */
        thread_foreach((thread_action_func *) mlfq_update_recent_cpu, NULL);
    }

    /* MLFQS priorities are recomputed once per time slice. */
    if (thread_mlfqs && timer_ticks() % TIME_SLICE == 0) {
        thread_foreach((thread_action_func *) mlfq_update_priority, NULL);
        list_sort(&ready_list, (list_less_func *) priority_higher_func, NULL);
    }

    /* Enforce preemption. */
    if (++thread_ticks >= TIME_SLICE)
        intr_yield_on_return();
}

/* Prints thread statistics. */
void
thread_print_stats(void) {
    printf("Thread: %lld idle ticks, %lld kernel ticks, %lld user ticks\n",
           idle_ticks, kernel_ticks, user_ticks);
}

/* Creates a new kernel thread named NAME with the given initial
   PRIORITY, which executes FUNCTION passing AUX as the argument,
   and adds it to the ready queue. Returns the thread identifier
   for the new thread, or TID_ERROR if creation fails.

   If thread_start() has been called, then the new thread may be
   scheduled before thread_create() returns. It could even exit
   before thread_create() returns. Contrariwise, the original
   thread may run for any amount of time before the new thread is
   scheduled. Use a semaphore or some other form of
   synchronization if you need to ensure ordering.

   The code provided sets the new thread's `priority' member to
   PRIORITY, but no actual priority scheduling is implemented.
   Priority scheduling is the goal of Problem 1-3. */
tid_t
thread_create(const char *name, int priority,
              thread_func *function, void *aux) {
    struct thread *t;
    struct kernel_thread_frame *kf;
    struct switch_entry_frame *ef;
    struct switch_threads_frame *sf;
    tid_t tid;
    enum intr_level old_level;

    ASSERT (function != NULL);

    /* Allocate thread. */
    t = palloc_get_page(PAL_ZERO);
    if (t == NULL)
        return TID_ERROR;

    /* Initialize thread. */
    init_thread(t, name, priority);
    tid = t->tid = allocate_tid();

    /* Prepare thread for first run by initializing its stack.
       Do this atomically so intermediate values for the 'stack'
       member cannot be observed. */
    old_level = intr_disable();

    /* Stack frame for kernel_thread(). */
    kf = alloc_frame(t, sizeof *kf);
    kf->eip = NULL;
    kf->function = function;
    kf->aux = aux;

    /* Stack frame for switch_entry(). */
    ef = alloc_frame(t, sizeof *ef);
    ef->eip = (void (*)(void)) kernel_thread;

    /* Stack frame for switch_threads(). */
    sf = alloc_frame(t, sizeof *sf);
    sf->eip = switch_entry;
    sf->ebp = 0;

    intr_set_level(old_level);

    /* Add to run queue. */
    thread_unblock(t);

    return tid;
}

/* Puts the current thread to sleep. It will not be scheduled
   again until awoken by thread_unblock().

   This function must be called with interrupts turned off. It
   is usually a better idea to use one of the synchronization
   primitives in synch.h. */
void
thread_block(void) {
    ASSERT (!intr_context());
    ASSERT (intr_get_level() == INTR_OFF);

    thread_current()->status = THREAD_BLOCKED;
    schedule();
}

/* Transitions a blocked thread T to the ready-to-run state.
   This is an error if T is not blocked. (Use thread_yield() to
   make the running thread ready.)

   Unlike the stock version, this implementation does preempt the
   running thread: if T has higher priority than the running thread,
   the running thread yields immediately, or requests a yield on
   return when called from an external interrupt handler. */
void
thread_unblock(struct thread *t) {
    enum intr_level old_level;

    ASSERT (is_thread(t));
    old_level = intr_disable();
    ASSERT (t->status == THREAD_BLOCKED);
    list_insert_ordered(&ready_list, &t->elem, (list_less_func *) priority_higher_func, NULL);
    t->status = THREAD_READY;
    if (check_yield()) {
        if (!intr_context())
            thread_yield();         /* Safe: not in an external interrupt. */
        else
            intr_yield_on_return(); /* Defer the yield until the interrupt returns. */
    }
    intr_set_level(old_level);
}

/* Puts the current thread to sleep until WAKE_UP_TIME.
   Records the wake-up time, inserts the thread into sleep_list in
   ascending wake-up order, and then blocks it so that it cannot run
   again before that time. */
void
thread_sleep(int64_t wake_up_time) {
    ASSERT (intr_get_level() == INTR_ON);

    struct thread *current = thread_current();
    current->wake_up_time = wake_up_time;
    sema_down(&sleep_semaphore);
    list_insert_ordered(&sleep_list, &current->sleep_elem, (list_less_func *) sleep_less_func, NULL);
    sema_up(&sleep_semaphore);
    enum intr_level old_level;
    old_level = intr_disable();
    thread_block();
    intr_set_level(old_level);
}

/* Called by the timer interrupt handler at each timer tick, so it
   runs in an external interrupt context. Unblocks every sleeping
   thread whose wake-up time is at or before TICKS. */
void
thread_wake_up(int64_t ticks) {
    if (!list_empty(&sleep_list)) {
        struct list_elem *awaken_thread_elem = list_front(&sleep_list);
        struct thread *awaken_thread = list_entry(awaken_thread_elem, struct thread, sleep_elem);
        while (awaken_thread->wake_up_time <= ticks) {
            list_pop_front(&sleep_list);
            thread_unblock(awaken_thread);
            if (list_empty(&sleep_list))
                break;
            awaken_thread_elem = list_front(&sleep_list);
            awaken_thread = list_entry(awaken_thread_elem, struct thread, sleep_elem);
        }
    }
}
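
/* thread_sleep() and thread_wake_up() are presumably driven from
   devices/timer.c, which is not part of this paste.  A minimal sketch
   of how that hook-up might look; the exact timer.c changes are an
   assumption, only timer_ticks() and TIMER_FREQ are known from the
   include of "devices/timer.h" above:

       void
       timer_sleep(int64_t ticks) {
           if (ticks <= 0)
               return;
           int64_t start = timer_ticks();
           thread_sleep(start + ticks);   // block until the wake-up tick
       }

       static void
       timer_interrupt(struct intr_frame *args UNUSED) {
           ticks++;
           thread_tick();
           thread_wake_up(ticks);         // unblock expired sleepers
       }
*/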

/* Returns the name of the running thread. */
const char *
thread_name(void) {
    return thread_current()->name;
}

/* Returns the running thread.
   This is running_thread() plus a couple of sanity checks.
   See the big comment at the top of thread.h for details. */
struct thread *
thread_current(void) {
    struct thread *t = running_thread();

    /* Make sure T is really a thread.
       If either of these assertions fire, then your thread may
       have overflowed its stack. Each thread has less than 4 kB
       of stack, so a few big automatic arrays or moderate
       recursion can cause stack overflow. */
    ASSERT (is_thread(t));
    ASSERT (t->status == THREAD_RUNNING);

    return t;
}

/* Returns the running thread's tid. */
tid_t
thread_tid(void) {
    return thread_current()->tid;
}

/* Deschedules the current thread and destroys it. Never
   returns to the caller. */
void
thread_exit(void) {
    ASSERT (!intr_context());

#ifdef USERPROG
    process_exit ();
#endif

    /* Remove thread from all threads list, set our status to dying,
       and schedule another process. That process will destroy us
       when it calls thread_schedule_tail(). */
    intr_disable();
    list_remove(&thread_current()->allelem);
    thread_current()->status = THREAD_DYING;
    schedule();
    NOT_REACHED ();
}

/* Returns true if the running thread should yield the CPU, i.e. the
   thread at the front of the ready list has higher priority than the
   running thread. If the ready list is empty this also returns true;
   yielding is then a harmless no-op. */
static bool check_yield(void) {
    if (!list_empty(&ready_list))
        return list_entry (list_front(&ready_list), struct thread, elem)->priority > thread_current()->priority;
    return true;
}

/* Yields the CPU. The current thread is not put to sleep and
   may be scheduled again immediately at the scheduler's whim. */
void
thread_yield(void) {
    struct thread *cur = thread_current();
    enum intr_level old_level;

    ASSERT (!intr_context());

    old_level = intr_disable();
    if (cur != idle_thread)
        list_insert_ordered(&ready_list, &cur->elem, (list_less_func *) priority_higher_func, NULL);
    cur->status = THREAD_READY;
    schedule();

    intr_set_level(old_level);
}

/* Invoke function 'func' on all threads, passing along 'aux'.
   This function must be called with interrupts off. */
void
thread_foreach(thread_action_func *func, void *aux) {
    struct list_elem *e;

    ASSERT (intr_get_level() == INTR_OFF);

    for (e = list_begin(&all_list); e != list_end(&all_list); e = list_next(e)) {
        struct thread *t = list_entry (e, struct thread, allelem);
        if (t == idle_thread)
            continue;
        func(t, aux);
    }
}

/* Idle thread. Executes when no other thread is ready to run.

   The idle thread is initially put on the ready list by
   thread_start(). It will be scheduled once initially, at which
   point it initializes idle_thread, "up"s the semaphore passed
   to it to enable thread_start() to continue, and immediately
   blocks. After that, the idle thread never appears in the
   ready list. It is returned by next_thread_to_run() as a
   special case when the ready list is empty. */
static void
idle(void *idle_started_ UNUSED) {
    struct semaphore *idle_started = idle_started_;
    idle_thread = thread_current();
    sema_up(idle_started);

    for (;;) {
        /* Let someone else run. */
        intr_disable();
        thread_block();

        /* Re-enable interrupts and wait for the next one.

           The `sti' instruction disables interrupts until the
           completion of the next instruction, so these two
           instructions are executed atomically. This atomicity is
           important; otherwise, an interrupt could be handled
           between re-enabling interrupts and waiting for the next
           one to occur, wasting as much as one clock tick worth of
           time.

           See [IA32-v2a] "HLT", [IA32-v2b] "STI", and [IA32-v3a]
           7.11.1 "HLT Instruction". */
        asm volatile ("sti; hlt" : : : "memory");
    }
}

/* Function used as the basis for a kernel thread. */
static void
kernel_thread(thread_func *function, void *aux) {
    ASSERT (function != NULL);

    intr_enable();  /* The scheduler runs with interrupts off. */
    function(aux);  /* Execute the thread function. */
    thread_exit();  /* If function() returns, kill the thread. */
}

/* Returns the running thread. */
struct thread *
running_thread(void) {
    uint32_t *esp;

    /* Copy the CPU's stack pointer into `esp', and then round that
       down to the start of a page. Because `struct thread' is
       always at the beginning of a page and the stack pointer is
       somewhere in the middle, this locates the current thread. */
    asm ("mov %%esp, %0" : "=g" (esp));
    return pg_round_down(esp);
}

/* Returns true if T appears to point to a valid thread. */
static bool
is_thread(struct thread *t) {
    return t != NULL && t->magic == THREAD_MAGIC;
}

/* Does basic initialization of T as a blocked thread named
   NAME. */
static void
init_thread(struct thread *t, const char *name, int priority) {
    ASSERT (t != NULL);
    ASSERT (PRI_MIN <= priority && priority <= PRI_MAX);
    ASSERT (name != NULL);

    memset(t, 0, sizeof *t);
    t->status = THREAD_BLOCKED;
    strlcpy(t->name, name, sizeof t->name);
    t->stack = (uint8_t *) t + PGSIZE;
    t->priority = priority;
    t->original_priority = priority;
    t->blocking_lock = NULL;
    list_init(&t->locks_held);
    list_init(&t->fdt);
    t->current_fd = 2; /* 0 and 1 are reserved (stdin/stdout), so 2 is the first usable fd. */
    if (thread_mlfqs) {
        if (t == initial_thread) {
            t->nice = 0;
            t->recent_cpu = convert_to_fixed(0);
        } else {
            /* New threads inherit nice and recent_cpu from their parent. */
            t->nice = thread_current()->nice;
            t->recent_cpu = thread_current()->recent_cpu;
        }
        mlfq_update_priority(t, NULL);
    }
    t->magic = THREAD_MAGIC;
    list_push_back(&all_list, &t->allelem);
}
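
/* init_thread() touches several members that are not part of the
   stock `struct thread' and must therefore have been added to
   thread.h, which is not included in this paste.  Judging purely from
   how they are used in this file, the additions presumably look
   roughly like the following (member types are inferred, not quoted
   from the actual header):

       struct thread {
           ...
           int64_t wake_up_time;        // tick at which a sleeping thread wakes
           struct list_elem sleep_elem; // element in sleep_list
           int original_priority;       // base priority before any donation
           struct lock *blocking_lock;  // lock this thread waits on, or NULL
           struct list locks_held;      // locks currently held (for donation)
           int nice;                    // MLFQS nice value
           real recent_cpu;             // MLFQS recent_cpu, fixed point
           struct list fdt;             // per-thread file descriptor table
           int current_fd;              // next fd number to hand out
           ...
       };
*/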

/* Allocates a SIZE-byte frame at the top of thread T's stack and
   returns a pointer to the frame's base. */
static void *
alloc_frame(struct thread *t, size_t size) {
    /* Stack data is always allocated in word-size units. */
    ASSERT (is_thread(t));
    ASSERT (size % sizeof(uint32_t) == 0);

    t->stack -= size;
    return t->stack;
}

/* Chooses and returns the next thread to be scheduled. Should
   return a thread from the run queue, unless the run queue is
   empty. (If the running thread can continue running, then it
   will be in the run queue.) If the run queue is empty, return
   idle_thread. */
static struct thread *
next_thread_to_run(void) {
    if (list_empty(&ready_list))
        return idle_thread;
    else
        return list_entry (list_pop_front(&ready_list), struct thread, elem);
}

/* Completes a thread switch by activating the new thread's page
   tables, and, if the previous thread is dying, destroying it.

   At this function's invocation, we just switched from thread
   PREV, the new thread is already running, and interrupts are
   still disabled. This function is normally invoked by
   thread_schedule() as its final action before returning, but
   the first time a thread is scheduled it is called by
   switch_entry() (see switch.S).

   It's not safe to call printf() until the thread switch is
   complete. In practice that means that printf()s should be
   added at the end of the function.

   After this function and its caller returns, the thread switch
   is complete. */
void
thread_schedule_tail(struct thread *prev) {
    struct thread *cur = running_thread();

    ASSERT (intr_get_level() == INTR_OFF);

    /* Mark us as running. */
    cur->status = THREAD_RUNNING;

    /* Start new time slice. */
    thread_ticks = 0;

#ifdef USERPROG
    /* Activate the new address space. */
    process_activate ();
#endif

    /* If the thread we switched from is dying, destroy its struct
       thread. This must happen late so that thread_exit() doesn't
       pull out the rug under itself. (We don't free
       initial_thread because its memory was not obtained via
       palloc().) */
    if (prev != NULL && prev->status == THREAD_DYING && prev != initial_thread) {
        ASSERT (prev != cur);
        palloc_free_page(prev);
    }
}

/* Schedules a new process. At entry, interrupts must be off and
   the running process's state must have been changed from
   running to some other state. This function finds another
   thread to run and switches to it.

   It's not safe to call printf() until thread_schedule_tail()
   has completed. */
static void
schedule(void) {
    struct thread *cur = running_thread();
    struct thread *next = next_thread_to_run();
    struct thread *prev = NULL;

    ASSERT (intr_get_level() == INTR_OFF);
    ASSERT (cur->status != THREAD_RUNNING);
    ASSERT (is_thread(next));

    if (cur != next)
        prev = switch_threads(cur, next);
    thread_schedule_tail(prev);
}

/* Returns a tid to use for a new thread. */
static tid_t
allocate_tid(void) {
    static tid_t next_tid = 1;
    tid_t tid;

    lock_acquire(&tid_lock);
    tid = next_tid++;
    lock_release(&tid_lock);

    return tid;
}

/* Priority Scheduler
 * ------------------ */

/* Sets the current thread's priority to NEW_PRIORITY. */
void
thread_set_priority(int new_priority) {
    enum intr_level old_level;
    old_level = intr_disable(); /* Disable interrupts so the update is atomic. */

    int prev_priority = thread_current()->original_priority;
    thread_current()->original_priority = new_priority;
    if (thread_current()->priority <= new_priority)
        /* New priority is at least the current (possibly donated) one,
           so raising the effective priority is always safe. */
        thread_current()->priority = new_priority;
    else if (thread_current()->priority == prev_priority)
        /* New priority is lower, but no donation is in effect, so the
           effective priority may drop as well. */
        thread_current()->priority = new_priority;

    if (check_yield())
        thread_yield(); /* Must not be called from an external interrupt. */

    intr_set_level(old_level);
}

/* Returns the current thread's priority. */
int
thread_get_priority(void) {
    return thread_current()->priority;
}

/* Donates the current thread's priority along the chain of lock
   holders it is (transitively) blocked on, raising each holder's
   priority whenever it is lower. */
void thread_donate_priority(void) {
    struct thread *cur = thread_current();
    struct thread *holding = NULL;
    while (cur->blocking_lock != NULL) {
        holding = cur->blocking_lock->holder;
        if (holding->priority < cur->priority)
            holding->priority = cur->priority;
        else
            break;
        cur = holding;
    }
}

/* Recomputes the current thread's effective priority when it releases
   a lock: the maximum of its original priority and the highest
   priority among the waiters of the locks it still holds. */
void thread_recompute_priority(void) {
    int donated = -1;
    if (!list_empty(&thread_current()->locks_held)) {
        struct list_elem *e;
        for (e = list_begin(&thread_current()->locks_held);
             e != list_end(&thread_current()->locks_held); e = list_next(e)) {
            struct lock *cur_lock = list_entry(e, struct lock, elem);
            if (!list_empty(&cur_lock->semaphore.waiters)) {
                int waiter_priority = list_entry(
                        list_max(&cur_lock->semaphore.waiters, (list_less_func *) priority_less_func, NULL),
                        struct thread, elem)->priority;
                donated = donated > waiter_priority ? donated : waiter_priority;
            }
        }
    }
    thread_current()->priority =
            donated > thread_current()->original_priority ? donated : thread_current()->original_priority;
}
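
/* thread_donate_priority() and thread_recompute_priority() are
   presumably called from lock_acquire() and lock_release() in
   threads/synch.c, which also needs a `struct list_elem elem' added
   to `struct lock' for the locks_held list used above.  Those synch.c
   changes are not part of this paste; a rough, assumed sketch of the
   hook-up might look like:

       void
       lock_acquire(struct lock *lock) {
           ...
           if (!thread_mlfqs && lock->holder != NULL) {
               thread_current()->blocking_lock = lock;
               thread_donate_priority();      // raise the holders' priorities
           }
           sema_down(&lock->semaphore);
           thread_current()->blocking_lock = NULL;
           lock->holder = thread_current();
           list_push_back(&thread_current()->locks_held, &lock->elem);
           ...
       }

       void
       lock_release(struct lock *lock) {
           ...
           list_remove(&lock->elem);
           lock->holder = NULL;
           if (!thread_mlfqs)
               thread_recompute_priority();   // fall back to remaining donations
           sema_up(&lock->semaphore);
           ...
       }
*/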

/* MLFQ Scheduler
 * -------------- */

/* Sets the current thread's nice value to NICE and recomputes its
   priority, yielding if it no longer has the highest priority. */
void
thread_set_nice(int nice) {
    enum intr_level old_level;
    old_level = intr_disable();
    thread_current()->nice = nice;
    mlfq_update_priority(thread_current(), NULL);
    list_sort(&ready_list, (list_less_func *) priority_higher_func, NULL);
    intr_set_level(old_level);
    if (check_yield())
        thread_yield();
}

/* Returns the current thread's nice value. */
int
thread_get_nice(void) {
    return thread_current()->nice;
}

/* Returns 100 times the current thread's recent_cpu value. */
int
thread_get_recent_cpu(void) {
    return convert_to_integer_nearestround(multiply_int(thread_current()->recent_cpu, 100));
}

/* Returns 100 times the system load average. */
int
thread_get_load_avg(void) {
    return convert_to_integer_nearestround(multiply_int(load_avg, 100));
}

/* Updates the system load_avg (called once per second):
   load_avg = (59/60) * load_avg + (1/60) * ready_threads. */
void
mlfq_update_load_avg(void) {
    /* Count the ready threads plus the running thread, excluding idle. */
    int ready_threads = list_size(&ready_list) + 1;
    if (thread_current() == idle_thread)
        ready_threads--;
    load_avg = add_fixed(multiply_fixed(load_avg_const1, load_avg), multiply_int(load_avg_const2, ready_threads));
}

/* Updates recent_cpu for thread T (called once per second):
   recent_cpu = (2 * load_avg) / (2 * load_avg + 1) * recent_cpu + nice. */
void
mlfq_update_recent_cpu(struct thread *t, void *aux UNUSED) {
    real d_load_avg = multiply_int(load_avg, 2);
    real d_load_avg_plus = add_int(d_load_avg, 1);
    real ratio = divide_fixed(d_load_avg, d_load_avg_plus);
    t->recent_cpu = add_int(multiply_fixed(t->recent_cpu, ratio), t->nice);
}

/* Updates the priority of thread T (called every TIME_SLICE ticks):
   priority = PRI_MAX - recent_cpu / 4 - 2 * nice. */
void
mlfq_update_priority(struct thread *t, void *aux UNUSED) {
    real fp_pri_max = convert_to_fixed(PRI_MAX);
    real fp_d_nice = convert_to_fixed(2 * t->nice);
    real fp_quarter_recent = divide_int(t->recent_cpu, 4);
    real fp_pri = subtract_fixed(fp_pri_max, fp_quarter_recent);
    fp_pri = subtract_fixed(fp_pri, fp_d_nice);
    t->priority = convert_to_integer_nearestround(fp_pri);
}
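
/* Worked example of the three MLFQS formulas above, using the same
   fixed-point helpers (numbers chosen purely for illustration):

     - load_avg:   with load_avg = 0 and 2 non-idle threads
                   (1 ready + 1 running),
                   load_avg = (59/60)*0 + (1/60)*2 = 0.033...
     - recent_cpu: with load_avg = 1, recent_cpu = 12, nice = 1,
                   recent_cpu = (2*1)/(2*1 + 1) * 12 + 1 = 9
     - priority:   with PRI_MAX = 63, recent_cpu = 12, nice = 1,
                   priority = 63 - 12/4 - 2*1 = 58

   All intermediate values are kept in fixed point; only the final
   priority is converted back to an integer. */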

/* Managing the file descriptor table
 * ---------------------------------- */

/* Allocates a new file descriptor for FILE_NAME in the current
   thread's descriptor table and returns it, or -1 on failure. */
int insert_into_fdt(const char *file_name) {
    struct thread *current = thread_current();
    struct fdt_entry *fdt_new_entry = malloc(sizeof(struct fdt_entry));
    if (fdt_new_entry != NULL) {
        fdt_new_entry->fd = current->current_fd;
        current->current_fd++;
        fdt_new_entry->file = file_name;
        /* Push the new entry onto the thread's descriptor table. */
        list_push_back(&current->fdt, &fdt_new_entry->elem);
        return fdt_new_entry->fd;
    } else {
        /* Failed to allocate an fdt_entry. */
        return -1;
    }
}

/* Returns the file name associated with FD in the current thread's
   descriptor table, or NULL if no such entry exists. */
const char *get_file_name(int fd) {
    struct list_elem *e;
    struct thread *current = thread_current();

    for (e = list_begin(&current->fdt); e != list_end(&current->fdt); e = list_next(e)) {
        struct fdt_entry *req_entry;
        req_entry = list_entry (e, struct fdt_entry, elem);
        if (req_entry->fd == fd)
            return req_entry->file;
    }
    /* No entry found for this fd number. */
    return NULL;
}
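
/* insert_into_fdt() and get_file_name() are presumably consumed by
   the system call handlers in userprog/syscall.c, which is not part
   of this paste.  A rough sketch of how an open handler might use
   them (the handler name and structure are an assumption):

       static int
       sys_open(const char *file_name) {
           if (file_name == NULL)
               return -1;
           return insert_into_fdt(file_name);   // -1 on allocation failure
       }

   Later syscalls would then call get_file_name(fd) to map a user-
   supplied descriptor back to the file it refers to. */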

/* Comparator Functions
 * -------------------- */

/* Compares two threads by wake-up time.
   Returns true if A's wake-up time is less than B's.
   Used as the comparator in list_insert_ordered() when adding a
   sleeping thread to sleep_list, keeping the list in ascending order.
   Threads with equal wake-up times keep their insertion order. */
bool sleep_less_func(const struct list_elem *a, const struct list_elem *b, void *aux UNUSED) {
    return list_entry(a, struct thread, sleep_elem)->wake_up_time <
           list_entry(b, struct thread, sleep_elem)->wake_up_time;
}

/* Compares two threads by priority.
   Returns true if A's priority is less than B's.
   Used as the comparator in list_max(); among threads of equal
   maximum priority, the oldest one is returned. */
bool priority_less_func(const struct list_elem *a, const struct list_elem *b, void *aux UNUSED) {
    return list_entry(a, struct thread, elem)->priority <
           list_entry(b, struct thread, elem)->priority;
}

/* Compares two threads by priority.
   Returns true if A's priority is higher than B's.
   Used as the comparator in list_sort() and list_insert_ordered() to
   keep the ready list in descending priority order; among threads of
   equal priority, the oldest one is encountered first. */
bool priority_higher_func(const struct list_elem *a, const struct list_elem *b, void *aux UNUSED) {
    return list_entry(a, struct thread, elem)->priority >
           list_entry(b, struct thread, elem)->priority;
}

/* Offset of `stack' member within `struct thread'.
   Used by switch.S, which can't figure it out on its own. */
uint32_t thread_stack_ofs = offsetof (struct thread, stack);