Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
- #include "threads/thread.h"
- #include <debug.h>
- #include <stddef.h>
- #include <random.h>
- #include <stdio.h>
- #include <string.h>
- #include <devices/timer.h>
- #include "threads/flags.h"
- #include "threads/interrupt.h"
- #include "threads/intr-stubs.h"
- #include "threads/palloc.h"
- #include "threads/switch.h"
- #include "threads/synch.h"
- #include "threads/vaddr.h"
- #include "threads/malloc.h"
- #ifdef USERPROG
- #include "userprog/process.h"
- #endif
/* Random value for struct thread's `magic' member.
   Used to detect stack overflow. See the big comment at the top
   of thread.h for details. */
#define THREAD_MAGIC 0xcd6abf4b
/* List of processes in THREAD_READY state, that is, processes
   that are ready to run but not actually running.
   Kept sorted by descending priority (see priority_higher_func). */
static struct list ready_list;
/* List of all processes. Processes are added to this list
   when they are first scheduled and removed when they exit. */
static struct list all_list;
/* List of processes in THREAD_SLEEPING state, kept sorted by
   ascending wake-up time (see sleep_less_func). */
static struct list sleep_list;
/* Semaphore used to synchronize access over sleep_list. */
static struct semaphore sleep_semaphore;
/* Idle thread. */
static struct thread *idle_thread;
/* Initial thread, the thread running init.c:main(). */
static struct thread *initial_thread;
/* Lock used by allocate_tid(). */
static struct lock tid_lock;
/* Stack frame for kernel_thread(). */
struct kernel_thread_frame {
    void *eip; /* Return address. */
    thread_func *function; /* Function to call. */
    void *aux; /* Auxiliary data for function. */
};
/* Statistics. */
static long long idle_ticks; /* # of timer ticks spent idle. */
static long long kernel_ticks; /* # of timer ticks in kernel threads. */
static long long user_ticks; /* # of timer ticks in user programs. */
/* Scheduling. */
#define TIME_SLICE 4 /* # of timer ticks to give each thread. */
static unsigned thread_ticks; /* # of timer ticks since last yield. */
/* If false (default), use round-robin scheduler.
   If true, use multi-level feedback queue scheduler.
   Controlled by kernel command-line option "-o mlfqs". */
bool thread_mlfqs;
/* Load-average constants for BSD scheduling:
   load_avg = (59/60)*load_avg + (1/60)*ready_threads. */
#define ALPHA 59
#define BETA 60
#define ZETA 1
real load_avg;
real load_avg_const1; /* fixed-point 59/60, precomputed in thread_init() */
real load_avg_const2; /* fixed-point 1/60, precomputed in thread_init() */
/* One entry of a thread's file-descriptor table (fdt list). */
struct fdt_entry {
    int fd;               /* descriptor number handed out to the process */
    const char *file;     /* NOTE(review): stores the caller's pointer, not a
                             copy — assumes the name outlives the entry; verify
                             against callers of insert_into_fdt(). */
    struct list_elem elem;
};
/* Internal helpers; see definitions below for details. */
static void kernel_thread(thread_func *, void *aux);
static void idle(void *aux UNUSED);
static struct thread *running_thread(void);
static struct thread *next_thread_to_run(void);
static void init_thread(struct thread *, const char *name, int priority);
static bool is_thread(struct thread *) UNUSED;
static void *alloc_frame(struct thread *, size_t size);
static void schedule(void);
/* True when a ready thread outranks the running thread. */
static bool check_yield(void);
void thread_schedule_tail(struct thread *prev);
static tid_t allocate_tid(void);
/* Initializes the threading system by transforming the code
   that's currently running into a thread. This can't work in
   general and it is possible in this case only because loader.S
   was careful to put the bottom of the stack at a page boundary.
   Also initializes the run queue and the tid lock.
   After calling this function, be sure to initialize the page
   allocator before trying to create any threads with
   thread_create().
   It is not safe to call thread_current() until this function
   finishes. */
void
thread_init(void) {
    ASSERT (intr_get_level() == INTR_OFF);
    lock_init(&tid_lock);
    list_init(&ready_list);
    list_init(&all_list);
    list_init(&sleep_list);
    /* Binary semaphore guarding sleep_list insertions. */
    sema_init(&sleep_semaphore, 1);
    /* Set up a thread structure for the running thread. */
    initial_thread = running_thread();
    init_thread(initial_thread, "main", PRI_DEFAULT);
    initial_thread->status = THREAD_RUNNING;
    initial_thread->tid = allocate_tid();
    /* Precompute the fixed-point constants 59/60 and 1/60 used by the
       MLFQ load-average formula, and start load_avg at 0. */
    load_avg_const1 = divide_int(convert_to_fixed(ALPHA), BETA);
    load_avg_const2 = divide_int(convert_to_fixed(ZETA), BETA);
    load_avg = convert_to_fixed(0);
}
/* Starts preemptive thread scheduling by enabling interrupts.
   Also creates the idle thread. */
void
thread_start(void) {
    /* Create the idle thread.  The semaphore lives on this stack; idle()
       "up"s it once idle_thread has been initialized. */
    struct semaphore idle_started;
    sema_init(&idle_started, 0);
    thread_create("idle", PRI_MIN, idle, &idle_started);
    /* Start preemptive thread scheduling. */
    intr_enable();
    /* Wait for the idle thread to initialize idle_thread. */
    sema_down(&idle_started);
}
- /* Called by the timer interrupt handler at each timer tick.
- Thus, this function runs in an external interrupt context. */
- void
- thread_tick(void) {
- struct thread *t = thread_current();
- /* Update statistics. */
- if (t == idle_thread)
- idle_ticks++;
- #ifdef USERPROG
- else if (t->pagedir != NULL)
- user_ticks++;
- #endif
- else {
- kernel_ticks++;
- /* increment recent_cpu every tick */
- t->recent_cpu = add_int(t->recent_cpu, 1);
- }
- /*load_avg & recent_cpu change every 1 second */
- if (thread_mlfqs && timer_ticks() % TIMER_FREQ == 0) {
- /* load_avg recalculation */
- mlfq_update_load_avg();
- /* recalculate all threads recent cpu */
- thread_foreach((thread_action_func *) mlfq_update_recent_cpu, NULL);
- }
- if (timer_ticks() % TIME_SLICE == 0) {
- if (thread_mlfqs) {
- /* priority equation recalculation */
- thread_foreach((thread_action_func *) mlfq_update_priority, NULL);
- list_sort(&ready_list, (list_less_func *) priority_higher_func, NULL);
- }
- }
- /* Enforce preemption. */
- if (++thread_ticks >= TIME_SLICE)
- intr_yield_on_return();
- }
- /* Prints thread statistics. */
- void
- thread_print_stats(void) {
- printf("Thread: %lld idle ticks, %lld kernel ticks, %lld user ticks\n",
- idle_ticks, kernel_ticks, user_ticks);
- }
/* Creates a new kernel thread named NAME with the given initial
   PRIORITY, which executes FUNCTION passing AUX as the argument,
   and adds it to the ready queue. Returns the thread identifier
   for the new thread, or TID_ERROR if creation fails.
   If thread_start() has been called, then the new thread may be
   scheduled before thread_create() returns. It could even exit
   before thread_create() returns. Contrariwise, the original
   thread may run for any amount of time before the new thread is
   scheduled. Use a semaphore or some other form of
   synchronization if you need to ensure ordering. */
tid_t
thread_create(const char *name, int priority,
              thread_func *function, void *aux) {
    struct thread *t;
    struct kernel_thread_frame *kf;
    struct switch_entry_frame *ef;
    struct switch_threads_frame *sf;
    tid_t tid;
    enum intr_level old_level;
    ASSERT (function != NULL);
    /* Allocate thread: the struct thread lives at the bottom of a
       fresh zeroed page whose top is the thread's kernel stack. */
    t = palloc_get_page(PAL_ZERO);
    if (t == NULL)
        return TID_ERROR;
    /* Initialize thread. */
    init_thread(t, name, priority);
    tid = t->tid = allocate_tid();
    /* Prepare thread for first run by initializing its stack.
       Do this atomically so intermediate values for the 'stack'
       member cannot be observed. */
    old_level = intr_disable();
    /* Stack frame for kernel_thread(): what kernel_thread() will see as
       its arguments when switch_entry() "returns" into it. */
    kf = alloc_frame(t, sizeof *kf);
    kf->eip = NULL;
    kf->function = function;
    kf->aux = aux;
    /* Stack frame for switch_entry(). */
    ef = alloc_frame(t, sizeof *ef);
    ef->eip = (void (*)(void)) kernel_thread;
    /* Stack frame for switch_threads(). */
    sf = alloc_frame(t, sizeof *sf);
    sf->eip = switch_entry;
    sf->ebp = 0;
    intr_set_level(old_level);
    /* Add to run queue; may preempt us if T has higher priority. */
    thread_unblock(t);
    return tid;
}
/* Puts the current thread to sleep. It will not be scheduled
   again until awoken by thread_unblock().
   This function must be called with interrupts turned off. It
   is usually a better idea to use one of the synchronization
   primitives in synch.h. */
void
thread_block(void) {
    ASSERT (!intr_context());
    ASSERT (intr_get_level() == INTR_OFF);
    thread_current()->status = THREAD_BLOCKED;
    /* Does not return until some other thread unblocks us. */
    schedule();
}
/* Transitions a blocked thread T to the ready-to-run state.
   This is an error if T is not blocked. (Use thread_yield() to
   make the running thread ready.)
   NOTE: unlike stock Pintos, this version DOES preempt the running
   thread when T has strictly higher priority: it yields directly, or
   requests a yield-on-return when called from an external interrupt
   handler (e.g. thread_wake_up() in the timer interrupt). */
void
thread_unblock(struct thread *t) {
    enum intr_level old_level;
    ASSERT (is_thread(t));
    old_level = intr_disable();
    ASSERT (t->status == THREAD_BLOCKED);
    /* Insert by descending priority so the front of ready_list is
       always the highest-priority ready thread. */
    list_insert_ordered(&ready_list, &t->elem, (list_less_func *) priority_higher_func, NULL);
    t->status = THREAD_READY;
    if (check_yield()) {
        if (!intr_context())
            thread_yield(); // must not be in an external interrupts
        else
            intr_yield_on_return(); // if yield is required and we are in external interrupt
    }
    intr_set_level(old_level);
}
- /* Called when a thread is to sleep for a certain amount of time.
- * Sets the wake-up time of that thread, so that it never wakes up before that time. */
- void
- thread_sleep(int64_t wake_up_time) {
- ASSERT (intr_get_level() == INTR_ON);
- struct thread *current = thread_current();
- current->wake_up_time = wake_up_time; /* was inside semaphore before */
- sema_down(&sleep_semaphore);
- list_insert_ordered(&sleep_list, ¤t->sleep_elem, (list_less_func *) sleep_less_func, NULL);
- sema_up(&sleep_semaphore);
- enum intr_level old_level;
- old_level = intr_disable();
- thread_block();
- intr_set_level(old_level);
- }
- /* Called by the timer interrupt handler at each timer tick.
- Thus, this function runs in an external interrupt context. */
- void
- thread_wake_up(int64_t ticks) {
- if (!list_empty(&sleep_list)) {
- struct list_elem *awaken_thread_elem = list_front(&sleep_list);
- struct thread *awaken_thread = list_entry(awaken_thread_elem, struct thread, sleep_elem);
- while (awaken_thread->wake_up_time <= ticks) {
- list_pop_front(&sleep_list);
- thread_unblock(awaken_thread);
- if (list_empty(&sleep_list)) {
- break;
- }
- awaken_thread_elem = list_front(&sleep_list);
- awaken_thread = list_entry(awaken_thread_elem, struct thread, sleep_elem);
- }
- }
- }
- /* Returns the name of the running thread. */
- const char *
- thread_name(void) {
- return thread_current()->name;
- }
/* Returns the running thread.
   This is running_thread() plus a couple of sanity checks.
   See the big comment at the top of thread.h for details. */
struct thread *
thread_current(void) {
    struct thread *t = running_thread();
    /* Make sure T is really a thread.
       If either of these assertions fire, then your thread may
       have overflowed its stack. Each thread has less than 4 kB
       of stack, so a few big automatic arrays or moderate
       recursion can cause stack overflow. */
    ASSERT (is_thread(t));
    ASSERT (t->status == THREAD_RUNNING);
    return t;
}
- /* Returns the running thread's tid. */
- tid_t
- thread_tid(void) {
- return thread_current()->tid;
- }
/* Deschedules the current thread and destroys it. Never
   returns to the caller. */
void
thread_exit(void) {
    ASSERT (!intr_context());
#ifdef USERPROG
    process_exit ();
#endif
    /* Remove thread from all threads list, set our status to dying,
       and schedule another process. That process will destroy us
       when it calls thread_schedule_tail(), so interrupts stay off
       from here on. */
    intr_disable();
    list_remove(&thread_current()->allelem);
    thread_current()->status = THREAD_DYING;
    schedule();
    NOT_REACHED ();
}
- static bool check_yield(void) {
- if (!list_empty(&ready_list))
- return list_entry (list_front(&ready_list), struct thread, elem)->priority > thread_current()->priority;
- return true;
- }
/* Yields the CPU. The current thread is not put to sleep and
   may be scheduled again immediately at the scheduler's whim. */
void
thread_yield(void) {
    struct thread *cur = thread_current();
    enum intr_level old_level;
    ASSERT (!intr_context());
    old_level = intr_disable();
    /* The idle thread never goes on the ready list; it is returned
       specially by next_thread_to_run(). */
    if (cur != idle_thread)
        list_insert_ordered(&ready_list, &cur->elem, (list_less_func *) priority_higher_func, NULL);
    cur->status = THREAD_READY;
    schedule();
    intr_set_level(old_level);
}
- /* Invoke function 'func' on all threads, passing along 'aux'.
- This function must be called with interrupts off. */
- void
- thread_foreach(thread_action_func *func, void *aux) {
- struct list_elem *e;
- ASSERT (intr_get_level() == INTR_OFF);
- for (e = list_begin(&all_list); e != list_end(&all_list); e = list_next(e)) {
- struct thread *t = list_entry (e, struct thread, allelem);
- if (t == idle_thread)
- continue;
- func(t, aux);
- }
- }
/* Idle thread. Executes when no other thread is ready to run.
   The idle thread is initially put on the ready list by
   thread_start(). It will be scheduled once initially, at which
   point it initializes idle_thread, "up"s the semaphore passed
   to it to enable thread_start() to continue, and immediately
   blocks. After that, the idle thread never appears in the
   ready list. It is returned by next_thread_to_run() as a
   special case when the ready list is empty. */
static void
idle(void *idle_started_ UNUSED) {
    struct semaphore *idle_started = idle_started_;
    idle_thread = thread_current();
    sema_up(idle_started);
    for (;;) {
        /* Let someone else run. */
        intr_disable();
        thread_block();
        /* Re-enable interrupts and wait for the next one.
           The `sti' instruction disables interrupts until the
           completion of the next instruction, so these two
           instructions are executed atomically. This atomicity is
           important; otherwise, an interrupt could be handled
           between re-enabling interrupts and waiting for the next
           one to occur, wasting as much as one clock tick worth of
           time.
           See [IA32-v2a] "HLT", [IA32-v2b] "STI", and [IA32-v3a]
           7.11.1 "HLT Instruction". */
        asm volatile ("sti; hlt" : : : "memory");
    }
}
/* Function used as the basis for a kernel thread: every thread created
   by thread_create() starts here (via switch_entry()). */
static void
kernel_thread(thread_func *function, void *aux) {
    ASSERT (function != NULL);
    intr_enable(); /* The scheduler runs with interrupts off. */
    function(aux); /* Execute the thread function. */
    thread_exit(); /* If function() returns, kill the thread. */
}
/* Returns the running thread. */
struct thread *
running_thread(void) {
    uint32_t *esp;
    /* Copy the CPU's stack pointer into `esp', and then round that
       down to the start of a page. Because `struct thread' is
       always at the beginning of a page and the stack pointer is
       somewhere in the middle, this locates the curent thread. */
    asm ("mov %%esp, %0" : "=g" (esp));
    return pg_round_down(esp);
}
- /* Returns true if T appears to point to a valid thread. */
- static bool
- is_thread(struct thread *t) {
- return t != NULL && t->magic == THREAD_MAGIC;
- }
/* Does basic initialization of T as a blocked thread named
   NAME with the given PRIORITY.  Also initializes the donation
   bookkeeping, the file-descriptor table, and — under MLFQS —
   nice/recent_cpu (inherited from the creating thread). */
static void
init_thread(struct thread *t, const char *name, int priority) {
    ASSERT (t != NULL);
    ASSERT (PRI_MIN <= priority && priority <= PRI_MAX);
    ASSERT (name != NULL);
    memset(t, 0, sizeof *t);
    t->status = THREAD_BLOCKED;
    strlcpy(t->name, name, sizeof t->name);
    /* Kernel stack grows down from the top of the thread's page. */
    t->stack = (uint8_t *) t + PGSIZE;
    t->priority = priority;
    /* Base priority remembered separately so donations can be undone. */
    t->original_priority = priority;
    t->blocking_lock = NULL;
    list_init(&(t->locks_held));
    list_init(&(t->fdt));
    t->current_fd = 2; /* 0 and 1 are reserved (stdin/stdout). */
    if (thread_mlfqs) {
        if (t == initial_thread) {
            /* The initial thread starts with nice 0 and recent_cpu 0. */
            t->nice = 0;
            t->recent_cpu = convert_to_fixed(0);
        } else {
            /* Children inherit nice and recent_cpu from their creator. */
            t->nice = thread_current()->nice;
            t->recent_cpu = thread_current()->recent_cpu;
        }
        mlfq_update_priority(t, NULL);
    }
    t->magic = THREAD_MAGIC;
    list_push_back(&all_list, &t->allelem);
}
- /* Allocates a SIZE-byte frame at the top of thread T's stack and
- returns a pointer to the frame's base. */
- static void *
- alloc_frame(struct thread *t, size_t size) {
- /* Stack data is always allocated in word-size units. */
- ASSERT (is_thread(t));
- ASSERT (size % sizeof(uint32_t) == 0);
- t->stack -= size;
- return t->stack;
- }
- /* Chooses and returns the next thread to be scheduled. Should
- return a thread from the run queue, unless the run queue is
- empty. (If the running thread can continue running, then it
- will be in the run queue.) If the run queue is empty, return
- idle_thread. */
- static struct thread *
- next_thread_to_run(void) {
- if (list_empty(&ready_list))
- return idle_thread;
- else
- return list_entry (list_pop_front(&ready_list), struct thread, elem);
- }
/* Completes a thread switch by activating the new thread's page
   tables, and, if the previous thread is dying, destroying it.
   At this function's invocation, we just switched from thread
   PREV, the new thread is already running, and interrupts are
   still disabled. This function is normally invoked by
   thread_schedule() as its final action before returning, but
   the first time a thread is scheduled it is called by
   switch_entry() (see switch.S).
   It's not safe to call printf() until the thread switch is
   complete. In practice that means that printf()s should be
   added at the end of the function.
   After this function and its caller returns, the thread switch
   is complete. */
void
thread_schedule_tail(struct thread *prev) {
    struct thread *cur = running_thread();
    ASSERT (intr_get_level() == INTR_OFF);
    /* Mark us as running. */
    cur->status = THREAD_RUNNING;
    /* Start new time slice. */
    thread_ticks = 0;
#ifdef USERPROG
    /* Activate the new address space. */
    process_activate ();
#endif
    /* If the thread we switched from is dying, destroy its struct
       thread. This must happen late so that thread_exit() doesn't
       pull out the rug under itself. (We don't free
       initial_thread because its memory was not obtained via
       palloc().) */
    if (prev != NULL && prev->status == THREAD_DYING && prev != initial_thread) {
        ASSERT (prev != cur);
        palloc_free_page(prev);
    }
}
/* Schedules a new process. At entry, interrupts must be off and
   the running process's state must have been changed from
   running to some other state. This function finds another
   thread to run and switches to it.
   It's not safe to call printf() until thread_schedule_tail()
   has completed. */
static void
schedule(void) {
    struct thread *cur = running_thread();
    struct thread *next = next_thread_to_run();
    struct thread *prev = NULL;
    ASSERT (intr_get_level() == INTR_OFF);
    ASSERT (cur->status != THREAD_RUNNING);
    ASSERT (is_thread(next));
    /* switch_threads() returns only when CUR is scheduled again; its
       return value is the thread we switched away from at that point. */
    if (cur != next)
        prev = switch_threads(cur, next);
    thread_schedule_tail(prev);
}
- /* Returns a tid to use for a new thread. */
- static tid_t
- allocate_tid(void) {
- static tid_t next_tid = 1;
- tid_t tid;
- lock_acquire(&tid_lock);
- tid = next_tid++;
- lock_release(&tid_lock);
- return tid;
- }
/* Priority Scheduler
 * ------------------ */
/* Sets the current thread's base priority to NEW_PRIORITY.
   The effective priority is only lowered when no donation is in
   effect (i.e. effective == old base); raising always applies
   immediately.  Yields if a ready thread now outranks us. */
void
thread_set_priority(int new_priority) {
    enum intr_level old_level;
    old_level = intr_disable(); // disabling interrupt for atomic operation
    int prev_priority = thread_current()->original_priority;
    thread_current()->original_priority = new_priority;
    if (thread_current()->priority <= new_priority) // new priority is higher, no need to yield
        thread_current()->priority = new_priority;
    else if (thread_current()->priority == prev_priority) // new priority is less with no donation found
        thread_current()->priority = new_priority;
    if (check_yield())
        thread_yield(); // must not be in an external interrupts
    intr_set_level(old_level);
}
- /* Returns the current thread's priority. */
- int
- thread_get_priority(void) {
- return thread_current()->priority;
- }
- /* Donates the priority of higher-priority blocked threads
- * to lower-priority threads when necessary */
- void thread_donate_priority(void) {
- struct thread *cur = thread_current();
- struct thread *holding = NULL;
- while (cur->blocking_lock != NULL) {
- holding = cur->blocking_lock->holder;
- if (holding->priority < cur->priority)
- holding->priority = cur->priority;
- else
- break;
- cur = holding;
- }
- }
/* Recomputes the current thread's effective priority after releasing a
   lock: it becomes the maximum of its base priority and the highest
   priority among waiters on locks it still holds (remaining donations). */
void thread_recompute_priority(void) {
    int donated = -1; /* below PRI_MIN, so "no donation" loses the max below */
    if (!list_empty(&thread_current()->locks_held)) {
        struct list_elem *e;
        /* Scan every lock we still hold for its highest-priority waiter. */
        for (e = list_begin(&thread_current()->locks_held);
             e != list_end(&thread_current()->locks_held); e = list_next(e)) {
            struct lock *cur_lock = list_entry(e, struct lock, elem);
            if (!list_empty(&cur_lock->semaphore.waiters)) {
                int waiter_priority = list_entry(
                        list_max(&cur_lock->semaphore.waiters, (list_less_func *) priority_less_func, NULL),
                        struct thread, elem)->priority;
                donated = donated > waiter_priority ? donated : waiter_priority;
            }
        }
    }
    thread_current()->priority =
            donated > thread_current()->original_priority ? donated : thread_current()->original_priority;
}
- /* MLFQ Scheduler
- * -------------- */
- /* Sets the current thread's nice value to NICE. */
- void
- thread_set_nice(int nice UNUSED) {
- enum intr_level old_level;
- old_level = intr_disable();
- thread_current()->nice = nice;
- mlfq_update_priority(thread_current(), NULL);
- list_sort(&ready_list, (list_less_func *) priority_higher_func, NULL);
- intr_set_level(old_level);
- if (check_yield())
- thread_yield();
- }
- /* Returns the current thread's nice value. */
- int
- thread_get_nice(void) {
- return thread_current()->nice;
- }
- /* Returns 100 times the current thread's recent_cpu value. */
- int
- thread_get_recent_cpu(void) {
- return convert_to_integer_nearestround(multiply_int(thread_current()->recent_cpu, 100));
- }
- /* Returns 100 times the system load average. */
- int
- thread_get_load_avg(void) {
- return convert_to_integer_nearestround(multiply_int(load_avg, 100));
- }
/* Updates the system "load_avg" (called once per second):
   load_avg = (59/60)*load_avg + (1/60)*ready_threads. */
void
mlfq_update_load_avg(void) {
    /* ready_threads = threads on the ready list plus the running thread;
       the idle thread never sits on the ready list and is excluded when
       it is the one running. */
    int ready_threads = list_size(&ready_list) + 1;
    if (thread_current() == idle_thread)
        ready_threads--;
    load_avg = add_fixed(multiply_fixed(load_avg_const1, load_avg), multiply_int(load_avg_const2, ready_threads));
}
- /* updates the value of "recent_cpu" for a given thread */
- thread_action_func*mlfq_update_recent_cpu(struct thread *t, void *aux) {
- real d_load_avg = multiply_int(load_avg, 2);
- real d_load_avg_plus = add_int(d_load_avg, 1);
- real ratio = divide_fixed(d_load_avg, d_load_avg_plus);
- t->recent_cpu = add_int(multiply_fixed(t->recent_cpu, ratio), t->nice);
- }
- /* updates the "priority" for a given thread */
- thread_action_func *mlfq_update_priority(struct thread *t, void *aux) {
- real fp_pri_max = convert_to_fixed(PRI_MAX);
- real fp_d_nice = convert_to_fixed(2 * t->nice);
- real fp_quarter_recent = divide_int(t->recent_cpu, 4);
- real fp_pri = subtract_fixed(fp_pri_max, fp_quarter_recent);
- fp_pri = subtract_fixed(fp_pri, fp_d_nice);
- t->priority = convert_to_integer_nearestround(fp_pri);
- }
/* Managing file descriptor table */
/* Adds FILE_NAME to the current thread's file-descriptor table under a
   freshly allocated descriptor number.  Returns the new fd, or -1 when
   allocation fails.  The name pointer is stored as-is, not copied. */
int insert_into_fdt(const char *file_name) {
    struct thread *current = thread_current();
    struct fdt_entry *entry = malloc(sizeof *entry);
    if (entry == NULL)
        return -1;                     /* out of kernel heap */
    entry->fd = current->current_fd++;
    entry->file = file_name;
    list_push_back(&current->fdt, &entry->elem);
    return entry->fd;
}
- const char* get_file_name (int fd){
- struct list_elem *e;
- struct thread *current = thread_current();
- for (e = list_begin(¤t->fdt); e != list_end (¤t->fdt); e = list_next (e)) {
- struct fdt_entry *req_entry;
- req_entry = list_entry (e, struct fdt_entry, elem);
- if (req_entry->fd == fd) {
- return req_entry->file;
- }
- }
- // no entry found for this fd number
- return NULL;
- }
- /* Comparator Functions
- * -------------------- */
- /* Compares two threads according to their wake-up time.
- * Returns true if wake-up time of a is less than b.
- * Used as a comparator function in "list_insert_ordered()"
- * when adding a sleeping thread to "sleep_list";
- * to keep them in an ascending order.
- * Guarantees that 2 threads with the same wake-up time will be added in the correct order */
- list_less_func *sleep_less_func(const struct list_elem *a, const struct list_elem *b, void *aux) {
- return list_entry(a, struct thread, sleep_elem)->wake_up_time <
- list_entry(b, struct thread, sleep_elem)->wake_up_time;
- };
- /* Compares two threads according to their priority.
- * Returns true if priority of a is less than b.
- * Used as a comparator function in "list_max()"
- * Guarantees that the older thread of maximum priority among others of the same priority is the one returned */
- list_less_func *priority_less_func(const struct list_elem *a, const struct list_elem *b, void *aux) {
- return list_entry(a, struct thread, elem)->priority <
- list_entry(b, struct thread, elem)->priority;
- };
- /*comparator for two threads according to priority used to sort them desendingly*/
- /* Compares two threads according to their priority.
- * Returns true if priority of a is higher than b.
- * Used as a comparator function in "list_sort()", "list_insert_ordered()".
- * Guarantees that the older thread of maximum priority among others of the same priority is countered first */
- list_less_func *priority_higher_func(const struct list_elem *a, const struct list_elem *b, void *aux) {
- return list_entry(a, struct thread, elem)->priority >
- list_entry(b, struct thread, elem)->priority;
- };
/* Offset of `stack' member within `struct thread'.
   Used by switch.S, which can't figure it out on its own. */
uint32_t thread_stack_ofs = offsetof (struct thread, stack);
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement