#include "threads/thread.h"
#include <debug.h>
#include <stddef.h>
#include <random.h>
#include <stdio.h>
#include <string.h>
#include "threads/flags.h"
#include "threads/interrupt.h"
#include "threads/intr-stubs.h"
#include "threads/palloc.h"
#include "threads/switch.h"
#include "threads/synch.h"
#include "threads/vaddr.h"
#ifdef USERPROG
#include "userprog/process.h"
#endif

/* Random value for struct thread's `magic' member.
   Used to detect stack overflow. See the big comment at the top
   of thread.h for details. */
#define THREAD_MAGIC 0xcd6abf4b

/* List of processes in THREAD_READY state, that is, processes
   that are ready to run but not actually running. */
static struct list ready_list;

/* List of all processes. Processes are added to this list
   when they are first scheduled and removed when they exit. */
static struct list all_list;

/* Idle thread. */
static struct thread *idle_thread;

/* Initial thread, the thread running init.c:main(). */
static struct thread *initial_thread;

/* Lock used by allocate_tid(). */
static struct lock tid_lock;

/* System load average. */
static fixed_point_t load_avg;

/* Stack frame for kernel_thread(). */
struct kernel_thread_frame
  {
    void *eip;                  /* Return address. */
    thread_func *function;      /* Function to call. */
    void *aux;                  /* Auxiliary data for function. */
  };

/* Statistics. */
static long long idle_ticks;    /* # of timer ticks spent idle. */
static long long kernel_ticks;  /* # of timer ticks in kernel threads. */
static long long user_ticks;    /* # of timer ticks in user programs. */

/* Scheduling. */
#define TIME_SLICE 4            /* # of timer ticks to give each thread. */
static unsigned thread_ticks;   /* # of timer ticks since last yield. */

/* If false (default), use round-robin scheduler.
   If true, use multi-level feedback queue scheduler.
   Controlled by kernel command-line option "-o mlfqs". */
bool thread_mlfqs;

static void kernel_thread (thread_func *, void *aux);

static void idle (void *aux UNUSED);
static struct thread *running_thread (void);
static struct thread *next_thread_to_run (void);
static void init_thread (struct thread *, const char *name, int priority);
static bool is_thread (struct thread *) UNUSED;
static void *alloc_frame (struct thread *, size_t size);
static void schedule (void);
void thread_schedule_tail (struct thread *prev);
static tid_t allocate_tid (void);

/* Initializes the threading system by transforming the code
   that's currently running into a thread. This can't work in
   general and it is possible in this case only because loader.S
   was careful to put the bottom of the stack at a page boundary.

   Also initializes the run queue and the tid lock.

   After calling this function, be sure to initialize the page
   allocator before trying to create any threads with
   thread_create().

   It is not safe to call thread_current() until this function
   finishes. */
void
thread_init (void)
{
  ASSERT (intr_get_level () == INTR_OFF);

  lock_init (&tid_lock);
  list_init (&ready_list);
  list_init (&all_list);

  /* Set up a thread structure for the running thread. */
  initial_thread = running_thread ();
  init_thread (initial_thread, "main", PRI_DEFAULT);
  initial_thread->status = THREAD_RUNNING;
  initial_thread->tid = allocate_tid ();
}

/* Returns true if the thread owning list element A has a strictly
   higher priority than the thread owning list element B. If B is
   the list tail sentinel (next == NULL), A is considered higher. */
bool
has_higher_priority_than (struct list_elem *a, struct list_elem *b, void *aux)
{
  if (b->next == NULL)
    return true;
  struct thread *t1 = list_entry (a, struct thread, elem);
  struct thread *t2 = list_entry (b, struct thread, elem);
  return t1->priority > t2->priority;
}

/* Returns true if the thread owning list element A has a strictly
   lower priority than the thread owning list element B. If B is
   the list tail sentinel (next == NULL), A is considered lower. */
bool
has_lower_priority_than (struct list_elem *a, struct list_elem *b, void *aux)
{
  if (b->next == NULL)
    return true;
  struct thread *t1 = list_entry (a, struct thread, elem);
  struct thread *t2 = list_entry (b, struct thread, elem);
  return t1->priority < t2->priority;
}
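
/* Both comparators match the list_less_func signature from
   <list.h>, so they can be handed to the Pintos list routines.
   Later in this file they are used as:

     list_sort (&ready_list, has_higher_priority_than, NULL);
     list_insert_ordered (&ready_list, &t->elem,
                          has_higher_priority_than, NULL);
     list_max (&ready_list, has_lower_priority_than, NULL);
*/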

/* Starts preemptive thread scheduling by enabling interrupts.
   Also creates the idle thread. */
void
thread_start (void)
{
  /* Create the idle thread. */
  struct semaphore idle_started;
  sema_init (&idle_started, 0);
  thread_create ("idle", PRI_MIN, idle, &idle_started);

  /* Start preemptive thread scheduling. */
  intr_enable ();

  /* Wait for the idle thread to initialize idle_thread. */
  sema_down (&idle_started);
}

/* Called by the timer interrupt handler at each timer tick.
   Thus, this function runs in an external interrupt context. */
void
thread_tick (void)
{
  struct thread *t = thread_current ();

  /* Increment the recent_cpu of the running thread. */
  if (t != idle_thread)
    t->recent_cpu = fix_add (t->recent_cpu, fix_int (1));

  /* Update statistics. */
  if (t == idle_thread)
    idle_ticks++;
#ifdef USERPROG
  else if (t->pagedir != NULL)
    user_ticks++;
#endif
  else
    kernel_ticks++;

  /* Enforce preemption. */
  if (++thread_ticks >= TIME_SLICE)
    intr_yield_on_return ();
}

/* Prints thread statistics. */
void
thread_print_stats (void)
{
  printf ("Thread: %lld idle ticks, %lld kernel ticks, %lld user ticks\n",
          idle_ticks, kernel_ticks, user_ticks);
}

/* Creates a new kernel thread named NAME with the given initial
   PRIORITY, which executes FUNCTION passing AUX as the argument,
   and adds it to the ready queue. Returns the thread identifier
   for the new thread, or TID_ERROR if creation fails.

   If thread_start() has been called, then the new thread may be
   scheduled before thread_create() returns. It could even exit
   before thread_create() returns. Contrariwise, the original
   thread may run for any amount of time before the new thread is
   scheduled. Use a semaphore or some other form of
   synchronization if you need to ensure ordering.

   The code provided sets the new thread's `priority' member to
   PRIORITY, but no actual priority scheduling is implemented.
   Priority scheduling is the goal of Problem 1-3. */
tid_t
thread_create (const char *name, int priority,
               thread_func *function, void *aux)
{
  struct thread *t;
  struct kernel_thread_frame *kf;
  struct switch_entry_frame *ef;
  struct switch_threads_frame *sf;
  tid_t tid;

  ASSERT (function != NULL);

  /* Allocate thread. */
  t = palloc_get_page (PAL_ZERO);
  if (t == NULL)
    return TID_ERROR;

  /* Initialize thread. */
  init_thread (t, name, priority);
  tid = t->tid = allocate_tid ();

  /* Stack frame for kernel_thread(). */
  kf = alloc_frame (t, sizeof *kf);
  kf->eip = NULL;
  kf->function = function;
  kf->aux = aux;

  /* Stack frame for switch_entry(). */
  ef = alloc_frame (t, sizeof *ef);
  ef->eip = (void (*) (void)) kernel_thread;

  /* Stack frame for switch_threads(). */
  sf = alloc_frame (t, sizeof *sf);
  sf->eip = switch_entry;
  sf->ebp = 0;

  /* Add to run queue. */
  thread_unblock (t);

  /* Yield immediately if the new thread has a higher priority
     than the current thread. */
  if (thread_current ()->priority < t->priority
      && thread_current () != idle_thread)
    thread_yield ();

  return tid;
}

/* Puts the current thread to sleep. It will not be scheduled
   again until awoken by thread_unblock().

   This function must be called with interrupts turned off. It
   is usually a better idea to use one of the synchronization
   primitives in synch.h. */
void
thread_block (void)
{
  ASSERT (!intr_context ());
  ASSERT (intr_get_level () == INTR_OFF);

  thread_current ()->status = THREAD_BLOCKED;
  schedule ();
}

/* Transitions a blocked thread T to the ready-to-run state.
   This is an error if T is not blocked. (Use thread_yield() to
   make the running thread ready.)

   This function does not preempt the running thread. This can
   be important: if the caller had disabled interrupts itself,
   it may expect that it can atomically unblock a thread and
   update other data. */
void
thread_unblock (struct thread *t)
{
  enum intr_level old_level;

  ASSERT (is_thread (t));

  old_level = intr_disable ();
  ASSERT (t->status == THREAD_BLOCKED);
  list_push_back (&ready_list, &t->elem);
  t->status = THREAD_READY;
  intr_set_level (old_level);
}

/* Returns the name of the running thread. */
const char *
thread_name (void)
{
  return thread_current ()->name;
}

/* Returns the running thread.
   This is running_thread() plus a couple of sanity checks.
   See the big comment at the top of thread.h for details. */
struct thread *
thread_current (void)
{
  struct thread *t = running_thread ();

  /* Make sure T is really a thread.
     If either of these assertions fire, then your thread may
     have overflowed its stack. Each thread has less than 4 kB
     of stack, so a few big automatic arrays or moderate
     recursion can cause stack overflow. */
  ASSERT (is_thread (t));
  ASSERT (t->status == THREAD_RUNNING);

  return t;
}

/* Returns the running thread's tid. */
tid_t
thread_tid (void)
{
  return thread_current ()->tid;
}

/* Deschedules the current thread and destroys it. Never
   returns to the caller. */
void
thread_exit (void)
{
  ASSERT (!intr_context ());

#ifdef USERPROG
  process_exit ();
#endif

  /* Remove thread from all threads list, set our status to dying,
     and schedule another process. That process will destroy us
     when it calls thread_schedule_tail(). */
  intr_disable ();
  list_remove (&thread_current ()->allelem);
  thread_current ()->status = THREAD_DYING;
  schedule ();
  NOT_REACHED ();
}

/* Yields the CPU. The current thread is not put to sleep and
   may be scheduled again immediately at the scheduler's whim. */
void
thread_yield (void)
{
  struct thread *cur = thread_current ();
  enum intr_level old_level;

  ASSERT (!intr_context ());

  old_level = intr_disable ();
  if (cur != idle_thread)
    list_push_back (&ready_list, &cur->elem);
  cur->status = THREAD_READY;
  schedule ();
  intr_set_level (old_level);
}

/* Invoke function 'func' on all threads, passing along 'aux'.
   This function must be called with interrupts off. */
void
thread_foreach (thread_action_func *func, void *aux)
{
  struct list_elem *e;

  ASSERT (intr_get_level () == INTR_OFF);

  for (e = list_begin (&all_list); e != list_end (&all_list);
       e = list_next (e))
    {
      struct thread *t = list_entry (e, struct thread, allelem);
      func (t, aux);
    }
}

/* Sets the current thread's priority to NEW_PRIORITY. Ignored
   under the MLFQS, which computes priorities itself. If the
   effective priority differs from own_priority (a donation is
   in effect), only the base priority is changed; otherwise both
   are updated. Yields so a higher-priority ready thread can run. */
void
thread_set_priority (int new_priority)
{
  struct thread *cur = thread_current ();

  if (thread_mlfqs)
    return;

  if (cur->priority != cur->own_priority)
    cur->own_priority = new_priority;
  else
    {
      cur->priority = new_priority;
      cur->own_priority = new_priority;
    }
  thread_yield ();
}

/* Returns the current thread's priority. */
int
thread_get_priority (void)
{
  return thread_current ()->priority;
}

/* Sets the current thread's nice value to NICE, recomputes its
   priority, and yields if a ready thread now has a higher
   priority. */
void
thread_set_nice (int nice)
{
  struct thread *cur = thread_current ();

  cur->nice = nice;
  cur->priority = PRI_MAX - fix_trunc (fix_unscale (cur->recent_cpu, 4))
                  - 2 * cur->nice;
  if (!list_empty (&ready_list)
      && cur->priority < list_entry (list_begin (&ready_list),
                                     struct thread, elem)->priority)
    thread_yield ();
}

/* Updates the priority of every thread in the system using the
   MLFQS priority formula and re-sorts the ready queue according
   to the new values. */
void
thread_update_priorities (void)
{
  thread_foreach (priority_formula, NULL);
  list_sort (&ready_list, has_higher_priority_than, NULL);
}

/* Updates the priority of thread T using the MLFQS priority
   formula. */
void
priority_formula (struct thread *t, void *aux)
{
  t->priority = PRI_MAX - fix_trunc (fix_unscale (t->recent_cpu, 4))
                - 2 * t->nice;
}
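
/* The two computations above follow the Pintos MLFQS priority
   formula:

     priority = PRI_MAX - (recent_cpu / 4) - (nice * 2)

   e.g. a thread with recent_cpu = 0 and nice = 0 gets PRI_MAX.
   Note that this file does not clamp the result to
   [PRI_MIN, PRI_MAX]; if it can fall outside that range, the
   clamping is assumed to happen elsewhere. */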

/* Returns the current thread's nice value. */
int
thread_get_nice (void)
{
  return thread_current ()->nice;
}

/* Returns 100 times the system load average, rounded to the
   nearest integer. */
int
thread_get_load_avg (void)
{
  return fix_round (fix_scale (load_avg, 100));
}

/* Updates the system load average using the MLFQS formula. */
void
thread_update_load_avg (void)
{
  enum intr_level old_level = intr_disable ();

  /* Count the threads that are running or ready to run,
     excluding the idle thread. */
  int ready_threads = list_size (&ready_list);
  if (thread_current () != idle_thread)
    ready_threads++;

  fixed_point_t new_info = fix_scale (fix_frac (1, 60), ready_threads);
  fixed_point_t old_info = fix_mul (fix_frac (59, 60), load_avg);
  load_avg = fix_add (new_info, old_info);

  intr_set_level (old_level);
}
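
/* The update above implements the Pintos MLFQS load average
   formula:

     load_avg = (59/60) * load_avg + (1/60) * ready_threads

   where ready_threads counts the threads that are running or
   ready to run, excluding the idle thread. Per the MLFQS spec
   this is meant to be called once per second from the timer
   interrupt; the caller (not shown in this file) is assumed to
   enforce that cadence. */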

/* Returns 100 times the current thread's recent_cpu value,
   rounded to the nearest integer. */
int
thread_get_recent_cpu (void)
{
  return fix_round (fix_scale (thread_current ()->recent_cpu, 100));
}

/* Updates the recent_cpu value of every thread using the MLFQS
   formula. */
void
thread_update_recent_cpu (void)
{
  enum intr_level old_level = intr_disable ();

  /* The decay coefficient 2*load_avg / (2*load_avg + 1) is the
     same for every thread, so compute it once and pass it to
     recent_cpu_formula() through AUX. */
  fixed_point_t a = fix_scale (load_avg, 2);
  fixed_point_t b = fix_add (a, fix_int (1));
  fixed_point_t k = fix_div (a, b);

  thread_foreach (recent_cpu_formula, &k);

  intr_set_level (old_level);
}

/* Updates the recent_cpu of thread T using the MLFQS recent_cpu
   formula. AUX points to the precomputed decay coefficient. */
void
recent_cpu_formula (struct thread *t, void *aux)
{
  fixed_point_t k = *(fixed_point_t *) aux;
  t->recent_cpu = fix_add (fix_mul (k, t->recent_cpu), fix_int (t->nice));
}
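
/* Together these implement the Pintos MLFQS recent_cpu decay:

     recent_cpu = (2*load_avg) / (2*load_avg + 1) * recent_cpu + nice

   Like the load average update, this is meant to run once per
   second from the timer interrupt; the per-tick increment of the
   running thread's recent_cpu happens in thread_tick() above. */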

/* Idle thread. Executes when no other thread is ready to run.

   The idle thread is initially put on the ready list by
   thread_start(). It will be scheduled once initially, at which
   point it initializes idle_thread, "up"s the semaphore passed
   to it to enable thread_start() to continue, and immediately
   blocks. After that, the idle thread never appears in the
   ready list. It is returned by next_thread_to_run() as a
   special case when the ready list is empty. */
static void
idle (void *idle_started_ UNUSED)
{
  struct semaphore *idle_started = idle_started_;
  idle_thread = thread_current ();
  sema_up (idle_started);

  for (;;)
    {
      /* Let someone else run. */
      intr_disable ();
      thread_block ();

      /* Re-enable interrupts and wait for the next one.

         The `sti' instruction disables interrupts until the
         completion of the next instruction, so these two
         instructions are executed atomically. This atomicity is
         important; otherwise, an interrupt could be handled
         between re-enabling interrupts and waiting for the next
         one to occur, wasting as much as one clock tick worth of
         time.

         See [IA32-v2a] "HLT", [IA32-v2b] "STI", and [IA32-v3a]
         7.11.1 "HLT Instruction". */
      asm volatile ("sti; hlt" : : : "memory");
    }
}

/* Function used as the basis for a kernel thread. */
static void
kernel_thread (thread_func *function, void *aux)
{
  ASSERT (function != NULL);

  intr_enable ();       /* The scheduler runs with interrupts off. */
  function (aux);       /* Execute the thread function. */
  thread_exit ();       /* If function() returns, kill the thread. */
}

/* Returns the running thread. */
struct thread *
running_thread (void)
{
  uint32_t *esp;

  /* Copy the CPU's stack pointer into `esp', and then round that
     down to the start of a page. Because `struct thread' is
     always at the beginning of a page and the stack pointer is
     somewhere in the middle, this locates the current thread. */
  asm ("mov %%esp, %0" : "=g" (esp));
  return pg_round_down (esp);
}

/* Returns true if T appears to point to a valid thread. */
static bool
is_thread (struct thread *t)
{
  return t != NULL && t->magic == THREAD_MAGIC;
}

/* Does basic initialization of T as a blocked thread named
   NAME. */
static void
init_thread (struct thread *t, const char *name, int priority)
{
  enum intr_level old_level;

  ASSERT (t != NULL);
  ASSERT (PRI_MIN <= priority && priority <= PRI_MAX);
  ASSERT (name != NULL);

  /* The initial thread is the first one initialized, so use it
     to zero the system load average. */
  if (t == initial_thread)
    load_avg = fix_int (0);

  memset (t, 0, sizeof *t);
  t->status = THREAD_BLOCKED;
  strlcpy (t->name, name, sizeof t->name);
  t->stack = (uint8_t *) t + PGSIZE;
  t->priority = priority;
  t->own_priority = priority;
  t->magic = THREAD_MAGIC;

  if (thread_mlfqs)
    {
      /* Under the MLFQS, a new thread inherits the creating
         thread's nice and recent_cpu values; the initial thread
         starts with both at zero. */
      if (t == initial_thread)
        {
          t->nice = 0;
          t->recent_cpu = fix_int (0);
        }
      else
        {
          t->nice = thread_get_nice ();
          t->recent_cpu = thread_current ()->recent_cpu;
        }

      t->priority = 0;
      t->own_priority = 0;
    }

  list_init (&t->held_locks);

  old_level = intr_disable ();
  list_push_back (&all_list, &t->allelem);
  intr_set_level (old_level);
}

/* Allocates a SIZE-byte frame at the top of thread T's stack and
   returns a pointer to the frame's base. */
static void *
alloc_frame (struct thread *t, size_t size)
{
  /* Stack data is always allocated in word-size units. */
  ASSERT (is_thread (t));
  ASSERT (size % sizeof (uint32_t) == 0);

  t->stack -= size;
  return t->stack;
}

/* Chooses and returns the next thread to be scheduled. Should
   return a thread from the run queue, unless the run queue is
   empty. (If the running thread can continue running, then it
   will be in the run queue.) If the run queue is empty, return
   idle_thread. */
static struct thread *
next_thread_to_run (void)
{
  if (list_empty (&ready_list))
    return idle_thread;
  else
    {
      /* Pick the ready thread with the highest priority. */
      struct list_elem *e = list_max (&ready_list, has_lower_priority_than, NULL);
      struct thread *max_priority_thread = list_entry (e, struct thread, elem);
      list_remove (&max_priority_thread->elem);
      return max_priority_thread;
    }
}
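
/* Design note: list_max() makes this selection O(n) in the number
   of ready threads on every context switch. An alternative would
   be to keep ready_list sorted at insertion time (as
   resort_running_thread() below does with list_insert_ordered())
   and pop the front; the mixed approach here appears intentional,
   since thread_unblock() and thread_yield() use plain
   list_push_back(). */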

/* Completes a thread switch by activating the new thread's page
   tables, and, if the previous thread is dying, destroying it.

   At this function's invocation, we just switched from thread
   PREV, the new thread is already running, and interrupts are
   still disabled. This function is normally invoked by
   thread_schedule() as its final action before returning, but
   the first time a thread is scheduled it is called by
   switch_entry() (see switch.S).

   It's not safe to call printf() until the thread switch is
   complete. In practice that means that printf()s should be
   added at the end of the function.

   After this function and its caller returns, the thread switch
   is complete. */
void
thread_schedule_tail (struct thread *prev)
{
  struct thread *cur = running_thread ();

  ASSERT (intr_get_level () == INTR_OFF);

  /* Mark us as running. */
  cur->status = THREAD_RUNNING;

  /* Start new time slice. */
  thread_ticks = 0;

#ifdef USERPROG
  /* Activate the new address space. */
  process_activate ();
#endif

  /* If the thread we switched from is dying, destroy its struct
     thread. This must happen late so that thread_exit() doesn't
     pull out the rug under itself. (We don't free
     initial_thread because its memory was not obtained via
     palloc().) */
  if (prev != NULL && prev->status == THREAD_DYING && prev != initial_thread)
    {
      ASSERT (prev != cur);
      palloc_free_page (prev);
    }
}

/* Schedules a new process. At entry, interrupts must be off and
   the running process's state must have been changed from
   running to some other state. This function finds another
   thread to run and switches to it.

   It's not safe to call printf() until thread_schedule_tail()
   has completed. */
static void
schedule (void)
{
  struct thread *cur = running_thread ();
  struct thread *next = next_thread_to_run ();
  struct thread *prev = NULL;

  ASSERT (intr_get_level () == INTR_OFF);
  ASSERT (cur->status != THREAD_RUNNING);
  ASSERT (is_thread (next));

  if (cur != next)
    prev = switch_threads (cur, next);
  thread_schedule_tail (prev);
}

/* Returns a tid to use for a new thread. */
static tid_t
allocate_tid (void)
{
  static tid_t next_tid = 1;
  tid_t tid;

  lock_acquire (&tid_lock);
  tid = next_tid++;
  lock_release (&tid_lock);

  return tid;
}
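
/* Re-inserts thread T into the ready list in priority order.
   Intended for use after T's priority changes (for example
   through priority donation); T is assumed to already be on the
   ready list when this is called. */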
void
resort_running_thread (struct thread *t)
{
  list_remove (&t->elem);
  list_insert_ordered (&ready_list, &t->elem, has_higher_priority_than, NULL);
}

/* Offset of `stack' member within `struct thread'.
   Used by switch.S, which can't figure it out on its own. */
uint32_t thread_stack_ofs = offsetof (struct thread, stack);