threadqenque_formatted.c
/**
 * @file
 *
 * @ingroup RTEMSScoreThreadQueue
 *
 * @brief This source file contains the implementation of
 *   _Thread_queue_Deadlock_fatal(), _Thread_queue_Deadlock_status(),
 *   _Thread_queue_Do_dequeue(), _Thread_queue_Enqueue(),
 *   _Thread_queue_Enqueue_do_nothing_extra(), _Thread_queue_Enqueue_sticky(),
 *   _Thread_queue_Extract(), _Thread_queue_Extract_critical(),
 *   _Thread_queue_Extract_locked(), _Thread_queue_Path_acquire_critical(),
 *   _Thread_queue_Path_release_critical(), _Thread_queue_Surrender(),
 *   _Thread_queue_Surrender_sticky(), and _Thread_queue_Unblock_critical().
 */

/*
 * COPYRIGHT (c) 1989-2014.
 * On-Line Applications Research Corporation (OAR).
 *
 * Copyright (c) 2015, 2016 embedded brains GmbH.
 *
 * The license and distribution terms for this file may be
 * found in the file LICENSE in this distribution or at
 * http://www.rtems.org/license/LICENSE.
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <rtems/score/assert.h>
#include <rtems/score/status.h>
#include <rtems/score/threaddispatch.h>
#include <rtems/score/threadimpl.h>
#include <rtems/score/threadqimpl.h>
#include <rtems/score/watchdogimpl.h>

#define THREAD_QUEUE_INTEND_TO_BLOCK \
  ( THREAD_WAIT_CLASS_OBJECT | THREAD_WAIT_STATE_INTEND_TO_BLOCK )

#define THREAD_QUEUE_BLOCKED \
  ( THREAD_WAIT_CLASS_OBJECT | THREAD_WAIT_STATE_BLOCKED )

#define THREAD_QUEUE_READY_AGAIN \
  ( THREAD_WAIT_CLASS_OBJECT | THREAD_WAIT_STATE_READY_AGAIN )
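
/*
 * Overview (derived from the code below, not an upstream comment): the three
 * flag combinations above form a small state machine for a thread waiting on
 * a thread queue:
 *
 *   THREAD_QUEUE_INTEND_TO_BLOCK -> THREAD_QUEUE_BLOCKED
 *     the enqueueing thread completed the blocking operation itself, see
 *     _Thread_queue_Enqueue();
 *
 *   THREAD_QUEUE_INTEND_TO_BLOCK -> THREAD_QUEUE_READY_AGAIN
 *     another party satisfied or timed out the request before the thread
 *     actually blocked, see _Thread_queue_Make_ready_again();
 *
 *   THREAD_QUEUE_BLOCKED -> THREAD_QUEUE_READY_AGAIN
 *     a blocked thread is extracted from the queue and must be unblocked.
 */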

#if defined( RTEMS_SMP )
/*
 * A global registry of active thread queue links is used to provide deadlock
 * detection on SMP configurations.  This is simple to implement and no
 * additional storage is required for the thread queues.  The disadvantage is
 * the global registry is not scalable and may lead to lock contention.
 * However, the registry is only used in case of nested resource conflicts.
 * In this case, the application is already in trouble.
 */

typedef struct {
  ISR_lock_Control Lock;

  RBTree_Control Links;
} Thread_queue_Links;

static Thread_queue_Links _Thread_queue_Links = {
  ISR_LOCK_INITIALIZER( "Thread Queue Links" ),
  RBTREE_INITIALIZER_EMPTY( _Thread_queue_Links.Links )
};

static bool _Thread_queue_Link_equal(
  const void        *left,
  const RBTree_Node *right
)
{
  const Thread_queue_Queue *the_left;
  const Thread_queue_Link  *the_right;

  the_left = left;
  the_right = (Thread_queue_Link *) right;

  return the_left == the_right->source;
}

static bool _Thread_queue_Link_less(
  const void        *left,
  const RBTree_Node *right
)
{
  const Thread_queue_Queue *the_left;
  const Thread_queue_Link  *the_right;

  the_left = left;
  the_right = (Thread_queue_Link *) right;

  return (uintptr_t) the_left < (uintptr_t) the_right->source;
}

static void *_Thread_queue_Link_map( RBTree_Node *node )
{
  return node;
}

static Thread_queue_Link *_Thread_queue_Link_find(
  Thread_queue_Links *links,
  Thread_queue_Queue *source
)
{
  return _RBTree_Find_inline(
    &links->Links,
    source,
    _Thread_queue_Link_equal,
    _Thread_queue_Link_less,
    _Thread_queue_Link_map
  );
}

static bool _Thread_queue_Link_add(
  Thread_queue_Link  *link,
  Thread_queue_Queue *source,
  Thread_queue_Queue *target
)
{
  Thread_queue_Links *links;
  Thread_queue_Queue *recursive_target;
  ISR_lock_Context    lock_context;

  link->source = source;
  link->target = target;

  links = &_Thread_queue_Links;
  recursive_target = target;

  _ISR_lock_Acquire( &links->Lock, &lock_context );

  while ( true ) {
    Thread_queue_Link *recursive_link;

    recursive_link = _Thread_queue_Link_find( links, recursive_target );

    if ( recursive_link == NULL ) {
      break;
    }

    recursive_target = recursive_link->target;

    if ( recursive_target == source ) {
      _ISR_lock_Release( &links->Lock, &lock_context );
      return false;
    }
  }

  _RBTree_Insert_inline(
    &links->Links,
    &link->Registry_node,
    source,
    _Thread_queue_Link_less
  );

  _ISR_lock_Release( &links->Lock, &lock_context );
  return true;
}

static void _Thread_queue_Link_remove( Thread_queue_Link *link )
{
  Thread_queue_Links *links;
  ISR_lock_Context    lock_context;

  links = &_Thread_queue_Links;

  _ISR_lock_Acquire( &links->Lock, &lock_context );
  _RBTree_Extract( &links->Links, &link->Registry_node );
  _ISR_lock_Release( &links->Lock, &lock_context );
}
#endif
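
/*
 * Illustrative walkthrough (sketch, not an upstream comment): suppose thread
 * TA owns mutex MA and thread TB owns mutex MB, and TA already blocks on MB.
 * When TB then tries to obtain MA, _Thread_queue_Path_acquire_critical()
 * below first registers the link MA -> MB (MA's owner TA waits on MB) and
 * next attempts to register MB -> MA for the following hop.  Walking the
 * registry from target MA leads over MA -> MB back to the source MB, so
 * _Thread_queue_Link_add() returns false and the enqueue is refused as a
 * deadlock.
 */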

#if !defined( RTEMS_SMP )
static
#endif
void _Thread_queue_Path_release_critical(
  Thread_queue_Context *queue_context
)
{
#if defined( RTEMS_SMP )
  Chain_Node *head;
  Chain_Node *node;

  head = _Chain_Head( &queue_context->Path.Links );
  node = _Chain_Last( &queue_context->Path.Links );

  while ( head != node ) {
    Thread_queue_Link *link;

    link = THREAD_QUEUE_LINK_OF_PATH_NODE( node );

    if ( link->Lock_context.Wait.queue != NULL ) {
      _Thread_queue_Link_remove( link );
      _Thread_Wait_release_queue_critical(
        link->Lock_context.Wait.queue,
        &link->Lock_context
      );
      _Thread_Wait_remove_request( link->owner, &link->Lock_context );
    } else {
      _Thread_Wait_release_default_critical(
        link->owner,
        &link->Lock_context.Lock_context
      );
    }

    node = _Chain_Previous( node );
#if defined( RTEMS_DEBUG )
    _Chain_Set_off_chain( &link->Path_node );
#endif
  }
#else
  (void) queue_context;
#endif
}

#if defined( RTEMS_SMP )
static void _Thread_queue_Path_append_deadlock_thread(
  Thread_Control       *the_thread,
  Thread_queue_Context *queue_context
)
{
  Thread_Control *deadlock;

  /*
   * In case of a deadlock, we must obtain the thread wait default lock for
   * the first thread on the path that tries to enqueue on a thread queue.
   * This thread can be identified by the thread wait operations.  This lock
   * acquire is necessary for the timeout and explicit thread priority
   * changes, see _Thread_Priority_perform_actions().
   */

  deadlock = NULL;

  while ( the_thread->Wait.operations != &_Thread_queue_Operations_default ) {
    the_thread = the_thread->Wait.queue->owner;
    deadlock = the_thread;
  }

  if ( deadlock != NULL ) {
    Thread_queue_Link *link;

    link = &queue_context->Path.Deadlock;
    _Chain_Initialize_node( &link->Path_node );
    _Chain_Append_unprotected( &queue_context->Path.Links, &link->Path_node );
    link->owner = deadlock;
    link->Lock_context.Wait.queue = NULL;
    _Thread_Wait_acquire_default_critical(
      deadlock,
      &link->Lock_context.Lock_context
    );
  }
}
#endif

#if !defined( RTEMS_SMP )
static
#endif
bool _Thread_queue_Path_acquire_critical(
  Thread_queue_Queue   *queue,
  Thread_Control       *the_thread,
  Thread_queue_Context *queue_context
)
{
  Thread_Control     *owner;
#if defined( RTEMS_SMP )
  Thread_queue_Link  *link;
  Thread_queue_Queue *target;

  /*
   * For an overview please look at the non-SMP part below.  We basically do
   * the same on SMP configurations.  The fact that we may have more than one
   * executing thread and each thread queue has its own SMP lock makes the
   * task a bit more difficult.  We have to avoid deadlocks at SMP lock
   * level, since this would result in an unrecoverable deadlock of the
   * overall system.
   */

  _Chain_Initialize_empty( &queue_context->Path.Links );

  owner = queue->owner;

  if ( owner == NULL ) {
    return true;
  }

  if ( owner == the_thread ) {
    return false;
  }

  _Chain_Initialize_node(
    &queue_context->Path.Start.Lock_context.Wait.Gate.Node
  );
  link = &queue_context->Path.Start;
  _RBTree_Initialize_node( &link->Registry_node );
  _Chain_Initialize_node( &link->Path_node );

  do {
    _Chain_Append_unprotected( &queue_context->Path.Links, &link->Path_node );
    link->owner = owner;

    _Thread_Wait_acquire_default_critical(
      owner,
      &link->Lock_context.Lock_context
    );

    target = owner->Wait.queue;
    link->Lock_context.Wait.queue = target;

    if ( target != NULL ) {
      if ( _Thread_queue_Link_add( link, queue, target ) ) {
        _Thread_queue_Gate_add(
          &owner->Wait.Lock.Pending_requests,
          &link->Lock_context.Wait.Gate
        );
        _Thread_Wait_release_default_critical(
          owner,
          &link->Lock_context.Lock_context
        );
        _Thread_Wait_acquire_queue_critical( target, &link->Lock_context );

        if ( link->Lock_context.Wait.queue == NULL ) {
          _Thread_queue_Link_remove( link );
          _Thread_Wait_release_queue_critical( target, &link->Lock_context );
          _Thread_Wait_acquire_default_critical(
            owner,
            &link->Lock_context.Lock_context
          );
          _Thread_Wait_remove_request_locked( owner, &link->Lock_context );
          _Assert( owner->Wait.queue == NULL );
          return true;
        }
      } else {
        link->Lock_context.Wait.queue = NULL;
        _Thread_queue_Path_append_deadlock_thread( owner, queue_context );
        return false;
      }
    } else {
      return true;
    }

    link = &owner->Wait.Link;
    queue = target;
    owner = queue->owner;
  } while ( owner != NULL );
#else
  do {
    owner = queue->owner;

    if ( owner == NULL ) {
      return true;
    }

    if ( owner == the_thread ) {
      return false;
    }

    queue = owner->Wait.queue;
  } while ( queue != NULL );
#endif

  return true;
}
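
/*
 * Overview note (sketch, not an upstream comment): the non-SMP branch above
 * is the whole deadlock detection algorithm in a nutshell.  Starting at the
 * queue to enqueue on, follow the chain queue -> owner -> queue the owner
 * waits on, until either a queue has no owner (enqueue is safe) or the owner
 * turns out to be the enqueueing thread itself (deadlock).  The SMP branch
 * performs the same walk while dealing with the per-queue SMP locks and the
 * global link registry.
 */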

void _Thread_queue_Enqueue_do_nothing_extra(
  Thread_queue_Queue   *queue,
  Thread_Control       *the_thread,
  Per_CPU_Control      *cpu_self,
  Thread_queue_Context *queue_context
)
{
  /* Do nothing */
}

void _Thread_queue_Deadlock_status( Thread_Control *the_thread )
{
  the_thread->Wait.return_code = STATUS_DEADLOCK;
}

void _Thread_queue_Deadlock_fatal( Thread_Control *the_thread )
{
  _Internal_error( INTERNAL_ERROR_THREAD_QUEUE_DEADLOCK );
}

void _Thread_queue_Enqueue(
  Thread_queue_Queue            *queue,
  const Thread_queue_Operations *operations,
  Thread_Control                *the_thread,
  Thread_queue_Context          *queue_context
)
{
  Per_CPU_Control *cpu_self;
  bool             success;

  _Assert( queue_context->enqueue_callout != NULL );

#if defined( RTEMS_MULTIPROCESSING )
  if ( _Thread_MP_Is_receive( the_thread ) && the_thread->receive_packet ) {
    the_thread = _Thread_MP_Allocate_proxy( queue_context->thread_state );
  }
#endif

  _Thread_Wait_claim( the_thread, queue );

  if (
    !_Thread_queue_Path_acquire_critical( queue, the_thread, queue_context )
  ) {
    _Thread_queue_Path_release_critical( queue_context );
    _Thread_Wait_restore_default( the_thread );
    _Thread_queue_Queue_release(
      queue,
      &queue_context->Lock_context.Lock_context
    );
    _Thread_Wait_tranquilize( the_thread );
    _Assert( queue_context->deadlock_callout != NULL );
    ( *queue_context->deadlock_callout )( the_thread );
    return;
  }

  _Thread_queue_Context_clear_priority_updates( queue_context );
  _Thread_Wait_claim_finalize( the_thread, operations );
  ( *operations->enqueue )( queue, the_thread, queue_context );

  _Thread_queue_Path_release_critical( queue_context );

  the_thread->Wait.return_code = STATUS_SUCCESSFUL;
  _Thread_Wait_flags_set( the_thread, THREAD_QUEUE_INTEND_TO_BLOCK );
  cpu_self = _Thread_queue_Dispatch_disable( queue_context );
  _Thread_queue_Queue_release(
    queue,
    &queue_context->Lock_context.Lock_context
  );

  ( *queue_context->enqueue_callout )(
    queue,
    the_thread,
    cpu_self,
    queue_context
  );

  /*
   * Set the blocking state for this thread queue in the thread.
   */
  _Thread_Set_state( the_thread, queue_context->thread_state );

  /*
   * At this point thread dispatching is disabled, however, we already
   * released the thread queue lock.  Thus, interrupts or threads on other
   * processors may have already changed our state with respect to the thread
   * queue object.  The request could be satisfied or timed out.  This
   * situation is indicated by the thread wait flags.  Other parties must not
   * modify our thread state as long as we are in the
   * THREAD_QUEUE_INTEND_TO_BLOCK thread wait state, thus we have to cancel
   * the blocking operation ourselves if necessary.
   */
  success = _Thread_Wait_flags_try_change_acquire(
    the_thread,
    THREAD_QUEUE_INTEND_TO_BLOCK,
    THREAD_QUEUE_BLOCKED
  );

  if ( !success ) {
    _Thread_Remove_timer_and_unblock( the_thread, queue );
  }

  _Thread_Priority_update( queue_context );
  _Thread_Dispatch_direct( cpu_self );
}
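
/*
 * Illustrative usage sketch (not an upstream comment): a typical caller such
 * as a CORE mutex seize path holds the queue lock and sets up the context
 * callouts before calling _Thread_queue_Enqueue().  The setter helpers,
 * the_mutex, operations, and executing below are placeholders inferred from
 * the queue_context members used in this file, not verbatim API:
 *
 *   Thread_queue_Context queue_context;
 *
 *   _Thread_queue_Context_initialize( &queue_context );
 *   _Thread_queue_Context_set_thread_state(
 *     &queue_context,
 *     STATES_WAITING_FOR_MUTEX
 *   );
 *   _Thread_queue_Context_set_deadlock_callout(
 *     &queue_context,
 *     _Thread_queue_Deadlock_status
 *   );
 *   _Thread_queue_Acquire( &the_mutex->Wait_queue, &queue_context );
 *   // ...owner check and uncontended fast path...
 *   _Thread_queue_Enqueue(
 *     &the_mutex->Wait_queue.Queue,
 *     operations,
 *     executing,
 *     &queue_context
 *   );
 *   status = _Thread_Wait_get_status( executing );
 */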

#if defined( RTEMS_SMP )
Status_Control _Thread_queue_Enqueue_sticky(
  Thread_queue_Queue            *queue,
  const Thread_queue_Operations *operations,
  Thread_Control                *the_thread,
  Thread_queue_Context          *queue_context
)
{
  Per_CPU_Control *cpu_self;

  _Assert( queue_context->enqueue_callout != NULL );

  _Thread_Wait_claim( the_thread, queue );

  if (
    !_Thread_queue_Path_acquire_critical( queue, the_thread, queue_context )
  ) {
    _Thread_queue_Path_release_critical( queue_context );
    _Thread_Wait_restore_default( the_thread );
    _Thread_queue_Queue_release(
      queue,
      &queue_context->Lock_context.Lock_context
    );
    _Thread_Wait_tranquilize( the_thread );
    ( *queue_context->deadlock_callout )( the_thread );
    return _Thread_Wait_get_status( the_thread );
  }

  _Thread_queue_Context_clear_priority_updates( queue_context );
  _Thread_Wait_claim_finalize( the_thread, operations );
  ( *operations->enqueue )( queue, the_thread, queue_context );

  _Thread_queue_Path_release_critical( queue_context );

  the_thread->Wait.return_code = STATUS_SUCCESSFUL;
  _Thread_Wait_flags_set( the_thread, THREAD_QUEUE_INTEND_TO_BLOCK );
  cpu_self = _Thread_queue_Dispatch_disable( queue_context );
  _Thread_queue_Queue_release(
    queue,
    &queue_context->Lock_context.Lock_context
  );

  if ( cpu_self->thread_dispatch_disable_level != 1 ) {
    _Internal_error(
      INTERNAL_ERROR_THREAD_QUEUE_ENQUEUE_STICKY_FROM_BAD_STATE
    );
  }

  ( *queue_context->enqueue_callout )(
    queue,
    the_thread,
    cpu_self,
    queue_context
  );

  _Thread_Priority_update( queue_context );
  _Thread_Priority_and_sticky_update( the_thread, 1 );
  _Thread_Dispatch_enable( cpu_self );

  while (
    _Thread_Wait_flags_get_acquire( the_thread ) ==
      THREAD_QUEUE_INTEND_TO_BLOCK
  ) {
    /* Wait */
  }

  _Thread_Wait_tranquilize( the_thread );
  _Thread_Timer_remove( the_thread );
  return _Thread_Wait_get_status( the_thread );
}
#endif
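
/*
 * Note on the sticky variant above (sketch, not an upstream comment): after
 * thread dispatching is enabled, the thread does not block in the scheduler
 * sense; it busy-waits until another processor satisfies or times out the
 * request, i.e. until the wait flags leave THREAD_QUEUE_INTEND_TO_BLOCK.
 * This keeps the thread schedulable (sticky) on its processor, which SMP
 * locking protocols such as MrsP rely on (an assumption based on the sticky
 * semantics, not stated in this file).
 */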

#if defined( RTEMS_MULTIPROCESSING )
static bool _Thread_queue_MP_set_callout(
  Thread_Control             *the_thread,
  const Thread_queue_Context *queue_context
)
{
  Thread_Proxy_control    *the_proxy;
  Thread_queue_MP_callout  mp_callout;

  if ( _Objects_Is_local_id( the_thread->Object.id ) ) {
    return false;
  }

  the_proxy = (Thread_Proxy_control *) the_thread;
  mp_callout = queue_context->mp_callout;
  _Assert( mp_callout != NULL );
  the_proxy->thread_queue_callout = mp_callout;
  return true;
}
#endif

static bool _Thread_queue_Make_ready_again( Thread_Control *the_thread )
{
  bool success;
  bool unblock;

  /*
   * We must update the wait flags under protection of the current thread
   * lock, otherwise a _Thread_Timeout() running on another processor may
   * interfere.
   */
  success = _Thread_Wait_flags_try_change_release(
    the_thread,
    THREAD_QUEUE_INTEND_TO_BLOCK,
    THREAD_QUEUE_READY_AGAIN
  );

  if ( success ) {
    unblock = false;
  } else {
    _Assert( _Thread_Wait_flags_get( the_thread ) == THREAD_QUEUE_BLOCKED );
    _Thread_Wait_flags_set( the_thread, THREAD_QUEUE_READY_AGAIN );
    unblock = true;
  }

  _Thread_Wait_restore_default( the_thread );
  return unblock;
}

bool _Thread_queue_Extract_locked(
  Thread_queue_Queue            *queue,
  const Thread_queue_Operations *operations,
  Thread_Control                *the_thread,
  Thread_queue_Context          *queue_context
)
{
#if defined( RTEMS_MULTIPROCESSING )
  _Thread_queue_MP_set_callout( the_thread, queue_context );
#endif
  ( *operations->extract )( queue, the_thread, queue_context );
  return _Thread_queue_Make_ready_again( the_thread );
}

void _Thread_queue_Unblock_critical(
  bool                unblock,
  Thread_queue_Queue *queue,
  Thread_Control     *the_thread,
  ISR_lock_Context   *lock_context
)
{
  if ( unblock ) {
    Per_CPU_Control *cpu_self;

    cpu_self = _Thread_Dispatch_disable_critical( lock_context );
    _Thread_queue_Queue_release( queue, lock_context );

    _Thread_Remove_timer_and_unblock( the_thread, queue );

    _Thread_Dispatch_enable( cpu_self );
  } else {
    _Thread_queue_Queue_release( queue, lock_context );
  }
}

void _Thread_queue_Extract_critical(
  Thread_queue_Queue            *queue,
  const Thread_queue_Operations *operations,
  Thread_Control                *the_thread,
  Thread_queue_Context          *queue_context
)
{
  bool unblock;

  unblock = _Thread_queue_Extract_locked(
    queue,
    operations,
    the_thread,
    queue_context
  );

  _Thread_queue_Unblock_critical(
    unblock,
    queue,
    the_thread,
    &queue_context->Lock_context.Lock_context
  );
}

void _Thread_queue_Extract( Thread_Control *the_thread )
{
  Thread_queue_Context  queue_context;
  Thread_queue_Queue   *queue;

  _Thread_queue_Context_initialize( &queue_context );
  _Thread_queue_Context_clear_priority_updates( &queue_context );
  _Thread_Wait_acquire( the_thread, &queue_context );

  queue = the_thread->Wait.queue;

  if ( queue != NULL ) {
    bool unblock;

    _Thread_Wait_remove_request( the_thread, &queue_context.Lock_context );
    _Thread_queue_Context_set_MP_callout(
      &queue_context,
      _Thread_queue_MP_callout_do_nothing
    );
    unblock = _Thread_queue_Extract_locked(
      queue,
      the_thread->Wait.operations,
      the_thread,
      &queue_context
    );
    _Thread_queue_Unblock_critical(
      unblock,
      queue,
      the_thread,
      &queue_context.Lock_context.Lock_context
    );
  } else {
    _Thread_Wait_release( the_thread, &queue_context );
  }
}

void _Thread_queue_Surrender(
  Thread_queue_Queue            *queue,
  Thread_queue_Heads            *heads,
  Thread_Control                *previous_owner,
  Thread_queue_Context          *queue_context,
  const Thread_queue_Operations *operations
)
{
  Thread_Control  *new_owner;
  bool             unblock;
  Per_CPU_Control *cpu_self;

  _Assert( heads != NULL );

  _Thread_queue_Context_clear_priority_updates( queue_context );
  new_owner =
    ( *operations->surrender )( queue, heads, previous_owner, queue_context );
  queue->owner = new_owner;

#if defined( RTEMS_MULTIPROCESSING )
  if ( !_Thread_queue_MP_set_callout( new_owner, queue_context ) )
#endif
  {
    _Thread_Resource_count_increment( new_owner );
  }

  unblock = _Thread_queue_Make_ready_again( new_owner );

  cpu_self = _Thread_queue_Dispatch_disable( queue_context );
  _Thread_queue_Queue_release(
    queue,
    &queue_context->Lock_context.Lock_context
  );

  _Thread_Priority_update( queue_context );

  if ( unblock ) {
    _Thread_Remove_timer_and_unblock( new_owner, queue );
  }

  _Thread_Dispatch_enable( cpu_self );
}
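
/*
 * Usage note (sketch, not an upstream comment): _Thread_queue_Surrender() is
 * the ownership hand-over path and assumes at least one waiting thread, see
 * _Assert( heads != NULL ) above.  A release operation without waiters is
 * expected to simply clear queue->owner and release the queue lock instead.
 * The new owner returned by the operations->surrender handler is made ready
 * again and, if it had already reached the blocked state, unblocked with its
 * timeout watchdog removed.
 */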

#if defined( RTEMS_SMP )
void _Thread_queue_Surrender_sticky(
  Thread_queue_Queue            *queue,
  Thread_queue_Heads            *heads,
  Thread_Control                *previous_owner,
  Thread_queue_Context          *queue_context,
  const Thread_queue_Operations *operations
)
{
  Thread_Control  *new_owner;
  Per_CPU_Control *cpu_self;

  _Assert( heads != NULL );

  _Thread_queue_Context_clear_priority_updates( queue_context );
  new_owner =
    ( *operations->surrender )( queue, heads, previous_owner, queue_context );
  queue->owner = new_owner;
  _Thread_queue_Make_ready_again( new_owner );

  cpu_self = _Thread_queue_Dispatch_disable( queue_context );
  _Thread_queue_Queue_release(
    queue,
    &queue_context->Lock_context.Lock_context
  );
  _Thread_Priority_and_sticky_update( previous_owner, -1 );
  _Thread_Priority_and_sticky_update( new_owner, 0 );
  _Thread_Dispatch_enable( cpu_self );
}
#endif

#if defined( RTEMS_MULTIPROCESSING )
void _Thread_queue_Unblock_proxy(
  Thread_queue_Queue *queue,
  Thread_Control     *the_thread
)
{
  const Thread_queue_Object *the_queue_object;
  Thread_Proxy_control      *the_proxy;
  Thread_queue_MP_callout    mp_callout;

  the_queue_object = THREAD_QUEUE_QUEUE_TO_OBJECT( queue );
  the_proxy = (Thread_Proxy_control *) the_thread;
  mp_callout = the_proxy->thread_queue_callout;
  ( *mp_callout )( the_thread, the_queue_object->Object.id );

  _Thread_MP_Free_proxy( the_thread );
}
#endif