Update #2556.
---
 cpukit/score/include/rtems/score/schedulerimpl.h   | 257 +++++++--------------
 cpukit/score/include/rtems/score/schedulernode.h   | 104 ++-------
 .../score/include/rtems/score/schedulernodeimpl.h  |   1 -
 .../score/include/rtems/score/schedulersmpimpl.h   |  98 +++++---
 cpukit/score/include/rtems/score/threadimpl.h      |   7 +
 cpukit/score/src/schedulerpriorityaffinitysmp.c    |   8 -
 testsuites/smptests/smpmrsp01/init.c               |   5 -
 7 files changed, 187 insertions(+), 293 deletions(-)
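The key change: the per-node help state machine (SCHEDULER_HELP_*) is replaced
by a per-node sticky level. Blocking decrements the level, unblocking
increments it, and while the level stays above zero a scheduled node keeps its
processor busy through a borrowed idle thread. The following is a minimal
standalone sketch of this accounting; the Node type and get_idle_thread() are
simplified stand-ins for illustration, not the kernel's Scheduler_Node or
Scheduler_Get_idle_thread interfaces:

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

typedef struct {
  int   sticky_level; /* > 0 keeps the node in use, via an idle thread if needed */
  void *idle;         /* idle thread lent to the node, or NULL */
} Node;

/* Stand-in for the scheduler's idle thread pool. */
static void *get_idle_thread( void )
{
  static int idle_thread;
  return &idle_thread;
}

/* Models _Scheduler_Block_node(): drop one sticky level; while levels
 * remain, a scheduled node borrows an idle thread instead of giving up
 * its processor. */
static bool block_keeps_processor( Node *node, bool is_scheduled )
{
  --node->sticky_level;
  assert( node->sticky_level >= 0 );

  if ( node->sticky_level > 0 ) {
    if ( is_scheduled && node->idle == NULL ) {
      node->idle = get_idle_thread();
    }

    return true;
  }

  return false;
}

/* Models _Scheduler_Unblock_node(): regain one sticky level and discard
 * a previously lent idle thread. */
static void unblock( Node *node )
{
  ++node->sticky_level;
  assert( node->sticky_level > 0 );
  node->idle = NULL;
}

int main( void )
{
  /* E.g. an MrsP semaphore owner: one level for being ready, one for
   * the sticky resource. */
  Node node = { 2, NULL };

  assert( block_keeps_processor( &node, true ) );
  assert( node.idle != NULL );
  unblock( &node );
  assert( node.sticky_level == 2 && node.idle == NULL );
  return 0;
}

In the patch itself this bookkeeping is done by _Scheduler_Block_node() and
_Scheduler_Unblock_node() below, under the thread scheduler lock.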
diff --git a/cpukit/score/include/rtems/score/schedulerimpl.h b/cpukit/score/include/rtems/score/schedulerimpl.h
index 54ddd53..ac6893e 100644
--- a/cpukit/score/include/rtems/score/schedulerimpl.h
+++ b/cpukit/score/include/rtems/score/schedulerimpl.h
@@ -483,6 +483,9 @@ RTEMS_INLINE_ROUTINE void _Scheduler_Priority_and_sticky_update(
 
   _Scheduler_Acquire_critical( scheduler, &lock_context );
 
+  scheduler_node->sticky_level += sticky_level_change;
+  _Assert( scheduler_node->sticky_level >= 0 );
+
   ( *scheduler->Operations.update_priority )(
     scheduler,
     the_thread,
@@ -929,27 +932,6 @@ typedef void ( *Scheduler_Release_idle_thread )(
   Thread_Control *idle
 );
 
-RTEMS_INLINE_ROUTINE void _Scheduler_Thread_set_node(
-  Thread_Control *the_thread,
-  Scheduler_Node *node
-)
-{
-  the_thread->Scheduler.node = node;
-}
-
-RTEMS_INLINE_ROUTINE void _Scheduler_Thread_set_scheduler_and_node(
-  Thread_Control *the_thread,
-  Scheduler_Node *node,
-  const Thread_Control *previous_user_of_node
-)
-{
-  const Scheduler_Control *scheduler =
-    _Scheduler_Get_own( previous_user_of_node );
-
-  the_thread->Scheduler.control = scheduler;
-  _Scheduler_Thread_set_node( the_thread, node );
-}
-
 extern const bool _Scheduler_Thread_state_valid_state_changes[ 3 ][ 3 ];
 
 RTEMS_INLINE_ROUTINE void _Scheduler_Thread_change_state(
@@ -975,17 +957,11 @@ RTEMS_INLINE_ROUTINE void _Scheduler_Set_idle_thread(
   Thread_Control *idle
 )
 {
-  _Assert(
-    node->help_state == SCHEDULER_HELP_ACTIVE_OWNER
-      || node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL
-  );
   _Assert( _Scheduler_Node_get_idle( node ) == NULL );
   _Assert(
     _Scheduler_Node_get_owner( node ) == _Scheduler_Node_get_user( node )
   );
 
-  _Scheduler_Thread_set_node( idle, node );
   _Scheduler_Node_set_user( node, idle );
   node->idle = idle;
 }
@@ -993,25 +969,27 @@ RTEMS_INLINE_ROUTINE void _Scheduler_Set_idle_thread(
 /**
  * @brief Use an idle thread for this scheduler node.
  *
- * A thread in the SCHEDULER_HELP_ACTIVE_OWNER or SCHEDULER_HELP_ACTIVE_RIVAL
- * helping state may use an idle thread for the scheduler node owned by itself
- * in case it executes currently using another scheduler node or in case it is
- * in a blocking state.
+ * A thread whose home scheduler node has a sticky level greater than zero may
+ * use an idle thread in the home scheduler instance in case it executes
+ * currently in another scheduler instance or in case it is in a blocking
+ * state.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] node The node which wants to use the idle thread.
+ * @param[in] cpu The processor for the idle thread.
 * @param[in] get_idle_thread Function to get an idle thread.
 */
RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Use_idle_thread(
   Scheduler_Context *context,
   Scheduler_Node *node,
+  Per_CPU_Control *cpu,
   Scheduler_Get_idle_thread get_idle_thread
 )
 {
   Thread_Control *idle = ( *get_idle_thread )( context );
 
   _Scheduler_Set_idle_thread( node, idle );
-
+  _Thread_Set_CPU( idle, cpu );
   return idle;
 }
@@ -1042,7 +1020,6 @@ _Scheduler_Try_to_schedule_node(
 {
   ISR_lock_Context lock_context;
   Scheduler_Try_to_schedule_action action;
-  Thread_Control *owner;
   Thread_Control *user;
 
   action = SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE;
@@ -1050,52 +1027,20 @@ _Scheduler_Try_to_schedule_node(
 
   _Thread_Scheduler_acquire_critical( user, &lock_context );
 
-  if ( node->help_state == SCHEDULER_HELP_YOURSELF ) {
-    if ( user->Scheduler.state == THREAD_SCHEDULER_READY ) {
-      _Thread_Scheduler_cancel_need_for_help( user, _Thread_Get_CPU( user ) );
-      _Scheduler_Thread_change_state( user, THREAD_SCHEDULER_SCHEDULED );
-    } else {
-      action = SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK;
-    }
-
-    _Thread_Scheduler_release_critical( user, &lock_context );
-    return action;
-  }
-
-  owner = _Scheduler_Node_get_owner( node );
-
-  if ( node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL) {
-    if ( user->Scheduler.state == THREAD_SCHEDULER_READY ) {
-      _Scheduler_Thread_set_scheduler_and_node( user, node, owner );
-    } else if ( owner->Scheduler.state == THREAD_SCHEDULER_BLOCKED ) {
-      if ( idle != NULL ) {
-        action = SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE;
-      } else {
-        _Scheduler_Use_idle_thread( context, node, get_idle_thread );
-      }
-    } else {
-      _Scheduler_Node_set_user( node, owner );
-    }
-  } else if ( node->help_state == SCHEDULER_HELP_ACTIVE_OWNER ) {
-    if ( user->Scheduler.state == THREAD_SCHEDULER_READY ) {
-      _Scheduler_Thread_set_scheduler_and_node( user, node, owner );
-    } else if ( idle != NULL ) {
-      action = SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE;
-    } else {
-      _Scheduler_Use_idle_thread( context, node, get_idle_thread );
-    }
-  } else {
-    _Assert( node->help_state == SCHEDULER_HELP_PASSIVE );
-
-    if ( user->Scheduler.state == THREAD_SCHEDULER_READY ) {
-      _Scheduler_Thread_set_scheduler_and_node( user, node, owner );
-    } else {
-      action = SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK;
-    }
-  }
-
-  if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE ) {
+  if ( user->Scheduler.state == THREAD_SCHEDULER_READY ) {
+    _Thread_Scheduler_cancel_need_for_help( user, _Thread_Get_CPU( user ) );
     _Scheduler_Thread_change_state( user, THREAD_SCHEDULER_SCHEDULED );
+  } else if ( node->sticky_level == 0 ) {
+    action = SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK;
+  } else if ( idle != NULL ) {
+    action = SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE;
+  } else {
+    _Scheduler_Use_idle_thread(
+      context,
+      node,
+      _Thread_Get_CPU( user ),
+      get_idle_thread
+    );
   }
 
   _Thread_Scheduler_release_critical( user, &lock_context );
@@ -1125,9 +1070,6 @@ RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Release_idle_thread(
 
     node->idle = NULL;
     _Scheduler_Node_set_user( node, owner );
-    _Scheduler_Thread_change_state( idle, THREAD_SCHEDULER_READY );
-    _Scheduler_Thread_set_node( idle, idle->Scheduler.own_node );
-
     ( *release_idle_thread )( context, idle );
   }
@@ -1171,63 +1113,63 @@ RTEMS_INLINE_ROUTINE Per_CPU_Control *_Scheduler_Block_node(
   Scheduler_Get_idle_thread get_idle_thread
 )
 {
+  int sticky_level;
   ISR_lock_Context lock_context;
-  Thread_Control *old_user;
-  Thread_Control *new_user;
   Per_CPU_Control *thread_cpu;
 
+  sticky_level = node->sticky_level;
+  --sticky_level;
+  node->sticky_level = sticky_level;
+  _Assert( sticky_level >= 0 );
+
   _Thread_Scheduler_acquire_critical( thread, &lock_context );
   thread_cpu = _Thread_Get_CPU( thread );
   _Thread_Scheduler_cancel_need_for_help( thread, thread_cpu );
   _Scheduler_Thread_change_state( thread, THREAD_SCHEDULER_BLOCKED );
   _Thread_Scheduler_release_critical( thread, &lock_context );
 
-  if ( node->help_state == SCHEDULER_HELP_YOURSELF ) {
-    _Assert( thread == _Scheduler_Node_get_user( node ) );
+  if ( sticky_level > 0 ) {
+    if ( is_scheduled && _Scheduler_Node_get_idle( node ) == NULL ) {
+      Thread_Control *idle;
+
+      idle = _Scheduler_Use_idle_thread(
+        context,
+        node,
+        thread_cpu,
+        get_idle_thread
+      );
+      _Thread_Dispatch_update_heir( _Per_CPU_Get(), thread_cpu, idle );
+    }
 
-    return thread_cpu;
+    return NULL;
   }
 
-  new_user = NULL;
+  _Assert( thread == _Scheduler_Node_get_user( node ) );
+  return thread_cpu;
+}
 
-  if ( node->help_state == SCHEDULER_HELP_ACTIVE_OWNER ) {
-    if ( is_scheduled ) {
-      _Assert( thread == _Scheduler_Node_get_user( node ) );
-      old_user = thread;
-      new_user = _Scheduler_Use_idle_thread( context, node, get_idle_thread );
-    }
-  } else if ( node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL ) {
-    if ( is_scheduled ) {
-      old_user = _Scheduler_Node_get_user( node );
-
-      if ( thread == old_user ) {
-        Thread_Control *owner = _Scheduler_Node_get_owner( node );
-
-        if (
-          thread != owner
-            && owner->Scheduler.state == THREAD_SCHEDULER_READY
-        ) {
-          new_user = owner;
-          _Scheduler_Node_set_user( node, new_user );
-        } else {
-          new_user = _Scheduler_Use_idle_thread( context, node, get_idle_thread );
-        }
-      }
-    }
-  } else {
-    /* Not implemented, this is part of the OMIP support path. */
-    _Assert(0);
-  }
+RTEMS_INLINE_ROUTINE void _Scheduler_Discard_idle_thread(
+  Scheduler_Context *context,
+  Thread_Control *the_thread,
+  Scheduler_Node *node,
+  Scheduler_Release_idle_thread release_idle_thread
+)
+{
+  Thread_Control *idle;
+  Thread_Control *owner;
+  Per_CPU_Control *cpu;
 
-  if ( new_user != NULL ) {
-    Per_CPU_Control *cpu = _Thread_Get_CPU( old_user );
+  idle = _Scheduler_Node_get_idle( node );
+  owner = _Scheduler_Node_get_owner( node );
 
-    _Scheduler_Thread_change_state( new_user, THREAD_SCHEDULER_SCHEDULED );
-    _Thread_Set_CPU( new_user, cpu );
-    _Thread_Dispatch_update_heir( _Per_CPU_Get(), cpu, new_user );
-  }
+  node->idle = NULL;
+  _Assert( _Scheduler_Node_get_user( node ) == idle );
+  _Scheduler_Node_set_user( node, owner );
+  ( *release_idle_thread )( context, idle );
 
-  return NULL;
+  cpu = _Thread_Get_CPU( idle );
+  _Thread_Set_CPU( the_thread, cpu );
+  _Thread_Dispatch_update_heir( _Per_CPU_Get(), cpu, the_thread );
 }
 
 /**
@@ -1252,46 +1194,20 @@ RTEMS_INLINE_ROUTINE bool _Scheduler_Unblock_node(
 {
   bool unblock;
 
+  ++node->sticky_level;
+  _Assert( node->sticky_level > 0 );
+
   if ( is_scheduled ) {
-    Thread_Control *old_user = _Scheduler_Node_get_user( node );
-    Per_CPU_Control *cpu = _Thread_Get_CPU( old_user );
-    Thread_Control *idle = _Scheduler_Release_idle_thread(
+    _Scheduler_Discard_idle_thread(
       context,
+      the_thread,
       node,
       release_idle_thread
     );
-    Thread_Control *owner = _Scheduler_Node_get_owner( node );
-    Thread_Control *new_user;
-
-    if ( node->help_state == SCHEDULER_HELP_ACTIVE_OWNER ) {
-      _Assert( idle != NULL );
-      new_user = the_thread;
-    } else if ( idle != NULL ) {
-      _Assert( node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL );
-      new_user = the_thread;
-    } else if ( the_thread != owner ) {
-      _Assert( node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL );
-      _Assert( old_user != the_thread );
-      _Scheduler_Thread_change_state( owner, THREAD_SCHEDULER_READY );
-      new_user = the_thread;
-      _Scheduler_Node_set_user( node, new_user );
-    } else {
-      _Assert( node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL );
-      _Assert( old_user != the_thread );
-      _Scheduler_Thread_change_state( the_thread, THREAD_SCHEDULER_READY );
-      new_user = NULL;
-    }
-
-    if ( new_user != NULL ) {
-      _Scheduler_Thread_change_state( new_user, THREAD_SCHEDULER_SCHEDULED );
-      _Thread_Set_CPU( new_user, cpu );
-      _Thread_Dispatch_update_heir( _Per_CPU_Get(), cpu, new_user );
-    }
-
+    _Scheduler_Thread_change_state( the_thread, THREAD_SCHEDULER_SCHEDULED );
     unblock = false;
   } else {
     _Scheduler_Thread_change_state( the_thread, THREAD_SCHEDULER_READY );
-
     unblock = true;
   }
@@ -1372,21 +1288,6 @@ RTEMS_INLINE_ROUTINE Status_Control _Scheduler_Set(
   );
 
 #if defined(RTEMS_SMP)
-  _Chain_Extract_unprotected( &old_scheduler_node->Thread.Wait_node );
-  _Assert( _Chain_Is_empty( &the_thread->Scheduler.Wait_nodes ) );
-  _Chain_Initialize_one(
-    &the_thread->Scheduler.Wait_nodes,
-    &new_scheduler_node->Thread.Wait_node
-  );
-  _Chain_Extract_unprotected(
-    &old_scheduler_node->Thread.Scheduler_node.Chain
-  );
-  _Assert( _Chain_Is_empty( &the_thread->Scheduler.Scheduler_nodes ) );
-  _Chain_Initialize_one(
-    &the_thread->Scheduler.Scheduler_nodes,
-    &new_scheduler_node->Thread.Scheduler_node.Chain
-  );
-
   {
     const Scheduler_Control *old_scheduler;
@@ -1401,6 +1302,24 @@ RTEMS_INLINE_ROUTINE Status_Control _Scheduler_Set(
       _Scheduler_Block( the_thread );
     }
 
+    _Assert( old_scheduler_node->sticky_level == 0 );
+    _Assert( new_scheduler_node->sticky_level == 0 );
+
+    _Chain_Extract_unprotected( &old_scheduler_node->Thread.Wait_node );
+    _Assert( _Chain_Is_empty( &the_thread->Scheduler.Wait_nodes ) );
+    _Chain_Initialize_one(
+      &the_thread->Scheduler.Wait_nodes,
+      &new_scheduler_node->Thread.Wait_node
+    );
+    _Chain_Extract_unprotected(
+      &old_scheduler_node->Thread.Scheduler_node.Chain
+    );
+    _Assert( _Chain_Is_empty( &the_thread->Scheduler.Scheduler_nodes ) );
+    _Chain_Initialize_one(
+      &the_thread->Scheduler.Scheduler_nodes,
+      &new_scheduler_node->Thread.Scheduler_node.Chain
+    );
+
     the_thread->Scheduler.own_control = new_scheduler;
     the_thread->Scheduler.control = new_scheduler;
     the_thread->Scheduler.own_node = new_scheduler_node;
diff --git a/cpukit/score/include/rtems/score/schedulernode.h b/cpukit/score/include/rtems/score/schedulernode.h
index 620b029..e27c4a3 100644
--- a/cpukit/score/include/rtems/score/schedulernode.h
+++ b/cpukit/score/include/rtems/score/schedulernode.h
@@ -28,75 +28,6 @@ extern "C" {
 
 #if defined(RTEMS_SMP)
 /**
- * @brief State to indicate potential help for other threads.
- *
- * @dot
- * digraph state {
- *   y [label="HELP YOURSELF"];
- *   ao [label="HELP ACTIVE OWNER"];
- *   ar [label="HELP ACTIVE RIVAL"];
- *
- *   y -> ao [label="obtain"];
- *   y -> ar [label="wait for obtain"];
- *   ao -> y [label="last release"];
- *   ao -> r [label="wait for obtain"];
- *   ar -> r [label="timeout"];
- *   ar -> ao [label="timeout"];
- * }
- * @enddot
- */
-typedef enum {
-  /**
-   * @brief This scheduler node is solely used by the owner thread.
-   *
-   * This thread owns no resources using a helping protocol and thus does not
-   * take part in the scheduler helping protocol. No help will be provided for
-   * other thread.
-   */
-  SCHEDULER_HELP_YOURSELF,
-
-  /**
-   * @brief This scheduler node is owned by a thread actively owning a resource.
-   *
-   * This scheduler node can be used to help out threads.
-   *
-   * In case this scheduler node changes its state from ready to scheduled and
-   * the thread executes using another node, then an idle thread will be
-   * provided as a user of this node to temporarily execute on behalf of the
-   * owner thread. Thus lower priority threads are denied access to the
-   * processors of this scheduler instance.
-   *
-   * In case a thread actively owning a resource performs a blocking operation,
-   * then an idle thread will be used also in case this node is in the
-   * scheduled state.
-   */
-  SCHEDULER_HELP_ACTIVE_OWNER,
-
-  /**
-   * @brief This scheduler node is owned by a thread actively obtaining a
-   * resource currently owned by another thread.
-   *
-   * This scheduler node can be used to help out threads.
-   *
-   * The thread owning this node is ready and will give away its processor in
-   * case the thread owning the resource asks for help.
-   */
-  SCHEDULER_HELP_ACTIVE_RIVAL,
-
-  /**
-   * @brief This scheduler node is owned by a thread obtaining a
-   * resource currently owned by another thread.
-   *
-   * This scheduler node can be used to help out threads.
-   *
-   * The thread owning this node is blocked.
-   */
-  SCHEDULER_HELP_PASSIVE
-} Scheduler_Help_state;
-#endif
-
-#if defined(RTEMS_SMP)
-/**
  * @brief The scheduler node requests.
  */
 typedef enum {
@@ -146,27 +77,37 @@ struct Scheduler_Node {
   Chain_Node Node;
 
   /**
-   * @brief The thread using this node.
+   * @brief The sticky level determines if this scheduler node should use an
+   * idle thread in case this node is scheduled and the owner thread is
+   * blocked.
    */
-  struct _Thread_Control *user;
+  int sticky_level;
 
   /**
-   * @brief The help state of this node.
+   * @brief The thread using this node.
+   *
+   * This is either the owner or an idle thread.
    */
-  Scheduler_Help_state help_state;
+  struct _Thread_Control *user;
 
   /**
-   * @brief The idle thread claimed by this node in case the help state is
-   * SCHEDULER_HELP_ACTIVE_OWNER.
+   * @brief The idle thread claimed by this node in case the sticky level is
+   * greater than zero and the thread is blocked or is scheduled on another
+   * scheduler instance.
    *
-   * Active owners will lend their own node to an idle thread in case they
-   * execute currently using another node or in case they perform a blocking
-   * operation. This is necessary to ensure the priority ceiling protocols
-   * work across scheduler boundaries.
+   * This is necessary to ensure the priority ceiling protocols work across
+   * scheduler boundaries.
    */
   struct _Thread_Control *idle;
+#endif
 
   /**
+   * @brief The thread owning this node.
+   */
+  struct _Thread_Control *owner;
+
+#if defined(RTEMS_SMP)
+  /**
    * @brief The thread accepting help by this node in case the help state is
    * not SCHEDULER_HELP_YOURSELF.
    */
@@ -222,11 +163,6 @@ struct Scheduler_Node {
   } Wait;
 
   /**
-   * @brief The thread owning this node.
-   */
-  struct _Thread_Control *owner;
-
-  /**
    * @brief The thread priority information used by the scheduler.
    *
   * The thread priority is manifest in two independent areas. One area is the
diff --git a/cpukit/score/include/rtems/score/schedulernodeimpl.h b/cpukit/score/include/rtems/score/schedulernodeimpl.h
index f590131..62c2fab 100644
--- a/cpukit/score/include/rtems/score/schedulernodeimpl.h
+++ b/cpukit/score/include/rtems/score/schedulernodeimpl.h
@@ -46,7 +46,6 @@ RTEMS_INLINE_ROUTINE void _Scheduler_Node_do_initialize(
   _Chain_Initialize_node( &node->Thread.Wait_node );
   node->Wait.Priority.scheduler = scheduler;
   node->user = the_thread;
-  node->help_state = SCHEDULER_HELP_YOURSELF;
   node->idle = NULL;
   node->accepts_help = the_thread;
   _SMP_sequence_lock_Initialize( &node->Priority.Lock );
diff --git a/cpukit/score/include/rtems/score/schedulersmpimpl.h b/cpukit/score/include/rtems/score/schedulersmpimpl.h
index 6c3a409..67f1595 100644
--- a/cpukit/score/include/rtems/score/schedulersmpimpl.h
+++ b/cpukit/score/include/rtems/score/schedulersmpimpl.h
@@ -781,8 +781,36 @@ static inline Thread_Control *_Scheduler_SMP_Enqueue_scheduled_ordered(
    * The node has been extracted from the scheduled chain. We have to place
    * it now on the scheduled or ready set.
    */
-  if ( ( *order )( &node->Node, &highest_ready->Node ) ) {
+  if (
+    node->sticky_level > 0
+      && ( *order )( &node->Node, &highest_ready->Node )
+  ) {
     ( *insert_scheduled )( context, node );
+
+    if ( _Scheduler_Node_get_idle( node ) != NULL ) {
+      Thread_Control *owner;
+      ISR_lock_Context lock_context;
+
+      owner = _Scheduler_Node_get_owner( node );
+      _Thread_Scheduler_acquire_critical( owner, &lock_context );
+
+      if ( owner->Scheduler.state == THREAD_SCHEDULER_READY ) {
+        _Thread_Scheduler_cancel_need_for_help(
+          owner,
+          _Thread_Get_CPU( owner )
+        );
+        _Scheduler_Discard_idle_thread(
+          context,
+          owner,
+          node,
+          _Scheduler_SMP_Release_idle_thread
+        );
+        _Scheduler_Thread_change_state( owner, THREAD_SCHEDULER_SCHEDULED );
+      }
+
+      _Thread_Scheduler_release_critical( owner, &lock_context );
+    }
+
     return NULL;
   }
@@ -992,10 +1020,7 @@ static inline Thread_Control *_Scheduler_SMP_Unblock(
     needs_help = ( *enqueue_fifo )( context, node, thread );
   } else {
     _Assert( node_state == SCHEDULER_SMP_NODE_READY );
-    _Assert(
-      node->help_state == SCHEDULER_HELP_ACTIVE_OWNER
-        || node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL
-    );
+    _Assert( node->sticky_level > 0 );
     _Assert( node->idle == NULL );
 
     if ( node->accepts_help == thread ) {
@@ -1146,38 +1171,58 @@ static inline bool _Scheduler_SMP_Ask_for_help(
 
   _Thread_Scheduler_acquire_critical( thread, &lock_context );
 
-  if (
-    thread->Scheduler.state == THREAD_SCHEDULER_READY
-      && _Scheduler_SMP_Node_state( node ) == SCHEDULER_SMP_NODE_BLOCKED
-  ) {
-    if ( ( *order )( &node->Node, &lowest_scheduled->Node ) ) {
+  if ( thread->Scheduler.state == THREAD_SCHEDULER_READY ) {
+    Scheduler_SMP_Node_state node_state;
+
+    node_state = _Scheduler_SMP_Node_state( node );
+
+    if ( node_state == SCHEDULER_SMP_NODE_BLOCKED ) {
+      if ( ( *order )( &node->Node, &lowest_scheduled->Node ) ) {
+        _Thread_Scheduler_cancel_need_for_help(
+          thread,
+          _Thread_Get_CPU( thread )
+        );
+        _Scheduler_Thread_change_state( thread, THREAD_SCHEDULER_SCHEDULED );
+        _Thread_Scheduler_release_critical( thread, &lock_context );
+
+        _Scheduler_SMP_Preempt(
+          context,
+          node,
+          lowest_scheduled,
+          allocate_processor
+        );
+
+        ( *insert_scheduled )( context, node );
+        ( *move_from_scheduled_to_ready )( context, lowest_scheduled );
+
+        _Scheduler_Release_idle_thread(
+          context,
+          lowest_scheduled,
+          _Scheduler_SMP_Release_idle_thread
+        );
+        success = true;
+      } else {
+        _Thread_Scheduler_release_critical( thread, &lock_context );
+        _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_READY );
+        ( *insert_ready )( context, node );
+        success = false;
+      }
+    } else if ( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
       _Thread_Scheduler_cancel_need_for_help(
         thread,
         _Thread_Get_CPU( thread )
       );
-      _Scheduler_Thread_change_state( thread, THREAD_SCHEDULER_SCHEDULED );
-      _Thread_Scheduler_release_critical( thread, &lock_context );
-
-      _Scheduler_SMP_Preempt(
+      _Scheduler_Discard_idle_thread(
         context,
+        thread,
         node,
-        lowest_scheduled,
-        allocate_processor
-      );
-
-      ( *insert_scheduled )( context, node );
-      ( *move_from_scheduled_to_ready )( context, lowest_scheduled );
-
-      _Scheduler_Release_idle_thread(
-        context,
-        lowest_scheduled,
         _Scheduler_SMP_Release_idle_thread
       );
+      _Scheduler_Thread_change_state( thread, THREAD_SCHEDULER_SCHEDULED );
+      _Thread_Scheduler_release_critical( thread, &lock_context );
       success = true;
     } else {
       _Thread_Scheduler_release_critical( thread, &lock_context );
-      _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_READY );
-      ( *insert_ready )( context, node );
       success = false;
     }
   } else {
@@ -1202,6 +1247,7 @@ static inline void _Scheduler_SMP_Reconsider_help_request(
   if (
     thread->Scheduler.state == THREAD_SCHEDULER_SCHEDULED
       && _Scheduler_SMP_Node_state( node ) == SCHEDULER_SMP_NODE_READY
+      && node->sticky_level == 1
   ) {
     _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_BLOCKED );
     ( *extract_from_ready )( context, node );
diff --git a/cpukit/score/include/rtems/score/threadimpl.h b/cpukit/score/include/rtems/score/threadimpl.h
index ccfb6bf..b439758 100644
--- a/cpukit/score/include/rtems/score/threadimpl.h
+++ b/cpukit/score/include/rtems/score/threadimpl.h
@@ -570,6 +570,13 @@ void _Thread_Priority_replace(
  */
 void _Thread_Priority_update( Thread_queue_Context *queue_context );
 
+#if defined(RTEMS_SMP)
+void _Thread_Priority_and_sticky_update(
+  Thread_Control *the_thread,
+  int             sticky_level_change
+);
+#endif
+
 /**
  * @brief Returns true if the left thread priority is less than the right
  * thread priority in the intuitive sense of priority and false otherwise.
diff --git a/cpukit/score/src/schedulerpriorityaffinitysmp.c b/cpukit/score/src/schedulerpriorityaffinitysmp.c
index 91f9c3b..1163a1a 100644
--- a/cpukit/score/src/schedulerpriorityaffinitysmp.c
+++ b/cpukit/score/src/schedulerpriorityaffinitysmp.c
@@ -335,14 +335,6 @@ static void _Scheduler_priority_affinity_SMP_Check_for_migrations(
       break;
 
     /*
-     * FIXME: Do not consider threads using the scheduler helping protocol
-     * since this could produce more than one thread in need for help in one
-     * operation which is currently not possible.
-     */
-    if ( lowest_scheduled->help_state != SCHEDULER_HELP_YOURSELF )
-      break;
-
-    /*
      * But if we found a thread which is lower priority than one
      * in the ready set, then we need to swap them out.
      */
diff --git a/testsuites/smptests/smpmrsp01/init.c b/testsuites/smptests/smpmrsp01/init.c
index de5f31c..bb729e0 100644
--- a/testsuites/smptests/smpmrsp01/init.c
+++ b/testsuites/smptests/smpmrsp01/init.c
@@ -1430,11 +1430,6 @@ static void test_mrsp_obtain_and_release_with_help(test_context *ctx)
 
   rtems_test_assert(rtems_get_current_processor() == 1);
 
-  sc = rtems_task_wake_after(2);
-  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
-
-  rtems_test_assert(rtems_get_current_processor() == 1);
-
   /*
    * With this operation the scheduler instance 0 has now only the main and the
    * idle threads in the ready set.
-- 
1.8.4.5
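For reference, the rewritten _Scheduler_Try_to_schedule_node() above collapses
the former help-state dispatch into a single decision over the user thread
state, the sticky level, and an idle thread offered by the caller. A
standalone model of that decision follows; the names and reduced parameters
are illustrative only, not the kernel types:

#include <assert.h>
#include <stdbool.h>

typedef enum {
  DO_SCHEDULE,      /* schedule the node's user thread */
  DO_IDLE_EXCHANGE, /* reuse the idle thread offered by the caller */
  DO_BLOCK          /* the node really blocks */
} Action;

static Action try_to_schedule(
  bool user_is_ready,
  int  sticky_level,
  bool caller_offers_idle
)
{
  if ( user_is_ready ) {
    return DO_SCHEDULE;
  } else if ( sticky_level == 0 ) {
    return DO_BLOCK;
  } else if ( caller_offers_idle ) {
    return DO_IDLE_EXCHANGE;
  } else {
    /* Lend a fresh idle thread to the node and schedule it. */
    return DO_SCHEDULE;
  }
}

int main( void )
{
  assert( try_to_schedule( true, 1, false ) == DO_SCHEDULE );
  assert( try_to_schedule( false, 0, false ) == DO_BLOCK );
  assert( try_to_schedule( false, 1, true ) == DO_IDLE_EXCHANGE );
  assert( try_to_schedule( false, 1, false ) == DO_SCHEDULE );
  return 0;
}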