This avoids having conditional statements to get the API-specific status code.
---
 cpukit/include/rtems/score/scheduler.h        | 12 ++++++---
 cpukit/include/rtems/score/scheduleredfsmp.h  |  9 ++++---
 cpukit/include/rtems/score/schedulerimpl.h    | 27 ++++++++++++-------
 .../score/schedulerpriorityaffinitysmp.h      |  6 ++---
 cpukit/posix/src/pthreadcreate.c              |  4 +--
 cpukit/posix/src/pthreadsetaffinitynp.c       |  7 ++---
 cpukit/rtems/src/tasksetaffinity.c            |  7 ++---
 .../score/src/schedulerdefaultsetaffinity.c   |  2 +-
 cpukit/score/src/scheduleredfsmp.c            |  6 ++---
 .../score/src/schedulerpriorityaffinitysmp.c  |  8 +++---
 cpukit/score/src/schedulersetaffinity.c       | 20 +++++++-------
 11 files changed, 63 insertions(+), 45 deletions(-)
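To make the intent concrete (just an illustration, not part of the patch): with
a bool return, each API layer needs its own conditional to produce its error
code, while a Status_Control return is translated directly by the per-API
helpers already used in the hunks below.  A minimal sketch with hypothetical
caller names (posix_layer_set_affinity, classic_layer_set_affinity); the
locking the real callers perform around _Scheduler_Set_affinity() is omitted:

    #include <rtems/score/schedulerimpl.h>  /* _Scheduler_Set_affinity() */
    #include <rtems/posix/posixapi.h>       /* _POSIX_Get_error() */
    #include <rtems/rtems/statusimpl.h>     /* _Status_Get() */

    /* Hypothetical POSIX-layer caller: the score status maps to an errno. */
    static int posix_layer_set_affinity(
      Thread_Control  *the_thread,
      size_t           cpusetsize,
      const cpu_set_t *cpuset
    )
    {
      Status_Control status;

      status = _Scheduler_Set_affinity( the_thread, cpusetsize, cpuset );
      return _POSIX_Get_error( status );  /* 0 on success, EINVAL otherwise */
    }

    /*
     * Hypothetical Classic-layer caller: the same status maps to an
     * rtems_status_code without any conditional.
     */
    static rtems_status_code classic_layer_set_affinity(
      Thread_Control  *the_thread,
      size_t           cpusetsize,
      const cpu_set_t *cpuset
    )
    {
      Status_Control status;

      status = _Scheduler_Set_affinity( the_thread, cpusetsize, cpuset );
      return _Status_Get( status );  /* RTEMS_SUCCESSFUL or RTEMS_INVALID_NUMBER */
    }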
diff --git a/cpukit/include/rtems/score/scheduler.h b/cpukit/include/rtems/score/scheduler.h
index 7a566cf44d..da1e030ab8 100644
--- a/cpukit/include/rtems/score/scheduler.h
+++ b/cpukit/include/rtems/score/scheduler.h
@@ -21,6 +21,7 @@
 #define _RTEMS_SCORE_SCHEDULER_H
 
 #include <rtems/score/thread.h>
+#include <rtems/score/status.h>
 
 #ifdef __cplusplus
 extern "C" {
@@ -228,7 +229,7 @@ typedef struct {
 
 #if defined(RTEMS_SMP)
   /** @see _Scheduler_Set_affinity() */
-  bool ( *set_affinity )(
+  Status_Control ( *set_affinity )(
    const Scheduler_Control *,
    Thread_Control *,
    Scheduler_Node *,
@@ -581,10 +582,13 @@ void _Scheduler_default_Start_idle(
  * @param node This parameter is unused.
  * @param affinity The new processor affinity set for the thread.
  *
- * @retval true The processor set of the scheduler is a subset of the affinity set.
- * @retval false The processor set of the scheduler is not a subset of the affinity set.
+ * @retval STATUS_SUCCESSFUL The affinity is a subset of the online
+ *   processors.
+ *
+ * @retval STATUS_INVALID_NUMBER The affinity is not a subset of the online
+ *   processors.
  */
-bool _Scheduler_default_Set_affinity(
+Status_Control _Scheduler_default_Set_affinity(
   const Scheduler_Control *scheduler,
   Thread_Control          *thread,
   Scheduler_Node          *node,
diff --git a/cpukit/include/rtems/score/scheduleredfsmp.h b/cpukit/include/rtems/score/scheduleredfsmp.h
index 69dcd1ab3f..6fef6fb86a 100644
--- a/cpukit/include/rtems/score/scheduleredfsmp.h
+++ b/cpukit/include/rtems/score/scheduleredfsmp.h
@@ -330,10 +330,13 @@ void _Scheduler_EDF_SMP_Start_idle(
  * @param node This parameter is unused.
  * @param affinity The new processor affinity set for the thread.
  *
- * @retval true The processor set of the scheduler is a subset of the affinity set.
- * @retval false The processor set of the scheduler is not a subset of the affinity set.
+ * @retval STATUS_SUCCESSFUL The processor set of the scheduler is a subset of
+ *   the affinity set.
+ *
+ * @retval STATUS_INVALID_NUMBER The processor set of the scheduler is not a
+ *   subset of the affinity set.
  */
-bool _Scheduler_EDF_SMP_Set_affinity(
+Status_Control _Scheduler_EDF_SMP_Set_affinity(
   const Scheduler_Control *scheduler,
   Thread_Control          *thread,
   Scheduler_Node          *node,
diff --git a/cpukit/include/rtems/score/schedulerimpl.h b/cpukit/include/rtems/score/schedulerimpl.h
index 65c600b583..595d6291b4 100644
--- a/cpukit/include/rtems/score/schedulerimpl.h
+++ b/cpukit/include/rtems/score/schedulerimpl.h
@@ -711,10 +711,12 @@ Status_Control _Scheduler_Get_affinity(
  * @param node This parameter is unused.
  * @param affinity The processor mask to check.
  *
- * @retval true @a affinity is a subset of the online processors.
- * @retval false @a affinity is not a subset of the online processors.
+ * @retval STATUS_SUCCESSFUL The affinity is a subset of the online processors.
+ *
+ * @retval STATUS_INVALID_NUMBER The affinity is not a subset of the online
+ *   processors.
  */
-RTEMS_INLINE_ROUTINE bool _Scheduler_default_Set_affinity_body(
+RTEMS_INLINE_ROUTINE Status_Control _Scheduler_default_Set_affinity_body(
   const Scheduler_Control *scheduler,
   Thread_Control          *the_thread,
   Scheduler_Node          *node,
@@ -724,7 +726,12 @@ RTEMS_INLINE_ROUTINE bool _Scheduler_default_Set_affinity_body(
   (void) scheduler;
   (void) the_thread;
   (void) node;
-  return _Processor_mask_Is_subset( affinity, _SMP_Get_online_processors() );
+
+  if ( !_Processor_mask_Is_subset( affinity, _SMP_Get_online_processors() ) ) {
+    return STATUS_INVALID_NUMBER;
+  }
+
+  return STATUS_SUCCESSFUL;
 }
 
 /**
@@ -734,10 +741,12 @@ RTEMS_INLINE_ROUTINE bool _Scheduler_default_Set_affinity_body(
  * @param cpusetsize The size of @a cpuset.
  * @param cpuset The cpuset to set the affinity.
  *
- * @retval true The operation succeeded.
- * @retval false The operation did not succeed.
+ * @retval STATUS_SUCCESSFUL The operation succeeded.
+ *
+ * @retval STATUS_INVALID_NUMBER The processor set was not a valid new
+ *   processor affinity set for the thread.
  */
-bool _Scheduler_Set_affinity(
+Status_Control _Scheduler_Set_affinity(
   Thread_Control  *the_thread,
   size_t           cpusetsize,
   const cpu_set_t *cpuset
@@ -1318,12 +1327,12 @@ RTEMS_INLINE_ROUTINE Status_Control _Scheduler_Set(
 
   if (
     _Scheduler_Get_processor_count( new_scheduler ) == 0
-      || !( *new_scheduler->Operations.set_affinity )(
+      || ( *new_scheduler->Operations.set_affinity )(
        new_scheduler,
        the_thread,
        new_scheduler_node,
        &the_thread->Scheduler.Affinity
-      )
+      ) != STATUS_SUCCESSFUL
   ) {
     _Scheduler_Release_critical( new_scheduler, &lock_context );
     _Priority_Plain_insert(
diff --git a/cpukit/include/rtems/score/schedulerpriorityaffinitysmp.h b/cpukit/include/rtems/score/schedulerpriorityaffinitysmp.h
index 3d5ccaae10..772a83f541 100644
--- a/cpukit/include/rtems/score/schedulerpriorityaffinitysmp.h
+++ b/cpukit/include/rtems/score/schedulerpriorityaffinitysmp.h
@@ -213,10 +213,10 @@ Thread_Control *_Scheduler_priority_affinity_SMP_Remove_processor(
  * @param[in, out] node The scheduler node.
  * @param affinity The new affinity set.
  *
- * @retval true if successful
- * @retval false if unsuccessful
+ * @retval STATUS_SUCCESSFUL if successful
+ * @retval STATUS_INVALID_NUMBER if unsuccessful
  */
-bool _Scheduler_priority_affinity_SMP_Set_affinity(
+Status_Control _Scheduler_priority_affinity_SMP_Set_affinity(
   const Scheduler_Control *scheduler,
   Thread_Control          *thread,
   Scheduler_Node          *node,
diff --git a/cpukit/posix/src/pthreadcreate.c b/cpukit/posix/src/pthreadcreate.c
index 75d3c64676..f53cd5e310 100644
--- a/cpukit/posix/src/pthreadcreate.c
+++ b/cpukit/posix/src/pthreadcreate.c
@@ -251,13 +251,13 @@ int pthread_create(
   the_thread->Life.state |= THREAD_LIFE_CHANGE_DEFERRED;
 
   _ISR_lock_ISR_disable( &lock_context );
-  ok = _Scheduler_Set_affinity(
+  status = _Scheduler_Set_affinity(
     the_thread,
     the_attr->affinitysetsize,
     the_attr->affinityset
   );
   _ISR_lock_ISR_enable( &lock_context );
-  if ( !ok ) {
+  if ( status != STATUS_SUCCESSFUL ) {
     _Thread_Free( &_POSIX_Threads_Information, the_thread );
     _RTEMS_Unlock_allocator();
     return EINVAL;
diff --git a/cpukit/posix/src/pthreadsetaffinitynp.c b/cpukit/posix/src/pthreadsetaffinitynp.c
index ae91d135e1..9cb29ac167 100644
--- a/cpukit/posix/src/pthreadsetaffinitynp.c
+++ b/cpukit/posix/src/pthreadsetaffinitynp.c
@@ -28,6 +28,7 @@
 
 #include <rtems/score/threadimpl.h>
 #include <rtems/score/schedulerimpl.h>
+#include <rtems/posix/posixapi.h>
 
 int pthread_setaffinity_np(
   pthread_t          thread,
@@ -38,7 +39,7 @@ int pthread_setaffinity_np(
   Thread_Control   *the_thread;
   ISR_lock_Context  lock_context;
   Per_CPU_Control  *cpu_self;
-  bool              ok;
+  Status_Control    status;
 
   if ( cpuset == NULL ) {
     return EFAULT;
@@ -53,7 +54,7 @@ int pthread_setaffinity_np(
 
   cpu_self = _Thread_Dispatch_disable_critical( &lock_context );
   _Thread_State_acquire_critical( the_thread, &lock_context );
-  ok = _Scheduler_Set_affinity(
+  status = _Scheduler_Set_affinity(
     the_thread,
     cpusetsize,
     cpuset
@@ -61,6 +62,6 @@ int pthread_setaffinity_np(
 
   _Thread_State_release( the_thread, &lock_context );
   _Thread_Dispatch_enable( cpu_self );
-  return ok ? 0 : EINVAL;
+  return _POSIX_Get_error( status );
 }
 #endif
diff --git a/cpukit/rtems/src/tasksetaffinity.c b/cpukit/rtems/src/tasksetaffinity.c
index c3cfbf08cb..84071b2f51 100644
--- a/cpukit/rtems/src/tasksetaffinity.c
+++ b/cpukit/rtems/src/tasksetaffinity.c
@@ -21,6 +21,7 @@
 #endif
 
 #include <rtems/rtems/tasks.h>
+#include <rtems/rtems/statusimpl.h>
 #include <rtems/score/threadimpl.h>
 #include <rtems/score/schedulerimpl.h>
 
@@ -33,7 +34,7 @@ rtems_status_code rtems_task_set_affinity(
   Thread_Control   *the_thread;
   ISR_lock_Context  lock_context;
   Per_CPU_Control  *cpu_self;
-  bool              ok;
+  Status_Control    status;
 
   if ( cpuset == NULL ) {
     return RTEMS_INVALID_ADDRESS;
@@ -54,7 +55,7 @@ rtems_status_code rtems_task_set_affinity(
 
   cpu_self = _Thread_Dispatch_disable_critical( &lock_context );
   _Thread_State_acquire_critical( the_thread, &lock_context );
-  ok = _Scheduler_Set_affinity(
+  status = _Scheduler_Set_affinity(
     the_thread,
     cpusetsize,
     cpuset
@@ -62,5 +63,5 @@ rtems_status_code rtems_task_set_affinity(
 
   _Thread_State_release( the_thread, &lock_context );
   _Thread_Dispatch_enable( cpu_self );
-  return ok ? RTEMS_SUCCESSFUL : RTEMS_INVALID_NUMBER;
+  return _Status_Get( status );
 }
diff --git a/cpukit/score/src/schedulerdefaultsetaffinity.c b/cpukit/score/src/schedulerdefaultsetaffinity.c
index 6399b1e352..7583fc1c8f 100644
--- a/cpukit/score/src/schedulerdefaultsetaffinity.c
+++ b/cpukit/score/src/schedulerdefaultsetaffinity.c
@@ -22,7 +22,7 @@
 
 #include <rtems/score/schedulerimpl.h>
 
-bool _Scheduler_default_Set_affinity(
+Status_Control _Scheduler_default_Set_affinity(
   const Scheduler_Control *scheduler,
   Thread_Control          *thread,
   Scheduler_Node          *node,
diff --git a/cpukit/score/src/scheduleredfsmp.c b/cpukit/score/src/scheduleredfsmp.c
index b8b1d9de95..13d512118e 100644
--- a/cpukit/score/src/scheduleredfsmp.c
+++ b/cpukit/score/src/scheduleredfsmp.c
@@ -723,7 +723,7 @@ void _Scheduler_EDF_SMP_Unpin(
   node->pinning_ready_queue_index = 0;
 }
 
-bool _Scheduler_EDF_SMP_Set_affinity(
+Status_Control _Scheduler_EDF_SMP_Set_affinity(
   const Scheduler_Control *scheduler,
   Thread_Control          *thread,
   Scheduler_Node          *node_base,
@@ -739,7 +739,7 @@ bool _Scheduler_EDF_SMP_Set_affinity(
   _Processor_mask_And( &local_affinity, &context->Processors, affinity );
 
   if ( _Processor_mask_Is_zero( &local_affinity ) ) {
-    return false;
+    return STATUS_INVALID_NUMBER;
   }
 
   if ( _Processor_mask_Is_equal( affinity, &_SMP_Online_processors ) ) {
@@ -766,5 +766,5 @@ bool _Scheduler_EDF_SMP_Set_affinity(
     );
   }
 
-  return true;
+  return STATUS_SUCCESSFUL;
 }
diff --git a/cpukit/score/src/schedulerpriorityaffinitysmp.c b/cpukit/score/src/schedulerpriorityaffinitysmp.c
index 2511b6bbed..10f6808a75 100644
--- a/cpukit/score/src/schedulerpriorityaffinitysmp.c
+++ b/cpukit/score/src/schedulerpriorityaffinitysmp.c
@@ -541,7 +541,7 @@ Thread_Control *_Scheduler_priority_affinity_SMP_Remove_processor(
   );
 }
 
-bool _Scheduler_priority_affinity_SMP_Set_affinity(
+Status_Control _Scheduler_priority_affinity_SMP_Set_affinity(
   const Scheduler_Control *scheduler,
   Thread_Control          *thread,
   Scheduler_Node          *node_base,
@@ -557,7 +557,7 @@ bool _Scheduler_priority_affinity_SMP_Set_affinity(
   _Processor_mask_And( &my_affinity, &context->Processors, affinity );
 
   if ( _Processor_mask_Count( &my_affinity ) == 0 ) {
-    return false;
+    return STATUS_INVALID_NUMBER;
   }
 
   node = _Scheduler_priority_affinity_SMP_Node_downcast( node_base );
@@ -567,7 +567,7 @@ bool _Scheduler_priority_affinity_SMP_Set_affinity(
    * doing anything.
    */
   if ( _Processor_mask_Is_equal( &node->Affinity, affinity ) )
-    return true;
+    return STATUS_SUCCESSFUL;
 
   current_state = thread->current_state;
 
@@ -584,5 +584,5 @@ bool _Scheduler_priority_affinity_SMP_Set_affinity(
     (void) _Scheduler_priority_affinity_SMP_Unblock( scheduler, thread, &node->Base.Base.Base );
   }
 
-  return true;
+  return STATUS_SUCCESSFUL;
 }
diff --git a/cpukit/score/src/schedulersetaffinity.c b/cpukit/score/src/schedulersetaffinity.c
index 8d16af3ed3..24d13feee7 100644
--- a/cpukit/score/src/schedulersetaffinity.c
+++ b/cpukit/score/src/schedulersetaffinity.c
@@ -27,22 +27,22 @@
 
 #include <rtems/score/schedulerimpl.h>
 
-bool _Scheduler_Set_affinity(
+Status_Control _Scheduler_Set_affinity(
   Thread_Control  *the_thread,
   size_t           cpusetsize,
   const cpu_set_t *cpuset
 )
 {
   Processor_mask             affinity;
-  Processor_mask_Copy_status status;
+  Processor_mask_Copy_status copy_status;
   const Scheduler_Control   *scheduler;
   Scheduler_Node            *node;
   ISR_lock_Context           lock_context;
-  bool                       ok;
+  Status_Control             status;
 
-  status = _Processor_mask_From_cpu_set_t( &affinity, cpusetsize, cpuset );
-  if ( !_Processor_mask_Is_at_most_partial_loss( status ) ) {
-    return false;
+  copy_status = _Processor_mask_From_cpu_set_t( &affinity, cpusetsize, cpuset );
+  if ( !_Processor_mask_Is_at_most_partial_loss( copy_status ) ) {
+    return STATUS_INVALID_NUMBER;
   }
 
   /*
@@ -57,18 +57,18 @@ bool _Scheduler_Set_affinity(
   node = _Thread_Scheduler_get_home_node( the_thread );
 
 #if defined(RTEMS_SMP)
-  ok = ( *scheduler->Operations.set_affinity )(
+  status = ( *scheduler->Operations.set_affinity )(
     scheduler,
     the_thread,
     node,
    &affinity
   );
 
-  if ( ok ) {
+  if ( status == STATUS_SUCCESSFUL ) {
     _Processor_mask_Assign( &the_thread->Scheduler.Affinity, &affinity );
   }
 #else
-  ok = _Scheduler_default_Set_affinity_body(
+  status = _Scheduler_default_Set_affinity_body(
     scheduler,
     the_thread,
     node,
@@ -77,5 +77,5 @@ bool _Scheduler_Set_affinity(
 #endif
 
   _Scheduler_Release_critical( scheduler, &lock_context );
-  return ok;
+  return status;
 }
-- 
2.26.2

_______________________________________________
devel mailing list
devel@rtems.org
http://lists.rtems.org/mailman/listinfo/devel