path: root/cpukit/rtems/src/schedulerremoveprocessor.c
author     Sebastian Huber <sebastian.huber@embedded-brains.de>  2021-11-08 11:08:24 +0100
committer  Sebastian Huber <sebastian.huber@embedded-brains.de>  2021-11-23 11:00:28 +0100
commit     c69a70a597ec5df75a51bfa39c14198a5c5fb22e (patch)
tree       345bef38c0158a520c437d363e6b52ad276e03f0 /cpukit/rtems/src/schedulerremoveprocessor.c
parent     score: Fix assertion in SMP scheduler framework (diff)
download   rtems-c69a70a597ec5df75a51bfa39c14198a5c5fb22e.tar.bz2
rtems: Fix rtems_scheduler_remove_processor()
Return an error status for the following error condition in rtems_scheduler_remove_processor():

If an attempt is made to remove a processor from a scheduler, the processor is the only processor owned by the scheduler, and a thread exists which uses the scheduler as a helping scheduler, then the processor shall not be removed.

The reason is that ask-for-help requests and withdraw-node requests are processed asynchronously and in any order. An ask-for-help request carried out on a scheduler without a processor is undefined behaviour.

Update the error status description.

Update #4544.
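At the API level, the visible effect of this fix is that rtems_scheduler_remove_processor() now reports RTEMS_RESOURCE_IN_USE when the processor to be removed is the last processor of the scheduler and some thread still uses that scheduler as a helping scheduler. The following minimal sketch shows how a caller might check for this status; the scheduler name 'APP2' and CPU index 1 are illustrative assumptions, not part of this commit.

#include <rtems.h>

/* Illustrative sketch only: the scheduler name and CPU index are assumed. */
static void remove_cpu_from_second_scheduler( void )
{
  rtems_status_code sc;
  rtems_id          scheduler_id;

  sc = rtems_scheduler_ident( rtems_build_name( 'A', 'P', 'P', '2' ), &scheduler_id );
  if ( sc != RTEMS_SUCCESSFUL ) {
    return;
  }

  sc = rtems_scheduler_remove_processor( scheduler_id, 1 );

  if ( sc == RTEMS_RESOURCE_IN_USE ) {
    /* The processor cannot be removed: either a thread owned by the
     * scheduler needs it, or it is the scheduler's last processor and a
     * thread still uses the scheduler as a helping scheduler (the case
     * added by this commit). */
  }
}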
Diffstat (limited to 'cpukit/rtems/src/schedulerremoveprocessor.c')
-rw-r--r--  cpukit/rtems/src/schedulerremoveprocessor.c  50
1 file changed, 48 insertions(+), 2 deletions(-)
diff --git a/cpukit/rtems/src/schedulerremoveprocessor.c b/cpukit/rtems/src/schedulerremoveprocessor.c
index 79c17bda49..3136a8e0c0 100644
--- a/cpukit/rtems/src/schedulerremoveprocessor.c
+++ b/cpukit/rtems/src/schedulerremoveprocessor.c
@@ -35,7 +35,7 @@ typedef struct {
rtems_status_code status;
} Scheduler_Processor_removal_context;
-static bool _Scheduler_Check_processor_removal(
+static bool _Scheduler_Check_processor_not_required(
Thread_Control *the_thread,
void *arg
)
@@ -68,6 +68,45 @@ static bool _Scheduler_Check_processor_removal(
_Thread_Wait_release( the_thread, &queue_context );
return iter_context->status != RTEMS_SUCCESSFUL;
}
+
+static bool _Scheduler_Check_no_helping(
+ Thread_Control *the_thread,
+ void *arg
+)
+{
+ Scheduler_Processor_removal_context *iter_context;
+ ISR_lock_Context lock_context;
+ const Chain_Node *node;
+ const Chain_Node *tail;
+
+ if ( the_thread->is_idle ) {
+ return false;
+ }
+
+ iter_context = arg;
+
+ _Thread_State_acquire( the_thread, &lock_context );
+ node = _Chain_Immutable_first( &the_thread->Scheduler.Scheduler_nodes );
+ tail = _Chain_Immutable_tail( &the_thread->Scheduler.Scheduler_nodes );
+
+ do {
+ const Scheduler_Node *scheduler_node;
+ const Scheduler_Control *scheduler;
+
+ scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
+ scheduler = _Scheduler_Node_get_scheduler( scheduler_node );
+
+ if ( scheduler == iter_context->scheduler ) {
+ iter_context->status = RTEMS_RESOURCE_IN_USE;
+ break;
+ }
+
+ node = _Chain_Immutable_next( node );
+ } while ( node != tail );
+
+ _Thread_State_release( the_thread, &lock_context );
+ return iter_context->status != RTEMS_SUCCESSFUL;
+}
#endif
rtems_status_code rtems_scheduler_remove_processor(
@@ -116,7 +155,14 @@ rtems_status_code rtems_scheduler_remove_processor(
_Scheduler_Release_critical( scheduler, &lock_context );
_ISR_lock_ISR_enable( &lock_context );
- _Thread_Iterate( _Scheduler_Check_processor_removal, &iter_context );
+ _Thread_Iterate( _Scheduler_Check_processor_not_required, &iter_context );
+
+ if (
+ _Processor_mask_Is_zero( &scheduler_context->Processors ) &&
+ iter_context.status == RTEMS_SUCCESSFUL
+ ) {
+ _Thread_Iterate( _Scheduler_Check_no_helping, &iter_context );
+ }
_ISR_lock_ISR_disable( &lock_context );
_Scheduler_Acquire_critical( scheduler, &lock_context );