From 05ca53ddf6bc8333c2f3ad861c5415467c3262d2 Mon Sep 17 00:00:00 2001 From: Sebastian Huber Date: Mon, 31 Oct 2016 13:08:33 +0100 Subject: rtems: Add scheduler processor add/remove Update #2797. --- cpukit/rtems/src/scheduleraddprocessor.c | 119 +++++++++++++++++++++++ cpukit/rtems/src/schedulerremoveprocessor.c | 144 ++++++++++++++++++++++++++++ 2 files changed, 263 insertions(+) create mode 100644 cpukit/rtems/src/scheduleraddprocessor.c create mode 100644 cpukit/rtems/src/schedulerremoveprocessor.c (limited to 'cpukit/rtems/src') diff --git a/cpukit/rtems/src/scheduleraddprocessor.c b/cpukit/rtems/src/scheduleraddprocessor.c new file mode 100644 index 0000000000..c39b81d8ce --- /dev/null +++ b/cpukit/rtems/src/scheduleraddprocessor.c @@ -0,0 +1,119 @@ +/* + * Copyright (c) 2016 embedded brains GmbH. All rights reserved. + * + * embedded brains GmbH + * Dornierstr. 4 + * 82178 Puchheim + * Germany + * + * + * The license and distribution terms for this file may be + * found in the file LICENSE in this distribution or at + * http://www.rtems.org/license/LICENSE. 
 */

#if HAVE_CONFIG_H
#include "config.h"
#endif

/*
 * NOTE(review): the four include operands below were stripped by text
 * extraction (the original shows four bare "#include" tokens).  They are
 * reconstructed from the identifiers used in this file (_Scheduler_*,
 * _Per_CPU_*, _Thread_*, _Assert, rtems_configuration_*) -- confirm
 * against the RTEMS source tree.
 */
#include <rtems/rtems/tasks.h>
#include <rtems/score/assert.h>
#include <rtems/score/schedulerimpl.h>
#include <rtems/config.h>

/*
 * Adds the processor with index cpu_index to the scheduler instance
 * identified by scheduler_id.
 *
 * Returns:
 *   RTEMS_SUCCESSFUL      - processor handed over to the scheduler instance
 *   RTEMS_INVALID_ID      - scheduler_id does not name a configured scheduler
 *   RTEMS_NOT_CONFIGURED  - cpu_index is out of range, or this processor had
 *                           no initial scheduler assignment
 *   RTEMS_INCORRECT_STATE - the processor is not online
 *   RTEMS_RESOURCE_IN_USE - the processor already belongs to a scheduler
 *
 * On non-SMP builds the sole processor is always in use, so only the
 * parameter validation runs before RTEMS_RESOURCE_IN_USE is returned.
 */
rtems_status_code rtems_scheduler_add_processor(
  rtems_id scheduler_id,
  uint32_t cpu_index
)
{
  uint32_t scheduler_index;
#if defined(RTEMS_SMP)
  Per_CPU_Control *cpu;
  rtems_status_code status;
#endif

  scheduler_index = _Scheduler_Get_index_by_id( scheduler_id );

  if ( scheduler_index >= _Scheduler_Count ) {
    return RTEMS_INVALID_ID;
  }

  if ( cpu_index >= rtems_configuration_get_maximum_processors() ) {
    return RTEMS_NOT_CONFIGURED;
  }

#if defined(RTEMS_SMP)
  cpu = _Per_CPU_Get_by_index( cpu_index );

  /*
   * Only processors that received an initial scheduler assignment (and thus
   * own an idle thread) can be attached to a scheduler at run time.
   */
  if ( _Scheduler_Initial_assignments[ cpu_index ].scheduler == NULL ) {
    return RTEMS_NOT_CONFIGURED;
  }

  if ( !_Per_CPU_Is_processor_online( cpu ) ) {
    return RTEMS_INCORRECT_STATE;
  }

  /*
   * The allocator lock serializes concurrent add/remove processor requests;
   * cpu->Scheduler.control is only changed while it is held (see also
   * rtems_scheduler_remove_processor()).
   */
  _Objects_Allocator_lock();

  if ( cpu->Scheduler.control == NULL ) {
    /* The processor is online but currently owned by no scheduler. */
    const Scheduler_Control *scheduler;
    Scheduler_Context *scheduler_context;
    Priority_Control idle_priority;
    Thread_Control *idle;
    Scheduler_Node *scheduler_node;
    ISR_lock_Context lock_context;
    Thread_queue_Context queue_context;
    Per_CPU_Control *cpu_self;

    scheduler = &_Scheduler_Table[ scheduler_index ];
    scheduler_context = _Scheduler_Get_context( scheduler );
    idle_priority =
      _Scheduler_Map_priority( scheduler, scheduler->maximum_priority );

    /*
     * Reclaim the idle thread parked on this online-but-unused processor
     * (stashed there when the processor was last removed from a scheduler).
     */
    idle = cpu->Scheduler.idle_if_online_and_unused;
    _Assert( idle != NULL );
    cpu->Scheduler.idle_if_online_and_unused = NULL;

    /*
     * Re-home the idle thread to the target scheduler instance at that
     * instance's mapped maximum (lowest importance) priority, and rebuild
     * its wait/scheduler node chains to reference only the node of the
     * target instance.
     */
    idle->Scheduler.home = scheduler;
    idle->Start.initial_priority = idle_priority;
    scheduler_node =
      _Thread_Scheduler_get_node_by_index( idle, scheduler_index );
    _Priority_Node_set_priority( &idle->Real_priority, idle_priority );
    _Priority_Initialize_one(
      &scheduler_node->Wait.Priority,
      &idle->Real_priority
    );
    _Assert( _Chain_Is_empty( &idle->Scheduler.Wait_nodes ) );
    _Chain_Initialize_one(
      &idle->Scheduler.Wait_nodes,
      &scheduler_node->Thread.Wait_node
    );

    _Assert( _Chain_Is_empty( &idle->Scheduler.Scheduler_nodes ) );
    _Chain_Initialize_one(
      &idle->Scheduler.Scheduler_nodes,
      &scheduler_node->Thread.Scheduler_node.Chain
    );

    /*
     * Publish the new ownership and hand the idle thread to the scheduler
     * implementation while the scheduler instance lock is held.
     */
    _ISR_lock_ISR_disable( &lock_context );
    _Scheduler_Acquire_critical( scheduler, &lock_context );
    ++scheduler_context->processor_count;
    cpu->Scheduler.control = scheduler;
    cpu->Scheduler.context = scheduler_context;
    ( *scheduler->Operations.add_processor )( scheduler, idle );
    /*
     * NOTE(review): queue_context is otherwise unused; it only donates a
     * lock context so thread dispatching stays disabled across the release
     * of the scheduler lock below -- presumably to let any dispatch
     * necessary triggered by add_processor() happen after the locks are
     * dropped.  Confirm against score/threaddispatch.h semantics.
     */
    cpu_self = _Thread_Dispatch_disable_critical(
      &queue_context.Lock_context.Lock_context
    );
    _Scheduler_Release_critical( scheduler, &lock_context );
    _ISR_lock_ISR_enable( &lock_context );
    _Thread_Dispatch_enable( cpu_self );
    status = RTEMS_SUCCESSFUL;
  } else {
    /* Some scheduler instance already owns this processor. */
    status = RTEMS_RESOURCE_IN_USE;
  }

  _Objects_Allocator_unlock();
  return status;
#else
  /* Uniprocessor configuration: the only processor is always in use. */
  return RTEMS_RESOURCE_IN_USE;
#endif
}
 */

#if HAVE_CONFIG_H
#include "config.h"
#endif

/*
 * NOTE(review): the three include operands below were stripped by text
 * extraction (bare "#include" tokens in the original).  Reconstructed from
 * the identifiers used in this file -- confirm against the RTEMS tree.
 */
#include <rtems/rtems/tasks.h>
#include <rtems/score/schedulerimpl.h>
#include <rtems/config.h>

#if defined(RTEMS_SMP)
/*
 * Shared state for the _Thread_Iterate() visitor below: the scheduler
 * instance being emptied and the resulting status of the removal attempt.
 */
typedef struct {
  const Scheduler_Control *scheduler;
  rtems_status_code status;
} Scheduler_Processor_removal_context;

/*
 * _Thread_Iterate() visitor.  Flags RTEMS_RESOURCE_IN_USE in the iteration
 * context if any non-idle thread has the scheduler instance under removal
 * as its home scheduler.  Returning true stops the iteration early once a
 * conflicting thread has been found.
 */
static bool _Scheduler_Check_processor_removal(
  Thread_Control *the_thread,
  void *arg
)
{
  Scheduler_Processor_removal_context *iter_context;
  Thread_queue_Context queue_context;
  ISR_lock_Context state_context;

  /* Idle threads are migrated by the removal itself; skip them. */
  if ( the_thread->is_idle ) {
    return false;
  }

  iter_context = arg;

  /*
   * Both the wait and state locks are held so the thread's home scheduler
   * cannot change while it is inspected.
   */
  _Thread_Wait_acquire( the_thread, &queue_context );
  _Thread_State_acquire_critical( the_thread, &state_context );

  if ( _Thread_Scheduler_get_home( the_thread ) == iter_context->scheduler ) {
    iter_context->status = RTEMS_RESOURCE_IN_USE;
  }

  _Thread_State_release_critical( the_thread, &state_context );
  _Thread_Wait_release( the_thread, &queue_context );
  return iter_context->status != RTEMS_SUCCESSFUL;
}
#endif

/*
 * Removes the processor with index cpu_index from the scheduler instance
 * identified by scheduler_id.
 *
 * Returns:
 *   RTEMS_SUCCESSFUL      - processor detached; its idle thread is parked in
 *                           cpu->Scheduler.idle_if_online_and_unused
 *   RTEMS_INVALID_ID      - scheduler_id does not name a configured scheduler
 *   RTEMS_INVALID_NUMBER  - cpu_index is out of range or the processor is
 *                           not owned by this scheduler instance
 *   RTEMS_RESOURCE_IN_USE - removing the last processor would strand a
 *                           thread whose home is this scheduler instance
 *
 * On non-SMP builds the sole processor cannot be removed, so only the
 * parameter validation runs before RTEMS_RESOURCE_IN_USE is returned.
 */
rtems_status_code rtems_scheduler_remove_processor(
  rtems_id scheduler_id,
  uint32_t cpu_index
)
{
  const Scheduler_Control *scheduler;
#if defined(RTEMS_SMP)
  uint32_t processor_count;
  Scheduler_Processor_removal_context iter_context;
  ISR_lock_Context lock_context;
  Scheduler_Context *scheduler_context;
  Per_CPU_Control *cpu;
  Per_CPU_Control *cpu_self;
#endif

  scheduler = _Scheduler_Get_by_id( scheduler_id );
  if ( scheduler == NULL ) {
    return RTEMS_INVALID_ID;
  }

  if ( cpu_index >= rtems_configuration_get_maximum_processors() ) {
    return RTEMS_INVALID_NUMBER;
  }

#if defined(RTEMS_SMP)
  iter_context.scheduler = scheduler;
  iter_context.status = RTEMS_SUCCESSFUL;
  scheduler_context = _Scheduler_Get_context( scheduler );
  cpu = _Per_CPU_Get_by_index( cpu_index );

  /*
   * The allocator lock serializes concurrent add/remove processor requests;
   * cpu->Scheduler.control is only changed while it is held (see also
   * rtems_scheduler_add_processor()).
   */
  _Objects_Allocator_lock();

  if ( cpu->Scheduler.control != scheduler ) {
    _Objects_Allocator_unlock();
    return RTEMS_INVALID_NUMBER;
  }

  /*
   * This prevents the selection of this scheduler instance by new threads in
   * case the processor count changes to zero.  The count is decremented
   * speculatively here and restored below if the removal turns out to be
   * impossible.
   */
  _ISR_lock_ISR_disable( &lock_context );
  _Scheduler_Acquire_critical( scheduler, &lock_context );
  processor_count = scheduler_context->processor_count - 1;
  scheduler_context->processor_count = processor_count;
  _Scheduler_Release_critical( scheduler, &lock_context );
  _ISR_lock_ISR_enable( &lock_context );

  /*
   * Removing the last processor is only allowed if no non-idle thread uses
   * this scheduler instance as its home.
   */
  if ( processor_count == 0 ) {
    _Thread_Iterate( _Scheduler_Check_processor_removal, &iter_context );
  }

  _ISR_lock_ISR_disable( &lock_context );
  _Scheduler_Acquire_critical( scheduler, &lock_context );

  if ( iter_context.status == RTEMS_SUCCESSFUL ) {
    Thread_Control *idle;
    Scheduler_Node *scheduler_node;

    /*
     * Detach the processor, let the scheduler implementation surrender its
     * idle thread, and park that idle thread on the processor so a later
     * rtems_scheduler_add_processor() can reclaim it.
     */
    cpu->Scheduler.control = NULL;
    cpu->Scheduler.context = NULL;
    idle = ( *scheduler->Operations.remove_processor )( scheduler, cpu );
    cpu->Scheduler.idle_if_online_and_unused = idle;

    /*
     * Unlink the idle thread's priority and chain nodes; the asserts check
     * that the idle thread had exactly one node in each chain (the inverse
     * of the _Chain_Initialize_one() calls in the add path).
     */
    scheduler_node = _Thread_Scheduler_get_home_node( idle );
    _Priority_Plain_extract(
      &scheduler_node->Wait.Priority,
      &idle->Real_priority
    );
    _Assert( _Priority_Is_empty( &scheduler_node->Wait.Priority ) );
    _Chain_Extract_unprotected( &scheduler_node->Thread.Wait_node );
    _Assert( _Chain_Is_empty( &idle->Scheduler.Wait_nodes ) );
    _Chain_Extract_unprotected( &scheduler_node->Thread.Scheduler_node.Chain );
    _Assert( _Chain_Is_empty( &idle->Scheduler.Scheduler_nodes ) );
  } else {
    /* Removal rejected: undo the speculative decrement above. */
    ++scheduler_context->processor_count;
  }

  /*
   * Keep thread dispatching disabled until after the scheduler lock is
   * released so any dispatch triggered by remove_processor() happens with
   * all locks dropped.
   */
  cpu_self = _Thread_Dispatch_disable_critical( &lock_context );
  _Scheduler_Release_critical( scheduler, &lock_context );
  _ISR_lock_ISR_enable( &lock_context );
  _Thread_Dispatch_enable( cpu_self );
  _Objects_Allocator_unlock();
  return iter_context.status;
#else
  /* Uniprocessor configuration: the only processor cannot be removed. */
  return RTEMS_RESOURCE_IN_USE;
#endif
}