summaryrefslogtreecommitdiffstats
path: root/cpukit/rtems
diff options
context:
space:
mode:
authorSebastian Huber <sebastian.huber@embedded-brains.de>2016-10-31 13:08:33 +0100
committerSebastian Huber <sebastian.huber@embedded-brains.de>2016-11-10 09:22:09 +0100
commit05ca53ddf6bc8333c2f3ad861c5415467c3262d2 (patch)
tree9b011af47a8304527c77ba8992418e473f540ecf /cpukit/rtems
parentscore: Add and use Thread_Control::is_idle (diff)
downloadrtems-05ca53ddf6bc8333c2f3ad861c5415467c3262d2.tar.bz2
rtems: Add scheduler processor add/remove
Update #2797.
Diffstat (limited to 'cpukit/rtems')
-rw-r--r--cpukit/rtems/Makefile.am2
-rw-r--r--cpukit/rtems/include/rtems/rtems/tasks.h57
-rw-r--r--cpukit/rtems/src/scheduleraddprocessor.c119
-rw-r--r--cpukit/rtems/src/schedulerremoveprocessor.c144
4 files changed, 318 insertions, 4 deletions
diff --git a/cpukit/rtems/Makefile.am b/cpukit/rtems/Makefile.am
index 6ecff9e068..ada1f83510 100644
--- a/cpukit/rtems/Makefile.am
+++ b/cpukit/rtems/Makefile.am
@@ -105,8 +105,10 @@ librtems_a_SOURCES += src/taskstart.c
librtems_a_SOURCES += src/tasksuspend.c
librtems_a_SOURCES += src/taskwakeafter.c
librtems_a_SOURCES += src/taskwakewhen.c
+librtems_a_SOURCES += src/scheduleraddprocessor.c
librtems_a_SOURCES += src/schedulergetprocessorset.c
librtems_a_SOURCES += src/schedulerident.c
+librtems_a_SOURCES += src/schedulerremoveprocessor.c
## RATEMON_C_FILES
librtems_a_SOURCES += src/ratemon.c
diff --git a/cpukit/rtems/include/rtems/rtems/tasks.h b/cpukit/rtems/include/rtems/rtems/tasks.h
index 3a94e348ed..6baefa0a8b 100644
--- a/cpukit/rtems/include/rtems/rtems/tasks.h
+++ b/cpukit/rtems/include/rtems/rtems/tasks.h
@@ -465,7 +465,7 @@ rtems_status_code rtems_task_set_affinity(
*
* @param[in] task_id Identifier of the task. Use @ref RTEMS_SELF to select
* the executing task.
- * @param[out] scheduler_id Identifier of the scheduler.
+ * @param[out] scheduler_id Identifier of the scheduler instance.
*
* @retval RTEMS_SUCCESSFUL Successful operation.
* @retval RTEMS_INVALID_ADDRESS The @a scheduler_id parameter is @c NULL.
@@ -563,9 +563,9 @@ rtems_status_code rtems_scheduler_ident(
#if defined(__RTEMS_HAVE_SYS_CPUSET_H__)
/**
- * @brief Gets the set of processors owned by the scheduler.
+ * @brief Gets the set of processors owned by the specified scheduler instance.
*
- * @param[in] scheduler_id Identifier of the scheduler.
+ * @param[in] scheduler_id Identifier of the scheduler instance.
* @param[in] cpusetsize Size of the specified processor set buffer in
* bytes. This value must be positive.
* @param[out] cpuset The processor set owned by the scheduler. A set bit in
@@ -574,7 +574,7 @@ rtems_status_code rtems_scheduler_ident(
*
* @retval RTEMS_SUCCESSFUL Successful operation.
* @retval RTEMS_INVALID_ADDRESS The @a cpuset parameter is @c NULL.
- * @retval RTEMS_INVALID_ID Invalid scheduler identifier.
+ * @retval RTEMS_INVALID_ID Invalid scheduler instance identifier.
* @retval RTEMS_INVALID_NUMBER The processor set buffer is too small for the
* set of processors owned by the scheduler.
*/
@@ -585,6 +585,55 @@ rtems_status_code rtems_scheduler_get_processor_set(
);
#endif
+/**
+ * @brief Adds a processor to the set of processors owned by the specified
+ * scheduler instance.
+ *
+ * Must be called from task context. This operation obtains and releases the
+ * objects allocator lock.
+ *
+ * @param[in] scheduler_id Identifier of the scheduler instance.
+ * @param[in] cpu_index Index of the processor to add.
+ *
+ * @retval RTEMS_SUCCESSFUL Successful operation.
+ * @retval RTEMS_INVALID_ID Invalid scheduler instance identifier.
+ * @retval RTEMS_NOT_CONFIGURED The processor is not configured to be used by
+ * the application.
+ * @retval RTEMS_INCORRECT_STATE The processor is configured to be used by
+ * the application, however, it is not online.
+ * @retval RTEMS_RESOURCE_IN_USE The processor is already assigned to a
+ * scheduler instance.
+ */
+rtems_status_code rtems_scheduler_add_processor(
+ rtems_id scheduler_id,
+ uint32_t cpu_index
+);
+
+/**
+ * @brief Removes a processor from the set of processors owned by the specified
+ * scheduler instance.
+ *
+ * Must be called from task context. This operation obtains and releases the
+ * objects allocator lock. Removing a processor from a scheduler is a complex
+ * operation that involves all tasks of the system.
+ *
+ * @param[in] scheduler_id Identifier of the scheduler instance.
+ * @param[in] cpu_index Index of the processor to remove.
+ *
+ * @retval RTEMS_SUCCESSFUL Successful operation.
+ * @retval RTEMS_INVALID_ID Invalid scheduler instance identifier.
+ * @retval RTEMS_INVALID_NUMBER The processor is not owned by the specified
+ * scheduler instance.
+ * @retval RTEMS_RESOURCE_IN_USE The set of processors owned by the specified
+ * scheduler instance would be empty after the processor removal and there
+ * exists a non-idle task that uses this scheduler instance as its home
+ * scheduler instance.
+ */
+rtems_status_code rtems_scheduler_remove_processor(
+ rtems_id scheduler_id,
+ uint32_t cpu_index
+);
+
/**@}*/
/**
diff --git a/cpukit/rtems/src/scheduleraddprocessor.c b/cpukit/rtems/src/scheduleraddprocessor.c
new file mode 100644
index 0000000000..c39b81d8ce
--- /dev/null
+++ b/cpukit/rtems/src/scheduleraddprocessor.c
@@ -0,0 +1,119 @@
+/*
+ * Copyright (c) 2016 embedded brains GmbH. All rights reserved.
+ *
+ * embedded brains GmbH
+ * Dornierstr. 4
+ * 82178 Puchheim
+ * Germany
+ * <rtems@embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#if HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <rtems/rtems/tasks.h>
+#include <rtems/score/assert.h>
+#include <rtems/score/schedulerimpl.h>
+#include <rtems/config.h>
+
+rtems_status_code rtems_scheduler_add_processor(
+ rtems_id scheduler_id,
+ uint32_t cpu_index
+)
+{
+ uint32_t scheduler_index;
+#if defined(RTEMS_SMP)
+ Per_CPU_Control *cpu;
+ rtems_status_code status;
+#endif
+
+ scheduler_index = _Scheduler_Get_index_by_id( scheduler_id );
+
+ if ( scheduler_index >= _Scheduler_Count ) {
+ return RTEMS_INVALID_ID;
+ }
+
+ if ( cpu_index >= rtems_configuration_get_maximum_processors() ) {
+ return RTEMS_NOT_CONFIGURED;
+ }
+
+#if defined(RTEMS_SMP)
+ cpu = _Per_CPU_Get_by_index( cpu_index );
+
+ if ( _Scheduler_Initial_assignments[ cpu_index ].scheduler == NULL ) {
+ return RTEMS_NOT_CONFIGURED;
+ }
+
+ if ( !_Per_CPU_Is_processor_online( cpu ) ) {
+ return RTEMS_INCORRECT_STATE;
+ }
+
+ _Objects_Allocator_lock();
+
+ if ( cpu->Scheduler.control == NULL ) {
+ const Scheduler_Control *scheduler;
+ Scheduler_Context *scheduler_context;
+ Priority_Control idle_priority;
+ Thread_Control *idle;
+ Scheduler_Node *scheduler_node;
+ ISR_lock_Context lock_context;
+ Thread_queue_Context queue_context;
+ Per_CPU_Control *cpu_self;
+
+ scheduler = &_Scheduler_Table[ scheduler_index ];
+ scheduler_context = _Scheduler_Get_context( scheduler );
+ idle_priority =
+ _Scheduler_Map_priority( scheduler, scheduler->maximum_priority );
+
+ idle = cpu->Scheduler.idle_if_online_and_unused;
+ _Assert( idle != NULL );
+ cpu->Scheduler.idle_if_online_and_unused = NULL;
+
+ idle->Scheduler.home = scheduler;
+ idle->Start.initial_priority = idle_priority;
+ scheduler_node =
+ _Thread_Scheduler_get_node_by_index( idle, scheduler_index );
+ _Priority_Node_set_priority( &idle->Real_priority, idle_priority );
+ _Priority_Initialize_one(
+ &scheduler_node->Wait.Priority,
+ &idle->Real_priority
+ );
+ _Assert( _Chain_Is_empty( &idle->Scheduler.Wait_nodes ) );
+ _Chain_Initialize_one(
+ &idle->Scheduler.Wait_nodes,
+ &scheduler_node->Thread.Wait_node
+ );
+ _Assert( _Chain_Is_empty( &idle->Scheduler.Scheduler_nodes ) );
+ _Chain_Initialize_one(
+ &idle->Scheduler.Scheduler_nodes,
+ &scheduler_node->Thread.Scheduler_node.Chain
+ );
+
+ _ISR_lock_ISR_disable( &lock_context );
+ _Scheduler_Acquire_critical( scheduler, &lock_context );
+ ++scheduler_context->processor_count;
+ cpu->Scheduler.control = scheduler;
+ cpu->Scheduler.context = scheduler_context;
+ ( *scheduler->Operations.add_processor )( scheduler, idle );
+ cpu_self = _Thread_Dispatch_disable_critical(
+ &queue_context.Lock_context.Lock_context
+ );
+ _Scheduler_Release_critical( scheduler, &lock_context );
+ _ISR_lock_ISR_enable( &lock_context );
+ _Thread_Dispatch_enable( cpu_self );
+ status = RTEMS_SUCCESSFUL;
+ } else {
+ status = RTEMS_RESOURCE_IN_USE;
+ }
+
+ _Objects_Allocator_unlock();
+ return status;
+#else
+ return RTEMS_RESOURCE_IN_USE;
+#endif
+}
diff --git a/cpukit/rtems/src/schedulerremoveprocessor.c b/cpukit/rtems/src/schedulerremoveprocessor.c
new file mode 100644
index 0000000000..21b7c4611e
--- /dev/null
+++ b/cpukit/rtems/src/schedulerremoveprocessor.c
@@ -0,0 +1,144 @@
+/*
+ * Copyright (c) 2016 embedded brains GmbH. All rights reserved.
+ *
+ * embedded brains GmbH
+ * Dornierstr. 4
+ * 82178 Puchheim
+ * Germany
+ * <rtems@embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#if HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <rtems/rtems/tasks.h>
+#include <rtems/score/schedulerimpl.h>
+#include <rtems/config.h>
+
+#if defined(RTEMS_SMP)
+typedef struct {
+ const Scheduler_Control *scheduler;
+ rtems_status_code status;
+} Scheduler_Processor_removal_context;
+
+static bool _Scheduler_Check_processor_removal(
+ Thread_Control *the_thread,
+ void *arg
+)
+{
+ Scheduler_Processor_removal_context *iter_context;
+ Thread_queue_Context queue_context;
+ ISR_lock_Context state_context;
+
+ if ( the_thread->is_idle ) {
+ return false;
+ }
+
+ iter_context = arg;
+
+ _Thread_Wait_acquire( the_thread, &queue_context );
+ _Thread_State_acquire_critical( the_thread, &state_context );
+
+ if ( _Thread_Scheduler_get_home( the_thread ) == iter_context->scheduler ) {
+ iter_context->status = RTEMS_RESOURCE_IN_USE;
+ }
+
+ _Thread_State_release_critical( the_thread, &state_context );
+ _Thread_Wait_release( the_thread, &queue_context );
+ return iter_context->status != RTEMS_SUCCESSFUL;
+}
+#endif
+
+rtems_status_code rtems_scheduler_remove_processor(
+ rtems_id scheduler_id,
+ uint32_t cpu_index
+)
+{
+ const Scheduler_Control *scheduler;
+#if defined(RTEMS_SMP)
+ uint32_t processor_count;
+ Scheduler_Processor_removal_context iter_context;
+ ISR_lock_Context lock_context;
+ Scheduler_Context *scheduler_context;
+ Per_CPU_Control *cpu;
+ Per_CPU_Control *cpu_self;
+#endif
+
+ scheduler = _Scheduler_Get_by_id( scheduler_id );
+ if ( scheduler == NULL ) {
+ return RTEMS_INVALID_ID;
+ }
+
+ if ( cpu_index >= rtems_configuration_get_maximum_processors() ) {
+ return RTEMS_INVALID_NUMBER;
+ }
+
+#if defined(RTEMS_SMP)
+ iter_context.scheduler = scheduler;
+ iter_context.status = RTEMS_SUCCESSFUL;
+ scheduler_context = _Scheduler_Get_context( scheduler );
+ cpu = _Per_CPU_Get_by_index( cpu_index );
+
+ _Objects_Allocator_lock();
+
+ if ( cpu->Scheduler.control != scheduler ) {
+ _Objects_Allocator_unlock();
+ return RTEMS_INVALID_NUMBER;
+ }
+
+ /*
+ * This prevents the selection of this scheduler instance by new threads in
+ * case the processor count changes to zero.
+ */
+ _ISR_lock_ISR_disable( &lock_context );
+ _Scheduler_Acquire_critical( scheduler, &lock_context );
+ processor_count = scheduler_context->processor_count - 1;
+ scheduler_context->processor_count = processor_count;
+ _Scheduler_Release_critical( scheduler, &lock_context );
+ _ISR_lock_ISR_enable( &lock_context );
+
+ if ( processor_count == 0 ) {
+ _Thread_Iterate( _Scheduler_Check_processor_removal, &iter_context );
+ }
+
+ _ISR_lock_ISR_disable( &lock_context );
+ _Scheduler_Acquire_critical( scheduler, &lock_context );
+
+ if ( iter_context.status == RTEMS_SUCCESSFUL ) {
+ Thread_Control *idle;
+ Scheduler_Node *scheduler_node;
+
+ cpu->Scheduler.control = NULL;
+ cpu->Scheduler.context = NULL;
+ idle = ( *scheduler->Operations.remove_processor )( scheduler, cpu );
+ cpu->Scheduler.idle_if_online_and_unused = idle;
+
+ scheduler_node = _Thread_Scheduler_get_home_node( idle );
+ _Priority_Plain_extract(
+ &scheduler_node->Wait.Priority,
+ &idle->Real_priority
+ );
+ _Assert( _Priority_Is_empty( &scheduler_node->Wait.Priority ) );
+ _Chain_Extract_unprotected( &scheduler_node->Thread.Wait_node );
+ _Assert( _Chain_Is_empty( &idle->Scheduler.Wait_nodes ) );
+ _Chain_Extract_unprotected( &scheduler_node->Thread.Scheduler_node.Chain );
+ _Assert( _Chain_Is_empty( &idle->Scheduler.Scheduler_nodes ) );
+ } else {
+ ++scheduler_context->processor_count;
+ }
+
+ cpu_self = _Thread_Dispatch_disable_critical( &lock_context );
+ _Scheduler_Release_critical( scheduler, &lock_context );
+ _ISR_lock_ISR_enable( &lock_context );
+ _Thread_Dispatch_enable( cpu_self );
+ _Objects_Allocator_unlock();
+ return iter_context.status;
+#else
+ return RTEMS_RESOURCE_IN_USE;
+#endif
+}