author     Sebastian Huber <sebastian.huber@embedded-brains.de>  2018-08-29 09:43:44 +0200
committer  Sebastian Huber <sebastian.huber@embedded-brains.de>  2018-09-10 10:38:45 +0200
commit     709796209c88e6749320b3096df57f369c2d62be (patch)
tree       072e7cd5cef37aad7404a02344724a4348602f35 /cpukit/score
parent     score: Modify _Scheduler_Unblock() (diff)
score: Add thread pin/unpin support
Add support to temporarily pin a thread to its current processor. This may be used to access per-processor data structures in critical sections with thread dispatching enabled, e.g. a pinned thread is allowed to block. Update #3508.
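The public pin and unpin entry points are not visible in this diff, which is limited to cpukit/score; presumably they are inline wrappers in the thread header that maintain the pin level and call into the _Thread_Do_unpin() slow path added below. A hypothetical usage sketch, with the wrapper names and calling convention assumed rather than taken from this patch:

  /* Hypothetical sketch: work on per-processor data with thread
   * dispatching enabled.  _Thread_Pin() and _Thread_Unpin() are
   * assumed inline wrappers from the thread header, outside this
   * path filter. */
  #include <rtems/score/threadimpl.h>
  #include <rtems/score/percpu.h>

  static void with_pinned_thread( void ( *visit )( Per_CPU_Control * ) )
  {
    Per_CPU_Control *cpu_self;
    Thread_Control  *executing;

    cpu_self = _Thread_Dispatch_disable();
    executing = _Per_CPU_Get_executing( cpu_self );
    _Thread_Pin( executing );            /* stay on this processor */
    _Thread_Dispatch_enable( cpu_self ); /* dispatching is on again */

    /* The thread may block or be preempted here, yet it remains on its
     * pinned processor, so the per-CPU data stays the right instance. */
    ( *visit )( _Per_CPU_Get() );

    cpu_self = _Thread_Dispatch_disable();
    _Thread_Unpin( executing, cpu_self ); /* free to migrate again */
    _Thread_Dispatch_enable( cpu_self );
  }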
Diffstat (limited to 'cpukit/score')
-rw-r--r--  cpukit/score/Makefile.am                     |  2
-rw-r--r--  cpukit/score/src/schedulerdefaultpinunpin.c  | 35
-rw-r--r--  cpukit/score/src/scheduleredfsmp.c           | 96
-rw-r--r--  cpukit/score/src/threaddispatch.c            | 88
-rw-r--r--  cpukit/score/src/threadinitialize.c          |  2
-rw-r--r--  cpukit/score/src/threadunpin.c               | 72
6 files changed, 268 insertions(+), 27 deletions(-)
diff --git a/cpukit/score/Makefile.am b/cpukit/score/Makefile.am
index e345f77daa..cdd9740d41 100644
--- a/cpukit/score/Makefile.am
+++ b/cpukit/score/Makefile.am
@@ -26,6 +26,7 @@ endif
if HAS_SMP
libscore_a_SOURCES += src/percpustatewait.c
libscore_a_SOURCES += src/profilingsmplock.c
+libscore_a_SOURCES += src/schedulerdefaultpinunpin.c
libscore_a_SOURCES += src/scheduleredfsmp.c
libscore_a_SOURCES += src/schedulerpriorityaffinitysmp.c
libscore_a_SOURCES += src/schedulerprioritysmp.c
@@ -38,6 +39,7 @@ libscore_a_SOURCES += src/schedulerdefaultaskforhelp.c
libscore_a_SOURCES += src/schedulerdefaultsetaffinity.c
libscore_a_SOURCES += src/schedulersmp.c
libscore_a_SOURCES += src/schedulersmpstartidle.c
+libscore_a_SOURCES += src/threadunpin.c
endif
## CORE_APIMUTEX_C_FILES
diff --git a/cpukit/score/src/schedulerdefaultpinunpin.c b/cpukit/score/src/schedulerdefaultpinunpin.c
new file mode 100644
index 0000000000..5fc9cca026
--- /dev/null
+++ b/cpukit/score/src/schedulerdefaultpinunpin.c
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2018 embedded brains GmbH
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#if HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <rtems/score/scheduler.h>
+#include <rtems/score/interr.h>
+#include <rtems/score/smpimpl.h>
+
+void _Scheduler_default_Pin_or_unpin(
+ const Scheduler_Control *scheduler,
+ Thread_Control *the_thread,
+ Scheduler_Node *node,
+ struct Per_CPU_Control *cpu
+)
+{
+ (void) scheduler;
+ (void) the_thread;
+ (void) node;
+ (void) cpu;
+
+ if ( _SMP_Get_processor_count() > 1 ) {
+ _Terminate(
+ RTEMS_FATAL_SOURCE_SMP,
+ SMP_FATAL_SCHEDULER_PIN_OR_UNPIN_NOT_SUPPORTED
+ );
+ }
+}
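The same routine backs both operation slots of schedulers without pin support. With exactly one processor it is a harmless no-op, since the thread cannot migrate anyway; on a real multiprocessor configuration it terminates the system instead of silently breaking the pinning contract. A stand-alone model of that contract, with hypothetical names:

  /* Models the contract of _Scheduler_default_Pin_or_unpin(): a no-op
   * on one processor, fatal on several.  Names are made up for the
   * sketch; the real code calls _Terminate(). */
  #include <stdio.h>
  #include <stdlib.h>

  static void model_default_pin_or_unpin( unsigned int processor_count )
  {
    if ( processor_count > 1 ) {
      fputs( "SMP_FATAL_SCHEDULER_PIN_OR_UNPIN_NOT_SUPPORTED\n", stderr );
      abort();
    }
    /* one processor: nothing to do, the pin holds trivially */
  }

  int main( void )
  {
    model_default_pin_or_unpin( 1u ); /* returns, pin trivially satisfied */
    return 0;
  }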
diff --git a/cpukit/score/src/scheduleredfsmp.c b/cpukit/score/src/scheduleredfsmp.c
index 874ec3e58b..62ddbdafa0 100644
--- a/cpukit/score/src/scheduleredfsmp.c
+++ b/cpukit/score/src/scheduleredfsmp.c
@@ -143,7 +143,7 @@ static inline Scheduler_Node *_Scheduler_EDF_SMP_Get_highest_ready(
Scheduler_EDF_SMP_Context *self;
Scheduler_EDF_SMP_Node *highest_ready;
Scheduler_EDF_SMP_Node *node;
- uint32_t rqi;
+ uint8_t rqi;
const Chain_Node *tail;
Chain_Node *next;
@@ -199,7 +199,7 @@ static inline void _Scheduler_EDF_SMP_Set_scheduled(
static inline Scheduler_EDF_SMP_Node *_Scheduler_EDF_SMP_Get_scheduled(
const Scheduler_EDF_SMP_Context *self,
- uint32_t rqi
+ uint8_t rqi
)
{
return self->Ready[ rqi ].scheduled;
@@ -211,7 +211,7 @@ static inline Scheduler_Node *_Scheduler_EDF_SMP_Get_lowest_scheduled(
)
{
Scheduler_EDF_SMP_Node *filter;
- uint32_t rqi;
+ uint8_t rqi;
filter = _Scheduler_EDF_SMP_Node_downcast( filter_base );
rqi = filter->ready_queue_index;
@@ -240,7 +240,7 @@ static inline void _Scheduler_EDF_SMP_Insert_ready(
{
Scheduler_EDF_SMP_Context *self;
Scheduler_EDF_SMP_Node *node;
- uint32_t rqi;
+ uint8_t rqi;
Scheduler_EDF_SMP_Ready_queue *ready_queue;
int generation_index;
int increment;
@@ -306,7 +306,7 @@ static inline void _Scheduler_EDF_SMP_Extract_from_ready(
{
Scheduler_EDF_SMP_Context *self;
Scheduler_EDF_SMP_Node *node;
- uint32_t rqi;
+ uint8_t rqi;
Scheduler_EDF_SMP_Ready_queue *ready_queue;
self = _Scheduler_EDF_SMP_Get_self( context );
@@ -369,7 +369,7 @@ static inline void _Scheduler_EDF_SMP_Allocate_processor(
{
Scheduler_EDF_SMP_Context *self;
Scheduler_EDF_SMP_Node *scheduled;
- uint32_t rqi;
+ uint8_t rqi;
(void) victim_base;
self = _Scheduler_EDF_SMP_Get_self( context );
@@ -647,7 +647,7 @@ static inline void _Scheduler_EDF_SMP_Do_set_affinity(
)
{
Scheduler_EDF_SMP_Node *node;
- const uint32_t *rqi;
+ const uint8_t *rqi;
node = _Scheduler_EDF_SMP_Node_downcast( node_base );
rqi = arg;
@@ -672,16 +672,61 @@ void _Scheduler_EDF_SMP_Start_idle(
);
}
+void _Scheduler_EDF_SMP_Pin(
+ const Scheduler_Control *scheduler,
+ Thread_Control *thread,
+ Scheduler_Node *node_base,
+ struct Per_CPU_Control *cpu
+)
+{
+ Scheduler_EDF_SMP_Node *node;
+ uint8_t rqi;
+
+ (void) scheduler;
+ node = _Scheduler_EDF_SMP_Node_downcast( node_base );
+ rqi = (uint8_t) _Per_CPU_Get_index( cpu ) + 1;
+
+ _Assert(
+ _Scheduler_SMP_Node_state( &node->Base.Base ) == SCHEDULER_SMP_NODE_BLOCKED
+ );
+
+ node = _Scheduler_EDF_SMP_Node_downcast( node_base );
+ node->ready_queue_index = rqi;
+ node->pinning_ready_queue_index = rqi;
+}
+
+void _Scheduler_EDF_SMP_Unpin(
+ const Scheduler_Control *scheduler,
+ Thread_Control *thread,
+ Scheduler_Node *node_base,
+ struct Per_CPU_Control *cpu
+)
+{
+ Scheduler_EDF_SMP_Node *node;
+
+ (void) scheduler;
+ (void) cpu;
+ node = _Scheduler_EDF_SMP_Node_downcast( node_base );
+
+ _Assert(
+ _Scheduler_SMP_Node_state( &node->Base.Base ) == SCHEDULER_SMP_NODE_BLOCKED
+ );
+
+ node->ready_queue_index = node->affinity_ready_queue_index;
+ node->pinning_ready_queue_index = 0;
+}
+
bool _Scheduler_EDF_SMP_Set_affinity(
const Scheduler_Control *scheduler,
Thread_Control *thread,
- Scheduler_Node *node,
+ Scheduler_Node *node_base,
const Processor_mask *affinity
)
{
- Scheduler_Context *context;
- Processor_mask local_affinity;
- uint32_t rqi;
+ Scheduler_Context *context;
+ Scheduler_EDF_SMP_Node *node;
+ Processor_mask local_affinity;
+ uint8_t rqi;
context = _Scheduler_Get_context( scheduler );
_Processor_mask_And( &local_affinity, &context->Processors, affinity );
@@ -696,18 +741,23 @@ bool _Scheduler_EDF_SMP_Set_affinity(
rqi = _Processor_mask_Find_last_set( &local_affinity );
}
- _Scheduler_SMP_Set_affinity(
- context,
- thread,
- node,
- &rqi,
- _Scheduler_EDF_SMP_Do_set_affinity,
- _Scheduler_EDF_SMP_Extract_from_ready,
- _Scheduler_EDF_SMP_Get_highest_ready,
- _Scheduler_EDF_SMP_Move_from_ready_to_scheduled,
- _Scheduler_EDF_SMP_Enqueue,
- _Scheduler_EDF_SMP_Allocate_processor
- );
+ node = _Scheduler_EDF_SMP_Node_downcast( node_base );
+ node->affinity_ready_queue_index = rqi;
+
+ if ( node->pinning_ready_queue_index == 0 ) {
+ _Scheduler_SMP_Set_affinity(
+ context,
+ thread,
+ node_base,
+ &rqi,
+ _Scheduler_EDF_SMP_Do_set_affinity,
+ _Scheduler_EDF_SMP_Extract_from_ready,
+ _Scheduler_EDF_SMP_Get_highest_ready,
+ _Scheduler_EDF_SMP_Move_from_ready_to_scheduled,
+ _Scheduler_EDF_SMP_Enqueue,
+ _Scheduler_EDF_SMP_Allocate_processor
+ );
+ }
return true;
}
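The pin, unpin, and set-affinity paths above all revolve around this scheduler's ready-queue index encoding: index 0 is the queue of threads that may run on any processor, index i + 1 is the queue dedicated to processor i, which is also why the index type shrinks to uint8_t throughout this file. While a thread is pinned (pinning_ready_queue_index != 0), a new affinity is merely recorded and only takes effect when _Scheduler_EDF_SMP_Unpin() copies it back. A stand-alone model of that bookkeeping, with hypothetical names:

  #include <assert.h>
  #include <stdint.h>

  /* Field names follow the diff; the struct itself is a sketch. */
  typedef struct {
    uint8_t ready_queue_index;          /* queue currently in effect */
    uint8_t affinity_ready_queue_index; /* last requested affinity */
    uint8_t pinning_ready_queue_index;  /* 0 means "not pinned" */
  } model_node;

  static void model_pin( model_node *node, uint32_t cpu_index )
  {
    uint8_t rqi = (uint8_t) ( cpu_index + 1 ); /* that CPU's queue */

    node->ready_queue_index = rqi;
    node->pinning_ready_queue_index = rqi;
  }

  static void model_unpin( model_node *node )
  {
    node->ready_queue_index = node->affinity_ready_queue_index;
    node->pinning_ready_queue_index = 0;
  }

  static void model_set_affinity( model_node *node, uint8_t rqi )
  {
    node->affinity_ready_queue_index = rqi;

    if ( node->pinning_ready_queue_index == 0 ) {
      node->ready_queue_index = rqi; /* requeue immediately */
    }
    /* otherwise deferred until model_unpin() */
  }

  int main( void )
  {
    model_node n = { 0, 0, 0 };

    model_pin( &n, 2 );          /* pin to processor 2 */
    model_set_affinity( &n, 1 ); /* deferred: still pinned */
    assert( n.ready_queue_index == 3 );
    model_unpin( &n );           /* stored affinity takes effect */
    assert( n.ready_queue_index == 1 );
    return 0;
  }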
diff --git a/cpukit/score/src/threaddispatch.c b/cpukit/score/src/threaddispatch.c
index d6207bc898..b82fe539c1 100644
--- a/cpukit/score/src/threaddispatch.c
+++ b/cpukit/score/src/threaddispatch.c
@@ -9,7 +9,7 @@
* COPYRIGHT (c) 1989-2009.
* On-Line Applications Research Corporation (OAR).
*
- * Copyright (c) 2014, 2016 embedded brains GmbH.
+ * Copyright (c) 2014, 2018 embedded brains GmbH.
*
* The license and distribution terms for this file may be
* found in the file LICENSE in this distribution or at
@@ -37,6 +37,80 @@ Thread_Control *_Thread_Allocated_fp;
CHAIN_DEFINE_EMPTY( _User_extensions_Switches_list );
#if defined(RTEMS_SMP)
+static ISR_Level _Thread_Check_pinning(
+ Thread_Control *executing,
+ Per_CPU_Control *cpu_self,
+ ISR_Level level
+)
+{
+ unsigned int pin_level;
+
+ pin_level = executing->Scheduler.pin_level;
+
+ if (
+ RTEMS_PREDICT_FALSE( pin_level != 0 )
+ && ( pin_level & THREAD_PIN_PREEMPTION ) == 0
+ ) {
+ ISR_lock_Context state_lock_context;
+ ISR_lock_Context scheduler_lock_context;
+ const Scheduler_Control *pinned_scheduler;
+ Scheduler_Node *pinned_node;
+ const Scheduler_Control *home_scheduler;
+
+ _ISR_Local_enable( level );
+
+ executing->Scheduler.pin_level = pin_level | THREAD_PIN_PREEMPTION;
+
+ _Thread_State_acquire( executing, &state_lock_context );
+
+ pinned_scheduler = _Scheduler_Get_by_CPU( cpu_self );
+ pinned_node = _Thread_Scheduler_get_node_by_index(
+ executing,
+ _Scheduler_Get_index( pinned_scheduler )
+ );
+
+ if ( _Thread_Is_ready( executing ) ) {
+ _Scheduler_Block( executing );
+ }
+
+ home_scheduler = _Thread_Scheduler_get_home( executing );
+ executing->Scheduler.pinned_scheduler = pinned_scheduler;
+
+ if ( home_scheduler != pinned_scheduler ) {
+ _Chain_Extract_unprotected( &pinned_node->Thread.Scheduler_node.Chain );
+ _Chain_Prepend_unprotected(
+ &executing->Scheduler.Scheduler_nodes,
+ &pinned_node->Thread.Scheduler_node.Chain
+ );
+ }
+
+ _Scheduler_Acquire_critical( pinned_scheduler, &scheduler_lock_context );
+
+ ( *pinned_scheduler->Operations.pin )(
+ pinned_scheduler,
+ executing,
+ pinned_node,
+ cpu_self
+ );
+
+ if ( _Thread_Is_ready( executing ) ) {
+ ( *pinned_scheduler->Operations.unblock )(
+ pinned_scheduler,
+ executing,
+ pinned_node
+ );
+ }
+
+ _Scheduler_Release_critical( pinned_scheduler, &scheduler_lock_context );
+
+ _Thread_State_release( executing, &state_lock_context );
+
+ _ISR_Local_disable( level );
+ }
+
+ return level;
+}
+
static void _Thread_Ask_for_help( Thread_Control *the_thread )
{
Chain_Node *node;
@@ -77,9 +151,15 @@ static bool _Thread_Can_ask_for_help( const Thread_Control *executing )
}
#endif
-static void _Thread_Preemption_intervention( Per_CPU_Control *cpu_self )
+static ISR_Level _Thread_Preemption_intervention(
+ Thread_Control *executing,
+ Per_CPU_Control *cpu_self,
+ ISR_Level level
+)
{
#if defined(RTEMS_SMP)
+ level = _Thread_Check_pinning( executing, cpu_self, level );
+
_Per_CPU_Acquire( cpu_self );
while ( !_Chain_Is_empty( &cpu_self->Threads_in_need_for_help ) ) {
@@ -102,6 +182,8 @@ static void _Thread_Preemption_intervention( Per_CPU_Control *cpu_self )
#else
(void) cpu_self;
#endif
+
+ return level;
}
static void _Thread_Post_switch_cleanup( Thread_Control *executing )
@@ -192,7 +274,7 @@ void _Thread_Do_dispatch( Per_CPU_Control *cpu_self, ISR_Level level )
do {
Thread_Control *heir;
- _Thread_Preemption_intervention( cpu_self );
+ level = _Thread_Preemption_intervention( executing, cpu_self, level );
heir = _Thread_Get_heir_and_make_it_executing( cpu_self );
/*
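The intervention runs at most once per pinned section: pin_level is a nesting counter with the THREAD_PIN_PREEMPTION flag folded in, and the flag records that the thread was already handed over to the scheduler of its current processor. The constants themselves are defined in the thread header, outside this path filter; a model with assumed values:

  #include <assert.h>
  #include <stdbool.h>

  /* Assumed encoding (not shown in this diff): the low bit flags a
   * performed preemption intervention, nesting counts in steps of 2. */
  #define MODEL_PIN_PREEMPTION 1u
  #define MODEL_PIN_STEP       2u

  /* Mirrors the test at the top of _Thread_Check_pinning(). */
  static bool model_needs_intervention( unsigned int pin_level )
  {
    return pin_level != 0
      && ( pin_level & MODEL_PIN_PREEMPTION ) == 0;
  }

  int main( void )
  {
    assert( !model_needs_intervention( 0 ) );              /* not pinned */
    assert(  model_needs_intervention( MODEL_PIN_STEP ) ); /* pinned once */
    assert( !model_needs_intervention(
      MODEL_PIN_STEP | MODEL_PIN_PREEMPTION                /* already done */
    ) );
    return 0;
  }

Note also the changed calling convention: _Thread_Check_pinning() temporarily enables interrupts around the scheduler hand-over, so it takes the current ISR level and returns the level in effect afterwards, and _Thread_Preemption_intervention() threads that value back to _Thread_Do_dispatch().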
diff --git a/cpukit/score/src/threadinitialize.c b/cpukit/score/src/threadinitialize.c
index c2296fbf55..401973ec9b 100644
--- a/cpukit/score/src/threadinitialize.c
+++ b/cpukit/score/src/threadinitialize.c
@@ -245,7 +245,7 @@ bool _Thread_Initialize(
#if defined(RTEMS_SMP)
RTEMS_STATIC_ASSERT( THREAD_SCHEDULER_BLOCKED == 0, Scheduler_state );
- the_thread->Scheduler.home = scheduler;
+ the_thread->Scheduler.home_scheduler = scheduler;
_ISR_lock_Initialize( &the_thread->Scheduler.Lock, "Thread Scheduler" );
_Processor_mask_Assign(
&the_thread->Scheduler.Affinity,
diff --git a/cpukit/score/src/threadunpin.c b/cpukit/score/src/threadunpin.c
new file mode 100644
index 0000000000..bf05790694
--- /dev/null
+++ b/cpukit/score/src/threadunpin.c
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2018 embedded brains GmbH
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#if HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <rtems/score/schedulerimpl.h>
+
+void _Thread_Do_unpin( Thread_Control *executing, Per_CPU_Control *cpu_self )
+{
+ ISR_lock_Context state_lock_context;
+ ISR_lock_Context scheduler_lock_context;
+ Scheduler_Node *pinned_node;
+ const Scheduler_Control *pinned_scheduler;
+ Scheduler_Node *home_node;
+ const Scheduler_Control *home_scheduler;
+ const Scheduler_Control *scheduler;
+
+ _Thread_State_acquire( executing, &state_lock_context );
+
+ executing->Scheduler.pin_level = 0;
+
+ pinned_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE(
+ _Chain_First( &executing->Scheduler.Scheduler_nodes )
+ );
+ pinned_scheduler = _Scheduler_Node_get_scheduler( pinned_node );
+ home_node = _Thread_Scheduler_get_home_node( executing );
+ home_scheduler = _Thread_Scheduler_get_home( executing );
+ scheduler = pinned_scheduler;
+
+ executing->Scheduler.pinned_scheduler = NULL;
+
+ _Scheduler_Acquire_critical( scheduler, &scheduler_lock_context );
+
+ if ( _Thread_Is_ready( executing ) ) {
+ ( *scheduler->Operations.block )( scheduler, executing, pinned_node );
+ }
+
+ ( *scheduler->Operations.unpin )(
+ scheduler,
+ executing,
+ pinned_node,
+ cpu_self
+ );
+
+ if ( home_node != pinned_node ) {
+ _Scheduler_Release_critical( scheduler, &scheduler_lock_context );
+
+ _Chain_Extract_unprotected( &home_node->Thread.Scheduler_node.Chain );
+ _Chain_Prepend_unprotected(
+ &executing->Scheduler.Scheduler_nodes,
+ &home_node->Thread.Scheduler_node.Chain
+ );
+ scheduler = home_scheduler;
+
+ _Scheduler_Acquire_critical( scheduler, &scheduler_lock_context );
+ }
+
+ if ( _Thread_Is_ready( executing ) ) {
+ ( *scheduler->Operations.unblock )( scheduler, executing, home_node );
+ }
+
+ _Scheduler_Release_critical( scheduler, &scheduler_lock_context );
+
+ _Thread_State_release( executing, &state_lock_context );
+}
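_Thread_Do_unpin() mirrors _Thread_Check_pinning() in reverse: block the thread on the pinned scheduler's node, let that scheduler drop its pin state, move the home node back to the front of the scheduler-node chain if it was displaced, and unblock on the home scheduler. Callers presumably reach this slow path through an inline _Thread_Unpin() wrapper in the thread header; a hypothetical sketch of such a wrapper, reusing the assumed pin-level encoding from the earlier model:

  /* Hypothetical fast-path wrapper; the real inline function is
   * outside this path filter.  Assumes thread dispatching is disabled
   * and uses the MODEL_PIN_* constants assumed above. */
  static inline void sketch_thread_unpin(
    Thread_Control  *executing,
    Per_CPU_Control *cpu_self
  )
  {
    unsigned int pin_level = executing->Scheduler.pin_level;

    if ( pin_level == MODEL_PIN_STEP ) {
      /* pinned once, no intervention happened: nothing was moved */
      executing->Scheduler.pin_level = 0;
    } else if ( pin_level == ( MODEL_PIN_STEP | MODEL_PIN_PREEMPTION ) ) {
      /* last unpin after an intervention: undo the hand-over */
      _Thread_Do_unpin( executing, cpu_self );
    } else {
      /* nested unpin: just drop one level */
      executing->Scheduler.pin_level = pin_level - MODEL_PIN_STEP;
    }
  }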