author    Sebastian Huber <sebastian.huber@embedded-brains.de>  2020-02-26 10:02:37 +0100
committer Sebastian Huber <sebastian.huber@embedded-brains.de>  2020-02-28 07:50:19 +0100
commit    fcb11510c6b38a7a1f4da7205acc5461a4e18214 (patch)
tree      02f0c07dec4124f93eccdcdd45af06a9c9099fb7 /cpukit
parent    bsp/pc386: Fix interrupt enable to make debug option work again (diff)
download  rtems-fcb11510c6b38a7a1f4da7205acc5461a4e18214.tar.bz2
score: Fix context switch extensions (SMP)
In uniprocessor and SMP configurations, the context switch extensions were called during _Thread_Do_dispatch():

void _Thread_Do_dispatch( Per_CPU_Control *cpu_self, ISR_Level level )
{
  Thread_Control *executing;

  executing = cpu_self->executing;

  ...

  do {
    Thread_Control *heir;

    heir = _Thread_Get_heir_and_make_it_executing( cpu_self );

    ...

    _User_extensions_Thread_switch( executing, heir );

    ...

    _Context_Switch( &executing->Registers, &heir->Registers );

    ...

  } while ( cpu_self->dispatch_necessary );

  ...
}

In uniprocessor configurations, this is fine: the context switch extensions are called for all thread switches except the very first thread switch to the initialization thread. In SMP configurations, however, the context switch may be invalidated and updated in the low-level _Context_Switch() routine. See:

https://docs.rtems.org/branches/master/c-user/symmetric_multiprocessing_services.html#thread-dispatch-details

If such an update happens, the thread which then executes on the processor was not seen in the previous call of the context switch extensions. This can confuse, for example, event record consumers that use the events generated by a context switch extension.

Fixing this is not straightforward. The context switch extension call must move after the low-level context switch. The problem here is that we may end up in _Thread_Handler(). Adding the context switch extension call to _Thread_Handler() now also covers the thread switch to the initialization thread.

We also have to save the last executing thread (the ancestor) of the processor. Registers or the stack cannot be used for this purpose, so it is added to the per-processor information.

Existing extensions may be affected, since the context switch extensions now run on the stack of the heir thread. The stack checker is affected by this.

Calling the thread switch extensions in the low-level context switch is difficult, since at that point an intermediate stack is used which is only large enough to enable servicing of interrupts.

Update #3885.
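For context on what this change means for extension writers, here is a minimal sketch (not part of this commit) of how a thread switch extension is registered through the RTEMS Classic API. All identifiers starting with "example_" are hypothetical; only rtems_extension_create(), rtems_build_name(), and the rtems_extensions_table layout come from the existing API. With this commit, on SMP the handler runs on the stack of the heir thread, with the per-CPU lock held and interrupts disabled, and the first argument is the ancestor thread saved in the per-CPU information.

#include <rtems.h>

/* Hypothetical thread switch handler.  Keep it short and non-blocking:
 * it runs in the thread dispatch path.
 */
static void example_thread_switch( rtems_tcb *executing, rtems_tcb *heir )
{
  (void) executing;  /* on SMP: the ancestor thread of this processor */
  (void) heir;       /* the thread whose stack is now in use */
}

static const rtems_extensions_table example_extensions = {
  .thread_switch = example_thread_switch
};

/* Hypothetical registration helper. */
static void example_install_switch_extension( void )
{
  rtems_id          id;
  rtems_status_code sc;

  sc = rtems_extension_create(
    rtems_build_name( 'E', 'X', 'S', 'W' ),
    &example_extensions,
    &id
  );
  if ( sc != RTEMS_SUCCESSFUL ) {
    /* Handle the error as appropriate for the application. */
  }
}

Note that with this change the switch callouts are skipped when the ancestor equals the heir, which happens when context switch extensions are created after system initialization or during a thread self restart (see the comment added to _User_extensions_Thread_switch() below).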
Diffstat (limited to 'cpukit')
-rw-r--r--  cpukit/include/rtems/score/percpu.h       |  7
-rw-r--r--  cpukit/include/rtems/score/userextimpl.h  | 11
-rw-r--r--  cpukit/libmisc/stackchk/check.c           | 26
-rw-r--r--  cpukit/score/src/threadcreateidle.c       |  3
-rw-r--r--  cpukit/score/src/threaddispatch.c         |  5
-rw-r--r--  cpukit/score/src/threadhandler.c          |  4
-rw-r--r--  cpukit/score/src/userextaddset.c          | 21
7 files changed, 74 insertions, 3 deletions
diff --git a/cpukit/include/rtems/score/percpu.h b/cpukit/include/rtems/score/percpu.h
index 7c95a9649a..31bc2b0bff 100644
--- a/cpukit/include/rtems/score/percpu.h
+++ b/cpukit/include/rtems/score/percpu.h
@@ -529,6 +529,13 @@ typedef struct Per_CPU_Control {
} Scheduler;
/**
+ * @brief The ancestor of the executing thread.
+ *
+ * This member is used by _User_extensions_Thread_switch().
+ */
+ struct _Thread_Control *ancestor;
+
+ /**
* @brief Begin of the per-CPU data area.
*
* Contains items defined via PER_CPU_DATA_ITEM().
diff --git a/cpukit/include/rtems/score/userextimpl.h b/cpukit/include/rtems/score/userextimpl.h
index 8b456c072d..23ee957724 100644
--- a/cpukit/include/rtems/score/userextimpl.h
+++ b/cpukit/include/rtems/score/userextimpl.h
@@ -386,7 +386,16 @@ static inline void _User_extensions_Thread_switch(
_ISR_lock_ISR_disable( &lock_context );
_Per_CPU_Acquire( cpu_self, &lock_context );
+ executing = cpu_self->ancestor;
+ cpu_self->ancestor = heir;
node = _Chain_Immutable_first( chain );
+
+ /*
+ * An executing thread equal to the heir thread may happen in two
+ * situations. Firstly, in case context switch extensions are created after
+ * system initialization. Secondly, during a thread self restart.
+ */
+ if ( executing != heir ) {
#endif
while ( node != tail ) {
@@ -398,6 +407,8 @@ static inline void _User_extensions_Thread_switch(
}
#if defined(RTEMS_SMP)
+ }
+
_Per_CPU_Release( cpu_self, &lock_context );
_ISR_lock_ISR_enable( &lock_context );
#endif
diff --git a/cpukit/libmisc/stackchk/check.c b/cpukit/libmisc/stackchk/check.c
index eec3a911aa..0c859f6375 100644
--- a/cpukit/libmisc/stackchk/check.c
+++ b/cpukit/libmisc/stackchk/check.c
@@ -295,8 +295,8 @@ static void Stack_check_report_blown_task(
* rtems_stack_checker_switch_extension
*/
void rtems_stack_checker_switch_extension(
- Thread_Control *running RTEMS_UNUSED,
- Thread_Control *heir RTEMS_UNUSED
+ Thread_Control *running,
+ Thread_Control *heir
)
{
bool sp_ok;
@@ -306,6 +306,22 @@ void rtems_stack_checker_switch_extension(
/*
* Check for an out of bounds stack pointer or an overwrite
*/
+#if defined(RTEMS_SMP)
+ sp_ok = Stack_check_Frame_pointer_in_range( heir );
+
+ if ( !sp_ok ) {
+ pattern_ok = Stack_check_Is_sanity_pattern_valid(
+ &heir->Start.Initial_stack
+ );
+ Stack_check_report_blown_task( heir, pattern_ok );
+ }
+
+ pattern_ok = Stack_check_Is_sanity_pattern_valid( &running->Start.Initial_stack );
+
+ if ( !pattern_ok ) {
+ Stack_check_report_blown_task( running, pattern_ok );
+ }
+#else
sp_ok = Stack_check_Frame_pointer_in_range( running );
pattern_ok = Stack_check_Is_sanity_pattern_valid( &running->Start.Initial_stack );
@@ -313,6 +329,7 @@ void rtems_stack_checker_switch_extension(
if ( !sp_ok || !pattern_ok ) {
Stack_check_report_blown_task( running, pattern_ok );
}
+#endif
stack = &Stack_check_Interrupt_stack[ _SMP_Get_current_processor() ];
@@ -329,7 +346,10 @@ void rtems_stack_checker_switch_extension(
*/
bool rtems_stack_checker_is_blown( void )
{
- rtems_stack_checker_switch_extension( _Thread_Get_executing(), NULL );
+ Thread_Control *executing;
+
+ executing = _Thread_Get_executing();
+ rtems_stack_checker_switch_extension( executing, executing );
/*
* The Stack Pointer and the Pattern Area are OK so return false.
diff --git a/cpukit/score/src/threadcreateidle.c b/cpukit/score/src/threadcreateidle.c
index 52c3ad4534..d71315005b 100644
--- a/cpukit/score/src/threadcreateidle.c
+++ b/cpukit/score/src/threadcreateidle.c
@@ -75,6 +75,9 @@ static void _Thread_Create_idle_for_CPU( Per_CPU_Control *cpu )
*/
cpu->heir =
cpu->executing = idle;
+#if defined(RTEMS_SMP)
+ cpu->ancestor = idle;
+#endif
idle->is_idle = true;
idle->Start.Entry.adaptor = _Thread_Entry_adaptor_idle;
diff --git a/cpukit/score/src/threaddispatch.c b/cpukit/score/src/threaddispatch.c
index cd32f5a1d2..3b611f7a7a 100644
--- a/cpukit/score/src/threaddispatch.c
+++ b/cpukit/score/src/threaddispatch.c
@@ -297,10 +297,15 @@ void _Thread_Do_dispatch( Per_CPU_Control *cpu_self, ISR_Level level )
_ISR_Local_enable( level );
+#if !defined(RTEMS_SMP)
_User_extensions_Thread_switch( executing, heir );
+#endif
_Thread_Save_fp( executing );
_Context_Switch( &executing->Registers, &heir->Registers );
_Thread_Restore_fp( executing );
+#if defined(RTEMS_SMP)
+ _User_extensions_Thread_switch( NULL, executing );
+#endif
/*
* We have to obtain this value again after the context switch since the
diff --git a/cpukit/score/src/threadhandler.c b/cpukit/score/src/threadhandler.c
index 6ddb303299..4b8e386a67 100644
--- a/cpukit/score/src/threadhandler.c
+++ b/cpukit/score/src/threadhandler.c
@@ -97,6 +97,10 @@ void _Thread_Handler( void )
*/
_Thread_Restore_fp( executing );
+#if defined(RTEMS_SMP)
+ _User_extensions_Thread_switch( NULL, executing );
+#endif
+
/*
* Do not use the level of the thread control block, since it has a
* different format.
diff --git a/cpukit/score/src/userextaddset.c b/cpukit/score/src/userextaddset.c
index 2b13dfad62..70e9fb008f 100644
--- a/cpukit/score/src/userextaddset.c
+++ b/cpukit/score/src/userextaddset.c
@@ -20,8 +20,28 @@
#endif
#include <rtems/score/userextimpl.h>
+#include <rtems/score/smp.h>
#include <rtems/score/percpu.h>
+static void _User_extensions_Set_ancestors( void )
+{
+#if defined(RTEMS_SMP)
+ if ( _Chain_Is_empty( &_User_extensions_Switches_list ) ) {
+ uint32_t cpu_max;
+ uint32_t cpu_index;
+
+ cpu_max = _SMP_Get_processor_maximum();
+
+ for ( cpu_index = 0 ; cpu_index < cpu_max ; ++cpu_index ) {
+ Per_CPU_Control *cpu;
+
+ cpu = _Per_CPU_Get_by_index( cpu_index );
+ cpu->ancestor = cpu->executing;
+ }
+ }
+#endif
+}
+
void _User_extensions_Add_set(
User_extensions_Control *the_extension
)
@@ -45,6 +65,7 @@ void _User_extensions_Add_set(
the_extension->Callouts.thread_switch;
_Per_CPU_Acquire_all( &lock_context );
+ _User_extensions_Set_ancestors();
_Chain_Initialize_node( &the_extension->Switch.Node );
_Chain_Append_unprotected(
&_User_extensions_Switches_list,