summaryrefslogtreecommitdiffstats
path: root/cpukit/score
diff options
context:
space:
mode:
authorSebastian Huber <sebastian.huber@embedded-brains.de>2016-05-11 11:54:49 +0200
committerSebastian Huber <sebastian.huber@embedded-brains.de>2016-05-12 13:24:41 +0200
commitbd12dda405e1bab16c522f7ef0dd2b455230d269 (patch)
treec644df77b8512e1e211bfae39587511d0774f0d8 /cpukit/score
parentrtems: Avoid Giant lock rtems_task_is_suspended() (diff)
downloadrtems-bd12dda405e1bab16c522f7ef0dd2b455230d269.tar.bz2
score: Use thread state lock for current state
In addition, protect the scheduler of a thread by the thread state lock. This enables the use of per-scheduler-instance locks. Update #2555.
Diffstat (limited to 'cpukit/score')
-rw-r--r--cpukit/score/include/rtems/score/schedulerimpl.h134
-rw-r--r--cpukit/score/include/rtems/score/thread.h7
-rw-r--r--cpukit/score/src/schedulercbsunblock.c7
-rw-r--r--cpukit/score/src/threadchangepriority.c4
-rw-r--r--cpukit/score/src/threadclearstate.c4
-rw-r--r--cpukit/score/src/threadgetcputimeused.c11
-rw-r--r--cpukit/score/src/threadsetstate.c4
-rw-r--r--cpukit/score/src/threadyield.c4
8 files changed, 117 insertions, 58 deletions
diff --git a/cpukit/score/include/rtems/score/schedulerimpl.h b/cpukit/score/include/rtems/score/schedulerimpl.h
index 5cf3503045..c888237376 100644
--- a/cpukit/score/include/rtems/score/schedulerimpl.h
+++ b/cpukit/score/include/rtems/score/schedulerimpl.h
@@ -10,7 +10,7 @@
/*
* Copyright (C) 2010 Gedare Bloom.
* Copyright (C) 2011 On-Line Applications Research Corporation (OAR).
- * Copyright (c) 2014-2015 embedded brains GmbH
+ * Copyright (c) 2014, 2016 embedded brains GmbH
*
* The license and distribution terms for this file may be
* found in the file LICENSE in this distribution or at
@@ -118,6 +118,42 @@ RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Node_get_user(
}
#endif
+ISR_LOCK_DECLARE( extern, _Scheduler_Lock )
+
+/**
+ * @brief Acquires the scheduler instance inside a critical section (interrupts
+ * disabled).
+ *
+ * @param[in] scheduler The scheduler instance.
+ * @param[in] lock_context The lock context to use for
+ * _Scheduler_Release_critical().
+ */
+RTEMS_INLINE_ROUTINE void _Scheduler_Acquire_critical(
+ const Scheduler_Control *scheduler,
+ ISR_lock_Context *lock_context
+)
+{
+ (void) scheduler;
+ _ISR_lock_Acquire( &_Scheduler_Lock, lock_context );
+}
+
+/**
+ * @brief Releases the scheduler instance inside a critical section (interrupts
+ * disabled).
+ *
+ * @param[in] scheduler The scheduler instance.
+ * @param[in] lock_context The lock context used for
+ * _Scheduler_Acquire_critical().
+ */
+RTEMS_INLINE_ROUTINE void _Scheduler_Release_critical(
+ const Scheduler_Control *scheduler,
+ ISR_lock_Context *lock_context
+)
+{
+ (void) scheduler;
+ _ISR_lock_Release( &_Scheduler_Lock, lock_context );
+}
+
/**
* The preferred method to add a new scheduler is to define the jump table
* entries and add a case to the _Scheduler_Initialize routine.
@@ -143,9 +179,15 @@ RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Node_get_user(
*/
RTEMS_INLINE_ROUTINE void _Scheduler_Schedule( Thread_Control *the_thread )
{
- const Scheduler_Control *scheduler = _Scheduler_Get( the_thread );
+ const Scheduler_Control *scheduler;
+ ISR_lock_Context lock_context;
+
+ scheduler = _Scheduler_Get( the_thread );
+ _Scheduler_Acquire_critical( scheduler, &lock_context );
( *scheduler->Operations.schedule )( scheduler, the_thread );
+
+ _Scheduler_Release_critical( scheduler, &lock_context );
}
#if defined(RTEMS_SMP)
@@ -252,10 +294,16 @@ RTEMS_INLINE_ROUTINE void _Scheduler_Ask_for_help_if_necessary(
*/
RTEMS_INLINE_ROUTINE void _Scheduler_Yield( Thread_Control *the_thread )
{
- const Scheduler_Control *scheduler = _Scheduler_Get( the_thread );
+ const Scheduler_Control *scheduler;
+ ISR_lock_Context lock_context;
#if defined(RTEMS_SMP)
- Thread_Control *needs_help;
+ Thread_Control *needs_help;
+#endif
+
+ scheduler = _Scheduler_Get( the_thread );
+ _Scheduler_Acquire_critical( scheduler, &lock_context );
+#if defined(RTEMS_SMP)
needs_help =
#endif
( *scheduler->Operations.yield )( scheduler, the_thread );
@@ -263,6 +311,8 @@ RTEMS_INLINE_ROUTINE void _Scheduler_Yield( Thread_Control *the_thread )
#if defined(RTEMS_SMP)
_Scheduler_Ask_for_help_if_necessary( needs_help );
#endif
+
+ _Scheduler_Release_critical( scheduler, &lock_context );
}
/**
@@ -277,9 +327,15 @@ RTEMS_INLINE_ROUTINE void _Scheduler_Yield( Thread_Control *the_thread )
*/
RTEMS_INLINE_ROUTINE void _Scheduler_Block( Thread_Control *the_thread )
{
- const Scheduler_Control *scheduler = _Scheduler_Get( the_thread );
+ const Scheduler_Control *scheduler;
+ ISR_lock_Context lock_context;
+
+ scheduler = _Scheduler_Get( the_thread );
+ _Scheduler_Acquire_critical( scheduler, &lock_context );
( *scheduler->Operations.block )( scheduler, the_thread );
+
+ _Scheduler_Release_critical( scheduler, &lock_context );
}
/**
@@ -294,10 +350,16 @@ RTEMS_INLINE_ROUTINE void _Scheduler_Block( Thread_Control *the_thread )
*/
RTEMS_INLINE_ROUTINE void _Scheduler_Unblock( Thread_Control *the_thread )
{
- const Scheduler_Control *scheduler = _Scheduler_Get( the_thread );
+ const Scheduler_Control *scheduler;
+ ISR_lock_Context lock_context;
#if defined(RTEMS_SMP)
- Thread_Control *needs_help;
+ Thread_Control *needs_help;
+#endif
+ scheduler = _Scheduler_Get( the_thread );
+ _Scheduler_Acquire_critical( scheduler, &lock_context );
+
+#if defined(RTEMS_SMP)
needs_help =
#endif
( *scheduler->Operations.unblock )( scheduler, the_thread );
@@ -305,6 +367,8 @@ RTEMS_INLINE_ROUTINE void _Scheduler_Unblock( Thread_Control *the_thread )
#if defined(RTEMS_SMP)
_Scheduler_Ask_for_help_if_necessary( needs_help );
#endif
+
+ _Scheduler_Release_critical( scheduler, &lock_context );
}
/**
@@ -329,14 +393,20 @@ RTEMS_INLINE_ROUTINE void _Scheduler_Change_priority(
bool prepend_it
)
{
- const Scheduler_Control *scheduler = _Scheduler_Get_own( the_thread );
+ const Scheduler_Control *own_scheduler;
+ ISR_lock_Context lock_context;
#if defined(RTEMS_SMP)
- Thread_Control *needs_help;
+ Thread_Control *needs_help;
+#endif
+ own_scheduler = _Scheduler_Get_own( the_thread );
+ _Scheduler_Acquire_critical( own_scheduler, &lock_context );
+
+#if defined(RTEMS_SMP)
needs_help =
#endif
- ( *scheduler->Operations.change_priority )(
- scheduler,
+ ( *own_scheduler->Operations.change_priority )(
+ own_scheduler,
the_thread,
new_priority,
prepend_it
@@ -345,6 +415,8 @@ RTEMS_INLINE_ROUTINE void _Scheduler_Change_priority(
#if defined(RTEMS_SMP)
_Scheduler_Ask_for_help_if_necessary( needs_help );
#endif
+
+ _Scheduler_Release_critical( own_scheduler, &lock_context );
}
/**
@@ -394,13 +466,19 @@ RTEMS_INLINE_ROUTINE void _Scheduler_Update_priority(
Priority_Control new_priority
)
{
- const Scheduler_Control *scheduler = _Scheduler_Get( the_thread );
+ const Scheduler_Control *scheduler;
+ ISR_lock_Context lock_context;
+
+ scheduler = _Scheduler_Get( the_thread );
+ _Scheduler_Acquire_critical( scheduler, &lock_context );
( *scheduler->Operations.update_priority )(
scheduler,
the_thread,
new_priority
);
+
+ _Scheduler_Release_critical( scheduler, &lock_context );
}
/**
@@ -1341,8 +1419,6 @@ RTEMS_INLINE_ROUTINE bool _Scheduler_Ask_blocked_node_for_help(
}
#endif
-ISR_LOCK_DECLARE( extern, _Scheduler_Lock )
-
RTEMS_INLINE_ROUTINE void _Scheduler_Update_heir(
Thread_Control *new_heir,
bool force_dispatch
@@ -1367,36 +1443,6 @@ RTEMS_INLINE_ROUTINE void _Scheduler_Update_heir(
}
}
-/**
- * @brief Acquires the scheduler instance of the thread.
- *
- * @param[in] the_thread The thread.
- * @param[in] lock_context The lock context for _Scheduler_Release().
- */
-RTEMS_INLINE_ROUTINE void _Scheduler_Acquire(
- Thread_Control *the_thread,
- ISR_lock_Context *lock_context
-)
-{
- (void) the_thread;
- _ISR_lock_ISR_disable_and_acquire( &_Scheduler_Lock, lock_context );
-}
-
-/**
- * @brief Releases the scheduler instance of the thread.
- *
- * @param[in] the_thread The thread.
- * @param[in] lock_context The lock context used for _Scheduler_Acquire().
- */
-RTEMS_INLINE_ROUTINE void _Scheduler_Release(
- Thread_Control *the_thread,
- ISR_lock_Context *lock_context
-)
-{
- (void) the_thread;
- _ISR_lock_Release_and_ISR_enable( &_Scheduler_Lock, lock_context );
-}
-
/** @} */
#ifdef __cplusplus
diff --git a/cpukit/score/include/rtems/score/thread.h b/cpukit/score/include/rtems/score/thread.h
index d2a6d4b5bd..15b068d92b 100644
--- a/cpukit/score/include/rtems/score/thread.h
+++ b/cpukit/score/include/rtems/score/thread.h
@@ -718,8 +718,11 @@ struct _Thread_Control {
* The lock of this thread queue is used for various purposes. It protects
* the following fields
*
- * - RTEMS_API_Control::Signal, and
- * - Thread_Control::Post_switch_actions.
+ * - RTEMS_API_Control::Signal,
+ * - Thread_Control::current_state,
+ * - Thread_Control::Post_switch_actions,
+ * - Thread_Control::Scheduler::control, and
+ * - Thread_Control::Scheduler::own_control.
*
* @see _Thread_State_acquire().
*/
diff --git a/cpukit/score/src/schedulercbsunblock.c b/cpukit/score/src/schedulercbsunblock.c
index 9170889491..7898588bf2 100644
--- a/cpukit/score/src/schedulercbsunblock.c
+++ b/cpukit/score/src/schedulercbsunblock.c
@@ -56,7 +56,12 @@ Scheduler_Void_or_thread _Scheduler_CBS_Unblock(
the_thread->real_priority = new_priority;
if ( the_thread->current_priority != new_priority ) {
the_thread->current_priority = new_priority;
- _Scheduler_Change_priority(the_thread, new_priority, true);
+ _Scheduler_EDF_Change_priority(
+ scheduler,
+ the_thread,
+ new_priority,
+ true
+ );
}
}
}
diff --git a/cpukit/score/src/threadchangepriority.c b/cpukit/score/src/threadchangepriority.c
index 35e5e5bfbc..152646f52f 100644
--- a/cpukit/score/src/threadchangepriority.c
+++ b/cpukit/score/src/threadchangepriority.c
@@ -64,7 +64,7 @@ void _Thread_Change_priority(
_Thread_Lock_release( lock, &lock_context );
- _Scheduler_Acquire( the_thread, &lock_context );
+ _Thread_State_acquire( the_thread, &lock_context );
if ( the_thread->priority_generation == my_generation ) {
if ( _States_Is_ready( the_thread->current_state ) ) {
@@ -78,7 +78,7 @@ void _Thread_Change_priority(
}
}
- _Scheduler_Release( the_thread, &lock_context );
+ _Thread_State_release( the_thread, &lock_context );
} else {
_Thread_Lock_release( lock, &lock_context );
}
diff --git a/cpukit/score/src/threadclearstate.c b/cpukit/score/src/threadclearstate.c
index ae54e3aac1..3da3538551 100644
--- a/cpukit/score/src/threadclearstate.c
+++ b/cpukit/score/src/threadclearstate.c
@@ -32,7 +32,7 @@ States_Control _Thread_Clear_state(
_Assert( state != 0 );
- _Scheduler_Acquire( the_thread, &lock_context );
+ _Thread_State_acquire( the_thread, &lock_context );
previous_state = the_thread->current_state;
@@ -47,7 +47,7 @@ States_Control _Thread_Clear_state(
}
}
- _Scheduler_Release( the_thread, &lock_context );
+ _Thread_State_release( the_thread, &lock_context );
return previous_state;
}
diff --git a/cpukit/score/src/threadgetcputimeused.c b/cpukit/score/src/threadgetcputimeused.c
index 6bfe8ea216..9026007cf0 100644
--- a/cpukit/score/src/threadgetcputimeused.c
+++ b/cpukit/score/src/threadgetcputimeused.c
@@ -33,9 +33,13 @@ void _Thread_Get_CPU_time_used(
Timestamp_Control *cpu_time_used
)
{
- ISR_lock_Context lock_context;
+ const Scheduler_Control *scheduler;
+ ISR_lock_Context state_lock_context;
+ ISR_lock_Context scheduler_lock_context;
- _Scheduler_Acquire( the_thread, &lock_context );
+ _Thread_State_acquire( the_thread, &state_lock_context );
+ scheduler = _Scheduler_Get( the_thread );
+ _Scheduler_Acquire_critical( scheduler, &scheduler_lock_context );
if ( _Thread_Is_scheduled( the_thread ) ) {
_Thread_Update_CPU_time_used( the_thread, _Thread_Get_CPU( the_thread ) );
@@ -43,5 +47,6 @@ void _Thread_Get_CPU_time_used(
*cpu_time_used = the_thread->cpu_time_used;
- _Scheduler_Release( the_thread, &lock_context );
+ _Scheduler_Release_critical( scheduler, &scheduler_lock_context );
+ _Thread_State_release( the_thread, &state_lock_context );
}
diff --git a/cpukit/score/src/threadsetstate.c b/cpukit/score/src/threadsetstate.c
index 51128279b1..3aaa463f97 100644
--- a/cpukit/score/src/threadsetstate.c
+++ b/cpukit/score/src/threadsetstate.c
@@ -36,7 +36,7 @@ States_Control _Thread_Set_state(
_Assert( state != 0 );
- _Scheduler_Acquire( the_thread, &lock_context );
+ _Thread_State_acquire( the_thread, &lock_context );
previous_state = the_thread->current_state;
next_state = _States_Set( state, previous_state);
@@ -46,7 +46,7 @@ States_Control _Thread_Set_state(
_Scheduler_Block( the_thread );
}
- _Scheduler_Release( the_thread, &lock_context );
+ _Thread_State_release( the_thread, &lock_context );
return previous_state;
}
diff --git a/cpukit/score/src/threadyield.c b/cpukit/score/src/threadyield.c
index 7f1c175b4a..cfd8118665 100644
--- a/cpukit/score/src/threadyield.c
+++ b/cpukit/score/src/threadyield.c
@@ -31,11 +31,11 @@ void _Thread_Yield( Thread_Control *executing )
{
ISR_lock_Context lock_context;
- _Scheduler_Acquire( executing, &lock_context );
+ _Thread_State_acquire( executing, &lock_context );
if ( _States_Is_ready( executing->current_state ) ) {
_Scheduler_Yield( executing );
}
- _Scheduler_Release( executing, &lock_context );
+ _Thread_State_release( executing, &lock_context );
}