author     Sebastian Huber <sebastian.huber@embedded-brains.de>  2016-05-27 14:43:19 +0200
committer  Sebastian Huber <sebastian.huber@embedded-brains.de>  2016-05-30 16:16:21 +0200
commit     93306058c0417b1c6e950b44ef279e096062dfba (patch)
tree       1a3326d052169d2aec4c4acdd894589d5d84614c
parent     score: Add _Thread_queue_Context_set_MP_callout() (diff)
score: _CORE_mutex_Check_dispatch_for_seize()
Move the safety check performed by _CORE_mutex_Check_dispatch_for_seize() out of the performance critical path and generalize it. Blocking on a thread queue with an unexpected thread dispatch disabled level is illegal in all system states. Add the expected thread dispatch disable level (which may be 1 or 2 depending on the operation) to Thread_queue_Context and use it in _Thread_queue_Enqueue_critical().
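For reference, the caller-side pattern introduced here is the one shown in the updated _Mutex_Obtain() example in threadqimpl.h further down: initialize a Thread_queue_Context, state the thread dispatch disable level expected at enqueue time, and hand the whole context to _Thread_queue_Enqueue_critical() instead of a bare ISR_lock_Context. The sketch below is a minimal illustration only; the Mutex type and the MUTEX_TQ_OPERATIONS operations table are placeholders standing in for a real synchronization object, not part of this patch's API surface.

#include <rtems/score/threadqimpl.h>

/*
 * Illustrative object: "Mutex" and MUTEX_TQ_OPERATIONS stand in for a real
 * synchronization object and its thread queue operations table.
 */
typedef struct {
  Thread_queue_Control  Queue;
  Thread_Control       *owner;
} Mutex;

void _Mutex_Obtain( Mutex *mutex )
{
  Thread_queue_Context queue_context;
  Thread_Control      *executing;

  /*
   * In RTEMS_DEBUG builds the initialization poisons the expected level, so
   * a forgotten _Thread_queue_Context_set_expected_level() shows up as a
   * level mismatch at enqueue time.
   */
  _Thread_queue_Context_initialize( &queue_context );
  _Thread_queue_Acquire( &mutex->Queue, &queue_context.Lock_context );

  executing = _Thread_Executing;

  if ( mutex->owner == NULL ) {
    mutex->owner = executing;
    _Thread_queue_Release( &mutex->Queue, &queue_context.Lock_context );
  } else {
    /*
     * One level of thread dispatch disabling is expected at enqueue time;
     * callers that have already disabled thread dispatching themselves
     * (e.g. the condition variable and region paths in this patch) expect
     * 2 instead.
     */
    _Thread_queue_Context_set_expected_level( &queue_context, 1 );
    _Thread_queue_Enqueue_critical(
      &mutex->Queue.Queue,
      MUTEX_TQ_OPERATIONS,
      executing,
      STATES_WAITING_FOR_MUTEX,
      WATCHDOG_NO_TIMEOUT,
      &queue_context
    );
  }
}

The check itself lands in _Thread_queue_Enqueue_critical() (threadqenqueue.c below): if the actual thread dispatch disable level does not match the expected one, the system terminates with INTERNAL_ERROR_THREAD_QUEUE_ENQUEUE_FROM_BAD_STATE, which replaces INTERNAL_ERROR_MUTEX_OBTAIN_FROM_BAD_STATE.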
-rw-r--r--  cpukit/posix/include/rtems/posix/psignalimpl.h | 24
-rw-r--r--  cpukit/posix/src/condwaitsupp.c | 3
-rw-r--r--  cpukit/posix/src/killinfo.c | 9
-rw-r--r--  cpukit/posix/src/nanosleep.c | 3
-rw-r--r--  cpukit/posix/src/psignalclearsignals.c | 7
-rw-r--r--  cpukit/posix/src/psignalsetprocesssignals.c | 7
-rw-r--r--  cpukit/posix/src/psignalunblockthread.c | 15
-rw-r--r--  cpukit/posix/src/pthreadjoin.c | 26
-rw-r--r--  cpukit/posix/src/sigaction.c | 7
-rw-r--r--  cpukit/posix/src/sigtimedwait.c | 26
-rw-r--r--  cpukit/rtems/src/regiongetsegment.c | 3
-rw-r--r--  cpukit/sapi/src/interrtext.c | 2
-rw-r--r--  cpukit/score/include/rtems/score/coremuteximpl.h | 33
-rw-r--r--  cpukit/score/include/rtems/score/coresemimpl.h | 3
-rw-r--r--  cpukit/score/include/rtems/score/interr.h | 2
-rw-r--r--  cpukit/score/include/rtems/score/threadimpl.h | 8
-rw-r--r--  cpukit/score/include/rtems/score/threadq.h | 9
-rw-r--r--  cpukit/score/include/rtems/score/threadqimpl.h | 50
-rw-r--r--  cpukit/score/src/condition.c | 88
-rw-r--r--  cpukit/score/src/corebarrierwait.c | 3
-rw-r--r--  cpukit/score/src/coremsgseize.c | 3
-rw-r--r--  cpukit/score/src/coremsgsubmit.c | 3
-rw-r--r--  cpukit/score/src/coremutexseize.c | 23
-rw-r--r--  cpukit/score/src/corerwlockobtainread.c | 3
-rw-r--r--  cpukit/score/src/corerwlockobtainwrite.c | 3
-rw-r--r--  cpukit/score/src/futex.c | 36
-rw-r--r--  cpukit/score/src/mpci.c | 3
-rw-r--r--  cpukit/score/src/mutex.c | 125
-rw-r--r--  cpukit/score/src/semaphore.c | 37
-rw-r--r--  cpukit/score/src/threadqenqueue.c | 17
-rw-r--r--  cpukit/score/src/threadrestart.c | 18
-rw-r--r--  testsuites/sptests/spfatal03/testcase.h | 6
32 files changed, 347 insertions, 258 deletions
diff --git a/cpukit/posix/include/rtems/posix/psignalimpl.h b/cpukit/posix/include/rtems/posix/psignalimpl.h
index 3b76fc3809..62c1a8546c 100644
--- a/cpukit/posix/include/rtems/posix/psignalimpl.h
+++ b/cpukit/posix/include/rtems/posix/psignalimpl.h
@@ -73,11 +73,25 @@ extern Chain_Control _POSIX_signals_Siginfo[ SIG_ARRAY_MAX ];
* Internal routines
*/
-#define _POSIX_signals_Acquire( lock_context ) \
- _Thread_queue_Acquire( &_POSIX_signals_Wait_queue, lock_context )
-
-#define _POSIX_signals_Release( lock_context ) \
- _Thread_queue_Release( &_POSIX_signals_Wait_queue, lock_context )
+RTEMS_INLINE_ROUTINE void _POSIX_signals_Acquire(
+ Thread_queue_Context *queue_context
+)
+{
+ _Thread_queue_Acquire(
+ &_POSIX_signals_Wait_queue,
+ &queue_context->Lock_context
+ );
+}
+
+RTEMS_INLINE_ROUTINE void _POSIX_signals_Release(
+ Thread_queue_Context *queue_context
+)
+{
+ _Thread_queue_Release(
+ &_POSIX_signals_Wait_queue,
+ &queue_context->Lock_context
+ );
+}
/**
* @brief Unlock POSIX signals thread.
diff --git a/cpukit/posix/src/condwaitsupp.c b/cpukit/posix/src/condwaitsupp.c
index 9270c36393..7dff27f161 100644
--- a/cpukit/posix/src/condwaitsupp.c
+++ b/cpukit/posix/src/condwaitsupp.c
@@ -86,13 +86,14 @@ int _POSIX_Condition_variables_Wait_support(
}
if ( !already_timedout ) {
+ _Thread_queue_Context_set_expected_level( &queue_context, 2 );
_Thread_queue_Enqueue_critical(
&the_cond->Wait_queue.Queue,
POSIX_CONDITION_VARIABLES_TQ_OPERATIONS,
executing,
STATES_WAITING_FOR_CONDITION_VARIABLE,
timeout,
- &queue_context.Lock_context
+ &queue_context
);
} else {
_POSIX_Condition_variables_Release( the_cond, &queue_context );
diff --git a/cpukit/posix/src/killinfo.c b/cpukit/posix/src/killinfo.c
index b16b408825..33754af682 100644
--- a/cpukit/posix/src/killinfo.c
+++ b/cpukit/posix/src/killinfo.c
@@ -75,7 +75,7 @@ int _POSIX_signals_Send(
siginfo_t *siginfo;
POSIX_signals_Siginfo_node *psiginfo;
Thread_queue_Heads *heads;
- ISR_lock_Context lock_context;
+ Thread_queue_Context queue_context;
Per_CPU_Control *cpu_self;
/*
@@ -334,14 +334,15 @@ post_process_signal:
*/
_POSIX_signals_Set_process_signals( mask );
- _POSIX_signals_Acquire( &lock_context );
+ _Thread_queue_Context_initialize( &queue_context );
+ _POSIX_signals_Acquire( &queue_context );
if ( _POSIX_signals_Vectors[ sig ].sa_flags == SA_SIGINFO ) {
psiginfo = (POSIX_signals_Siginfo_node *)
_Chain_Get_unprotected( &_POSIX_signals_Inactive_siginfo );
if ( !psiginfo ) {
- _POSIX_signals_Release( &lock_context );
+ _POSIX_signals_Release( &queue_context );
_Thread_Dispatch_enable( cpu_self );
rtems_set_errno_and_return_minus_one( EAGAIN );
}
@@ -354,7 +355,7 @@ post_process_signal:
);
}
- _POSIX_signals_Release( &lock_context );
+ _POSIX_signals_Release( &queue_context );
DEBUG_STEP("\n");
_Thread_Dispatch_enable( cpu_self );
return 0;
diff --git a/cpukit/posix/src/nanosleep.c b/cpukit/posix/src/nanosleep.c
index 21b0d15a2a..e0e1b2676e 100644
--- a/cpukit/posix/src/nanosleep.c
+++ b/cpukit/posix/src/nanosleep.c
@@ -92,7 +92,8 @@ int nanosleep(
&_Thread_queue_Operations_FIFO,
executing,
STATES_DELAYING | STATES_INTERRUPTIBLE_BY_SIGNAL,
- ticks
+ ticks,
+ 1
);
/*
diff --git a/cpukit/posix/src/psignalclearsignals.c b/cpukit/posix/src/psignalclearsignals.c
index c7852554e5..39ea41c77c 100644
--- a/cpukit/posix/src/psignalclearsignals.c
+++ b/cpukit/posix/src/psignalclearsignals.c
@@ -47,7 +47,7 @@ bool _POSIX_signals_Clear_signals(
{
sigset_t mask;
sigset_t signals_unblocked;
- ISR_lock_Context lock_context;
+ Thread_queue_Context queue_context;
bool do_callout;
POSIX_signals_Siginfo_node *psiginfo;
@@ -68,7 +68,8 @@ bool _POSIX_signals_Clear_signals(
/* XXX are we sure they can be cleared the same way? */
if ( do_signals_acquire_release ) {
- _POSIX_signals_Acquire( &lock_context );
+ _Thread_queue_Context_initialize( &queue_context );
+ _POSIX_signals_Acquire( &queue_context );
}
if ( is_global ) {
@@ -102,7 +103,7 @@ bool _POSIX_signals_Clear_signals(
}
if ( do_signals_acquire_release ) {
- _POSIX_signals_Release( &lock_context );
+ _POSIX_signals_Release( &queue_context );
}
return do_callout;
diff --git a/cpukit/posix/src/psignalsetprocesssignals.c b/cpukit/posix/src/psignalsetprocesssignals.c
index 8a2586435c..b755c2db89 100644
--- a/cpukit/posix/src/psignalsetprocesssignals.c
+++ b/cpukit/posix/src/psignalsetprocesssignals.c
@@ -36,9 +36,10 @@ void _POSIX_signals_Set_process_signals(
sigset_t mask
)
{
- ISR_lock_Context lock_context;
+ Thread_queue_Context queue_context;
- _POSIX_signals_Acquire( &lock_context );
+ _Thread_queue_Context_initialize( &queue_context );
+ _POSIX_signals_Acquire( &queue_context );
_POSIX_signals_Pending |= mask;
- _POSIX_signals_Release( &lock_context );
+ _POSIX_signals_Release( &queue_context );
}
diff --git a/cpukit/posix/src/psignalunblockthread.c b/cpukit/posix/src/psignalunblockthread.c
index b4475b268d..d75e454655 100644
--- a/cpukit/posix/src/psignalunblockthread.c
+++ b/cpukit/posix/src/psignalunblockthread.c
@@ -96,9 +96,9 @@ static void _POSIX_signals_Action_handler(
ISR_lock_Context *lock_context
)
{
- POSIX_API_Control *api;
- int signo;
- uint32_t hold_errno;
+ POSIX_API_Control *api;
+ int signo;
+ uint32_t hold_errno;
(void) action;
_Thread_State_release( executing, lock_context );
@@ -135,13 +135,16 @@ static void _POSIX_signals_Action_handler(
* processed at all. No point in doing this loop otherwise.
*/
while (1) {
- _POSIX_signals_Acquire( lock_context );
+ Thread_queue_Context queue_context;
+
+ _Thread_queue_Context_initialize( &queue_context );
+ _POSIX_signals_Acquire( &queue_context );
if ( !(api->signals_unblocked &
(api->signals_pending | _POSIX_signals_Pending)) ) {
- _POSIX_signals_Release( lock_context );
+ _POSIX_signals_Release( &queue_context );
break;
}
- _POSIX_signals_Release( lock_context );
+ _POSIX_signals_Release( &queue_context );
for ( signo = SIGRTMIN ; signo <= SIGRTMAX ; signo++ ) {
_POSIX_signals_Check_signal( api, signo, false );
diff --git a/cpukit/posix/src/pthreadjoin.c b/cpukit/posix/src/pthreadjoin.c
index f4a0676d6b..86b805132a 100644
--- a/cpukit/posix/src/pthreadjoin.c
+++ b/cpukit/posix/src/pthreadjoin.c
@@ -32,13 +32,15 @@
static int _POSIX_Threads_Join( pthread_t thread, void **value_ptr )
{
- Thread_Control *the_thread;
- ISR_lock_Context lock_context;
- Per_CPU_Control *cpu_self;
- Thread_Control *executing;
- void *value;
+ Thread_Control *the_thread;
+ Thread_queue_Context queue_context;
+ Per_CPU_Control *cpu_self;
+ Thread_Control *executing;
+ void *value;
- the_thread = _Thread_Get( thread, &lock_context );
+ _Thread_queue_Context_initialize( &queue_context );
+ _Thread_queue_Context_set_expected_level( &queue_context, 1 );
+ the_thread = _Thread_Get( thread, &queue_context.Lock_context );
if ( the_thread == NULL ) {
return ESRCH;
@@ -48,29 +50,29 @@ static int _POSIX_Threads_Join( pthread_t thread, void **value_ptr )
executing = _Per_CPU_Get_executing( cpu_self );
if ( executing == the_thread ) {
- _ISR_lock_ISR_enable( &lock_context );
+ _ISR_lock_ISR_enable( &queue_context.Lock_context );
return EDEADLK;
}
- _Thread_State_acquire_critical( the_thread, &lock_context );
+ _Thread_State_acquire_critical( the_thread, &queue_context.Lock_context );
if ( !_Thread_Is_joinable( the_thread ) ) {
- _Thread_State_release( the_thread, &lock_context );
+ _Thread_State_release( the_thread, &queue_context.Lock_context );
return EINVAL;
}
if ( _States_Is_waiting_for_join_at_exit( the_thread->current_state ) ) {
value = the_thread->Life.exit_value;
_Thread_Clear_state_locked( the_thread, STATES_WAITING_FOR_JOIN_AT_EXIT );
- _Thread_Dispatch_disable_with_CPU( cpu_self, &lock_context );
- _Thread_State_release( the_thread, &lock_context );
+ _Thread_Dispatch_disable_with_CPU( cpu_self, &queue_context.Lock_context );
+ _Thread_State_release( the_thread, &queue_context.Lock_context );
_Thread_Dispatch_enable( cpu_self );
} else {
_Thread_Join(
the_thread,
STATES_INTERRUPTIBLE_BY_SIGNAL | STATES_WAITING_FOR_JOIN,
executing,
- &lock_context
+ &queue_context
);
if ( _POSIX_Get_error_after_wait( executing ) != 0 ) {
diff --git a/cpukit/posix/src/sigaction.c b/cpukit/posix/src/sigaction.c
index 177dcd19ab..26df98d394 100644
--- a/cpukit/posix/src/sigaction.c
+++ b/cpukit/posix/src/sigaction.c
@@ -33,7 +33,7 @@ int sigaction(
struct sigaction *__restrict oact
)
{
- ISR_lock_Context lock_context;
+ Thread_queue_Context queue_context;
if ( !sig )
rtems_set_errno_and_return_minus_one( EINVAL );
@@ -51,7 +51,8 @@ int sigaction(
if ( sig == SIGKILL )
rtems_set_errno_and_return_minus_one( EINVAL );
- _POSIX_signals_Acquire( &lock_context );
+ _Thread_queue_Context_initialize( &queue_context );
+ _POSIX_signals_Acquire( &queue_context );
if ( oact )
*oact = _POSIX_signals_Vectors[ sig ];
@@ -76,7 +77,7 @@ int sigaction(
}
}
- _POSIX_signals_Release( &lock_context );
+ _POSIX_signals_Release( &queue_context );
return 0;
}
diff --git a/cpukit/posix/src/sigtimedwait.c b/cpukit/posix/src/sigtimedwait.c
index ddc2884d68..7855bb0080 100644
--- a/cpukit/posix/src/sigtimedwait.c
+++ b/cpukit/posix/src/sigtimedwait.c
@@ -69,14 +69,14 @@ int sigtimedwait(
const struct timespec *__restrict timeout
)
{
- Thread_Control *executing;
- POSIX_API_Control *api;
- Watchdog_Interval interval;
- siginfo_t signal_information;
- siginfo_t *the_info;
- int signo;
- ISR_lock_Context lock_context;
- int error;
+ Thread_Control *executing;
+ POSIX_API_Control *api;
+ Watchdog_Interval interval;
+ siginfo_t signal_information;
+ siginfo_t *the_info;
+ int signo;
+ Thread_queue_Context queue_context;
+ int error;
/*
* Error check parameters before disabling interrupts.
@@ -115,7 +115,8 @@ int sigtimedwait(
/* API signals pending? */
- _POSIX_signals_Acquire( &lock_context );
+ _Thread_queue_Context_initialize( &queue_context );
+ _POSIX_signals_Acquire( &queue_context );
if ( *set & api->signals_pending ) {
/* XXX real info later */
the_info->si_signo = _POSIX_signals_Get_lowest( api->signals_pending );
@@ -127,7 +128,7 @@ int sigtimedwait(
false,
false
);
- _POSIX_signals_Release( &lock_context );
+ _POSIX_signals_Release( &queue_context );
the_info->si_code = SI_USER;
the_info->si_value.sival_int = 0;
@@ -139,7 +140,7 @@ int sigtimedwait(
if ( *set & _POSIX_signals_Pending ) {
signo = _POSIX_signals_Get_lowest( _POSIX_signals_Pending );
_POSIX_signals_Clear_signals( api, signo, the_info, true, false, false );
- _POSIX_signals_Release( &lock_context );
+ _POSIX_signals_Release( &queue_context );
the_info->si_signo = signo;
the_info->si_code = SI_USER;
@@ -151,13 +152,14 @@ int sigtimedwait(
executing->Wait.option = *set;
executing->Wait.return_argument = the_info;
+ _Thread_queue_Context_set_expected_level( &queue_context, 1 );
_Thread_queue_Enqueue_critical(
&_POSIX_signals_Wait_queue.Queue,
POSIX_SIGNALS_TQ_OPERATIONS,
executing,
STATES_WAITING_FOR_SIGNAL | STATES_INTERRUPTIBLE_BY_SIGNAL,
interval,
- &lock_context
+ &queue_context
);
/*
diff --git a/cpukit/rtems/src/regiongetsegment.c b/cpukit/rtems/src/regiongetsegment.c
index fe2b9e78ea..cfc664580c 100644
--- a/cpukit/rtems/src/regiongetsegment.c
+++ b/cpukit/rtems/src/regiongetsegment.c
@@ -86,7 +86,8 @@ rtems_status_code rtems_region_get_segment(
the_region->wait_operations,
executing,
STATES_WAITING_FOR_SEGMENT,
- timeout
+ timeout,
+ 2
);
_Thread_Dispatch_enable( cpu_self );
diff --git a/cpukit/sapi/src/interrtext.c b/cpukit/sapi/src/interrtext.c
index 3a0681df41..3ae7315f85 100644
--- a/cpukit/sapi/src/interrtext.c
+++ b/cpukit/sapi/src/interrtext.c
@@ -45,7 +45,7 @@ static const char *const internal_error_text[] = {
"INTERNAL_ERROR_BAD_ATTRIBUTES",
"INTERNAL_ERROR_IMPLEMENTATION_KEY_CREATE_INCONSISTENCY",
"OBSOLETE_INTERNAL_ERROR_IMPLEMENTATION_BLOCKING_OPERATION_CANCEL",
- "INTERNAL_ERROR_MUTEX_OBTAIN_FROM_BAD_STATE",
+ "INTERNAL_ERROR_THREAD_QUEUE_ENQUEUE_FROM_BAD_STATE",
"INTERNAL_ERROR_UNLIMITED_AND_MAXIMUM_IS_0",
"OBSOLETE_INTERNAL_ERROR_SHUTDOWN_WHEN_NOT_UP",
"INTERNAL_ERROR_GXX_KEY_ADD_FAILED",
diff --git a/cpukit/score/include/rtems/score/coremuteximpl.h b/cpukit/score/include/rtems/score/coremuteximpl.h
index f5faf9517a..e29d4b7f5d 100644
--- a/cpukit/score/include/rtems/score/coremuteximpl.h
+++ b/cpukit/score/include/rtems/score/coremuteximpl.h
@@ -21,7 +21,6 @@
#include <rtems/score/coremutex.h>
#include <rtems/score/chainimpl.h>
#include <rtems/score/status.h>
-#include <rtems/score/sysstate.h>
#include <rtems/score/threadimpl.h>
#include <rtems/score/threadqimpl.h>
@@ -95,27 +94,13 @@ RTEMS_INLINE_ROUTINE void _CORE_mutex_Release(
* @param[in] lock_context is the interrupt level
*/
Status_Control _CORE_mutex_Seize_interrupt_blocking(
- CORE_mutex_Control *the_mutex,
- Thread_Control *executing,
- Watchdog_Interval timeout,
- ISR_lock_Context *lock_context
+ CORE_mutex_Control *the_mutex,
+ Thread_Control *executing,
+ Watchdog_Interval timeout,
+ Thread_queue_Context *queue_context
);
/**
- * @brief Verifies that a mutex blocking seize is performed safely.
- *
- * This macro is to verify that a mutex blocking seize is
- * performed from a safe system state. For example, one
- * cannot block inside an isr.
- *
- * @retval this method returns true if dispatch is in an unsafe state.
- */
-#define _CORE_mutex_Check_dispatch_for_seize(_wait) \
- (!_Thread_Dispatch_is_enabled() \
- && (_wait) \
- && (_System_state_Get() >= SYSTEM_STATE_UP))
-
-/**
* @brief Is mutex locked.
*
* This routine returns true if the mutex specified is locked and false
@@ -301,14 +286,6 @@ RTEMS_INLINE_ROUTINE Status_Control _CORE_mutex_Seize(
{
Status_Control status;
- if ( _CORE_mutex_Check_dispatch_for_seize( wait ) ) {
- _Terminate(
- INTERNAL_ERROR_CORE,
- false,
- INTERNAL_ERROR_MUTEX_OBTAIN_FROM_BAD_STATE
- );
- }
-
_CORE_mutex_Acquire_critical( the_mutex, queue_context );
status = _CORE_mutex_Seize_interrupt_trylock(
@@ -330,7 +307,7 @@ RTEMS_INLINE_ROUTINE Status_Control _CORE_mutex_Seize(
the_mutex,
executing,
timeout,
- &queue_context->Lock_context
+ queue_context
);
}
diff --git a/cpukit/score/include/rtems/score/coresemimpl.h b/cpukit/score/include/rtems/score/coresemimpl.h
index ac90f20b7d..a55089e399 100644
--- a/cpukit/score/include/rtems/score/coresemimpl.h
+++ b/cpukit/score/include/rtems/score/coresemimpl.h
@@ -204,13 +204,14 @@ RTEMS_INLINE_ROUTINE Status_Control _CORE_semaphore_Seize(
return STATUS_UNSATISFIED;
}
+ _Thread_queue_Context_set_expected_level( queue_context, 1 );
_Thread_queue_Enqueue_critical(
&the_semaphore->Wait_queue.Queue,
the_semaphore->operations,
executing,
STATES_WAITING_FOR_SEMAPHORE,
timeout,
- &queue_context->Lock_context
+ queue_context
);
return _Thread_Wait_get_status( executing );
}
diff --git a/cpukit/score/include/rtems/score/interr.h b/cpukit/score/include/rtems/score/interr.h
index ea468e035e..ca48db2c06 100644
--- a/cpukit/score/include/rtems/score/interr.h
+++ b/cpukit/score/include/rtems/score/interr.h
@@ -154,7 +154,7 @@ typedef enum {
INTERNAL_ERROR_BAD_ATTRIBUTES,
INTERNAL_ERROR_IMPLEMENTATION_KEY_CREATE_INCONSISTENCY,
OBSOLETE_INTERNAL_ERROR_IMPLEMENTATION_BLOCKING_OPERATION_CANCEL,
- INTERNAL_ERROR_MUTEX_OBTAIN_FROM_BAD_STATE,
+ INTERNAL_ERROR_THREAD_QUEUE_ENQUEUE_FROM_BAD_STATE,
INTERNAL_ERROR_UNLIMITED_AND_MAXIMUM_IS_0,
OBSOLETE_INTERNAL_ERROR_SHUTDOWN_WHEN_NOT_UP,
INTERNAL_ERROR_GXX_KEY_ADD_FAILED,
diff --git a/cpukit/score/include/rtems/score/threadimpl.h b/cpukit/score/include/rtems/score/threadimpl.h
index 10811088c9..d10221234c 100644
--- a/cpukit/score/include/rtems/score/threadimpl.h
+++ b/cpukit/score/include/rtems/score/threadimpl.h
@@ -232,10 +232,10 @@ void _Thread_Exit(
);
void _Thread_Join(
- Thread_Control *the_thread,
- States_Control waiting_for_join,
- Thread_Control *executing,
- ISR_lock_Context *lock_context
+ Thread_Control *the_thread,
+ States_Control waiting_for_join,
+ Thread_Control *executing,
+ Thread_queue_Context *queue_context
);
void _Thread_Cancel(
diff --git a/cpukit/score/include/rtems/score/threadq.h b/cpukit/score/include/rtems/score/threadq.h
index 5f2ffe49a9..a4e5292f34 100644
--- a/cpukit/score/include/rtems/score/threadq.h
+++ b/cpukit/score/include/rtems/score/threadq.h
@@ -70,6 +70,15 @@ typedef struct {
ISR_lock_Context Lock_context;
/**
+ * @brief The expected thread dispatch disable level for
+ * _Thread_queue_Enqueue_critical().
+ *
+ * In case the actual thread dispatch disable level is not equal to the
+ * expected level, then a fatal error occurs.
+ */
+ uint32_t expected_thread_dispatch_disable_level;
+
+ /**
* @brief Callout to unblock the thread in case it is actually a thread
* proxy.
*
diff --git a/cpukit/score/include/rtems/score/threadqimpl.h b/cpukit/score/include/rtems/score/threadqimpl.h
index 4f5b48b6eb..73d4de2032 100644
--- a/cpukit/score/include/rtems/score/threadqimpl.h
+++ b/cpukit/score/include/rtems/score/threadqimpl.h
@@ -62,14 +62,35 @@ RTEMS_INLINE_ROUTINE void _Thread_queue_Context_initialize(
Thread_queue_Context *queue_context
)
{
-#if defined(RTEMS_MULTIPROCESSING) && defined(RTEMS_DEBUG)
+#if defined(RTEMS_DEBUG)
+ queue_context->expected_thread_dispatch_disable_level = 0xdeadbeef;
+#if defined(RTEMS_MULTIPROCESSING)
queue_context->mp_callout = NULL;
+#endif
#else
(void) queue_context;
#endif
}
/**
+ * @brief Sets the expected thread dispatch disable level in the thread queue
+ * context.
+ *
+ * @param queue_context The thread queue context.
+ * @param expected_level The expected thread dispatch disable level.
+ *
+ * @see _Thread_queue_Enqueue_critical().
+ */
+RTEMS_INLINE_ROUTINE void
+_Thread_queue_Context_set_expected_level(
+ Thread_queue_Context *queue_context,
+ uint32_t expected_level
+)
+{
+ queue_context->expected_thread_dispatch_disable_level = expected_level;
+}
+
+/**
* @brief Sets the MP callout in the thread queue context.
*
* @param queue_context The thread queue context.
@@ -309,17 +330,19 @@ Thread_Control *_Thread_queue_Do_dequeue(
*
* void _Mutex_Obtain( Mutex *mutex )
* {
- * ISR_lock_Context lock_context;
- * Thread_Control *executing;
+ * Thread_queue_Context queue_context;
+ * Thread_Control *executing;
*
- * _Thread_queue_Acquire( &mutex->Queue, &lock_context );
+ * _Thread_queue_Context_initialize( &queue_context );
+ * _Thread_queue_Acquire( &mutex->Queue, &queue_context.Lock_context );
*
* executing = _Thread_Executing;
*
* if ( mutex->owner == NULL ) {
* mutex->owner = executing;
- * _Thread_queue_Release( &mutex->Queue, &lock_context );
+ * _Thread_queue_Release( &mutex->Queue, &queue_context.Lock_context );
* } else {
+ * _Thread_queue_Context_set_expected_level( &queue_context, 1 );
* _Thread_queue_Enqueue_critical(
* &mutex->Queue.Queue,
* MUTEX_TQ_OPERATIONS,
@@ -327,7 +350,7 @@ Thread_Control *_Thread_queue_Do_dequeue(
* STATES_WAITING_FOR_MUTEX,
* WATCHDOG_NO_TIMEOUT,
* 0,
- * &lock_context
+ * &queue_context
* );
* }
* }
@@ -339,7 +362,7 @@ Thread_Control *_Thread_queue_Do_dequeue(
* @param[in] state The new state of the thread.
* @param[in] timeout Interval to wait. Use WATCHDOG_NO_TIMEOUT to block
* potentially forever.
- * @param[in] lock_context The lock context of the lock acquire.
+ * @param[in] queue_context The thread queue context of the lock acquire.
*/
void _Thread_queue_Enqueue_critical(
Thread_queue_Queue *queue,
@@ -347,7 +370,7 @@ void _Thread_queue_Enqueue_critical(
Thread_Control *the_thread,
States_Control state,
Watchdog_Interval timeout,
- ISR_lock_Context *lock_context
+ Thread_queue_Context *queue_context
);
/**
@@ -359,19 +382,22 @@ RTEMS_INLINE_ROUTINE void _Thread_queue_Enqueue(
const Thread_queue_Operations *operations,
Thread_Control *the_thread,
States_Control state,
- Watchdog_Interval timeout
+ Watchdog_Interval timeout,
+ uint32_t expected_level
)
{
- ISR_lock_Context lock_context;
+ Thread_queue_Context queue_context;
- _Thread_queue_Acquire( the_thread_queue, &lock_context );
+ _Thread_queue_Context_initialize( &queue_context );
+ _Thread_queue_Acquire( the_thread_queue, &queue_context.Lock_context );
+ _Thread_queue_Context_set_expected_level( &queue_context, expected_level );
_Thread_queue_Enqueue_critical(
&the_thread_queue->Queue,
operations,
the_thread,
state,
timeout,
- &lock_context
+ &queue_context
);
}
diff --git a/cpukit/score/src/condition.c b/cpukit/score/src/condition.c
index fae150a7cd..e7d98058a0 100644
--- a/cpukit/score/src/condition.c
+++ b/cpukit/score/src/condition.c
@@ -53,8 +53,8 @@ static Condition_Control *_Condition_Get(
}
static Thread_Control *_Condition_Queue_acquire_critical(
- Condition_Control *condition,
- ISR_lock_Context *lock_context
+ Condition_Control *condition,
+ Thread_queue_Context *queue_context
)
{
Thread_Control *executing;
@@ -63,24 +63,27 @@ static Thread_Control *_Condition_Queue_acquire_critical(
_Thread_queue_Queue_acquire_critical(
&condition->Queue.Queue,
&executing->Potpourri_stats,
- lock_context
+ &queue_context->Lock_context
);
return executing;
}
static void _Condition_Queue_release(
- Condition_Control *condition,
- ISR_lock_Context *lock_context
+ Condition_Control *condition,
+ Thread_queue_Context *queue_context
)
{
- _Thread_queue_Queue_release( &condition->Queue.Queue, lock_context );
+ _Thread_queue_Queue_release(
+ &condition->Queue.Queue,
+ &queue_context->Lock_context
+ );
}
static Per_CPU_Control *_Condition_Do_wait(
struct _Condition_Control *_condition,
Watchdog_Interval timeout,
- ISR_lock_Context *lock_context
+ Thread_queue_Context *queue_context
)
{
Condition_Control *condition;
@@ -88,16 +91,17 @@ static Per_CPU_Control *_Condition_Do_wait(
Per_CPU_Control *cpu_self;
condition = _Condition_Get( _condition );
- executing = _Condition_Queue_acquire_critical( condition, lock_context );
- cpu_self = _Thread_Dispatch_disable_critical( lock_context );
+ executing = _Condition_Queue_acquire_critical( condition, queue_context );
+ cpu_self = _Thread_Dispatch_disable_critical( &queue_context->Lock_context );
+ _Thread_queue_Context_set_expected_level( queue_context, 2 );
_Thread_queue_Enqueue_critical(
&condition->Queue.Queue,
CONDITION_TQ_OPERATIONS,
executing,
STATES_WAITING_FOR_SYS_LOCK_CONDITION,
timeout,
- lock_context
+ queue_context
);
return cpu_self;
@@ -108,11 +112,12 @@ void _Condition_Wait(
struct _Mutex_Control *_mutex
)
{
- ISR_lock_Context lock_context;
- Per_CPU_Control *cpu_self;
+ Thread_queue_Context queue_context;
+ Per_CPU_Control *cpu_self;
- _ISR_lock_ISR_disable( &lock_context );
- cpu_self = _Condition_Do_wait( _condition, 0, &lock_context );
+ _Thread_queue_Context_initialize( &queue_context );
+ _ISR_lock_ISR_disable( &queue_context.Lock_context );
+ cpu_self = _Condition_Do_wait( _condition, 0, &queue_context );
_Mutex_Release( _mutex );
_Thread_Dispatch_enable( cpu_self );
@@ -125,27 +130,28 @@ int _Condition_Wait_timed(
const struct timespec *abstime
)
{
- ISR_lock_Context lock_context;
- Per_CPU_Control *cpu_self;
- Thread_Control *executing;
- int eno;
- Watchdog_Interval ticks;
+ Thread_queue_Context queue_context;
+ Per_CPU_Control *cpu_self;
+ Thread_Control *executing;
+ int eno;
+ Watchdog_Interval ticks;
- _ISR_lock_ISR_disable( &lock_context );
+ _Thread_queue_Context_initialize( &queue_context );
+ _ISR_lock_ISR_disable( &queue_context.Lock_context );
switch ( _TOD_Absolute_timeout_to_ticks( abstime, &ticks ) ) {
case TOD_ABSOLUTE_TIMEOUT_INVALID:
- _ISR_lock_ISR_enable( &lock_context );
+ _ISR_lock_ISR_enable( &queue_context.Lock_context );
return EINVAL;
case TOD_ABSOLUTE_TIMEOUT_IS_IN_PAST:
case TOD_ABSOLUTE_TIMEOUT_IS_NOW:
- _ISR_lock_ISR_enable( &lock_context );
+ _ISR_lock_ISR_enable( &queue_context.Lock_context );
return ETIMEDOUT;
default:
break;
}
- cpu_self = _Condition_Do_wait( _condition, ticks, &lock_context );
+ cpu_self = _Condition_Do_wait( _condition, ticks, &queue_context );
_Mutex_Release( _mutex );
executing = cpu_self->executing;
@@ -161,12 +167,13 @@ void _Condition_Wait_recursive(
struct _Mutex_recursive_Control *_mutex
)
{
- ISR_lock_Context lock_context;
- Per_CPU_Control *cpu_self;
- unsigned int nest_level;
+ Thread_queue_Context queue_context;
+ Per_CPU_Control *cpu_self;
+ unsigned int nest_level;
- _ISR_lock_ISR_disable( &lock_context );
- cpu_self = _Condition_Do_wait( _condition, 0, &lock_context );
+ _Thread_queue_Context_initialize( &queue_context );
+ _ISR_lock_ISR_disable( &queue_context.Lock_context );
+ cpu_self = _Condition_Do_wait( _condition, 0, &queue_context );
nest_level = _mutex->_nest_level;
_mutex->_nest_level = 0;
@@ -182,28 +189,29 @@ int _Condition_Wait_recursive_timed(
const struct timespec *abstime
)
{
- ISR_lock_Context lock_context;
- Per_CPU_Control *cpu_self;
- Thread_Control *executing;
- int eno;
- unsigned int nest_level;
- Watchdog_Interval ticks;
+ Thread_queue_Context queue_context;
+ Per_CPU_Control *cpu_self;
+ Thread_Control *executing;
+ int eno;
+ unsigned int nest_level;
+ Watchdog_Interval ticks;
- _ISR_lock_ISR_disable( &lock_context );
+ _Thread_queue_Context_initialize( &queue_context );
+ _ISR_lock_ISR_disable( &queue_context.Lock_context );
switch ( _TOD_Absolute_timeout_to_ticks( abstime, &ticks ) ) {
case TOD_ABSOLUTE_TIMEOUT_INVALID:
- _ISR_lock_ISR_enable( &lock_context );
+ _ISR_lock_ISR_enable( &queue_context.Lock_context );
return EINVAL;
case TOD_ABSOLUTE_TIMEOUT_IS_IN_PAST:
case TOD_ABSOLUTE_TIMEOUT_IS_NOW:
- _ISR_lock_ISR_enable( &lock_context );
+ _ISR_lock_ISR_enable( &queue_context.Lock_context );
return ETIMEDOUT;
default:
break;
}
- cpu_self = _Condition_Do_wait( _condition, ticks, &lock_context );
+ cpu_self = _Condition_Do_wait( _condition, ticks, &queue_context );
nest_level = _mutex->_nest_level;
_mutex->_nest_level = 0;
@@ -249,14 +257,14 @@ static void _Condition_Wake( struct _Condition_Control *_condition, int count )
condition = _Condition_Get( _condition );
_Thread_queue_Context_initialize( &context.Base );
_ISR_lock_ISR_disable( &context.Base.Lock_context );
- _Condition_Queue_acquire_critical( condition, &context.Base.Lock_context );
+ _Condition_Queue_acquire_critical( condition, &context.Base );
/*
* In common uses cases of condition variables there are normally no threads
* on the queue, so check this condition early.
*/
if ( __predict_true( _Thread_queue_Is_empty( &condition->Queue.Queue ) ) ) {
- _Condition_Queue_release( condition, &context.Base.Lock_context );
+ _Condition_Queue_release( condition, &context.Base );
return;
}
diff --git a/cpukit/score/src/corebarrierwait.c b/cpukit/score/src/corebarrierwait.c
index a1c862d293..d939acf69b 100644
--- a/cpukit/score/src/corebarrierwait.c
+++ b/cpukit/score/src/corebarrierwait.c
@@ -45,13 +45,14 @@ Status_Control _CORE_barrier_Seize(
return STATUS_BARRIER_AUTOMATICALLY_RELEASED;
} else {
the_barrier->number_of_waiting_threads = number_of_waiting_threads;
+ _Thread_queue_Context_set_expected_level( queue_context, 1 );
_Thread_queue_Enqueue_critical(
&the_barrier->Wait_queue.Queue,
CORE_BARRIER_TQ_OPERATIONS,
executing,
STATES_WAITING_FOR_BARRIER,
timeout,
- &queue_context->Lock_context
+ queue_context
);
return _Thread_Wait_get_status( executing );
}
diff --git a/cpukit/score/src/coremsgseize.c b/cpukit/score/src/coremsgseize.c
index 00ff437e01..6906328039 100644
--- a/cpukit/score/src/coremsgseize.c
+++ b/cpukit/score/src/coremsgseize.c
@@ -114,13 +114,14 @@ Status_Control _CORE_message_queue_Seize(
executing->Wait.return_argument = size_p;
/* Wait.count will be filled in with the message priority */
+ _Thread_queue_Context_set_expected_level( queue_context, 1 );
_Thread_queue_Enqueue_critical(
&the_message_queue->Wait_queue.Queue,
the_message_queue->operations,
executing,
STATES_WAITING_FOR_MESSAGE,
timeout,
- &queue_context->Lock_context
+ queue_context
);
return _Thread_Wait_get_status( executing );
}
diff --git a/cpukit/score/src/coremsgsubmit.c b/cpukit/score/src/coremsgsubmit.c
index a623291776..fb56ffed4a 100644
--- a/cpukit/score/src/coremsgsubmit.c
+++ b/cpukit/score/src/coremsgsubmit.c
@@ -132,13 +132,14 @@ Status_Control _CORE_message_queue_Submit(
executing->Wait.option = (uint32_t) size;
executing->Wait.count = submit_type;
+ _Thread_queue_Context_set_expected_level( queue_context, 1 );
_Thread_queue_Enqueue_critical(
&the_message_queue->Wait_queue.Queue,
the_message_queue->operations,
executing,
STATES_WAITING_FOR_MESSAGE,
timeout,
- &queue_context->Lock_context
+ queue_context
);
return _Thread_Wait_get_status( executing );
#endif
diff --git a/cpukit/score/src/coremutexseize.c b/cpukit/score/src/coremutexseize.c
index 0fc63f4693..7b8b603d9b 100644
--- a/cpukit/score/src/coremutexseize.c
+++ b/cpukit/score/src/coremutexseize.c
@@ -18,17 +18,15 @@
#include "config.h"
#endif
-#include <rtems/system.h>
-#include <rtems/score/isr.h>
#include <rtems/score/coremuteximpl.h>
#include <rtems/score/statesimpl.h>
#include <rtems/score/thread.h>
Status_Control _CORE_mutex_Seize_interrupt_blocking(
- CORE_mutex_Control *the_mutex,
- Thread_Control *executing,
- Watchdog_Interval timeout,
- ISR_lock_Context *lock_context
+ CORE_mutex_Control *the_mutex,
+ Thread_Control *executing,
+ Watchdog_Interval timeout,
+ Thread_queue_Context *queue_context
)
{
#if !defined(RTEMS_SMP)
@@ -51,23 +49,30 @@ Status_Control _CORE_mutex_Seize_interrupt_blocking(
* otherwise the current holder may be no longer the holder of the mutex
* once we released the lock.
*/
- _Thread_queue_Release( &the_mutex->Wait_queue, lock_context );
+ _CORE_mutex_Release( the_mutex, queue_context );
#endif
_Thread_Inherit_priority( holder, executing );
#if !defined(RTEMS_SMP)
- _Thread_queue_Acquire( &the_mutex->Wait_queue, lock_context );
+ _ISR_lock_ISR_disable( &queue_context->Lock_context );
+ _CORE_mutex_Acquire_critical( the_mutex, queue_context );
#endif
}
+#if defined(RTEMS_SMP)
+ _Thread_queue_Context_set_expected_level( queue_context, 1 );
+#else
+ _Thread_queue_Context_set_expected_level( queue_context, 2 );
+#endif
+
_Thread_queue_Enqueue_critical(
&the_mutex->Wait_queue.Queue,
the_mutex->operations,
executing,
STATES_WAITING_FOR_MUTEX,
timeout,
- lock_context
+ queue_context
);
#if !defined(RTEMS_SMP)
diff --git a/cpukit/score/src/corerwlockobtainread.c b/cpukit/score/src/corerwlockobtainread.c
index 5192eb1f8f..bce992c1b0 100644
--- a/cpukit/score/src/corerwlockobtainread.c
+++ b/cpukit/score/src/corerwlockobtainread.c
@@ -79,13 +79,14 @@ Status_Control _CORE_RWLock_Seize_for_reading(
executing->Wait.option = CORE_RWLOCK_THREAD_WAITING_FOR_READ;
+ _Thread_queue_Context_set_expected_level( queue_context, 1 );
_Thread_queue_Enqueue_critical(
&the_rwlock->Wait_queue.Queue,
CORE_RWLOCK_TQ_OPERATIONS,
executing,
STATES_WAITING_FOR_RWLOCK,
timeout,
- &queue_context->Lock_context
+ queue_context
);
return _Thread_Wait_get_status( executing );
}
diff --git a/cpukit/score/src/corerwlockobtainwrite.c b/cpukit/score/src/corerwlockobtainwrite.c
index 0536b8287f..c261d32f3f 100644
--- a/cpukit/score/src/corerwlockobtainwrite.c
+++ b/cpukit/score/src/corerwlockobtainwrite.c
@@ -67,13 +67,14 @@ Status_Control _CORE_RWLock_Seize_for_writing(
executing->Wait.option = CORE_RWLOCK_THREAD_WAITING_FOR_WRITE;
+ _Thread_queue_Context_set_expected_level( queue_context, 1 );
_Thread_queue_Enqueue_critical(
&the_rwlock->Wait_queue.Queue,
CORE_RWLOCK_TQ_OPERATIONS,
executing,
STATES_WAITING_FOR_RWLOCK,
timeout,
- &queue_context->Lock_context
+ queue_context
);
return _Thread_Wait_get_status( executing );
}
diff --git a/cpukit/score/src/futex.c b/cpukit/score/src/futex.c
index 980c7fbccc..a192509f5c 100644
--- a/cpukit/score/src/futex.c
+++ b/cpukit/score/src/futex.c
@@ -50,52 +50,56 @@ static Futex_Control *_Futex_Get( struct _Futex_Control *_futex )
static Thread_Control *_Futex_Queue_acquire(
Futex_Control *futex,
- ISR_lock_Context *lock_context
+ Thread_queue_Context *queue_context
)
{
Thread_Control *executing;
- _ISR_lock_ISR_disable( lock_context );
+ _ISR_lock_ISR_disable( &queue_context->Lock_context );
executing = _Thread_Executing;
_Thread_queue_Queue_acquire_critical(
&futex->Queue.Queue,
&executing->Potpourri_stats,
- lock_context
+ &queue_context->Lock_context
);
return executing;
}
static void _Futex_Queue_release(
- Futex_Control *futex,
- ISR_lock_Context *lock_context
+ Futex_Control *futex,
+ Thread_queue_Context *queue_context
)
{
- _Thread_queue_Queue_release( &futex->Queue.Queue, lock_context );
+ _Thread_queue_Queue_release(
+ &futex->Queue.Queue,
+ &queue_context->Lock_context
+ );
}
int _Futex_Wait( struct _Futex_Control *_futex, int *uaddr, int val )
{
- Futex_Control *futex;
- ISR_lock_Context lock_context;
- Thread_Control *executing;
- int eno;
+ Futex_Control *futex;
+ Thread_queue_Context queue_context;
+ Thread_Control *executing;
+ int eno;
futex = _Futex_Get( _futex );
- executing = _Futex_Queue_acquire( futex, &lock_context );
+ executing = _Futex_Queue_acquire( futex, &queue_context );
if ( *uaddr == val ) {
+ _Thread_queue_Context_set_expected_level( &queue_context, 1 );
_Thread_queue_Enqueue_critical(
&futex->Queue.Queue,
FUTEX_TQ_OPERATIONS,
executing,
STATES_WAITING_FOR_SYS_LOCK_FUTEX,
WATCHDOG_NO_TIMEOUT,
- &lock_context
+ &queue_context
);
eno = 0;
} else {
- _Futex_Queue_release( futex, &lock_context );
+ _Futex_Queue_release( futex, &queue_context );
eno = EWOULDBLOCK;
}
@@ -128,11 +132,11 @@ static Thread_Control *_Futex_Flush_filter(
int _Futex_Wake( struct _Futex_Control *_futex, int count )
{
- Futex_Control *futex;
+ Futex_Control *futex;
Futex_Context context;
futex = _Futex_Get( _futex );
- _Futex_Queue_acquire( futex, &context.Base.Lock_context );
+ _Futex_Queue_acquire( futex, &context.Base );
/*
* For some synchronization objects like barriers the _Futex_Wake() must be
@@ -140,7 +144,7 @@ int _Futex_Wake( struct _Futex_Control *_futex, int count )
* check this condition early.
*/
if ( __predict_true( _Thread_queue_Is_empty( &futex->Queue.Queue ) ) ) {
- _Futex_Queue_release( futex, &context.Base.Lock_context );
+ _Futex_Queue_release( futex, &context.Base );
return 0;
}
diff --git a/cpukit/score/src/mpci.c b/cpukit/score/src/mpci.c
index 1e26b1f3ce..8c61e23b0f 100644
--- a/cpukit/score/src/mpci.c
+++ b/cpukit/score/src/mpci.c
@@ -259,7 +259,8 @@ Status_Control _MPCI_Send_request_packet(
&_Thread_queue_Operations_FIFO,
executing,
STATES_WAITING_FOR_RPC_REPLY | extra_state,
- the_packet->timeout
+ the_packet->timeout,
+ 2
);
_Thread_Dispatch_enable( cpu_self );
diff --git a/cpukit/score/src/mutex.c b/cpukit/score/src/mutex.c
index 6a85850a89..ed374a0768 100644
--- a/cpukit/score/src/mutex.c
+++ b/cpukit/score/src/mutex.c
@@ -72,47 +72,51 @@ static Mutex_Control *_Mutex_Get( struct _Mutex_Control *_mutex )
}
static Thread_Control *_Mutex_Queue_acquire(
- Mutex_Control *mutex,
- ISR_lock_Context *lock_context
+ Mutex_Control *mutex,
+ Thread_queue_Context *queue_context
)
{
Thread_Control *executing;
- _ISR_lock_ISR_disable( lock_context );
+ _ISR_lock_ISR_disable( &queue_context->Lock_context );
executing = _Thread_Executing;
_Thread_queue_Queue_acquire_critical(
&mutex->Queue.Queue,
&executing->Potpourri_stats,
- lock_context
+ &queue_context->Lock_context
);
return executing;
}
static void _Mutex_Queue_release(
- Mutex_Control *mutex,
- ISR_lock_Context *lock_context
+ Mutex_Control *mutex,
+ Thread_queue_Context *queue_context
)
{
- _Thread_queue_Queue_release( &mutex->Queue.Queue, lock_context );
+ _Thread_queue_Queue_release(
+ &mutex->Queue.Queue,
+ &queue_context->Lock_context
+ );
}
static void _Mutex_Acquire_slow(
- Mutex_Control *mutex,
- Thread_Control *owner,
- Thread_Control *executing,
- Watchdog_Interval timeout,
- ISR_lock_Context *lock_context
+ Mutex_Control *mutex,
+ Thread_Control *owner,
+ Thread_Control *executing,
+ Watchdog_Interval timeout,
+ Thread_queue_Context *queue_context
)
{
_Thread_Inherit_priority( owner, executing );
+ _Thread_queue_Context_set_expected_level( queue_context, 1 );
_Thread_queue_Enqueue_critical(
&mutex->Queue.Queue,
MUTEX_TQ_OPERATIONS,
executing,
STATES_WAITING_FOR_SYS_LOCK_MUTEX,
timeout,
- lock_context
+ queue_context
);
}
@@ -148,7 +152,7 @@ static void _Mutex_Release_slow(
&queue_context->Lock_context
);
} else {
- _Mutex_Queue_release( mutex, &queue_context->Lock_context );
+ _Mutex_Queue_release( mutex, queue_context );
}
if ( !keep_priority ) {
@@ -185,7 +189,7 @@ static void _Mutex_Release_critical(
|| !executing->priority_restore_hint;
if ( __predict_true( heads == NULL && keep_priority ) ) {
- _Mutex_Queue_release( mutex, &queue_context->Lock_context );
+ _Mutex_Queue_release( mutex, queue_context );
} else {
_Mutex_Release_slow(
mutex,
@@ -199,22 +203,23 @@ static void _Mutex_Release_critical(
void _Mutex_Acquire( struct _Mutex_Control *_mutex )
{
- Mutex_Control *mutex;
- ISR_lock_Context lock_context;
- Thread_Control *executing;
- Thread_Control *owner;
+ Mutex_Control *mutex;
+ Thread_queue_Context queue_context;
+ Thread_Control *executing;
+ Thread_Control *owner;
mutex = _Mutex_Get( _mutex );
- executing = _Mutex_Queue_acquire( mutex, &lock_context );
+ _Thread_queue_Context_initialize( &queue_context );
+ executing = _Mutex_Queue_acquire( mutex, &queue_context );
owner = mutex->Queue.Queue.owner;
if ( __predict_true( owner == NULL ) ) {
mutex->Queue.Queue.owner = executing;
++executing->resource_count;
- _Mutex_Queue_release( mutex, &lock_context );
+ _Mutex_Queue_release( mutex, &queue_context );
} else {
- _Mutex_Acquire_slow( mutex, owner, executing, 0, &lock_context );
+ _Mutex_Acquire_slow( mutex, owner, executing, 0, &queue_context );
}
}
@@ -223,20 +228,21 @@ int _Mutex_Acquire_timed(
const struct timespec *abstime
)
{
- Mutex_Control *mutex;
- ISR_lock_Context lock_context;
- Thread_Control *executing;
- Thread_Control *owner;
+ Mutex_Control *mutex;
+ Thread_queue_Context queue_context;
+ Thread_Control *executing;
+ Thread_Control *owner;
mutex = _Mutex_Get( _mutex );
- executing = _Mutex_Queue_acquire( mutex, &lock_context );
+ _Thread_queue_Context_initialize( &queue_context );
+ executing = _Mutex_Queue_acquire( mutex, &queue_context );
owner = mutex->Queue.Queue.owner;
if ( __predict_true( owner == NULL ) ) {
mutex->Queue.Queue.owner = executing;
++executing->resource_count;
- _Mutex_Queue_release( mutex, &lock_context );
+ _Mutex_Queue_release( mutex, &queue_context );
return 0;
} else {
@@ -244,17 +250,17 @@ int _Mutex_Acquire_timed(
switch ( _TOD_Absolute_timeout_to_ticks( abstime, &ticks ) ) {
case TOD_ABSOLUTE_TIMEOUT_INVALID:
- _Mutex_Queue_release( mutex, &lock_context );
+ _Mutex_Queue_release( mutex, &queue_context );
return EINVAL;
case TOD_ABSOLUTE_TIMEOUT_IS_IN_PAST:
case TOD_ABSOLUTE_TIMEOUT_IS_NOW:
- _Mutex_Queue_release( mutex, &lock_context );
+ _Mutex_Queue_release( mutex, &queue_context );
return ETIMEDOUT;
default:
break;
}
- _Mutex_Acquire_slow( mutex, owner, executing, ticks, &lock_context );
+ _Mutex_Acquire_slow( mutex, owner, executing, ticks, &queue_context );
return STATUS_GET_POSIX( _Thread_Wait_get_status( executing ) );
}
@@ -262,14 +268,15 @@ int _Mutex_Acquire_timed(
int _Mutex_Try_acquire( struct _Mutex_Control *_mutex )
{
- Mutex_Control *mutex;
- ISR_lock_Context lock_context;
- Thread_Control *executing;
- Thread_Control *owner;
- int eno;
+ Mutex_Control *mutex;
+ Thread_queue_Context queue_context;
+ Thread_Control *executing;
+ Thread_Control *owner;
+ int eno;
mutex = _Mutex_Get( _mutex );
- executing = _Mutex_Queue_acquire( mutex, &lock_context );
+ _Thread_queue_Context_initialize( &queue_context );
+ executing = _Mutex_Queue_acquire( mutex, &queue_context );
owner = mutex->Queue.Queue.owner;
@@ -281,7 +288,7 @@ int _Mutex_Try_acquire( struct _Mutex_Control *_mutex )
eno = EBUSY;
}
- _Mutex_Queue_release( mutex, &lock_context );
+ _Mutex_Queue_release( mutex, &queue_context );
return eno;
}
@@ -294,7 +301,7 @@ void _Mutex_Release( struct _Mutex_Control *_mutex )
mutex = _Mutex_Get( _mutex );
_Thread_queue_Context_initialize( &queue_context );
- executing = _Mutex_Queue_acquire( mutex, &queue_context.Lock_context );
+ executing = _Mutex_Queue_acquire( mutex, &queue_context );
_Assert( mutex->Queue.Queue.owner == executing );
@@ -311,24 +318,25 @@ static Mutex_recursive_Control *_Mutex_recursive_Get(
void _Mutex_recursive_Acquire( struct _Mutex_recursive_Control *_mutex )
{
Mutex_recursive_Control *mutex;
- ISR_lock_Context lock_context;
+ Thread_queue_Context queue_context;
Thread_Control *executing;
Thread_Control *owner;
mutex = _Mutex_recursive_Get( _mutex );
- executing = _Mutex_Queue_acquire( &mutex->Mutex, &lock_context );
+ _Thread_queue_Context_initialize( &queue_context );
+ executing = _Mutex_Queue_acquire( &mutex->Mutex, &queue_context );
owner = mutex->Mutex.Queue.Queue.owner;
if ( __predict_true( owner == NULL ) ) {
mutex->Mutex.Queue.Queue.owner = executing;
++executing->resource_count;
- _Mutex_Queue_release( &mutex->Mutex, &lock_context );
+ _Mutex_Queue_release( &mutex->Mutex, &queue_context );
} else if ( owner == executing ) {
++mutex->nest_level;
- _Mutex_Queue_release( &mutex->Mutex, &lock_context );
+ _Mutex_Queue_release( &mutex->Mutex, &queue_context );
} else {
- _Mutex_Acquire_slow( &mutex->Mutex, owner, executing, 0, &lock_context );
+ _Mutex_Acquire_slow( &mutex->Mutex, owner, executing, 0, &queue_context );
}
}
@@ -338,24 +346,25 @@ int _Mutex_recursive_Acquire_timed(
)
{
Mutex_recursive_Control *mutex;
- ISR_lock_Context lock_context;
+ Thread_queue_Context queue_context;
Thread_Control *executing;
Thread_Control *owner;
mutex = _Mutex_recursive_Get( _mutex );
- executing = _Mutex_Queue_acquire( &mutex->Mutex, &lock_context );
+ _Thread_queue_Context_initialize( &queue_context );
+ executing = _Mutex_Queue_acquire( &mutex->Mutex, &queue_context );
owner = mutex->Mutex.Queue.Queue.owner;
if ( __predict_true( owner == NULL ) ) {
mutex->Mutex.Queue.Queue.owner = executing;
++executing->resource_count;
- _Mutex_Queue_release( &mutex->Mutex, &lock_context );
+ _Mutex_Queue_release( &mutex->Mutex, &queue_context );
return 0;
} else if ( owner == executing ) {
++mutex->nest_level;
- _Mutex_Queue_release( &mutex->Mutex, &lock_context );
+ _Mutex_Queue_release( &mutex->Mutex, &queue_context );
return 0;
} else {
@@ -363,11 +372,11 @@ int _Mutex_recursive_Acquire_timed(
switch ( _TOD_Absolute_timeout_to_ticks( abstime, &ticks ) ) {
case TOD_ABSOLUTE_TIMEOUT_INVALID:
- _Mutex_Queue_release( &mutex->Mutex, &lock_context );
+ _Mutex_Queue_release( &mutex->Mutex, &queue_context );
return EINVAL;
case TOD_ABSOLUTE_TIMEOUT_IS_IN_PAST:
case TOD_ABSOLUTE_TIMEOUT_IS_NOW:
- _Mutex_Queue_release( &mutex->Mutex, &lock_context );
+ _Mutex_Queue_release( &mutex->Mutex, &queue_context );
return ETIMEDOUT;
default:
break;
@@ -378,7 +387,7 @@ int _Mutex_recursive_Acquire_timed(
owner,
executing,
ticks,
- &lock_context
+ &queue_context
);
return STATUS_GET_POSIX( _Thread_Wait_get_status( executing ) );
@@ -388,13 +397,14 @@ int _Mutex_recursive_Acquire_timed(
int _Mutex_recursive_Try_acquire( struct _Mutex_recursive_Control *_mutex )
{
Mutex_recursive_Control *mutex;
- ISR_lock_Context lock_context;
+ Thread_queue_Context queue_context;
Thread_Control *executing;
Thread_Control *owner;
int eno;
mutex = _Mutex_recursive_Get( _mutex );
- executing = _Mutex_Queue_acquire( &mutex->Mutex, &lock_context );
+ _Thread_queue_Context_initialize( &queue_context );
+ executing = _Mutex_Queue_acquire( &mutex->Mutex, &queue_context );
owner = mutex->Mutex.Queue.Queue.owner;
@@ -409,7 +419,7 @@ int _Mutex_recursive_Try_acquire( struct _Mutex_recursive_Control *_mutex )
eno = EBUSY;
}
- _Mutex_Queue_release( &mutex->Mutex, &lock_context );
+ _Mutex_Queue_release( &mutex->Mutex, &queue_context );
return eno;
}
@@ -423,10 +433,7 @@ void _Mutex_recursive_Release( struct _Mutex_recursive_Control *_mutex )
mutex = _Mutex_recursive_Get( _mutex );
_Thread_queue_Context_initialize( &queue_context );
- executing = _Mutex_Queue_acquire(
- &mutex->Mutex,
- &queue_context.Lock_context
- );
+ executing = _Mutex_Queue_acquire( &mutex->Mutex, &queue_context );
_Assert( mutex->Mutex.Queue.Queue.owner == executing );
@@ -437,7 +444,7 @@ void _Mutex_recursive_Release( struct _Mutex_recursive_Control *_mutex )
} else {
mutex->nest_level = nest_level - 1;
- _Mutex_Queue_release( &mutex->Mutex, &queue_context.Lock_context );
+ _Mutex_Queue_release( &mutex->Mutex, &queue_context );
}
}
diff --git a/cpukit/score/src/semaphore.c b/cpukit/score/src/semaphore.c
index 72abd9e12a..03af9cff7e 100644
--- a/cpukit/score/src/semaphore.c
+++ b/cpukit/score/src/semaphore.c
@@ -56,53 +56,58 @@ static Semaphore_Control *_Semaphore_Get(
}
static Thread_Control *_Semaphore_Queue_acquire(
- Semaphore_Control *sem,
- ISR_lock_Context *lock_context
+ Semaphore_Control *sem,
+ Thread_queue_Context *queue_context
)
{
Thread_Control *executing;
- _ISR_lock_ISR_disable( lock_context );
+ _ISR_lock_ISR_disable( &queue_context->Lock_context );
executing = _Thread_Executing;
_Thread_queue_Queue_acquire_critical(
&sem->Queue.Queue,
&executing->Potpourri_stats,
- lock_context
+ &queue_context->Lock_context
);
return executing;
}
static void _Semaphore_Queue_release(
- Semaphore_Control *sem,
- ISR_lock_Context *lock_context
+ Semaphore_Control *sem,
+ Thread_queue_Context *queue_context
)
{
- _Thread_queue_Queue_release( &sem->Queue.Queue, lock_context );
+ _Thread_queue_Queue_release(
+ &sem->Queue.Queue,
+ &queue_context->Lock_context
+ );
}
void _Semaphore_Wait( struct _Semaphore_Control *_sem )
{
- Semaphore_Control *sem ;
- ISR_lock_Context lock_context;
- Thread_Control *executing;
- unsigned int count;
+ Semaphore_Control *sem ;
+ Thread_queue_Context queue_context;
+ Thread_Control *executing;
+ unsigned int count;
sem = _Semaphore_Get( _sem );
- executing = _Semaphore_Queue_acquire( sem, &lock_context );
+ _Thread_queue_Context_initialize( &queue_context );
+ executing = _Semaphore_Queue_acquire( sem, &queue_context );
count = sem->count;
if ( count > 0 ) {
sem->count = count - 1;
- _Semaphore_Queue_release( sem, &lock_context );
+ _Semaphore_Queue_release( sem, &queue_context );
} else {
+ _Thread_queue_Context_set_expected_level( &queue_context, 1 );
_Thread_queue_Enqueue_critical(
&sem->Queue.Queue,
SEMAPHORE_TQ_OPERATIONS,
executing,
STATES_WAITING_FOR_SYS_LOCK_SEMAPHORE,
WATCHDOG_NO_TIMEOUT,
- &lock_context
+ &queue_context
);
}
}
@@ -115,13 +120,13 @@ void _Semaphore_Post( struct _Semaphore_Control *_sem )
sem = _Semaphore_Get( _sem );
_Thread_queue_Context_initialize( &queue_context );
- _Semaphore_Queue_acquire( sem, &queue_context.Lock_context );
+ _Semaphore_Queue_acquire( sem, &queue_context );
heads = sem->Queue.Queue.heads;
if ( heads == NULL ) {
_Assert( sem->count < UINT_MAX );
++sem->count;
- _Semaphore_Queue_release( sem, &queue_context.Lock_context );
+ _Semaphore_Queue_release( sem, &queue_context );
} else {
const Thread_queue_Operations *operations;
Thread_Control *first;
diff --git a/cpukit/score/src/threadqenqueue.c b/cpukit/score/src/threadqenqueue.c
index 4eaafa9036..1e95003aa7 100644
--- a/cpukit/score/src/threadqenqueue.c
+++ b/cpukit/score/src/threadqenqueue.c
@@ -40,7 +40,7 @@ void _Thread_queue_Enqueue_critical(
Thread_Control *the_thread,
States_Control state,
Watchdog_Interval timeout,
- ISR_lock_Context *lock_context
+ Thread_queue_Context *queue_context
)
{
Per_CPU_Control *cpu_self;
@@ -61,8 +61,19 @@ void _Thread_queue_Enqueue_critical(
( *operations->enqueue )( queue, the_thread );
_Thread_Wait_flags_set( the_thread, THREAD_QUEUE_INTEND_TO_BLOCK );
- cpu_self = _Thread_Dispatch_disable_critical( lock_context );
- _Thread_queue_Queue_release( queue, lock_context );
+ cpu_self = _Thread_Dispatch_disable_critical( &queue_context->Lock_context );
+ _Thread_queue_Queue_release( queue, &queue_context->Lock_context );
+
+ if (
+ cpu_self->thread_dispatch_disable_level
+ != queue_context->expected_thread_dispatch_disable_level
+ ) {
+ _Terminate(
+ INTERNAL_ERROR_CORE,
+ false,
+ INTERNAL_ERROR_THREAD_QUEUE_ENQUEUE_FROM_BAD_STATE
+ );
+ }
/*
* Set the blocking state for this thread queue in the thread.
diff --git a/cpukit/score/src/threadrestart.c b/cpukit/score/src/threadrestart.c
index 3bddac458d..f155980c1a 100644
--- a/cpukit/score/src/threadrestart.c
+++ b/cpukit/score/src/threadrestart.c
@@ -443,10 +443,10 @@ static void _Thread_Finalize_life_change(
}
void _Thread_Join(
- Thread_Control *the_thread,
- States_Control waiting_for_join,
- Thread_Control *executing,
- ISR_lock_Context *lock_context
+ Thread_Control *the_thread,
+ States_Control waiting_for_join,
+ Thread_Control *executing,
+ Thread_queue_Context *queue_context
)
{
_Assert( the_thread != executing );
@@ -462,7 +462,7 @@ void _Thread_Join(
executing,
waiting_for_join,
WATCHDOG_NO_TIMEOUT,
- lock_context
+ queue_context
);
}
@@ -524,14 +524,16 @@ void _Thread_Cancel(
void _Thread_Close( Thread_Control *the_thread, Thread_Control *executing )
{
- ISR_lock_Context lock_context;
+ Thread_queue_Context queue_context;
- _Thread_State_acquire( the_thread, &lock_context );
+ _Thread_queue_Context_initialize( &queue_context );
+ _Thread_queue_Context_set_expected_level( &queue_context, 2 );
+ _Thread_State_acquire( the_thread, &queue_context.Lock_context );
_Thread_Join(
the_thread,
STATES_WAITING_FOR_JOIN,
executing,
- &lock_context
+ &queue_context
);
_Thread_Cancel( the_thread, executing, NULL );
}
diff --git a/testsuites/sptests/spfatal03/testcase.h b/testsuites/sptests/spfatal03/testcase.h
index 86b300344e..34a20f6825 100644
--- a/testsuites/sptests/spfatal03/testcase.h
+++ b/testsuites/sptests/spfatal03/testcase.h
@@ -16,7 +16,7 @@
#define FATAL_ERROR_EXPECTED_SOURCE INTERNAL_ERROR_CORE
#define FATAL_ERROR_EXPECTED_IS_INTERNAL FALSE
#define FATAL_ERROR_EXPECTED_ERROR \
- INTERNAL_ERROR_MUTEX_OBTAIN_FROM_BAD_STATE
+ INTERNAL_ERROR_THREAD_QUEUE_ENQUEUE_FROM_BAD_STATE
void force_error(void)
{
@@ -26,7 +26,7 @@ void force_error(void)
status = rtems_semaphore_create(
rtems_build_name( 'S','0',' ',' '),
- 1,
+ 0,
RTEMS_LOCAL|
RTEMS_SIMPLE_BINARY_SEMAPHORE,
0,
@@ -37,7 +37,7 @@ void force_error(void)
printk("Obtain semaphore in dispatching critical section\n");
_Thread_Dispatch_disable();
- status = rtems_semaphore_obtain( mutex, RTEMS_DEFAULT_OPTIONS, 0 );
+ status = rtems_semaphore_obtain( mutex, RTEMS_WAIT, RTEMS_NO_TIMEOUT );
/* !!! SHOULD NOT RETURN FROM THE ABOVE CALL */
rtems_test_assert( 0 );