author     Sebastian Huber <sebastian.huber@embedded-brains.de>  2016-05-27 14:43:19 +0200
committer  Sebastian Huber <sebastian.huber@embedded-brains.de>  2016-05-30 16:16:21 +0200
commit     93306058c0417b1c6e950b44ef279e096062dfba (patch)
tree       1a3326d052169d2aec4c4acdd894589d5d84614c /cpukit/score/src
parent     score: Add _Thread_queue_Context_set_MP_callout() (diff)
download   rtems-93306058c0417b1c6e950b44ef279e096062dfba.tar.bz2
score: _CORE_mutex_Check_dispatch_for_seize()
Move the safety check performed by _CORE_mutex_Check_dispatch_for_seize() out of the performance critical path and generalize it. Blocking on a thread queue with an unexpected thread dispatch disabled level is illegal in all system states. Add the expected thread dispatch disable level (which may be 1 or 2 depending on the operation) to Thread_queue_Context and use it in _Thread_queue_Enqueue_critical().
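For orientation, here is a sketch of the generalized context implied by the hunks below. The member name expected_thread_dispatch_disable_level and the functions _Thread_queue_Context_initialize() and _Thread_queue_Context_set_expected_level() appear verbatim in the diff; the layout and everything else in this sketch is assumed, not the actual <rtems/score/threadqimpl.h> definition.

/* Sketch only -- inferred from this diff, not the real header. */
typedef struct {
  /*
   * Thread dispatch disable level expected after
   * _Thread_queue_Enqueue_critical() has disabled thread dispatching
   * itself: 1 if the caller enters with dispatching enabled, 2 if the
   * caller has already disabled it once.
   */
  uint32_t         expected_thread_dispatch_disable_level;

  /* Lock context used to acquire and release the thread queue. */
  ISR_lock_Context Lock_context;
} Thread_queue_Context;

RTEMS_INLINE_ROUTINE void _Thread_queue_Context_set_expected_level(
  Thread_queue_Context *queue_context,
  uint32_t              expected_level
)
{
  queue_context->expected_thread_dispatch_disable_level = expected_level;
}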
Diffstat (limited to 'cpukit/score/src')
-rw-r--r--  cpukit/score/src/condition.c               |  88
-rw-r--r--  cpukit/score/src/corebarrierwait.c         |   3
-rw-r--r--  cpukit/score/src/coremsgseize.c            |   3
-rw-r--r--  cpukit/score/src/coremsgsubmit.c           |   3
-rw-r--r--  cpukit/score/src/coremutexseize.c          |  23
-rw-r--r--  cpukit/score/src/corerwlockobtainread.c    |   3
-rw-r--r--  cpukit/score/src/corerwlockobtainwrite.c   |   3
-rw-r--r--  cpukit/score/src/futex.c                   |  36
-rw-r--r--  cpukit/score/src/mpci.c                    |   3
-rw-r--r--  cpukit/score/src/mutex.c                   | 125
-rw-r--r--  cpukit/score/src/semaphore.c               |  37
-rw-r--r--  cpukit/score/src/threadqenqueue.c          |  17
-rw-r--r--  cpukit/score/src/threadrestart.c           |  18
13 files changed, 205 insertions, 157 deletions
diff --git a/cpukit/score/src/condition.c b/cpukit/score/src/condition.c
index fae150a7cd..e7d98058a0 100644
--- a/cpukit/score/src/condition.c
+++ b/cpukit/score/src/condition.c
@@ -53,8 +53,8 @@ static Condition_Control *_Condition_Get(
}
static Thread_Control *_Condition_Queue_acquire_critical(
- Condition_Control *condition,
- ISR_lock_Context *lock_context
+ Condition_Control *condition,
+ Thread_queue_Context *queue_context
)
{
Thread_Control *executing;
@@ -63,24 +63,27 @@ static Thread_Control *_Condition_Queue_acquire_critical(
_Thread_queue_Queue_acquire_critical(
&condition->Queue.Queue,
&executing->Potpourri_stats,
- lock_context
+ &queue_context->Lock_context
);
return executing;
}
static void _Condition_Queue_release(
- Condition_Control *condition,
- ISR_lock_Context *lock_context
+ Condition_Control *condition,
+ Thread_queue_Context *queue_context
)
{
- _Thread_queue_Queue_release( &condition->Queue.Queue, lock_context );
+ _Thread_queue_Queue_release(
+ &condition->Queue.Queue,
+ &queue_context->Lock_context
+ );
}
static Per_CPU_Control *_Condition_Do_wait(
struct _Condition_Control *_condition,
Watchdog_Interval timeout,
- ISR_lock_Context *lock_context
+ Thread_queue_Context *queue_context
)
{
Condition_Control *condition;
@@ -88,16 +91,17 @@ static Per_CPU_Control *_Condition_Do_wait(
Per_CPU_Control *cpu_self;
condition = _Condition_Get( _condition );
- executing = _Condition_Queue_acquire_critical( condition, lock_context );
- cpu_self = _Thread_Dispatch_disable_critical( lock_context );
+ executing = _Condition_Queue_acquire_critical( condition, queue_context );
+ cpu_self = _Thread_Dispatch_disable_critical( &queue_context->Lock_context );
+ _Thread_queue_Context_set_expected_level( queue_context, 2 );
_Thread_queue_Enqueue_critical(
&condition->Queue.Queue,
CONDITION_TQ_OPERATIONS,
executing,
STATES_WAITING_FOR_SYS_LOCK_CONDITION,
timeout,
- lock_context
+ queue_context
);
return cpu_self;
@@ -108,11 +112,12 @@ void _Condition_Wait(
struct _Mutex_Control *_mutex
)
{
- ISR_lock_Context lock_context;
- Per_CPU_Control *cpu_self;
+ Thread_queue_Context queue_context;
+ Per_CPU_Control *cpu_self;
- _ISR_lock_ISR_disable( &lock_context );
- cpu_self = _Condition_Do_wait( _condition, 0, &lock_context );
+ _Thread_queue_Context_initialize( &queue_context );
+ _ISR_lock_ISR_disable( &queue_context.Lock_context );
+ cpu_self = _Condition_Do_wait( _condition, 0, &queue_context );
_Mutex_Release( _mutex );
_Thread_Dispatch_enable( cpu_self );
@@ -125,27 +130,28 @@ int _Condition_Wait_timed(
const struct timespec *abstime
)
{
- ISR_lock_Context lock_context;
- Per_CPU_Control *cpu_self;
- Thread_Control *executing;
- int eno;
- Watchdog_Interval ticks;
+ Thread_queue_Context queue_context;
+ Per_CPU_Control *cpu_self;
+ Thread_Control *executing;
+ int eno;
+ Watchdog_Interval ticks;
- _ISR_lock_ISR_disable( &lock_context );
+ _Thread_queue_Context_initialize( &queue_context );
+ _ISR_lock_ISR_disable( &queue_context.Lock_context );
switch ( _TOD_Absolute_timeout_to_ticks( abstime, &ticks ) ) {
case TOD_ABSOLUTE_TIMEOUT_INVALID:
- _ISR_lock_ISR_enable( &lock_context );
+ _ISR_lock_ISR_enable( &queue_context.Lock_context );
return EINVAL;
case TOD_ABSOLUTE_TIMEOUT_IS_IN_PAST:
case TOD_ABSOLUTE_TIMEOUT_IS_NOW:
- _ISR_lock_ISR_enable( &lock_context );
+ _ISR_lock_ISR_enable( &queue_context.Lock_context );
return ETIMEDOUT;
default:
break;
}
- cpu_self = _Condition_Do_wait( _condition, ticks, &lock_context );
+ cpu_self = _Condition_Do_wait( _condition, ticks, &queue_context );
_Mutex_Release( _mutex );
executing = cpu_self->executing;
@@ -161,12 +167,13 @@ void _Condition_Wait_recursive(
struct _Mutex_recursive_Control *_mutex
)
{
- ISR_lock_Context lock_context;
- Per_CPU_Control *cpu_self;
- unsigned int nest_level;
+ Thread_queue_Context queue_context;
+ Per_CPU_Control *cpu_self;
+ unsigned int nest_level;
- _ISR_lock_ISR_disable( &lock_context );
- cpu_self = _Condition_Do_wait( _condition, 0, &lock_context );
+ _Thread_queue_Context_initialize( &queue_context );
+ _ISR_lock_ISR_disable( &queue_context.Lock_context );
+ cpu_self = _Condition_Do_wait( _condition, 0, &queue_context );
nest_level = _mutex->_nest_level;
_mutex->_nest_level = 0;
@@ -182,28 +189,29 @@ int _Condition_Wait_recursive_timed(
const struct timespec *abstime
)
{
- ISR_lock_Context lock_context;
- Per_CPU_Control *cpu_self;
- Thread_Control *executing;
- int eno;
- unsigned int nest_level;
- Watchdog_Interval ticks;
+ Thread_queue_Context queue_context;
+ Per_CPU_Control *cpu_self;
+ Thread_Control *executing;
+ int eno;
+ unsigned int nest_level;
+ Watchdog_Interval ticks;
- _ISR_lock_ISR_disable( &lock_context );
+ _Thread_queue_Context_initialize( &queue_context );
+ _ISR_lock_ISR_disable( &queue_context.Lock_context );
switch ( _TOD_Absolute_timeout_to_ticks( abstime, &ticks ) ) {
case TOD_ABSOLUTE_TIMEOUT_INVALID:
- _ISR_lock_ISR_enable( &lock_context );
+ _ISR_lock_ISR_enable( &queue_context.Lock_context );
return EINVAL;
case TOD_ABSOLUTE_TIMEOUT_IS_IN_PAST:
case TOD_ABSOLUTE_TIMEOUT_IS_NOW:
- _ISR_lock_ISR_enable( &lock_context );
+ _ISR_lock_ISR_enable( &queue_context.Lock_context );
return ETIMEDOUT;
default:
break;
}
- cpu_self = _Condition_Do_wait( _condition, ticks, &lock_context );
+ cpu_self = _Condition_Do_wait( _condition, ticks, &queue_context );
nest_level = _mutex->_nest_level;
_mutex->_nest_level = 0;
@@ -249,14 +257,14 @@ static void _Condition_Wake( struct _Condition_Control *_condition, int count )
condition = _Condition_Get( _condition );
_Thread_queue_Context_initialize( &context.Base );
_ISR_lock_ISR_disable( &context.Base.Lock_context );
- _Condition_Queue_acquire_critical( condition, &context.Base.Lock_context );
+ _Condition_Queue_acquire_critical( condition, &context.Base );
/*
* In common use cases of condition variables there are normally no threads
* on the queue, so check this condition early.
*/
if ( __predict_true( _Thread_queue_Is_empty( &condition->Queue.Queue ) ) ) {
- _Condition_Queue_release( condition, &context.Base.Lock_context );
+ _Condition_Queue_release( condition, &context.Base );
return;
}
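A note on the expected level of 2 chosen in _Condition_Do_wait() above; this is a reading of the hunk, not text from the patch:

/*
 * _Thread_Dispatch_disable_critical() in _Condition_Do_wait()  -> level 1
 * disable done inside _Thread_queue_Enqueue_critical()         -> level 2
 *
 * hence _Thread_queue_Context_set_expected_level( queue_context, 2 ).
 */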
diff --git a/cpukit/score/src/corebarrierwait.c b/cpukit/score/src/corebarrierwait.c
index a1c862d293..d939acf69b 100644
--- a/cpukit/score/src/corebarrierwait.c
+++ b/cpukit/score/src/corebarrierwait.c
@@ -45,13 +45,14 @@ Status_Control _CORE_barrier_Seize(
return STATUS_BARRIER_AUTOMATICALLY_RELEASED;
} else {
the_barrier->number_of_waiting_threads = number_of_waiting_threads;
+ _Thread_queue_Context_set_expected_level( queue_context, 1 );
_Thread_queue_Enqueue_critical(
&the_barrier->Wait_queue.Queue,
CORE_BARRIER_TQ_OPERATIONS,
executing,
STATES_WAITING_FOR_BARRIER,
timeout,
- &queue_context->Lock_context
+ queue_context
);
return _Thread_Wait_get_status( executing );
}
diff --git a/cpukit/score/src/coremsgseize.c b/cpukit/score/src/coremsgseize.c
index 00ff437e01..6906328039 100644
--- a/cpukit/score/src/coremsgseize.c
+++ b/cpukit/score/src/coremsgseize.c
@@ -114,13 +114,14 @@ Status_Control _CORE_message_queue_Seize(
executing->Wait.return_argument = size_p;
/* Wait.count will be filled in with the message priority */
+ _Thread_queue_Context_set_expected_level( queue_context, 1 );
_Thread_queue_Enqueue_critical(
&the_message_queue->Wait_queue.Queue,
the_message_queue->operations,
executing,
STATES_WAITING_FOR_MESSAGE,
timeout,
- &queue_context->Lock_context
+ queue_context
);
return _Thread_Wait_get_status( executing );
}
diff --git a/cpukit/score/src/coremsgsubmit.c b/cpukit/score/src/coremsgsubmit.c
index a623291776..fb56ffed4a 100644
--- a/cpukit/score/src/coremsgsubmit.c
+++ b/cpukit/score/src/coremsgsubmit.c
@@ -132,13 +132,14 @@ Status_Control _CORE_message_queue_Submit(
executing->Wait.option = (uint32_t) size;
executing->Wait.count = submit_type;
+ _Thread_queue_Context_set_expected_level( queue_context, 1 );
_Thread_queue_Enqueue_critical(
&the_message_queue->Wait_queue.Queue,
the_message_queue->operations,
executing,
STATES_WAITING_FOR_MESSAGE,
timeout,
- &queue_context->Lock_context
+ queue_context
);
return _Thread_Wait_get_status( executing );
#endif
diff --git a/cpukit/score/src/coremutexseize.c b/cpukit/score/src/coremutexseize.c
index 0fc63f4693..7b8b603d9b 100644
--- a/cpukit/score/src/coremutexseize.c
+++ b/cpukit/score/src/coremutexseize.c
@@ -18,17 +18,15 @@
#include "config.h"
#endif
-#include <rtems/system.h>
-#include <rtems/score/isr.h>
#include <rtems/score/coremuteximpl.h>
#include <rtems/score/statesimpl.h>
#include <rtems/score/thread.h>
Status_Control _CORE_mutex_Seize_interrupt_blocking(
- CORE_mutex_Control *the_mutex,
- Thread_Control *executing,
- Watchdog_Interval timeout,
- ISR_lock_Context *lock_context
+ CORE_mutex_Control *the_mutex,
+ Thread_Control *executing,
+ Watchdog_Interval timeout,
+ Thread_queue_Context *queue_context
)
{
#if !defined(RTEMS_SMP)
@@ -51,23 +49,30 @@ Status_Control _CORE_mutex_Seize_interrupt_blocking(
* otherwise the current holder may be no longer the holder of the mutex
* once we released the lock.
*/
- _Thread_queue_Release( &the_mutex->Wait_queue, lock_context );
+ _CORE_mutex_Release( the_mutex, queue_context );
#endif
_Thread_Inherit_priority( holder, executing );
#if !defined(RTEMS_SMP)
- _Thread_queue_Acquire( &the_mutex->Wait_queue, lock_context );
+ _ISR_lock_ISR_disable( &queue_context->Lock_context );
+ _CORE_mutex_Acquire_critical( the_mutex, queue_context );
#endif
}
+#if defined(RTEMS_SMP)
+ _Thread_queue_Context_set_expected_level( queue_context, 1 );
+#else
+ _Thread_queue_Context_set_expected_level( queue_context, 2 );
+#endif
+
_Thread_queue_Enqueue_critical(
&the_mutex->Wait_queue.Queue,
the_mutex->operations,
executing,
STATES_WAITING_FOR_MUTEX,
timeout,
- lock_context
+ queue_context
);
#if !defined(RTEMS_SMP)
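The #if/#else above encodes the same accounting, inferred from the patch rather than stated by it: on SMP the blocking seize enters with thread dispatching still enabled, while on uniprocessor configurations the caller has already disabled it once.

/*
 * Inferred for _CORE_mutex_Seize_interrupt_blocking():
 *
 *            level on entry + disable by enqueue = expected level
 *   SMP:            0               1                    1
 *   non-SMP:        1               1                    2
 */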
diff --git a/cpukit/score/src/corerwlockobtainread.c b/cpukit/score/src/corerwlockobtainread.c
index 5192eb1f8f..bce992c1b0 100644
--- a/cpukit/score/src/corerwlockobtainread.c
+++ b/cpukit/score/src/corerwlockobtainread.c
@@ -79,13 +79,14 @@ Status_Control _CORE_RWLock_Seize_for_reading(
executing->Wait.option = CORE_RWLOCK_THREAD_WAITING_FOR_READ;
+ _Thread_queue_Context_set_expected_level( queue_context, 1 );
_Thread_queue_Enqueue_critical(
&the_rwlock->Wait_queue.Queue,
CORE_RWLOCK_TQ_OPERATIONS,
executing,
STATES_WAITING_FOR_RWLOCK,
timeout,
- &queue_context->Lock_context
+ queue_context
);
return _Thread_Wait_get_status( executing );
}
diff --git a/cpukit/score/src/corerwlockobtainwrite.c b/cpukit/score/src/corerwlockobtainwrite.c
index 0536b8287f..c261d32f3f 100644
--- a/cpukit/score/src/corerwlockobtainwrite.c
+++ b/cpukit/score/src/corerwlockobtainwrite.c
@@ -67,13 +67,14 @@ Status_Control _CORE_RWLock_Seize_for_writing(
executing->Wait.option = CORE_RWLOCK_THREAD_WAITING_FOR_WRITE;
+ _Thread_queue_Context_set_expected_level( queue_context, 1 );
_Thread_queue_Enqueue_critical(
&the_rwlock->Wait_queue.Queue,
CORE_RWLOCK_TQ_OPERATIONS,
executing,
STATES_WAITING_FOR_RWLOCK,
timeout,
- &queue_context->Lock_context
+ queue_context
);
return _Thread_Wait_get_status( executing );
}
diff --git a/cpukit/score/src/futex.c b/cpukit/score/src/futex.c
index 980c7fbccc..a192509f5c 100644
--- a/cpukit/score/src/futex.c
+++ b/cpukit/score/src/futex.c
@@ -50,52 +50,56 @@ static Futex_Control *_Futex_Get( struct _Futex_Control *_futex )
static Thread_Control *_Futex_Queue_acquire(
Futex_Control *futex,
- ISR_lock_Context *lock_context
+ Thread_queue_Context *queue_context
)
{
Thread_Control *executing;
- _ISR_lock_ISR_disable( lock_context );
+ _ISR_lock_ISR_disable( &queue_context->Lock_context );
executing = _Thread_Executing;
_Thread_queue_Queue_acquire_critical(
&futex->Queue.Queue,
&executing->Potpourri_stats,
- lock_context
+ &queue_context->Lock_context
);
return executing;
}
static void _Futex_Queue_release(
- Futex_Control *futex,
- ISR_lock_Context *lock_context
+ Futex_Control *futex,
+ Thread_queue_Context *queue_context
)
{
- _Thread_queue_Queue_release( &futex->Queue.Queue, lock_context );
+ _Thread_queue_Queue_release(
+ &futex->Queue.Queue,
+ &queue_context->Lock_context
+ );
}
int _Futex_Wait( struct _Futex_Control *_futex, int *uaddr, int val )
{
- Futex_Control *futex;
- ISR_lock_Context lock_context;
- Thread_Control *executing;
- int eno;
+ Futex_Control *futex;
+ Thread_queue_Context queue_context;
+ Thread_Control *executing;
+ int eno;
futex = _Futex_Get( _futex );
- executing = _Futex_Queue_acquire( futex, &lock_context );
+ executing = _Futex_Queue_acquire( futex, &queue_context );
if ( *uaddr == val ) {
+ _Thread_queue_Context_set_expected_level( &queue_context, 1 );
_Thread_queue_Enqueue_critical(
&futex->Queue.Queue,
FUTEX_TQ_OPERATIONS,
executing,
STATES_WAITING_FOR_SYS_LOCK_FUTEX,
WATCHDOG_NO_TIMEOUT,
- &lock_context
+ &queue_context
);
eno = 0;
} else {
- _Futex_Queue_release( futex, &lock_context );
+ _Futex_Queue_release( futex, &queue_context );
eno = EWOULDBLOCK;
}
@@ -128,11 +132,11 @@ static Thread_Control *_Futex_Flush_filter(
int _Futex_Wake( struct _Futex_Control *_futex, int count )
{
- Futex_Control *futex;
+ Futex_Control *futex;
Futex_Context context;
futex = _Futex_Get( _futex );
- _Futex_Queue_acquire( futex, &context.Base.Lock_context );
+ _Futex_Queue_acquire( futex, &context.Base );
/*
* For some synchronization objects like barriers the _Futex_Wake() must be
@@ -140,7 +144,7 @@ int _Futex_Wake( struct _Futex_Control *_futex, int count )
* check this condition early.
*/
if ( __predict_true( _Thread_queue_Is_empty( &futex->Queue.Queue ) ) ) {
- _Futex_Queue_release( futex, &context.Base.Lock_context );
+ _Futex_Queue_release( futex, &context.Base );
return 0;
}
diff --git a/cpukit/score/src/mpci.c b/cpukit/score/src/mpci.c
index 1e26b1f3ce..8c61e23b0f 100644
--- a/cpukit/score/src/mpci.c
+++ b/cpukit/score/src/mpci.c
@@ -259,7 +259,8 @@ Status_Control _MPCI_Send_request_packet(
&_Thread_queue_Operations_FIFO,
executing,
STATES_WAITING_FOR_RPC_REPLY | extra_state,
- the_packet->timeout
+ the_packet->timeout,
+ 2
);
_Thread_Dispatch_enable( cpu_self );
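The bare 2 passed above suggests that the plain (non-critical) enqueue wrapper, presumably _Thread_queue_Enqueue() outside this directory, now takes the expected level as a parameter and builds the context itself. A hypothetical sketch of such a wrapper, assuming this shape; only the helper calls it uses are confirmed by the diff:

RTEMS_INLINE_ROUTINE void _Thread_queue_Enqueue(
  Thread_queue_Control          *the_thread_queue,
  const Thread_queue_Operations *operations,
  Thread_Control                *the_thread,
  States_Control                 state,
  Watchdog_Interval              timeout,
  uint32_t                       expected_level
)
{
  Thread_queue_Context queue_context;

  /* Hypothetical body: initialize, record the level, lock, enqueue. */
  _Thread_queue_Context_initialize( &queue_context );
  _Thread_queue_Context_set_expected_level( &queue_context, expected_level );
  _Thread_queue_Acquire( the_thread_queue, &queue_context.Lock_context );
  _Thread_queue_Enqueue_critical(
    &the_thread_queue->Queue,
    operations,
    the_thread,
    state,
    timeout,
    &queue_context
  );
}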
diff --git a/cpukit/score/src/mutex.c b/cpukit/score/src/mutex.c
index 6a85850a89..ed374a0768 100644
--- a/cpukit/score/src/mutex.c
+++ b/cpukit/score/src/mutex.c
@@ -72,47 +72,51 @@ static Mutex_Control *_Mutex_Get( struct _Mutex_Control *_mutex )
}
static Thread_Control *_Mutex_Queue_acquire(
- Mutex_Control *mutex,
- ISR_lock_Context *lock_context
+ Mutex_Control *mutex,
+ Thread_queue_Context *queue_context
)
{
Thread_Control *executing;
- _ISR_lock_ISR_disable( lock_context );
+ _ISR_lock_ISR_disable( &queue_context->Lock_context );
executing = _Thread_Executing;
_Thread_queue_Queue_acquire_critical(
&mutex->Queue.Queue,
&executing->Potpourri_stats,
- lock_context
+ &queue_context->Lock_context
);
return executing;
}
static void _Mutex_Queue_release(
- Mutex_Control *mutex,
- ISR_lock_Context *lock_context
+ Mutex_Control *mutex,
+ Thread_queue_Context *queue_context
)
{
- _Thread_queue_Queue_release( &mutex->Queue.Queue, lock_context );
+ _Thread_queue_Queue_release(
+ &mutex->Queue.Queue,
+ &queue_context->Lock_context
+ );
}
static void _Mutex_Acquire_slow(
- Mutex_Control *mutex,
- Thread_Control *owner,
- Thread_Control *executing,
- Watchdog_Interval timeout,
- ISR_lock_Context *lock_context
+ Mutex_Control *mutex,
+ Thread_Control *owner,
+ Thread_Control *executing,
+ Watchdog_Interval timeout,
+ Thread_queue_Context *queue_context
)
{
_Thread_Inherit_priority( owner, executing );
+ _Thread_queue_Context_set_expected_level( queue_context, 1 );
_Thread_queue_Enqueue_critical(
&mutex->Queue.Queue,
MUTEX_TQ_OPERATIONS,
executing,
STATES_WAITING_FOR_SYS_LOCK_MUTEX,
timeout,
- lock_context
+ queue_context
);
}
@@ -148,7 +152,7 @@ static void _Mutex_Release_slow(
&queue_context->Lock_context
);
} else {
- _Mutex_Queue_release( mutex, &queue_context->Lock_context );
+ _Mutex_Queue_release( mutex, queue_context );
}
if ( !keep_priority ) {
@@ -185,7 +189,7 @@ static void _Mutex_Release_critical(
|| !executing->priority_restore_hint;
if ( __predict_true( heads == NULL && keep_priority ) ) {
- _Mutex_Queue_release( mutex, &queue_context->Lock_context );
+ _Mutex_Queue_release( mutex, queue_context );
} else {
_Mutex_Release_slow(
mutex,
@@ -199,22 +203,23 @@ static void _Mutex_Release_critical(
void _Mutex_Acquire( struct _Mutex_Control *_mutex )
{
- Mutex_Control *mutex;
- ISR_lock_Context lock_context;
- Thread_Control *executing;
- Thread_Control *owner;
+ Mutex_Control *mutex;
+ Thread_queue_Context queue_context;
+ Thread_Control *executing;
+ Thread_Control *owner;
mutex = _Mutex_Get( _mutex );
- executing = _Mutex_Queue_acquire( mutex, &lock_context );
+ _Thread_queue_Context_initialize( &queue_context );
+ executing = _Mutex_Queue_acquire( mutex, &queue_context );
owner = mutex->Queue.Queue.owner;
if ( __predict_true( owner == NULL ) ) {
mutex->Queue.Queue.owner = executing;
++executing->resource_count;
- _Mutex_Queue_release( mutex, &lock_context );
+ _Mutex_Queue_release( mutex, &queue_context );
} else {
- _Mutex_Acquire_slow( mutex, owner, executing, 0, &lock_context );
+ _Mutex_Acquire_slow( mutex, owner, executing, 0, &queue_context );
}
}
@@ -223,20 +228,21 @@ int _Mutex_Acquire_timed(
const struct timespec *abstime
)
{
- Mutex_Control *mutex;
- ISR_lock_Context lock_context;
- Thread_Control *executing;
- Thread_Control *owner;
+ Mutex_Control *mutex;
+ Thread_queue_Context queue_context;
+ Thread_Control *executing;
+ Thread_Control *owner;
mutex = _Mutex_Get( _mutex );
- executing = _Mutex_Queue_acquire( mutex, &lock_context );
+ _Thread_queue_Context_initialize( &queue_context );
+ executing = _Mutex_Queue_acquire( mutex, &queue_context );
owner = mutex->Queue.Queue.owner;
if ( __predict_true( owner == NULL ) ) {
mutex->Queue.Queue.owner = executing;
++executing->resource_count;
- _Mutex_Queue_release( mutex, &lock_context );
+ _Mutex_Queue_release( mutex, &queue_context );
return 0;
} else {
@@ -244,17 +250,17 @@ int _Mutex_Acquire_timed(
switch ( _TOD_Absolute_timeout_to_ticks( abstime, &ticks ) ) {
case TOD_ABSOLUTE_TIMEOUT_INVALID:
- _Mutex_Queue_release( mutex, &lock_context );
+ _Mutex_Queue_release( mutex, &queue_context );
return EINVAL;
case TOD_ABSOLUTE_TIMEOUT_IS_IN_PAST:
case TOD_ABSOLUTE_TIMEOUT_IS_NOW:
- _Mutex_Queue_release( mutex, &lock_context );
+ _Mutex_Queue_release( mutex, &queue_context );
return ETIMEDOUT;
default:
break;
}
- _Mutex_Acquire_slow( mutex, owner, executing, ticks, &lock_context );
+ _Mutex_Acquire_slow( mutex, owner, executing, ticks, &queue_context );
return STATUS_GET_POSIX( _Thread_Wait_get_status( executing ) );
}
@@ -262,14 +268,15 @@ int _Mutex_Acquire_timed(
int _Mutex_Try_acquire( struct _Mutex_Control *_mutex )
{
- Mutex_Control *mutex;
- ISR_lock_Context lock_context;
- Thread_Control *executing;
- Thread_Control *owner;
- int eno;
+ Mutex_Control *mutex;
+ Thread_queue_Context queue_context;
+ Thread_Control *executing;
+ Thread_Control *owner;
+ int eno;
mutex = _Mutex_Get( _mutex );
- executing = _Mutex_Queue_acquire( mutex, &lock_context );
+ _Thread_queue_Context_initialize( &queue_context );
+ executing = _Mutex_Queue_acquire( mutex, &queue_context );
owner = mutex->Queue.Queue.owner;
@@ -281,7 +288,7 @@ int _Mutex_Try_acquire( struct _Mutex_Control *_mutex )
eno = EBUSY;
}
- _Mutex_Queue_release( mutex, &lock_context );
+ _Mutex_Queue_release( mutex, &queue_context );
return eno;
}
@@ -294,7 +301,7 @@ void _Mutex_Release( struct _Mutex_Control *_mutex )
mutex = _Mutex_Get( _mutex );
_Thread_queue_Context_initialize( &queue_context );
- executing = _Mutex_Queue_acquire( mutex, &queue_context.Lock_context );
+ executing = _Mutex_Queue_acquire( mutex, &queue_context );
_Assert( mutex->Queue.Queue.owner == executing );
@@ -311,24 +318,25 @@ static Mutex_recursive_Control *_Mutex_recursive_Get(
void _Mutex_recursive_Acquire( struct _Mutex_recursive_Control *_mutex )
{
Mutex_recursive_Control *mutex;
- ISR_lock_Context lock_context;
+ Thread_queue_Context queue_context;
Thread_Control *executing;
Thread_Control *owner;
mutex = _Mutex_recursive_Get( _mutex );
- executing = _Mutex_Queue_acquire( &mutex->Mutex, &lock_context );
+ _Thread_queue_Context_initialize( &queue_context );
+ executing = _Mutex_Queue_acquire( &mutex->Mutex, &queue_context );
owner = mutex->Mutex.Queue.Queue.owner;
if ( __predict_true( owner == NULL ) ) {
mutex->Mutex.Queue.Queue.owner = executing;
++executing->resource_count;
- _Mutex_Queue_release( &mutex->Mutex, &lock_context );
+ _Mutex_Queue_release( &mutex->Mutex, &queue_context );
} else if ( owner == executing ) {
++mutex->nest_level;
- _Mutex_Queue_release( &mutex->Mutex, &lock_context );
+ _Mutex_Queue_release( &mutex->Mutex, &queue_context );
} else {
- _Mutex_Acquire_slow( &mutex->Mutex, owner, executing, 0, &lock_context );
+ _Mutex_Acquire_slow( &mutex->Mutex, owner, executing, 0, &queue_context );
}
}
@@ -338,24 +346,25 @@ int _Mutex_recursive_Acquire_timed(
)
{
Mutex_recursive_Control *mutex;
- ISR_lock_Context lock_context;
+ Thread_queue_Context queue_context;
Thread_Control *executing;
Thread_Control *owner;
mutex = _Mutex_recursive_Get( _mutex );
- executing = _Mutex_Queue_acquire( &mutex->Mutex, &lock_context );
+ _Thread_queue_Context_initialize( &queue_context );
+ executing = _Mutex_Queue_acquire( &mutex->Mutex, &queue_context );
owner = mutex->Mutex.Queue.Queue.owner;
if ( __predict_true( owner == NULL ) ) {
mutex->Mutex.Queue.Queue.owner = executing;
++executing->resource_count;
- _Mutex_Queue_release( &mutex->Mutex, &lock_context );
+ _Mutex_Queue_release( &mutex->Mutex, &queue_context );
return 0;
} else if ( owner == executing ) {
++mutex->nest_level;
- _Mutex_Queue_release( &mutex->Mutex, &lock_context );
+ _Mutex_Queue_release( &mutex->Mutex, &queue_context );
return 0;
} else {
@@ -363,11 +372,11 @@ int _Mutex_recursive_Acquire_timed(
switch ( _TOD_Absolute_timeout_to_ticks( abstime, &ticks ) ) {
case TOD_ABSOLUTE_TIMEOUT_INVALID:
- _Mutex_Queue_release( &mutex->Mutex, &lock_context );
+ _Mutex_Queue_release( &mutex->Mutex, &queue_context );
return EINVAL;
case TOD_ABSOLUTE_TIMEOUT_IS_IN_PAST:
case TOD_ABSOLUTE_TIMEOUT_IS_NOW:
- _Mutex_Queue_release( &mutex->Mutex, &lock_context );
+ _Mutex_Queue_release( &mutex->Mutex, &queue_context );
return ETIMEDOUT;
default:
break;
@@ -378,7 +387,7 @@ int _Mutex_recursive_Acquire_timed(
owner,
executing,
ticks,
- &lock_context
+ &queue_context
);
return STATUS_GET_POSIX( _Thread_Wait_get_status( executing ) );
@@ -388,13 +397,14 @@ int _Mutex_recursive_Acquire_timed(
int _Mutex_recursive_Try_acquire( struct _Mutex_recursive_Control *_mutex )
{
Mutex_recursive_Control *mutex;
- ISR_lock_Context lock_context;
+ Thread_queue_Context queue_context;
Thread_Control *executing;
Thread_Control *owner;
int eno;
mutex = _Mutex_recursive_Get( _mutex );
- executing = _Mutex_Queue_acquire( &mutex->Mutex, &lock_context );
+ _Thread_queue_Context_initialize( &queue_context );
+ executing = _Mutex_Queue_acquire( &mutex->Mutex, &queue_context );
owner = mutex->Mutex.Queue.Queue.owner;
@@ -409,7 +419,7 @@ int _Mutex_recursive_Try_acquire( struct _Mutex_recursive_Control *_mutex )
eno = EBUSY;
}
- _Mutex_Queue_release( &mutex->Mutex, &lock_context );
+ _Mutex_Queue_release( &mutex->Mutex, &queue_context );
return eno;
}
@@ -423,10 +433,7 @@ void _Mutex_recursive_Release( struct _Mutex_recursive_Control *_mutex )
mutex = _Mutex_recursive_Get( _mutex );
_Thread_queue_Context_initialize( &queue_context );
- executing = _Mutex_Queue_acquire(
- &mutex->Mutex,
- &queue_context.Lock_context
- );
+ executing = _Mutex_Queue_acquire( &mutex->Mutex, &queue_context );
_Assert( mutex->Mutex.Queue.Queue.owner == executing );
@@ -437,7 +444,7 @@ void _Mutex_recursive_Release( struct _Mutex_recursive_Control *_mutex )
} else {
mutex->nest_level = nest_level - 1;
- _Mutex_Queue_release( &mutex->Mutex, &queue_context.Lock_context );
+ _Mutex_Queue_release( &mutex->Mutex, &queue_context );
}
}
diff --git a/cpukit/score/src/semaphore.c b/cpukit/score/src/semaphore.c
index 72abd9e12a..03af9cff7e 100644
--- a/cpukit/score/src/semaphore.c
+++ b/cpukit/score/src/semaphore.c
@@ -56,53 +56,58 @@ static Semaphore_Control *_Semaphore_Get(
}
static Thread_Control *_Semaphore_Queue_acquire(
- Semaphore_Control *sem,
- ISR_lock_Context *lock_context
+ Semaphore_Control *sem,
+ Thread_queue_Context *queue_context
)
{
Thread_Control *executing;
- _ISR_lock_ISR_disable( lock_context );
+ _ISR_lock_ISR_disable( &queue_context->Lock_context );
executing = _Thread_Executing;
_Thread_queue_Queue_acquire_critical(
&sem->Queue.Queue,
&executing->Potpourri_stats,
- lock_context
+ &queue_context->Lock_context
);
return executing;
}
static void _Semaphore_Queue_release(
- Semaphore_Control *sem,
- ISR_lock_Context *lock_context
+ Semaphore_Control *sem,
+ Thread_queue_Context *queue_context
)
{
- _Thread_queue_Queue_release( &sem->Queue.Queue, lock_context );
+ _Thread_queue_Queue_release(
+ &sem->Queue.Queue,
+ &queue_context->Lock_context
+ );
}
void _Semaphore_Wait( struct _Semaphore_Control *_sem )
{
- Semaphore_Control *sem ;
- ISR_lock_Context lock_context;
- Thread_Control *executing;
- unsigned int count;
+ Semaphore_Control *sem ;
+ Thread_queue_Context queue_context;
+ Thread_Control *executing;
+ unsigned int count;
sem = _Semaphore_Get( _sem );
- executing = _Semaphore_Queue_acquire( sem, &lock_context );
+ _Thread_queue_Context_initialize( &queue_context );
+ executing = _Semaphore_Queue_acquire( sem, &queue_context );
count = sem->count;
if ( count > 0 ) {
sem->count = count - 1;
- _Semaphore_Queue_release( sem, &lock_context );
+ _Semaphore_Queue_release( sem, &queue_context );
} else {
+ _Thread_queue_Context_set_expected_level( &queue_context, 1 );
_Thread_queue_Enqueue_critical(
&sem->Queue.Queue,
SEMAPHORE_TQ_OPERATIONS,
executing,
STATES_WAITING_FOR_SYS_LOCK_SEMAPHORE,
WATCHDOG_NO_TIMEOUT,
- &lock_context
+ &queue_context
);
}
}
@@ -115,13 +120,13 @@ void _Semaphore_Post( struct _Semaphore_Control *_sem )
sem = _Semaphore_Get( _sem );
_Thread_queue_Context_initialize( &queue_context );
- _Semaphore_Queue_acquire( sem, &queue_context.Lock_context );
+ _Semaphore_Queue_acquire( sem, &queue_context );
heads = sem->Queue.Queue.heads;
if ( heads == NULL ) {
_Assert( sem->count < UINT_MAX );
++sem->count;
- _Semaphore_Queue_release( sem, &queue_context.Lock_context );
+ _Semaphore_Queue_release( sem, &queue_context );
} else {
const Thread_queue_Operations *operations;
Thread_Control *first;
diff --git a/cpukit/score/src/threadqenqueue.c b/cpukit/score/src/threadqenqueue.c
index 4eaafa9036..1e95003aa7 100644
--- a/cpukit/score/src/threadqenqueue.c
+++ b/cpukit/score/src/threadqenqueue.c
@@ -40,7 +40,7 @@ void _Thread_queue_Enqueue_critical(
Thread_Control *the_thread,
States_Control state,
Watchdog_Interval timeout,
- ISR_lock_Context *lock_context
+ Thread_queue_Context *queue_context
)
{
Per_CPU_Control *cpu_self;
@@ -61,8 +61,19 @@ void _Thread_queue_Enqueue_critical(
( *operations->enqueue )( queue, the_thread );
_Thread_Wait_flags_set( the_thread, THREAD_QUEUE_INTEND_TO_BLOCK );
- cpu_self = _Thread_Dispatch_disable_critical( lock_context );
- _Thread_queue_Queue_release( queue, lock_context );
+ cpu_self = _Thread_Dispatch_disable_critical( &queue_context->Lock_context );
+ _Thread_queue_Queue_release( queue, &queue_context->Lock_context );
+
+ if (
+ cpu_self->thread_dispatch_disable_level
+ != queue_context->expected_thread_dispatch_disable_level
+ ) {
+ _Terminate(
+ INTERNAL_ERROR_CORE,
+ false,
+ INTERNAL_ERROR_THREAD_QUEUE_ENQUEUE_FROM_BAD_STATE
+ );
+ }
/*
* Set the blocking state for this thread queue in the thread.
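The effect of the new check above: blocking with an unexpected thread dispatch disable level now terminates deterministically with INTERNAL_ERROR_THREAD_QUEUE_ENQUEUE_FROM_BAD_STATE instead of corrupting the dispatch state later. A hypothetical offender, for illustration only:

/* Hypothetical misuse, not from the patch: blocking on a self-contained
 * semaphore while thread dispatching is already disabled.  The enqueue
 * then observes level 2 where _Semaphore_Wait() set an expected level
 * of 1, so the check calls _Terminate() immediately.
 */
_Thread_Dispatch_disable();
_Semaphore_Wait( &sem );   /* fatal error here, not silent corruption */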
diff --git a/cpukit/score/src/threadrestart.c b/cpukit/score/src/threadrestart.c
index 3bddac458d..f155980c1a 100644
--- a/cpukit/score/src/threadrestart.c
+++ b/cpukit/score/src/threadrestart.c
@@ -443,10 +443,10 @@ static void _Thread_Finalize_life_change(
}
void _Thread_Join(
- Thread_Control *the_thread,
- States_Control waiting_for_join,
- Thread_Control *executing,
- ISR_lock_Context *lock_context
+ Thread_Control *the_thread,
+ States_Control waiting_for_join,
+ Thread_Control *executing,
+ Thread_queue_Context *queue_context
)
{
_Assert( the_thread != executing );
@@ -462,7 +462,7 @@ void _Thread_Join(
executing,
waiting_for_join,
WATCHDOG_NO_TIMEOUT,
- lock_context
+ queue_context
);
}
@@ -524,14 +524,16 @@ void _Thread_Cancel(
void _Thread_Close( Thread_Control *the_thread, Thread_Control *executing )
{
- ISR_lock_Context lock_context;
+ Thread_queue_Context queue_context;
- _Thread_State_acquire( the_thread, &lock_context );
+ _Thread_queue_Context_initialize( &queue_context );
+ _Thread_queue_Context_set_expected_level( &queue_context, 2 );
+ _Thread_State_acquire( the_thread, &queue_context.Lock_context );
_Thread_Join(
the_thread,
STATES_WAITING_FOR_JOIN,
executing,
- &lock_context
+ &queue_context
);
_Thread_Cancel( the_thread, executing, NULL );
}
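Finally, a hedged reading of the expected level of 2 in _Thread_Close() above (the patch does not spell this out): its callers, for example the task deletion path, already run with thread dispatching disabled once before the join blocks.

/*
 * level on entry to _Thread_Close() (caller disabled dispatch once)  = 1
 * disable inside _Thread_queue_Enqueue_critical() during the join    + 1
 *                                                   expected level   = 2
 */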