summaryrefslogtreecommitdiffstats
path: root/cpukit/score/src/mutex.c
diff options
context:
space:
mode:
authorSebastian Huber <sebastian.huber@embedded-brains.de>2016-11-03 14:44:15 +0100
committerSebastian Huber <sebastian.huber@embedded-brains.de>2016-11-04 10:54:53 +0100
commitee42943cd03e0f64319f90444d0c1d0c71e07089 (patch)
treea71dfbde8fd87e958f8c05c9c7a23776492023fa /cpukit/score/src/mutex.c
parentscore: Use non-inline thread queue lock ops (diff)
downloadrtems-ee42943cd03e0f64319f90444d0c1d0c71e07089.tar.bz2
score: Optimize self-contained mutexes
Diffstat (limited to 'cpukit/score/src/mutex.c')
-rw-r--r--cpukit/score/src/mutex.c99
1 file changed, 67 insertions, 32 deletions
diff --git a/cpukit/score/src/mutex.c b/cpukit/score/src/mutex.c
index 719ae5bd4c..4700563bf6 100644
--- a/cpukit/score/src/mutex.c
+++ b/cpukit/score/src/mutex.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015 embedded brains GmbH. All rights reserved.
+ * Copyright (c) 2015, 2016 embedded brains GmbH. All rights reserved.
*
* embedded brains GmbH
* Dornierstr. 4
@@ -71,14 +71,21 @@ static Mutex_Control *_Mutex_Get( struct _Mutex_Control *_mutex )
return (Mutex_Control *) _mutex;
}
-static Thread_Control *_Mutex_Queue_acquire(
+#define _Mutex_ISR_disable( level, queue_context ) \
+ do { \
+ _ISR_Local_disable( level ); \
+ _ISR_lock_ISR_disable_profile( \
+ &( queue_context )->Lock_context.Lock_context \
+ ) \
+ } while ( 0 )
+
+static Thread_Control *_Mutex_Queue_acquire_critical(
Mutex_Control *mutex,
Thread_queue_Context *queue_context
)
{
Thread_Control *executing;
- _ISR_lock_ISR_disable( &queue_context->Lock_context.Lock_context );
executing = _Thread_Executing;
_Thread_queue_Queue_acquire_critical(
&mutex->Queue.Queue,
@@ -91,19 +98,22 @@ static Thread_Control *_Mutex_Queue_acquire(
static void _Mutex_Queue_release(
Mutex_Control *mutex,
+ ISR_Level level,
Thread_queue_Context *queue_context
)
{
- _Thread_queue_Queue_release(
+ _Thread_queue_Queue_release_critical(
&mutex->Queue.Queue,
&queue_context->Lock_context.Lock_context
);
+ _ISR_Local_enable( level );
}
static void _Mutex_Acquire_slow(
Mutex_Control *mutex,
Thread_Control *owner,
Thread_Control *executing,
+ ISR_Level level,
Thread_queue_Context *queue_context
)
{
@@ -112,6 +122,10 @@ static void _Mutex_Acquire_slow(
queue_context,
_Thread_queue_Deadlock_fatal
);
+ _ISR_lock_Context_set_level(
+ &queue_context->Lock_context.Lock_context,
+ level
+ );
_Thread_queue_Enqueue_critical(
&mutex->Queue.Queue,
MUTEX_TQ_OPERATIONS,
@@ -124,6 +138,7 @@ static void _Mutex_Acquire_slow(
static void _Mutex_Release_critical(
Mutex_Control *mutex,
Thread_Control *executing,
+ ISR_Level level,
Thread_queue_Context *queue_context
)
{
@@ -134,8 +149,12 @@ static void _Mutex_Release_critical(
_Thread_Resource_count_decrement( executing );
if ( __predict_true( heads == NULL ) ) {
- _Mutex_Queue_release( mutex, queue_context );
+ _Mutex_Queue_release( mutex, level, queue_context );
} else {
+ _ISR_lock_Context_set_level(
+ &queue_context->Lock_context.Lock_context,
+ level
+ );
_Thread_queue_Surrender(
&mutex->Queue.Queue,
heads,
@@ -150,22 +169,24 @@ void _Mutex_Acquire( struct _Mutex_Control *_mutex )
{
Mutex_Control *mutex;
Thread_queue_Context queue_context;
+ ISR_Level level;
Thread_Control *executing;
Thread_Control *owner;
mutex = _Mutex_Get( _mutex );
_Thread_queue_Context_initialize( &queue_context );
- executing = _Mutex_Queue_acquire( mutex, &queue_context );
+ _Mutex_ISR_disable( level, &queue_context );
+ executing = _Mutex_Queue_acquire_critical( mutex, &queue_context );
owner = mutex->Queue.Queue.owner;
if ( __predict_true( owner == NULL ) ) {
mutex->Queue.Queue.owner = executing;
_Thread_Resource_count_increment( executing );
- _Mutex_Queue_release( mutex, &queue_context );
+ _Mutex_Queue_release( mutex, level, &queue_context );
} else {
_Thread_queue_Context_set_no_timeout( &queue_context );
- _Mutex_Acquire_slow( mutex, owner, executing, &queue_context );
+ _Mutex_Acquire_slow( mutex, owner, executing, level, &queue_context );
}
}
@@ -176,19 +197,21 @@ int _Mutex_Acquire_timed(
{
Mutex_Control *mutex;
Thread_queue_Context queue_context;
+ ISR_Level level;
Thread_Control *executing;
Thread_Control *owner;
mutex = _Mutex_Get( _mutex );
_Thread_queue_Context_initialize( &queue_context );
- executing = _Mutex_Queue_acquire( mutex, &queue_context );
+ _Mutex_ISR_disable( level, &queue_context );
+ executing = _Mutex_Queue_acquire_critical( mutex, &queue_context );
owner = mutex->Queue.Queue.owner;
if ( __predict_true( owner == NULL ) ) {
mutex->Queue.Queue.owner = executing;
_Thread_Resource_count_increment( executing );
- _Mutex_Queue_release( mutex, &queue_context );
+ _Mutex_Queue_release( mutex, level, &queue_context );
return 0;
} else {
@@ -196,18 +219,18 @@ int _Mutex_Acquire_timed(
switch ( _TOD_Absolute_timeout_to_ticks( abstime, CLOCK_REALTIME, &ticks ) ) {
case TOD_ABSOLUTE_TIMEOUT_INVALID:
- _Mutex_Queue_release( mutex, &queue_context );
+ _Mutex_Queue_release( mutex, level, &queue_context );
return EINVAL;
case TOD_ABSOLUTE_TIMEOUT_IS_IN_PAST:
case TOD_ABSOLUTE_TIMEOUT_IS_NOW:
- _Mutex_Queue_release( mutex, &queue_context );
+ _Mutex_Queue_release( mutex, level, &queue_context );
return ETIMEDOUT;
default:
break;
}
_Thread_queue_Context_set_relative_timeout( &queue_context, ticks );
- _Mutex_Acquire_slow( mutex, owner, executing, &queue_context );
+ _Mutex_Acquire_slow( mutex, owner, executing, level, &queue_context );
return STATUS_GET_POSIX( _Thread_Wait_get_status( executing ) );
}
@@ -217,13 +240,15 @@ int _Mutex_Try_acquire( struct _Mutex_Control *_mutex )
{
Mutex_Control *mutex;
Thread_queue_Context queue_context;
+ ISR_Level level;
Thread_Control *executing;
Thread_Control *owner;
int eno;
mutex = _Mutex_Get( _mutex );
_Thread_queue_Context_initialize( &queue_context );
- executing = _Mutex_Queue_acquire( mutex, &queue_context );
+ _Mutex_ISR_disable( level, &queue_context );
+ executing = _Mutex_Queue_acquire_critical( mutex, &queue_context );
owner = mutex->Queue.Queue.owner;
@@ -235,7 +260,7 @@ int _Mutex_Try_acquire( struct _Mutex_Control *_mutex )
eno = EBUSY;
}
- _Mutex_Queue_release( mutex, &queue_context );
+ _Mutex_Queue_release( mutex, level, &queue_context );
return eno;
}
@@ -244,15 +269,17 @@ void _Mutex_Release( struct _Mutex_Control *_mutex )
{
Mutex_Control *mutex;
Thread_queue_Context queue_context;
+ ISR_Level level;
Thread_Control *executing;
mutex = _Mutex_Get( _mutex );
_Thread_queue_Context_initialize( &queue_context );
- executing = _Mutex_Queue_acquire( mutex, &queue_context );
+ _Mutex_ISR_disable( level, &queue_context );
+ executing = _Mutex_Queue_acquire_critical( mutex, &queue_context );
_Assert( mutex->Queue.Queue.owner == executing );
- _Mutex_Release_critical( mutex, executing, &queue_context );
+ _Mutex_Release_critical( mutex, executing, level, &queue_context );
}
static Mutex_recursive_Control *_Mutex_recursive_Get(
@@ -266,25 +293,27 @@ void _Mutex_recursive_Acquire( struct _Mutex_recursive_Control *_mutex )
{
Mutex_recursive_Control *mutex;
Thread_queue_Context queue_context;
+ ISR_Level level;
Thread_Control *executing;
Thread_Control *owner;
mutex = _Mutex_recursive_Get( _mutex );
_Thread_queue_Context_initialize( &queue_context );
- executing = _Mutex_Queue_acquire( &mutex->Mutex, &queue_context );
+ _Mutex_ISR_disable( level, &queue_context );
+ executing = _Mutex_Queue_acquire_critical( &mutex->Mutex, &queue_context );
owner = mutex->Mutex.Queue.Queue.owner;
if ( __predict_true( owner == NULL ) ) {
mutex->Mutex.Queue.Queue.owner = executing;
_Thread_Resource_count_increment( executing );
- _Mutex_Queue_release( &mutex->Mutex, &queue_context );
+ _Mutex_Queue_release( &mutex->Mutex, level, &queue_context );
} else if ( owner == executing ) {
++mutex->nest_level;
- _Mutex_Queue_release( &mutex->Mutex, &queue_context );
+ _Mutex_Queue_release( &mutex->Mutex, level, &queue_context );
} else {
_Thread_queue_Context_set_no_timeout( &queue_context );
- _Mutex_Acquire_slow( &mutex->Mutex, owner, executing, &queue_context );
+ _Mutex_Acquire_slow( &mutex->Mutex, owner, executing, level, &queue_context );
}
}
@@ -295,24 +324,26 @@ int _Mutex_recursive_Acquire_timed(
{
Mutex_recursive_Control *mutex;
Thread_queue_Context queue_context;
+ ISR_Level level;
Thread_Control *executing;
Thread_Control *owner;
mutex = _Mutex_recursive_Get( _mutex );
_Thread_queue_Context_initialize( &queue_context );
- executing = _Mutex_Queue_acquire( &mutex->Mutex, &queue_context );
+ _Mutex_ISR_disable( level, &queue_context );
+ executing = _Mutex_Queue_acquire_critical( &mutex->Mutex, &queue_context );
owner = mutex->Mutex.Queue.Queue.owner;
if ( __predict_true( owner == NULL ) ) {
mutex->Mutex.Queue.Queue.owner = executing;
_Thread_Resource_count_increment( executing );
- _Mutex_Queue_release( &mutex->Mutex, &queue_context );
+ _Mutex_Queue_release( &mutex->Mutex, level, &queue_context );
return 0;
} else if ( owner == executing ) {
++mutex->nest_level;
- _Mutex_Queue_release( &mutex->Mutex, &queue_context );
+ _Mutex_Queue_release( &mutex->Mutex, level, &queue_context );
return 0;
} else {
@@ -320,18 +351,18 @@ int _Mutex_recursive_Acquire_timed(
switch ( _TOD_Absolute_timeout_to_ticks( abstime, CLOCK_REALTIME, &ticks ) ) {
case TOD_ABSOLUTE_TIMEOUT_INVALID:
- _Mutex_Queue_release( &mutex->Mutex, &queue_context );
+ _Mutex_Queue_release( &mutex->Mutex, level, &queue_context );
return EINVAL;
case TOD_ABSOLUTE_TIMEOUT_IS_IN_PAST:
case TOD_ABSOLUTE_TIMEOUT_IS_NOW:
- _Mutex_Queue_release( &mutex->Mutex, &queue_context );
+ _Mutex_Queue_release( &mutex->Mutex, level, &queue_context );
return ETIMEDOUT;
default:
break;
}
_Thread_queue_Context_set_relative_timeout( &queue_context, ticks );
- _Mutex_Acquire_slow( &mutex->Mutex, owner, executing, &queue_context );
+ _Mutex_Acquire_slow( &mutex->Mutex, owner, executing, level, &queue_context );
return STATUS_GET_POSIX( _Thread_Wait_get_status( executing ) );
}
@@ -341,13 +372,15 @@ int _Mutex_recursive_Try_acquire( struct _Mutex_recursive_Control *_mutex )
{
Mutex_recursive_Control *mutex;
Thread_queue_Context queue_context;
+ ISR_Level level;
Thread_Control *executing;
Thread_Control *owner;
int eno;
mutex = _Mutex_recursive_Get( _mutex );
_Thread_queue_Context_initialize( &queue_context );
- executing = _Mutex_Queue_acquire( &mutex->Mutex, &queue_context );
+ _Mutex_ISR_disable( level, &queue_context );
+ executing = _Mutex_Queue_acquire_critical( &mutex->Mutex, &queue_context );
owner = mutex->Mutex.Queue.Queue.owner;
@@ -362,7 +395,7 @@ int _Mutex_recursive_Try_acquire( struct _Mutex_recursive_Control *_mutex )
eno = EBUSY;
}
- _Mutex_Queue_release( &mutex->Mutex, &queue_context );
+ _Mutex_Queue_release( &mutex->Mutex, level, &queue_context );
return eno;
}
@@ -371,23 +404,25 @@ void _Mutex_recursive_Release( struct _Mutex_recursive_Control *_mutex )
{
Mutex_recursive_Control *mutex;
Thread_queue_Context queue_context;
+ ISR_Level level;
Thread_Control *executing;
unsigned int nest_level;
mutex = _Mutex_recursive_Get( _mutex );
_Thread_queue_Context_initialize( &queue_context );
- executing = _Mutex_Queue_acquire( &mutex->Mutex, &queue_context );
+ _Mutex_ISR_disable( level, &queue_context );
+ executing = _Mutex_Queue_acquire_critical( &mutex->Mutex, &queue_context );
_Assert( mutex->Mutex.Queue.Queue.owner == executing );
nest_level = mutex->nest_level;
if ( __predict_true( nest_level == 0 ) ) {
- _Mutex_Release_critical( &mutex->Mutex, executing, &queue_context );
+ _Mutex_Release_critical( &mutex->Mutex, executing, level, &queue_context );
} else {
mutex->nest_level = nest_level - 1;
- _Mutex_Queue_release( &mutex->Mutex, &queue_context );
+ _Mutex_Queue_release( &mutex->Mutex, level, &queue_context );
}
}