From 142628edcd9fc091ce02246a761d5d5100f22ca3 Mon Sep 17 00:00:00 2001
From: Sebastian Huber
Date: Thu, 17 Nov 2016 07:40:05 +0100
Subject: score: Optimize self-contained objects

Avoid use of the stack for the hot paths.
---
 cpukit/score/include/rtems/score/threadqimpl.h | 19 ++++++++++++++
 cpukit/score/src/futex.c                       | 25 +++++++++++++------
 cpukit/score/src/mutex.c                       | 34 ++++++++------------------
 cpukit/score/src/semaphore.c                   | 25 ++++++++++++-------
 4 files changed, 62 insertions(+), 41 deletions(-)

diff --git a/cpukit/score/include/rtems/score/threadqimpl.h b/cpukit/score/include/rtems/score/threadqimpl.h
index e6c8f0576b..45f552a103 100644
--- a/cpukit/score/include/rtems/score/threadqimpl.h
+++ b/cpukit/score/include/rtems/score/threadqimpl.h
@@ -217,6 +217,25 @@ RTEMS_INLINE_ROUTINE void _Thread_queue_Context_add_priority_update(
   queue_context->Priority.update[ n ] = the_thread;
 }
 
+#define _Thread_queue_Context_ISR_disable( queue_context, level ) \
+  do { \
+    _ISR_Local_disable( level ); \
+    _ISR_lock_ISR_disable_profile( \
+      &( queue_context )->Lock_context.Lock_context \
+    ) \
+  } while ( 0 )
+
+RTEMS_INLINE_ROUTINE void _Thread_queue_Context_set_ISR_level(
+  Thread_queue_Context *queue_context,
+  ISR_Level             level
+)
+{
+  _ISR_lock_Context_set_level(
+    &queue_context->Lock_context.Lock_context,
+    level
+  );
+}
+
 /**
  * @brief Sets the MP callout in the thread queue context.
  *
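For context: the new _Thread_queue_Context_ISR_disable()/_Thread_queue_Context_set_ISR_level() pair generalizes the private _Mutex_ISR_disable() macro that the mutex.c hunks below remove. The idea is that the ISR level stays in a local variable (typically a register) across the hot path and is spilled into the on-stack queue context only when the slow path actually blocks. A minimal compilable sketch of that calling convention; my_irq_disable(), my_irq_restore(), and my_queue_context are stand-ins for illustration, not RTEMS API:

#include <stdbool.h>
#include <stdio.h>

typedef unsigned long my_isr_level;   /* stand-in for ISR_Level */

static my_isr_level my_irq_disable( void ) { return 1UL; }
static void my_irq_restore( my_isr_level level ) { (void) level; }

/* Stand-in for Thread_queue_Context: the level is stored here only on
 * the slow path, mirroring _Thread_queue_Context_set_ISR_level().
 */
typedef struct { my_isr_level saved_level; } my_queue_context;

static bool fast_path_succeeds( void ) { return true; }

static void block_then_restore( my_queue_context *ctx )
{
  /* The blocking code eventually re-enables from the stored level. */
  my_irq_restore( ctx->saved_level );
}

static void acquire( my_queue_context *ctx )
{
  my_isr_level level;

  level = my_irq_disable();      /* level stays in a register */

  if ( fast_path_succeeds() ) {
    my_irq_restore( level );     /* hot path: no store to *ctx */
  } else {
    ctx->saved_level = level;    /* slow path only: spill to memory */
    block_then_restore( ctx );
  }
}

int main( void )
{
  my_queue_context ctx;

  acquire( &ctx );
  puts( "fast path taken" );
  return 0;
}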
diff --git a/cpukit/score/src/futex.c b/cpukit/score/src/futex.c
index 1989445c73..38c3be4857 100644
--- a/cpukit/score/src/futex.c
+++ b/cpukit/score/src/futex.c
@@ -48,14 +48,13 @@ static Futex_Control *_Futex_Get( struct _Futex_Control *_futex )
   return (Futex_Control *) _futex;
 }
 
-static Thread_Control *_Futex_Queue_acquire(
-  Futex_Control        *futex,
+static Thread_Control *_Futex_Queue_acquire_critical(
+  Futex_Control        *futex,
   Thread_queue_Context *queue_context
 )
 {
   Thread_Control *executing;
 
-  _ISR_lock_ISR_disable( &queue_context->Lock_context.Lock_context );
   executing = _Thread_Executing;
   _Thread_queue_Queue_acquire_critical(
     &futex->Queue.Queue,
@@ -68,28 +67,34 @@ static Thread_Control *_Futex_Queue_acquire(
 
 static void _Futex_Queue_release(
   Futex_Control        *futex,
+  ISR_Level             level,
   Thread_queue_Context *queue_context
 )
 {
-  _Thread_queue_Queue_release(
+  _Thread_queue_Queue_release_critical(
     &futex->Queue.Queue,
     &queue_context->Lock_context.Lock_context
   );
+  _ISR_Local_enable( level );
 }
 
 int _Futex_Wait( struct _Futex_Control *_futex, int *uaddr, int val )
 {
   Futex_Control        *futex;
+  ISR_Level             level;
   Thread_queue_Context  queue_context;
   Thread_Control       *executing;
   int                   eno;
 
   futex = _Futex_Get( _futex );
-  executing = _Futex_Queue_acquire( futex, &queue_context );
+  _Thread_queue_Context_initialize( &queue_context );
+  _Thread_queue_Context_ISR_disable( &queue_context, level );
+  executing = _Futex_Queue_acquire_critical( futex, &queue_context );
 
   if ( *uaddr == val ) {
     _Thread_queue_Context_set_expected_level( &queue_context, 1 );
     _Thread_queue_Context_set_no_timeout( &queue_context );
+    _Thread_queue_Context_set_ISR_level( &queue_context, level );
     _Thread_queue_Enqueue_critical(
       &futex->Queue.Queue,
       FUTEX_TQ_OPERATIONS,
@@ -99,7 +104,7 @@ int _Futex_Wait( struct _Futex_Control *_futex, int *uaddr, int val )
     );
     eno = 0;
   } else {
-    _Futex_Queue_release( futex, &queue_context );
+    _Futex_Queue_release( futex, level, &queue_context );
     eno = EWOULDBLOCK;
   }
 
@@ -133,10 +138,13 @@ static Thread_Control *_Futex_Flush_filter(
 int _Futex_Wake( struct _Futex_Control *_futex, int count )
 {
   Futex_Control *futex;
+  ISR_Level      level;
   Futex_Context  context;
 
   futex = _Futex_Get( _futex );
-  _Futex_Queue_acquire( futex, &context.Base );
+  _Thread_queue_Context_initialize( &context.Base );
+  _Thread_queue_Context_ISR_disable( &context.Base, level );
+  _Futex_Queue_acquire_critical( futex, &context.Base );
 
   /*
    * For some synchronization objects like barriers the _Futex_Wake() must be
@@ -144,11 +152,12 @@ int _Futex_Wake( struct _Futex_Control *_futex, int count )
    * check this condition early.
    */
   if ( __predict_true( _Thread_queue_Is_empty( &futex->Queue.Queue ) ) ) {
-    _Futex_Queue_release( futex, &context.Base );
+    _Futex_Queue_release( futex, level, &context.Base );
     return 0;
  }
 
   context.count = count;
+  _Thread_queue_Context_set_ISR_level( &context.Base, level );
   return (int) _Thread_queue_Flush_critical(
     &futex->Queue.Queue,
     FUTEX_TQ_OPERATIONS,
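These _Futex_Wait()/_Futex_Wake() functions back the self-contained futex of Newlib's <sys/lock.h> on RTEMS. A usage sketch of the contract the fast paths above optimize, a one-shot gate; this builds only for an RTEMS target, and the _FUTEX_INITIALIZER name is assumed from that header:

#include <sys/lock.h>
#include <limits.h>

static struct _Futex_Control gate = _FUTEX_INITIALIZER;
static int go;  /* production code would use C11 atomics here */

void gate_wait( void )
{
  /* _Futex_Wait() enqueues only while *uaddr still equals the expected
   * value; otherwise it returns EWOULDBLOCK, so re-check in a loop.
   */
  while ( go == 0 ) {
    _Futex_Wait( &gate, &go, 0 );
  }
}

void gate_open( void )
{
  go = 1;
  _Futex_Wake( &gate, INT_MAX );  /* wake all currently queued waiters */
}

Note how gate_open() with no waiters hits exactly the _Thread_queue_Is_empty() early return above, which after this patch neither initializes a lock context on the stack nor stores the ISR level into it.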
diff --git a/cpukit/score/src/mutex.c b/cpukit/score/src/mutex.c
index 4700563bf6..4e1d9ceae4 100644
--- a/cpukit/score/src/mutex.c
+++ b/cpukit/score/src/mutex.c
@@ -71,14 +71,6 @@ static Mutex_Control *_Mutex_Get( struct _Mutex_Control *_mutex )
   return (Mutex_Control *) _mutex;
 }
 
-#define _Mutex_ISR_disable( level, queue_context ) \
-  do { \
-    _ISR_Local_disable( level ); \
-    _ISR_lock_ISR_disable_profile( \
-      &( queue_context )->Lock_context.Lock_context \
-    ) \
-  } while ( 0 )
-
 static Thread_Control *_Mutex_Queue_acquire_critical(
   Mutex_Control        *mutex,
   Thread_queue_Context *queue_context
@@ -122,10 +114,7 @@ static void _Mutex_Acquire_slow(
     queue_context,
     _Thread_queue_Deadlock_fatal
   );
-  _ISR_lock_Context_set_level(
-    &queue_context->Lock_context.Lock_context,
-    level
-  );
+  _Thread_queue_Context_set_ISR_level( queue_context, level );
   _Thread_queue_Enqueue_critical(
     &mutex->Queue.Queue,
     MUTEX_TQ_OPERATIONS,
@@ -151,10 +140,7 @@ static void _Mutex_Release_critical(
   if ( __predict_true( heads == NULL ) ) {
     _Mutex_Queue_release( mutex, level, queue_context );
   } else {
-    _ISR_lock_Context_set_level(
-      &queue_context->Lock_context.Lock_context,
-      level
-    );
+    _Thread_queue_Context_set_ISR_level( queue_context, level );
     _Thread_queue_Surrender(
       &mutex->Queue.Queue,
       heads,
@@ -175,7 +161,7 @@ void _Mutex_Acquire( struct _Mutex_Control *_mutex )
 
   mutex = _Mutex_Get( _mutex );
   _Thread_queue_Context_initialize( &queue_context );
-  _Mutex_ISR_disable( level, &queue_context );
+  _Thread_queue_Context_ISR_disable( &queue_context, level );
   executing = _Mutex_Queue_acquire_critical( mutex, &queue_context );
 
   owner = mutex->Queue.Queue.owner;
@@ -203,7 +189,7 @@ int _Mutex_Acquire_timed(
 
   mutex = _Mutex_Get( _mutex );
   _Thread_queue_Context_initialize( &queue_context );
-  _Mutex_ISR_disable( level, &queue_context );
+  _Thread_queue_Context_ISR_disable( &queue_context, level );
   executing = _Mutex_Queue_acquire_critical( mutex, &queue_context );
 
   owner = mutex->Queue.Queue.owner;
@@ -247,7 +233,7 @@ int _Mutex_Try_acquire( struct _Mutex_Control *_mutex )
 
   mutex = _Mutex_Get( _mutex );
   _Thread_queue_Context_initialize( &queue_context );
-  _Mutex_ISR_disable( level, &queue_context );
+  _Thread_queue_Context_ISR_disable( &queue_context, level );
   executing = _Mutex_Queue_acquire_critical( mutex, &queue_context );
 
   owner = mutex->Queue.Queue.owner;
@@ -274,7 +260,7 @@ void _Mutex_Release( struct _Mutex_Control *_mutex )
 
   mutex = _Mutex_Get( _mutex );
   _Thread_queue_Context_initialize( &queue_context );
-  _Mutex_ISR_disable( level, &queue_context );
+  _Thread_queue_Context_ISR_disable( &queue_context, level );
   executing = _Mutex_Queue_acquire_critical( mutex, &queue_context );
 
   _Assert( mutex->Queue.Queue.owner == executing );
@@ -299,7 +285,7 @@ void _Mutex_recursive_Acquire( struct _Mutex_recursive_Control *_mutex )
 
   mutex = _Mutex_recursive_Get( _mutex );
   _Thread_queue_Context_initialize( &queue_context );
-  _Mutex_ISR_disable( level, &queue_context );
+  _Thread_queue_Context_ISR_disable( &queue_context, level );
   executing = _Mutex_Queue_acquire_critical( &mutex->Mutex, &queue_context );
 
   owner = mutex->Mutex.Queue.Queue.owner;
@@ -330,7 +316,7 @@ int _Mutex_recursive_Acquire_timed(
 
   mutex = _Mutex_recursive_Get( _mutex );
   _Thread_queue_Context_initialize( &queue_context );
-  _Mutex_ISR_disable( level, &queue_context );
+  _Thread_queue_Context_ISR_disable( &queue_context, level );
   executing = _Mutex_Queue_acquire_critical( &mutex->Mutex, &queue_context );
 
   owner = mutex->Mutex.Queue.Queue.owner;
@@ -379,7 +365,7 @@ int _Mutex_recursive_Try_acquire( struct _Mutex_recursive_Control *_mutex )
 
   mutex = _Mutex_recursive_Get( _mutex );
   _Thread_queue_Context_initialize( &queue_context );
-  _Mutex_ISR_disable( level, &queue_context );
+  _Thread_queue_Context_ISR_disable( &queue_context, level );
   executing = _Mutex_Queue_acquire_critical( &mutex->Mutex, &queue_context );
 
   owner = mutex->Mutex.Queue.Queue.owner;
@@ -410,7 +396,7 @@ void _Mutex_recursive_Release( struct _Mutex_recursive_Control *_mutex )
 
   mutex = _Mutex_recursive_Get( _mutex );
   _Thread_queue_Context_initialize( &queue_context );
-  _Mutex_ISR_disable( level, &queue_context );
+  _Thread_queue_Context_ISR_disable( &queue_context, level );
   executing = _Mutex_Queue_acquire_critical( &mutex->Mutex, &queue_context );
 
   _Assert( mutex->Mutex.Queue.Queue.owner == executing );
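The _Mutex_* family is the <sys/lock.h> self-contained mutex used, for example, by Newlib-level locking on RTEMS. With the change above, the uncontended acquire/release below goes through _Mutex_Queue_acquire_critical() and _Mutex_Queue_release() without ever writing the ISR level into the on-stack queue context. A brief usage sketch; _MUTEX_INITIALIZER is assumed from that header, the acquire/release signatures are the ones in this file:

#include <sys/lock.h>

static struct _Mutex_Control lock = _MUTEX_INITIALIZER;
static unsigned counter;

void counter_increment( void )
{
  _Mutex_Acquire( &lock );  /* hot path: owner is NULL, no enqueue */
  ++counter;
  _Mutex_Release( &lock );  /* hot path: no queue heads, no surrender */
}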
diff --git a/cpukit/score/src/semaphore.c b/cpukit/score/src/semaphore.c
index be7d685105..29acef4fe1 100644
--- a/cpukit/score/src/semaphore.c
+++ b/cpukit/score/src/semaphore.c
@@ -55,14 +55,13 @@ static Semaphore_Control *_Semaphore_Get(
   return (Semaphore_Control *) _sem;
 }
 
-static Thread_Control *_Semaphore_Queue_acquire(
+static Thread_Control *_Semaphore_Queue_acquire_critical(
   Semaphore_Control    *sem,
   Thread_queue_Context *queue_context
 )
 {
   Thread_Control *executing;
 
-  _ISR_lock_ISR_disable( &queue_context->Lock_context.Lock_context );
   executing = _Thread_Executing;
   _Thread_queue_Queue_acquire_critical(
     &sem->Queue.Queue,
@@ -75,33 +74,38 @@ static Thread_Control *_Semaphore_Queue_acquire(
 
 static void _Semaphore_Queue_release(
   Semaphore_Control    *sem,
+  ISR_Level             level,
   Thread_queue_Context *queue_context
 )
 {
-  _Thread_queue_Queue_release(
+  _Thread_queue_Queue_release_critical(
     &sem->Queue.Queue,
     &queue_context->Lock_context.Lock_context
   );
+  _ISR_Local_enable( level );
 }
 
 void _Semaphore_Wait( struct _Semaphore_Control *_sem )
 {
   Semaphore_Control    *sem;
+  ISR_Level             level;
   Thread_queue_Context  queue_context;
   Thread_Control       *executing;
   unsigned int          count;
 
   sem = _Semaphore_Get( _sem );
   _Thread_queue_Context_initialize( &queue_context );
-  executing = _Semaphore_Queue_acquire( sem, &queue_context );
+  _Thread_queue_Context_ISR_disable( &queue_context, level );
+  executing = _Semaphore_Queue_acquire_critical( sem, &queue_context );
 
   count = sem->count;
-  if ( count > 0 ) {
+  if ( __predict_true( count > 0 ) ) {
     sem->count = count - 1;
-    _Semaphore_Queue_release( sem, &queue_context );
+    _Semaphore_Queue_release( sem, level, &queue_context );
   } else {
     _Thread_queue_Context_set_expected_level( &queue_context, 1 );
     _Thread_queue_Context_set_no_timeout( &queue_context );
+    _Thread_queue_Context_set_ISR_level( &queue_context, level );
     _Thread_queue_Enqueue_critical(
       &sem->Queue.Queue,
       SEMAPHORE_TQ_OPERATIONS,
@@ -115,22 +119,25 @@ void _Semaphore_Wait( struct _Semaphore_Control *_sem )
 
 void _Semaphore_Post( struct _Semaphore_Control *_sem )
 {
   Semaphore_Control    *sem;
+  ISR_Level             level;
   Thread_queue_Context  queue_context;
   Thread_queue_Heads   *heads;
 
   sem = _Semaphore_Get( _sem );
   _Thread_queue_Context_initialize( &queue_context );
-  _Semaphore_Queue_acquire( sem, &queue_context );
+  _Thread_queue_Context_ISR_disable( &queue_context, level );
+  _Semaphore_Queue_acquire_critical( sem, &queue_context );
 
   heads = sem->Queue.Queue.heads;
-  if ( heads == NULL ) {
+  if ( __predict_true( heads == NULL ) ) {
     _Assert( sem->count < UINT_MAX );
     ++sem->count;
-    _Semaphore_Queue_release( sem, &queue_context );
+    _Semaphore_Queue_release( sem, level, &queue_context );
   } else {
     const Thread_queue_Operations *operations;
     Thread_Control *first;
 
+    _Thread_queue_Context_set_ISR_level( &queue_context, level );
     operations = SEMAPHORE_TQ_OPERATIONS;
     first = ( *operations->first )( heads );
-- 
cgit v1.2.3
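As with the futex and mutex, _Semaphore_Wait()/_Semaphore_Post() back the <sys/lock.h> self-contained semaphore; after this patch both take the __predict_true() branch without spilling the ISR level to the stack. A closing usage sketch; the _SEMAPHORE_INITIALIZER name and its argument are assumptions about that header:

#include <sys/lock.h>

/* One-producer, one-consumer signaling; the count starts at zero. */
static struct _Semaphore_Control ready = _SEMAPHORE_INITIALIZER( 0 );

void item_produced( void )
{
  _Semaphore_Post( &ready );  /* predicted: no waiters, just ++count */
}

void item_consume( void )
{
  _Semaphore_Wait( &ready );  /* predicted: count > 0, just --count */
}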