path: root/cpukit/score/src/threadq.c
author    Sebastian Huber <sebastian.huber@embedded-brains.de> 2016-11-04 10:04:27 +0100
committer Sebastian Huber <sebastian.huber@embedded-brains.de> 2016-11-04 10:54:52 +0100
commit    e7ab43d46fe871db090f563072e7ca00049cb73e (patch)
tree      8be1881941d995bdcf580531fe7842db3e6ddc70 /cpukit/score/src/threadq.c
parent    score: Provide inline variants for ISR lock ops (diff)
download  rtems-e7ab43d46fe871db090f563072e7ca00049cb73e.tar.bz2
score: Use non-inline thread queue lock ops
This reduces the code size and helps to reduce the amount of testing. Hot paths can use the _Thread_queue_Queue_acquire_critical() and _Thread_queue_Queue_release_critical() functions, which are still inline.
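The intended split can be pictured with a short sketch (not part of this patch): general code calls the non-inline _Thread_queue_Acquire()/_Thread_queue_Release() wrappers, while performance-critical paths keep the inline _Thread_queue_Queue_acquire_critical()/_Thread_queue_Queue_release_critical() pair. The calling function below is hypothetical, and the use of _Thread_queue_Context_initialize() from <rtems/score/threadqimpl.h> is an assumption about the surrounding API, not something taken from the patch.

/*
 * Hedged sketch, not part of the patch: a non-hot-path caller uses the
 * non-inline wrappers added here instead of the inline critical-section
 * variants.  example_cold_path() is a hypothetical name.
 */
#include <rtems/score/threadqimpl.h>

static void example_cold_path( Thread_queue_Control *the_thread_queue )
{
  Thread_queue_Context queue_context;

  _Thread_queue_Context_initialize( &queue_context );

  /* Non-inline wrapper: disables interrupts and acquires the queue lock. */
  _Thread_queue_Acquire( the_thread_queue, &queue_context );

  /* ... operate on the thread queue while holding the lock ... */

  /* Non-inline wrapper: releases the queue lock and restores interrupts. */
  _Thread_queue_Release( the_thread_queue, &queue_context );
}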
Diffstat (limited to 'cpukit/score/src/threadq.c')
-rw-r--r--  cpukit/score/src/threadq.c  64
1 file changed, 64 insertions, 0 deletions
diff --git a/cpukit/score/src/threadq.c b/cpukit/score/src/threadq.c
index ca2b900fe2..c20007977b 100644
--- a/cpukit/score/src/threadq.c
+++ b/cpukit/score/src/threadq.c
@@ -64,6 +64,70 @@ RTEMS_STATIC_ASSERT(
#endif /* HAVE_STRUCT__THREAD_QUEUE_QUEUE */
+#if defined(RTEMS_SMP)
+void _Thread_queue_Do_acquire_critical(
+  Thread_queue_Control *the_thread_queue,
+  ISR_lock_Context     *lock_context
+)
+{
+  _Thread_queue_Queue_acquire_critical(
+    &the_thread_queue->Queue,
+    &the_thread_queue->Lock_stats,
+    lock_context
+  );
+#if defined(RTEMS_DEBUG)
+  the_thread_queue->owner = _SMP_Get_current_processor();
+#endif
+}
+
+void _Thread_queue_Acquire(
+  Thread_queue_Control *the_thread_queue,
+  Thread_queue_Context *queue_context
+)
+{
+  _ISR_lock_ISR_disable( &queue_context->Lock_context.Lock_context );
+  _Thread_queue_Queue_acquire_critical(
+    &the_thread_queue->Queue,
+    &the_thread_queue->Lock_stats,
+    &queue_context->Lock_context.Lock_context
+  );
+#if defined(RTEMS_DEBUG)
+  the_thread_queue->owner = _SMP_Get_current_processor();
+#endif
+}
+
+void _Thread_queue_Do_release_critical(
+  Thread_queue_Control *the_thread_queue,
+  ISR_lock_Context     *lock_context
+)
+{
+#if defined(RTEMS_DEBUG)
+  _Assert( _Thread_queue_Is_lock_owner( the_thread_queue ) );
+  the_thread_queue->owner = SMP_LOCK_NO_OWNER;
+#endif
+  _Thread_queue_Queue_release_critical(
+    &the_thread_queue->Queue,
+    lock_context
+  );
+}
+
+void _Thread_queue_Release(
+  Thread_queue_Control *the_thread_queue,
+  Thread_queue_Context *queue_context
+)
+{
+#if defined(RTEMS_DEBUG)
+  _Assert( _Thread_queue_Is_lock_owner( the_thread_queue ) );
+  the_thread_queue->owner = SMP_LOCK_NO_OWNER;
+#endif
+  _Thread_queue_Queue_release_critical(
+    &the_thread_queue->Queue,
+    &queue_context->Lock_context.Lock_context
+  );
+  _ISR_lock_ISR_enable( &queue_context->Lock_context.Lock_context );
+}
+#endif
+
void _Thread_queue_Initialize( Thread_queue_Control *the_thread_queue )
{
_Thread_queue_Queue_initialize( &the_thread_queue->Queue );