author     Sebastian Huber <sebastian.huber@embedded-brains.de>   2015-07-28 13:46:56 +0200
committer  Sebastian Huber <sebastian.huber@embedded-brains.de>   2015-07-30 09:11:18 +0200
commit     9e9e61d27d146e2ca83d5b0f590683a3f605c3f1 (patch)
tree       b8ac4241112d2e6e00ba85601ce22fc85f2bd8c1 /cpukit
parent     score: Add scheduler <sys/lock.h> support (diff)
download   rtems-9e9e61d27d146e2ca83d5b0f590683a3f605c3f1.tar.bz2
score: Add self-contained condition implementation
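For illustration only (not part of this commit), a minimal usage sketch of the
new self-contained API, assuming the _MUTEX_INITIALIZER and
_CONDITION_INITIALIZER static initializers provided by Newlib's <sys/lock.h>.
_Condition_Wait() releases the mutex, blocks the calling thread, and
re-acquires the mutex before returning; _Condition_Signal() wakes at most one
waiter, _Condition_Broadcast() wakes all waiters.

  #include <sys/lock.h>

  static struct _Mutex_Control mutex = _MUTEX_INITIALIZER;
  static struct _Condition_Control condition = _CONDITION_INITIALIZER;
  static int ready;

  void consumer( void )
  {
    _Mutex_Acquire( &mutex );
    while ( !ready ) {
      /* Releases mutex, blocks, and re-acquires mutex before returning */
      _Condition_Wait( &condition, &mutex );
    }
    _Mutex_Release( &mutex );
  }

  void producer( void )
  {
    _Mutex_Acquire( &mutex );
    ready = 1;
    _Condition_Signal( &condition );
    _Mutex_Release( &mutex );
  }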
Diffstat (limited to 'cpukit')
-rw-r--r--  cpukit/libmisc/monitor/mon-prmisc.c               1
-rw-r--r--  cpukit/score/Makefile.am                          1
-rw-r--r--  cpukit/score/include/rtems/score/statesimpl.h     3
-rw-r--r--  cpukit/score/src/condition.c                    309
4 files changed, 314 insertions, 0 deletions
diff --git a/cpukit/libmisc/monitor/mon-prmisc.c b/cpukit/libmisc/monitor/mon-prmisc.c
index 14ba173256..a3d7a663be 100644
--- a/cpukit/libmisc/monitor/mon-prmisc.c
+++ b/cpukit/libmisc/monitor/mon-prmisc.c
@@ -134,6 +134,7 @@ static const rtems_assoc_t rtems_monitor_state_assoc[] = {
{ "Wseg", STATES_WAITING_FOR_SEGMENT, 0 },
{ "Wsem", STATES_WAITING_FOR_SEMAPHORE, 0 },
{ "Wsig", STATES_WAITING_FOR_SIGNAL, 0 },
+ { "Wslcnd", STATES_WAITING_FOR_SYS_LOCK_CONDITION, 0 },
{ "Wslftx", STATES_WAITING_FOR_SYS_LOCK_FUTEX, 0 },
{ "Wslmtx", STATES_WAITING_FOR_SYS_LOCK_MUTEX, 0 },
{ "Wslsem", STATES_WAITING_FOR_SYS_LOCK_SEMAPHORE, 0 },
diff --git a/cpukit/score/Makefile.am b/cpukit/score/Makefile.am
index 7c95ef984d..03ceb7aff9 100644
--- a/cpukit/score/Makefile.am
+++ b/cpukit/score/Makefile.am
@@ -344,6 +344,7 @@ libscore_a_SOURCES += src/apiext.c src/chain.c src/chainappend.c \
src/debugisthreaddispatchingallowed.c \
src/interr.c src/isr.c src/wkspace.c src/wkstringduplicate.c
libscore_a_SOURCES += src/isrisinprogress.c
+libscore_a_SOURCES += src/condition.c
libscore_a_SOURCES += src/debugisownerofallocator.c
libscore_a_SOURCES += src/futex.c
libscore_a_SOURCES += src/profilingisrentryexit.c
diff --git a/cpukit/score/include/rtems/score/statesimpl.h b/cpukit/score/include/rtems/score/statesimpl.h
index 82d222c490..97cadb2888 100644
--- a/cpukit/score/include/rtems/score/statesimpl.h
+++ b/cpukit/score/include/rtems/score/statesimpl.h
@@ -97,6 +97,8 @@ extern "C" {
* blocking state.
*/
#define STATES_INTERRUPTIBLE_BY_SIGNAL 0x10000000
+/** This macro corresponds to a task waiting for a <sys/lock.h> condition. */
+#define STATES_WAITING_FOR_SYS_LOCK_CONDITION 0x20000000
/** This macro corresponds to a task waiting for a local object operation. */
#define STATES_LOCALLY_BLOCKED ( STATES_WAITING_FOR_BUFFER | \
@@ -112,6 +114,7 @@ extern "C" {
STATES_WAITING_FOR_SYS_LOCK_MUTEX | \
STATES_WAITING_FOR_SYS_LOCK_SEMAPHORE | \
STATES_WAITING_FOR_SYS_LOCK_FUTEX | \
+ STATES_WAITING_FOR_SYS_LOCK_CONDITION | \
STATES_WAITING_FOR_RWLOCK )
/** This macro corresponds to a task waiting which is blocked. */
diff --git a/cpukit/score/src/condition.c b/cpukit/score/src/condition.c
new file mode 100644
index 0000000000..22c2a9b97c
--- /dev/null
+++ b/cpukit/score/src/condition.c
@@ -0,0 +1,309 @@
+/*
+ * Copyright (c) 2015 embedded brains GmbH. All rights reserved.
+ *
+ * embedded brains GmbH
+ * Dornierstr. 4
+ * 82178 Puchheim
+ * Germany
+ * <rtems@embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#if HAVE_CONFIG_H
+  #include "config.h"
+#endif
+
+#if HAVE_STRUCT__THREAD_QUEUE_QUEUE
+
+#include <sys/lock.h>
+#include <errno.h>
+
+#include <rtems/score/atomic.h>
+#include <rtems/score/chainimpl.h>
+#include <rtems/score/threadimpl.h>
+#include <rtems/score/threadqimpl.h>
+#include <rtems/score/todimpl.h>
+#include <rtems/score/watchdogimpl.h>
+
+#define CONDITION_TQ_OPERATIONS &_Thread_queue_Operations_FIFO
+
+typedef struct {
+  Thread_queue_Syslock_queue Queue;
+} Condition_Control;
+
+RTEMS_STATIC_ASSERT(
+  offsetof( Condition_Control, Queue )
+    == offsetof( struct _Condition_Control, _Queue ),
+  CONDITION_CONTROL_QUEUE
+);
+
+RTEMS_STATIC_ASSERT(
+  sizeof( Condition_Control ) == sizeof( struct _Condition_Control ),
+  CONDITION_CONTROL_SIZE
+);
+
+static Condition_Control *_Condition_Get(
+  struct _Condition_Control *_condition
+)
+{
+  return (Condition_Control *) _condition;
+}
+
+static Thread_Control *_Condition_Queue_acquire_critical(
+  Condition_Control *condition,
+  ISR_lock_Context *lock_context
+)
+{
+  Thread_Control *executing;
+
+  executing = _Thread_Executing;
+  _Thread_queue_Queue_acquire_critical(
+    &condition->Queue.Queue,
+    &executing->Potpourri_stats,
+    lock_context
+  );
+
+  return executing;
+}
+
+static void _Condition_Queue_release(
+  Condition_Control *condition,
+  ISR_lock_Context *lock_context
+)
+{
+  _Thread_queue_Queue_release( &condition->Queue.Queue, lock_context );
+}
+
+static Per_CPU_Control *_Condition_Do_wait(
+  struct _Condition_Control *_condition,
+  Watchdog_Interval timeout,
+  ISR_lock_Context *lock_context
+)
+{
+  Condition_Control *condition;
+  Thread_Control *executing;
+  Per_CPU_Control *cpu_self;
+
+  condition = _Condition_Get( _condition );
+  executing = _Condition_Queue_acquire_critical( condition, lock_context );
+  cpu_self = _Thread_Dispatch_disable_critical( lock_context );
+
+  executing->Wait.return_code = 0;
+  _Thread_queue_Enqueue_critical(
+    &condition->Queue.Queue,
+    CONDITION_TQ_OPERATIONS,
+    executing,
+    STATES_WAITING_FOR_SYS_LOCK_CONDITION,
+    timeout,
+    ETIMEDOUT,
+    lock_context
+  );
+
+  return cpu_self;
+}
+
+void _Condition_Wait(
+  struct _Condition_Control *_condition,
+  struct _Mutex_Control *_mutex
+)
+{
+  ISR_lock_Context lock_context;
+  Per_CPU_Control *cpu_self;
+
+  _ISR_lock_ISR_disable( &lock_context );
+  cpu_self = _Condition_Do_wait( _condition, 0, &lock_context );
+
+  _Mutex_Release( _mutex );
+  _Thread_Dispatch_enable( cpu_self );
+  _Mutex_Acquire( _mutex );
+}
+
+int _Condition_Wait_timed(
+  struct _Condition_Control *_condition,
+  struct _Mutex_Control *_mutex,
+  const struct timespec *abstime
+)
+{
+  ISR_lock_Context lock_context;
+  Per_CPU_Control *cpu_self;
+  Thread_Control *executing;
+  int eno;
+  Watchdog_Interval ticks;
+
+  _ISR_lock_ISR_disable( &lock_context );
+
+  switch ( _TOD_Absolute_timeout_to_ticks( abstime, &ticks ) ) {
+    case TOD_ABSOLUTE_TIMEOUT_INVALID:
+      _ISR_lock_ISR_enable( &lock_context );
+      return EINVAL;
+    case TOD_ABSOLUTE_TIMEOUT_IS_IN_PAST:
+    case TOD_ABSOLUTE_TIMEOUT_IS_NOW:
+      _ISR_lock_ISR_enable( &lock_context );
+      return ETIMEDOUT;
+    default:
+      break;
+  }
+
+  cpu_self = _Condition_Do_wait( _condition, ticks, &lock_context );
+
+  _Mutex_Release( _mutex );
+  executing = cpu_self->executing;
+  _Thread_Dispatch_enable( cpu_self );
+  eno = (int) executing->Wait.return_code;
+  _Mutex_Acquire( _mutex );
+
+  return eno;
+}
+
+void _Condition_Wait_recursive(
+  struct _Condition_Control *_condition,
+  struct _Mutex_recursive_Control *_mutex
+)
+{
+  ISR_lock_Context lock_context;
+  Per_CPU_Control *cpu_self;
+  unsigned int nest_level;
+
+  _ISR_lock_ISR_disable( &lock_context );
+  cpu_self = _Condition_Do_wait( _condition, 0, &lock_context );
+
+  nest_level = _mutex->_nest_level;
+  _mutex->_nest_level = 0;
+  _Mutex_recursive_Release( _mutex );
+  _Thread_Dispatch_enable( cpu_self );
+  _Mutex_recursive_Acquire( _mutex );
+  _mutex->_nest_level = nest_level;
+}
+
+int _Condition_Wait_recursive_timed(
+  struct _Condition_Control *_condition,
+  struct _Mutex_recursive_Control *_mutex,
+  const struct timespec *abstime
+)
+{
+  ISR_lock_Context lock_context;
+  Per_CPU_Control *cpu_self;
+  Thread_Control *executing;
+  int eno;
+  unsigned int nest_level;
+  Watchdog_Interval ticks;
+
+  _ISR_lock_ISR_disable( &lock_context );
+
+  switch ( _TOD_Absolute_timeout_to_ticks( abstime, &ticks ) ) {
+    case TOD_ABSOLUTE_TIMEOUT_INVALID:
+      _ISR_lock_ISR_enable( &lock_context );
+      return EINVAL;
+    case TOD_ABSOLUTE_TIMEOUT_IS_IN_PAST:
+    case TOD_ABSOLUTE_TIMEOUT_IS_NOW:
+      _ISR_lock_ISR_enable( &lock_context );
+      return ETIMEDOUT;
+    default:
+      break;
+  }
+
+  cpu_self = _Condition_Do_wait( _condition, ticks, &lock_context );
+
+  nest_level = _mutex->_nest_level;
+  _mutex->_nest_level = 0;
+  _Mutex_recursive_Release( _mutex );
+  executing = cpu_self->executing;
+  _Thread_Dispatch_enable( cpu_self );
+  eno = (int) executing->Wait.return_code;
+  _Mutex_recursive_Acquire( _mutex );
+  _mutex->_nest_level = nest_level;
+
+  return eno;
+}
+
+static int _Condition_Wake( struct _Condition_Control *_condition, int count )
+{
+  Condition_Control *condition;
+  ISR_lock_Context lock_context;
+  Thread_queue_Heads *heads;
+  Chain_Control unblock;
+  Chain_Node *node;
+  Chain_Node *tail;
+  int woken;
+
+  condition = _Condition_Get( _condition );
+  _ISR_lock_ISR_disable( &lock_context );
+  _Condition_Queue_acquire_critical( condition, &lock_context );
+
+  /*
+   * In common use cases of condition variables there are normally no threads
+   * on the queue, so check this condition early.
+   */
+  heads = condition->Queue.Queue.heads;
+  if ( __predict_true( heads == NULL ) ) {
+    _Condition_Queue_release( condition, &lock_context );
+
+    return 0;
+  }
+
+  woken = 0;
+  _Chain_Initialize_empty( &unblock );
+  while ( count > 0 && heads != NULL ) {
+    const Thread_queue_Operations *operations;
+    Thread_Control *first;
+    bool do_unblock;
+
+    operations = CONDITION_TQ_OPERATIONS;
+    first = ( *operations->first )( heads );
+
+    do_unblock = _Thread_queue_Extract_locked(
+      &condition->Queue.Queue,
+      operations,
+      first
+    );
+    if ( do_unblock ) {
+      _Chain_Append_unprotected( &unblock, &first->Wait.Node.Chain );
+    }
+
+    ++woken;
+    --count;
+    heads = condition->Queue.Queue.heads;
+  }
+
+  node = _Chain_First( &unblock );
+  tail = _Chain_Tail( &unblock );
+  if ( node != tail ) {
+    Per_CPU_Control *cpu_self;
+
+    cpu_self = _Thread_Dispatch_disable_critical( &lock_context );
+    _Condition_Queue_release( condition, &lock_context );
+
+    do {
+      Thread_Control *thread;
+      Chain_Node *next;
+
+      next = _Chain_Next( node );
+      thread = THREAD_CHAIN_NODE_TO_THREAD( node );
+      _Watchdog_Remove_ticks( &thread->Timer );
+      _Thread_Unblock( thread );
+
+      node = next;
+    } while ( node != tail );
+
+    _Thread_Dispatch_enable( cpu_self );
+  } else {
+    _Condition_Queue_release( condition, &lock_context );
+  }
+
+  return woken;
+}
+
+void _Condition_Signal( struct _Condition_Control *_condition )
+{
+  _Condition_Wake( _condition, 1 );
+}
+
+void _Condition_Broadcast( struct _Condition_Control *_condition )
+{
+  _Condition_Wake( _condition, INT_MAX );
+}
+
+#endif /* HAVE_STRUCT__THREAD_QUEUE_QUEUE */
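
Also for illustration only (not part of this patch), a sketch of the timed
variant, assuming the absolute timeout is measured against CLOCK_REALTIME as
suggested by the use of _TOD_Absolute_timeout_to_ticks() above.
_Condition_Wait_timed() returns 0 when the thread is woken by a signal or
broadcast, ETIMEDOUT when the timeout expires first, and EINVAL for an invalid
abstime:

  #include <sys/lock.h>
  #include <errno.h>
  #include <time.h>

  /* Waits up to one second for *ready to become non-zero; returns 0 on
     success, otherwise the last _Condition_Wait_timed() status. */
  static int wait_ready_timed(
    struct _Condition_Control *condition,
    struct _Mutex_Control *mutex,
    const int *ready
  )
  {
    struct timespec abstime;
    int eno = 0;

    clock_gettime( CLOCK_REALTIME, &abstime );
    ++abstime.tv_sec;

    _Mutex_Acquire( mutex );
    while ( !*ready && eno == 0 ) {
      eno = _Condition_Wait_timed( condition, mutex, &abstime );
    }
    if ( *ready ) {
      eno = 0;
    }
    _Mutex_Release( mutex );

    return eno;
  }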