author     Sebastian Huber <sebastian.huber@embedded-brains.de>  2015-03-23 15:05:32 +0100
committer  Sebastian Huber <sebastian.huber@embedded-brains.de>  2015-03-24 14:35:04 +0100
commit     57947f137c26c47c5c08b1aa9ab2b51f0a7463bb
tree       1fcf0a33953cfb757da97e1054ccdaaf5f2728e9
parent     score: Add thread priority change handler
score: Add thread lock
Update #2273.
-rw-r--r--  cpukit/score/include/rtems/score/thread.h      |  44
-rw-r--r--  cpukit/score/include/rtems/score/threadimpl.h  | 145
-rw-r--r--  cpukit/score/src/threadchangepriority.c        |  19
-rw-r--r--  cpukit/score/src/threadinitialize.c            |   3
-rw-r--r--  cpukit/score/src/threadrestart.c               |   4
5 files changed, 209 insertions(+), 6 deletions(-)
diff --git a/cpukit/score/include/rtems/score/thread.h b/cpukit/score/include/rtems/score/thread.h
index ee7886c934..ef238a339d 100644
--- a/cpukit/score/include/rtems/score/thread.h
+++ b/cpukit/score/include/rtems/score/thread.h
@@ -618,6 +618,42 @@ typedef struct {
void * control;
} Thread_Capture_control;
+#if defined(RTEMS_SMP)
+/**
+ * @brief Thread lock control.
+ *
+ * The thread lock is either the default lock or the lock of the resource on
+ * which the thread is currently blocked.  The generation number ensures
+ * that the up-to-date lock is used.  Only resources that use fine-grained
+ * locking provide their own lock.
+ *
+ * The thread lock protects the following thread variables:
+ * - Thread_Control::current_priority,
+ * - Thread_Control::Priority::change_handler, and
+ * - Thread_Control::Priority::change_handler_context.
+ *
+ * @see _Thread_Lock_acquire(), _Thread_Lock_release(), _Thread_Lock_set() and
+ * _Thread_Lock_restore_default().
+ */
+typedef struct {
+ /**
+ * @brief The current thread lock.
+ */
+ ISR_lock_Control *current;
+
+ /**
+ * @brief The default thread lock in case the thread is not blocked on a
+ * resource.
+ */
+ ISR_lock_Control Default;
+
+ /**
+ * @brief Generation number to invalidate stale locks.
+ */
+ Atomic_Uint generation;
+} Thread_Lock_control;
+#endif
+
/**
* This structure defines the Thread Control Block (TCB).
*/
@@ -640,6 +676,14 @@ struct Thread_Control_struct {
/** This field is the number of mutexes currently held by this thread. */
uint32_t resource_count;
+
+#if defined(RTEMS_SMP)
+ /**
+ * @brief Thread lock control.
+ */
+ Thread_Lock_control Lock;
+#endif
+
/** This field is the blocking information for this thread. */
Thread_Wait_information Wait;
/** This field is the Watchdog used to manage thread delays and timeouts. */
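
The Thread_Lock_control added above lets a resource that uses fine-grained locking substitute its own lock for the thread's default lock while the thread is blocked on it.  The sketch below shows how such a resource could use the new API.  The type My_resource and the helpers _My_resource_block() and _My_resource_unblock() are hypothetical names invented for this illustration; only _Thread_Lock_set() and _Thread_Lock_restore_default() come from this patch, and the code assumes an RTEMS_SMP build with the score headers on the include path.

#include <rtems/score/threadimpl.h>
#include <rtems/score/isrlock.h>

/* Hypothetical resource that protects its state with its own ISR lock. */
typedef struct {
  ISR_lock_Control Lock;
  /* ... wait queue, owner, and so on ... */
} My_resource;

/* Caller already holds resource->Lock and is about to block the_thread. */
static void _My_resource_block( My_resource *resource, Thread_Control *the_thread )
{
  /*
   * While the thread is blocked on this resource, its priority related
   * fields are protected by the resource lock instead of the default lock.
   */
  _Thread_Lock_set( the_thread, &resource->Lock );
}

/* Caller already holds resource->Lock and removes the_thread from it. */
static void _My_resource_unblock( My_resource *resource, Thread_Control *the_thread )
{
  (void) resource;

  /* The thread no longer blocks on a resource: back to the default lock. */
  _Thread_Lock_restore_default( the_thread );
}

Note that this usage respects the documented contract: the caller of _Thread_Lock_set() owns the new lock but not the default thread lock, and the caller of _Thread_Lock_restore_default() owns the current thread lock.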
diff --git a/cpukit/score/include/rtems/score/threadimpl.h b/cpukit/score/include/rtems/score/threadimpl.h
index 5376ce1660..551df452d6 100644
--- a/cpukit/score/include/rtems/score/threadimpl.h
+++ b/cpukit/score/include/rtems/score/threadimpl.h
@@ -883,6 +883,151 @@ RTEMS_INLINE_ROUTINE bool _Thread_Owns_resources(
return owns_resources;
}
+/**
+ * @brief Releases the thread lock.
+ *
+ * @param[in] lock The lock returned by _Thread_Lock_acquire().
+ * @param[in] lock_context The lock context used for _Thread_Lock_acquire().
+ */
+RTEMS_INLINE_ROUTINE void _Thread_Lock_release(
+ ISR_lock_Control *lock,
+ ISR_lock_Context *lock_context
+)
+{
+ _ISR_lock_Release_and_ISR_enable( lock, lock_context );
+}
+
+/**
+ * @brief Acquires the thread lock.
+ *
+ * @param[in] the_thread The thread.
+ * @param[in] lock_context The lock context for _Thread_Lock_release().
+ *
+ * @return The lock required by _Thread_Lock_release().
+ */
+RTEMS_INLINE_ROUTINE ISR_lock_Control *_Thread_Lock_acquire(
+ Thread_Control *the_thread,
+ ISR_lock_Context *lock_context
+)
+{
+#if defined(RTEMS_SMP)
+ ISR_lock_Control *lock;
+
+ while ( true ) {
+ uint32_t my_generation;
+
+ _ISR_Disable_without_giant( lock_context->Lock_context.isr_level );
+ my_generation = the_thread->Lock.generation;
+
+ /*
+ * Ensure that we read the initial lock generation before we obtain our
+ * current lock.
+ */
+ _Atomic_Fence( ATOMIC_ORDER_ACQUIRE );
+
+ lock = the_thread->Lock.current;
+ _ISR_lock_Acquire( lock, lock_context );
+
+ /*
+ * Ensure that we read the second lock generation after we obtained our
+ * current lock.
+ */
+ _Atomic_Fence( ATOMIC_ORDER_ACQUIRE );
+
+ if ( the_thread->Lock.generation == my_generation ) {
+ break;
+ }
+
+ _Thread_Lock_release( lock, lock_context );
+ }
+
+ return lock;
+#else
+ _ISR_Disable( lock_context->isr_level );
+
+ return NULL;
+#endif
+}
+
+#if defined(RTEMS_SMP)
+/*
+ * Internal function, use _Thread_Lock_set() or _Thread_Lock_restore_default()
+ * instead.
+ */
+RTEMS_INLINE_ROUTINE void _Thread_Lock_set_unprotected(
+ Thread_Control *the_thread,
+ ISR_lock_Control *new_lock
+)
+{
+ the_thread->Lock.current = new_lock;
+
+ /*
+ * Ensure that the new lock is visible before we update the generation
+ * number.  Otherwise another processor could read an up-to-date generation
+ * number together with a stale lock.
+ */
+ _Atomic_Fence( ATOMIC_ORDER_RELEASE );
+
+ /*
+ * Since we set a new lock right before, this increment is not protected by a
+ * lock and thus must be an atomic operation.
+ */
+ _Atomic_Fetch_add_uint(
+ &the_thread->Lock.generation,
+ 1,
+ ATOMIC_ORDER_RELAXED
+ );
+}
+#endif
+
+/**
+ * @brief Sets a new thread lock.
+ *
+ * The caller must not be the owner of the default thread lock. The caller
+ * must be the owner of the new lock.
+ *
+ * @param[in] the_thread The thread.
+ * @param[in] new_lock The new thread lock.
+ */
+#if defined(RTEMS_SMP)
+RTEMS_INLINE_ROUTINE void _Thread_Lock_set(
+ Thread_Control *the_thread,
+ ISR_lock_Control *new_lock
+)
+{
+ ISR_lock_Control *lock;
+ ISR_lock_Context lock_context;
+
+ lock = _Thread_Lock_acquire( the_thread, &lock_context );
+ _Thread_Lock_set_unprotected( the_thread, new_lock );
+ _Thread_Lock_release( lock, &lock_context );
+}
+#else
+#define _Thread_Lock_set( the_thread, new_lock ) \
+ do { } while ( 0 )
+#endif
+
+/**
+ * @brief Restores the default thread lock.
+ *
+ * The caller must be the owner of the current thread lock.
+ *
+ * @param[in] the_thread The thread.
+ */
+#if defined(RTEMS_SMP)
+RTEMS_INLINE_ROUTINE void _Thread_Lock_restore_default(
+ Thread_Control *the_thread
+)
+{
+ _Atomic_Fence( ATOMIC_ORDER_RELEASE );
+
+ _Thread_Lock_set_unprotected( the_thread, &the_thread->Lock.Default );
+}
+#else
+#define _Thread_Lock_restore_default( the_thread ) \
+ do { } while ( 0 )
+#endif
+
void _Thread_Priority_change_do_nothing(
Thread_Control *the_thread,
Priority_Control new_priority,
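
The retry loop in _Thread_Lock_acquire() together with _Thread_Lock_set_unprotected() forms a general pattern: a lock pointer that may be retargeted while others are acquiring it, with a generation counter to detect stale acquisitions.  The standalone program below models that pattern with C11 atomics and POSIX mutexes instead of the RTEMS ISR lock and atomic wrappers; it is a simplified sketch for illustration, not RTEMS code (build with e.g. cc -std=c11 -pthread).

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

typedef struct {
  pthread_mutex_t *_Atomic current;     /* lock currently protecting the thread */
  pthread_mutex_t           deflt;      /* default lock embedded in the "TCB" */
  atomic_uint               generation; /* bumped whenever current changes */
} lock_control;

static pthread_mutex_t resource_lock = PTHREAD_MUTEX_INITIALIZER;

/* Acquire whatever lock is current; retry if it was retargeted meanwhile. */
static pthread_mutex_t *lock_acquire( lock_control *lc )
{
  for ( ;; ) {
    unsigned gen =
      atomic_load_explicit( &lc->generation, memory_order_acquire );
    pthread_mutex_t *lock =
      atomic_load_explicit( &lc->current, memory_order_acquire );

    pthread_mutex_lock( lock );

    /* Only the up-to-date lock counts; otherwise drop it and retry. */
    if ( atomic_load_explicit( &lc->generation, memory_order_acquire ) == gen ) {
      return lock;
    }

    pthread_mutex_unlock( lock );
  }
}

/* Caller owns the current lock and new_lock, mirroring _Thread_Lock_set(). */
static void lock_set( lock_control *lc, pthread_mutex_t *new_lock )
{
  atomic_store_explicit( &lc->current, new_lock, memory_order_release );
  atomic_fetch_add_explicit( &lc->generation, 1, memory_order_release );
}

int main( void )
{
  lock_control lc;
  pthread_mutex_t *lock;

  pthread_mutex_init( &lc.deflt, NULL );
  atomic_init( &lc.generation, 0 );
  atomic_init( &lc.current, &lc.deflt );

  lock = lock_acquire( &lc );            /* acquires the default lock */
  pthread_mutex_lock( &resource_lock );  /* own the new lock first */
  lock_set( &lc, &resource_lock );       /* retarget, invalidating stale holders */
  pthread_mutex_unlock( lock );          /* release the old default lock */
  pthread_mutex_unlock( &resource_lock );

  lock = lock_acquire( &lc );            /* now yields resource_lock */
  printf( "%s lock acquired\n", lock == &resource_lock ? "resource" : "default" );
  pthread_mutex_unlock( lock );

  return 0;
}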
diff --git a/cpukit/score/src/threadchangepriority.c b/cpukit/score/src/threadchangepriority.c
index dea671de3f..2baa9d2774 100644
--- a/cpukit/score/src/threadchangepriority.c
+++ b/cpukit/score/src/threadchangepriority.c
@@ -29,17 +29,20 @@ void _Thread_Change_priority(
bool prepend_it
)
{
- ISR_Level level;
+ ISR_lock_Context lock_context;
+ ISR_lock_Control *lock;
- _ISR_Disable( level );
+ lock = _Thread_Lock_acquire( the_thread, &lock_context );
/*
* Do not bother recomputing all the priority related information if
* we are not REALLY changing priority.
*/
if ( the_thread->current_priority != new_priority ) {
- uint32_t my_generation = the_thread->Priority.generation + 1;
+ uint32_t my_generation;
+ ISR_Level level;
+ my_generation = the_thread->Priority.generation + 1;
the_thread->current_priority = new_priority;
the_thread->Priority.generation = my_generation;
@@ -49,7 +52,9 @@ void _Thread_Change_priority(
the_thread->Priority.change_handler_context
);
- _ISR_Flash( level );
+ _Thread_Lock_release( lock, &lock_context );
+
+ _ISR_Disable( level );
if ( the_thread->Priority.generation == my_generation ) {
if ( _States_Is_ready( the_thread->current_state ) ) {
@@ -62,7 +67,9 @@ void _Thread_Change_priority(
_Scheduler_Update_priority( the_thread, new_priority );
}
}
- }
- _ISR_Enable( level );
+ _ISR_Enable( level );
+ } else {
+ _Thread_Lock_release( lock, &lock_context );
+ }
}
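
_Thread_Change_priority() above drops the thread lock before it disables interrupts for the scheduler update and relies on Priority.generation to detect that a competing priority change intervened.  The standalone program below models that "publish under the lock, apply later only if still current" idea with POSIX threads; it is an illustrative sketch with a single mutex standing in for both the thread lock and the ISR disable, not RTEMS code (build with cc -std=c11 -pthread).

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned priority;            /* analogous to current_priority */
static unsigned generation;          /* analogous to Priority.generation */
static unsigned applied_priority;    /* stands in for the scheduler state */

static void change_priority( unsigned new_priority )
{
  unsigned my_generation;

  pthread_mutex_lock( &lock );

  if ( priority == new_priority ) {  /* not really changing priority */
    pthread_mutex_unlock( &lock );
    return;
  }

  priority = new_priority;
  my_generation = ++generation;      /* publish the change */
  pthread_mutex_unlock( &lock );

  /* The expensive part runs without the lock; a later change may win. */
  pthread_mutex_lock( &lock );
  if ( generation == my_generation ) {
    applied_priority = new_priority; /* think _Scheduler_Update_priority() */
  }
  pthread_mutex_unlock( &lock );
}

static void *worker( void *arg )
{
  change_priority( (unsigned) (unsigned long) arg );
  return NULL;
}

int main( void )
{
  pthread_t a;
  pthread_t b;

  pthread_create( &a, NULL, worker, (void *) 5UL );
  pthread_create( &b, NULL, worker, (void *) 7UL );
  pthread_join( a, NULL );
  pthread_join( b, NULL );

  /* Whichever change published last is the one that got applied. */
  printf( "priority %u, applied %u\n", priority, applied_priority );

  return 0;
}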
diff --git a/cpukit/score/src/threadinitialize.c b/cpukit/score/src/threadinitialize.c
index b0066cd19a..27c3f06af7 100644
--- a/cpukit/score/src/threadinitialize.c
+++ b/cpukit/score/src/threadinitialize.c
@@ -196,6 +196,9 @@ bool _Thread_Initialize(
the_thread->Scheduler.own_node = the_thread->Scheduler.node;
_Resource_Node_initialize( &the_thread->Resource_node );
_CPU_Context_Set_is_executing( &the_thread->Registers, false );
+ the_thread->Lock.current = &the_thread->Lock.Default;
+ _ISR_lock_Initialize( &the_thread->Lock.Default, "Thread Lock Default" );
+ _Atomic_Init_uint( &the_thread->Lock.generation, 0 );
#endif
_Thread_Debug_set_real_processor( the_thread, cpu );
diff --git a/cpukit/score/src/threadrestart.c b/cpukit/score/src/threadrestart.c
index 10d05f1c15..e759b5b42b 100644
--- a/cpukit/score/src/threadrestart.c
+++ b/cpukit/score/src/threadrestart.c
@@ -98,6 +98,10 @@ static void _Thread_Free( Thread_Control *the_thread )
_Workspace_Free( the_thread->Start.tls_area );
+#if defined(RTEMS_SMP)
+ _ISR_lock_Destroy( &the_thread->Lock.Default );
+#endif
+
_Objects_Free(
_Objects_Get_information_id( the_thread->Object.id ),
&the_thread->Object