path: root/cpukit/score/include/rtems/score/threadimpl.h
author    Sebastian Huber <sebastian.huber@embedded-brains.de>  2016-09-09 11:00:06 +0200
committer Sebastian Huber <sebastian.huber@embedded-brains.de>  2016-09-21 08:59:33 +0200
commit    f6142c19f192e40ee1aa9ff67eb1c711343c157d (patch)
tree      061086bf693d934063cdd601498e0e138e72eeb7 /cpukit/score/include/rtems/score/threadimpl.h
parent    rtems: Add rtems_task_get_priority() (diff)
download  rtems-f6142c19f192e40ee1aa9ff67eb1c711343c157d.tar.bz2
score: Scheduler node awareness for thread queues
Maintain the priority of a thread for each scheduler instance via the thread queue enqueue, extract, priority actions and surrender operations. This replaces the primitive priority boosting. Update #2556.
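As a rough, hypothetical illustration of the idea (not code from this patch): on SMP the scheduler nodes a thread waits with are linked on its Scheduler.Wait_nodes chain, so a per-scheduler-instance priority update amounts to visiting each node on that chain, while on non-SMP there is only the home node. The sketch below uses the accessors visible in the diff; _My_Apply_priority_action and _My_Update_priority_per_scheduler are made-up placeholder names, not RTEMS functions.

/* Hypothetical sketch only, not part of this patch.  It visits the
 * thread's per-scheduler wait nodes (SMP) and hands each scheduler node
 * to a placeholder priority action; the real change drives this through
 * the thread queue enqueue, extract, priority action and surrender paths.
 */
#include <rtems/score/threadimpl.h>

static void _My_Apply_priority_action( Scheduler_Node *node )
{
  (void) node; /* placeholder for a scheduler-specific priority action */
}

static void _My_Update_priority_per_scheduler( Thread_Control *the_thread )
{
#if defined(RTEMS_SMP)
  const Chain_Node *tail =
    _Chain_Immutable_tail( &the_thread->Scheduler.Wait_nodes );
  Chain_Node *node = _Chain_First( &the_thread->Scheduler.Wait_nodes );

  /* One scheduler node per scheduler instance involved in the wait */
  while ( node != tail ) {
    _My_Apply_priority_action( SCHEDULER_NODE_OF_THREAD_WAIT_NODE( node ) );
    node = _Chain_Next( node );
  }
#else
  /* Only the home scheduler node exists on uniprocessor configurations */
  _My_Apply_priority_action( _Thread_Scheduler_get_home_node( the_thread ) );
#endif
}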
Diffstat (limited to 'cpukit/score/include/rtems/score/threadimpl.h')
-rw-r--r--  cpukit/score/include/rtems/score/threadimpl.h  45
1 file changed, 37 insertions(+), 8 deletions(-)
diff --git a/cpukit/score/include/rtems/score/threadimpl.h b/cpukit/score/include/rtems/score/threadimpl.h
index 09af9c15dd..7b978ea477 100644
--- a/cpukit/score/include/rtems/score/threadimpl.h
+++ b/cpukit/score/include/rtems/score/threadimpl.h
@@ -997,6 +997,20 @@ RTEMS_INLINE_ROUTINE Scheduler_Node *_Thread_Scheduler_get_own_node(
#endif
}
+RTEMS_INLINE_ROUTINE Scheduler_Node *_Thread_Scheduler_get_home_node(
+ const Thread_Control *the_thread
+)
+{
+#if defined(RTEMS_SMP)
+ _Assert( !_Chain_Is_empty( &the_thread->Scheduler.Wait_nodes ) );
+ return SCHEDULER_NODE_OF_THREAD_WAIT_NODE(
+ _Chain_First( &the_thread->Scheduler.Wait_nodes )
+ );
+#else
+ return the_thread->Scheduler.nodes;
+#endif
+}
+
RTEMS_INLINE_ROUTINE Scheduler_Node *_Thread_Scheduler_get_node_by_index(
const Thread_Control *the_thread,
size_t scheduler_index
@@ -1308,21 +1322,22 @@ RTEMS_INLINE_ROUTINE void _Thread_Wait_release(
}
/**
- * @brief Claims the thread wait queue and operations.
+ * @brief Claims the thread wait queue.
*
* The caller must not be the owner of the default thread wait lock. The
- * caller must be the owner of the corresponding thread queue lock.
+ * caller must be the owner of the corresponding thread queue lock. The
+ * registration of the corresponding thread queue operations is deferred and
+ * done after the deadlock detection. This is crucial to support timeouts on
+ * SMP configurations.
*
* @param[in] the_thread The thread.
* @param[in] queue The new thread queue.
- * @param[in] operations The new thread operations.
*
- * @see _Thread_Wait_restore_default().
+ * @see _Thread_Wait_claim_finalize() and _Thread_Wait_restore_default().
*/
RTEMS_INLINE_ROUTINE void _Thread_Wait_claim(
- Thread_Control *the_thread,
- Thread_queue_Queue *queue,
- const Thread_queue_Operations *operations
+ Thread_Control *the_thread,
+ Thread_queue_Queue *queue
)
{
ISR_lock_Context lock_context;
@@ -1338,12 +1353,26 @@ RTEMS_INLINE_ROUTINE void _Thread_Wait_claim(
#endif
the_thread->Wait.queue = queue;
- the_thread->Wait.operations = operations;
_Thread_Wait_release_default_critical( the_thread, &lock_context );
}
/**
+ * @brief Finalizes the thread wait queue claim via registration of the
+ * corresponding thread queue operations.
+ *
+ * @param[in] the_thread The thread.
+ * @param[in] operations The corresponding thread queue operations.
+ */
+RTEMS_INLINE_ROUTINE void _Thread_Wait_claim_finalize(
+ Thread_Control *the_thread,
+ const Thread_queue_Operations *operations
+)
+{
+ the_thread->Wait.operations = operations;
+}
+
+/**
* @brief Removes a thread wait lock request.
*
* On SMP configurations, removes a thread wait lock request.
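
To show how the split claim reads from a caller's point of view, here is a minimal, hypothetical enqueue-style sketch; it is not the actual _Thread_queue_Enqueue implementation. _My_Detects_deadlock and _My_Enqueue_sketch are made-up names, the sketch assumes the caller already owns the corresponding thread queue lock as the documentation above requires, and the undo path via _Thread_Wait_restore_default() is only one plausible way to give up the claim.

/* Hypothetical sketch: the thread queue operations are registered only
 * after the deadlock detection succeeded, matching the deferred
 * registration described in the _Thread_Wait_claim() documentation.
 * _My_Detects_deadlock() is not an RTEMS function.
 */
#include <rtems/score/threadimpl.h>
#include <stdbool.h>

static bool _My_Detects_deadlock( Thread_Control *the_thread )
{
  (void) the_thread;
  return false; /* placeholder for the real deadlock detection */
}

static void _My_Enqueue_sketch(
  Thread_queue_Queue            *queue,
  Thread_Control                *the_thread,
  const Thread_queue_Operations *operations
)
{
  /* Step 1: record the queue the thread is about to wait on; the
   * thread queue operations are intentionally not registered yet.
   */
  _Thread_Wait_claim( the_thread, queue );

  if ( _My_Detects_deadlock( the_thread ) ) {
    /* Give up the claim without ever registering the operations. */
    _Thread_Wait_restore_default( the_thread );
    return;
  }

  /* Step 2: register the operations; per the documentation above,
   * deferring this past the deadlock detection is what allows timeouts
   * to work on SMP configurations.
   */
  _Thread_Wait_claim_finalize( the_thread, operations );
}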