author     Joel Sherrill <joel.sherrill@oarcorp.com>        2014-05-19 15:26:55 -0500
committer  Jennifer Averett <jennifer.averett@oarcorp.com>  2014-06-11 09:27:06 -0500
commit     238629f2f144807abd29e9fefc9d20746ae16b2c (patch)
tree       b156738a4f3764a717288c3b7546bfbcaf4ee749
parent     647859e9bb19c5b72dbdf77ac7245b69ec9e22ce (diff)
Add SMP Priority Scheduler with Affinity
This scheduler attempts to account for the thread migrations needed as a
side-effect of a thread state, affinity, or priority change operation.

This scheduler has its own allocate_processor handler named
_Scheduler_SMP_Allocate_processor_exact() because
_Scheduler_SMP_Allocate_processor() attempts to prevent an executing thread
from moving off its current CPU without considering affinity. Without this,
the scheduler makes all the right decisions and then they are discarded at
the end.

==Side Effects of Adding This Scheduler==

Added a Thread_Control * parameter to the Scheduler_SMP_Get_highest_ready
type so methods looking for the highest ready thread can filter by the
processor on which the blocking thread resides. This allows affinity to be
considered. Simple Priority SMP and Priority SMP ignore this parameter.

+ Added a get_lowest_scheduled argument to _Scheduler_SMP_Enqueue_ordered().

+ Added an allocate_processor argument to the following methods:
  - _Scheduler_SMP_Block()
  - _Scheduler_SMP_Enqueue_ordered()
  - _Scheduler_SMP_Enqueue_scheduled_ordered()

+ schedulerprioritysmpimpl.h is a new file with prototypes for methods which
  were formerly static in schedulerprioritysmp.c but now need to be public so
  they can be shared with this scheduler.

NOTE: _Scheduler_SMP_Get_lowest_scheduled() appears to have a path which
would allow it to return NULL. Previously, _Scheduler_SMP_Enqueue_ordered()
would have asserted on it. If it cannot return NULL,
_Scheduler_SMP_Get_lowest_scheduled() should have an assertion.
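To make the new hook arguments easy to see outside of diff markers, the
affinity scheduler's FIFO enqueue path is reproduced below exactly as it
appears in the patch: the last two arguments are the new get_lowest_scheduled
and allocate_processor hooks, for which Priority SMP and Simple Priority SMP
instead pass the generic _Scheduler_SMP_Get_lowest_scheduled and
_Scheduler_SMP_Allocate_processor helpers.

/* Reproduced from the patch below (not additional code): the affinity
 * scheduler wires its affinity-aware helpers into the shared enqueue path. */
static void _Scheduler_priority_affinity_SMP_Enqueue_fifo(
  Scheduler_Context *context,
  Thread_Control    *thread
)
{
  _Scheduler_SMP_Enqueue_ordered(
    context,
    thread,
    _Scheduler_simple_Insert_priority_fifo_order,
    _Scheduler_priority_SMP_Insert_ready_fifo,
    _Scheduler_SMP_Insert_scheduled_fifo,
    _Scheduler_priority_SMP_Move_from_scheduled_to_ready,
    _Scheduler_priority_affinity_SMP_Get_lowest_scheduled,  /* new hook */
    _Scheduler_SMP_Allocate_processor_exact                 /* new hook */
  );
}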
-rw-r--r--  cpukit/score/Makefile.am                                            1
-rw-r--r--  cpukit/score/include/rtems/score/schedulerpriorityaffinitysmp.h    68
-rw-r--r--  cpukit/score/include/rtems/score/schedulerprioritysmpimpl.h        84
-rw-r--r--  cpukit/score/include/rtems/score/schedulersmpimpl.h                113
-rw-r--r--  cpukit/score/preinstall.am                                           4
-rw-r--r--  cpukit/score/src/schedulerpriorityaffinitysmp.c                    502
-rw-r--r--  cpukit/score/src/schedulerprioritysmp.c                             35
-rw-r--r--  cpukit/score/src/schedulersimplesmp.c                               15
8 files changed, 745 insertions, 77 deletions
diff --git a/cpukit/score/Makefile.am b/cpukit/score/Makefile.am
index 7b17569cb5..2fc31c574d 100644
--- a/cpukit/score/Makefile.am
+++ b/cpukit/score/Makefile.am
@@ -114,6 +114,7 @@ endif
if HAS_SMP
include_rtems_score_HEADERS += include/rtems/score/atomic.h
include_rtems_score_HEADERS += include/rtems/score/cpustdatomic.h
+include_rtems_score_HEADERS += include/rtems/score/schedulerprioritysmpimpl.h
include_rtems_score_HEADERS += include/rtems/score/schedulerpriorityaffinitysmp.h
include_rtems_score_HEADERS += include/rtems/score/schedulersimplesmp.h
endif
diff --git a/cpukit/score/include/rtems/score/schedulerpriorityaffinitysmp.h b/cpukit/score/include/rtems/score/schedulerpriorityaffinitysmp.h
index 0ba8191e44..831d1d47a6 100644
--- a/cpukit/score/include/rtems/score/schedulerpriorityaffinitysmp.h
+++ b/cpukit/score/include/rtems/score/schedulerpriorityaffinitysmp.h
@@ -52,9 +52,9 @@ extern "C" {
_Scheduler_priority_SMP_Initialize, \
_Scheduler_default_Schedule, \
_Scheduler_priority_SMP_Yield, \
- _Scheduler_priority_SMP_Block, \
- _Scheduler_priority_SMP_Unblock, \
- _Scheduler_priority_SMP_Change_priority, \
+ _Scheduler_priority_affinity_SMP_Block, \
+ _Scheduler_priority_affinity_SMP_Unblock, \
+ _Scheduler_priority_affinity_SMP_Change_priority, \
_Scheduler_priority_affinity_SMP_Node_initialize, \
_Scheduler_default_Node_destroy, \
_Scheduler_priority_SMP_Update_priority, \
@@ -67,21 +67,47 @@ extern "C" {
}
/**
- * @brief Allocates @a the_thread->scheduler.
+ * @brief Initializes per thread scheduler information
*
- * This routine allocates @a the_thread->scheduler.
+ * This routine allocates @a thread->scheduler.
*
* @param[in] scheduler points to the scheduler specific information.
- * @param[in] the_thread is the thread the scheduler is allocating
+ * @param[in] thread is the thread the scheduler is allocating
* management memory for.
*/
void _Scheduler_priority_affinity_SMP_Node_initialize(
const Scheduler_Control *scheduler,
- Thread_Control *the_thread
+ Thread_Control *thread
);
/**
- * @brief Get affinity for the priority affinity smp scheduler.
+ * @brief SMP Priority Affinity Scheduler Block Operation
+ *
+ * This method is the block operation for this scheduler.
+ *
+ * @param[in] scheduler is the scheduler instance information
+ * @param[in] thread is the thread to block
+ */
+void _Scheduler_priority_affinity_SMP_Block(
+ const Scheduler_Control *scheduler,
+ Thread_Control *thread
+);
+
+/**
+ * @brief SMP Priority Affinity Scheduler Unblock Operation
+ *
+ * This method is the unblock operation for this scheduler.
+ *
+ * @param[in] scheduler is the scheduler instance information
+ * @param[in] thread is the thread to unblock
+ */
+void _Scheduler_priority_affinity_SMP_Unblock(
+ const Scheduler_Control *scheduler,
+ Thread_Control *thread
+);
+
+/**
+ * @brief Get affinity for the priority affinity SMP scheduler.
*
* @param[in] scheduler The scheduler of the thread.
* @param[in] thread The associated thread.
@@ -98,26 +124,44 @@ bool _Scheduler_priority_affinity_SMP_Get_affinity(
cpu_set_t *cpuset
);
+/**
+ * @brief Change priority for the priority affinity SMP scheduler.
+ *
+ * @param[in] scheduler The scheduler of the thread.
+ * @param[in] thread The associated thread.
+ * @param[in] new_priority The new priority for the thread.
+ * @param[in] prepend_it Append or prepend the thread to its priority FIFO.
+ */
+void _Scheduler_priority_affinity_SMP_Change_priority(
+ const Scheduler_Control *scheduler,
+ Thread_Control *the_thread,
+ Priority_Control new_priority,
+ bool prepend_it
+);
+
/**
- * @brief Set affinity for the priority affinity smp scheduler.
+ * @brief Set affinity for the priority affinity SMP scheduler.
*
* @param[in] scheduler The scheduler of the thread.
* @param[in] thread The associated thread.
* @param[in] cpusetsize The size of the cpuset.
* @param[in] cpuset Affinity new affinity set.
*
- * @retval 0 Successful
+ * @retval true if successful
+ * @retval false if unsuccessful
*/
bool _Scheduler_priority_affinity_SMP_Set_affinity(
const Scheduler_Control *scheduler,
Thread_Control *thread,
size_t cpusetsize,
- cpu_set_t *cpuset
+ const cpu_set_t *cpuset
);
/**
* @brief Scheduler node specialization for Deterministic Priority Affinity SMP
* schedulers.
+ *
+ * This is a per thread structure.
*/
typedef struct {
/**
@@ -137,4 +181,4 @@ typedef struct {
}
#endif /* __cplusplus */
-#endif /* _RTEMS_SCORE_SCHEDULERPRIORITYSMP_H */
+#endif /* _RTEMS_SCORE_SCHEDULERPRIORITYAFFINITYSMP_H */
diff --git a/cpukit/score/include/rtems/score/schedulerprioritysmpimpl.h b/cpukit/score/include/rtems/score/schedulerprioritysmpimpl.h
new file mode 100644
index 0000000000..2d8d1a5aed
--- /dev/null
+++ b/cpukit/score/include/rtems/score/schedulerprioritysmpimpl.h
@@ -0,0 +1,84 @@
+/**
+ * @file
+ *
+ * @ingroup ScoreSchedulerPrioritySMP
+ *
+ * @brief Deterministic Priority SMP Scheduler API
+ */
+
+/*
+ * Copyright (c) 2013-2014 embedded brains GmbH. All rights reserved.
+ *
+ * embedded brains GmbH
+ * Dornierstr. 4
+ * 82178 Puchheim
+ * Germany
+ * <rtems@embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#ifndef _RTEMS_SCORE_SCHEDULERPRIORITYSMPIMPL_H
+#define _RTEMS_SCORE_SCHEDULERPRIORITYSMPIMPL_H
+
+#include <rtems/score/scheduler.h>
+#include <rtems/score/schedulerpriority.h>
+#include <rtems/score/schedulersmp.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+/**
+ * @ingroup ScoreSchedulerPrioritySMP
+ * @{
+ */
+
+Scheduler_priority_SMP_Context *_Scheduler_priority_SMP_Get_self(
+ Scheduler_Context *context
+);
+
+Scheduler_priority_SMP_Node *_Scheduler_priority_SMP_Node_get(
+ Thread_Control *thread
+);
+
+void _Scheduler_priority_SMP_Insert_ready_fifo(
+ Scheduler_Context *context,
+ Thread_Control *thread
+);
+
+void _Scheduler_priority_SMP_Insert_ready_lifo(
+ Scheduler_Context *context,
+ Thread_Control *thread
+);
+
+void _Scheduler_priority_SMP_Move_from_scheduled_to_ready(
+ Scheduler_Context *context,
+ Thread_Control *scheduled_to_ready
+);
+
+void _Scheduler_priority_SMP_Move_from_ready_to_scheduled(
+ Scheduler_Context *context,
+ Thread_Control *ready_to_scheduled
+);
+
+void _Scheduler_priority_SMP_Extract_from_ready(
+ Scheduler_Context *context,
+ Thread_Control *thread
+);
+
+void _Scheduler_priority_SMP_Do_update(
+ Scheduler_Context *context,
+ Scheduler_Node *base_node,
+ Priority_Control new_priority
+);
+
+/** @} */
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* _RTEMS_SCORE_SCHEDULERPRIORITYSMPIMPL_H */
diff --git a/cpukit/score/include/rtems/score/schedulersmpimpl.h b/cpukit/score/include/rtems/score/schedulersmpimpl.h
index bb7c41e8c0..c3b0ab3864 100644
--- a/cpukit/score/include/rtems/score/schedulersmpimpl.h
+++ b/cpukit/score/include/rtems/score/schedulersmpimpl.h
@@ -275,7 +275,14 @@ extern "C" {
*/
typedef Thread_Control *( *Scheduler_SMP_Get_highest_ready )(
- Scheduler_Context *context
+ Scheduler_Context *context,
+ Thread_Control *blocking
+);
+
+typedef Thread_Control *( *Scheduler_SMP_Get_lowest_scheduled )(
+ Scheduler_Context *context,
+ Thread_Control *thread,
+ Chain_Node_order order
);
typedef void ( *Scheduler_SMP_Extract )(
@@ -304,6 +311,12 @@ typedef void ( *Scheduler_SMP_Enqueue )(
Thread_Control *thread_to_enqueue
);
+typedef void ( *Scheduler_SMP_Allocate_processor )(
+ Scheduler_SMP_Context *self,
+ Thread_Control *scheduled,
+ Thread_Control *victim
+);
+
static inline Scheduler_SMP_Context *_Scheduler_SMP_Get_self(
Scheduler_Context *context
)
@@ -382,7 +395,7 @@ static inline void _Scheduler_SMP_Update_heir(
}
}
-static inline void _Scheduler_SMP_Allocate_processor(
+static void _Scheduler_SMP_Allocate_processor(
Scheduler_SMP_Context *self,
Thread_Control *scheduled,
Thread_Control *victim
@@ -420,10 +433,13 @@ static inline void _Scheduler_SMP_Allocate_processor(
}
}
-static inline Thread_Control *_Scheduler_SMP_Get_lowest_scheduled(
- Scheduler_SMP_Context *self
+static Thread_Control *_Scheduler_SMP_Get_lowest_scheduled(
+ Scheduler_Context *context,
+ Thread_Control *filter,
+ Chain_Node_order order
)
{
+ Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );
Thread_Control *lowest_ready = NULL;
Chain_Control *scheduled = &self->Scheduled;
@@ -431,6 +447,12 @@ static inline Thread_Control *_Scheduler_SMP_Get_lowest_scheduled(
lowest_ready = (Thread_Control *) _Chain_Last( scheduled );
}
+ /*
+ * _Scheduler_SMP_Enqueue_ordered() assumes that get_lowest_scheduled
+ * helpers may return NULL. But this method never should.
+ */
+ _Assert( lowest_ready != NULL );
+
return lowest_ready;
}
@@ -443,28 +465,45 @@ static inline Thread_Control *_Scheduler_SMP_Get_lowest_scheduled(
* @param[in] thread The thread to enqueue.
* @param[in] order The order function.
* @param[in] insert_ready Function to insert a node into the set of ready
- * nodes.
+ * nodes.
* @param[in] insert_scheduled Function to insert a node into the set of
- * scheduled nodes.
+ * scheduled nodes.
* @param[in] move_from_scheduled_to_ready Function to move a node from the set
- * of scheduled nodes to the set of ready nodes.
+ * of scheduled nodes to the set of ready nodes.
+ * @param[in] get_lowest_scheduled Function to select the thread from the
+ * scheduled nodes to replace. It may not be possible to find one.
+ * @param[in] allocate_processor Function to allocate a processor to a thread
+ * based on the rules of the scheduler.
*/
static inline void _Scheduler_SMP_Enqueue_ordered(
- Scheduler_Context *context,
- Thread_Control *thread,
- Chain_Node_order order,
- Scheduler_SMP_Insert insert_ready,
- Scheduler_SMP_Insert insert_scheduled,
- Scheduler_SMP_Move move_from_scheduled_to_ready
+ Scheduler_Context *context,
+ Thread_Control *thread,
+ Chain_Node_order order,
+ Scheduler_SMP_Insert insert_ready,
+ Scheduler_SMP_Insert insert_scheduled,
+ Scheduler_SMP_Move move_from_scheduled_to_ready,
+ Scheduler_SMP_Get_lowest_scheduled get_lowest_scheduled,
+ Scheduler_SMP_Allocate_processor allocate_processor
)
{
Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );
Thread_Control *lowest_scheduled =
- _Scheduler_SMP_Get_lowest_scheduled( self );
+ ( *get_lowest_scheduled )( context, thread, order );
- _Assert( lowest_scheduled != NULL );
+ /*
+ * get_lowest_scheduled can return a NULL if no scheduled threads
+ * should be removed from their processor based on the selection
+ * criteria. For example, this can occur when the affinity of the
+ * thread being enqueued schedules it against higher priority threads.
+ * A low priority thread with affinity can only consider the threads
+ * which are on the cores it has affinity for.
+ *
+ * A get_lowest_scheduled helper that can never return NULL should
+ * contain its own assertion.
+ */
- if ( ( *order )( &thread->Object.Node, &lowest_scheduled->Object.Node ) ) {
+ if ( lowest_scheduled &&
+ ( *order )( &thread->Object.Node, &lowest_scheduled->Object.Node ) ) {
Scheduler_SMP_Node *lowest_scheduled_node =
_Scheduler_SMP_Node_get( lowest_scheduled );
@@ -472,7 +511,7 @@ static inline void _Scheduler_SMP_Enqueue_ordered(
lowest_scheduled_node,
SCHEDULER_SMP_NODE_READY
);
- _Scheduler_SMP_Allocate_processor( self, thread, lowest_scheduled );
+ ( *allocate_processor )( self, thread, lowest_scheduled );
( *insert_scheduled )( &self->Base, thread );
( *move_from_scheduled_to_ready )( &self->Base, lowest_scheduled );
} else {
@@ -489,25 +528,29 @@ static inline void _Scheduler_SMP_Enqueue_ordered(
* @param[in] order The order function.
* @param[in] get_highest_ready Function to get the highest ready node.
* @param[in] insert_ready Function to insert a node into the set of ready
- * nodes.
+ * nodes.
* @param[in] insert_scheduled Function to insert a node into the set of
- * scheduled nodes.
+ * scheduled nodes.
* @param[in] move_from_ready_to_scheduled Function to move a node from the set
- * of ready nodes to the set of scheduled nodes.
+ * of ready nodes to the set of scheduled nodes.
+ * @param[in] allocate_processor Function to allocate a processor to a thread
+ * based on the rules of the scheduler.
*/
static inline void _Scheduler_SMP_Enqueue_scheduled_ordered(
Scheduler_Context *context,
- Thread_Control *thread,
- Chain_Node_order order,
- Scheduler_SMP_Get_highest_ready get_highest_ready,
- Scheduler_SMP_Insert insert_ready,
- Scheduler_SMP_Insert insert_scheduled,
- Scheduler_SMP_Move move_from_ready_to_scheduled
+ Thread_Control *thread,
+ Chain_Node_order order,
+ Scheduler_SMP_Get_highest_ready get_highest_ready,
+ Scheduler_SMP_Insert insert_ready,
+ Scheduler_SMP_Insert insert_scheduled,
+ Scheduler_SMP_Move move_from_ready_to_scheduled,
+ Scheduler_SMP_Allocate_processor allocate_processor
)
{
Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );
Scheduler_SMP_Node *node = _Scheduler_SMP_Node_get( thread );
- Thread_Control *highest_ready = ( *get_highest_ready )( &self->Base );
+ Thread_Control *highest_ready =
+ ( *get_highest_ready )( &self->Base, thread );
_Assert( highest_ready != NULL );
@@ -519,7 +562,7 @@ static inline void _Scheduler_SMP_Enqueue_scheduled_ordered(
( *insert_scheduled )( &self->Base, thread );
} else {
_Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_READY );
- _Scheduler_SMP_Allocate_processor( self, highest_ready, thread );
+ ( *allocate_processor )( self, highest_ready, thread );
( *insert_ready )( &self->Base, thread );
( *move_from_ready_to_scheduled )( &self->Base, highest_ready );
}
@@ -536,13 +579,15 @@ static inline void _Scheduler_SMP_Schedule_highest_ready(
Scheduler_Context *context,
Thread_Control *victim,
Scheduler_SMP_Get_highest_ready get_highest_ready,
- Scheduler_SMP_Move move_from_ready_to_scheduled
+ Scheduler_SMP_Move move_from_ready_to_scheduled,
+ Scheduler_SMP_Allocate_processor allocate_processor
)
{
Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );
- Thread_Control *highest_ready = ( *get_highest_ready )( &self->Base );
+ Thread_Control *highest_ready =
+ ( *get_highest_ready )( &self->Base, victim );
- _Scheduler_SMP_Allocate_processor( self, highest_ready, victim );
+ ( *allocate_processor )( self, highest_ready, victim );
( *move_from_ready_to_scheduled )( &self->Base, highest_ready );
}
@@ -563,7 +608,8 @@ static inline void _Scheduler_SMP_Block(
Thread_Control *thread,
Scheduler_SMP_Extract extract_from_ready,
Scheduler_SMP_Get_highest_ready get_highest_ready,
- Scheduler_SMP_Move move_from_ready_to_scheduled
+ Scheduler_SMP_Move move_from_ready_to_scheduled,
+ Scheduler_SMP_Allocate_processor allocate_processor
)
{
Scheduler_SMP_Node *node = _Scheduler_SMP_Node_get( thread );
@@ -578,7 +624,8 @@ static inline void _Scheduler_SMP_Block(
context,
thread,
get_highest_ready,
- move_from_ready_to_scheduled
+ move_from_ready_to_scheduled,
+ allocate_processor
);
} else {
( *extract_from_ready )( context, thread );
diff --git a/cpukit/score/preinstall.am b/cpukit/score/preinstall.am
index f5474c5406..891c21e93e 100644
--- a/cpukit/score/preinstall.am
+++ b/cpukit/score/preinstall.am
@@ -406,6 +406,10 @@ $(PROJECT_INCLUDE)/rtems/score/cpustdatomic.h: include/rtems/score/cpustdatomic.
$(INSTALL_DATA) $< $(PROJECT_INCLUDE)/rtems/score/cpustdatomic.h
PREINSTALL_FILES += $(PROJECT_INCLUDE)/rtems/score/cpustdatomic.h
+$(PROJECT_INCLUDE)/rtems/score/schedulerprioritysmpimpl.h: include/rtems/score/schedulerprioritysmpimpl.h $(PROJECT_INCLUDE)/rtems/score/$(dirstamp)
+ $(INSTALL_DATA) $< $(PROJECT_INCLUDE)/rtems/score/schedulerprioritysmpimpl.h
+PREINSTALL_FILES += $(PROJECT_INCLUDE)/rtems/score/schedulerprioritysmpimpl.h
+
$(PROJECT_INCLUDE)/rtems/score/schedulerpriorityaffinitysmp.h: include/rtems/score/schedulerpriorityaffinitysmp.h $(PROJECT_INCLUDE)/rtems/score/$(dirstamp)
$(INSTALL_DATA) $< $(PROJECT_INCLUDE)/rtems/score/schedulerpriorityaffinitysmp.h
PREINSTALL_FILES += $(PROJECT_INCLUDE)/rtems/score/schedulerpriorityaffinitysmp.h
diff --git a/cpukit/score/src/schedulerpriorityaffinitysmp.c b/cpukit/score/src/schedulerpriorityaffinitysmp.c
index 317d8ce32d..75208b7f8c 100644
--- a/cpukit/score/src/schedulerpriorityaffinitysmp.c
+++ b/cpukit/score/src/schedulerpriorityaffinitysmp.c
@@ -22,30 +22,490 @@
#include <rtems/score/schedulerpriorityaffinitysmp.h>
#include <rtems/score/schedulerpriorityimpl.h>
#include <rtems/score/schedulersmpimpl.h>
+#include <rtems/score/schedulerprioritysmpimpl.h>
#include <rtems/score/wkspace.h>
#include <rtems/score/cpusetimpl.h>
+#include <rtems/score/priority.h>
+
+/*
+ * The following methods, which were initially static in
+ * schedulerprioritysmp.c, are now public so they can be shared with
+ * this scheduler.
+ *
+ * + _Scheduler_priority_SMP_Get_self
+ * + _Scheduler_priority_SMP_Insert_ready_fifo
+ * + _Scheduler_priority_SMP_Insert_ready_lifo
+ * + _Scheduler_priority_SMP_Node_get
+ * + _Scheduler_priority_SMP_Move_from_scheduled_to_ready
+ * + _Scheduler_priority_SMP_Move_from_ready_to_scheduled
+ * + _Scheduler_priority_SMP_Extract_from_ready
+ * + _Scheduler_priority_SMP_Do_update
+ */
+
+/*
+ * This method returns the scheduler node for the specified thread
+ * as a scheduler specific type.
+ */
static Scheduler_priority_affinity_SMP_Node *
-_Scheduler_priority_affinity_Node_get( Thread_Control *thread )
+_Scheduler_priority_affinity_SMP_Node_get(
+ Thread_Control *thread
+)
{
- return ( Scheduler_priority_affinity_SMP_Node * )
- _Scheduler_Node_get( thread );
+ return (Scheduler_priority_affinity_SMP_Node *) _Scheduler_Node_get( thread );
}
+/*
+ * This method initializes the per thread scheduler node information
+ * for this scheduler instance.
+ */
void _Scheduler_priority_affinity_SMP_Node_initialize(
const Scheduler_Control *scheduler,
- Thread_Control *the_thread
+ Thread_Control *thread
)
{
+ Scheduler_SMP_Node *smp_node = _Scheduler_SMP_Node_get( thread );
+
Scheduler_priority_affinity_SMP_Node *node =
- _Scheduler_priority_affinity_Node_get( the_thread );
+ _Scheduler_priority_affinity_SMP_Node_get( thread );
- _Scheduler_SMP_Node_initialize( &node->Base.Base );
+ (void) scheduler;
- node->Affinity = *_CPU_set_Default();
+ /*
+ * All we add is affinity information to the basic SMP node.
+ */
+ _Scheduler_SMP_Node_initialize( smp_node );
+
+ node->Affinity = *_CPU_set_Default();
node->Affinity.set = &node->Affinity.preallocated;
}
+/*
+ * This method is slightly different from _Scheduler_SMP_Allocate_processor()
+ * in that it does what it is asked to do. _Scheduler_SMP_Allocate_processor()
+ * attempts to prevent migrations but does not take affinity into account.
+ */
+static inline void _Scheduler_SMP_Allocate_processor_exact(
+ Scheduler_SMP_Context *self,
+ Thread_Control *scheduled,
+ Thread_Control *victim
+)
+{
+ Scheduler_SMP_Node *scheduled_node = _Scheduler_SMP_Node_get( scheduled );
+ Per_CPU_Control *cpu_of_scheduled = _Thread_Get_CPU( scheduled );
+ Per_CPU_Control *cpu_of_victim = _Thread_Get_CPU( victim );
+ Per_CPU_Control *cpu_self = _Per_CPU_Get();
+
+ _Scheduler_SMP_Node_change_state(
+ scheduled_node,
+ SCHEDULER_SMP_NODE_SCHEDULED
+ );
+
+ _Thread_Set_CPU( scheduled, cpu_of_victim );
+ _Scheduler_SMP_Update_heir( cpu_self, cpu_of_victim, scheduled );
+}
+
+/*
+ * This method is unique to this scheduler because it takes into
+ * account affinity as it determines the highest ready thread.
+ * Since this is used to pick a new thread to replace the victim,
+ * the highest ready thread must have affinity such that it can
+ * be executed on the victim's processor.
+ */
+static Thread_Control *_Scheduler_priority_affinity_SMP_Get_highest_ready(
+ Scheduler_Context *context,
+ Thread_Control *victim
+)
+{
+ Scheduler_priority_SMP_Context *self =
+ _Scheduler_priority_SMP_Get_self( context );
+ Priority_Control index;
+ Thread_Control *highest = NULL;
+ int victim_cpu;
+
+ /*
+ * A NULL victim indicates that we only need to check whether
+ * reevaluations are needed.
+ */
+ if ( victim == NULL ) {
+ return _Scheduler_priority_Ready_queue_first(
+ &self->Bit_map,
+ &self->Ready[ 0 ]
+ );
+ }
+
+ victim_cpu = _Per_CPU_Get_index( _Thread_Get_CPU( victim ) );
+
+ /**
+ * @todo The deterministic priority scheduler structure is optimized
+ * for insertion, extraction, and finding the highest priority
+ * thread. Scanning the list of ready threads is not a purpose
+ * for which it was optimized. There are optimizations to be
+ * made in this loop.
+ *
+ * + by checking the major bit, we could potentially skip entire
+ * groups of 16.
+ *
+ * When using this scheduler as implemented, the application's
+ * choice of numeric priorities and their distribution can have
+ * an impact on performance.
+ */
+ for ( index = _Priority_bit_map_Get_highest( &self->Bit_map ) ;
+ index <= PRIORITY_MAXIMUM;
+ index++ )
+ {
+ Chain_Control *chain = &self->Ready[index];
+ Chain_Node *chain_node;
+ for ( chain_node = _Chain_First( chain );
+ chain_node != _Chain_Immutable_tail( chain ) ;
+ chain_node = _Chain_Next( chain_node ) )
+ {
+ Thread_Control *thread;
+ Scheduler_priority_affinity_SMP_Node *node;
+
+ thread = (Thread_Control *) chain_node;
+ node = _Scheduler_priority_affinity_SMP_Node_get( thread );
+
+ /*
+ * Can this thread run on this CPU?
+ */
+ if ( CPU_ISSET( victim_cpu, node->Affinity.set ) ) {
+ highest = thread;
+ break;
+ }
+ }
+ if ( highest )
+ break;
+ }
+
+ _Assert( highest != NULL );
+
+ return highest;
+}
+
+/*
+ * This method is very similar to _Scheduler_priority_SMP_Block()
+ * but has the difference that it invokes this scheduler's
+ * get_highest_ready() support method.
+ */
+void _Scheduler_priority_affinity_SMP_Block(
+ const Scheduler_Control *scheduler,
+ Thread_Control *thread
+)
+{
+ Scheduler_Context *context = _Scheduler_Get_context( scheduler );
+
+ _Scheduler_SMP_Block(
+ context,
+ thread,
+ _Scheduler_priority_SMP_Extract_from_ready,
+ _Scheduler_priority_affinity_SMP_Get_highest_ready,
+ _Scheduler_priority_SMP_Move_from_ready_to_scheduled,
+ _Scheduler_SMP_Allocate_processor_exact
+ );
+
+ /*
+ * Since this removed a single thread from the scheduled set
+ * and selected the most appropriate thread from the ready
+ * set to replace it, there should be no need for thread
+ * migrations.
+ */
+}
+
+/*
+ * This method is unique to this scheduler because it must take into
+ * account affinity as it searches for the lowest priority scheduled
+ * thread. It ignores scheduled threads which the filter thread cannot
+ * replace because the filter thread does not have affinity for the
+ * processor on which the potential victim executes.
+ */
+static Thread_Control *_Scheduler_priority_affinity_SMP_Get_lowest_scheduled(
+ Scheduler_Context *context,
+ Thread_Control *filter,
+ Chain_Node_order order
+)
+{
+ Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );
+ Thread_Control *lowest_scheduled = NULL;
+ Thread_Control *thread = NULL;
+ Chain_Control *scheduled = &self->Scheduled;
+ Scheduler_priority_affinity_SMP_Node *node =
+ _Scheduler_priority_affinity_SMP_Node_get( filter );
+
+ for ( thread = (Thread_Control *) _Chain_Last( scheduled );
+ (Chain_Node *) thread != _Chain_Immutable_head( scheduled ) ;
+ thread = (Thread_Control *) _Chain_Previous( &thread->Object.Node ) ) {
+ int cpu_index;
+
+ /*
+ * If we didn't find a thread which is of equal or lower importance
+ * than filter thread is, then we can't schedule the filter thread
+ * to execute.
+ */
+ if ( (*order)(&thread->Object.Node, &filter->Object.Node) )
+ break;
+
+ /* cpu_index is the processor number thread is executing on */
+ cpu_index = _Per_CPU_Get_index( _Thread_Get_CPU( thread ) );
+
+ if ( CPU_ISSET( cpu_index, node->Affinity.set ) ) {
+ lowest_scheduled = thread;
+ break;
+ }
+
+ }
+
+ return lowest_scheduled;
+}
+
+/*
+ * This method is unique to this scheduler because it must pass
+ * _Scheduler_priority_affinity_SMP_Get_lowest_scheduled into
+ * _Scheduler_SMP_Enqueue_ordered.
+ */
+static void _Scheduler_priority_affinity_SMP_Enqueue_fifo(
+ Scheduler_Context *context,
+ Thread_Control *thread
+)
+{
+ _Scheduler_SMP_Enqueue_ordered(
+ context,
+ thread,
+ _Scheduler_simple_Insert_priority_fifo_order,
+ _Scheduler_priority_SMP_Insert_ready_fifo,
+ _Scheduler_SMP_Insert_scheduled_fifo,
+ _Scheduler_priority_SMP_Move_from_scheduled_to_ready,
+ _Scheduler_priority_affinity_SMP_Get_lowest_scheduled,
+ _Scheduler_SMP_Allocate_processor_exact
+ );
+}
+
+/*
+ * This method is invoked at the end of certain scheduling operations
+ * to ensure that the highest priority ready threads are scheduled
+ * to execute. When we schedule with affinity, there is the possibility
+ * that we need to migrate a thread to another core to ensure that the
+ * highest priority ready threads are in fact scheduled.
+ */
+static void _Scheduler_priority_affinity_SMP_Check_for_migrations(
+ Scheduler_Context *context
+)
+{
+ Thread_Control *lowest_scheduled;
+ Thread_Control *highest_ready;
+ Scheduler_SMP_Node *lowest_scheduled_node;
+ Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );
+
+ while (1) {
+ highest_ready =
+ _Scheduler_priority_affinity_SMP_Get_highest_ready( context, NULL );
+ lowest_scheduled = _Scheduler_priority_affinity_SMP_Get_lowest_scheduled(
+ context,
+ highest_ready,
+ _Scheduler_simple_Insert_priority_lifo_order
+ );
+
+ /*
+ * If we can't find a thread to displace from the scheduled set,
+ * then we have placed all the highest priority threads possible
+ * in the scheduled set.
+ *
+ * We found the absolute highest priority thread without
+ * considering affinity. But now we have to consider that thread's
+ * affinity as we look to place it.
+ */
+ if ( lowest_scheduled == NULL )
+ break;
+
+ /*
+ * But if we found a thread which is lower priority than one
+ * in the ready set, then we need to swap them out.
+ */
+ lowest_scheduled_node = _Scheduler_SMP_Node_get( lowest_scheduled );
+
+ _Scheduler_SMP_Node_change_state(
+ lowest_scheduled_node,
+ SCHEDULER_SMP_NODE_READY
+ );
+
+ _Scheduler_SMP_Allocate_processor_exact(
+ self,
+ highest_ready,
+ lowest_scheduled
+ );
+
+ _Scheduler_priority_SMP_Move_from_ready_to_scheduled(
+ context,
+ highest_ready
+ );
+
+ _Scheduler_priority_SMP_Move_from_scheduled_to_ready(
+ &self->Base,
+ lowest_scheduled
+ );
+ }
+}
+
+/*
+ * This is the public scheduler specific Unblock operation.
+ */
+void _Scheduler_priority_affinity_SMP_Unblock(
+ const Scheduler_Control *scheduler,
+ Thread_Control *thread
+)
+{
+ Scheduler_Context *context = _Scheduler_Get_context( scheduler );
+
+ _Scheduler_SMP_Unblock(
+ context,
+ thread,
+ _Scheduler_priority_affinity_SMP_Enqueue_fifo
+ );
+
+ /*
+ * Perform any thread migrations that are needed due to these changes.
+ */
+ _Scheduler_priority_affinity_SMP_Check_for_migrations( context );
+}
+
+/*
+ * This is unique to this scheduler because it passes a scheduler specific
+ * get_lowest_scheduled helper to _Scheduler_SMP_Enqueue_ordered.
+ */
+static void _Scheduler_priority_affinity_SMP_Enqueue_ordered(
+ Scheduler_Context *context,
+ Thread_Control *thread,
+ Chain_Node_order order,
+ Scheduler_SMP_Insert insert_ready,
+ Scheduler_SMP_Insert insert_scheduled
+)
+{
+ _Scheduler_SMP_Enqueue_ordered(
+ context,
+ thread,
+ order,
+ insert_ready,
+ insert_scheduled,
+ _Scheduler_priority_SMP_Move_from_scheduled_to_ready,
+ _Scheduler_priority_affinity_SMP_Get_lowest_scheduled,
+ _Scheduler_SMP_Allocate_processor_exact
+ );
+}
+
+/*
+ * This is unique to this scheduler because it is on the path
+ * to _Scheduler_priority_affinity_SMP_Enqueue_ordered() which
+ * invokes a scheduler unique get_lowest_scheduled helper.
+ */
+static void _Scheduler_priority_affinity_SMP_Enqueue_lifo(
+ Scheduler_Context *context,
+ Thread_Control *thread
+)
+{
+ _Scheduler_priority_affinity_SMP_Enqueue_ordered(
+ context,
+ thread,
+ _Scheduler_simple_Insert_priority_lifo_order,
+ _Scheduler_priority_SMP_Insert_ready_lifo,
+ _Scheduler_SMP_Insert_scheduled_lifo
+ );
+}
+
+/*
+ * This method is unique to this scheduler because it must
+ * invoke _Scheduler_SMP_Enqueue_scheduled_ordered() with
+ * this scheduler's get_highest_ready() helper.
+ */
+static void _Scheduler_priority_affinity_SMP_Enqueue_scheduled_ordered(
+ Scheduler_Context *context,
+ Thread_Control *thread,
+ Chain_Node_order order,
+ Scheduler_SMP_Insert insert_ready,
+ Scheduler_SMP_Insert insert_scheduled
+)
+{
+ _Scheduler_SMP_Enqueue_scheduled_ordered(
+ context,
+ thread,
+ order,
+ _Scheduler_priority_affinity_SMP_Get_highest_ready,
+ insert_ready,
+ insert_scheduled,
+ _Scheduler_priority_SMP_Move_from_ready_to_scheduled,
+ _Scheduler_SMP_Allocate_processor_exact
+ );
+}
+
+/*
+ * This is unique to this scheduler because it is on the path
+ * to _Scheduler_priority_affinity_SMP_Enqueue_scheduled_ordered() which
+ * invokes a scheduler unique get_highest_ready helper.
+ */
+static void _Scheduler_priority_affinity_SMP_Enqueue_scheduled_lifo(
+ Scheduler_Context *context,
+ Thread_Control *thread
+)
+{
+ _Scheduler_priority_affinity_SMP_Enqueue_scheduled_ordered(
+ context,
+ thread,
+ _Scheduler_simple_Insert_priority_lifo_order,
+ _Scheduler_priority_SMP_Insert_ready_lifo,
+ _Scheduler_SMP_Insert_scheduled_lifo
+ );
+}
+
+/*
+ * This is unique to this scheduler because it is on the path
+ * to _Scheduler_priority_affinity_SMP_Enqueue_scheduled_ordered() which
+ * invokes a scheduler unique get_highest_ready helper.
+ */
+static void _Scheduler_priority_affinity_SMP_Enqueue_scheduled_fifo(
+ Scheduler_Context *context,
+ Thread_Control *thread
+)
+{
+ _Scheduler_priority_affinity_SMP_Enqueue_scheduled_ordered(
+ context,
+ thread,
+ _Scheduler_simple_Insert_priority_fifo_order,
+ _Scheduler_priority_SMP_Insert_ready_fifo,
+ _Scheduler_SMP_Insert_scheduled_fifo
+ );
+}
+
+/*
+ * This is the public scheduler specific Change Priority operation.
+ */
+void _Scheduler_priority_affinity_SMP_Change_priority(
+ const Scheduler_Control *scheduler,
+ Thread_Control *thread,
+ Priority_Control new_priority,
+ bool prepend_it
+)
+{
+ Scheduler_Context *context = _Scheduler_Get_context( scheduler );
+
+ _Scheduler_SMP_Change_priority(
+ context,
+ thread,
+ new_priority,
+ prepend_it,
+ _Scheduler_priority_SMP_Extract_from_ready,
+ _Scheduler_priority_SMP_Do_update,
+ _Scheduler_priority_affinity_SMP_Enqueue_fifo,
+ _Scheduler_priority_affinity_SMP_Enqueue_lifo,
+ _Scheduler_priority_affinity_SMP_Enqueue_scheduled_fifo,
+ _Scheduler_priority_affinity_SMP_Enqueue_scheduled_lifo
+ );
+
+ /*
+ * Perform any thread migrations that are needed due to these changes.
+ */
+ _Scheduler_priority_affinity_SMP_Check_for_migrations( context );
+}
+
+/*
+ * This is the public scheduler specific Get Affinity operation.
+ */
bool _Scheduler_priority_affinity_SMP_Get_affinity(
const Scheduler_Control *scheduler,
Thread_Control *thread,
@@ -54,7 +514,7 @@ bool _Scheduler_priority_affinity_SMP_Get_affinity(
)
{
Scheduler_priority_affinity_SMP_Node *node =
- _Scheduler_priority_affinity_Node_get(thread);
+ _Scheduler_priority_affinity_SMP_Node_get(thread);
(void) scheduler;
@@ -63,26 +523,38 @@ bool _Scheduler_priority_affinity_SMP_Get_affinity(
}
CPU_COPY( cpuset, node->Affinity.set );
- return true;
+ return true;
}
bool _Scheduler_priority_affinity_SMP_Set_affinity(
const Scheduler_Control *scheduler,
Thread_Control *thread,
size_t cpusetsize,
- cpu_set_t *cpuset
+ const cpu_set_t *cpuset
)
{
Scheduler_priority_affinity_SMP_Node *node =
- _Scheduler_priority_affinity_Node_get(thread);
+ _Scheduler_priority_affinity_SMP_Node_get(thread);
(void) scheduler;
-
- if ( ! _CPU_set_Is_valid( cpuset, cpusetsize ) ) {
+
+ /*
+ * Validate that the cpuset meets basic requirements.
+ */
+ if ( !_CPU_set_Is_valid( cpuset, cpusetsize ) ) {
return false;
}
- CPU_COPY( node->Affinity.set, cpuset );
-
+ /*
+ * If the old and new affinity sets are the same, there is no point
+ * in doing anything.
+ */
+ if ( CPU_EQUAL_S( cpusetsize, cpuset, node->Affinity.set ) )
+ return true;
+
+ _Thread_Set_state( thread, STATES_MIGRATING );
+ CPU_COPY( node->Affinity.set, cpuset );
+ _Thread_Clear_state( thread, STATES_MIGRATING );
+
return true;
}
diff --git a/cpukit/score/src/schedulerprioritysmp.c b/cpukit/score/src/schedulerprioritysmp.c
index 7915ce6bef..48da162fe1 100644
--- a/cpukit/score/src/schedulerprioritysmp.c
+++ b/cpukit/score/src/schedulerprioritysmp.c
@@ -26,6 +26,7 @@
#include <rtems/score/schedulerprioritysmp.h>
#include <rtems/score/schedulerpriorityimpl.h>
+#include <rtems/score/schedulerprioritysmpimpl.h>
#include <rtems/score/schedulersmpimpl.h>
static Scheduler_priority_SMP_Context *
@@ -34,13 +35,14 @@ _Scheduler_priority_SMP_Get_context( const Scheduler_Control *scheduler )
return (Scheduler_priority_SMP_Context *) _Scheduler_Get_context( scheduler );
}
-static Scheduler_priority_SMP_Context *
-_Scheduler_priority_SMP_Get_self( Scheduler_Context *context )
+Scheduler_priority_SMP_Context *_Scheduler_priority_SMP_Get_self(
+ Scheduler_Context *context
+)
{
return (Scheduler_priority_SMP_Context *) context;
}
-static Scheduler_priority_SMP_Node *_Scheduler_priority_SMP_Node_get(
+Scheduler_priority_SMP_Node *_Scheduler_priority_SMP_Node_get(
Thread_Control *thread
)
{
@@ -74,7 +76,7 @@ void _Scheduler_priority_SMP_Node_initialize(
_Scheduler_SMP_Node_initialize( node );
}
-static void _Scheduler_priority_SMP_Do_update(
+void _Scheduler_priority_SMP_Do_update(
Scheduler_Context *context,
Scheduler_Node *base_node,
Priority_Control new_priority
@@ -106,19 +108,22 @@ void _Scheduler_priority_SMP_Update_priority(
}
static Thread_Control *_Scheduler_priority_SMP_Get_highest_ready(
- Scheduler_Context *context
+ Scheduler_Context *context,
+ Thread_Control *thread
)
{
Scheduler_priority_SMP_Context *self =
_Scheduler_priority_SMP_Get_self( context );
+ (void) thread;
+
return _Scheduler_priority_Ready_queue_first(
&self->Bit_map,
&self->Ready[ 0 ]
);
}
-static void _Scheduler_priority_SMP_Move_from_scheduled_to_ready(
+void _Scheduler_priority_SMP_Move_from_scheduled_to_ready(
Scheduler_Context *context,
Thread_Control *scheduled_to_ready
)
@@ -136,7 +141,7 @@ static void _Scheduler_priority_SMP_Move_from_scheduled_to_ready(
);
}
-static void _Scheduler_priority_SMP_Move_from_ready_to_scheduled(
+void _Scheduler_priority_SMP_Move_from_ready_to_scheduled(
Scheduler_Context *context,
Thread_Control *ready_to_scheduled
)
@@ -157,7 +162,7 @@ static void _Scheduler_priority_SMP_Move_from_ready_to_scheduled(
);
}
-static void _Scheduler_priority_SMP_Insert_ready_lifo(
+void _Scheduler_priority_SMP_Insert_ready_lifo(
Scheduler_Context *context,
Thread_Control *thread
)
@@ -174,7 +179,7 @@ static void _Scheduler_priority_SMP_Insert_ready_lifo(
);
}
-static void _Scheduler_priority_SMP_Insert_ready_fifo(
+void _Scheduler_priority_SMP_Insert_ready_fifo(
Scheduler_Context *context,
Thread_Control *thread
)
@@ -191,7 +196,7 @@ static void _Scheduler_priority_SMP_Insert_ready_fifo(
);
}
-static void _Scheduler_priority_SMP_Extract_from_ready(
+void _Scheduler_priority_SMP_Extract_from_ready(
Scheduler_Context *context,
Thread_Control *thread
)
@@ -220,7 +225,8 @@ void _Scheduler_priority_SMP_Block(
thread,
_Scheduler_priority_SMP_Extract_from_ready,
_Scheduler_priority_SMP_Get_highest_ready,
- _Scheduler_priority_SMP_Move_from_ready_to_scheduled
+ _Scheduler_priority_SMP_Move_from_ready_to_scheduled,
+ _Scheduler_SMP_Allocate_processor
);
}
@@ -238,7 +244,9 @@ static void _Scheduler_priority_SMP_Enqueue_ordered(
order,
insert_ready,
insert_scheduled,
- _Scheduler_priority_SMP_Move_from_scheduled_to_ready
+ _Scheduler_priority_SMP_Move_from_scheduled_to_ready,
+ _Scheduler_SMP_Get_lowest_scheduled,
+ _Scheduler_SMP_Allocate_processor
);
}
@@ -285,7 +293,8 @@ static void _Scheduler_priority_SMP_Enqueue_scheduled_ordered(
_Scheduler_priority_SMP_Get_highest_ready,
insert_ready,
insert_scheduled,
- _Scheduler_priority_SMP_Move_from_ready_to_scheduled
+ _Scheduler_priority_SMP_Move_from_ready_to_scheduled,
+ _Scheduler_SMP_Allocate_processor
);
}
diff --git a/cpukit/score/src/schedulersimplesmp.c b/cpukit/score/src/schedulersimplesmp.c
index 029da67b7a..eb260efd09 100644
--- a/cpukit/score/src/schedulersimplesmp.c
+++ b/cpukit/score/src/schedulersimplesmp.c
@@ -64,12 +64,15 @@ static void _Scheduler_simple_SMP_Do_update(
}
static Thread_Control *_Scheduler_simple_SMP_Get_highest_ready(
- Scheduler_Context *context
+ Scheduler_Context *context,
+ Thread_Control *thread
)
{
Scheduler_simple_SMP_Context *self =
_Scheduler_simple_SMP_Get_self( context );
+ (void) thread;
+
return (Thread_Control *) _Chain_First( &self->Ready );
}
@@ -155,7 +158,8 @@ void _Scheduler_simple_SMP_Block(
thread,
_Scheduler_simple_SMP_Extract_from_ready,
_Scheduler_simple_SMP_Get_highest_ready,
- _Scheduler_simple_SMP_Move_from_ready_to_scheduled
+ _Scheduler_simple_SMP_Move_from_ready_to_scheduled,
+ _Scheduler_SMP_Allocate_processor
);
}
@@ -173,7 +177,9 @@ static void _Scheduler_simple_SMP_Enqueue_ordered(
order,
insert_ready,
insert_scheduled,
- _Scheduler_simple_SMP_Move_from_scheduled_to_ready
+ _Scheduler_simple_SMP_Move_from_scheduled_to_ready,
+ _Scheduler_SMP_Get_lowest_scheduled,
+ _Scheduler_SMP_Allocate_processor
);
}
@@ -220,7 +226,8 @@ static void _Scheduler_simple_SMP_Enqueue_scheduled_ordered(
_Scheduler_simple_SMP_Get_highest_ready,
insert_ready,
insert_scheduled,
- _Scheduler_simple_SMP_Move_from_ready_to_scheduled
+ _Scheduler_simple_SMP_Move_from_ready_to_scheduled,
+ _Scheduler_SMP_Allocate_processor
);
}