author    Jennifer Averett <jennifer.averett@oarcorp.com>  2014-06-26 09:40:03 -0500
committer Daniel Cederman <cederman@gaisler.com>  2014-06-27 11:17:02 +0200
commit    e7551d7f1ec35cd52acb93394c4b9ba017f95ec0 (tag: rcc-v1.2.99.0)
tree      35a27596f4bac02dfeb7900e3672d51c54e1c3c7
parent    24641baa5245f9248c1d972e605238d44f7254f4

score: Remove migration from the priority affinity scheduler
This patch removes the migration of running threads: a thread that is already executing on a processor is never selected as a migration candidate. This may result in less than optimal run sets.
-rw-r--r--  cpukit/score/src/schedulerpriorityaffinitysmp.c  95
1 file changed, 93 insertions(+), 2 deletions(-)
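The heart of the change is a filter that skips threads which are already
executing on a processor when scanning the ready chains, so a running thread
is never chosen as a migration candidate. A minimal standalone sketch of that
filtering pattern (the thread_t type and first_migratable() helper are
hypothetical stand-ins for illustration, not the RTEMS API):

#include <stdbool.h>
#include <stddef.h>

/* Hypothetical stand-in for the scheduler's per-thread state. */
typedef struct {
  int  priority;
  bool is_executing;  /* thread is currently running on some processor */
} thread_t;

/*
 * Return the first ready thread that is not currently executing, or
 * NULL if every ready thread is already running somewhere.
 */
static thread_t *first_migratable( thread_t *ready[], size_t n )
{
  for ( size_t i = 0; i < n; ++i ) {
    if ( ready[ i ]->is_executing )
      continue;  /* never migrate a running thread */
    return ready[ i ];
  }
  return NULL;
}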
diff --git a/cpukit/score/src/schedulerpriorityaffinitysmp.c b/cpukit/score/src/schedulerpriorityaffinitysmp.c
index bc240544d9..be8f7b08c0 100644
--- a/cpukit/score/src/schedulerpriorityaffinitysmp.c
+++ b/cpukit/score/src/schedulerpriorityaffinitysmp.c
@@ -28,6 +28,8 @@
#include <rtems/score/priority.h>
+#include <rtems/score/assert.h>
+
/*
 * The following methods initially were static in schedulerprioritysmp.c.
 * They are now public so they can be shared with this scheduler.
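The newly included <rtems/score/assert.h> provides _Assert(), which performs a
real check only when the kernel is built with RTEMS_DEBUG and compiles to a
no-op otherwise, so the asserts added by this patch cost nothing in release
builds. A simplified model of that conditional expansion (MY_ASSERT is a
hypothetical name, not the verbatim RTEMS macro):

#include <assert.h>

/* Real check in debug builds, no-op otherwise (simplified model). */
#if defined( RTEMS_DEBUG )
  #define MY_ASSERT( _e ) assert( _e )
#else
  #define MY_ASSERT( _e ) ( ( void ) 0 )
#endif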
@@ -130,6 +132,65 @@ static inline void _Scheduler_SMP_Allocate_processor_exact(
_Scheduler_SMP_Update_heir( cpu_self, victim_cpu, scheduled_thread );
}
+/*
+ * This method is used by _Scheduler_priority_affinity_SMP_Check_for_migrations
+ * to determine if a better thread set can be identified.
+ */
+static Scheduler_Node *_Scheduler_priority_affinity_SMP_Get_any_highest_ready(
+ Scheduler_Context *context
+)
+{
+ Scheduler_priority_SMP_Context *self =
+ _Scheduler_priority_SMP_Get_self( context );
+ Priority_Control index;
+ Scheduler_Node *highest = NULL;
+ Scheduler_priority_affinity_SMP_Node *node;
+
+ /**
+ * @todo The deterministic priority scheduler structure is optimized
+ * for insertion, extraction, and finding the highest priority
+ * thread. Scanning the list of ready threads is not a purpose
+ * for which it was optimized. There are optimizations to be
+ * made in this loop.
+ *
+   *    + by checking the major bit, we could potentially skip entire
+   *      groups of 16 (a standalone sketch of this two-level skip
+   *      follows this hunk).
+ *
+ * When using this scheduler as implemented, the application's
+ * choice of numeric priorities and their distribution can have
+ * an impact on performance.
+ */
+ for ( index = _Priority_bit_map_Get_highest( &self->Bit_map ) ;
+ index <= PRIORITY_MAXIMUM;
+ index++ )
+ {
+ Chain_Control *chain = &self->Ready[index];
+ Chain_Node *chain_node;
+ for ( chain_node = _Chain_First( chain );
+ chain_node != _Chain_Immutable_tail( chain ) ;
+ chain_node = _Chain_Next( chain_node ) )
+ {
+ Thread_Control *thread;
+
+ node = (Scheduler_priority_affinity_SMP_Node *) chain_node;
+ thread = _Scheduler_Node_get_owner( &node->Base.Base.Base );
+
+ /*
+ * Don't attempt to schedule an already executing thread.
+ */
+      if ( _Thread_Is_executing_on_a_processor( thread ) ) {
+ continue;
+ }
+
+      highest = &node->Base.Base.Base;
+      break;
+    }
+
+    /*
+     * The scan starts at the highest priority that has a ready thread
+     * and proceeds toward lower priorities, so the first candidate
+     * found is the highest ready one.  Stop here rather than letting
+     * a lower priority chain overwrite it.
+     */
+    if ( highest != NULL )
+      break;
+  }
+
+ return highest;
+}
+
/*
* This method is unique to this scheduler because it takes into
* account affinity as it determines the highest ready thread.
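The @todo above suggests consulting the priority bit map so the scan can skip
empty priority groups: one clear bit in the major word rules out a whole group
of 16 priorities at once. A standalone sketch of that idea with generic types
(bitmap_t and for_each_ready_priority() are hypothetical illustrations, not
the RTEMS Priority_bit_map_Control internals):

#include <stdint.h>

/*
 * Two-level bit map: one major bit per group of 16 priorities.  A clear
 * major bit means all 16 minor bits of that group are clear.
 */
typedef struct {
  uint16_t major;       /* bit g set => group g contains a ready priority */
  uint16_t minor[ 16 ]; /* bit p set => priority g * 16 + p is ready      */
} bitmap_t;

/* Visit only the non-empty priority levels, skipping empty groups. */
static void for_each_ready_priority(
  const bitmap_t *map,
  void ( *visit )( unsigned priority, void *arg ),
  void *arg
)
{
  for ( unsigned g = 0; g < 16; ++g ) {
    if ( ( map->major & ( 1u << g ) ) == 0 )
      continue; /* skip 16 priorities with one test */

    for ( unsigned p = 0; p < 16; ++p ) {
      if ( ( map->minor[ g ] & ( 1u << p ) ) != 0 )
        visit( g * 16 + p, arg );
    }
  }
}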
@@ -150,6 +211,7 @@ static Scheduler_Node *_Scheduler_priority_affinity_SMP_Get_highest_ready(
uint32_t victim_cpu_index;
Scheduler_priority_affinity_SMP_Node *node;
+#if 0
 /*
  * This shortcut is taken when the caller only needs to check
  * whether scheduling reevaluations are needed.
  */
@@ -162,6 +224,8 @@ static Scheduler_Node *_Scheduler_priority_affinity_SMP_Get_highest_ready(
return &node->Base.Base.Base;
}
+#endif
+ _Assert( victim != NULL );
victim_thread = _Scheduler_Node_get_owner( victim );
victim_cpu_index = _Per_CPU_Get_index( _Thread_Get_CPU( victim_thread ) );
@@ -189,8 +253,18 @@ static Scheduler_Node *_Scheduler_priority_affinity_SMP_Get_highest_ready(
for ( chain_node = _Chain_First( chain );
chain_node != _Chain_Immutable_tail( chain ) ;
chain_node = _Chain_Next( chain_node ) )
- {
+ {
+ Thread_Control *thread;
+
node = (Scheduler_priority_affinity_SMP_Node *) chain_node;
+ thread = _Scheduler_Node_get_owner( &node->Base.Base.Base );
+
+ /*
+ * Don't attempt to schedule an already executing thread.
+ */
+      if ( _Thread_Is_executing_on_a_processor( thread ) ) {
+ continue;
+ }
/*
* Can this thread run on this CPU?
@@ -205,6 +279,8 @@ static Scheduler_Node *_Scheduler_priority_affinity_SMP_Get_highest_ready(
}
_Assert( highest != NULL );
+  _Assert( highest != victim );
+  _Assert(
+    !_Thread_Is_executing_on_a_processor(
+      _Scheduler_Node_get_owner( highest )
+    )
+  );
return highest;
}
@@ -279,6 +355,12 @@ static Scheduler_Node * _Scheduler_priority_affinity_SMP_Get_lowest_scheduled(
thread = _Scheduler_Node_get_owner( &node->Base.Base.Base );
cpu_index = _Per_CPU_Get_index( _Thread_Get_CPU( thread ) );
+ /*
+   * If this assert fires, we are attempting to schedule a thread
+   * that is already scheduled.
+ */
+ _Assert( thread != _Scheduler_Node_get_owner( &filter->Base.Base.Base ) );
+
if ( CPU_ISSET( (int) cpu_index, filter->Affinity.set ) ) {
lowest_scheduled = &node->Base.Base.Base;
break;
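CPU_ISSET() is the standard cpu_set_t membership test from <sched.h> (newlib
supplies it for RTEMS); the loop above uses it to ask whether the candidate
victim's processor belongs to the filter thread's affinity set. A small
self-contained usage example of the cpu_set_t operations (the _GNU_SOURCE
define is needed for the glibc variant of these macros):

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main( void )
{
  cpu_set_t affinity;

  CPU_ZERO( &affinity );    /* start with an empty set */
  CPU_SET( 0, &affinity );  /* allow processor 0       */
  CPU_SET( 2, &affinity );  /* allow processor 2       */

  for ( int cpu = 0; cpu < 4; ++cpu ) {
    printf(
      "cpu %d: %s\n",
      cpu,
      CPU_ISSET( cpu, &affinity ) ? "allowed" : "excluded"
    );
  }

  return 0;
}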
@@ -322,12 +404,20 @@ static void _Scheduler_priority_affinity_SMP_Check_for_migrations(
Scheduler_Context *context
)
{
+#if 1
Scheduler_Node *lowest_scheduled;
Scheduler_Node *highest_ready;
while (1) {
highest_ready =
- _Scheduler_priority_affinity_SMP_Get_highest_ready( context, NULL );
+ _Scheduler_priority_affinity_SMP_Get_any_highest_ready( context );
+
+ /*
+     * All ready threads are already executing, so the current set of
+     * executing threads is the best available.
+ */
+ if ( highest_ready == NULL )
+ break;
lowest_scheduled =
_Scheduler_priority_affinity_SMP_Get_lowest_scheduled(
@@ -374,6 +464,7 @@ static void _Scheduler_priority_affinity_SMP_Check_for_migrations(
lowest_scheduled
);
}
+#endif
}
/*
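Taken together, the reworked _Scheduler_priority_affinity_SMP_Check_for_migrations
is a fixed-point loop: find a ready thread that is not executing, find the
lowest scheduled thread it could displace subject to affinity, migrate, and
repeat until no candidate remains. A hedged standalone sketch of that shape
(every name below is a hypothetical stand-in left as an extern hook, not the
RTEMS API):

#include <stdbool.h>
#include <stddef.h>

typedef struct node node_t;

/* Hypothetical hooks standing in for the scheduler operations. */
extern node_t *get_any_highest_ready( void *ctx );
extern node_t *get_lowest_scheduled( void *ctx, node_t *filter );
extern bool    is_improvement( const node_t *ready, const node_t *scheduled );
extern void    migrate( void *ctx, node_t *ready, node_t *scheduled );

/* Repeat until every processor runs the best thread it can get. */
static void check_for_migrations( void *ctx )
{
  for ( ;; ) {
    node_t *ready = get_any_highest_ready( ctx );

    if ( ready == NULL )
      break; /* every ready thread is already executing */

    node_t *scheduled = get_lowest_scheduled( ctx, ready );

    if ( scheduled == NULL || !is_improvement( ready, scheduled ) )
      break; /* no affinity-compatible improvement exists */

    migrate( ctx, ready, scheduled );
  }
}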