author     Sebastian Huber <sebastian.huber@embedded-brains.de>   2017-11-09 16:21:37 +0100
committer  Sebastian Huber <sebastian.huber@embedded-brains.de>   2017-11-20 08:36:49 +0100
commit     c597fb166e05d315ce5da29c0a43a09992772dad (patch)
tree       627d3f248020e0a292fe7dcb2505cf4d90957ae7 /cpukit/score/src
parent     bsps/powerpc: Fix PPC_EXC_CONFIG_USE_FIXED_HANDLER (diff)
download   rtems-c597fb166e05d315ce5da29c0a43a09992772dad.tar.bz2
score: Optimize scheduler priority updates
Thread priority changes may append or prepend the thread to its priority
group on the scheduler ready queue.  Previously, a separate priority value
and a prepend-it flag in the scheduler node were used to propagate a
priority change to the scheduler.  Now, use an append-it bit in the
priority control and reduce the plain priority value to 63 bits.

This change leads to a significant code size reduction (about 25%) of the
SMP schedulers.  The negligible increase of the standard priority scheduler
is due to some additional shift operations (SCHEDULER_PRIORITY_MAP() and
SCHEDULER_PRIORITY_UNMAP()).

Before:

   text    filename
    136    sparc-rtems5/c/erc32/cpukit/score/src/libscore_a-schedulersimpleblock.o
    464    sparc-rtems5/c/erc32/cpukit/score/src/libscore_a-schedulersimplechangepriority.o
     24    sparc-rtems5/c/erc32/cpukit/score/src/libscore_a-schedulersimple.o
    108    sparc-rtems5/c/erc32/cpukit/score/src/libscore_a-schedulersimpleschedule.o
    292    sparc-rtems5/c/erc32/cpukit/score/src/libscore_a-schedulersimpleunblock.o
    264    sparc-rtems5/c/erc32/cpukit/score/src/libscore_a-schedulersimpleyield.o

   text    filename
    280    sparc-rtems5/c/erc32/cpukit/score/src/libscore_a-schedulerpriorityblock.o
    488    sparc-rtems5/c/erc32/cpukit/score/src/libscore_a-schedulerprioritychangepriority.o
    200    sparc-rtems5/c/erc32/cpukit/score/src/libscore_a-schedulerpriority.o
    164    sparc-rtems5/c/erc32/cpukit/score/src/libscore_a-schedulerpriorityschedule.o
    328    sparc-rtems5/c/erc32/cpukit/score/src/libscore_a-schedulerpriorityunblock.o
    200    sparc-rtems5/c/erc32/cpukit/score/src/libscore_a-schedulerpriorityyield.o

   text    filename
  24112    arm-rtems5/c/imx7/cpukit/score/src/libscore_a-scheduleredfsmp.o

   text    filename
  37204    sparc-rtems5/c/gr740/cpukit/score/src/libscore_a-scheduleredfsmp.o

   text    filename
  42236    powerpc-rtems5/c/qoriq_e6500_32/cpukit/score/src/libscore_a-scheduleredfsmp.o

After:

   text    filename
    136    sparc-rtems5/c/erc32/cpukit/score/src/libscore_a-schedulersimpleblock.o
    272    sparc-rtems5/c/erc32/cpukit/score/src/libscore_a-schedulersimplechangepriority.o
     24    sparc-rtems5/c/erc32/cpukit/score/src/libscore_a-schedulersimple.o
    108    sparc-rtems5/c/erc32/cpukit/score/src/libscore_a-schedulersimpleschedule.o
    292    sparc-rtems5/c/erc32/cpukit/score/src/libscore_a-schedulersimpleunblock.o
    264    sparc-rtems5/c/erc32/cpukit/score/src/libscore_a-schedulersimpleyield.o

   text    filename
    280    sparc-rtems5/c/erc32/cpukit/score/src/libscore_a-schedulerpriorityblock.o
    488    sparc-rtems5/c/erc32/cpukit/score/src/libscore_a-schedulerprioritychangepriority.o
    208    sparc-rtems5/c/erc32/cpukit/score/src/libscore_a-schedulerpriority.o
    164    sparc-rtems5/c/erc32/cpukit/score/src/libscore_a-schedulerpriorityschedule.o
    332    sparc-rtems5/c/erc32/cpukit/score/src/libscore_a-schedulerpriorityunblock.o
    200    sparc-rtems5/c/erc32/cpukit/score/src/libscore_a-schedulerpriorityyield.o

   text    filename
  18860    arm-rtems5/c/imx7/cpukit/score/src/libscore_a-scheduleredfsmp.o

   text    filename
  28520    sparc-rtems5/c/gr740/cpukit/score/src/libscore_a-scheduleredfsmp.o

   text    filename
  32664    powerpc-rtems5/c/qoriq_e6500_32/cpukit/score/src/libscore_a-scheduleredfsmp.o
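The macro names referenced above suggest a simple bit encoding.  The following
is a minimal sketch of how such helpers could look, assuming the append bit
occupies the least significant bit of the 64-bit priority control and the
plain priority value lives in the upper 63 bits; the macro bodies and the
SCHEDULER_PRIORITY_APPEND_FLAG name are assumptions based on the behavior
described in this commit message, not a verbatim copy of
<rtems/score/schedulerimpl.h>.

  #include <stdint.h>

  typedef uint64_t Priority_Control;

  /* Assumed encoding: bit 0 carries the append/prepend decision, the plain
     priority value is shifted into the upper 63 bits. */
  #define SCHEDULER_PRIORITY_APPEND_FLAG    ( (Priority_Control) 1 )

  #define SCHEDULER_PRIORITY_MAP( p )       ( ( p ) << 1 )
  #define SCHEDULER_PRIORITY_UNMAP( p )     ( ( p ) >> 1 )
  #define SCHEDULER_PRIORITY_PURIFY( p )    ( ( p ) & ~SCHEDULER_PRIORITY_APPEND_FLAG )
  #define SCHEDULER_PRIORITY_APPEND( p )    ( ( p ) | SCHEDULER_PRIORITY_APPEND_FLAG )
  #define SCHEDULER_PRIORITY_IS_APPEND( p ) \
    ( ( ( p ) & SCHEDULER_PRIORITY_APPEND_FLAG ) != 0 )

With an encoding along these lines, an unblock path can build an insert
priority via SCHEDULER_PRIORITY_APPEND( SCHEDULER_PRIORITY_PURIFY( priority ) )
and a single enqueue routine can decide between append and prepend with
SCHEDULER_PRIORITY_IS_APPEND(), which is what lets the separate _lifo/_fifo
enqueue variants in the diff below collapse into one function per scheduler.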
Diffstat (limited to 'cpukit/score/src')
-rw-r--r--  cpukit/score/src/schedulercbsunblock.c                 5
-rw-r--r--  cpukit/score/src/schedulerdefaultmappriority.c        14
-rw-r--r--  cpukit/score/src/scheduleredfchangepriority.c         13
-rw-r--r--  cpukit/score/src/scheduleredfreleasejob.c             15
-rw-r--r--  cpukit/score/src/scheduleredfsmp.c                   184
-rw-r--r--  cpukit/score/src/scheduleredfunblock.c                 9
-rw-r--r--  cpukit/score/src/schedulerpriority.c                   3
-rw-r--r--  cpukit/score/src/schedulerpriorityaffinitysmp.c      150
-rw-r--r--  cpukit/score/src/schedulerprioritychangepriority.c    19
-rw-r--r--  cpukit/score/src/schedulerprioritysmp.c              116
-rw-r--r--  cpukit/score/src/schedulerpriorityunblock.c           11
-rw-r--r--  cpukit/score/src/schedulersimplechangepriority.c      12
-rw-r--r--  cpukit/score/src/schedulersimplesmp.c                161
-rw-r--r--  cpukit/score/src/schedulersimpleunblock.c              6
-rw-r--r--  cpukit/score/src/schedulersimpleyield.c               10
-rw-r--r--  cpukit/score/src/schedulerstrongapa.c                176
16 files changed, 276 insertions, 628 deletions
diff --git a/cpukit/score/src/schedulercbsunblock.c b/cpukit/score/src/schedulercbsunblock.c
index 403435eeb1..9b7a0ca424 100644
--- a/cpukit/score/src/schedulercbsunblock.c
+++ b/cpukit/score/src/schedulercbsunblock.c
@@ -34,12 +34,11 @@ void _Scheduler_CBS_Unblock(
Scheduler_CBS_Node *the_node;
Scheduler_CBS_Server *serv_info;
Priority_Control priority;
- bool prepend_it;
the_node = _Scheduler_CBS_Node_downcast( node );
serv_info = the_node->cbs_server;
- priority = _Scheduler_Node_get_priority( &the_node->Base.Base, &prepend_it );
- (void) prepend_it;
+ priority = _Scheduler_Node_get_priority( &the_node->Base.Base );
+ priority = SCHEDULER_PRIORITY_PURIFY( priority );
/*
* Late unblock rule for deadline-driven tasks. The remaining time to
diff --git a/cpukit/score/src/schedulerdefaultmappriority.c b/cpukit/score/src/schedulerdefaultmappriority.c
index 37a600011e..228549f20d 100644
--- a/cpukit/score/src/schedulerdefaultmappriority.c
+++ b/cpukit/score/src/schedulerdefaultmappriority.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016 embedded brains GmbH
+ * Copyright (c) 2016, 2017 embedded brains GmbH
*
* The license and distribution terms for this file may be
* found in the file LICENSE in this distribution or at
@@ -10,12 +10,20 @@
#include "config.h"
#endif
-#include <rtems/score/scheduler.h>
+#include <rtems/score/schedulerimpl.h>
Priority_Control _Scheduler_default_Map_priority(
const Scheduler_Control *scheduler,
Priority_Control priority
)
{
- return priority;
+ return SCHEDULER_PRIORITY_MAP( priority );
+}
+
+Priority_Control _Scheduler_default_Unmap_priority(
+ const Scheduler_Control *scheduler,
+ Priority_Control priority
+)
+{
+ return SCHEDULER_PRIORITY_UNMAP( priority );
}
diff --git a/cpukit/score/src/scheduleredfchangepriority.c b/cpukit/score/src/scheduleredfchangepriority.c
index 23382973cc..d3d1f94cbf 100644
--- a/cpukit/score/src/scheduleredfchangepriority.c
+++ b/cpukit/score/src/scheduleredfchangepriority.c
@@ -29,7 +29,7 @@ void _Scheduler_EDF_Update_priority(
Scheduler_EDF_Context *context;
Scheduler_EDF_Node *the_node;
Priority_Control priority;
- bool prepend_it;
+ Priority_Control insert_priority;
if ( !_Thread_Is_ready( the_thread ) ) {
/* Nothing to do */
@@ -37,7 +37,8 @@ void _Scheduler_EDF_Update_priority(
}
the_node = _Scheduler_EDF_Node_downcast( node );
- priority = _Scheduler_Node_get_priority( &the_node->Base, &prepend_it );
+ insert_priority = _Scheduler_Node_get_priority( &the_node->Base );
+ priority = SCHEDULER_PRIORITY_PURIFY( insert_priority );
if ( priority == the_node->priority ) {
/* Nothing to do */
@@ -48,12 +49,6 @@ void _Scheduler_EDF_Update_priority(
context = _Scheduler_EDF_Get_context( scheduler );
_Scheduler_EDF_Extract( context, the_node );
-
- if ( prepend_it ) {
- _Scheduler_EDF_Enqueue_first( context, the_node, priority );
- } else {
- _Scheduler_EDF_Enqueue( context, the_node, priority );
- }
-
+ _Scheduler_EDF_Enqueue( context, the_node, insert_priority );
_Scheduler_EDF_Schedule_body( scheduler, the_thread, false );
}
diff --git a/cpukit/score/src/scheduleredfreleasejob.c b/cpukit/score/src/scheduleredfreleasejob.c
index 068a0db7a3..d7865ff5fb 100644
--- a/cpukit/score/src/scheduleredfreleasejob.c
+++ b/cpukit/score/src/scheduleredfreleasejob.c
@@ -25,7 +25,7 @@ Priority_Control _Scheduler_EDF_Map_priority(
Priority_Control priority
)
{
- return SCHEDULER_EDF_PRIO_MSB | priority;
+ return SCHEDULER_EDF_PRIO_MSB | SCHEDULER_PRIORITY_MAP( priority );
}
Priority_Control _Scheduler_EDF_Unmap_priority(
@@ -33,7 +33,7 @@ Priority_Control _Scheduler_EDF_Unmap_priority(
Priority_Control priority
)
{
- return priority & ~SCHEDULER_EDF_PRIO_MSB;
+ return SCHEDULER_PRIORITY_UNMAP( priority & ~SCHEDULER_EDF_PRIO_MSB );
}
void _Scheduler_EDF_Release_job(
@@ -48,7 +48,16 @@ void _Scheduler_EDF_Release_job(
_Thread_Wait_acquire_critical( the_thread, queue_context );
- _Priority_Node_set_priority( priority_node, deadline );
+ /*
+ * There is no integer overflow problem here due to the
+ * SCHEDULER_PRIORITY_MAP(). The deadline is in clock ticks. With the
+ * minimum clock tick interval of 1us, the uptime is limited to about 146235
+ * years.
+ */
+ _Priority_Node_set_priority(
+ priority_node,
+ SCHEDULER_PRIORITY_MAP( deadline )
+ );
if ( _Priority_Node_is_active( priority_node ) ) {
_Thread_Priority_changed(
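As a rough sanity check on the "about 146235 years" figure in the new comment
above, assuming 62 bits remain for the mapped deadline in 1 us clock ticks
(once the EDF MSB and the append bit introduced by SCHEDULER_PRIORITY_MAP()
are consumed) and 365-day years, this small stand-alone calculation reproduces
the number; it is an illustration of the arithmetic only, not RTEMS code.

  #include <stdio.h>

  int main( void )
  {
    double ticks = 4611686018427387904.0;              /* 2^62 one-microsecond ticks */
    double us_per_year = 365.0 * 24.0 * 3600.0 * 1e6;  /* microseconds in a 365-day year */
    printf( "%.0f years\n", ticks / us_per_year );     /* about 146235 years */
    return 0;
  }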
diff --git a/cpukit/score/src/scheduleredfsmp.c b/cpukit/score/src/scheduleredfsmp.c
index badee44e2e..102a33d4f7 100644
--- a/cpukit/score/src/scheduleredfsmp.c
+++ b/cpukit/score/src/scheduleredfsmp.c
@@ -39,26 +39,7 @@ _Scheduler_EDF_SMP_Node_downcast( Scheduler_Node *node )
return (Scheduler_EDF_SMP_Node *) node;
}
-static inline bool _Scheduler_EDF_SMP_Less(
- const void *left,
- const RBTree_Node *right
-)
-{
- const Priority_Control *the_left;
- const Scheduler_SMP_Node *the_right;
- Priority_Control prio_left;
- Priority_Control prio_right;
-
- the_left = left;
- the_right = RTEMS_CONTAINER_OF( right, Scheduler_SMP_Node, Base.Node.RBTree );
-
- prio_left = *the_left;
- prio_right = the_right->priority;
-
- return prio_left < prio_right;
-}
-
-static inline bool _Scheduler_EDF_SMP_Less_or_equal(
+static inline bool _Scheduler_EDF_SMP_Priority_less_equal(
const void *left,
const RBTree_Node *right
)
@@ -254,20 +235,21 @@ static inline Scheduler_Node *_Scheduler_EDF_SMP_Get_lowest_scheduled(
static inline void _Scheduler_EDF_SMP_Insert_ready(
Scheduler_Context *context,
Scheduler_Node *node_base,
- int generation_index,
- bool ( *less )( const void *, const RBTree_Node * )
+ Priority_Control insert_priority
)
{
Scheduler_EDF_SMP_Context *self;
Scheduler_EDF_SMP_Node *node;
uint32_t rqi;
Scheduler_EDF_SMP_Ready_queue *ready_queue;
+ int generation_index;
int increment;
int64_t generation;
self = _Scheduler_EDF_SMP_Get_self( context );
node = _Scheduler_EDF_SMP_Node_downcast( node_base );
rqi = node->ready_queue_index;
+ generation_index = SCHEDULER_PRIORITY_IS_APPEND( insert_priority );
increment = ( generation_index << 1 ) - 1;
ready_queue = &self->Ready[ rqi ];
@@ -279,8 +261,8 @@ static inline void _Scheduler_EDF_SMP_Insert_ready(
_RBTree_Insert_inline(
&ready_queue->Queue,
&node->Base.Base.Node.RBTree,
- &node->Base.priority,
- less
+ &insert_priority,
+ _Scheduler_EDF_SMP_Priority_less_equal
);
if ( rqi != 0 && _Chain_Is_node_off_chain( &ready_queue->Node ) ) {
@@ -327,12 +309,14 @@ static inline void _Scheduler_EDF_SMP_Move_from_scheduled_to_ready(
Scheduler_Node *scheduled_to_ready
)
{
+ Priority_Control insert_priority;
+
_Chain_Extract_unprotected( &scheduled_to_ready->Node.Chain );
+ insert_priority = _Scheduler_SMP_Node_priority( scheduled_to_ready );
_Scheduler_EDF_SMP_Insert_ready(
context,
scheduled_to_ready,
- 1,
- _Scheduler_EDF_SMP_Less
+ insert_priority
);
}
@@ -341,33 +325,15 @@ static inline void _Scheduler_EDF_SMP_Move_from_ready_to_scheduled(
Scheduler_Node *ready_to_scheduled
)
{
- _Scheduler_EDF_SMP_Extract_from_ready( context, ready_to_scheduled );
- _Scheduler_SMP_Insert_scheduled_fifo( context, ready_to_scheduled );
-}
+ Priority_Control insert_priority;
-static inline void _Scheduler_EDF_SMP_Insert_ready_lifo(
- Scheduler_Context *context,
- Scheduler_Node *node_to_insert
-)
-{
- _Scheduler_EDF_SMP_Insert_ready(
- context,
- node_to_insert,
- 0,
- _Scheduler_EDF_SMP_Less_or_equal
- );
-}
-
-static inline void _Scheduler_EDF_SMP_Insert_ready_fifo(
- Scheduler_Context *context,
- Scheduler_Node *node_to_insert
-)
-{
- _Scheduler_EDF_SMP_Insert_ready(
+ _Scheduler_EDF_SMP_Extract_from_ready( context, ready_to_scheduled );
+ insert_priority = _Scheduler_SMP_Node_priority( ready_to_scheduled );
+ insert_priority = SCHEDULER_PRIORITY_APPEND( insert_priority );
+ _Scheduler_SMP_Insert_scheduled(
context,
- node_to_insert,
- 1,
- _Scheduler_EDF_SMP_Less
+ ready_to_scheduled,
+ insert_priority
);
}
@@ -444,103 +410,45 @@ void _Scheduler_EDF_SMP_Block(
);
}
-static inline bool _Scheduler_EDF_SMP_Enqueue_ordered(
- Scheduler_Context *context,
- Scheduler_Node *node,
- Chain_Node_order order,
- Scheduler_SMP_Insert insert_ready,
- Scheduler_SMP_Insert insert_scheduled
+static inline bool _Scheduler_EDF_SMP_Enqueue(
+ Scheduler_Context *context,
+ Scheduler_Node *node,
+ Priority_Control insert_priority
)
{
- return _Scheduler_SMP_Enqueue_ordered(
+ return _Scheduler_SMP_Enqueue(
context,
node,
- order,
- insert_ready,
- insert_scheduled,
+ insert_priority,
+ _Scheduler_SMP_Priority_less_equal,
+ _Scheduler_EDF_SMP_Insert_ready,
+ _Scheduler_SMP_Insert_scheduled,
_Scheduler_EDF_SMP_Move_from_scheduled_to_ready,
_Scheduler_EDF_SMP_Get_lowest_scheduled,
_Scheduler_EDF_SMP_Allocate_processor
);
}
-static inline bool _Scheduler_EDF_SMP_Enqueue_lifo(
+static inline bool _Scheduler_EDF_SMP_Enqueue_scheduled(
Scheduler_Context *context,
- Scheduler_Node *node
-)
-{
- return _Scheduler_EDF_SMP_Enqueue_ordered(
- context,
- node,
- _Scheduler_SMP_Insert_priority_lifo_order,
- _Scheduler_EDF_SMP_Insert_ready_lifo,
- _Scheduler_SMP_Insert_scheduled_lifo
- );
-}
-
-static inline bool _Scheduler_EDF_SMP_Enqueue_fifo(
- Scheduler_Context *context,
- Scheduler_Node *node
-)
-{
- return _Scheduler_EDF_SMP_Enqueue_ordered(
- context,
- node,
- _Scheduler_SMP_Insert_priority_fifo_order,
- _Scheduler_EDF_SMP_Insert_ready_fifo,
- _Scheduler_SMP_Insert_scheduled_fifo
- );
-}
-
-static inline bool _Scheduler_EDF_SMP_Enqueue_scheduled_ordered(
- Scheduler_Context *context,
- Scheduler_Node *node,
- Chain_Node_order order,
- Scheduler_SMP_Insert insert_ready,
- Scheduler_SMP_Insert insert_scheduled
+ Scheduler_Node *node,
+ Priority_Control insert_priority
)
{
- return _Scheduler_SMP_Enqueue_scheduled_ordered(
+ return _Scheduler_SMP_Enqueue_scheduled(
context,
node,
- order,
+ insert_priority,
+ _Scheduler_SMP_Priority_less_equal,
_Scheduler_EDF_SMP_Extract_from_ready,
_Scheduler_EDF_SMP_Get_highest_ready,
- insert_ready,
- insert_scheduled,
+ _Scheduler_EDF_SMP_Insert_ready,
+ _Scheduler_SMP_Insert_scheduled,
_Scheduler_EDF_SMP_Move_from_ready_to_scheduled,
_Scheduler_EDF_SMP_Allocate_processor
);
}
-static inline bool _Scheduler_EDF_SMP_Enqueue_scheduled_lifo(
- Scheduler_Context *context,
- Scheduler_Node *node
-)
-{
- return _Scheduler_EDF_SMP_Enqueue_scheduled_ordered(
- context,
- node,
- _Scheduler_SMP_Insert_priority_lifo_order,
- _Scheduler_EDF_SMP_Insert_ready_lifo,
- _Scheduler_SMP_Insert_scheduled_lifo
- );
-}
-
-static inline bool _Scheduler_EDF_SMP_Enqueue_scheduled_fifo(
- Scheduler_Context *context,
- Scheduler_Node *node
-)
-{
- return _Scheduler_EDF_SMP_Enqueue_scheduled_ordered(
- context,
- node,
- _Scheduler_SMP_Insert_priority_fifo_order,
- _Scheduler_EDF_SMP_Insert_ready_fifo,
- _Scheduler_SMP_Insert_scheduled_fifo
- );
-}
-
void _Scheduler_EDF_SMP_Unblock(
const Scheduler_Control *scheduler,
Thread_Control *thread,
@@ -554,7 +462,7 @@ void _Scheduler_EDF_SMP_Unblock(
thread,
node,
_Scheduler_EDF_SMP_Do_update,
- _Scheduler_EDF_SMP_Enqueue_fifo
+ _Scheduler_EDF_SMP_Enqueue
);
}
@@ -568,9 +476,9 @@ static inline bool _Scheduler_EDF_SMP_Do_ask_for_help(
context,
the_thread,
node,
- _Scheduler_SMP_Insert_priority_lifo_order,
- _Scheduler_EDF_SMP_Insert_ready_lifo,
- _Scheduler_SMP_Insert_scheduled_lifo,
+ _Scheduler_SMP_Priority_less_equal,
+ _Scheduler_EDF_SMP_Insert_ready,
+ _Scheduler_SMP_Insert_scheduled,
_Scheduler_EDF_SMP_Move_from_scheduled_to_ready,
_Scheduler_EDF_SMP_Get_lowest_scheduled,
_Scheduler_EDF_SMP_Allocate_processor
@@ -591,10 +499,8 @@ void _Scheduler_EDF_SMP_Update_priority(
node,
_Scheduler_EDF_SMP_Extract_from_ready,
_Scheduler_EDF_SMP_Do_update,
- _Scheduler_EDF_SMP_Enqueue_fifo,
- _Scheduler_EDF_SMP_Enqueue_lifo,
- _Scheduler_EDF_SMP_Enqueue_scheduled_fifo,
- _Scheduler_EDF_SMP_Enqueue_scheduled_lifo,
+ _Scheduler_EDF_SMP_Enqueue,
+ _Scheduler_EDF_SMP_Enqueue_scheduled,
_Scheduler_EDF_SMP_Do_ask_for_help
);
}
@@ -672,7 +578,7 @@ void _Scheduler_EDF_SMP_Add_processor(
context,
idle,
_Scheduler_EDF_SMP_Has_ready,
- _Scheduler_EDF_SMP_Enqueue_scheduled_fifo,
+ _Scheduler_EDF_SMP_Enqueue_scheduled,
_Scheduler_EDF_SMP_Register_idle
);
}
@@ -688,7 +594,7 @@ Thread_Control *_Scheduler_EDF_SMP_Remove_processor(
context,
cpu,
_Scheduler_EDF_SMP_Extract_from_ready,
- _Scheduler_EDF_SMP_Enqueue_fifo
+ _Scheduler_EDF_SMP_Enqueue
);
}
@@ -705,8 +611,8 @@ void _Scheduler_EDF_SMP_Yield(
thread,
node,
_Scheduler_EDF_SMP_Extract_from_ready,
- _Scheduler_EDF_SMP_Enqueue_fifo,
- _Scheduler_EDF_SMP_Enqueue_scheduled_fifo
+ _Scheduler_EDF_SMP_Enqueue,
+ _Scheduler_EDF_SMP_Enqueue_scheduled
);
}
@@ -777,7 +683,7 @@ bool _Scheduler_EDF_SMP_Set_affinity(
_Scheduler_EDF_SMP_Extract_from_ready,
_Scheduler_EDF_SMP_Get_highest_ready,
_Scheduler_EDF_SMP_Move_from_ready_to_scheduled,
- _Scheduler_EDF_SMP_Enqueue_fifo,
+ _Scheduler_EDF_SMP_Enqueue,
_Scheduler_EDF_SMP_Allocate_processor
);
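A note on the _Scheduler_EDF_SMP_Insert_ready() change above:
SCHEDULER_PRIORITY_IS_APPEND() yields 0 or 1, and the existing expression
( generation_index << 1 ) - 1 turns that into the -1/+1 generation step that
previously had to be passed in as a separate parameter.  A minimal
illustration of just that mapping (generation_increment is a hypothetical
helper, not RTEMS code):

  #include <assert.h>

  /* Maps a prepend (0) or append (1) decision to a generation step. */
  static int generation_increment( int generation_index )
  {
    return ( generation_index << 1 ) - 1;
  }

  int main( void )
  {
    assert( generation_increment( 0 ) == -1 );  /* prepend */
    assert( generation_increment( 1 ) == +1 );  /* append */
    return 0;
  }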
diff --git a/cpukit/score/src/scheduleredfunblock.c b/cpukit/score/src/scheduleredfunblock.c
index 29355d04fa..91295f511c 100644
--- a/cpukit/score/src/scheduleredfunblock.c
+++ b/cpukit/score/src/scheduleredfunblock.c
@@ -31,15 +31,16 @@ void _Scheduler_EDF_Unblock(
Scheduler_EDF_Context *context;
Scheduler_EDF_Node *the_node;
Priority_Control priority;
- bool prepend_it;
+ Priority_Control insert_priority;
context = _Scheduler_EDF_Get_context( scheduler );
the_node = _Scheduler_EDF_Node_downcast( node );
- priority = _Scheduler_Node_get_priority( &the_node->Base, &prepend_it );
- (void) prepend_it;
+ priority = _Scheduler_Node_get_priority( &the_node->Base );
+ priority = SCHEDULER_PRIORITY_PURIFY( priority );
+ insert_priority = SCHEDULER_PRIORITY_APPEND( priority );
the_node->priority = priority;
- _Scheduler_EDF_Enqueue( context, the_node, priority );
+ _Scheduler_EDF_Enqueue( context, the_node, insert_priority );
/*
* If the thread that was unblocked is more important than the heir,
diff --git a/cpukit/score/src/schedulerpriority.c b/cpukit/score/src/schedulerpriority.c
index ddfd973e0a..5ac16a49a1 100644
--- a/cpukit/score/src/schedulerpriority.c
+++ b/cpukit/score/src/schedulerpriority.c
@@ -19,7 +19,6 @@
#endif
#include <rtems/score/schedulerpriorityimpl.h>
-#include <rtems/score/wkspace.h>
void _Scheduler_priority_Initialize( const Scheduler_Control *scheduler )
{
@@ -49,7 +48,7 @@ void _Scheduler_priority_Node_initialize(
the_node = _Scheduler_priority_Node_downcast( node );
_Scheduler_priority_Ready_queue_update(
&the_node->Ready_queue,
- priority,
+ SCHEDULER_PRIORITY_UNMAP( priority ),
&context->Bit_map,
&context->Ready[ 0 ]
);
diff --git a/cpukit/score/src/schedulerpriorityaffinitysmp.c b/cpukit/score/src/schedulerpriorityaffinitysmp.c
index 72b4ffb600..4808c84c3f 100644
--- a/cpukit/score/src/schedulerpriorityaffinitysmp.c
+++ b/cpukit/score/src/schedulerpriorityaffinitysmp.c
@@ -39,22 +39,13 @@
* + _Scheduler_priority_SMP_Do_update
*/
-static bool _Scheduler_priority_affinity_SMP_Insert_priority_lifo_order(
+static bool _Scheduler_priority_affinity_SMP_Priority_less_equal(
const void *to_insert,
const Chain_Node *next
)
{
return next != NULL
- && _Scheduler_SMP_Insert_priority_lifo_order( to_insert, next );
-}
-
-static bool _Scheduler_priority_affinity_SMP_Insert_priority_fifo_order(
- const void *to_insert,
- const Chain_Node *next
-)
-{
- return next != NULL
- && _Scheduler_SMP_Insert_priority_fifo_order( to_insert, next );
+ && _Scheduler_SMP_Priority_less_equal( to_insert, next );
}
static Scheduler_priority_affinity_SMP_Node *
@@ -242,19 +233,21 @@ static Scheduler_Node * _Scheduler_priority_affinity_SMP_Get_lowest_scheduled(
/*
* This method is unique to this scheduler because it must pass
* _Scheduler_priority_affinity_SMP_Get_lowest_scheduled into
- * _Scheduler_SMP_Enqueue_ordered.
+ * _Scheduler_SMP_Enqueue.
*/
static bool _Scheduler_priority_affinity_SMP_Enqueue_fifo(
Scheduler_Context *context,
- Scheduler_Node *node
+ Scheduler_Node *node,
+ Priority_Control insert_priority
)
{
- return _Scheduler_SMP_Enqueue_ordered(
+ return _Scheduler_SMP_Enqueue(
context,
node,
- _Scheduler_priority_affinity_SMP_Insert_priority_fifo_order,
- _Scheduler_priority_SMP_Insert_ready_fifo,
- _Scheduler_SMP_Insert_scheduled_fifo,
+ insert_priority,
+ _Scheduler_priority_affinity_SMP_Priority_less_equal,
+ _Scheduler_priority_SMP_Insert_ready,
+ _Scheduler_SMP_Insert_scheduled,
_Scheduler_priority_SMP_Move_from_scheduled_to_ready,
_Scheduler_priority_affinity_SMP_Get_lowest_scheduled,
_Scheduler_SMP_Allocate_processor_exact
@@ -280,6 +273,7 @@ static void _Scheduler_priority_affinity_SMP_Check_for_migrations(
while (1) {
Priority_Control lowest_scheduled_priority;
+ Priority_Control insert_priority;
if ( _Priority_bit_map_Is_empty( &self->Bit_map ) ) {
/* Nothing to do */
@@ -312,7 +306,7 @@ static void _Scheduler_priority_affinity_SMP_Check_for_migrations(
_Scheduler_SMP_Node_priority( lowest_scheduled );
if (
- _Scheduler_SMP_Insert_priority_lifo_order(
+ _Scheduler_SMP_Priority_less_equal(
&lowest_scheduled_priority,
&highest_ready->Node.Chain
)
@@ -326,11 +320,14 @@ static void _Scheduler_priority_affinity_SMP_Check_for_migrations(
*/
_Scheduler_priority_SMP_Extract_from_ready( context, highest_ready );
+ insert_priority = _Scheduler_SMP_Node_priority( highest_ready );
+ insert_priority = SCHEDULER_PRIORITY_APPEND( insert_priority );
_Scheduler_SMP_Enqueue_to_scheduled(
context,
highest_ready,
+ insert_priority,
lowest_scheduled,
- _Scheduler_SMP_Insert_scheduled_fifo,
+ _Scheduler_SMP_Insert_scheduled,
_Scheduler_priority_SMP_Move_from_scheduled_to_ready,
_Scheduler_SMP_Allocate_processor_exact
);
@@ -364,22 +361,21 @@ void _Scheduler_priority_affinity_SMP_Unblock(
/*
* This is unique to this scheduler because it passes scheduler specific
- * get_lowest_scheduled helper to _Scheduler_SMP_Enqueue_ordered.
+ * get_lowest_scheduled helper to _Scheduler_SMP_Enqueue.
*/
-static bool _Scheduler_priority_affinity_SMP_Enqueue_ordered(
- Scheduler_Context *context,
- Scheduler_Node *node,
- Chain_Node_order order,
- Scheduler_SMP_Insert insert_ready,
- Scheduler_SMP_Insert insert_scheduled
+static bool _Scheduler_priority_affinity_SMP_Enqueue(
+ Scheduler_Context *context,
+ Scheduler_Node *node,
+ Priority_Control insert_priority
)
{
- return _Scheduler_SMP_Enqueue_ordered(
+ return _Scheduler_SMP_Enqueue(
context,
node,
- order,
- insert_ready,
- insert_scheduled,
+ insert_priority,
+ _Scheduler_priority_affinity_SMP_Priority_less_equal,
+ _Scheduler_priority_SMP_Insert_ready,
+ _Scheduler_SMP_Insert_scheduled,
_Scheduler_priority_SMP_Move_from_scheduled_to_ready,
_Scheduler_priority_affinity_SMP_Get_lowest_scheduled,
_Scheduler_SMP_Allocate_processor_exact
@@ -387,88 +383,30 @@ static bool _Scheduler_priority_affinity_SMP_Enqueue_ordered(
}
/*
- * This is unique to this scheduler because it is on the path
- * to _Scheduler_priority_affinity_SMP_Enqueue_ordered() which
- * invokes a scheduler unique get_lowest_scheduled helper.
- */
-static bool _Scheduler_priority_affinity_SMP_Enqueue_lifo(
- Scheduler_Context *context,
- Scheduler_Node *node
-)
-{
- return _Scheduler_priority_affinity_SMP_Enqueue_ordered(
- context,
- node,
- _Scheduler_priority_affinity_SMP_Insert_priority_lifo_order,
- _Scheduler_priority_SMP_Insert_ready_lifo,
- _Scheduler_SMP_Insert_scheduled_lifo
- );
-}
-
-/*
* This method is unique to this scheduler because it must
- * invoke _Scheduler_SMP_Enqueue_scheduled_ordered() with
+ * invoke _Scheduler_SMP_Enqueue_scheduled() with
* this scheduler's get_highest_ready() helper.
*/
-static bool _Scheduler_priority_affinity_SMP_Enqueue_scheduled_ordered(
- Scheduler_Context *context,
- Scheduler_Node *node,
- Chain_Node_order order,
- Scheduler_SMP_Insert insert_ready,
- Scheduler_SMP_Insert insert_scheduled
+static bool _Scheduler_priority_affinity_SMP_Enqueue_scheduled(
+ Scheduler_Context *context,
+ Scheduler_Node *node,
+ Priority_Control insert_priority
)
{
- return _Scheduler_SMP_Enqueue_scheduled_ordered(
+ return _Scheduler_SMP_Enqueue_scheduled(
context,
node,
- order,
+ insert_priority,
+ _Scheduler_SMP_Priority_less_equal,
_Scheduler_priority_SMP_Extract_from_ready,
_Scheduler_priority_affinity_SMP_Get_highest_ready,
- insert_ready,
- insert_scheduled,
+ _Scheduler_priority_SMP_Insert_ready,
+ _Scheduler_SMP_Insert_scheduled,
_Scheduler_priority_SMP_Move_from_ready_to_scheduled,
_Scheduler_SMP_Allocate_processor_exact
);
}
-/*
- * This is unique to this scheduler because it is on the path
- * to _Scheduler_priority_affinity_SMP_Enqueue_scheduled__ordered() which
- * invokes a scheduler unique get_lowest_scheduled helper.
- */
-static bool _Scheduler_priority_affinity_SMP_Enqueue_scheduled_lifo(
- Scheduler_Context *context,
- Scheduler_Node *node
-)
-{
- return _Scheduler_priority_affinity_SMP_Enqueue_scheduled_ordered(
- context,
- node,
- _Scheduler_SMP_Insert_priority_lifo_order,
- _Scheduler_priority_SMP_Insert_ready_lifo,
- _Scheduler_SMP_Insert_scheduled_lifo
- );
-}
-
-/*
- * This is unique to this scheduler because it is on the path
- * to _Scheduler_priority_affinity_SMP_Enqueue_scheduled__ordered() which
- * invokes a scheduler unique get_lowest_scheduled helper.
- */
-static bool _Scheduler_priority_affinity_SMP_Enqueue_scheduled_fifo(
- Scheduler_Context *context,
- Scheduler_Node *node
-)
-{
- return _Scheduler_priority_affinity_SMP_Enqueue_scheduled_ordered(
- context,
- node,
- _Scheduler_SMP_Insert_priority_fifo_order,
- _Scheduler_priority_SMP_Insert_ready_fifo,
- _Scheduler_SMP_Insert_scheduled_fifo
- );
-}
-
static bool _Scheduler_priority_affinity_SMP_Do_ask_for_help(
Scheduler_Context *context,
Thread_Control *the_thread,
@@ -479,9 +417,9 @@ static bool _Scheduler_priority_affinity_SMP_Do_ask_for_help(
context,
the_thread,
node,
- _Scheduler_SMP_Insert_priority_lifo_order,
- _Scheduler_priority_SMP_Insert_ready_lifo,
- _Scheduler_SMP_Insert_scheduled_lifo,
+ _Scheduler_SMP_Priority_less_equal,
+ _Scheduler_priority_SMP_Insert_ready,
+ _Scheduler_SMP_Insert_scheduled,
_Scheduler_priority_SMP_Move_from_scheduled_to_ready,
_Scheduler_SMP_Get_lowest_scheduled,
_Scheduler_SMP_Allocate_processor_lazy
@@ -502,10 +440,8 @@ void _Scheduler_priority_affinity_SMP_Update_priority(
node,
_Scheduler_priority_SMP_Extract_from_ready,
_Scheduler_priority_SMP_Do_update,
- _Scheduler_priority_affinity_SMP_Enqueue_fifo,
- _Scheduler_priority_affinity_SMP_Enqueue_lifo,
- _Scheduler_priority_affinity_SMP_Enqueue_scheduled_fifo,
- _Scheduler_priority_affinity_SMP_Enqueue_scheduled_lifo,
+ _Scheduler_priority_affinity_SMP_Enqueue,
+ _Scheduler_priority_affinity_SMP_Enqueue_scheduled,
_Scheduler_priority_affinity_SMP_Do_ask_for_help
);
@@ -574,7 +510,7 @@ void _Scheduler_priority_affinity_SMP_Add_processor(
context,
idle,
_Scheduler_priority_SMP_Has_ready,
- _Scheduler_priority_affinity_SMP_Enqueue_scheduled_fifo,
+ _Scheduler_priority_affinity_SMP_Enqueue_scheduled,
_Scheduler_SMP_Do_nothing_register_idle
);
}
@@ -590,7 +526,7 @@ Thread_Control *_Scheduler_priority_affinity_SMP_Remove_processor(
context,
cpu,
_Scheduler_priority_SMP_Extract_from_ready,
- _Scheduler_priority_affinity_SMP_Enqueue_fifo
+ _Scheduler_priority_affinity_SMP_Enqueue
);
}
diff --git a/cpukit/score/src/schedulerprioritychangepriority.c b/cpukit/score/src/schedulerprioritychangepriority.c
index eb640fe683..6af475a8d6 100644
--- a/cpukit/score/src/schedulerprioritychangepriority.c
+++ b/cpukit/score/src/schedulerprioritychangepriority.c
@@ -29,8 +29,8 @@ void _Scheduler_priority_Update_priority(
{
Scheduler_priority_Context *context;
Scheduler_priority_Node *the_node;
- unsigned int priority;
- bool prepend_it;
+ unsigned int new_priority;
+ unsigned int unmapped_priority;
if ( !_Thread_Is_ready( the_thread ) ) {
/* Nothing to do */
@@ -38,10 +38,11 @@ void _Scheduler_priority_Update_priority(
}
the_node = _Scheduler_priority_Node_downcast( node );
- priority = (unsigned int )
- _Scheduler_Node_get_priority( &the_node->Base, &prepend_it );
+ new_priority = (unsigned int)
+ _Scheduler_Node_get_priority( &the_node->Base );
+ unmapped_priority = SCHEDULER_PRIORITY_UNMAP( new_priority );
- if ( priority == the_node->Ready_queue.current_priority ) {
+ if ( unmapped_priority == the_node->Ready_queue.current_priority ) {
/* Nothing to do */
return;
}
@@ -56,19 +57,19 @@ void _Scheduler_priority_Update_priority(
_Scheduler_priority_Ready_queue_update(
&the_node->Ready_queue,
- priority,
+ unmapped_priority,
&context->Bit_map,
&context->Ready[ 0 ]
);
- if ( prepend_it ) {
- _Scheduler_priority_Ready_queue_enqueue_first(
+ if ( SCHEDULER_PRIORITY_IS_APPEND( new_priority ) ) {
+ _Scheduler_priority_Ready_queue_enqueue(
&the_thread->Object.Node,
&the_node->Ready_queue,
&context->Bit_map
);
} else {
- _Scheduler_priority_Ready_queue_enqueue(
+ _Scheduler_priority_Ready_queue_enqueue_first(
&the_thread->Object.Node,
&the_node->Ready_queue,
&context->Bit_map
diff --git a/cpukit/score/src/schedulerprioritysmp.c b/cpukit/score/src/schedulerprioritysmp.c
index 071a4218f3..205d3257ca 100644
--- a/cpukit/score/src/schedulerprioritysmp.c
+++ b/cpukit/score/src/schedulerprioritysmp.c
@@ -68,7 +68,7 @@ void _Scheduler_priority_SMP_Node_initialize(
self = _Scheduler_priority_SMP_Get_self( context );
_Scheduler_priority_Ready_queue_update(
&the_node->Ready_queue,
- priority,
+ SCHEDULER_PRIORITY_UNMAP( priority ),
&self->Bit_map,
&self->Ready[ 0 ]
);
@@ -109,103 +109,45 @@ void _Scheduler_priority_SMP_Block(
);
}
-static bool _Scheduler_priority_SMP_Enqueue_ordered(
- Scheduler_Context *context,
- Scheduler_Node *node,
- Chain_Node_order order,
- Scheduler_SMP_Insert insert_ready,
- Scheduler_SMP_Insert insert_scheduled
+static bool _Scheduler_priority_SMP_Enqueue(
+ Scheduler_Context *context,
+ Scheduler_Node *node,
+ Priority_Control insert_priority
)
{
- return _Scheduler_SMP_Enqueue_ordered(
+ return _Scheduler_SMP_Enqueue(
context,
node,
- order,
- insert_ready,
- insert_scheduled,
+ insert_priority,
+ _Scheduler_SMP_Priority_less_equal,
+ _Scheduler_priority_SMP_Insert_ready,
+ _Scheduler_SMP_Insert_scheduled,
_Scheduler_priority_SMP_Move_from_scheduled_to_ready,
_Scheduler_SMP_Get_lowest_scheduled,
_Scheduler_SMP_Allocate_processor_lazy
);
}
-static bool _Scheduler_priority_SMP_Enqueue_lifo(
- Scheduler_Context *context,
- Scheduler_Node *node
-)
-{
- return _Scheduler_priority_SMP_Enqueue_ordered(
- context,
- node,
- _Scheduler_SMP_Insert_priority_lifo_order,
- _Scheduler_priority_SMP_Insert_ready_lifo,
- _Scheduler_SMP_Insert_scheduled_lifo
- );
-}
-
-static bool _Scheduler_priority_SMP_Enqueue_fifo(
+static bool _Scheduler_priority_SMP_Enqueue_scheduled(
Scheduler_Context *context,
- Scheduler_Node *node
+ Scheduler_Node *node,
+ Priority_Control insert_priority
)
{
- return _Scheduler_priority_SMP_Enqueue_ordered(
+ return _Scheduler_SMP_Enqueue_scheduled(
context,
node,
- _Scheduler_SMP_Insert_priority_fifo_order,
- _Scheduler_priority_SMP_Insert_ready_fifo,
- _Scheduler_SMP_Insert_scheduled_fifo
- );
-}
-
-static bool _Scheduler_priority_SMP_Enqueue_scheduled_ordered(
- Scheduler_Context *context,
- Scheduler_Node *node,
- Chain_Node_order order,
- Scheduler_SMP_Insert insert_ready,
- Scheduler_SMP_Insert insert_scheduled
-)
-{
- return _Scheduler_SMP_Enqueue_scheduled_ordered(
- context,
- node,
- order,
+ insert_priority,
+ _Scheduler_SMP_Priority_less_equal,
_Scheduler_priority_SMP_Extract_from_ready,
_Scheduler_priority_SMP_Get_highest_ready,
- insert_ready,
- insert_scheduled,
+ _Scheduler_priority_SMP_Insert_ready,
+ _Scheduler_SMP_Insert_scheduled,
_Scheduler_priority_SMP_Move_from_ready_to_scheduled,
_Scheduler_SMP_Allocate_processor_lazy
);
}
-static bool _Scheduler_priority_SMP_Enqueue_scheduled_lifo(
- Scheduler_Context *context,
- Scheduler_Node *node
-)
-{
- return _Scheduler_priority_SMP_Enqueue_scheduled_ordered(
- context,
- node,
- _Scheduler_SMP_Insert_priority_lifo_order,
- _Scheduler_priority_SMP_Insert_ready_lifo,
- _Scheduler_SMP_Insert_scheduled_lifo
- );
-}
-
-static bool _Scheduler_priority_SMP_Enqueue_scheduled_fifo(
- Scheduler_Context *context,
- Scheduler_Node *node
-)
-{
- return _Scheduler_priority_SMP_Enqueue_scheduled_ordered(
- context,
- node,
- _Scheduler_SMP_Insert_priority_fifo_order,
- _Scheduler_priority_SMP_Insert_ready_fifo,
- _Scheduler_SMP_Insert_scheduled_fifo
- );
-}
-
void _Scheduler_priority_SMP_Unblock(
const Scheduler_Control *scheduler,
Thread_Control *thread,
@@ -219,7 +161,7 @@ void _Scheduler_priority_SMP_Unblock(
thread,
node,
_Scheduler_priority_SMP_Do_update,
- _Scheduler_priority_SMP_Enqueue_fifo
+ _Scheduler_priority_SMP_Enqueue
);
}
@@ -233,9 +175,9 @@ static bool _Scheduler_priority_SMP_Do_ask_for_help(
context,
the_thread,
node,
- _Scheduler_SMP_Insert_priority_lifo_order,
- _Scheduler_priority_SMP_Insert_ready_lifo,
- _Scheduler_SMP_Insert_scheduled_lifo,
+ _Scheduler_SMP_Priority_less_equal,
+ _Scheduler_priority_SMP_Insert_ready,
+ _Scheduler_SMP_Insert_scheduled,
_Scheduler_priority_SMP_Move_from_scheduled_to_ready,
_Scheduler_SMP_Get_lowest_scheduled,
_Scheduler_SMP_Allocate_processor_lazy
@@ -256,10 +198,8 @@ void _Scheduler_priority_SMP_Update_priority(
node,
_Scheduler_priority_SMP_Extract_from_ready,
_Scheduler_priority_SMP_Do_update,
- _Scheduler_priority_SMP_Enqueue_fifo,
- _Scheduler_priority_SMP_Enqueue_lifo,
- _Scheduler_priority_SMP_Enqueue_scheduled_fifo,
- _Scheduler_priority_SMP_Enqueue_scheduled_lifo,
+ _Scheduler_priority_SMP_Enqueue,
+ _Scheduler_priority_SMP_Enqueue_scheduled,
_Scheduler_priority_SMP_Do_ask_for_help
);
}
@@ -323,7 +263,7 @@ void _Scheduler_priority_SMP_Add_processor(
context,
idle,
_Scheduler_priority_SMP_Has_ready,
- _Scheduler_priority_SMP_Enqueue_scheduled_fifo,
+ _Scheduler_priority_SMP_Enqueue_scheduled,
_Scheduler_SMP_Do_nothing_register_idle
);
}
@@ -339,7 +279,7 @@ Thread_Control *_Scheduler_priority_SMP_Remove_processor(
context,
cpu,
_Scheduler_priority_SMP_Extract_from_ready,
- _Scheduler_priority_SMP_Enqueue_fifo
+ _Scheduler_priority_SMP_Enqueue
);
}
@@ -356,7 +296,7 @@ void _Scheduler_priority_SMP_Yield(
thread,
node,
_Scheduler_priority_SMP_Extract_from_ready,
- _Scheduler_priority_SMP_Enqueue_fifo,
- _Scheduler_priority_SMP_Enqueue_scheduled_fifo
+ _Scheduler_priority_SMP_Enqueue,
+ _Scheduler_priority_SMP_Enqueue_scheduled
);
}
diff --git a/cpukit/score/src/schedulerpriorityunblock.c b/cpukit/score/src/schedulerpriorityunblock.c
index 42ba4de98f..784bc58611 100644
--- a/cpukit/score/src/schedulerpriorityunblock.c
+++ b/cpukit/score/src/schedulerpriorityunblock.c
@@ -31,18 +31,17 @@ void _Scheduler_priority_Unblock (
Scheduler_priority_Context *context;
Scheduler_priority_Node *the_node;
unsigned int priority;
- bool prepend_it;
+ unsigned int unmapped_priority;
context = _Scheduler_priority_Get_context( scheduler );
the_node = _Scheduler_priority_Node_downcast( node );
- priority = (unsigned int )
- _Scheduler_Node_get_priority( &the_node->Base, &prepend_it );
- (void) prepend_it;
+ priority = (unsigned int ) _Scheduler_Node_get_priority( &the_node->Base );
+ unmapped_priority = SCHEDULER_PRIORITY_UNMAP( priority );
- if ( priority != the_node->Ready_queue.current_priority ) {
+ if ( unmapped_priority != the_node->Ready_queue.current_priority ) {
_Scheduler_priority_Ready_queue_update(
&the_node->Ready_queue,
- priority,
+ unmapped_priority,
&context->Bit_map,
&context->Ready[ 0 ]
);
diff --git a/cpukit/score/src/schedulersimplechangepriority.c b/cpukit/score/src/schedulersimplechangepriority.c
index 8253a01421..c2c60a5f01 100644
--- a/cpukit/score/src/schedulersimplechangepriority.c
+++ b/cpukit/score/src/schedulersimplechangepriority.c
@@ -28,7 +28,7 @@ void _Scheduler_simple_Update_priority(
)
{
Scheduler_simple_Context *context;
- bool prepend_it;
+ unsigned int new_priority;
if ( !_Thread_Is_ready( the_thread ) ) {
/* Nothing to do */
@@ -36,15 +36,9 @@ void _Scheduler_simple_Update_priority(
}
context = _Scheduler_simple_Get_context( scheduler );
- _Scheduler_Node_get_priority( node, &prepend_it );
+ new_priority = (unsigned int ) _Scheduler_Node_get_priority( node );
_Scheduler_simple_Extract( scheduler, the_thread, node );
-
- if ( prepend_it ) {
- _Scheduler_simple_Insert_priority_lifo( &context->Ready, the_thread );
- } else {
- _Scheduler_simple_Insert_priority_fifo( &context->Ready, the_thread );
- }
-
+ _Scheduler_simple_Insert( &context->Ready, the_thread, new_priority );
_Scheduler_simple_Schedule_body( scheduler, the_thread, false );
}
diff --git a/cpukit/score/src/schedulersimplesmp.c b/cpukit/score/src/schedulersimplesmp.c
index df08a19eab..4ab4987c3a 100644
--- a/cpukit/score/src/schedulersimplesmp.c
+++ b/cpukit/score/src/schedulersimplesmp.c
@@ -99,17 +99,17 @@ static void _Scheduler_simple_SMP_Move_from_scheduled_to_ready(
)
{
Scheduler_simple_SMP_Context *self;
- Priority_Control priority_to_insert;
+ Priority_Control insert_priority;
self = _Scheduler_simple_SMP_Get_self( context );
- priority_to_insert = _Scheduler_SMP_Node_priority( scheduled_to_ready );
_Chain_Extract_unprotected( &scheduled_to_ready->Node.Chain );
+ insert_priority = _Scheduler_SMP_Node_priority( scheduled_to_ready );
_Chain_Insert_ordered_unprotected(
&self->Ready,
&scheduled_to_ready->Node.Chain,
- &priority_to_insert,
- _Scheduler_SMP_Insert_priority_lifo_order
+ &insert_priority,
+ _Scheduler_SMP_Priority_less_equal
);
}
@@ -119,55 +119,36 @@ static void _Scheduler_simple_SMP_Move_from_ready_to_scheduled(
)
{
Scheduler_simple_SMP_Context *self;
- Priority_Control priority_to_insert;
+ Priority_Control insert_priority;
self = _Scheduler_simple_SMP_Get_self( context );
- priority_to_insert = _Scheduler_SMP_Node_priority( ready_to_scheduled );
_Chain_Extract_unprotected( &ready_to_scheduled->Node.Chain );
+ insert_priority = _Scheduler_SMP_Node_priority( ready_to_scheduled );
+ insert_priority = SCHEDULER_PRIORITY_APPEND( insert_priority );
_Chain_Insert_ordered_unprotected(
&self->Base.Scheduled,
&ready_to_scheduled->Node.Chain,
- &priority_to_insert,
- _Scheduler_SMP_Insert_priority_fifo_order
+ &insert_priority,
+ _Scheduler_SMP_Priority_less_equal
);
}
-static void _Scheduler_simple_SMP_Insert_ready_lifo(
+static void _Scheduler_simple_SMP_Insert_ready(
Scheduler_Context *context,
- Scheduler_Node *node_to_insert
+ Scheduler_Node *node_to_insert,
+ Priority_Control insert_priority
)
{
Scheduler_simple_SMP_Context *self;
- Priority_Control priority_to_insert;
self = _Scheduler_simple_SMP_Get_self( context );
- priority_to_insert = _Scheduler_SMP_Node_priority( node_to_insert );
_Chain_Insert_ordered_unprotected(
&self->Ready,
&node_to_insert->Node.Chain,
- &priority_to_insert,
- _Scheduler_SMP_Insert_priority_lifo_order
- );
-}
-
-static void _Scheduler_simple_SMP_Insert_ready_fifo(
- Scheduler_Context *context,
- Scheduler_Node *node_to_insert
-)
-{
- Scheduler_simple_SMP_Context *self;
- Priority_Control priority_to_insert;
-
- self = _Scheduler_simple_SMP_Get_self( context );
- priority_to_insert = _Scheduler_SMP_Node_priority( node_to_insert );
-
- _Chain_Insert_ordered_unprotected(
- &self->Ready,
- &node_to_insert->Node.Chain,
- &priority_to_insert,
- _Scheduler_SMP_Insert_priority_fifo_order
+ &insert_priority,
+ _Scheduler_SMP_Priority_less_equal
);
}
@@ -200,103 +181,45 @@ void _Scheduler_simple_SMP_Block(
);
}
-static bool _Scheduler_simple_SMP_Enqueue_ordered(
- Scheduler_Context *context,
- Scheduler_Node *node,
- Chain_Node_order order,
- Scheduler_SMP_Insert insert_ready,
- Scheduler_SMP_Insert insert_scheduled
+static bool _Scheduler_simple_SMP_Enqueue(
+ Scheduler_Context *context,
+ Scheduler_Node *node,
+ Priority_Control insert_priority
)
{
- return _Scheduler_SMP_Enqueue_ordered(
+ return _Scheduler_SMP_Enqueue(
context,
node,
- order,
- insert_ready,
- insert_scheduled,
+ insert_priority,
+ _Scheduler_SMP_Priority_less_equal,
+ _Scheduler_simple_SMP_Insert_ready,
+ _Scheduler_SMP_Insert_scheduled,
_Scheduler_simple_SMP_Move_from_scheduled_to_ready,
_Scheduler_SMP_Get_lowest_scheduled,
_Scheduler_SMP_Allocate_processor_lazy
);
}
-static bool _Scheduler_simple_SMP_Enqueue_lifo(
- Scheduler_Context *context,
- Scheduler_Node *node
-)
-{
- return _Scheduler_simple_SMP_Enqueue_ordered(
- context,
- node,
- _Scheduler_SMP_Insert_priority_lifo_order,
- _Scheduler_simple_SMP_Insert_ready_lifo,
- _Scheduler_SMP_Insert_scheduled_lifo
- );
-}
-
-static bool _Scheduler_simple_SMP_Enqueue_fifo(
- Scheduler_Context *context,
- Scheduler_Node *node
-)
-{
- return _Scheduler_simple_SMP_Enqueue_ordered(
- context,
- node,
- _Scheduler_SMP_Insert_priority_fifo_order,
- _Scheduler_simple_SMP_Insert_ready_fifo,
- _Scheduler_SMP_Insert_scheduled_fifo
- );
-}
-
-static bool _Scheduler_simple_SMP_Enqueue_scheduled_ordered(
+static bool _Scheduler_simple_SMP_Enqueue_scheduled(
Scheduler_Context *context,
- Scheduler_Node *node,
- Chain_Node_order order,
- Scheduler_SMP_Insert insert_ready,
- Scheduler_SMP_Insert insert_scheduled
+ Scheduler_Node *node,
+ Priority_Control insert_priority
)
{
- return _Scheduler_SMP_Enqueue_scheduled_ordered(
+ return _Scheduler_SMP_Enqueue_scheduled(
context,
node,
- order,
+ insert_priority,
+ _Scheduler_SMP_Priority_less_equal,
_Scheduler_simple_SMP_Extract_from_ready,
_Scheduler_simple_SMP_Get_highest_ready,
- insert_ready,
- insert_scheduled,
+ _Scheduler_simple_SMP_Insert_ready,
+ _Scheduler_SMP_Insert_scheduled,
_Scheduler_simple_SMP_Move_from_ready_to_scheduled,
_Scheduler_SMP_Allocate_processor_lazy
);
}
-static bool _Scheduler_simple_SMP_Enqueue_scheduled_lifo(
- Scheduler_Context *context,
- Scheduler_Node *node
-)
-{
- return _Scheduler_simple_SMP_Enqueue_scheduled_ordered(
- context,
- node,
- _Scheduler_SMP_Insert_priority_lifo_order,
- _Scheduler_simple_SMP_Insert_ready_lifo,
- _Scheduler_SMP_Insert_scheduled_lifo
- );
-}
-
-static bool _Scheduler_simple_SMP_Enqueue_scheduled_fifo(
- Scheduler_Context *context,
- Scheduler_Node *node
-)
-{
- return _Scheduler_simple_SMP_Enqueue_scheduled_ordered(
- context,
- node,
- _Scheduler_SMP_Insert_priority_fifo_order,
- _Scheduler_simple_SMP_Insert_ready_fifo,
- _Scheduler_SMP_Insert_scheduled_fifo
- );
-}
-
void _Scheduler_simple_SMP_Unblock(
const Scheduler_Control *scheduler,
Thread_Control *thread,
@@ -310,7 +233,7 @@ void _Scheduler_simple_SMP_Unblock(
thread,
node,
_Scheduler_simple_SMP_Do_update,
- _Scheduler_simple_SMP_Enqueue_fifo
+ _Scheduler_simple_SMP_Enqueue
);
}
@@ -324,9 +247,9 @@ static bool _Scheduler_simple_SMP_Do_ask_for_help(
context,
the_thread,
node,
- _Scheduler_SMP_Insert_priority_lifo_order,
- _Scheduler_simple_SMP_Insert_ready_lifo,
- _Scheduler_SMP_Insert_scheduled_lifo,
+ _Scheduler_SMP_Priority_less_equal,
+ _Scheduler_simple_SMP_Insert_ready,
+ _Scheduler_SMP_Insert_scheduled,
_Scheduler_simple_SMP_Move_from_scheduled_to_ready,
_Scheduler_SMP_Get_lowest_scheduled,
_Scheduler_SMP_Allocate_processor_lazy
@@ -347,10 +270,8 @@ void _Scheduler_simple_SMP_Update_priority(
node,
_Scheduler_simple_SMP_Extract_from_ready,
_Scheduler_simple_SMP_Do_update,
- _Scheduler_simple_SMP_Enqueue_fifo,
- _Scheduler_simple_SMP_Enqueue_lifo,
- _Scheduler_simple_SMP_Enqueue_scheduled_fifo,
- _Scheduler_simple_SMP_Enqueue_scheduled_lifo,
+ _Scheduler_simple_SMP_Enqueue,
+ _Scheduler_simple_SMP_Enqueue_scheduled,
_Scheduler_simple_SMP_Do_ask_for_help
);
}
@@ -414,7 +335,7 @@ void _Scheduler_simple_SMP_Add_processor(
context,
idle,
_Scheduler_simple_SMP_Has_ready,
- _Scheduler_simple_SMP_Enqueue_scheduled_fifo,
+ _Scheduler_simple_SMP_Enqueue_scheduled,
_Scheduler_SMP_Do_nothing_register_idle
);
}
@@ -430,7 +351,7 @@ Thread_Control *_Scheduler_simple_SMP_Remove_processor(
context,
cpu,
_Scheduler_simple_SMP_Extract_from_ready,
- _Scheduler_simple_SMP_Enqueue_fifo
+ _Scheduler_simple_SMP_Enqueue
);
}
@@ -447,7 +368,7 @@ void _Scheduler_simple_SMP_Yield(
thread,
node,
_Scheduler_simple_SMP_Extract_from_ready,
- _Scheduler_simple_SMP_Enqueue_fifo,
- _Scheduler_simple_SMP_Enqueue_scheduled_fifo
+ _Scheduler_simple_SMP_Enqueue,
+ _Scheduler_simple_SMP_Enqueue_scheduled
);
}
diff --git a/cpukit/score/src/schedulersimpleunblock.c b/cpukit/score/src/schedulersimpleunblock.c
index 5540e20e87..2f5c8636f5 100644
--- a/cpukit/score/src/schedulersimpleunblock.c
+++ b/cpukit/score/src/schedulersimpleunblock.c
@@ -28,13 +28,15 @@ void _Scheduler_simple_Unblock(
)
{
Scheduler_simple_Context *context;
- Priority_Control priority;
+ unsigned int priority;
+ unsigned int insert_priority;
(void) node;
context = _Scheduler_simple_Get_context( scheduler );
- _Scheduler_simple_Insert_priority_fifo( &context->Ready, the_thread );
priority = _Thread_Get_priority( the_thread );
+ insert_priority = SCHEDULER_PRIORITY_APPEND( priority );
+ _Scheduler_simple_Insert( &context->Ready, the_thread, insert_priority );
/*
* If the thread that was unblocked is more important than the heir,
diff --git a/cpukit/score/src/schedulersimpleyield.c b/cpukit/score/src/schedulersimpleyield.c
index 0c150d8b1f..95f9cd3540 100644
--- a/cpukit/score/src/schedulersimpleyield.c
+++ b/cpukit/score/src/schedulersimpleyield.c
@@ -26,12 +26,16 @@ void _Scheduler_simple_Yield(
Scheduler_Node *node
)
{
- Scheduler_simple_Context *context =
- _Scheduler_simple_Get_context( scheduler );
+ Scheduler_simple_Context *context;
+ unsigned int insert_priority;
+
+ context = _Scheduler_simple_Get_context( scheduler );
(void) node;
_Chain_Extract_unprotected( &the_thread->Object.Node );
- _Scheduler_simple_Insert_priority_fifo( &context->Ready, the_thread );
+ insert_priority = (unsigned int) _Thread_Get_priority( the_thread );
+ insert_priority = SCHEDULER_PRIORITY_APPEND( insert_priority );
+ _Scheduler_simple_Insert( &context->Ready, the_thread, insert_priority );
_Scheduler_simple_Schedule_body( scheduler, the_thread, false );
}
diff --git a/cpukit/score/src/schedulerstrongapa.c b/cpukit/score/src/schedulerstrongapa.c
index 57ffb61367..19d4ebe348 100644
--- a/cpukit/score/src/schedulerstrongapa.c
+++ b/cpukit/score/src/schedulerstrongapa.c
@@ -66,7 +66,7 @@ static void _Scheduler_strong_APA_Move_from_ready_to_scheduled(
{
Scheduler_strong_APA_Context *self;
Scheduler_strong_APA_Node *node;
- Priority_Control priority;
+ Priority_Control insert_priority;
self = _Scheduler_strong_APA_Get_self( context );
node = _Scheduler_strong_APA_Node_downcast( ready_to_scheduled );
@@ -76,47 +76,41 @@ static void _Scheduler_strong_APA_Move_from_ready_to_scheduled(
&node->Ready_queue,
&self->Bit_map
);
- priority = node->Base.priority;
+ insert_priority = _Scheduler_SMP_Node_priority( &node->Base.Base );
+ insert_priority = SCHEDULER_PRIORITY_APPEND( insert_priority );
_Chain_Insert_ordered_unprotected(
&self->Base.Scheduled,
&node->Base.Base.Node.Chain,
- &priority,
- _Scheduler_SMP_Insert_priority_fifo_order
+ &insert_priority,
+ _Scheduler_SMP_Priority_less_equal
);
}
-static void _Scheduler_strong_APA_Insert_ready_lifo(
+static void _Scheduler_strong_APA_Insert_ready(
Scheduler_Context *context,
- Scheduler_Node *the_thread
-)
-{
- Scheduler_strong_APA_Context *self =
- _Scheduler_strong_APA_Get_self( context );
- Scheduler_strong_APA_Node *node =
- _Scheduler_strong_APA_Node_downcast( the_thread );
-
- _Scheduler_priority_Ready_queue_enqueue(
- &node->Base.Base.Node.Chain,
- &node->Ready_queue,
- &self->Bit_map
- );
-}
-
-static void _Scheduler_strong_APA_Insert_ready_fifo(
- Scheduler_Context *context,
- Scheduler_Node *the_thread
+ Scheduler_Node *node_base,
+ Priority_Control insert_priority
)
{
- Scheduler_strong_APA_Context *self =
- _Scheduler_strong_APA_Get_self( context );
- Scheduler_strong_APA_Node *node =
- _Scheduler_strong_APA_Node_downcast( the_thread );
+ Scheduler_strong_APA_Context *self;
+ Scheduler_strong_APA_Node *node;
- _Scheduler_priority_Ready_queue_enqueue_first(
- &node->Base.Base.Node.Chain,
- &node->Ready_queue,
- &self->Bit_map
- );
+ self = _Scheduler_strong_APA_Get_self( context );
+ node = _Scheduler_strong_APA_Node_downcast( node_base );
+
+ if ( SCHEDULER_PRIORITY_IS_APPEND( insert_priority ) ) {
+ _Scheduler_priority_Ready_queue_enqueue(
+ &node->Base.Base.Node.Chain,
+ &node->Ready_queue,
+ &self->Bit_map
+ );
+ } else {
+ _Scheduler_priority_Ready_queue_enqueue_first(
+ &node->Base.Base.Node.Chain,
+ &node->Ready_queue,
+ &self->Bit_map
+ );
+ }
}
static void _Scheduler_strong_APA_Extract_from_ready(
@@ -150,7 +144,7 @@ static void _Scheduler_strong_APA_Do_update(
_Scheduler_SMP_Node_update_priority( &node->Base, new_priority );
_Scheduler_priority_Ready_queue_update(
&node->Ready_queue,
- new_priority,
+ SCHEDULER_PRIORITY_UNMAP( new_priority ),
&self->Bit_map,
&self->Ready[ 0 ]
);
@@ -198,7 +192,7 @@ void _Scheduler_strong_APA_Node_initialize(
self = _Scheduler_strong_APA_Get_self( context );
_Scheduler_priority_Ready_queue_update(
&the_node->Ready_queue,
- priority,
+ SCHEDULER_PRIORITY_UNMAP( priority ),
&self->Bit_map,
&self->Ready[ 0 ]
);
@@ -247,103 +241,45 @@ void _Scheduler_strong_APA_Block(
);
}
-static bool _Scheduler_strong_APA_Enqueue_ordered(
- Scheduler_Context *context,
- Scheduler_Node *node,
- Chain_Node_order order,
- Scheduler_SMP_Insert insert_ready,
- Scheduler_SMP_Insert insert_scheduled
+static bool _Scheduler_strong_APA_Enqueue(
+ Scheduler_Context *context,
+ Scheduler_Node *node,
+ Priority_Control insert_priority
)
{
- return _Scheduler_SMP_Enqueue_ordered(
+ return _Scheduler_SMP_Enqueue(
context,
node,
- order,
- insert_ready,
- insert_scheduled,
+ insert_priority,
+ _Scheduler_SMP_Priority_less_equal,
+ _Scheduler_strong_APA_Insert_ready,
+ _Scheduler_SMP_Insert_scheduled,
_Scheduler_strong_APA_Move_from_scheduled_to_ready,
_Scheduler_SMP_Get_lowest_scheduled,
_Scheduler_SMP_Allocate_processor_exact
);
}
-static bool _Scheduler_strong_APA_Enqueue_lifo(
- Scheduler_Context *context,
- Scheduler_Node *node
-)
-{
- return _Scheduler_strong_APA_Enqueue_ordered(
- context,
- node,
- _Scheduler_SMP_Insert_priority_lifo_order,
- _Scheduler_strong_APA_Insert_ready_lifo,
- _Scheduler_SMP_Insert_scheduled_lifo
- );
-}
-
-static bool _Scheduler_strong_APA_Enqueue_fifo(
+static bool _Scheduler_strong_APA_Enqueue_scheduled(
Scheduler_Context *context,
- Scheduler_Node *node
-)
-{
- return _Scheduler_strong_APA_Enqueue_ordered(
- context,
- node,
- _Scheduler_SMP_Insert_priority_fifo_order,
- _Scheduler_strong_APA_Insert_ready_fifo,
- _Scheduler_SMP_Insert_scheduled_fifo
- );
-}
-
-static bool _Scheduler_strong_APA_Enqueue_scheduled_ordered(
- Scheduler_Context *context,
- Scheduler_Node *node,
- Chain_Node_order order,
- Scheduler_SMP_Insert insert_ready,
- Scheduler_SMP_Insert insert_scheduled
+ Scheduler_Node *node,
+ Priority_Control insert_priority
)
{
- return _Scheduler_SMP_Enqueue_scheduled_ordered(
+ return _Scheduler_SMP_Enqueue_scheduled(
context,
node,
- order,
+ insert_priority,
+ _Scheduler_SMP_Priority_less_equal,
_Scheduler_strong_APA_Extract_from_ready,
_Scheduler_strong_APA_Get_highest_ready,
- insert_ready,
- insert_scheduled,
+ _Scheduler_strong_APA_Insert_ready,
+ _Scheduler_SMP_Insert_scheduled,
_Scheduler_strong_APA_Move_from_ready_to_scheduled,
_Scheduler_SMP_Allocate_processor_exact
);
}
-static bool _Scheduler_strong_APA_Enqueue_scheduled_lifo(
- Scheduler_Context *context,
- Scheduler_Node *node
-)
-{
- return _Scheduler_strong_APA_Enqueue_scheduled_ordered(
- context,
- node,
- _Scheduler_SMP_Insert_priority_lifo_order,
- _Scheduler_strong_APA_Insert_ready_lifo,
- _Scheduler_SMP_Insert_scheduled_lifo
- );
-}
-
-static bool _Scheduler_strong_APA_Enqueue_scheduled_fifo(
- Scheduler_Context *context,
- Scheduler_Node *node
-)
-{
- return _Scheduler_strong_APA_Enqueue_scheduled_ordered(
- context,
- node,
- _Scheduler_SMP_Insert_priority_fifo_order,
- _Scheduler_strong_APA_Insert_ready_fifo,
- _Scheduler_SMP_Insert_scheduled_fifo
- );
-}
-
void _Scheduler_strong_APA_Unblock(
const Scheduler_Control *scheduler,
Thread_Control *the_thread,
@@ -357,7 +293,7 @@ void _Scheduler_strong_APA_Unblock(
the_thread,
node,
_Scheduler_strong_APA_Do_update,
- _Scheduler_strong_APA_Enqueue_fifo
+ _Scheduler_strong_APA_Enqueue
);
}
@@ -371,9 +307,9 @@ static bool _Scheduler_strong_APA_Do_ask_for_help(
context,
the_thread,
node,
- _Scheduler_SMP_Insert_priority_lifo_order,
- _Scheduler_strong_APA_Insert_ready_lifo,
- _Scheduler_SMP_Insert_scheduled_lifo,
+ _Scheduler_SMP_Priority_less_equal,
+ _Scheduler_strong_APA_Insert_ready,
+ _Scheduler_SMP_Insert_scheduled,
_Scheduler_strong_APA_Move_from_scheduled_to_ready,
_Scheduler_SMP_Get_lowest_scheduled,
_Scheduler_SMP_Allocate_processor_lazy
@@ -394,10 +330,8 @@ void _Scheduler_strong_APA_Update_priority(
node,
_Scheduler_strong_APA_Extract_from_ready,
_Scheduler_strong_APA_Do_update,
- _Scheduler_strong_APA_Enqueue_fifo,
- _Scheduler_strong_APA_Enqueue_lifo,
- _Scheduler_strong_APA_Enqueue_scheduled_fifo,
- _Scheduler_strong_APA_Enqueue_scheduled_lifo,
+ _Scheduler_strong_APA_Enqueue,
+ _Scheduler_strong_APA_Enqueue_scheduled,
_Scheduler_strong_APA_Do_ask_for_help
);
}
@@ -461,7 +395,7 @@ void _Scheduler_strong_APA_Add_processor(
context,
idle,
_Scheduler_strong_APA_Has_ready,
- _Scheduler_strong_APA_Enqueue_scheduled_fifo,
+ _Scheduler_strong_APA_Enqueue_scheduled,
_Scheduler_SMP_Do_nothing_register_idle
);
}
@@ -477,7 +411,7 @@ Thread_Control *_Scheduler_strong_APA_Remove_processor(
context,
cpu,
_Scheduler_strong_APA_Extract_from_ready,
- _Scheduler_strong_APA_Enqueue_fifo
+ _Scheduler_strong_APA_Enqueue
);
}
@@ -494,7 +428,7 @@ void _Scheduler_strong_APA_Yield(
the_thread,
node,
_Scheduler_strong_APA_Extract_from_ready,
- _Scheduler_strong_APA_Enqueue_fifo,
- _Scheduler_strong_APA_Enqueue_scheduled_fifo
+ _Scheduler_strong_APA_Enqueue,
+ _Scheduler_strong_APA_Enqueue_scheduled
);
}