summary refs log tree commit diff stats
path: root/cpukit/score/include/rtems/score/schedulersmpimpl.h
diff options
context:
space:
mode:
author	Sebastian Huber <sebastian.huber@embedded-brains.de>	2017-10-25 16:00:17 +0200
committer	Sebastian Huber <sebastian.huber@embedded-brains.de>	2017-11-06 09:06:21 +0100
commit	0c286e3d7c176a4fb7faf6ba9f809996d599ca10 (patch)
tree	bf30809e9a4f92d19254fa1acb1f833effa8dc40 /cpukit/score/include/rtems/score/schedulersmpimpl.h
parent	score: Remove superfluous include (diff)
download	rtems-0c286e3d7c176a4fb7faf6ba9f809996d599ca10.tar.bz2
score: _Chain_Insert_ordered_unprotected()
Change the chain order relation to use a directly specified left-hand-side value. This is similar to _RBTree_Insert_inline() and helps the compiler to better optimize the code.
Diffstat (limited to 'cpukit/score/include/rtems/score/schedulersmpimpl.h')
-rw-r--r--	cpukit/score/include/rtems/score/schedulersmpimpl.h	58
1 file changed, 39 insertions(+), 19 deletions(-)
diff --git a/cpukit/score/include/rtems/score/schedulersmpimpl.h b/cpukit/score/include/rtems/score/schedulersmpimpl.h
index 3194ee39a0..896b1306ab 100644
--- a/cpukit/score/include/rtems/score/schedulersmpimpl.h
+++ b/cpukit/score/include/rtems/score/schedulersmpimpl.h
@@ -352,29 +352,31 @@ static inline void _Scheduler_SMP_Do_nothing_register_idle(
}
static inline bool _Scheduler_SMP_Insert_priority_lifo_order(
- const Chain_Node *to_insert,
+ const void *to_insert,
const Chain_Node *next
)
{
- const Scheduler_SMP_Node *node_to_insert =
- (const Scheduler_SMP_Node *) to_insert;
- const Scheduler_SMP_Node *node_next =
- (const Scheduler_SMP_Node *) next;
+ const Priority_Control *priority_to_insert;
+ const Scheduler_SMP_Node *node_next;
- return node_to_insert->priority <= node_next->priority;
+ priority_to_insert = (const Priority_Control *) to_insert;
+ node_next = (const Scheduler_SMP_Node *) next;
+
+ return *priority_to_insert <= node_next->priority;
}
static inline bool _Scheduler_SMP_Insert_priority_fifo_order(
- const Chain_Node *to_insert,
+ const void *to_insert,
const Chain_Node *next
)
{
- const Scheduler_SMP_Node *node_to_insert =
- (const Scheduler_SMP_Node *) to_insert;
- const Scheduler_SMP_Node *node_next =
- (const Scheduler_SMP_Node *) next;
+ const Priority_Control *priority_to_insert;
+ const Scheduler_SMP_Node *node_next;
+
+ priority_to_insert = (const Priority_Control *) to_insert;
+ node_next = (const Scheduler_SMP_Node *) next;
- return node_to_insert->priority < node_next->priority;
+ return *priority_to_insert < node_next->priority;
}
static inline Scheduler_SMP_Context *_Scheduler_SMP_Get_self(
@@ -719,12 +721,14 @@ static inline bool _Scheduler_SMP_Enqueue_ordered(
Scheduler_SMP_Allocate_processor allocate_processor
)
{
- bool needs_help;
- Scheduler_Node *lowest_scheduled;
+ bool needs_help;
+ Scheduler_Node *lowest_scheduled;
+ Priority_Control node_priority;
lowest_scheduled = ( *get_lowest_scheduled )( context, node );
+ node_priority = _Scheduler_SMP_Node_priority( node );
- if ( ( *order )( &node->Node.Chain, &lowest_scheduled->Node.Chain ) ) {
+ if ( ( *order )( &node_priority, &lowest_scheduled->Node.Chain ) ) {
_Scheduler_SMP_Enqueue_to_scheduled(
context,
node,
@@ -776,8 +780,10 @@ static inline bool _Scheduler_SMP_Enqueue_scheduled_ordered(
while ( true ) {
Scheduler_Node *highest_ready;
Scheduler_Try_to_schedule_action action;
+ Priority_Control node_priority;
highest_ready = ( *get_highest_ready )( context, node );
+ node_priority = _Scheduler_SMP_Node_priority( node );
/*
* The node has been extracted from the scheduled chain. We have to place
@@ -785,7 +791,7 @@ static inline bool _Scheduler_SMP_Enqueue_scheduled_ordered(
*/
if (
node->sticky_level > 0
- && ( *order )( &node->Node.Chain, &highest_ready->Node.Chain )
+ && ( *order )( &node_priority, &highest_ready->Node.Chain )
) {
( *insert_scheduled )( context, node );
@@ -1165,11 +1171,16 @@ static inline void _Scheduler_SMP_Insert_scheduled_lifo(
Scheduler_Node *node_to_insert
)
{
- Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );
+ Scheduler_SMP_Context *self;
+ Priority_Control priority_to_insert;
+
+ self = _Scheduler_SMP_Get_self( context );
+ priority_to_insert = _Scheduler_SMP_Node_priority( node_to_insert );
_Chain_Insert_ordered_unprotected(
&self->Scheduled,
&node_to_insert->Node.Chain,
+ &priority_to_insert,
_Scheduler_SMP_Insert_priority_lifo_order
);
}
@@ -1179,11 +1190,16 @@ static inline void _Scheduler_SMP_Insert_scheduled_fifo(
Scheduler_Node *node_to_insert
)
{
- Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );
+ Scheduler_SMP_Context *self;
+ Priority_Control priority_to_insert;
+
+ self = _Scheduler_SMP_Get_self( context );
+ priority_to_insert = _Scheduler_SMP_Node_priority( node_to_insert );
_Chain_Insert_ordered_unprotected(
&self->Scheduled,
&node_to_insert->Node.Chain,
+ &priority_to_insert,
_Scheduler_SMP_Insert_priority_fifo_order
);
}
@@ -1214,7 +1230,11 @@ static inline bool _Scheduler_SMP_Ask_for_help(
node_state = _Scheduler_SMP_Node_state( node );
if ( node_state == SCHEDULER_SMP_NODE_BLOCKED ) {
- if ( ( *order )( &node->Node.Chain, &lowest_scheduled->Node.Chain ) ) {
+ Priority_Control node_priority;
+
+ node_priority = _Scheduler_SMP_Node_priority( node );
+
+ if ( ( *order )( &node_priority, &lowest_scheduled->Node.Chain ) ) {
_Thread_Scheduler_cancel_need_for_help(
thread,
_Thread_Get_CPU( thread )