path: root/cpukit/score/src
author    Sebastian Huber <sebastian.huber@embedded-brains.de>  2017-10-25 16:00:17 +0200
committer Sebastian Huber <sebastian.huber@embedded-brains.de>  2017-11-06 09:06:21 +0100
commit    0c286e3d7c176a4fb7faf6ba9f809996d599ca10 (patch)
tree      bf30809e9a4f92d19254fa1acb1f833effa8dc40 /cpukit/score/src
parent    score: Remove superfluous include (diff)
download  rtems-0c286e3d7c176a4fb7faf6ba9f809996d599ca10.tar.bz2
score: _Chain_Insert_ordered_unprotected()
Change the chain order relation to use a directly specified left-hand side value. This is similar to _RBTree_Insert_inline() and helps the compiler optimize the code better.
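
The standalone program below is a minimal sketch of the pattern this commit introduces, under the assumption drawn from the call sites in the diff: the order relation now receives the left-hand side as a plain key pointer (const void *) instead of a chain node, and the caller computes the key once and passes its address. All names here (Node, Order, order_by_priority, insert_ordered) are hypothetical illustrations, not the RTEMS chain API.

/* Simplified model of the new ordering pattern (hypothetical names, not the
 * RTEMS implementation): the left-hand side of the order relation is a key
 * pointer supplied by the caller, so the key is evaluated once per insert
 * rather than re-derived from a node on every comparison. */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

typedef struct Node {
  struct Node *next;
  int          priority;
} Node;

/* Order relation: left-hand side is the key itself, right-hand side a node
 * that is already on the list. */
typedef bool ( *Order )( const void *key, const Node *right );

static bool order_by_priority( const void *key, const Node *right )
{
  return *(const int *) key < right->priority;
}

/* Insert 'node' before the first element for which the order relation holds
 * (hypothetical helper mirroring the ordered-insert idea, not RTEMS code). */
static void insert_ordered(
  Node       **head,
  Node        *node,
  const void  *key,
  Order        order
)
{
  while ( *head != NULL && !( *order )( key, *head ) ) {
    head = &( *head )->next;
  }

  node->next = *head;
  *head = node;
}

int main( void )
{
  Node  a = { NULL, 1 };
  Node  b = { NULL, 3 };
  Node  c = { NULL, 2 };
  Node *list = NULL;
  int   key;

  key = a.priority; insert_ordered( &list, &a, &key, order_by_priority );
  key = b.priority; insert_ordered( &list, &b, &key, order_by_priority );
  key = c.priority; insert_ordered( &list, &c, &key, order_by_priority );

  for ( const Node *n = list; n != NULL; n = n->next ) {
    printf( "%d\n", n->priority );  /* prints 1, 2, 3 */
  }

  return 0;
}

Passing the key directly is what lets the compiler keep it in a register across the list walk instead of reloading it from the left node on each comparison, which appears to be the optimization the commit message refers to.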
Diffstat (limited to 'cpukit/score/src')
-rw-r--r--  cpukit/score/src/coremsginsert.c                 | 12
-rw-r--r--  cpukit/score/src/schedulerpriorityaffinitysmp.c  | 11
-rw-r--r--  cpukit/score/src/schedulersimplesmp.c            | 32
-rw-r--r--  cpukit/score/src/schedulerstrongapa.c            | 12
4 files changed, 48 insertions(+), 19 deletions(-)
diff --git a/cpukit/score/src/coremsginsert.c b/cpukit/score/src/coremsginsert.c
index 4613f50436..a9aa1e584e 100644
--- a/cpukit/score/src/coremsginsert.c
+++ b/cpukit/score/src/coremsginsert.c
@@ -22,17 +22,17 @@
#if defined(RTEMS_SCORE_COREMSG_ENABLE_MESSAGE_PRIORITY)
static bool _CORE_message_queue_Order(
- const Chain_Node *left,
+ const void *left,
const Chain_Node *right
)
{
- const CORE_message_queue_Buffer_control *left_message;
+ const int *left_priority;
const CORE_message_queue_Buffer_control *right_message;
- left_message = (const CORE_message_queue_Buffer_control *) left;
+ left_priority = (const int *) left;
right_message = (const CORE_message_queue_Buffer_control *) right;
- return _CORE_message_queue_Get_message_priority( left_message ) <
+ return *left_priority <
_CORE_message_queue_Get_message_priority( right_message );
}
#endif
@@ -66,9 +66,13 @@ void _CORE_message_queue_Insert_message(
_Chain_Append_unprotected( pending_messages, &the_message->Node );
#if defined(RTEMS_SCORE_COREMSG_ENABLE_MESSAGE_PRIORITY)
} else if ( submit_type != CORE_MESSAGE_QUEUE_URGENT_REQUEST ) {
+ int priority;
+
+ priority = _CORE_message_queue_Get_message_priority( the_message );
_Chain_Insert_ordered_unprotected(
pending_messages,
&the_message->Node,
+ &priority,
_CORE_message_queue_Order
);
#endif
diff --git a/cpukit/score/src/schedulerpriorityaffinitysmp.c b/cpukit/score/src/schedulerpriorityaffinitysmp.c
index 6caf00c3a0..a948eef0fc 100644
--- a/cpukit/score/src/schedulerpriorityaffinitysmp.c
+++ b/cpukit/score/src/schedulerpriorityaffinitysmp.c
@@ -40,7 +40,7 @@
*/
static bool _Scheduler_priority_affinity_SMP_Insert_priority_lifo_order(
- const Chain_Node *to_insert,
+ const void *to_insert,
const Chain_Node *next
)
{
@@ -49,7 +49,7 @@ static bool _Scheduler_priority_affinity_SMP_Insert_priority_lifo_order(
}
static bool _Scheduler_priority_affinity_SMP_Insert_priority_fifo_order(
- const Chain_Node *to_insert,
+ const void *to_insert,
const Chain_Node *next
)
{
@@ -283,6 +283,8 @@ static void _Scheduler_priority_affinity_SMP_Check_for_migrations(
self = _Scheduler_priority_SMP_Get_self( context );
while (1) {
+ Priority_Control lowest_scheduled_priority;
+
if ( _Priority_bit_map_Is_empty( &self->Bit_map ) ) {
/* Nothing to do */
break;
@@ -310,9 +312,12 @@ static void _Scheduler_priority_affinity_SMP_Check_for_migrations(
if ( lowest_scheduled == NULL )
break;
+ lowest_scheduled_priority =
+ _Scheduler_SMP_Node_priority( lowest_scheduled );
+
if (
_Scheduler_SMP_Insert_priority_lifo_order(
- &lowest_scheduled->Node.Chain,
+ &lowest_scheduled_priority,
&highest_ready->Node.Chain
)
) {
diff --git a/cpukit/score/src/schedulersimplesmp.c b/cpukit/score/src/schedulersimplesmp.c
index 4be43abff1..df08a19eab 100644
--- a/cpukit/score/src/schedulersimplesmp.c
+++ b/cpukit/score/src/schedulersimplesmp.c
@@ -98,13 +98,17 @@ static void _Scheduler_simple_SMP_Move_from_scheduled_to_ready(
Scheduler_Node *scheduled_to_ready
)
{
- Scheduler_simple_SMP_Context *self =
- _Scheduler_simple_SMP_Get_self( context );
+ Scheduler_simple_SMP_Context *self;
+ Priority_Control priority_to_insert;
+
+ self = _Scheduler_simple_SMP_Get_self( context );
+ priority_to_insert = _Scheduler_SMP_Node_priority( scheduled_to_ready );
_Chain_Extract_unprotected( &scheduled_to_ready->Node.Chain );
_Chain_Insert_ordered_unprotected(
&self->Ready,
&scheduled_to_ready->Node.Chain,
+ &priority_to_insert,
_Scheduler_SMP_Insert_priority_lifo_order
);
}
@@ -114,13 +118,17 @@ static void _Scheduler_simple_SMP_Move_from_ready_to_scheduled(
Scheduler_Node *ready_to_scheduled
)
{
- Scheduler_simple_SMP_Context *self =
- _Scheduler_simple_SMP_Get_self( context );
+ Scheduler_simple_SMP_Context *self;
+ Priority_Control priority_to_insert;
+
+ self = _Scheduler_simple_SMP_Get_self( context );
+ priority_to_insert = _Scheduler_SMP_Node_priority( ready_to_scheduled );
_Chain_Extract_unprotected( &ready_to_scheduled->Node.Chain );
_Chain_Insert_ordered_unprotected(
&self->Base.Scheduled,
&ready_to_scheduled->Node.Chain,
+ &priority_to_insert,
_Scheduler_SMP_Insert_priority_fifo_order
);
}
@@ -130,12 +138,16 @@ static void _Scheduler_simple_SMP_Insert_ready_lifo(
Scheduler_Node *node_to_insert
)
{
- Scheduler_simple_SMP_Context *self =
- _Scheduler_simple_SMP_Get_self( context );
+ Scheduler_simple_SMP_Context *self;
+ Priority_Control priority_to_insert;
+
+ self = _Scheduler_simple_SMP_Get_self( context );
+ priority_to_insert = _Scheduler_SMP_Node_priority( node_to_insert );
_Chain_Insert_ordered_unprotected(
&self->Ready,
&node_to_insert->Node.Chain,
+ &priority_to_insert,
_Scheduler_SMP_Insert_priority_lifo_order
);
}
@@ -145,12 +157,16 @@ static void _Scheduler_simple_SMP_Insert_ready_fifo(
Scheduler_Node *node_to_insert
)
{
- Scheduler_simple_SMP_Context *self =
- _Scheduler_simple_SMP_Get_self( context );
+ Scheduler_simple_SMP_Context *self;
+ Priority_Control priority_to_insert;
+
+ self = _Scheduler_simple_SMP_Get_self( context );
+ priority_to_insert = _Scheduler_SMP_Node_priority( node_to_insert );
_Chain_Insert_ordered_unprotected(
&self->Ready,
&node_to_insert->Node.Chain,
+ &priority_to_insert,
_Scheduler_SMP_Insert_priority_fifo_order
);
}
diff --git a/cpukit/score/src/schedulerstrongapa.c b/cpukit/score/src/schedulerstrongapa.c
index d5bfed74f4..57ffb61367 100644
--- a/cpukit/score/src/schedulerstrongapa.c
+++ b/cpukit/score/src/schedulerstrongapa.c
@@ -64,19 +64,23 @@ static void _Scheduler_strong_APA_Move_from_ready_to_scheduled(
Scheduler_Node *ready_to_scheduled
)
{
- Scheduler_strong_APA_Context *self =
- _Scheduler_strong_APA_Get_self( context );
- Scheduler_strong_APA_Node *node =
- _Scheduler_strong_APA_Node_downcast( ready_to_scheduled );
+ Scheduler_strong_APA_Context *self;
+ Scheduler_strong_APA_Node *node;
+ Priority_Control priority;
+
+ self = _Scheduler_strong_APA_Get_self( context );
+ node = _Scheduler_strong_APA_Node_downcast( ready_to_scheduled );
_Scheduler_priority_Ready_queue_extract(
&node->Base.Base.Node.Chain,
&node->Ready_queue,
&self->Bit_map
);
+ priority = node->Base.priority;
_Chain_Insert_ordered_unprotected(
&self->Base.Scheduled,
&node->Base.Base.Node.Chain,
+ &priority,
_Scheduler_SMP_Insert_priority_fifo_order
);
}