Diffstat (limited to 'cpukit/score/src/scheduleredfsmp.c')
-rw-r--r--  cpukit/score/src/scheduleredfsmp.c  376
1 file changed, 286 insertions(+), 90 deletions(-)
diff --git a/cpukit/score/src/scheduleredfsmp.c b/cpukit/score/src/scheduleredfsmp.c
index 13d512118e..07b28042d7 100644
--- a/cpukit/score/src/scheduleredfsmp.c
+++ b/cpukit/score/src/scheduleredfsmp.c
@@ -1,3 +1,5 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
/**
* @file
*
@@ -11,15 +13,33 @@
* _Scheduler_EDF_SMP_Remove_processor(), _Scheduler_EDF_SMP_Set_affinity(),
* _Scheduler_EDF_SMP_Start_idle(), _Scheduler_EDF_SMP_Unblock(),
* _Scheduler_EDF_SMP_Unpin(), _Scheduler_EDF_SMP_Update_priority(),
- * _Scheduler_EDF_SMP_Withdraw_node(), and _Scheduler_EDF_SMP_Yield().
+ * _Scheduler_EDF_SMP_Withdraw_node(), _Scheduler_EDF_SMP_Make_sticky(),
+ * _Scheduler_EDF_SMP_Clean_sticky(), and _Scheduler_EDF_SMP_Yield().
*/
/*
- * Copyright (c) 2017 embedded brains GmbH.
+ * Copyright (c) 2017 embedded brains GmbH & Co. KG
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
*
- * The license and distribution terms for this file may be
- * found in the file LICENSE in this distribution or at
- * http://www.rtems.org/license/LICENSE.
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
*/
#ifdef HAVE_CONFIG_H
@@ -66,6 +86,28 @@ static inline bool _Scheduler_EDF_SMP_Priority_less_equal(
return prio_left <= prio_right;
}
+static inline bool _Scheduler_EDF_SMP_Overall_less_equal(
+ const void *key,
+ const Chain_Node *to_insert,
+ const Chain_Node *next
+)
+{
+ Priority_Control insert_priority;
+ Priority_Control next_priority;
+ const Scheduler_EDF_SMP_Node *node_to_insert;
+ const Scheduler_EDF_SMP_Node *node_next;
+
+ insert_priority = *(const Priority_Control *) key;
+ insert_priority = SCHEDULER_PRIORITY_PURIFY( insert_priority );
+ node_to_insert = (const Scheduler_EDF_SMP_Node *) to_insert;
+ node_next = (const Scheduler_EDF_SMP_Node *) next;
+ next_priority = node_next->Base.priority;
+
+ return insert_priority < next_priority ||
+ ( insert_priority == next_priority &&
+ node_to_insert->generation <= node_next->generation );
+}
+
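[Note: the comparator added above makes the scheduled chain a totally ordered list keyed by (purified priority, generation). A minimal standalone model of that order, using plain structs in place of the RTEMS types (Model_Node and main() are illustrative only, not part of the commit):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef struct {
  uint64_t priority;   /* purified EDF priority, lower is more urgent */
  int64_t  generation; /* stamp taken from generations[ 0 or 1 ] */
} Model_Node;

static bool model_less_equal( const Model_Node *a, const Model_Node *b )
{
  return a->priority < b->priority ||
    ( a->priority == b->priority && a->generation <= b->generation );
}

int main( void )
{
  /* Equal priority: the node appended earlier has the smaller stamp and
     keeps its place, so the order is FIFO within a priority group. */
  Model_Node first  = { .priority = 10, .generation = 1 };
  Model_Node second = { .priority = 10, .generation = 2 };

  printf( "first before second: %d\n", model_less_equal( &first, &second ) );
  return 0;
}
]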
void _Scheduler_EDF_SMP_Initialize( const Scheduler_Control *scheduler )
{
Scheduler_EDF_SMP_Context *self =
@@ -196,21 +238,21 @@ static inline Scheduler_Node *_Scheduler_EDF_SMP_Get_highest_ready(
return &highest_ready->Base.Base;
}
-static inline void _Scheduler_EDF_SMP_Set_scheduled(
+static inline void _Scheduler_EDF_SMP_Set_allocated(
Scheduler_EDF_SMP_Context *self,
- Scheduler_EDF_SMP_Node *scheduled,
+ Scheduler_EDF_SMP_Node *allocated,
const Per_CPU_Control *cpu
)
{
- self->Ready[ _Per_CPU_Get_index( cpu ) + 1 ].scheduled = scheduled;
+ self->Ready[ _Per_CPU_Get_index( cpu ) + 1 ].allocated = allocated;
}
-static inline Scheduler_EDF_SMP_Node *_Scheduler_EDF_SMP_Get_scheduled(
+static inline Scheduler_EDF_SMP_Node *_Scheduler_EDF_SMP_Get_allocated(
const Scheduler_EDF_SMP_Context *self,
uint8_t rqi
)
{
- return self->Ready[ rqi ].scheduled;
+ return self->Ready[ rqi ].allocated;
}
static inline Scheduler_Node *_Scheduler_EDF_SMP_Get_lowest_scheduled(
@@ -226,45 +268,106 @@ static inline Scheduler_Node *_Scheduler_EDF_SMP_Get_lowest_scheduled(
if ( rqi != 0 ) {
Scheduler_EDF_SMP_Context *self;
- Scheduler_EDF_SMP_Node *node;
+ Scheduler_EDF_SMP_Node *affine_scheduled;
self = _Scheduler_EDF_SMP_Get_self( context );
- node = _Scheduler_EDF_SMP_Get_scheduled( self, rqi );
+ affine_scheduled = self->Ready[ rqi ].affine_scheduled;
- if ( node->ready_queue_index > 0 ) {
- _Assert( node->ready_queue_index == rqi );
- return &node->Base.Base;
+ if ( affine_scheduled != NULL ) {
+ _Assert( affine_scheduled->ready_queue_index == rqi );
+ return &affine_scheduled->Base.Base;
}
}
return _Scheduler_SMP_Get_lowest_scheduled( context, filter_base );
}
-static inline void _Scheduler_EDF_SMP_Insert_ready(
+static inline void _Scheduler_EDF_SMP_Update_generation(
Scheduler_Context *context,
Scheduler_Node *node_base,
Priority_Control insert_priority
)
{
+ Scheduler_EDF_SMP_Context *self;
+ Scheduler_EDF_SMP_Node *node;
+ int generation_index;
+ int increment;
+ int64_t generation;
+
+ self = _Scheduler_EDF_SMP_Get_self( context );
+ node = _Scheduler_EDF_SMP_Node_downcast( node_base );
+ generation_index = SCHEDULER_PRIORITY_IS_APPEND( insert_priority );
+ increment = ( generation_index << 1 ) - 1;
+
+ generation = self->generations[ generation_index ];
+ node->generation = generation;
+ self->generations[ generation_index ] = generation + increment;
+}
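[Note: the increment trick above maps the two insertion kinds onto the two counters. SCHEDULER_PRIORITY_IS_APPEND() yields 0 or 1, and ( index << 1 ) - 1 turns that into -1 or +1: the prepend counter counts down, so the most recently prepended node gets the smallest stamp and sorts first among equals, while the append counter counts up and preserves FIFO order. A compilable sketch of just the counter arithmetic, with no RTEMS dependencies:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main( void )
{
  int64_t generations[ 2 ] = { 0, 0 };
  int     gi;

  for ( gi = 0; gi < 2; ++gi ) {
    int increment = ( gi << 1 ) - 1;  /* index 0 -> -1, index 1 -> +1 */
    int i;

    for ( i = 0; i < 3; ++i ) {
      printf( "index %d stamps %" PRId64 "\n", gi, generations[ gi ] );
      generations[ gi ] += increment;
    }
  }

  return 0;
}
]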
+
+static inline void _Scheduler_EDF_SMP_Insert_scheduled(
+ Scheduler_Context *context,
+ Scheduler_Node *node_base,
+ Priority_Control priority_to_insert
+)
+{
Scheduler_EDF_SMP_Context *self;
Scheduler_EDF_SMP_Node *node;
uint8_t rqi;
Scheduler_EDF_SMP_Ready_queue *ready_queue;
- int generation_index;
- int increment;
- int64_t generation;
self = _Scheduler_EDF_SMP_Get_self( context );
node = _Scheduler_EDF_SMP_Node_downcast( node_base );
rqi = node->ready_queue_index;
- generation_index = SCHEDULER_PRIORITY_IS_APPEND( insert_priority );
- increment = ( generation_index << 1 ) - 1;
ready_queue = &self->Ready[ rqi ];
- generation = self->generations[ generation_index ];
- node->generation = generation;
- self->generations[ generation_index ] = generation + increment;
+ _Chain_Insert_ordered_unprotected(
+ &self->Base.Scheduled,
+ &node_base->Node.Chain,
+ &priority_to_insert,
+ _Scheduler_EDF_SMP_Overall_less_equal
+ );
+
+ if ( rqi != 0 ) {
+ ready_queue->affine_scheduled = node;
+ if ( !_RBTree_Is_empty( &ready_queue->Queue ) ) {
+ _Chain_Extract_unprotected( &ready_queue->Node );
+ }
+ }
+}
+
+static inline void _Scheduler_EDF_SMP_Activate_ready_queue_if_necessary(
+ Scheduler_EDF_SMP_Context *self,
+ uint8_t rqi,
+ Scheduler_EDF_SMP_Ready_queue *ready_queue
+)
+{
+ if (
+ rqi != 0 &&
+ _RBTree_Is_empty( &ready_queue->Queue ) &&
+ ready_queue->affine_scheduled == NULL
+ ) {
+ _Chain_Append_unprotected( &self->Affine_queues, &ready_queue->Node );
+ }
+}
+
+static inline void _Scheduler_EDF_SMP_Insert_ready(
+ Scheduler_Context *context,
+ Scheduler_Node *node_base,
+ Priority_Control insert_priority
+)
+{
+ Scheduler_EDF_SMP_Context *self;
+ Scheduler_EDF_SMP_Node *node;
+ uint8_t rqi;
+ Scheduler_EDF_SMP_Ready_queue *ready_queue;
+
+ self = _Scheduler_EDF_SMP_Get_self( context );
+ node = _Scheduler_EDF_SMP_Node_downcast( node_base );
+ rqi = node->ready_queue_index;
+ ready_queue = &self->Ready[ rqi ];
+
+ _Scheduler_EDF_SMP_Activate_ready_queue_if_necessary( self, rqi, ready_queue );
_RBTree_Initialize_node( &node->Base.Base.Node.RBTree );
_RBTree_Insert_inline(
&ready_queue->Queue,
@@ -272,16 +375,6 @@ static inline void _Scheduler_EDF_SMP_Insert_ready(
&insert_priority,
_Scheduler_EDF_SMP_Priority_less_equal
);
-
- if ( rqi != 0 && _Chain_Is_node_off_chain( &ready_queue->Node ) ) {
- Scheduler_EDF_SMP_Node *scheduled;
-
- scheduled = _Scheduler_EDF_SMP_Get_scheduled( self, rqi );
-
- if ( scheduled->ready_queue_index == 0 ) {
- _Chain_Append_unprotected( &self->Affine_queues, &ready_queue->Node );
- }
- }
}
static inline void _Scheduler_EDF_SMP_Extract_from_scheduled(
@@ -305,6 +398,8 @@ static inline void _Scheduler_EDF_SMP_Extract_from_scheduled(
if ( rqi != 0 && !_RBTree_Is_empty( &ready_queue->Queue ) ) {
_Chain_Append_unprotected( &self->Affine_queues, &ready_queue->Node );
}
+
+ ready_queue->affine_scheduled = NULL;
}
static inline void _Scheduler_EDF_SMP_Extract_from_ready(
@@ -328,10 +423,9 @@ static inline void _Scheduler_EDF_SMP_Extract_from_ready(
if (
rqi != 0
&& _RBTree_Is_empty( &ready_queue->Queue )
- && !_Chain_Is_node_off_chain( &ready_queue->Node )
+ && ready_queue->affine_scheduled == NULL
) {
_Chain_Extract_unprotected( &ready_queue->Node );
- _Chain_Set_off_chain( &ready_queue->Node );
}
}
@@ -340,15 +434,21 @@ static inline void _Scheduler_EDF_SMP_Move_from_scheduled_to_ready(
Scheduler_Node *scheduled_to_ready
)
{
- Priority_Control insert_priority;
+ Scheduler_EDF_SMP_Context *self;
+ Scheduler_EDF_SMP_Node *node;
+ uint8_t rqi;
+ Scheduler_EDF_SMP_Ready_queue *ready_queue;
- _Scheduler_SMP_Extract_from_scheduled( context, scheduled_to_ready );
- insert_priority = _Scheduler_SMP_Node_priority( scheduled_to_ready );
- _Scheduler_EDF_SMP_Insert_ready(
- context,
- scheduled_to_ready,
- insert_priority
- );
+ _Scheduler_EDF_SMP_Extract_from_scheduled( context, scheduled_to_ready );
+
+ self = _Scheduler_EDF_SMP_Get_self( context );
+ node = _Scheduler_EDF_SMP_Node_downcast( scheduled_to_ready );
+ rqi = node->ready_queue_index;
+ ready_queue = &self->Ready[ rqi ];
+
+ _Scheduler_EDF_SMP_Activate_ready_queue_if_necessary( self, rqi, ready_queue );
+ _RBTree_Initialize_node( &node->Base.Base.Node.RBTree );
+ _RBTree_Prepend( &ready_queue->Queue, &node->Base.Base.Node.RBTree );
}
static inline void _Scheduler_EDF_SMP_Move_from_ready_to_scheduled(
@@ -361,64 +461,78 @@ static inline void _Scheduler_EDF_SMP_Move_from_ready_to_scheduled(
_Scheduler_EDF_SMP_Extract_from_ready( context, ready_to_scheduled );
insert_priority = _Scheduler_SMP_Node_priority( ready_to_scheduled );
insert_priority = SCHEDULER_PRIORITY_APPEND( insert_priority );
- _Scheduler_SMP_Insert_scheduled(
+ _Scheduler_EDF_SMP_Insert_scheduled(
context,
ready_to_scheduled,
insert_priority
);
}
+static inline Scheduler_Node *_Scheduler_EDF_SMP_Get_idle( void *arg )
+{
+ Scheduler_EDF_SMP_Context *self;
+ Scheduler_Node *lowest_ready;
+
+ self = _Scheduler_EDF_SMP_Get_self( arg );
+ lowest_ready = (Scheduler_Node *) _RBTree_Maximum( &self->Ready[ 0 ].Queue );
+ _Assert( lowest_ready != NULL );
+ _RBTree_Extract( &self->Ready[ 0 ].Queue, &lowest_ready->Node.RBTree );
+ _Chain_Initialize_node( &lowest_ready->Node.Chain );
+
+ return lowest_ready;
+}
+
+static inline void _Scheduler_EDF_SMP_Release_idle(
+ Scheduler_Node *node,
+ void *arg
+)
+{
+ Scheduler_EDF_SMP_Context *self;
+
+ self = _Scheduler_EDF_SMP_Get_self( arg );
+ _RBTree_Initialize_node( &node->Node.RBTree );
+ _RBTree_Append( &self->Ready[ 0 ].Queue, &node->Node.RBTree );
+}
+
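[Note: Get_idle and Release_idle above treat ready queue 0 as the idle pool. An idle node always has the lowest urgency of all, so it sits at the maximum end of the red-black tree, where _RBTree_Maximum() finds it and _RBTree_Append() puts it back. A toy model with an ordered array standing in for the tree (queue, get_idle and release_idle are made-up names for illustration):

#include <assert.h>
#include <stdio.h>

#define CAPACITY 8

/* Most urgent node first; idle nodes live at the maximum end. */
static int queue[ CAPACITY ];
static int length;

static int get_idle( void )           /* _RBTree_Maximum + _RBTree_Extract */
{
  assert( length > 0 );
  return queue[ --length ];
}

static void release_idle( int node )  /* _RBTree_Append */
{
  queue[ length++ ] = node;
}

int main( void )
{
  release_idle( 900 );
  release_idle( 901 );
  assert( get_idle() == 901 );
  assert( get_idle() == 900 );
  puts( "idle pool ok" );
  return 0;
}
]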
static inline void _Scheduler_EDF_SMP_Allocate_processor(
Scheduler_Context *context,
Scheduler_Node *scheduled_base,
- Scheduler_Node *victim_base,
- Per_CPU_Control *victim_cpu
+ Per_CPU_Control *cpu
)
{
Scheduler_EDF_SMP_Context *self;
Scheduler_EDF_SMP_Node *scheduled;
uint8_t rqi;
- (void) victim_base;
self = _Scheduler_EDF_SMP_Get_self( context );
scheduled = _Scheduler_EDF_SMP_Node_downcast( scheduled_base );
rqi = scheduled->ready_queue_index;
if ( rqi != 0 ) {
- Scheduler_EDF_SMP_Ready_queue *ready_queue;
- Per_CPU_Control *desired_cpu;
+ Per_CPU_Control *affine_cpu;
- ready_queue = &self->Ready[ rqi ];
+ affine_cpu = _Per_CPU_Get_by_index( rqi - 1 );
- if ( !_Chain_Is_node_off_chain( &ready_queue->Node ) ) {
- _Chain_Extract_unprotected( &ready_queue->Node );
- _Chain_Set_off_chain( &ready_queue->Node );
- }
-
- desired_cpu = _Per_CPU_Get_by_index( rqi - 1 );
-
- if ( victim_cpu != desired_cpu ) {
+ if ( cpu != affine_cpu ) {
Scheduler_EDF_SMP_Node *node;
- node = _Scheduler_EDF_SMP_Get_scheduled( self, rqi );
+ node = _Scheduler_EDF_SMP_Get_allocated( self, rqi );
_Assert( node->ready_queue_index == 0 );
- _Scheduler_EDF_SMP_Set_scheduled( self, node, victim_cpu );
+ _Scheduler_EDF_SMP_Set_allocated( self, node, cpu );
_Scheduler_SMP_Allocate_processor_exact(
context,
&node->Base.Base,
- NULL,
- victim_cpu
+ cpu
);
- victim_cpu = desired_cpu;
+ cpu = affine_cpu;
}
}
- _Scheduler_EDF_SMP_Set_scheduled( self, scheduled, victim_cpu );
+ _Scheduler_EDF_SMP_Set_allocated( self, scheduled, cpu );
_Scheduler_SMP_Allocate_processor_exact(
context,
&scheduled->Base.Base,
- NULL,
- victim_cpu
+ cpu
);
}
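[Note: when the chosen node is pinned ( rqi != 0 ) but the processor being allocated is not its affine one, the function above swaps: the freely migrating node currently allocated to the affine processor moves to the offered processor, and the pinned node takes its own. A flat model of that swap with an array in place of the per-CPU structures (allocate() and allocated[] are hypothetical stand-ins):

#include <assert.h>
#include <stdio.h>

#define CPU_COUNT 2

/* allocated[ i ] holds the id of the node allocated to processor i;
 * rqi == 0 means the node may run anywhere, otherwise rqi - 1 is its
 * one allowed processor. */
static int allocated[ CPU_COUNT ];

static void allocate( int node, int rqi, int cpu )
{
  if ( rqi != 0 ) {
    int affine_cpu = rqi - 1;

    if ( cpu != affine_cpu ) {
      /* Move the freely migrating node off the affine processor. */
      allocated[ cpu ] = allocated[ affine_cpu ];
      cpu = affine_cpu;
    }
  }

  allocated[ cpu ] = node;
}

int main( void )
{
  allocated[ 0 ] = 100;  /* node 100 runs on cpu 0, no affinity */
  allocated[ 1 ] = 101;

  /* Node 102 is pinned to cpu 0 but the scheduler offered cpu 1:
     node 100 migrates to cpu 1 and node 102 lands on cpu 0. */
  allocate( 102, 1, 1 );
  assert( allocated[ 0 ] == 102 && allocated[ 1 ] == 100 );
  puts( "swap ok" );
  return 0;
}
]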
@@ -438,7 +552,8 @@ void _Scheduler_EDF_SMP_Block(
_Scheduler_EDF_SMP_Extract_from_ready,
_Scheduler_EDF_SMP_Get_highest_ready,
_Scheduler_EDF_SMP_Move_from_ready_to_scheduled,
- _Scheduler_EDF_SMP_Allocate_processor
+ _Scheduler_EDF_SMP_Allocate_processor,
+ _Scheduler_EDF_SMP_Get_idle
);
}
@@ -448,36 +563,44 @@ static inline bool _Scheduler_EDF_SMP_Enqueue(
Priority_Control insert_priority
)
{
+ _Scheduler_EDF_SMP_Update_generation( context, node, insert_priority );
+
return _Scheduler_SMP_Enqueue(
context,
node,
insert_priority,
- _Scheduler_SMP_Priority_less_equal,
+ _Scheduler_EDF_SMP_Overall_less_equal,
_Scheduler_EDF_SMP_Insert_ready,
- _Scheduler_SMP_Insert_scheduled,
+ _Scheduler_EDF_SMP_Insert_scheduled,
_Scheduler_EDF_SMP_Move_from_scheduled_to_ready,
+ _Scheduler_EDF_SMP_Move_from_ready_to_scheduled,
_Scheduler_EDF_SMP_Get_lowest_scheduled,
- _Scheduler_EDF_SMP_Allocate_processor
+ _Scheduler_EDF_SMP_Allocate_processor,
+ _Scheduler_EDF_SMP_Get_idle,
+ _Scheduler_EDF_SMP_Release_idle
);
}
-static inline bool _Scheduler_EDF_SMP_Enqueue_scheduled(
+static inline void _Scheduler_EDF_SMP_Enqueue_scheduled(
Scheduler_Context *context,
Scheduler_Node *node,
Priority_Control insert_priority
)
{
- return _Scheduler_SMP_Enqueue_scheduled(
+ _Scheduler_EDF_SMP_Update_generation( context, node, insert_priority );
+ _Scheduler_SMP_Enqueue_scheduled(
context,
node,
insert_priority,
- _Scheduler_SMP_Priority_less_equal,
+ _Scheduler_EDF_SMP_Overall_less_equal,
_Scheduler_EDF_SMP_Extract_from_ready,
_Scheduler_EDF_SMP_Get_highest_ready,
_Scheduler_EDF_SMP_Insert_ready,
- _Scheduler_SMP_Insert_scheduled,
+ _Scheduler_EDF_SMP_Insert_scheduled,
_Scheduler_EDF_SMP_Move_from_ready_to_scheduled,
- _Scheduler_EDF_SMP_Allocate_processor
+ _Scheduler_EDF_SMP_Allocate_processor,
+ _Scheduler_EDF_SMP_Get_idle,
+ _Scheduler_EDF_SMP_Release_idle
);
}
@@ -494,7 +617,8 @@ void _Scheduler_EDF_SMP_Unblock(
thread,
node,
_Scheduler_EDF_SMP_Do_update,
- _Scheduler_EDF_SMP_Enqueue
+ _Scheduler_EDF_SMP_Enqueue,
+ _Scheduler_EDF_SMP_Release_idle
);
}
@@ -508,12 +632,13 @@ static inline bool _Scheduler_EDF_SMP_Do_ask_for_help(
context,
the_thread,
node,
- _Scheduler_SMP_Priority_less_equal,
+ _Scheduler_EDF_SMP_Overall_less_equal,
_Scheduler_EDF_SMP_Insert_ready,
- _Scheduler_SMP_Insert_scheduled,
+ _Scheduler_EDF_SMP_Insert_scheduled,
_Scheduler_EDF_SMP_Move_from_scheduled_to_ready,
_Scheduler_EDF_SMP_Get_lowest_scheduled,
- _Scheduler_EDF_SMP_Allocate_processor
+ _Scheduler_EDF_SMP_Allocate_processor,
+ _Scheduler_EDF_SMP_Release_idle
);
}
@@ -529,6 +654,7 @@ void _Scheduler_EDF_SMP_Update_priority(
context,
thread,
node,
+ _Scheduler_EDF_SMP_Extract_from_scheduled,
_Scheduler_EDF_SMP_Extract_from_ready,
_Scheduler_EDF_SMP_Do_update,
_Scheduler_EDF_SMP_Enqueue,
@@ -578,10 +704,47 @@ void _Scheduler_EDF_SMP_Withdraw_node(
the_thread,
node,
next_state,
+ _Scheduler_EDF_SMP_Extract_from_scheduled,
+ _Scheduler_EDF_SMP_Extract_from_ready,
+ _Scheduler_EDF_SMP_Get_highest_ready,
+ _Scheduler_EDF_SMP_Move_from_ready_to_scheduled,
+ _Scheduler_EDF_SMP_Allocate_processor,
+ _Scheduler_EDF_SMP_Get_idle
+ );
+}
+
+void _Scheduler_EDF_SMP_Make_sticky(
+ const Scheduler_Control *scheduler,
+ Thread_Control *the_thread,
+ Scheduler_Node *node
+)
+{
+ _Scheduler_SMP_Make_sticky(
+ scheduler,
+ the_thread,
+ node,
+ _Scheduler_EDF_SMP_Do_update,
+ _Scheduler_EDF_SMP_Enqueue
+ );
+}
+
+void _Scheduler_EDF_SMP_Clean_sticky(
+ const Scheduler_Control *scheduler,
+ Thread_Control *the_thread,
+ Scheduler_Node *node
+)
+{
+ _Scheduler_SMP_Clean_sticky(
+ scheduler,
+ the_thread,
+ node,
+ _Scheduler_EDF_SMP_Extract_from_scheduled,
_Scheduler_EDF_SMP_Extract_from_ready,
_Scheduler_EDF_SMP_Get_highest_ready,
_Scheduler_EDF_SMP_Move_from_ready_to_scheduled,
- _Scheduler_EDF_SMP_Allocate_processor
+ _Scheduler_EDF_SMP_Allocate_processor,
+ _Scheduler_EDF_SMP_Get_idle,
+ _Scheduler_EDF_SMP_Release_idle
);
}
@@ -596,7 +759,12 @@ static inline void _Scheduler_EDF_SMP_Register_idle(
self = _Scheduler_EDF_SMP_Get_self( context );
idle = _Scheduler_EDF_SMP_Node_downcast( idle_base );
- _Scheduler_EDF_SMP_Set_scheduled( self, idle, cpu );
+ _Scheduler_EDF_SMP_Set_allocated( self, idle, cpu );
+ _Scheduler_EDF_SMP_Update_generation(
+ context,
+ idle_base,
+ PRIORITY_GROUP_LAST
+ );
}
void _Scheduler_EDF_SMP_Add_processor(
@@ -625,8 +793,11 @@ Thread_Control *_Scheduler_EDF_SMP_Remove_processor(
return _Scheduler_SMP_Remove_processor(
context,
cpu,
+ _Scheduler_EDF_SMP_Extract_from_scheduled,
_Scheduler_EDF_SMP_Extract_from_ready,
- _Scheduler_EDF_SMP_Enqueue
+ _Scheduler_EDF_SMP_Enqueue,
+ _Scheduler_EDF_SMP_Get_idle,
+ _Scheduler_EDF_SMP_Release_idle
);
}
@@ -642,6 +813,7 @@ void _Scheduler_EDF_SMP_Yield(
context,
thread,
node,
+ _Scheduler_EDF_SMP_Extract_from_scheduled,
_Scheduler_EDF_SMP_Extract_from_ready,
_Scheduler_EDF_SMP_Enqueue,
_Scheduler_EDF_SMP_Enqueue_scheduled
@@ -732,20 +904,41 @@ Status_Control _Scheduler_EDF_SMP_Set_affinity(
{
Scheduler_Context *context;
Scheduler_EDF_SMP_Node *node;
- Processor_mask local_affinity;
uint8_t rqi;
context = _Scheduler_Get_context( scheduler );
- _Processor_mask_And( &local_affinity, &context->Processors, affinity );
- if ( _Processor_mask_Is_zero( &local_affinity ) ) {
- return STATUS_INVALID_NUMBER;
- }
+  /*
+   * We support a thread-to-processor affinity of either all online
+   * processors or exactly one processor.  This restriction avoids issues
+   * when processors are added to or removed from the scheduler.
+   */
if ( _Processor_mask_Is_equal( affinity, &_SMP_Online_processors ) ) {
rqi = 0;
} else {
- rqi = _Processor_mask_Find_last_set( &local_affinity );
+ Processor_mask local_affinity;
+ Processor_mask one_to_one;
+ uint32_t last;
+
+ _Processor_mask_And( &local_affinity, &context->Processors, affinity );
+
+ if ( _Processor_mask_Is_zero( &local_affinity ) ) {
+ return STATUS_INVALID_NUMBER;
+ }
+
+ last = _Processor_mask_Find_last_set( affinity );
+ _Processor_mask_From_index( &one_to_one, last - 1 );
+
+ /*
+ * Use the global affinity set and not the affinity set local to the
+ * scheduler to check for a one-to-one affinity.
+ */
+ if ( !_Processor_mask_Is_equal( &one_to_one, affinity ) ) {
+ return STATUS_INVALID_NUMBER;
+ }
+
+ rqi = last;
}
node = _Scheduler_EDF_SMP_Node_downcast( node_base );
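[Note: the one-to-one check above builds a mask holding only the last set bit of the affinity and compares it with the whole set; the two are equal exactly when a single processor is selected. A standalone sketch, assuming _Processor_mask_Find_last_set() returns a one-based index as the rqi arithmetic implies (find_last_set() here is a simplified stand-in):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* One-based index of the highest set bit, 0 for an empty mask. */
static int find_last_set( uint32_t mask )
{
  int last = 0;
  while ( mask != 0 ) { ++last; mask >>= 1; }
  return last;
}

int main( void )
{
  uint32_t affinity = 1u << 2;           /* only processor 2 */
  int      last = find_last_set( affinity );
  uint32_t one_to_one = 1u << ( last - 1 );

  assert( one_to_one == affinity );      /* exactly one processor set */
  printf( "ready queue index: %d\n", last );  /* rqi = 3 -> cpu index 2 */
  return 0;
}
]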
@@ -758,11 +951,14 @@ Status_Control _Scheduler_EDF_SMP_Set_affinity(
node_base,
&rqi,
_Scheduler_EDF_SMP_Do_set_affinity,
+ _Scheduler_EDF_SMP_Extract_from_scheduled,
_Scheduler_EDF_SMP_Extract_from_ready,
_Scheduler_EDF_SMP_Get_highest_ready,
_Scheduler_EDF_SMP_Move_from_ready_to_scheduled,
_Scheduler_EDF_SMP_Enqueue,
- _Scheduler_EDF_SMP_Allocate_processor
+ _Scheduler_EDF_SMP_Allocate_processor,
+ _Scheduler_EDF_SMP_Get_idle,
+ _Scheduler_EDF_SMP_Release_idle
);
}