author     Richi Dubey <richidubey@gmail.com>  2021-06-16 11:30:16 +0530
committer  Sebastian Huber <sebastian.huber@embedded-brains.de>  2021-06-24 14:16:21 +0200
commit     6c23252cdd8ea63d0fe13d9f99b7befbf070fe80 (patch)
tree       67fc1184613942a254d2db601fc99af52cfa7d2a /cpukit/score/src
parent     sparc: Simplify trap table initialization (diff)
download   rtems-6c23252cdd8ea63d0fe13d9f99b7befbf070fe80.tar.bz2
Update Strong APA Scheduler
This change allows higher priority tasks to be migrated when a lower
priority task that is limited by affinity constraints arrives.

Change the license to BSD-2-Clause according to the file history and the
re-licensing agreement.

Update #3053.
Diffstat (limited to 'cpukit/score/src')
-rw-r--r--  cpukit/score/src/schedulerstrongapa.c  981
1 file changed, 789 insertions(+), 192 deletions(-)
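
The heart of the update is a breadth-first search over processors: starting
from the CPUs in a task's affinity set, the scheduler follows chains of
affinity-compatible shifts until it reaches a processor whose task can be
displaced. The following stand-alone C sketch illustrates only that
reachability search; the task table, priorities, and function names are
hypothetical and are not taken from the RTEMS implementation in the diff
below.

#include <inttypes.h>
#include <stdbool.h>
#include <stdio.h>

#define CPU_COUNT 4u

typedef struct {
  uint32_t priority;              /* lower number = higher priority      */
  bool     affinity[ CPU_COUNT ]; /* CPUs this task is allowed to run on */
} task;

/* Hypothetical snapshot: one task currently executing per CPU. */
static const task executing[ CPU_COUNT ] = {
  { 2, { true,  true,  false, false } },
  { 5, { false, true,  true,  false } },
  { 7, { false, false, true,  true  } },
  { 3, { true,  false, false, true  } }
};

/*
 * BFS over CPUs: seed the queue with the CPUs in the arriving task's
 * affinity set, then add the CPUs that each reachable executing task could
 * be shifted to.  Returns the priority of the lowest priority task that is
 * reachable through such a chain of shifts.
 */
static uint32_t lowest_reachable_priority( const task *arriving )
{
  uint32_t queue[ CPU_COUNT ];
  bool     visited[ CPU_COUNT ] = { false };
  uint32_t front = 0;
  uint32_t rear = 0;
  uint32_t lowest = 0;            /* numerically largest priority so far */

  for ( uint32_t cpu = 0; cpu < CPU_COUNT; ++cpu ) {
    if ( arriving->affinity[ cpu ] ) {
      queue[ rear++ ] = cpu;
      visited[ cpu ] = true;
    }
  }

  while ( front < rear ) {
    uint32_t    cpu = queue[ front++ ];
    const task *t = &executing[ cpu ];

    if ( t->priority > lowest ) {
      lowest = t->priority;
    }

    for ( uint32_t next = 0; next < CPU_COUNT; ++next ) {
      if ( t->affinity[ next ] && !visited[ next ] ) {
        queue[ rear++ ] = next;   /* t could be shifted onto this CPU */
        visited[ next ] = true;
      }
    }
  }

  return lowest;
}

int main( void )
{
  /* Arriving task: pinned to CPU 0 only, priority 4. */
  const task arriving = { 4, { true, false, false, false } };
  uint32_t   lowest = lowest_reachable_priority( &arriving );

  if ( arriving.priority < lowest ) {
    printf( "schedule it: a task of priority %" PRIu32 " can be displaced\n",
            lowest );
  } else {
    printf( "keep it ready: nothing reachable has lower priority\n" );
  }

  return 0;
}

In this snapshot, even a task restricted to CPU 0 can be scheduled: the
priority 2 and priority 5 tasks shift along the chain CPU 0 -> CPU 1 ->
CPU 2, and the priority 7 task on CPU 2 is the one displaced. Without the
chain search, a priority 4 task pinned to CPU 0 would simply wait behind the
priority 2 task.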
diff --git a/cpukit/score/src/schedulerstrongapa.c b/cpukit/score/src/schedulerstrongapa.c
index dcfb55601a..845d19d1a8 100644
--- a/cpukit/score/src/schedulerstrongapa.c
+++ b/cpukit/score/src/schedulerstrongapa.c
@@ -1,3 +1,5 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
/**
* @file
*
@@ -5,27 +7,57 @@
*
* @brief This source file contains the implementation of
* _Scheduler_strong_APA_Add_processor(),
+ * _Scheduler_strong_APA_Allocate_processor(),
* _Scheduler_strong_APA_Ask_for_help(), _Scheduler_strong_APA_Block(),
- * _Scheduler_strong_APA_Initialize(),
+ * _Scheduler_strong_APA_Do_ask_for_help(),
+ * _Scheduler_strong_APA_Do_enqueue(),
+ * _Scheduler_strong_APA_Do_set_affinity(),
+ * _Scheduler_strong_APA_Do_update(), _Scheduler_strong_APA_Enqueue(),
+ * _Scheduler_strong_APA_Enqueue_scheduled(),
+ * _Scheduler_strong_APA_Extract_from_ready(),
+ * _Scheduler_strong_APA_Extract_from_scheduled(),
+ * _Scheduler_strong_APA_Find_highest_ready(),
+ * _Scheduler_strong_APA_Get_highest_ready(),
+ * _Scheduler_strong_APA_Get_lowest_reachable(),
+ * _Scheduler_strong_APA_Get_lowest_scheduled(),
+ * _Scheduler_strong_APA_Has_ready(),
+ * _Scheduler_strong_APA_Initialize(), _Scheduler_strong_APA_Insert_ready(),
+ * _Scheduler_strong_APA_Move_from_ready_to_scheduled(),
+ * _Scheduler_strong_APA_Move_from_scheduled_to_ready(),
* _Scheduler_strong_APA_Node_initialize(),
* _Scheduler_strong_APA_Reconsider_help_request(),
- * _Scheduler_strong_APA_Remove_processor(), _Scheduler_strong_APA_Unblock(),
- * _Scheduler_strong_APA_Update_priority(),
+ * _Scheduler_strong_APA_Register_idle(),
+ * _Scheduler_strong_APA_Remove_processor(),
+ * _Scheduler_strong_APA_Set_affinity(),
+ * _Scheduler_strong_APA_Set_scheduled(), _Scheduler_strong_APA_Start_idle(),
+ * _Scheduler_strong_APA_Unblock(), _Scheduler_strong_APA_Update_priority(),
* _Scheduler_strong_APA_Withdraw_node(), and _Scheduler_strong_APA_Yield().
*/
/*
- * Copyright (c) 2013, 2016 embedded brains GmbH. All rights reserved.
+ * Copyright (C) 2020 Richi Dubey
+ * Copyright (C) 2013, 2016 embedded brains GmbH (http://www.embedded-brains.de)
*
- * embedded brains GmbH
- * Dornierstr. 4
- * 82178 Puchheim
- * Germany
- * <rtems@embedded-brains.de>
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
*
- * The license and distribution terms for this file may be
- * found in the file LICENSE in this distribution or at
- * http://www.rtems.org/license/LICENSE.
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
*/
#ifdef HAVE_CONFIG_H
@@ -33,68 +65,220 @@
#endif
#include <rtems/score/schedulerstrongapa.h>
-#include <rtems/score/schedulerpriorityimpl.h>
#include <rtems/score/schedulersmpimpl.h>
+#include <rtems/score/assert.h>
-static Scheduler_strong_APA_Context *_Scheduler_strong_APA_Get_self(
- Scheduler_Context *context
-)
+#define STRONG_SCHEDULER_NODE_OF_CHAIN( node ) \
+ RTEMS_CONTAINER_OF( node, Scheduler_strong_APA_Node, Ready_node )
+
+static inline Scheduler_strong_APA_Context *
+_Scheduler_strong_APA_Get_context( const Scheduler_Control *scheduler )
+{
+ return (Scheduler_strong_APA_Context *) _Scheduler_Get_context( scheduler );
+}
+
+static inline Scheduler_strong_APA_Context *
+_Scheduler_strong_APA_Get_self( Scheduler_Context *context )
{
return (Scheduler_strong_APA_Context *) context;
}
-static Scheduler_strong_APA_Node *
+static inline Scheduler_strong_APA_Node *
_Scheduler_strong_APA_Node_downcast( Scheduler_Node *node )
{
return (Scheduler_strong_APA_Node *) node;
}
-static void _Scheduler_strong_APA_Move_from_scheduled_to_ready(
+static inline void _Scheduler_strong_APA_Do_update(
Scheduler_Context *context,
- Scheduler_Node *scheduled_to_ready
+ Scheduler_Node *node,
+ Priority_Control new_priority
)
{
- Scheduler_strong_APA_Context *self =
- _Scheduler_strong_APA_Get_self( context );
- Scheduler_strong_APA_Node *node =
- _Scheduler_strong_APA_Node_downcast( scheduled_to_ready );
-
- _Chain_Extract_unprotected( &node->Base.Base.Node.Chain );
- _Scheduler_priority_Ready_queue_enqueue_first(
- &node->Base.Base.Node.Chain,
- &node->Ready_queue,
- &self->Bit_map
- );
+ Scheduler_SMP_Node *smp_node;
+ (void) context;
+
+ smp_node = _Scheduler_SMP_Node_downcast( node );
+ _Scheduler_SMP_Node_update_priority( smp_node, new_priority );
}
-static void _Scheduler_strong_APA_Move_from_ready_to_scheduled(
- Scheduler_Context *context,
- Scheduler_Node *ready_to_scheduled
+/*
+ * Returns true if the Strong APA scheduler has ready nodes
+ * available for scheduling.
+ */
+static inline bool _Scheduler_strong_APA_Has_ready(
+ Scheduler_Context *context
)
{
Scheduler_strong_APA_Context *self;
+ const Chain_Node *tail;
+ Chain_Node *next;
Scheduler_strong_APA_Node *node;
- Priority_Control insert_priority;
self = _Scheduler_strong_APA_Get_self( context );
- node = _Scheduler_strong_APA_Node_downcast( ready_to_scheduled );
+ tail = _Chain_Immutable_tail( &self->Ready );
+ next = _Chain_First( &self->Ready );
+
+ while ( next != tail ) {
+ node = (Scheduler_strong_APA_Node *)STRONG_SCHEDULER_NODE_OF_CHAIN( next );
+
+ if (
+ _Scheduler_SMP_Node_state( &node->Base.Base ) ==
+ SCHEDULER_SMP_NODE_READY
+ ) {
+ return true;
+ }
+
+ next = _Chain_Next( next );
+ }
+
+ return false;
+}
+
+static inline void _Scheduler_strong_APA_Set_scheduled(
+ Scheduler_strong_APA_Context *self,
+ Scheduler_Node *executing,
+ const Per_CPU_Control *cpu
+)
+{
+ self->CPU[ _Per_CPU_Get_index( cpu ) ].executing = executing;
+}
+
+static inline Scheduler_Node *_Scheduler_strong_APA_Get_scheduled(
+ const Scheduler_strong_APA_Context *self,
+ const Per_CPU_Control *cpu
+)
+{
+ return self->CPU[ _Per_CPU_Get_index( cpu ) ].executing;
+}
+
+static inline void _Scheduler_strong_APA_Allocate_processor(
+ Scheduler_Context *context,
+ Scheduler_Node *scheduled_base,
+ Scheduler_Node *victim_base,
+ Per_CPU_Control *victim_cpu
+)
+{
+ Scheduler_strong_APA_Node *scheduled;
+ Scheduler_strong_APA_Context *self;
+
+ (void) victim_base;
+
+ scheduled = _Scheduler_strong_APA_Node_downcast( scheduled_base );
+ self = _Scheduler_strong_APA_Get_self( context );
- _Scheduler_priority_Ready_queue_extract(
- &node->Base.Base.Node.Chain,
- &node->Ready_queue,
- &self->Bit_map
+ _Scheduler_strong_APA_Set_scheduled( self, scheduled_base, victim_cpu );
+
+ _Scheduler_SMP_Allocate_processor_exact(
+ context,
+ &( scheduled->Base.Base ),
+ NULL,
+ victim_cpu
);
- insert_priority = _Scheduler_SMP_Node_priority( &node->Base.Base );
+}
+
+/*
+ * Finds and returns the highest priority ready node reachable through the
+ * CPU queue stored in self->CPU, bounded by the front and rear indices.
+ */
+static inline Scheduler_Node * _Scheduler_strong_APA_Find_highest_ready(
+ Scheduler_strong_APA_Context *self,
+ uint32_t front,
+ uint32_t rear
+)
+{
+ Scheduler_Node *highest_ready;
+ Scheduler_strong_APA_CPU *CPU;
+ const Chain_Node *tail;
+ Chain_Node *next;
+ Scheduler_strong_APA_Node *node;
+ Priority_Control min_priority_num;
+ Priority_Control curr_priority;
+ Per_CPU_Control *assigned_cpu;
+ Scheduler_SMP_Node_state curr_state;
+ Per_CPU_Control *curr_CPU;
+
+ CPU = self->CPU;
+  /*
+   * The first task accessed has nothing to compare its priority against,
+   * so it is the highest priority task witnessed so far.
+   */
+ min_priority_num = UINT64_MAX;
+
+ while ( front <= rear ) {
+ curr_CPU = CPU[ front++ ].cpu;
+
+ tail = _Chain_Immutable_tail( &self->Ready );
+ next = _Chain_First( &self->Ready );
+
+ while ( next != tail ) {
+ node = (Scheduler_strong_APA_Node*) STRONG_SCHEDULER_NODE_OF_CHAIN( next );
+ /*
+ * Check if the curr_CPU is in the affinity set of the node.
+ */
+ if (
+ _Processor_mask_Is_set( &node->Affinity, _Per_CPU_Get_index( curr_CPU ) )
+ ) {
+ curr_state = _Scheduler_SMP_Node_state( &node->Base.Base );
+
+ if ( curr_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
+ assigned_cpu = _Thread_Get_CPU( node->Base.Base.user );
+
+ if ( CPU[ _Per_CPU_Get_index( assigned_cpu ) ].visited == false ) {
+ CPU[ ++rear ].cpu = assigned_cpu;
+ CPU[ _Per_CPU_Get_index( assigned_cpu ) ].visited = true;
+            /*
+             * curr_CPU from the queue caused this node to add the CPU it is
+             * executing on to the queue.  This node might therefore be
+             * preempted because of curr_CPU, and if it is, curr_CPU is the
+             * CPU whose task this node should preempt in turn.
+             */
+ node->cpu_to_preempt = curr_CPU;
+ }
+ } else if ( curr_state == SCHEDULER_SMP_NODE_READY ) {
+ curr_priority = _Scheduler_Node_get_priority( &node->Base.Base );
+ curr_priority = SCHEDULER_PRIORITY_PURIFY( curr_priority );
+
+ if (
+ min_priority_num == UINT64_MAX ||
+ curr_priority < min_priority_num
+ ) {
+ min_priority_num = curr_priority;
+ highest_ready = &node->Base.Base;
+            /*
+             * In case curr_CPU is filter_cpu, store cpu_to_preempt so that
+             * control returns to the calling _Scheduler_SMP_* function
+             * instead of this function preempting the node itself.
+             */
+ node->cpu_to_preempt = curr_CPU;
+ }
+ }
+ }
+ next = _Chain_Next( next );
+ }
+ }
+
+ return highest_ready;
+}
+
+static inline void _Scheduler_strong_APA_Move_from_ready_to_scheduled(
+ Scheduler_Context *context,
+ Scheduler_Node *ready_to_scheduled
+)
+{
+ Priority_Control insert_priority;
+
+ insert_priority = _Scheduler_SMP_Node_priority( ready_to_scheduled );
insert_priority = SCHEDULER_PRIORITY_APPEND( insert_priority );
- _Chain_Insert_ordered_unprotected(
- &self->Base.Scheduled,
- &node->Base.Base.Node.Chain,
- &insert_priority,
- _Scheduler_SMP_Priority_less_equal
+ _Scheduler_SMP_Insert_scheduled(
+ context,
+ ready_to_scheduled,
+ insert_priority
);
}
-static void _Scheduler_strong_APA_Insert_ready(
+static inline void _Scheduler_strong_APA_Insert_ready(
Scheduler_Context *context,
Scheduler_Node *node_base,
Priority_Control insert_priority
@@ -106,228 +290,563 @@ static void _Scheduler_strong_APA_Insert_ready(
self = _Scheduler_strong_APA_Get_self( context );
node = _Scheduler_strong_APA_Node_downcast( node_base );
- if ( SCHEDULER_PRIORITY_IS_APPEND( insert_priority ) ) {
- _Scheduler_priority_Ready_queue_enqueue(
- &node->Base.Base.Node.Chain,
- &node->Ready_queue,
- &self->Bit_map
- );
- } else {
- _Scheduler_priority_Ready_queue_enqueue_first(
- &node->Base.Base.Node.Chain,
- &node->Ready_queue,
- &self->Bit_map
- );
+ if( _Chain_Is_node_off_chain( &node->Ready_node ) ) {
+ _Chain_Append_unprotected( &self->Ready, &node->Ready_node );
+ } else {
+ _Chain_Extract_unprotected( &node->Ready_node );
+ _Chain_Set_off_chain( &node->Ready_node );
+ _Chain_Append_unprotected( &self->Ready, &node->Ready_node );
}
}
-static void _Scheduler_strong_APA_Extract_from_ready(
+static inline void _Scheduler_strong_APA_Move_from_scheduled_to_ready(
Scheduler_Context *context,
- Scheduler_Node *the_thread
+ Scheduler_Node *scheduled_to_ready
)
{
- Scheduler_strong_APA_Context *self =
- _Scheduler_strong_APA_Get_self( context );
- Scheduler_strong_APA_Node *node =
- _Scheduler_strong_APA_Node_downcast( the_thread );
-
- _Scheduler_priority_Ready_queue_extract(
- &node->Base.Base.Node.Chain,
- &node->Ready_queue,
- &self->Bit_map
+ Priority_Control insert_priority;
+
+ if( !_Chain_Is_node_off_chain( &scheduled_to_ready->Node.Chain ) ) {
+ _Scheduler_SMP_Extract_from_scheduled( context, scheduled_to_ready );
+ }
+
+ insert_priority = _Scheduler_SMP_Node_priority( scheduled_to_ready );
+
+ _Scheduler_strong_APA_Insert_ready(
+ context,
+ scheduled_to_ready,
+ insert_priority
);
}
-static void _Scheduler_strong_APA_Do_update(
+/*
+ * Implements the BFS algorithm for task departure: finds the highest
+ * priority ready task for a particular CPU and returns its Scheduler_Node.
+ * The filter argument points to the victim node whose blocking caused this
+ * function to be called.
+ */
+static inline Scheduler_Node *_Scheduler_strong_APA_Get_highest_ready(
Scheduler_Context *context,
- Scheduler_Node *node_to_update,
- Priority_Control new_priority
+ Scheduler_Node *filter
)
{
- Scheduler_strong_APA_Context *self =
- _Scheduler_strong_APA_Get_self( context );
- Scheduler_strong_APA_Node *node =
- _Scheduler_strong_APA_Node_downcast( node_to_update );
-
- _Scheduler_SMP_Node_update_priority( &node->Base, new_priority );
- _Scheduler_priority_Ready_queue_update(
- &node->Ready_queue,
- SCHEDULER_PRIORITY_UNMAP( new_priority ),
- &self->Bit_map,
- &self->Ready[ 0 ]
- );
+ Scheduler_strong_APA_Context *self;
+ Per_CPU_Control *filter_cpu;
+ Scheduler_strong_APA_Node *node;
+ Scheduler_Node *highest_ready;
+ Scheduler_Node *curr_node;
+ Scheduler_Node *next_node;
+ Scheduler_strong_APA_CPU *CPU;
+ uint32_t front;
+ uint32_t rear;
+ uint32_t cpu_max;
+ uint32_t cpu_index;
+
+ self = _Scheduler_strong_APA_Get_self( context );
+ /*
+ * Denotes front and rear of the queue
+ */
+ front = 0;
+ rear = -1;
+
+ filter_cpu = _Thread_Get_CPU( filter->user );
+ CPU = self->CPU;
+ cpu_max = _SMP_Get_processor_maximum();
+
+ for ( cpu_index = 0 ; cpu_index < cpu_max ; ++cpu_index ) {
+ CPU[ cpu_index ].visited = false;
+ }
+
+ CPU[ ++rear ].cpu = filter_cpu;
+ CPU[ _Per_CPU_Get_index( filter_cpu ) ].visited = true;
+
+ highest_ready = _Scheduler_strong_APA_Find_highest_ready(
+ self,
+ front,
+ rear
+ );
+
+ if ( highest_ready != filter ) {
+ /*
+ * Backtrack on the path from
+ * filter_cpu to highest_ready, shifting along every task.
+ */
+
+ node = _Scheduler_strong_APA_Node_downcast( highest_ready );
+    /*
+     * The highest ready node might not be directly reachable from the
+     * victim CPU, so task shifting may be needed.
+     */
+ while ( node->cpu_to_preempt != filter_cpu ) {
+ curr_node = &node->Base.Base;
+ next_node = _Scheduler_strong_APA_Get_scheduled(
+ self,
+ node->cpu_to_preempt
+ );
+
+ (void) _Scheduler_SMP_Preempt(
+ context,
+ curr_node,
+ next_node,
+ _Scheduler_strong_APA_Allocate_processor
+ );
+
+ if ( curr_node == highest_ready ) {
+ _Scheduler_strong_APA_Move_from_ready_to_scheduled( context, curr_node );
+ }
+
+ node = _Scheduler_strong_APA_Node_downcast( next_node );
+ }
+    /*
+     * Save the last node so that the calling _Scheduler_SMP_* function
+     * can perform the processor allocation.
+     */
+ curr_node = &node->Base.Base;
+ highest_ready = curr_node;
+ _Scheduler_strong_APA_Move_from_scheduled_to_ready( context, curr_node );
+ }
+
+ return highest_ready;
}
-static Scheduler_strong_APA_Context *
-_Scheduler_strong_APA_Get_context( const Scheduler_Control *scheduler )
+/*
+ * Returns the lowest priority scheduled node that is directly reachable
+ * through the affinity set of the filter node.
+ */
+static inline Scheduler_Node *_Scheduler_strong_APA_Get_lowest_scheduled(
+ Scheduler_Context *context,
+ Scheduler_Node *filter_base
+)
{
- return (Scheduler_strong_APA_Context *) _Scheduler_Get_context( scheduler );
+ uint32_t cpu_max;
+ uint32_t cpu_index;
+ Scheduler_Node *curr_node;
+ Scheduler_Node *lowest_scheduled = NULL;
+ Priority_Control max_priority_num;
+ Priority_Control curr_priority;
+ Scheduler_strong_APA_Node *filter_strong_node;
+ Scheduler_strong_APA_Context *self;
+
+ self = _Scheduler_strong_APA_Get_self( context );
+ max_priority_num = 0; /* Max (Lowest) priority encountered so far */
+ filter_strong_node = _Scheduler_strong_APA_Node_downcast( filter_base );
+
+  /* lowest_scheduled stays NULL only if the node's affinity set is empty */
+ _Assert( !_Processor_mask_Is_zero( &filter_strong_node->Affinity ) );
+ cpu_max = _SMP_Get_processor_maximum();
+
+ for ( cpu_index = 0 ; cpu_index < cpu_max ; ++cpu_index ) {
+ /* Checks if the CPU is in the affinity set of filter_strong_node */
+ if ( _Processor_mask_Is_set( &filter_strong_node->Affinity, cpu_index ) ) {
+ Per_CPU_Control *cpu = _Per_CPU_Get_by_index( cpu_index );
+
+ if ( _Per_CPU_Is_processor_online( cpu ) ) {
+ curr_node = _Scheduler_strong_APA_Get_scheduled( self, cpu );
+ curr_priority = _Scheduler_Node_get_priority( curr_node );
+ curr_priority = SCHEDULER_PRIORITY_PURIFY( curr_priority );
+
+ if ( curr_priority > max_priority_num ) {
+ lowest_scheduled = curr_node;
+ max_priority_num = curr_priority;
+ }
+ }
+ }
+ }
+
+ _Assert( lowest_scheduled != NULL );
+ return lowest_scheduled;
}
-void _Scheduler_strong_APA_Initialize( const Scheduler_Control *scheduler )
+static inline void _Scheduler_strong_APA_Extract_from_scheduled(
+ Scheduler_Context *context,
+ Scheduler_Node *node_to_extract
+)
{
- Scheduler_strong_APA_Context *self =
- _Scheduler_strong_APA_Get_context( scheduler );
+ Scheduler_strong_APA_Context *self;
+ Scheduler_strong_APA_Node *node;
- _Scheduler_SMP_Initialize( &self->Base );
- _Priority_bit_map_Initialize( &self->Bit_map );
- _Scheduler_priority_Ready_queue_initialize(
- &self->Ready[ 0 ],
- scheduler->maximum_priority
- );
+ self = _Scheduler_strong_APA_Get_self( context );
+ node = _Scheduler_strong_APA_Node_downcast( node_to_extract );
+
+ _Scheduler_SMP_Extract_from_scheduled( &self->Base.Base, &node->Base.Base );
+  /* Keep it in Ready, since the node may transition to the READY state */
}
-void _Scheduler_strong_APA_Node_initialize(
- const Scheduler_Control *scheduler,
- Scheduler_Node *node,
- Thread_Control *the_thread,
- Priority_Control priority
+static inline void _Scheduler_strong_APA_Extract_from_ready(
+ Scheduler_Context *context,
+ Scheduler_Node *node_to_extract
)
{
- Scheduler_Context *context;
- Scheduler_strong_APA_Context *self;
- Scheduler_strong_APA_Node *the_node;
+ Scheduler_strong_APA_Node *node;
- the_node = _Scheduler_strong_APA_Node_downcast( node );
- _Scheduler_SMP_Node_initialize(
- scheduler,
- &the_node->Base,
- the_thread,
- priority
- );
+ node = _Scheduler_strong_APA_Node_downcast( node_to_extract );
+
+ if( !_Chain_Is_node_off_chain( &node->Ready_node ) ) {
+ _Chain_Extract_unprotected( &node->Ready_node );
+ _Chain_Set_off_chain( &node->Ready_node );
+ }
- context = _Scheduler_Get_context( scheduler );
- self = _Scheduler_strong_APA_Get_self( context );
- _Scheduler_priority_Ready_queue_update(
- &the_node->Ready_queue,
- SCHEDULER_PRIORITY_UNMAP( priority ),
- &self->Bit_map,
- &self->Ready[ 0 ]
- );
}
-static bool _Scheduler_strong_APA_Has_ready( Scheduler_Context *context )
+static inline Scheduler_Node* _Scheduler_strong_APA_Get_lowest_reachable(
+ Scheduler_strong_APA_Context *self,
+ uint32_t front,
+ uint32_t rear,
+ Per_CPU_Control **cpu_to_preempt
+)
{
- Scheduler_strong_APA_Context *self =
- _Scheduler_strong_APA_Get_self( context );
+ Scheduler_Node *lowest_reachable;
+ Priority_Control max_priority_num;
+ uint32_t cpu_max;
+ uint32_t cpu_index;
+ Thread_Control *curr_thread;
+ Per_CPU_Control *curr_CPU;
+ Priority_Control curr_priority;
+ Scheduler_Node *curr_node;
+ Scheduler_strong_APA_Node *curr_strong_node;
+ Scheduler_strong_APA_CPU *CPU;
+
+ /* Max (Lowest) priority encountered so far */
+ max_priority_num = 0;
+ CPU = self->CPU;
+ cpu_max = _SMP_Get_processor_maximum();
+
+ while ( front <= rear ) {
+ curr_CPU = CPU[ front ].cpu;
+ front = front + 1;
- return !_Priority_bit_map_Is_empty( &self->Bit_map );
+ curr_node = _Scheduler_strong_APA_Get_scheduled( self, curr_CPU );
+ curr_thread = curr_node->user;
+
+ curr_priority = _Scheduler_Node_get_priority( curr_node );
+ curr_priority = SCHEDULER_PRIORITY_PURIFY( curr_priority );
+
+ curr_strong_node = _Scheduler_strong_APA_Node_downcast( curr_node );
+
+ if ( curr_priority > max_priority_num ) {
+ lowest_reachable = curr_node;
+ max_priority_num = curr_priority;
+ *cpu_to_preempt = curr_CPU;
+ }
+
+ if ( !curr_thread->is_idle ) {
+ for ( cpu_index = 0 ; cpu_index < cpu_max ; ++cpu_index ) {
+ if ( _Processor_mask_Is_set( &curr_strong_node->Affinity, cpu_index ) ) {
+ /* Checks if the thread_CPU is in the affinity set of the node */
+ Per_CPU_Control *cpu = _Per_CPU_Get_by_index( cpu_index );
+ if (
+ _Per_CPU_Is_processor_online( cpu ) &&
+ CPU[ cpu_index ].visited == false )
+ {
+ rear = rear + 1;
+ CPU[ rear ].cpu = cpu;
+ CPU[ cpu_index ].visited = true;
+ CPU[ cpu_index ].preempting_node = curr_node;
+ }
+ }
+ }
+ }
+ }
+
+ return lowest_reachable;
}
-static Scheduler_Node *_Scheduler_strong_APA_Get_highest_ready(
+static inline bool _Scheduler_strong_APA_Do_enqueue(
Scheduler_Context *context,
- Scheduler_Node *node
+ Scheduler_Node *lowest_reachable,
+ Scheduler_Node *node,
+ Priority_Control insert_priority,
+ Per_CPU_Control *cpu_to_preempt
)
{
- Scheduler_strong_APA_Context *self =
- _Scheduler_strong_APA_Get_self( context );
+ bool needs_help;
+ Priority_Control node_priority;
+ Priority_Control lowest_priority;
+ Scheduler_strong_APA_CPU *CPU;
+ Scheduler_Node *curr_node;
+ /* The first node that gets removed from the cpu */
+ Scheduler_Node *first_node;
+ Scheduler_strong_APA_Node *curr_strong_node;
+ Per_CPU_Control *curr_CPU;
+ Scheduler_strong_APA_Context *self;
+ Scheduler_Node *next_node;
- (void) node;
- return (Scheduler_Node *) _Scheduler_priority_Ready_queue_first(
- &self->Bit_map,
- &self->Ready[ 0 ]
- );
+ self = _Scheduler_strong_APA_Get_self( context );
+ CPU = self->CPU;
+
+ node_priority = _Scheduler_Node_get_priority( node );
+ node_priority = SCHEDULER_PRIORITY_PURIFY( node_priority );
+
+ if( lowest_reachable == NULL ) {
+ /*
+ * This means the affinity set of the newly arrived node
+ * is empty.
+ */
+ lowest_priority = UINT64_MAX;
+ } else {
+ lowest_priority = _Scheduler_Node_get_priority( lowest_reachable );
+ lowest_priority = SCHEDULER_PRIORITY_PURIFY( lowest_priority );
+ }
+
+ if ( lowest_priority > node_priority ) {
+ /*
+ * Backtrack on the path from
+ * _Thread_Get_CPU(lowest_reachable->user) to lowest_reachable, shifting
+ * along every task
+ */
+
+ curr_node = CPU[ _Per_CPU_Get_index( cpu_to_preempt ) ].preempting_node;
+ curr_strong_node = _Scheduler_strong_APA_Node_downcast( curr_node );
+ curr_strong_node->cpu_to_preempt = cpu_to_preempt;
+
+ /* Save which cpu to preempt in cpu_to_preempt value of the node */
+ while ( curr_node != node ) {
+ curr_CPU = _Thread_Get_CPU( curr_node->user );
+ curr_node = CPU[ _Per_CPU_Get_index( curr_CPU ) ].preempting_node;
+ curr_strong_node = _Scheduler_strong_APA_Node_downcast( curr_node );
+ curr_strong_node->cpu_to_preempt = curr_CPU;
+ }
+
+ curr_CPU = curr_strong_node->cpu_to_preempt;
+ next_node = _Scheduler_strong_APA_Get_scheduled( self, curr_CPU );
+
+ node_priority = _Scheduler_Node_get_priority( curr_node );
+ node_priority = SCHEDULER_PRIORITY_PURIFY( node_priority );
+
+ _Scheduler_SMP_Enqueue_to_scheduled(
+ context,
+ curr_node,
+ node_priority,
+ next_node,
+ _Scheduler_SMP_Insert_scheduled,
+ _Scheduler_strong_APA_Move_from_scheduled_to_ready,
+ _Scheduler_strong_APA_Allocate_processor
+ );
+
+ curr_node = next_node;
+ first_node = curr_node;
+ curr_strong_node = _Scheduler_strong_APA_Node_downcast( curr_node );
+
+ while ( curr_node != lowest_reachable ) {
+ curr_CPU = curr_strong_node->cpu_to_preempt;
+ next_node = _Scheduler_strong_APA_Get_scheduled( self, curr_CPU );
+ /* curr_node preempts the next_node; */
+ _Scheduler_SMP_Preempt(
+ context,
+ curr_node,
+ next_node,
+ _Scheduler_strong_APA_Allocate_processor
+ );
+
+ if(curr_node == first_node) {
+ _Scheduler_strong_APA_Move_from_ready_to_scheduled(context, first_node);
+ }
+ curr_node = next_node;
+ curr_strong_node = _Scheduler_strong_APA_Node_downcast( curr_node );
+ }
+
+ _Scheduler_strong_APA_Move_from_scheduled_to_ready( context, lowest_reachable );
+
+ needs_help = false;
+ } else {
+ needs_help = true;
+ }
+
+ /* Add it to Ready chain since it is now either scheduled or just ready. */
+ _Scheduler_strong_APA_Insert_ready( context,node, insert_priority );
+
+ return needs_help;
}
-void _Scheduler_strong_APA_Block(
- const Scheduler_Control *scheduler,
- Thread_Control *the_thread,
- Scheduler_Node *node
+/*
+ * Implements the BFS algorithm for task arrival: enqueues the node either
+ * in the scheduled chain or in the ready chain.  The node argument is the
+ * newly arrived node and is currently not scheduled.
+ */
+static inline bool _Scheduler_strong_APA_Enqueue(
+ Scheduler_Context *context,
+ Scheduler_Node *node,
+ Priority_Control insert_priority
)
{
- Scheduler_Context *context = _Scheduler_Get_context( scheduler );
+ Scheduler_strong_APA_Context *self;
+ Scheduler_strong_APA_CPU *CPU;
+ uint32_t cpu_max;
+ uint32_t cpu_index;
+ Per_CPU_Control *cpu_to_preempt;
+ Scheduler_Node *lowest_reachable;
+ Scheduler_strong_APA_Node *strong_node;
- _Scheduler_SMP_Block(
+ /* Denotes front and rear of the queue */
+ uint32_t front;
+ uint32_t rear;
+
+ front = 0;
+ rear = -1;
+
+ self = _Scheduler_strong_APA_Get_self( context );
+ strong_node = _Scheduler_strong_APA_Node_downcast( node );
+ cpu_max = _SMP_Get_processor_maximum();
+ CPU = self->CPU;
+
+ for ( cpu_index = 0 ; cpu_index < cpu_max ; ++cpu_index ) {
+ CPU[ cpu_index ].visited = false;
+
+ /* Checks if the thread_CPU is in the affinity set of the node */
+ if ( _Processor_mask_Is_set( &strong_node->Affinity, cpu_index ) ) {
+ Per_CPU_Control *cpu = _Per_CPU_Get_by_index( cpu_index );
+
+ if ( _Per_CPU_Is_processor_online( cpu ) ) {
+ rear = rear + 1;
+ CPU[ rear ].cpu = cpu;
+ CPU[ cpu_index ].visited = true;
+ CPU[ cpu_index ].preempting_node = node;
+ }
+ }
+ }
+
+ lowest_reachable = _Scheduler_strong_APA_Get_lowest_reachable(
+ self,
+ front,
+ rear,
+ &cpu_to_preempt
+ );
+
+ return _Scheduler_strong_APA_Do_enqueue(
+ context,
+ lowest_reachable,
+ node,
+ insert_priority,
+ cpu_to_preempt
+ );
+}
+
+static inline bool _Scheduler_strong_APA_Enqueue_scheduled(
+ Scheduler_Context *context,
+ Scheduler_Node *node,
+ Priority_Control insert_priority
+)
+{
+ return _Scheduler_SMP_Enqueue_scheduled(
context,
- the_thread,
node,
- _Scheduler_SMP_Extract_from_scheduled,
+ insert_priority,
+ _Scheduler_SMP_Priority_less_equal,
_Scheduler_strong_APA_Extract_from_ready,
_Scheduler_strong_APA_Get_highest_ready,
+ _Scheduler_strong_APA_Insert_ready,
+ _Scheduler_SMP_Insert_scheduled,
_Scheduler_strong_APA_Move_from_ready_to_scheduled,
- _Scheduler_SMP_Allocate_processor_exact
+ _Scheduler_strong_APA_Allocate_processor
);
}
-static bool _Scheduler_strong_APA_Enqueue(
+static inline bool _Scheduler_strong_APA_Do_ask_for_help(
Scheduler_Context *context,
- Scheduler_Node *node,
- Priority_Control insert_priority
+ Thread_Control *the_thread,
+ Scheduler_Node *node
)
{
- return _Scheduler_SMP_Enqueue(
+ return _Scheduler_SMP_Ask_for_help(
context,
+ the_thread,
node,
- insert_priority,
_Scheduler_SMP_Priority_less_equal,
_Scheduler_strong_APA_Insert_ready,
_Scheduler_SMP_Insert_scheduled,
_Scheduler_strong_APA_Move_from_scheduled_to_ready,
- _Scheduler_SMP_Get_lowest_scheduled,
- _Scheduler_SMP_Allocate_processor_exact
+ _Scheduler_strong_APA_Get_lowest_scheduled,
+ _Scheduler_strong_APA_Allocate_processor
);
}
-static bool _Scheduler_strong_APA_Enqueue_scheduled(
+static inline void _Scheduler_strong_APA_Do_set_affinity(
Scheduler_Context *context,
- Scheduler_Node *node,
- Priority_Control insert_priority
+ Scheduler_Node *node_base,
+ void *arg
)
{
- return _Scheduler_SMP_Enqueue_scheduled(
+ Scheduler_strong_APA_Node *node;
+
+ node = _Scheduler_strong_APA_Node_downcast( node_base );
+ node->Affinity = *( (const Processor_mask *) arg );
+}
+
+void _Scheduler_strong_APA_Initialize( const Scheduler_Control *scheduler )
+{
+ Scheduler_strong_APA_Context *self =
+ _Scheduler_strong_APA_Get_context( scheduler );
+
+ _Scheduler_SMP_Initialize( &self->Base );
+ _Chain_Initialize_empty( &self->Ready );
+}
+
+void _Scheduler_strong_APA_Yield(
+ const Scheduler_Control *scheduler,
+ Thread_Control *thread,
+ Scheduler_Node *node
+)
+{
+ Scheduler_Context *context = _Scheduler_Get_context( scheduler );
+
+ _Scheduler_SMP_Yield(
context,
+ thread,
node,
- insert_priority,
- _Scheduler_SMP_Priority_less_equal,
_Scheduler_strong_APA_Extract_from_ready,
- _Scheduler_strong_APA_Get_highest_ready,
- _Scheduler_strong_APA_Insert_ready,
- _Scheduler_SMP_Insert_scheduled,
- _Scheduler_strong_APA_Move_from_ready_to_scheduled,
- _Scheduler_SMP_Allocate_processor_exact
+ _Scheduler_strong_APA_Enqueue,
+ _Scheduler_strong_APA_Enqueue_scheduled
);
}
-void _Scheduler_strong_APA_Unblock(
+void _Scheduler_strong_APA_Block(
const Scheduler_Control *scheduler,
- Thread_Control *the_thread,
+ Thread_Control *thread,
Scheduler_Node *node
)
{
Scheduler_Context *context = _Scheduler_Get_context( scheduler );
- _Scheduler_SMP_Unblock(
+  /*
+   * Needed in case the node is a scheduled node, since
+   * _Scheduler_SMP_Block() extracts from the SMP scheduled chain, and from
+   * the Strong APA Ready chain only when the node is ready.  The Strong APA
+   * Ready chain, however, stores both ready and scheduled nodes.
+   */
+ _Scheduler_strong_APA_Extract_from_ready(context, node);
+
+ _Scheduler_SMP_Block(
context,
- the_thread,
+ thread,
node,
- _Scheduler_strong_APA_Do_update,
- _Scheduler_strong_APA_Enqueue
+ _Scheduler_strong_APA_Extract_from_scheduled,
+ _Scheduler_strong_APA_Extract_from_ready,
+ _Scheduler_strong_APA_Get_highest_ready,
+ _Scheduler_strong_APA_Move_from_ready_to_scheduled,
+ _Scheduler_strong_APA_Allocate_processor
);
}
-static bool _Scheduler_strong_APA_Do_ask_for_help(
- Scheduler_Context *context,
- Thread_Control *the_thread,
- Scheduler_Node *node
+void _Scheduler_strong_APA_Unblock(
+ const Scheduler_Control *scheduler,
+ Thread_Control *thread,
+ Scheduler_Node *node
)
{
- return _Scheduler_SMP_Ask_for_help(
+ Scheduler_Context *context = _Scheduler_Get_context( scheduler );
+
+ _Scheduler_SMP_Unblock(
context,
- the_thread,
+ thread,
node,
- _Scheduler_SMP_Priority_less_equal,
- _Scheduler_strong_APA_Insert_ready,
- _Scheduler_SMP_Insert_scheduled,
- _Scheduler_strong_APA_Move_from_scheduled_to_ready,
- _Scheduler_SMP_Get_lowest_scheduled,
- _Scheduler_SMP_Allocate_processor_lazy
+ _Scheduler_strong_APA_Do_update,
+ _Scheduler_strong_APA_Enqueue
);
}
void _Scheduler_strong_APA_Update_priority(
const Scheduler_Control *scheduler,
- Thread_Control *the_thread,
+ Thread_Control *thread,
Scheduler_Node *node
)
{
@@ -335,7 +854,7 @@ void _Scheduler_strong_APA_Update_priority(
_Scheduler_SMP_Update_priority(
context,
- the_thread,
+ thread,
node,
_Scheduler_strong_APA_Extract_from_ready,
_Scheduler_strong_APA_Do_update,
@@ -353,7 +872,11 @@ bool _Scheduler_strong_APA_Ask_for_help(
{
Scheduler_Context *context = _Scheduler_Get_context( scheduler );
- return _Scheduler_strong_APA_Do_ask_for_help( context, the_thread, node );
+ return _Scheduler_strong_APA_Do_ask_for_help(
+ context,
+ the_thread,
+ node
+ );
}
void _Scheduler_strong_APA_Reconsider_help_request(
@@ -389,10 +912,22 @@ void _Scheduler_strong_APA_Withdraw_node(
_Scheduler_strong_APA_Extract_from_ready,
_Scheduler_strong_APA_Get_highest_ready,
_Scheduler_strong_APA_Move_from_ready_to_scheduled,
- _Scheduler_SMP_Allocate_processor_lazy
+ _Scheduler_strong_APA_Allocate_processor
);
}
+static inline void _Scheduler_strong_APA_Register_idle(
+ Scheduler_Context *context,
+ Scheduler_Node *idle_base,
+ Per_CPU_Control *cpu
+)
+{
+ Scheduler_strong_APA_Context *self;
+ self = _Scheduler_strong_APA_Get_self( context );
+
+ _Scheduler_strong_APA_Set_scheduled( self, idle_base, cpu );
+}
+
void _Scheduler_strong_APA_Add_processor(
const Scheduler_Control *scheduler,
Thread_Control *idle
@@ -405,7 +940,25 @@ void _Scheduler_strong_APA_Add_processor(
idle,
_Scheduler_strong_APA_Has_ready,
_Scheduler_strong_APA_Enqueue_scheduled,
- _Scheduler_SMP_Do_nothing_register_idle
+ _Scheduler_strong_APA_Register_idle
+ );
+}
+
+void _Scheduler_strong_APA_Start_idle(
+ const Scheduler_Control *scheduler,
+ Thread_Control *idle,
+ Per_CPU_Control *cpu
+)
+{
+ Scheduler_Context *context;
+
+ context = _Scheduler_Get_context( scheduler );
+
+ _Scheduler_SMP_Do_start_idle(
+ context,
+ idle,
+ cpu,
+ _Scheduler_strong_APA_Register_idle
);
}
@@ -424,20 +977,64 @@ Thread_Control *_Scheduler_strong_APA_Remove_processor(
);
}
-void _Scheduler_strong_APA_Yield(
+void _Scheduler_strong_APA_Node_initialize(
const Scheduler_Control *scheduler,
+ Scheduler_Node *node,
Thread_Control *the_thread,
- Scheduler_Node *node
+ Priority_Control priority
)
{
- Scheduler_Context *context = _Scheduler_Get_context( scheduler );
+ Scheduler_SMP_Node *smp_node;
+ Scheduler_strong_APA_Node *strong_node;
- _Scheduler_SMP_Yield(
- context,
- the_thread,
- node,
- _Scheduler_strong_APA_Extract_from_ready,
- _Scheduler_strong_APA_Enqueue,
- _Scheduler_strong_APA_Enqueue_scheduled
+ smp_node = _Scheduler_SMP_Node_downcast( node );
+ strong_node = _Scheduler_strong_APA_Node_downcast( node );
+
+ _Scheduler_SMP_Node_initialize( scheduler, smp_node, the_thread, priority );
+
+ _Processor_mask_Assign(
+ &strong_node->Affinity,
+ _SMP_Get_online_processors()
);
}
+
+Status_Control _Scheduler_strong_APA_Set_affinity(
+ const Scheduler_Control *scheduler,
+ Thread_Control *thread,
+ Scheduler_Node *node_base,
+ const Processor_mask *affinity
+)
+{
+ Scheduler_Context *context;
+ Scheduler_strong_APA_Node *node;
+ Processor_mask local_affinity;
+
+ context = _Scheduler_Get_context( scheduler );
+ _Processor_mask_And( &local_affinity, &context->Processors, affinity );
+
+ if ( _Processor_mask_Is_zero( &local_affinity ) ) {
+ return STATUS_INVALID_NUMBER;
+ }
+
+ node = _Scheduler_strong_APA_Node_downcast( node_base );
+
+ if ( _Processor_mask_Is_equal( &node->Affinity, affinity ) )
+    return STATUS_SUCCESSFUL; /* Nothing to do */
+
+ _Processor_mask_Assign( &node->Affinity, &local_affinity );
+
+ _Scheduler_SMP_Set_affinity(
+ context,
+ thread,
+ node_base,
+ &local_affinity,
+ _Scheduler_strong_APA_Do_set_affinity,
+ _Scheduler_strong_APA_Extract_from_ready,
+ _Scheduler_strong_APA_Get_highest_ready,
+ _Scheduler_strong_APA_Move_from_ready_to_scheduled,
+ _Scheduler_strong_APA_Enqueue,
+ _Scheduler_strong_APA_Allocate_processor
+ );
+
+ return STATUS_SUCCESSFUL;
+}
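
At the application level, the new affinity support is reached through the
Classic API task affinity directive, which hands the request down to
_Scheduler_strong_APA_Set_affinity(). Below is a minimal sketch of such a
caller, assuming an SMP-enabled RTEMS application configured to use the
Strong APA scheduler; the function name is hypothetical.

#include <rtems.h>

/* Restrict the calling task to processors 0 and 2. */
static void pin_self_to_cpus_0_and_2( void )
{
  cpu_set_t         cpuset;
  rtems_status_code sc;

  CPU_ZERO( &cpuset );
  CPU_SET( 0, &cpuset );
  CPU_SET( 2, &cpuset );

  /*
   * With the Strong APA scheduler configured, the request eventually reaches
   * _Scheduler_strong_APA_Set_affinity().  An affinity set that contains no
   * processor owned by the scheduler is rejected (STATUS_INVALID_NUMBER).
   */
  sc = rtems_task_set_affinity( RTEMS_SELF, sizeof( cpuset ), &cpuset );
  if ( sc != RTEMS_SUCCESSFUL ) {
    /* Handle the error, e.g. an invalid or empty affinity set. */
  }
}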