From 6c23252cdd8ea63d0fe13d9f99b7befbf070fe80 Mon Sep 17 00:00:00 2001 From: Richi Dubey Date: Wed, 16 Jun 2021 11:30:16 +0530 Subject: Update Strong APA Scheduler This change allows for the migration of higher priority tasks on the arrival of a lower priority task limited by affinity constraints. Change license to BSD-2-Clause according to file history and re-licensing agreement. Update #3053. --- cpukit/include/rtems/scheduler.h | 10 +- cpukit/include/rtems/score/schedulerstrongapa.h | 162 +++- cpukit/score/src/schedulerstrongapa.c | 981 +++++++++++++++++++----- 3 files changed, 924 insertions(+), 229 deletions(-) (limited to 'cpukit') diff --git a/cpukit/include/rtems/scheduler.h b/cpukit/include/rtems/scheduler.h index 955a83cfb4..76d84fd787 100644 --- a/cpukit/include/rtems/scheduler.h +++ b/cpukit/include/rtems/scheduler.h @@ -251,22 +251,24 @@ #ifdef CONFIGURE_SCHEDULER_STRONG_APA #include + #ifndef CONFIGURE_MAXIMUM_PROCESSORS + #error "CONFIGURE_MAXIMUM_PROCESSORS must be defined to configure the Strong APA scheduler" + #endif + #define SCHEDULER_STRONG_APA_CONTEXT_NAME( name ) \ SCHEDULER_CONTEXT_NAME( strong_APA_ ## name ) #define RTEMS_SCHEDULER_STRONG_APA( name, prio_count ) \ static struct { \ Scheduler_strong_APA_Context Base; \ - Chain_Control Ready[ ( prio_count ) ]; \ + Scheduler_strong_APA_CPU CPU[ CONFIGURE_MAXIMUM_PROCESSORS ]; \ } SCHEDULER_STRONG_APA_CONTEXT_NAME( name ) #define RTEMS_SCHEDULER_TABLE_STRONG_APA( name, obj_name ) \ { \ &SCHEDULER_STRONG_APA_CONTEXT_NAME( name ).Base.Base.Base, \ SCHEDULER_STRONG_APA_ENTRY_POINTS, \ - RTEMS_ARRAY_SIZE( \ - SCHEDULER_STRONG_APA_CONTEXT_NAME( name ).Ready \ - ) - 1, \ + SCHEDULER_STRONG_APA_MAXIMUM_PRIORITY, \ ( obj_name ) \ SCHEDULER_CONTROL_IS_NON_PREEMPT_MODE_SUPPORTED( false ) \ } diff --git a/cpukit/include/rtems/score/schedulerstrongapa.h b/cpukit/include/rtems/score/schedulerstrongapa.h index 530eadc279..9ee922d46d 100644 --- a/cpukit/include/rtems/score/schedulerstrongapa.h +++ b/cpukit/include/rtems/score/schedulerstrongapa.h @@ -1,3 +1,5 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + /** * @file * @@ -8,30 +10,44 @@ */ /* - * Copyright (c) 2013, 2018 embedded brains GmbH. All rights reserved. + * Copyright (C) 2020 Richi Dubey + * Copyright (C) 2013, 2018 embedded brains GmbH (http://www.embedded-brains.de) * - * embedded brains GmbH - * Dornierstr. 4 - * 82178 Puchheim - * Germany - * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. */ #ifndef _RTEMS_SCORE_SCHEDULERSTRONGAPA_H #define _RTEMS_SCORE_SCHEDULERSTRONGAPA_H #include -#include #include #ifdef __cplusplus extern "C" { #endif /* __cplusplus */ +/* Forward Declaration of Per_CPU_Control */ +struct Per_CPU_Control; + /** * @defgroup RTEMSScoreSchedulerStrongAPA Strong APA Scheduler * @@ -39,42 +55,95 @@ extern "C" { * * @brief This group contains the Strong APA Scheduler implementation. * - * This is an implementation of the global fixed priority scheduler (G-FP). It - * uses one ready chain per priority to ensure constant time insert operations. - * The scheduled chain uses linear insert operations and has at most processor - * count entries. Since the processor and priority count are constants all - * scheduler operations complete in a bounded execution time. - * - * The the_thread preempt mode will be ignored. + * This is an implementation of the Strong APA scheduler defined by + * Cerqueira et al. in Linux's Processor Affinity API, Refined: + * Shifting Real-Time Tasks Towards Higher Schedulability. * + * The scheduled and ready nodes are accessed via the + * Scheduler_strong_APA_Context::Ready which helps in backtracking when a + * node which is executing on a CPU gets blocked. New node is allocated to + * the cpu by checking all the executing nodes in the affinity set of the + * node and the subsequent nodes executing on the processors in its + * affinity set. * @{ */ /** - * @brief Scheduler context specialization for Strong APA - * schedulers. + * @brief Scheduler node specialization for Strong APA schedulers. */ typedef struct { - Scheduler_SMP_Context Base; - Priority_bit_map_Control Bit_map; - Chain_Control Ready[ RTEMS_ZERO_LENGTH_ARRAY ]; -} Scheduler_strong_APA_Context; + /** + * @brief SMP scheduler node. + */ + Scheduler_SMP_Node Base; + + /** + * @brief Chain node for Scheduler_strong_APA_Context::Ready. + */ + Chain_Node Ready_node; + + /** + * @brief CPU that this node would preempt in the backtracking part of + * _Scheduler_strong_APA_Get_highest_ready and + * _Scheduler_strong_APA_Do_Enqueue. + */ + struct Per_CPU_Control *cpu_to_preempt; + + /** + * @brief The associated affinity set of this node. + */ + Processor_mask Affinity; +} Scheduler_strong_APA_Node; + /** - * @brief Scheduler node specialization for Strong APA - * schedulers. + * @brief CPU related variables and a CPU_Control to implement BFS. */ typedef struct { /** - * @brief SMP scheduler node. + * @brief CPU in a queue. */ - Scheduler_SMP_Node Base; + struct Per_CPU_Control *cpu; /** - * @brief The associated ready queue of this node. + * @brief The node that would preempt this CPU. */ - Scheduler_priority_Ready_queue Ready_queue; -} Scheduler_strong_APA_Node; + Scheduler_Node *preempting_node; + + /** + * @brief Whether or not this cpu has been added to the queue + * (visited in BFS). + */ + bool visited; + + /** + * @brief The node currently executing on this cpu. 
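+ *
+ * Read via _Scheduler_strong_APA_Get_scheduled() during the BFS traversal
+ * and the subsequent backtracking to find the node that would be
+ * preempted on this cpu.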
+ */ + Scheduler_Node *executing; +} Scheduler_strong_APA_CPU; + +/** + * @brief Scheduler context and node definition for Strong APA scheduler. + */ +typedef struct { + /** + * @brief @see Scheduler_SMP_Context. + */ + Scheduler_SMP_Context Base; + + /** + * @brief Chain of all the ready and scheduled nodes present in + * the Strong APA scheduler. + */ + Chain_Control Ready; + + /** + * @brief Stores cpu-specific variables. + */ + Scheduler_strong_APA_CPU CPU[ RTEMS_ZERO_LENGTH_ARRAY ]; +} Scheduler_strong_APA_Context; + +#define SCHEDULER_STRONG_APA_MAXIMUM_PRIORITY 255 /** * @brief Entry points for the Strong APA Scheduler. @@ -101,8 +170,8 @@ typedef struct { _Scheduler_default_Release_job, \ _Scheduler_default_Cancel_job, \ _Scheduler_default_Tick, \ - _Scheduler_SMP_Start_idle \ - SCHEDULER_OPERATION_DEFAULT_GET_SET_AFFINITY \ + _Scheduler_strong_APA_Start_idle, \ + _Scheduler_strong_APA_Set_affinity \ } /** @@ -169,7 +238,7 @@ void _Scheduler_strong_APA_Update_priority( /** * @brief Asks for help. * - * @param scheduler The scheduler control instance. + * @param scheduler The scheduler control instance. * @param the_thread The thread that asks for help. * @param node The node of @a the_thread. * @@ -247,6 +316,33 @@ void _Scheduler_strong_APA_Yield( Scheduler_Node *node ); +/** + * @brief Starts an idle thread. + * + * @param scheduler The scheduler instance. + * @param[in, out] the_thread An idle thread. + * @param cpu The cpu for the operation. + */ +void _Scheduler_strong_APA_Start_idle( + const Scheduler_Control *scheduler, + Thread_Control *idle, + struct Per_CPU_Control *cpu +); + +/** + * @brief Sets the affinity . + * + * @param scheduler The scheduler control instance. + * @param the_thread The thread to yield. + * @param[in, out] node The node of @a the_thread. 
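+ * @param affinity The new processor affinity set for the thread.
+ *
+ * @retval STATUS_SUCCESSFUL The affinity was set.
+ * @retval STATUS_INVALID_NUMBER The requested affinity has no overlap with
+ *   the set of processors owned by this scheduler instance.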
+ */ +Status_Control _Scheduler_strong_APA_Set_affinity( + const Scheduler_Control *scheduler, + Thread_Control *thread, + Scheduler_Node *node_base, + const Processor_mask *affinity +); + /** @} */ #ifdef __cplusplus diff --git a/cpukit/score/src/schedulerstrongapa.c b/cpukit/score/src/schedulerstrongapa.c index dcfb55601a..845d19d1a8 100644 --- a/cpukit/score/src/schedulerstrongapa.c +++ b/cpukit/score/src/schedulerstrongapa.c @@ -1,3 +1,5 @@ +/* SPDX-License-Identifier: BSD-2-Clause */ + /** * @file * @@ -5,27 +7,57 @@ * * @brief This source file contains the implementation of * _Scheduler_strong_APA_Add_processor(), + * _Scheduler_strong_APA_Allocate_processor(), * _Scheduler_strong_APA_Ask_for_help(), _Scheduler_strong_APA_Block(), - * _Scheduler_strong_APA_Initialize(), + * _Scheduler_strong_APA_Do_ask_for_help(), + * _Scheduler_strong_APA_Do_enqueue(), + * _Scheduler_strong_APA_Do_set_affinity(), + * _Scheduler_strong_APA_Do_update(), _Scheduler_strong_APA_Enqueue(), + * _Scheduler_strong_APA_Enqueue_scheduled(), + * _Scheduler_strong_APA_Extract_from_ready(), + * _Scheduler_strong_APA_Extract_from_scheduled(), + * _Scheduler_strong_APA_Find_highest_ready(), + * _Scheduler_strong_APA_Get_highest_ready(), + * _Scheduler_strong_APA_Get_lowest_reachable(), + * _Scheduler_strong_APA_Get_lowest_scheduled(), + * _Scheduler_strong_APA_Has_ready(), + * _Scheduler_strong_APA_Initialize(), _Scheduler_strong_APA_Insert_ready(), + * _Scheduler_strong_APA_Move_from_ready_to_scheduled(), + * _Scheduler_strong_APA_Move_from_scheduled_to_ready(), * _Scheduler_strong_APA_Node_initialize(), * _Scheduler_strong_APA_Reconsider_help_request(), - * _Scheduler_strong_APA_Remove_processor(), _Scheduler_strong_APA_Unblock(), - * _Scheduler_strong_APA_Update_priority(), + * _Scheduler_strong_APA_Register_idle(), + * _Scheduler_strong_APA_Remove_processor(), + * _Scheduler_strong_APA_Set_affinity(), + * _Scheduler_strong_APA_Set_scheduled(), _Scheduler_strong_APA_Start_idle(), + * _Scheduler_strong_APA_Unblock(), _Scheduler_strong_APA_Update_priority(), * _Scheduler_strong_APA_Withdraw_node(), and _Scheduler_strong_APA_Yield(). */ /* - * Copyright (c) 2013, 2016 embedded brains GmbH. All rights reserved. + * Copyright (C) 2020 Richi Dubey + * Copyright (C) 2013, 2016 embedded brains GmbH (http://www.embedded-brains.de) * - * embedded brains GmbH - * Dornierstr. 4 - * 82178 Puchheim - * Germany - * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rtems.org/license/LICENSE. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. */ #ifdef HAVE_CONFIG_H @@ -33,68 +65,220 @@ #endif #include -#include #include +#include -static Scheduler_strong_APA_Context *_Scheduler_strong_APA_Get_self( - Scheduler_Context *context -) +#define STRONG_SCHEDULER_NODE_OF_CHAIN( node ) \ + RTEMS_CONTAINER_OF( node, Scheduler_strong_APA_Node, Ready_node ) + +static inline Scheduler_strong_APA_Context * +_Scheduler_strong_APA_Get_context( const Scheduler_Control *scheduler ) +{ + return (Scheduler_strong_APA_Context *) _Scheduler_Get_context( scheduler ); +} + +static inline Scheduler_strong_APA_Context * +_Scheduler_strong_APA_Get_self( Scheduler_Context *context ) { return (Scheduler_strong_APA_Context *) context; } -static Scheduler_strong_APA_Node * +static inline Scheduler_strong_APA_Node * _Scheduler_strong_APA_Node_downcast( Scheduler_Node *node ) { return (Scheduler_strong_APA_Node *) node; } -static void _Scheduler_strong_APA_Move_from_scheduled_to_ready( +static inline void _Scheduler_strong_APA_Do_update( Scheduler_Context *context, - Scheduler_Node *scheduled_to_ready + Scheduler_Node *node, + Priority_Control new_priority ) { - Scheduler_strong_APA_Context *self = - _Scheduler_strong_APA_Get_self( context ); - Scheduler_strong_APA_Node *node = - _Scheduler_strong_APA_Node_downcast( scheduled_to_ready ); - - _Chain_Extract_unprotected( &node->Base.Base.Node.Chain ); - _Scheduler_priority_Ready_queue_enqueue_first( - &node->Base.Base.Node.Chain, - &node->Ready_queue, - &self->Bit_map - ); + Scheduler_SMP_Node *smp_node; + (void) context; + + smp_node = _Scheduler_SMP_Node_downcast( node ); + _Scheduler_SMP_Node_update_priority( smp_node, new_priority ); } -static void _Scheduler_strong_APA_Move_from_ready_to_scheduled( - Scheduler_Context *context, - Scheduler_Node *ready_to_scheduled +/* + * Returns true if the Strong APA scheduler has ready nodes + * available for scheduling. 
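+ * The check walks the common Ready chain, which contains both ready and
+ * scheduled nodes, and succeeds on the first node found in the
+ * SCHEDULER_SMP_NODE_READY state.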
+ */ +static inline bool _Scheduler_strong_APA_Has_ready( + Scheduler_Context *context ) { Scheduler_strong_APA_Context *self; + const Chain_Node *tail; + Chain_Node *next; Scheduler_strong_APA_Node *node; - Priority_Control insert_priority; self = _Scheduler_strong_APA_Get_self( context ); - node = _Scheduler_strong_APA_Node_downcast( ready_to_scheduled ); + tail = _Chain_Immutable_tail( &self->Ready ); + next = _Chain_First( &self->Ready ); + + while ( next != tail ) { + node = (Scheduler_strong_APA_Node *)STRONG_SCHEDULER_NODE_OF_CHAIN( next ); + + if ( + _Scheduler_SMP_Node_state( &node->Base.Base ) == + SCHEDULER_SMP_NODE_READY + ) { + return true; + } + + next = _Chain_Next( next ); + } + + return false; +} + +static inline void _Scheduler_strong_APA_Set_scheduled( + Scheduler_strong_APA_Context *self, + Scheduler_Node *executing, + const Per_CPU_Control *cpu +) +{ + self->CPU[ _Per_CPU_Get_index( cpu ) ].executing = executing; +} + +static inline Scheduler_Node *_Scheduler_strong_APA_Get_scheduled( + const Scheduler_strong_APA_Context *self, + const Per_CPU_Control *cpu +) +{ + return self->CPU[ _Per_CPU_Get_index( cpu ) ].executing; +} + +static inline void _Scheduler_strong_APA_Allocate_processor( + Scheduler_Context *context, + Scheduler_Node *scheduled_base, + Scheduler_Node *victim_base, + Per_CPU_Control *victim_cpu +) +{ + Scheduler_strong_APA_Node *scheduled; + Scheduler_strong_APA_Context *self; + + (void) victim_base; + + scheduled = _Scheduler_strong_APA_Node_downcast( scheduled_base ); + self = _Scheduler_strong_APA_Get_self( context ); - _Scheduler_priority_Ready_queue_extract( - &node->Base.Base.Node.Chain, - &node->Ready_queue, - &self->Bit_map + _Scheduler_strong_APA_Set_scheduled( self, scheduled_base, victim_cpu ); + + _Scheduler_SMP_Allocate_processor_exact( + context, + &( scheduled->Base.Base ), + NULL, + victim_cpu ); - insert_priority = _Scheduler_SMP_Node_priority( &node->Base.Base ); +} + +/* + * Finds and returns the highest ready node present by accessing the + * _Strong_APA_Context->CPU with front and rear values. + */ +static inline Scheduler_Node * _Scheduler_strong_APA_Find_highest_ready( + Scheduler_strong_APA_Context *self, + uint32_t front, + uint32_t rear +) +{ + Scheduler_Node *highest_ready; + Scheduler_strong_APA_CPU *CPU; + const Chain_Node *tail; + Chain_Node *next; + Scheduler_strong_APA_Node *node; + Priority_Control min_priority_num; + Priority_Control curr_priority; + Per_CPU_Control *assigned_cpu; + Scheduler_SMP_Node_state curr_state; + Per_CPU_Control *curr_CPU; + + CPU = self->CPU; + /* + * When the first task accessed has nothing to compare its priority against. + * So, it is the task with the highest priority witnessed so far. + */ + min_priority_num = UINT64_MAX; + + while ( front <= rear ) { + curr_CPU = CPU[ front++ ].cpu; + + tail = _Chain_Immutable_tail( &self->Ready ); + next = _Chain_First( &self->Ready ); + + while ( next != tail ) { + node = (Scheduler_strong_APA_Node*) STRONG_SCHEDULER_NODE_OF_CHAIN( next ); + /* + * Check if the curr_CPU is in the affinity set of the node. 
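+       * A matching scheduled node adds the cpu it executes on to the BFS
+       * queue, while a matching ready node competes to become the highest
+       * ready node (smallest priority value) seen so far.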
+ */ + if ( + _Processor_mask_Is_set( &node->Affinity, _Per_CPU_Get_index( curr_CPU ) ) + ) { + curr_state = _Scheduler_SMP_Node_state( &node->Base.Base ); + + if ( curr_state == SCHEDULER_SMP_NODE_SCHEDULED ) { + assigned_cpu = _Thread_Get_CPU( node->Base.Base.user ); + + if ( CPU[ _Per_CPU_Get_index( assigned_cpu ) ].visited == false ) { + CPU[ ++rear ].cpu = assigned_cpu; + CPU[ _Per_CPU_Get_index( assigned_cpu ) ].visited = true; + /* + * The curr CPU of the queue invoked this node to add its CPU + * that it is executing on to the queue. So this node might get + * preempted because of the invoker curr_CPU and this curr_CPU + * is the CPU that node should preempt in case this node + * gets preempted. + */ + node->cpu_to_preempt = curr_CPU; + } + } else if ( curr_state == SCHEDULER_SMP_NODE_READY ) { + curr_priority = _Scheduler_Node_get_priority( &node->Base.Base ); + curr_priority = SCHEDULER_PRIORITY_PURIFY( curr_priority ); + + if ( + min_priority_num == UINT64_MAX || + curr_priority < min_priority_num + ) { + min_priority_num = curr_priority; + highest_ready = &node->Base.Base; + /* + * In case curr_CPU is filter_CPU, we need to store the + * cpu_to_preempt value so that we go back to SMP_* + * function, rather than preempting the node ourselves. + */ + node->cpu_to_preempt = curr_CPU; + } + } + } + next = _Chain_Next( next ); + } + } + + return highest_ready; +} + +static inline void _Scheduler_strong_APA_Move_from_ready_to_scheduled( + Scheduler_Context *context, + Scheduler_Node *ready_to_scheduled +) +{ + Priority_Control insert_priority; + + insert_priority = _Scheduler_SMP_Node_priority( ready_to_scheduled ); insert_priority = SCHEDULER_PRIORITY_APPEND( insert_priority ); - _Chain_Insert_ordered_unprotected( - &self->Base.Scheduled, - &node->Base.Base.Node.Chain, - &insert_priority, - _Scheduler_SMP_Priority_less_equal + _Scheduler_SMP_Insert_scheduled( + context, + ready_to_scheduled, + insert_priority ); } -static void _Scheduler_strong_APA_Insert_ready( +static inline void _Scheduler_strong_APA_Insert_ready( Scheduler_Context *context, Scheduler_Node *node_base, Priority_Control insert_priority @@ -106,228 +290,563 @@ static void _Scheduler_strong_APA_Insert_ready( self = _Scheduler_strong_APA_Get_self( context ); node = _Scheduler_strong_APA_Node_downcast( node_base ); - if ( SCHEDULER_PRIORITY_IS_APPEND( insert_priority ) ) { - _Scheduler_priority_Ready_queue_enqueue( - &node->Base.Base.Node.Chain, - &node->Ready_queue, - &self->Bit_map - ); - } else { - _Scheduler_priority_Ready_queue_enqueue_first( - &node->Base.Base.Node.Chain, - &node->Ready_queue, - &self->Bit_map - ); + if( _Chain_Is_node_off_chain( &node->Ready_node ) ) { + _Chain_Append_unprotected( &self->Ready, &node->Ready_node ); + } else { + _Chain_Extract_unprotected( &node->Ready_node ); + _Chain_Set_off_chain( &node->Ready_node ); + _Chain_Append_unprotected( &self->Ready, &node->Ready_node ); } } -static void _Scheduler_strong_APA_Extract_from_ready( +static inline void _Scheduler_strong_APA_Move_from_scheduled_to_ready( Scheduler_Context *context, - Scheduler_Node *the_thread + Scheduler_Node *scheduled_to_ready ) { - Scheduler_strong_APA_Context *self = - _Scheduler_strong_APA_Get_self( context ); - Scheduler_strong_APA_Node *node = - _Scheduler_strong_APA_Node_downcast( the_thread ); - - _Scheduler_priority_Ready_queue_extract( - &node->Base.Base.Node.Chain, - &node->Ready_queue, - &self->Bit_map + Priority_Control insert_priority; + + if( !_Chain_Is_node_off_chain( &scheduled_to_ready->Node.Chain ) ) 
{ + _Scheduler_SMP_Extract_from_scheduled( context, scheduled_to_ready ); + } + + insert_priority = _Scheduler_SMP_Node_priority( scheduled_to_ready ); + + _Scheduler_strong_APA_Insert_ready( + context, + scheduled_to_ready, + insert_priority ); } -static void _Scheduler_strong_APA_Do_update( +/* + * Implement the BFS Algorithm for task departure to get the highest ready task + * for a particular CPU, returns the highest ready Scheduler_Node + * Scheduler_Node filter here points to the victim node that is blocked + * resulting which this function is called. + */ +static inline Scheduler_Node *_Scheduler_strong_APA_Get_highest_ready( Scheduler_Context *context, - Scheduler_Node *node_to_update, - Priority_Control new_priority + Scheduler_Node *filter ) { - Scheduler_strong_APA_Context *self = - _Scheduler_strong_APA_Get_self( context ); - Scheduler_strong_APA_Node *node = - _Scheduler_strong_APA_Node_downcast( node_to_update ); - - _Scheduler_SMP_Node_update_priority( &node->Base, new_priority ); - _Scheduler_priority_Ready_queue_update( - &node->Ready_queue, - SCHEDULER_PRIORITY_UNMAP( new_priority ), - &self->Bit_map, - &self->Ready[ 0 ] - ); + Scheduler_strong_APA_Context *self; + Per_CPU_Control *filter_cpu; + Scheduler_strong_APA_Node *node; + Scheduler_Node *highest_ready; + Scheduler_Node *curr_node; + Scheduler_Node *next_node; + Scheduler_strong_APA_CPU *CPU; + uint32_t front; + uint32_t rear; + uint32_t cpu_max; + uint32_t cpu_index; + + self = _Scheduler_strong_APA_Get_self( context ); + /* + * Denotes front and rear of the queue + */ + front = 0; + rear = -1; + + filter_cpu = _Thread_Get_CPU( filter->user ); + CPU = self->CPU; + cpu_max = _SMP_Get_processor_maximum(); + + for ( cpu_index = 0 ; cpu_index < cpu_max ; ++cpu_index ) { + CPU[ cpu_index ].visited = false; + } + + CPU[ ++rear ].cpu = filter_cpu; + CPU[ _Per_CPU_Get_index( filter_cpu ) ].visited = true; + + highest_ready = _Scheduler_strong_APA_Find_highest_ready( + self, + front, + rear + ); + + if ( highest_ready != filter ) { + /* + * Backtrack on the path from + * filter_cpu to highest_ready, shifting along every task. + */ + + node = _Scheduler_strong_APA_Node_downcast( highest_ready ); + /* + * Highest ready is not just directly reachable from the victim cpu + * So there is need for task shifting. 
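+     * Each node on this path preempts the node executing on its
+     * cpu_to_preempt; the last node of the chain (whose cpu_to_preempt is
+     * the filter_cpu) is returned so that the caller performs the final
+     * allocation.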
+ */ + while ( node->cpu_to_preempt != filter_cpu ) { + curr_node = &node->Base.Base; + next_node = _Scheduler_strong_APA_Get_scheduled( + self, + node->cpu_to_preempt + ); + + (void) _Scheduler_SMP_Preempt( + context, + curr_node, + next_node, + _Scheduler_strong_APA_Allocate_processor + ); + + if ( curr_node == highest_ready ) { + _Scheduler_strong_APA_Move_from_ready_to_scheduled( context, curr_node ); + } + + node = _Scheduler_strong_APA_Node_downcast( next_node ); + } + /* + * To save the last node so that the caller SMP_* function + * can do the allocation + */ + curr_node = &node->Base.Base; + highest_ready = curr_node; + _Scheduler_strong_APA_Move_from_scheduled_to_ready( context, curr_node ); + } + + return highest_ready; } -static Scheduler_strong_APA_Context * -_Scheduler_strong_APA_Get_context( const Scheduler_Control *scheduler ) +/* + * Checks the lowest scheduled directly reachable task + */ +static inline Scheduler_Node *_Scheduler_strong_APA_Get_lowest_scheduled( + Scheduler_Context *context, + Scheduler_Node *filter_base +) { - return (Scheduler_strong_APA_Context *) _Scheduler_Get_context( scheduler ); + uint32_t cpu_max; + uint32_t cpu_index; + Scheduler_Node *curr_node; + Scheduler_Node *lowest_scheduled = NULL; + Priority_Control max_priority_num; + Priority_Control curr_priority; + Scheduler_strong_APA_Node *filter_strong_node; + Scheduler_strong_APA_Context *self; + + self = _Scheduler_strong_APA_Get_self( context ); + max_priority_num = 0; /* Max (Lowest) priority encountered so far */ + filter_strong_node = _Scheduler_strong_APA_Node_downcast( filter_base ); + + /* lowest_scheduled is NULL if affinity of a node is 0 */ + _Assert( !_Processor_mask_Is_zero( &filter_strong_node->Affinity ) ); + cpu_max = _SMP_Get_processor_maximum(); + + for ( cpu_index = 0 ; cpu_index < cpu_max ; ++cpu_index ) { + /* Checks if the CPU is in the affinity set of filter_strong_node */ + if ( _Processor_mask_Is_set( &filter_strong_node->Affinity, cpu_index ) ) { + Per_CPU_Control *cpu = _Per_CPU_Get_by_index( cpu_index ); + + if ( _Per_CPU_Is_processor_online( cpu ) ) { + curr_node = _Scheduler_strong_APA_Get_scheduled( self, cpu ); + curr_priority = _Scheduler_Node_get_priority( curr_node ); + curr_priority = SCHEDULER_PRIORITY_PURIFY( curr_priority ); + + if ( curr_priority > max_priority_num ) { + lowest_scheduled = curr_node; + max_priority_num = curr_priority; + } + } + } + } + + _Assert( lowest_scheduled != NULL ); + return lowest_scheduled; } -void _Scheduler_strong_APA_Initialize( const Scheduler_Control *scheduler ) +static inline void _Scheduler_strong_APA_Extract_from_scheduled( + Scheduler_Context *context, + Scheduler_Node *node_to_extract +) { - Scheduler_strong_APA_Context *self = - _Scheduler_strong_APA_Get_context( scheduler ); + Scheduler_strong_APA_Context *self; + Scheduler_strong_APA_Node *node; - _Scheduler_SMP_Initialize( &self->Base ); - _Priority_bit_map_Initialize( &self->Bit_map ); - _Scheduler_priority_Ready_queue_initialize( - &self->Ready[ 0 ], - scheduler->maximum_priority - ); + self = _Scheduler_strong_APA_Get_self( context ); + node = _Scheduler_strong_APA_Node_downcast( node_to_extract ); + + _Scheduler_SMP_Extract_from_scheduled( &self->Base.Base, &node->Base.Base ); + /* Not removing it from Ready since the node could go in the READY state */ } -void _Scheduler_strong_APA_Node_initialize( - const Scheduler_Control *scheduler, - Scheduler_Node *node, - Thread_Control *the_thread, - Priority_Control priority +static inline void 
_Scheduler_strong_APA_Extract_from_ready( + Scheduler_Context *context, + Scheduler_Node *node_to_extract ) { - Scheduler_Context *context; - Scheduler_strong_APA_Context *self; - Scheduler_strong_APA_Node *the_node; + Scheduler_strong_APA_Node *node; - the_node = _Scheduler_strong_APA_Node_downcast( node ); - _Scheduler_SMP_Node_initialize( - scheduler, - &the_node->Base, - the_thread, - priority - ); + node = _Scheduler_strong_APA_Node_downcast( node_to_extract ); + + if( !_Chain_Is_node_off_chain( &node->Ready_node ) ) { + _Chain_Extract_unprotected( &node->Ready_node ); + _Chain_Set_off_chain( &node->Ready_node ); + } - context = _Scheduler_Get_context( scheduler ); - self = _Scheduler_strong_APA_Get_self( context ); - _Scheduler_priority_Ready_queue_update( - &the_node->Ready_queue, - SCHEDULER_PRIORITY_UNMAP( priority ), - &self->Bit_map, - &self->Ready[ 0 ] - ); } -static bool _Scheduler_strong_APA_Has_ready( Scheduler_Context *context ) +static inline Scheduler_Node* _Scheduler_strong_APA_Get_lowest_reachable( + Scheduler_strong_APA_Context *self, + uint32_t front, + uint32_t rear, + Per_CPU_Control **cpu_to_preempt +) { - Scheduler_strong_APA_Context *self = - _Scheduler_strong_APA_Get_self( context ); + Scheduler_Node *lowest_reachable; + Priority_Control max_priority_num; + uint32_t cpu_max; + uint32_t cpu_index; + Thread_Control *curr_thread; + Per_CPU_Control *curr_CPU; + Priority_Control curr_priority; + Scheduler_Node *curr_node; + Scheduler_strong_APA_Node *curr_strong_node; + Scheduler_strong_APA_CPU *CPU; + + /* Max (Lowest) priority encountered so far */ + max_priority_num = 0; + CPU = self->CPU; + cpu_max = _SMP_Get_processor_maximum(); + + while ( front <= rear ) { + curr_CPU = CPU[ front ].cpu; + front = front + 1; - return !_Priority_bit_map_Is_empty( &self->Bit_map ); + curr_node = _Scheduler_strong_APA_Get_scheduled( self, curr_CPU ); + curr_thread = curr_node->user; + + curr_priority = _Scheduler_Node_get_priority( curr_node ); + curr_priority = SCHEDULER_PRIORITY_PURIFY( curr_priority ); + + curr_strong_node = _Scheduler_strong_APA_Node_downcast( curr_node ); + + if ( curr_priority > max_priority_num ) { + lowest_reachable = curr_node; + max_priority_num = curr_priority; + *cpu_to_preempt = curr_CPU; + } + + if ( !curr_thread->is_idle ) { + for ( cpu_index = 0 ; cpu_index < cpu_max ; ++cpu_index ) { + if ( _Processor_mask_Is_set( &curr_strong_node->Affinity, cpu_index ) ) { + /* Checks if the thread_CPU is in the affinity set of the node */ + Per_CPU_Control *cpu = _Per_CPU_Get_by_index( cpu_index ); + if ( + _Per_CPU_Is_processor_online( cpu ) && + CPU[ cpu_index ].visited == false ) + { + rear = rear + 1; + CPU[ rear ].cpu = cpu; + CPU[ cpu_index ].visited = true; + CPU[ cpu_index ].preempting_node = curr_node; + } + } + } + } + } + + return lowest_reachable; } -static Scheduler_Node *_Scheduler_strong_APA_Get_highest_ready( +static inline bool _Scheduler_strong_APA_Do_enqueue( Scheduler_Context *context, - Scheduler_Node *node + Scheduler_Node *lowest_reachable, + Scheduler_Node *node, + Priority_Control insert_priority, + Per_CPU_Control *cpu_to_preempt ) { - Scheduler_strong_APA_Context *self = - _Scheduler_strong_APA_Get_self( context ); + bool needs_help; + Priority_Control node_priority; + Priority_Control lowest_priority; + Scheduler_strong_APA_CPU *CPU; + Scheduler_Node *curr_node; + /* The first node that gets removed from the cpu */ + Scheduler_Node *first_node; + Scheduler_strong_APA_Node *curr_strong_node; + Per_CPU_Control *curr_CPU; + 
Scheduler_strong_APA_Context *self; + Scheduler_Node *next_node; - (void) node; - return (Scheduler_Node *) _Scheduler_priority_Ready_queue_first( - &self->Bit_map, - &self->Ready[ 0 ] - ); + self = _Scheduler_strong_APA_Get_self( context ); + CPU = self->CPU; + + node_priority = _Scheduler_Node_get_priority( node ); + node_priority = SCHEDULER_PRIORITY_PURIFY( node_priority ); + + if( lowest_reachable == NULL ) { + /* + * This means the affinity set of the newly arrived node + * is empty. + */ + lowest_priority = UINT64_MAX; + } else { + lowest_priority = _Scheduler_Node_get_priority( lowest_reachable ); + lowest_priority = SCHEDULER_PRIORITY_PURIFY( lowest_priority ); + } + + if ( lowest_priority > node_priority ) { + /* + * Backtrack on the path from + * _Thread_Get_CPU(lowest_reachable->user) to lowest_reachable, shifting + * along every task + */ + + curr_node = CPU[ _Per_CPU_Get_index( cpu_to_preempt ) ].preempting_node; + curr_strong_node = _Scheduler_strong_APA_Node_downcast( curr_node ); + curr_strong_node->cpu_to_preempt = cpu_to_preempt; + + /* Save which cpu to preempt in cpu_to_preempt value of the node */ + while ( curr_node != node ) { + curr_CPU = _Thread_Get_CPU( curr_node->user ); + curr_node = CPU[ _Per_CPU_Get_index( curr_CPU ) ].preempting_node; + curr_strong_node = _Scheduler_strong_APA_Node_downcast( curr_node ); + curr_strong_node->cpu_to_preempt = curr_CPU; + } + + curr_CPU = curr_strong_node->cpu_to_preempt; + next_node = _Scheduler_strong_APA_Get_scheduled( self, curr_CPU ); + + node_priority = _Scheduler_Node_get_priority( curr_node ); + node_priority = SCHEDULER_PRIORITY_PURIFY( node_priority ); + + _Scheduler_SMP_Enqueue_to_scheduled( + context, + curr_node, + node_priority, + next_node, + _Scheduler_SMP_Insert_scheduled, + _Scheduler_strong_APA_Move_from_scheduled_to_ready, + _Scheduler_strong_APA_Allocate_processor + ); + + curr_node = next_node; + first_node = curr_node; + curr_strong_node = _Scheduler_strong_APA_Node_downcast( curr_node ); + + while ( curr_node != lowest_reachable ) { + curr_CPU = curr_strong_node->cpu_to_preempt; + next_node = _Scheduler_strong_APA_Get_scheduled( self, curr_CPU ); + /* curr_node preempts the next_node; */ + _Scheduler_SMP_Preempt( + context, + curr_node, + next_node, + _Scheduler_strong_APA_Allocate_processor + ); + + if(curr_node == first_node) { + _Scheduler_strong_APA_Move_from_ready_to_scheduled(context, first_node); + } + curr_node = next_node; + curr_strong_node = _Scheduler_strong_APA_Node_downcast( curr_node ); + } + + _Scheduler_strong_APA_Move_from_scheduled_to_ready( context, lowest_reachable ); + + needs_help = false; + } else { + needs_help = true; + } + + /* Add it to Ready chain since it is now either scheduled or just ready. */ + _Scheduler_strong_APA_Insert_ready( context,node, insert_priority ); + + return needs_help; } -void _Scheduler_strong_APA_Block( - const Scheduler_Control *scheduler, - Thread_Control *the_thread, - Scheduler_Node *node +/* + * BFS Algorithm for task arrival + * Enqueue node either in the scheduled chain or in the ready chain. + * node is the newly arrived node and is currently not scheduled. 
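+ * The BFS queue is seeded with the online processors in the affinity set
+ * of node; _Scheduler_strong_APA_Get_lowest_reachable() then extends the
+ * queue with processors reachable through the affinity sets of the nodes
+ * currently executing on the visited processors.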
+ */ +static inline bool _Scheduler_strong_APA_Enqueue( + Scheduler_Context *context, + Scheduler_Node *node, + Priority_Control insert_priority ) { - Scheduler_Context *context = _Scheduler_Get_context( scheduler ); + Scheduler_strong_APA_Context *self; + Scheduler_strong_APA_CPU *CPU; + uint32_t cpu_max; + uint32_t cpu_index; + Per_CPU_Control *cpu_to_preempt; + Scheduler_Node *lowest_reachable; + Scheduler_strong_APA_Node *strong_node; - _Scheduler_SMP_Block( + /* Denotes front and rear of the queue */ + uint32_t front; + uint32_t rear; + + front = 0; + rear = -1; + + self = _Scheduler_strong_APA_Get_self( context ); + strong_node = _Scheduler_strong_APA_Node_downcast( node ); + cpu_max = _SMP_Get_processor_maximum(); + CPU = self->CPU; + + for ( cpu_index = 0 ; cpu_index < cpu_max ; ++cpu_index ) { + CPU[ cpu_index ].visited = false; + + /* Checks if the thread_CPU is in the affinity set of the node */ + if ( _Processor_mask_Is_set( &strong_node->Affinity, cpu_index ) ) { + Per_CPU_Control *cpu = _Per_CPU_Get_by_index( cpu_index ); + + if ( _Per_CPU_Is_processor_online( cpu ) ) { + rear = rear + 1; + CPU[ rear ].cpu = cpu; + CPU[ cpu_index ].visited = true; + CPU[ cpu_index ].preempting_node = node; + } + } + } + + lowest_reachable = _Scheduler_strong_APA_Get_lowest_reachable( + self, + front, + rear, + &cpu_to_preempt + ); + + return _Scheduler_strong_APA_Do_enqueue( + context, + lowest_reachable, + node, + insert_priority, + cpu_to_preempt + ); +} + +static inline bool _Scheduler_strong_APA_Enqueue_scheduled( + Scheduler_Context *context, + Scheduler_Node *node, + Priority_Control insert_priority +) +{ + return _Scheduler_SMP_Enqueue_scheduled( context, - the_thread, node, - _Scheduler_SMP_Extract_from_scheduled, + insert_priority, + _Scheduler_SMP_Priority_less_equal, _Scheduler_strong_APA_Extract_from_ready, _Scheduler_strong_APA_Get_highest_ready, + _Scheduler_strong_APA_Insert_ready, + _Scheduler_SMP_Insert_scheduled, _Scheduler_strong_APA_Move_from_ready_to_scheduled, - _Scheduler_SMP_Allocate_processor_exact + _Scheduler_strong_APA_Allocate_processor ); } -static bool _Scheduler_strong_APA_Enqueue( +static inline bool _Scheduler_strong_APA_Do_ask_for_help( Scheduler_Context *context, - Scheduler_Node *node, - Priority_Control insert_priority + Thread_Control *the_thread, + Scheduler_Node *node ) { - return _Scheduler_SMP_Enqueue( + return _Scheduler_SMP_Ask_for_help( context, + the_thread, node, - insert_priority, _Scheduler_SMP_Priority_less_equal, _Scheduler_strong_APA_Insert_ready, _Scheduler_SMP_Insert_scheduled, _Scheduler_strong_APA_Move_from_scheduled_to_ready, - _Scheduler_SMP_Get_lowest_scheduled, - _Scheduler_SMP_Allocate_processor_exact + _Scheduler_strong_APA_Get_lowest_scheduled, + _Scheduler_strong_APA_Allocate_processor ); } -static bool _Scheduler_strong_APA_Enqueue_scheduled( +static inline void _Scheduler_strong_APA_Do_set_affinity( Scheduler_Context *context, - Scheduler_Node *node, - Priority_Control insert_priority + Scheduler_Node *node_base, + void *arg ) { - return _Scheduler_SMP_Enqueue_scheduled( + Scheduler_strong_APA_Node *node; + + node = _Scheduler_strong_APA_Node_downcast( node_base ); + node->Affinity = *( (const Processor_mask *) arg ); +} + +void _Scheduler_strong_APA_Initialize( const Scheduler_Control *scheduler ) +{ + Scheduler_strong_APA_Context *self = + _Scheduler_strong_APA_Get_context( scheduler ); + + _Scheduler_SMP_Initialize( &self->Base ); + _Chain_Initialize_empty( &self->Ready ); +} + +void _Scheduler_strong_APA_Yield( + const 
Scheduler_Control *scheduler, + Thread_Control *thread, + Scheduler_Node *node +) +{ + Scheduler_Context *context = _Scheduler_Get_context( scheduler ); + + _Scheduler_SMP_Yield( context, + thread, node, - insert_priority, - _Scheduler_SMP_Priority_less_equal, _Scheduler_strong_APA_Extract_from_ready, - _Scheduler_strong_APA_Get_highest_ready, - _Scheduler_strong_APA_Insert_ready, - _Scheduler_SMP_Insert_scheduled, - _Scheduler_strong_APA_Move_from_ready_to_scheduled, - _Scheduler_SMP_Allocate_processor_exact + _Scheduler_strong_APA_Enqueue, + _Scheduler_strong_APA_Enqueue_scheduled ); } -void _Scheduler_strong_APA_Unblock( +void _Scheduler_strong_APA_Block( const Scheduler_Control *scheduler, - Thread_Control *the_thread, + Thread_Control *thread, Scheduler_Node *node ) { Scheduler_Context *context = _Scheduler_Get_context( scheduler ); - _Scheduler_SMP_Unblock( + /* + * Needed in case the node is scheduled node, since _SMP_Block only extracts + * from the SMP scheduled chain and from the Strong APA Ready_chain + * when the node is ready. But the Strong APA Ready_chain stores both + * ready and scheduled nodes. + */ + _Scheduler_strong_APA_Extract_from_ready(context, node); + + _Scheduler_SMP_Block( context, - the_thread, + thread, node, - _Scheduler_strong_APA_Do_update, - _Scheduler_strong_APA_Enqueue + _Scheduler_strong_APA_Extract_from_scheduled, + _Scheduler_strong_APA_Extract_from_ready, + _Scheduler_strong_APA_Get_highest_ready, + _Scheduler_strong_APA_Move_from_ready_to_scheduled, + _Scheduler_strong_APA_Allocate_processor ); } -static bool _Scheduler_strong_APA_Do_ask_for_help( - Scheduler_Context *context, - Thread_Control *the_thread, - Scheduler_Node *node +void _Scheduler_strong_APA_Unblock( + const Scheduler_Control *scheduler, + Thread_Control *thread, + Scheduler_Node *node ) { - return _Scheduler_SMP_Ask_for_help( + Scheduler_Context *context = _Scheduler_Get_context( scheduler ); + + _Scheduler_SMP_Unblock( context, - the_thread, + thread, node, - _Scheduler_SMP_Priority_less_equal, - _Scheduler_strong_APA_Insert_ready, - _Scheduler_SMP_Insert_scheduled, - _Scheduler_strong_APA_Move_from_scheduled_to_ready, - _Scheduler_SMP_Get_lowest_scheduled, - _Scheduler_SMP_Allocate_processor_lazy + _Scheduler_strong_APA_Do_update, + _Scheduler_strong_APA_Enqueue ); } void _Scheduler_strong_APA_Update_priority( const Scheduler_Control *scheduler, - Thread_Control *the_thread, + Thread_Control *thread, Scheduler_Node *node ) { @@ -335,7 +854,7 @@ void _Scheduler_strong_APA_Update_priority( _Scheduler_SMP_Update_priority( context, - the_thread, + thread, node, _Scheduler_strong_APA_Extract_from_ready, _Scheduler_strong_APA_Do_update, @@ -353,7 +872,11 @@ bool _Scheduler_strong_APA_Ask_for_help( { Scheduler_Context *context = _Scheduler_Get_context( scheduler ); - return _Scheduler_strong_APA_Do_ask_for_help( context, the_thread, node ); + return _Scheduler_strong_APA_Do_ask_for_help( + context, + the_thread, + node + ); } void _Scheduler_strong_APA_Reconsider_help_request( @@ -389,10 +912,22 @@ void _Scheduler_strong_APA_Withdraw_node( _Scheduler_strong_APA_Extract_from_ready, _Scheduler_strong_APA_Get_highest_ready, _Scheduler_strong_APA_Move_from_ready_to_scheduled, - _Scheduler_SMP_Allocate_processor_lazy + _Scheduler_strong_APA_Allocate_processor ); } +static inline void _Scheduler_strong_APA_Register_idle( + Scheduler_Context *context, + Scheduler_Node *idle_base, + Per_CPU_Control *cpu +) +{ + Scheduler_strong_APA_Context *self; + self = _Scheduler_strong_APA_Get_self( context 
); + + _Scheduler_strong_APA_Set_scheduled( self, idle_base, cpu ); +} + void _Scheduler_strong_APA_Add_processor( const Scheduler_Control *scheduler, Thread_Control *idle @@ -405,7 +940,25 @@ void _Scheduler_strong_APA_Add_processor( idle, _Scheduler_strong_APA_Has_ready, _Scheduler_strong_APA_Enqueue_scheduled, - _Scheduler_SMP_Do_nothing_register_idle + _Scheduler_strong_APA_Register_idle + ); +} + +void _Scheduler_strong_APA_Start_idle( + const Scheduler_Control *scheduler, + Thread_Control *idle, + Per_CPU_Control *cpu +) +{ + Scheduler_Context *context; + + context = _Scheduler_Get_context( scheduler ); + + _Scheduler_SMP_Do_start_idle( + context, + idle, + cpu, + _Scheduler_strong_APA_Register_idle ); } @@ -424,20 +977,64 @@ Thread_Control *_Scheduler_strong_APA_Remove_processor( ); } -void _Scheduler_strong_APA_Yield( +void _Scheduler_strong_APA_Node_initialize( const Scheduler_Control *scheduler, + Scheduler_Node *node, Thread_Control *the_thread, - Scheduler_Node *node + Priority_Control priority ) { - Scheduler_Context *context = _Scheduler_Get_context( scheduler ); + Scheduler_SMP_Node *smp_node; + Scheduler_strong_APA_Node *strong_node; - _Scheduler_SMP_Yield( - context, - the_thread, - node, - _Scheduler_strong_APA_Extract_from_ready, - _Scheduler_strong_APA_Enqueue, - _Scheduler_strong_APA_Enqueue_scheduled + smp_node = _Scheduler_SMP_Node_downcast( node ); + strong_node = _Scheduler_strong_APA_Node_downcast( node ); + + _Scheduler_SMP_Node_initialize( scheduler, smp_node, the_thread, priority ); + + _Processor_mask_Assign( + &strong_node->Affinity, + _SMP_Get_online_processors() ); } + +Status_Control _Scheduler_strong_APA_Set_affinity( + const Scheduler_Control *scheduler, + Thread_Control *thread, + Scheduler_Node *node_base, + const Processor_mask *affinity +) +{ + Scheduler_Context *context; + Scheduler_strong_APA_Node *node; + Processor_mask local_affinity; + + context = _Scheduler_Get_context( scheduler ); + _Processor_mask_And( &local_affinity, &context->Processors, affinity ); + + if ( _Processor_mask_Is_zero( &local_affinity ) ) { + return STATUS_INVALID_NUMBER; + } + + node = _Scheduler_strong_APA_Node_downcast( node_base ); + + if ( _Processor_mask_Is_equal( &node->Affinity, affinity ) ) + return STATUS_SUCCESSFUL; /* Nothing to do. Return true. */ + + _Processor_mask_Assign( &node->Affinity, &local_affinity ); + + _Scheduler_SMP_Set_affinity( + context, + thread, + node_base, + &local_affinity, + _Scheduler_strong_APA_Do_set_affinity, + _Scheduler_strong_APA_Extract_from_ready, + _Scheduler_strong_APA_Get_highest_ready, + _Scheduler_strong_APA_Move_from_ready_to_scheduled, + _Scheduler_strong_APA_Enqueue, + _Scheduler_strong_APA_Allocate_processor + ); + + return STATUS_SUCCESSFUL; +} -- cgit v1.2.3
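Usage sketch (not part of the change set above): with the updated <rtems/scheduler.h>, an SMP application that selects the Strong APA scheduler has to define CONFIGURE_MAXIMUM_PROCESSORS before including <rtems/confdefs.h>, because RTEMS_SCHEDULER_STRONG_APA now sizes its per-CPU array from that value. The fragment below shows one plausible minimal configuration together with a task restricting its own affinity; the processor count, task budget, and affinity mask are illustrative assumptions, not values taken from the patch.

#include <rtems.h>
#include <sys/cpuset.h>

rtems_task Init( rtems_task_argument argument );

rtems_task Init( rtems_task_argument argument )
{
  cpu_set_t cpuset;

  /* Restrict the init task to processors 0 and 1; the Strong APA scheduler
     may shift already scheduled tasks to honour this constraint. */
  CPU_ZERO( &cpuset );
  CPU_SET( 0, &cpuset );
  CPU_SET( 1, &cpuset );
  (void) rtems_task_set_affinity( RTEMS_SELF, sizeof( cpuset ), &cpuset );

  /* Application code would continue here. */
  rtems_task_exit();
}

#define CONFIGURE_APPLICATION_NEEDS_CLOCK_DRIVER
#define CONFIGURE_MAXIMUM_TASKS 4

/* Required by RTEMS_SCHEDULER_STRONG_APA since this change. */
#define CONFIGURE_MAXIMUM_PROCESSORS 4
#define CONFIGURE_SCHEDULER_STRONG_APA

#define CONFIGURE_RTEMS_INIT_TASKS_TABLE
#define CONFIGURE_INIT
#include <rtems/confdefs.h>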