summaryrefslogblamecommitdiffstats
path: root/cpukit/score/include/rtems/score/schedulerimpl.h
blob: c41c3af3e6081dc6c7d3d0ceecd9474b2d66cd77 (plain) (tree)
1
2
3
4
5
6
7
8
9
10
11
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044
1045
1046
1047
1048
1049
1050
1051
1052
1053
1054
1055
1056
1057
1058
1059
1060
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090
1091
1092
1093
1094
1095
1096
1097
1098
1099
1100
1101
1102
1103
1104
1105
1106
1107
1108
1109
1110
1111
1112
1113
1114
1115
1116
1117
1118
1119
1120
1121
1122
1123
1124
1125
1126
1127
1128
1129
1130
1131
1132
1133
1134
1135
1136
1137
1138
1139
1140
1141
1142
1143
1144
1145
1146
1147
1148
1149
1150
1151
1152
1153
1154
1155
1156
1157
1158
1159
1160
1161
1162
1163
1164
1165
1166
1167
1168
1169
1170
1171
1172
1173
1174
1175
1176
1177
1178
1179
1180
1181
1182
1183
1184
1185
1186
1187
1188
1189
1190
1191
1192
1193
1194
1195
1196
1197
1198
1199
1200
1201
1202
1203
1204
1205
1206
1207
1208
1209
1210
1211
1212
1213
1214
1215
1216
1217
1218
1219
1220
1221
1222
1223
1224
1225
1226
1227
1228
1229
1230
1231
1232
1233
1234
1235
1236
1237
1238
1239
1240
1241
1242
1243
1244
1245
1246
1247
1248
1249
1250
1251
1252
1253
1254
1255
1256
1257
1258
1259
1260
1261
1262
1263
1264
1265
1266
1267
1268
1269
1270
1271
1272
1273
1274
1275
1276
1277
1278
1279
1280
1281
1282
1283
1284
1285
1286
1287
1288
1289
1290
1291
1292
1293
1294
1295
1296
1297
1298
1299
1300
1301
1302
1303
1304
1305
1306
1307
1308
1309
1310
1311
1312
1313
1314
1315
1316
1317
1318
1319
1320
1321
1322
1323



                                                                            
  

                                                                        



                                    
                                                                       
                                           


                                                           
                                         

   



                                    
                                   
                                
                                   
 


                  

   
                             
   
        

   







                                                                           






                                                               




                                                             
                                       






                                












                                                                 





















                                                                          
                      






                                                                    







                                                              
   

                                                                          
  
                                                                           
                                                  

   



                                                                          


                                    
   
                                      
  

                                                                   
  
                                                                   
   
                                                                           
 

                                                                    
                                                               

 
                      
































                                                                         






                                                                              



                                                                               

                                                 


























                                                                          



                                                               












                                                                        


      
   
                                                   
  


                                                                               
                                             
   
                                                                        
 
                                                                    

                             
 

              
                                                            



                                                     

 
   
                                                        
  



                                                                      

                                    
   
                                                                        
 

                                                                    
                                                            

 
   
                                                          
  



                                                                     

                                    
   
                                                                          
 
                                                                    

                             
 

              
                                                              



                                                     

 
   





                                                                               






                                                                               




                                        
                                                                        

                             
 

              





                                             



                                                     


   
                                       
  






                                                                            
   
                                                     

                                     

 
                                                                             

 
   
                                    
  




                                                                            
   
                                                  

                                     

 
                                                                   

 
   
                                                                              
  

                                                          
   
                                                     

                                

 

                                                                    




                                             

 
   
                                       
  











                                                                               

                                                     


                                     

 
                                                               


   
                                                                   
  

                                       

                                                 

                             

 

                                                                    
                                                                          

 

                                                      

                                                                   
                                                              


                                                                 
                                                 
 





                                                                      
                                               
 

                                                              

     

 


                                                            
                                                               
                                                              



                                                


                                      

 
                                                                      

 








































                                                                              









                                                                            
                                                             
                                                  
                                              
                                                        
                                                                           






                                                        










                                                       


                                                               






                                                                       
                                                     
      

















                                                                


                             








                                                               



                                                  
                                                               




                                               
                                                             




                                                                
                                                                  
      

   



                             





                                                 
 













                                                                             








                                                       

 
                                        


                                     

                                                                              


   
   
                                                                          
                               
   
                                                            


                                     

 
                                                               
 

   
                                                                           


                                                             


                                     

 
                                                               



                                                                               

                                      
                                                                         


                                     

 
                                                                           
 
 




                                                                          


                                      



                                                          
                                                                             








                                                                             



                                      



                                                          
                                                                             



                                                                




                                                             
                                                              






                   









                                                                               






                                                                         

                                               
                                       

 
                                                    
                                                                  
 
                           
 

                                                       

 









                                                                 






                                                         

                                                                

 
                                    

 





                                                        

                                             
                           

                                  






                      






















                                                                            





                                                               







                                                              











































                                                                        












                                                                              
                                                                      





                                                              
















































































































































































































































































































































































































                                                                                

      
         
 



                  

                         
/**
 * @file
 *
 * @brief Inlined Routines Associated with the Manipulation of the Scheduler
 *
 * This inline file contains all of the inlined routines associated with
 * the manipulation of the scheduler.
 */

/*
 *  Copyright (C) 2010 Gedare Bloom.
 *  Copyright (C) 2011 On-Line Applications Research Corporation (OAR).
 *  Copyright (c) 2014 embedded brains GmbH
 *
 *  The license and distribution terms for this file may be
 *  found in the file LICENSE in this distribution or at
 *  http://www.rtems.org/license/LICENSE.
 */

#ifndef _RTEMS_SCORE_SCHEDULERIMPL_H
#define _RTEMS_SCORE_SCHEDULERIMPL_H

#include <rtems/score/scheduler.h>
#include <rtems/score/cpusetimpl.h>
#include <rtems/score/smpimpl.h>
#include <rtems/score/threadimpl.h>

#ifdef __cplusplus
extern "C" {
#endif

/**
 * @addtogroup ScoreScheduler
 */
/**@{**/

/**
 *  @brief Initializes the scheduler to the policy chosen by the user.
 *
 *  This routine initializes the scheduler to the policy chosen by the user
 *  through confdefs, or to the priority scheduler with ready chains by
 *  default.
 */
void _Scheduler_Handler_initialization( void );

/**
 * @brief Returns the context of the scheduler instance.
 *
 * @param[in] scheduler The scheduler instance.
 */
RTEMS_INLINE_ROUTINE Scheduler_Context *_Scheduler_Get_context(
  const Scheduler_Control *scheduler
)
{
  Scheduler_Context *context = scheduler->context;

  return context;
}

/**
 * @brief Returns the scheduler instance of the thread.
 *
 * On SMP configurations this is the scheduler instance currently assigned to
 * the thread (Scheduler.control).  On uniprocessor configurations there is
 * exactly one scheduler instance, the first table entry.
 *
 * @param[in] the_thread The thread.
 */
RTEMS_INLINE_ROUTINE const Scheduler_Control *_Scheduler_Get(
  const Thread_Control *the_thread
)
{
#if defined(RTEMS_SMP)
  return the_thread->Scheduler.control;
#else
  (void) the_thread;

  return &_Scheduler_Table[ 0 ];
#endif
}

/**
 * @brief Returns the own scheduler instance of the thread.
 *
 * On SMP configurations this reads Scheduler.own_control, which may differ
 * from the instance returned by _Scheduler_Get() — presumably while the
 * thread temporarily runs on another instance via the help protocol;
 * NOTE(review): confirm against the SMP scheduler helping documentation.
 * On uniprocessor configurations the single table entry is returned.
 *
 * @param[in] the_thread The thread.
 */
RTEMS_INLINE_ROUTINE const Scheduler_Control *_Scheduler_Get_own(
  const Thread_Control *the_thread
)
{
#if defined(RTEMS_SMP)
  return the_thread->Scheduler.own_control;
#else
  (void) the_thread;

  return &_Scheduler_Table[ 0 ];
#endif
}

/**
 * @brief Returns the scheduler instance assigned to the processor index.
 *
 * On SMP configurations the instance is looked up in the processor
 * assignment table; it may be NULL (see _Scheduler_Tick()).  On
 * uniprocessor configurations the single table entry is returned.
 *
 * @param[in] cpu_index The processor index.
 */
RTEMS_INLINE_ROUTINE const Scheduler_Control *_Scheduler_Get_by_CPU_index(
  uint32_t cpu_index
)
{
#if defined(RTEMS_SMP)
  return _Scheduler_Assignments[ cpu_index ].scheduler;
#else
  (void) cpu_index;

  return &_Scheduler_Table[ 0 ];
#endif
}

/**
 * @brief Returns the scheduler instance assigned to the processor.
 *
 * @param[in] cpu The processor.
 */
RTEMS_INLINE_ROUTINE const Scheduler_Control *_Scheduler_Get_by_CPU(
  const Per_CPU_Control *cpu
)
{
  return _Scheduler_Get_by_CPU_index( _Per_CPU_Get_index( cpu ) );
}

#if defined(RTEMS_SMP)
/**
 * @brief Returns the own scheduler node of the thread.
 *
 * @param[in] the_thread The thread.
 */
RTEMS_INLINE_ROUTINE Scheduler_Node *_Scheduler_Thread_get_own_node(
  const Thread_Control *the_thread
)
{
  return the_thread->Scheduler.own_node;
}

/**
 * @brief Returns the user thread of the scheduler node.
 *
 * @param[in] node The scheduler node.
 */
RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Node_get_user(
  const Scheduler_Node *node
)
{
  return node->user;
}
#endif

/**
 * The preferred method to add a new scheduler is to define the jump table
 * entries and add a case to the _Scheduler_Initialize routine.
 *
 * Generic scheduling implementations that rely on the ready queue only can
 * be found in the _Scheduler_queue_XXX functions.
 */

/*
 * Passing the Scheduler_Control* to these functions allows for multiple
 * schedulers to exist simultaneously, which could be useful on an SMP
 * system.  Then remote Schedulers may be accessible.  How to protect such
 * accesses remains an open problem.
 */

/**
 * @brief General scheduling decision.
 *
 * This kernel routine invokes the schedule operation of the scheduler
 * instance which the thread belongs to.  It makes the scheduling decision
 * only; it does NOT dispatch.
 *
 * @param[in] the_thread The thread which state changed previously.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Schedule( Thread_Control *the_thread )
{
  const Scheduler_Control *the_scheduler;

  the_scheduler = _Scheduler_Get( the_thread );
  ( *the_scheduler->Operations.schedule )( the_scheduler, the_thread );
}

#if defined(RTEMS_SMP)
/**
 * @brief Context passed to _Scheduler_Ask_for_help_visitor() during the
 * resource tree iteration in _Scheduler_Ask_for_help().
 */
typedef struct {
  /* The thread currently in need of help. */
  Thread_Control *needs_help;

  /* The next thread in need of help produced by a visited scheduler. */
  Thread_Control *next_needs_help;
} Scheduler_Ask_for_help_context;

/**
 * @brief Visitor for _Resource_Iterate() used by _Scheduler_Ask_for_help().
 *
 * Asks the own scheduler instance of one resource-owning thread for help on
 * behalf of the thread in need of help.
 *
 * @param[in] resource_node The resource node of the thread offering help.
 * @param[in] arg Pointer to a Scheduler_Ask_for_help_context.
 *
 * @retval true Stop the iteration: the ask_for_help operation produced a
 * thread different from the previous one in need of help.
 * @retval false Continue with the next resource node.
 */
RTEMS_INLINE_ROUTINE bool _Scheduler_Ask_for_help_visitor(
  Resource_Node *resource_node,
  void          *arg
)
{
  bool done;
  Scheduler_Ask_for_help_context *help_context = arg;
  Thread_Control *previous_needs_help = help_context->needs_help;
  Thread_Control *next_needs_help;
  Thread_Control *offers_help =
    _Thread_Resource_node_to_thread( resource_node );
  const Scheduler_Control *scheduler = _Scheduler_Get_own( offers_help );

  next_needs_help = ( *scheduler->Operations.ask_for_help )(
    scheduler,
    offers_help,
    previous_needs_help
  );

  /* A changed thread-in-need terminates the iteration. */
  done = next_needs_help != previous_needs_help;

  if ( done ) {
    help_context->next_needs_help = next_needs_help;
  }

  return done;
}

/**
 * @brief Ask threads depending on resources owned by the thread for help.
 *
 * A thread is in need for help if it lost its assigned processor due to
 * pre-emption by a higher priority thread or it was not possible to assign it
 * a processor since its priority is too low on its current scheduler instance.
 *
 * The run-time of this function depends on the size of the resource tree of
 * the thread needing help and other resource trees in case threads in need for
 * help are produced during this operation.
 *
 * @param[in] needs_help The thread needing help.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Ask_for_help(
  Thread_Control *needs_help
)
{
  do {
    const Scheduler_Control *scheduler = _Scheduler_Get_own( needs_help );

    /* First ask the own scheduler instance of the thread directly. */
    needs_help = ( *scheduler->Operations.ask_for_help )(
      scheduler,
      needs_help,
      needs_help
    );

    if ( needs_help != NULL ) {
      Scheduler_Ask_for_help_context help_context = { needs_help, NULL };

      /*
       * Walk the resource tree of the thread still in need of help.  The
       * visitor stops the iteration once a different thread in need of help
       * is produced; next_needs_help stays NULL if no visitor succeeded,
       * which terminates the outer loop.
       */
      _Resource_Iterate(
        &needs_help->Resource_node,
        _Scheduler_Ask_for_help_visitor,
        &help_context
      );

      needs_help = help_context.next_needs_help;
    }
  } while ( needs_help != NULL );
}

/**
 * @brief Asks for help if the thread owns resources and help could matter.
 *
 * The ask-for-help protocol is started only if the thread in need of help
 * owns resources and its own scheduler node is not already an active rival
 * used by the thread itself.  A NULL thread is tolerated and ignored.
 *
 * @param[in] needs_help The thread possibly needing help, may be NULL.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Ask_for_help_if_necessary(
  Thread_Control *needs_help
)
{
  if (
    needs_help != NULL
      && _Resource_Node_owns_resources( &needs_help->Resource_node )
  ) {
    Scheduler_Node *node = _Scheduler_Thread_get_own_node( needs_help );

    if (
      node->help_state != SCHEDULER_HELP_ACTIVE_RIVAL
        || _Scheduler_Node_get_user( node ) != needs_help
    ) {
      _Scheduler_Ask_for_help( needs_help );
    }
  }
}
#endif

/**
 * @brief Scheduler yield with a particular thread.
 *
 * This routine is invoked when a thread wishes to voluntarily transfer control
 * of the processor to another thread.
 *
 * @param[in] the_thread The yielding thread.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Yield( Thread_Control *the_thread )
{
  const Scheduler_Control *scheduler = _Scheduler_Get( the_thread );
#if defined(RTEMS_SMP)
  Thread_Control *needs_help;

  /*
   * On SMP the assignment target below captures the return value of the
   * yield operation; on uniprocessor configurations the same call is a plain
   * statement whose value is discarded.
   */
  needs_help =
#endif
  ( *scheduler->Operations.yield )( scheduler, the_thread );

#if defined(RTEMS_SMP)
  _Scheduler_Ask_for_help_if_necessary( needs_help );
#endif
}

/**
 * @brief Blocks a thread with respect to the scheduler.
 *
 * Removes @a the_thread from the scheduling decision of its scheduler
 * instance.  The primary task is to remove the thread from the ready queue;
 * the scheduler performs any necessary scheduling operations including the
 * selection of a new heir thread.
 *
 * @param[in] the_thread The thread.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Block( Thread_Control *the_thread )
{
  const Scheduler_Control *the_scheduler = _Scheduler_Get( the_thread );

  ( *the_scheduler->Operations.block )( the_scheduler, the_thread );
}

/**
 * @brief Unblocks a thread with respect to the scheduler.
 *
 * This routine adds @a the_thread to the scheduling decision for
 * the scheduler.  The primary task is to add the thread to the
 * ready queue per the scheduling policy and update any appropriate
 * scheduling variables, for example the heir thread.
 *
 * @param[in] the_thread The thread.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Unblock( Thread_Control *the_thread )
{
  const Scheduler_Control *scheduler = _Scheduler_Get( the_thread );
#if defined(RTEMS_SMP)
  Thread_Control *needs_help;

  /* On SMP the unblock operation may yield a thread in need of help. */
  needs_help =
#endif
  ( *scheduler->Operations.unblock )( scheduler, the_thread );

#if defined(RTEMS_SMP)
  _Scheduler_Ask_for_help_if_necessary( needs_help );
#endif
}

/**
 * @brief Propagates a priority change of a thread to the scheduler.
 *
 * The caller must ensure that the thread is in the ready state.  The caller
 * must ensure that the priority value actually changed and is not equal to the
 * current priority value.
 *
 * @param[in] the_thread The thread changing its priority.
 * @param[in] new_priority The new thread priority.
 * @param[in] prepend_it In case this is true, then enqueue the thread as the
 * first of its priority group, otherwise enqueue the thread as the last of its
 * priority group.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Change_priority(
  Thread_Control          *the_thread,
  Priority_Control         new_priority,
  bool                     prepend_it
)
{
  /* Note: uses the own scheduler instance, not _Scheduler_Get(). */
  const Scheduler_Control *scheduler = _Scheduler_Get_own( the_thread );
#if defined(RTEMS_SMP)
  Thread_Control *needs_help;

  needs_help =
#endif
  ( *scheduler->Operations.change_priority )(
    scheduler,
    the_thread,
    new_priority,
    prepend_it
  );

#if defined(RTEMS_SMP)
  _Scheduler_Ask_for_help_if_necessary( needs_help );
#endif
}

/**
 * @brief Initializes a scheduler node.
 *
 * The scheduler node contains arbitrary data on function entry.  The caller
 * must ensure that _Scheduler_Node_destroy() will be called after a
 * _Scheduler_Node_initialize() before the memory of the scheduler node is
 * destroyed.
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in] the_thread The thread containing the scheduler node.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Node_initialize(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread
)
{
  /*
   * No "return" here:  a return statement with an expression in a function
   * declared to return void is an ISO C constraint violation (C99 6.8.6.4),
   * even when the expression itself has void type.
   */
  ( *scheduler->Operations.node_initialize )( scheduler, the_thread );
}

/**
 * @brief Destroys a scheduler node.
 *
 * The caller must ensure that this routine is invoked only after a
 * corresponding _Scheduler_Node_initialize() for the same node.
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in] the_thread The thread containing the scheduler node.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Node_destroy(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread
)
{
  /* Delegate to the instance-specific destroy operation. */
  ( *scheduler->Operations.node_destroy )( scheduler, the_thread );
}

/**
 * @brief Updates the scheduler about a priority change of a not ready thread.
 *
 * @param[in] the_thread The thread.
 * @param[in] new_priority The new priority of the thread.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Update_priority(
  Thread_Control   *the_thread,
  Priority_Control  new_priority
)
{
  const Scheduler_Control *the_scheduler = _Scheduler_Get( the_thread );

  ( *the_scheduler->Operations.update_priority )(
    the_scheduler,
    the_thread,
    new_priority
  );
}

/**
 * @brief Compares two priority values.
 *
 * The comparison semantics are defined by the scheduler instance.
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in] p1 The first priority value.
 * @param[in] p2 The second priority value.
 *
 * @retval negative @a p1 encodes a lower priority than @a p2 in the
 * intuitive sense of priority.
 * @retval 0 The priorities @a p1 and @a p2 are equal.
 * @retval positive @a p1 encodes a higher priority than @a p2 in the
 * intuitive sense of priority.
 *
 * @see _Scheduler_Is_priority_lower_than() and
 * _Scheduler_Is_priority_higher_than().
 */
RTEMS_INLINE_ROUTINE int _Scheduler_Priority_compare(
  const Scheduler_Control *scheduler,
  Priority_Control         p1,
  Priority_Control         p2
)
{
  /* The priority_compare operation takes only the two priority values. */
  return ( *scheduler->Operations.priority_compare )( p1, p2 );
}

/**
 * @brief Releases a job of a thread with respect to the scheduler.
 *
 * @param[in] the_thread The thread.
 * @param[in] length The period length.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Release_job(
  Thread_Control *the_thread,
  uint32_t        length
)
{
  const Scheduler_Control *the_scheduler = _Scheduler_Get( the_thread );

  ( *the_scheduler->Operations.release_job )(
    the_scheduler,
    the_thread,
    length
  );
}

/**
 * @brief Scheduler method invoked at each clock tick.
 *
 * This method is invoked at each clock tick to allow the scheduler
 * implementation to perform any activities required.  For schedulers which
 * support standard RTEMS features, this includes time-slicing management.
 * Processors without an assigned scheduler or without an executing thread
 * are skipped.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Tick( void )
{
  uint32_t processor_count = _SMP_Get_processor_count();
  uint32_t index;

  for ( index = 0 ; index < processor_count ; ++index ) {
    const Per_CPU_Control *cpu = _Per_CPU_Get_by_index( index );
    const Scheduler_Control *scheduler = _Scheduler_Get_by_CPU( cpu );
    Thread_Control *executing = cpu->executing;

    if ( scheduler == NULL || executing == NULL ) {
      continue;
    }

    ( *scheduler->Operations.tick )( scheduler, executing );
  }
}

/**
 * @brief Starts the idle thread for a particular processor.
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in,out] the_thread The idle thread for the processor.
 * @param[in,out] cpu The processor for the idle thread.
 *
 * @see _Thread_Create_idle().
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Start_idle(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  Per_CPU_Control         *cpu
)
{
  ( *scheduler->Operations.start_idle )( scheduler, the_thread, cpu );
}

#if defined(RTEMS_SMP)
/**
 * @brief Returns the scheduler assignment for the processor index.
 *
 * @param[in] cpu_index The processor index.
 */
RTEMS_INLINE_ROUTINE const Scheduler_Assignment *_Scheduler_Get_assignment(
  uint32_t cpu_index
)
{
  return &_Scheduler_Assignments[ cpu_index ];
}

/**
 * @brief Returns true if the processor assignment is mandatory.
 *
 * @param[in] assignment The scheduler assignment.
 */
RTEMS_INLINE_ROUTINE bool _Scheduler_Is_mandatory_processor(
  const Scheduler_Assignment *assignment
)
{
  return ( assignment->attributes & SCHEDULER_ASSIGN_PROCESSOR_MANDATORY )
    != 0;
}

/**
 * @brief Returns true if a scheduler instance is assigned to the processor,
 * i.e. the processor should be started.
 *
 * @param[in] assignment The scheduler assignment.
 */
RTEMS_INLINE_ROUTINE bool _Scheduler_Should_start_processor(
  const Scheduler_Assignment *assignment
)
{
  return NULL != assignment->scheduler;
}
#endif /* defined(RTEMS_SMP) */

/**
 * @brief Returns true if the scheduler instance owns the processor.
 *
 * On uniprocessor configurations this is trivially true since there is only
 * one scheduler instance and one processor.
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in] cpu_index The processor index.
 */
RTEMS_INLINE_ROUTINE bool _Scheduler_Has_processor_ownership(
  const Scheduler_Control *scheduler,
  uint32_t cpu_index
)
{
#if defined(RTEMS_SMP)
  const Scheduler_Assignment *assignment =
    _Scheduler_Get_assignment( cpu_index );

  return assignment->scheduler == scheduler;
#else
  (void) scheduler;
  (void) cpu_index;

  return true;
#endif
}

/**
 * @brief Sets a new scheduler instance for the thread.
 *
 * On SMP configurations the thread migrates from its current scheduler
 * instance to the new one.  On uniprocessor configurations this is a no-op
 * since there is only one scheduler instance.
 *
 * @param[in] scheduler The new scheduler instance.
 * @param[in] the_thread The thread.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Set(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread
)
{
#if defined(RTEMS_SMP)
  const Scheduler_Control *current_scheduler = _Scheduler_Get( the_thread );

  if ( current_scheduler != scheduler ) {
    /*
     * The thread is marked as migrating while its scheduler node is torn
     * down on the old instance and re-initialized on the new one; the order
     * of these steps matters.
     */
    _Thread_Set_state( the_thread, STATES_MIGRATING );
    _Scheduler_Node_destroy( current_scheduler, the_thread );
    the_thread->Scheduler.own_control = scheduler;
    the_thread->Scheduler.control = scheduler;
    _Scheduler_Node_initialize( scheduler, the_thread );
    _Scheduler_Update_priority( the_thread, the_thread->current_priority );
    _Thread_Clear_state( the_thread, STATES_MIGRATING );
  }
#else
  (void) scheduler;
#endif
}

#if defined(__RTEMS_HAVE_SYS_CPUSET_H__)

/**
 * @brief Fills a CPU set with the processors owned by the scheduler instance.
 *
 * On uniprocessor configurations every processor index is set.
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in] cpusetsize Size of the CPU set in bytes.
 * @param[out] cpuset The CPU set, cleared first.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Get_processor_set(
  const Scheduler_Control *scheduler,
  size_t                   cpusetsize,
  cpu_set_t               *cpuset
)
{
  uint32_t cpu_count = _SMP_Get_processor_count();
  uint32_t cpu_index;

  CPU_ZERO_S( cpusetsize, cpuset );

  for ( cpu_index = 0 ; cpu_index < cpu_count ; ++cpu_index ) {
#if defined(RTEMS_SMP)
    if ( _Scheduler_Has_processor_ownership( scheduler, cpu_index ) ) {
      CPU_SET_S( (int) cpu_index, cpusetsize, cpuset );
    }
#else
    (void) scheduler;

    CPU_SET_S( (int) cpu_index, cpusetsize, cpuset );
#endif
  }
}

/**
 * @brief Default get-affinity body: the affinity of any thread is the
 * processor set owned by the scheduler instance.
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in] the_thread The thread, ignored by this default implementation.
 * @param[in] cpusetsize Size of the CPU set in bytes.
 * @param[out] cpuset The resulting CPU set.
 *
 * @retval true Always.
 */
RTEMS_INLINE_ROUTINE bool _Scheduler_default_Get_affinity_body(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  size_t                   cpusetsize,
  cpu_set_t               *cpuset
)
{
  (void) the_thread;

  _Scheduler_Get_processor_set( scheduler, cpusetsize, cpuset );

  return true;
}

bool _Scheduler_Get_affinity(
  Thread_Control *the_thread,
  size_t          cpusetsize,
  cpu_set_t      *cpuset
);

/**
 * @brief Default set-affinity body: validates the requested CPU set.
 *
 * The request is acceptable if every processor owned by this scheduler
 * instance is contained in the set.  Processors owned by other scheduler
 * instances may be absent from the set.  On uniprocessor configurations
 * every processor must be contained in the set.
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in] the_thread The thread, ignored by this default implementation.
 * @param[in] cpusetsize Size of the CPU set in bytes.
 * @param[in] cpuset The requested CPU set.
 *
 * @retval true The CPU set is acceptable.
 * @retval false Otherwise.
 */
RTEMS_INLINE_ROUTINE bool _Scheduler_default_Set_affinity_body(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  size_t                   cpusetsize,
  const cpu_set_t         *cpuset
)
{
  uint32_t cpu_count = _SMP_Get_processor_count();
  uint32_t cpu_index;
  bool     ok = true;

  for ( cpu_index = 0 ; cpu_index < cpu_count ; ++cpu_index ) {
#if defined(RTEMS_SMP)
    const Scheduler_Control *scheduler_of_cpu =
      _Scheduler_Get_by_CPU_index( cpu_index );

    /*
     * A || ( !A && B ) is equivalent to A || B, so CPU_ISSET_S() needs to
     * be evaluated only once per processor:  a processor may be missing
     * from the set only if another scheduler instance owns it.
     */
    ok = ok
      && ( CPU_ISSET_S( (int) cpu_index, cpusetsize, cpuset )
        || scheduler != scheduler_of_cpu );
#else
    (void) scheduler;

    ok = ok && CPU_ISSET_S( (int) cpu_index, cpusetsize, cpuset );
#endif
  }

  return ok;
}

bool _Scheduler_Set_affinity(
  Thread_Control          *the_thread,
  size_t                   cpusetsize,
  const cpu_set_t         *cpuset
);

#endif /* defined(__RTEMS_HAVE_SYS_CPUSET_H__) */

/**
 * @brief Installs @a heir as the new heir thread and requests a dispatch if
 * necessary.
 *
 * A dispatch is requested in case the heir differs from the executing thread
 * and the executing thread is preemptible or a dispatch is forced.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Update_heir(
  Thread_Control *heir,
  bool force_dispatch
)
{
  Thread_Control *currently_executing = _Thread_Executing;

  _Thread_Heir = heir;

  if ( currently_executing == heir ) {
    return;
  }

  if ( force_dispatch || currently_executing->is_preemptible ) {
    _Thread_Dispatch_necessary = true;
  }
}

/**
 * @brief Generic block operation: extracts the thread and, in case it is
 * currently executing or is the heir, invokes the schedule operation.
 *
 * @param[in] scheduler The scheduler instance.
 * @param[in] the_thread The thread to block.
 * @param[in] extract Function used to extract the thread.
 * @param[in] schedule Function used to select a new heir; invoked here with a
 *   true third argument.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Generic_block(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  void                  ( *extract )(
                             const Scheduler_Control *,
                             Thread_Control * ),
  void                  ( *schedule )(
                             const Scheduler_Control *,
                             Thread_Control *,
                             bool )
)
{
  ( *extract )( scheduler, the_thread );

  /* TODO: flash critical section? */

  if ( _Thread_Is_executing( the_thread ) || _Thread_Is_heir( the_thread ) ) {
    ( *schedule )( scheduler, the_thread, true );
  }
}

/**
 * @brief Returns true if @a p1 encodes a lower priority than @a p2 in the
 * intuitive sense of priority.
 *
 * @param[in] scheduler The scheduler instance defining the priority order.
 * @param[in] p1 The first priority value.
 * @param[in] p2 The second priority value.
 */
RTEMS_INLINE_ROUTINE bool _Scheduler_Is_priority_lower_than(
  const Scheduler_Control *scheduler,
  Priority_Control         p1,
  Priority_Control         p2
)
{
  return _Scheduler_Priority_compare( scheduler, p1,  p2 ) < 0;
}

/**
 * @brief Returns true if @a p1 encodes a higher priority than @a p2 in the
 * intuitive sense of priority.
 *
 * @param[in] scheduler The scheduler instance defining the priority order.
 * @param[in] p1 The first priority value.
 * @param[in] p2 The second priority value.
 */
RTEMS_INLINE_ROUTINE bool _Scheduler_Is_priority_higher_than(
  const Scheduler_Control *scheduler,
  Priority_Control         p1,
  Priority_Control         p2
)
{
  return _Scheduler_Priority_compare( scheduler, p1,  p2 ) > 0;
}

/**
 * @brief Returns the priority encoding @a p1 or @a p2 with the higher priority
 * in the intuitive sense of priority.
 *
 * In case both encode the same priority, @a p2 is returned.
 */
RTEMS_INLINE_ROUTINE Priority_Control _Scheduler_Highest_priority_of_two(
  const Scheduler_Control *scheduler,
  Priority_Control         p1,
  Priority_Control         p2
)
{
  if ( _Scheduler_Is_priority_higher_than( scheduler, p1, p2 ) ) {
    return p1;
  }

  return p2;
}

/**
 * @brief Sets the thread priority to @a priority if it is higher than the
 * current priority of the thread in the intuitive sense of priority.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Set_priority_if_higher(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  Priority_Control         priority
)
{
  Priority_Control current_priority = the_thread->current_priority;
  bool raise_it = _Scheduler_Is_priority_higher_than(
    scheduler,
    priority,
    current_priority
  );

  if ( raise_it ) {
    _Thread_Set_priority( the_thread, priority );
  }
}

/**
 * @brief Changes the thread priority to @a priority if it is higher than the
 * current priority of the thread in the intuitive sense of priority.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Change_priority_if_higher(
  const Scheduler_Control *scheduler,
  Thread_Control          *the_thread,
  Priority_Control         priority,
  bool                     prepend_it
)
{
  Priority_Control current_priority = the_thread->current_priority;
  bool change_it = _Scheduler_Is_priority_higher_than(
    scheduler,
    priority,
    current_priority
  );

  if ( change_it ) {
    _Thread_Change_priority( the_thread, priority, prepend_it );
  }
}

/**
 * @brief Returns the count of processors owned by the scheduler instance.
 *
 * On uni-processor configurations this is always one.
 */
RTEMS_INLINE_ROUTINE uint32_t _Scheduler_Get_processor_count(
  const Scheduler_Control *scheduler
)
{
#if defined(RTEMS_SMP)
  return _Scheduler_Get_context( scheduler )->processor_count;
#else
  (void) scheduler;

  return 1;
#endif
}

/**
 * @brief Builds the object identifier for the scheduler with the given
 * zero-based table index.
 *
 * The identifier encodes the index offset by one;
 * _Scheduler_Get_index_by_id() performs the inverse mapping.
 */
RTEMS_INLINE_ROUTINE Objects_Id _Scheduler_Build_id( uint32_t scheduler_index )
{
  return _Objects_Build_id(
    OBJECTS_FAKE_OBJECTS_API,
    OBJECTS_FAKE_OBJECTS_SCHEDULERS,
    _Objects_Local_node,
    scheduler_index + 1
  );
}

/**
 * @brief Returns the zero-based scheduler table index encoded in the object
 * identifier (inverse of _Scheduler_Build_id()).
 */
RTEMS_INLINE_ROUTINE uint32_t _Scheduler_Get_index_by_id( Objects_Id id )
{
  return id - _Scheduler_Build_id( 0 );
}

/**
 * @brief Maps an object identifier to a scheduler instance.
 *
 * @param[in] id The scheduler object identifier.
 * @param[out] scheduler_p Set unconditionally to the table entry addressed by
 *   the index encoded in the identifier, even for an out-of-range index.
 *
 * @retval true The identifier references a scheduler instance with at least
 *   one processor.
 * @retval false Otherwise; @a scheduler_p must not be used in this case.
 *
 * NOTE(review): the table entry address is computed before the range check,
 * so an out-of-range identifier forms an out-of-bounds pointer.  Callers are
 * expected to honour the return value, but this is worth tightening.
 */
RTEMS_INLINE_ROUTINE bool _Scheduler_Get_by_id(
  Objects_Id                id,
  const Scheduler_Control **scheduler_p
)
{
  uint32_t index = _Scheduler_Get_index_by_id( id );
  const Scheduler_Control *scheduler = &_Scheduler_Table[ index ];

  *scheduler_p = scheduler;

  /* Short-circuit: the processor count is read only for an in-range index */
  return index < _Scheduler_Count
    && _Scheduler_Get_processor_count( scheduler ) > 0;
}

/**
 * @brief Returns true if the object identifier references a valid scheduler
 * instance.
 */
RTEMS_INLINE_ROUTINE bool _Scheduler_Is_id_valid( Objects_Id id )
{
  const Scheduler_Control *unused;

  return _Scheduler_Get_by_id( id, &unused );
}

/**
 * @brief Returns the zero-based index of the scheduler instance in the
 * scheduler table.
 */
RTEMS_INLINE_ROUTINE uint32_t _Scheduler_Get_index(
  const Scheduler_Control *scheduler
)
{
  return (uint32_t) ( scheduler - _Scheduler_Table );
}

/** @brief Returns the scheduler node currently assigned to the thread. */
RTEMS_INLINE_ROUTINE Scheduler_Node *_Scheduler_Thread_get_node(
  const Thread_Control *the_thread
)
{
  return the_thread->Scheduler.node;
}

/**
 * @brief Initializes a scheduler node for the given thread.
 *
 * On SMP configurations the node starts out self-contained: the thread is
 * owner, user and the thread accepting help, there is no idle thread, and the
 * help state is SCHEDULER_HELP_YOURSELF.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Node_do_initialize(
  Scheduler_Node *node,
  Thread_Control *the_thread
)
{
#if defined(RTEMS_SMP)
  node->user = the_thread;
  node->help_state = SCHEDULER_HELP_YOURSELF;
  node->owner = the_thread;
  node->idle = NULL;
  node->accepts_help = the_thread;
#else
  (void) node;
  (void) the_thread;
#endif
}

#if defined(RTEMS_SMP)
/**
 * @brief Gets an idle thread from the scheduler instance.
 *
 * @param[in] context The scheduler instance context.
 *
 * @retval idle An idle thread for use.  This function must always return an
 *   idle thread.  If none is available, then this is a fatal error.
 */
typedef Thread_Control *( *Scheduler_Get_idle_thread )(
  Scheduler_Context *context
);

/**
 * @brief Releases an idle thread to the scheduler instance for reuse.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] idle The idle thread to release.
 */
typedef void ( *Scheduler_Release_idle_thread )(
  Scheduler_Context *context,
  Thread_Control    *idle
);

/** @brief Returns the owner thread of the scheduler node. */
RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Node_get_owner(
  const Scheduler_Node *node
)
{
  return node->owner;
}

/** @brief Returns the idle thread of the scheduler node, or NULL if none. */
RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Node_get_idle(
  const Scheduler_Node *node
)
{
  return node->idle;
}

/** @brief Sets the user thread of the scheduler node. */
RTEMS_INLINE_ROUTINE void _Scheduler_Node_set_user(
  Scheduler_Node *node,
  Thread_Control *user
)
{
  node->user = user;
}

/** @brief Sets the scheduler node currently assigned to the thread. */
RTEMS_INLINE_ROUTINE void _Scheduler_Thread_set_node(
  Thread_Control *the_thread,
  Scheduler_Node *node
)
{
  the_thread->Scheduler.node = node;
}

/**
 * @brief Assigns both a scheduler instance and a scheduler node to the
 * thread.
 *
 * The scheduler instance is obtained via _Scheduler_Get_own() from the
 * previous user of the node.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Thread_set_scheduler_and_node(
  Thread_Control       *the_thread,
  Scheduler_Node       *node,
  const Thread_Control *previous_user_of_node
)
{
  const Scheduler_Control *scheduler =
    _Scheduler_Get_own( previous_user_of_node );

  the_thread->Scheduler.control = scheduler;
  _Scheduler_Thread_set_node( the_thread, node );
}

/**
 * @brief Valid thread scheduler state transition matrix, indexed by
 * [ current state ][ new state ]; consulted by the assertion in
 * _Scheduler_Thread_change_state().
 */
extern const bool _Scheduler_Thread_state_valid_state_changes[ 3 ][ 3 ];

/**
 * @brief Changes the scheduler state of the thread to @a new_state.
 *
 * Asserts that the transition from the current state is listed as valid in
 * _Scheduler_Thread_state_valid_state_changes.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Thread_change_state(
  Thread_Control         *the_thread,
  Thread_Scheduler_state  new_state
)
{
  _Assert(
    _Scheduler_Thread_state_valid_state_changes
      [ the_thread->Scheduler.state ][ new_state ]
  );

  the_thread->Scheduler.state = new_state;
}

/**
 * @brief Changes the scheduler help state of a thread.
 *
 * The help state lives in the thread's own scheduler node.
 *
 * @param[in] the_thread The thread.
 * @param[in] new_help_state The new help state.
 *
 * @return The previous help state.
 */
RTEMS_INLINE_ROUTINE Scheduler_Help_state _Scheduler_Thread_change_help_state(
  Thread_Control       *the_thread,
  Scheduler_Help_state  new_help_state
)
{
  Scheduler_Node *own_node = _Scheduler_Thread_get_own_node( the_thread );
  Scheduler_Help_state old_help_state = own_node->help_state;

  own_node->help_state = new_help_state;

  return old_help_state;
}

/**
 * @brief Changes the resource tree root of a thread.
 *
 * For each node of the resource sub-tree specified by the top thread the
 * scheduler asks for help.  So the root thread gains access to all scheduler
 * nodes corresponding to the resource sub-tree.  In case a thread previously
 * granted help is displaced by this operation, then the scheduler asks for
 * help using its remaining resource tree.
 *
 * The run-time of this function depends on the size of the resource sub-tree
 * and other resource trees in case threads in need of help are produced
 * during this operation.
 *
 * @param[in] top The thread specifying the resource sub-tree top.
 * @param[in] root The thread specifying the new resource sub-tree root.
 */
void _Scheduler_Thread_change_resource_root(
  Thread_Control *top,
  Thread_Control *root
);

/**
 * @brief Uses an idle thread for this scheduler node.
 *
 * A thread in the SCHEDULER_HELP_ACTIVE_OWNER owner state may use an idle
 * thread for the scheduler node owned by itself in case it executes currently
 * using another scheduler node or in case it is in a blocking state.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] node The node which wants to use the idle thread.
 * @param[in] get_idle_thread Function to get an idle thread.
 *
 * @return The idle thread installed as the new user of the node.
 */
RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Use_idle_thread(
  Scheduler_Context         *context,
  Scheduler_Node            *node,
  Scheduler_Get_idle_thread  get_idle_thread
)
{
  Thread_Control *idle = ( *get_idle_thread )( context );

  /*
   * Only an active owner node without an idle thread, whose user is currently
   * the owner itself, may pull in an idle thread.
   */
  _Assert( node->help_state == SCHEDULER_HELP_ACTIVE_OWNER );
  _Assert( _Scheduler_Node_get_idle( node ) == NULL );
  _Assert(
    _Scheduler_Node_get_owner( node ) == _Scheduler_Node_get_user( node )
  );

  /* The idle thread executes on behalf of this node from now on */
  _Scheduler_Thread_set_node( idle, node );

  _Scheduler_Node_set_user( node, idle );
  node->idle = idle;

  return idle;
}

/**
 * @brief Tries to schedule this scheduler node.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] node The node which wants to get scheduled.
 * @param[in] get_idle_thread Function to get an idle thread.
 *
 * @retval true This node can be scheduled.
 * @retval false Otherwise.
 */
RTEMS_INLINE_ROUTINE bool _Scheduler_Try_to_schedule_node(
  Scheduler_Context         *context,
  Scheduler_Node            *node,
  Scheduler_Get_idle_thread  get_idle_thread
)
{
  Thread_Control *owner;
  Thread_Control *user;

  /* A node helping only itself is always schedulable */
  if ( node->help_state == SCHEDULER_HELP_YOURSELF ) {
    return true;
  }

  owner = _Scheduler_Node_get_owner( node );
  user = _Scheduler_Node_get_user( node );

  switch ( node->help_state ) {
    case SCHEDULER_HELP_ACTIVE_RIVAL:
      if ( user->Scheduler.state == THREAD_SCHEDULER_READY ) {
        _Scheduler_Thread_set_scheduler_and_node( user, node, owner );
      } else {
        _Scheduler_Node_set_user( node, owner );
      }

      return true;
    case SCHEDULER_HELP_ACTIVE_OWNER:
      if ( user->Scheduler.state == THREAD_SCHEDULER_READY ) {
        _Scheduler_Thread_set_scheduler_and_node( user, node, owner );
      } else {
        /* Keep the node occupied by means of an idle thread */
        _Scheduler_Use_idle_thread( context, node, get_idle_thread );
      }

      return true;
    default:
      _Assert( node->help_state == SCHEDULER_HELP_PASSIVE );

      /* A passive node is schedulable only with a ready user */
      if ( user->Scheduler.state != THREAD_SCHEDULER_READY ) {
        return false;
      }

      _Scheduler_Thread_set_scheduler_and_node( user, node, owner );

      return true;
  }
}

/**
 * @brief Releases an idle thread using this scheduler node.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] node The node which may have an idle thread as user.
 * @param[in] release_idle_thread Function to release an idle thread.
 *
 * @retval idle The idle thread which used this node.
 * @retval NULL This node had no idle thread as a user.
 */
RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Release_idle_thread(
  Scheduler_Context             *context,
  Scheduler_Node                *node,
  Scheduler_Release_idle_thread  release_idle_thread
)
{
  Thread_Control *idle = _Scheduler_Node_get_idle( node );
  Thread_Control *owner;

  if ( idle == NULL ) {
    return NULL;
  }

  /* Hand the node back to its owner and detach the idle thread */
  owner = _Scheduler_Node_get_owner( node );
  node->idle = NULL;
  _Scheduler_Node_set_user( node, owner );
  _Scheduler_Thread_change_state( idle, THREAD_SCHEDULER_READY );
  _Scheduler_Thread_set_node( idle, idle->Scheduler.own_node );

  ( *release_idle_thread )( context, idle );

  return idle;
}

/**
 * @brief Blocks this scheduler node.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] node The node which wants to get blocked.
 * @param[in] is_scheduled This node is scheduled.
 * @param[in] get_idle_thread Function to get an idle thread.
 *
 * @retval true Continue with the blocking operation.
 * @retval false Otherwise.
 */
RTEMS_INLINE_ROUTINE bool _Scheduler_Block_node(
  Scheduler_Context         *context,
  Scheduler_Node            *node,
  bool                       is_scheduled,
  Scheduler_Get_idle_thread  get_idle_thread
)
{
  bool block;
  Thread_Control *old_user = _Scheduler_Node_get_user( node );
  Thread_Control *new_user;

  /* The current user of the node blocks in any case */
  _Scheduler_Thread_change_state( old_user, THREAD_SCHEDULER_BLOCKED );

  if ( node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL ) {
    /* An active rival hands the node back to its owner */
    new_user = _Scheduler_Node_get_owner( node );

    _Assert( new_user != old_user );
    _Scheduler_Node_set_user( node, new_user );
  } else if (
    node->help_state == SCHEDULER_HELP_ACTIVE_OWNER
      && is_scheduled
  ) {
    /* A scheduled active owner keeps the node occupied via an idle thread */
    new_user = _Scheduler_Use_idle_thread( context, node, get_idle_thread );
  } else {
    new_user = NULL;
  }

  if ( new_user != NULL && is_scheduled ) {
    /*
     * A replacement user takes over the processor of the blocking thread, so
     * the caller must not continue with the blocking operation.
     */
    Per_CPU_Control *cpu = _Thread_Get_CPU( old_user );

    _Scheduler_Thread_change_state( new_user, THREAD_SCHEDULER_SCHEDULED );
    _Thread_Set_CPU( new_user, cpu );
    _Thread_Dispatch_update_heir( _Per_CPU_Get(), cpu, new_user );

    block = false;
  } else {
    block = true;
  }

  return block;
}

/**
 * @brief Unblocks this scheduler node.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] the_thread The thread which wants to get unblocked.
 * @param[in] node The node which wants to get unblocked.
 * @param[in] is_scheduled This node is scheduled.
 * @param[in] release_idle_thread Function to release an idle thread.
 *
 * @retval true Continue with the unblocking operation.
 * @retval false Otherwise.
 */
RTEMS_INLINE_ROUTINE bool _Scheduler_Unblock_node(
  Scheduler_Context             *context,
  Thread_Control                *the_thread,
  Scheduler_Node                *node,
  bool                           is_scheduled,
  Scheduler_Release_idle_thread  release_idle_thread
)
{
  bool unblock;

  if ( is_scheduled ) {
    /*
     * The node is already scheduled, so the unblocking thread can take over
     * the processor of the current node user right away and no further
     * unblock processing is necessary.
     */
    Thread_Control *old_user = _Scheduler_Node_get_user( node );
    Per_CPU_Control *cpu = _Thread_Get_CPU( old_user );

    if ( node->help_state == SCHEDULER_HELP_ACTIVE_OWNER ) {
      /* A scheduled active owner node must have been kept busy by an idle
       * thread, which is released now. */
      Thread_Control *idle = _Scheduler_Release_idle_thread(
        context,
        node,
        release_idle_thread
      );

      _Assert( idle != NULL );
      (void) idle;
    } else {
      _Assert( node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL );

      /* The previous user yields the node to the unblocking thread */
      _Scheduler_Thread_change_state( old_user, THREAD_SCHEDULER_READY );
      _Scheduler_Node_set_user( node, the_thread );
    }

    _Scheduler_Thread_change_state( the_thread, THREAD_SCHEDULER_SCHEDULED );
    _Thread_Set_CPU( the_thread, cpu );
    _Thread_Dispatch_update_heir( _Per_CPU_Get(), cpu, the_thread );

    unblock = false;
  } else {
    _Scheduler_Thread_change_state( the_thread, THREAD_SCHEDULER_READY );

    unblock = true;
  }

  return unblock;
}

/**
 * @brief Asks a ready scheduler node for help.
 *
 * @param[in] node The ready node offering help.
 * @param[in] needs_help The thread needing help.
 *
 * @retval needs_help The thread needing help.
 */
RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Ask_ready_node_for_help(
  Scheduler_Node *node,
  Thread_Control *needs_help
)
{
  /* The thread needing help simply becomes the user of this ready node */
  _Scheduler_Node_set_user( node, needs_help );

  return needs_help;
}

/**
 * @brief Asks a scheduled scheduler node for help.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] node The scheduled node offering help.
 * @param[in] offers_help The thread offering help.
 * @param[in] needs_help The thread needing help.
 * @param[in] previous_accepts_help The previous thread accepting help by this
 *   scheduler node.
 * @param[in] release_idle_thread Function to release an idle thread.
 *
 * @retval needs_help The previous thread accepting help by this scheduler node
 *   which was displaced by the thread needing help.
 * @retval NULL There are no more threads needing help.
 */
RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Ask_scheduled_node_for_help(
  Scheduler_Context             *context,
  Scheduler_Node                *node,
  Thread_Control                *offers_help,
  Thread_Control                *needs_help,
  Thread_Control                *previous_accepts_help,
  Scheduler_Release_idle_thread  release_idle_thread
)
{
  Thread_Control *next_needs_help = NULL;
  Thread_Control *old_user = NULL;
  Thread_Control *new_user = NULL;

  if (
    previous_accepts_help != needs_help
      && _Scheduler_Thread_get_node( previous_accepts_help ) == node
  ) {
    /*
     * The previous beneficiary still uses this node and differs from the
     * thread needing help, so it is displaced.
     */
    Thread_Control *idle = _Scheduler_Release_idle_thread(
      context,
      node,
      release_idle_thread
    );

    /* The thread to displace is the idle thread, if one was in use */
    if ( idle != NULL ) {
      old_user = idle;
    } else {
      _Assert( _Scheduler_Node_get_user( node ) == previous_accepts_help );
      old_user = previous_accepts_help;
    }

    if ( needs_help->Scheduler.state == THREAD_SCHEDULER_READY ) {
      new_user = needs_help;
    } else {
      /* Fall back to the thread offering help as the new node user */
      _Assert( node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL );
      _Assert( offers_help->Scheduler.node == offers_help->Scheduler.own_node );

      new_user = offers_help;
    }

    /* The displaced thread may itself be in need of help */
    if ( previous_accepts_help != offers_help ) {
      next_needs_help = previous_accepts_help;
    }
  } else if ( needs_help->Scheduler.state == THREAD_SCHEDULER_READY ) {
    /* The ready thread needing help becomes the new user of this node */
    Thread_Control *idle = _Scheduler_Release_idle_thread(
      context,
      node,
      release_idle_thread
    );

    if ( idle != NULL ) {
      old_user = idle;
    } else {
      old_user = _Scheduler_Node_get_user( node );
    }

    new_user = needs_help;
  } else {
    _Assert( needs_help->Scheduler.state == THREAD_SCHEDULER_SCHEDULED );
  }

  if ( new_user != old_user ) {
    /* Hand the processor of the old user over to the new user */
    Per_CPU_Control *cpu_self = _Per_CPU_Get();
    Per_CPU_Control *cpu = _Thread_Get_CPU( old_user );

    _Scheduler_Thread_change_state( old_user, THREAD_SCHEDULER_READY );
    _Scheduler_Thread_set_scheduler_and_node(
      old_user,
      _Scheduler_Thread_get_own_node( old_user ),
      old_user
    );

    _Scheduler_Thread_change_state( new_user, THREAD_SCHEDULER_SCHEDULED );
    _Scheduler_Thread_set_scheduler_and_node( new_user, node, offers_help );

    _Scheduler_Node_set_user( node, new_user );
    _Thread_Set_CPU( new_user, cpu );
    _Thread_Dispatch_update_heir( cpu_self, cpu, new_user );
  }

  return next_needs_help;
}

/**
 * @brief Asks a blocked scheduler node for help.
 *
 * @param[in] context The scheduler instance context.
 * @param[in] node The blocked node offering help.
 * @param[in] offers_help The thread offering help.
 * @param[in] needs_help The thread needing help.
 *
 * @retval true Enqueue this scheduler node.
 * @retval false Otherwise.
 */
RTEMS_INLINE_ROUTINE bool _Scheduler_Ask_blocked_node_for_help(
  Scheduler_Context *context,
  Scheduler_Node    *node,
  Thread_Control    *offers_help,
  Thread_Control    *needs_help
)
{
  bool enqueue;

  _Assert( node->help_state == SCHEDULER_HELP_PASSIVE );

  /* Only a ready thread in need of help can make use of this passive node */
  enqueue = ( needs_help->Scheduler.state == THREAD_SCHEDULER_READY );

  if ( enqueue ) {
    _Scheduler_Node_set_user( node, needs_help );
    _Scheduler_Thread_set_scheduler_and_node( needs_help, node, offers_help );
  }

  return enqueue;
}
#endif

/** @} */

#ifdef __cplusplus
}
#endif

#endif
/* end of include file */