path: root/cpukit/include/rtems/score/scheduleredfsmp.h
author     Chris Johns <chrisj@rtems.org>  2017-12-23 18:18:56 +1100
committer  Sebastian Huber <sebastian.huber@embedded-brains.de>  2018-01-25 08:45:26 +0100
commit     2afb22b7e1ebcbe40373ff7e0efae7d207c655a9
tree       44759efe9374f13200a97e96d91bd9a2b7e5ce2a /cpukit/include/rtems/score/scheduleredfsmp.h
parent     MAINTAINERS: Add myself to Write After Approval.
download   rtems-2afb22b7e1ebcbe40373ff7e0efae7d207c655a9.tar.bz2
Remove make preinstall
A speciality of the RTEMS build system was the make preinstall step. It copied header files from arbitrary locations into the build tree. The header files were included via the -Bsome/build/tree/path GCC command line option.

This has at least seven problems:

* The make preinstall step itself needs time and disk space.
* Errors in header files show up in the build tree copy. This makes it hard for editors to open the right file to fix the error.
* There is no clear relationship between source and build tree header files. This makes an audit of the build process difficult.
* The visibility of all header files in the build tree makes it difficult to enforce API barriers. For example, it is discouraged to use BSP-specifics in the cpukit.
* An introduction of a new build system is difficult.
* Include paths specified by the -B option are system headers. This may suppress warnings.
* The parallel build had sporadic failures on some hosts.

This patch removes the make preinstall step. All installed header files are moved to dedicated include directories in the source tree. Let @RTEMS_CPU@ be the target architecture, e.g. arm, powerpc, sparc, etc. Let @RTEMS_BSP_FAMILY@ be a BSP family base directory, e.g. erc32, imx, qoriq, etc.

The new cpukit include directories are:

* cpukit/include
* cpukit/score/cpu/@RTEMS_CPU@/include
* cpukit/libnetworking

The new BSP include directories are:

* bsps/include
* bsps/@RTEMS_CPU@/include
* bsps/@RTEMS_CPU@/@RTEMS_BSP_FAMILY@/include

There are build tree include directories for generated files.

The include directory order favours the most general header file, i.e. it is not possible to override general header files via the include path order.

The "bootstrap -p" option was removed. The new "bootstrap -H" option should be used to regenerate the "headers.am" files.

Update #3254.
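For consumers of the header the include line itself is unchanged; only the directory the compiler resolves it from changes. A minimal sketch, assuming a cpukit translation unit in the source tree:

    /*
     * Illustration only, not part of the patch: the include line below is
     * unchanged.  Before this patch the compiler resolved it against a
     * preinstalled copy in the build tree (added via the -B option); after
     * it, the include resolves directly to the source tree file
     * cpukit/include/rtems/score/scheduleredfsmp.h.
     */
    #include <rtems/score/scheduleredfsmp.h>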
Diffstat (limited to 'cpukit/include/rtems/score/scheduleredfsmp.h')
-rw-r--r--   cpukit/include/rtems/score/scheduleredfsmp.h   200
1 file changed, 200 insertions, 0 deletions
diff --git a/cpukit/include/rtems/score/scheduleredfsmp.h b/cpukit/include/rtems/score/scheduleredfsmp.h
new file mode 100644
index 0000000000..018568190e
--- /dev/null
+++ b/cpukit/include/rtems/score/scheduleredfsmp.h
@@ -0,0 +1,200 @@
+/**
+ * @file
+ *
+ * @brief EDF SMP Scheduler API
+ *
+ * @ingroup ScoreSchedulerSMPEDF
+ */
+
+/*
+ * Copyright (c) 2017 embedded brains GmbH.
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#ifndef _RTEMS_SCORE_SCHEDULEREDFSMP_H
+#define _RTEMS_SCORE_SCHEDULEREDFSMP_H
+
+#include <rtems/score/scheduler.h>
+#include <rtems/score/scheduleredf.h>
+#include <rtems/score/schedulersmp.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @defgroup ScoreSchedulerSMPEDF EDF Priority SMP Scheduler
+ *
+ * @ingroup ScoreSchedulerSMP
+ *
+ * @{
+ */
+
+typedef struct {
+ Scheduler_SMP_Node Base;
+
+ /**
+ * @brief Generation number to ensure FIFO/LIFO order for threads of the same
+ * priority across different ready queues.
+ */
+ int64_t generation;
+
+ /**
+ * @brief The ready queue index depending on the processor affinity of the thread.
+ *
+ * The ready queue index zero is used for threads with a one-to-all thread
+ * processor affinity. Threads with a one-to-one processor affinity use the
+ * processor index plus one as the ready queue index.
+ */
+ uint32_t ready_queue_index;
+} Scheduler_EDF_SMP_Node;
+
+typedef struct {
+ /**
+ * @brief Chain node for Scheduler_SMP_Context::Affine_queues.
+ */
+ Chain_Node Node;
+
+ /**
+ * @brief The ready threads of the corresponding affinity.
+ */
+ RBTree_Control Queue;
+
+ /**
+ * @brief The scheduled thread of the corresponding processor.
+ */
+ Scheduler_EDF_SMP_Node *scheduled;
+} Scheduler_EDF_SMP_Ready_queue;
+
+typedef struct {
+ Scheduler_SMP_Context Base;
+
+ /**
+ * @brief Current generation for LIFO (index 0) and FIFO (index 1) ordering.
+ */
+ int64_t generations[ 2 ];
+
+ /**
+ * @brief Chain of ready queues with affine threads to determine the highest
+ * priority ready thread.
+ */
+ Chain_Control Affine_queues;
+
+ /**
+ * @brief A table with ready queues.
+ *
+ * The index zero queue is used for threads with a one-to-all processor
+ * affinity. Index one corresponds to processor index zero, and so on.
+ */
+ Scheduler_EDF_SMP_Ready_queue Ready[ RTEMS_ZERO_LENGTH_ARRAY ];
+} Scheduler_EDF_SMP_Context;
+
+#define SCHEDULER_EDF_SMP_ENTRY_POINTS \
+ { \
+ _Scheduler_EDF_SMP_Initialize, \
+ _Scheduler_default_Schedule, \
+ _Scheduler_EDF_SMP_Yield, \
+ _Scheduler_EDF_SMP_Block, \
+ _Scheduler_EDF_SMP_Unblock, \
+ _Scheduler_EDF_SMP_Update_priority, \
+ _Scheduler_EDF_Map_priority, \
+ _Scheduler_EDF_Unmap_priority, \
+ _Scheduler_EDF_SMP_Ask_for_help, \
+ _Scheduler_EDF_SMP_Reconsider_help_request, \
+ _Scheduler_EDF_SMP_Withdraw_node, \
+ _Scheduler_EDF_SMP_Add_processor, \
+ _Scheduler_EDF_SMP_Remove_processor, \
+ _Scheduler_EDF_SMP_Node_initialize, \
+ _Scheduler_default_Node_destroy, \
+ _Scheduler_EDF_Release_job, \
+ _Scheduler_EDF_Cancel_job, \
+ _Scheduler_default_Tick, \
+ _Scheduler_EDF_SMP_Start_idle, \
+ _Scheduler_EDF_SMP_Set_affinity \
+ }
+
+void _Scheduler_EDF_SMP_Initialize( const Scheduler_Control *scheduler );
+
+void _Scheduler_EDF_SMP_Node_initialize(
+ const Scheduler_Control *scheduler,
+ Scheduler_Node *node,
+ Thread_Control *the_thread,
+ Priority_Control priority
+);
+
+void _Scheduler_EDF_SMP_Block(
+ const Scheduler_Control *scheduler,
+ Thread_Control *thread,
+ Scheduler_Node *node
+);
+
+void _Scheduler_EDF_SMP_Unblock(
+ const Scheduler_Control *scheduler,
+ Thread_Control *thread,
+ Scheduler_Node *node
+);
+
+void _Scheduler_EDF_SMP_Update_priority(
+ const Scheduler_Control *scheduler,
+ Thread_Control *the_thread,
+ Scheduler_Node *node
+);
+
+bool _Scheduler_EDF_SMP_Ask_for_help(
+ const Scheduler_Control *scheduler,
+ Thread_Control *the_thread,
+ Scheduler_Node *node
+);
+
+void _Scheduler_EDF_SMP_Reconsider_help_request(
+ const Scheduler_Control *scheduler,
+ Thread_Control *the_thread,
+ Scheduler_Node *node
+);
+
+void _Scheduler_EDF_SMP_Withdraw_node(
+ const Scheduler_Control *scheduler,
+ Thread_Control *the_thread,
+ Scheduler_Node *node,
+ Thread_Scheduler_state next_state
+);
+
+void _Scheduler_EDF_SMP_Add_processor(
+ const Scheduler_Control *scheduler,
+ Thread_Control *idle
+);
+
+Thread_Control *_Scheduler_EDF_SMP_Remove_processor(
+ const Scheduler_Control *scheduler,
+ struct Per_CPU_Control *cpu
+);
+
+void _Scheduler_EDF_SMP_Yield(
+ const Scheduler_Control *scheduler,
+ Thread_Control *thread,
+ Scheduler_Node *node
+);
+
+void _Scheduler_EDF_SMP_Start_idle(
+ const Scheduler_Control *scheduler,
+ Thread_Control *idle,
+ struct Per_CPU_Control *cpu
+);
+
+bool _Scheduler_EDF_SMP_Set_affinity(
+ const Scheduler_Control *scheduler,
+ Thread_Control *thread,
+ Scheduler_Node *node,
+ const Processor_mask *affinity
+);
+
+/** @} */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTEMS_SCORE_SCHEDULEREDFSMP_H */
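
The affinity-to-ready-queue mapping documented in the structures above (queue index zero for one-to-all processor affinity, processor index plus one for one-to-one affinity, so Ready[ i + 1 ] of Scheduler_EDF_SMP_Context corresponds to processor index i) can be sketched as follows. This is a hypothetical, self-contained illustration of the indexing scheme only, not the scheduler's implementation; a plain uint32_t bit mask stands in for the real Processor_mask type.

    /*
     * Hypothetical sketch of the ready queue indexing scheme described in
     * scheduleredfsmp.h; it is not part of the RTEMS sources.
     */
    #include <stdint.h>
    #include <stdio.h>

    static uint32_t ready_queue_index( uint32_t affinity, uint32_t online )
    {
      if ( ( affinity & online ) == online ) {
        /* One-to-all processor affinity: the shared ready queue at index zero. */
        return 0;
      }

      /*
       * One-to-one processor affinity: processor index plus one, so ready
       * queue i + 1 belongs to processor index i.
       */
      return (uint32_t) __builtin_ctz( affinity ) + 1;
    }

    int main( void )
    {
      uint32_t online = 0x0f; /* four processors online */

      printf( "one-to-all  -> ready queue %u\n", ready_queue_index( online, online ) );
      printf( "processor 0 -> ready queue %u\n", ready_queue_index( 0x01, online ) );
      printf( "processor 2 -> ready queue %u\n", ready_queue_index( 0x04, online ) );

      return 0;
    }

In the same spirit, the generations[ 2 ] counters of Scheduler_EDF_SMP_Context provide one monotonically advancing value per insertion order (LIFO at index zero, FIFO at index one), which the node's generation member records so that threads of the same priority keep their FIFO/LIFO order across the different ready queues.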