author     Chris Johns <chrisj@rtems.org>  2017-12-23 18:18:56 +1100
committer  Sebastian Huber <sebastian.huber@embedded-brains.de>  2018-01-25 08:45:26 +0100
commit     2afb22b7e1ebcbe40373ff7e0efae7d207c655a9 (patch)
tree       44759efe9374f13200a97e96d91bd9a2b7e5ce2a /cpukit/include/rtems/score/processormask.h
parent     MAINTAINERS: Add myself to Write After Approval. (diff)
download   rtems-2afb22b7e1ebcbe40373ff7e0efae7d207c655a9.tar.bz2
Remove make preinstall
A speciality of the RTEMS build system was the make preinstall step. It copied header files from arbitrary locations into the build tree. The header files were included via the -Bsome/build/tree/path GCC command line option.

This has at least seven problems:

* The make preinstall step itself needs time and disk space.
* Errors in header files show up in the build tree copy. This makes it hard for editors to open the right file to fix the error.
* There is no clear relationship between source and build tree header files. This makes an audit of the build process difficult.
* The visibility of all header files in the build tree makes it difficult to enforce API barriers. For example, it is discouraged to use BSP-specifics in the cpukit.
* An introduction of a new build system is difficult.
* Include paths specified by the -B option are system headers. This may suppress warnings.
* The parallel build had sporadic failures on some hosts.

This patch removes the make preinstall step. All installed header files are moved to dedicated include directories in the source tree. Let @RTEMS_CPU@ be the target architecture, e.g. arm, powerpc, sparc, etc. Let @RTEMS_BSP_FAMILY@ be a BSP family base directory, e.g. erc32, imx, qoriq, etc.

The new cpukit include directories are:

* cpukit/include
* cpukit/score/cpu/@RTEMS_CPU@/include
* cpukit/libnetworking

The new BSP include directories are:

* bsps/include
* bsps/@RTEMS_CPU@/include
* bsps/@RTEMS_CPU@/@RTEMS_BSP_FAMILY@/include

There are build tree include directories for generated files.

The include directory order favours the most general header file, i.e. it is not possible to override general header files via the include path order.

The "bootstrap -p" option was removed. The new "bootstrap -H" option should be used to regenerate the "headers.am" files.

Update #3254.
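As an illustration of the new layout, the file touched by this commit now lives directly in one of the source tree include directories listed above and is found through the ordinary include search path instead of a preinstalled build tree copy. A minimal sketch (the including translation unit is hypothetical; the header path is the one added by this commit):

    /* Hypothetical cpukit translation unit: the header is resolved from the
     * source tree directory cpukit/include/, not from a "make preinstall"
     * copy in the build tree. */
    #include <rtems/score/processormask.h>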
Diffstat (limited to 'cpukit/include/rtems/score/processormask.h')
-rw-r--r--  cpukit/include/rtems/score/processormask.h  290
1 file changed, 290 insertions, 0 deletions
diff --git a/cpukit/include/rtems/score/processormask.h b/cpukit/include/rtems/score/processormask.h
new file mode 100644
index 0000000000..a06aa2a56b
--- /dev/null
+++ b/cpukit/include/rtems/score/processormask.h
@@ -0,0 +1,290 @@
+/**
+ * @file
+ *
+ * @brief Processor Mask API
+ *
+ * @ingroup ScoreProcessorMask
+ */
+
+/*
+ * Copyright (c) 2016, 2017 embedded brains GmbH. All rights reserved.
+ *
+ * embedded brains GmbH
+ * Dornierstr. 4
+ * 82178 Puchheim
+ * Germany
+ * <rtems@embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#ifndef _RTEMS_SCORE_PROCESSORMASK_H
+#define _RTEMS_SCORE_PROCESSORMASK_H
+
+#include <rtems/score/cpu.h>
+
+#include <sys/cpuset.h>
+
+#include <strings.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+/**
+ * @defgroup ScoreProcessorMask Processor Mask
+ *
+ * @ingroup Score
+ *
+ * The processor mask provides a bit map large enough to provide one bit for
+ * each processor in the system. It is a fixed size internal data type
+ * provided for efficiency in addition to the API level cpu_set_t.
+ *
+ * @{
+ */
+
+/**
+ * @brief A bit map which is large enough to provide one bit for each processor
+ * in the system.
+ */
+typedef BITSET_DEFINE( Processor_mask, CPU_MAXIMUM_PROCESSORS ) Processor_mask;
+
+RTEMS_INLINE_ROUTINE void _Processor_mask_Zero( Processor_mask *mask )
+{
+ BIT_ZERO( CPU_MAXIMUM_PROCESSORS, mask );
+}
+
+RTEMS_INLINE_ROUTINE bool _Processor_mask_Is_zero( const Processor_mask *mask )
+{
+ return BIT_EMPTY( CPU_MAXIMUM_PROCESSORS, mask );
+}
+
+RTEMS_INLINE_ROUTINE void _Processor_mask_Fill( Processor_mask *mask )
+{
+ BIT_FILL( CPU_MAXIMUM_PROCESSORS, mask );
+}
+
+RTEMS_INLINE_ROUTINE void _Processor_mask_Assign(
+ Processor_mask *dst, const Processor_mask *src
+)
+{
+ BIT_COPY( CPU_MAXIMUM_PROCESSORS, src, dst );
+}
+
+RTEMS_INLINE_ROUTINE void _Processor_mask_Set(
+ Processor_mask *mask,
+ uint32_t index
+)
+{
+ BIT_SET( CPU_MAXIMUM_PROCESSORS, index, mask );
+}
+
+RTEMS_INLINE_ROUTINE void _Processor_mask_Clear(
+ Processor_mask *mask,
+ uint32_t index
+)
+{
+ BIT_CLR( CPU_MAXIMUM_PROCESSORS, index, mask );
+}
+
+RTEMS_INLINE_ROUTINE bool _Processor_mask_Is_set(
+ const Processor_mask *mask,
+ uint32_t index
+)
+{
+ return BIT_ISSET( CPU_MAXIMUM_PROCESSORS, index, mask );
+}
+
+/**
+ * @brief Returns true if the processor sets a and b are equal, and false
+ * otherwise.
+ */
+RTEMS_INLINE_ROUTINE bool _Processor_mask_Is_equal(
+ const Processor_mask *a,
+ const Processor_mask *b
+)
+{
+ return !BIT_CMP( CPU_MAXIMUM_PROCESSORS, a, b );
+}
+
+/**
+ * @brief Returns true if the intersection of the processor sets a and b is
+ * non-empty, and false otherwise.
+ */
+RTEMS_INLINE_ROUTINE bool _Processor_mask_Has_overlap(
+ const Processor_mask *a,
+ const Processor_mask *b
+)
+{
+ return BIT_OVERLAP( CPU_MAXIMUM_PROCESSORS, a, b );
+}
+
+/**
+ * @brief Returns true if the processor set small is a subset of processor set
+ * big, and false otherwise.
+ */
+RTEMS_INLINE_ROUTINE bool _Processor_mask_Is_subset(
+ const Processor_mask *big,
+ const Processor_mask *small
+)
+{
+ return BIT_SUBSET( CPU_MAXIMUM_PROCESSORS, big, small );
+}
+
+/**
+ * @brief Performs a bitwise a = b & c.
+ */
+RTEMS_INLINE_ROUTINE void _Processor_mask_And(
+ Processor_mask *a,
+ const Processor_mask *b,
+ const Processor_mask *c
+)
+{
+ BIT_AND2( CPU_MAXIMUM_PROCESSORS, a, b, c );
+}
+
+/**
+ * @brief Performs a bitwise a = b & ~c.
+ */
+RTEMS_INLINE_ROUTINE void _Processor_mask_Nand(
+ Processor_mask *a,
+ const Processor_mask *b,
+ const Processor_mask *c
+)
+{
+ BIT_NAND2( CPU_MAXIMUM_PROCESSORS, a, b, c );
+}
+
+/**
+ * @brief Performs a bitwise a = b | c.
+ */
+RTEMS_INLINE_ROUTINE void _Processor_mask_Or(
+ Processor_mask *a,
+ const Processor_mask *b,
+ const Processor_mask *c
+)
+{
+ BIT_OR2( CPU_MAXIMUM_PROCESSORS, a, b, c );
+}
+
+/**
+ * @brief Performs a bitwise a = b ^ c.
+ */
+RTEMS_INLINE_ROUTINE void _Processor_mask_Xor(
+ Processor_mask *a,
+ const Processor_mask *b,
+ const Processor_mask *c
+)
+{
+ BIT_XOR2( CPU_MAXIMUM_PROCESSORS, a, b, c );
+}
+
+RTEMS_INLINE_ROUTINE uint32_t _Processor_mask_Count( const Processor_mask *a )
+{
+ return (uint32_t) BIT_COUNT( CPU_MAXIMUM_PROCESSORS, a );
+}
+
+RTEMS_INLINE_ROUTINE uint32_t _Processor_mask_Find_last_set( const Processor_mask *a )
+{
+ return (uint32_t) BIT_FLS( CPU_MAXIMUM_PROCESSORS, a );
+}
+
+/**
+ * @brief Returns the subset of 32 processors containing the specified index as
+ * an unsigned 32-bit integer.
+ */
+RTEMS_INLINE_ROUTINE uint32_t _Processor_mask_To_uint32_t(
+ const Processor_mask *mask,
+ uint32_t index
+)
+{
+ long bits = mask->__bits[ __bitset_words( index ) ];
+
+ return (uint32_t) (bits >> (32 * (index % _BITSET_BITS) / 32));
+}
+
+/**
+ * @brief Creates a processor set from an unsigned 32-bit integer relative to
+ * the specified index.
+ */
+RTEMS_INLINE_ROUTINE void _Processor_mask_From_uint32_t(
+ Processor_mask *mask,
+ uint32_t bits,
+ uint32_t index
+)
+{
+ _Processor_mask_Zero( mask );
+ mask->__bits[ __bitset_words( index ) ] = ((long) bits) << (32 * (index % _BITSET_BITS) / 32);
+}
+
+/**
+ * @brief Creates a processor set from the specified index.
+ */
+RTEMS_INLINE_ROUTINE void _Processor_mask_From_index(
+ Processor_mask *mask,
+ uint32_t index
+)
+{
+ BIT_SETOF( CPU_MAXIMUM_PROCESSORS, (int) index, mask );
+}
+
+typedef enum {
+ PROCESSOR_MASK_COPY_LOSSLESS,
+ PROCESSOR_MASK_COPY_PARTIAL_LOSS,
+ PROCESSOR_MASK_COPY_COMPLETE_LOSS,
+ PROCESSOR_MASK_COPY_INVALID_SIZE
+} Processor_mask_Copy_status;
+
+RTEMS_INLINE_ROUTINE bool _Processor_mask_Is_at_most_partial_loss(
+ Processor_mask_Copy_status status
+)
+{
+ return (unsigned int) status <= PROCESSOR_MASK_COPY_PARTIAL_LOSS;
+}
+
+Processor_mask_Copy_status _Processor_mask_Copy(
+ long *dst,
+ size_t dst_size,
+ const long *src,
+ size_t src_size
+);
+
+RTEMS_INLINE_ROUTINE Processor_mask_Copy_status _Processor_mask_To_cpu_set_t(
+ const Processor_mask *src,
+ size_t dst_size,
+ cpu_set_t *dst
+)
+{
+ return _Processor_mask_Copy(
+ &dst->__bits[ 0 ],
+ dst_size,
+ &src->__bits[ 0 ],
+ sizeof( *src )
+ );
+}
+
+RTEMS_INLINE_ROUTINE Processor_mask_Copy_status _Processor_mask_From_cpu_set_t(
+ Processor_mask *dst,
+ size_t src_size,
+ const cpu_set_t *src
+)
+{
+ return _Processor_mask_Copy(
+ &dst->__bits[ 0 ],
+ sizeof( *dst ),
+ &src->__bits[ 0 ],
+ src_size
+ );
+}
+
+extern const Processor_mask _Processor_mask_The_one_and_only;
+
+/** @} */
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* _RTEMS_SCORE_PROCESSORMASK_H */
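Taken together, the inline routines in this header form a small set-algebra API over Processor_mask, plus lossy-aware conversion to and from the API-level cpu_set_t. The following is a minimal usage sketch, assuming an RTEMS cpukit build environment; count_usable_processors() and export_mask() are hypothetical helpers written for illustration and are not part of this commit:

    #include <rtems/score/processormask.h>

    /* Hypothetical helper: count the processors that are both online and
     * candidates, using the bitwise AND of the two masks. */
    static uint32_t count_usable_processors(
      const Processor_mask *online,
      const Processor_mask *candidates
    )
    {
      Processor_mask usable;

      /* usable = online & candidates */
      _Processor_mask_And( &usable, online, candidates );

      if ( _Processor_mask_Is_zero( &usable ) ) {
        return 0;
      }

      /* Population count of the resulting mask. */
      return _Processor_mask_Count( &usable );
    }

    /* Hypothetical helper: export an internal mask to a POSIX-level
     * cpu_set_t.  The copy may lose bits if the destination is smaller
     * than the Processor_mask; the copy status reports this. */
    static bool export_mask( const Processor_mask *mask, cpu_set_t *set )
    {
      Processor_mask_Copy_status status;

      status = _Processor_mask_To_cpu_set_t( mask, sizeof( *set ), set );

      return _Processor_mask_Is_at_most_partial_loss( status );
    }

Note that the Processor_mask_Copy_status enumerators are ordered so that _Processor_mask_Is_at_most_partial_loss() can accept both the lossless and the partial-loss outcomes with a single comparison.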