path: root/cpukit/include/rtems/score/smplockseq.h
author      Chris Johns <chrisj@rtems.org>                         2017-12-23 18:18:56 +1100
committer   Sebastian Huber <sebastian.huber@embedded-brains.de>   2018-01-25 08:45:26 +0100
commit      2afb22b7e1ebcbe40373ff7e0efae7d207c655a9 (patch)
tree        44759efe9374f13200a97e96d91bd9a2b7e5ce2a /cpukit/include/rtems/score/smplockseq.h
parent      MAINTAINERS: Add myself to Write After Approval. (diff)
download    rtems-2afb22b7e1ebcbe40373ff7e0efae7d207c655a9.tar.bz2
Remove make preinstall
A speciality of the RTEMS build system was the make preinstall step. It copied header files from arbitrary locations into the build tree. The header files were included via the -Bsome/build/tree/path GCC command line option.

This has at least seven problems:

* The make preinstall step itself needs time and disk space.
* Errors in header files show up in the build tree copy. This makes it hard for editors to open the right file to fix the error.
* There is no clear relationship between source and build tree header files. This makes an audit of the build process difficult.
* The visibility of all header files in the build tree makes it difficult to enforce API barriers. For example, it is discouraged to use BSP-specifics in the cpukit.
* An introduction of a new build system is difficult.
* Include paths specified by the -B option are system headers. This may suppress warnings.
* The parallel build had sporadic failures on some hosts.

This patch removes the make preinstall step. All installed header files are moved to dedicated include directories in the source tree. Let @RTEMS_CPU@ be the target architecture, e.g. arm, powerpc, sparc, etc. Let @RTEMS_BSP_FAMILY@ be a BSP family base directory, e.g. erc32, imx, qoriq, etc.

The new cpukit include directories are:

* cpukit/include
* cpukit/score/cpu/@RTEMS_CPU@/include
* cpukit/libnetworking

The new BSP include directories are:

* bsps/include
* bsps/@RTEMS_CPU@/include
* bsps/@RTEMS_CPU@/@RTEMS_BSP_FAMILY@/include

There are build tree include directories for generated files.

The include directory order favours the most general header file, i.e. it is not possible to override general header files via the include path order.

The "bootstrap -p" option was removed. The new "bootstrap -H" option should be used to regenerate the "headers.am" files.

Update #3254.
Diffstat (limited to 'cpukit/include/rtems/score/smplockseq.h')
-rw-r--r--  cpukit/include/rtems/score/smplockseq.h  176
1 file changed, 176 insertions, 0 deletions
diff --git a/cpukit/include/rtems/score/smplockseq.h b/cpukit/include/rtems/score/smplockseq.h
new file mode 100644
index 0000000000..5daaee9c6e
--- /dev/null
+++ b/cpukit/include/rtems/score/smplockseq.h
@@ -0,0 +1,176 @@
+/**
+ * @file
+ *
+ * @ingroup ScoreSMPLock
+ *
+ * @brief SMP Sequence Lock API
+ */
+
+/*
+ * Copyright (c) 2016 embedded brains GmbH
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#ifndef _RTEMS_SCORE_SMPLOCKSEQ_H
+#define _RTEMS_SCORE_SMPLOCKSEQ_H
+
+#include <rtems/score/cpuopts.h>
+
+#if defined(RTEMS_SMP)
+
+#include <rtems/score/assert.h>
+#include <rtems/score/atomic.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+/**
+ * @addtogroup ScoreSMPLock
+ *
+ * @{
+ */
+
+/**
+ * @brief SMP sequence lock control.
+ *
+ * The sequence lock offers a consistent data set for readers in the presence
+ * of at most one concurrent writer. Due to the read-modify-write operation
+ * in _SMP_sequence_lock_Read_retry(), the data corresponding to the last
+ * written sequence number is observed. To allow multiple writers, an
+ * additional SMP lock is necessary to serialize writes.
+ *
+ * See also Hans-J. Boehm, HP Laboratories,
+ * "Can Seqlocks Get Along With Programming Language Memory Models?",
+ * http://www.hpl.hp.com/techreports/2012/HPL-2012-68.pdf
+ */
+typedef struct {
+ /**
+ * @brief The sequence number.
+ *
+ * An odd value indicates that a write is in progress.
+ */
+ Atomic_Uint sequence;
+} SMP_sequence_lock_Control;
+
+/**
+ * @brief SMP sequence lock control initializer for static initialization.
+ */
+#define SMP_SEQUENCE_LOCK_INITIALIZER { ATOMIC_INITIALIZER_UINT( 0 ) }
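+
+/*
+ * For illustration only (the variable name below is hypothetical): a sequence
+ * lock defined at file scope can be initialized statically like this:
+ *
+ *   static SMP_sequence_lock_Control _Example_lock =
+ *     SMP_SEQUENCE_LOCK_INITIALIZER;
+ */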
+
+/**
+ * @brief Initializes an SMP sequence lock.
+ *
+ * Concurrent initialization leads to unpredictable results.
+ *
+ * @param lock The SMP sequence lock control.
+ */
+static inline void _SMP_sequence_lock_Initialize( SMP_sequence_lock_Control *lock )
+{
+ _Atomic_Init_uint( &lock->sequence, 0 );
+}
+
+/**
+ * @brief Destroys an SMP sequence lock.
+ *
+ * Concurrent destruction leads to unpredictable results.
+ *
+ * @param lock The SMP sequence lock control.
+ */
+static inline void _SMP_sequence_lock_Destroy( SMP_sequence_lock_Control *lock )
+{
+ (void) lock;
+}
+
+/**
+ * @brief Begins an SMP sequence lock write operation.
+ *
+ * This function will not disable interrupts. The caller must ensure that the
+ * current thread of execution is not interrupted indefinitely, since this
+ * would starve readers.
+ *
+ * @param lock The SMP sequence lock control.
+ *
+ * @return The current sequence number.
+ */
+static inline unsigned int _SMP_sequence_lock_Write_begin(
+ SMP_sequence_lock_Control *lock
+)
+{
+ unsigned int seq;
+
+ seq = _Atomic_Load_uint( &lock->sequence, ATOMIC_ORDER_RELAXED );
+ _Assert( seq % 2 == 0 );
+
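+  /* An odd sequence number indicates to readers that a write is in progress */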
+ _Atomic_Store_uint( &lock->sequence, seq + 1, ATOMIC_ORDER_RELAXED );
+
+ /* There is no atomic store with acquire/release semantics */
+ _Atomic_Fence( ATOMIC_ORDER_ACQ_REL );
+
+ return seq;
+}
+
+/**
+ * @brief Ends an SMP sequence lock write operation.
+ *
+ * @param lock The SMP sequence lock control.
+ * @param seq The sequence number returned by _SMP_sequence_lock_Write_begin().
+ */
+static inline void _SMP_sequence_lock_Write_end(
+ SMP_sequence_lock_Control *lock,
+ unsigned int seq
+)
+{
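+  /* Restore an even sequence number and publish the new data to readers */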
+ _Atomic_Store_uint( &lock->sequence, seq + 2, ATOMIC_ORDER_RELEASE );
+}
+
+/**
+ * @brief Begins an SMP sequence lock read operation.
+ *
+ * This function will not disable interrupts.
+ *
+ * @param lock The SMP sequence lock control.
+ *
+ * @return The current sequence number.
+ */
+static inline unsigned int _SMP_sequence_lock_Read_begin(
+ const SMP_sequence_lock_Control *lock
+)
+{
+ return _Atomic_Load_uint( &lock->sequence, ATOMIC_ORDER_ACQUIRE );
+}
+
+/**
+ * @brief Ends an SMP sequence lock read operation and indicates if a retry is
+ * necessary.
+ *
+ * @param lock The SMP sequence lock control.
+ * @param seq The sequence number returned by _SMP_sequence_lock_Read_begin().
+ *
+ * @retval true The read operation must be retried with a call to
+ * _SMP_sequence_lock_Read_begin().
+ * @retval false Otherwise.
+ */
+static inline bool _SMP_sequence_lock_Read_retry(
+ SMP_sequence_lock_Control *lock,
+ unsigned int seq
+)
+{
+ unsigned int seq2;
+
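+  /*
+   * The fetch-and-add of zero is a read-modify-write with release order, so
+   * the loads of the protected data performed after
+   * _SMP_sequence_lock_Read_begin() are ordered before this second read of
+   * the sequence number; see also the reference in the type description.
+   */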
+ seq2 = _Atomic_Fetch_add_uint( &lock->sequence, 0, ATOMIC_ORDER_RELEASE );
+ return seq != seq2 || seq % 2 != 0;
+}
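+
+/*
+ * A minimal usage sketch, not part of the original header: the guard macro
+ * SMP_SEQUENCE_LOCK_EXAMPLE, the Example_pair type, and the two helpers below
+ * are hypothetical names used only to illustrate the intended write and read
+ * protocols of this lock.
+ */
+#if defined(SMP_SEQUENCE_LOCK_EXAMPLE)
+typedef struct {
+  SMP_sequence_lock_Control Lock;
+  int                       first;
+  int                       second;
+} Example_pair;
+
+static inline void _Example_pair_Write(
+  Example_pair *pair,
+  int           first,
+  int           second
+)
+{
+  unsigned int seq;
+
+  /* At most one writer at a time; serialize concurrent writers externally */
+  seq = _SMP_sequence_lock_Write_begin( &pair->Lock );
+  pair->first = first;
+  pair->second = second;
+  _SMP_sequence_lock_Write_end( &pair->Lock, seq );
+}
+
+static inline void _Example_pair_Read(
+  Example_pair *pair,
+  int          *first,
+  int          *second
+)
+{
+  unsigned int seq;
+
+  /* Retry until a consistent snapshot of both values was observed */
+  do {
+    seq = _SMP_sequence_lock_Read_begin( &pair->Lock );
+    *first = pair->first;
+    *second = pair->second;
+  } while ( _SMP_sequence_lock_Read_retry( &pair->Lock, seq ) );
+}
+#endif /* SMP_SEQUENCE_LOCK_EXAMPLE */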
+
+/**@}*/
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* RTEMS_SMP */
+
+#endif /* _RTEMS_SCORE_SMPLOCKSEQ_H */