author     Sebastian Huber <sebastian.huber@embedded-brains.de>	2017-11-14 12:57:01 +0100
committer  Sebastian Huber <sebastian.huber@embedded-brains.de>	2017-11-16 09:02:24 +0100
commit     97a98f6cd767b3a68279890488c7b451788f84f4 (patch)
tree       279ba49b9f03b0f111ff5df771e9d4f3b3fd4feb /rtemsbsd
parent     LOCKING(9): Remove dead code (DDB) (diff)
download   rtems-libbsd-97a98f6cd767b3a68279890488c7b451788f84f4.tar.bz2
RWLOCK(9): Add reader/writer lock implementation
Diffstat (limited to 'rtemsbsd')
-rw-r--r--  rtemsbsd/include/machine/rtems-bsd-rwlock.h      |  60
-rw-r--r--  rtemsbsd/include/machine/rtems-bsd-rwlockimpl.h  | 429
-rw-r--r--  rtemsbsd/rtems/rtems-kernel-rwlock.c             | 113
-rw-r--r--  rtemsbsd/rtems/rtems-kernel-rwlockimpl.c         | 177
4 files changed, 749 insertions, 30 deletions
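
For orientation, a minimal sketch of the rw(9) consumer API that this patch backs with an RTEMS implementation; map_lock, map_value, and the map_* helpers are hypothetical names, not part of the patch:

#include <machine/rtems-bsd-kernel-space.h>

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/rwlock.h>

static struct rwlock map_lock;
static int map_value;

static void
map_setup(void)
{

	rw_init(&map_lock, "map");
}

static int
map_read(void)
{
	int v;

	rw_rlock(&map_lock);	/* shared: multiple readers may enter */
	v = map_value;
	rw_runlock(&map_lock);
	return (v);
}

static void
map_write(int v)
{

	rw_wlock(&map_lock);	/* exclusive: a single writer */
	map_value = v;
	rw_wunlock(&map_lock);
}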
diff --git a/rtemsbsd/include/machine/rtems-bsd-rwlock.h b/rtemsbsd/include/machine/rtems-bsd-rwlock.h
new file mode 100644
index 00000000..51b41604
--- /dev/null
+++ b/rtemsbsd/include/machine/rtems-bsd-rwlock.h
@@ -0,0 +1,60 @@
+/**
+ * @file
+ *
+ * @ingroup rtems_bsd_machine
+ *
+ * @brief Reader/writer lock data structure.
+ */
+
+/*
+ * Copyright (c) 2017 embedded brains GmbH. All rights reserved.
+ *
+ * embedded brains GmbH
+ * Dornierstr. 4
+ * 82178 Puchheim
+ * Germany
+ * <rtems@embedded-brains.de>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _RTEMS_BSD_MACHINE_RTEMS_BSD_RWLOCK_H_
+#define _RTEMS_BSD_MACHINE_RTEMS_BSD_RWLOCK_H_
+
+#include <rtems/score/threadq.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+typedef struct {
+ Thread_queue_Control writer_queue;
+ Thread_queue_Control reader_queue;
+ int readers;
+ int nest_level;
+} rtems_bsd_rwlock;
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* _RTEMS_BSD_MACHINE_RTEMS_BSD_RWLOCK_H_ */
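
These four fields carry the complete lock state: writer_queue holds the exclusive owner (in Queue.owner) plus blocked writers, reader_queue holds blocked readers, readers counts the threads currently holding the lock shared, and nest_level counts writer recursion. A sketch of the invariants the implementation maintains (illustrative only, not part of the patch):

/*
 * Illustrative only: a write owner excludes active readers, and
 * recursion is meaningful only while a writer owns the lock.
 */
static inline int
rtems_bsd_rwlock_state_is_valid(const rtems_bsd_rwlock *rw)
{
	int has_writer = (rw->writer_queue.Queue.owner != NULL);

	return ((!has_writer || rw->readers == 0) &&
	    (has_writer || rw->nest_level == 0) &&
	    rw->readers >= 0);
}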
diff --git a/rtemsbsd/include/machine/rtems-bsd-rwlockimpl.h b/rtemsbsd/include/machine/rtems-bsd-rwlockimpl.h
new file mode 100644
index 00000000..663e7000
--- /dev/null
+++ b/rtemsbsd/include/machine/rtems-bsd-rwlockimpl.h
@@ -0,0 +1,429 @@
+/**
+ * @file
+ *
+ * @ingroup rtems_bsd_machine
+ *
+ * @brief Implementation of a reader/writer lock with priority inheritance for
+ * exclusive owners (writer).
+ */
+
+/*
+ * Copyright (c) 2017 embedded brains GmbH. All rights reserved.
+ *
+ * embedded brains GmbH
+ * Dornierstr. 4
+ * 82178 Puchheim
+ * Germany
+ * <rtems@embedded-brains.de>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _RTEMS_BSD_MACHINE_RTEMS_BSD_RWLOCKIMPL_H_
+#define _RTEMS_BSD_MACHINE_RTEMS_BSD_RWLOCKIMPL_H_
+
+#include <machine/rtems-bsd-rwlock.h>
+#include <machine/rtems-bsd-support.h>
+
+#include <sys/types.h>
+#include <sys/lock.h>
+
+#include <rtems/score/threadimpl.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+typedef struct {
+ Thread_queue_Context writer;
+ Thread_queue_Context reader;
+} rtems_bsd_rwlock_context;
+
+static inline void
+rtems_bsd_rwlock_context_init(rtems_bsd_rwlock_context *context)
+{
+
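+ /*
+  * Only the writer context is initialized eagerly; the reader context
+  * is initialized on demand in the slow paths.
+  */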
+ _Thread_queue_Context_initialize(&context->writer);
+}
+
+static inline void
+rtems_bsd_rwlock_init(struct lock_object *lock, rtems_bsd_rwlock *rw,
+ struct lock_class *class, const char *name, const char *type, int flags)
+{
+ _Thread_queue_Initialize(&rw->writer_queue, name);
+ _Thread_queue_Initialize(&rw->reader_queue, name);
+ rw->readers = 0;
+ rw->nest_level = 0;
+ lock_init(lock, class, name, type, flags);
+}
+
+void rtems_bsd_rwlock_wlock_more(const struct lock_object *lock,
+ rtems_bsd_rwlock *rw, Thread_Control *executing,
+ rtems_bsd_rwlock_context *context);
+
+void rtems_bsd_rwlock_wunlock_more(rtems_bsd_rwlock *rw,
+ Thread_Control *wowner, rtems_bsd_rwlock_context *context);
+
+void rtems_bsd_rwlock_rlock_more(rtems_bsd_rwlock *rw,
+ rtems_bsd_rwlock_context *context);
+
+void rtems_bsd_rwlock_runlock_more(rtems_bsd_rwlock *rw,
+ rtems_bsd_rwlock_context *context);
+
+void rtems_bsd_rwlock_ready_waiting_readers(rtems_bsd_rwlock *rw,
+ rtems_bsd_rwlock_context *context);
+
+#define rtems_bsd_rwlock_isr_disable(isr_level, context) \
+do { \
+ _ISR_Local_disable(isr_level); \
+ _ISR_lock_ISR_disable_profile( \
+ &(context)->writer.Lock_context.Lock_context) \
+} while (0)
+
+static inline void
+rtems_bsd_rwlock_acquire_critical(rtems_bsd_rwlock *rw,
+ rtems_bsd_rwlock_context *context)
+{
+
+ _Thread_queue_Queue_acquire_critical(&rw->writer_queue.Queue,
+ &rw->writer_queue.Lock_stats,
+ &context->writer.Lock_context.Lock_context);
+#if defined(RTEMS_SMP) && defined(RTEMS_DEBUG)
+ rw->writer_queue.owner = _SMP_lock_Who_am_I();
+#endif
+}
+
+static inline void
+rtems_bsd_rwlock_release(rtems_bsd_rwlock *rw, ISR_Level isr_level,
+ rtems_bsd_rwlock_context *context)
+{
+
+#if defined(RTEMS_SMP) && defined(RTEMS_DEBUG)
+ _Assert( _Thread_queue_Is_lock_owner( &rw->writer_queue ) );
+ rw->writer_queue.owner = SMP_LOCK_NO_OWNER;
+#endif
+ _Thread_queue_Queue_release_critical(&rw->writer_queue.Queue,
+ &context->writer.Lock_context.Lock_context);
+ _ISR_Local_enable(isr_level);
+}
+
+static inline void
+rtems_bsd_rwlock_set_isr_level(rtems_bsd_rwlock_context *context,
+ ISR_Level isr_level)
+{
+
+ _ISR_lock_Context_set_level(&context->writer.Lock_context.Lock_context,
+ isr_level);
+}
+
+static inline Thread_Control *
+rtems_bsd_rwlock_wowner(const rtems_bsd_rwlock *rw)
+{
+
+ return (rw->writer_queue.Queue.owner);
+}
+
+static inline void
+rtems_bsd_rwlock_set_wowner(rtems_bsd_rwlock *rw, Thread_Control *wowner)
+{
+
+ rw->writer_queue.Queue.owner = wowner;
+}
+
+static inline void
+rtems_bsd_rwlock_wlock(struct lock_object *lock, rtems_bsd_rwlock *rw)
+{
+ ISR_Level isr_level;
+ rtems_bsd_rwlock_context context;
+ Thread_Control *executing;
+
+ rtems_bsd_rwlock_context_init(&context);
+ rtems_bsd_rwlock_isr_disable(isr_level, &context);
+ executing = _Thread_Executing;
+ rtems_bsd_rwlock_acquire_critical(rw, &context);
+
+ if (__predict_true(rtems_bsd_rwlock_wowner(rw) == NULL &&
+ rw->readers == 0)) {
+ rtems_bsd_rwlock_set_wowner(rw, executing);
+ _Thread_Resource_count_increment(executing);
+ rtems_bsd_rwlock_release(rw, isr_level, &context);
+ } else {
+ rtems_bsd_rwlock_set_isr_level(&context, isr_level);
+ rtems_bsd_rwlock_wlock_more(lock, rw, executing,
+ &context);
+ }
+}
+
+static inline int
+rtems_bsd_rwlock_try_wlock(struct lock_object *lock, rtems_bsd_rwlock *rw)
+{
+ int success;
+ ISR_Level isr_level;
+ rtems_bsd_rwlock_context context;
+ Thread_Control *executing;
+
+ rtems_bsd_rwlock_context_init(&context);
+ rtems_bsd_rwlock_isr_disable(isr_level, &context);
+ executing = _Thread_Executing;
+ rtems_bsd_rwlock_acquire_critical(rw, &context);
+
+ if (rw->readers == 0) {
+ Thread_Control *wowner;
+
+ wowner = rtems_bsd_rwlock_wowner(rw);
+
+ if (wowner == NULL) {
+ rtems_bsd_rwlock_set_wowner(rw, executing);
+ _Thread_Resource_count_increment(executing);
+ success = 1;
+ } else if (wowner == executing) {
+ BSD_ASSERT(lock->lo_flags & LO_RECURSABLE);
+ ++rw->nest_level;
+ success = 1;
+ } else {
+ success = 0;
+ }
+ } else {
+ success = 0;
+ }
+
+ rtems_bsd_rwlock_release(rw, isr_level, &context);
+
+ return (success);
+}
+
+static inline void
+rtems_bsd_rwlock_wunlock(rtems_bsd_rwlock *rw)
+{
+ ISR_Level isr_level;
+ rtems_bsd_rwlock_context context;
+ Thread_Control *wowner;
+ int nest_level;
+
+ rtems_bsd_rwlock_context_init(&context);
+ rtems_bsd_rwlock_isr_disable(isr_level, &context);
+ rtems_bsd_rwlock_acquire_critical(rw, &context);
+
+ nest_level = rw->nest_level;
+ wowner = rtems_bsd_rwlock_wowner(rw);
+
+ BSD_ASSERT(wowner == _Thread_Executing);
+
+ if (__predict_true(nest_level == 0)) {
+ rtems_bsd_rwlock_set_wowner(rw, NULL);
+ _Thread_Resource_count_decrement(wowner);
+
+ if (__predict_true(
+ _Thread_queue_Is_empty(&rw->writer_queue.Queue) &&
+ _Thread_queue_Is_empty(&rw->reader_queue.Queue))) {
+ rtems_bsd_rwlock_release(rw, isr_level, &context);
+ } else {
+ rtems_bsd_rwlock_set_isr_level(&context,
+ isr_level);
+ rtems_bsd_rwlock_wunlock_more(rw, wowner,
+ &context);
+ }
+ } else {
+ rw->nest_level = nest_level - 1;
+ rtems_bsd_rwlock_release(rw, isr_level, &context);
+ }
+}
+
+static inline int
+rtems_bsd_rwlock_wowned(const rtems_bsd_rwlock *rw)
+{
+
+ return (rtems_bsd_rwlock_wowner(rw) == _Thread_Get_executing());
+}
+
+static inline int
+rtems_bsd_rwlock_recursed(const rtems_bsd_rwlock *rw)
+{
+
+ return (rw->nest_level != 0);
+}
+
+static inline void
+rtems_bsd_rwlock_rlock(struct lock_object *lock, rtems_bsd_rwlock *rw)
+{
+ ISR_Level isr_level;
+ rtems_bsd_rwlock_context context;
+
+ rtems_bsd_rwlock_context_init(&context);
+ rtems_bsd_rwlock_isr_disable(isr_level, &context);
+ rtems_bsd_rwlock_acquire_critical(rw, &context);
+
+ if (__predict_true(rtems_bsd_rwlock_wowner(rw) == NULL &&
+ _Thread_queue_Is_empty(&rw->writer_queue.Queue))) {
+ ++rw->readers;
+ _Thread_Resource_count_increment(_Thread_Executing);
+ rtems_bsd_rwlock_release(rw, isr_level, &context);
+ } else {
+ rtems_bsd_rwlock_set_isr_level(&context, isr_level);
+ rtems_bsd_rwlock_rlock_more(rw, &context);
+ }
+}
+
+static inline int
+rtems_bsd_rwlock_try_rlock(struct lock_object *lock, rtems_bsd_rwlock *rw)
+{
+ int success;
+ ISR_Level isr_level;
+ rtems_bsd_rwlock_context context;
+
+ rtems_bsd_rwlock_context_init(&context);
+ rtems_bsd_rwlock_isr_disable(isr_level, &context);
+ rtems_bsd_rwlock_acquire_critical(rw, &context);
+
+ if (__predict_true(rtems_bsd_rwlock_wowner(rw) == NULL &&
+ _Thread_queue_Is_empty(&rw->writer_queue.Queue))) {
+ ++rw->readers;
+ _Thread_Resource_count_increment(_Thread_Executing);
+ success = 1;
+ } else {
+ success = 0;
+ }
+
+ rtems_bsd_rwlock_release(rw, isr_level, &context);
+
+ return (success);
+}
+
+static inline void
+rtems_bsd_rwlock_runlock(rtems_bsd_rwlock *rw)
+{
+ ISR_Level isr_level;
+ rtems_bsd_rwlock_context context;
+ int readers;
+
+ rtems_bsd_rwlock_context_init(&context);
+ rtems_bsd_rwlock_isr_disable(isr_level, &context);
+ rtems_bsd_rwlock_acquire_critical(rw, &context);
+
+ readers = rw->readers;
+ _Thread_Resource_count_decrement(_Thread_Executing);
+
+ if (__predict_true(readers == 1)) {
+ rw->readers = 0;
+
+ if (__predict_true(
+ _Thread_queue_Is_empty(&rw->writer_queue.Queue) &&
+ _Thread_queue_Is_empty(&rw->reader_queue.Queue))) {
+ rtems_bsd_rwlock_release(rw, isr_level,
+ &context);
+ } else {
+ rtems_bsd_rwlock_set_isr_level(&context,
+ isr_level);
+ rtems_bsd_rwlock_runlock_more(rw, &context);
+ }
+ } else {
+ rw->readers = readers - 1;
+ rtems_bsd_rwlock_release(rw, isr_level, &context);
+ }
+}
+
+static inline int
+rtems_bsd_rwlock_try_upgrade(rtems_bsd_rwlock *rw)
+{
+ int success;
+ ISR_Level isr_level;
+ rtems_bsd_rwlock_context context;
+ Thread_Control *executing;
+ Thread_Control *wowner;
+
+ rtems_bsd_rwlock_context_init(&context);
+ rtems_bsd_rwlock_isr_disable(isr_level, &context);
+ executing = _Thread_Executing;
+ rtems_bsd_rwlock_acquire_critical(rw, &context);
+
+ wowner = rtems_bsd_rwlock_wowner(rw);
+ BSD_ASSERT(wowner == NULL);
+
+ if (rw->readers == 1) {
+ rw->readers = 0;
+ rtems_bsd_rwlock_set_wowner(rw, executing);
+ /* FIXME: priority inheritance */
+ success = 1;
+ } else {
+ success = 0;
+ }
+
+ rtems_bsd_rwlock_release(rw, isr_level, &context);
+
+ return (success);
+}
+
+static inline void
+rtems_bsd_rwlock_downgrade(rtems_bsd_rwlock *rw)
+{
+ ISR_Level isr_level;
+ rtems_bsd_rwlock_context context;
+ Thread_Control *wowner;
+
+ rtems_bsd_rwlock_context_init(&context);
+ rtems_bsd_rwlock_isr_disable(isr_level, &context);
+ rtems_bsd_rwlock_acquire_critical(rw, &context);
+
+ wowner = rtems_bsd_rwlock_wowner(rw);
+
+ BSD_ASSERT(wowner == _Thread_Executing);
+ BSD_ASSERT(rw->nest_level == 0);
+
+ rtems_bsd_rwlock_set_wowner(rw, NULL);
+ rw->readers = 1;
+
+ if (__predict_true(_Thread_queue_Is_empty(&rw->reader_queue.Queue))) {
+ rtems_bsd_rwlock_release(rw, isr_level, &context);
+ } else {
+ rtems_bsd_rwlock_set_isr_level(&context, isr_level);
+ rtems_bsd_rwlock_ready_waiting_readers(rw, &context);
+ }
+}
+
+static inline const char *
+rtems_bsd_rwlock_name(const rtems_bsd_rwlock *rw)
+{
+
+ return (rw->writer_queue.Queue.name);
+}
+
+static inline void
+rtems_bsd_rwlock_destroy(struct lock_object *lock, rtems_bsd_rwlock *rw)
+{
+ BSD_ASSERT(_Thread_queue_Is_empty(&rw->writer_queue.Queue));
+ BSD_ASSERT(_Thread_queue_Is_empty(&rw->reader_queue.Queue));
+
+ if (rtems_bsd_rwlock_wowned(rw)) {
+ rw->nest_level = 0;
+ rtems_bsd_rwlock_wunlock(rw);
+ }
+
+ _Thread_queue_Destroy(&rw->writer_queue);
+ _Thread_queue_Destroy(&rw->reader_queue);
+ lock_destroy(lock);
+}
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* _RTEMS_BSD_MACHINE_RTEMS_BSD_RWLOCKIMPL_H_ */
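
The try-upgrade and downgrade primitives above enable the classic optimistic read-then-update pattern at the rw(9) level. A hedged sketch (cache_refresh, entry_is_stale, and entry_refresh are hypothetical names, not part of the patch):

static void
cache_refresh(struct cache *c, struct entry *e)
{

	rw_rlock(&c->lock);
	if (entry_is_stale(e)) {
		if (!rw_try_upgrade(&c->lock)) {
			/*
			 * Upgrade failed (other readers present): drop the
			 * read lock and retake it exclusively.
			 */
			rw_runlock(&c->lock);
			rw_wlock(&c->lock);
		}
		/* Re-check: the entry may have changed while unlocked. */
		if (entry_is_stale(e))
			entry_refresh(e);
		rw_downgrade(&c->lock);	/* back to shared mode */
	}
	rw_runlock(&c->lock);
}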
diff --git a/rtemsbsd/rtems/rtems-kernel-rwlock.c b/rtemsbsd/rtems/rtems-kernel-rwlock.c
index 80918f70..c2002e0e 100644
--- a/rtemsbsd/rtems/rtems-kernel-rwlock.c
+++ b/rtemsbsd/rtems/rtems-kernel-rwlock.c
@@ -15,7 +15,7 @@
* USA
* <kevin.kirspel@optimedical.com>
*
- * Copyright (c) 2013-2015 embedded brains GmbH. All rights reserved.
+ * Copyright (c) 2013, 2017 embedded brains GmbH. All rights reserved.
*
* embedded brains GmbH
* Dornierstr. 4
@@ -23,6 +23,9 @@
* Germany
* <rtems@embedded-brains.de>
*
+ * Copyright (c) 2006 John Baldwin <jhb@FreeBSD.org>
+ * All rights reserved.
+ *
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
@@ -46,7 +49,7 @@
*/
#include <machine/rtems-bsd-kernel-space.h>
-#include <machine/rtems-bsd-muteximpl.h>
+#include <machine/rtems-bsd-rwlockimpl.h>
#include <sys/param.h>
#include <sys/types.h>
@@ -70,9 +73,9 @@ struct lock_class lock_class_rw = {
.lc_unlock = unlock_rw,
};
-#define rw_wowner(rw) rtems_bsd_mutex_owner(&(rw)->mutex)
+#define rw_wowner(rw) rtems_bsd_rwlock_wowner(&(rw)->rwlock)
-#define rw_recursed(rw) rtems_bsd_mutex_recursed(&(rw)->mutex)
+#define rw_recursed(rw) rtems_bsd_rwlock_recursed(&(rw)->rwlock)
void
assert_rw(const struct lock_object *lock, int what)
@@ -84,16 +87,29 @@ assert_rw(const struct lock_object *lock, int what)
void
lock_rw(struct lock_object *lock, uintptr_t how)
{
+ struct rwlock *rw;
- rw_wlock((struct rwlock *)lock);
+ rw = (struct rwlock *)lock;
+ if (how)
+ rw_rlock(rw);
+ else
+ rw_wlock(rw);
}
uintptr_t
unlock_rw(struct lock_object *lock)
{
-
- rw_unlock((struct rwlock *)lock);
- return (0);
+ struct rwlock *rw;
+
+ rw = (struct rwlock *)lock;
+ rw_assert(rw, RA_LOCKED | LA_NOTRECURSED);
+ if (rw->rwlock.readers > 0) {
+ rw_runlock(rw);
+ return (1);
+ } else {
+ rw_wunlock(rw);
+ return (0);
+ }
}
void
@@ -105,7 +121,7 @@ rw_init_flags(struct rwlock *rw, const char *name, int opts)
if (opts & RW_RECURSE)
flags |= LO_RECURSABLE;
- rtems_bsd_mutex_init(&rw->lock_object, &rw->mutex, &lock_class_rw,
+ rtems_bsd_rwlock_init(&rw->lock_object, &rw->rwlock, &lock_class_rw,
name, NULL, flags);
}
@@ -113,77 +129,87 @@ void
rw_destroy(struct rwlock *rw)
{
- rtems_bsd_mutex_destroy(&rw->lock_object, &rw->mutex);
+ rtems_bsd_rwlock_destroy(&rw->lock_object, &rw->rwlock);
}
void
rw_sysinit(void *arg)
{
- struct rw_args *args = arg;
+ struct rw_args *args = arg;
- rw_init(args->ra_rw, args->ra_desc);
+ rw_init((struct rwlock *)args->ra_rw, args->ra_desc);
}
void
rw_sysinit_flags(void *arg)
{
- struct rw_args_flags *args = arg;
+ struct rw_args_flags *args = arg;
- rw_init_flags(args->ra_rw, args->ra_desc, args->ra_flags);
+ rw_init_flags((struct rwlock *)args->ra_rw, args->ra_desc,
+ args->ra_flags);
}
int
rw_wowned(struct rwlock *rw)
{
- return (rtems_bsd_mutex_owned(&rw->mutex));
+
+ return (rtems_bsd_rwlock_wowned(&rw->rwlock));
}
void
_rw_wlock(struct rwlock *rw, const char *file, int line)
{
- rtems_bsd_mutex_lock(&rw->lock_object, &rw->mutex);
+
+ rtems_bsd_rwlock_wlock(&rw->lock_object, &rw->rwlock);
}
int
_rw_try_wlock(struct rwlock *rw, const char *file, int line)
{
- return (rtems_bsd_mutex_trylock(&rw->lock_object, &rw->mutex));
+
+ return (rtems_bsd_rwlock_try_wlock(&rw->lock_object, &rw->rwlock));
}
void
_rw_wunlock(struct rwlock *rw, const char *file, int line)
{
- rtems_bsd_mutex_unlock(&rw->mutex);
+
+ rtems_bsd_rwlock_wunlock(&rw->rwlock);
}
void
_rw_rlock(struct rwlock *rw, const char *file, int line)
{
- rtems_bsd_mutex_lock(&rw->lock_object, &rw->mutex);
+
+ rtems_bsd_rwlock_rlock(&rw->lock_object, &rw->rwlock);
}
int
_rw_try_rlock(struct rwlock *rw, const char *file, int line)
{
- return (rtems_bsd_mutex_trylock(&rw->lock_object, &rw->mutex));
+
+ return (rtems_bsd_rwlock_try_rlock(&rw->lock_object, &rw->rwlock));
}
void
_rw_runlock(struct rwlock *rw, const char *file, int line)
{
- rtems_bsd_mutex_unlock(&rw->mutex);
+
+ rtems_bsd_rwlock_runlock(&rw->rwlock);
}
int
_rw_try_upgrade(struct rwlock *rw, const char *file, int line)
{
- return (1);
+
+ return (rtems_bsd_rwlock_try_upgrade(&rw->rwlock));
}
void
_rw_downgrade(struct rwlock *rw, const char *file, int line)
{
- /* Nothing to do */
+
+ rtems_bsd_rwlock_downgrade(&rw->rwlock);
}
#ifdef INVARIANT_SUPPORT
@@ -195,7 +221,7 @@ _rw_downgrade(struct rwlock *rw, const char *file, int line)
void
_rw_assert(const struct rwlock *rw, int what, const char *file, int line)
{
- const char *name = rtems_bsd_mutex_name(&rw->mutex);
+ const char *name = rtems_bsd_rwlock_name(&rw->rwlock);
switch (what) {
case RA_LOCKED:
@@ -204,6 +230,33 @@ _rw_assert(const struct rwlock *rw, int what, const char *file, int line)
case RA_RLOCKED:
case RA_RLOCKED | RA_RECURSED:
case RA_RLOCKED | RA_NOTRECURSED:
+#ifdef WITNESS
+ witness_assert(&rw->lock_object, what, file, line);
+#else
+ /*
+ * If some other thread has a write lock or we have one
+ * and are asserting a read lock, fail. Also, if no one
+ * has a lock at all, fail.
+ */
+ if ((rw->rwlock.readers == 0 && rw_wowner(rw) == NULL) ||
+ (rw->rwlock.readers == 0 && (what & RA_RLOCKED ||
+ rw_wowner(rw) != _Thread_Get_executing())))
+ panic("Lock %s not %slocked @ %s:%d\n",
+ name, (what & RA_RLOCKED) ?
+ "read " : "", file, line);
+
+ if (rw->rwlock.readers == 0 && !(what & RA_RLOCKED)) {
+ if (rw_recursed(rw)) {
+ if (what & RA_NOTRECURSED)
+ panic("Lock %s recursed @ %s:%d\n",
+ name, file,
+ line);
+ } else if (what & RA_RECURSED)
+ panic("Lock %s not recursed @ %s:%d\n",
+ name, file, line);
+ }
+#endif
+ break;
case RA_WLOCKED:
case RA_WLOCKED | RA_RECURSED:
case RA_WLOCKED | RA_NOTRECURSED:
@@ -212,11 +265,11 @@ _rw_assert(const struct rwlock *rw, int what, const char *file, int line)
name, file, line);
if (rw_recursed(rw)) {
if (what & RA_NOTRECURSED)
- panic("Lock %s recursed @ %s:%d\n", name, file,
- line);
+ panic("Lock %s recursed @ %s:%d\n",
+ name, file, line);
} else if (what & RA_RECURSED)
- panic("Lock %s not recursed @ %s:%d\n", name, file,
- line);
+ panic("Lock %s not recursed @ %s:%d\n",
+ name, file, line);
break;
case RA_UNLOCKED:
#ifdef WITNESS
@@ -227,8 +280,8 @@ _rw_assert(const struct rwlock *rw, int what, const char *file, int line)
* to see if we hold a read lock or not.
*/
if (rw_wowner(rw) == _Thread_Get_executing())
- panic("Lock %s exclusively locked @ %s:%d\n", name,
- file, line);
+ panic("Lock %s exclusively locked @ %s:%d\n",
+ name, file, line);
#endif
break;
default:
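
The rw_assert() cases fleshed out above are INVARIANTS-time sanity checks; a typical consumer looks like this (table_remove and table_unlink are hypothetical names, not part of the patch):

static void
table_remove(struct table *t, struct item *it)
{

	/* The caller must hold t->lock exclusively and not recursed. */
	rw_assert(&t->lock, RA_WLOCKED | RA_NOTRECURSED);
	table_unlink(t, it);
}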
diff --git a/rtemsbsd/rtems/rtems-kernel-rwlockimpl.c b/rtemsbsd/rtems/rtems-kernel-rwlockimpl.c
new file mode 100644
index 00000000..2434e51d
--- /dev/null
+++ b/rtemsbsd/rtems/rtems-kernel-rwlockimpl.c
@@ -0,0 +1,177 @@
+/**
+ * @file
+ *
+ * @ingroup rtems_bsd_rtems
+ *
+ * @brief Reader/writer lock slow paths (blocking and wake-up).
+ */
+
+/*
+ * Copyright (c) 2017 embedded brains GmbH. All rights reserved.
+ *
+ * embedded brains GmbH
+ * Dornierstr. 4
+ * 82178 Puchheim
+ * Germany
+ * <rtems@embedded-brains.de>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <machine/rtems-bsd-kernel-space.h>
+#include <machine/rtems-bsd-rwlockimpl.h>
+
+#include <rtems/score/schedulerimpl.h>
+
+void
+rtems_bsd_rwlock_wlock_more(const struct lock_object *lock,
+ rtems_bsd_rwlock *rw, Thread_Control *executing,
+ rtems_bsd_rwlock_context *context)
+{
+ Thread_Control *wowner;
+
+ wowner = rtems_bsd_rwlock_wowner(rw);
+
+ if (wowner == executing) {
+ BSD_ASSERT(lock->lo_flags & LO_RECURSABLE);
+ ++rw->nest_level;
+
+ _Thread_queue_Release(&rw->writer_queue, &context->writer);
+ } else {
+ _Thread_queue_Context_set_thread_state(&context->writer,
+ STATES_WAITING_FOR_RWLOCK);
+ _Thread_queue_Context_set_enqueue_do_nothing_extra(
+ &context->writer);
+ _Thread_queue_Context_set_deadlock_callout(&context->writer,
+ _Thread_queue_Deadlock_fatal);
+ _Thread_queue_Enqueue(&rw->writer_queue.Queue,
+ &_Thread_queue_Operations_priority, executing,
+ &context->writer);
+ }
+}
+
+static Thread_Control *
+rtems_bsd_rwlock_flush_reader_filter(Thread_Control *reader,
+ Thread_queue_Queue *queue, Thread_queue_Context *queue_context)
+{
+ rtems_bsd_rwlock *rw;
+
+ rw = RTEMS_CONTAINER_OF(queue, rtems_bsd_rwlock, reader_queue.Queue);
+ ++rw->readers;
+ _Thread_Resource_count_increment(reader);
+ return (reader);
+}
+
+static void
+rtems_bsd_rwlock_flush_reader_post_release(Thread_queue_Queue *queue,
+ Thread_queue_Context *queue_context)
+{
+ rtems_bsd_rwlock *rw;
+ rtems_bsd_rwlock_context *context;
+
+ rw = RTEMS_CONTAINER_OF(queue, rtems_bsd_rwlock, reader_queue.Queue);
+ context = RTEMS_CONTAINER_OF(queue_context, rtems_bsd_rwlock_context,
+ reader);
+ _Thread_queue_Release(&rw->writer_queue, &context->writer);
+}
+
+void
+rtems_bsd_rwlock_wunlock_more(rtems_bsd_rwlock *rw, Thread_Control *wowner,
+ rtems_bsd_rwlock_context *context)
+{
+
+ if (!_Thread_queue_Is_empty(&rw->reader_queue.Queue)) {
+ BSD_ASSERT(rw->readers == 0);
+ rtems_bsd_rwlock_ready_waiting_readers(rw, context);
+ } else {
+ BSD_ASSERT(!_Thread_queue_Is_empty(&rw->writer_queue.Queue));
+ _Thread_queue_Surrender(&rw->writer_queue.Queue,
+ rw->writer_queue.Queue.heads, wowner, &context->writer,
+ &_Thread_queue_Operations_priority);
+ }
+}
+
+static void
+rtems_bsd_rwlock_reader_enqueue(Thread_queue_Queue *queue,
+ Thread_Control *executing, Per_CPU_Control *cpu_self,
+ Thread_queue_Context *queue_context
+)
+{
+ rtems_bsd_rwlock *rw;
+ rtems_bsd_rwlock_context *context;
+
+ rw = RTEMS_CONTAINER_OF(queue, rtems_bsd_rwlock, reader_queue.Queue);
+ context = RTEMS_CONTAINER_OF(queue_context, rtems_bsd_rwlock_context,
+ reader);
+ _Thread_queue_Release(&rw->writer_queue, &context->writer);
+}
+
+void
+rtems_bsd_rwlock_rlock_more(rtems_bsd_rwlock *rw,
+ rtems_bsd_rwlock_context *context)
+{
+ Thread_Control *executing;
+
+ executing = _Thread_Executing;
+
+ _Thread_queue_Context_initialize(&context->reader);
+ _Thread_queue_Context_set_thread_state(&context->reader,
+ STATES_WAITING_FOR_RWLOCK);
+ _Thread_queue_Context_set_enqueue_callout(
+ &context->reader, rtems_bsd_rwlock_reader_enqueue);
+ _Thread_queue_Context_set_deadlock_callout(&context->reader,
+ _Thread_queue_Deadlock_fatal);
+ _Thread_queue_Acquire(&rw->reader_queue, &context->reader);
+ _Thread_queue_Enqueue(&rw->reader_queue.Queue,
+ &_Thread_queue_Operations_FIFO, executing, &context->reader);
+}
+
+void
+rtems_bsd_rwlock_runlock_more(rtems_bsd_rwlock *rw,
+ rtems_bsd_rwlock_context *context)
+{
+
+ if (!_Thread_queue_Is_empty(&rw->writer_queue.Queue)) {
+ BSD_ASSERT(rw->readers == 0);
+
+ _Thread_queue_Surrender(&rw->writer_queue.Queue,
+ rw->writer_queue.Queue.heads, NULL, &context->writer,
+ &_Thread_queue_Operations_priority);
+ } else {
+ BSD_ASSERT(!_Thread_queue_Is_empty(&rw->reader_queue.Queue));
+ rtems_bsd_rwlock_ready_waiting_readers(rw, context);
+ }
+}
+
+void
+rtems_bsd_rwlock_ready_waiting_readers(rtems_bsd_rwlock *rw,
+ rtems_bsd_rwlock_context *context)
+{
+
+ _Thread_queue_Context_initialize(&context->reader);
+ _Thread_queue_Acquire(&rw->reader_queue, &context->reader);
+ _Thread_queue_Flush_critical(&rw->reader_queue.Queue,
+ &_Thread_queue_Operations_FIFO,
+ rtems_bsd_rwlock_flush_reader_filter,
+ rtems_bsd_rwlock_flush_reader_post_release,
+ &context->reader);
+}
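
Taken together, the two slow-path unlock routines encode a simple anti-starvation policy: blocked writers wait in priority order, blocked readers in FIFO order, and each unlock path prefers the opposite side. A compact restatement (illustrative only, not part of the patch; both functions are reached only when at least one queue is non-empty):

enum rwlock_wakeup { WAKE_READERS, WAKE_ONE_WRITER };

static enum rwlock_wakeup
wakeup_on_wunlock(const rtems_bsd_rwlock *rw)
{

	/* A write unlock prefers waiting readers over waiting writers. */
	if (!_Thread_queue_Is_empty(&rw->reader_queue.Queue))
		return (WAKE_READERS);
	return (WAKE_ONE_WRITER);
}

static enum rwlock_wakeup
wakeup_on_last_runlock(const rtems_bsd_rwlock *rw)
{

	/* The last read unlock prefers a waiting writer over readers. */
	if (!_Thread_queue_Is_empty(&rw->writer_queue.Queue))
		return (WAKE_ONE_WRITER);
	return (WAKE_READERS);
}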