author     Sebastian Huber <sebastian.huber@embedded-brains.de>  2015-05-20 13:49:05 +0200
committer  Sebastian Huber <sebastian.huber@embedded-brains.de>  2015-05-20 15:03:32 +0200
commit     595b333ad2b40d3bb23ef03374b14e4b8dcf49db (patch)
tree       f2584c50dd82fe577dfb599eb82b7807f2a012d1 /rtemsbsd
parent     Fix struct ucred warnings (diff)
Add INVARIANTS support
Diffstat (limited to 'rtemsbsd')
-rw-r--r--  rtemsbsd/rtems/rtems-bsd-mutex.c  |  1
-rw-r--r--  rtemsbsd/rtems/rtems-bsd-page.c   |  6
-rw-r--r--  rtemsbsd/rtems/rtems-bsd-rwlock.c | 14
-rw-r--r--  rtemsbsd/rtems/rtems-bsd-sx.c     | 19
4 files changed, 34 insertions, 6 deletions
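
The hunks below make the RTEMS glue code honour the FreeBSD INVARIANTS kernel option. As a minimal sketch that is not part of this commit, this is the kind of check such builds enable: KASSERT() from <sys/systm.h> compiles away unless INVARIANTS is defined (the helper name below is hypothetical).

#include <sys/param.h>
#include <sys/systm.h>	/* KASSERT(); a no-op unless INVARIANTS is defined */

/* Hypothetical helper: argument sanity checks that exist only in
 * INVARIANTS builds and cost nothing otherwise. */
static void
example_consume_buffer(const void *buf, size_t len)
{
	KASSERT(buf != NULL, ("example_consume_buffer: NULL buffer"));
	KASSERT(len > 0, ("example_consume_buffer: empty buffer"));
	/* ... use buf ... */
}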
diff --git a/rtemsbsd/rtems/rtems-bsd-mutex.c b/rtemsbsd/rtems/rtems-bsd-mutex.c
index 26f6ce28..5931a7e5 100644
--- a/rtemsbsd/rtems/rtems-bsd-mutex.c
+++ b/rtemsbsd/rtems/rtems-bsd-mutex.c
@@ -46,6 +46,7 @@
#include <rtems/bsd/sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
+#include <sys/conf.h>
static void assert_mtx(struct lock_object *lock, int what);
static void lock_mtx(struct lock_object *lock, int how);
diff --git a/rtemsbsd/rtems/rtems-bsd-page.c b/rtemsbsd/rtems/rtems-bsd-page.c
index 4721fc6e..b3f4325d 100644
--- a/rtemsbsd/rtems/rtems-bsd-page.c
+++ b/rtemsbsd/rtems/rtems-bsd-page.c
@@ -93,6 +93,12 @@ rtems_bsd_page_alloc(uintptr_t size_in_bytes, int wait)
mtx_unlock(&page_heap_mtx);
+#ifdef INVARIANTS
+ if (addr != NULL) {
+ memset(addr, 0, size_in_bytes);
+ }
+#endif
+
return (addr);
}
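
The page allocator change zero-fills every successful allocation when INVARIANTS is defined, so page contents are deterministic in debug builds. A minimal sketch, not part of this commit, of what a caller may then rely on (the helper name is hypothetical; KASSERT() is the stock FreeBSD macro from <sys/systm.h>):

#ifdef INVARIANTS
/* Hypothetical check: with the change above, a freshly allocated page
 * reads back as all zeros in INVARIANTS builds. */
static void
example_assert_page_is_zeroed(const void *addr, uintptr_t size_in_bytes)
{
	const unsigned char *p = addr;
	uintptr_t i;

	for (i = 0; i < size_in_bytes; ++i)
		KASSERT(p[i] == 0, ("page byte %ju not zero", (uintmax_t)i));
}
#endif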
diff --git a/rtemsbsd/rtems/rtems-bsd-rwlock.c b/rtemsbsd/rtems/rtems-bsd-rwlock.c
index b6540b54..d0b911d5 100644
--- a/rtemsbsd/rtems/rtems-bsd-rwlock.c
+++ b/rtemsbsd/rtems/rtems-bsd-rwlock.c
@@ -15,7 +15,7 @@
* USA
* <kevin.kirspel@optimedical.com>
*
- * Copyright (c) 2013 embedded brains GmbH. All rights reserved.
+ * Copyright (c) 2013-2015 embedded brains GmbH. All rights reserved.
*
* embedded brains GmbH
* Dornierstr. 4
@@ -79,6 +79,10 @@ struct lock_class lock_class_rw = {
#endif
};
+#define rw_wowner(rw) ((rw)->mutex.owner)
+
+#define rw_recursed(rw) ((rw)->mutex.nest_level != 0)
+
void
assert_rw(struct lock_object *lock, int what)
{
@@ -223,6 +227,7 @@ _rw_assert(struct rwlock *rw, int what, const char *file, int line)
case RA_LOCKED | RA_RECURSED:
case RA_LOCKED | RA_NOTRECURSED:
case RA_RLOCKED:
+#ifndef __rtems__
#ifdef WITNESS
witness_assert(&rw->lock_object, what, file, line);
#else
@@ -250,10 +255,13 @@ _rw_assert(struct rwlock *rw, int what, const char *file, int line)
}
#endif
break;
+#else /* __rtems__ */
+ /* FALLTHROUGH */
+#endif /* __rtems__ */
case RA_WLOCKED:
case RA_WLOCKED | RA_RECURSED:
case RA_WLOCKED | RA_NOTRECURSED:
- if (rw_wowner(rw) != curthread)
+ if (rw_wowner(rw) != _Thread_Get_executing())
panic("Lock %s not exclusively locked @ %s:%d\n",
rw->lock_object.lo_name, file, line);
if (rw_recursed(rw)) {
@@ -272,7 +280,7 @@ _rw_assert(struct rwlock *rw, int what, const char *file, int line)
* If we hold a write lock fail. We can't reliably check
* to see if we hold a read lock or not.
*/
- if (rw_wowner(rw) == curthread)
+ if (rw_wowner(rw) == _Thread_Get_executing())
panic("Lock %s exclusively locked @ %s:%d\n",
rw->lock_object.lo_name, file, line);
#endif
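
With _rw_assert() now comparing the owner against _Thread_Get_executing(), the standard rw_assert() macro from <sys/rwlock.h> becomes usable on RTEMS in INVARIANTS builds. A minimal usage sketch, assuming a caller outside this commit (function and lock names are hypothetical):

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/rwlock.h>

/* In INVARIANTS builds this panics if the executing thread does not hold
 * example_lock exclusively; otherwise it compiles to nothing. */
static void
example_update_entry(struct rwlock *example_lock)
{
	rw_assert(example_lock, RA_WLOCKED);
	/* ... modify data protected by example_lock ... */
}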
diff --git a/rtemsbsd/rtems/rtems-bsd-sx.c b/rtemsbsd/rtems/rtems-bsd-sx.c
index 46ab2d17..dcf3a009 100644
--- a/rtemsbsd/rtems/rtems-bsd-sx.c
+++ b/rtemsbsd/rtems/rtems-bsd-sx.c
@@ -7,7 +7,7 @@
*/
/*
- * Copyright (c) 2009-2014 embedded brains GmbH. All rights reserved.
+ * Copyright (c) 2009-2015 embedded brains GmbH. All rights reserved.
*
* embedded brains GmbH
* Dornierstr. 4
@@ -39,6 +39,7 @@
#include <machine/rtems-bsd-kernel-space.h>
#include <machine/rtems-bsd-muteximpl.h>
+#include <machine/rtems-bsd-thread.h>
#include <rtems/bsd/sys/param.h>
#include <rtems/bsd/sys/types.h>
@@ -71,6 +72,10 @@ struct lock_class lock_class_sx = {
#endif
};
+#define sx_xholder(sx) ((sx)->mutex.owner)
+
+#define sx_recursed(sx) ((sx)->mutex.nest_level != 0)
+
void
assert_sx(struct lock_object *lock, int what)
{
@@ -177,9 +182,11 @@ _sx_downgrade(struct sx *sx, const char *file, int line)
void
_sx_assert(struct sx *sx, int what, const char *file, int line)
{
+#ifndef __rtems__
#ifndef WITNESS
int slocked = 0;
#endif
+#endif /* __rtems__ */
if (panicstr != NULL)
return;
@@ -187,13 +194,16 @@ _sx_assert(struct sx *sx, int what, const char *file, int line)
case SA_SLOCKED:
case SA_SLOCKED | SA_NOTRECURSED:
case SA_SLOCKED | SA_RECURSED:
+#ifndef __rtems__
#ifndef WITNESS
slocked = 1;
/* FALLTHROUGH */
#endif
+#endif /* __rtems__ */
case SA_LOCKED:
case SA_LOCKED | SA_NOTRECURSED:
case SA_LOCKED | SA_RECURSED:
+#ifndef __rtems__
#ifdef WITNESS
witness_assert(&sx->lock_object, what, file, line);
#else
@@ -221,10 +231,13 @@ _sx_assert(struct sx *sx, int what, const char *file, int line)
}
#endif
break;
+#else /* __rtems__ */
+ /* FALLTHROUGH */
+#endif /* __rtems__ */
case SA_XLOCKED:
case SA_XLOCKED | SA_NOTRECURSED:
case SA_XLOCKED | SA_RECURSED:
- if (sx_xholder(sx) != curthread)
+ if (sx_xholder(sx) != _Thread_Get_executing())
panic("Lock %s not exclusively locked @ %s:%d\n",
sx->lock_object.lo_name, file, line);
if (sx_recursed(sx)) {
@@ -244,7 +257,7 @@ _sx_assert(struct sx *sx, int what, const char *file, int line)
* reliably check to see if we hold a shared lock or
* not.
*/
- if (sx_xholder(sx) == curthread)
+ if (sx_xholder(sx) == _Thread_Get_executing())
panic("Lock %s exclusively locked @ %s:%d\n",
sx->lock_object.lo_name, file, line);
#endif
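
The same pattern applies to shared/exclusive locks: with sx_xholder() mapped to the underlying mutex owner, sx_assert() from <sys/sx.h> can verify exclusive ownership against the executing thread on RTEMS. A minimal sketch, not part of this commit (names are hypothetical):

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/sx.h>

/* In INVARIANTS builds this panics unless the executing thread holds
 * example_sx exclusively. */
static void
example_flush_cache(struct sx *example_sx)
{
	sx_assert(example_sx, SA_XLOCKED);
	/* ... walk structures protected by example_sx ... */
}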