author     Sebastian Huber <sebastian.huber@embedded-brains.de>  2015-05-20 13:49:05 +0200
committer  Sebastian Huber <sebastian.huber@embedded-brains.de>  2015-05-20 15:03:32 +0200
commit     595b333ad2b40d3bb23ef03374b14e4b8dcf49db (patch)
tree       f2584c50dd82fe577dfb599eb82b7807f2a012d1
parent     Fix struct ucred warnings (diff)
download   rtems-libbsd-595b333ad2b40d3bb23ef03374b14e4b8dcf49db.tar.bz2
Add INVARIANTS support
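
The FreeBSD INVARIANTS kernel option turns the KASSERT() consistency
checks throughout the kernel into live run-time checks; without it
they compile away completely.  This patch makes INVARIANTS builds work
under RTEMS: assertions that rely on features libbsd does not provide
(full process support, per-thread sleep flags, WITNESS) are disabled
with #ifndef __rtems__, vm/uma_dbg.c is imported so that UMA fills
freed items with the 0xdeadc0de trash pattern and can detect
modified-after-free and duplicate-free errors, and rtems-bsd-page.c
additionally zero-fills freshly allocated pages in INVARIANTS builds.

For readers unfamiliar with the mechanism, here is a minimal
user-space sketch of how a KASSERT()-style assertion behaves; it
mirrors the shape of the macro in <sys/systm.h>, with the kernel's
panic() replaced by a hypothetical printf()/abort() stand-in so the
example runs outside the kernel:

    #include <stdio.h>
    #include <stdlib.h>

    /* Hypothetical stand-in for the kernel's panic(). */
    #define panic(...) do { printf(__VA_ARGS__); abort(); } while (0)

    #ifdef INVARIANTS
    /* INVARIANTS build: evaluate the expression, panic on failure. */
    #define KASSERT(exp, msg) do {  \
            if (!(exp))             \
                    panic msg;      \
    } while (0)
    #else
    /* Non-INVARIANTS build: the check costs nothing. */
    #define KASSERT(exp, msg) do { } while (0)
    #endif

    int
    main(void)
    {
            void *item = NULL;

            /* Fires only when compiled with -DINVARIANTS. */
            KASSERT(item != NULL, ("item %p must not be NULL", item));
            return (0);
    }

Compiled with -DINVARIANTS the program aborts with the message above;
compiled without it, the assertion disappears and main() returns 0.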
-rw-r--r--  Makefile                            |   1
-rw-r--r--  freebsd/sys/kern/kern_event.c       |   2
-rw-r--r--  freebsd/sys/kern/kern_intr.c        |   2
-rw-r--r--  freebsd/sys/kern/kern_synch.c       |   4
-rw-r--r--  freebsd/sys/kern/kern_timeout.c     |   4
-rw-r--r--  freebsd/sys/kern/subr_sleepqueue.c  |   2
-rw-r--r--  freebsd/sys/kern/subr_taskqueue.c   |   2
-rw-r--r--  freebsd/sys/kern/subr_uio.c         |   2
-rw-r--r--  freebsd/sys/sys/conf.h              |   4
-rw-r--r--  freebsd/sys/sys/proc.h              |   4
-rw-r--r--  freebsd/sys/vm/uma_dbg.c            | 315
-rwxr-xr-x  libbsd.py                           |   1
-rw-r--r--  rtemsbsd/rtems/rtems-bsd-mutex.c    |   1
-rw-r--r--  rtemsbsd/rtems/rtems-bsd-page.c     |   6
-rw-r--r--  rtemsbsd/rtems/rtems-bsd-rwlock.c   |  14
-rw-r--r--  rtemsbsd/rtems/rtems-bsd-sx.c       |  19
-rw-r--r--  wscript                             |   1
17 files changed, 378 insertions, 6 deletions
diff --git a/Makefile b/Makefile
index 0e55577e..fcf31e74 100644
--- a/Makefile
+++ b/Makefile
@@ -103,6 +103,7 @@ LIB_C_FILES += freebsd/sys/libkern/fls.c
LIB_C_FILES += freebsd/sys/libkern/inet_ntoa.c
LIB_C_FILES += freebsd/sys/libkern/random.c
LIB_C_FILES += freebsd/sys/vm/uma_core.c
+LIB_C_FILES += freebsd/sys/vm/uma_dbg.c
LIB_C_FILES += freebsd/sys/cam/cam.c
LIB_C_FILES += freebsd/sys/cam/scsi/scsi_all.c
LIB_C_FILES += freebsd/sys/crypto/sha1.c
diff --git a/freebsd/sys/kern/kern_event.c b/freebsd/sys/kern/kern_event.c
index 89cd1765..17312338 100644
--- a/freebsd/sys/kern/kern_event.c
+++ b/freebsd/sys/kern/kern_event.c
@@ -2275,7 +2275,9 @@ knote_fdclose(struct thread *td, int fd)
struct knote *kn;
int influx;
+#ifndef __rtems__
FILEDESC_XLOCK_ASSERT(fdp);
+#endif /* __rtems__ */
/*
* We shouldn't have to worry about new kevents appearing on fd
diff --git a/freebsd/sys/kern/kern_intr.c b/freebsd/sys/kern/kern_intr.c
index 169e1c75..e602cd83 100644
--- a/freebsd/sys/kern/kern_intr.c
+++ b/freebsd/sys/kern/kern_intr.c
@@ -947,7 +947,9 @@ intr_event_schedule_thread(struct intr_event *ie)
RANDOM_INTERRUPT);
}
+#ifndef __rtems__
KASSERT(p != NULL, ("ithread %s has no process", ie->ie_name));
+#endif /* __rtems__ */
/*
* Set it_need to tell the thread to keep running if it is already
diff --git a/freebsd/sys/kern/kern_synch.c b/freebsd/sys/kern/kern_synch.c
index f85ffc5c..2824c9a9 100644
--- a/freebsd/sys/kern/kern_synch.c
+++ b/freebsd/sys/kern/kern_synch.c
@@ -179,7 +179,9 @@ _sleep(void *ident, struct lock_object *lock, int priority,
"Sleeping on \"%s\"", wmesg);
KASSERT(timo != 0 || mtx_owned(&Giant) || lock != NULL,
("sleeping without a lock"));
+#ifndef __rtems__
KASSERT(p != NULL, ("msleep1"));
+#endif /* __rtems__ */
KASSERT(ident != NULL && TD_IS_RUNNING(td), ("msleep"));
#ifndef __rtems__
if (priority & PDROP)
@@ -415,8 +417,10 @@ wakeup(void *ident)
wakeup_swapper = sleepq_broadcast(ident, SLEEPQ_SLEEP, 0, 0);
sleepq_release(ident);
if (wakeup_swapper) {
+#ifndef __rtems__
KASSERT(ident != &proc0,
("wakeup and wakeup_swapper and proc0"));
+#endif /* __rtems__ */
kick_proc0();
}
}
diff --git a/freebsd/sys/kern/kern_timeout.c b/freebsd/sys/kern/kern_timeout.c
index 821b035d..00024aa3 100644
--- a/freebsd/sys/kern/kern_timeout.c
+++ b/freebsd/sys/kern/kern_timeout.c
@@ -1095,7 +1095,9 @@ again:
KASSERT(!cc_cme_migrating(cc),
("callout wrongly scheduled for migration"));
CC_UNLOCK(cc);
+#ifndef __rtems__
KASSERT(!sq_locked, ("sleepqueue chain locked"));
+#endif /* __rtems__ */
return (1);
} else if ((c->c_flags & CALLOUT_DFRMIGRATION) != 0) {
c->c_flags &= ~CALLOUT_DFRMIGRATION;
@@ -1107,7 +1109,9 @@ again:
CTR3(KTR_CALLOUT, "failed to stop %p func %p arg %p",
c, c->c_func, c->c_arg);
CC_UNLOCK(cc);
+#ifndef __rtems__
KASSERT(!sq_locked, ("sleepqueue chain still locked"));
+#endif /* __rtems__ */
return (0);
}
#ifndef __rtems__
diff --git a/freebsd/sys/kern/subr_sleepqueue.c b/freebsd/sys/kern/subr_sleepqueue.c
index a0f25b07..5ae475ab 100644
--- a/freebsd/sys/kern/subr_sleepqueue.c
+++ b/freebsd/sys/kern/subr_sleepqueue.c
@@ -314,8 +314,10 @@ sleepq_add(void *wchan, struct lock_object *lock, const char *wmesg, int flags,
MPASS((queue >= 0) && (queue < NR_SLEEPQS));
/* If this thread is not allowed to sleep, die a horrible death. */
+#ifndef __rtems__
KASSERT(!(td->td_pflags & TDP_NOSLEEPING),
("Trying sleep, but thread marked as sleeping prohibited"));
+#endif /* __rtems__ */
/* Look up the sleep queue associated with the wait channel 'wchan'. */
sq = sleepq_lookup(wchan);
diff --git a/freebsd/sys/kern/subr_taskqueue.c b/freebsd/sys/kern/subr_taskqueue.c
index 259b152d..99640026 100644
--- a/freebsd/sys/kern/subr_taskqueue.c
+++ b/freebsd/sys/kern/subr_taskqueue.c
@@ -268,7 +268,9 @@ taskqueue_enqueue_timeout(struct taskqueue *queue,
TQ_LOCK(queue);
KASSERT(timeout_task->q == NULL || timeout_task->q == queue,
("Migrated queue"));
+#ifndef __rtems__
KASSERT(!queue->tq_spin, ("Timeout for spin-queue"));
+#endif /* __rtems__ */
timeout_task->q = queue;
res = timeout_task->t.ta_pending;
if (ticks == 0) {
diff --git a/freebsd/sys/kern/subr_uio.c b/freebsd/sys/kern/subr_uio.c
index d38b337e..73e0ccee 100644
--- a/freebsd/sys/kern/subr_uio.c
+++ b/freebsd/sys/kern/subr_uio.c
@@ -235,8 +235,10 @@ uiomove_faultflag(void *cp, int n, struct uio *uio, int nofault)
KASSERT(uio->uio_rw == UIO_READ || uio->uio_rw == UIO_WRITE,
("uiomove: mode"));
+#ifndef __rtems__
KASSERT(uio->uio_segflg != UIO_USERSPACE || uio->uio_td == td,
("uiomove proc"));
+#endif /* __rtems__ */
if (!nofault)
WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
"Calling uiomove()");
diff --git a/freebsd/sys/sys/conf.h b/freebsd/sys/sys/conf.h
index d65759f1..4ba6f033 100644
--- a/freebsd/sys/sys/conf.h
+++ b/freebsd/sys/sys/conf.h
@@ -352,7 +352,11 @@ int set_dumper(struct dumperinfo *);
int dump_write(struct dumperinfo *, void *, vm_offset_t, off_t, size_t);
void dumpsys(struct dumperinfo *);
int doadump(boolean_t);
+#ifndef __rtems__
extern int dumping; /* system is dumping */
+#else /* __rtems__ */
+#define dumping 0
+#endif /* __rtems__ */
#endif /* _KERNEL */
diff --git a/freebsd/sys/sys/proc.h b/freebsd/sys/sys/proc.h
index 95b1b9c1..32d297a5 100644
--- a/freebsd/sys/sys/proc.h
+++ b/freebsd/sys/sys/proc.h
@@ -478,7 +478,11 @@ do { \
#define TD_IS_SWAPPED(td) ((td)->td_inhibitors & TDI_SWAPPED)
#define TD_ON_LOCK(td) ((td)->td_inhibitors & TDI_LOCK)
#define TD_AWAITING_INTR(td) ((td)->td_inhibitors & TDI_IWAIT)
+#ifndef __rtems__
#define TD_IS_RUNNING(td) ((td)->td_state == TDS_RUNNING)
+#else /* __rtems__ */
+#define TD_IS_RUNNING(td) (1)
+#endif /* __rtems__ */
#define TD_ON_RUNQ(td) ((td)->td_state == TDS_RUNQ)
#define TD_CAN_RUN(td) ((td)->td_state == TDS_CAN_RUN)
#define TD_IS_INHIBITED(td) ((td)->td_state == TDS_INHIBITED)
diff --git a/freebsd/sys/vm/uma_dbg.c b/freebsd/sys/vm/uma_dbg.c
new file mode 100644
index 00000000..90c204e3
--- /dev/null
+++ b/freebsd/sys/vm/uma_dbg.c
@@ -0,0 +1,315 @@
+#include <machine/rtems-bsd-kernel-space.h>
+
+/*-
+ * Copyright (c) 2002, 2003, 2004, 2005 Jeffrey Roberson <jeff@FreeBSD.org>
+ * Copyright (c) 2004, 2005 Bosko Milekic <bmilekic@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * uma_dbg.c Debugging features for UMA users
+ *
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <rtems/bsd/sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <rtems/bsd/sys/types.h>
+#include <sys/queue.h>
+#include <rtems/bsd/sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/malloc.h>
+
+#include <vm/vm.h>
+#include <vm/vm_object.h>
+#include <vm/vm_page.h>
+#include <vm/uma.h>
+#include <vm/uma_int.h>
+#include <vm/uma_dbg.h>
+
+static const u_int32_t uma_junk = 0xdeadc0de;
+
+/*
+ * Checks an item to make sure it hasn't been overwritten since it was freed,
+ * prior to subsequent reallocation.
+ *
+ * Complies with standard ctor arg/return
+ *
+ */
+int
+trash_ctor(void *mem, int size, void *arg, int flags)
+{
+ int cnt;
+ u_int32_t *p;
+
+ cnt = size / sizeof(uma_junk);
+
+ for (p = mem; cnt > 0; cnt--, p++)
+ if (*p != uma_junk) {
+ printf("Memory modified after free %p(%d) val=%x @ %p\n",
+ mem, size, *p, p);
+ return (0);
+ }
+ return (0);
+}
+
+/*
+ * Fills an item with predictable garbage
+ *
+ * Complies with standard dtor arg/return
+ *
+ */
+void
+trash_dtor(void *mem, int size, void *arg)
+{
+ int cnt;
+ u_int32_t *p;
+
+ cnt = size / sizeof(uma_junk);
+
+ for (p = mem; cnt > 0; cnt--, p++)
+ *p = uma_junk;
+}
+
+/*
+ * Fills an item with predictable garbage
+ *
+ * Complies with standard init arg/return
+ *
+ */
+int
+trash_init(void *mem, int size, int flags)
+{
+ trash_dtor(mem, size, NULL);
+ return (0);
+}
+
+/*
+ * Checks an item to make sure it hasn't been overwritten since it was freed.
+ *
+ * Complies with standard fini arg/return
+ *
+ */
+void
+trash_fini(void *mem, int size)
+{
+ (void)trash_ctor(mem, size, NULL, 0);
+}
+
+int
+mtrash_ctor(void *mem, int size, void *arg, int flags)
+{
+ struct malloc_type **ksp;
+ u_int32_t *p = mem;
+ int cnt;
+
+ size -= sizeof(struct malloc_type *);
+ ksp = (struct malloc_type **)mem;
+ ksp += size / sizeof(struct malloc_type *);
+ cnt = size / sizeof(uma_junk);
+
+ for (p = mem; cnt > 0; cnt--, p++)
+ if (*p != uma_junk) {
+ printf("Memory modified after free %p(%d) val=%x @ %p\n",
+ mem, size, *p, p);
+ panic("Most recently used by %s\n", (*ksp == NULL)?
+ "none" : (*ksp)->ks_shortdesc);
+ }
+ return (0);
+}
+
+/*
+ * Fills an item with predictable garbage
+ *
+ * Complies with standard dtor arg/return
+ *
+ */
+void
+mtrash_dtor(void *mem, int size, void *arg)
+{
+ int cnt;
+ u_int32_t *p;
+
+ size -= sizeof(struct malloc_type *);
+ cnt = size / sizeof(uma_junk);
+
+ for (p = mem; cnt > 0; cnt--, p++)
+ *p = uma_junk;
+}
+
+/*
+ * Fills an item with predictable garbage
+ *
+ * Complies with standard init arg/return
+ *
+ */
+int
+mtrash_init(void *mem, int size, int flags)
+{
+ struct malloc_type **ksp;
+
+ mtrash_dtor(mem, size, NULL);
+
+ ksp = (struct malloc_type **)mem;
+ ksp += (size / sizeof(struct malloc_type *)) - 1;
+ *ksp = NULL;
+ return (0);
+}
+
+/*
+ * Checks an item to make sure it hasn't been overwritten since it was freed,
+ * prior to freeing it back to available memory.
+ *
+ * Complies with standard fini arg/return
+ *
+ */
+void
+mtrash_fini(void *mem, int size)
+{
+ (void)mtrash_ctor(mem, size, NULL, 0);
+}
+
+static uma_slab_t
+uma_dbg_getslab(uma_zone_t zone, void *item)
+{
+ uma_slab_t slab;
+ uma_keg_t keg;
+ u_int8_t *mem;
+
+ mem = (u_int8_t *)((unsigned long)item & (~UMA_SLAB_MASK));
+ if (zone->uz_flags & UMA_ZONE_VTOSLAB) {
+ slab = vtoslab((vm_offset_t)mem);
+ } else {
+ keg = LIST_FIRST(&zone->uz_kegs)->kl_keg;
+ if (keg->uk_flags & UMA_ZONE_HASH)
+ slab = hash_sfind(&keg->uk_hash, mem);
+ else
+ slab = (uma_slab_t)(mem + keg->uk_pgoff);
+ }
+
+ return (slab);
+}
+
+/*
+ * Set up the slab's freei data such that uma_dbg_free can function.
+ *
+ */
+
+void
+uma_dbg_alloc(uma_zone_t zone, uma_slab_t slab, void *item)
+{
+ uma_keg_t keg;
+ uma_slabrefcnt_t slabref;
+ int freei;
+
+ if (slab == NULL) {
+ slab = uma_dbg_getslab(zone, item);
+ if (slab == NULL)
+ panic("uma: item %p did not belong to zone %s\n",
+ item, zone->uz_name);
+ }
+ keg = slab->us_keg;
+
+ freei = ((unsigned long)item - (unsigned long)slab->us_data)
+ / keg->uk_rsize;
+
+ if (keg->uk_flags & UMA_ZONE_REFCNT) {
+ slabref = (uma_slabrefcnt_t)slab;
+ slabref->us_freelist[freei].us_item = 255;
+ } else {
+ slab->us_freelist[freei].us_item = 255;
+ }
+
+ return;
+}
+
+/*
+ * Verifies freed addresses. Checks for alignment, valid slab membership
+ * and duplicate frees.
+ *
+ */
+
+void
+uma_dbg_free(uma_zone_t zone, uma_slab_t slab, void *item)
+{
+ uma_keg_t keg;
+ uma_slabrefcnt_t slabref;
+ int freei;
+
+ if (slab == NULL) {
+ slab = uma_dbg_getslab(zone, item);
+ if (slab == NULL)
+ panic("uma: Freed item %p did not belong to zone %s\n",
+ item, zone->uz_name);
+ }
+ keg = slab->us_keg;
+
+ freei = ((unsigned long)item - (unsigned long)slab->us_data)
+ / keg->uk_rsize;
+
+ if (freei >= keg->uk_ipers)
+ panic("zone: %s(%p) slab %p freelist %d out of range 0-%d\n",
+ zone->uz_name, zone, slab, freei, keg->uk_ipers-1);
+
+ if (((freei * keg->uk_rsize) + slab->us_data) != item) {
+ printf("zone: %s(%p) slab %p freed address %p unaligned.\n",
+ zone->uz_name, zone, slab, item);
+ panic("should be %p\n",
+ (freei * keg->uk_rsize) + slab->us_data);
+ }
+
+ if (keg->uk_flags & UMA_ZONE_REFCNT) {
+ slabref = (uma_slabrefcnt_t)slab;
+ if (slabref->us_freelist[freei].us_item != 255) {
+ printf("Slab at %p, freei %d = %d.\n",
+ slab, freei, slabref->us_freelist[freei].us_item);
+ panic("Duplicate free of item %p from zone %p(%s)\n",
+ item, zone, zone->uz_name);
+ }
+
+ /*
+ * When this is actually linked into the slab this will change.
+ * Until then the count of valid slabs will make sure we don't
+ * accidentally follow this and assume it's a valid index.
+ */
+ slabref->us_freelist[freei].us_item = 0;
+ } else {
+ if (slab->us_freelist[freei].us_item != 255) {
+ printf("Slab at %p, freei %d = %d.\n",
+ slab, freei, slab->us_freelist[freei].us_item);
+ panic("Duplicate free of item %p from zone %p(%s)\n",
+ item, zone, zone->uz_name);
+ }
+
+ /*
+ * When this is actually linked into the slab this will change.
+ * Until then the count of valid slabs will make sure we don't
+ * accidentally follow this and assume it's a valid index.
+ */
+ slab->us_freelist[freei].us_item = 0;
+ }
+}
diff --git a/libbsd.py b/libbsd.py
index 338e097c..4d98e515 100755
--- a/libbsd.py
+++ b/libbsd.py
@@ -328,6 +328,7 @@ def base(mm):
'sys/libkern/inet_ntoa.c',
'sys/libkern/random.c',
'sys/vm/uma_core.c',
+ 'sys/vm/uma_dbg.c',
],
mm.generator['source']()
)
diff --git a/rtemsbsd/rtems/rtems-bsd-mutex.c b/rtemsbsd/rtems/rtems-bsd-mutex.c
index 26f6ce28..5931a7e5 100644
--- a/rtemsbsd/rtems/rtems-bsd-mutex.c
+++ b/rtemsbsd/rtems/rtems-bsd-mutex.c
@@ -46,6 +46,7 @@
#include <rtems/bsd/sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
+#include <sys/conf.h>
static void assert_mtx(struct lock_object *lock, int what);
static void lock_mtx(struct lock_object *lock, int how);
diff --git a/rtemsbsd/rtems/rtems-bsd-page.c b/rtemsbsd/rtems/rtems-bsd-page.c
index 4721fc6e..b3f4325d 100644
--- a/rtemsbsd/rtems/rtems-bsd-page.c
+++ b/rtemsbsd/rtems/rtems-bsd-page.c
@@ -93,6 +93,12 @@ rtems_bsd_page_alloc(uintptr_t size_in_bytes, int wait)
mtx_unlock(&page_heap_mtx);
+#ifdef INVARIANTS
+ if (addr != NULL) {
+ memset(addr, 0, size_in_bytes);
+ }
+#endif
+
return (addr);
}
diff --git a/rtemsbsd/rtems/rtems-bsd-rwlock.c b/rtemsbsd/rtems/rtems-bsd-rwlock.c
index b6540b54..d0b911d5 100644
--- a/rtemsbsd/rtems/rtems-bsd-rwlock.c
+++ b/rtemsbsd/rtems/rtems-bsd-rwlock.c
@@ -15,7 +15,7 @@
* USA
* <kevin.kirspel@optimedical.com>
*
- * Copyright (c) 2013 embedded brains GmbH. All rights reserved.
+ * Copyright (c) 2013-2015 embedded brains GmbH. All rights reserved.
*
* embedded brains GmbH
* Dornierstr. 4
@@ -79,6 +79,10 @@ struct lock_class lock_class_rw = {
#endif
};
+#define rw_wowner(rw) ((rw)->mutex.owner)
+
+#define rw_recursed(rw) ((rw)->mutex.nest_level != 0)
+
void
assert_rw(struct lock_object *lock, int what)
{
@@ -223,6 +227,7 @@ _rw_assert(struct rwlock *rw, int what, const char *file, int line)
case RA_LOCKED | RA_RECURSED:
case RA_LOCKED | RA_NOTRECURSED:
case RA_RLOCKED:
+#ifndef __rtems__
#ifdef WITNESS
witness_assert(&rw->lock_object, what, file, line);
#else
@@ -250,10 +255,13 @@ _rw_assert(struct rwlock *rw, int what, const char *file, int line)
}
#endif
break;
+#else /* __rtems__ */
+ /* FALLTHROUGH */
+#endif /* __rtems__ */
case RA_WLOCKED:
case RA_WLOCKED | RA_RECURSED:
case RA_WLOCKED | RA_NOTRECURSED:
- if (rw_wowner(rw) != curthread)
+ if (rw_wowner(rw) != _Thread_Get_executing())
panic("Lock %s not exclusively locked @ %s:%d\n",
rw->lock_object.lo_name, file, line);
if (rw_recursed(rw)) {
@@ -272,7 +280,7 @@ _rw_assert(struct rwlock *rw, int what, const char *file, int line)
* If we hold a write lock fail. We can't reliably check
* to see if we hold a read lock or not.
*/
- if (rw_wowner(rw) == curthread)
+ if (rw_wowner(rw) == _Thread_Get_executing())
panic("Lock %s exclusively locked @ %s:%d\n",
rw->lock_object.lo_name, file, line);
#endif
diff --git a/rtemsbsd/rtems/rtems-bsd-sx.c b/rtemsbsd/rtems/rtems-bsd-sx.c
index 46ab2d17..dcf3a009 100644
--- a/rtemsbsd/rtems/rtems-bsd-sx.c
+++ b/rtemsbsd/rtems/rtems-bsd-sx.c
@@ -7,7 +7,7 @@
*/
/*
- * Copyright (c) 2009-2014 embedded brains GmbH. All rights reserved.
+ * Copyright (c) 2009-2015 embedded brains GmbH. All rights reserved.
*
* embedded brains GmbH
* Dornierstr. 4
@@ -39,6 +39,7 @@
#include <machine/rtems-bsd-kernel-space.h>
#include <machine/rtems-bsd-muteximpl.h>
+#include <machine/rtems-bsd-thread.h>
#include <rtems/bsd/sys/param.h>
#include <rtems/bsd/sys/types.h>
@@ -71,6 +72,10 @@ struct lock_class lock_class_sx = {
#endif
};
+#define sx_xholder(sx) ((sx)->mutex.owner)
+
+#define sx_recursed(sx) ((sx)->mutex.nest_level != 0)
+
void
assert_sx(struct lock_object *lock, int what)
{
@@ -177,9 +182,11 @@ _sx_downgrade(struct sx *sx, const char *file, int line)
void
_sx_assert(struct sx *sx, int what, const char *file, int line)
{
+#ifndef __rtems__
#ifndef WITNESS
int slocked = 0;
#endif
+#endif /* __rtems__ */
if (panicstr != NULL)
return;
@@ -187,13 +194,16 @@ _sx_assert(struct sx *sx, int what, const char *file, int line)
case SA_SLOCKED:
case SA_SLOCKED | SA_NOTRECURSED:
case SA_SLOCKED | SA_RECURSED:
+#ifndef __rtems__
#ifndef WITNESS
slocked = 1;
/* FALLTHROUGH */
#endif
+#endif /* __rtems__ */
case SA_LOCKED:
case SA_LOCKED | SA_NOTRECURSED:
case SA_LOCKED | SA_RECURSED:
+#ifndef __rtems__
#ifdef WITNESS
witness_assert(&sx->lock_object, what, file, line);
#else
@@ -221,10 +231,13 @@ _sx_assert(struct sx *sx, int what, const char *file, int line)
}
#endif
break;
+#else /* __rtems__ */
+ /* FALLTHROUGH */
+#endif /* __rtems__ */
case SA_XLOCKED:
case SA_XLOCKED | SA_NOTRECURSED:
case SA_XLOCKED | SA_RECURSED:
- if (sx_xholder(sx) != curthread)
+ if (sx_xholder(sx) != _Thread_Get_executing())
panic("Lock %s not exclusively locked @ %s:%d\n",
sx->lock_object.lo_name, file, line);
if (sx_recursed(sx)) {
@@ -244,7 +257,7 @@ _sx_assert(struct sx *sx, int what, const char *file, int line)
* reliably check to see if we hold a shared lock or
* not.
*/
- if (sx_xholder(sx) == curthread)
+ if (sx_xholder(sx) == _Thread_Get_executing())
panic("Lock %s exclusively locked @ %s:%d\n",
sx->lock_object.lo_name, file, line);
#endif
diff --git a/wscript b/wscript
index c7926b5a..249604e2 100644
--- a/wscript
+++ b/wscript
@@ -698,6 +698,7 @@ def build(bld):
'freebsd/sys/opencrypto/skipjack.c',
'freebsd/sys/opencrypto/xform.c',
'freebsd/sys/vm/uma_core.c',
+ 'freebsd/sys/vm/uma_dbg.c',
'mDNSResponder/mDNSCore/CryptoAlg.c',
'mDNSResponder/mDNSCore/DNSCommon.c',
'mDNSResponder/mDNSCore/DNSDigest.c',