Diffstat (limited to 'freebsd/sys/kern/kern_synch.c')
-rw-r--r--  freebsd/sys/kern/kern_synch.c  83
1 file changed, 83 insertions, 0 deletions
diff --git a/freebsd/sys/kern/kern_synch.c b/freebsd/sys/kern/kern_synch.c
index 2597f91d..7d24c248 100644
--- a/freebsd/sys/kern/kern_synch.c
+++ b/freebsd/sys/kern/kern_synch.c
@@ -54,6 +54,7 @@ __FBSDID("$FreeBSD$");
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
+#include <sys/refcount.h>
#include <sys/sched.h>
#include <sys/sdt.h>
#include <sys/signalvar.h>
@@ -366,6 +367,75 @@ pause_sbt(const char *wmesg, sbintime_t sbt, sbintime_t pr, int flags)
}
/*
+ * Potentially release the last reference to a refcount. Check for
+ * unlikely conditions and signal the caller whether this was the
+ * final reference.
+ */
+bool
+refcount_release_last(volatile u_int *count, u_int n, u_int old)
+{
+ u_int waiter;
+
+ waiter = old & REFCOUNT_WAITER;
+ old = REFCOUNT_COUNT(old);
+ if (__predict_false(n > old || REFCOUNT_SATURATED(old))) {
+ /*
+ * Avoid multiple destructor invocations if underflow occurred.
+ * This is not perfect since the memory backing the containing
+ * object may already have been reallocated.
+ */
+ _refcount_update_saturated(count);
+ return (false);
+ }
+
+ /*
+ * Attempt to atomically clear the waiter bit. Wake up waiters
+ * if we are successful.
+ */
+ if (waiter != 0 && atomic_cmpset_int(count, REFCOUNT_WAITER, 0))
+ wakeup(__DEVOLATILE(u_int *, count));
+
+ /*
+ * Last reference. Signal the user to call the destructor.
+ *
+ * Ensure that the destructor sees all updates. The fence_rel
+ * at the start of refcount_releasen synchronizes with this fence.
+ */
+ atomic_thread_fence_acq();
+ return (true);
+}
+
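
The comment above says the fence_rel at the start of refcount_releasen()
synchronizes with the acquire fence here. For context, that fast-path
caller lives in <sys/refcount.h>; it presumably looks roughly like the
following sketch (a paraphrase under that assumption, not the verbatim
header code):

	/*
	 * Sketch of the expected fast-path caller. The release fence
	 * pairs with the atomic_thread_fence_acq() in
	 * refcount_release_last() above; the REFCOUNT_* macros are
	 * assumed to come from <sys/refcount.h>.
	 */
	static __inline bool
	refcount_releasen(volatile u_int *count, u_int n)
	{
		u_int old;

		atomic_thread_fence_rel();
		old = atomic_fetchadd_int(count, -n);
		if (__predict_false(n >= REFCOUNT_COUNT(old) ||
		    REFCOUNT_SATURATED(old)))
			return (refcount_release_last(count, n, old));
		return (false);	/* Common case: not the last reference. */
	}
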
+/*
+ * Wait for a refcount wakeup. This does not guarantee that the count is
+ * still zero on return, and the sleep may be ended by a transient wakeup.
+ * Callers wanting a precise answer should use refcount_wait().
+ */
+void
+refcount_sleep(volatile u_int *count, const char *wmesg, int pri)
+{
+ void *wchan;
+ u_int old;
+
+ if (REFCOUNT_COUNT(*count) == 0)
+ return;
+ wchan = __DEVOLATILE(void *, count);
+ sleepq_lock(wchan);
+ old = *count;
+ for (;;) {
+ if (REFCOUNT_COUNT(old) == 0) {
+ sleepq_release(wchan);
+ return;
+ }
+ if (old & REFCOUNT_WAITER)
+ break;
+ if (atomic_fcmpset_int(count, &old, old | REFCOUNT_WAITER))
+ break;
+ }
+ sleepq_add(wchan, NULL, wmesg, 0, 0);
+ sleepq_wait(wchan, pri);
+}
+
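
Since refcount_sleep() may return on a transient wakeup, the
refcount_wait() helper referenced in the comment is presumably just a
retry loop around it, along these lines (a sketch, not the verbatim
header code):

	/*
	 * Sketch of a refcount_wait()-style wrapper: keep sleeping
	 * until the count is actually observed at zero.
	 */
	static __inline void
	refcount_wait(volatile u_int *count, const char *wmesg, int pri)
	{
		while (*count != 0)
			refcount_sleep(count, wmesg, pri);
	}
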
+/*
* Make all threads sleeping on the specified identifier runnable.
*/
void
@@ -402,6 +472,19 @@ wakeup_one(void *ident)
kick_proc0();
}
+void
+wakeup_any(void *ident)
+{
+ int wakeup_swapper;
+
+ sleepq_lock(ident);
+ wakeup_swapper = sleepq_signal(ident, SLEEPQ_SLEEP | SLEEPQ_UNFAIR,
+ 0, 0);
+ sleepq_release(ident);
+ if (wakeup_swapper)
+ kick_proc0();
+}
+
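
wakeup_any() is wakeup_one() minus the fairness guarantee: the
SLEEPQ_UNFAIR flag lets the sleepqueue code pick a waiter without the
usual ordering, apparently favoring the most recently slept thread,
which may still be cache-warm. A hypothetical caller might look like
this (work_mtx and work_ready are illustrative names, not part of this
patch):

	/*
	 * Hypothetical producer fragment; work_mtx is assumed to be
	 * initialized elsewhere with mtx_init().
	 */
	static struct mtx work_mtx;
	static int work_ready;

	static void
	work_post(void)
	{
		mtx_lock(&work_mtx);
		work_ready = 1;
		/* Any one waiter will do; FIFO order is not required. */
		wakeup_any(&work_ready);
		mtx_unlock(&work_mtx);
	}
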
#ifndef __rtems__
static void
kdb_switch(void)