Diffstat (limited to 'freebsd/sys/kern/kern_synch.c')
-rw-r--r--  freebsd/sys/kern/kern_synch.c | 149
1 file changed, 60 insertions(+), 89 deletions(-)
diff --git a/freebsd/sys/kern/kern_synch.c b/freebsd/sys/kern/kern_synch.c
index 2824c9a9..6ecedfd2 100644
--- a/freebsd/sys/kern/kern_synch.c
+++ b/freebsd/sys/kern/kern_synch.c
@@ -39,7 +39,6 @@
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
-#include <rtems/bsd/local/opt_kdtrace.h>
#include <rtems/bsd/local/opt_ktrace.h>
#include <rtems/bsd/local/opt_sched.h>
@@ -69,12 +68,6 @@ __FBSDID("$FreeBSD$");
#include <machine/cpu.h>
-#ifdef XEN
-#include <vm/vm.h>
-#include <vm/vm_param.h>
-#include <vm/pmap.h>
-#endif
-
#define KTDSTATE(td) \
(((td)->td_inhibitors & TDI_SLEEPING) != 0 ? "sleep" : \
((td)->td_inhibitors & TDI_SUSPENDED) != 0 ? "suspended" : \
@@ -89,7 +82,7 @@ SYSINIT(synch_setup, SI_SUB_KICK_SCHEDULER, SI_ORDER_FIRST, synch_setup,
int hogticks;
#endif /* __rtems__ */
-static int pause_wchan;
+static uint8_t pause_wchan[MAXCPU];
#ifndef __rtems__
static struct callout loadav_callout;
@@ -113,21 +106,10 @@ static void loadav(void *arg);
SDT_PROVIDER_DECLARE(sched);
SDT_PROBE_DEFINE(sched, , , preempt);
-
-/*
- * These probes reference Solaris features that are not implemented in FreeBSD.
- * Create the probes anyway for compatibility with existing D scripts; they'll
- * just never fire.
- */
-SDT_PROBE_DEFINE(sched, , , cpucaps__sleep);
-SDT_PROBE_DEFINE(sched, , , cpucaps__wakeup);
-SDT_PROBE_DEFINE(sched, , , schedctl__nopreempt);
-SDT_PROBE_DEFINE(sched, , , schedctl__preempt);
-SDT_PROBE_DEFINE(sched, , , schedctl__yield);
#endif /* __rtems__ */
-void
-sleepinit(void)
+static void
+sleepinit(void *unused)
{
#ifndef __rtems__
@@ -137,13 +119,19 @@ sleepinit(void)
}
/*
+ * vmem tries to lock the sleepq mutexes when freeing KVA, so make sure
+ * the sleep queues are set up early enough.
+ */
+SYSINIT(sleepinit, SI_SUB_KMEM, SI_ORDER_ANY, sleepinit, 0);
+
+/*
* General sleep call. Suspends the current thread until a wakeup is
* performed on the specified identifier. The thread will then be made
- * runnable with the specified priority. Sleeps at most timo/hz seconds
- * (0 means no timeout). If pri includes PCATCH flag, signals are checked
- * before and after sleeping, else signals are not checked. Returns 0 if
+ * runnable with the specified priority. Sleeps at most sbt units of time
+ * (0 means no timeout). If pri includes the PCATCH flag, let signals
+ * interrupt the sleep, otherwise ignore them while sleeping. Returns 0 if
* awakened, EWOULDBLOCK if the timeout expires. If PCATCH is set and a
- * signal needs to be delivered, ERESTART is returned if the current system
+ * signal becomes pending, ERESTART is returned if the current system
* call should be restarted if possible, and EINTR is returned if the system
* call should be interrupted by the signal (return EINTR).
*
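
Not part of the diff: a minimal usage sketch of the sbintime_t sleep
interface described above, assuming hypothetical names (my_softc,
sc_flag, wait_for_flag). msleep_sbt() is the mutex-flavored wrapper
around _sleep().

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/mutex.h>

struct my_softc {
        struct mtx      sc_mtx;
        int             sc_flag;
};

/* Wait up to 100 ms for sc_flag, allowing ~1 ms of timer coalescing. */
static int
wait_for_flag(struct my_softc *sc)
{
        int error = 0;

        mtx_lock(&sc->sc_mtx);
        while (sc->sc_flag == 0 && error == 0)
                error = msleep_sbt(&sc->sc_flag, &sc->sc_mtx,
                    PWAIT | PCATCH, "myflag", 100 * SBT_1MS, SBT_1MS, 0);
        mtx_unlock(&sc->sc_mtx);
        /* 0 = awakened, EWOULDBLOCK = timeout, EINTR/ERESTART = signal */
        return (error);
}
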
@@ -153,18 +141,15 @@ sleepinit(void)
*/
int
_sleep(void *ident, struct lock_object *lock, int priority,
- const char *wmesg, int timo)
+ const char *wmesg, sbintime_t sbt, sbintime_t pr, int flags)
{
struct thread *td;
#ifndef __rtems__
struct proc *p;
#endif /* __rtems__ */
struct lock_class *class;
-#ifndef __rtems__
- int catch, flags, lock_state, pri, rval;
-#else /* __rtems__ */
- int flags, lock_state, pri, rval;
-#endif /* __rtems__ */
+ uintptr_t lock_state;
+ int catch, pri, rval, sleepq_flags;
WITNESS_SAVE_DECL(lock_witness);
td = curthread;
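
For context: after this signature change the legacy tick-based entry
points become thin wrappers over _sleep(). A sketch modeled on the
corresponding sys/systm.h macros, where tick_sbt is one hardclock tick
expressed as sbintime_t:

#define tsleep(chan, pri, wmesg, timo)                                  \
        _sleep((chan), NULL, (pri), (wmesg), tick_sbt * (timo),         \
            0, C_HARDCLOCK)
#define msleep(chan, mtx, pri, wmesg, timo)                             \
        _sleep((chan), &(mtx)->lock_object, (pri), (wmesg),             \
            tick_sbt * (timo), 0, C_HARDCLOCK)
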
@@ -177,7 +162,7 @@ _sleep(void *ident, struct lock_object *lock, int priority,
#endif
WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, lock,
"Sleeping on \"%s\"", wmesg);
- KASSERT(timo != 0 || mtx_owned(&Giant) || lock != NULL,
+ KASSERT(sbt != 0 || mtx_owned(&Giant) || lock != NULL,
("sleeping without a lock"));
#ifndef __rtems__
KASSERT(p != NULL, ("msleep1"));
@@ -194,15 +179,7 @@ _sleep(void *ident, struct lock_object *lock, int priority,
class = NULL;
#ifndef __rtems__
- if (cold || SCHEDULER_STOPPED()) {
- /*
- * During autoconfiguration, just return;
- * don't run any other threads or panic below,
- * in case this is the idle thread and already asleep.
- * XXX: this used to do "s = splhigh(); splx(safepri);
- * splx(s);" to give interrupts a chance, but there is
- * no way to give interrupts a chance now.
- */
+ if (SCHEDULER_STOPPED()) {
if (lock != NULL && priority & PDROP)
class->lc_unlock(lock);
return (0);
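
Note that the early return above still honors PDROP, so a caller's
locking discipline is unchanged when the scheduler is stopped. A hedged
sketch, reusing the hypothetical my_softc from the example earlier:

static int
wait_and_drop(struct my_softc *sc)
{
        int error;

        mtx_lock(&sc->sc_mtx);
        /* PDROP: _sleep() releases sc_mtx on every return path. */
        error = msleep_sbt(&sc->sc_flag, &sc->sc_mtx, PWAIT | PDROP,
            "myflag", SBT_1S, 0, 0);
        /* sc_mtx is no longer held here. */
        return (error);
}
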
@@ -210,6 +187,7 @@ _sleep(void *ident, struct lock_object *lock, int priority,
catch = priority & PCATCH;
pri = priority & PRIMASK;
#else /* __rtems__ */
+ (void)catch;
pri = priority;
#endif /* __rtems__ */
@@ -221,15 +199,14 @@ _sleep(void *ident, struct lock_object *lock, int priority,
if (TD_ON_SLEEPQ(td))
sleepq_remove(td, td->td_wchan);
- if (ident == &pause_wchan)
- flags = SLEEPQ_PAUSE;
+ if ((uint8_t *)ident >= &pause_wchan[0] &&
+ (uint8_t *)ident <= &pause_wchan[MAXCPU - 1])
+ sleepq_flags = SLEEPQ_PAUSE;
else
- flags = SLEEPQ_SLEEP;
+ sleepq_flags = SLEEPQ_SLEEP;
#ifndef __rtems__
if (catch)
- flags |= SLEEPQ_INTERRUPTIBLE;
- if (priority & PBDRY)
- flags |= SLEEPQ_STOP_ON_BDRY;
+ sleepq_flags |= SLEEPQ_INTERRUPTIBLE;
#endif /* __rtems__ */
sleepq_lock(ident);
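
What the range check above amounts to: each CPU now gets its own
one-byte pause() wait channel, so concurrent pause() sleepers no longer
all hash to a single sleep queue. A sketch with a hypothetical helper
name, reusing this file's pause_wchan and MAXCPU:

static bool
is_pause_wchan(const void *ident)
{
        const uint8_t *p = ident;

        return (p >= &pause_wchan[0] && p <= &pause_wchan[MAXCPU - 1]);
}
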
@@ -256,9 +233,9 @@ _sleep(void *ident, struct lock_object *lock, int priority,
* stopped, then td will no longer be on a sleep queue upon
* return from cursig().
*/
- sleepq_add(ident, lock, wmesg, flags, 0);
- if (timo)
- sleepq_set_timeout(ident, timo);
+ sleepq_add(ident, lock, wmesg, sleepq_flags, 0);
+ if (sbt != 0)
+ sleepq_set_timeout_sbt(ident, sbt, pr, flags);
if (lock != NULL && class->lc_flags & LC_SLEEPABLE) {
sleepq_release(ident);
WITNESS_SAVE(lock, lock_witness);
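
The calls above follow the standard sleepqueue protocol from
sys/sleepqueue.h. A simplified sketch, with signal handling and
WITNESS bookkeeping omitted:

static int
sleepq_sketch(void *ident, struct lock_object *lock, const char *wmesg,
    int sleepq_flags, int pri, sbintime_t sbt, sbintime_t pr, int flags)
{
        int rval;

        sleepq_lock(ident);                     /* lock ident's hash chain */
        sleepq_add(ident, lock, wmesg, sleepq_flags, 0);
        if (sbt != 0) {
                sleepq_set_timeout_sbt(ident, sbt, pr, flags);
                rval = sleepq_timedwait(ident, pri); /* 0 or EWOULDBLOCK */
        } else {
                sleepq_wait(ident, pri);        /* blocks until wakeup() */
                rval = 0;
        }
        return (rval);
}
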
@@ -266,11 +243,11 @@ _sleep(void *ident, struct lock_object *lock, int priority,
sleepq_lock(ident);
}
#ifndef __rtems__
- if (timo && catch)
+ if (sbt != 0 && catch)
rval = sleepq_timedwait_sig(ident, pri);
- else if (timo)
+ else if (sbt != 0)
#else /* __rtems__ */
- if (timo)
+ if (sbt != 0)
#endif /* __rtems__ */
rval = sleepq_timedwait(ident, pri);
#ifndef __rtems__
@@ -295,7 +272,8 @@ _sleep(void *ident, struct lock_object *lock, int priority,
#ifndef __rtems__
int
-msleep_spin(void *ident, struct mtx *mtx, const char *wmesg, int timo)
+msleep_spin_sbt(void *ident, struct mtx *mtx, const char *wmesg,
+ sbintime_t sbt, sbintime_t pr, int flags)
{
struct thread *td;
struct proc *p;
@@ -308,17 +286,8 @@ msleep_spin(void *ident, struct mtx *mtx, const char *wmesg, int timo)
KASSERT(p != NULL, ("msleep1"));
KASSERT(ident != NULL && TD_IS_RUNNING(td), ("msleep"));
- if (cold || SCHEDULER_STOPPED()) {
- /*
- * During autoconfiguration, just return;
- * don't run any other threads or panic below,
- * in case this is the idle thread and already asleep.
- * XXX: this used to do "s = splhigh(); splx(safepri);
- * splx(s);" to give interrupts a chance, but there is
- * no way to give interrupts a chance now.
- */
+ if (SCHEDULER_STOPPED())
return (0);
- }
sleepq_lock(ident);
CTR5(KTR_PROC, "msleep_spin: thread %ld (pid %ld, %s) on %s (%p)",
@@ -333,8 +302,8 @@ msleep_spin(void *ident, struct mtx *mtx, const char *wmesg, int timo)
* We put ourselves on the sleep queue and start our timeout.
*/
sleepq_add(ident, &mtx->lock_object, wmesg, SLEEPQ_SLEEP, 0);
- if (timo)
- sleepq_set_timeout(ident, timo);
+ if (sbt != 0)
+ sleepq_set_timeout_sbt(ident, sbt, pr, flags);
/*
* Can't call ktrace with any spin locks held so it can lock the
@@ -356,7 +325,7 @@ msleep_spin(void *ident, struct mtx *mtx, const char *wmesg, int timo)
wmesg);
sleepq_lock(ident);
#endif
- if (timo)
+ if (sbt != 0)
rval = sleepq_timedwait(ident, 0);
else {
sleepq_wait(ident, 0);
@@ -381,28 +350,32 @@ msleep_spin(void *ident, struct mtx *mtx, const char *wmesg, int timo)
* to a "timo" value of one.
*/
int
-pause(const char *wmesg, int timo)
+pause_sbt(const char *wmesg, sbintime_t sbt, sbintime_t pr, int flags)
{
- KASSERT(timo >= 0, ("pause: timo must be >= 0"));
+ KASSERT(sbt >= 0, ("pause: timeout must be >= 0"));
/* silently convert invalid timeouts */
- if (timo < 1)
- timo = 1;
+ if (sbt == 0)
+ sbt = tick_sbt;
- if (cold) {
+#ifndef __rtems__
+ if (cold || kdb_active || SCHEDULER_STOPPED()) {
/*
- * We delay one HZ at a time to avoid overflowing the
+ * We delay one second at a time to avoid overflowing the
* system specific DELAY() function(s):
*/
- while (timo >= hz) {
+ while (sbt >= SBT_1S) {
DELAY(1000000);
- timo -= hz;
+ sbt -= SBT_1S;
}
- if (timo > 0)
- DELAY(timo * tick);
+ /* Do the delay remainder, if any */
+ sbt = howmany(sbt, SBT_1US);
+ if (sbt > 0)
+ DELAY(sbt);
return (0);
}
- return (tsleep(&pause_wchan, 0, wmesg, timo));
+#endif /* __rtems__ */
+ return (_sleep(&pause_wchan[curcpu], NULL, 0, wmesg, sbt, pr, flags));
}
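
A self-contained userspace sketch of the cold-path arithmetic above.
sbintime_t is 32.32 fixed point, so SBT_1S is 1 << 32; SBT_1US is
slightly under a true microsecond because the division truncates, which
makes the remainder come out a touch above the nominal count:

#include <stdio.h>
#include <stdint.h>

typedef int64_t sbintime_t;
#define SBT_1S  ((sbintime_t)1 << 32)
#define SBT_1US (SBT_1S / 1000000)
#define howmany(x, y)   (((x) + ((y) - 1)) / (y))

int
main(void)
{
        sbintime_t sbt = 3 * SBT_1S / 2;        /* a 1.5 s pause */
        int secs = 0;

        while (sbt >= SBT_1S) {         /* one DELAY(1000000) per loop */
                secs++;
                sbt -= SBT_1S;
        }
        printf("%d s, then DELAY(%jd us)\n",    /* 1 s, then ~500113 us */
            secs, (intmax_t)howmany(sbt, SBT_1US));
        return (0);
}
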
/*
@@ -460,11 +433,9 @@ mi_switch(int flags, struct thread *newtd)
{
uint64_t runtime, new_switchtime;
struct thread *td;
- struct proc *p;
td = curthread; /* XXX */
THREAD_LOCK_ASSERT(td, MA_OWNED | MA_NOTRECURSED);
- p = td->td_proc; /* XXX */
KASSERT(!TD_ON_RUNQ(td), ("mi_switch: called by old code"));
#ifdef INVARIANTS
if (!TD_ON_LOCK(td) && !TD_IS_RUNNING(td))
@@ -486,8 +457,10 @@ mi_switch(int flags, struct thread *newtd)
if (flags & SW_VOL) {
td->td_ru.ru_nvcsw++;
td->td_swvoltick = ticks;
- } else
+ } else {
td->td_ru.ru_nivcsw++;
+ td->td_swinvoltick = ticks;
+ }
#ifdef SCHED_STATS
SCHED_STAT_INC(sched_switch_stats[flags & SW_TYPE_MASK]);
#endif
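
td_swinvoltick, added above, mirrors td_swvoltick for involuntary
switches. For context, the voluntary timestamp is what this file's
should_yield() consumes; a sketch modeled on that function, not quoted
verbatim:

int
should_yield(void)
{

        return ((u_int)ticks - (u_int)curthread->td_swvoltick >= hogticks);
}
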
@@ -504,7 +477,7 @@ mi_switch(int flags, struct thread *newtd)
PCPU_INC(cnt.v_swtch);
PCPU_SET(switchticks, ticks);
CTR4(KTR_PROC, "mi_switch: old thread %ld (td_sched %p, pid %ld, %s)",
- td->td_tid, td->td_sched, p->p_pid, td->td_name);
+ td->td_tid, td_get_sched(td), td->td_proc->p_pid, td->td_name);
#if (KTR_COMPILE & KTR_SCHED) != 0
if (TD_IS_IDLETHREAD(td))
KTR_STATE1(KTR_SCHED, "thread", sched_tdname(td), "idle",
@@ -515,15 +488,12 @@ mi_switch(int flags, struct thread *newtd)
"lockname:\"%s\"", td->td_lockname);
#endif
SDT_PROBE0(sched, , , preempt);
-#ifdef XEN
- PT_UPDATES_FLUSH();
-#endif
sched_switch(td, newtd, flags);
KTR_STATE1(KTR_SCHED, "thread", sched_tdname(td), "running",
"prio:%d", td->td_priority);
CTR4(KTR_PROC, "mi_switch: new thread %ld (td_sched %p, pid %ld, %s)",
- td->td_tid, td->td_sched, p->p_pid, td->td_name);
+ td->td_tid, td_get_sched(td), td->td_proc->p_pid, td->td_name);
/*
* If the last thread was exiting, finish cleaning it up.
@@ -596,15 +566,16 @@ loadav(void *arg)
* random variation to avoid synchronisation with processes that
* run at regular intervals.
*/
- callout_reset(&loadav_callout, hz * 4 + (int)(random() % (hz * 2 + 1)),
- loadav, NULL);
+ callout_reset_sbt(&loadav_callout,
+ SBT_1US * (4000000 + (int)(random() % 2000001)), SBT_1US,
+ loadav, NULL, C_DIRECT_EXEC | C_PREL(32));
}
/* ARGSUSED */
static void
synch_setup(void *dummy)
{
- callout_init(&loadav_callout, CALLOUT_MPSAFE);
+ callout_init(&loadav_callout, 1);
/* Kick off timeout driven events by calling first time. */
loadav(NULL);
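
A hedged sketch of the self-rearming callout pattern used by loadav()
above, with hypothetical names (demo_callout, demo_tick, demo_setup).
The interval is 4 s plus 0-2 s of jitter in microsecond units;
C_DIRECT_EXEC runs the handler straight from the timer interrupt and
C_PREL() encodes the tolerated precision relative to the interval.

static struct callout demo_callout;

static void
demo_tick(void *arg)
{
        /* ... periodic work ... */
        callout_reset_sbt(&demo_callout,
            SBT_1US * (4000000 + (int)(random() % 2000001)), SBT_1US,
            demo_tick, NULL, C_DIRECT_EXEC | C_PREL(32));
}

static void
demo_setup(void)
{
        callout_init(&demo_callout, 1);         /* 1 => MP-safe callout */
        demo_tick(NULL);                        /* arm the first period */
}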