Diffstat (limited to 'freebsd/sys/kern/subr_lock.c')
-rw-r--r--  freebsd/sys/kern/subr_lock.c  |  67
1 file changed, 38 insertions(+), 29 deletions(-)
diff --git a/freebsd/sys/kern/subr_lock.c b/freebsd/sys/kern/subr_lock.c
index 4a55a95a..83c63010 100644
--- a/freebsd/sys/kern/subr_lock.c
+++ b/freebsd/sys/kern/subr_lock.c
@@ -81,8 +81,8 @@ lock_init(struct lock_object *lock, struct lock_class *class, const char *name,
int i;
/* Check for double-init and zero object. */
- KASSERT(!lock_initalized(lock), ("lock \"%s\" %p already initialized",
- name, lock));
+ KASSERT(flags & LO_NEW || !lock_initialized(lock),
+ ("lock \"%s\" %p already initialized", name, lock));
/* Look up lock class to find its index. */
for (i = 0; i < LOCK_CLASS_MAX; i++)
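
The relaxed KASSERT above lets callers pass LO_NEW to tell lock_init() that the backing memory was freshly provided and may still carry stale flag bits, so the double-initialization check is skipped for them. Below is a minimal userland sketch of that check; the flag values and the helper name are local to the sketch, not the kernel definitions.

#include <assert.h>
#include <stdio.h>

/* Flag values are placeholders for this sketch only. */
#define	LO_INITIALIZED	0x1
#define	LO_NEW		0x2

struct lock_object {
	unsigned lo_flags;
};

static void
lock_init_sketch(struct lock_object *lock, unsigned flags)
{
	/* Old form: assert(!(lock->lo_flags & LO_INITIALIZED)); */
	assert((flags & LO_NEW) || !(lock->lo_flags & LO_INITIALIZED));
	lock->lo_flags = LO_INITIALIZED;
}

int
main(void)
{
	/* Pretend this object sits in recycled memory with stale flag bits. */
	struct lock_object stale = { .lo_flags = LO_INITIALIZED };

	lock_init_sketch(&stale, LO_NEW);	/* tolerated because of LO_NEW */
	printf("re-init with LO_NEW accepted\n");
	return 0;
}
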
@@ -104,15 +104,45 @@ lock_destroy(struct lock_object *lock)
{
#ifndef __rtems__
- KASSERT(lock_initalized(lock), ("lock %p is not initialized", lock));
+ KASSERT(lock_initialized(lock), ("lock %p is not initialized", lock));
#else /* __rtems__ */
- BSD_ASSERT(lock_initalized(lock));
+ BSD_ASSERT(lock_initialized(lock));
#endif /* __rtems__ */
WITNESS_DESTROY(lock);
LOCK_LOG_DESTROY(lock, 0);
lock->lo_flags &= ~LO_INITIALIZED;
}
+#ifndef __rtems__
+void
+lock_delay(struct lock_delay_arg *la)
+{
+ u_int i, delay, backoff, min, max;
+ struct lock_delay_config *lc = la->config;
+
+ delay = la->delay;
+
+ if (delay == 0)
+ delay = lc->initial;
+ else {
+ delay += lc->step;
+ max = lc->max;
+ if (delay > max)
+ delay = max;
+ }
+
+ backoff = cpu_ticks() % delay;
+ min = lc->min;
+ if (backoff < min)
+ backoff = min;
+ for (i = 0; i < backoff; i++)
+ cpu_spinwait();
+
+ la->delay = delay;
+ la->spin_cnt += backoff;
+}
+#endif /* __rtems__ */
+
#ifdef DDB
DB_SHOW_COMMAND(lock, db_show_lock)
{
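
The new lock_delay() above implements a bounded, randomized spin backoff: the delay grows by lc->step on each call, is clamped to lc->max, and the actual number of cpu_spinwait() iterations is a cpu_ticks()-derived value no smaller than lc->min. The standalone sketch below reproduces just that computation so it can be run outside the kernel; every name in it is a local stand-in (rand() for cpu_ticks(), a counter instead of cpu_spinwait()), not the FreeBSD API.

#include <stdio.h>
#include <stdlib.h>

struct delay_config { unsigned initial, step, min, max; };
struct delay_arg { const struct delay_config *config; unsigned delay, spin_cnt; };

static void
delay_once(struct delay_arg *la)
{
	const struct delay_config *lc = la->config;
	unsigned delay = la->delay, backoff;

	if (delay == 0)
		delay = lc->initial;
	else {
		delay += lc->step;
		if (delay > lc->max)
			delay = lc->max;
	}

	/* rand() stands in for cpu_ticks(); the kernel spins cpu_spinwait()
	 * backoff times instead of just counting. */
	backoff = (unsigned)rand() % delay;
	if (backoff < lc->min)
		backoff = lc->min;

	la->delay = delay;
	la->spin_cnt += backoff;
}

int
main(void)
{
	struct delay_config cfg = { .initial = 8, .step = 8, .min = 4, .max = 128 };
	struct delay_arg la = { .config = &cfg };

	for (int attempt = 0; attempt < 6; attempt++) {
		delay_once(&la);
		printf("attempt %d: delay=%u total spins=%u\n",
		    attempt, la.delay, la.spin_cnt);
	}
	return 0;
}

In the kernel this routine is driven from lock-acquisition spin loops, with one lock_delay_arg carried across the retries of a single acquisition attempt.
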
@@ -248,34 +278,13 @@ lock_prof_init(void *arg)
}
SYSINIT(lockprof, SI_SUB_SMP, SI_ORDER_ANY, lock_prof_init, NULL);
-/*
- * To be certain that lock profiling has idled on all cpus before we
- * reset, we schedule the resetting thread on all active cpus. Since
- * all operations happen within critical sections we can be sure that
- * it is safe to zero the profiling structures.
- */
-static void
-lock_prof_idle(void)
-{
- struct thread *td;
- int cpu;
-
- td = curthread;
- thread_lock(td);
- CPU_FOREACH(cpu) {
- sched_bind(td, cpu);
- }
- sched_unbind(td);
- thread_unlock(td);
-}
-
static void
lock_prof_reset_wait(void)
{
/*
- * Spin relinquishing our cpu so that lock_prof_idle may
- * run on it.
+ * Spin relinquishing our cpu so that quiesce_all_cpus may
+ * complete.
*/
while (lock_prof_resetting)
sched_relinquish(curthread);
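
The deleted lock_prof_idle() quiesced the profiler by binding the resetting thread to every CPU in turn; the patch replaces it with the generic quiesce_all_cpus() routine, while lock_prof_reset_wait() keeps yielding until the resetter clears lock_prof_resetting. The pthread sketch below mimics that flag-and-yield handshake in userland; every identifier in it is local to the sketch, with sched_yield() standing in for sched_relinquish() and the quiesce step.

#include <pthread.h>
#include <sched.h>
#include <stdatomic.h>
#include <stdio.h>
#include <string.h>

static atomic_int resetting;
static int stats[64];

static void *
waiter(void *arg)
{
	(void)arg;
	/* Mirrors lock_prof_reset_wait(): yield until the reset completes. */
	while (atomic_load_explicit(&resetting, memory_order_acquire))
		sched_yield();
	printf("waiter: reset finished, stats[0]=%d\n", stats[0]);
	return NULL;
}

int
main(void)
{
	pthread_t t;

	stats[0] = 42;
	atomic_store_explicit(&resetting, 1, memory_order_release);
	pthread_create(&t, NULL, waiter, NULL);

	/* quiesce_all_cpus("profreset", 0) would run here in the kernel. */
	memset(stats, 0, sizeof(stats));

	atomic_store_explicit(&resetting, 0, memory_order_release);
	pthread_join(t, NULL);
	return 0;
}
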
@@ -297,7 +306,7 @@ lock_prof_reset(void)
atomic_store_rel_int(&lock_prof_resetting, 1);
enabled = lock_prof_enable;
lock_prof_enable = 0;
- lock_prof_idle();
+ quiesce_all_cpus("profreset", 0);
/*
* Some objects may have migrated between CPUs. Clear all links
* before we zero the structures. Some items may still be linked
@@ -409,7 +418,7 @@ dump_lock_prof_stats(SYSCTL_HANDLER_ARGS)
"max", "wait_max", "total", "wait_total", "count", "avg", "wait_avg", "cnt_hold", "cnt_lock", "name");
enabled = lock_prof_enable;
lock_prof_enable = 0;
- lock_prof_idle();
+ quiesce_all_cpus("profstat", 0);
t = ticks;
for (cpu = 0; cpu <= mp_maxid; cpu++) {
if (lp_cpu[cpu] == NULL)
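
dump_lock_prof_stats() follows the same pattern as the reset path: disable profiling, quiesce with quiesce_all_cpus("profstat", 0), then walk the per-CPU buffers, skipping CPUs that never allocated one. A small userland sketch of that aggregation loop, with placeholder names and made-up sample data:

#include <stdio.h>

#define	MAXCPU	8

struct prof_cpu { unsigned long cnt_hold, cnt_lock; };

int
main(void)
{
	struct prof_cpu slot1 = { 10, 3 }, slot3 = { 7, 1 };
	struct prof_cpu *lp_cpu[MAXCPU] = { [1] = &slot1, [3] = &slot3 };
	unsigned long hold = 0, lock = 0;

	for (int cpu = 0; cpu < MAXCPU; cpu++) {
		if (lp_cpu[cpu] == NULL)	/* this CPU never recorded anything */
			continue;
		hold += lp_cpu[cpu]->cnt_hold;
		lock += lp_cpu[cpu]->cnt_lock;
	}
	printf("cnt_hold=%lu cnt_lock=%lu\n", hold, lock);
	return 0;
}
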