Diffstat (limited to 'freebsd/sys/kern/kern_timeout.c')
-rw-r--r--  freebsd/sys/kern/kern_timeout.c | 524
1 file changed, 362 insertions(+), 162 deletions(-)
diff --git a/freebsd/sys/kern/kern_timeout.c b/freebsd/sys/kern/kern_timeout.c
index a7fe5d59..73cbd571 100644
--- a/freebsd/sys/kern/kern_timeout.c
+++ b/freebsd/sys/kern/kern_timeout.c
@@ -58,14 +58,18 @@ __FBSDID("$FreeBSD$");
#include <sys/sysctl.h>
#include <sys/smp.h>
+#ifdef SMP
+#include <machine/cpu.h>
+#endif
+
#ifdef __rtems__
int ncallout = 16;
#endif /* __rtems__ */
SDT_PROVIDER_DEFINE(callout_execute);
-SDT_PROBE_DEFINE(callout_execute, kernel, , callout_start);
+SDT_PROBE_DEFINE(callout_execute, kernel, , callout_start, callout-start);
SDT_PROBE_ARGTYPE(callout_execute, kernel, , callout_start, 0,
"struct callout *");
-SDT_PROBE_DEFINE(callout_execute, kernel, , callout_end);
+SDT_PROBE_DEFINE(callout_execute, kernel, , callout_end, callout-end);
SDT_PROBE_ARGTYPE(callout_execute, kernel, , callout_end, 0,
"struct callout *");
@@ -88,6 +92,21 @@ SYSCTL_INT(_debug, OID_AUTO, to_avg_mpcalls, CTLFLAG_RD, &avg_mpcalls, 0,
int callwheelsize, callwheelbits, callwheelmask;
/*
+ * The callout CPU migration entity describes the information needed to
+ * hand a migrating callout over to its new callout CPU.
+ * Caching this information is essential for deferring the migration when
+ * the migrating callout is already running.
+ */
+struct cc_mig_ent {
+#ifdef SMP
+ void (*ce_migration_func)(void *);
+ void *ce_migration_arg;
+ int ce_migration_cpu;
+ int ce_migration_ticks;
+#endif
+};
+
+/*
* There is one struct callout_cpu per cpu, holding all relevant
* state for the callout processing thread on the individual CPU.
* In particular:
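
The migration entity introduced above is just a four-field record (target CPU, ticks, function, argument) that lets a cross-CPU reschedule be parked while the callout handler is still running and replayed once it returns. Below is a minimal user-space sketch of that record-and-replay idea; mig_ent, CPU_NONE, defer_migration() and replay_migration() are hypothetical names that only mirror the kernel structures, not the kernel API.

#include <stdio.h>

/* Hypothetical stand-in for struct cc_mig_ent. */
struct mig_ent {
	void	(*func)(void *);
	void	*arg;
	int	cpu;
	int	ticks;
};

#define CPU_NONE	(-1)	/* plays the role of CPUBLOCK */

static struct mig_ent pending = { NULL, NULL, CPU_NONE, 0 };

/* Record a reschedule that cannot be performed while the handler runs. */
static void
defer_migration(void (*func)(void *), void *arg, int cpu, int ticks)
{

	pending.func = func;
	pending.arg = arg;
	pending.cpu = cpu;
	pending.ticks = ticks;
}

/* Replay and clear the record once the handler has returned. */
static void
replay_migration(void)
{

	if (pending.cpu == CPU_NONE)
		return;
	printf("reschedule on cpu %d in %d ticks\n", pending.cpu,
	    pending.ticks);
	pending.func(pending.arg);	/* the kernel would requeue it via callout_cc_add() */
	pending.cpu = CPU_NONE;
}

static void
handler(void *arg)
{

	printf("handler(%s)\n", (const char *)arg);
}

int
main(void)
{

	defer_migration(handler, "tick", 2, 10);
	replay_migration();	/* the handler has "finished running" */
	return (0);
}
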
@@ -105,6 +124,7 @@ int callwheelsize, callwheelbits, callwheelmask;
* when the callout should be served.
*/
struct callout_cpu {
+ struct cc_mig_ent cc_migrating_entity;
struct mtx cc_lock;
struct callout *cc_callout;
struct callout_tailq *cc_callwheel;
@@ -119,7 +139,13 @@ struct callout_cpu {
};
#ifdef SMP
+#define cc_migration_func cc_migrating_entity.ce_migration_func
+#define cc_migration_arg cc_migrating_entity.ce_migration_arg
+#define cc_migration_cpu cc_migrating_entity.ce_migration_cpu
+#define cc_migration_ticks cc_migrating_entity.ce_migration_ticks
+
struct callout_cpu cc_cpu[MAXCPU];
+#define CPUBLOCK MAXCPU
#define CC_CPU(cpu) (&cc_cpu[(cpu)])
#define CC_SELF() CC_CPU(PCPU_GET(cpuid))
#else
@@ -129,6 +155,7 @@ struct callout_cpu cc_cpu;
#endif
#define CC_LOCK(cc) mtx_lock_spin(&(cc)->cc_lock)
#define CC_UNLOCK(cc) mtx_unlock_spin(&(cc)->cc_lock)
+#define CC_LOCK_ASSERT(cc) mtx_assert(&(cc)->cc_lock, MA_OWNED)
static int timeout_cpu;
@@ -152,6 +179,35 @@ MALLOC_DEFINE(M_CALLOUT, "callout", "Callout datastructures");
*/
/*
+ * Resets the migration entity tied to a specific callout cpu.
+ */
+static void
+cc_cme_cleanup(struct callout_cpu *cc)
+{
+
+#ifdef SMP
+ cc->cc_migration_cpu = CPUBLOCK;
+ cc->cc_migration_ticks = 0;
+ cc->cc_migration_func = NULL;
+ cc->cc_migration_arg = NULL;
+#endif
+}
+
+/*
+ * Checks if migration is requested by a specific callout cpu.
+ */
+static int
+cc_cme_migrating(struct callout_cpu *cc)
+{
+
+#ifdef SMP
+ return (cc->cc_migration_cpu != CPUBLOCK);
+#else
+ return (0);
+#endif
+}
+
+/*
* kern_timeout_callwheel_alloc() - kernel low level callwheel initialization
*
* This code is called very early in the kernel initialization sequence,
@@ -237,6 +293,7 @@ callout_cpu_init(struct callout_cpu *cc)
for (i = 0; i < callwheelsize; i++) {
TAILQ_INIT(&cc->cc_callwheel[i]);
}
+ cc_cme_cleanup(cc);
if (cc->cc_callout == NULL)
return;
for (i = 0; i < ncallout; i++) {
@@ -247,6 +304,36 @@ callout_cpu_init(struct callout_cpu *cc)
}
}
+#ifdef SMP
+/*
+ * Switches the CPU tied to a specific callout.
+ * The function expects the incoming callout CPU to be locked and returns
+ * with the outgoing callout CPU locked.
+ */
+static struct callout_cpu *
+callout_cpu_switch(struct callout *c, struct callout_cpu *cc, int new_cpu)
+{
+ struct callout_cpu *new_cc;
+
+ MPASS(c != NULL && cc != NULL);
+ CC_LOCK_ASSERT(cc);
+
+ /*
+	 * Keep interrupts and preemption disabled while the callout CPU
+	 * is blocked, in order to avoid deadlocks: an interrupting or
+	 * preempting thread may want to acquire the callout CPU lock.
+ */
+ c->c_cpu = CPUBLOCK;
+ spinlock_enter();
+ CC_UNLOCK(cc);
+ new_cc = CC_CPU(new_cpu);
+ CC_LOCK(new_cc);
+ spinlock_exit();
+ c->c_cpu = new_cpu;
+ return (new_cc);
+}
+#endif
+
#ifndef __rtems__
/*
* kern_timeout_callwheel_init() - initialize previously reserved callwheel
@@ -281,11 +368,9 @@ start_softclock(void *dummy)
panic("died while creating standard software ithreads");
cc->cc_cookie = softclock_ih;
#ifdef SMP
- for (cpu = 0; cpu <= mp_maxid; cpu++) {
+ CPU_FOREACH(cpu) {
if (cpu == timeout_cpu)
continue;
- if (CPU_ABSENT(cpu))
- continue;
cc = CC_CPU(cpu);
if (swi_add(NULL, "clock", softclock, cc, SWI_CLOCK,
INTR_MPSAFE, &cc->cc_cookie))
@@ -340,6 +425,13 @@ callout_lock(struct callout *c)
for (;;) {
cpu = c->c_cpu;
+#ifdef SMP
+ if (cpu == CPUBLOCK) {
+ while (c->c_cpu == CPUBLOCK)
+ cpu_spinwait();
+ continue;
+ }
+#endif
cc = CC_CPU(cpu);
CC_LOCK(cc);
if (cpu == c->c_cpu)
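
Together, callout_cpu_switch() and the CPUBLOCK check in callout_lock() form a small hand-off protocol: the migrating side writes a CPU index that no real CPU can have into c_cpu before it drops the old lock, and every other thread spins until a valid CPU shows up again, so nobody can look up (let alone lock) the per-CPU state while ownership is in transit; spinlock_enter() additionally keeps the migrating thread from being interrupted or preempted while it briefly holds no lock at all. Below is a rough user-space analogue of just that hand-off, using C11 atomics and pthread mutexes; item_lock(), item_switch(), cc_lock[] and NCPU are hypothetical names, and the real code uses spin mutexes and cpu_spinwait() instead.

#include <pthread.h>
#include <sched.h>
#include <stdatomic.h>

#define NCPU		2
#define CPUBLOCK	NCPU	/* sentinel: ownership is in transit */

static pthread_mutex_t cc_lock[NCPU] = {
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER
};
static _Atomic int c_cpu = 0;	/* which per-"CPU" lock guards the item */

/* Consumer side: the analogue of callout_lock(). */
static int
item_lock(void)
{
	int cpu;

	for (;;) {
		cpu = atomic_load(&c_cpu);
		if (cpu == CPUBLOCK) {		/* migration in progress */
			sched_yield();		/* cpu_spinwait() stand-in */
			continue;
		}
		pthread_mutex_lock(&cc_lock[cpu]);
		if (cpu == atomic_load(&c_cpu))
			return (cpu);		/* still the right lock */
		pthread_mutex_unlock(&cc_lock[cpu]);
	}
}

/* Producer side: the analogue of callout_cpu_switch(); the caller must
 * already hold cc_lock[old_cpu]. */
static void
item_switch(int old_cpu, int new_cpu)
{

	atomic_store(&c_cpu, CPUBLOCK);		/* block new lookups */
	pthread_mutex_unlock(&cc_lock[old_cpu]);
	pthread_mutex_lock(&cc_lock[new_cpu]);	/* nobody can race us here */
	atomic_store(&c_cpu, new_cpu);
}

int
main(void)
{
	int cpu;

	cpu = item_lock();		/* returns 0, holding cc_lock[0] */
	item_switch(cpu, 1);		/* now holding cc_lock[1] */
	pthread_mutex_unlock(&cc_lock[1]);
	return (0);
}
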
@@ -349,6 +441,202 @@ callout_lock(struct callout *c)
return (cc);
}
+static void
+callout_cc_add(struct callout *c, struct callout_cpu *cc, int to_ticks,
+ void (*func)(void *), void *arg, int cpu)
+{
+
+ CC_LOCK_ASSERT(cc);
+
+ if (to_ticks <= 0)
+ to_ticks = 1;
+ c->c_arg = arg;
+ c->c_flags |= (CALLOUT_ACTIVE | CALLOUT_PENDING);
+ c->c_func = func;
+ c->c_time = cc->cc_ticks + to_ticks;
+ TAILQ_INSERT_TAIL(&cc->cc_callwheel[c->c_time & callwheelmask],
+ c, c_links.tqe);
+}
+
+static void
+callout_cc_del(struct callout *c, struct callout_cpu *cc)
+{
+
+ if (cc->cc_next == c)
+ cc->cc_next = TAILQ_NEXT(c, c_links.tqe);
+ if (c->c_flags & CALLOUT_LOCAL_ALLOC) {
+ c->c_func = NULL;
+ SLIST_INSERT_HEAD(&cc->cc_callfree, c, c_links.sle);
+ }
+}
+
+static struct callout *
+softclock_call_cc(struct callout *c, struct callout_cpu *cc, int *mpcalls,
+ int *lockcalls, int *gcalls)
+{
+ void (*c_func)(void *);
+ void *c_arg;
+ struct lock_class *class;
+ struct lock_object *c_lock;
+ int c_flags, sharedlock;
+#ifdef SMP
+ struct callout_cpu *new_cc;
+ void (*new_func)(void *);
+ void *new_arg;
+ int new_cpu, new_ticks;
+#endif
+#ifdef DIAGNOSTIC
+ struct bintime bt1, bt2;
+ struct timespec ts2;
+ static uint64_t maxdt = 36893488147419102LL; /* 2 msec */
+ static timeout_t *lastfunc;
+#endif
+
+ cc->cc_next = TAILQ_NEXT(c, c_links.tqe);
+ class = (c->c_lock != NULL) ? LOCK_CLASS(c->c_lock) : NULL;
+ sharedlock = (c->c_flags & CALLOUT_SHAREDLOCK) ? 0 : 1;
+ c_lock = c->c_lock;
+ c_func = c->c_func;
+ c_arg = c->c_arg;
+ c_flags = c->c_flags;
+ if (c->c_flags & CALLOUT_LOCAL_ALLOC)
+ c->c_flags = CALLOUT_LOCAL_ALLOC;
+ else
+ c->c_flags &= ~CALLOUT_PENDING;
+ cc->cc_curr = c;
+ cc->cc_cancel = 0;
+ CC_UNLOCK(cc);
+ if (c_lock != NULL) {
+ class->lc_lock(c_lock, sharedlock);
+ /*
+ * The callout may have been cancelled
+ * while we switched locks.
+ */
+ if (cc->cc_cancel) {
+ class->lc_unlock(c_lock);
+ goto skip;
+ }
+ /* The callout cannot be stopped now. */
+ cc->cc_cancel = 1;
+
+ if (c_lock == &Giant.lock_object) {
+ (*gcalls)++;
+ CTR3(KTR_CALLOUT, "callout %p func %p arg %p",
+ c, c_func, c_arg);
+ } else {
+ (*lockcalls)++;
+ CTR3(KTR_CALLOUT, "callout lock %p func %p arg %p",
+ c, c_func, c_arg);
+ }
+ } else {
+ (*mpcalls)++;
+ CTR3(KTR_CALLOUT, "callout mpsafe %p func %p arg %p",
+ c, c_func, c_arg);
+ }
+#ifdef DIAGNOSTIC
+ binuptime(&bt1);
+#endif
+#ifndef __rtems__
+ THREAD_NO_SLEEPING();
+ SDT_PROBE(callout_execute, kernel, , callout_start, c, 0, 0, 0, 0);
+#endif /* __rtems__ */
+ c_func(c_arg);
+#ifndef __rtems__
+ SDT_PROBE(callout_execute, kernel, , callout_end, c, 0, 0, 0, 0);
+ THREAD_SLEEPING_OK();
+#endif /* __rtems__ */
+#ifdef DIAGNOSTIC
+ binuptime(&bt2);
+ bintime_sub(&bt2, &bt1);
+ if (bt2.frac > maxdt) {
+ if (lastfunc != c_func || bt2.frac > maxdt * 2) {
+ bintime2timespec(&bt2, &ts2);
+ printf(
+ "Expensive timeout(9) function: %p(%p) %jd.%09ld s\n",
+ c_func, c_arg, (intmax_t)ts2.tv_sec, ts2.tv_nsec);
+ }
+ maxdt = bt2.frac;
+ lastfunc = c_func;
+ }
+#endif
+ CTR1(KTR_CALLOUT, "callout %p finished", c);
+ if ((c_flags & CALLOUT_RETURNUNLOCKED) == 0)
+ class->lc_unlock(c_lock);
+skip:
+ CC_LOCK(cc);
+ /*
+ * If the current callout is locally allocated (from
+ * timeout(9)) then put it on the freelist.
+ *
+ * Note: we need to check the cached copy of c_flags because
+ * if it was not local, then it's not safe to deref the
+ * callout pointer.
+ */
+ if (c_flags & CALLOUT_LOCAL_ALLOC) {
+ KASSERT(c->c_flags == CALLOUT_LOCAL_ALLOC,
+ ("corrupted callout"));
+ c->c_func = NULL;
+ SLIST_INSERT_HEAD(&cc->cc_callfree, c, c_links.sle);
+ }
+ cc->cc_curr = NULL;
+ if (cc->cc_waiting) {
+ /*
+ * There is someone waiting for the
+ * callout to complete.
+ * If the callout was scheduled for
+ * migration just cancel it.
+ */
+ if (cc_cme_migrating(cc))
+ cc_cme_cleanup(cc);
+ cc->cc_waiting = 0;
+ CC_UNLOCK(cc);
+ wakeup(&cc->cc_waiting);
+ CC_LOCK(cc);
+ } else if (cc_cme_migrating(cc)) {
+#ifdef SMP
+ /*
+ * If the callout was scheduled for
+ * migration just perform it now.
+ */
+ new_cpu = cc->cc_migration_cpu;
+ new_ticks = cc->cc_migration_ticks;
+ new_func = cc->cc_migration_func;
+ new_arg = cc->cc_migration_arg;
+ cc_cme_cleanup(cc);
+
+ /*
+ * Handle deferred callout stops
+ */
+ if ((c->c_flags & CALLOUT_DFRMIGRATION) == 0) {
+ CTR3(KTR_CALLOUT,
+ "deferred cancelled %p func %p arg %p",
+ c, new_func, new_arg);
+ callout_cc_del(c, cc);
+ goto nextc;
+ }
+
+ c->c_flags &= ~CALLOUT_DFRMIGRATION;
+
+ /*
+		 * It should be asserted here that the
+		 * callout is not destroyed, but that
+		 * is not easy.
+ */
+ new_cc = callout_cpu_switch(c, cc, new_cpu);
+ callout_cc_add(c, new_cc, new_ticks, new_func, new_arg,
+ new_cpu);
+ CC_UNLOCK(new_cc);
+ CC_LOCK(cc);
+#else
+ panic("migration should not happen");
+#endif
+ }
+#ifdef SMP
+nextc:
+#endif
+ return (cc->cc_next);
+}
+
/*
* The callout mechanism is based on the work of Adam M. Costello and
* George Varghese, published in a technical report entitled "Redesigning
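
The DIAGNOSTIC threshold pulled into softclock_call_cc() above, maxdt = 36893488147419102, is a struct bintime fraction, i.e. a count of 2^-64 second units, which is where the "2 msec" in the comment comes from (0.002 * 2^64 is roughly 3.69 * 10^16). A quick standalone check of that constant (ordinary user-space C, not kernel code):

#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	/* bintime fractions count units of 2^-64 seconds. */
	const uint64_t maxdt = 36893488147419102ULL;

	/* Convert to milliseconds in long double to avoid overflow:
	 * ms = frac / 2^64 * 1000. */
	long double ms = (long double)maxdt /
	    18446744073709551616.0L * 1000.0L;

	printf("maxdt = %.6Lf ms\n", ms);	/* prints ~2.000000 */
	return (0);
}
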
@@ -377,12 +665,6 @@ softclock(void *arg)
int mpcalls;
int lockcalls;
int gcalls;
-#ifdef DIAGNOSTIC
- struct bintime bt1, bt2;
- struct timespec ts2;
- static uint64_t maxdt = 36893488147419102LL; /* 2 msec */
- static timeout_t *lastfunc;
-#endif
#ifndef MAX_SOFTCLOCK_STEPS
#define MAX_SOFTCLOCK_STEPS 100 /* Maximum allowed value of steps. */
@@ -404,7 +686,7 @@ softclock(void *arg)
cc->cc_softticks++;
bucket = &cc->cc_callwheel[curticks & callwheelmask];
c = TAILQ_FIRST(bucket);
- while (c) {
+ while (c != NULL) {
depth++;
if (c->c_time != curticks) {
c = TAILQ_NEXT(c, c_links.tqe);
@@ -419,128 +701,10 @@ softclock(void *arg)
steps = 0;
}
} else {
- void (*c_func)(void *);
- void *c_arg;
- struct lock_class *class;
- struct lock_object *c_lock;
- int c_flags, sharedlock;
-
- cc->cc_next = TAILQ_NEXT(c, c_links.tqe);
TAILQ_REMOVE(bucket, c, c_links.tqe);
- class = (c->c_lock != NULL) ?
- LOCK_CLASS(c->c_lock) : NULL;
- sharedlock = (c->c_flags & CALLOUT_SHAREDLOCK) ?
- 0 : 1;
- c_lock = c->c_lock;
- c_func = c->c_func;
- c_arg = c->c_arg;
- c_flags = c->c_flags;
- if (c->c_flags & CALLOUT_LOCAL_ALLOC) {
- c->c_flags = CALLOUT_LOCAL_ALLOC;
- } else {
- c->c_flags =
- (c->c_flags & ~CALLOUT_PENDING);
- }
- cc->cc_curr = c;
- cc->cc_cancel = 0;
- CC_UNLOCK(cc);
- if (c_lock != NULL) {
- class->lc_lock(c_lock, sharedlock);
- /*
- * The callout may have been cancelled
- * while we switched locks.
- */
- if (cc->cc_cancel) {
- class->lc_unlock(c_lock);
- goto skip;
- }
- /* The callout cannot be stopped now. */
- cc->cc_cancel = 1;
-
- if (c_lock == &Giant.lock_object) {
- gcalls++;
- CTR3(KTR_CALLOUT,
- "callout %p func %p arg %p",
- c, c_func, c_arg);
- } else {
- lockcalls++;
- CTR3(KTR_CALLOUT, "callout lock"
- " %p func %p arg %p",
- c, c_func, c_arg);
- }
- } else {
- mpcalls++;
- CTR3(KTR_CALLOUT,
- "callout mpsafe %p func %p arg %p",
- c, c_func, c_arg);
- }
-#ifdef DIAGNOSTIC
- binuptime(&bt1);
-#endif
-#ifndef __rtems__
- THREAD_NO_SLEEPING();
- SDT_PROBE(callout_execute, kernel, ,
- callout_start, c, 0, 0, 0, 0);
-#endif /* __rtems__ */
- c_func(c_arg);
-#ifndef __rtems__
- SDT_PROBE(callout_execute, kernel, ,
- callout_end, c, 0, 0, 0, 0);
- THREAD_SLEEPING_OK();
-#endif /* __rtems__ */
-#ifdef DIAGNOSTIC
- binuptime(&bt2);
- bintime_sub(&bt2, &bt1);
- if (bt2.frac > maxdt) {
- if (lastfunc != c_func ||
- bt2.frac > maxdt * 2) {
- bintime2timespec(&bt2, &ts2);
- printf(
- "Expensive timeout(9) function: %p(%p) %jd.%09ld s\n",
- c_func, c_arg,
- (intmax_t)ts2.tv_sec,
- ts2.tv_nsec);
- }
- maxdt = bt2.frac;
- lastfunc = c_func;
- }
-#endif
- CTR1(KTR_CALLOUT, "callout %p finished", c);
- if ((c_flags & CALLOUT_RETURNUNLOCKED) == 0)
- class->lc_unlock(c_lock);
- skip:
- CC_LOCK(cc);
- /*
- * If the current callout is locally
- * allocated (from timeout(9))
- * then put it on the freelist.
- *
- * Note: we need to check the cached
- * copy of c_flags because if it was not
- * local, then it's not safe to deref the
- * callout pointer.
- */
- if (c_flags & CALLOUT_LOCAL_ALLOC) {
- KASSERT(c->c_flags ==
- CALLOUT_LOCAL_ALLOC,
- ("corrupted callout"));
- c->c_func = NULL;
- SLIST_INSERT_HEAD(&cc->cc_callfree, c,
- c_links.sle);
- }
- cc->cc_curr = NULL;
- if (cc->cc_waiting) {
- /*
- * There is someone waiting
- * for the callout to complete.
- */
- cc->cc_waiting = 0;
- CC_UNLOCK(cc);
- wakeup(&cc->cc_waiting);
- CC_LOCK(cc);
- }
+ c = softclock_call_cc(c, cc, &mpcalls,
+ &lockcalls, &gcalls);
steps = 0;
- c = cc->cc_next;
}
}
}
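
Both the scan above and callout_cc_add() pick a bucket with c_time & callwheelmask, which behaves like c_time % callwheelsize only because the callwheel allocation rounds callwheelsize up to a power of two, so callwheelmask == callwheelsize - 1. A tiny illustration of the wrap-around, using a hypothetical wheel of 256 buckets:

#include <stdio.h>

int
main(void)
{
	/* Hypothetical wheel size; must be a power of two for the
	 * mask trick to behave like a modulo. */
	const int callwheelsize = 256;
	const int callwheelmask = callwheelsize - 1;
	int c_time;

	for (c_time = 253; c_time < 259; c_time++)
		printf("c_time %d -> bucket %d\n", c_time,
		    c_time & callwheelmask);
	return (0);
}
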
@@ -650,7 +814,6 @@ callout_reset_on(struct callout *c, int to_ticks, void (*ftn)(void *),
*/
if (c->c_flags & CALLOUT_LOCAL_ALLOC)
cpu = c->c_cpu;
-retry:
cc = callout_lock(c);
if (cc->cc_curr == c) {
/*
@@ -682,25 +845,31 @@ retry:
cancelled = 1;
c->c_flags &= ~(CALLOUT_ACTIVE | CALLOUT_PENDING);
}
+
+#ifdef SMP
/*
- * If the lock must migrate we have to check the state again as
- * we can't hold both the new and old locks simultaneously.
+	 * If the callout must migrate, try to perform it immediately.
+ * If the callout is currently running, just defer the migration
+ * to a more appropriate moment.
*/
if (c->c_cpu != cpu) {
- c->c_cpu = cpu;
- CC_UNLOCK(cc);
- goto retry;
+ if (cc->cc_curr == c) {
+ cc->cc_migration_cpu = cpu;
+ cc->cc_migration_ticks = to_ticks;
+ cc->cc_migration_func = ftn;
+ cc->cc_migration_arg = arg;
+ c->c_flags |= CALLOUT_DFRMIGRATION;
+ CTR5(KTR_CALLOUT,
+ "migration of %p func %p arg %p in %d to %u deferred",
+ c, c->c_func, c->c_arg, to_ticks, cpu);
+ CC_UNLOCK(cc);
+ return (cancelled);
+ }
+ cc = callout_cpu_switch(c, cc, cpu);
}
+#endif
- if (to_ticks <= 0)
- to_ticks = 1;
-
- c->c_arg = arg;
- c->c_flags |= (CALLOUT_ACTIVE | CALLOUT_PENDING);
- c->c_func = ftn;
- c->c_time = cc->cc_ticks + to_ticks;
- TAILQ_INSERT_TAIL(&cc->cc_callwheel[c->c_time & callwheelmask],
- c, c_links.tqe);
+ callout_cc_add(c, cc, to_ticks, ftn, arg, cpu);
CTR5(KTR_CALLOUT, "%sscheduled %p func %p arg %p in %d",
cancelled ? "re" : "", c, c->c_func, c->c_arg, to_ticks);
CC_UNLOCK(cc);
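
From a consumer's point of view the deferral above is transparent: callout_reset_on() still returns immediately, and if the handler happens to be running on the old CPU the move is simply replayed by softclock_call_cc() once it finishes. The sketch below shows typical use of the public API with a self-rearming handler; tick_callout, tick_cpu, tick_handler() and tick_start() are hypothetical names, not code from this file.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/callout.h>

static struct callout tick_callout;
static int tick_cpu;

static void
tick_handler(void *arg)
{

	/*
	 * Re-arm one second later on tick_cpu.  If tick_cpu now names a
	 * different CPU, this callout_reset_on() is deferred until the
	 * current run completes and is then migrated by softclock_call_cc().
	 */
	callout_reset_on(&tick_callout, hz, tick_handler, arg, tick_cpu);
}

static void
tick_start(int cpu)
{

	tick_cpu = cpu;
	callout_init(&tick_callout, CALLOUT_MPSAFE);
	callout_reset_on(&tick_callout, hz, tick_handler, NULL, cpu);
}
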
@@ -728,13 +897,13 @@ _callout_stop_safe(c, safe)
struct callout *c;
int safe;
{
- struct callout_cpu *cc;
+ struct callout_cpu *cc, *old_cc;
struct lock_class *class;
#ifndef __rtems__
int use_lock, sq_locked;
#else /* __rtems__ */
int use_lock;
-#endif /* __rtems__ */
+#endif /* __rtems__ */
/*
* Some old subsystems don't hold Giant while running a callout_stop(),
@@ -753,9 +922,30 @@ _callout_stop_safe(c, safe)
#ifndef __rtems__
sq_locked = 0;
+ old_cc = NULL;
again:
-#endif /* __rtems__ */
+#endif /* __rtems__ */
cc = callout_lock(c);
+
+#ifndef __rtems__
+ /*
+ * If the callout was migrating while the callout cpu lock was
+ * dropped, just drop the sleepqueue lock and check the states
+ * again.
+ */
+ if (sq_locked != 0 && cc != old_cc) {
+#ifdef SMP
+ CC_UNLOCK(cc);
+ sleepq_release(&old_cc->cc_waiting);
+ sq_locked = 0;
+ old_cc = NULL;
+ goto again;
+#else
+ panic("migration should not happen");
+#endif
+ }
+#endif /* __rtems__ */
+
/*
* If the callout isn't pending, it's not on the queue, so
* don't attempt to remove it from the queue. We can try to
@@ -775,7 +965,7 @@ again:
#ifndef __rtems__
if (sq_locked)
sleepq_release(&cc->cc_waiting);
-#endif /* __rtems__ */
+#endif /* __rtems__ */
return (0);
}
@@ -810,8 +1000,16 @@ again:
CC_UNLOCK(cc);
sleepq_lock(&cc->cc_waiting);
sq_locked = 1;
+ old_cc = cc;
goto again;
}
+
+ /*
+			 * The migration could be cancelled here, but
+			 * since it is not certain when it would
+			 * actually be carried out, just let softclock()
+			 * take care of it.
+ */
cc->cc_waiting = 1;
DROP_GIANT();
CC_UNLOCK(cc);
@@ -820,6 +1018,7 @@ again:
SLEEPQ_SLEEP, 0);
sleepq_wait(&cc->cc_waiting, 0);
sq_locked = 0;
+ old_cc = NULL;
/* Reacquire locks previously released. */
PICKUP_GIANT();
@@ -847,9 +1046,17 @@ again:
cc->cc_cancel = 1;
CTR3(KTR_CALLOUT, "cancelled %p func %p arg %p",
c, c->c_func, c->c_arg);
+ KASSERT(!cc_cme_migrating(cc),
+ ("callout wrongly scheduled for migration"));
CC_UNLOCK(cc);
KASSERT(!sq_locked, ("sleepqueue chain locked"));
return (1);
+ } else if ((c->c_flags & CALLOUT_DFRMIGRATION) != 0) {
+ c->c_flags &= ~CALLOUT_DFRMIGRATION;
+ CTR3(KTR_CALLOUT, "postponing stop %p func %p arg %p",
+ c, c->c_func, c->c_arg);
+ CC_UNLOCK(cc);
+ return (1);
}
CTR3(KTR_CALLOUT, "failed to stop %p func %p arg %p",
c, c->c_func, c->c_arg);
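
_callout_stop_safe() is the backend shared by callout_stop() and callout_drain(), with safe selecting whether the caller is willing to sleep until a running handler finishes. With this change, a stop that catches a deferred migration clears CALLOUT_DFRMIGRATION and reports success, and softclock_call_cc() later discards the stale migration record instead of replaying it. A short teardown sketch built on the public wrappers follows; mydev_softc and mydev_detach() are hypothetical driver names, not code from this file.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>

struct mydev_softc {			/* hypothetical driver state */
	struct callout	watchdog;
};

static void
mydev_detach(struct mydev_softc *sc)
{

	/*
	 * callout_drain() removes a pending watchdog and, unlike
	 * callout_stop(), also sleeps until a handler that is already
	 * running (or migrating) has finished, after which the softc
	 * can be freed safely.
	 */
	callout_drain(&sc->watchdog);
}
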
@@ -860,23 +1067,16 @@ again:
#ifndef __rtems__
if (sq_locked)
sleepq_release(&cc->cc_waiting);
-#endif /* __rtems__ */
+#endif /* __rtems__ */
c->c_flags &= ~(CALLOUT_ACTIVE | CALLOUT_PENDING);
- if (cc->cc_next == c) {
- cc->cc_next = TAILQ_NEXT(c, c_links.tqe);
- }
- TAILQ_REMOVE(&cc->cc_callwheel[c->c_time & callwheelmask], c,
- c_links.tqe);
-
CTR3(KTR_CALLOUT, "cancelled %p func %p arg %p",
c, c->c_func, c->c_arg);
+ TAILQ_REMOVE(&cc->cc_callwheel[c->c_time & callwheelmask], c,
+ c_links.tqe);
+ callout_cc_del(c, cc);
- if (c->c_flags & CALLOUT_LOCAL_ALLOC) {
- c->c_func = NULL;
- SLIST_INSERT_HEAD(&cc->cc_callfree, c, c_links.sle);
- }
CC_UNLOCK(cc);
return (1);
}