Diffstat (limited to 'freebsd/sys/kern/kern_intr.c')
-rw-r--r--  freebsd/sys/kern/kern_intr.c  188
1 file changed, 94 insertions(+), 94 deletions(-)
diff --git a/freebsd/sys/kern/kern_intr.c b/freebsd/sys/kern/kern_intr.c
index 87b7241d..501b453e 100644
--- a/freebsd/sys/kern/kern_intr.c
+++ b/freebsd/sys/kern/kern_intr.c
@@ -30,6 +30,7 @@
__FBSDID("$FreeBSD$");
#include <rtems/bsd/local/opt_ddb.h>
+#include <rtems/bsd/local/opt_kstack_usage_prof.h>
#include <rtems/bsd/sys/param.h>
#include <sys/bus.h>
@@ -95,18 +96,15 @@ struct intr_event *clk_intr_event;
#ifndef __rtems__
struct intr_event *tty_intr_event;
void *vm_ih;
-#endif /* __rtems__ */
struct proc *intrproc;
+#endif /* __rtems__ */
static MALLOC_DEFINE(M_ITHREAD, "ithread", "Interrupt Threads");
static int intr_storm_threshold = 1000;
-#ifndef __rtems__
-TUNABLE_INT("hw.intr_storm_threshold", &intr_storm_threshold);
-SYSCTL_INT(_hw, OID_AUTO, intr_storm_threshold, CTLFLAG_RW,
+SYSCTL_INT(_hw, OID_AUTO, intr_storm_threshold, CTLFLAG_RWTUN,
&intr_storm_threshold, 0,
"Number of consecutive interrupts before storm protection is enabled");
-#endif /* __rtems__ */
static TAILQ_HEAD(, intr_event) event_list =
TAILQ_HEAD_INITIALIZER(event_list);
static struct mtx event_lock;
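
This hunk also removes the RTEMS-only guards around the storm-protection knob. Upstream FreeBSD folded the boot-time tunable into the sysctl declaration: CTLFLAG_RWTUN makes a single SYSCTL_INT register both the loader tunable and the run-time sysctl, so the separate TUNABLE_INT line becomes redundant. A minimal sketch of the two equivalent forms, using a hypothetical knob (they are alternatives, not meant to coexist):

static int example_threshold = 1000;

/* Old two-step form: loader tunable and sysctl declared separately. */
TUNABLE_INT("hw.example_threshold", &example_threshold);
SYSCTL_INT(_hw, OID_AUTO, example_threshold, CTLFLAG_RW,
    &example_threshold, 0, "Example storm threshold");

/* New combined form: CTLFLAG_RWTUN registers both at once. */
SYSCTL_INT(_hw, OID_AUTO, example_threshold, CTLFLAG_RWTUN,
    &example_threshold, 0, "Example storm threshold");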
@@ -272,7 +270,7 @@ intr_event_update(struct intr_event *ie)
int
intr_event_create(struct intr_event **event, void *source, int flags, int irq,
void (*pre_ithread)(void *), void (*post_ithread)(void *),
- void (*post_filter)(void *), int (*assign_cpu)(void *, u_char),
+ void (*post_filter)(void *), int (*assign_cpu)(void *, int),
const char *fmt, ...)
{
struct intr_event *ie;
@@ -316,9 +314,8 @@ intr_event_create(struct intr_event **event, void *source, int flags, int irq,
* the interrupt event.
*/
int
-intr_event_bind(struct intr_event *ie, u_char cpu)
+intr_event_bind(struct intr_event *ie, int cpu)
{
- cpuset_t mask;
lwpid_t id;
int error;
@@ -339,14 +336,9 @@ intr_event_bind(struct intr_event *ie, u_char cpu)
*/
mtx_lock(&ie->ie_lock);
if (ie->ie_thread != NULL) {
- CPU_ZERO(&mask);
- if (cpu == NOCPU)
- CPU_COPY(cpuset_root, &mask);
- else
- CPU_SET(cpu, &mask);
id = ie->ie_thread->it_thread->td_tid;
mtx_unlock(&ie->ie_lock);
- error = cpuset_setthread(id, &mask);
+ error = cpuset_setithread(id, cpu);
if (error)
return (error);
} else
@@ -355,14 +347,10 @@ intr_event_bind(struct intr_event *ie, u_char cpu)
if (error) {
mtx_lock(&ie->ie_lock);
if (ie->ie_thread != NULL) {
- CPU_ZERO(&mask);
- if (ie->ie_cpu == NOCPU)
- CPU_COPY(cpuset_root, &mask);
- else
- CPU_SET(cpu, &mask);
+ cpu = ie->ie_cpu;
id = ie->ie_thread->it_thread->td_tid;
mtx_unlock(&ie->ie_lock);
- (void)cpuset_setthread(id, &mask);
+ (void)cpuset_setithread(id, cpu);
} else
mtx_unlock(&ie->ie_lock);
return (error);
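
These two hunks track the upstream switch to cpuset_setithread(9), together with the widening of the CPU argument from u_char to int (so NOCPU no longer has to fit in a byte). The helper hides exactly the mask construction the deleted lines spelled out; per those lines, the replaced pattern was:

cpuset_t mask;

CPU_ZERO(&mask);
if (cpu == NOCPU)
        CPU_COPY(cpuset_root, &mask);   /* unbind: allow the whole root set */
else
        CPU_SET(cpu, &mask);            /* bind to the single CPU */
error = cpuset_setthread(id, &mask);

The error path also gets simpler: instead of rebuilding a mask, it re-binds to the previously recorded ie->ie_cpu.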
@@ -395,8 +383,7 @@ intr_setaffinity(int irq, void *m)
{
struct intr_event *ie;
cpuset_t *mask;
- u_char cpu;
- int n;
+ int cpu, n;
mask = m;
cpu = NOCPU;
@@ -410,7 +397,7 @@ intr_setaffinity(int irq, void *m)
continue;
if (cpu != NOCPU)
return (EINVAL);
- cpu = (u_char)n;
+ cpu = n;
}
}
ie = intr_lookup(irq);
@@ -469,6 +456,9 @@ intr_event_destroy(struct intr_event *ie)
static struct intr_thread *
ithread_create(const char *name)
{
+#ifdef __rtems__
+ struct proc *intrproc;
+#endif /* __rtems__ */
struct intr_thread *ithd;
struct thread *td;
int error;
@@ -498,6 +488,9 @@ ithread_create(const char *name)
static struct intr_thread *
ithread_create(const char *name, struct intr_handler *ih)
{
+#ifdef __rtems__
+ struct proc *intrproc;
+#endif /* __rtems__ */
struct intr_thread *ithd;
struct thread *td;
int error;
@@ -659,7 +652,7 @@ intr_event_add_handler(struct intr_event *ie, const char *name,
mtx_lock(&ie->ie_lock);
it->it_event = ie;
ih->ih_thread = it;
- ithread_update(it); // XXX - do we really need this?!?!?
+ ithread_update(it); /* XXX - do we really need this?!?!? */
} else { /* Create the global per-event thread if we need one. */
while (ie->ie_thread == NULL && handler != NULL) {
if (ie->ie_flags & IE_ADDING_THREAD)
@@ -733,9 +726,9 @@ intr_event_describe_handler(struct intr_event *ie, void *cookie,
* description at that point. If one is not found, find the
* end of the name to use as the insertion point.
*/
- start = index(ih->ih_name, ':');
+ start = strchr(ih->ih_name, ':');
if (start == NULL)
- start = index(ih->ih_name, 0);
+ start = strchr(ih->ih_name, 0);
/*
* See if there is enough remaining room in the string for the
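
index() is the historical BSD spelling of strchr(), since removed from the kernel; the replacement is behavior-for-behavior identical. Only the second call is subtle: searching for the NUL terminator returns a pointer to the end of the string rather than NULL. A small illustration with a hypothetical handler name:

char name[] = "em0: rx";
char *start;

start = strchr(name, ':');      /* points at the ':' */
if (start == NULL)
        start = strchr(name, 0);        /* no ':': points at the trailing
                                           NUL, i.e. name + strlen(name) */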
@@ -877,6 +870,12 @@ ok:
* Ensure that the thread will process the handler list
* again and remove this handler if it has already passed
* it on the list.
+ *
+ * The release part of the following store ensures
+ * that the update of ih_flags is ordered before the
+ * it_need setting. See the comment before
+ * atomic_cmpset_acq(&ithd->it_need, ...) operation in
+ * the ithread_execute_handlers().
*/
atomic_store_rel_int(&ie->ie_thread->it_need, 1);
} else
@@ -938,13 +937,10 @@ intr_event_schedule_thread(struct intr_event *ie)
* If any of the handlers for this ithread claim to be good
* sources of entropy, then gather some.
*/
- if (harvest.interrupt && ie->ie_flags & IE_ENTROPY) {
- CTR3(KTR_INTR, "%s: pid %d (%s) gathering entropy", __func__,
- p->p_pid, td->td_name);
+ if (ie->ie_flags & IE_ENTROPY) {
entropy.event = (uintptr_t)ie;
entropy.td = ctd;
- random_harvest(&entropy, sizeof(entropy), 2, 0,
- RANDOM_INTERRUPT);
+ random_harvest_queue(&entropy, sizeof(entropy), 2, RANDOM_INTERRUPT);
}
#ifndef __rtems__
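
The entropy hunks follow the random(4) rework: the per-source gates (harvest.interrupt, harvest.swi) moved inside the framework, the KTR trace lines went away, and the call lost its fractional-bits argument. Side by side, as in this diff:

random_harvest(&entropy, sizeof(entropy), 2, 0, RANDOM_INTERRUPT);      /* old */
random_harvest_queue(&entropy, sizeof(entropy), 2, RANDOM_INTERRUPT);   /* new */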
@@ -955,6 +951,10 @@ intr_event_schedule_thread(struct intr_event *ie)
* Set it_need to tell the thread to keep running if it is already
* running. Then, lock the thread and see if we actually need to
* put it on the runqueue.
+ *
+ * Use store_rel to arrange that the store to ih_need in
+ * swi_sched() is before the store to it_need and prepare for
+ * transfer of this order to loads in the ithread.
*/
atomic_store_rel_int(&it->it_need, 1);
thread_lock(td);
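
The new comment documents the release half of a handoff that recurs throughout this diff: the plain ih->ih_need = 1 store in swi_sched() needs no barrier of its own because this release store publishes it. Condensed from the surrounding hunks, the producer sequence is:

ih->ih_need = 1;                        /* swi_sched(): plain store */
atomic_store_rel_int(&it->it_need, 1);  /* release: orders the store above */

Any thread that later observes it_need == 1 with acquire semantics is then guaranteed to see ih_need == 1 as well.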
@@ -1042,6 +1042,12 @@ ok:
* Ensure that the thread will process the handler list
* again and remove this handler if it has already passed
* it on the list.
+ *
+ * The release part of the following store ensures
+ * that the update of ih_flags is ordered before the
+ * it_need setting. See the comment before
+ * atomic_cmpset_acq(&ithd->it_need, ...) operation in
+ * the ithread_execute_handlers().
*/
atomic_store_rel_int(&it->it_need, 1);
} else
@@ -1107,13 +1113,10 @@ intr_event_schedule_thread(struct intr_event *ie, struct intr_thread *it)
* If any of the handlers for this ithread claim to be good
* sources of entropy, then gather some.
*/
- if (harvest.interrupt && ie->ie_flags & IE_ENTROPY) {
- CTR3(KTR_INTR, "%s: pid %d (%s) gathering entropy", __func__,
- p->p_pid, td->td_name);
+ if (ie->ie_flags & IE_ENTROPY) {
entropy.event = (uintptr_t)ie;
entropy.td = ctd;
- random_harvest(&entropy, sizeof(entropy), 2, 0,
- RANDOM_INTERRUPT);
+ random_harvest_queue(&entropy, sizeof(entropy), 2, RANDOM_INTERRUPT);
}
KASSERT(p != NULL, ("ithread %s has no process", ie->ie_name));
@@ -1122,6 +1125,10 @@ intr_event_schedule_thread(struct intr_event *ie, struct intr_thread *it)
* Set it_need to tell the thread to keep running if it is already
* running. Then, lock the thread and see if we actually need to
* put it on the runqueue.
+ *
+ * Use store_rel to arrange that the store to ih_need in
+ * swi_sched() is before the store to it_need and prepare for
+ * transfer of this order to loads in the ithread.
*/
atomic_store_rel_int(&it->it_need, 1);
thread_lock(td);
@@ -1147,7 +1154,7 @@ intr_event_schedule_thread(struct intr_event *ie, struct intr_thread *it)
* a PIC.
*/
static int
-swi_assign_cpu(void *arg, u_char cpu)
+swi_assign_cpu(void *arg, int cpu)
{
return (0);
@@ -1161,7 +1168,6 @@ int
swi_add(struct intr_event **eventp, const char *name, driver_intr_t handler,
void *arg, int pri, enum intr_type flags, void **cookiep)
{
- struct thread *td;
struct intr_event *ie;
int error;
@@ -1183,19 +1189,7 @@ swi_add(struct intr_event **eventp, const char *name, driver_intr_t handler,
}
error = intr_event_add_handler(ie, name, NULL, handler, arg,
PI_SWI(pri), flags, cookiep);
- if (error)
- return (error);
-#ifndef __rtems__
- if (pri == SWI_CLOCK) {
- td = ie->ie_thread->it_thread;
- thread_lock(td);
- td->td_flags |= TDF_NOLOAD;
- thread_unlock(td);
- }
-#else /* __rtems__ */
- // Do _not_ ignore the thread in the load avarage
-#endif /* __rtems__ */
- return (0);
+ return (error);
}
/*
@@ -1212,21 +1206,16 @@ swi_sched(void *cookie, int flags)
CTR3(KTR_INTR, "swi_sched: %s %s need=%d", ie->ie_name, ih->ih_name,
ih->ih_need);
- if (harvest.swi) {
- CTR2(KTR_INTR, "swi_sched: pid %d (%s) gathering entropy",
- curproc->p_pid, curthread->td_name);
- entropy.event = (uintptr_t)ih;
- entropy.td = curthread;
- random_harvest(&entropy, sizeof(entropy), 1, 0,
- RANDOM_INTERRUPT);
- }
+ entropy.event = (uintptr_t)ih;
+ entropy.td = curthread;
+ random_harvest_queue(&entropy, sizeof(entropy), 1, RANDOM_SWI);
/*
* Set ih_need for this handler so that if the ithread is already
* running it will execute this handler on the next pass. Otherwise,
* it will execute it the next time it runs.
*/
- atomic_store_rel_int(&ih->ih_need, 1);
+ ih->ih_need = 1;
if (!(flags & SWI_DELAY)) {
#ifndef __rtems__
@@ -1320,13 +1309,14 @@ intr_event_execute_handlers(struct proc *p, struct intr_event *ie)
* For software interrupt threads, we only execute
* handlers that have their need flag set. Hardware
* interrupt threads always invoke all of their handlers.
+ *
+ * ih_need can only be 0 or 1. Failed cmpset below
+ * means that there is no request to execute handlers,
+ * so a retry of the cmpset is not needed.
*/
- if (ie->ie_flags & IE_SOFT) {
- if (atomic_load_acq_int(&ih->ih_need) == 0)
- continue;
- else
- atomic_store_rel_int(&ih->ih_need, 0);
- }
+ if ((ie->ie_flags & IE_SOFT) != 0 &&
+ atomic_cmpset_int(&ih->ih_need, 1, 0) == 0)
+ continue;
/* Execute this handler. */
CTR6(KTR_INTR, "%s: pid %d exec %p(%p) for %s flg=%x",
@@ -1434,17 +1424,13 @@ ithread_loop(void *arg)
* Service interrupts. If another interrupt arrives while
* we are running, it will set it_need to note that we
* should make another pass.
+ *
+ * The load_acq part of the following cmpset ensures
+ * that the load of ih_need in ithread_execute_handlers()
+ * is ordered after the load of it_need here.
*/
- while (atomic_load_acq_int(&ithd->it_need) != 0) {
- /*
- * This might need a full read and write barrier
- * to make sure that this write posts before any
- * of the memory or device accesses in the
- * handlers.
- */
- atomic_store_rel_int(&ithd->it_need, 0);
+ while (atomic_cmpset_acq_int(&ithd->it_need, 1, 0) != 0)
ithread_execute_handlers(p, ie);
- }
WITNESS_WARN(WARN_PANIC, NULL, "suspending ithread");
mtx_assert(&Giant, MA_NOTOWNED);
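
This cmpset loop is the acquire half of the pairing: a successful atomic_cmpset_acq_int(&ithd->it_need, 1, 0) both claims the wakeup and guarantees that the ih_need and ih_flags stores made before the matching release store are visible, which is what lets the old explicit load_acq/store_rel pair and its "might need a full barrier" caveat go away. For experimenting with the pattern outside the kernel, here is a hypothetical C11 model (userspace names, not the kernel API):

#include <stdatomic.h>

static atomic_int ih_need;      /* per-handler request, cf. ih_need */
static atomic_int it_need;      /* per-thread wakeup, cf. it_need */

static void
model_swi_sched(void)
{
        /* Relaxed is enough: the release store below orders it. */
        atomic_store_explicit(&ih_need, 1, memory_order_relaxed);
        atomic_store_explicit(&it_need, 1, memory_order_release);
}

static void
model_ithread_loop(void)
{
        for (;;) {
                int expected = 1;

                /*
                 * Acquire CAS: success makes the ih_need store visible;
                 * failure simply means there is no pending request.
                 */
                if (!atomic_compare_exchange_strong_explicit(&it_need,
                    &expected, 0, memory_order_acquire, memory_order_relaxed))
                        break;
                if (atomic_exchange_explicit(&ih_need, 0,
                    memory_order_relaxed) != 0) {
                        /* execute the handler here */
                }
        }
}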
@@ -1454,8 +1440,8 @@ ithread_loop(void *arg)
* set again, so we have to check it again.
*/
thread_lock(td);
- if ((atomic_load_acq_int(&ithd->it_need) == 0) &&
- !(ithd->it_flags & (IT_DEAD | IT_WAIT))) {
+ if (atomic_load_acq_int(&ithd->it_need) == 0 &&
+ (ithd->it_flags & (IT_DEAD | IT_WAIT)) == 0) {
#ifndef __rtems__
TD_SET_IWAIT(td);
ie->ie_count = 0;
@@ -1507,6 +1493,10 @@ intr_event_handle(struct intr_event *ie, struct trapframe *frame)
td = curthread;
+#ifdef KSTACK_USAGE_PROF
+ intr_prof_stack_use(td, frame);
+#endif
+
/* An interrupt with no event or handlers is a stray interrupt. */
if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers))
return (EINVAL);
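
The intr_prof_stack_use() hook is what the new opt_kstack_usage_prof.h include at the top of the file exists for: option headers are generated from the kernel configuration, so the profiling code (which samples kernel-stack depth at interrupt entry) is compiled in only when the option is set, for instance via a kernel-config line of the form:

options         KSTACK_USAGE_PROF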
@@ -1573,12 +1563,7 @@ intr_event_handle(struct intr_event *ie, struct trapframe *frame)
/* Schedule the ithread if needed. */
if (thread) {
error = intr_event_schedule_thread(ie);
-#ifndef XEN
KASSERT(error == 0, ("bad stray interrupt"));
-#else
- if (error != 0)
- log(LOG_WARNING, "bad stray interrupt");
-#endif
}
critical_exit();
td->td_intr_nesting_level--;
@@ -1631,15 +1616,12 @@ ithread_loop(void *arg)
* Service interrupts. If another interrupt arrives while
* we are running, it will set it_need to note that we
* should make another pass.
+ *
+ * The load_acq part of the following cmpset ensures
+ * that the load of ih_need in ithread_execute_handlers()
+ * is ordered after the load of it_need here.
*/
- while (atomic_load_acq_int(&ithd->it_need) != 0) {
- /*
- * This might need a full read and write barrier
- * to make sure that this write posts before any
- * of the memory or device accesses in the
- * handlers.
- */
- atomic_store_rel_int(&ithd->it_need, 0);
+ while (atomic_cmpset_acq_int(&ithd->it_need, 1, 0) != 0) {
if (priv)
priv_ithread_execute_handler(p, ih);
else
@@ -1654,8 +1636,8 @@ ithread_loop(void *arg)
* set again, so we have to check it again.
*/
thread_lock(td);
- if ((atomic_load_acq_int(&ithd->it_need) == 0) &&
- !(ithd->it_flags & (IT_DEAD | IT_WAIT))) {
+ if (atomic_load_acq_int(&ithd->it_need) == 0 &&
+ (ithd->it_flags & (IT_DEAD | IT_WAIT)) == 0) {
TD_SET_IWAIT(td);
ie->ie_count = 0;
mi_switch(SW_VOL | SWT_IWAIT, NULL);
@@ -1953,8 +1935,8 @@ DB_SHOW_COMMAND(intr, db_show_intr)
struct intr_event *ie;
int all, verbose;
- verbose = index(modif, 'v') != NULL;
- all = index(modif, 'a') != NULL;
+ verbose = strchr(modif, 'v') != NULL;
+ all = strchr(modif, 'a') != NULL;
TAILQ_FOREACH(ie, &event_list, ie_list) {
if (!all && TAILQ_EMPTY(&ie->ie_handlers))
continue;
@@ -1999,6 +1981,24 @@ SYSCTL_PROC(_hw, OID_AUTO, intrnames, CTLTYPE_OPAQUE | CTLFLAG_RD,
static int
sysctl_intrcnt(SYSCTL_HANDLER_ARGS)
{
+#ifdef SCTL_MASK32
+ uint32_t *intrcnt32;
+ unsigned i;
+ int error;
+
+ if (req->flags & SCTL_MASK32) {
+ if (!req->oldptr)
+ return (sysctl_handle_opaque(oidp, NULL, sintrcnt / 2, req));
+ intrcnt32 = malloc(sintrcnt / 2, M_TEMP, M_NOWAIT);
+ if (intrcnt32 == NULL)
+ return (ENOMEM);
+ for (i = 0; i < sintrcnt / sizeof (u_long); i++)
+ intrcnt32[i] = intrcnt[i];
+ error = sysctl_handle_opaque(oidp, intrcnt32, sintrcnt / 2, req);
+ free(intrcnt32, M_TEMP);
+ return (error);
+ }
+#endif
return (sysctl_handle_opaque(oidp, intrcnt, sintrcnt, req));
}
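
The SCTL_MASK32 branch services 32-bit consumers on a 64-bit kernel: intrcnt is an array of u_long (8 bytes on LP64), so the handler exports the same number of entries as uint32_t values (hence the sintrcnt / 2 sizing), truncating each counter to 32 bits. A userland sketch of reading the counters (assumes a FreeBSD host and that the counters are exported as hw.intrcnt; the element width seen follows the ABI of the calling process):

#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
        u_long *counts;         /* entries are uint32_t in a 32-bit binary */
        size_t i, len, n;

        if (sysctlbyname("hw.intrcnt", NULL, &len, NULL, 0) != 0)
                return (1);
        if ((counts = malloc(len)) == NULL ||
            sysctlbyname("hw.intrcnt", counts, &len, NULL, 0) != 0)
                return (1);
        n = len / sizeof(counts[0]);
        for (i = 0; i < n; i++)
                printf("intr %zu: %lu\n", i, counts[i]);
        free(counts);
        return (0);
}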