Diffstat (limited to 'freebsd/sys/kern/kern_intr.c')
-rw-r--r--  freebsd/sys/kern/kern_intr.c  104
1 file changed, 95 insertions(+), 9 deletions(-)
diff --git a/freebsd/sys/kern/kern_intr.c b/freebsd/sys/kern/kern_intr.c
index 20117eac..9c647eb6 100644
--- a/freebsd/sys/kern/kern_intr.c
+++ b/freebsd/sys/kern/kern_intr.c
@@ -756,6 +756,28 @@ intr_event_barrier(struct intr_event *ie)
atomic_thread_fence_acq();
}
+static void
+intr_handler_barrier(struct intr_handler *handler)
+{
+ struct intr_event *ie;
+
+ ie = handler->ih_event;
+ mtx_assert(&ie->ie_lock, MA_OWNED);
+ KASSERT((handler->ih_flags & IH_DEAD) == 0,
+ ("update for a removed handler"));
+
+ if (ie->ie_thread == NULL) {
+ intr_event_barrier(ie);
+ return;
+ }
+ if ((handler->ih_flags & IH_CHANGED) == 0) {
+ handler->ih_flags |= IH_CHANGED;
+ intr_event_schedule_thread(ie);
+ }
+ while ((handler->ih_flags & IH_CHANGED) != 0)
+ msleep(handler, &ie->ie_lock, 0, "ih_barr", 0);
+}
+
/*
* Sleep until an ithread finishes executing an interrupt handler.
*
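
This first hunk adds intr_handler_barrier(), a per-handler companion to intr_event_barrier(). If the event has no ithread, a plain intr_event_barrier() against intr_event_handle() is enough. Otherwise the caller, which already holds ie_lock, marks the handler IH_CHANGED, kicks the ithread via intr_event_schedule_thread(), and msleep()s on the handler until the ithread has seen and cleared the flag; any ithread invocation that started before the caller's flag update has then finished. Below is a minimal user-space sketch of that handshake, with pthreads standing in for ie_lock/msleep()/wakeup(); all names are illustrative, none of this is kernel API.

/*
 * Sketch only: the IH_CHANGED handshake, with pthreads in place of
 * ie_lock, msleep() and wakeup().
 */
#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  ack  = PTHREAD_COND_INITIALIZER;
static bool changed;    /* stands in for IH_CHANGED */
static bool suspended;  /* stands in for IH_SUSP */

/* Caller side: roughly what intr_handler_barrier() does. */
static void
handler_barrier(void)
{
    pthread_mutex_lock(&lock);
    changed = true;                 /* ask the worker to acknowledge */
    /* the kernel kicks the ithread here via intr_event_schedule_thread() */
    while (changed)
        pthread_cond_wait(&ack, &lock);     /* msleep(handler, ...) */
    pthread_mutex_unlock(&lock);
}

/* Worker side: roughly one pass of the ithread over this handler. */
static void
worker_pass(void)
{
    bool skip;

    pthread_mutex_lock(&lock);
    if (changed) {
        changed = false;            /* acknowledge the flag change */
        pthread_cond_broadcast(&ack);       /* wakeup(handler) */
    }
    skip = suspended;
    pthread_mutex_unlock(&lock);
    if (skip)
        return;                     /* suspended: do not run the handler */
    /* ... the real handler body would run here ... */
}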
@@ -880,6 +902,49 @@ intr_event_remove_handler(void *cookie)
}
#endif /* __rtems__ */
+int
+intr_event_suspend_handler(void *cookie)
+{
+ struct intr_handler *handler = (struct intr_handler *)cookie;
+ struct intr_event *ie;
+
+ if (handler == NULL)
+ return (EINVAL);
+ ie = handler->ih_event;
+ KASSERT(ie != NULL,
+ ("interrupt handler \"%s\" has a NULL interrupt event",
+ handler->ih_name));
+ mtx_lock(&ie->ie_lock);
+ handler->ih_flags |= IH_SUSP;
+ intr_handler_barrier(handler);
+ mtx_unlock(&ie->ie_lock);
+ return (0);
+}
+
+int
+intr_event_resume_handler(void *cookie)
+{
+ struct intr_handler *handler = (struct intr_handler *)cookie;
+ struct intr_event *ie;
+
+ if (handler == NULL)
+ return (EINVAL);
+ ie = handler->ih_event;
+ KASSERT(ie != NULL,
+ ("interrupt handler \"%s\" has a NULL interrupt event",
+ handler->ih_name));
+
+ /*
+ * intr_handler_barrier() acts not only as a barrier,
+ * it also allows checking for any pending interrupts.
+ */
+ mtx_lock(&ie->ie_lock);
+ handler->ih_flags &= ~IH_SUSP;
+ intr_handler_barrier(handler);
+ mtx_unlock(&ie->ie_lock);
+ return (0);
+}
+
static int
intr_event_schedule_thread(struct intr_event *ie)
{
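
These two entry points simply flip IH_SUSP under ie_lock and then run intr_handler_barrier(), so that when intr_event_suspend_handler() returns, the handler is neither running nor going to be invoked again until intr_event_resume_handler() is called. A hedged usage sketch follows; the driver name, softc layout and the reconfiguration step are invented, and the cookie is assumed to be the one filled in by intr_event_add_handler().

/*
 * Sketch only: suspend a handler around device reconfiguration.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/interrupt.h>

struct mydev_softc {
    void *intr_cookie;      /* from intr_event_add_handler() */
    /* ... other driver state ... */
};

static int
mydev_reconfigure(struct mydev_softc *sc)
{
    int error;

    /* Guarantee the handler is not running and will not be invoked. */
    error = intr_event_suspend_handler(sc->intr_cookie);
    if (error != 0)
        return (error);

    /* ... reprogram device registers while nothing races us ... */

    /*
     * Resume; the barrier inside also lets the ithread pick up any
     * interrupt that arrived while the handler was suspended.
     */
    return (intr_event_resume_handler(sc->intr_cookie));
}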
@@ -1068,10 +1133,21 @@ intr_event_execute_handlers(struct proc *p, struct intr_event *ie)
*/
ihp = ih;
+ if ((ih->ih_flags & IH_CHANGED) != 0) {
+ mtx_lock(&ie->ie_lock);
+ ih->ih_flags &= ~IH_CHANGED;
+ wakeup(ih);
+ mtx_unlock(&ie->ie_lock);
+ }
+
/* Skip filter only handlers */
if (ih->ih_handler == NULL)
continue;
+ /* Skip suspended handlers */
+ if ((ih->ih_flags & IH_SUSP) != 0)
+ continue;
+
/*
* For software interrupt threads, we only execute
* handlers that have their need flag set. Hardware
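
This hunk is the ithread side of the barrier handshake sketched after the first hunk: before deciding whether to run a handler, the loop acknowledges a pending IH_CHANGED under ie_lock and wakes the thread sleeping in intr_handler_barrier(), then skips filter-only handlers and handlers currently marked IH_SUSP, so a suspended handler is never entered from the ithread either.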
@@ -1255,8 +1331,9 @@ intr_event_handle(struct intr_event *ie, struct trapframe *frame)
struct intr_handler *ih;
struct trapframe *oldframe;
struct thread *td;
- int ret, thread;
int phase;
+ int ret;
+ bool filter, thread;
td = curthread;
@@ -1275,7 +1352,8 @@ intr_event_handle(struct intr_event *ie, struct trapframe *frame)
* a trapframe as its argument.
*/
td->td_intr_nesting_level++;
- thread = 0;
+ filter = false;
+ thread = false;
ret = 0;
critical_enter();
oldframe = td->td_intr_frame;
@@ -1291,8 +1369,10 @@ intr_event_handle(struct intr_event *ie, struct trapframe *frame)
atomic_thread_fence_seq_cst();
CK_SLIST_FOREACH(ih, &ie->ie_handlers, ih_next) {
+ if ((ih->ih_flags & IH_SUSP) != 0)
+ continue;
if (ih->ih_filter == NULL) {
- thread = 1;
+ thread = true;
continue;
}
CTR4(KTR_INTR, "%s: exec %p(%p) for %s", __func__,
@@ -1307,24 +1387,25 @@ intr_event_handle(struct intr_event *ie, struct trapframe *frame)
(ret & ~(FILTER_SCHEDULE_THREAD | FILTER_HANDLED)) == 0),
("%s: incorrect return value %#x from %s", __func__, ret,
ih->ih_name));
+ filter = filter || ret == FILTER_HANDLED;
- /*
+ /*
* Wrapper handler special handling:
*
- * in some particular cases (like pccard and pccbb),
+ * in some particular cases (like pccard and pccbb),
* the _real_ device handler is wrapped in a couple of
* functions - a filter wrapper and an ithread wrapper.
- * In this case (and just in this case), the filter wrapper
+ * In this case (and just in this case), the filter wrapper
* could ask the system to schedule the ithread and mask
* the interrupt source if the wrapped handler is composed
* of just an ithread handler.
*
- * TODO: write a generic wrapper to avoid people rolling
- * their own
+ * TODO: write a generic wrapper to avoid people rolling
+ * their own.
*/
if (!thread) {
if (ret == FILTER_SCHEDULE_THREAD)
- thread = 1;
+ thread = true;
}
}
atomic_add_rel_int(&ie->ie_active[phase], -1);
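
In intr_event_handle() the filter loop now skips handlers flagged IH_SUSP before even looking at ih_filter, and the new `filter` boolean records whether any filter actually claimed the interrupt (FILTER_HANDLED). Filters keep their usual contract; the sketch below shows the three return values this loop interprets, with invented register offsets, bit masks and softc layout.

/*
 * Sketch only: a typical filter routine; MYDEV_* and the softc are
 * hypothetical, the FILTER_* values are the real sys/bus.h constants.
 */
#include <sys/param.h>
#include <sys/bus.h>
#include <sys/rman.h>
#include <machine/bus.h>

#define MYDEV_ISR           0x00    /* hypothetical status register */
#define MYDEV_NEEDS_THREAD  0x01    /* hypothetical "work deferred" bit */

struct mydev_softc {
    struct resource *mem_res;       /* memory-mapped registers */
};

static int
mydev_filter(void *arg)
{
    struct mydev_softc *sc = arg;
    uint32_t status;

    status = bus_read_4(sc->mem_res, MYDEV_ISR);
    if (status == 0)
        return (FILTER_STRAY);          /* not our interrupt */

    bus_write_4(sc->mem_res, MYDEV_ISR, status);    /* ack at the device */

    if (status & MYDEV_NEEDS_THREAD)
        return (FILTER_SCHEDULE_THREAD); /* let the ithread finish the work */
    return (FILTER_HANDLED);
}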
@@ -1348,6 +1429,11 @@ intr_event_handle(struct intr_event *ie, struct trapframe *frame)
}
critical_exit();
td->td_intr_nesting_level--;
+#ifdef notyet
+ /* The interrupt is not acknowledged by any filter and has no ithread. */
+ if (!thread && !filter)
+ return (EINVAL);
+#endif
return (0);
}
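
The closing hunk keeps the consumer of those two booleans under #ifdef notyet: an interrupt that no filter acknowledged and that scheduled no ithread could be reported to the caller as stray instead of being silently dropped, which matters once every handler on a line is suspended. A hedged sketch of how an architecture-level dispatcher might consume that future return value; the surrounding function and the logging policy are illustrative.

/*
 * Sketch only: once the "notyet" check is enabled, a non-zero return
 * from intr_event_handle() means nothing claimed the interrupt.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/interrupt.h>

static void
platform_irq_dispatch(struct intr_event *ie, struct trapframe *tf, u_int irq)
{
    if (ie == NULL || intr_event_handle(ie, tf) != 0)
        printf("stray interrupt %u\n", irq);
}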