author	Sebastian Huber <sebastian.huber@embedded-brains.de>	2018-08-07 12:12:37 +0200
committer	Sebastian Huber <sebastian.huber@embedded-brains.de>	2018-09-21 10:29:36 +0200
commit	de261e0404e1fe54544275fc57d5b982df4f42b4 (patch)
tree	856cbdf23d6809b99c4d642d066bc45cd67c26e6 /freebsd/sys/kern/kern_intr.c
parent	libbsd.txt: Use rtems_bsd_ifconfig_lo0() (diff)
download	rtems-libbsd-de261e0404e1fe54544275fc57d5b982df4f42b4.tar.bz2
Update to FreeBSD head 2017-06-01
Git mirror commit dfb26efac4ce9101dda240e94d9ab53f80a9e131. Update #3472.
Diffstat (limited to 'freebsd/sys/kern/kern_intr.c')
-rw-r--r--	freebsd/sys/kern/kern_intr.c	136
1 file changed, 105 insertions(+), 31 deletions(-)
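The substance of the kern_intr.c part of this import is FreeBSD's split interrupt-affinity API: intr_event_bind() is refactored into _intr_event_bind() plus irq-only and ithread-only wrappers, and intr_setaffinity()/intr_getaffinity() gain a mode argument (CPU_WHICH_IRQ, CPU_WHICH_INTRHANDLER or CPU_WHICH_ITHREAD). A minimal kernel-side sketch of the new wrappers follows; pin_my_irq() and the CPU ids are hypothetical, while the intr_event_bind_irqonly()/intr_event_bind_ithread() signatures are the ones added by the diff below.

/*
 * Sketch: place the primary interrupt context and its ithread on
 * different CPUs. pin_my_irq() and the CPU ids are hypothetical;
 * error handling is abbreviated.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/interrupt.h>

static int
pin_my_irq(struct intr_event *ie)
{
	int error;

	/* Bind only the hardware interrupt context to CPU 1. */
	error = intr_event_bind_irqonly(ie, 1);
	if (error != 0)
		return (error);

	/* Run the handler's ithread on CPU 2 instead. */
	return (intr_event_bind_ithread(ie, 2));
}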
diff --git a/freebsd/sys/kern/kern_intr.c b/freebsd/sys/kern/kern_intr.c
index c3d30c31..fbb5a1e9 100644
--- a/freebsd/sys/kern/kern_intr.c
+++ b/freebsd/sys/kern/kern_intr.c
@@ -314,13 +314,11 @@ intr_event_create(struct intr_event **event, void *source, int flags, int irq,
/*
* Bind an interrupt event to the specified CPU. Note that not all
* platforms support binding an interrupt to a CPU. For those
- * platforms this request will fail. For supported platforms, any
- * associated ithreads as well as the primary interrupt context will
- * be bound to the specificed CPU. Using a cpu id of NOCPU unbinds
+ * platforms this request will fail. Using a cpu id of NOCPU unbinds
* the interrupt event.
*/
-int
-intr_event_bind(struct intr_event *ie, int cpu)
+static int
+_intr_event_bind(struct intr_event *ie, int cpu, bool bindirq, bool bindithread)
{
lwpid_t id;
int error;
@@ -340,35 +338,75 @@ intr_event_bind(struct intr_event *ie, int cpu)
* If we have any ithreads try to set their mask first to verify
* permissions, etc.
*/
- mtx_lock(&ie->ie_lock);
- if (ie->ie_thread != NULL) {
- id = ie->ie_thread->it_thread->td_tid;
- mtx_unlock(&ie->ie_lock);
- error = cpuset_setithread(id, cpu);
- if (error)
- return (error);
- } else
- mtx_unlock(&ie->ie_lock);
- error = ie->ie_assign_cpu(ie->ie_source, cpu);
- if (error) {
+ if (bindithread) {
mtx_lock(&ie->ie_lock);
if (ie->ie_thread != NULL) {
- cpu = ie->ie_cpu;
id = ie->ie_thread->it_thread->td_tid;
mtx_unlock(&ie->ie_lock);
- (void)cpuset_setithread(id, cpu);
+ error = cpuset_setithread(id, cpu);
+ if (error)
+ return (error);
} else
mtx_unlock(&ie->ie_lock);
+ }
+ if (bindirq)
+ error = ie->ie_assign_cpu(ie->ie_source, cpu);
+ else
+ error = 0;
+ if (error) {
+ if (bindithread) {
+ mtx_lock(&ie->ie_lock);
+ if (ie->ie_thread != NULL) {
+ cpu = ie->ie_cpu;
+ id = ie->ie_thread->it_thread->td_tid;
+ mtx_unlock(&ie->ie_lock);
+ (void)cpuset_setithread(id, cpu);
+ } else
+ mtx_unlock(&ie->ie_lock);
+ }
return (error);
}
- mtx_lock(&ie->ie_lock);
- ie->ie_cpu = cpu;
- mtx_unlock(&ie->ie_lock);
+ if (bindirq) {
+ mtx_lock(&ie->ie_lock);
+ ie->ie_cpu = cpu;
+ mtx_unlock(&ie->ie_lock);
+ }
return (error);
}

+/*
+ * Bind an interrupt event to the specified CPU. For supported platforms, any
+ * associated ithreads as well as the primary interrupt context will be bound
+ * to the specified CPU.
+ */
+int
+intr_event_bind(struct intr_event *ie, int cpu)
+{
+
+ return (_intr_event_bind(ie, cpu, true, true));
+}
+
+/*
+ * Bind an interrupt event to the specified CPU, but do not bind associated
+ * ithreads.
+ */
+int
+intr_event_bind_irqonly(struct intr_event *ie, int cpu)
+{
+
+ return (_intr_event_bind(ie, cpu, true, false));
+}
+
+/*
+ * Bind an interrupt event's ithread to the specified CPU.
+ */
+int
+intr_event_bind_ithread(struct intr_event *ie, int cpu)
+{
+
+ return (_intr_event_bind(ie, cpu, false, true));
+}
+
static struct intr_event *
intr_lookup(int irq)
{
@@ -385,7 +423,7 @@ intr_lookup(int irq)
}

int
-intr_setaffinity(int irq, void *m)
+intr_setaffinity(int irq, int mode, void *m)
{
struct intr_event *ie;
cpuset_t *mask;
@@ -409,26 +447,62 @@ intr_setaffinity(int irq, void *m)
ie = intr_lookup(irq);
if (ie == NULL)
return (ESRCH);
- return (intr_event_bind(ie, cpu));
+ switch (mode) {
+ case CPU_WHICH_IRQ:
+ return (intr_event_bind(ie, cpu));
+ case CPU_WHICH_INTRHANDLER:
+ return (intr_event_bind_irqonly(ie, cpu));
+ case CPU_WHICH_ITHREAD:
+ return (intr_event_bind_ithread(ie, cpu));
+ default:
+ return (EINVAL);
+ }
}

int
-intr_getaffinity(int irq, void *m)
+intr_getaffinity(int irq, int mode, void *m)
{
struct intr_event *ie;
+ struct thread *td;
+ struct proc *p;
cpuset_t *mask;
+ lwpid_t id;
+ int error;

mask = m;
ie = intr_lookup(irq);
if (ie == NULL)
return (ESRCH);
+
+ error = 0;
CPU_ZERO(mask);
- mtx_lock(&ie->ie_lock);
- if (ie->ie_cpu == NOCPU)
- CPU_COPY(cpuset_root, mask);
- else
- CPU_SET(ie->ie_cpu, mask);
- mtx_unlock(&ie->ie_lock);
+ switch (mode) {
+ case CPU_WHICH_IRQ:
+ case CPU_WHICH_INTRHANDLER:
+ mtx_lock(&ie->ie_lock);
+ if (ie->ie_cpu == NOCPU)
+ CPU_COPY(cpuset_root, mask);
+ else
+ CPU_SET(ie->ie_cpu, mask);
+ mtx_unlock(&ie->ie_lock);
+ break;
+ case CPU_WHICH_ITHREAD:
+ mtx_lock(&ie->ie_lock);
+ if (ie->ie_thread == NULL) {
+ mtx_unlock(&ie->ie_lock);
+ CPU_COPY(cpuset_root, mask);
+ } else {
+ id = ie->ie_thread->it_thread->td_tid;
+ mtx_unlock(&ie->ie_lock);
+ error = cpuset_which(CPU_WHICH_TID, id, &p, &td, NULL);
+ if (error != 0)
+ return (error);
+ CPU_COPY(&td->td_cpuset->cs_mask, mask);
+ PROC_UNLOCK(p);
+ }
+ break;
+ default:
+ return (EINVAL);
+ }
return (0);
}
@@ -1225,7 +1299,7 @@ swi_sched(void *cookie, int flags)
if (!(flags & SWI_DELAY)) {
#ifndef __rtems__
- PCPU_INC(cnt.v_soft);
+ VM_CNT_INC(v_soft);
#endif /* __rtems__ */
#ifdef INTR_FILTER
error = intr_event_schedule_thread(ie, ie->ie_thread);
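From user space, the new modes surface through cpuset_setaffinity(2) and cpuset_getaffinity(2): their CPU_WHICH_INTRHANDLER and CPU_WHICH_ITHREAD requests end up in the intr_setaffinity()/intr_getaffinity() switches shown above. A hedged userland sketch follows, assuming a FreeBSD or libbsd build that carries this change; the IRQ number 16 is a placeholder, and whether the request succeeds depends on the platform's ie_assign_cpu support, as the kernel code above shows.

/* Sketch: bind a hypothetical IRQ 16's ithread to CPU 3 from user space. */
#include <sys/param.h>
#include <sys/cpuset.h>

#include <err.h>
#include <stdio.h>

int
main(void)
{
	cpuset_t set;
	int irq = 16;	/* placeholder IRQ number */

	CPU_ZERO(&set);
	CPU_SET(3, &set);
	if (cpuset_setaffinity(CPU_LEVEL_WHICH, CPU_WHICH_ITHREAD,
	    (id_t)irq, sizeof(set), &set) != 0)
		err(1, "cpuset_setaffinity");
	printf("irq %d: ithread bound to CPU 3\n", irq);
	return (0);
}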