summaryrefslogtreecommitdiffstats
path: root/bsps/x86_64
diff options
context:
space:
mode:
Diffstat (limited to 'bsps/x86_64')
-rw-r--r--bsps/x86_64/amd64/interrupts/idt.c151
-rw-r--r--bsps/x86_64/amd64/interrupts/isr_handler.S191
-rw-r--r--bsps/x86_64/amd64/start/bspstart.c2
-rw-r--r--bsps/x86_64/headers.am4
-rw-r--r--bsps/x86_64/include/bsp/irq.h46
5 files changed, 394 insertions, 0 deletions
diff --git a/bsps/x86_64/amd64/interrupts/idt.c b/bsps/x86_64/amd64/interrupts/idt.c
new file mode 100644
index 0000000000..e5964e36a1
--- /dev/null
+++ b/bsps/x86_64/amd64/interrupts/idt.c
@@ -0,0 +1,151 @@
+/*
+ * Copyright (c) 2018.
+ * Amaan Cheval <amaan.cheval@gmail.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <stdint.h>
+#include <rtems.h>
+#include <rtems/score/idt.h>
+#include <rtems/score/basedefs.h>
+#include <rtems/score/x86_64.h>
+#include <rtems/score/cpuimpl.h>
+#include <bsp/irq-generic.h>
+
+/*
+ * The IDT maps every interrupt vector to an interrupt_descriptor based on the
+ * vector number.
+ */
+interrupt_descriptor amd64_idt[IDT_SIZE] RTEMS_ALIGNED(8) = { { 0 } };
+
+struct idt_record idtr = {
+ .limit = (IDT_SIZE * 16) - 1,
+ .base = (uintptr_t) amd64_idt
+};
+
+/**
+ * IRQs that the RTEMS Interrupt Manager will manage
+ * @see DISTINCT_INTERRUPT_ENTRY
+ */
+static uintptr_t rtemsIRQs[BSP_IRQ_VECTOR_NUMBER] = {
+ (uintptr_t) rtems_irq_prologue_0,
+ (uintptr_t) rtems_irq_prologue_1,
+ (uintptr_t) rtems_irq_prologue_2,
+ (uintptr_t) rtems_irq_prologue_3,
+ (uintptr_t) rtems_irq_prologue_4,
+ (uintptr_t) rtems_irq_prologue_5,
+ (uintptr_t) rtems_irq_prologue_6,
+ (uintptr_t) rtems_irq_prologue_7,
+ (uintptr_t) rtems_irq_prologue_8,
+ (uintptr_t) rtems_irq_prologue_9,
+ (uintptr_t) rtems_irq_prologue_10,
+ (uintptr_t) rtems_irq_prologue_11,
+ (uintptr_t) rtems_irq_prologue_12,
+ (uintptr_t) rtems_irq_prologue_13,
+ (uintptr_t) rtems_irq_prologue_14,
+ (uintptr_t) rtems_irq_prologue_15,
+ (uintptr_t) rtems_irq_prologue_16,
+ (uintptr_t) rtems_irq_prologue_17,
+ (uintptr_t) rtems_irq_prologue_18,
+ (uintptr_t) rtems_irq_prologue_19,
+ (uintptr_t) rtems_irq_prologue_20,
+ (uintptr_t) rtems_irq_prologue_21,
+ (uintptr_t) rtems_irq_prologue_22,
+ (uintptr_t) rtems_irq_prologue_23,
+ (uintptr_t) rtems_irq_prologue_24,
+ (uintptr_t) rtems_irq_prologue_25,
+ (uintptr_t) rtems_irq_prologue_26,
+ (uintptr_t) rtems_irq_prologue_27,
+ (uintptr_t) rtems_irq_prologue_28,
+ (uintptr_t) rtems_irq_prologue_29,
+ (uintptr_t) rtems_irq_prologue_30,
+ (uintptr_t) rtems_irq_prologue_31,
+ (uintptr_t) rtems_irq_prologue_32
+};
+
+void lidt(struct idt_record *ptr)
+{
+ __asm__ volatile ("lidt %0" :: "m"(*ptr));
+}
+
+interrupt_descriptor amd64_create_interrupt_descriptor(
+ uintptr_t handler, uint8_t types_and_attributes
+)
+{
+ interrupt_descriptor entry = {
+ .offset_0 = handler & 0xffff,
+ .segment_selector = amd64_get_cs(),
+ .interrupt_stack_table = 0,
+ .type_and_attributes = types_and_attributes,
+ .offset_1 = (handler >> 16) & 0xffff,
+ .offset_2 = handler >> 32,
+ .reserved_zero = 0,
+ };
+ return entry;
+}
+
+uintptr_t amd64_get_handler_from_idt(uint32_t vector)
+{
+  interrupt_descriptor entry = amd64_idt[vector];
+  uintptr_t handler = entry.offset_0 | ((uintptr_t) entry.offset_1 << 16) |
+    ((uintptr_t) entry.offset_2 << 32);
+  return handler;
+}
+
+void amd64_install_raw_interrupt(
+ uint32_t vector, uintptr_t new_handler, uintptr_t *old_handler
+)
+{
+ *old_handler = amd64_get_handler_from_idt(vector);
+ interrupt_descriptor new_desc = amd64_create_interrupt_descriptor(
+ new_handler,
+ IDT_INTERRUPT_GATE | IDT_PRESENT
+ );
+ amd64_idt[vector] = new_desc;
+}
+
+void amd64_dispatch_isr(rtems_vector_number vector)
+{
+ bsp_interrupt_handler_dispatch(vector);
+}
+
+rtems_status_code bsp_interrupt_facility_initialize(void)
+{
+ uintptr_t old;
+ for (uint32_t i = 0; i < BSP_IRQ_VECTOR_NUMBER; i++) {
+ amd64_install_raw_interrupt(i, rtemsIRQs[i], &old);
+ }
+
+ lidt(&idtr);
+
+ return RTEMS_SUCCESSFUL;
+}
+
+void bsp_interrupt_vector_disable(rtems_vector_number vector)
+{
+ /* XXX */
+}
+void bsp_interrupt_vector_enable(rtems_vector_number vector)
+{
+ /* XXX */
+}
diff --git a/bsps/x86_64/amd64/interrupts/isr_handler.S b/bsps/x86_64/amd64/interrupts/isr_handler.S
new file mode 100644
index 0000000000..fc2c9a9e76
--- /dev/null
+++ b/bsps/x86_64/amd64/interrupts/isr_handler.S
@@ -0,0 +1,191 @@
+/*
+ * This file contains the _ISR_Handler that acts as the common handler for all
+ * vectors to be managed by the RTEMS interrupt manager.
+ */
+
+/*
+ * Copyright (c) 2018.
+ * Amaan Cheval <amaan.cheval@gmail.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <rtems/asm.h>
+#include <rtems/score/cpu.h>
+#include <rtems/score/percpu.h>
+
+#ifndef CPU_STACK_ALIGNMENT
+#error "Missing header? CPU_STACK_ALIGNMENT not defined"
+#endif
+
+BEGIN_CODE
+
+PUBLIC(apic_spurious_handler)
+SYM(apic_spurious_handler):
+ iretq
+
+/*
+ * These are callee-saved registers, which means we can use them in our
+ * interrupts as persistent scratch registers (i.e. calls will not destroy
+ * them), as long as we also save and restore it for the interrupted task.
+ */
+.set SCRATCH_REG0, rbp
+.set SCRATCH_REG1, rbx
+
+/*
+ * We need to set a distinct handler for every interrupt vector so that
+ * we can pass the vector number to _ISR_Handler correctly.
+ */
+#define DISTINCT_INTERRUPT_ENTRY(vector) \
+ .p2align 4 ; \
+ PUBLIC(rtems_irq_prologue_ ## vector) ; \
+SYM(rtems_irq_prologue_ ## vector): ; \
+ pushq REG_ARG0 ; \
+ movq $vector, REG_ARG0 ; \
+ pushq SCRATCH_REG0 ; \
+ pushq SCRATCH_REG1 ; \
+ jmp SYM(_ISR_Handler)
+
+DISTINCT_INTERRUPT_ENTRY(0)
+DISTINCT_INTERRUPT_ENTRY(1)
+DISTINCT_INTERRUPT_ENTRY(2)
+DISTINCT_INTERRUPT_ENTRY(3)
+DISTINCT_INTERRUPT_ENTRY(4)
+DISTINCT_INTERRUPT_ENTRY(5)
+DISTINCT_INTERRUPT_ENTRY(6)
+DISTINCT_INTERRUPT_ENTRY(7)
+DISTINCT_INTERRUPT_ENTRY(8)
+DISTINCT_INTERRUPT_ENTRY(9)
+DISTINCT_INTERRUPT_ENTRY(10)
+DISTINCT_INTERRUPT_ENTRY(11)
+DISTINCT_INTERRUPT_ENTRY(12)
+DISTINCT_INTERRUPT_ENTRY(13)
+DISTINCT_INTERRUPT_ENTRY(14)
+DISTINCT_INTERRUPT_ENTRY(15)
+DISTINCT_INTERRUPT_ENTRY(16)
+DISTINCT_INTERRUPT_ENTRY(17)
+DISTINCT_INTERRUPT_ENTRY(18)
+DISTINCT_INTERRUPT_ENTRY(19)
+DISTINCT_INTERRUPT_ENTRY(20)
+DISTINCT_INTERRUPT_ENTRY(21)
+DISTINCT_INTERRUPT_ENTRY(22)
+DISTINCT_INTERRUPT_ENTRY(23)
+DISTINCT_INTERRUPT_ENTRY(24)
+DISTINCT_INTERRUPT_ENTRY(25)
+DISTINCT_INTERRUPT_ENTRY(26)
+DISTINCT_INTERRUPT_ENTRY(27)
+DISTINCT_INTERRUPT_ENTRY(28)
+DISTINCT_INTERRUPT_ENTRY(29)
+DISTINCT_INTERRUPT_ENTRY(30)
+DISTINCT_INTERRUPT_ENTRY(31)
+DISTINCT_INTERRUPT_ENTRY(32)
+
+SYM(_ISR_Handler):
+.save_cpu_interrupt_frame:
+.set SAVED_RSP, SCRATCH_REG0
+ movq rsp, SAVED_RSP
+
+ /* Make space for CPU_Interrupt_frame */
+ subq $CPU_INTERRUPT_FRAME_SIZE, rsp
+.set ALIGNMENT_MASK, ~(CPU_STACK_ALIGNMENT - 1)
+ andq $ALIGNMENT_MASK, rsp
+ // XXX: Save interrupt mask?
+
+ /* Save caller-saved registers to CPU_Interrupt_frame */
+ movq rax, (8 * CPU_SIZEOF_POINTER)(rsp)
+ movq rcx, (7 * CPU_SIZEOF_POINTER)(rsp)
+ movq rdx, (6 * CPU_SIZEOF_POINTER)(rsp)
+ movq rsi, (5 * CPU_SIZEOF_POINTER)(rsp)
+ movq r8, (4 * CPU_SIZEOF_POINTER)(rsp)
+ movq r9, (3 * CPU_SIZEOF_POINTER)(rsp)
+ movq r10, (2 * CPU_SIZEOF_POINTER)(rsp)
+ movq r11, (1 * CPU_SIZEOF_POINTER)(rsp)
+
+ /* Save the initial rsp */
+ movq SAVED_RSP, (0 * CPU_SIZEOF_POINTER)(rsp)
+
+.switch_stack_if_needed:
+ /* Save current aligned rsp so we can find CPU_Interrupt_frame again later */
+ movq rsp, SAVED_RSP
+
+ /*
+ * Switch to interrupt stack if necessary; it's necessary if this is the
+ * outermost interrupt, which means we've been using the task's stack so far
+ */
+
+#ifdef RTEMS_SMP
+ /* XXX: We should call _CPU_SMP_Get_current_processor here */
+#endif
+.set Per_CPU_Info, SCRATCH_REG1
+ movq $SYM(_Per_CPU_Information), Per_CPU_Info
+ cmpq $0, PER_CPU_ISR_NEST_LEVEL(Per_CPU_Info)
+ jne .skip_switch
+.switch_stack:
+ movq PER_CPU_INTERRUPT_STACK_HIGH(Per_CPU_Info), rsp
+.skip_switch:
+ incq PER_CPU_ISR_NEST_LEVEL(Per_CPU_Info)
+ incq PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL(Per_CPU_Info)
+
+.call_isr_dispatch:
+ /* REG_ARG0 already includes the vector number, so we can simply call */
+ call amd64_dispatch_isr
+
+.restore_stack:
+ /* If this is the outermost stack, this restores the task stack */
+ movq SAVED_RSP, rsp
+
+ decq PER_CPU_ISR_NEST_LEVEL(Per_CPU_Info)
+ decq PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL(Per_CPU_Info)
+ /* XXX: Bug in QEMU causing ZF to not be set by decq necessitating the cmpb */
+ cmpb $0, PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL(Per_CPU_Info)
+ /* If dispatch is non-zero, it is disabled, so skip scheduling it */
+ jne .restore_cpu_interrupt_frame
+
+.schedule_dispatch:
+ cmpb $0, PER_CPU_DISPATCH_NEEDED(Per_CPU_Info)
+ je .restore_cpu_interrupt_frame
+ call _Thread_Dispatch
+
+.restore_cpu_interrupt_frame:
+ /* Restore registers from CPU_Interrupt_frame */
+ movq (8 * CPU_SIZEOF_POINTER)(rsp), rax
+ movq (7 * CPU_SIZEOF_POINTER)(rsp), rcx
+ movq (6 * CPU_SIZEOF_POINTER)(rsp), rdx
+ movq (5 * CPU_SIZEOF_POINTER)(rsp), rsi
+ movq (4 * CPU_SIZEOF_POINTER)(rsp), r8
+ movq (3 * CPU_SIZEOF_POINTER)(rsp), r9
+ movq (2 * CPU_SIZEOF_POINTER)(rsp), r10
+ movq (1 * CPU_SIZEOF_POINTER)(rsp), r11
+
+ /* Restore the rsp value from just before _ISR_Handler was called */
+ movq (0 * CPU_SIZEOF_POINTER)(rsp), SAVED_RSP
+ movq SAVED_RSP, rsp
+
+ /* Restore args DISTINCT_INTERRUPT_ENTRY pushed to task stack */
+ popq SCRATCH_REG1
+ popq SCRATCH_REG0
+ popq REG_ARG0
+ iretq
+
+END_CODE
+
+END
diff --git a/bsps/x86_64/amd64/start/bspstart.c b/bsps/x86_64/amd64/start/bspstart.c
index 5a5b46bcec..d1e1e4b2f2 100644
--- a/bsps/x86_64/amd64/start/bspstart.c
+++ b/bsps/x86_64/amd64/start/bspstart.c
@@ -27,8 +27,10 @@
#include <bsp.h>
#include <bsp/bootcard.h>
#include <libcpu/page.h>
+#include <bsp/irq-generic.h>
void bsp_start(void)
{
paging_init();
+ bsp_interrupt_initialize();
}
diff --git a/bsps/x86_64/headers.am b/bsps/x86_64/headers.am
index 6f79bca7c0..ce203e0dc3 100644
--- a/bsps/x86_64/headers.am
+++ b/bsps/x86_64/headers.am
@@ -1,5 +1,9 @@
## This file was generated by "./boostrap -H".
+include_bspdir = $(includedir)/bsp
+include_bsp_HEADERS =
+include_bsp_HEADERS += ../../../../../bsps/x86_64/include/bsp/irq.h
+
include_libcpudir = $(includedir)/libcpu
include_libcpu_HEADERS =
include_libcpu_HEADERS += ../../../../../bsps/x86_64/include/libcpu/page.h
diff --git a/bsps/x86_64/include/bsp/irq.h b/bsps/x86_64/include/bsp/irq.h
new file mode 100644
index 0000000000..f8be997e8f
--- /dev/null
+++ b/bsps/x86_64/include/bsp/irq.h
@@ -0,0 +1,46 @@
+/*
+ * This file contains the mandatory defines to support the irq.h and
+ * irq-generic.c interfaces (initialized finally with bsp_interrupt_initialize).
+ */
+
+/*
+ * Copyright (c) 2018.
+ * Amaan Cheval <amaan.cheval@gmail.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef LIBBSP_GENERIC_AMD64_IRQ_H
+#define LIBBSP_GENERIC_AMD64_IRQ_H
+
+#ifndef ASM
+
+#include <rtems.h>
+#include <rtems/irq.h>
+#include <rtems/irq-extension.h>
+
+#define BSP_INTERRUPT_VECTOR_MIN 0x0
+#define BSP_IRQ_VECTOR_NUMBER 34
+#define BSP_INTERRUPT_VECTOR_MAX BSP_IRQ_VECTOR_NUMBER
+
+#endif /* !ASM */
+#endif /* LIBBSP_GENERIC_AMD64_IRQ_H */