From 4544749e3c163c511a9a28992b3ac429515c0a5a Mon Sep 17 00:00:00 2001 From: Amaan Cheval Date: Mon, 13 Aug 2018 16:03:12 +0530 Subject: bsps/x86_64: Add paging support with 1GiB super pages Updates #2898. --- bsps/x86_64/amd64/start/bspstart.c | 2 + bsps/x86_64/amd64/start/page.c | 172 +++++++++++++++++++++ bsps/x86_64/headers.am | 5 + bsps/x86_64/include/libcpu/page.h | 68 ++++++++ c/src/lib/libbsp/x86_64/amd64/Makefile.am | 1 + .../score/cpu/x86_64/include/rtems/score/cpu_asm.h | 13 ++ 6 files changed, 261 insertions(+) create mode 100644 bsps/x86_64/amd64/start/page.c create mode 100644 bsps/x86_64/headers.am create mode 100644 bsps/x86_64/include/libcpu/page.h diff --git a/bsps/x86_64/amd64/start/bspstart.c b/bsps/x86_64/amd64/start/bspstart.c index 784748ce3f..5a5b46bcec 100644 --- a/bsps/x86_64/amd64/start/bspstart.c +++ b/bsps/x86_64/amd64/start/bspstart.c @@ -26,7 +26,9 @@ #include #include +#include void bsp_start(void) { + paging_init(); } diff --git a/bsps/x86_64/amd64/start/page.c b/bsps/x86_64/amd64/start/page.c new file mode 100644 index 0000000000..64bdf21707 --- /dev/null +++ b/bsps/x86_64/amd64/start/page.c @@ -0,0 +1,172 @@ +/* + * This file sets up page sizes to 1GiB (i.e. huge pages, using only the PML4 + * and PDPT, skipping the PDT, and PT). + * We set up identity-page mapping for the 512 GiBs addressable by using static + * PML4 and PDPT tables. + * + * Section 4.5 "4-Level Paging" of Volume 3 of the Intel Software Developer + * Manual guides a lot of the code used in this file. + * NOTE(review): the #include directives in this extracted patch have lost + * their angle-bracketed targets (extraction stripped "<...>"); restore them + * from the upstream commit before applying. + */ + +/* + * Copyright (c) 2018. + * Amaan Cheval + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +#include +#include +#include +#include +#include +#include + +uint64_t amd64_pml4[NUM_PAGE_TABLE_ENTRIES] RTEMS_ALIGNED(4096); +uint64_t amd64_pdpt[NUM_PAGE_TABLE_ENTRIES] RTEMS_ALIGNED(4096); + +bool paging_1gib_pages_supported(void) +{ + /* + * If CPUID.80000001H:EDX.Page1GB [bit 26] = 1, 1-GByte pages are supported + * with 4-level paging. + */ + uint32_t a, b, c, d; + cpuid(0x80000001, &a, &b, &c, &d); + return (d >> 26) & 1; +} + +uint8_t get_maxphysaddr(void) +{ + /* + * CPUID.80000008H:EAX[15:8] reports the linear-address width supported by the + * processor. Generally, this value is 48 if CPUID.80000001H:EDX.LM [bit 29] = + * 1 and 32 otherwise. + * NOTE(review): per the SDM, EAX[7:0] is the physical-address width + * (MAXPHYADDR) and EAX[15:8] is the linear-address width, so the + * (a >> 8) & 0xff extraction below yields the linear width (typically 48), + * not MAXPHYADDR -- confirm which bound is actually intended here. + */ + uint32_t a, b, c, d; + cpuid(0x80000008, &a, &b, &c, &d); + + uint8_t maxphysaddr = (a >> 8) & 0xff; + /* This width is referred to as MAXPHYADDR. MAXPHYADDR is at most 52. 
*/ + assert(maxphysaddr <= 52); + + return maxphysaddr; +} + +uint64_t get_mask_for_bits(uint8_t start, uint8_t end) +{ + /* + * Create a mask that lets you select bits start:end when logically ANDed with + * a value. For eg. + * get_mask_for_bits(48, 64) = 0xffff000000000000 + * NOTE(review): the shift is undefined behavior when end - start == 64 + * (a full 64-bit mask); no caller in this file requests that. + */ + uint64_t mask = (((uint64_t) 1 << (end - start)) - 1) << start; + return mask; +} + +RTEMS_INLINE_ROUTINE void assert_0s_from_bit(uint64_t entry, uint8_t bit_pos) +{ + /* Confirm that bit_pos:64 are all 0s */ + assert((entry & get_mask_for_bits(bit_pos, 64)) == 0); +} + +uint64_t create_cr3_entry( + uint64_t phys_addr, uint8_t maxphysaddr, uint64_t flags +) +{ + /* Confirm PML4 address is aligned on a 4KiB boundary */ + assert((phys_addr & 0xfff) == 0); + uint64_t entry = (phys_addr & get_mask_for_bits(12, maxphysaddr)) | flags; + + /* Confirm that bits maxphysaddr:64 are 0s */ + assert_0s_from_bit(entry, maxphysaddr); + return entry; +} + +uint64_t create_pml4_entry( + uint64_t phys_addr, uint8_t maxphysaddr, uint64_t flags +) +{ + /* Confirm address we're writing is aligned on a 4KiB boundary */ + assert((phys_addr & 0xfff) == 0); + uint64_t entry = (phys_addr & get_mask_for_bits(12, maxphysaddr)) | flags; + + /* + * Confirm that bits maxphysaddr:64 are 0s; there are other usable bits there + * such as PAGE_FLAGS_NO_EXECUTE, but we're asserting that those aren't set + * either. + */ + assert_0s_from_bit(entry, maxphysaddr); + return entry; +} + +uint64_t create_pdpt_entry( + uint64_t phys_addr, uint8_t maxphysaddr, uint64_t flags +) +{ + /* Confirm physical address is a 1GiB aligned page address */ + assert((phys_addr & 0x3fffffff) == 0); + uint64_t entry = (phys_addr & get_mask_for_bits(30, maxphysaddr)) | flags; + + /* + * Confirm that bits maxphysaddr:64 are 0s; there are other usable bits there + * such as the protection key and PAGE_FLAGS_NO_EXECUTE, but we're asserting + * that those aren't set either. 
+ */ + assert_0s_from_bit(entry, maxphysaddr); + return entry; +} + +void paging_init(void) +{ + if ( !paging_1gib_pages_supported() ) { + printf("warning: 1 GiB pages aren't supported - trying anyway.\n"); + } + const uint8_t maxphysaddr = get_maxphysaddr(); + DBG_PRINTF("maxphysaddr = %d\n", maxphysaddr); + + const uint64_t gib = (1 << 30); + + for (uint32_t i = 0; i < NUM_PAGE_TABLE_ENTRIES; i++) { + amd64_pdpt[i] = create_pdpt_entry( + /* This is the i-th GiB for identity-mapping */ + (uint64_t) i * gib, + maxphysaddr, + /* Setting huge page in the PDPTE gives us 1 GiB pages */ + PAGE_FLAGS_DEFAULTS | PAGE_FLAGS_HUGE_PAGE + ); + + /* + * NOTE(review): every iteration points this PML4 slot at the same PDPT, + * so all 512 PML4 entries alias the first 512 GiB of physical memory; a + * strict identity map needs only amd64_pml4[0]. Confirm the aliasing is + * intentional. + */ + amd64_pml4[i] = create_pml4_entry( + (uint64_t) amd64_pdpt, + maxphysaddr, + PAGE_FLAGS_DEFAULTS + ); + } + + amd64_set_cr3( + create_cr3_entry( + (uint64_t) &amd64_pml4, + maxphysaddr, + PAGE_FLAGS_WRITE_THROUGH + ) + ); +} diff --git a/bsps/x86_64/headers.am b/bsps/x86_64/headers.am new file mode 100644 index 0000000000..6f79bca7c0 --- /dev/null +++ b/bsps/x86_64/headers.am @@ -0,0 +1,5 @@ +## This file was generated by "./boostrap -H". + +include_libcpudir = $(includedir)/libcpu +include_libcpu_HEADERS = +include_libcpu_HEADERS += ../../../../../bsps/x86_64/include/libcpu/page.h diff --git a/bsps/x86_64/include/libcpu/page.h b/bsps/x86_64/include/libcpu/page.h new file mode 100644 index 0000000000..1903ae975e --- /dev/null +++ b/bsps/x86_64/include/libcpu/page.h @@ -0,0 +1,68 @@ +/* + * Copyright (c) 2018. + * Amaan Cheval + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +#ifndef _LIBCPU_AMD64_PAGE_H +#define _LIBCPU_AMD64_PAGE_H + +#ifndef ASM + +#define NUM_PAGE_TABLE_ENTRIES 512 + +extern uint64_t amd64_pml4[NUM_PAGE_TABLE_ENTRIES]; +extern uint64_t amd64_pdpt[NUM_PAGE_TABLE_ENTRIES]; + +bool paging_1gib_pages_supported(void); +uint8_t get_maxphysaddr(void); +uint64_t get_mask_for_bits(uint8_t start, uint8_t end); +uint64_t create_cr3_entry( + uint64_t phys_addr, uint8_t maxphysaddr, uint64_t flags +); +uint64_t create_pml4_entry( + uint64_t phys_addr, uint8_t maxphysaddr, uint64_t flags +); +uint64_t create_pdpt_entry( + uint64_t phys_addr, uint8_t maxphysaddr, uint64_t flags +); + +void paging_init(void); + +#define PAGE_FLAGS_PRESENT (1 << 0) +#define PAGE_FLAGS_WRITABLE (1 << 1) +#define PAGE_FLAGS_USER_ACCESSIBLE (1 << 2) +#define PAGE_FLAGS_WRITE_THROUGH (1 << 3) +#define PAGE_FLAGS_NO_CACHE (1 << 4) +#define PAGE_FLAGS_ACCESSED (1 << 5) +#define PAGE_FLAGS_DIRTY (1 << 6) +#define PAGE_FLAGS_HUGE_PAGE (1 << 7) +#define PAGE_FLAGS_GLOBAL (1 << 8) +#define PAGE_FLAGS_NO_EXECUTE (1 << 63) +/* NOTE(review): (1 << 63) left-shifts a 32-bit signed int past its width -- undefined behavior; this should be ((uint64_t) 1 << 63). */ + +#define PAGE_FLAGS_DEFAULTS \ + (PAGE_FLAGS_PRESENT | PAGE_FLAGS_WRITABLE | PAGE_FLAGS_USER_ACCESSIBLE \ + | PAGE_FLAGS_WRITE_THROUGH | PAGE_FLAGS_NO_CACHE | 
PAGE_FLAGS_GLOBAL) + +#endif /* !ASM */ +#endif diff --git a/c/src/lib/libbsp/x86_64/amd64/Makefile.am b/c/src/lib/libbsp/x86_64/amd64/Makefile.am index aa40f6224f..93c4e90fbc 100644 --- a/c/src/lib/libbsp/x86_64/amd64/Makefile.am +++ b/c/src/lib/libbsp/x86_64/amd64/Makefile.am @@ -23,6 +23,7 @@ librtemsbsp_a_SOURCES += ../../../../../../bsps/shared/start/bspfatal-default.c librtemsbsp_a_SOURCES += ../../../../../../bsps/shared/start/bspgetworkarea-default.c librtemsbsp_a_SOURCES += ../../../../../../bsps/x86_64/amd64/start/bspstart.c librtemsbsp_a_SOURCES += ../../../../../../bsps/x86_64/amd64/start/start.c +librtemsbsp_a_SOURCES += ../../../../../../bsps/x86_64/amd64/start/page.c librtemsbsp_a_SOURCES += ../../../../../../bsps/shared/start/sbrk.c librtemsbsp_a_SOURCES += ../../../../../../bsps/shared/dev/getentropy/getentropy-cpucounter.c librtemsbsp_a_SOURCES += ../../../../../../bsps/shared/start/bspreset-empty.c diff --git a/cpukit/score/cpu/x86_64/include/rtems/score/cpu_asm.h b/cpukit/score/cpu/x86_64/include/rtems/score/cpu_asm.h index ac43a6366d..5d4b608eb8 100644 --- a/cpukit/score/cpu/x86_64/include/rtems/score/cpu_asm.h +++ b/cpukit/score/cpu/x86_64/include/rtems/score/cpu_asm.h @@ -45,6 +45,19 @@ RTEMS_INLINE_ROUTINE void outport_byte(uint16_t port, uint8_t val) __asm__ volatile ( "outb %0, %1" : : "a" (val), "Nd" (port) ); } + +/* Load the given value (PML4 base | flags, as built by create_cr3_entry()) into CR3. */ +RTEMS_INLINE_ROUTINE void amd64_set_cr3(uint64_t segment) +{ + __asm__ volatile ( "movq %0, %%cr3" : "=r" (segment) : "0" (segment) ); +} + +/* NOTE(review): ECX is not initialized before CPUID, so the sub-leaf is indeterminate; harmless for the leaves used here (0x80000001, 0x80000008). */ +RTEMS_INLINE_ROUTINE void cpuid( + uint32_t code, uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx +) { + __asm__ volatile ( "cpuid" + : "=a" (*eax), "=b" (*ebx), "=c" (*ecx), "=d" (*edx) + : "a" (code) ); +} #endif /* !ASM */ #endif -- cgit v1.2.3