From 9b5af6a47f799952c178967e04e83053bef57422 Mon Sep 17 00:00:00 2001
From: Sebastian Huber <sebastian.huber@embedded-brains.de>
Date: Wed, 8 Apr 2020 13:19:51 +0200
Subject: bsps/powerpc: Fix tlbie instruction usage

GCC 10 no longer passes -many to the assembler.  This enables more
checks in the assembler.  The 0 in the tlbie instruction is the L
operand which selects a 4KiB page size.
---
 bsps/powerpc/gen5200/start/start.S              |  2 +-
 bsps/powerpc/gen83xx/start/cpuinit.c            |  2 +-
 bsps/powerpc/include/libcpu/mmu.h               |  2 +-
 bsps/powerpc/motorola_powerpc/bootloader/head.S |  2 +-
 bsps/powerpc/motorola_powerpc/bootloader/mm.c   |  4 ++--
 bsps/powerpc/mvme5500/start/start.S             |  2 +-
 bsps/powerpc/shared/mmu/pte121.c                | 11 ++++++-----
 bsps/powerpc/shared/start/start.S               |  2 +-
 8 files changed, 14 insertions(+), 13 deletions(-)

diff --git a/bsps/powerpc/gen5200/start/start.S b/bsps/powerpc/gen5200/start/start.S
index 9e9e504861..fbb58cdfb1 100644
--- a/bsps/powerpc/gen5200/start/start.S
+++ b/bsps/powerpc/gen5200/start/start.S
@@ -775,7 +775,7 @@ TLB_init: /* Initialize translation lookaside buffers (TLBs) */
 	xor r29, r29, r29

 TLB_init_loop:
-	tlbie r29
+	tlbie r29, 0
 	tlbsync
 	addi r29, r29, 0x1000
 	addi r30, r30, 0x01
diff --git a/bsps/powerpc/gen83xx/start/cpuinit.c b/bsps/powerpc/gen83xx/start/cpuinit.c
index 1b0fd1efef..ad8fe98456 100644
--- a/bsps/powerpc/gen83xx/start/cpuinit.c
+++ b/bsps/powerpc/gen83xx/start/cpuinit.c
@@ -133,7 +133,7 @@ static void clear_mmu_regs( void)

   /* Clear TLBs */
   for (i = 0;i < 32;i++) {
-    __asm__ volatile( "tlbie %0\n" : : "r" (i << (31 - 19)));
+    __asm__ volatile( "tlbie %0, 0\n" : : "r" (i << (31 - 19)));
   }
 }

diff --git a/bsps/powerpc/include/libcpu/mmu.h b/bsps/powerpc/include/libcpu/mmu.h
index d3081316eb..6e7abb15da 100644
--- a/bsps/powerpc/include/libcpu/mmu.h
+++ b/bsps/powerpc/include/libcpu/mmu.h
@@ -165,7 +165,7 @@ typedef struct _MMU_context {
 /* invalidate a TLB entry */
 static inline void _tlbie(unsigned long va)
 {
-	asm volatile ("tlbie %0" : : "r"(va));
+	asm volatile ("tlbie %0, 0" : : "r"(va));
 }

 extern void _tlbia(void); /* invalidate all TLB entries */
diff --git a/bsps/powerpc/motorola_powerpc/bootloader/head.S b/bsps/powerpc/motorola_powerpc/bootloader/head.S
index 974b78a51c..b7e423e75d 100644
--- a/bsps/powerpc/motorola_powerpc/bootloader/head.S
+++ b/bsps/powerpc/motorola_powerpc/bootloader/head.S
@@ -383,7 +383,7 @@ MMUoff:	blr
 flush_tlb:
 	lis	r11,0x1000
 1:	addic.	r11,r11,-0x1000
-	tlbie	r11
+	tlbie	r11, 0
 	bnl	1b
 	/* tlbsync is not implemented on 601, so use sync which seems to be a superset
 	 * of tlbsync in all cases and do not bother with CPU dependant code
diff --git a/bsps/powerpc/motorola_powerpc/bootloader/mm.c b/bsps/powerpc/motorola_powerpc/bootloader/mm.c
index 1b3df41d49..2675396145 100644
--- a/bsps/powerpc/motorola_powerpc/bootloader/mm.c
+++ b/bsps/powerpc/motorola_powerpc/bootloader/mm.c
@@ -199,7 +199,7 @@ void _handler(int vec, ctxt *p) {
 			  flushva |= ((hte[i].key<<21)&0xf0000000)
 			    | ((hte[i].key<<22)&0x0fc00000);
 			  hte[i].key=0;
-			  asm volatile("sync; tlbie %0; sync" : : "r" (flushva));
+			  asm volatile("sync; tlbie %0, 0; sync" : : "r" (flushva));
 		found:
 			  hte[i].rpn = rpn;
 			  asm volatile("eieio": : );
@@ -583,7 +583,7 @@ void vflush(map *virtmap) {
 			| ((p[i].key<<22)&0x0fc00000);
 		if (va>=virtmap->base && va<=virtmap->end) {
 			p[i].key=0;
-			asm volatile("sync; tlbie %0; sync" : :
+			asm volatile("sync; tlbie %0, 0; sync" : :
 				     "r" (va));
 		}
 	}
diff --git a/bsps/powerpc/mvme5500/start/start.S b/bsps/powerpc/mvme5500/start/start.S
index c948c9c1ef..7e339147fa 100644
--- a/bsps/powerpc/mvme5500/start/start.S
+++ b/bsps/powerpc/mvme5500/start/start.S
@@ -193,7 +193,7 @@ _return_to_ppcbug:
 flush_tlbs:
 	lis	r20, 0x1000
 1:	addic.	r20, r20, -0x1000
-	tlbie	r20
+	tlbie	r20, 0
 	bgt	1b
 	sync
 	blr
diff --git a/bsps/powerpc/shared/mmu/pte121.c b/bsps/powerpc/shared/mmu/pte121.c
index 93ef909776..778d6353a9 100644
--- a/bsps/powerpc/shared/mmu/pte121.c
+++ b/bsps/powerpc/shared/mmu/pte121.c
@@ -122,9 +122,10 @@
  * instructions in order to flush all TLBs.
  * On the 750 and 7400, there are 128 two way I and D TLBs,
  * indexed by EA[14:19]. Hence calling
- *   tlbie rx
+ *   tlbie rx, 0
  * where rx scans 0x00000, 0x01000, 0x02000, ... 0x3f000
- * is sufficient to do the job
+ * is sufficient to do the job. The 0 in the tlbie instruction is the L operand
+ * which selects a 4KiB page size.
  */
 #define NUM_TLB_PER_WAY 64 /* 750 and 7400 have 128 two way TLBs */
 #define FLUSH_EA_RANGE (NUM_TLB_PER_WAY<<LD_PG_SIZE)
@@ -784,7 +785,7 @@ triv121UnmapEa (unsigned long ea)
       pte->v = 0;
       do_dssall ();
       __asm__ volatile ("	sync	\n\t"
-                        "	tlbie %0	\n\t"
+                        "	tlbie %0, 0	\n\t"
                         "	eieio	\n\t"
                         "	tlbsync	\n\t"
                         "	sync	\n\t"::"r" (ea):"memory");
@@ -960,7 +961,7 @@ triv121ChangeEaAttributes (unsigned long ea, int wimg, int pp)
   pte->wimg = wimg;
   if (pp >= 0)
     pte->pp = pp;
-  __asm__ volatile ("tlbie %0; eieio"::"r" (ea):"memory");
+  __asm__ volatile ("tlbie %0, 0; eieio"::"r" (ea):"memory");
   pte->v = 1;

   __asm__ volatile ("tlbsync; sync":::"memory");
diff --git a/bsps/powerpc/shared/start/start.S b/bsps/powerpc/shared/start/start.S
index 76d4fc3e2a..eb91a6ce57 100644
--- a/bsps/powerpc/shared/start/start.S
+++ b/bsps/powerpc/shared/start/start.S
@@ -192,7 +192,7 @@ _return_to_ppcbug:
 flush_tlbs:
 	lis	r20, 0x1000
 1:	addic.	r20, r20, -0x1000
-	tlbie	r20
+	tlbie	r20, 0
 	bgt	1b
 	sync
 	blr
--
cgit v1.2.3