summaryrefslogtreecommitdiffstats
path: root/bsps/powerpc/shared
diff options
context:
space:
mode:
authorSebastian Huber <sebastian.huber@embedded-brains.de>2018-04-20 10:19:28 +0200
committerSebastian Huber <sebastian.huber@embedded-brains.de>2018-04-20 13:08:36 +0200
commitfbcd7c8fa65eb695e96a62ea1c1ac7a024fa9dfc (patch)
treea17e285cf22cd49cd42e8b3ad562febc3987d566 /bsps/powerpc/shared
parentbsps: Move console drivers to bsps (diff)
downloadrtems-fbcd7c8fa65eb695e96a62ea1c1ac7a024fa9dfc.tar.bz2
bsps: Move start files to bsps
This patch is a part of the BSP source reorganization. Update #3285.
Diffstat (limited to 'bsps/powerpc/shared')
-rw-r--r--bsps/powerpc/shared/start/preload.S278
-rw-r--r--bsps/powerpc/shared/start/rtems_crti.S68
-rw-r--r--bsps/powerpc/shared/start/rtems_crtn.S27
-rw-r--r--bsps/powerpc/shared/start/start.S207
-rw-r--r--bsps/powerpc/shared/start/vectors_entry.S22
5 files changed, 602 insertions, 0 deletions
diff --git a/bsps/powerpc/shared/start/preload.S b/bsps/powerpc/shared/start/preload.S
new file mode 100644
index 0000000000..d8b47dfd2d
--- /dev/null
+++ b/bsps/powerpc/shared/start/preload.S
@@ -0,0 +1,278 @@
+/*
+ * Mini-loader for the SVGM BSP.
+ *
+ * Author: Till Straumann, 10/2001 <strauman@slac.stanford.edu>
+ *
+ * Some ideas are borrowed from the powerpc/shared/bootloader
+ * by
+ * Copyright (C) 1998, 1999 Gabriel Paubert, paubert@iram.es
+ * Copyright (C) 1999 Eric Valette. valette@crf.canon.fr
+ *
+ * The SMON firmware is unable to load the RTEMS image below
+ * 0x2000 (I believe their stack is growing below 0x1000).
+ *
+ * The code provided by this file is responsible for performing
+ * the following steps:
+ *
+ * 1) Save commandline parameters to an area that is
+ * a) not covered by the downloaded image
+ * b) will not be overwritten by the moved image
+ * nor the final BSS segment (rtems clears BSS
+ * before saving the command line).
+ * 2) Move the entire image (including this very file) to
+ * its final location starting at 0x0000.
+ * It is important to note that _NO_STACK_ is available
+ * during this step. Also, there is probably no return to
+ * SMON because relocating RTEMS will destroy vital SMON
+ * data (such as its stack).
+ * 3) Flush the cache to make sure the relocated image is actually
+ * in memory.
+ * 4) setup RTEMS environment (initial register values), most
+ * notably an initial STACK. The initial stack may be small and
+ * is used by RTEMS only at a very early stage.
+ * A safe place for the stack seems to be the 00..0x7f area.
+ * NOTE: we should respect the MAILBOX area 0x80..0xff!
+ * 5) switch the MMU off (because that's what RTEMS is expecting
+ * it to be at startup).
+ * 6) fire up rtems...
+ *
+ *
+ * Calling convention:
+ * R1: SMON SP
+ * R3: command line string start
+ * R4: command line string end + 1
+ * R5: where SMON put the image
+ * if R5 is 0, the preloader will use its entry point
+ * as the image starting address.
+ * See NOTE below.
+ * R6: end of the image (i.e. R6-R5 is the image length)
+ * if R6 is 0, _edata will be used as the image length
+ * See NOTE below.
+ *
+ * NOTE: if the symbol DONT_USE_R5_ENTRY is defined,
+ * R5/R6 are never used and the necessary parameters are
+ * determined at runtime (R5) / linkage (R6) [_edata]
+ *
+ * ASSUMPTIONS:
+ * The code RELIES on the assumption that the image will be
+ * moved DOWNWARDS in memory and that this loader is
+ * prepended to the image, i.e. it is safe to do
+ * codemove(codemove,0,codemove_end - codemove);
+ * (*0)(codemove_end, codemove_end-codemove, __rtems_end-codemove_end);
+ * where codemove(from, to, nbytes) is defined as
+ * codemove(from, to, nbytes) { while (nbytes--) *(to++)=*(from++); }
+ * Implicit to these assumptions is the assumption that the destination
+ * address is cache block aligned.
+ * Furthermore, the byte count is assumed to be a multiple
+ * of four
+ *
+ */
+#if 0
+#include <rtems/score/powerpc.h>
+#else
+#ifndef PPC_CACHE_ALIGNMENT
+#define PPC_CACHE_ALIGNMENT 32
+#endif
+#endif
+
+#include <rtems/score/cpu.h>
+#include <rtems/asm.h>
+
+/* Note that major modifications may be needed
+ * if DESTINATION_ADDR is not 0
+ */
+#define KERNELBASE 0x0
+#define INITIAL_STACK 0x70 /* 16-byte aligned; stays below the 0x80..0xff MAILBOX area noted above */
+#define CACHE_LINE_SIZE PPC_CACHE_ALIGNMENT /* autodetect doesn't work, see below */
+#define ASSUME_RTEMS_INSTALLS_VECTORS /* assume we need not load vectors */
+#define DONT_USE_R5_ENTRY /* always dynamically determine the address we're running from */
+
+	/* put this into its own section which we want to
+	 * be loaded at the very beginning. We should probably
+	 * not use more than 255 bytes.
+	 */
+	PUBLIC_VAR(__rtems_start)
+	PUBLIC_VAR(__rtems_entry_point)
+	PUBLIC_VAR(__rtems_end)
+	.section .entry_point_section,"awx",@progbits
+preload:
+	/* find out where we are: 'bl here' deposits the run-time
+	 * address of 'here' in LR; subtracting the link-time offset
+	 * (here-preload) yields the address we were loaded at.
+	 */
+	bl here
+here:
+	xor r0,r0,r0
+	mtmsr r0 /* clear MSR to known state */
+	mflr r5
+	addi r5,r5,-(here-preload)
+	lis r27,_edata@h
+	ori r27,r27,_edata@l
+
+	/* at this point the register contents are
+	 * R3: command line start
+	 * R4: R3 + command line length
+	 * R5: address we are running from / loaded to
+	 * R27: image end
+	 */
+
+	/* save command line start */
+	mr r6, r3
+	/* save the command line parameters if they are to be overwritten */
+	sub. r17, r4, r3 /* string length */
+	ble leaveparms /* <=0 -> no parameters */
+	/* copy has to be out of the way of the bss; therefore we must
+	 * put the string out of the way of both, the current end of
+	 * the image (without bss) AND the end of the loaded image
+	 * (including bss):
+	 * |......image.........| downloaded image
+	 * |image_bss...........| loaded image with bss appended
+	 *
+	 * ^ safe place for string
+	 *
+	 * the alternative scenario looks like this:
+	 * |..image.............| downloaded image
+	 * |image_bss...........| loaded image with bss appended
+	 * ^ safe place for string
+	 */
+	lis r18, __rtems_end+0x10000@h /* round up, save one instruction */
+	add r16, r5, r27 /* image end + 1 */
+	cmpw r16, r18
+	bge ishighenough
+	mr r16,r18 /* __rtems_end is higher than the image end
+	 * (without bss)
+	 */
+ishighenough:
+	/* R16 now holds the higher of (downloaded image end) and
+	 * (rounded-up __rtems_end): the lowest address safe from both
+	 * the image move and the later BSS clearing.
+	 */
+	cmpw r16, r3 /* destination start > current string start ? */
+	ble leaveparms /* string already after dst, leave it */
+	/* copy string from the last byte downwards */
+	add r6, r16, r17 /* last byte of destination + 1 */
+	mtctr r17
+1:
+	lbzu r3, -1(r4)
+	stbu r3, -1(r6)
+	bdnz 1b
+leaveparms:
+	add r7, r6, r17 /* destination + strlen */
+
+#ifndef CACHE_LINE_SIZE
+	/* Oh well, SMON has inhibited the cache, so this
+	 * nice routine doesn't work...
+	 */
+	/* figure out the cache line size */
+	li r16, 0x80
+	cmpw r5, r16 /* 'from' must be > 0x80 */
+	blt panic
+
+1: /* store some arbitrary, nonzero stuff in 0..0x7c */
+	stwu r16,-4(r16)
+	cmpwi r16,0
+	bne 1b
+	dcbz 0,r16 /* zero out one cache line */
+	subi r16,r16,4
+2: lwzu r0,4(r16) /* search for a non-zero word */
+	cmpwi r0,0
+	beq 2b
+	/* OK, r16 now holds the size of a cache line in bytes */
+#else
+	li r16,CACHE_LINE_SIZE
+#endif
+
+	lis r3,preload@h
+	ori r3,r3,preload@l
+	mr r4,r5 /* from-addr */
+	li r5,_preload_size/* this is never > 16k */
+	/* now move ourselves to the link address ('preload').
+	 * We set up the LR, so domove() 'returns' to the
+	 * relocated copy
+	 */
+	lis r0,return_here@h
+	ori r0,r0,return_here@l
+	mtlr r0
+	b domove /* move the preloader itself */
+return_here:
+	/* now we move the entire rest of the image */
+#ifdef ASSUME_RTEMS_INSTALLS_VECTORS
+	/* start copying at __rtems_start (skipping the vector area,
+	 * which RTEMS installs itself) and adjust the source address
+	 * by the same amount.
+	 */
+	lis r3,__rtems_start@h
+	ori r3,r3,__rtems_start@l
+	lis r0,preload@h /* calculate/adjust from address */
+	ori r0,r0,preload@l
+	sub r0,r3,r0
+	add r4,r4,r0
+	sub r5,r27,r3
+#else
+	add r3,r3,r5 /* add preloader size to destination */
+	add r4,r4,r5 /* and source addresses */
+	sub r5,r27,r5 /* length of the remaining rest */
+#endif
+	bl domove
+	/* OK, now everything should be in place.
+	 * we are ready to start...
+	 */
+
+	/* setup initial stack for rtems early boot */
+	li r1,INITIAL_STACK
+	/* tag TOS with a NULL pointer (for stack trace) */
+	li r0, 0
+	stw r0, 0(r1)
+	/* disable the MMU and fire up rtems */
+	mfmsr r0
+	/* ori sets IP and ME (and temporarily IR/DR); the xori then
+	 * clears IR/DR, so translation is OFF after the rfi below.
+	 */
+	ori r0,r0,MSR_IR|MSR_DR|MSR_IP|MSR_ME
+	xori r0,r0,MSR_IR|MSR_DR
+	mtsrr1 r0
+	lis r0,__rtems_entry_point@h
+	ori r0,r0,__rtems_entry_point@l
+	mtsrr0 r0
+	/* R6: start of command line */
+	/* R7: end of command line +1 */
+	rfi
+
+ /* domove(to, from, nbytes):
+ *
+ * move R5 bytes from R4 to R3 and flush
+ * the caches for the destination memory
+ * region. R16 provides the cache line size.
+ * DESTROYS: R0, R17, R18, CTR, CR
+ */
+domove:
+	/* word-copy loop; per the file header the byte count is a
+	 * multiple of four and the destination is cache-block aligned
+	 */
+	addi r0,r5,3 /* convert to word count */
+	srwi. r0,r0,2
+	beq 3f /* nothing to do */
+	cmpw r3,r4 /* from == to ? */
+	beq 3f
+	mtctr r0
+	la r18,-4(r4)
+	la r17,-4(r3)
+1: lwzu r0,4(r18)
+	stwu r0,4(r17)
+	bdnz 1b /* move data */
+	/* now, we must flush the destination cache region */
+#ifndef CACHE_LINE_SIZE
+	cmpwi r16,0
+	beq 3f /* nothing to do */
+#endif
+#if defined(CACHE_LINE_SIZE) && CACHE_LINE_SIZE > 0
+	add r17,r3,r5 /* target end pointer */
+	subi r0,r16,1
+	add r17,r17,r0
+	andc r17,r17,r0 /* cache aligned target end pointer */
+	mr r18,r3
+2: cmpw r18,r17
+	dcbst 0,r18 /* write out data cache line */
+	icbi 0,r18 /* invalidate corresponding i-cache line */
+	add r18,r18,r16
+	blt 2b
+	sync /* make sure data is written back */
+	isync /* invalidate possibly preloaded instructions */
+#endif
+3:
+	blr
+
+#if !defined(CACHE_LINE_SIZE)
+/* Cache line size could not be determined: re-enable the firmware
+ * exception vectors (MSR_IP) and trap via 'sc' with R10=0x63
+ * (presumably a SMON monitor-entry code -- NOTE(review): confirm
+ * against SMON documentation; same sequence as MONITOR_ENTER in
+ * start.S).
+ */
+panic:
+	li r10,0x63
+	mfmsr r0
+	ori r0,r0,MSR_IP
+	mtmsr r0
+	sc
+#endif
+
+/* DONT PUT ANY CODE BELOW HERE */
+_preload_size = . - preload
diff --git a/bsps/powerpc/shared/start/rtems_crti.S b/bsps/powerpc/shared/start/rtems_crti.S
new file mode 100644
index 0000000000..a664ae2522
--- /dev/null
+++ b/bsps/powerpc/shared/start/rtems_crti.S
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2017 embedded brains GmbH. All rights reserved.
+ *
+ * embedded brains GmbH
+ * Dornierstr. 4
+ * 82178 Puchheim
+ * Germany
+ * <rtems@embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+/* rtems_crti.S */
+#include <rtems/asm.h>
+#include <rtems/score/cpu.h>
+
+#if defined(__powerpc64__)
+	/* 64-bit _init prologue: save LR in the caller's frame (+16)
+	 * and establish a 96-byte frame.  The matching epilogues are
+	 * provided by rtems_crtn.S.
+	 */
+	.section ".init","ax"
+	.align 2
+	.globl _init
+	.type _init,@function
+_init:
+	mflr r0
+	std r0,16(r1)
+	stdu r1,-96(r1)
+
+	/* 64-bit _fini prologue, identical shape to _init. */
+	.section ".fini","ax"
+	.align 2
+	.globl _fini
+	.type _fini,@function
+_fini:
+	mflr r0
+	std r0,16(r1)
+	stdu r1,-96(r1)
+#else
+	/* terminate the __init() function and create
+	 * a new head '_init' for use by RTEMS to
+	 * invoke C++ global constructors
+	 * NOTE: it is essential that this snippet
+	 * is hooked between ecrti and crtbegin
+	 *
+	 * ecrti has the following .init section:
+	 * __init:
+	 *	stwu r1,-16(r1)
+	 *	mflr r0
+	 *	stw r0,20(r1)
+	 *
+	 * The reason for this is that we want to call
+	 * __eabi() at an early stage but prevent __eabi()
+	 * from branching to __init (C++ exception init
+	 * and global CTORs). Hence we make __init a no-op
+	 * and create a new entry point:
+	 */
+	.section ".init","ax"
+	.align 2
+	/* epilogue matching the ecrti prologue quoted above: reload
+	 * the LR saved at byte offset 20 and pop the 16-byte frame.
+	 * (The offset is the displacement 20, not register r20.)
+	 */
+	lwz r0,20(r1)
+	mtlr r0
+	addi r1,r1,16
+	blr
+	.globl _init
+	.type _init,@function
+_init:
+	stwu r1,-16(r1)
+	mflr r0
+	stw r0,20(r1)
+#endif
diff --git a/bsps/powerpc/shared/start/rtems_crtn.S b/bsps/powerpc/shared/start/rtems_crtn.S
new file mode 100644
index 0000000000..747d83dbce
--- /dev/null
+++ b/bsps/powerpc/shared/start/rtems_crtn.S
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) 2017 embedded brains GmbH. All rights reserved.
+ *
+ * embedded brains GmbH
+ * Dornierstr. 4
+ * 82178 Puchheim
+ * Germany
+ * <rtems@embedded-brains.de>
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#if defined(__powerpc64__)
+	/* Epilogue terminating _init (whose prologue is emitted by
+	 * rtems_crti.S): pop the 96-byte frame, restore LR from the
+	 * caller's frame (+16) and return.
+	 */
+	.section ".init","ax"
+	addi 1,1,96
+	ld 0,16(1)
+	mtlr 0
+	blr
+
+	/* Matching epilogue terminating _fini. */
+	.section ".fini","ax"
+	addi 1,1,96
+	ld 0,16(1)
+	mtlr 0
+	blr
+#endif
diff --git a/bsps/powerpc/shared/start/start.S b/bsps/powerpc/shared/start/start.S
new file mode 100644
index 0000000000..354b9a967e
--- /dev/null
+++ b/bsps/powerpc/shared/start/start.S
@@ -0,0 +1,207 @@
+/*
+ * start.S : RTEMS entry point
+ *
+ * Copyright (C) 1999 Eric Valette. valette@crf.canon.fr
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ *
+ */
+
+#include <rtems/asm.h>
+#include <rtems/score/cpu.h>
+#include <rtems/powerpc/powerpc.h>
+
+#include <libcpu/io.h>
+#include <libcpu/bat.h>
+#include <bspopts.h>
+
+/* SYNC: order all prior storage accesses and discard prefetched
+ * instructions before continuing.
+ */
+#define SYNC \
+	sync; \
+	isync
+
+#define KERNELBASE 0x0
+
+/* MONITOR_ENTER: drop into the ROM monitor -- set MSR_IP so the
+ * exception vectors point at the firmware, then issue 'sc' with
+ * R10=0x63 (monitor service code).  Clobbers R10.
+ */
+#define MONITOR_ENTER \
+	mfmsr r10 ; \
+	ori r10,r10,MSR_IP ; \
+	mtmsr r10 ; \
+	li r10,0x63 ; \
+	sc
+
+	.text
+	.globl __rtems_entry_point
+	.type __rtems_entry_point,@function
+__rtems_entry_point:
+#ifdef DEBUG_EARLY_START
+	MONITOR_ENTER
+#endif
+
+/*
+ * PREP
+ * This is jumped to on prep systems right after the kernel is relocated
+ * to its proper place in memory by the boot loader. The expected layout
+ * of the regs is:
+ * r3: ptr to residual data
+ * r4: initrd_start or if no initrd then 0
+ * r5: initrd_end - unused if r4 is 0
+ * r6: Start of command line string
+ * r7: End of command line string
+ *
+ * The Prep boot loader insures that the MMU is currently off...
+ *
+ */
+
+	/* park the boot parameters in callee-saved registers so they
+	 * survive the helper calls below (restored before
+	 * save_boot_params)
+	 */
+	mr r31,r3 /* save parameters */
+	mr r30,r4
+	mr r29,r5
+	mr r28,r6
+	mr r27,r7
+
+#ifdef __ALTIVEC__
+	/* enable altivec; gcc may use it! */
+	mfmsr r0
+	oris r0, r0, (1<<(31-16-6)) /* MSR_VEC (MSR bit 6, set via oris) */
+	mtmsr r0
+	isync
+	/*
+	 * set vscr and vrsave to known values
+	 */
+	li r0, 0
+	mtvrsave r0
+	vxor 0,0,0
+	mtvscr 0
+#endif
+
+	/*
+	 * Make sure we have nothing in BATS and TLB
+	 */
+	bl CPU_clear_bats_early
+	bl flush_tlbs
+/*
+ * Use the first pair of BAT registers to map the 1st 256MB
+ * of RAM to KERNELBASE.
+ */
+	lis r11,KERNELBASE@h
+/* set up BAT registers for 604 */
+	ori r11,r11,0x1ffe /* 60x upper BAT: BL=0x7ff (256MB), Vs=1, Vp=0 */
+	li r8,2 /* R/W access */
+	isync
+	mtspr DBAT0L,r8 /* N.B. 6xx (not 601) have valid */
+	mtspr DBAT0U,r11 /* bit in upper BAT register */
+	mtspr IBAT0L,r8
+	mtspr IBAT0U,r11
+	isync
+/* Map section where residual is located if outside
+ * the first 256Mb of RAM. This is to support cases
+ * where the available system memory is larger than
+ * 256Mb of RAM.
+ */
+	mr r9, r1 /* Get where residual was mapped */
+	/* NOTE(review): R1 is the boot loader stack pointer at this
+	 * point; the residual data presumably lives near that stack --
+	 * confirm against the boot loader.
+	 */
+	lis r12,0xf0000000@h
+	and r9,r9,r12 /* keep top nibble: 256MB-naturally-aligned base */
+	cmpi 0,1,r9, 0
+	beq enter_C_code /* already covered by BAT0's 0..256MB mapping */
+	isync
+	ori r11,r9,0x1ffe
+	mtspr DBAT1L,r8 /* N.B. 6xx (not 601) have valid */
+	mtspr DBAT1U,r11 /* bit in upper BAT register */
+	mtspr IBAT1L,r8
+	mtspr IBAT1U,r11
+	isync
+
+/*
+ * we now have the 1st 256M of ram mapped with the bats. We are still
+ * running on the bootloader stack and cannot switch to an RTEMS allocated
+ * init stack before copying the residual data that may have been set just after
+ * rtems_end address. This bug has been experienced on MVME2304. Thanks to
+ * Till Straumann <strauman@SLAC.Stanford.EDU> for hunting it and suggesting
+ * the appropriate code.
+ */
+
+enter_C_code:
+	bl MMUon
+	bl __eabi /* setup EABI and SYSV environment */
+	bl zero_bss
+	/*
+	 * restore prep boot params
+	 */
+	mr r3,r31
+	mr r4,r30
+	mr r5,r29
+	mr r6,r28
+	mr r7,r27
+	bl save_boot_params
+	/*
+	 * stack = &__stack - PPC_MINIMUM_STACK_FRAME_SIZE
+	 */
+	addis r9,r0, __stack-PPC_MINIMUM_STACK_FRAME_SIZE@ha
+	addi r9,r9, __stack-PPC_MINIMUM_STACK_FRAME_SIZE@l
+	/*
+	 * align initial stack
+	 * (we hope that the bootloader stack was 16-byte aligned
+	 * or we haven't used altivec yet...)
+	 */
+	li r0, (CPU_STACK_ALIGNMENT-1)
+	andc r1, r9, r0
+	/*
+	 * Tag TOS with a NULL (terminator for stack dump)
+	 */
+	li r0, 0
+	stw r0, 0(r1)
+
+	/*
+	 * We are now in an environment that is totally independent from
+	 * bootloader setup.
+	 */
+	/* pass result of 'save_boot_params' to 'boot_card' in R3 */
+	bl boot_card
+	bl _return_to_ppcbug /* boot_card returned: fall back to firmware */
+
+	.globl MMUon
+	.type MMUon,@function
+/* MMUon: 'return' to the caller via rfi with address translation
+ * (MSR_IR/MSR_DR) and MSR_RI enabled; MSR_EE and MSR_IP end up
+ * cleared (set by the ori, toggled off by the xori).  MSR_FP is
+ * enabled only when PPC_HAS_FPU != 0.  LR -> SRR0, new MSR -> SRR1.
+ * Clobbers R0, R11.
+ */
+MMUon:
+	mfmsr r0
+	ori r0,r0, MSR_IP | MSR_RI | MSR_IR | MSR_DR | MSR_EE | MSR_FE0 | MSR_FE1 | MSR_FP
+#if (PPC_HAS_FPU == 0)
+	xori r0, r0, MSR_EE | MSR_IP | MSR_FP
+#else
+	xori r0, r0, MSR_EE | MSR_IP | MSR_FE0 | MSR_FE1
+#endif
+	mflr r11
+	mtsrr0 r11
+	mtsrr1 r0
+	SYNC
+	rfi
+
+	.globl MMUoff
+	.type MMUoff,@function
+/* MMUoff: 'return' to the caller via rfi with address translation
+ * (MSR_IR/MSR_DR) disabled and MSR_IP set (vectors in firmware
+ * space).  Clobbers R0, R11.
+ */
+MMUoff:
+	mfmsr r0
+	ori r0,r0,MSR_IR| MSR_DR | MSR_IP
+	mflr r11
+	xori r0,r0,MSR_IR|MSR_DR /* ori set them; xori clears them */
+	mtsrr0 r11
+	mtsrr1 r0
+	SYNC
+	rfi
+
+	.globl _return_to_ppcbug
+	.type _return_to_ppcbug,@function
+
+/* Drop back into the firmware monitor: save our return address,
+ * switch the MMU off, trap into the monitor, and -- should the
+ * monitor ever return -- re-enable the MMU and branch back to the
+ * caller (saved in R30 across the helper calls).
+ */
+_return_to_ppcbug:
+	mflr r30
+	bl MMUoff
+	MONITOR_ENTER
+	bl MMUon
+	mtctr r30
+	bctr
+
+/* flush_tlbs: issue tlbie for effective addresses 0x0FFFF000 down
+ * to 0 in 0x1000 (4 KiB) steps -- i.e. every page of the first
+ * 256 MB -- then sync.  Clobbers R20 and CR0.
+ */
+flush_tlbs:
+	lis r20, 0x1000 /* R20 = 0x10000000 (256 MB) */
+1:	addic. r20, r20, -0x1000
+	tlbie r20
+	bgt 1b /* loop until R20 reaches 0 */
+	sync
+	blr
diff --git a/bsps/powerpc/shared/start/vectors_entry.S b/bsps/powerpc/shared/start/vectors_entry.S
new file mode 100644
index 0000000000..07b17a48af
--- /dev/null
+++ b/bsps/powerpc/shared/start/vectors_entry.S
@@ -0,0 +1,22 @@
+/*
+ * (c) 2007, Thomas Doerfler <Thomas.Doerfler@embedded-brains.de>
+ *
+ *
+ * This file contains the entry point vector needed by some bootloaders
+ * derived from "vectors.S"
+ */
+
+#include <rtems/asm.h>
+#include <rtems/score/cpu.h>
+
+	PUBLIC_VAR (__rtems_start)
+	.section .entry_point_section,"awx",@progbits
+/*
+ * Entry point information used by bootloader code:
+ * __rtems_start is a single word holding the address of
+ * __rtems_entry_point, placed in .entry_point_section so a
+ * bootloader can locate the kernel entry.
+ */
+SYM (__rtems_start):
+	.long __rtems_entry_point
+
+	/*
+	 * end of special Entry point section
+	 */