author    Sebastian Huber <sebastian.huber@embedded-brains.de>  2018-03-13 06:18:38 +0100
committer Sebastian Huber <sebastian.huber@embedded-brains.de>  2018-03-19 07:00:20 +0100
commit    7dbc43da43901228713d12d5deab637d2c9af820 (patch)
tree      3d2baa0b31b8a1d854e418631ba9eaeafa129bee /bsps
parent    bsps/powerpc: Move legacy IRQ support (diff)
bsps/powerpc: Move basic support to bsps
This patch is part of the BSP source reorganization. Update #3285.
Diffstat (limited to 'bsps')
-rw-r--r--  bsps/powerpc/shared/cpu.c              138
-rw-r--r--  bsps/powerpc/shared/cpuIdent.c         243
-rw-r--r--  bsps/powerpc/shared/cpu_asm.S          595
-rw-r--r--  bsps/powerpc/shared/ppc-print-stack.c   43
-rw-r--r--  bsps/powerpc/shared/shared.am            4
5 files changed, 1023 insertions, 0 deletions
diff --git a/bsps/powerpc/shared/cpu.c b/bsps/powerpc/shared/cpu.c
new file mode 100644
index 0000000000..7c37f0e0d8
--- /dev/null
+++ b/bsps/powerpc/shared/cpu.c
@@ -0,0 +1,138 @@
+/*
+ * PowerPC CPU Dependent Source
+ */
+
+/*
+ * Author: Andrew Bray <andy@i-cubed.co.uk>
+ *
+ * COPYRIGHT (c) 1995 by i-cubed ltd.
+ *
+ * To anyone who acknowledges that this file is provided "AS IS"
+ * without any express or implied warranty:
+ * permission to use, copy, modify, and distribute this file
+ * for any purpose is hereby granted without fee, provided that
+ * the above copyright notice and this notice appears in all
+ * copies, and that the name of i-cubed limited not be used in
+ * advertising or publicity pertaining to distribution of the
+ * software without specific, written prior permission.
+ * i-cubed limited makes no representations about the suitability
+ * of this software for any purpose.
+ *
+ * Derived from c/src/exec/cpu/no_cpu/cpu.c:
+ *
+ * COPYRIGHT (c) 1989-1997.
+ * On-Line Applications Research Corporation (OAR).
+ *
+ * The license and distribution terms for this file may be found in
+ * the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#include <string.h>
+
+#include <rtems/system.h>
+#include <rtems/score/isr.h>
+#include <rtems/score/context.h>
+#include <rtems/score/thread.h>
+#include <rtems/score/interr.h>
+#include <rtems/score/cpu.h>
+#include <rtems/score/tls.h>
+#include <rtems/powerpc/powerpc.h>
+
+/* _CPU_Initialize
+ *
+ * This routine performs processor dependent initialization.
+ */
+void _CPU_Initialize(void)
+{
+#if defined(__ALTIVEC__) && !defined(PPC_MULTILIB_ALTIVEC)
+ _CPU_Initialize_altivec();
+#endif
+}
+
+/*
+ * _CPU_Context_Initialize
+ */
+void _CPU_Context_Initialize(
+ Context_Control *the_context,
+ void *stack_base,
+ size_t size,
+ uint32_t new_level,
+ void *entry_point,
+ bool is_fp,
+ void *tls_area
+)
+{
+ ppc_context *the_ppc_context;
+ uint32_t msr_value = 0;
+ uintptr_t sp;
+ uintptr_t stack_alignment;
+
+ sp = (uintptr_t) stack_base + size - PPC_MINIMUM_STACK_FRAME_SIZE;
+
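+  /* CPU_STACK_ALIGNMENT is a power of two, so clearing the low-order
+   * bits rounds the stack pointer down to an aligned address.
+   */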
+ stack_alignment = CPU_STACK_ALIGNMENT;
+ sp &= ~(stack_alignment - 1);
+
+ sp = (uintptr_t) memset((void *) sp, 0, PPC_MINIMUM_STACK_FRAME_SIZE);
+
+ the_ppc_context = ppc_get_context( the_context );
+
+#if !defined(PPC_DISABLE_MSR_ACCESS)
+ _CPU_MSR_GET( msr_value );
+
+  /*
+   * Setting the interrupt mask here is not strictly necessary,
+   * since the IRQ level will be established from _Thread_Handler()
+   * again as soon as the task starts execution.
+   * Because we have to establish a defined state anyway, we
+   * may as well leave this code here.
+   * I.e., simply (and unconditionally) saying
+   *
+   *   msr_value &= ~ppc_interrupt_get_disable_mask();
+   *
+   * would be an alternative.
+   */
+
+ if (!(new_level & CPU_MODES_INTERRUPT_MASK)) {
+ msr_value |= ppc_interrupt_get_disable_mask();
+ }
+ else {
+ msr_value &= ~ppc_interrupt_get_disable_mask();
+ }
+
+#ifdef PPC_MULTILIB_FPU
+ /*
+ * The FP bit of the MSR should only be enabled if this is a floating
+ * point task. Unfortunately, the vfprintf_r routine in newlib
+ * ends up pushing a floating point register regardless of whether or
+ * not a floating point number is being printed. Serious restructuring
+ * of vfprintf.c will be required to avoid this behavior. At this
+ * time (7 July 1997), this restructuring is not being done.
+ */
+ msr_value |= MSR_FP;
+#endif
+
+#ifdef PPC_MULTILIB_ALTIVEC
+ msr_value |= MSR_VE;
+#endif
+#endif /* END PPC_DISABLE_MSR_ACCESS */
+
+#ifdef PPC_MULTILIB_ALTIVEC
+ the_ppc_context->vrsave = 0;
+#endif
+
+ the_ppc_context->gpr1 = sp;
+ the_ppc_context->msr = msr_value;
+ the_ppc_context->lr = (uintptr_t) entry_point;
+ the_ppc_context->isr_dispatch_disable = 0;
+
+#if defined(__ALTIVEC__) && !defined(PPC_MULTILIB_ALTIVEC)
+ _CPU_Context_initialize_altivec( the_ppc_context );
+#endif
+
+ if ( tls_area != NULL ) {
+ void *tls_block = _TLS_TCB_before_TLS_block_initialize( tls_area );
+
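+    /* The PowerPC TLS ABI biases the thread pointer by 0x7000 so that
+     * common TLS offsets fit into signed 16-bit displacements.
+     */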
+ the_ppc_context->tp = (uintptr_t) tls_block + 0x7000;
+ }
+}
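
The alignment step in _CPU_Context_Initialize() above works only because
CPU_STACK_ALIGNMENT is a power of two. A minimal stand-alone sketch of the
same align-down idiom (the helper name is illustrative, not part of the
patch):

    #include <assert.h>
    #include <stdint.h>

    /* Round value down to a power-of-two alignment, as done for the
     * initial stack pointer in _CPU_Context_Initialize().
     */
    static uintptr_t align_down(uintptr_t value, uintptr_t alignment)
    {
      return value & ~(alignment - 1);
    }

    int main(void)
    {
      assert(align_down(0x1007, 16) == 0x1000); /* rounded down */
      assert(align_down(0x1010, 16) == 0x1010); /* already aligned */
      return 0;
    }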
diff --git a/bsps/powerpc/shared/cpuIdent.c b/bsps/powerpc/shared/cpuIdent.c
new file mode 100644
index 0000000000..ad3fec2293
--- /dev/null
+++ b/bsps/powerpc/shared/cpuIdent.c
@@ -0,0 +1,243 @@
+/*
+ * cpuIdent.c -- CPU identification code
+ *
+ * Copyright (C) 1999 Eric Valette. valette@crf.canon.fr
+ *
+ * Added MPC8260 Andy Dachs <a.dachs@sstl.co.uk>
+ * Surrey Satellite Technology Limited
+ *
+ * The license and distribution terms for this file may be
+ * found in the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ *
+ */
+
+#include <libcpu/cpuIdent.h>
+#include <libcpu/spr.h>
+#include <rtems/bspIo.h>
+
+/*
+ * Generate inline code to read Processor Version Register
+ */
+SPR_RO(PPC_PVR)
+
+ppc_cpu_id_t current_ppc_cpu = PPC_UNKNOWN;
+ppc_cpu_revision_t current_ppc_revision = 0xff;
+ppc_feature_t current_ppc_features;
+
+const char *get_ppc_cpu_type_name(ppc_cpu_id_t cpu)
+{
+ switch (cpu) {
+ case PPC_405: return "PPC405";
+ case PPC_405GP: return "PPC405GP";
+ case PPC_405EX: return "PPC405EX";
+ case PPC_440: return "PPC440";
+ case PPC_601: return "MPC601";
+ case PPC_5XX: return "MPC5XX";
+ case PPC_603: return "MPC603";
+ case PPC_603ev: return "MPC603ev";
+ case PPC_604: return "MPC604";
+ case PPC_750: return "MPC750";
+ case PPC_750_IBM: return "IBM PPC750";
+ case PPC_7400: return "MPC7400";
+ case PPC_7455: return "MPC7455";
+ case PPC_7457: return "MPC7457";
+ case PPC_603le: return "MPC603le";
+ case PPC_604e: return "MPC604e";
+ case PPC_604r: return "MPC604r";
+ case PPC_620: return "MPC620";
+ case PPC_860: return "MPC860";
+ case PPC_8260: return "MPC8260";
+ case PPC_8245: return "MPC8245";
+ case PPC_8540: return "MPC8540";
+ case PPC_PSIM: return "PSIM";
+ case PPC_e200z0: return "e200z0";
+ case PPC_e200z1: return "e200z1";
+ case PPC_e200z4: return "e200z4";
+ case PPC_e200z6: return "e200z6";
+ case PPC_e200z7: return "e200z7";
+ case PPC_e500v2: return "e500v2";
+ case PPC_e6500: return "e6500";
+ default:
+    printk("Unknown CPU value of 0x%x. Please add it to "
+      "<bsps/powerpc/shared/cpuIdent.c>\n", cpu );
+ }
+ return "UNKNOWN";
+}
+
+ppc_cpu_id_t get_ppc_cpu_type(void)
+{
+  /*
+   * CPU types listed here have the lowermost nibble as a version
+   * identifier; we tweak them to the standard version.
+   */
+ const uint32_t ppc_cpu_id_version_nibble[] = {
+ PPC_e200z0,
+ PPC_e200z1,
+ PPC_e200z4,
+ PPC_e200z6,
+ PPC_e200z7
+ };
+
+ unsigned int pvr;
+ int i;
+
+ if ( PPC_UNKNOWN != current_ppc_cpu )
+ return current_ppc_cpu;
+
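+  /* The upper half of the PVR identifies the processor type; the lower
+   * half holds the revision (see get_ppc_cpu_revision() below).
+   */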
+ pvr = (_read_PPC_PVR() >> 16);
+ /*
+ * apply tweaks to ignore version
+ */
+ for (i = 0;
+ i < (sizeof(ppc_cpu_id_version_nibble)
+ /sizeof(ppc_cpu_id_version_nibble[0]));
+ i++) {
+ if ((pvr & 0xfff0) == (ppc_cpu_id_version_nibble[i] & 0xfff0)) {
+ pvr = ppc_cpu_id_version_nibble[i];
+ break;
+ }
+ }
+
+ current_ppc_cpu = (ppc_cpu_id_t) pvr;
+
+ switch (pvr) {
+ case PPC_405:
+ case PPC_405GP:
+ case PPC_405EX:
+ case PPC_440:
+ case PPC_601:
+ case PPC_5XX:
+ case PPC_603:
+ case PPC_603ev:
+ case PPC_603le:
+ case PPC_604:
+ case PPC_604r:
+ case PPC_750:
+ case PPC_750_IBM:
+ case PPC_7400:
+ case PPC_7455:
+ case PPC_7457:
+ case PPC_604e:
+ case PPC_620:
+ case PPC_860:
+ case PPC_8260:
+ case PPC_8245:
+ case PPC_PSIM:
+ case PPC_8540:
+ case PPC_e200z0:
+ case PPC_e200z1:
+ case PPC_e200z4:
+ case PPC_e200z6:
+ case PPC_e200z7:
+ case PPC_e300c1:
+ case PPC_e300c2:
+ case PPC_e300c3:
+ case PPC_e500v2:
+ case PPC_e6500:
+ break;
+ default:
+    printk("Unknown PVR value of 0x%x. Please add it to "
+      "<bsps/powerpc/shared/cpuIdent.c>\n", pvr );
+ return PPC_UNKNOWN;
+ }
+
+ /* determine features */
+
+ /* FIXME: This is incomplete; I couldn't go through all the
+ * manuals (yet).
+ */
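+  /* The cases below fall through deliberately: each CPU also gets all
+   * the features assigned by the cases that follow its own.
+   */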
+ switch ( current_ppc_cpu ) {
+ case PPC_7455:
+ case PPC_7457:
+ current_ppc_features.has_8_bats = 1;
+ case PPC_7400:
+  /* NOTE: The PSIM PVR doesn't tell us anything (its
+   * contents are not set based on what model
+   * the user chooses but have to be programmed via
+   * the device file with the special value 0xfffe,
+   * telling us that we have a 'psim cpu').
+   *
+   * I'm not sure page tables work if the user chooses,
+   * e.g., the 603 model...
+   */
+ case PPC_PSIM:
+ current_ppc_features.has_altivec = 1;
+ case PPC_604:
+ case PPC_604e:
+ case PPC_604r:
+ case PPC_750:
+ case PPC_750_IBM:
+ current_ppc_features.has_hw_ptbl_lkup = 1;
+ case PPC_8260:
+ case PPC_8245:
+ case PPC_601:
+ case PPC_603:
+ case PPC_603e:
+ case PPC_603ev:
+ case PPC_603le:
+ current_ppc_features.is_60x = 1;
+ default:
+ break;
+ }
+
+ switch ( current_ppc_cpu ) {
+ case PPC_e6500:
+ current_ppc_features.has_altivec = 1;
+ break;
+ default:
+ break;
+ }
+
+ switch ( current_ppc_cpu ) {
+ case PPC_405:
+ case PPC_405GP:
+ case PPC_405EX:
+ current_ppc_features.is_bookE = PPC_BOOKE_405;
+ break;
+ case PPC_440:
+ current_ppc_features.is_bookE = PPC_BOOKE_STD;
+ break;
+ case PPC_8540:
+ case PPC_e200z0:
+ case PPC_e200z1:
+ case PPC_e200z4:
+ case PPC_e200z6:
+ case PPC_e200z7:
+ case PPC_e500v2:
+ case PPC_e6500:
+ current_ppc_features.is_bookE = PPC_BOOKE_E500;
+ break;
+ default:
+ break;
+ }
+
+ switch ( current_ppc_cpu ) {
+ case PPC_860:
+ current_ppc_features.has_16byte_clne = 1;
+ default:
+ break;
+ }
+
+ switch ( current_ppc_cpu ) {
+ case PPC_603e:
+ case PPC_603ev:
+ case PPC_603le:
+ case PPC_e300c1:
+ case PPC_e300c2:
+ case PPC_e300c3:
+ case PPC_8240:
+ current_ppc_features.has_shadowed_gprs = 1;
+ default:
+ break;
+ }
+
+ return current_ppc_cpu;
+}
+
+ppc_cpu_revision_t get_ppc_cpu_revision(void)
+{
+ ppc_cpu_revision_t rev = (ppc_cpu_revision_t) (_read_PPC_PVR() & 0xffff);
+ current_ppc_revision = rev;
+ return rev;
+}
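
For reference, the PVR split used by get_ppc_cpu_type() and
get_ppc_cpu_revision() above can be reproduced stand-alone; the sample
value below is made up:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
      uint32_t pvr = 0x80040201;                 /* made-up raw PVR value */
      uint16_t type = (uint16_t) (pvr >> 16);    /* upper half: CPU type */
      uint16_t rev = (uint16_t) (pvr & 0xffff);  /* lower half: revision */

      printf("type 0x%04x, revision 0x%04x\n", type, rev);
      return 0;
    }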
diff --git a/bsps/powerpc/shared/cpu_asm.S b/bsps/powerpc/shared/cpu_asm.S
new file mode 100644
index 0000000000..5b095d9cea
--- /dev/null
+++ b/bsps/powerpc/shared/cpu_asm.S
@@ -0,0 +1,595 @@
+/* cpu_asm.s 1.1 - 95/12/04
+ *
+ * This file contains the assembly code for the PowerPC implementation
+ * of RTEMS.
+ *
+ * Author: Andrew Bray <andy@i-cubed.co.uk>
+ *
+ * COPYRIGHT (c) 1995 by i-cubed ltd.
+ *
+ * To anyone who acknowledges that this file is provided "AS IS"
+ * without any express or implied warranty:
+ * permission to use, copy, modify, and distribute this file
+ * for any purpose is hereby granted without fee, provided that
+ * the above copyright notice and this notice appears in all
+ * copies, and that the name of i-cubed limited not be used in
+ * advertising or publicity pertaining to distribution of the
+ * software without specific, written prior permission.
+ * i-cubed limited makes no representations about the suitability
+ * of this software for any purpose.
+ *
+ * Derived from c/src/exec/cpu/no_cpu/cpu_asm.c:
+ *
+ * COPYRIGHT (c) 1989-1997.
+ * On-Line Applications Research Corporation (OAR).
+ *
+ * Copyright (c) 2011, 2017 embedded brains GmbH
+ *
+ * The license and distribution terms for this file may be found in
+ * the file LICENSE in this distribution or at
+ * http://www.rtems.org/license/LICENSE.
+ */
+
+#include <rtems/asm.h>
+#include <rtems/powerpc/powerpc.h>
+#include <rtems/score/percpu.h>
+#include <libcpu/powerpc-utility.h>
+#include <bspopts.h>
+
+#ifdef BSP_USE_DATA_CACHE_BLOCK_TOUCH
+ #define DATA_CACHE_TOUCH(rega, regb) \
+ dcbt rega, regb
+#else
+ #define DATA_CACHE_TOUCH(rega, regb)
+#endif
+
+#if BSP_DATA_CACHE_ENABLED && PPC_DEFAULT_CACHE_LINE_SIZE == 32
+ #define DATA_CACHE_ZERO_AND_TOUCH(reg, offset) \
+ li reg, offset; dcbz reg, r3; DATA_CACHE_TOUCH(reg, r4)
+#else
+ #define DATA_CACHE_ZERO_AND_TOUCH(reg, offset)
+#endif
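+
+/* The two macros above are performance hints: dcbz establishes the store
+ * target cache line as zero without fetching it from memory (the line is
+ * about to be overwritten anyway), and dcbt prefetches the line that will
+ * be loaded from next.
+ */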
+
+#define PPC_CONTEXT_CACHE_LINE_0 (1 * PPC_DEFAULT_CACHE_LINE_SIZE)
+#define PPC_CONTEXT_CACHE_LINE_1 (2 * PPC_DEFAULT_CACHE_LINE_SIZE)
+#define PPC_CONTEXT_CACHE_LINE_2 (3 * PPC_DEFAULT_CACHE_LINE_SIZE)
+#define PPC_CONTEXT_CACHE_LINE_3 (4 * PPC_DEFAULT_CACHE_LINE_SIZE)
+#define PPC_CONTEXT_CACHE_LINE_4 (5 * PPC_DEFAULT_CACHE_LINE_SIZE)
+#define PPC_CONTEXT_CACHE_LINE_5 (6 * PPC_DEFAULT_CACHE_LINE_SIZE)
+
+ BEGIN_CODE
+
+#if PPC_HAS_FPU == 1
+
+/*
+ * Offsets for Context_Control_fp
+ */
+
+#if (PPC_HAS_DOUBLE==1)
+ .set FP_SIZE, 8
+#define LDF lfd
+#define STF stfd
+#else
+ .set FP_SIZE, 4
+#define LDF lfs
+#define STF stfs
+#endif
+
+ .set FP_0, 0
+ .set FP_1, (FP_0 + FP_SIZE)
+ .set FP_2, (FP_1 + FP_SIZE)
+ .set FP_3, (FP_2 + FP_SIZE)
+ .set FP_4, (FP_3 + FP_SIZE)
+ .set FP_5, (FP_4 + FP_SIZE)
+ .set FP_6, (FP_5 + FP_SIZE)
+ .set FP_7, (FP_6 + FP_SIZE)
+ .set FP_8, (FP_7 + FP_SIZE)
+ .set FP_9, (FP_8 + FP_SIZE)
+ .set FP_10, (FP_9 + FP_SIZE)
+ .set FP_11, (FP_10 + FP_SIZE)
+ .set FP_12, (FP_11 + FP_SIZE)
+ .set FP_13, (FP_12 + FP_SIZE)
+ .set FP_14, (FP_13 + FP_SIZE)
+ .set FP_15, (FP_14 + FP_SIZE)
+ .set FP_16, (FP_15 + FP_SIZE)
+ .set FP_17, (FP_16 + FP_SIZE)
+ .set FP_18, (FP_17 + FP_SIZE)
+ .set FP_19, (FP_18 + FP_SIZE)
+ .set FP_20, (FP_19 + FP_SIZE)
+ .set FP_21, (FP_20 + FP_SIZE)
+ .set FP_22, (FP_21 + FP_SIZE)
+ .set FP_23, (FP_22 + FP_SIZE)
+ .set FP_24, (FP_23 + FP_SIZE)
+ .set FP_25, (FP_24 + FP_SIZE)
+ .set FP_26, (FP_25 + FP_SIZE)
+ .set FP_27, (FP_26 + FP_SIZE)
+ .set FP_28, (FP_27 + FP_SIZE)
+ .set FP_29, (FP_28 + FP_SIZE)
+ .set FP_30, (FP_29 + FP_SIZE)
+ .set FP_31, (FP_30 + FP_SIZE)
+ .set FP_FPSCR, (FP_31 + FP_SIZE)
+
+/*
+ * _CPU_Context_save_fp
+ *
+ * This routine is responsible for saving the FP context
+ * at *fp_context_ptr.  If the pointer to save the FP context
+ * to is changed, then the pointer is modified by this routine.
+ *
+ * Sometimes a macro implementation of this is in cpu.h which dereferences
+ * the ** and a similarly named routine in this file is passed something
+ * like a (Context_Control_fp *).  The general rule on making this decision
+ * is to avoid writing assembly language.
+ */
+
+ ALIGN (PPC_CACHE_ALIGNMENT, PPC_CACHE_ALIGN_POWER)
+ PUBLIC_PROC (_CPU_Context_save_fp)
+PROC (_CPU_Context_save_fp):
+/* An FP context switch may occur in an ISR or exception handler when the FPU
+ * is not available.  Therefore, we must explicitly enable it here!
+ */
+#if !defined(PPC_DISABLE_MSR_ACCESS)
+ mfmsr r4
+ andi. r5,r4,MSR_FP
+ bne 1f
+ ori r5,r4,MSR_FP
+ mtmsr r5
+ isync
+#endif /* END PPC_DISABLE_MSR_ACCESS */
+
+1:
+ lwz r3, 0(r3)
+ STF f0, FP_0(r3)
+ STF f1, FP_1(r3)
+ STF f2, FP_2(r3)
+ STF f3, FP_3(r3)
+ STF f4, FP_4(r3)
+ STF f5, FP_5(r3)
+ STF f6, FP_6(r3)
+ STF f7, FP_7(r3)
+ STF f8, FP_8(r3)
+ STF f9, FP_9(r3)
+ STF f10, FP_10(r3)
+ STF f11, FP_11(r3)
+ STF f12, FP_12(r3)
+ STF f13, FP_13(r3)
+ STF f14, FP_14(r3)
+ STF f15, FP_15(r3)
+ STF f16, FP_16(r3)
+ STF f17, FP_17(r3)
+ STF f18, FP_18(r3)
+ STF f19, FP_19(r3)
+ STF f20, FP_20(r3)
+ STF f21, FP_21(r3)
+ STF f22, FP_22(r3)
+ STF f23, FP_23(r3)
+ STF f24, FP_24(r3)
+ STF f25, FP_25(r3)
+ STF f26, FP_26(r3)
+ STF f27, FP_27(r3)
+ STF f28, FP_28(r3)
+ STF f29, FP_29(r3)
+ STF f30, FP_30(r3)
+ STF f31, FP_31(r3)
+ mffs f2
+ STF f2, FP_FPSCR(r3)
+#if !defined(PPC_DISABLE_MSR_ACCESS)
+ bne 1f
+ mtmsr r4
+ isync
+#endif /* END PPC_DISABLE_MSR_ACCESS */
+
+1:
+ blr
+
+/*
+ * _CPU_Context_restore_fp
+ *
+ * This routine is responsible for restoring the FP context
+ * at *fp_context_ptr.  If the pointer to load the FP context
+ * from is changed, then the pointer is modified by this routine.
+ *
+ * Sometimes a macro implementation of this is in cpu.h which dereferences
+ * the ** and a similarly named routine in this file is passed something
+ * like a (Context_Control_fp *).  The general rule on making this decision
+ * is to avoid writing assembly language.
+ */
+
+ ALIGN (PPC_CACHE_ALIGNMENT, PPC_CACHE_ALIGN_POWER)
+ PUBLIC_PROC (_CPU_Context_restore_fp)
+PROC (_CPU_Context_restore_fp):
+ lwz r3, 0(r3)
+/* An FP context switch may occur in an ISR or exception handler when the FPU
+ * is not available.  Therefore, we must explicitly enable it here!
+ */
+#if !defined(PPC_DISABLE_MSR_ACCESS)
+ mfmsr r4
+ andi. r5,r4,MSR_FP
+ bne 1f
+ ori r5,r4,MSR_FP
+ mtmsr r5
+ isync
+#endif /* END PPC_DISABLE_MSR_ACCESS */
+
+1:
+ LDF f2, FP_FPSCR(r3)
+ mtfsf 255, f2
+ LDF f0, FP_0(r3)
+ LDF f1, FP_1(r3)
+ LDF f2, FP_2(r3)
+ LDF f3, FP_3(r3)
+ LDF f4, FP_4(r3)
+ LDF f5, FP_5(r3)
+ LDF f6, FP_6(r3)
+ LDF f7, FP_7(r3)
+ LDF f8, FP_8(r3)
+ LDF f9, FP_9(r3)
+ LDF f10, FP_10(r3)
+ LDF f11, FP_11(r3)
+ LDF f12, FP_12(r3)
+ LDF f13, FP_13(r3)
+ LDF f14, FP_14(r3)
+ LDF f15, FP_15(r3)
+ LDF f16, FP_16(r3)
+ LDF f17, FP_17(r3)
+ LDF f18, FP_18(r3)
+ LDF f19, FP_19(r3)
+ LDF f20, FP_20(r3)
+ LDF f21, FP_21(r3)
+ LDF f22, FP_22(r3)
+ LDF f23, FP_23(r3)
+ LDF f24, FP_24(r3)
+ LDF f25, FP_25(r3)
+ LDF f26, FP_26(r3)
+ LDF f27, FP_27(r3)
+ LDF f28, FP_28(r3)
+ LDF f29, FP_29(r3)
+ LDF f30, FP_30(r3)
+ LDF f31, FP_31(r3)
+#if !defined(PPC_DISABLE_MSR_ACCESS)
+ bne 1f
+ mtmsr r4
+ isync
+#endif /* END PPC_DISABLE_MSR_ACCESS */
+
+1:
+ blr
+#endif /* PPC_HAS_FPU == 1 */
+
+ ALIGN (PPC_CACHE_ALIGNMENT, PPC_CACHE_ALIGN_POWER)
+ PUBLIC_PROC (_CPU_Context_switch)
+PROC (_CPU_Context_switch):
+
+#ifdef BSP_USE_SYNC_IN_CONTEXT_SWITCH
+ sync
+ isync
+#endif
+
+ /* Align to a cache line */
+ clrrwi r3, r3, PPC_DEFAULT_CACHE_LINE_POWER
+ clrrwi r5, r4, PPC_DEFAULT_CACHE_LINE_POWER
+
+ DATA_CACHE_ZERO_AND_TOUCH(r10, PPC_CONTEXT_CACHE_LINE_0)
+
+#if PPC_CONTEXT_CACHE_LINE_2 <= PPC_CONTEXT_VOLATILE_SIZE
+ DATA_CACHE_ZERO_AND_TOUCH(r11, PPC_CONTEXT_CACHE_LINE_1)
+#endif
+
+ /* Save context to r3 */
+
+ GET_SELF_CPU_CONTROL r12
+#if !defined(PPC_DISABLE_MSR_ACCESS)
+ mfmsr r6
+#endif /* END PPC_DISABLE_MSR_ACCESS */
+ mfcr r7
+ mflr r8
+ lwz r11, PER_CPU_ISR_DISPATCH_DISABLE(r12)
+
+ /*
+ * We have to clear the reservation of the executing thread. See also
+ * Book E section 6.1.6.2 "Atomic Update Primitives". Recent GCC
+ * versions use atomic operations in the C++ library for example. On
+ * SMP configurations the reservation is cleared later during the
+ * context switch.
+ */
+#if PPC_CONTEXT_OFFSET_GPR1 != PPC_CONTEXT_CACHE_LINE_0 \
+ || !BSP_DATA_CACHE_ENABLED \
+ || PPC_DEFAULT_CACHE_LINE_SIZE != 32
+ li r10, PPC_CONTEXT_OFFSET_GPR1
+#endif
+#ifndef RTEMS_SMP
+ stwcx. r1, r3, r10
+#endif
+
+ stw r6, PPC_CONTEXT_OFFSET_MSR(r3)
+ stw r7, PPC_CONTEXT_OFFSET_CR(r3)
+ PPC_REG_STORE r1, PPC_CONTEXT_OFFSET_GPR1(r3)
+ PPC_REG_STORE r8, PPC_CONTEXT_OFFSET_LR(r3)
+
+ PPC_GPR_STORE r14, PPC_CONTEXT_OFFSET_GPR14(r3)
+ PPC_GPR_STORE r15, PPC_CONTEXT_OFFSET_GPR15(r3)
+
+#if PPC_CONTEXT_OFFSET_GPR20 == PPC_CONTEXT_CACHE_LINE_2
+ DATA_CACHE_ZERO_AND_TOUCH(r10, PPC_CONTEXT_CACHE_LINE_2)
+#endif
+
+ PPC_GPR_STORE r16, PPC_CONTEXT_OFFSET_GPR16(r3)
+ PPC_GPR_STORE r17, PPC_CONTEXT_OFFSET_GPR17(r3)
+
+#if PPC_CONTEXT_OFFSET_GPR26 == PPC_CONTEXT_CACHE_LINE_2
+ DATA_CACHE_ZERO_AND_TOUCH(r10, PPC_CONTEXT_CACHE_LINE_2)
+#endif
+
+ PPC_GPR_STORE r18, PPC_CONTEXT_OFFSET_GPR18(r3)
+ PPC_GPR_STORE r19, PPC_CONTEXT_OFFSET_GPR19(r3)
+
+#if PPC_CONTEXT_OFFSET_GPR24 == PPC_CONTEXT_CACHE_LINE_3
+ DATA_CACHE_ZERO_AND_TOUCH(r10, PPC_CONTEXT_CACHE_LINE_3)
+#endif
+
+ PPC_GPR_STORE r20, PPC_CONTEXT_OFFSET_GPR20(r3)
+ PPC_GPR_STORE r21, PPC_CONTEXT_OFFSET_GPR21(r3)
+ PPC_GPR_STORE r22, PPC_CONTEXT_OFFSET_GPR22(r3)
+ PPC_GPR_STORE r23, PPC_CONTEXT_OFFSET_GPR23(r3)
+
+#if PPC_CONTEXT_OFFSET_GPR28 == PPC_CONTEXT_CACHE_LINE_4
+ DATA_CACHE_ZERO_AND_TOUCH(r10, PPC_CONTEXT_CACHE_LINE_4)
+#endif
+
+ PPC_GPR_STORE r24, PPC_CONTEXT_OFFSET_GPR24(r3)
+ PPC_GPR_STORE r25, PPC_CONTEXT_OFFSET_GPR25(r3)
+
+#if PPC_CONTEXT_OFFSET_V22 == PPC_CONTEXT_CACHE_LINE_2
+ DATA_CACHE_ZERO_AND_TOUCH(r10, PPC_CONTEXT_CACHE_LINE_2)
+#endif
+
+ PPC_GPR_STORE r26, PPC_CONTEXT_OFFSET_GPR26(r3)
+ PPC_GPR_STORE r27, PPC_CONTEXT_OFFSET_GPR27(r3)
+
+ PPC_GPR_STORE r28, PPC_CONTEXT_OFFSET_GPR28(r3)
+ PPC_GPR_STORE r29, PPC_CONTEXT_OFFSET_GPR29(r3)
+ PPC_GPR_STORE r30, PPC_CONTEXT_OFFSET_GPR30(r3)
+ PPC_GPR_STORE r31, PPC_CONTEXT_OFFSET_GPR31(r3)
+
+ stw r11, PPC_CONTEXT_OFFSET_ISR_DISPATCH_DISABLE(r3)
+
+#ifdef PPC_MULTILIB_ALTIVEC
+ li r9, PPC_CONTEXT_OFFSET_V20
+ stvx v20, r3, r9
+ li r9, PPC_CONTEXT_OFFSET_V21
+ stvx v21, r3, r9
+
+#if PPC_CONTEXT_OFFSET_V26 == PPC_CONTEXT_CACHE_LINE_3
+ DATA_CACHE_ZERO_AND_TOUCH(r10, PPC_CONTEXT_CACHE_LINE_3)
+#endif
+
+ li r9, PPC_CONTEXT_OFFSET_V22
+ stvx v22, r3, r9
+ li r9, PPC_CONTEXT_OFFSET_V23
+ stvx v23, r3, r9
+ li r9, PPC_CONTEXT_OFFSET_V24
+ stvx v24, r3, r9
+ li r9, PPC_CONTEXT_OFFSET_V25
+ stvx v25, r3, r9
+
+#if PPC_CONTEXT_OFFSET_V30 == PPC_CONTEXT_CACHE_LINE_4
+ DATA_CACHE_ZERO_AND_TOUCH(r10, PPC_CONTEXT_CACHE_LINE_4)
+#endif
+
+ li r9, PPC_CONTEXT_OFFSET_V26
+ stvx v26, r3, r9
+ li r9, PPC_CONTEXT_OFFSET_V27
+ stvx v27, r3, r9
+ li r9, PPC_CONTEXT_OFFSET_V28
+ stvx v28, r3, r9
+ li r9, PPC_CONTEXT_OFFSET_V29
+ stvx v29, r3, r9
+
+#if PPC_CONTEXT_OFFSET_F17 == PPC_CONTEXT_CACHE_LINE_5
+ DATA_CACHE_ZERO_AND_TOUCH(r10, PPC_CONTEXT_CACHE_LINE_5)
+#endif
+
+ li r9, PPC_CONTEXT_OFFSET_V30
+ stvx v30, r3, r9
+ li r9, PPC_CONTEXT_OFFSET_V31
+ stvx v31, r3, r9
+ mfvrsave r9
+ stw r9, PPC_CONTEXT_OFFSET_VRSAVE(r3)
+#endif
+
+#ifdef PPC_MULTILIB_FPU
+ stfd f14, PPC_CONTEXT_OFFSET_F14(r3)
+ stfd f15, PPC_CONTEXT_OFFSET_F15(r3)
+ stfd f16, PPC_CONTEXT_OFFSET_F16(r3)
+ stfd f17, PPC_CONTEXT_OFFSET_F17(r3)
+ stfd f18, PPC_CONTEXT_OFFSET_F18(r3)
+ stfd f19, PPC_CONTEXT_OFFSET_F19(r3)
+ stfd f20, PPC_CONTEXT_OFFSET_F20(r3)
+ stfd f21, PPC_CONTEXT_OFFSET_F21(r3)
+ stfd f22, PPC_CONTEXT_OFFSET_F22(r3)
+ stfd f23, PPC_CONTEXT_OFFSET_F23(r3)
+ stfd f24, PPC_CONTEXT_OFFSET_F24(r3)
+ stfd f25, PPC_CONTEXT_OFFSET_F25(r3)
+ stfd f26, PPC_CONTEXT_OFFSET_F26(r3)
+ stfd f27, PPC_CONTEXT_OFFSET_F27(r3)
+ stfd f28, PPC_CONTEXT_OFFSET_F28(r3)
+ stfd f29, PPC_CONTEXT_OFFSET_F29(r3)
+ stfd f30, PPC_CONTEXT_OFFSET_F30(r3)
+ stfd f31, PPC_CONTEXT_OFFSET_F31(r3)
+#endif
+
+#ifdef RTEMS_SMP
+ /*
+ * The executing thread no longer executes on this processor. Switch
+ * the stack to the temporary interrupt stack of this processor. Mark
+ * the context of the executing thread as not executing.
+ */
+ msync
+
+ addi r1, r12, PER_CPU_INTERRUPT_FRAME_AREA + CPU_INTERRUPT_FRAME_SIZE
+ li r6, 0
+ stw r6, PPC_CONTEXT_OFFSET_IS_EXECUTING(r3)
+
+.Lcheck_is_executing:
+
+ /* Check the is executing indicator of the heir context */
+ addi r6, r5, PPC_CONTEXT_OFFSET_IS_EXECUTING
+ lwarx r7, r0, r6
+ cmpwi r7, 0
+ bne .Lget_potential_new_heir
+
+ /* Try to update the is executing indicator of the heir context */
+ li r7, 1
+ stwcx. r7, r0, r6
+ bne .Lget_potential_new_heir
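+ /* The isync below is the acquire barrier: it orders the context
+  * loads that follow after ownership of the heir context is taken.
+  */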
+ isync
+#endif
+
+ /* Restore context from r5 */
+restore_context:
+
+#if defined(__ALTIVEC__) && !defined(PPC_MULTILIB_ALTIVEC)
+ mr r4, r5
+ .extern _CPU_Context_switch_altivec
+ bl _CPU_Context_switch_altivec
+#endif
+
+ lwz r6, PPC_CONTEXT_OFFSET_MSR(r5)
+ lwz r7, PPC_CONTEXT_OFFSET_CR(r5)
+ PPC_REG_LOAD r1, PPC_CONTEXT_OFFSET_GPR1(r5)
+ PPC_REG_LOAD r8, PPC_CONTEXT_OFFSET_LR(r5)
+
+ PPC_GPR_LOAD r14, PPC_CONTEXT_OFFSET_GPR14(r5)
+ PPC_GPR_LOAD r15, PPC_CONTEXT_OFFSET_GPR15(r5)
+
+ DATA_CACHE_TOUCH(r0, r1)
+
+ PPC_GPR_LOAD r16, PPC_CONTEXT_OFFSET_GPR16(r5)
+ PPC_GPR_LOAD r17, PPC_CONTEXT_OFFSET_GPR17(r5)
+ PPC_GPR_LOAD r18, PPC_CONTEXT_OFFSET_GPR18(r5)
+ PPC_GPR_LOAD r19, PPC_CONTEXT_OFFSET_GPR19(r5)
+
+ PPC_GPR_LOAD r20, PPC_CONTEXT_OFFSET_GPR20(r5)
+ PPC_GPR_LOAD r21, PPC_CONTEXT_OFFSET_GPR21(r5)
+ PPC_GPR_LOAD r22, PPC_CONTEXT_OFFSET_GPR22(r5)
+ PPC_GPR_LOAD r23, PPC_CONTEXT_OFFSET_GPR23(r5)
+
+ PPC_GPR_LOAD r24, PPC_CONTEXT_OFFSET_GPR24(r5)
+ PPC_GPR_LOAD r25, PPC_CONTEXT_OFFSET_GPR25(r5)
+ PPC_GPR_LOAD r26, PPC_CONTEXT_OFFSET_GPR26(r5)
+ PPC_GPR_LOAD r27, PPC_CONTEXT_OFFSET_GPR27(r5)
+
+ PPC_GPR_LOAD r28, PPC_CONTEXT_OFFSET_GPR28(r5)
+ PPC_GPR_LOAD r29, PPC_CONTEXT_OFFSET_GPR29(r5)
+ PPC_GPR_LOAD r30, PPC_CONTEXT_OFFSET_GPR30(r5)
+ PPC_GPR_LOAD r31, PPC_CONTEXT_OFFSET_GPR31(r5)
+
+#ifdef __powerpc64__
+ ld r13, PPC_CONTEXT_OFFSET_TP(r5)
+#else
+ lwz r2, PPC_CONTEXT_OFFSET_TP(r5)
+#endif
+ lwz r11, PPC_CONTEXT_OFFSET_ISR_DISPATCH_DISABLE(r5)
+
+#ifdef PPC_MULTILIB_ALTIVEC
+ li r9, PPC_CONTEXT_OFFSET_V20
+ lvx v20, r5, r9
+ li r9, PPC_CONTEXT_OFFSET_V21
+ lvx v21, r5, r9
+ li r9, PPC_CONTEXT_OFFSET_V22
+ lvx v22, r5, r9
+ li r9, PPC_CONTEXT_OFFSET_V23
+ lvx v23, r5, r9
+ li r9, PPC_CONTEXT_OFFSET_V24
+ lvx v24, r5, r9
+ li r9, PPC_CONTEXT_OFFSET_V25
+ lvx v25, r5, r9
+ li r9, PPC_CONTEXT_OFFSET_V26
+ lvx v26, r5, r9
+ li r9, PPC_CONTEXT_OFFSET_V27
+ lvx v27, r5, r9
+ li r9, PPC_CONTEXT_OFFSET_V28
+ lvx v28, r5, r9
+ li r9, PPC_CONTEXT_OFFSET_V29
+ lvx v29, r5, r9
+ li r9, PPC_CONTEXT_OFFSET_V30
+ lvx v30, r5, r9
+ li r9, PPC_CONTEXT_OFFSET_V31
+ lvx v31, r5, r9
+ lwz r9, PPC_CONTEXT_OFFSET_VRSAVE(r5)
+ mtvrsave r9
+#endif
+
+#ifdef PPC_MULTILIB_FPU
+ lfd f14, PPC_CONTEXT_OFFSET_F14(r5)
+ lfd f15, PPC_CONTEXT_OFFSET_F15(r5)
+ lfd f16, PPC_CONTEXT_OFFSET_F16(r5)
+ lfd f17, PPC_CONTEXT_OFFSET_F17(r5)
+ lfd f18, PPC_CONTEXT_OFFSET_F18(r5)
+ lfd f19, PPC_CONTEXT_OFFSET_F19(r5)
+ lfd f20, PPC_CONTEXT_OFFSET_F20(r5)
+ lfd f21, PPC_CONTEXT_OFFSET_F21(r5)
+ lfd f22, PPC_CONTEXT_OFFSET_F22(r5)
+ lfd f23, PPC_CONTEXT_OFFSET_F23(r5)
+ lfd f24, PPC_CONTEXT_OFFSET_F24(r5)
+ lfd f25, PPC_CONTEXT_OFFSET_F25(r5)
+ lfd f26, PPC_CONTEXT_OFFSET_F26(r5)
+ lfd f27, PPC_CONTEXT_OFFSET_F27(r5)
+ lfd f28, PPC_CONTEXT_OFFSET_F28(r5)
+ lfd f29, PPC_CONTEXT_OFFSET_F29(r5)
+ lfd f30, PPC_CONTEXT_OFFSET_F30(r5)
+ lfd f31, PPC_CONTEXT_OFFSET_F31(r5)
+#endif
+
+ mtlr r8
+ mtcr r7
+#if !defined(PPC_DISABLE_MSR_ACCESS)
+ mtmsr r6
+#endif /* END PPC_DISABLE_MSR_ACCESS */
+ stw r11, PER_CPU_ISR_DISPATCH_DISABLE(r12)
+
+#ifdef BSP_USE_SYNC_IN_CONTEXT_SWITCH
+ isync
+#endif
+
+ blr
+
+ PUBLIC_PROC (_CPU_Context_restore)
+PROC (_CPU_Context_restore):
+ /* Align to a cache line */
+ clrrwi r5, r3, PPC_DEFAULT_CACHE_LINE_POWER
+
+ GET_SELF_CPU_CONTROL r12
+
+#if defined(__ALTIVEC__) && !defined(PPC_MULTILIB_ALTIVEC)
+ li r3, 0
+#endif
+
+ b restore_context
+
+#ifdef RTEMS_SMP
+.Lget_potential_new_heir:
+
+ /* We may have a new heir */
+
+ /* Read the executing and heir */
+ PPC_REG_LOAD r7, PER_CPU_OFFSET_EXECUTING(r12)
+ PPC_REG_LOAD r8, PER_CPU_OFFSET_HEIR(r12)
+
+ /*
+ * Update the executing only if necessary to avoid cache line
+ * monopolization.
+ */
+ PPC_REG_CMP r7, r8
+ beq .Lcheck_is_executing
+
+ /* Calculate the heir context pointer: the context sits at the same
+  * offset within every TCB, so the offset derived from the executing
+  * thread can be applied to the heir TCB.
+  */
+ sub r7, r4, r7
+ add r4, r8, r7
+ clrrwi r5, r4, PPC_DEFAULT_CACHE_LINE_POWER
+
+ /* Update the executing */
+ PPC_REG_STORE r8, PER_CPU_OFFSET_EXECUTING(r12)
+
+ b .Lcheck_is_executing
+#endif
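
The sub/add pair in .Lget_potential_new_heir computes the heir's context
with plain pointer arithmetic: the context lives at the same offset inside
every thread control block, so the offset derived from the executing
thread carries over. A C rendering of the same computation, using
illustrative stand-in types rather than the real RTEMS structures:

    #include <stdint.h>

    typedef struct Thread_Control Thread_Control;    /* stand-in TCB type */
    typedef struct Context_Control Context_Control;  /* stand-in context type */

    Context_Control *heir_context(
      Thread_Control  *executing,
      Context_Control *executing_context,
      Thread_Control  *heir
    )
    {
      /* Offset of the context within a TCB, identical for every thread */
      uintptr_t offset = (uintptr_t) executing_context - (uintptr_t) executing;

      return (Context_Control *) ((uintptr_t) heir + offset);
    }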
diff --git a/bsps/powerpc/shared/ppc-print-stack.c b/bsps/powerpc/shared/ppc-print-stack.c
new file mode 100644
index 0000000000..1e13b3d9ed
--- /dev/null
+++ b/bsps/powerpc/shared/ppc-print-stack.c
@@ -0,0 +1,43 @@
+#include <inttypes.h>
+#include <rtems/bspIo.h>
+#include <libcpu/stackTrace.h>
+#include <libcpu/spr.h>
+
+SPR_RO(PPC_LR)
+
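+/* FrameRec mirrors the 32-bit PowerPC stack frame layout: the first word
+ * of a frame is the back chain pointer to the caller's frame and the next
+ * word is the LR save word.
+ */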
+typedef struct FrameRec_ {
+ struct FrameRec_ *up;
+ void *lr;
+} FrameRec, *Frame;
+
+#define CPU_STACK_TRACE_DEPTH 40
+
+void CPU_stack_take_snapshot(void **stack, int size, void *pc, void *lr, void *r1)
+{
+  register Frame p = (Frame) lr;
+  register int i = 0;
+
+  if (pc) stack[i++] = pc;
+  if (!p)
+    p = (Frame) _read_PPC_LR();
+  stack[i++] = p;
+  p = r1;
+  if (!p) /* no macro for reading user regs */
+    __asm__ __volatile__("mr %0, %%r1" : "=r" (p));
+ for (; i<size-1 && p->up; p=p->up, i++) {
+ stack[i]=p->up->lr;
+ }
+ stack[i]=0;
+}
+
+void CPU_print_stack(void)
+{
+ void *stck[CPU_STACK_TRACE_DEPTH];
+ int i;
+ CPU_stack_take_snapshot(stck,CPU_STACK_TRACE_DEPTH,0,0,0);
+  for (i = 0; stck[i]; i++) {
+    if (i % 5) printk("--> ");
+    else printk("\n");
+    /* PRIxPTR: the 0x prefix implies hexadecimal output */
+    printk("0x%08" PRIxPTR, (uintptr_t) stck[i]);
+  }
+ printk("\n");
+}
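
CPU_stack_take_snapshot() above follows the standard PowerPC back chain:
each frame begins with a pointer to the caller's frame, followed by the
saved LR. A host-side sketch of the same walk over a hand-built chain
(all addresses here are fake):

    #include <stdint.h>
    #include <stdio.h>

    struct frame {
      struct frame *up; /* back chain word: caller's frame */
      void *lr;         /* LR save word */
    };

    static void walk(const struct frame *p)
    {
      while (p != NULL && p->up != NULL) {
        printf("return address %p\n", p->up->lr);
        p = p->up;
      }
    }

    int main(void)
    {
      struct frame outer  = { NULL, (void *) (uintptr_t) 0x3000 };
      struct frame middle = { &outer, (void *) (uintptr_t) 0x2000 };
      struct frame inner  = { &middle, (void *) (uintptr_t) 0x1000 };

      walk(&inner); /* prints 0x2000, then 0x3000 */
      return 0;
    }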
diff --git a/bsps/powerpc/shared/shared.am b/bsps/powerpc/shared/shared.am
new file mode 100644
index 0000000000..7b779361f4
--- /dev/null
+++ b/bsps/powerpc/shared/shared.am
@@ -0,0 +1,4 @@
+libbsp_a_SOURCES += ../../../../../../bsps/powerpc/shared/cpu_asm.S
+libbsp_a_SOURCES += ../../../../../../bsps/powerpc/shared/cpu.c
+libbsp_a_SOURCES += ../../../../../../bsps/powerpc/shared/cpuIdent.c
+libbsp_a_SOURCES += ../../../../../../bsps/powerpc/shared/ppc-print-stack.c
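
A note on the deep ../ prefixes: shared.am is presumably included from
per-BSP Makefile.am files that still live under the legacy
c/src/lib/libbsp build tree, while this reorganization moves the sources
to bsps/, so each source is referenced relative to those build files.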