summaryrefslogtreecommitdiffstats
path: root/c
diff options
context:
space:
mode:
authorJoel Sherrill <joel.sherrill@OARcorp.com>2005-10-06 19:36:41 +0000
committerJoel Sherrill <joel.sherrill@OARcorp.com>2005-10-06 19:36:41 +0000
commit384ba7563aaede68c38be361b9b5edd8bf4b3d0e (patch)
tree34cedbf5553935b6543aa91010157a09254f9adb /c
parent2005-10-06 Joel Sherrill <joel@OARcorp.com> (diff)
downloadrtems-384ba7563aaede68c38be361b9b5edd8bf4b3d0e.tar.bz2
2005-10-06 Till Straumann <strauman@slac.stanford.edu>
PR 833/bsps * irq/irq_asm.S: Currently, all (new exception) BSPs explicitly enable the FPU across the user ISR but DON'T save/restore the FPU context. Any use of the FPU from the user handler (e.g., due to GCC optimizations) results in corruption. The fix results in an exception in such cases (user ISR must explicitly save/enable/restore the FPU).
Diffstat (limited to 'c')
-rw-r--r--c/src/lib/libbsp/powerpc/mbx8xx/ChangeLog9
-rw-r--r--c/src/lib/libbsp/powerpc/mbx8xx/irq/irq_asm.S27
-rw-r--r--c/src/lib/libbsp/powerpc/mpc8260ads/ChangeLog9
-rw-r--r--c/src/lib/libbsp/powerpc/mpc8260ads/irq/irq_asm.S28
-rw-r--r--c/src/lib/libbsp/powerpc/shared/ChangeLog9
-rw-r--r--c/src/lib/libbsp/powerpc/shared/irq/irq_asm.S28
6 files changed, 98 insertions, 12 deletions
diff --git a/c/src/lib/libbsp/powerpc/mbx8xx/ChangeLog b/c/src/lib/libbsp/powerpc/mbx8xx/ChangeLog
index 19aa7e73f6..f821ef7988 100644
--- a/c/src/lib/libbsp/powerpc/mbx8xx/ChangeLog
+++ b/c/src/lib/libbsp/powerpc/mbx8xx/ChangeLog
@@ -1,3 +1,12 @@
+2005-10-06 Till Straumann <strauman@slac.stanford.edu>
+
+ PR 833/bsps
+ * irq/irq_asm.S: Currently, all (new exception) BSPs explicitly enable
+ the FPU across the user ISR but DON'T save/restore the FPU context.
+ Any use of the FPU from the user handler (e.g., due to GCC
+ optimizations) results in corruption. The fix results in an exception
+ in such cases (user ISR must explicitly save/enable/restore the FPU).
+
2005-09-12 Thomas Doerfler <Thomas.Doerfler@imd-systems.de>
PR 527/bsps
diff --git a/c/src/lib/libbsp/powerpc/mbx8xx/irq/irq_asm.S b/c/src/lib/libbsp/powerpc/mbx8xx/irq/irq_asm.S
index 0e3ad7af7b..d94e9d2493 100644
--- a/c/src/lib/libbsp/powerpc/mbx8xx/irq/irq_asm.S
+++ b/c/src/lib/libbsp/powerpc/mbx8xx/irq/irq_asm.S
@@ -12,6 +12,8 @@
* Till Straumann <strauman@slac.stanford.edu>, 2003/7:
* - store isr nesting level in _ISR_Nest_level rather than
* SPRG0 - RTEMS relies on that variable.
+ * Till Straumann <strauman@slac.stanford.edu>, 2005/4:
+ * - DONT enable FP across user ISR since fpregs are never saved!!
*
* $Id$
*/
@@ -126,11 +128,7 @@ SYM (shared_raw_irq_code_entry):
* also, on CPUs with FP, enable FP so that FP context can be
* saved and restored (using FP instructions)
*/
-#if (PPC_HAS_FPU == 0)
ori r3, r3, MSR_RI | MSR_IR | MSR_DR
-#else
- ori r3, r3, MSR_RI | MSR_IR | MSR_DR | MSR_FP
-#endif
mtmsr r3
SYNC
/*
@@ -337,6 +335,27 @@ nested:
rfi
switch:
+#if ( PPC_HAS_FPU != 0 )
+#if ! defined( CPU_USE_DEFERRED_FP_SWITCH )
+#error missing include file???
+#endif
+ mfmsr r4
+#if ( CPU_USE_DEFERRED_FP_SWITCH == TRUE )
+ /* if the executing thread has FP enabled propagate
+ * this now so _Thread_Dispatch can save/restore the FPREGS
+ * NOTE: it is *crucial* to disable the FPU across the
+ * user ISR [independent of using the 'deferred'
+ * strategy or not]. We don't save FP regs across
+ * the user ISR and hence we prefer an exception to
+ * be raised rather than experiencing corruption.
+ */
+ lwz r3, SRR1_FRAME_OFFSET(r1)
+ rlwimi r4, r3, 0, 18, 18 /* MSR_FP */
+#else
+ ori r4, r4, MSR_FP
+#endif
+ mtmsr r4
+#endif
bl SYM (_Thread_Dispatch)
easy_exit:
diff --git a/c/src/lib/libbsp/powerpc/mpc8260ads/ChangeLog b/c/src/lib/libbsp/powerpc/mpc8260ads/ChangeLog
index 0829c772ba..acde6db4c7 100644
--- a/c/src/lib/libbsp/powerpc/mpc8260ads/ChangeLog
+++ b/c/src/lib/libbsp/powerpc/mpc8260ads/ChangeLog
@@ -1,3 +1,12 @@
+2005-10-06 Till Straumann <strauman@slac.stanford.edu>
+
+ PR 833/bsps
+ * irq/irq_asm.S: Currently, all (new exception) BSPs explicitly enable
+ the FPU across the user ISR but DON'T save/restore the FPU context.
+ Any use of the FPU from the user handler (e.g., due to GCC
+ optimizations) results in corruption. The fix results in an exception
+ in such cases (user ISR must explicitly save/enable/restore the FPU).
+
2005-09-19 Ralf Corsepius <ralf.corsepius@rtems.org>
* network/if_hdlcsubr.c, network/network.c: Include <errno.h>
diff --git a/c/src/lib/libbsp/powerpc/mpc8260ads/irq/irq_asm.S b/c/src/lib/libbsp/powerpc/mpc8260ads/irq/irq_asm.S
index a8971f5b1d..a0aa73abc0 100644
--- a/c/src/lib/libbsp/powerpc/mpc8260ads/irq/irq_asm.S
+++ b/c/src/lib/libbsp/powerpc/mpc8260ads/irq/irq_asm.S
@@ -12,6 +12,9 @@
* Modifications to store nesting level in global _ISR_Nest_level
* variable instead of SPRG0. Andy Dachs <a.dachs@sstl.co.uk>
*
+ * Till Straumann <strauman@slac.stanford.edu>, 2005/4:
+ * - DONT enable FP across user ISR since fpregs are never saved!!
+ *
* $Id$
*/
@@ -92,11 +95,7 @@ SYM (shared_raw_irq_code_entry):
* also, on CPUs with FP, enable FP so that FP context can be
* saved and restored (using FP instructions)
*/
-#if (PPC_HAS_FPU == 0)
ori r3, r3, MSR_RI /*| MSR_IR | MSR_DR*/
-#else
- ori r3, r3, MSR_RI | /*MSR_IR | MSR_DR |*/ MSR_FP
-#endif
mtmsr r3
SYNC
@@ -297,6 +296,27 @@ nested:
rfi
switch:
+#if ( PPC_HAS_FPU != 0 )
+#if ! defined( CPU_USE_DEFERRED_FP_SWITCH )
+#error missing include file???
+#endif
+ mfmsr r4
+#if ( CPU_USE_DEFERRED_FP_SWITCH == TRUE )
+ /* if the executing thread has FP enabled propagate
+ * this now so _Thread_Dispatch can save/restore the FPREGS
+ * NOTE: it is *crucial* to disable the FPU across the
+ * user ISR [independent of using the 'deferred'
+ * strategy or not]. We don't save FP regs across
+ * the user ISR and hence we prefer an exception to
+ * be raised rather than experiencing corruption.
+ */
+ lwz r3, SRR1_FRAME_OFFSET(r1)
+ rlwimi r4, r3, 0, 18, 18 /* MSR_FP */
+#else
+ ori r4, r4, MSR_FP
+#endif
+ mtmsr r4
+#endif
bl SYM (_Thread_Dispatch)
easy_exit:
diff --git a/c/src/lib/libbsp/powerpc/shared/ChangeLog b/c/src/lib/libbsp/powerpc/shared/ChangeLog
index 52fab35824..21bc8df9a6 100644
--- a/c/src/lib/libbsp/powerpc/shared/ChangeLog
+++ b/c/src/lib/libbsp/powerpc/shared/ChangeLog
@@ -1,3 +1,12 @@
+2005-10-06 Till Straumann <strauman@slac.stanford.edu>
+
+ PR 833/bsps
+ * irq/irq_asm.S: Currently, all (new exception) BSPs explicitly enable
+ the FPU across the user ISR but DON'T save/restore the FPU context.
+ Any use of the FPU from the user handler (e.g., due to GCC
+ optimizations) results in corruption. The fix results in an exception
+ in such cases (user ISR must explicitly save/enable/restore the FPU).
+
2005-09-16 richard.campbell@OARCorp.com
* startup/bspstart.c: Rewrote get_eumbbar() in C. Function was written
diff --git a/c/src/lib/libbsp/powerpc/shared/irq/irq_asm.S b/c/src/lib/libbsp/powerpc/shared/irq/irq_asm.S
index dbdfc964c8..a8e5d82103 100644
--- a/c/src/lib/libbsp/powerpc/shared/irq/irq_asm.S
+++ b/c/src/lib/libbsp/powerpc/shared/irq/irq_asm.S
@@ -12,6 +12,9 @@
* Till Straumann <strauman@slac.stanford.edu>, 2003/7:
* - store isr nesting level in _ISR_Nest_level rather than
* SPRG0 - RTEMS relies on that variable.
+ * Till Straumann <strauman@slac.stanford.edu>, 2005/4:
+ * - DONT enable FP across ISR since fpregs are not saved!!
+ * FPU is used by Thread_Dispatch however...
*
* $Id$
*/
@@ -94,11 +97,7 @@ SYM (shared_raw_irq_code_entry):
* also, on CPUs with FP, enable FP so that FP context can be
* saved and restored (using FP instructions)
*/
-#if (PPC_HAS_FPU == 0)
ori r3, r3, MSR_RI | MSR_IR | MSR_DR
-#else
- ori r3, r3, MSR_RI | MSR_IR | MSR_DR | MSR_FP
-#endif
mtmsr r3
SYNC
/*
@@ -301,6 +300,27 @@ nested:
rfi
switch:
+#if ( PPC_HAS_FPU != 0 )
+#if ! defined( CPU_USE_DEFERRED_FP_SWITCH )
+#error missing include file???
+#endif
+ mfmsr r4
+#if ( CPU_USE_DEFERRED_FP_SWITCH == TRUE )
+ /* if the executing thread has FP enabled propagate
+ * this now so _Thread_Dispatch can save/restore the FPREGS
+ * NOTE: it is *crucial* to disable the FPU across the
+ * user ISR [independent of using the 'deferred'
+ * strategy or not]. We don't save FP regs across
+ * the user ISR and hence we prefer an exception to
+ * be raised rather than experiencing corruption.
+ */
+ lwz r3, SRR1_FRAME_OFFSET(r1)
+ rlwimi r4, r3, 0, 18, 18 /* MSR_FP */
+#else
+ ori r4, r4, MSR_FP
+#endif
+ mtmsr r4
+#endif
bl SYM (_Thread_Dispatch)
easy_exit: