author     Joel Sherrill <joel.sherrill@OARcorp.com>    2007-05-10 18:40:49 +0000
committer  Joel Sherrill <joel.sherrill@OARcorp.com>    2007-05-10 18:40:49 +0000
commit     a32835a34a8847b5ec36ccb4f458a21c90001cb7
tree       a6bc120f01cc909810b807fe312f477aa3b50a43
parent     Back off patch which should not have been committed.
2007-05-10	Joel Sherrill <joel.sherrill@OARcorp.com>

	PR 1237/rtems
	* cpu.c, cpu_asm.S, rtems/score/cpu.h: Add logic to prevent stack creep
	when interrupts occur at a sufficient rate that the interrupted thread
	never gets to clean its stack. This patch ensures that an interrupted
	thread will not nest ISR dispatches on its stack.
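As a rough mental model of the fix (not part of the patch itself; only the names _CPU_ISR_Dispatch_disable, _Context_Switch_necessary, and _ISR_Signals_to_thread_executing appear in the diff below, the rest is illustrative C), the SPARC interrupt epilogue now decides whether to enter _ISR_Dispatch roughly like this:

  #include <stdint.h>

  extern volatile uint32_t _CPU_ISR_Dispatch_disable;  /* flag added by this patch */

  /* Returns nonzero if the epilogue should vector through _ISR_Dispatch.
   * The two arguments stand in for values the assembly already checks.   */
  static int should_enter_isr_dispatch(int thread_dispatch_disabled,
                                       int switch_or_signal_pending)
  {
    if (thread_dispatch_disabled)
      return 0;                       /* "simple" return, as before          */

    if (_CPU_ISR_Dispatch_disable)
      return 0;                       /* this thread already has an
                                         _ISR_Dispatch frame on its stack,
                                         so do not pile another one on it    */

    return switch_or_signal_pending;  /* otherwise behave as before          */
  }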
Diffstat (limited to 'cpukit/score/cpu')
-rw-r--r--  cpukit/score/cpu/sparc/ChangeLog          |  8
-rw-r--r--  cpukit/score/cpu/sparc/cpu.c              | 13
-rw-r--r--  cpukit/score/cpu/sparc/cpu_asm.S          | 80
-rw-r--r--  cpukit/score/cpu/sparc/rtems/score/cpu.h  | 13
4 files changed, 108 insertions(+), 6 deletions(-)
diff --git a/cpukit/score/cpu/sparc/ChangeLog b/cpukit/score/cpu/sparc/ChangeLog
index 8605e9598b..5a69480be4 100644
--- a/cpukit/score/cpu/sparc/ChangeLog
+++ b/cpukit/score/cpu/sparc/ChangeLog
@@ -1,3 +1,11 @@
+2007-05-10 Joel Sherrill <joel.sherrill@OARcorp.com>
+
+ PR 1237/rtems
+ * cpu.c, cpu_asm.S, rtems/score/cpu.h: Add logic to prevent stack creep
+ when interrupts occur at a sufficient rate that the interrupted
+ thread never gets to clean its stack. This patch ensures that an
+ interrupted thread will not nest ISR dispatches on its stack.
+
2007-05-09 Ralf Corsépius <ralf.corsepius@rtems.org>
* rtems/score/cpu.h: Remove CPU_HAS_OWN_HOST_TO_NETWORK_ROUTINES.
diff --git a/cpukit/score/cpu/sparc/cpu.c b/cpukit/score/cpu/sparc/cpu.c
index ed240fc8a8..5e6bf3717d 100644
--- a/cpukit/score/cpu/sparc/cpu.c
+++ b/cpukit/score/cpu/sparc/cpu.c
@@ -71,6 +71,13 @@ void _CPU_Initialize(
*/
_CPU_Table = *cpu_table;
+
+ /*
+ * Since no tasks have been created yet and no interrupts have occurred,
+ * there is no way that the currently executing thread can have an
+ * _ISR_Dispatch stack frame on its stack.
+ */
+ _CPU_ISR_Dispatch_disable = 0;
}
/*PAGE
@@ -327,4 +334,10 @@ void _CPU_Context_Initialize(
tmp_psr |= SPARC_PSR_EF_MASK;
#endif
the_context->psr = tmp_psr;
+
+ /*
+ * Since THIS thread is being created, there is no way that THIS
+ * thread can have an _ISR_Dispatch stack frame on its stack.
+ */
+ the_context->isr_dispatch_disable = 0;
}
diff --git a/cpukit/score/cpu/sparc/cpu_asm.S b/cpukit/score/cpu/sparc/cpu_asm.S
index 8bebc08c6c..0bf33beaeb 100644
--- a/cpukit/score/cpu/sparc/cpu_asm.S
+++ b/cpukit/score/cpu/sparc/cpu_asm.S
@@ -143,11 +143,19 @@ SYM(_CPU_Context_switch):
std %g4, [%o0 + G4_OFFSET]
std %g6, [%o0 + G6_OFFSET]
+ ! load the address of the ISR stack nesting prevention flag
+ sethi %hi(SYM(_CPU_ISR_Dispatch_disable)), %g2
+ ld [%g2 + %lo(SYM(_CPU_ISR_Dispatch_disable))], %g2
+ ! save it a bit later so we do not waste a couple of cycles
+
std %l0, [%o0 + L0_OFFSET] ! save the local registers
std %l2, [%o0 + L2_OFFSET]
std %l4, [%o0 + L4_OFFSET]
std %l6, [%o0 + L6_OFFSET]
+ ! Now actually save ISR stack nesting prevention flag
+ st %g2, [%o0 + ISR_DISPATCH_DISABLE_STACK_OFFSET]
+
std %i0, [%o0 + I0_OFFSET] ! save the input registers
std %i2, [%o0 + I2_OFFSET]
std %i4, [%o0 + I4_OFFSET]
@@ -270,11 +278,19 @@ done_flushing:
ldd [%o1 + G4_OFFSET], %g4
ldd [%o1 + G6_OFFSET], %g6
+ ! Load thread specific ISR dispatch prevention flag
+ ld [%o1 + ISR_DISPATCH_DISABLE_STACK_OFFSET], %o2
+ sethi %hi(SYM(_CPU_ISR_Dispatch_disable)), %o3
+ ! Store it to memory later to use the cycles
+
ldd [%o1 + L0_OFFSET], %l0 ! restore the local registers
ldd [%o1 + L2_OFFSET], %l2
ldd [%o1 + L4_OFFSET], %l4
ldd [%o1 + L6_OFFSET], %l6
+ ! Now restore thread specific ISR dispatch prevention flag
+ st %o2,[%o3 + %lo(SYM(_CPU_ISR_Dispatch_disable))]
+
ldd [%o1 + I0_OFFSET], %i0 ! restore the output registers
ldd [%o1 + I2_OFFSET], %i2
ldd [%o1 + I4_OFFSET], %i4
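For clarity, the save and restore snippets above amount to the following C-level behaviour during _CPU_Context_switch (a sketch only; the real code interleaves the loads and stores with other register saves to hide memory latency, and Context_Control_sketch is a hypothetical reduced view of the real Context_Control defined later in this diff):

  #include <stdint.h>

  extern volatile uint32_t _CPU_ISR_Dispatch_disable;

  /* Hypothetical reduced view of Context_Control. */
  typedef struct {
    uint32_t psr;
    uint32_t isr_dispatch_disable;   /* field added by this patch */
  } Context_Control_sketch;

  static void context_switch_sketch(Context_Control_sketch *running,
                                    Context_Control_sketch *heir)
  {
    /* Save the executing thread's copy of the flag into its context ... */
    running->isr_dispatch_disable = _CPU_ISR_Dispatch_disable;

    /* ... and make the heir thread's saved copy the live per-CPU value. */
    _CPU_ISR_Dispatch_disable = heir->isr_dispatch_disable;
  }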
@@ -633,26 +649,34 @@ dont_fix_pil2:
orcc %l6, %g0, %g0 ! Is dispatching disabled?
bnz simple_return ! Yes, then do a "simple" exit
- nop ! delay slot
+ ! NOTE: Use the delay slot
+ sethi %hi(SYM(_CPU_ISR_Dispatch_disable)), %l6
+
+ ! Are we dispatching from a previous ISR in the interrupted thread?
+ ld [%l6 + %lo(SYM(_CPU_ISR_Dispatch_disable))], %l7
+ orcc %l7, %g0, %g0 ! Is this thread already doing an ISR?
+ bnz simple_return ! Yes, then do a "simple" exit
+ ! NOTE: Use the delay slot
+ sethi %hi(SYM(_Context_Switch_necessary)), %l4
+
/*
* If a context switch is necessary, then do fudge stack to
* return to the interrupt dispatcher.
*/
- sethi %hi(SYM(_Context_Switch_necessary)), %l4
ld [%l4 + %lo(SYM(_Context_Switch_necessary))], %l5
orcc %l5, %g0, %g0 ! Is thread switch necessary?
bnz SYM(_ISR_Dispatch) ! yes, then invoke the dispatcher
- nop ! delay slot
+ ! NOTE: Use the delay slot
+ sethi %hi(SYM(_ISR_Signals_to_thread_executing)), %l6
/*
* Finally, check to see if signals were sent to the currently
* executing task. If so, we need to invoke the interrupt dispatcher.
*/
- sethi %hi(SYM(_ISR_Signals_to_thread_executing)), %l6
ld [%l6 + %lo(SYM(_ISR_Signals_to_thread_executing))], %l7
orcc %l7, %g0, %g0 ! Were signals sent to the currently
@@ -669,6 +693,10 @@ dont_fix_pil2:
PUBLIC(_ISR_Dispatch)
SYM(_ISR_Dispatch):
+ ! Set ISR dispatch nesting prevention flag
+ mov 1,%l6
+ sethi %hi(SYM(_CPU_ISR_Dispatch_disable)), %l5
+ st %l6,[%l5 + %lo(SYM(_CPU_ISR_Dispatch_disable))]
/*
* The following subtract should get us back on the interrupted
@@ -684,7 +712,7 @@ SYM(_ISR_Dispatch):
nop
nop
nop
-
+isr_dispatch:
call SYM(_Thread_Dispatch), 0
nop
@@ -700,6 +728,48 @@ SYM(_ISR_Dispatch):
ta 0 ! syscall (disable interrupts)
/*
+ * While we had ISR dispatching disabled in this thread,
+ * did we miss anything? If so, then we need to do another
+ * _Thread_Dispatch before leaving this ISR Dispatch context.
+ */
+
+ sethi %hi(SYM(_Context_Switch_necessary)), %l4
+ ld [%l4 + %lo(SYM(_Context_Switch_necessary))], %l5
+
+ ! NOTE: Use some of delay slot to start loading this
+ sethi %hi(SYM(_ISR_Signals_to_thread_executing)), %l6
+ ld [%l6 + %lo(SYM(_ISR_Signals_to_thread_executing))], %l7
+
+ orcc %l5, %g0, %g0 ! Is thread switch necessary?
+ bnz dispatchAgain ! yes, then invoke the dispatcher AGAIN
+ ! NOTE: Use the delay slot to catch the orcc below
+
+ /*
+ * Finally, check to see if signals were sent to the currently
+ * executing task. If so, we need to invoke the interrupt dispatcher.
+ */
+
+ ! NOTE: Delay slots above were used to perform the load AND
+ ! this orcc falls into the delay slot for bnz above
+ orcc %l7, %g0, %g0 ! Were signals sent to the currently
+ ! executing thread?
+ bz allow_nest_again ! No, then clear out and return
+ ! NOTE: use the delay slot from the bz to load 3 into %g1
+
+ ! Yes, then invoke the dispatcher
+dispatchAgain:
+ mov 3,%g1 ! syscall (enable interrupts)
+ ta 0 ! syscall (enable interrupts)
+ ba isr_dispatch
+ nop
+
+allow_nest_again:
+
+ ! Zero out ISR stack nesting prevention flag
+ sethi %hi(SYM(_CPU_ISR_Dispatch_disable)), %l5
+ st %g0,[%l5 + %lo(SYM(_CPU_ISR_Dispatch_disable))]
+
+ /*
* The CWP in place at this point may be different from
* that which was in effect at the beginning of the ISR if we
* have been context switched between the beginning of this invocation
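Summarising the control flow added to _ISR_Dispatch above, a hedged C sketch (enable_interrupts/disable_interrupts are illustrative stand-ins for the "mov 3,%g1 ; ta 0" and "mov 2,%g1 ; ta 0" syscall sequences; the other names mirror the diff):

  #include <stdint.h>

  extern volatile uint32_t _CPU_ISR_Dispatch_disable;
  extern volatile uint32_t _Context_Switch_necessary;
  extern volatile uint32_t _ISR_Signals_to_thread_executing;
  extern void _Thread_Dispatch(void);
  extern void enable_interrupts(void);    /* stand-in for "mov 3,%g1 ; ta 0" */
  extern void disable_interrupts(void);   /* stand-in for "mov 2,%g1 ; ta 0" */

  static void isr_dispatch_sketch(void)
  {
    _CPU_ISR_Dispatch_disable = 1;     /* refuse to nest another dispatch
                                          frame on this thread's stack       */
    for (;;) {
      _Thread_Dispatch();              /* runs with interrupts enabled       */
      disable_interrupts();

      /* While this thread held the flag, an interrupt may have signalled
       * work it could not act on; if so, dispatch again before leaving.     */
      if (!_Context_Switch_necessary && !_ISR_Signals_to_thread_executing)
        break;

      enable_interrupts();             /* the dispatchAgain path             */
    }

    _CPU_ISR_Dispatch_disable = 0;     /* nesting is allowed again; now
                                          restore the interrupted context    */
  }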
diff --git a/cpukit/score/cpu/sparc/rtems/score/cpu.h b/cpukit/score/cpu/sparc/rtems/score/cpu.h
index 71a8ee59c4..dfe9978c7f 100644
--- a/cpukit/score/cpu/sparc/rtems/score/cpu.h
+++ b/cpukit/score/cpu/sparc/rtems/score/cpu.h
@@ -333,6 +333,7 @@ typedef struct {
uint32_t o7;
uint32_t psr;
+ uint32_t isr_dispatch_disable;
} Context_Control;
#endif /* ASM */
@@ -378,8 +379,9 @@ typedef struct {
#define O7_OFFSET 0x7C
#define PSR_OFFSET 0x80
+#define ISR_DISPATCH_DISABLE_STACK_OFFSET 0x84
-#define CONTEXT_CONTROL_SIZE 0x84
+#define CONTEXT_CONTROL_SIZE 0x88
/*
* The floating point context area.
@@ -553,6 +555,15 @@ SCORE_EXTERN void *_CPU_Interrupt_stack_low;
SCORE_EXTERN void *_CPU_Interrupt_stack_high;
/*
+ * This flag is context switched with each thread. It indicates
+ * that THIS thread has an _ISR_Dispatch stack frame on its stack.
+ * By using this flag, we can avoid nesting more interrupt dispatching
+ * attempts on a previously interrupted thread's stack.
+ */
+
+SCORE_EXTERN volatile uint32_t _CPU_ISR_Dispatch_disable;
+
+/*
* The following type defines an entry in the SPARC's trap table.
*
* NOTE: The instructions chosen are RTEMS dependent although one is
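Because the assembly indexes the saved context with the hard-coded ISR_DISPATCH_DISABLE_STACK_OFFSET while C code uses the isr_dispatch_disable field, a compile-time check along these lines (hypothetical, not part of the patch; STATIC_ASSERT is an illustrative macro and the snippet assumes rtems/score/cpu.h has been included so Context_Control is visible) could keep the two from drifting apart:

  #include <stddef.h>

  /* Illustrative compile-time assertion macro. */
  #define STATIC_ASSERT(cond, name) typedef char name[(cond) ? 1 : -1]

  /* 0x84 and 0x88 are the values defined in cpu.h above. */
  STATIC_ASSERT(offsetof(Context_Control, isr_dispatch_disable) == 0x84,
                isr_dispatch_disable_offset_matches_asm);
  STATIC_ASSERT(sizeof(Context_Control) == 0x88,
                context_control_size_matches_asm);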