cpukit/score/cpu/arm/arm_exc_interrupt.S
/**
 * @file
 *
 * @ingroup RTEMSScoreCPU
 *
 * @brief ARM interrupt exception prologue and epilogue.
 */

/*
 * Copyright (c) 2009, 2016 embedded brains GmbH.  All rights reserved.
 *
 *  embedded brains GmbH
 *  Dornierstr. 4
 *  82178 Puchheim
 *  Germany
 *  <rtems@embedded-brains.de>
 *
 * The license and distribution terms for this file may be
 * found in the file LICENSE in this distribution or at
 * http://www.rtems.org/license/LICENSE.
 */

/*
 * The upper EXCHANGE_SIZE bytes of the INT stack area are used for data
 * exchange between INT and SVC mode.  Below this is the actual INT stack.
 * The exchange area is only accessed if INT is disabled.
 */
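
/*
 * A sketch of the exchange area layout with the register assignments defined
 * below (stmdb stores the lowest-numbered register at the lowest address):
 *
 *   INT sp -  4 : EXCHANGE_INT_SP (r8) of the interrupted context
 *   INT sp -  8 : EXCHANGE_CPSR   (r6) of the interrupted context
 *   INT sp - 12 : EXCHANGE_SPSR   (r5) of the interrupted context
 *   INT sp - 16 : EXCHANGE_LR     (r4) of the interrupted context
 *                 <- EXCHANGE_INT_SP set up by the prologue points here
 */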

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <rtems/asm.h>

#ifdef ARM_MULTILIB_ARCH_V4

#define EXCHANGE_LR r4
#define EXCHANGE_SPSR r5
#define EXCHANGE_CPSR r6
#define EXCHANGE_INT_SP r8

#define EXCHANGE_LIST {EXCHANGE_LR, EXCHANGE_SPSR, EXCHANGE_CPSR, EXCHANGE_INT_SP}
#define EXCHANGE_SIZE 16

#define SELF_CPU_CONTROL r7
#define NON_VOLATILE_SCRATCH r9

#define CONTEXT_LIST {r0, r1, r2, r3, EXCHANGE_LR, EXCHANGE_SPSR, SELF_CPU_CONTROL, r12}
#define CONTEXT_SIZE 32

.arm
.globl _ARMV4_Exception_interrupt
_ARMV4_Exception_interrupt:

	/* Save exchange registers to exchange area */
	stmdb	sp, EXCHANGE_LIST

	/* Set exchange registers */
	mov	EXCHANGE_LR, lr
	mrs	EXCHANGE_SPSR, SPSR
	mrs	EXCHANGE_CPSR, CPSR
	sub	EXCHANGE_INT_SP, sp, #EXCHANGE_SIZE

	/* Switch to SVC mode */
	orr	EXCHANGE_CPSR, EXCHANGE_CPSR, #0x1
	msr	CPSR_c, EXCHANGE_CPSR
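
	/*
	 * Note: this handler is entered in INT (IRQ) mode with mode bits 0x12.
	 * Setting the least significant mode bit yields 0x13, the SVC mode
	 * encoding, so the orr and msr above perform the mode switch.
	 */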

	/*
	 * Save context.  We save the link register separately because it has
	 * to be restored in SVC mode.  The other registers can be restored in
	 * INT mode.  Ensure that the stack remains 8-byte aligned.  The extra
	 * register pushed to maintain the alignment is used to hold the stack
	 * pointer of the interrupted context.
	 */
	stmdb	sp!, CONTEXT_LIST
	stmdb	sp!, {NON_VOLATILE_SCRATCH, lr}
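
	/*
	 * A sketch of the resulting SVC stack frame (offsets from sp, low to
	 * high address):
	 *
	 *    0: NON_VOLATILE_SCRATCH (r9) of the interrupted context
	 *    4: lr (SVC)
	 *    8: r0
	 *   12: r1
	 *   16: r2
	 *   20: r3
	 *   24: EXCHANGE_LR (INT mode lr, return address plus four)
	 *   28: EXCHANGE_SPSR (CPSR of the interrupted context)
	 *   32: r7 (SELF_CPU_CONTROL, still the original r7 at this point)
	 *   36: r12
	 *
	 * If ARM_MULTILIB_VFP is defined, the VFP context is pushed below this
	 * frame next.
	 */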

#ifdef ARM_MULTILIB_VFP
	/* Save VFP context */
	vmrs	r0, FPSCR
	vstmdb	sp!, {d0-d7}
#ifdef ARM_MULTILIB_VFP_D32
	vstmdb	sp!, {d16-d31}
#endif
	stmdb	sp!, {r0, r1}
#endif /* ARM_MULTILIB_VFP */

	/* Get per-CPU control of current processor */
	GET_SELF_CPU_CONTROL	SELF_CPU_CONTROL

	/* Remember INT stack pointer */
	mov	r1, EXCHANGE_INT_SP

	/* Restore exchange registers from exchange area */
	ldmia	r1, EXCHANGE_LIST

	/* Get interrupt nest level */
	ldr	r2, [SELF_CPU_CONTROL, #PER_CPU_ISR_NEST_LEVEL]

	/* Switch stack if necessary and save original stack pointer */
	mov	NON_VOLATILE_SCRATCH, sp
	cmp	r2, #0
	moveq	sp, r1
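
	/*
	 * For the outermost interrupt (nest level zero) the handler runs on
	 * the INT stack just below the exchange area; nested interrupts stay
	 * on the SVC stack.  In both cases NON_VOLATILE_SCRATCH remembers the
	 * SVC stack pointer for the epilogue.
	 */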

	/* Switch to Thumb-2 instructions if necessary */
	SWITCH_FROM_ARM_TO_THUMB_2	r1

	/* Increment interrupt nest and thread dispatch disable level */
	ldr	r3, [SELF_CPU_CONTROL, #PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL]
	add	r2, #1
	add	r3, #1
	str	r2, [SELF_CPU_CONTROL, #PER_CPU_ISR_NEST_LEVEL]
	str	r3, [SELF_CPU_CONTROL, #PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL]

	/* Call BSP dependent interrupt dispatcher */
#ifdef RTEMS_PROFILING
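	/*
	 * Only the outermost interrupt (nest level one after the increment
	 * above) is profiled.  The CPU counter value at entry is parked in the
	 * SELF_CPU_CONTROL register, which is callee-saved and thus survives
	 * the call to bsp_interrupt_dispatch(); the per-CPU control is
	 * reloaded afterwards.  Judging from the register setup below,
	 * _Profiling_Outer_most_interrupt_entry_and_exit() receives the
	 * per-CPU control in r0 and the entry and exit counter values in r1
	 * and r2.
	 */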
	cmp	r2, #1
	bne	.Lskip_profiling
	BLX_TO_THUMB_1	_CPU_Counter_read
	mov	SELF_CPU_CONTROL, r0
	BLX_TO_THUMB_1	bsp_interrupt_dispatch
	BLX_TO_THUMB_1	_CPU_Counter_read
	mov	r2, r0
	mov	r1, SELF_CPU_CONTROL
	GET_SELF_CPU_CONTROL	r0
	mov	SELF_CPU_CONTROL, r0
	BLX_TO_THUMB_1	_Profiling_Outer_most_interrupt_entry_and_exit
.Lprofiling_done:
#else
	BLX_TO_THUMB_1	bsp_interrupt_dispatch
#endif

	/* Load some per-CPU variables */
	ldr	r0, [SELF_CPU_CONTROL, #PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL]
	ldrb	r1, [SELF_CPU_CONTROL, #PER_CPU_DISPATCH_NEEDED]
	ldr	r2, [SELF_CPU_CONTROL, #PER_CPU_ISR_DISPATCH_DISABLE]
	ldr	r3, [SELF_CPU_CONTROL, #PER_CPU_ISR_NEST_LEVEL]

	/* Restore stack pointer */
	mov	sp, NON_VOLATILE_SCRATCH

	/* Save CPSR in non-volatile register */
	mrs	NON_VOLATILE_SCRATCH, CPSR

	/* Decrement levels and determine thread dispatch state */
	eor	r1, r0
	sub	r0, #1
	orr	r1, r0
	orr	r1, r2
	sub	r3, #1

	/* Store thread dispatch disable and ISR nest levels */
	str	r0, [SELF_CPU_CONTROL, #PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL]
	str	r3, [SELF_CPU_CONTROL, #PER_CPU_ISR_NEST_LEVEL]

	/*
	 * Check thread dispatch necessary, ISR dispatch disable and thread
	 * dispatch disable level.
	 */
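	/*
	 * From the arithmetic above, r1 is zero if and only if the thread
	 * dispatch disable level was exactly one (it is now zero), a thread
	 * dispatch is needed, and the ISR dispatch disable flag is zero.  Any
	 * other combination leaves a bit set in r1 and skips the dispatch.
	 */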
	cmp	r1, #0
	bne	.Lthread_dispatch_done

	/* Thread dispatch */
	mrs	NON_VOLATILE_SCRATCH, CPSR

.Ldo_thread_dispatch:

	/* Set ISR dispatch disable and thread dispatch disable level to one */
	mov	r0, #1
	str	r0, [SELF_CPU_CONTROL, #PER_CPU_ISR_DISPATCH_DISABLE]
	str	r0, [SELF_CPU_CONTROL, #PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL]

	/* Call _Thread_Do_dispatch(); this function will enable interrupts */
	mov	r0, SELF_CPU_CONTROL
	mov	r1, NON_VOLATILE_SCRATCH
	mov	r2, #0x80
	bic	r1, r2
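	/*
	 * The 0x80 mask cleared above is the CPSR I (IRQ disable) bit, so the
	 * ISR level passed in r1 corresponds to interrupts enabled.
	 */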
	BLX_TO_THUMB_1	_Thread_Do_dispatch

	/* Disable interrupts */
	msr	CPSR, NON_VOLATILE_SCRATCH

#ifdef RTEMS_SMP
	GET_SELF_CPU_CONTROL	SELF_CPU_CONTROL
#endif

	/* Check if we have to do the thread dispatch again */
	ldrb	r0, [SELF_CPU_CONTROL, #PER_CPU_DISPATCH_NEEDED]
	cmp	r0, #0
	bne	.Ldo_thread_dispatch

	/* We are done with thread dispatching */
	mov	r0, #0
	str	r0, [SELF_CPU_CONTROL, #PER_CPU_ISR_DISPATCH_DISABLE]

.Lthread_dispatch_done:

	/* Switch to ARM instructions if necessary */
	SWITCH_FROM_THUMB_2_TO_ARM

#ifdef ARM_MULTILIB_VFP
	/* Restore VFP context */
	ldmia	sp!, {r0, r1}
#ifdef ARM_MULTILIB_VFP_D32
	vldmia	sp!, {d16-d31}
#endif
	vldmia	sp!, {d0-d7}
	vmsr	FPSCR, r0
#endif /* ARM_MULTILIB_VFP */

	/* Restore NON_VOLATILE_SCRATCH register and link register */
	ldmia	sp!, {NON_VOLATILE_SCRATCH, lr}

	/*
	 * XXX: Remember and restore stack pointer.  The data on the stack is
	 * still in use.  So the stack is now in an inconsistent state.  The
	 * FIQ handler implementation must not use this area.
	 */
	mov	r0, sp
	add	sp, #CONTEXT_SIZE

	/* Get INT mode program status register */
	mrs	r1, CPSR
	bic	r1, r1, #0x1

	/* Switch to INT mode */
	msr	CPSR_c, r1

	/* Save EXCHANGE_LR and EXCHANGE_SPSR registers to exchange area */
	stmdb	sp!, {EXCHANGE_LR, EXCHANGE_SPSR}

	/* Restore context */
	ldmia	r0, CONTEXT_LIST

	/* Set return address and program status */
	mov	lr, EXCHANGE_LR
	msr	SPSR_fsxc, EXCHANGE_SPSR

	/* Restore EXCHANGE_LR and EXCHANGE_SPSR registers from exchange area */
	ldmia	sp!, {EXCHANGE_LR, EXCHANGE_SPSR}

#ifdef ARM_MULTILIB_HAS_LOAD_STORE_EXCLUSIVE
	/*
	 * We must clear reservations here, since otherwise compare-and-swap
	 * atomic operations with interrupts enabled may yield wrong results.
	 * A compare-and-swap atomic operation is generated by the compiler
	 * like this:
	 *
	 *   .L1:
	 *     ldrex r1, [r0]
	 *     cmp   r1, r3
	 *     bne   .L2
	 *     strex r3, r2, [r0]
	 *     cmp   r3, #0
	 *     bne   .L1
	 *   .L2:
	 *
	 * Consider the following scenario.  A thread is interrupted right
	 * before the strex.  The interrupt updates the value using a
	 * compare-and-swap sequence.  Everything is fine up to this point.
	 * The interrupt then performs another compare-and-swap sequence which
	 * fails and branches to .L2.  The current processor now holds a
	 * reservation.  The interrupt returns without executing a further
	 * strex.  The thread resumes at its strex and updates the value using
	 * the unrelated reservation left by the interrupt.
	 */
	clrex
#endif
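
	/*
	 * On IRQ entry the banked lr holds the return address plus four, so
	 * subtracting four yields the correct return address.  The "s" suffix
	 * with the pc as destination also restores the CPSR from the SPSR set
	 * above.
	 */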

	/* Return from interrupt */
	subs	pc, lr, #4

#ifdef RTEMS_PROFILING
#ifdef __thumb2__
.thumb
#else
.arm
#endif
.Lskip_profiling:
	BLX_TO_THUMB_1	bsp_interrupt_dispatch
	b	.Lprofiling_done
#endif

#endif /* ARM_MULTILIB_ARCH_V4 */