/**
 * @file
 *
 * @ingroup RTEMSScoreCPUARM
 *
 * @brief ARM interrupt exception prologue and epilogue.
 */

/*
 * Copyright (c) 2009, 2022 embedded brains GmbH.  All rights reserved.
 *
 *  embedded brains GmbH
 *  Dornierstr. 4
 *  82178 Puchheim
 *  Germany
 *  <rtems@embedded-brains.de>
 *
 * The license and distribution terms for this file may be
 * found in the file LICENSE in this distribution or at
 * http://www.rtems.org/license/LICENSE.
 */

/*
 * The upper EXCHANGE_SIZE bytes of the INT stack area are used for data
 * exchange between INT and SVC mode.  Below this is the actual INT stack.
 * The exchange area is only accessed if INT is disabled.
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <rtems/asm.h>

#ifdef ARM_MULTILIB_ARCH_V4

#define STACK_POINTER_ADJUST r7
#define NON_VOLATILE_SCRATCH r9

#define EXCHANGE_LR r4
#define EXCHANGE_SPSR r5
#define EXCHANGE_CPSR r6
#define EXCHANGE_INT_SP r8

#define EXCHANGE_LIST {EXCHANGE_LR, EXCHANGE_SPSR, EXCHANGE_CPSR, EXCHANGE_INT_SP}
#define EXCHANGE_SIZE 16

#define CONTEXT_LIST {r0, r1, r2, r3, EXCHANGE_LR, EXCHANGE_SPSR, NON_VOLATILE_SCRATCH, r12}
#define CONTEXT_SIZE 32
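
/*
 * The stmdb at the start of the handler stores the original r4, r5, r6 and
 * r8 of the interrupted context into the EXCHANGE_SIZE bytes just below the
 * INT stack pointer without updating it.  These registers are then free to
 * carry the return address, the SPSR, the CPSR and the exchange area address
 * across the switch to SVC mode.  CONTEXT_LIST describes the register frame
 * saved in SVC mode; together with the additional push of
 * STACK_POINTER_ADJUST and lr the saved area remains a multiple of 8 bytes.
 */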

.arm
.globl _ARMV4_Exception_interrupt
_ARMV4_Exception_interrupt:

	/* Save exchange registers to exchange area */
	stmdb	sp, EXCHANGE_LIST

	/* Set exchange registers */
	mov	EXCHANGE_LR, lr
	mrs	EXCHANGE_SPSR, SPSR
	mrs	EXCHANGE_CPSR, CPSR
	sub	EXCHANGE_INT_SP, sp, #EXCHANGE_SIZE

	/* Switch to SVC mode */
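	/*
	 * The IRQ mode encoding is 0x12 and the SVC mode encoding is 0x13, so
	 * setting the least significant mode bit switches from INT (IRQ) to
	 * SVC mode while leaving the IRQ and FIQ disable bits unchanged.
	 */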
	orr	EXCHANGE_CPSR, EXCHANGE_CPSR, #0x1
	msr	CPSR_c, EXCHANGE_CPSR

	/*
	 * Save context.  We save the link register separately because it has
	 * to be restored in SVC mode.  The other registers can be restored in
	 * INT mode.  Ensure that the size of the saved registers is an
	 * integral multiple of 8 bytes.  Provide a non-volatile scratch
	 * register which may be used across function calls.
	 */
	push	CONTEXT_LIST
	push	{STACK_POINTER_ADJUST, lr}

	/*
	 * On a public interface, the stack pointer must be aligned on an
	 * 8-byte boundary.  However, it may temporarily be only aligned on a
	 * 4-byte boundary.  Make sure the stack pointer is aligned on an
	 * 8-byte boundary.
	 */
	and	STACK_POINTER_ADJUST, sp, #0x4
	sub	sp, sp, STACK_POINTER_ADJUST
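	/*
	 * For example, if the stack pointer ends in 0x4, STACK_POINTER_ADJUST
	 * becomes 4 and the subtraction yields an 8-byte aligned stack
	 * pointer; if the stack pointer is already 8-byte aligned, the
	 * adjustment is 0.  The adjustment is undone before the context is
	 * restored.
	 */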

	/* Get per-CPU control of current processor */
	GET_SELF_CPU_CONTROL	r0

#ifdef ARM_MULTILIB_VFP
	/* Save VFP context */
	vmrs	r2, FPSCR
	vpush	{d0-d7}
#ifdef ARM_MULTILIB_VFP_D32
	vpush	{d16-d31}
#endif
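	/*
	 * Save the FPSCR value.  Only r2 is of interest; r3 is pushed as
	 * filler to keep the stack pointer 8-byte aligned.
	 */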
	push	{r2, r3}
#endif /* ARM_MULTILIB_VFP */

	/* Remember INT stack pointer */
	mov	r1, EXCHANGE_INT_SP

	/* Restore exchange registers from exchange area */
	ldmia	r1, EXCHANGE_LIST
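	/*
	 * This recovers the original r4, r5, r6 and r8 of the interrupted
	 * context.  The return address and SPSR carried in EXCHANGE_LR and
	 * EXCHANGE_SPSR were already saved in the context frame, and the INT
	 * stack pointer was just copied to r1.
	 */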

	/* Get interrupt nest level */
	ldr	r2, [r0, #PER_CPU_ISR_NEST_LEVEL]

	/* Switch stack if necessary and save original stack pointer */
	mov	NON_VOLATILE_SCRATCH, sp
	cmp	r2, #0
	moveq	sp, r1
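	/*
	 * Only the outermost interrupt (nest level zero on entry) switches to
	 * the INT stack; nested interrupts keep using the stack which is
	 * already active.  The previous stack pointer is kept in
	 * NON_VOLATILE_SCRATCH so that it survives the function calls below.
	 */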

	/* Increment interrupt nest and thread dispatch disable level */
	ldr	r3, [r0, #PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL]
	add	r2, r2, #1
	add	r3, r3, #1
	str	r2, [r0, #PER_CPU_ISR_NEST_LEVEL]
	str	r3, [r0, #PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL]

	/* Call BSP dependent interrupt dispatcher */
#ifdef RTEMS_PROFILING
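	/*
	 * Only the outermost interrupt (nest level one after the increment
	 * above) is profiled.  The entry and exit time stamps around
	 * bsp_interrupt_dispatch() are obtained via _CPU_Counter_read() and
	 * reported through _Profiling_Outer_most_interrupt_entry_and_exit()
	 * with the per-CPU control in r0, the entry instant in r1, and the
	 * exit instant in r2.  The extra register in each push/pop pair only
	 * keeps the stack pointer 8-byte aligned.
	 */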
	cmp	r2, #1
	bne	.Lskip_profiling
	BLX_TO_THUMB_1	_CPU_Counter_read
	push	{r0, r1}
	GET_SELF_CPU_CONTROL	r0
	BLX_TO_THUMB_1	bsp_interrupt_dispatch
	BLX_TO_THUMB_1	_CPU_Counter_read
	pop	{r1, r3}
	mov	r2, r0
	GET_SELF_CPU_CONTROL	r0
	BLX_TO_THUMB_1	_Profiling_Outer_most_interrupt_entry_and_exit
.Lprofiling_done:
#else
	BLX_TO_THUMB_1	bsp_interrupt_dispatch
#endif

	/* Get per-CPU control of current processor */
	GET_SELF_CPU_CONTROL	r0

	/* Load some per-CPU variables */
	ldr	r12, [r0, #PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL]
	ldrb	r1, [r0, #PER_CPU_DISPATCH_NEEDED]
	ldr	r2, [r0, #PER_CPU_ISR_DISPATCH_DISABLE]
	ldr	r3, [r0, #PER_CPU_ISR_NEST_LEVEL]

	/* Restore stack pointer */
	mov	sp, NON_VOLATILE_SCRATCH

	/* Decrement levels and determine thread dispatch state */
	eor	r1, r1, r12
	sub	r12, r12, #1
	orr	r1, r1, r12
	orr	r1, r1, r2
	sub	r3, r3, #1
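
	/*
	 * The sequence above computes in r1 the value
	 *
	 *   ( dispatch needed ^ disable level ) | ( disable level - 1 )
	 *     | ISR dispatch disable
	 *
	 * which is zero exactly when a thread dispatch is needed, the thread
	 * dispatch disable level drops from one to zero, and the ISR dispatch
	 * disable flag is zero.
	 */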

	/* Store thread dispatch disable and ISR nest levels */
	str	r12, [r0, #PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL]
	str	r3, [r0, #PER_CPU_ISR_NEST_LEVEL]

	/*
	 * Check thread dispatch necessary, ISR dispatch disable and thread
	 * dispatch disable level.
	 */
	cmp	r1, #0
	bne	.Lthread_dispatch_done

	/* Save CPSR in non-volatile register */
	mrs	NON_VOLATILE_SCRATCH, CPSR

	/* Thread dispatch */

.Ldo_thread_dispatch:

	/* Set ISR dispatch disable and thread dispatch disable level to one */
	mov	r12, #1
	str	r12, [r0, #PER_CPU_ISR_DISPATCH_DISABLE]
	str	r12, [r0, #PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL]

	/*
	 * Call _Thread_Do_dispatch() with the per-CPU control in r0 and the
	 * interrupt level in r1.  The level is the saved CPSR with the IRQ
	 * disable bit (0x80) cleared, so this function will enable
	 * interrupts.
	 */
	bic	r1, NON_VOLATILE_SCRATCH, #0x80
	BLX_TO_THUMB_1	_Thread_Do_dispatch

	/* Disable interrupts */
	msr	CPSR, NON_VOLATILE_SCRATCH

	/*
	 * Get per-CPU control of current processor.  In SMP configurations, we
	 * may run on another processor after the _Thread_Do_dispatch() call.
	 */
	GET_SELF_CPU_CONTROL	r0

	/*
	 * Check if we have to do the thread dispatch again.  Interrupts were
	 * enabled during _Thread_Do_dispatch(), so an interrupt may have
	 * requested another thread dispatch in the meantime.
	 */
	ldrb	r12, [r0, #PER_CPU_DISPATCH_NEEDED]
	cmp	r12, #0
	bne	.Ldo_thread_dispatch

	/* We are done with thread dispatching */
	mov	r12, #0
	str	r12, [r0, #PER_CPU_ISR_DISPATCH_DISABLE]

.Lthread_dispatch_done:

#ifdef ARM_MULTILIB_VFP
	/* Restore VFP context */
	pop	{r2, r3}
#ifdef ARM_MULTILIB_VFP_D32
	vpop	{d16-d31}
#endif
	vpop	{d0-d7}
	vmsr	FPSCR, r2
#endif /* ARM_MULTILIB_VFP */

	/* Undo stack pointer adjustment */
	add	sp, sp, STACK_POINTER_ADJUST

	/* Restore STACK_POINTER_ADJUST register and link register */
	pop	{STACK_POINTER_ADJUST, lr}

	/*
	 * XXX: Remember the stack pointer in r12 and move the stack pointer
	 * above the saved context.  The data below the new stack pointer is
	 * still in use, so the stack is now in an inconsistent state.  The
	 * FIQ handler implementation must not use this area.
	 */
	mov	r12, sp
	add	sp, #CONTEXT_SIZE

	/* Get INT mode program status register */
	mrs	r1, CPSR
	bic	r1, r1, #0x1

	/* Switch to INT mode */
	msr	CPSR_c, r1

	/* Save EXCHANGE_LR and EXCHANGE_SPSR registers to exchange area */
	push	{EXCHANGE_LR, EXCHANGE_SPSR}

	/* Restore context */
	ldmia	r12, CONTEXT_LIST
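	/*
	 * The r4 and r5 slots of the context frame were filled with the
	 * return address and the SPSR, so after this load EXCHANGE_LR and
	 * EXCHANGE_SPSR hold these values again.  The original r4 and r5 are
	 * recovered by the pop below.
	 */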

	/* Set return address and program status */
	mov	lr, EXCHANGE_LR
	msr	SPSR_fsxc, EXCHANGE_SPSR

	/* Restore EXCHANGE_LR and EXCHANGE_SPSR registers from exchange area */
	pop	{EXCHANGE_LR, EXCHANGE_SPSR}

#ifdef ARM_MULTILIB_HAS_LOAD_STORE_EXCLUSIVE
	/*
	 * We must clear reservations here, since otherwise compare-and-swap
	 * atomic operations with interrupts enabled may yield wrong results.
	 * A compare-and-swap atomic operation is generated by the compiler
	 * like this:
	 *
	 *   .L1:
	 *     ldrex r1, [r0]
	 *     cmp   r1, r3
	 *     bne   .L2
	 *     strex r3, r2, [r0]
	 *     cmp   r3, #0
	 *     bne   .L1
	 *   .L2:
	 *
	 * Consider the following scenario.  A thread is interrupted right
	 * before the strex.  The interrupt updates the value using a
	 * compare-and-swap sequence.  Everything is fine up to this point.
	 * The interrupt then performs another compare-and-swap sequence which
	 * fails with a branch to .L2.  The current processor now holds a
	 * reservation from the ldrex.  The interrupt returns without a
	 * further strex.  The thread's strex then succeeds due to the
	 * unrelated reservation left by the interrupt, so the thread updates
	 * the value even though it changed in the meantime.
	 */
	clrex
#endif

	/*
	 * Return from interrupt.  The subs with the pc as destination also
	 * restores the CPSR from the SPSR.  The link register of the IRQ
	 * exception points four bytes past the return address.
	 */
	subs	pc, lr, #4

#ifdef RTEMS_PROFILING
.Lskip_profiling:
	BLX_TO_THUMB_1	bsp_interrupt_dispatch
	b	.Lprofiling_done
#endif

#endif /* ARM_MULTILIB_ARCH_V4 */