/* SPDX-License-Identifier: BSD-2-Clause */

/**
 * @file
 *
 * @ingroup RTEMSScoreCPUAArch64
 *
 * @brief Implementation of AArch64 interrupt exception handling
 *
 * This file implements the SP0 and SPx interrupt exception handlers to
 * deal with nested and non-nested interrupts.
 */

/*
 * Copyright (C) 2020 On-Line Applications Research Corporation (OAR)
 * Written by Kinsey Moore <kinsey.moore@oarcorp.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <rtems/asm.h>

.globl	_AArch64_Exception_interrupt_no_nest
.globl	_AArch64_Exception_interrupt_nest

#ifdef AARCH64_MULTILIB_ARCH_V8_ILP32
  #define SELF_CPU_CONTROL_GET_REG w19
#else
  #define SELF_CPU_CONTROL_GET_REG x19
#endif
#define SELF_CPU_CONTROL x19
#define NON_VOLATILE_SCRATCH x20
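/*
 * Note: SELF_CPU_CONTROL_GET_REG and SELF_CPU_CONTROL name the same register;
 * under ILP32 the per-CPU control pointer is only 32 bits wide, so it is
 * loaded into w19, the lower half of x19.
 */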

/* It's understood that CPU state is saved prior to and restored after this */
/*
 * NOTE: This function does not follow the AArch64 procedure call specification
 * because all relevant state is known to be saved in the interrupt context,
 * hence the blind usage of x19, x20, and x21
 */
.AArch64_Interrupt_Handler:
/* Get per-CPU control of current processor */
	GET_SELF_CPU_CONTROL	SELF_CPU_CONTROL_GET_REG

/* Increment interrupt nest and thread dispatch disable level */
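/*
 * Raising the thread dispatch disable level keeps code run by the interrupt
 * handlers from performing a thread dispatch while still in interrupt context;
 * the nest level records that an ISR is in progress.
 */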
	ldr	w2, [SELF_CPU_CONTROL, #PER_CPU_ISR_NEST_LEVEL]
	ldr	w3, [SELF_CPU_CONTROL, #PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL]
	add	w2, w2, #1
	add	w3, w3, #1
	str	w2, [SELF_CPU_CONTROL, #PER_CPU_ISR_NEST_LEVEL]
	str	w3, [SELF_CPU_CONTROL, #PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL]

/* Save LR */
	mov	x21, LR

/* Call BSP dependent interrupt dispatcher */
	bl	bsp_interrupt_dispatch

/* Restore LR */
	mov	LR, x21

/* Load some per-CPU variables */
	ldr	w0, [SELF_CPU_CONTROL, #PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL]
	ldrb	w1, [SELF_CPU_CONTROL, #PER_CPU_DISPATCH_NEEDED]
	ldr	w2, [SELF_CPU_CONTROL, #PER_CPU_ISR_DISPATCH_DISABLE]
	ldr	w3, [SELF_CPU_CONTROL, #PER_CPU_ISR_NEST_LEVEL]

/* Decrement levels and determine thread dispatch state */
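/*
 * With w0 = thread dispatch disable level, w1 = dispatch needed, w2 = ISR
 * dispatch disable and w3 = ISR nest level as loaded above, the sequence below
 * leaves w1 zero only if a dispatch is needed, the disable level drops back to
 * zero, and ISR dispatch is not disabled; any non-zero result means the thread
 * dispatch must be skipped.
 */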
	eor	w1, w1, w0
	sub	w0, w0, #1
	orr	w1, w1, w0
	orr	w1, w1, w2
	sub	w3, w3, #1

/* Store thread dispatch disable and ISR nest levels */
	str	w0, [SELF_CPU_CONTROL, #PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL]
	str	w3, [SELF_CPU_CONTROL, #PER_CPU_ISR_NEST_LEVEL]

/* Return should_skip_thread_dispatch in x0 */
	mov	x0, x1
/* Return from handler */
	ret

/*
 * NOTE: This function does not follow the AArch64 procedure call specification
 * because all relevant state is known to be saved in the interrupt context,
 * hence the blind usage of x19, x20, and x21
 */
.AArch64_Perform_Thread_Dispatch:
/* Get per-CPU control of current processor */
	GET_SELF_CPU_CONTROL	SELF_CPU_CONTROL_GET_REG

/* Thread dispatch */
	mrs	NON_VOLATILE_SCRATCH, DAIF

.Ldo_thread_dispatch:

/* Set ISR dispatch disable and thread dispatch disable level to one */
	mov	w0, #1
	str	w0, [SELF_CPU_CONTROL, #PER_CPU_ISR_DISPATCH_DISABLE]
	str	w0, [SELF_CPU_CONTROL, #PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL]
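/*
 * With both set to one, an interrupt that nests while _Thread_Do_dispatch()
 * runs with interrupts enabled will not attempt its own thread dispatch; the
 * dispatch-needed check below repeats the dispatch instead.
 */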

/* Save LR */
	mov	x21, LR

/* Call _Thread_Do_dispatch(), this function will enable interrupts */
	mov	x0, SELF_CPU_CONTROL
	mov	x1, NON_VOLATILE_SCRATCH
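/* Clear DAIF.I (bit 7) so the level passed in x1 has IRQs unmasked */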
	mov	x2, #0x80
	bic	x1, x1, x2
	bl	_Thread_Do_dispatch

/* Restore LR */
	mov	LR, x21

/* Disable interrupts */
	msr	DAIF, NON_VOLATILE_SCRATCH

#ifdef RTEMS_SMP
	GET_SELF_CPU_CONTROL	SELF_CPU_CONTROL_GET_REG
#endif

/* Check if we have to do the thread dispatch again */
	ldrb	w0, [SELF_CPU_CONTROL, #PER_CPU_DISPATCH_NEEDED]
	cmp	w0, #0
	bne	.Ldo_thread_dispatch

/* We are done with thread dispatching */
	mov	w0, #0
	str	w0, [SELF_CPU_CONTROL, #PER_CPU_ISR_DISPATCH_DISABLE]

/* Return from thread dispatch */
	ret

/*
 * Must save the corruptible registers and the non-corruptible registers that
 * are expected to be used; x0 and lr are expected to already be saved on the
 * stack
 */
.macro	push_interrupt_context
/*
 * Push x1-x21 onto the stack; x19-x21 are included because they are modified
 * without obeying the PCS
 */
	stp lr,		x1,	[sp, #-0x10]!
	stp x2,		x3,	[sp, #-0x10]!
	stp x4,		x5,	[sp, #-0x10]!
	stp x6,		x7,	[sp, #-0x10]!
	stp x8,		x9,	[sp, #-0x10]!
	stp x10,	x11,	[sp, #-0x10]!
	stp x12,	x13,	[sp, #-0x10]!
	stp x14,	x15,	[sp, #-0x10]!
	stp x16,	x17,	[sp, #-0x10]!
	stp x18,	x19,	[sp, #-0x10]!
	stp x20,	x21,	[sp, #-0x10]!
/*
 * Push q0-q31 onto the stack; everything is needed because parts of every
 * register are volatile/corruptible
 */
	stp q0,		q1,	[sp, #-0x20]!
	stp q2,		q3,	[sp, #-0x20]!
	stp q4,		q5,	[sp, #-0x20]!
	stp q6,		q7,	[sp, #-0x20]!
	stp q8,		q9,	[sp, #-0x20]!
	stp q10,	q11,	[sp, #-0x20]!
	stp q12,	q13,	[sp, #-0x20]!
	stp q14,	q15,	[sp, #-0x20]!
	stp q16,	q17,	[sp, #-0x20]!
	stp q18,	q19,	[sp, #-0x20]!
	stp q20,	q21,	[sp, #-0x20]!
	stp q22,	q23,	[sp, #-0x20]!
	stp q24,	q25,	[sp, #-0x20]!
	stp q26,	q27,	[sp, #-0x20]!
	stp q28,	q29,	[sp, #-0x20]!
	stp q30,	q31,	[sp, #-0x20]!
/* Get exception LR for PC and spsr */
	mrs x0, ELR_EL1
	mrs x1, SPSR_EL1
/* Push pc and spsr */
	stp x0,		x1,	[sp, #-0x10]!
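/*
 * ELR_EL1 and SPSR_EL1 must be saved to the frame before interrupts may be
 * re-enabled, since a nested exception taken to EL1 would overwrite them.
 */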
/* Get fpcr and fpsr */
	mrs x0, FPSR
	mrs x1, FPCR
/* Push fpcr and fpsr */
	stp x0,		x1,	[sp, #-0x10]!
.endm

/* Must match the inverse order of push_interrupt_context */
.macro pop_interrupt_context
/* Pop fpcr and fpsr */
	ldp x0,		x1,	[sp], #0x10
/* Restore fpcr and fpsr */
	msr FPCR, x1
	msr FPSR, x0
/* Pop pc and spsr */
	ldp x0,		x1,	[sp], #0x10
/* Restore exception LR for PC and spsr */
	msr SPSR_EL1, x1
	msr ELR_EL1, x0
/* Pop q0-q31 */
	ldp q30,	q31,	[sp], #0x20
	ldp q28,	q29,	[sp], #0x20
	ldp q26,	q27,	[sp], #0x20
	ldp q24,	q25,	[sp], #0x20
	ldp q22,	q23,	[sp], #0x20
	ldp q20,	q21,	[sp], #0x20
	ldp q18,	q19,	[sp], #0x20
	ldp q16,	q17,	[sp], #0x20
	ldp q14,	q15,	[sp], #0x20
	ldp q12,	q13,	[sp], #0x20
	ldp q10,	q11,	[sp], #0x20
	ldp q8,		q9,	[sp], #0x20
	ldp q6,		q7,	[sp], #0x20
	ldp q4,		q5,	[sp], #0x20
	ldp q2,		q3,	[sp], #0x20
	ldp q0,		q1,	[sp], #0x20
/* Pop x1-x21 */
	ldp x20,	x21,	[sp], #0x10
	ldp x18,	x19,	[sp], #0x10
	ldp x16,	x17,	[sp], #0x10
	ldp x14,	x15,	[sp], #0x10
	ldp x12,	x13,	[sp], #0x10
	ldp x10,	x11,	[sp], #0x10
	ldp x8,		x9,	[sp], #0x10
	ldp x6,		x7,	[sp], #0x10
	ldp x4,		x5,	[sp], #0x10
	ldp x2,		x3,	[sp], #0x10
	ldp lr,		x1,	[sp], #0x10
/* Must clear reservations here to ensure consistency with atomic operations */
	clrex
.endm

_AArch64_Exception_interrupt_nest:

/*
 * Execution template:
 * Save volatile regs on interrupt stack
 * Execute irq handler
 * Restore volatile regs from interrupt stack
 * Return to embedded exception vector code
 */

/* Push interrupt context */
	push_interrupt_context

/* Jump into the handler, ignore return value */
	bl .AArch64_Interrupt_Handler

/*
 * SP should be where it was pre-handler (pointing at the exception frame)
 * or something has leaked stack space
 */
/* Pop interrupt context */
	pop_interrupt_context
/* Return to vector for final cleanup */
	ret

_AArch64_Exception_interrupt_no_nest:
/*
 * Execution template:
 * Save volatile registers on the thread stack (some x, all q, ELR, etc.)
 * Switch to the interrupt stack
 * Execute the interrupt handler
 * Switch back to the thread stack
 * Call thread dispatch if necessary
 * Restore volatile registers from the thread stack
 * Return to the embedded exception vector code
 */

/* Push interrupt context */
	push_interrupt_context

/*
 * Switch to the interrupt stack; interrupt dispatch may enable interrupts,
 * causing nesting
 */
	msr	spsel, #0
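/* SPSel = 0 selects SP_EL0, which this port uses as the interrupt stack */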

/* Jump into the handler */
	bl .AArch64_Interrupt_Handler

/*
 * Switch back to the thread stack; interrupt dispatch should have disabled
 * interrupts before returning
 */
	msr	spsel, #1
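/* SPSel = 1 reselects SP_EL1, the thread stack the exception was taken on */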

/*
 * x0 holds the flag computed by .AArch64_Interrupt_Handler from the thread
 * dispatch necessary, ISR dispatch disable, and thread dispatch disable level
 * values; a non-zero value means the thread dispatch must be skipped.
 */
	cmp	x0, #0
	bne	.Lno_need_thread_dispatch
	bl .AArch64_Perform_Thread_Dispatch

.Lno_need_thread_dispatch:
/*
 * SP should be where it was pre-handler (pointing at the exception frame)
 * or something has leaked stack space
 */
/* Pop interrupt context */
	pop_interrupt_context
/* Return to vector for final cleanup */
	ret