/* SPDX-License-Identifier: BSD-2-Clause */

/**
 * @file
 *
 * @ingroup RTEMSScoreCPUAArch64
 *
 * @brief Implementation of _CPU_Context_validate
 *
 * This file implements _CPU_Context_validate for use in spcontext01.
 */

/*
 * Copyright (C) 2020 On-Line Applications Research Corporation (OAR)
 * Written by Kinsey Moore <kinsey.moore@oarcorp.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <rtems/asm.h>
#include <rtems/score/cpu.h>
#include <rtems/score/basedefs.h>

/*
 * This register size applies to X (integer) registers as well as the D (lower
 * half floating point) registers. It does not apply to V (full size floating
 * point) registers or W (lower half integer) registers.
 */
#define AARCH64_REGISTER_SIZE 8

/* According to the AAPCS64, X19-X28 are callee-saved registers */
#define FRAME_OFFSET_X19  0x00
#define FRAME_OFFSET_X20  0x08
#define FRAME_OFFSET_X21  0x10
#define FRAME_OFFSET_X22  0x18
#define FRAME_OFFSET_X23  0x20
#define FRAME_OFFSET_X24  0x28
#define FRAME_OFFSET_X25  0x30
#define FRAME_OFFSET_X26  0x38
#define FRAME_OFFSET_X27  0x40
#define FRAME_OFFSET_X28  0x48
#define FRAME_OFFSET_LR   0x50

#ifdef AARCH64_MULTILIB_VFP
  /*
   * According to the AAPCS64, V8-V15 are callee-saved registers, but only
   * their bottom eight bytes, corresponding to D8-D15, are required to be
   * saved.
   */
  #define FRAME_OFFSET_D8  0x58
  #define FRAME_OFFSET_D9  0x60
  #define FRAME_OFFSET_D10 0x68
  #define FRAME_OFFSET_D11 0x70
  #define FRAME_OFFSET_D12 0x78
  #define FRAME_OFFSET_D13 0x80
  #define FRAME_OFFSET_D14 0x88
  #define FRAME_OFFSET_D15 0x90

  /*
   * Force 16 byte alignment of the frame size to avoid stack pointer alignment
   * exceptions.
   */
  #define FRAME_SIZE RTEMS_ALIGN_UP( FRAME_OFFSET_D15 + AARCH64_REGISTER_SIZE, 16 )
#else
  #define FRAME_SIZE RTEMS_ALIGN_UP( FRAME_OFFSET_LR + AARCH64_REGISTER_SIZE, 16 )
#endif

	.section	.text

FUNCTION_ENTRY(_CPU_Context_validate)
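
	/*
	 * The argument in x0 seeds a distinct pattern value for each register.
	 * The registers are filled with their pattern values and then checked
	 * in an endless loop; the function only returns, via the restore path
	 * below, once a register no longer matches its pattern.
	 */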

	/* Save */

	sub	sp, sp, #FRAME_SIZE

	str	x19, [sp, #FRAME_OFFSET_X19]
	str	x20, [sp, #FRAME_OFFSET_X20]
	str	x21, [sp, #FRAME_OFFSET_X21]
	str	x22, [sp, #FRAME_OFFSET_X22]
	str	x23, [sp, #FRAME_OFFSET_X23]
	str	x24, [sp, #FRAME_OFFSET_X24]
	str	x25, [sp, #FRAME_OFFSET_X25]
	str	x26, [sp, #FRAME_OFFSET_X26]
	str	x27, [sp, #FRAME_OFFSET_X27]
	str	x28, [sp, #FRAME_OFFSET_X28]
	str	lr, [sp, #FRAME_OFFSET_LR]

#ifdef AARCH64_MULTILIB_VFP
	str	d8, [sp, #FRAME_OFFSET_D8]
	str	d9, [sp, #FRAME_OFFSET_D9]
	str	d10, [sp, #FRAME_OFFSET_D10]
	str	d11, [sp, #FRAME_OFFSET_D11]
	str	d12, [sp, #FRAME_OFFSET_D12]
	str	d13, [sp, #FRAME_OFFSET_D13]
	str	d14, [sp, #FRAME_OFFSET_D14]
	str	d15, [sp, #FRAME_OFFSET_D15]
#endif

	/* Fill */

	/* X1 is used for temporary values */
	mov	x1, x0

	/* X2 holds a copy of the stack pointer */
	mov	x2, sp
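
/*
 * fill_register: load the next value of the pattern sequence seeded from
 * x0 into the given register, so that every filled register receives a
 * distinct value.
 */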

.macro fill_register reg
	add	x1, x1, #1
	mov	\reg, x1
.endm


#ifdef AARCH64_MULTILIB_VFP
	/* X3 contains the FPSR */
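	/*
	 * Mix the pattern value from x0 into the writable FPSR status bits
	 * selected by the mask 0xf000001f, i.e. the N, Z, C, V condition flags
	 * and the cumulative exception flags IOC, DZC, OFC, UFC and IXC.
	 */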
	mrs	x3, FPSR
	ldr	x4, =0xf000001f
	bic	x3, x3, x4
	and	x4, x4, x0
	orr	x3, x3, x4
	msr	FPSR, x3
#else
	fill_register	x3
#endif

	fill_register	x4
	fill_register	x5
	fill_register	x6
	fill_register	x7
	fill_register	x8
	fill_register	x9
	fill_register	x10
	fill_register	x11
	fill_register	x12
	fill_register	x13
	fill_register	x14
	fill_register	x15
	fill_register	x16
	fill_register	x17
	fill_register	x18
	fill_register	x19
	fill_register	x20
	fill_register	x21
	fill_register	x22
	fill_register	x23
	fill_register	x24
	fill_register	x25
	fill_register	x26
	fill_register	x27
	fill_register	x28
	fill_register	x29
	fill_register	lr

#ifdef AARCH64_MULTILIB_VFP
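/*
 * fill_vfp_register: write the next pattern value into both 64-bit halves of
 * the given V register.
 */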
.macro fill_vfp_register regnum
	add	x1, x1, #1
	fmov	d\regnum\(), x1
	fmov	v\regnum\().D[1], x1
.endm

	fill_vfp_register	0
	fill_vfp_register	1
	fill_vfp_register	2
	fill_vfp_register	3
	fill_vfp_register	4
	fill_vfp_register	5
	fill_vfp_register	6
	fill_vfp_register	7
	fill_vfp_register	8
	fill_vfp_register	9
	fill_vfp_register	10
	fill_vfp_register	11
	fill_vfp_register	12
	fill_vfp_register	13
	fill_vfp_register	14
	fill_vfp_register	15
	fill_vfp_register	16
	fill_vfp_register	17
	fill_vfp_register	18
	fill_vfp_register	19
	fill_vfp_register	20
	fill_vfp_register	21
	fill_vfp_register	22
	fill_vfp_register	23
	fill_vfp_register	24
	fill_vfp_register	25
	fill_vfp_register	26
	fill_vfp_register	27
	fill_vfp_register	28
	fill_vfp_register	29
	fill_vfp_register	30
	fill_vfp_register	31
#endif /* AARCH64_MULTILIB_VFP */

	/* Check */
check:
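
/*
 * check_register: compare the given register with the next expected pattern
 * value and branch to the restore path on a mismatch.
 */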

.macro check_register reg
	add	x1, x1, #1
	cmp	\reg, x1
	bne	restore
.endm

	/* Verify that the stack pointer has not changed */
	mov	x1, sp
	cmp	x2, x1
	bne	restore

	/* Reset the running pattern value for this check pass */
	mov	x1, x0

#ifndef AARCH64_MULTILIB_VFP
	check_register	x3
#endif

	check_register	x4
	check_register	x5
	check_register	x6
	check_register	x7
	check_register	x8
	check_register	x9
	check_register	x10
	check_register	x11
	check_register	x12
	check_register	x13
	check_register	x14
	check_register	x15
	check_register	x16
	check_register	x17
	check_register	x18
	check_register	x19
	check_register	x20
	check_register	x21
	check_register	x22
	check_register	x23
	check_register	x24
	check_register	x25
	check_register	x26
	check_register	x27
	check_register	x28
	check_register	x29
	check_register	lr

#ifdef AARCH64_MULTILIB_VFP
	b	check_vfp
#endif

	/* Without the VFP checks, loop back and check the registers again */
	b	check

	/* Restore */
restore:

	ldr	x19, [sp, #FRAME_OFFSET_X19]
	ldr	x20, [sp, #FRAME_OFFSET_X20]
	ldr	x21, [sp, #FRAME_OFFSET_X21]
	ldr	x22, [sp, #FRAME_OFFSET_X22]
	ldr	x23, [sp, #FRAME_OFFSET_X23]
	ldr	x24, [sp, #FRAME_OFFSET_X24]
	ldr	x25, [sp, #FRAME_OFFSET_X25]
	ldr	x26, [sp, #FRAME_OFFSET_X26]
	ldr	x27, [sp, #FRAME_OFFSET_X27]
	ldr	x28, [sp, #FRAME_OFFSET_X28]
	ldr	lr, [sp, #FRAME_OFFSET_LR]

#ifdef AARCH64_MULTILIB_VFP
	ldr	d8, [sp, #FRAME_OFFSET_D8]
	ldr	d9, [sp, #FRAME_OFFSET_D9]
	ldr	d10, [sp, #FRAME_OFFSET_D10]
	ldr	d11, [sp, #FRAME_OFFSET_D11]
	ldr	d12, [sp, #FRAME_OFFSET_D12]
	ldr	d13, [sp, #FRAME_OFFSET_D13]
	ldr	d14, [sp, #FRAME_OFFSET_D14]
	ldr	d15, [sp, #FRAME_OFFSET_D15]
#endif

	add	sp, sp, #FRAME_SIZE

	ret

FUNCTION_END(_CPU_Context_validate)

#ifdef AARCH64_MULTILIB_VFP
check_vfp:
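
/*
 * check_vfp_register: verify that both 64-bit halves of the given V register
 * still hold the expected pattern value and branch to the restore path on a
 * mismatch.  This clobbers x4 and x5.
 */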

.macro check_vfp_register regnum
	add	x1, x1, #1
	fmov	x4, d\regnum
	fmov	x5, v\regnum\().D[1]
	cmp	x5, x4
	bne	1f
	cmp	x1, x4
	bne	1f
	b	2f
1:
	b	restore
2:
.endm

	/* Verify that the FPSR still holds the value set during the fill */
	mrs	x4, FPSR
	cmp	x4, x3
	bne	restore

	check_vfp_register	0
	check_vfp_register	1
	check_vfp_register	2
	check_vfp_register	3
	check_vfp_register	4
	check_vfp_register	5
	check_vfp_register	6
	check_vfp_register	7
	check_vfp_register	8
	check_vfp_register	9
	check_vfp_register	10
	check_vfp_register	11
	check_vfp_register	12
	check_vfp_register	13
	check_vfp_register	14
	check_vfp_register	15
	check_vfp_register	16
	check_vfp_register	17
	check_vfp_register	18
	check_vfp_register	19
	check_vfp_register	20
	check_vfp_register	21
	check_vfp_register	22
	check_vfp_register	23
	check_vfp_register	24
	check_vfp_register	25
	check_vfp_register	26
	check_vfp_register	27
	check_vfp_register	28
	check_vfp_register	29
	check_vfp_register	30
	check_vfp_register	31

	/*
	 * Refill x4 and x5 with their pattern values; they were clobbered by
	 * the VFP register checks above.
	 */
	mov	x1, x0
	fill_register	x4
	fill_register	x5

	b	check
#endif /* AARCH64_MULTILIB_VFP */