/* SPDX-License-Identifier: BSD-2-Clause */
/**
* @file
*
* @ingroup RTEMSScoreCPUAArch64
*
* @brief Implementation of _CPU_Context_validate
*
* This file implements _CPU_Context_validate for use in spcontext01.
*/
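/*
 * For reference, a minimal C-level usage sketch, assuming the prototype
 * from the RTEMS score headers (the exact header name is an assumption):
 *
 *   void _CPU_Context_validate( uintptr_t pattern );
 *
 *   _CPU_Context_validate( (uintptr_t) 0x1234 );
 *
 * The function fills the registers with values derived from the pattern and
 * then checks them in an endless loop; it returns only once a check fails,
 * which signals that a context switch corrupted the register state.
 */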
/*
* Copyright (C) 2020 On-Line Applications Research Corporation (OAR)
* Written by Kinsey Moore <kinsey.moore@oarcorp.com>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#include <rtems/asm.h>
#include <rtems/score/cpu.h>
#include <rtems/score/basedefs.h>
/* These must be 8 byte aligned to avoid misaligned accesses */
#define FRAME_OFFSET_X4 0x00
#define FRAME_OFFSET_X5 0x08
#define FRAME_OFFSET_X6 0x10
#define FRAME_OFFSET_X7 0x18
#define FRAME_OFFSET_X8 0x20
#define FRAME_OFFSET_X9 0x28
#define FRAME_OFFSET_X10 0x30
#define FRAME_OFFSET_X11 0x38
#define FRAME_OFFSET_LR 0x40
#ifdef AARCH64_MULTILIB_VFP
/* These must be 16 byte aligned to avoid misaligned accesses */
#define FRAME_OFFSET_V8 0x50
#define FRAME_OFFSET_V9 0x60
#define FRAME_OFFSET_V10 0x70
#define FRAME_OFFSET_V11 0x80
#define FRAME_OFFSET_V12 0x90
#define FRAME_OFFSET_V13 0xA0
#define FRAME_OFFSET_V14 0xB0
#define FRAME_OFFSET_V15 0xC0
/*
 * Force 16 byte alignment of the frame size to avoid stack pointer alignment
 * exceptions.  Add the size of the last store before rounding up so that the
 * slot at FRAME_OFFSET_V15 lies inside the frame.
 */
#define FRAME_SIZE RTEMS_ALIGN_UP( FRAME_OFFSET_V15 + 0x08, 16 )
#else
#define FRAME_SIZE RTEMS_ALIGN_UP( FRAME_OFFSET_LR + 0x08, 16 )
#endif
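/*
 * Together the offsets above describe the save area: one 8 byte slot per
 * general-purpose register (X4-X11 and LR) and, with the VFP enabled, one
 * 16 byte aligned slot per D register (D8-D15).  D8-D15 and LR must be
 * preserved across calls under AAPCS64; X4-X11 are saved as well because
 * the fill and check loops below overwrite them.
 */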
.section .text
FUNCTION_ENTRY(_CPU_Context_validate)
/* Save */
sub sp, sp, #FRAME_SIZE
str x4, [sp, #FRAME_OFFSET_X4]
str x5, [sp, #FRAME_OFFSET_X5]
str x6, [sp, #FRAME_OFFSET_X6]
str x7, [sp, #FRAME_OFFSET_X7]
str x8, [sp, #FRAME_OFFSET_X8]
str x9, [sp, #FRAME_OFFSET_X9]
str x10, [sp, #FRAME_OFFSET_X10]
str x11, [sp, #FRAME_OFFSET_X11]
str lr, [sp, #FRAME_OFFSET_LR]
#ifdef AARCH64_MULTILIB_VFP
str d8, [sp, #FRAME_OFFSET_V8]
str d9, [sp, #FRAME_OFFSET_V9]
str d10, [sp, #FRAME_OFFSET_V10]
str d11, [sp, #FRAME_OFFSET_V11]
str d12, [sp, #FRAME_OFFSET_V12]
str d13, [sp, #FRAME_OFFSET_V13]
str d14, [sp, #FRAME_OFFSET_V14]
str d15, [sp, #FRAME_OFFSET_V15]
#endif
/* Fill */
/* X1 is used for temporary values */
mov x1, x0
/* X2 contains the stack pointer */
mov x2, sp
.macro fill_register reg
add x1, x1, #1
mov \reg, x1
.endm
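/*
 * Each fill_register invocation assigns the next value of a strictly
 * increasing sequence seeded by the pattern in X0, equivalent to the C
 * fragment:
 *
 *   x1 = x1 + 1;
 *   reg = x1;
 *
 * Every register thus ends up with a distinct pattern-derived value that
 * the check loop below can recompute and compare against.
 */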
#ifdef AARCH64_MULTILIB_VFP
/*
 * X3 holds the expected FPSR value.  The mask 0xf000001f selects the
 * writable status bits (the NZCV condition flags and the cumulative
 * exception flags IOC, DZC, OFC, UFC, and IXC); those bits are taken from
 * the pattern in X0 while all other FPSR bits are left unchanged.
 */
mrs x3, FPSR
ldr x4, =0xf000001f
bic x3, x3, x4
and x4, x4, x0
orr x3, x3, x4
msr FPSR, x3
#else
fill_register x3
#endif
fill_register x4
fill_register x5
fill_register x6
fill_register x7
fill_register x8
fill_register x9
fill_register x10
fill_register x11
fill_register x12
fill_register lr
#ifdef AARCH64_MULTILIB_VFP
.macro fill_vfp_register regnum
add x1, x1, #1
fmov d\regnum\(), x1
fmov v\regnum\().D[1], x1
.endm
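/*
 * fill_vfp_register stores the same unique 64 bit value into both halves of
 * the 128 bit SIMD register: the fmov to D<n> writes the low half and zeroes
 * the high half, and the fmov to V<n>.D[1] then overwrites the high half.
 * Corruption of either half is therefore detectable.
 */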
fill_vfp_register 0
fill_vfp_register 1
fill_vfp_register 2
fill_vfp_register 3
fill_vfp_register 4
fill_vfp_register 5
fill_vfp_register 6
fill_vfp_register 7
fill_vfp_register 8
fill_vfp_register 9
fill_vfp_register 10
fill_vfp_register 11
fill_vfp_register 12
fill_vfp_register 13
fill_vfp_register 14
fill_vfp_register 15
fill_vfp_register 16
fill_vfp_register 17
fill_vfp_register 18
fill_vfp_register 19
fill_vfp_register 20
fill_vfp_register 21
fill_vfp_register 22
fill_vfp_register 23
fill_vfp_register 24
fill_vfp_register 25
fill_vfp_register 26
fill_vfp_register 27
fill_vfp_register 28
fill_vfp_register 29
fill_vfp_register 30
fill_vfp_register 31
#endif /* AARCH64_MULTILIB_VFP */
/* Check */
check:
.macro check_register reg
add x1, x1, #1
cmp \reg, x1
bne restore
.endm
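/*
 * check_register recomputes the expected sequence value and bails out to the
 * restore path on the first mismatch, equivalent to:
 *
 *   x1 = x1 + 1;
 *   if ( reg != x1 ) goto restore;
 */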
/* A compare involving the stack pointer is deprecated */
mov x1, sp
cmp x2, x1
bne restore
mov x1, x0
#ifndef AARCH64_MULTILIB_VFP
check_register x3
#endif
check_register x4
check_register x5
check_register x6
check_register x7
check_register x8
check_register x9
check_register x10
check_register x11
check_register x12
check_register lr
#ifdef AARCH64_MULTILIB_VFP
b check_vfp
#endif
b check
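/*
 * Control stays inside the fill and check loop for as long as every checked
 * register holds its expected value; only a mismatch (or a changed stack
 * pointer or FPSR) falls through to the restore path below.  A caller such
 * as the spcontext01 test can therefore treat a return from this function
 * as evidence that a context switch corrupted the register state.
 */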
/* Restore */
restore:
ldr x4, [sp, #FRAME_OFFSET_X4]
ldr x5, [sp, #FRAME_OFFSET_X5]
ldr x6, [sp, #FRAME_OFFSET_X6]
ldr x7, [sp, #FRAME_OFFSET_X7]
ldr x8, [sp, #FRAME_OFFSET_X8]
ldr x9, [sp, #FRAME_OFFSET_X9]
ldr x10, [sp, #FRAME_OFFSET_X10]
ldr x11, [sp, #FRAME_OFFSET_X11]
ldr lr, [sp, #FRAME_OFFSET_LR]
#ifdef AARCH64_MULTILIB_VFP
ldr d8, [sp, #FRAME_OFFSET_V8]
ldr d9, [sp, #FRAME_OFFSET_V9]
ldr d10, [sp, #FRAME_OFFSET_V10]
ldr d11, [sp, #FRAME_OFFSET_V11]
ldr d12, [sp, #FRAME_OFFSET_V12]
ldr d13, [sp, #FRAME_OFFSET_V13]
ldr d14, [sp, #FRAME_OFFSET_V14]
ldr d15, [sp, #FRAME_OFFSET_V15]
#endif
add sp, sp, #FRAME_SIZE
ret
FUNCTION_END(_CPU_Context_validate)
#ifdef AARCH64_MULTILIB_VFP
check_vfp:
.macro check_vfp_register regnum
add x1, x1, #1
fmov x4, d\regnum
fmov x5, v\regnum\().D[1]
cmp x5, x4
bne 1f
cmp x1, x4
bne 1f
b 2f
1:
b restore
2:
.endm
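/*
 * check_vfp_register verifies both 64 bit halves of the SIMD register: the
 * halves must match each other and the recomputed sequence value in X1.  X4
 * and X5 serve as scratch registers here, which is why they are refilled
 * before control branches back to the integer check loop.
 */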
mrs x4, FPSR
cmp x4, x3
bne restore
check_vfp_register 0
check_vfp_register 1
check_vfp_register 2
check_vfp_register 3
check_vfp_register 4
check_vfp_register 5
check_vfp_register 6
check_vfp_register 7
check_vfp_register 8
check_vfp_register 9
check_vfp_register 10
check_vfp_register 11
check_vfp_register 12
check_vfp_register 13
check_vfp_register 14
check_vfp_register 15
check_vfp_register 16
check_vfp_register 17
check_vfp_register 18
check_vfp_register 19
check_vfp_register 20
check_vfp_register 21
check_vfp_register 22
check_vfp_register 23
check_vfp_register 24
check_vfp_register 25
check_vfp_register 26
check_vfp_register 27
check_vfp_register 28
check_vfp_register 29
check_vfp_register 30
check_vfp_register 31
/* Restore x4 and x5 */
mov x1, x0
fill_register x4
fill_register x5
b check
#endif /* AARCH64_MULTILIB_VFP */