/**
 * @file
 *
 * @brief x86_64 Dependent Source
 *
 * This include file contains information pertaining to the x86_64 processor.
 */

/*
 * Copyright (c) 2018.
 * Amaan Cheval <amaan.cheval@gmail.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef _RTEMS_SCORE_CPU_H
#define _RTEMS_SCORE_CPU_H

#ifdef __cplusplus
extern "C" {
#endif

#include <rtems/score/basedefs.h>
#include <rtems/score/cpu_asm.h>
#include <rtems/score/x86_64.h>

#define CPU_SIMPLE_VECTORED_INTERRUPTS FALSE
#define CPU_ISR_PASSES_FRAME_POINTER FALSE
#define CPU_HARDWARE_FP FALSE
#define CPU_SOFTWARE_FP FALSE
#define CPU_ALL_TASKS_ARE_FP FALSE
#define CPU_IDLE_TASK_IS_FP FALSE
#define CPU_USE_DEFERRED_FP_SWITCH FALSE
#define CPU_ENABLE_ROBUST_THREAD_DISPATCH FALSE
#define CPU_STACK_GROWS_UP               FALSE

#define CPU_STRUCTURE_ALIGNMENT RTEMS_ALIGNED(64)
#define CPU_CACHE_LINE_BYTES 64
#define CPU_MODES_INTERRUPT_MASK   0x00000001
#define CPU_MAXIMUM_PROCESSORS 32

#define CPU_EFLAGS_INTERRUPTS_ON  0x00003202
#define CPU_EFLAGS_INTERRUPTS_OFF 0x00003002
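
/*
 * In the two RFLAGS images above, bit 1 is the reserved always-one bit, bits
 * 12-13 select IOPL 3, and bit 9 is the interrupt flag (IF); the two values
 * differ only in IF.
 */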

#ifndef ASM

typedef struct {
  uint64_t rflags;

  /**
   * Callee-saved registers as listed in the SysV ABI document:
   * https://github.com/hjl-tools/x86-psABI/wiki/X86-psABI
   */
  uint64_t rbx;
  void    *rsp;
  void    *rbp;
  uint64_t r12;
  uint64_t r13;
  uint64_t r14;
  uint64_t r15;

  /* XXX: FS segment descriptor for TLS */

#ifdef RTEMS_SMP
    volatile bool is_executing;
#endif
} Context_Control;
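
/*
 * Only the callee-saved registers need to be part of the thread context,
 * because a context switch is entered through an ordinary function call and
 * the compiler already treats caller-saved registers as clobbered there.
 */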

#define _CPU_Context_Get_SP( _context ) \
  (_context)->rsp

/*
 * Caller-saved registers for interrupt frames
 */
typedef struct {
  /**
   * @note rdi is a caller-saved register too, but it is used to pass
   * arguments in function calls and is hence saved separately on the stack.
   *
   * @see DISTINCT_INTERRUPT_ENTRY
   * @see _ISR_Handler
   */

  uint64_t rax;
  uint64_t rcx;
  uint64_t rdx;
  uint64_t rsi;
  uint64_t r8;
  uint64_t r9;
  uint64_t r10;
  uint64_t r11;

  /*
   * This holds the value of rsp just before _ISR_Handler is called. It is
   * needed because the handler aligns the stack for further calls, and that
   * alignment may move the stack pointer by an unknown amount, which would
   * otherwise leave no way back to the original stack and hence to the
   * interrupt frame.
   */
  uint64_t saved_rsp;

  /* XXX:
   * - FS segment selector for TLS
   * - x87 status word?
   * - MMX?
   * - XMM?
   */
} CPU_Interrupt_frame;

#endif /* !ASM */

#define CPU_INTERRUPT_FRAME_SIZE 72
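/* Nine 64-bit registers are saved in the frame: 9 * 8 == 72 bytes. */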

/*
 * When SMP is enabled, percpuasm.c has a similar assert, but since we use the
 * interrupt frame regardless of SMP, we'll confirm it here.
 */
#ifndef ASM
  RTEMS_STATIC_ASSERT(
    sizeof(CPU_Interrupt_frame) == CPU_INTERRUPT_FRAME_SIZE,
    CPU_INTERRUPT_FRAME_SIZE
  );
#endif

#define CPU_MPCI_RECEIVE_SERVER_EXTRA_STACK 0
#define CPU_PROVIDES_ISR_IS_IN_PROGRESS FALSE
#define CPU_STACK_MINIMUM_SIZE          (1024*4)
#define CPU_SIZEOF_POINTER         8
#define CPU_ALIGNMENT              8
#define CPU_HEAP_ALIGNMENT         CPU_ALIGNMENT
#define CPU_STACK_ALIGNMENT        16
#define CPU_INTERRUPT_STACK_ALIGNMENT CPU_CACHE_LINE_BYTES
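
/*
 * The 16-byte stack alignment is mandated by the System V AMD64 ABI, which
 * requires a 16-byte aligned stack pointer at function call boundaries.
 */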

/*
 *  ISR handler macros
 */

#ifndef ASM

#define _CPU_Initialize_vectors()
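
/*
 * This port implements only two interrupt levels: 0 (interrupts enabled) and
 * 1 (interrupts disabled).
 */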

#define _CPU_ISR_Enable(_level)                             \
{                                                           \
  amd64_enable_interrupts();                                \
  _level = 0;                                               \
  (void) _level; /* Prevent -Wunused-but-set-variable */    \
}

#define _CPU_ISR_Disable(_level)                            \
{                                                           \
  amd64_disable_interrupts();                               \
  _level = 1;                                               \
  (void) _level; /* Prevent -Wunused-but-set-variable */    \
}

#define _CPU_ISR_Flash(_level)                              \
{                                                           \
  amd64_enable_interrupts();                                \
  amd64_disable_interrupts();                               \
  _level = 1;                                               \
  (void) _level; /* Prevent -Wunused-but-set-variable */    \
}

RTEMS_INLINE_ROUTINE bool _CPU_ISR_Is_enabled(uint32_t level)
{
  return (level & EFLAGS_INTR_ENABLE) != 0;
}

RTEMS_INLINE_ROUTINE void _CPU_ISR_Set_level(uint32_t new_level)
{
  if ( new_level ) {
    amd64_disable_interrupts();
  }
  else {
    amd64_enable_interrupts();
  }
}

RTEMS_INLINE_ROUTINE uint32_t _CPU_ISR_Get_level(void)
{
  uint64_t rflags;

  __asm__ volatile ( "pushf; \
                      popq %0"
                     : "=rm" (rflags)
  );

  uint32_t level = (rflags & EFLAGS_INTR_ENABLE) ? 0 : 1;
  return level;
}

/* end of ISR handler macros */

/* Context handler macros */
#define _CPU_Context_Destroy( _the_thread, _the_context ) \
  { \
  }

void _CPU_Context_Initialize(
  Context_Control *the_context,
  void *stack_area_begin,
  size_t stack_area_size,
  uint32_t new_level,
  void (*entry_point)( void ),
  bool is_fp,
  void *tls_area
);

#define _CPU_Context_Restart_self( _the_context ) \
   _CPU_Context_restore( (_the_context) );

#define _CPU_Context_Initialize_fp( _destination )  \
  { \
   *(*(_destination)) = _CPU_Null_fp_context; \
  }

/* end of Context handler macros */

/* Fatal Error manager macros */

#define _CPU_Fatal_halt( _source, _error ) \
  { \
  }

/* end of Fatal Error manager macros */

#define CPU_USE_LIBC_INIT_FINI_ARRAY FALSE

/* Bitfield handler macros */

#define CPU_USE_GENERIC_BITFIELD_CODE TRUE

#if (CPU_USE_GENERIC_BITFIELD_CODE == FALSE)
#define _CPU_Bitfield_Find_first_bit( _value, _output ) \
  { \
    (_output) = 0;   /* do something to prevent warnings */ \
  }
#endif

/* end of Bitfield handler macros */

#if (CPU_USE_GENERIC_BITFIELD_CODE == FALSE)
#define _CPU_Priority_Mask( _bit_number ) \
  ( 1 << (_bit_number) )
#endif

#if (CPU_USE_GENERIC_BITFIELD_CODE == FALSE)
#define _CPU_Priority_bits_index( _priority ) \
  (_priority)
#endif

/* end of Priority handler macros */

/* functions */

void _CPU_Initialize(void);

void *_CPU_Thread_Idle_body( uintptr_t ignored );

void _CPU_Context_switch(
  Context_Control  *run,
  Context_Control  *heir
);

RTEMS_NO_RETURN void _CPU_Context_restore( Context_Control *new_context );

typedef struct {
  uint32_t processor_state_register;
  uint32_t integer_registers [1];
  double float_registers [1];
} CPU_Exception_frame;
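
/* XXX: This looks like a placeholder frame; a complete port would record the
   actual x86_64 register file here. */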

void _CPU_Exception_frame_print( const CPU_Exception_frame *frame );

static inline uint32_t CPU_swap_u32(
  uint32_t value
)
{
  uint32_t byte1, byte2, byte3, byte4, swapped;

  byte4 = (value >> 24) & 0xff;
  byte3 = (value >> 16) & 0xff;
  byte2 = (value >> 8)  & 0xff;
  byte1 =  value        & 0xff;

  swapped = (byte1 << 24) | (byte2 << 16) | (byte3 << 8) | byte4;
  return swapped;
}
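
/* For example, CPU_swap_u32( 0x12345678 ) returns 0x78563412. */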

#define CPU_swap_u16( value ) \
  (((value&0xff) << 8) | ((value >> 8)&0xff))

typedef uint32_t CPU_Counter_ticks;

uint32_t _CPU_Counter_frequency( void );

CPU_Counter_ticks _CPU_Counter_read( void );

static inline CPU_Counter_ticks _CPU_Counter_difference(
  CPU_Counter_ticks second,
  CPU_Counter_ticks first
)
{
  return second - first;
}
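
/*
 * Unsigned subtraction is modulo 2^32, so the difference is correct even if
 * the counter wrapped around once between the two reads.
 */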

#ifdef RTEMS_SMP
  uint32_t _CPU_SMP_Initialize( void );

  bool _CPU_SMP_Start_processor( uint32_t cpu_index );

  void _CPU_SMP_Finalize_initialization( uint32_t cpu_count );

  void _CPU_SMP_Prepare_start_multitasking( void );

  static inline uint32_t _CPU_SMP_Get_current_processor( void )
  {
    /* XXX: Placeholder; this should be derived from the Local APIC ID */
    return 123;
  }

  void _CPU_SMP_Send_interrupt( uint32_t target_processor_index );

  static inline void _CPU_SMP_Processor_event_broadcast( void )
  {
    __asm__ volatile ( "" : : : "memory" );
  }

  static inline void _CPU_SMP_Processor_event_receive( void )
  {
    __asm__ volatile ( "" : : : "memory" );
  }

  static inline bool _CPU_Context_Get_is_executing(
    const Context_Control *context
  )
  {
    return context->is_executing;
  }

  static inline void _CPU_Context_Set_is_executing(
    Context_Control *context,
    bool is_executing
  )
  {
    context->is_executing = is_executing;
  }

#endif /* RTEMS_SMP */

typedef uintptr_t CPU_Uint32ptr;

#ifdef __cplusplus
}
#endif

#endif /* ASM */

#endif /* _RTEMS_SCORE_CPU_H */