/**
 * @file  rtems/score/cpuatomic.h
 *
 * This include file implements the atomic operations for the i386 and
 * defines the atomic data types used by the atomic operations API. The
 * file must keep the fixed name cpuatomic.h so that it can be included
 * by the atomic operations API file atomic.h. Most of the
 * implementation is imported from the FreeBSD kernel.
 */

/*
 * Copyright (c) 1998 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef _RTEMS_SCORE_ATOMIC_CPU_H
#define _RTEMS_SCORE_ATOMIC_CPU_H

#include <rtems/score/genericcpuatomic.h>

#ifdef __cplusplus
extern "C" {
#endif

/**
 * @defgroup RTEMS atomic implementation
 */

/**@{*/

#if defined(RTEMS_SMP)
#define	MPLOCKED	"lock ; "
#else
#define	MPLOCKED
#endif
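
/*
 * MPLOCKED prepends the "lock" prefix to the following instruction in
 * SMP builds only; uniprocessor builds omit the prefix and its
 * bus-locking cost.  As an illustrative sketch (the variable counter is
 * hypothetical, not part of this header), the macro composes with an
 * instruction string:
 *
 *   __asm __volatile(MPLOCKED "addl %1,%0"
 *   : "=m" (counter)
 *   : "ir" (1), "m" (counter)
 *   : "cc");
 *
 * which assembles to "lock ; addl %1,%0" under RTEMS_SMP and to a plain
 * "addl %1,%0" otherwise.
 */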

#if !defined(RTEMS_SMP)
/*
 * We assume that a = b will do atomic loads and stores.  However, on a
 * PentiumPro or higher, reads may pass writes, so for that case we have
 * to use a serializing instruction (i.e. with LOCK) to do the load in
 * SMP kernels.  For UP kernels, however, the cache of the single processor
 * is always consistent, so we only need to take care of compiler.
 */
#define	ATOMIC_STORE_LOAD(NAME, TYPE, LOP, SOP)               \
static inline Atomic_##TYPE                           \
_CPU_Atomic_Load_##NAME(volatile Atomic_##TYPE *p)      \
{                                                       \
  Atomic_##TYPE tmp;                                    \
                                                        \
  tmp = *p;                                             \
  __asm __volatile("" : : : "memory");                  \
  return (tmp);                                         \
}                                                       \
                                                        \
static inline Atomic_##TYPE                           \
_CPU_Atomic_Load_acq_##NAME(volatile Atomic_##TYPE *p)  \
{                                                       \
  Atomic_##TYPE tmp;                                    \
                                                        \
  tmp = *p;                                             \
  __asm __volatile("" : : : "memory");                  \
  return (tmp);                                         \
}                                                       \
                                                        \
static inline void                                    \
_CPU_Atomic_Store_##NAME(volatile Atomic_##TYPE *p, Atomic_##TYPE v) \
{                                                                    \
  __asm __volatile("" : : : "memory");                               \
  *p = v;                                                            \
}                                                                    \
                                                        \
static inline void                                    \
_CPU_Atomic_Store_rel_##NAME(volatile Atomic_##TYPE *p, Atomic_##TYPE v) \
{                                                                        \
  __asm __volatile("" : : : "memory");                                   \
  *p = v;                                                                \
}                                                                        \

#else /* !(!SMP) */

#define	ATOMIC_STORE_LOAD(NAME, TYPE, LOP, SOP)               \
static inline Atomic_##TYPE                           \
_CPU_Atomic_Load_##NAME(volatile Atomic_##TYPE *p)      \
{                                                       \
  Atomic_##TYPE res;                                    \
                                                        \
  __asm __volatile(MPLOCKED LOP                         \
  : "=a" (res),                 /* 0 */                 \
  "=m" (*p)                     /* 1 */                 \
  : "m" (*p)                    /* 2 */                 \
  : "memory", "cc");                                    \
                                                        \
  return (res);                                         \
}                                                       \
                                                        \
static inline Atomic_##TYPE                           \
_CPU_Atomic_Load_acq_##NAME(volatile Atomic_##TYPE *p)  \
{                                                       \
  Atomic_##TYPE res;                                    \
                                                        \
  __asm __volatile(MPLOCKED LOP                         \
  : "=a" (res),			/* 0 */                 \
  "=m" (*p)			/* 1 */                 \
  : "m" (*p)			/* 2 */                 \
  : "memory", "cc");                                    \
                                                        \
  return (res);                                         \
}							\
							\
/*							\
 * The XCHG instruction asserts LOCK automagically.	\
 */							\
static inline void                                    \
_CPU_Atomic_Store_##NAME(volatile Atomic_##TYPE *p, Atomic_##TYPE v) \
{                                                                    \
  __asm __volatile(SOP                                               \
  : "=m" (*p),                  /* 0 */                              \
  "+r" (v)                      /* 1 */                              \
  : "m" (*p)                    /* 2 */                              \
  : "memory");                                                       \
}                                                                    \
static inline void					             \
_CPU_Atomic_Store_rel_##NAME(volatile Atomic_##TYPE *p, Atomic_##TYPE v) \
{                                                                        \
  __asm __volatile(SOP                                                   \
  : "=m" (*p),			/* 0 */                                  \
  "+r" (v)			/* 1 */		                         \
  : "m" (*p)			/* 2 */	                                 \
  : "memory");                                                           \
}                                                                        \

#endif /* !SMP */
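
/*
 * A note on the SMP variants above: CMPXCHG compares EAX with the
 * memory operand and either leaves memory unchanged or copies the
 * memory operand into EAX, so with the result bound to EAX the
 * instruction always finishes with EAX holding the current value of *p,
 * while the LOCK prefix provides the required serialization.  XCHG is
 * used for the stores because it asserts LOCK implicitly.
 */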

/*
 * The assembly is marked volatile to keep the compiler from removing it
 * as dead code.  GCC aggressively reorders operations, so the variants
 * that act as memory barriers also need a "memory" clobber to prevent
 * such reordering across them.
 */
#define	ATOMIC_FETCH_GENERIC(NAME, TYPENAME, TYPE, OP, CONS, V)                         \
static inline void                                                                      \
_CPU_Atomic_Fetch_##NAME##_##TYPENAME(volatile Atomic_##TYPE *p, Atomic_##TYPE v) \
{                                                                             \
  __asm __volatile(MPLOCKED OP                                                \
  : "=m" (*p)                                                                 \
  : CONS (V), "m" (*p)                                                        \
  : "cc");                                                                    \
}                                                                             \
                                                                              \
static inline void                                                            \
_CPU_Atomic_Fetch_##NAME##_barr_##TYPENAME(volatile Atomic_##TYPE *p, Atomic_##TYPE v)\
{                                                                             \
  __asm __volatile(MPLOCKED OP                                                \
  : "=m" (*p)                                                                 \
  : CONS (V), "m" (*p)                                                        \
  : "memory", "cc");                                                          \
}                                                                             \

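
/*
 * As a sketch of what ATOMIC_FETCH_GENERIC generates (expansion shown
 * for illustration only), the instantiation
 *
 *   ATOMIC_FETCH_GENERIC(add, int, Int, "addl %1,%0", "ir", v)
 *
 * yields _CPU_Atomic_Fetch_add_int() with a body of roughly
 *
 *   __asm __volatile(MPLOCKED "addl %1,%0"
 *   : "=m" (*p)
 *   : "ir" (v), "m" (*p)
 *   : "cc");
 *
 * plus a _barr variant that additionally clobbers "memory" and thereby
 * doubles as a compiler barrier.
 */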
/*
 * Atomic compare and set, used by the mutex functions.
 *
 * if (*dst == expect) *dst = src (all 32-bit words)
 *
 * Returns 0 on failure, non-zero on success.
 */
static inline int
_CPU_Atomic_Compare_exchange_int(volatile Atomic_Int *dst, Atomic_Int expect, Atomic_Int src)
{
  unsigned char res;

  __asm __volatile(
  "    " MPLOCKED "    "
  "    cmpxchgl %2,%1 ;    "
  "    sete	%0 ;       "
  "1:                      "
  "# atomic_cmpset_int"
  : "=a" (res),              /* 0 */
    "=m" (*dst)              /* 1 */
  : "r" (src),               /* 2 */
    "a" (expect),            /* 3 */
    "m" (*dst)               /* 4 */
  : "memory", "cc");

  return (res);
}
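
/*
 * Usage sketch (illustrative only, not part of this header's API): a
 * minimal test-and-set spin lock built on the compare-and-set primitive
 * above.  The name my_lock is hypothetical.
 *
 *   static volatile Atomic_Int my_lock = 0;
 *
 *   while (!_CPU_Atomic_Compare_exchange_int(&my_lock, 0, 1))
 *     ;
 *
 * The CPU spins until it succeeds in changing the lock word from 0 to
 * 1; storing 0 back with _CPU_Atomic_Store_rel_int() releases the lock.
 */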

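/*
 * int and long are both 32 bits wide on i386, so the long variant can
 * simply delegate to the int implementation.
 */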
static inline int
_CPU_Atomic_Compare_exchange_long(volatile Atomic_Long *dst, Atomic_Long expect, Atomic_Long src)
{

  return (_CPU_Atomic_Compare_exchange_int((volatile Atomic_Int *)dst, (Atomic_Int)expect,
         (Atomic_Int)src));
}

ATOMIC_STORE_LOAD(int, Int,	"cmpxchgl %0,%1",  "xchgl %1,%0");
ATOMIC_STORE_LOAD(long, Long,	"cmpxchgl %0,%1",  "xchgl %1,%0");

ATOMIC_FETCH_GENERIC(add, int, Int, "addl %1,%0", "ir", v);
ATOMIC_FETCH_GENERIC(sub, int, Int, "subl %1,%0", "ir", v);
ATOMIC_FETCH_GENERIC(or,  int, Int, "orl %1,%0",  "ir", v);
ATOMIC_FETCH_GENERIC(and, int, Int, "andl %1,%0", "ir", v);

ATOMIC_FETCH_GENERIC(add, long, Long, "addl %1,%0", "ir", v);
ATOMIC_FETCH_GENERIC(sub, long, Long, "subl %1,%0", "ir", v);
ATOMIC_FETCH_GENERIC(or,  long, Long, "orl %1,%0",  "ir", v);
ATOMIC_FETCH_GENERIC(and, long, Long, "andl %1,%0", "ir", v);
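
/*
 * Usage sketch (illustrative only): a simple reference counter built on
 * the generated fetch-and-add/subtract operations.  The name
 * my_refcount is hypothetical.
 *
 *   static volatile Atomic_Int my_refcount = 1;
 *
 *   _CPU_Atomic_Fetch_add_int(&my_refcount, 1);   (take a reference)
 *   _CPU_Atomic_Fetch_sub_int(&my_refcount, 1);   (drop a reference)
 *
 * Note that despite the "Fetch" in their names these operations return
 * void; code that needs the previous value must build a loop around
 * _CPU_Atomic_Compare_exchange_int() instead.
 */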

#define	_CPU_Atomic_Fetch_or_acq_int		_CPU_Atomic_Fetch_or_barr_int
#define	_CPU_Atomic_Fetch_or_rel_int		_CPU_Atomic_Fetch_or_barr_int
#define	_CPU_Atomic_Fetch_and_acq_int		_CPU_Atomic_Fetch_and_barr_int
#define	_CPU_Atomic_Fetch_and_rel_int		_CPU_Atomic_Fetch_and_barr_int
#define	_CPU_Atomic_Fetch_add_acq_int		_CPU_Atomic_Fetch_add_barr_int
#define	_CPU_Atomic_Fetch_add_rel_int		_CPU_Atomic_Fetch_add_barr_int
#define	_CPU_Atomic_Fetch_sub_acq_int		_CPU_Atomic_Fetch_sub_barr_int
#define	_CPU_Atomic_Fetch_sub_rel_int		_CPU_Atomic_Fetch_sub_barr_int
#define	_CPU_Atomic_Compare_exchange_acq_int  _CPU_Atomic_Compare_exchange_int
#define	_CPU_Atomic_Compare_exchange_rel_int  _CPU_Atomic_Compare_exchange_int

#define	_CPU_Atomic_Fetch_or_acq_long		_CPU_Atomic_Fetch_or_barr_long
#define	_CPU_Atomic_Fetch_or_rel_long		_CPU_Atomic_Fetch_or_barr_long
#define	_CPU_Atomic_Fetch_and_acq_long		_CPU_Atomic_Fetch_and_barr_long
#define	_CPU_Atomic_Fetch_and_rel_long		_CPU_Atomic_Fetch_and_barr_long
#define	_CPU_Atomic_Fetch_add_acq_long		_CPU_Atomic_Fetch_add_barr_long
#define	_CPU_Atomic_Fetch_add_rel_long		_CPU_Atomic_Fetch_add_barr_long
#define	_CPU_Atomic_Fetch_sub_acq_long	        _CPU_Atomic_Fetch_sub_barr_long
#define	_CPU_Atomic_Fetch_sub_rel_long	        _CPU_Atomic_Fetch_sub_barr_long
#define	_CPU_Atomic_Compare_exchange_acq_long _CPU_Atomic_Compare_exchange_long
#define	_CPU_Atomic_Compare_exchange_rel_long _CPU_Atomic_Compare_exchange_long

/* Operations on 32-bit double words. */
#define	_CPU_Atomic_Fetch_or_32(p, v)  \
    _CPU_Atomic_Fetch_or_int((volatile Atomic_Int *)(p), (Atomic_Int)(v))
#define	_CPU_Atomic_Fetch_or_acq_32(p, v)  \
    _CPU_Atomic_Fetch_or_acq_int((volatile Atomic_Int *)(p), (Atomic_Int)(v))
#define	_CPU_Atomic_Fetch_or_rel_32(p, v)  \
    _CPU_Atomic_Fetch_or_rel_int((volatile Atomic_Int *)(p), (Atomic_Int)(v))
#define	_CPU_Atomic_Fetch_and_32(p, v)  \
    _CPU_Atomic_Fetch_and_int((volatile Atomic_Int *)(p), (Atomic_Int)(v))
#define	_CPU_Atomic_Fetch_and_acq_32(p, v)  \
    _CPU_Atomic_Fetch_and_acq_int((volatile Atomic_Int *)(p), (Atomic_Int)(v))
#define	_CPU_Atomic_Fetch_and_rel_32(p, v)  \
    _CPU_Atomic_Fetch_and_rel_int((volatile Atomic_Int *)(p), (Atomic_Int)(v))
#define	_CPU_Atomic_Fetch_add_32(p, v)  \
    _CPU_Atomic_Fetch_add_int((volatile Atomic_Int *)(p), (Atomic_Int)(v))
#define	_CPU_Atomic_Fetch_add_acq_32(p, v)  \
    _CPU_Atomic_Fetch_add_acq_int((volatile Atomic_Int *)(p), (Atomic_Int)(v))
#define	_CPU_Atomic_Fetch_add_rel_32(p, v)  \
    _CPU_Atomic_Fetch_add_rel_int((volatile Atomic_Int *)(p), (Atomic_Int)(v))
#define	_CPU_Atomic_Fetch_sub_32(p, v)  \
    _CPU_Atomic_Fetch_sub_int((volatile Atomic_Int *)(p), (Atomic_Int)(v))
#define	_CPU_Atomic_Fetch_sub_acq_32(p, v)  \
    _CPU_Atomic_Fetch_sub_acq_int((volatile Atomic_Int *)(p), (Atomic_Int)(v))
#define	_CPU_Atomic_Fetch_sub_rel_32(p, v)  \
    _CPU_Atomic_Fetch_sub_rel_int((volatile Atomic_Int *)(p), (Atomic_Int)(v))
#define _CPU_Atomic_Load_32(p)  \
    _CPU_Atomic_Load_int((volatile Atomic_Int *)(p))
#define	_CPU_Atomic_Load_acq_32(p)  \
    _CPU_Atomic_Load_acq_int((volatile Atomic_Int *)(p))
#define _CPU_Atomic_Store_32(p, v)  \
    _CPU_Atomic_Store_int((volatile Atomic_Int *)(p), (Atomic_Int)(v))
#define	_CPU_Atomic_Store_rel_32(p, v)  \
    _CPU_Atomic_Store_rel_int((volatile Atomic_Int *)(p), (Atomic_Int)(v))
#define	_CPU_Atomic_Compare_exchange_32(dst, old, new)  \
    _CPU_Atomic_Compare_exchange_int((volatile Atomic_Int *)(dst), (Atomic_Int)(old), (Atomic_Int)(new))
#define	_CPU_Atomic_Compare_exchange_acq_32(dst, old, new)  \
    _CPU_Atomic_Compare_exchange_acq_int((volatile Atomic_Int *)(dst), (Atomic_Int)(old), (Atomic_Int)(new))
#define	_CPU_Atomic_Compare_exchange_rel_32(dst, old, new)  \
    _CPU_Atomic_Compare_exchange_rel_int((volatile Atomic_Int *)(dst), (Atomic_Int)(old), (Atomic_Int)(new))

/* Operations on pointers. */
#define	_CPU_Atomic_Fetch_or_ptr(p, v) \
    _CPU_Atomic_Fetch_or_int((volatile Atomic_Int *)(p), (Atomic_Int)(v))
#define	_CPU_Atomic_Fetch_or_acq_ptr(p, v) \
    _CPU_Atomic_Fetch_or_acq_int((volatile Atomic_Int *)(p), (Atomic_Int)(v))
#define	_CPU_Atomic_Fetch_or_rel_ptr(p, v) \
    _CPU_Atomic_Fetch_or_rel_int((volatile Atomic_Int *)(p), (Atomic_Int)(v))
#define	_CPU_Atomic_Fetch_and_ptr(p, v) \
    _CPU_Atomic_Fetch_and_int((volatile Atomic_Int *)(p), (Atomic_Int)(v))
#define	_CPU_Atomic_Fetch_and_acq_ptr(p, v) \
    _CPU_Atomic_Fetch_and_acq_int((volatile Atomic_Int *)(p), (Atomic_Int)(v))
#define	_CPU_Atomic_Fetch_and_rel_ptr(p, v) \
    _CPU_Atomic_Fetch_and_rel_int((volatile Atomic_Int *)(p), (Atomic_Int)(v))
#define	_CPU_Atomic_Fetch_add_ptr(p, v) \
    _CPU_Atomic_Fetch_add_int((volatile Atomic_Int *)(p), (Atomic_Int)(v))
#define	_CPU_Atomic_Fetch_add_acq_ptr(p, v) \
    _CPU_Atomic_Fetch_add_acq_int((volatile Atomic_Int *)(p), (Atomic_Int)(v))
#define	_CPU_Atomic_Fetch_add_rel_ptr(p, v) \
    _CPU_Atomic_Fetch_add_rel_int((volatile Atomic_Int *)(p), (Atomic_Int)(v))
#define	_CPU_Atomic_Fetch_sub_ptr(p, v) \
    _CPU_Atomic_Fetch_sub_int((volatile Atomic_Int *)(p), (Atomic_Int)(v))
#define	_CPU_Atomic_Fetch_sub_acq_ptr(p, v) \
    _CPU_Atomic_Fetch_sub_acq_int((volatile Atomic_Int *)(p), (Atomic_Int)(v))
#define	_CPU_Atomic_Fetch_sub_rel_ptr(p, v) \
    _CPU_Atomic_Fetch_sub_rel_int((volatile Atomic_Int *)(p), (Atomic_Int)(v))
#define _CPU_Atomic_Load_ptr(p) \
    _CPU_Atomic_Load_int((volatile Atomic_Int *)(p))
#define	_CPU_Atomic_Load_acq_ptr(p) \
    _CPU_Atomic_Load_acq_int((volatile Atomic_Int *)(p))
#define _CPU_Atomic_Store_ptr(p, v) \
    _CPU_Atomic_Store_int((volatile Atomic_Int *)(p), (v))
#define	_CPU_Atomic_Store_rel_ptr(p, v) \
    _CPU_Atomic_Store_rel_int((volatile Atomic_Int *)(p), (v))
#define	_CPU_Atomic_Compare_exchange_ptr(dst, old, new) \
    _CPU_Atomic_Compare_exchange_int((volatile Atomic_Int *)(dst), (Atomic_Int)(old), (Atomic_Int)(new))
#define	_CPU_Atomic_Compare_exchange_acq_ptr(dst, old, new) \
    _CPU_Atomic_Compare_exchange_acq_int((volatile Atomic_Int *)(dst), (Atomic_Int)(old), \
            (Atomic_Int)(new))
#define	_CPU_Atomic_Compare_exchange_rel_ptr(dst, old, new) \
    _CPU_Atomic_Compare_exchange_rel_int((volatile Atomic_Int *)(dst), (Atomic_Int)(old), \
            (Atomic_Int)(new))
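
/*
 * Usage sketch (illustrative only): publishing a pointer exactly once,
 * e.g. installing a lazily created singleton.  The names my_instance,
 * allocate_instance() and free_instance() are hypothetical; the mapping
 * onto the int operations works because pointers are 32 bits wide on
 * i386.
 *
 *   static void * volatile my_instance = NULL;
 *
 *   void *candidate = allocate_instance();
 *   if (!_CPU_Atomic_Compare_exchange_ptr(&my_instance, NULL, candidate))
 *     free_instance(candidate);   (another CPU won the race)
 */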

#ifdef __cplusplus
}
#endif

/**@}*/
#endif
/*  end of include file */