BDE 4.14.0 Production release
Loading...
Searching...
No Matches
bsls_atomicoperations_x86_all_gcc.h
Go to the documentation of this file.
1/// @file bsls_atomicoperations_x86_all_gcc.h
2///
3/// The content of this file has been pre-processed for Doxygen.
4///
5
6
7// bsls_atomicoperations_x86_all_gcc.h -*-C++-*-
8#ifndef INCLUDED_BSLS_ATOMICOPERATIONS_X86_ALL_GCC
9#define INCLUDED_BSLS_ATOMICOPERATIONS_X86_ALL_GCC
10
11#include <bsls_ident.h>
12BSLS_IDENT("$Id: $")
13
14/// @defgroup bsls_atomicoperations_x86_all_gcc bsls_atomicoperations_x86_all_gcc
15/// @brief Provide implementations of atomic operations for X86/GCC.
16/// @addtogroup bsl
17/// @{
18/// @addtogroup bsls
19/// @{
20/// @addtogroup bsls_atomicoperations_x86_all_gcc
21/// @{
22///
23/// <h1> Outline </h1>
24/// * <a href="#bsls_atomicoperations_x86_all_gcc-purpose"> Purpose</a>
25/// * <a href="#bsls_atomicoperations_x86_all_gcc-classes"> Classes </a>
26/// * <a href="#bsls_atomicoperations_x86_all_gcc-description"> Description </a>
27///
28/// # Purpose {#bsls_atomicoperations_x86_all_gcc-purpose}
29/// Provide implementations of atomic operations for X86/GCC.
30///
31/// # Classes {#bsls_atomicoperations_x86_all_gcc-classes}
32///
33/// - bsls::AtomicOperations_X86_ALL_GCC: implementation of atomics for X86/GCC.
34///
35/// # Description {#bsls_atomicoperations_x86_all_gcc-description}
36/// This component provides classes necessary to implement atomics
37/// on the Linux X86 platform with GCC. The classes are for private use only.
38/// See @ref bsls_atomicoperations and @ref bsls_atomic for the public interface to
39/// atomics.
40/// @}
41/** @} */
42/** @} */
43
44/** @addtogroup bsl
45 * @{
46 */
47/** @addtogroup bsls
48 * @{
49 */
50/** @addtogroup bsls_atomicoperations_x86_all_gcc
51 * @{
52 */
53
54#include <bsls_atomicoperations_default.h>
55#include <bsls_platform.h>
56#include <bsls_types.h>
57
58#if defined(BSLS_PLATFORM_CPU_X86) \
59 && (defined(BSLS_PLATFORM_CMP_GNU) || defined(BSLS_PLATFORM_CMP_CLANG))
60
61
62
63namespace bsls {
64
65struct AtomicOperations_X86_ALL_GCC;
66typedef AtomicOperations_X86_ALL_GCC AtomicOperations_Imp;
67
68 // ======================================================
69 // struct Atomic_TypeTraits<AtomicOperations_X86_ALL_GCC>
70 // ======================================================
71
72template <>
73struct Atomic_TypeTraits<AtomicOperations_X86_ALL_GCC>
74{
75 struct Int
76 {
77 volatile int d_value __attribute__((__aligned__(sizeof(int))));
78 };
79
80 struct Int64
81 {
82 volatile Types::Int64 d_value
83 __attribute__((__aligned__(sizeof(Types::Int64))));
84 };
85
86 struct Uint
87 {
88 volatile unsigned int d_value
89 __attribute__((__aligned__(sizeof(unsigned int))));
90 };
91
92 struct Uint64
93 {
94 volatile Types::Uint64 d_value
95 __attribute__((__aligned__(sizeof(Types::Uint64))));
96 };
97
98 struct Pointer
99 {
100 void * volatile d_value __attribute__((__aligned__(sizeof(void *))));
101 };
102};
103
104 // ===================================
105 // struct AtomicOperations_X86_ALL_GCC
106 // ===================================
107
108struct AtomicOperations_X86_ALL_GCC
109 : AtomicOperations_Default32<AtomicOperations_X86_ALL_GCC>
110{
111 typedef Atomic_TypeTraits<AtomicOperations_X86_ALL_GCC> AtomicTypes;
112
113 // *** atomic functions for int ***
114
115 static int getInt(const AtomicTypes::Int *atomicInt);
116
117 static void setInt(AtomicTypes::Int *atomicInt, int value);
118
119 static void setIntRelease(AtomicTypes::Int *atomicInt, int value);
120
121 static int swapInt(AtomicTypes::Int *atomicInt, int swapValue);
122
123 static int testAndSwapInt(AtomicTypes::Int *atomicInt,
124 int compareValue,
125 int swapValue);
126
127 static int addIntNv(AtomicTypes::Int *atomicInt, int value);
128
129 // *** atomic functions for Int64 ***
130
131 static Types::Int64 getInt64(const AtomicTypes::Int64 *atomicInt);
132
133 static void setInt64(AtomicTypes::Int64 *atomicInt, Types::Int64 value);
134
135 static Types::Int64 swapInt64(AtomicTypes::Int64 *atomicInt,
136 Types::Int64 swapValue);
137
138 static Types::Int64 testAndSwapInt64(AtomicTypes::Int64 *atomicInt,
139 Types::Int64 compareValue,
140 Types::Int64 swapValue);
141
142 static Types::Int64 addInt64Nv(AtomicTypes::Int64 *atomicInt,
143 Types::Int64 value);
144};
145
146// ===========================================================================
147// INLINE FUNCTION DEFINITIONS
148// ===========================================================================
149
150 // -----------------------------------
151 // struct AtomicOperations_X86_ALL_GCC
152 // -----------------------------------
153
154inline
// Return the current value of the specified 'atomicInt'.  An aligned
// 32-bit load is a single atomic instruction on x86; the "memory" clobber
// additionally acts as a compiler barrier so surrounding accesses are not
// reordered across the load.
155int AtomicOperations_X86_ALL_GCC::
156 getInt(const AtomicTypes::Int *atomicInt)
157{
158 int result;
159
160 asm volatile (
161 " movl %[obj], %[res] \n\t"
162
163 : [res] "=r" (result)
164 : [obj] "m" (*atomicInt)
165 : "memory");
166
167 return result;
168}
169
170inline
// Store 'value' into 'atomicInt' and then execute a store-load barrier, so
// the store cannot be reordered with subsequent loads.
171void AtomicOperations_X86_ALL_GCC::
172 setInt(AtomicTypes::Int *atomicInt, int value)
173{
174#ifdef __SSE2__
// 'mfence' is available from SSE2 onward and fences the preceding store.
175 asm volatile (
176 " movl %[val], %[obj] \n\t"
177 " mfence \n\t"
178
179 : [obj] "=m" (*atomicInt)
180 : [val] "r" (value)
181 : "memory");
#else
// Pre-SSE2 fallback: a lock-prefixed read-modify-write of the stack top
// ('addl $0') has the same fencing effect without modifying any data.  It
// does update the flags, hence the extra "cc" clobber.
182
183 asm volatile (
184 " movl %[val], %[obj] \n\t"
185 " lock addl $0, 0(%%esp) \n\t"
186
187 : [obj] "=m" (*atomicInt)
188 : [val] "r" (value)
189 : "memory", "cc");
190#endif
191}
192
193inline
// Store 'value' into 'atomicInt' with release semantics.  Under the x86
// memory model an ordinary store already provides the required ordering
// with respect to earlier memory operations, so no fence instruction is
// emitted; the "memory" clobber keeps the *compiler* from moving prior
// accesses past the store.
194void AtomicOperations_X86_ALL_GCC::
195 setIntRelease(AtomicTypes::Int *atomicInt, int value)
196{
197 asm volatile (
198 " movl %[val], %[obj] \n\t"
199
200 : [obj] "=m" (*atomicInt)
201 : [val] "r" (value)
202 : "memory");
203}
204
205inline
// Atomically exchange the contents of 'atomicInt' with 'swapValue' and
// return the previous contents.  'xchg' with a memory operand is
// implicitly locked, so the explicit 'lock' prefix is redundant but
// harmless; the exchange is also a full memory barrier.
206int AtomicOperations_X86_ALL_GCC::
207 swapInt(AtomicTypes::Int *atomicInt, int swapValue)
208{
209 asm volatile (
210 " lock xchgl %[val], %[obj] \n\t"
211
// 'swapValue' is an in-out register: operand 1 ([val]) is an output, and
// the matching "1" input constraint seeds it with the value to store, so
// after the exchange it holds the previous memory contents.
212 : [obj] "=m" (*atomicInt),
213 [val] "=r" (swapValue)
214 : "1" (swapValue), "m" (*atomicInt)
215 : "memory");
216
217 return swapValue;
218}
219
220inline
221int AtomicOperations_X86_ALL_GCC::
222 testAndSwapInt(AtomicTypes::Int *atomicInt,
223 int compareValue,
224 int swapValue)
225{
226 return __sync_val_compare_and_swap(&atomicInt->d_value,
227 compareValue,
228 swapValue);
229}
230
231inline
232int AtomicOperations_X86_ALL_GCC::
233 addIntNv(AtomicTypes::Int *atomicInt, int value)
234{
235 return __sync_add_and_fetch(&atomicInt->d_value, value);
236}
237
238inline
// Return the current value of the specified 64-bit 'atomicInt'.  A plain
// 64-bit load is not a single instruction on 32-bit x86, so the value is
// read via a compare-and-swap that writes back whatever it observes.
239Types::Int64 AtomicOperations_X86_ALL_GCC::
240 getInt64(const AtomicTypes::Int64 *atomicInt)
241{
242#if BSLS_PLATFORM_CMP_VER_MAJOR >= 40300 // gcc >= 4.3
// The initial (possibly torn) read only seeds the CAS; the builtin
// returns the value actually present in memory, which is what we return.
243 Types::Int64 value = atomicInt->d_value;
244 return __sync_val_compare_and_swap(
245 const_cast<Types::Int64 *>(&atomicInt->d_value),
246 value,
247 value);
#else
// Hand-rolled 'cmpxchg8b' for older compilers.  Copying ebx->eax and
// ecx->edx makes the "expected" (edx:eax) and "replacement" (ecx:ebx)
// operands identical, so the instruction leaves memory unchanged and, in
// either outcome, finishes with the current 64-bit value in edx:eax (the
// "=&A" output).  The "0" (0) input merely seeds edx:eax; the two 'movl's
// overwrite it immediately.  In PIC code ebx is the GOT pointer, so it is
// saved/restored around the sequence instead of being listed as clobbered.
248
249 Types::Int64 result;
250 asm volatile (
251#ifdef __PIC__
252 " pushl %%ebx \n\t"
253#endif
254 " movl %%ebx, %%eax \n\t"
255 " movl %%ecx, %%edx \n\t"
256#if __GNUC__ != 3
257 " lock cmpxchg8b %[obj] \n\t"
258#else
259 // gcc 3.4 seems to think that it can take edx as %1.
260 " lock cmpxchg8b (%[obj]) \n\t"
261#endif
262#ifdef __PIC__
263 " popl %%ebx \n\t"
264#endif
265 : [res] "=&A" (result)
266 :
267#if __GNUC__ != 3
268 [obj] "m" (*atomicInt),
269#else
270 [obj] "S" (atomicInt),
271#endif
272 "0" (0)
273 :
274#ifndef __PIC__
275 "ebx",
276#endif
277
278#if defined(BSLS_PLATFORM_CMP_CLANG) && defined(__PIC__)
279 "ebx", // Clang wants to reuse 'ebx' even in PIC mode
280 // and generates invalid code.
281 // Mark 'ebx' as clobbered to prevent that.
282#endif
283 "ecx", "cc", "memory");
284 return result;
285#endif
286
287}
288
289inline
290void AtomicOperations_X86_ALL_GCC::
291 setInt64(AtomicTypes::Int64 *atomicInt, Types::Int64 value)
292{
293 swapInt64(atomicInt, value);
294}
295
296inline
// Atomically set 'atomicInt' to 'swapValue' and return the value it held
// before the exchange.
297Types::Int64 AtomicOperations_X86_ALL_GCC::
298 swapInt64(AtomicTypes::Int64 *atomicInt,
299 Types::Int64 swapValue)
300{
301#if BSLS_PLATFORM_CMP_VER_MAJOR >= 40300 // gcc >= 4.3
// There is no 64-bit exchange instruction on 32-bit x86, so loop on
// compare-and-swap until it succeeds.  The plain read of 'd_value' may be
// torn, but that is harmless: a torn value simply fails the CAS and the
// loop retries with a fresh read.
302 Types::Int64 oldValue;
303
304 do
305 {
306 oldValue = atomicInt->d_value;
307 } while (__sync_val_compare_and_swap(&atomicInt->d_value,
308 oldValue,
309 swapValue)
310 != oldValue);
311
312 return oldValue;
#else
// Hand-rolled 'cmpxchg8b' loop for older compilers.  ecx:ebx holds the
// new value; edx:eax is seeded with a (possibly stale) read of the target
// ("A" (*atomicInt)) and is refreshed by 'cmpxchg8b' on every failed
// attempt ('jnz 1b').  In PIC code ebx is the GOT pointer, so the new
// value's low word is loaded from a general operand with ebx saved and
// restored around the sequence.
313
314 Types::Int64 result;
315 asm volatile (
316#ifdef __PIC__
317 " pushl %%ebx \n\t"
318 " movl %[val], %%ebx \n\t"
319#endif
320 "1: \n\t"
321 " lock cmpxchg8b %[obj] \n\t"
322 " jnz 1b \n\t"
323#ifdef __PIC__
324 " popl %%ebx \n\t"
325#endif
326 : [res] "=A" (result),
327 [obj] "+m" (*atomicInt)
328 :
329#ifdef __PIC__
330 [val] "g" ((unsigned int) swapValue),
331#else
332 [val] "b" ((unsigned int) swapValue),
333#endif
334 "c" ((int) (swapValue >> 32)),
335 "A" (*atomicInt)
336 :
337#if defined(BSLS_PLATFORM_CMP_CLANG) && defined(__PIC__)
338 "ebx", // Clang wants to reuse 'ebx' even in PIC mode
339 // and generates invalid code.
340 // Mark 'ebx' as clobbered to prevent that.
341#endif
342 "memory", "cc");
343
344 return result;
345#endif
346}
347
348inline
// If 'atomicInt' holds 'compareValue', atomically replace it with
// 'swapValue'; return the 64-bit value observed before the operation.
349Types::Int64 AtomicOperations_X86_ALL_GCC::
350 testAndSwapInt64(AtomicTypes::Int64 *atomicInt,
351 Types::Int64 compareValue,
352 Types::Int64 swapValue)
353{
354#if BSLS_PLATFORM_CMP_VER_MAJOR >= 40300 // gcc >= 4.3
355 return __sync_val_compare_and_swap(&atomicInt->d_value,
356 compareValue,
357 swapValue);
#else
// Hand-rolled single 'cmpxchg8b' for older compilers: edx:eax carries
// 'compareValue' in ("0" ties it to output operand 0) and the observed
// value out; ecx:ebx carries 'swapValue'.  In PIC code ebx is the GOT
// pointer, so the low word is loaded from a general operand with ebx
// saved and restored around the sequence.
358
359 asm volatile (
360#ifdef __PIC__
361 " pushl %%ebx \n\t"
362 " movl %[val], %%ebx \n\t"
363#endif
364 " lock cmpxchg8b %[obj] \n\t"
365#ifdef __PIC__
366 " popl %%ebx \n\t"
367#endif
368 : [cmp] "=A" (compareValue),
369 [obj] "+m" (*atomicInt)
370 :
371#ifdef __PIC__
372 [val] "g" ((unsigned int) swapValue),
373#else
374 [val] "b" ((unsigned int) swapValue),
375#endif
376 "c" ((int) (swapValue >> 32)),
377 "0" (compareValue)
378 :
379#if defined(BSLS_PLATFORM_CMP_CLANG) && defined(__PIC__)
380 "ebx", // Clang wants to reuse 'ebx' even in PIC mode
381 // and generates invalid code.
382 // Mark 'ebx' as clobbered to prevent that.
383#endif
384 "memory", "cc");
385
// After the instruction 'compareValue' holds the previously stored value
// (it equals the original 'compareValue' exactly when the swap occurred).
386 return compareValue;
387#endif
388}
389
390inline
391Types::Int64 AtomicOperations_X86_ALL_GCC::
392 addInt64Nv(AtomicTypes::Int64 *atomicInt,
393 Types::Int64 value)
394{
395 return __sync_add_and_fetch(&atomicInt->d_value, value);
396}
397
398} // close package namespace
399
400
401
402#endif // defined(BSLS_PLATFORM_CPU_X86) && (CMP_GNU || CMP_CLANG)
403
404#endif
405
406// ----------------------------------------------------------------------------
407// Copyright 2013 Bloomberg Finance L.P.
408//
409// Licensed under the Apache License, Version 2.0 (the "License");
410// you may not use this file except in compliance with the License.
411// You may obtain a copy of the License at
412//
413// http://www.apache.org/licenses/LICENSE-2.0
414//
415// Unless required by applicable law or agreed to in writing, software
416// distributed under the License is distributed on an "AS IS" BASIS,
417// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
418// See the License for the specific language governing permissions and
419// limitations under the License.
420// ----------------------------- END-OF-FILE ----------------------------------
421
422/** @} */
423/** @} */
424/** @} */
#define BSLS_IDENT(str)
Definition bsls_ident.h:195
Definition bdlt_iso8601util.h:691
long long Int64
Definition bsls_types.h:132