path: root/ipc/chromium/src/base/atomicops_internals_mutex.h
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

// This file is an internal atomic implementation, use
// base/atomicops.h instead.
//
// This is a very slow fallback implementation of atomic operations
// that uses a mutex instead of atomic instructions.
//
// (NB: a small "optimization" here would be using a spinlock instead
// of a blocking mutex, but it's probably not worth the time.)
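//
// Illustrative usage sketch (not part of this header): callers normally go
// through base/atomicops.h, which routes to the functions below when no
// lock-free implementation is available.  With a hypothetical counter:
//
//   base::subtle::Atomic32 counter = 0;
//   base::subtle::NoBarrier_AtomicIncrement(&counter, 1);  // counter is now 1
//   base::subtle::Release_Store(&counter, 0);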

#ifndef base_atomicops_internals_mutex_h
#define base_atomicops_internals_mutex_h

#include "base/lock.h"

namespace base {
namespace subtle {

extern Lock gAtomicsMutex;

template<typename T>
T Locked_CAS(volatile T* ptr, T old_value, T new_value) {
  AutoLock _(gAtomicsMutex);

  T current_value = *ptr;
  if (current_value == old_value)
    *ptr = new_value;

  return current_value;
}
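
// The return convention above is the usual CAS contract: the caller checks
// for success by comparing the returned value against old_value.  Sketch
// only, using a hypothetical Atomic32 variable `word`:
//
//   Atomic32 observed = Locked_CAS<Atomic32>(&word, 0, 1);
//   bool swapped = (observed == 0);  // true iff `word` was 0 and is now 1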

template<typename T>
T Locked_AtomicExchange(volatile T* ptr, T new_value) {
  AutoLock _(gAtomicsMutex);

  T current_value = *ptr;
  *ptr = new_value;
  return current_value;
}

template<typename T>
T Locked_AtomicIncrement(volatile T* ptr, T increment) {
  AutoLock _(gAtomicsMutex);
  return *ptr += increment;
}

template<typename T>
void Locked_Store(volatile T* ptr, T value) {
  AutoLock _(gAtomicsMutex);
  *ptr = value;
}

template<typename T>
T Locked_Load(volatile const T* ptr) {
  AutoLock _(gAtomicsMutex);
  return *ptr;
}

inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
                                         Atomic32 old_value,
                                         Atomic32 new_value) {
  return Locked_CAS(ptr, old_value, new_value);
}

inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
                                         Atomic32 new_value) {
  return Locked_AtomicExchange(ptr, new_value);
}

inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
                                          Atomic32 increment) {
  return Locked_AtomicIncrement(ptr, increment);
}

inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
                                        Atomic32 increment) {
  return Locked_AtomicIncrement(ptr, increment);
}

inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  return Locked_CAS(ptr, old_value, new_value);
}

inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  return Locked_CAS(ptr, old_value, new_value);
}

inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
  return Locked_Store(ptr, value);
}

inline void MemoryBarrier() {
  AutoLock _(gAtomicsMutex);
  // lock/unlock work as a barrier here
}

inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
  return Locked_Store(ptr, value);
}

inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
  return Locked_Store(ptr, value);
}

inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
  return Locked_Load(ptr);
}

inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
  return NoBarrier_Load(ptr);
}

inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
  return Locked_Load(ptr);
}

#ifdef ARCH_CPU_64_BITS

inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
                                         Atomic64 old_value,
                                         Atomic64 new_value) {
  return Locked_CAS(ptr, old_value, new_value);
}

inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
                                         Atomic64 new_value) {
  return Locked_AtomicExchange(ptr, new_value);
}

inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
                                          Atomic64 increment) {
  return Locked_AtomicIncrement(ptr, increment);
}

inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
                                        Atomic64 increment) {
  return Locked_AtomicIncrement(ptr, increment);
}

inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
  return Locked_Store(ptr, value);
}

inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  return Locked_CAS(ptr, old_value, new_value);
}

inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
  return Locked_Store(ptr, value);
}

inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
  return Locked_Store(ptr, value);
}

inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
  return Locked_Load(ptr);
}

inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
  return Locked_Load(ptr);
}

inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
  return Locked_Load(ptr);
}

#endif  // ARCH_CPU_64_BITS

#ifdef OS_MACOSX
// From atomicops_internals_x86_macosx.h:
//
//   MacOS uses long for intptr_t, so AtomicWord and Atomic32 are always
//   different on the Mac, even when they are the same size.  We need
//   to explicitly cast from AtomicWord to Atomic32/64 to implement
//   the AtomicWord interface.
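//
// For illustration only (with a hypothetical AtomicWord variable `w`):
// without the overloads below, a call such as
//
//   AtomicWord w = 0;
//   NoBarrier_Load(&w);
//
// would not resolve to the Atomic32/Atomic64 overloads on the Mac, because
// AtomicWord (long) is a distinct type even when it has the same width.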

inline AtomicWord NoBarrier_CompareAndSwap(volatile AtomicWord* ptr,
                                           AtomicWord old_value,
                                           AtomicWord new_value) {
  return Locked_CAS(ptr, old_value, new_value);
}

inline AtomicWord NoBarrier_AtomicExchange(volatile AtomicWord* ptr,
                                           AtomicWord new_value) {
  return Locked_AtomicExchange(ptr, new_value);
}

inline AtomicWord NoBarrier_AtomicIncrement(volatile AtomicWord* ptr,
                                            AtomicWord increment) {
  return Locked_AtomicIncrement(ptr, increment);
}

inline AtomicWord Barrier_AtomicIncrement(volatile AtomicWord* ptr,
                                          AtomicWord increment) {
  return Locked_AtomicIncrement(ptr, increment);
}

inline AtomicWord Acquire_CompareAndSwap(volatile AtomicWord* ptr,
                                         AtomicWord old_value,
                                         AtomicWord new_value) {
  return Locked_CAS(ptr, old_value, new_value);
}

inline AtomicWord Release_CompareAndSwap(volatile AtomicWord* ptr,
                                         AtomicWord old_value,
                                         AtomicWord new_value) {
  return Locked_CAS(ptr, old_value, new_value);
}

inline void NoBarrier_Store(volatile AtomicWord *ptr, AtomicWord value) {
  return Locked_Store(ptr, value);
}

inline void Acquire_Store(volatile AtomicWord* ptr, AtomicWord value) {
  return Locked_Store(ptr, value);
}

inline void Release_Store(volatile AtomicWord* ptr, AtomicWord value) {
  return Locked_Store(ptr, value);
}

inline AtomicWord NoBarrier_Load(volatile const AtomicWord *ptr) {
  return Locked_Load(ptr);
}

inline AtomicWord Acquire_Load(volatile const AtomicWord* ptr) {
  return Locked_Load(ptr);
}

inline AtomicWord Release_Load(volatile const AtomicWord* ptr) {
  return Locked_Load(ptr);
}

#endif  // OS_MACOSX

}  // namespace subtle
}  // namespace base

#endif  // base_atomicops_internals_mutex_h