author    | Matt A. Tobin <mattatobin@localhost.localdomain> | 2018-02-02 04:16:08 -0500
committer | Matt A. Tobin <mattatobin@localhost.localdomain> | 2018-02-02 04:16:08 -0500
commit    | 5f8de423f190bbb79a62f804151bc24824fa32d8 (patch)
tree      | 10027f336435511475e392454359edea8e25895d /ipc/chromium/src/base/atomicops.h
parent    | 49ee0794b5d912db1f95dce6eb52d781dc210db5 (diff)
Add m-esr52 at 52.6.0
Diffstat (limited to 'ipc/chromium/src/base/atomicops.h')
-rw-r--r-- | ipc/chromium/src/base/atomicops.h | 155
1 file changed, 155 insertions, 0 deletions
diff --git a/ipc/chromium/src/base/atomicops.h b/ipc/chromium/src/base/atomicops.h
new file mode 100644
index 000000000..dcb691710
--- /dev/null
+++ b/ipc/chromium/src/base/atomicops.h
@@ -0,0 +1,155 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// For atomic operations on reference counts, see atomic_refcount.h.
+// For atomic operations on sequence numbers, see atomic_sequence_num.h.
+
+// The routines exported by this module are subtle. If you use them, even if
+// you get the code right, it will depend on careful reasoning about atomicity
+// and memory ordering; it will be less readable, and harder to maintain. If
+// you plan to use these routines, you should have a good reason, such as
+// solid evidence that performance would otherwise suffer, or there being no
+// alternative. You should assume only properties explicitly guaranteed by the
+// specifications in this file. You are almost certainly _not_ writing code
+// just for the x86; if you assume x86 semantics, x86 hardware bugs and
+// implementations on other architectures will cause your code to break. If
+// you do not know what you are doing, avoid these routines, and use a Mutex.
+//
+// It is incorrect to make direct assignments to/from an atomic variable.
+// You should use one of the Load or Store routines. The NoBarrier
+// versions are provided when no barriers are needed:
+//   NoBarrier_Store()
+//   NoBarrier_Load()
+// Although there is currently no compiler enforcement, you are encouraged
+// to use these.
+//
+
+#ifndef BASE_ATOMICOPS_H_
+#define BASE_ATOMICOPS_H_
+
+#include "base/basictypes.h"
+#include "base/port.h"
+
+namespace base {
+namespace subtle {
+
+// Bug 1308991. We need this for /Wp64, to mark it safe for AtomicWord casting.
+#ifndef OS_WIN
+#define __w64
+#endif
+typedef __w64 int32_t Atomic32;
+#ifdef ARCH_CPU_64_BITS
+typedef int64_t Atomic64;
+#endif
+
+// Use AtomicWord for a machine-sized pointer. It will use the Atomic32 or
+// Atomic64 routines below, depending on your architecture.
+#ifdef OS_OPENBSD
+#ifdef ARCH_CPU_64_BITS
+typedef Atomic64 AtomicWord;
+#else
+typedef Atomic32 AtomicWord;
+#endif // ARCH_CPU_64_BITS
+#else
+typedef intptr_t AtomicWord;
+#endif // OS_OPENBSD
+
+// Atomically execute:
+//   result = *ptr;
+//   if (*ptr == old_value)
+//     *ptr = new_value;
+//   return result;
+//
+// I.e., replace "*ptr" with "new_value" if "*ptr" used to be "old_value".
+// Always returns the old value of "*ptr".
+//
+// This routine implies no memory barriers.
+Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
+                                  Atomic32 old_value,
+                                  Atomic32 new_value);
+
+// Atomically store new_value into *ptr, returning the previous value held in
+// *ptr. This routine implies no memory barriers.
+Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr, Atomic32 new_value);
+
+// Atomically increment *ptr by "increment". Returns the new value of
+// *ptr with the increment applied. This routine implies no memory barriers.
+Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr, Atomic32 increment);
+
+Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
+                                 Atomic32 increment);
+
+// The following lower-level operations are typically useful only to people
+// implementing higher-level synchronization operations like spinlocks,
+// mutexes, and condition variables. They combine CompareAndSwap(), a load, or
+// a store with appropriate memory-ordering instructions. "Acquire" operations
+// ensure that no later memory access can be reordered ahead of the operation.
+// "Release" operations ensure that no previous memory access can be reordered
+// after the operation. "Barrier" operations have both "Acquire" and "Release"
+// semantics. A MemoryBarrier() has "Barrier" semantics, but does no memory
+// access.
+Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
+                                Atomic32 old_value,
+                                Atomic32 new_value);
+Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
+                                Atomic32 old_value,
+                                Atomic32 new_value);
+
+void MemoryBarrier();
+void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value);
+void Acquire_Store(volatile Atomic32* ptr, Atomic32 value);
+void Release_Store(volatile Atomic32* ptr, Atomic32 value);
+
+Atomic32 NoBarrier_Load(volatile const Atomic32* ptr);
+Atomic32 Acquire_Load(volatile const Atomic32* ptr);
+Atomic32 Release_Load(volatile const Atomic32* ptr);
+
+// 64-bit atomic operations (only available on 64-bit processors).
+#ifdef ARCH_CPU_64_BITS
+Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
+                                  Atomic64 old_value,
+                                  Atomic64 new_value);
+Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr, Atomic64 new_value);
+Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr, Atomic64 increment);
+Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr, Atomic64 increment);
+
+Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
+                                Atomic64 old_value,
+                                Atomic64 new_value);
+Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
+                                Atomic64 old_value,
+                                Atomic64 new_value);
+void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value);
+void Acquire_Store(volatile Atomic64* ptr, Atomic64 value);
+void Release_Store(volatile Atomic64* ptr, Atomic64 value);
+Atomic64 NoBarrier_Load(volatile const Atomic64* ptr);
+Atomic64 Acquire_Load(volatile const Atomic64* ptr);
+Atomic64 Release_Load(volatile const Atomic64* ptr);
+#endif // ARCH_CPU_64_BITS
+
+} // namespace base::subtle
+} // namespace base
+
+// Include our platform-specific implementation.
+#if defined(OS_WIN) && defined(ARCH_CPU_X86_FAMILY)
+#include "base/atomicops_internals_x86_msvc.h"
+#elif defined(OS_MACOSX) && defined(ARCH_CPU_X86_FAMILY)
+#include "base/atomicops_internals_x86_macosx.h"
+#elif defined(COMPILER_GCC) && defined(ARCH_CPU_X86_FAMILY)
+#include "base/atomicops_internals_x86_gcc.h"
+#elif defined(COMPILER_GCC) && defined(ARCH_CPU_ARMEL)
+#include "base/atomicops_internals_arm_gcc.h"
+#elif defined(COMPILER_GCC) && defined(ARCH_CPU_ARM64)
+#include "base/atomicops_internals_arm64_gcc.h"
+#elif defined(COMPILER_GCC) && defined(ARCH_CPU_MIPS)
+#include "base/atomicops_internals_mips_gcc.h"
+#elif defined(COMPILER_GCC) && defined(ARCH_CPU_PPC_FAMILY)
+#include "base/atomicops_internals_ppc_gcc.h"
+#else
+#include "base/atomicops_internals_mutex.h"
+#endif
+
+#endif // BASE_ATOMICOPS_H_
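
The header's comments warn against plain assignment to or from an atomic variable and direct readers to the Load/Store routines instead. Below is a minimal sketch of that idiom, assuming only the declarations in the header above; the `g_flag` variable and `Example()` function are illustrative and not part of this patch.

```cpp
#include "base/atomicops.h"

// Illustrative variable; not part of atomicops.h.
static base::subtle::Atomic32 g_flag = 0;

void Example() {
  // Instead of writing "g_flag = 1;", use the Store routine:
  base::subtle::NoBarrier_Store(&g_flag, 1);

  // Instead of reading "Atomic32 v = g_flag;", use the Load routine:
  base::subtle::Atomic32 value = base::subtle::NoBarrier_Load(&g_flag);
  (void)value;  // Silence unused-variable warnings in this sketch.
}
```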
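The Acquire/Release variants exist for exactly the kind of higher-level primitive the comments mention, such as a spinlock. The sketch below builds one from the routines declared in this header; the `SpinLock` class is a hypothetical illustration, not part of the patch.

```cpp
#include "base/atomicops.h"

// Hypothetical spinlock built on the atomicops.h routines.
class SpinLock {
 public:
  SpinLock() : state_(0) {}

  void Lock() {
    // Acquire semantics: accesses inside the critical section cannot be
    // reordered ahead of the successful compare-and-swap. The routine
    // returns the previous value, so 0 means the lock was free and is now
    // held by the caller.
    while (base::subtle::Acquire_CompareAndSwap(&state_, 0, 1) != 0) {
      // Spin until the lock is observed unlocked.
    }
  }

  void Unlock() {
    // Release semantics: accesses inside the critical section cannot be
    // reordered after this store.
    base::subtle::Release_Store(&state_, 0);
  }

 private:
  volatile base::subtle::Atomic32 state_;
};
```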