Diffstat (limited to 'security/sandbox/chromium/base/memory')
-rw-r--r--  security/sandbox/chromium/base/memory/aligned_memory.h                      117
-rw-r--r--  security/sandbox/chromium/base/memory/raw_scoped_refptr_mismatch_checker.h   64
-rw-r--r--  security/sandbox/chromium/base/memory/ref_counted.cc                         53
-rw-r--r--  security/sandbox/chromium/base/memory/ref_counted.h                         446
-rw-r--r--  security/sandbox/chromium/base/memory/scoped_ptr.h                          607
-rw-r--r--  security/sandbox/chromium/base/memory/singleton.cc                           34
-rw-r--r--  security/sandbox/chromium/base/memory/singleton.h                           284
-rw-r--r--  security/sandbox/chromium/base/memory/weak_ptr.h                            345
8 files changed, 1950 insertions, 0 deletions
diff --git a/security/sandbox/chromium/base/memory/aligned_memory.h b/security/sandbox/chromium/base/memory/aligned_memory.h
new file mode 100644
index 000000000..bb7bd872c
--- /dev/null
+++ b/security/sandbox/chromium/base/memory/aligned_memory.h
@@ -0,0 +1,117 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// AlignedMemory is a POD type that gives you a portable way to specify static
+// or local stack data of a given alignment and size. For example, if you need
+// static storage for a class, but you want manual control over when the object
+// is constructed and destructed (you don't want static initialization and
+// destruction), use AlignedMemory:
+//
+// static AlignedMemory<sizeof(MyClass), ALIGNOF(MyClass)> my_class;
+//
+// // ... at runtime:
+// new(my_class.void_data()) MyClass();
+//
+// // ... use it:
+// MyClass* mc = my_class.data_as<MyClass>();
+//
+// // ... later, to destruct my_class:
+// my_class.data_as<MyClass>()->MyClass::~MyClass();
+//
+// Alternatively, a runtime-sized aligned allocation can be created:
+//
+// float* my_array = static_cast<float*>(AlignedAlloc(size, alignment));
+//
+// // ... later, to release the memory:
+// AlignedFree(my_array);
+//
+// Or using scoped_ptr:
+//
+// scoped_ptr<float, AlignedFreeDeleter> my_array(
+// static_cast<float*>(AlignedAlloc(size, alignment)));
+
+#ifndef BASE_MEMORY_ALIGNED_MEMORY_H_
+#define BASE_MEMORY_ALIGNED_MEMORY_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "base/base_export.h"
+#include "base/compiler_specific.h"
+
+#if defined(COMPILER_MSVC)
+#include <malloc.h>
+#else
+#include <stdlib.h>
+#endif
+
+namespace base {
+
+// AlignedMemory is specialized for all supported alignments.
+// Make sure we get a compiler error if someone uses an unsupported alignment.
+template <size_t Size, size_t ByteAlignment>
+struct AlignedMemory {};
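+
+// E.g. AlignedMemory<16, 3> matches this empty primary template, so any
+// attempt to use it (such as calling void_data()) fails to compile.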
+
+#define BASE_DECL_ALIGNED_MEMORY(byte_alignment) \
+ template <size_t Size> \
+ class AlignedMemory<Size, byte_alignment> { \
+ public: \
+ ALIGNAS(byte_alignment) uint8_t data_[Size]; \
+ void* void_data() { return static_cast<void*>(data_); } \
+ const void* void_data() const { return static_cast<const void*>(data_); } \
+ template <typename Type> \
+ Type* data_as() { \
+ return static_cast<Type*>(void_data()); \
+ } \
+ template <typename Type> \
+ const Type* data_as() const { \
+ return static_cast<const Type*>(void_data()); \
+ } \
+ \
+ private: \
+ void* operator new(size_t); \
+ void operator delete(void*); \
+ }
+
+// Specialization for all alignments is required because MSVC (as of VS 2008)
+// does not understand ALIGNAS(ALIGNOF(Type)) or ALIGNAS(template_param).
+// Greater than 4096 alignment is not supported by some compilers, so 4096 is
+// the maximum specified here.
+BASE_DECL_ALIGNED_MEMORY(1);
+BASE_DECL_ALIGNED_MEMORY(2);
+BASE_DECL_ALIGNED_MEMORY(4);
+BASE_DECL_ALIGNED_MEMORY(8);
+BASE_DECL_ALIGNED_MEMORY(16);
+BASE_DECL_ALIGNED_MEMORY(32);
+BASE_DECL_ALIGNED_MEMORY(64);
+BASE_DECL_ALIGNED_MEMORY(128);
+BASE_DECL_ALIGNED_MEMORY(256);
+BASE_DECL_ALIGNED_MEMORY(512);
+BASE_DECL_ALIGNED_MEMORY(1024);
+BASE_DECL_ALIGNED_MEMORY(2048);
+BASE_DECL_ALIGNED_MEMORY(4096);
+
+#undef BASE_DECL_ALIGNED_MEMORY
+
+BASE_EXPORT void* AlignedAlloc(size_t size, size_t alignment);
+
+inline void AlignedFree(void* ptr) {
+#if defined(COMPILER_MSVC)
+ _aligned_free(ptr);
+#else
+ free(ptr);
+#endif
+}
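+
+// Note: memory obtained from AlignedAlloc() must be released with
+// AlignedFree() (or the AlignedFreeDeleter below), never plain free(),
+// since on MSVC builds it comes from _aligned_malloc(), which plain free()
+// cannot release.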
+
+// Deleter for use with scoped_ptr. E.g., use as
+// scoped_ptr<Foo, base::AlignedFreeDeleter> foo;
+struct AlignedFreeDeleter {
+ inline void operator()(void* ptr) const {
+ AlignedFree(ptr);
+ }
+};
+
+} // namespace base
+
+#endif // BASE_MEMORY_ALIGNED_MEMORY_H_
diff --git a/security/sandbox/chromium/base/memory/raw_scoped_refptr_mismatch_checker.h b/security/sandbox/chromium/base/memory/raw_scoped_refptr_mismatch_checker.h
new file mode 100644
index 000000000..09f982b12
--- /dev/null
+++ b/security/sandbox/chromium/base/memory/raw_scoped_refptr_mismatch_checker.h
@@ -0,0 +1,64 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MEMORY_RAW_SCOPED_REFPTR_MISMATCH_CHECKER_H_
+#define BASE_MEMORY_RAW_SCOPED_REFPTR_MISMATCH_CHECKER_H_
+
+#include "base/memory/ref_counted.h"
+#include "base/template_util.h"
+#include "base/tuple.h"
+#include "build/build_config.h"
+
+// It is dangerous to post a task with a T* argument where T is a subtype of
+// RefCounted(Base|ThreadSafeBase), since by the time the parameter is used, the
+// object may already have been deleted since it was not held with a
+// scoped_refptr. Example: http://crbug.com/27191
+// The following set of traits are designed to generate a compile error
+// whenever this antipattern is attempted.
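+//
+// For illustration only (the PostTask/Bind plumbing is hypothetical, not
+// part of this header), the antipattern and its fix look like:
+//
+//   class Foo : public base::RefCounted<Foo> { ... };
+//   void UseFoo(Foo* foo);
+//
+//   // BAD: |raw_foo| may be deleted before the posted task runs; these
+//   // traits are what turn this into a compile error.
+//   PostTask(FROM_HERE, base::Bind(&UseFoo, raw_foo));
+//
+//   // GOOD: the bound scoped_refptr keeps the object alive until the task
+//   // has run.
+//   PostTask(FROM_HERE, base::Bind(&UseFoo, make_scoped_refptr(raw_foo)));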
+
+namespace base {
+
+// This is a base internal implementation file used by task.h and callback.h.
+// Not for public consumption, so we wrap it in namespace internal.
+namespace internal {
+
+template <typename T>
+struct NeedsScopedRefptrButGetsRawPtr {
+#if defined(OS_WIN)
+ enum {
+ value = base::false_type::value
+ };
+#else
+ enum {
+ // Human-readable translation: T needs to be a scoped_refptr if it is a raw
+ // pointer type that is convertible to a RefCounted(Base|ThreadSafeBase)
+ // type.
+ value = (is_pointer<T>::value &&
+ (is_convertible<T, subtle::RefCountedBase*>::value ||
+ is_convertible<T, subtle::RefCountedThreadSafeBase*>::value))
+ };
+#endif
+};
+
+template <typename Params>
+struct ParamsUseScopedRefptrCorrectly {
+ enum { value = 0 };
+};
+
+template <>
+struct ParamsUseScopedRefptrCorrectly<Tuple<>> {
+ enum { value = 1 };
+};
+
+template <typename Head, typename... Tail>
+struct ParamsUseScopedRefptrCorrectly<Tuple<Head, Tail...>> {
+ enum { value = !NeedsScopedRefptrButGetsRawPtr<Head>::value &&
+ ParamsUseScopedRefptrCorrectly<Tuple<Tail...>>::value };
+};
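+
+// For illustration (type name hypothetical): given
+//   class Ref : public base::RefCounted<Ref> {};
+// ParamsUseScopedRefptrCorrectly<Tuple<int, Ref*>>::value is 0 on
+// non-Windows platforms, since the raw Ref* head trips
+// NeedsScopedRefptrButGetsRawPtr, while
+// ParamsUseScopedRefptrCorrectly<Tuple<int, scoped_refptr<Ref>>>::value
+// is 1.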
+
+} // namespace internal
+
+} // namespace base
+
+#endif // BASE_MEMORY_RAW_SCOPED_REFPTR_MISMATCH_CHECKER_H_
diff --git a/security/sandbox/chromium/base/memory/ref_counted.cc b/security/sandbox/chromium/base/memory/ref_counted.cc
new file mode 100644
index 000000000..f5924d0fe
--- /dev/null
+++ b/security/sandbox/chromium/base/memory/ref_counted.cc
@@ -0,0 +1,53 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/ref_counted.h"
+#include "base/threading/thread_collision_warner.h"
+
+namespace base {
+
+namespace subtle {
+
+bool RefCountedThreadSafeBase::HasOneRef() const {
+ return AtomicRefCountIsOne(
+ &const_cast<RefCountedThreadSafeBase*>(this)->ref_count_);
+}
+
+RefCountedThreadSafeBase::RefCountedThreadSafeBase() : ref_count_(0) {
+#ifndef NDEBUG
+ in_dtor_ = false;
+#endif
+}
+
+RefCountedThreadSafeBase::~RefCountedThreadSafeBase() {
+#ifndef NDEBUG
+ DCHECK(in_dtor_) << "RefCountedThreadSafe object deleted without "
+ "calling Release()";
+#endif
+}
+
+void RefCountedThreadSafeBase::AddRef() const {
+#ifndef NDEBUG
+ DCHECK(!in_dtor_);
+#endif
+ AtomicRefCountInc(&ref_count_);
+}
+
+bool RefCountedThreadSafeBase::Release() const {
+#ifndef NDEBUG
+ DCHECK(!in_dtor_);
+ DCHECK(!AtomicRefCountIsZero(&ref_count_));
+#endif
+ if (!AtomicRefCountDec(&ref_count_)) {
+#ifndef NDEBUG
+ in_dtor_ = true;
+#endif
+ return true;
+ }
+ return false;
+}
+
+} // namespace subtle
+
+} // namespace base
diff --git a/security/sandbox/chromium/base/memory/ref_counted.h b/security/sandbox/chromium/base/memory/ref_counted.h
new file mode 100644
index 000000000..a1c126969
--- /dev/null
+++ b/security/sandbox/chromium/base/memory/ref_counted.h
@@ -0,0 +1,446 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MEMORY_REF_COUNTED_H_
+#define BASE_MEMORY_REF_COUNTED_H_
+
+#include <cassert>
+#include <iosfwd>
+
+#include "base/atomic_ref_count.h"
+#include "base/base_export.h"
+#include "base/compiler_specific.h"
+#include "base/macros.h"
+#ifndef NDEBUG
+#include "base/logging.h"
+#endif
+#include "base/threading/thread_collision_warner.h"
+#include "build/build_config.h"
+
+namespace base {
+
+namespace subtle {
+
+class BASE_EXPORT RefCountedBase {
+ public:
+ bool HasOneRef() const { return ref_count_ == 1; }
+
+ protected:
+ RefCountedBase()
+ : ref_count_(0)
+ #ifndef NDEBUG
+ , in_dtor_(false)
+ #endif
+ {
+ }
+
+ ~RefCountedBase() {
+ #ifndef NDEBUG
+ DCHECK(in_dtor_) << "RefCounted object deleted without calling Release()";
+ #endif
+ }
+
+
+ void AddRef() const {
+ // TODO(maruel): Add back once it doesn't assert 500 times/sec.
+ // Current thread books the critical section "AddRelease"
+ // without releasing it.
+ // DFAKE_SCOPED_LOCK_THREAD_LOCKED(add_release_);
+ #ifndef NDEBUG
+ DCHECK(!in_dtor_);
+ #endif
+ ++ref_count_;
+ }
+
+ // Returns true if the object should self-delete.
+ bool Release() const {
+ // TODO(maruel): Add back once it doesn't assert 500 times/sec.
+ // Current thread books the critical section "AddRelease"
+ // without releasing it.
+ // DFAKE_SCOPED_LOCK_THREAD_LOCKED(add_release_);
+ #ifndef NDEBUG
+ DCHECK(!in_dtor_);
+ #endif
+ if (--ref_count_ == 0) {
+ #ifndef NDEBUG
+ in_dtor_ = true;
+ #endif
+ return true;
+ }
+ return false;
+ }
+
+ private:
+ mutable int ref_count_;
+#ifndef NDEBUG
+ mutable bool in_dtor_;
+#endif
+
+ DFAKE_MUTEX(add_release_);
+
+ DISALLOW_COPY_AND_ASSIGN(RefCountedBase);
+};
+
+class BASE_EXPORT RefCountedThreadSafeBase {
+ public:
+ bool HasOneRef() const;
+
+ protected:
+ RefCountedThreadSafeBase();
+ ~RefCountedThreadSafeBase();
+
+ void AddRef() const;
+
+ // Returns true if the object should self-delete.
+ bool Release() const;
+
+ private:
+ mutable AtomicRefCount ref_count_;
+#ifndef NDEBUG
+ mutable bool in_dtor_;
+#endif
+
+ DISALLOW_COPY_AND_ASSIGN(RefCountedThreadSafeBase);
+};
+
+} // namespace subtle
+
+//
+// A base class for reference counted classes. Otherwise known as a cheap
+// knock-off of WebKit's RefCounted<T> class. To use it, just extend your
+// class from it like so:
+//
+// class MyFoo : public base::RefCounted<MyFoo> {
+// ...
+// private:
+// friend class base::RefCounted<MyFoo>;
+// ~MyFoo();
+// };
+//
+// You should always make your destructor non-public, to avoid any code
+// deleting the object accidentally while there are references to it.
+template <class T>
+class RefCounted : public subtle::RefCountedBase {
+ public:
+ RefCounted() {}
+
+ void AddRef() const {
+ subtle::RefCountedBase::AddRef();
+ }
+
+ void Release() const {
+ if (subtle::RefCountedBase::Release()) {
+ delete static_cast<const T*>(this);
+ }
+ }
+
+ protected:
+ ~RefCounted() {}
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(RefCounted<T>);
+};
+
+// Forward declaration.
+template <class T, typename Traits> class RefCountedThreadSafe;
+
+// Default traits for RefCountedThreadSafe<T>. Deletes the object when its ref
+// count reaches 0. Overload to delete it on a different thread etc.
+template<typename T>
+struct DefaultRefCountedThreadSafeTraits {
+ static void Destruct(const T* x) {
+ // Delete through RefCountedThreadSafe so that child classes only need to
+ // befriend RefCountedThreadSafe instead of this struct, which is an
+ // implementation detail.
+ RefCountedThreadSafe<T,
+ DefaultRefCountedThreadSafeTraits>::DeleteInternal(x);
+ }
+};
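+
+// A sketch of such an overload (the task runner and FROM_HERE plumbing are
+// hypothetical, not provided by this header):
+//
+//   struct DeleteOnIOThreadTraits {
+//     static void Destruct(const MyFoo* x) {
+//       io_task_runner->DeleteSoon(FROM_HERE, x);
+//     }
+//   };
+//   class MyFoo
+//       : public base::RefCountedThreadSafe<MyFoo, DeleteOnIOThreadTraits> {
+//     ...
+//   };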
+
+//
+// A thread-safe variant of RefCounted<T>
+//
+// class MyFoo : public base::RefCountedThreadSafe<MyFoo> {
+// ...
+// };
+//
+// If you're using the default trait, then you should add compile-time
+// asserts that no one else is deleting your object, i.e.:
+// private:
+// friend class base::RefCountedThreadSafe<MyFoo>;
+// ~MyFoo();
+template <class T, typename Traits = DefaultRefCountedThreadSafeTraits<T> >
+class RefCountedThreadSafe : public subtle::RefCountedThreadSafeBase {
+ public:
+ RefCountedThreadSafe() {}
+
+ void AddRef() const {
+ subtle::RefCountedThreadSafeBase::AddRef();
+ }
+
+ void Release() const {
+ if (subtle::RefCountedThreadSafeBase::Release()) {
+ Traits::Destruct(static_cast<const T*>(this));
+ }
+ }
+
+ protected:
+ ~RefCountedThreadSafe() {}
+
+ private:
+ friend struct DefaultRefCountedThreadSafeTraits<T>;
+ static void DeleteInternal(const T* x) { delete x; }
+
+ DISALLOW_COPY_AND_ASSIGN(RefCountedThreadSafe);
+};
+
+//
+// A thread-safe wrapper for some piece of data so we can place other
+// things in scoped_refptrs<>.
+//
+template<typename T>
+class RefCountedData
+ : public base::RefCountedThreadSafe< base::RefCountedData<T> > {
+ public:
+ RefCountedData() : data() {}
+ RefCountedData(const T& in_value) : data(in_value) {}
+
+ T data;
+
+ private:
+ friend class base::RefCountedThreadSafe<base::RefCountedData<T> >;
+ ~RefCountedData() {}
+};
+
+} // namespace base
+
+//
+// A smart pointer class for reference counted objects. Use this class instead
+// of calling AddRef and Release manually on a reference counted object to
+// avoid common memory leaks caused by forgetting to Release an object
+// reference. Sample usage:
+//
+// class MyFoo : public RefCounted<MyFoo> {
+// ...
+// };
+//
+// void some_function() {
+// scoped_refptr<MyFoo> foo = new MyFoo();
+// foo->Method(param);
+// // |foo| is released when this function returns
+// }
+//
+// void some_other_function() {
+// scoped_refptr<MyFoo> foo = new MyFoo();
+// ...
+// foo = NULL; // explicitly releases |foo|
+// ...
+// if (foo)
+// foo->Method(param);
+// }
+//
+// The above examples show how scoped_refptr<T> acts like a pointer to T.
+// Given two scoped_refptr<T> classes, it is also possible to exchange
+// references between the two objects, like so:
+//
+// {
+// scoped_refptr<MyFoo> a = new MyFoo();
+// scoped_refptr<MyFoo> b;
+//
+// b.swap(a);
+// // now, |b| references the MyFoo object, and |a| references NULL.
+// }
+//
+// To make both |a| and |b| in the above example reference the same MyFoo
+// object, simply use the assignment operator:
+//
+// {
+// scoped_refptr<MyFoo> a = new MyFoo();
+// scoped_refptr<MyFoo> b;
+//
+// b = a;
+// // now, |a| and |b| each own a reference to the same MyFoo object.
+// }
+//
+template <class T>
+class scoped_refptr {
+ public:
+ typedef T element_type;
+
+ scoped_refptr() : ptr_(NULL) {
+ }
+
+ scoped_refptr(T* p) : ptr_(p) {
+ if (ptr_)
+ AddRef(ptr_);
+ }
+
+ // Copy constructor.
+ scoped_refptr(const scoped_refptr<T>& r) : ptr_(r.ptr_) {
+ if (ptr_)
+ AddRef(ptr_);
+ }
+
+ // Copy conversion constructor.
+ template <typename U>
+ scoped_refptr(const scoped_refptr<U>& r) : ptr_(r.get()) {
+ if (ptr_)
+ AddRef(ptr_);
+ }
+
+ // Move constructor. This is required in addition to the conversion
+ // constructor below in order for clang to warn about pessimizing moves.
+ scoped_refptr(scoped_refptr&& r) : ptr_(r.get()) { r.ptr_ = nullptr; }
+
+ // Move conversion constructor.
+ template <typename U>
+ scoped_refptr(scoped_refptr<U>&& r) : ptr_(r.get()) {
+ r.ptr_ = nullptr;
+ }
+
+ ~scoped_refptr() {
+ if (ptr_)
+ Release(ptr_);
+ }
+
+ T* get() const { return ptr_; }
+
+ T& operator*() const {
+ assert(ptr_ != NULL);
+ return *ptr_;
+ }
+
+ T* operator->() const {
+ assert(ptr_ != NULL);
+ return ptr_;
+ }
+
+ scoped_refptr<T>& operator=(T* p) {
+ // AddRef first so that self-assignment works.
+ if (p)
+ AddRef(p);
+ T* old_ptr = ptr_;
+ ptr_ = p;
+ if (old_ptr)
+ Release(old_ptr);
+ return *this;
+ }
+
+ scoped_refptr<T>& operator=(const scoped_refptr<T>& r) {
+ return *this = r.ptr_;
+ }
+
+ template <typename U>
+ scoped_refptr<T>& operator=(const scoped_refptr<U>& r) {
+ return *this = r.get();
+ }
+
+ scoped_refptr<T>& operator=(scoped_refptr<T>&& r) {
+ scoped_refptr<T>(std::move(r)).swap(*this);
+ return *this;
+ }
+
+ template <typename U>
+ scoped_refptr<T>& operator=(scoped_refptr<U>&& r) {
+ scoped_refptr<T>(std::move(r)).swap(*this);
+ return *this;
+ }
+
+ void swap(T** pp) {
+ T* p = ptr_;
+ ptr_ = *pp;
+ *pp = p;
+ }
+
+ void swap(scoped_refptr<T>& r) {
+ swap(&r.ptr_);
+ }
+
+ private:
+ template <typename U> friend class scoped_refptr;
+
+ // Allow scoped_refptr<T> to be used in boolean expressions, but not
+ // implicitly convertible to a real bool (which is dangerous).
+ //
+ // Note that this trick is only safe when the == and != operators
+ // are declared explicitly, as otherwise "refptr1 == refptr2"
+ // will compile but do the wrong thing (i.e., convert to Testable
+ // and then do the comparison).
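+ //
+ // A sketch of what an implicit "operator bool()" would wrongly allow:
+ //   scoped_refptr<MyFoo> a, b;
+ //   if (a == b) {}   // Would compare two bools, not the pointers.
+ //   int n = a + 1;   // Nonsense arithmetic would compile.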
+ typedef T* scoped_refptr::*Testable;
+
+ public:
+ operator Testable() const { return ptr_ ? &scoped_refptr::ptr_ : nullptr; }
+
+ template <typename U>
+ bool operator==(const scoped_refptr<U>& rhs) const {
+ return ptr_ == rhs.get();
+ }
+
+ template <typename U>
+ bool operator!=(const scoped_refptr<U>& rhs) const {
+ return !operator==(rhs);
+ }
+
+ template <typename U>
+ bool operator<(const scoped_refptr<U>& rhs) const {
+ return ptr_ < rhs.get();
+ }
+
+ protected:
+ T* ptr_;
+
+ private:
+ // Non-inline helpers to allow:
+ // class Opaque;
+ // extern template class scoped_refptr<Opaque>;
+ // Otherwise the compiler will complain that Opaque is an incomplete type.
+ static void AddRef(T* ptr);
+ static void Release(T* ptr);
+};
+
+template <typename T>
+void scoped_refptr<T>::AddRef(T* ptr) {
+ ptr->AddRef();
+}
+
+template <typename T>
+void scoped_refptr<T>::Release(T* ptr) {
+ ptr->Release();
+}
+
+// Handy utility for creating a scoped_refptr<T> out of a T* explicitly without
+// having to retype all the template arguments
+template <typename T>
+scoped_refptr<T> make_scoped_refptr(T* t) {
+ return scoped_refptr<T>(t);
+}
+
+// Temporary operator overloads to facilitate the transition. See
+// https://crbug.com/110610.
+template <typename T, typename U>
+bool operator==(const scoped_refptr<T>& lhs, const U* rhs) {
+ return lhs.get() == rhs;
+}
+
+template <typename T, typename U>
+bool operator==(const T* lhs, const scoped_refptr<U>& rhs) {
+ return lhs == rhs.get();
+}
+
+template <typename T, typename U>
+bool operator!=(const scoped_refptr<T>& lhs, const U* rhs) {
+ return !operator==(lhs, rhs);
+}
+
+template <typename T, typename U>
+bool operator!=(const T* lhs, const scoped_refptr<U>& rhs) {
+ return !operator==(lhs, rhs);
+}
+
+template <typename T>
+std::ostream& operator<<(std::ostream& out, const scoped_refptr<T>& p) {
+ return out << p.get();
+}
+
+#endif // BASE_MEMORY_REF_COUNTED_H_
diff --git a/security/sandbox/chromium/base/memory/scoped_ptr.h b/security/sandbox/chromium/base/memory/scoped_ptr.h
new file mode 100644
index 000000000..282a01486
--- /dev/null
+++ b/security/sandbox/chromium/base/memory/scoped_ptr.h
@@ -0,0 +1,607 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Scopers help you manage ownership of a pointer, helping you easily manage a
+// pointer within a scope, and automatically destroying the pointer at the end
+// of a scope. There are two main classes you will use, which correspond to the
+// operators new/delete and new[]/delete[].
+//
+// Example usage (scoped_ptr<T>):
+// {
+// scoped_ptr<Foo> foo(new Foo("wee"));
+// } // foo goes out of scope, releasing the pointer with it.
+//
+// {
+// scoped_ptr<Foo> foo; // No pointer managed.
+// foo.reset(new Foo("wee")); // Now a pointer is managed.
+// foo.reset(new Foo("wee2")); // Foo("wee") was destroyed.
+// foo.reset(new Foo("wee3")); // Foo("wee2") was destroyed.
+// foo->Method(); // Foo::Method() called.
+// foo.get()->Method(); // Foo::Method() called.
+// SomeFunc(foo.release()); // SomeFunc takes ownership, foo no longer
+// // manages a pointer.
+// foo.reset(new Foo("wee4")); // foo manages a pointer again.
+// foo.reset(); // Foo("wee4") destroyed, foo no longer
+// // manages a pointer.
+// } // foo wasn't managing a pointer, so nothing was destroyed.
+//
+// Example usage (scoped_ptr<T[]>):
+// {
+// scoped_ptr<Foo[]> foo(new Foo[100]);
+// foo.get()->Method(); // Foo::Method on the 0th element.
+// foo[10].Method(); // Foo::Method on the 10th element.
+// }
+//
+// These scopers also implement part of the functionality of C++11 unique_ptr
+// in that they are "movable but not copyable." You can use the scopers in
+// the parameter and return types of functions to signify ownership transfer
+// in to and out of a function. When calling a function that has a scoper
+// as the argument type, it must be called with an rvalue of a scoper, which
+// can be created by using std::move(), or the result of another function that
+// generates a temporary; passing by copy will NOT work. Here is an example
+// using scoped_ptr:
+//
+// void TakesOwnership(scoped_ptr<Foo> arg) {
+// // Do something with arg.
+// }
+// scoped_ptr<Foo> CreateFoo() {
+// // No need for calling std::move() for returning a move-only value, or
+// // when you already have an rvalue as we do here.
+// return scoped_ptr<Foo>(new Foo("new"));
+// }
+// scoped_ptr<Foo> PassThru(scoped_ptr<Foo> arg) {
+// return arg;
+// }
+//
+// {
+// scoped_ptr<Foo> ptr(new Foo("yay")); // ptr manages Foo("yay").
+// TakesOwnership(std::move(ptr)); // ptr no longer owns Foo("yay").
+// scoped_ptr<Foo> ptr2 = CreateFoo(); // ptr2 owns the return Foo.
+// scoped_ptr<Foo> ptr3 = // ptr3 now owns what was in ptr2.
+// PassThru(std::move(ptr2)); // ptr2 is correspondingly nullptr.
+// }
+//
+// Notice that if you do not call std::move() when returning from PassThru(), or
+// when invoking TakesOwnership(), the code will not compile because scopers
+// are not copyable; they only implement move semantics which require calling
+// the std::move() function to signify a destructive transfer of state.
+// CreateFoo() is different though because we are constructing a temporary on
+// the return line and thus can avoid needing to call std::move().
+//
+// The conversion move-constructor properly handles upcast in initialization,
+// i.e. you can use a scoped_ptr<Child> to initialize a scoped_ptr<Parent>:
+//
+// scoped_ptr<Foo> foo(new Foo());
+// scoped_ptr<FooParent> parent(std::move(foo));
+
+#ifndef BASE_MEMORY_SCOPED_PTR_H_
+#define BASE_MEMORY_SCOPED_PTR_H_
+
+// This is an implementation designed to match the anticipated future TR2
+// implementation of the scoped_ptr class.
+
+#include <assert.h>
+#include <stddef.h>
+#include <stdlib.h>
+
+#include <iosfwd>
+#include <memory>
+#include <type_traits>
+#include <utility>
+
+#include "base/compiler_specific.h"
+#include "base/macros.h"
+#include "base/move.h"
+#include "base/template_util.h"
+
+namespace base {
+
+namespace subtle {
+class RefCountedBase;
+class RefCountedThreadSafeBase;
+} // namespace subtle
+
+// Function object which invokes 'free' on its parameter, which must be
+// a pointer. Can be used to store malloc-allocated pointers in scoped_ptr:
+//
+// scoped_ptr<int, base::FreeDeleter> foo_ptr(
+// static_cast<int*>(malloc(sizeof(int))));
+struct FreeDeleter {
+ inline void operator()(void* ptr) const {
+ free(ptr);
+ }
+};
+
+namespace internal {
+
+template <typename T> struct IsNotRefCounted {
+ enum {
+ value = !base::is_convertible<T*, base::subtle::RefCountedBase*>::value &&
+ !base::is_convertible<T*, base::subtle::RefCountedThreadSafeBase*>::
+ value
+ };
+};
+
+// Minimal implementation of the core logic of scoped_ptr, suitable for
+// reuse in both scoped_ptr and its specializations.
+template <class T, class D>
+class scoped_ptr_impl {
+ public:
+ explicit scoped_ptr_impl(T* p) : data_(p) {}
+
+ // Initializer for deleters that have data parameters.
+ scoped_ptr_impl(T* p, const D& d) : data_(p, d) {}
+
+ // Templated constructor that destructively takes the value from another
+ // scoped_ptr_impl.
+ template <typename U, typename V>
+ scoped_ptr_impl(scoped_ptr_impl<U, V>* other)
+ : data_(other->release(), other->get_deleter()) {
+ // We do not support move-only deleters. We could modify our move
+ // emulation to have base::subtle::move() and base::subtle::forward()
+ // functions that are imperfect emulations of their C++11 equivalents,
+ // but until there's a requirement, just assume deleters are copyable.
+ }
+
+ template <typename U, typename V>
+ void TakeState(scoped_ptr_impl<U, V>* other) {
+ // See comment in templated constructor above regarding lack of support
+ // for move-only deleters.
+ reset(other->release());
+ get_deleter() = other->get_deleter();
+ }
+
+ ~scoped_ptr_impl() {
+ // Match libc++, which calls reset() in its destructor.
+ // Use nullptr as the new value for three reasons:
+ // 1. libc++ does it.
+ // 2. Avoids infinitely recursing into destructors if two classes are owned
+ // in a reference cycle (see ScopedPtrTest.ReferenceCycle).
+ // 3. If |this| is accessed in the future, in a use-after-free bug, attempts
+ // to dereference |this|'s pointer should cause either a failure or a
+ // segfault closer to the problem. If |this| wasn't reset to nullptr,
+ // the access would cause the deleted memory to be read or written
+ // leading to other more subtle issues.
+ reset(nullptr);
+ }
+
+ void reset(T* p) {
+ // Match C++11's definition of unique_ptr::reset(), which requires changing
+ // the pointer before invoking the deleter on the old pointer. This prevents
+ // |this| from being accessed after the deleter is run, which may destroy
+ // |this|.
+ T* old = data_.ptr;
+ data_.ptr = p;
+ if (old != nullptr)
+ static_cast<D&>(data_)(old);
+ }
+
+ T* get() const { return data_.ptr; }
+
+ D& get_deleter() { return data_; }
+ const D& get_deleter() const { return data_; }
+
+ void swap(scoped_ptr_impl& p2) {
+ // Standard swap idiom: 'using std::swap' ensures that std::swap is
+ // present in the overload set, but we call swap unqualified so that
+ // any more-specific overloads can be used, if available.
+ using std::swap;
+ swap(static_cast<D&>(data_), static_cast<D&>(p2.data_));
+ swap(data_.ptr, p2.data_.ptr);
+ }
+
+ T* release() {
+ T* old_ptr = data_.ptr;
+ data_.ptr = nullptr;
+ return old_ptr;
+ }
+
+ private:
+ // Needed to allow type-converting constructor.
+ template <typename U, typename V> friend class scoped_ptr_impl;
+
+ // Use the empty base class optimization to allow us to have a D
+ // member, while avoiding any space overhead for it when D is an
+ // empty class. See e.g. http://www.cantrip.org/emptyopt.html for a good
+ // discussion of this technique.
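+ //
+ // As a hedged illustration (true on most compilers, not mandated by the
+ // standard): with a stateless deleter such as std::default_delete<T>,
+ //   static_assert(sizeof(scoped_ptr<int>) == sizeof(int*),
+ //                 "empty deleter adds no size");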
+ struct Data : public D {
+ explicit Data(T* ptr_in) : ptr(ptr_in) {}
+ Data(T* ptr_in, const D& other) : D(other), ptr(ptr_in) {}
+ T* ptr;
+ };
+
+ Data data_;
+
+ DISALLOW_COPY_AND_ASSIGN(scoped_ptr_impl);
+};
+
+} // namespace internal
+
+} // namespace base
+
+// A scoped_ptr<T> is like a T*, except that the destructor of scoped_ptr<T>
+// automatically deletes the pointer it holds (if any).
+// That is, scoped_ptr<T> owns the T object that it points to.
+// Like a T*, a scoped_ptr<T> may hold either nullptr or a pointer to a T
+// object. Also like T*, scoped_ptr<T> is thread-compatible, and once you
+// dereference it, you get the thread safety guarantees of T.
+//
+// The size of scoped_ptr is small. On most compilers, when using the
+// std::default_delete, sizeof(scoped_ptr<T>) == sizeof(T*). Custom deleters
+// will increase the size proportional to whatever state they need to have. See
+// comments inside scoped_ptr_impl<> for details.
+//
+// Current implementation targets having a strict subset of C++11's
+// unique_ptr<> features. Known deficiencies include not supporting move-only
+// deleters, function pointers as deleters, and deleters with reference
+// types.
+template <class T, class D = std::default_delete<T>>
+class scoped_ptr {
+ DISALLOW_COPY_AND_ASSIGN_WITH_MOVE_FOR_BIND(scoped_ptr)
+
+ static_assert(!std::is_array<T>::value,
+ "scoped_ptr doesn't support array with size");
+ static_assert(base::internal::IsNotRefCounted<T>::value,
+ "T is a refcounted type and needs a scoped_refptr");
+
+ public:
+ // The element and deleter types.
+ using element_type = T;
+ using deleter_type = D;
+
+ // Constructor. Defaults to initializing with nullptr.
+ scoped_ptr() : impl_(nullptr) {}
+
+ // Constructor. Takes ownership of p.
+ explicit scoped_ptr(element_type* p) : impl_(p) {}
+
+ // Constructor. Allows initialization of a stateful deleter.
+ scoped_ptr(element_type* p, const D& d) : impl_(p, d) {}
+
+ // Constructor. Allows construction from a nullptr.
+ scoped_ptr(std::nullptr_t) : impl_(nullptr) {}
+
+ // Move constructor.
+ //
+ // IMPLEMENTATION NOTE: Clang requires a move constructor to be defined (and
+ // not just the conversion constructor) in order to warn on pessimizing moves.
+ // The requirements for the move constructor are specified in C++11
+ // 20.7.1.2.1.15-17, which has some subtleties around reference deleters. As
+ // we don't support reference (or move-only) deleters, the post conditions are
+ // trivially true: we always copy construct the deleter from other's deleter.
+ scoped_ptr(scoped_ptr&& other) : impl_(&other.impl_) {}
+
+ // Conversion constructor. Allows construction from a scoped_ptr rvalue for a
+ // convertible type and deleter.
+ //
+ // IMPLEMENTATION NOTE: C++ 20.7.1.2.1.19 requires this constructor to only
+ // participate in overload resolution if all the following are true:
+ // - U is implicitly convertible to T: this is important for 2 reasons:
+ // 1. So type traits don't incorrectly return true, e.g.
+ // std::is_convertible<scoped_ptr<Base>, scoped_ptr<Derived>>::value
+ // should be false.
+ // 2. To make sure code like this compiles:
+ // void F(scoped_ptr<int>);
+ // void F(scoped_ptr<Base>);
+ // // Ambiguous since both conversion constructors match.
+ // F(scoped_ptr<Derived>());
+ // - U is not an array type: to prevent conversions from scoped_ptr<T[]> to
+ // scoped_ptr<T>.
+ // - D is a reference type and E is the same type, or D is not a reference
+ // type and E is implicitly convertible to D: again, we don't support
+ // reference deleters, so we only worry about the latter requirement.
+ template <typename U,
+ typename E,
+ typename std::enable_if<!std::is_array<U>::value &&
+ std::is_convertible<U*, T*>::value &&
+ std::is_convertible<E, D>::value>::type* =
+ nullptr>
+ scoped_ptr(scoped_ptr<U, E>&& other)
+ : impl_(&other.impl_) {}
+
+ // operator=.
+ //
+ // IMPLEMENTATION NOTE: Unlike the move constructor, Clang does not appear to
+ // require a move assignment operator to trigger the pessimizing move warning:
+ // in this case, the warning triggers when moving a temporary. For consistency
+ // with the move constructor, we define it anyway. C++11 20.7.1.2.3.1-3
+ // defines several requirements around this: like the move constructor, the
+ // requirements are simplified by the fact that we don't support move-only or
+ // reference deleters.
+ scoped_ptr& operator=(scoped_ptr&& rhs) {
+ impl_.TakeState(&rhs.impl_);
+ return *this;
+ }
+
+ // operator=. Allows assignment from a scoped_ptr rvalue for a convertible
+ // type and deleter.
+ //
+ // IMPLEMENTATION NOTE: C++11 unique_ptr<> keeps this operator= distinct from
+ // the normal move assignment operator. C++11 20.7.1.2.3.4-7 contains the
+ // requirement for this operator, but like the conversion constructor, the
+ // requirements are greatly simplified by not supporting move-only or
+ // reference deleters.
+ template <typename U,
+ typename E,
+ typename std::enable_if<!std::is_array<U>::value &&
+ std::is_convertible<U*, T*>::value &&
+ // Note that this really should be
+ // std::is_assignable, but <type_traits>
+ // appears to be missing this on some
+ // platforms. This is close enough (though
+ // it's not the same).
+ std::is_convertible<D, E>::value>::type* =
+ nullptr>
+ scoped_ptr& operator=(scoped_ptr<U, E>&& rhs) {
+ impl_.TakeState(&rhs.impl_);
+ return *this;
+ }
+
+ // operator=. Allows assignment from a nullptr. Deletes the currently owned
+ // object, if any.
+ scoped_ptr& operator=(std::nullptr_t) {
+ reset();
+ return *this;
+ }
+
+ // Reset. Deletes the currently owned object, if any.
+ // Then takes ownership of a new object, if given.
+ void reset(element_type* p = nullptr) { impl_.reset(p); }
+
+ // Accessors to get the owned object.
+ // operator* and operator-> will assert() if there is no current object.
+ element_type& operator*() const {
+ assert(impl_.get() != nullptr);
+ return *impl_.get();
+ }
+ element_type* operator->() const {
+ assert(impl_.get() != nullptr);
+ return impl_.get();
+ }
+ element_type* get() const { return impl_.get(); }
+
+ // Access to the deleter.
+ deleter_type& get_deleter() { return impl_.get_deleter(); }
+ const deleter_type& get_deleter() const { return impl_.get_deleter(); }
+
+ // Allow scoped_ptr<element_type> to be used in boolean expressions, but not
+ // implicitly convertible to a real bool (which is dangerous).
+ //
+ // Note that this trick is only safe when the == and != operators
+ // are declared explicitly, as otherwise "scoped_ptr1 ==
+ // scoped_ptr2" will compile but do the wrong thing (i.e., convert
+ // to Testable and then do the comparison).
+ private:
+ typedef base::internal::scoped_ptr_impl<element_type, deleter_type>
+ scoped_ptr::*Testable;
+
+ public:
+ operator Testable() const {
+ return impl_.get() ? &scoped_ptr::impl_ : nullptr;
+ }
+
+ // Swap two scoped pointers.
+ void swap(scoped_ptr& p2) {
+ impl_.swap(p2.impl_);
+ }
+
+ // Release a pointer.
+ // The return value is the current pointer held by this object. If this object
+ // holds a nullptr, the return value is nullptr. After this operation, this
+ // object will hold a nullptr, and will not own the object any more.
+ element_type* release() WARN_UNUSED_RESULT {
+ return impl_.release();
+ }
+
+ private:
+ // Needed to reach into |impl_| in the constructor.
+ template <typename U, typename V> friend class scoped_ptr;
+ base::internal::scoped_ptr_impl<element_type, deleter_type> impl_;
+
+ // Forbidden for API compatibility with std::unique_ptr.
+ explicit scoped_ptr(int disallow_construction_from_null);
+};
+
+template <class T, class D>
+class scoped_ptr<T[], D> {
+ DISALLOW_COPY_AND_ASSIGN_WITH_MOVE_FOR_BIND(scoped_ptr)
+
+ public:
+ // The element and deleter types.
+ using element_type = T;
+ using deleter_type = D;
+
+ // Constructor. Defaults to initializing with nullptr.
+ scoped_ptr() : impl_(nullptr) {}
+
+ // Constructor. Stores the given array. Note that the argument's type
+ // must exactly match T*. In particular:
+ // - it cannot be a pointer to a type derived from T, because it is
+ // inherently unsafe in the general case to access an array through a
+ // pointer whose dynamic type does not match its static type (e.g., if
+ // T and the derived types had different sizes access would be
+ // incorrectly calculated). Deletion is also always undefined
+ // (C++98 [expr.delete]p3). If you're doing this, fix your code.
+ // - it cannot be const-qualified differently from T per unique_ptr spec
+ // (http://cplusplus.github.com/LWG/lwg-active.html#2118). Users wanting
+ // to work around this may use const_cast<const T*>().
+ explicit scoped_ptr(element_type* array) : impl_(array) {}
+
+ // Constructor. Allows construction from a nullptr.
+ scoped_ptr(std::nullptr_t) : impl_(nullptr) {}
+
+ // Constructor. Allows construction from a scoped_ptr rvalue.
+ scoped_ptr(scoped_ptr&& other) : impl_(&other.impl_) {}
+
+ // operator=. Allows assignment from a scoped_ptr rvalue.
+ scoped_ptr& operator=(scoped_ptr&& rhs) {
+ impl_.TakeState(&rhs.impl_);
+ return *this;
+ }
+
+ // operator=. Allows assignment from a nullptr. Deletes the currently owned
+ // array, if any.
+ scoped_ptr& operator=(std::nullptr_t) {
+ reset();
+ return *this;
+ }
+
+ // Reset. Deletes the currently owned array, if any.
+ // Then takes ownership of a new object, if given.
+ void reset(element_type* array = nullptr) { impl_.reset(array); }
+
+ // Accessors to get the owned array.
+ element_type& operator[](size_t i) const {
+ assert(impl_.get() != nullptr);
+ return impl_.get()[i];
+ }
+ element_type* get() const { return impl_.get(); }
+
+ // Access to the deleter.
+ deleter_type& get_deleter() { return impl_.get_deleter(); }
+ const deleter_type& get_deleter() const { return impl_.get_deleter(); }
+
+ // Allow scoped_ptr<element_type> to be used in boolean expressions, but not
+ // implicitly convertible to a real bool (which is dangerous).
+ private:
+ typedef base::internal::scoped_ptr_impl<element_type, deleter_type>
+ scoped_ptr::*Testable;
+
+ public:
+ operator Testable() const {
+ return impl_.get() ? &scoped_ptr::impl_ : nullptr;
+ }
+
+ // Swap two scoped pointers.
+ void swap(scoped_ptr& p2) {
+ impl_.swap(p2.impl_);
+ }
+
+ // Release a pointer.
+ // The return value is the current pointer held by this object. If this object
+ // holds a nullptr, the return value is nullptr. After this operation, this
+ // object will hold a nullptr, and will not own the object any more.
+ element_type* release() WARN_UNUSED_RESULT {
+ return impl_.release();
+ }
+
+ private:
+ // Force element_type to be a complete type.
+ enum { type_must_be_complete = sizeof(element_type) };
+
+ // Actually hold the data.
+ base::internal::scoped_ptr_impl<element_type, deleter_type> impl_;
+
+ // Disable initialization from any type other than element_type*, by
+ // providing a constructor that matches such an initialization, but is
+ // private and has no definition. This is disabled because it is not safe to
+ // call delete[] on an array whose static type does not match its dynamic
+ // type.
+ template <typename U> explicit scoped_ptr(U* array);
+ explicit scoped_ptr(int disallow_construction_from_null);
+
+ // Disable reset() from any type other than element_type*, for the same
+ // reasons as the constructor above.
+ template <typename U> void reset(U* array);
+ void reset(int disallow_reset_from_null);
+};
+
+// Free functions
+template <class T, class D>
+void swap(scoped_ptr<T, D>& p1, scoped_ptr<T, D>& p2) {
+ p1.swap(p2);
+}
+
+template <class T1, class D1, class T2, class D2>
+bool operator==(const scoped_ptr<T1, D1>& p1, const scoped_ptr<T2, D2>& p2) {
+ return p1.get() == p2.get();
+}
+template <class T, class D>
+bool operator==(const scoped_ptr<T, D>& p, std::nullptr_t) {
+ return p.get() == nullptr;
+}
+template <class T, class D>
+bool operator==(std::nullptr_t, const scoped_ptr<T, D>& p) {
+ return p.get() == nullptr;
+}
+
+template <class T1, class D1, class T2, class D2>
+bool operator!=(const scoped_ptr<T1, D1>& p1, const scoped_ptr<T2, D2>& p2) {
+ return !(p1 == p2);
+}
+template <class T, class D>
+bool operator!=(const scoped_ptr<T, D>& p, std::nullptr_t) {
+ return !(p == nullptr);
+}
+template <class T, class D>
+bool operator!=(std::nullptr_t, const scoped_ptr<T, D>& p) {
+ return !(p == nullptr);
+}
+
+template <class T1, class D1, class T2, class D2>
+bool operator<(const scoped_ptr<T1, D1>& p1, const scoped_ptr<T2, D2>& p2) {
+ return p1.get() < p2.get();
+}
+template <class T, class D>
+bool operator<(const scoped_ptr<T, D>& p, std::nullptr_t) {
+ return p.get() < nullptr;
+}
+template <class T, class D>
+bool operator<(std::nullptr_t, const scoped_ptr<T, D>& p) {
+ return nullptr < p.get();
+}
+
+template <class T1, class D1, class T2, class D2>
+bool operator>(const scoped_ptr<T1, D1>& p1, const scoped_ptr<T2, D2>& p2) {
+ return p2 < p1;
+}
+template <class T, class D>
+bool operator>(const scoped_ptr<T, D>& p, std::nullptr_t) {
+ return nullptr < p;
+}
+template <class T, class D>
+bool operator>(std::nullptr_t, const scoped_ptr<T, D>& p) {
+ return p < nullptr;
+}
+
+template <class T1, class D1, class T2, class D2>
+bool operator<=(const scoped_ptr<T1, D1>& p1, const scoped_ptr<T2, D2>& p2) {
+ return !(p1 > p2);
+}
+template <class T, class D>
+bool operator<=(const scoped_ptr<T, D>& p, std::nullptr_t) {
+ return !(p > nullptr);
+}
+template <class T, class D>
+bool operator<=(std::nullptr_t, const scoped_ptr<T, D>& p) {
+ return !(nullptr > p);
+}
+
+template <class T1, class D1, class T2, class D2>
+bool operator>=(const scoped_ptr<T1, D1>& p1, const scoped_ptr<T2, D2>& p2) {
+ return !(p1 < p2);
+}
+template <class T, class D>
+bool operator>=(const scoped_ptr<T, D>& p, std::nullptr_t) {
+ return !(p < nullptr);
+}
+template <class T, class D>
+bool operator>=(std::nullptr_t, const scoped_ptr<T, D>& p) {
+ return !(nullptr < p);
+}
+
+// A function to convert T* into scoped_ptr<T>
+// Doing e.g. make_scoped_ptr(new FooBarBaz<type>(arg)) is a shorter notation
+// for scoped_ptr<FooBarBaz<type> >(new FooBarBaz<type>(arg))
+template <typename T>
+scoped_ptr<T> make_scoped_ptr(T* ptr) {
+ return scoped_ptr<T>(ptr);
+}
+
+template <typename T>
+std::ostream& operator<<(std::ostream& out, const scoped_ptr<T>& p) {
+ return out << p.get();
+}
+
+#endif // BASE_MEMORY_SCOPED_PTR_H_
diff --git a/security/sandbox/chromium/base/memory/singleton.cc b/security/sandbox/chromium/base/memory/singleton.cc
new file mode 100644
index 000000000..f68ecaa8d
--- /dev/null
+++ b/security/sandbox/chromium/base/memory/singleton.cc
@@ -0,0 +1,34 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/singleton.h"
+#include "base/threading/platform_thread.h"
+
+namespace base {
+namespace internal {
+
+subtle::AtomicWord WaitForInstance(subtle::AtomicWord* instance) {
+ // Handle the race. Another thread beat us and either:
+ // - Has the object in BeingCreated state
+ // - Already has the object created...
+ // We know value != NULL. It could be kBeingCreatedMarker, or a valid ptr.
+ // Unless your constructor can be very time consuming, it is very unlikely
+ // to hit this race. When it does, we just spin and yield the thread until
+ // the object has been created.
+ subtle::AtomicWord value;
+ while (true) {
+ // The load has acquire memory ordering as the thread which reads the
+ // instance pointer must acquire visibility over the associated data.
+ // The pairing Release_Store operation is in Singleton::get().
+ value = subtle::Acquire_Load(instance);
+ if (value != kBeingCreatedMarker)
+ break;
+ PlatformThread::YieldCurrentThread();
+ }
+ return value;
+}
+
+} // namespace internal
+} // namespace base
+
diff --git a/security/sandbox/chromium/base/memory/singleton.h b/security/sandbox/chromium/base/memory/singleton.h
new file mode 100644
index 000000000..79e4441a8
--- /dev/null
+++ b/security/sandbox/chromium/base/memory/singleton.h
@@ -0,0 +1,284 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// PLEASE READ: Do you really need a singleton?
+//
+// Singletons make it hard to determine the lifetime of an object, which can
+// lead to buggy code and spurious crashes.
+//
+// Instead of adding another singleton into the mix, try to identify either:
+// a) An existing singleton that can manage your object's lifetime
+// b) Locations where you can deterministically create the object and pass
+// into other objects
+//
+// If you absolutely need a singleton, please keep them as trivial as possible
+// and ideally a leaf dependency. Singletons get problematic when they attempt
+// to do too much in their destructor or have circular dependencies.
+
+#ifndef BASE_MEMORY_SINGLETON_H_
+#define BASE_MEMORY_SINGLETON_H_
+
+#include "base/at_exit.h"
+#include "base/atomicops.h"
+#include "base/base_export.h"
+#include "base/macros.h"
+#include "base/memory/aligned_memory.h"
+#include "base/threading/thread_restrictions.h"
+
+namespace base {
+namespace internal {
+
+// Our AtomicWord doubles as a spinlock, where a value of
+// kBeingCreatedMarker means the spinlock is being held for creation.
+static const subtle::AtomicWord kBeingCreatedMarker = 1;
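+
+// The three states of the instance word are therefore:
+//   0                   - the instance has not been created yet;
+//   kBeingCreatedMarker - some thread is running Traits::New();
+//   any other value     - a valid Type* to the created instance.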
+
+// We pull out some of the functionality into a non-templated function, so that
+// we can implement the more complicated pieces out of line in the .cc file.
+BASE_EXPORT subtle::AtomicWord WaitForInstance(subtle::AtomicWord* instance);
+
+class DeleteTraceLogForTesting;
+
+} // namespace internal
+
+
+// Default traits for Singleton<Type>. Calls operator new and operator delete on
+// the object. Registers automatic deletion at process exit.
+// Overload if you need arguments or another memory allocation function.
+template<typename Type>
+struct DefaultSingletonTraits {
+ // Allocates the object.
+ static Type* New() {
+ // The parentheses are very important here; they force POD type
+ // initialization.
+ return new Type();
+ }
+
+ // Destroys the object.
+ static void Delete(Type* x) {
+ delete x;
+ }
+
+ // Set to true to automatically register deletion of the object on process
+ // exit. See below for the required call that makes this happen.
+ static const bool kRegisterAtExit = true;
+
+#ifndef NDEBUG
+ // Set to false to disallow access on a non-joinable thread. This is
+ // different from kRegisterAtExit because StaticMemorySingletonTraits allows
+ // access on non-joinable threads, and gracefully handles this.
+ static const bool kAllowedToAccessOnNonjoinableThread = false;
+#endif
+};
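+
+// A sketch of such an overload (the constructor argument is hypothetical):
+//
+//   struct MyFooSingletonTraits : public DefaultSingletonTraits<MyFoo> {
+//     static MyFoo* New() { return new MyFoo(kSomeArgument); }
+//   };
+//   // Used as Singleton<MyFoo, MyFooSingletonTraits>::get().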
+
+
+// Alternate traits for use with the Singleton<Type>. Identical to
+// DefaultSingletonTraits except that the Singleton will not be cleaned up
+// at exit.
+template<typename Type>
+struct LeakySingletonTraits : public DefaultSingletonTraits<Type> {
+ static const bool kRegisterAtExit = false;
+#ifndef NDEBUG
+ static const bool kAllowedToAccessOnNonjoinableThread = true;
+#endif
+};
+
+
+// Alternate traits for use with the Singleton<Type>. Allocates memory
+// for the singleton instance from a static buffer. The singleton will
+// be cleaned up at exit, but can't be revived after destruction unless
+// the Resurrect() method is called.
+//
+// This is useful for a certain category of things, notably logging and
+// tracing, where the singleton instance is of a type carefully constructed to
+// be safe to access post-destruction.
+// In logging and tracing you'll typically get stray calls at odd times, like
+// during static destruction, thread teardown and the like, and there's a
+// termination race on the heap-based singleton - e.g. if one thread calls
+// get(), but then another thread initiates AtExit processing, the first thread
+// may call into an object residing in unallocated memory. If the instance is
+// allocated from the data segment, then this is survivable.
+//
+// The destructor is to deallocate system resources, in this case to unregister
+// a callback the system will invoke when logging levels change. Note that
+// this is also used in e.g. Chrome Frame, where you have to allow for the
+// possibility of loading briefly into someone else's process space, and
+// so leaking is not an option, as that would sabotage the state of your host
+// process once you've unloaded.
+template <typename Type>
+struct StaticMemorySingletonTraits {
+ // WARNING: with these traits the user has to deal with get() returning
+ // NULL, since New() returns NULL once the instance has been destroyed.
+ static Type* New() {
+ // Only constructs once and returns pointer; otherwise returns NULL.
+ if (subtle::NoBarrier_AtomicExchange(&dead_, 1))
+ return NULL;
+
+ return new(buffer_.void_data()) Type();
+ }
+
+ static void Delete(Type* p) {
+ if (p != NULL)
+ p->Type::~Type();
+ }
+
+ static const bool kRegisterAtExit = true;
+ static const bool kAllowedToAccessOnNonjoinableThread = true;
+
+ // Exposed for unittesting.
+ static void Resurrect() { subtle::NoBarrier_Store(&dead_, 0); }
+
+ private:
+ static AlignedMemory<sizeof(Type), ALIGNOF(Type)> buffer_;
+ // Signal the object was already deleted, so it is not revived.
+ static subtle::Atomic32 dead_;
+};
+
+template <typename Type>
+AlignedMemory<sizeof(Type), ALIGNOF(Type)>
+ StaticMemorySingletonTraits<Type>::buffer_;
+template <typename Type>
+subtle::Atomic32 StaticMemorySingletonTraits<Type>::dead_ = 0;
+
+// The Singleton<Type, Traits, DifferentiatingType> class manages a single
+// instance of Type which will be created on first use and will be destroyed at
+// normal process exit). The Trait::Delete function will not be called on
+// abnormal process exit.
+//
+// DifferentiatingType is used as a key to differentiate two different
+// singletons having the same memory allocation functions but serving a
+// different purpose. This is mainly used for Locks serving different purposes.
+//
+// Example usage:
+//
+// In your header:
+// template <typename T> struct DefaultSingletonTraits;
+// class FooClass {
+// public:
+// static FooClass* GetInstance(); <-- See comment below on this.
+// void Bar() { ... }
+// private:
+// FooClass() { ... }
+// friend struct DefaultSingletonTraits<FooClass>;
+//
+// DISALLOW_COPY_AND_ASSIGN(FooClass);
+// };
+//
+// In your source file:
+// #include "base/memory/singleton.h"
+// FooClass* FooClass::GetInstance() {
+// return Singleton<FooClass>::get();
+// }
+//
+// And to call methods on FooClass:
+// FooClass::GetInstance()->Bar();
+//
+// NOTE: The method accessing Singleton<T>::get() has to be named GetInstance,
+// and it is important that FooClass::GetInstance() is not inlined in the
+// header. This makes sure that when source files from multiple targets include
+// this header they don't end up with different copies of the inlined code
+// creating multiple copies of the singleton.
+//
+// Singleton<> has no non-static members and doesn't need to actually be
+// instantiated.
+//
+// This class is itself thread-safe. The underlying Type must of course be
+// thread-safe if you want to use it concurrently. Two parameters may be tuned
+// depending on the user's requirements.
+//
+// Glossary:
+// RAE = kRegisterAtExit
+//
+// On every platform, if Traits::RAE is true, the singleton will be destroyed at
+// process exit. More precisely it uses AtExitManager which requires an
+// object of this type to be instantiated. AtExitManager mimics the semantics
+// of atexit() such as LIFO order but under Windows is safer to call. For more
+// information see at_exit.h.
+//
+// If Traits::RAE is false, the singleton will not be freed at process exit,
+// thus the singleton will be leaked if it is ever accessed. Traits::RAE
+// shouldn't be false unless absolutely necessary. Remember that the heap where
+// the object is allocated may be destroyed by the CRT anyway.
+//
+// Caveats:
+// (a) Every call to get(), operator->() and operator*() incurs some overhead
+// (16ns on my P4/2.8GHz) to check whether the object has already been
+// initialized. You may wish to cache the result of get(); it will not
+// change.
+//
+// (b) Your factory function must never throw an exception. This class is not
+// exception-safe.
+//
+
+template <typename Type,
+ typename Traits = DefaultSingletonTraits<Type>,
+ typename DifferentiatingType = Type>
+class Singleton {
+ private:
+ // Classes using the Singleton<T> pattern should declare a GetInstance()
+ // method and call Singleton::get() from within that.
+ friend Type* Type::GetInstance();
+
+ // Allow TraceLog tests to test tracing after OnExit.
+ friend class internal::DeleteTraceLogForTesting;
+
+ // This class is safe to be constructed and copy-constructed since it has no
+ // members.
+
+ // Return a pointer to the one true instance of the class.
+ static Type* get() {
+#ifndef NDEBUG
+ // Avoid making TLS lookup on release builds.
+ if (!Traits::kAllowedToAccessOnNonjoinableThread)
+ ThreadRestrictions::AssertSingletonAllowed();
+#endif
+
+ // The load has acquire memory ordering as the thread which reads the
+ // instance_ pointer must acquire visibility over the singleton data.
+ subtle::AtomicWord value = subtle::Acquire_Load(&instance_);
+ if (value != 0 && value != internal::kBeingCreatedMarker) {
+ return reinterpret_cast<Type*>(value);
+ }
+
+ // Object isn't created yet, maybe we will get to create it, let's try...
+ if (subtle::Acquire_CompareAndSwap(&instance_, 0,
+ internal::kBeingCreatedMarker) == 0) {
+ // instance_ was NULL and is now kBeingCreatedMarker. Only one thread
+ // will ever get here. Threads might be spinning on us, and they will
+ // stop right after we do this store.
+ Type* newval = Traits::New();
+
+ // Releases the visibility over instance_ to the readers.
+ subtle::Release_Store(&instance_,
+ reinterpret_cast<subtle::AtomicWord>(newval));
+
+ if (newval != NULL && Traits::kRegisterAtExit)
+ AtExitManager::RegisterCallback(OnExit, NULL);
+
+ return newval;
+ }
+
+ // We hit a race. Wait for the other thread to complete it.
+ value = internal::WaitForInstance(&instance_);
+
+ return reinterpret_cast<Type*>(value);
+ }
+
+ // Adapter function for use with AtExit(). This should be called single
+ // threaded, so don't use atomic operations.
+ // Calling OnExit while singleton is in use by other threads is a mistake.
+ static void OnExit(void* /*unused*/) {
+ // AtExit should only ever be registered after the singleton instance was
+ // created. We should only ever get here with a valid instance_ pointer.
+ Traits::Delete(reinterpret_cast<Type*>(subtle::NoBarrier_Load(&instance_)));
+ instance_ = 0;
+ }
+ static subtle::AtomicWord instance_;
+};
+
+template <typename Type, typename Traits, typename DifferentiatingType>
+subtle::AtomicWord Singleton<Type, Traits, DifferentiatingType>::instance_ = 0;
+
+} // namespace base
+
+#endif // BASE_MEMORY_SINGLETON_H_
diff --git a/security/sandbox/chromium/base/memory/weak_ptr.h b/security/sandbox/chromium/base/memory/weak_ptr.h
new file mode 100644
index 000000000..33d1e4736
--- /dev/null
+++ b/security/sandbox/chromium/base/memory/weak_ptr.h
@@ -0,0 +1,345 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Weak pointers are pointers to an object that do not affect its lifetime,
+// and which may be invalidated (i.e. reset to NULL) by the object, or its
+// owner, at any time, most commonly when the object is about to be deleted.
+
+// Weak pointers are useful when an object needs to be accessed safely by one
+// or more objects other than its owner, and those callers can cope with the
+// object vanishing and e.g. tasks posted to it being silently dropped.
+// Reference-counting such an object would complicate the ownership graph and
+// make it harder to reason about the object's lifetime.
+
+// EXAMPLE:
+//
+// class Controller {
+// public:
+// Controller() : weak_factory_(this) {}
+// void SpawnWorker() { Worker::StartNew(weak_factory_.GetWeakPtr()); }
+// void WorkComplete(const Result& result) { ... }
+// private:
+// // Member variables should appear before the WeakPtrFactory, to ensure
+// // that any WeakPtrs to Controller are invalidated before its member
+// // variables' destructors are executed, rendering them invalid.
+// WeakPtrFactory<Controller> weak_factory_;
+// };
+//
+// class Worker {
+// public:
+// static void StartNew(const WeakPtr<Controller>& controller) {
+// Worker* worker = new Worker(controller);
+// // Kick off asynchronous processing...
+// }
+// private:
+// Worker(const WeakPtr<Controller>& controller)
+// : controller_(controller) {}
+// void DidCompleteAsynchronousProcessing(const Result& result) {
+// if (controller_)
+// controller_->WorkComplete(result);
+// }
+// WeakPtr<Controller> controller_;
+// };
+//
+// With this implementation a caller may use SpawnWorker() to dispatch multiple
+// Workers and subsequently delete the Controller, without waiting for all
+// Workers to have completed.
+
+// ------------------------- IMPORTANT: Thread-safety -------------------------
+
+// Weak pointers may be passed safely between threads, but must always be
+// dereferenced and invalidated on the same SequencedTaskRunner, otherwise
+// checking the pointer would be racy.
+//
+// To ensure correct use, the first time a WeakPtr issued by a WeakPtrFactory
+// is dereferenced, the factory and its WeakPtrs become bound to the calling
+// thread or current SequencedWorkerPool token, and cannot be dereferenced or
+// invalidated on any other task runner. Bound WeakPtrs can still be handed
+// off to other task runners, e.g. to use when posting tasks back to the
+// object on the bound sequence.
+//
+// If all WeakPtr objects are destroyed or invalidated then the factory is
+// unbound from the SequencedTaskRunner/Thread. The WeakPtrFactory may then be
+// destroyed, or new WeakPtr objects may be used, from a different sequence.
+//
+// Thus, to get the intended enforcement, at least one WeakPtr object must
+// exist and have been dereferenced on the correct thread; only then will the
+// remaining WeakPtr objects enforce that they are used on that same thread.
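+//
+// A hypothetical sketch of the hand-off pattern (task_runner_a, weak_this,
+// and Controller::DoWork are illustrative names; base::Bind checks a bound
+// WeakPtr before running the method):
+//
+//   // On thread A, the first dereference binds the factory to thread A:
+//   if (weak_this)
+//     weak_this->DoWork();
+//
+//   // The bound WeakPtr may then be copied to thread B, provided thread B
+//   // only uses it to post work back to thread A, never to dereference it:
+//   task_runner_a->PostTask(
+//       FROM_HERE, base::Bind(&Controller::DoWork, weak_this));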
+
+#ifndef BASE_MEMORY_WEAK_PTR_H_
+#define BASE_MEMORY_WEAK_PTR_H_
+
+#include "base/base_export.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "base/sequence_checker.h"
+#include "base/template_util.h"
+
+namespace base {
+
+template <typename T> class SupportsWeakPtr;
+template <typename T> class WeakPtr;
+
+namespace internal {
+// These classes are part of the WeakPtr implementation.
+// DO NOT USE THESE CLASSES DIRECTLY YOURSELF.
+
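+// In brief: every WeakPtr shares a reference-counted WeakReference::Flag
+// with the issuing WeakReferenceOwner. Invalidation clears that single flag,
+// which all outstanding WeakPtrs observe, rather than tracking and nulling
+// each pointer individually.
+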
+class BASE_EXPORT WeakReference {
+ public:
+ // Although Flag is bound to a specific SequencedTaskRunner, it may be
+ // deleted from another via base::WeakPtr::~WeakPtr().
+ class BASE_EXPORT Flag : public RefCountedThreadSafe<Flag> {
+ public:
+ Flag();
+
+ void Invalidate();
+ bool IsValid() const;
+
+ private:
+ friend class base::RefCountedThreadSafe<Flag>;
+
+ ~Flag();
+
+ SequenceChecker sequence_checker_;
+ bool is_valid_;
+ };
+
+ WeakReference();
+ explicit WeakReference(const Flag* flag);
+ ~WeakReference();
+
+ bool is_valid() const;
+
+ private:
+ scoped_refptr<const Flag> flag_;
+};
+
+class BASE_EXPORT WeakReferenceOwner {
+ public:
+ WeakReferenceOwner();
+ ~WeakReferenceOwner();
+
+ WeakReference GetRef() const;
+
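+  // The owner itself retains one reference to the flag, so WeakPtrs are
+  // outstanding exactly when the flag is shared (its refcount exceeds one).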
+ bool HasRefs() const {
+ return flag_.get() && !flag_->HasOneRef();
+ }
+
+ void Invalidate();
+
+ private:
+ mutable scoped_refptr<WeakReference::Flag> flag_;
+};
+
+// This class simplifies the implementation of WeakPtr's type conversion
+// constructor by avoiding the need for a public accessor for ref_. A
+// WeakPtr<T> cannot access the private members of WeakPtr<U>, so this
+// base class gives us a way to access ref_ in a protected fashion.
+class BASE_EXPORT WeakPtrBase {
+ public:
+ WeakPtrBase();
+ ~WeakPtrBase();
+
+ protected:
+ explicit WeakPtrBase(const WeakReference& ref);
+
+ WeakReference ref_;
+};
+
+// This class provides a single implementation of the functions that would
+// otherwise get instantiated separately for each distinct instantiation of
+// SupportsWeakPtr<>.
+class SupportsWeakPtrBase {
+ public:
+ // A safe static downcast of a WeakPtr<Base> to WeakPtr<Derived>. This
+  // conversion will only compile if there exists a Base which inherits
+ // from SupportsWeakPtr<Base>. See base::AsWeakPtr() below for a helper
+ // function that makes calling this easier.
+ template<typename Derived>
+ static WeakPtr<Derived> StaticAsWeakPtr(Derived* t) {
+ typedef
+ is_convertible<Derived, internal::SupportsWeakPtrBase&> convertible;
+ static_assert(convertible::value,
+ "AsWeakPtr argument must inherit from SupportsWeakPtr");
+ return AsWeakPtrImpl<Derived>(t, *t);
+ }
+
+ private:
+ // This template function uses type inference to find a Base of Derived
+ // which is an instance of SupportsWeakPtr<Base>. We can then safely
+ // static_cast the Base* to a Derived*.
+ template <typename Derived, typename Base>
+ static WeakPtr<Derived> AsWeakPtrImpl(
+ Derived* t, const SupportsWeakPtr<Base>&) {
+ WeakPtr<Base> ptr = t->Base::AsWeakPtr();
+ return WeakPtr<Derived>(ptr.ref_, static_cast<Derived*>(ptr.ptr_));
+ }
+};
+
+} // namespace internal
+
+template <typename T> class WeakPtrFactory;
+
+// The WeakPtr class holds a weak reference to |T*|.
+//
+// This class is designed to be used like a normal pointer. You should always
+// null-test an object of this class before using it or invoking a method that
+// may result in the underlying object being destroyed.
+//
+// EXAMPLE:
+//
+// class Foo { ... };
+// WeakPtr<Foo> foo;
+// if (foo)
+// foo->method();
+//
+template <typename T>
+class WeakPtr : public internal::WeakPtrBase {
+ public:
+ WeakPtr() : ptr_(NULL) {
+ }
+
+ // Allow conversion from U to T provided U "is a" T. Note that this
+ // is separate from the (implicit) copy constructor.
+ template <typename U>
+ WeakPtr(const WeakPtr<U>& other) : WeakPtrBase(other), ptr_(other.ptr_) {
+ }
+
+ T* get() const { return ref_.is_valid() ? ptr_ : NULL; }
+
+ T& operator*() const {
+ DCHECK(get() != NULL);
+ return *get();
+ }
+ T* operator->() const {
+ DCHECK(get() != NULL);
+ return get();
+ }
+
+ // Allow WeakPtr<element_type> to be used in boolean expressions, but not
+ // implicitly convertible to a real bool (which is dangerous).
+ //
+ // Note that this trick is only safe when the == and != operators
+ // are declared explicitly, as otherwise "weak_ptr1 == weak_ptr2"
+ // will compile but do the wrong thing (i.e., convert to Testable
+ // and then do the comparison).
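+  //
+  // For example:
+  //
+  //   WeakPtr<Foo> a, b;
+  //   if (a)          // OK: converts through operator Testable.
+  //     a->Method();
+  //   if (a == b) {}  // Intentionally fails to compile: operator== is
+  //                   // declared below, but private.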
+ private:
+ typedef T* WeakPtr::*Testable;
+
+ public:
+ operator Testable() const { return get() ? &WeakPtr::ptr_ : NULL; }
+
+ void reset() {
+ ref_ = internal::WeakReference();
+ ptr_ = NULL;
+ }
+
+ private:
+ // Explicitly declare comparison operators as required by the bool
+ // trick, but keep them private.
+ template <class U> bool operator==(WeakPtr<U> const&) const;
+ template <class U> bool operator!=(WeakPtr<U> const&) const;
+
+ friend class internal::SupportsWeakPtrBase;
+ template <typename U> friend class WeakPtr;
+ friend class SupportsWeakPtr<T>;
+ friend class WeakPtrFactory<T>;
+
+ WeakPtr(const internal::WeakReference& ref, T* ptr)
+ : WeakPtrBase(ref),
+ ptr_(ptr) {
+ }
+
+ // This pointer is only valid when ref_.is_valid() is true. Otherwise, its
+ // value is undefined (as opposed to NULL).
+ T* ptr_;
+};
+
+// A class may be composed of a WeakPtrFactory and thereby
+// control how it exposes weak pointers to itself. This is helpful if you only
+// need weak pointers within the implementation of a class. This class is also
+// useful when working with primitive types. For example, you could have a
+// WeakPtrFactory<bool> that is used to pass around a weak reference to a bool.
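+//
+// A minimal sketch (illustrative only):
+//
+//   bool flag = false;
+//   WeakPtrFactory<bool> factory(&flag);
+//   WeakPtr<bool> weak_flag = factory.GetWeakPtr();
+//   ...
+//   factory.InvalidateWeakPtrs();  // weak_flag.get() now returns NULL.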
+template <class T>
+class WeakPtrFactory {
+ public:
+ explicit WeakPtrFactory(T* ptr) : ptr_(ptr) {
+ }
+
+ ~WeakPtrFactory() {
+ ptr_ = NULL;
+ }
+
+ WeakPtr<T> GetWeakPtr() {
+ DCHECK(ptr_);
+ return WeakPtr<T>(weak_reference_owner_.GetRef(), ptr_);
+ }
+
+ // Call this method to invalidate all existing weak pointers.
+ void InvalidateWeakPtrs() {
+ DCHECK(ptr_);
+ weak_reference_owner_.Invalidate();
+ }
+
+ // Call this method to determine if any weak pointers exist.
+ bool HasWeakPtrs() const {
+ DCHECK(ptr_);
+ return weak_reference_owner_.HasRefs();
+ }
+
+ private:
+ internal::WeakReferenceOwner weak_reference_owner_;
+ T* ptr_;
+ DISALLOW_IMPLICIT_CONSTRUCTORS(WeakPtrFactory);
+};
+
+// A class may extend from SupportsWeakPtr to let others take weak pointers to
+// it. This avoids the class itself implementing boilerplate to dispense weak
+// pointers. However, since SupportsWeakPtr's destructor won't invalidate
+// weak pointers to the class until after the derived class' members have been
+// destroyed, its use can lead to subtle use-after-destroy issues.
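+//
+// A minimal sketch of the hazard (Foo is illustrative):
+//
+//   class Foo : public base::SupportsWeakPtr<Foo> { ... };
+//
+//   Foo* foo = new Foo;
+//   base::WeakPtr<Foo> weak_foo = foo->AsWeakPtr();
+//   delete foo;
+//   DCHECK(!weak_foo);  // Invalidated, but only by ~SupportsWeakPtr<Foo>,
+//                       // which runs after Foo's own members have already
+//                       // been destroyed.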
+template <class T>
+class SupportsWeakPtr : public internal::SupportsWeakPtrBase {
+ public:
+ SupportsWeakPtr() {}
+
+ WeakPtr<T> AsWeakPtr() {
+ return WeakPtr<T>(weak_reference_owner_.GetRef(), static_cast<T*>(this));
+ }
+
+ protected:
+ ~SupportsWeakPtr() {}
+
+ private:
+ internal::WeakReferenceOwner weak_reference_owner_;
+ DISALLOW_COPY_AND_ASSIGN(SupportsWeakPtr);
+};
+
+// Helper function that uses type deduction to safely return a WeakPtr<Derived>
+// when Derived doesn't directly extend SupportsWeakPtr<Derived>, but instead
+// extends a Base that extends SupportsWeakPtr<Base>.
+//
+// EXAMPLE:
+//   class Base : public base::SupportsWeakPtr<Base> {};
+// class Derived : public Base {};
+//
+// Derived derived;
+// base::WeakPtr<Derived> ptr = base::AsWeakPtr(&derived);
+//
+// Note that the following doesn't work (invalid type conversion) since
+// Derived::AsWeakPtr() is WeakPtr<Base> SupportsWeakPtr<Base>::AsWeakPtr(),
+// and there's no way to safely cast WeakPtr<Base> to WeakPtr<Derived> at
+// the caller.
+//
+// base::WeakPtr<Derived> ptr = derived.AsWeakPtr(); // Fails.
+
+template <typename Derived>
+WeakPtr<Derived> AsWeakPtr(Derived* t) {
+ return internal::SupportsWeakPtrBase::StaticAsWeakPtr<Derived>(t);
+}
+
+} // namespace base
+
+#endif // BASE_MEMORY_WEAK_PTR_H_