path: root/ipc/chromium/src/base
Diffstat (limited to 'ipc/chromium/src/base')
-rw-r--r--ipc/chromium/src/base/at_exit.cc74
-rw-r--r--ipc/chromium/src/base/at_exit.h75
-rw-r--r--ipc/chromium/src/base/atomic_sequence_num.h62
-rw-r--r--ipc/chromium/src/base/atomicops.h155
-rw-r--r--ipc/chromium/src/base/atomicops_internals_arm64_gcc.h362
-rw-r--r--ipc/chromium/src/base/atomicops_internals_arm_gcc.h126
-rw-r--r--ipc/chromium/src/base/atomicops_internals_mips_gcc.h305
-rw-r--r--ipc/chromium/src/base/atomicops_internals_mutex.cc15
-rw-r--r--ipc/chromium/src/base/atomicops_internals_mutex.h251
-rw-r--r--ipc/chromium/src/base/atomicops_internals_ppc_gcc.h134
-rw-r--r--ipc/chromium/src/base/atomicops_internals_x86_gcc.cc106
-rw-r--r--ipc/chromium/src/base/atomicops_internals_x86_gcc.h261
-rw-r--r--ipc/chromium/src/base/atomicops_internals_x86_macosx.h281
-rw-r--r--ipc/chromium/src/base/atomicops_internals_x86_msvc.h178
-rw-r--r--ipc/chromium/src/base/basictypes.h272
-rw-r--r--ipc/chromium/src/base/chrome_application_mac.h62
-rw-r--r--ipc/chromium/src/base/chrome_application_mac.mm68
-rw-r--r--ipc/chromium/src/base/command_line.cc394
-rw-r--r--ipc/chromium/src/base/command_line.h197
-rw-r--r--ipc/chromium/src/base/compiler_specific.h79
-rw-r--r--ipc/chromium/src/base/condition_variable.h176
-rw-r--r--ipc/chromium/src/base/condition_variable_posix.cc91
-rw-r--r--ipc/chromium/src/base/condition_variable_win.cc448
-rw-r--r--ipc/chromium/src/base/cpu.cc56
-rw-r--r--ipc/chromium/src/base/cpu.h44
-rw-r--r--ipc/chromium/src/base/dir_reader_bsd.h103
-rw-r--r--ipc/chromium/src/base/dir_reader_fallback.h33
-rw-r--r--ipc/chromium/src/base/dir_reader_linux.h101
-rw-r--r--ipc/chromium/src/base/dir_reader_posix.h43
-rw-r--r--ipc/chromium/src/base/eintr_wrapper.h35
-rw-r--r--ipc/chromium/src/base/file_descriptor_posix.h38
-rw-r--r--ipc/chromium/src/base/file_descriptor_shuffle.cc96
-rw-r--r--ipc/chromium/src/base/file_descriptor_shuffle.h83
-rw-r--r--ipc/chromium/src/base/file_path.cc332
-rw-r--r--ipc/chromium/src/base/file_path.h277
-rw-r--r--ipc/chromium/src/base/file_util.cc244
-rw-r--r--ipc/chromium/src/base/file_util.h229
-rw-r--r--ipc/chromium/src/base/file_util_mac.mm35
-rw-r--r--ipc/chromium/src/base/file_util_posix.cc337
-rw-r--r--ipc/chromium/src/base/file_util_win.cc356
-rw-r--r--ipc/chromium/src/base/hash_tables.h136
-rw-r--r--ipc/chromium/src/base/histogram.cc1176
-rw-r--r--ipc/chromium/src/base/histogram.h780
-rw-r--r--ipc/chromium/src/base/id_map.h107
-rw-r--r--ipc/chromium/src/base/linked_ptr.h176
-rw-r--r--ipc/chromium/src/base/lock.cc9
-rw-r--r--ipc/chromium/src/base/lock.h77
-rw-r--r--ipc/chromium/src/base/lock_impl.h79
-rw-r--r--ipc/chromium/src/base/lock_impl_posix.cc50
-rw-r--r--ipc/chromium/src/base/lock_impl_win.cc75
-rw-r--r--ipc/chromium/src/base/logging.cc99
-rw-r--r--ipc/chromium/src/base/logging.h130
-rw-r--r--ipc/chromium/src/base/mac_util.h17
-rw-r--r--ipc/chromium/src/base/mac_util.mm34
-rw-r--r--ipc/chromium/src/base/message_loop.cc579
-rw-r--r--ipc/chromium/src/base/message_loop.h558
-rw-r--r--ipc/chromium/src/base/message_pump.h143
-rw-r--r--ipc/chromium/src/base/message_pump_android.cc125
-rw-r--r--ipc/chromium/src/base/message_pump_android.h77
-rw-r--r--ipc/chromium/src/base/message_pump_default.cc105
-rw-r--r--ipc/chromium/src/base/message_pump_default.h43
-rw-r--r--ipc/chromium/src/base/message_pump_glib.cc341
-rw-r--r--ipc/chromium/src/base/message_pump_glib.h147
-rw-r--r--ipc/chromium/src/base/message_pump_libevent.cc458
-rw-r--r--ipc/chromium/src/base/message_pump_libevent.h220
-rw-r--r--ipc/chromium/src/base/message_pump_mac.h279
-rw-r--r--ipc/chromium/src/base/message_pump_mac.mm752
-rw-r--r--ipc/chromium/src/base/message_pump_qt.cc191
-rw-r--r--ipc/chromium/src/base/message_pump_qt.h85
-rw-r--r--ipc/chromium/src/base/message_pump_win.cc547
-rw-r--r--ipc/chromium/src/base/message_pump_win.h348
-rw-r--r--ipc/chromium/src/base/object_watcher.cc137
-rw-r--r--ipc/chromium/src/base/object_watcher.h96
-rw-r--r--ipc/chromium/src/base/observer_list.h186
-rw-r--r--ipc/chromium/src/base/pickle.cc588
-rw-r--r--ipc/chromium/src/base/pickle.h326
-rw-r--r--ipc/chromium/src/base/platform_file.h48
-rw-r--r--ipc/chromium/src/base/platform_file_posix.cc70
-rw-r--r--ipc/chromium/src/base/platform_file_win.cc64
-rw-r--r--ipc/chromium/src/base/platform_thread.h87
-rw-r--r--ipc/chromium/src/base/platform_thread_mac.mm77
-rw-r--r--ipc/chromium/src/base/platform_thread_posix.cc164
-rw-r--r--ipc/chromium/src/base/platform_thread_win.cc109
-rw-r--r--ipc/chromium/src/base/port.h65
-rw-r--r--ipc/chromium/src/base/process.h81
-rw-r--r--ipc/chromium/src/base/process_posix.cc62
-rw-r--r--ipc/chromium/src/base/process_util.h321
-rw-r--r--ipc/chromium/src/base/process_util_bsd.cc167
-rw-r--r--ipc/chromium/src/base/process_util_linux.cc274
-rw-r--r--ipc/chromium/src/base/process_util_mac.mm202
-rw-r--r--ipc/chromium/src/base/process_util_posix.cc348
-rw-r--r--ipc/chromium/src/base/process_util_win.cc489
-rw-r--r--ipc/chromium/src/base/process_win.cc64
-rw-r--r--ipc/chromium/src/base/rand_util.cc42
-rw-r--r--ipc/chromium/src/base/rand_util.h32
-rw-r--r--ipc/chromium/src/base/rand_util_posix.cc31
-rw-r--r--ipc/chromium/src/base/rand_util_win.cc46
-rw-r--r--ipc/chromium/src/base/revocable_store.cc49
-rw-r--r--ipc/chromium/src/base/revocable_store.h81
-rw-r--r--ipc/chromium/src/base/scoped_bstr_win.h145
-rw-r--r--ipc/chromium/src/base/scoped_cftyperef.h80
-rw-r--r--ipc/chromium/src/base/scoped_handle.h56
-rw-r--r--ipc/chromium/src/base/scoped_handle_win.h219
-rw-r--r--ipc/chromium/src/base/scoped_nsautorelease_pool.h54
-rw-r--r--ipc/chromium/src/base/scoped_nsautorelease_pool.mm30
-rw-r--r--ipc/chromium/src/base/scoped_nsobject.h78
-rw-r--r--ipc/chromium/src/base/shared_memory.h209
-rw-r--r--ipc/chromium/src/base/shared_memory_posix.cc338
-rw-r--r--ipc/chromium/src/base/shared_memory_win.cc176
-rw-r--r--ipc/chromium/src/base/singleton.h183
-rw-r--r--ipc/chromium/src/base/singleton_objc.h62
-rw-r--r--ipc/chromium/src/base/stack_container.h255
-rw-r--r--ipc/chromium/src/base/stl_util-inl.h452
-rw-r--r--ipc/chromium/src/base/string16.cc74
-rw-r--r--ipc/chromium/src/base/string16.h175
-rw-r--r--ipc/chromium/src/base/string_piece.cc217
-rw-r--r--ipc/chromium/src/base/string_piece.h186
-rw-r--r--ipc/chromium/src/base/string_util.cc780
-rw-r--r--ipc/chromium/src/base/string_util.h237
-rw-r--r--ipc/chromium/src/base/string_util_posix.h46
-rw-r--r--ipc/chromium/src/base/string_util_win.h53
-rw-r--r--ipc/chromium/src/base/sys_info.h65
-rw-r--r--ipc/chromium/src/base/sys_info_mac.cc13
-rw-r--r--ipc/chromium/src/base/sys_info_posix.cc158
-rw-r--r--ipc/chromium/src/base/sys_info_win.cc100
-rw-r--r--ipc/chromium/src/base/sys_string_conversions.h35
-rw-r--r--ipc/chromium/src/base/sys_string_conversions_mac.mm145
-rw-r--r--ipc/chromium/src/base/sys_string_conversions_win.cc72
-rw-r--r--ipc/chromium/src/base/task.h369
-rw-r--r--ipc/chromium/src/base/thread.cc200
-rw-r--r--ipc/chromium/src/base/thread.h176
-rw-r--r--ipc/chromium/src/base/thread_local.h124
-rw-r--r--ipc/chromium/src/base/thread_local_posix.cc38
-rw-r--r--ipc/chromium/src/base/thread_local_storage.h96
-rw-r--r--ipc/chromium/src/base/thread_local_storage_posix.cc46
-rw-r--r--ipc/chromium/src/base/thread_local_storage_win.cc188
-rw-r--r--ipc/chromium/src/base/thread_local_win.cc40
-rw-r--r--ipc/chromium/src/base/time.cc104
-rw-r--r--ipc/chromium/src/base/time.h486
-rw-r--r--ipc/chromium/src/base/time_mac.cc127
-rw-r--r--ipc/chromium/src/base/time_posix.cc203
-rw-r--r--ipc/chromium/src/base/time_win.cc376
-rw-r--r--ipc/chromium/src/base/timer.cc31
-rw-r--r--ipc/chromium/src/base/timer.h274
-rw-r--r--ipc/chromium/src/base/tuple.h884
-rw-r--r--ipc/chromium/src/base/waitable_event.h175
-rw-r--r--ipc/chromium/src/base/waitable_event_posix.cc392
-rw-r--r--ipc/chromium/src/base/waitable_event_win.cc88
-rw-r--r--ipc/chromium/src/base/win_util.cc40
-rw-r--r--ipc/chromium/src/base/win_util.h22
150 files changed, 28035 insertions, 0 deletions
diff --git a/ipc/chromium/src/base/at_exit.cc b/ipc/chromium/src/base/at_exit.cc
new file mode 100644
index 000000000..9481f36e2
--- /dev/null
+++ b/ipc/chromium/src/base/at_exit.cc
@@ -0,0 +1,74 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/at_exit.h"
+#include "base/logging.h"
+
+namespace base {
+
+// Keep a stack of registered AtExitManagers. We always operate on the most
+// recent one; there should never be more than one outside of testing, where
+// the shadow version of the constructor is used. We don't protect this for
+// thread-safe access, since it will only be modified in testing.
+static AtExitManager* g_top_manager = NULL;
+
+AtExitManager::AtExitManager() : next_manager_(NULL) {
+ DCHECK(!g_top_manager);
+ g_top_manager = this;
+}
+
+AtExitManager::AtExitManager(bool shadow) : next_manager_(g_top_manager) {
+ DCHECK(shadow || !g_top_manager);
+ g_top_manager = this;
+}
+
+AtExitManager::~AtExitManager() {
+ if (!g_top_manager) {
+ NOTREACHED() << "Tried to ~AtExitManager without an AtExitManager";
+ return;
+ }
+ DCHECK(g_top_manager == this);
+
+ ProcessCallbacksNow();
+ g_top_manager = next_manager_;
+}
+
+// static
+void AtExitManager::RegisterCallback(AtExitCallbackType func, void* param) {
+ if (!g_top_manager) {
+ NOTREACHED() << "Tried to RegisterCallback without an AtExitManager";
+ return;
+ }
+
+ DCHECK(func);
+
+ AutoLock lock(g_top_manager->lock_);
+ g_top_manager->stack_.push(CallbackAndParam(func, param));
+}
+
+// static
+void AtExitManager::ProcessCallbacksNow() {
+ if (!g_top_manager) {
+ NOTREACHED() << "Tried to ProcessCallbacksNow without an AtExitManager";
+ return;
+ }
+
+ AutoLock lock(g_top_manager->lock_);
+
+ while (!g_top_manager->stack_.empty()) {
+ CallbackAndParam callback_and_param = g_top_manager->stack_.top();
+ g_top_manager->stack_.pop();
+
+ callback_and_param.func_(callback_and_param.param_);
+ }
+}
+
+// static
+bool AtExitManager::AlreadyRegistered() {
+ return !!g_top_manager;
+}
+
+} // namespace base
diff --git a/ipc/chromium/src/base/at_exit.h b/ipc/chromium/src/base/at_exit.h
new file mode 100644
index 000000000..5644dfa52
--- /dev/null
+++ b/ipc/chromium/src/base/at_exit.h
@@ -0,0 +1,75 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_AT_EXIT_H_
+#define BASE_AT_EXIT_H_
+
+#include <stack>
+
+#include "base/basictypes.h"
+#include "base/lock.h"
+
+namespace base {
+
+// This class provides a facility similar to the CRT atexit(), except that
+// we control when the callbacks are executed. Under Windows for a DLL they
+// happen at a really bad time and under the loader lock. This facility is
+// mostly used by base::Singleton.
+//
+// The usage is simple. Early in the main() or WinMain() scope create an
+// AtExitManager object on the stack:
+// int main(...) {
+// base::AtExitManager exit_manager;
+//
+// }
+// When the exit_manager object goes out of scope, all the registered
+// callbacks and singleton destructors will be called.
+
+class AtExitManager {
+ protected:
+ // This constructor will allow this instance of AtExitManager to be created
+ // even if one already exists. This should only be used for testing!
+ // AtExitManagers are kept on a global stack, and it will be removed during
+ // destruction. This allows you to shadow another AtExitManager.
+ explicit AtExitManager(bool shadow);
+
+ public:
+ typedef void (*AtExitCallbackType)(void*);
+
+ AtExitManager();
+
+ // The dtor calls all the registered callbacks. Do not try to register more
+ // callbacks after this point.
+ ~AtExitManager();
+
+ // Registers the specified function to be called at exit. The prototype of
+  // the callback function is void func(void* param).
+ static void RegisterCallback(AtExitCallbackType func, void* param);
+
+ // Calls the functions registered with RegisterCallback in LIFO order. It
+ // is possible to register new callbacks after calling this function.
+ static void ProcessCallbacksNow();
+
+ static bool AlreadyRegistered();
+
+ private:
+ struct CallbackAndParam {
+ CallbackAndParam(AtExitCallbackType func, void* param)
+ : func_(func), param_(param) { }
+ AtExitCallbackType func_;
+ void* param_;
+ };
+
+ Lock lock_;
+ std::stack<CallbackAndParam> stack_;
+ AtExitManager* next_manager_; // Stack of managers to allow shadowing.
+
+ DISALLOW_COPY_AND_ASSIGN(AtExitManager);
+};
+
+} // namespace base
+
+#endif // BASE_AT_EXIT_H_
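A minimal usage sketch for AtExitManager, using only what the header above declares; the log-file callback and file name are made up for illustration:

#include <cstdio>

#include "base/at_exit.h"

static void CloseLogFile(void* param) {
  // 'param' is whatever pointer was passed to RegisterCallback.
  fclose(static_cast<FILE*>(param));
}

int main(int argc, char** argv) {
  base::AtExitManager exit_manager;  // Create before registering any callback.

  FILE* log = fopen("app.log", "a");
  if (log)
    base::AtExitManager::RegisterCallback(&CloseLogFile, log);

  // ... application work ...

  return 0;  // ~AtExitManager runs CloseLogFile(log) here, in LIFO order.
}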
diff --git a/ipc/chromium/src/base/atomic_sequence_num.h b/ipc/chromium/src/base/atomic_sequence_num.h
new file mode 100644
index 000000000..cd0ad9560
--- /dev/null
+++ b/ipc/chromium/src/base/atomic_sequence_num.h
@@ -0,0 +1,62 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ATOMIC_SEQUENCE_NUM_H_
+#define BASE_ATOMIC_SEQUENCE_NUM_H_
+
+#include "base/atomicops.h"
+#include "base/basictypes.h"
+
+namespace base {
+
+class AtomicSequenceNumber;
+
+// Static (POD) AtomicSequenceNumber that MUST be used in global scope (or
+// non-function scope) ONLY. This implementation does not generate any static
+// initializer. Note that it does not implement any constructor which means
+// that its fields are not initialized except when it is stored in the global
+// data section (.data in ELF). If you want to allocate an atomic sequence
+// number on the stack (or heap), please use the AtomicSequenceNumber class
+// declared below.
+class StaticAtomicSequenceNumber {
+ public:
+ inline int GetNext() {
+ return static_cast<int>(
+ base::subtle::NoBarrier_AtomicIncrement(&seq_, 1) - 1);
+ }
+
+ private:
+ friend class AtomicSequenceNumber;
+
+ inline void Reset() {
+ base::subtle::Release_Store(&seq_, 0);
+ }
+
+ base::subtle::Atomic32 seq_;
+};
+
+// AtomicSequenceNumber that can be stored and used safely (i.e. its fields are
+// always initialized as opposed to StaticAtomicSequenceNumber declared above).
+// Please use StaticAtomicSequenceNumber if you want to declare an atomic
+// sequence number in the global scope.
+class AtomicSequenceNumber {
+ public:
+ AtomicSequenceNumber() {
+ seq_.Reset();
+ }
+
+ inline int GetNext() {
+ return seq_.GetNext();
+ }
+
+ private:
+ StaticAtomicSequenceNumber seq_;
+ DISALLOW_COPY_AND_ASSIGN(AtomicSequenceNumber);
+};
+
+} // namespace base
+
+#endif // BASE_ATOMIC_SEQUENCE_NUM_H_
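A short sketch of how the two classes above are meant to be used; the names g_request_ids, NextRequestId, and LocalExample are hypothetical:

#include "base/atomic_sequence_num.h"

// Global scope: StaticAtomicSequenceNumber has no constructor, so no static
// initializer is emitted and the counter starts at zero.
static base::StaticAtomicSequenceNumber g_request_ids;

int NextRequestId() {
  return g_request_ids.GetNext();  // 0, 1, 2, ... even across threads.
}

void LocalExample() {
  // Function or member scope: use the wrapper, which resets its counter.
  base::AtomicSequenceNumber local_ids;
  int first = local_ids.GetNext();   // 0
  int second = local_ids.GetNext();  // 1
  (void)first;
  (void)second;
}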
diff --git a/ipc/chromium/src/base/atomicops.h b/ipc/chromium/src/base/atomicops.h
new file mode 100644
index 000000000..dcb691710
--- /dev/null
+++ b/ipc/chromium/src/base/atomicops.h
@@ -0,0 +1,155 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// For atomic operations on reference counts, see atomic_refcount.h.
+// For atomic operations on sequence numbers, see atomic_sequence_num.h.
+
+// The routines exported by this module are subtle. If you use them, even if
+// you get the code right, it will depend on careful reasoning about atomicity
+// and memory ordering; it will be less readable, and harder to maintain. If
+// you plan to use these routines, you should have a good reason, such as solid
+// evidence that performance would otherwise suffer, or there being no
+// alternative. You should assume only properties explicitly guaranteed by the
+// specifications in this file. You are almost certainly _not_ writing code
+// just for the x86; if you assume x86 semantics, x86 hardware bugs and
+// implementations on other architectures will cause your code to break. If you
+// do not know what you are doing, avoid these routines, and use a Mutex.
+//
+// It is incorrect to make direct assignments to/from an atomic variable.
+// You should use one of the Load or Store routines. The NoBarrier
+// versions are provided when no barriers are needed:
+// NoBarrier_Store()
+// NoBarrier_Load()
+// Although there is currently no compiler enforcement, you are encouraged
+// to use these.
+//
+
+#ifndef BASE_ATOMICOPS_H_
+#define BASE_ATOMICOPS_H_
+
+#include "base/basictypes.h"
+#include "base/port.h"
+
+namespace base {
+namespace subtle {
+
+// Bug 1308991. We need this for /Wp64, to mark it safe for AtomicWord casting.
+#ifndef OS_WIN
+#define __w64
+#endif
+typedef __w64 int32_t Atomic32;
+#ifdef ARCH_CPU_64_BITS
+typedef int64_t Atomic64;
+#endif
+
+// Use AtomicWord for a machine-sized pointer. It will use the Atomic32 or
+// Atomic64 routines below, depending on your architecture.
+#ifdef OS_OPENBSD
+#ifdef ARCH_CPU_64_BITS
+typedef Atomic64 AtomicWord;
+#else
+typedef Atomic32 AtomicWord;
+#endif // ARCH_CPU_64_BITS
+#else
+typedef intptr_t AtomicWord;
+#endif // OS_OPENBSD
+
+// Atomically execute:
+// result = *ptr;
+// if (*ptr == old_value)
+// *ptr = new_value;
+// return result;
+//
+// I.e., replace "*ptr" with "new_value" if "*ptr" used to be "old_value".
+// Always return the old value of "*ptr"
+//
+// This routine implies no memory barriers.
+Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value,
+ Atomic32 new_value);
+
+// Atomically store new_value into *ptr, returning the previous value held in
+// *ptr. This routine implies no memory barriers.
+Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr, Atomic32 new_value);
+
+// Atomically increment *ptr by "increment". Returns the new value of
+// *ptr with the increment applied. This routine implies no memory barriers.
+Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr, Atomic32 increment);
+
+Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
+ Atomic32 increment);
+
+// The following lower-level operations are typically useful only to people
+// implementing higher-level synchronization operations like spinlocks,
+// mutexes, and condition-variables. They combine CompareAndSwap(), a load, or
+// a store with appropriate memory-ordering instructions. "Acquire" operations
+// ensure that no later memory access can be reordered ahead of the operation.
+// "Release" operations ensure that no previous memory access can be reordered
+// after the operation. "Barrier" operations have both "Acquire" and "Release"
+// semantics. A MemoryBarrier() has "Barrier" semantics, but does no memory
+// access.
+Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value,
+ Atomic32 new_value);
+Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value,
+ Atomic32 new_value);
+
+void MemoryBarrier();
+void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value);
+void Acquire_Store(volatile Atomic32* ptr, Atomic32 value);
+void Release_Store(volatile Atomic32* ptr, Atomic32 value);
+
+Atomic32 NoBarrier_Load(volatile const Atomic32* ptr);
+Atomic32 Acquire_Load(volatile const Atomic32* ptr);
+Atomic32 Release_Load(volatile const Atomic32* ptr);
+
+// 64-bit atomic operations (only available on 64-bit processors).
+#ifdef ARCH_CPU_64_BITS
+Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
+ Atomic64 old_value,
+ Atomic64 new_value);
+Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr, Atomic64 new_value);
+Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr, Atomic64 increment);
+Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr, Atomic64 increment);
+
+Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
+ Atomic64 old_value,
+ Atomic64 new_value);
+Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
+ Atomic64 old_value,
+ Atomic64 new_value);
+void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value);
+void Acquire_Store(volatile Atomic64* ptr, Atomic64 value);
+void Release_Store(volatile Atomic64* ptr, Atomic64 value);
+Atomic64 NoBarrier_Load(volatile const Atomic64* ptr);
+Atomic64 Acquire_Load(volatile const Atomic64* ptr);
+Atomic64 Release_Load(volatile const Atomic64* ptr);
+#endif  // ARCH_CPU_64_BITS
+
+} // namespace base::subtle
+} // namespace base
+
+// Include our platform specific implementation.
+#if defined(OS_WIN) && defined(ARCH_CPU_X86_FAMILY)
+#include "base/atomicops_internals_x86_msvc.h"
+#elif defined(OS_MACOSX) && defined(ARCH_CPU_X86_FAMILY)
+#include "base/atomicops_internals_x86_macosx.h"
+#elif defined(COMPILER_GCC) && defined(ARCH_CPU_X86_FAMILY)
+#include "base/atomicops_internals_x86_gcc.h"
+#elif defined(COMPILER_GCC) && defined(ARCH_CPU_ARMEL)
+#include "base/atomicops_internals_arm_gcc.h"
+#elif defined(COMPILER_GCC) && defined(ARCH_CPU_ARM64)
+#include "base/atomicops_internals_arm64_gcc.h"
+#elif defined(COMPILER_GCC) && defined(ARCH_CPU_MIPS)
+#include "base/atomicops_internals_mips_gcc.h"
+#elif defined(COMPILER_GCC) && defined(ARCH_CPU_PPC_FAMILY)
+#include "base/atomicops_internals_ppc_gcc.h"
+#else
+#include "base/atomicops_internals_mutex.h"
+#endif
+
+#endif // BASE_ATOMICOPS_H_
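The comment at the top of this header says these primitives are mainly useful for building higher-level synchronization such as spinlocks; a toy test-and-set spinlock on top of this API might look like the following sketch (illustration only, not part of this commit):

#include "base/atomicops.h"

class SpinLock {
 public:
  SpinLock() : state_(0) {}

  void Lock() {
    // Acquire semantics: no later memory access can be reordered above the CAS.
    while (base::subtle::Acquire_CompareAndSwap(&state_, 0, 1) != 0) {
      // Busy-wait until the current holder releases the lock.
    }
  }

  void Unlock() {
    // Release semantics: no earlier memory access can be reordered below the store.
    base::subtle::Release_Store(&state_, 0);
  }

 private:
  base::subtle::Atomic32 state_;  // 0 = unlocked, 1 = locked.
};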
diff --git a/ipc/chromium/src/base/atomicops_internals_arm64_gcc.h b/ipc/chromium/src/base/atomicops_internals_arm64_gcc.h
new file mode 100644
index 000000000..c5d1a9290
--- /dev/null
+++ b/ipc/chromium/src/base/atomicops_internals_arm64_gcc.h
@@ -0,0 +1,362 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is an internal atomic implementation, use base/atomicops.h instead.
+
+// TODO(rmcilroy): Investigate whether we can use __sync__ intrinsics instead of
+// the hand coded assembly without introducing perf regressions.
+// TODO(rmcilroy): Investigate whether we can use acquire / release versions of
+// exclusive load / store assembly instructions and do away with
+// the barriers.
+
+#ifndef BASE_ATOMICOPS_INTERNALS_ARM64_GCC_H_
+#define BASE_ATOMICOPS_INTERNALS_ARM64_GCC_H_
+
+#if defined(OS_QNX)
+#include <sys/cpuinline.h>
+#endif
+
+namespace base {
+namespace subtle {
+
+inline void MemoryBarrier() {
+ __asm__ __volatile__ ( // NOLINT
+ "dmb ish \n\t" // Data memory barrier.
+ ::: "memory"
+ ); // NOLINT
+}
+
+
+inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value,
+ Atomic32 new_value) {
+ Atomic32 prev;
+ int32_t temp;
+
+ __asm__ __volatile__ ( // NOLINT
+ "0: \n\t"
+ "ldxr %w[prev], %[ptr] \n\t" // Load the previous value.
+ "cmp %w[prev], %w[old_value] \n\t"
+ "bne 1f \n\t"
+ "stxr %w[temp], %w[new_value], %[ptr] \n\t" // Try to store the new value.
+ "cbnz %w[temp], 0b \n\t" // Retry if it did not work.
+ "1: \n\t"
+ "clrex \n\t" // In case we didn't swap.
+ : [prev]"=&r" (prev),
+ [temp]"=&r" (temp),
+ [ptr]"+Q" (*ptr)
+ : [old_value]"r" (old_value),
+ [new_value]"r" (new_value)
+ : "memory", "cc"
+ ); // NOLINT
+
+ return prev;
+}
+
+inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
+ Atomic32 new_value) {
+ Atomic32 result;
+ int32_t temp;
+
+ __asm__ __volatile__ ( // NOLINT
+ "0: \n\t"
+ "ldxr %w[result], %[ptr] \n\t" // Load the previous value.
+ "stxr %w[temp], %w[new_value], %[ptr] \n\t" // Try to store the new value.
+ "cbnz %w[temp], 0b \n\t" // Retry if it did not work.
+ : [result]"=&r" (result),
+ [temp]"=&r" (temp),
+ [ptr]"+Q" (*ptr)
+ : [new_value]"r" (new_value)
+ : "memory"
+ ); // NOLINT
+
+ return result;
+}
+
+inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
+ Atomic32 increment) {
+ Atomic32 result;
+ int32_t temp;
+
+ __asm__ __volatile__ ( // NOLINT
+ "0: \n\t"
+ "ldxr %w[result], %[ptr] \n\t" // Load the previous value.
+ "add %w[result], %w[result], %w[increment]\n\t"
+ "stxr %w[temp], %w[result], %[ptr] \n\t" // Try to store the result.
+ "cbnz %w[temp], 0b \n\t" // Retry on failure.
+ : [result]"=&r" (result),
+ [temp]"=&r" (temp),
+ [ptr]"+Q" (*ptr)
+ : [increment]"r" (increment)
+ : "memory"
+ ); // NOLINT
+
+ return result;
+}
+
+inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
+ Atomic32 increment) {
+ MemoryBarrier();
+ Atomic32 result = NoBarrier_AtomicIncrement(ptr, increment);
+ MemoryBarrier();
+
+ return result;
+}
+
+inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value,
+ Atomic32 new_value) {
+ Atomic32 prev;
+ int32_t temp;
+
+ __asm__ __volatile__ ( // NOLINT
+ "0: \n\t"
+ "ldxr %w[prev], %[ptr] \n\t" // Load the previous value.
+ "cmp %w[prev], %w[old_value] \n\t"
+ "bne 1f \n\t"
+ "stxr %w[temp], %w[new_value], %[ptr] \n\t" // Try to store the new value.
+ "cbnz %w[temp], 0b \n\t" // Retry if it did not work.
+ "dmb ish \n\t" // Data memory barrier.
+ "1: \n\t"
+ // If the compare failed the 'dmb' is unnecessary, but we still need a
+ // 'clrex'.
+ "clrex \n\t"
+ : [prev]"=&r" (prev),
+ [temp]"=&r" (temp),
+ [ptr]"+Q" (*ptr)
+ : [old_value]"r" (old_value),
+ [new_value]"r" (new_value)
+ : "memory", "cc"
+ ); // NOLINT
+
+ return prev;
+}
+
+inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value,
+ Atomic32 new_value) {
+ Atomic32 prev;
+ int32_t temp;
+
+ MemoryBarrier();
+
+ __asm__ __volatile__ ( // NOLINT
+ "0: \n\t"
+ "ldxr %w[prev], %[ptr] \n\t" // Load the previous value.
+ "cmp %w[prev], %w[old_value] \n\t"
+ "bne 1f \n\t"
+ "stxr %w[temp], %w[new_value], %[ptr] \n\t" // Try to store the new value.
+ "cbnz %w[temp], 0b \n\t" // Retry if it did not work.
+ "1: \n\t"
+    // If the compare failed we still need a 'clrex'.
+ "clrex \n\t"
+ : [prev]"=&r" (prev),
+ [temp]"=&r" (temp),
+ [ptr]"+Q" (*ptr)
+ : [old_value]"r" (old_value),
+ [new_value]"r" (new_value)
+ : "memory", "cc"
+ ); // NOLINT
+
+ return prev;
+}
+
+inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
+ *ptr = value;
+}
+
+inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
+ *ptr = value;
+ MemoryBarrier();
+}
+
+inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
+ MemoryBarrier();
+ *ptr = value;
+}
+
+inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
+ return *ptr;
+}
+
+inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
+ Atomic32 value = *ptr;
+ MemoryBarrier();
+ return value;
+}
+
+inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
+ MemoryBarrier();
+ return *ptr;
+}
+
+// 64-bit versions of the operations.
+// See the 32-bit versions for comments.
+
+inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
+ Atomic64 old_value,
+ Atomic64 new_value) {
+ Atomic64 prev;
+ int32_t temp;
+
+ __asm__ __volatile__ ( // NOLINT
+ "0: \n\t"
+ "ldxr %[prev], %[ptr] \n\t"
+ "cmp %[prev], %[old_value] \n\t"
+ "bne 1f \n\t"
+ "stxr %w[temp], %[new_value], %[ptr] \n\t"
+ "cbnz %w[temp], 0b \n\t"
+ "1: \n\t"
+ "clrex \n\t"
+ : [prev]"=&r" (prev),
+ [temp]"=&r" (temp),
+ [ptr]"+Q" (*ptr)
+ : [old_value]"r" (old_value),
+ [new_value]"r" (new_value)
+ : "memory", "cc"
+ ); // NOLINT
+
+ return prev;
+}
+
+inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
+ Atomic64 new_value) {
+ Atomic64 result;
+ int32_t temp;
+
+ __asm__ __volatile__ ( // NOLINT
+ "0: \n\t"
+ "ldxr %[result], %[ptr] \n\t"
+ "stxr %w[temp], %[new_value], %[ptr] \n\t"
+ "cbnz %w[temp], 0b \n\t"
+ : [result]"=&r" (result),
+ [temp]"=&r" (temp),
+ [ptr]"+Q" (*ptr)
+ : [new_value]"r" (new_value)
+ : "memory"
+ ); // NOLINT
+
+ return result;
+}
+
+inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
+ Atomic64 increment) {
+ Atomic64 result;
+ int32_t temp;
+
+ __asm__ __volatile__ ( // NOLINT
+ "0: \n\t"
+ "ldxr %[result], %[ptr] \n\t"
+ "add %[result], %[result], %[increment] \n\t"
+ "stxr %w[temp], %[result], %[ptr] \n\t"
+ "cbnz %w[temp], 0b \n\t"
+ : [result]"=&r" (result),
+ [temp]"=&r" (temp),
+ [ptr]"+Q" (*ptr)
+ : [increment]"r" (increment)
+ : "memory"
+ ); // NOLINT
+
+ return result;
+}
+
+inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
+ Atomic64 increment) {
+ MemoryBarrier();
+ Atomic64 result = NoBarrier_AtomicIncrement(ptr, increment);
+ MemoryBarrier();
+
+ return result;
+}
+
+inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
+ Atomic64 old_value,
+ Atomic64 new_value) {
+ Atomic64 prev;
+ int32_t temp;
+
+ __asm__ __volatile__ ( // NOLINT
+ "0: \n\t"
+ "ldxr %[prev], %[ptr] \n\t"
+ "cmp %[prev], %[old_value] \n\t"
+ "bne 1f \n\t"
+ "stxr %w[temp], %[new_value], %[ptr] \n\t"
+ "cbnz %w[temp], 0b \n\t"
+ "dmb ish \n\t"
+ "1: \n\t"
+ "clrex \n\t"
+ : [prev]"=&r" (prev),
+ [temp]"=&r" (temp),
+ [ptr]"+Q" (*ptr)
+ : [old_value]"r" (old_value),
+ [new_value]"r" (new_value)
+ : "memory", "cc"
+ ); // NOLINT
+
+ return prev;
+}
+
+inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
+ Atomic64 old_value,
+ Atomic64 new_value) {
+ Atomic64 prev;
+ int32_t temp;
+
+ MemoryBarrier();
+
+ __asm__ __volatile__ ( // NOLINT
+ "0: \n\t"
+ "ldxr %[prev], %[ptr] \n\t"
+ "cmp %[prev], %[old_value] \n\t"
+ "bne 1f \n\t"
+ "stxr %w[temp], %[new_value], %[ptr] \n\t"
+ "cbnz %w[temp], 0b \n\t"
+ "1: \n\t"
+ "clrex \n\t"
+ : [prev]"=&r" (prev),
+ [temp]"=&r" (temp),
+ [ptr]"+Q" (*ptr)
+ : [old_value]"r" (old_value),
+ [new_value]"r" (new_value)
+ : "memory", "cc"
+ ); // NOLINT
+
+ return prev;
+}
+
+inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
+ *ptr = value;
+}
+
+inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
+ *ptr = value;
+ MemoryBarrier();
+}
+
+inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
+ MemoryBarrier();
+ *ptr = value;
+}
+
+inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
+ return *ptr;
+}
+
+inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
+ Atomic64 value = *ptr;
+ MemoryBarrier();
+ return value;
+}
+
+inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
+ MemoryBarrier();
+ return *ptr;
+}
+
+} // namespace base::subtle
+} // namespace base
+
+#endif // BASE_ATOMICOPS_INTERNALS_ARM64_GCC_H_
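The TODO at the top of this file asks whether compiler intrinsics could replace the hand-coded assembly; as a rough sketch of that alternative (written as if inside namespace base::subtle, and not what the file above actually does), the 32-bit compare-and-swap could be expressed with a GCC __sync builtin, at the cost of a full barrier that the NoBarrier contract does not require:

// Hypothetical builtin-based variant of NoBarrier_CompareAndSwap above.
inline Atomic32 Builtin_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  // Returns the value *ptr held before the call, like the assembly version.
  return __sync_val_compare_and_swap(ptr, old_value, new_value);
}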
diff --git a/ipc/chromium/src/base/atomicops_internals_arm_gcc.h b/ipc/chromium/src/base/atomicops_internals_arm_gcc.h
new file mode 100644
index 000000000..628eb3d46
--- /dev/null
+++ b/ipc/chromium/src/base/atomicops_internals_arm_gcc.h
@@ -0,0 +1,126 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is an internal atomic implementation, use base/atomicops.h instead.
+//
+// LinuxKernelCmpxchg and Barrier_AtomicIncrement are from Google Gears.
+
+#ifndef BASE_ATOMICOPS_INTERNALS_ARM_GCC_H_
+#define BASE_ATOMICOPS_INTERNALS_ARM_GCC_H_
+
+namespace base {
+namespace subtle {
+
+// 0xffff0fc0 is the hard coded address of a function provided by
+// the kernel which implements an atomic compare-exchange. On older
+// ARM architecture revisions (pre-v6) this may be implemented using
+// a syscall. This address is stable, and in active use (hard coded)
+// by at least glibc-2.7 and the Android C library.
+typedef Atomic32 (*LinuxKernelCmpxchgFunc)(Atomic32 old_value,
+ Atomic32 new_value,
+ volatile Atomic32* ptr);
+LinuxKernelCmpxchgFunc pLinuxKernelCmpxchg __attribute__((weak)) =
+ (LinuxKernelCmpxchgFunc) 0xffff0fc0;
+
+typedef void (*LinuxKernelMemoryBarrierFunc)(void);
+LinuxKernelMemoryBarrierFunc pLinuxKernelMemoryBarrier __attribute__((weak)) =
+ (LinuxKernelMemoryBarrierFunc) 0xffff0fa0;
+
+
+inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value,
+ Atomic32 new_value) {
+ Atomic32 prev_value = *ptr;
+ do {
+ if (!pLinuxKernelCmpxchg(old_value, new_value,
+ const_cast<Atomic32*>(ptr))) {
+ return old_value;
+ }
+ prev_value = *ptr;
+ } while (prev_value == old_value);
+ return prev_value;
+}
+
+inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
+ Atomic32 new_value) {
+ Atomic32 old_value;
+ do {
+ old_value = *ptr;
+ } while (pLinuxKernelCmpxchg(old_value, new_value,
+ const_cast<Atomic32*>(ptr)));
+ return old_value;
+}
+
+inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
+ Atomic32 increment) {
+ return Barrier_AtomicIncrement(ptr, increment);
+}
+
+inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
+ Atomic32 increment) {
+ for (;;) {
+ // Atomic exchange the old value with an incremented one.
+ Atomic32 old_value = *ptr;
+ Atomic32 new_value = old_value + increment;
+ if (pLinuxKernelCmpxchg(old_value, new_value,
+ const_cast<Atomic32*>(ptr)) == 0) {
+ // The exchange took place as expected.
+ return new_value;
+ }
+ // Otherwise, *ptr changed mid-loop and we need to retry.
+ }
+
+}
+
+inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value,
+ Atomic32 new_value) {
+ return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+}
+
+inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value,
+ Atomic32 new_value) {
+ return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+}
+
+inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
+ *ptr = value;
+}
+
+inline void MemoryBarrier() {
+ pLinuxKernelMemoryBarrier();
+}
+
+inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
+ *ptr = value;
+ MemoryBarrier();
+}
+
+inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
+ MemoryBarrier();
+ *ptr = value;
+}
+
+inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
+ return *ptr;
+}
+
+inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
+ Atomic32 value = *ptr;
+ MemoryBarrier();
+ return value;
+}
+
+inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
+ MemoryBarrier();
+ return *ptr;
+}
+
+} // namespace base::subtle
+} // namespace base
+
+#endif // BASE_ATOMICOPS_INTERNALS_ARM_GCC_H_
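A note on the kernel helper used above: pLinuxKernelCmpxchg returns zero exactly when the exchange happened, which is why the loops in this file treat a non-zero return as a retry. Built on the file's own primitive, a one-shot flag claim might look like this sketch (hypothetical helper, written as if inside namespace base::subtle):

// Returns true for exactly one caller: the one that atomically flips the
// flag from 0 to 1.
inline bool TryClaimFlag(volatile Atomic32* flag) {
  return NoBarrier_CompareAndSwap(flag, 0, 1) == 0;
}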
diff --git a/ipc/chromium/src/base/atomicops_internals_mips_gcc.h b/ipc/chromium/src/base/atomicops_internals_mips_gcc.h
new file mode 100644
index 000000000..aec4561b2
--- /dev/null
+++ b/ipc/chromium/src/base/atomicops_internals_mips_gcc.h
@@ -0,0 +1,305 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// This file is an internal atomic implementation, use base/atomicops.h instead.
+//
+// LinuxKernelCmpxchg and Barrier_AtomicIncrement are from Google Gears.
+
+#ifndef GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_MIPS_GCC_H_
+#define GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_MIPS_GCC_H_
+
+namespace base {
+namespace subtle {
+
+// Atomically execute:
+// result = *ptr;
+// if (*ptr == old_value)
+// *ptr = new_value;
+// return result;
+//
+// I.e., replace "*ptr" with "new_value" if "*ptr" used to be "old_value".
+// Always return the old value of "*ptr"
+//
+// This routine implies no memory barriers.
+inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value,
+ Atomic32 new_value) {
+ Atomic32 prev, tmp;
+ __asm__ __volatile__(".set push\n"
+ ".set noreorder\n"
+ "1:\n"
+ "ll %0, %5\n" // prev = *ptr
+ "bne %0, %3, 2f\n" // if (prev != old_value) goto 2
+ "move %2, %4\n" // tmp = new_value
+ "sc %2, %1\n" // *ptr = tmp (with atomic check)
+ "beqz %2, 1b\n" // start again on atomic error
+ "nop\n" // delay slot nop
+ "2:\n"
+ ".set pop\n"
+ : "=&r" (prev), "=m" (*ptr), "=&r" (tmp)
+ : "r" (old_value), "r" (new_value), "m" (*ptr)
+ : "memory");
+ return prev;
+}
+
+// Atomically store new_value into *ptr, returning the previous value held in
+// *ptr. This routine implies no memory barriers.
+inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
+ Atomic32 new_value) {
+ Atomic32 temp, old;
+ __asm__ __volatile__(".set push\n"
+ ".set noreorder\n"
+ "1:\n"
+ "ll %1, %4\n" // old = *ptr
+ "move %0, %3\n" // temp = new_value
+ "sc %0, %2\n" // *ptr = temp (with atomic check)
+ "beqz %0, 1b\n" // start again on atomic error
+ "nop\n" // delay slot nop
+ ".set pop\n"
+ : "=&r" (temp), "=&r" (old), "=m" (*ptr)
+ : "r" (new_value), "m" (*ptr)
+ : "memory");
+
+ return old;
+}
+
+// Atomically increment *ptr by "increment". Returns the new value of
+// *ptr with the increment applied. This routine implies no memory barriers.
+inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
+ Atomic32 increment) {
+ Atomic32 temp, temp2;
+
+ __asm__ __volatile__(".set push\n"
+ ".set noreorder\n"
+ "1:\n"
+ "ll %0, %4\n" // temp = *ptr
+ "addu %1, %0, %3\n" // temp2 = temp + increment
+ "sc %1, %2\n" // *ptr = temp2 (with atomic check)
+ "beqz %1, 1b\n" // start again on atomic error
+ "addu %1, %0, %3\n" // temp2 = temp + increment
+ ".set pop\n"
+ : "=&r" (temp), "=&r" (temp2), "=m" (*ptr)
+ : "Ir" (increment), "m" (*ptr)
+ : "memory");
+ // temp2 now holds the final value.
+ return temp2;
+}
+
+inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
+ Atomic32 increment) {
+ MemoryBarrier();
+ Atomic32 res = NoBarrier_AtomicIncrement(ptr, increment);
+ MemoryBarrier();
+ return res;
+}
+
+// "Acquire" operations
+// ensure that no later memory access can be reordered ahead of the operation.
+// "Release" operations ensure that no previous memory access can be reordered
+// after the operation. "Barrier" operations have both "Acquire" and "Release"
+// semantics. A MemoryBarrier() has "Barrier" semantics, but does no memory
+// access.
+inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value,
+ Atomic32 new_value) {
+ Atomic32 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+ MemoryBarrier();
+ return res;
+}
+
+inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value,
+ Atomic32 new_value) {
+ MemoryBarrier();
+ return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+}
+
+inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
+ *ptr = value;
+}
+
+inline void MemoryBarrier() {
+ __asm__ __volatile__("sync" : : : "memory");
+}
+
+inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
+ *ptr = value;
+ MemoryBarrier();
+}
+
+inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
+ MemoryBarrier();
+ *ptr = value;
+}
+
+inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
+ return *ptr;
+}
+
+inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
+ Atomic32 value = *ptr;
+ MemoryBarrier();
+ return value;
+}
+
+inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
+ MemoryBarrier();
+ return *ptr;
+}
+
+#if defined(__LP64__)
+// 64-bit versions of the atomic ops.
+
+inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
+ Atomic64 old_value,
+ Atomic64 new_value) {
+ Atomic64 prev, tmp;
+ __asm__ __volatile__(".set push\n"
+ ".set noreorder\n"
+ "1:\n"
+ "lld %0, %5\n" // prev = *ptr
+ "bne %0, %3, 2f\n" // if (prev != old_value) goto 2
+ "move %2, %4\n" // tmp = new_value
+ "scd %2, %1\n" // *ptr = tmp (with atomic check)
+ "beqz %2, 1b\n" // start again on atomic error
+ "nop\n" // delay slot nop
+ "2:\n"
+ ".set pop\n"
+ : "=&r" (prev), "=m" (*ptr), "=&r" (tmp)
+ : "r" (old_value), "r" (new_value), "m" (*ptr)
+ : "memory");
+ return prev;
+}
+
+// Atomically store new_value into *ptr, returning the previous value held in
+// *ptr. This routine implies no memory barriers.
+inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
+ Atomic64 new_value) {
+ Atomic64 temp, old;
+ __asm__ __volatile__(".set push\n"
+ ".set noreorder\n"
+ "1:\n"
+ "lld %1, %4\n" // old = *ptr
+ "move %0, %3\n" // temp = new_value
+ "scd %0, %2\n" // *ptr = temp (with atomic check)
+ "beqz %0, 1b\n" // start again on atomic error
+ "nop\n" // delay slot nop
+ ".set pop\n"
+ : "=&r" (temp), "=&r" (old), "=m" (*ptr)
+ : "r" (new_value), "m" (*ptr)
+ : "memory");
+
+ return old;
+}
+
+// Atomically increment *ptr by "increment". Returns the new value of
+// *ptr with the increment applied. This routine implies no memory barriers.
+inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
+ Atomic64 increment) {
+ Atomic64 temp, temp2;
+
+ __asm__ __volatile__(".set push\n"
+ ".set noreorder\n"
+ "1:\n"
+ "lld %0, %4\n" // temp = *ptr
+ "daddu %1, %0, %3\n" // temp2 = temp + increment
+ "scd %1, %2\n" // *ptr = temp2 (with atomic check)
+ "beqz %1, 1b\n" // start again on atomic error
+ "daddu %1, %0, %3\n" // temp2 = temp + increment
+ ".set pop\n"
+ : "=&r" (temp), "=&r" (temp2), "=m" (*ptr)
+ : "Ir" (increment), "m" (*ptr)
+ : "memory");
+ // temp2 now holds the final value.
+ return temp2;
+}
+
+inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
+ Atomic64 increment) {
+ MemoryBarrier();
+ Atomic64 res = NoBarrier_AtomicIncrement(ptr, increment);
+ MemoryBarrier();
+ return res;
+}
+
+// "Acquire" operations
+// ensure that no later memory access can be reordered ahead of the operation.
+// "Release" operations ensure that no previous memory access can be reordered
+// after the operation. "Barrier" operations have both "Acquire" and "Release"
+// semantics. A MemoryBarrier() has "Barrier" semantics, but does no memory
+// access.
+inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
+ Atomic64 old_value,
+ Atomic64 new_value) {
+ Atomic64 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+ MemoryBarrier();
+ return res;
+}
+
+inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
+ Atomic64 old_value,
+ Atomic64 new_value) {
+ MemoryBarrier();
+ return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+}
+
+inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
+ *ptr = value;
+}
+
+inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
+ *ptr = value;
+ MemoryBarrier();
+}
+
+inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
+ MemoryBarrier();
+ *ptr = value;
+}
+
+inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
+ return *ptr;
+}
+
+inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
+ Atomic64 value = *ptr;
+ MemoryBarrier();
+ return value;
+}
+
+inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
+ MemoryBarrier();
+ return *ptr;
+}
+#endif
+
+} // namespace base::subtle
+} // namespace base
+
+#endif // GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_MIPS_GCC_H_
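As an illustration of the acquire/release contract documented above, a one-word producer/consumer handoff built on these operations could look like the following sketch (hypothetical functions, written as if inside namespace base::subtle):

// Producer: write the payload first, then release-store the flag so the
// payload write cannot be reordered after the flag becomes visible.
void Publish(volatile Atomic32* flag, int* payload) {
  *payload = 42;
  Release_Store(flag, 1);
}

// Consumer: acquire-load the flag; once it reads 1, the payload write above
// is guaranteed to be visible to this thread.
bool TryConsume(volatile const Atomic32* flag, const int* payload, int* out) {
  if (Acquire_Load(flag) == 0)
    return false;
  *out = *payload;
  return true;
}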
diff --git a/ipc/chromium/src/base/atomicops_internals_mutex.cc b/ipc/chromium/src/base/atomicops_internals_mutex.cc
new file mode 100644
index 000000000..b129c86e8
--- /dev/null
+++ b/ipc/chromium/src/base/atomicops_internals_mutex.cc
@@ -0,0 +1,15 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "base/atomicops.h"
+
+namespace base {
+namespace subtle {
+
+Lock gAtomicsMutex;
+
+} // namespace subtle
+} // namespace base
diff --git a/ipc/chromium/src/base/atomicops_internals_mutex.h b/ipc/chromium/src/base/atomicops_internals_mutex.h
new file mode 100644
index 000000000..f8fe4c6ac
--- /dev/null
+++ b/ipc/chromium/src/base/atomicops_internals_mutex.h
@@ -0,0 +1,251 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+// This file is an internal atomic implementation, use
+// base/atomicops.h instead.
+//
+// This is a very slow fallback implementation of atomic operations
+// that uses a mutex instead of atomic instructions.
+//
+// (NB: a small "optimization" here would be using a spinlock instead
+// of a blocking mutex, but it's probably not worth the time.)
+
+#ifndef base_atomicops_internals_mutex_h
+#define base_atomicops_internals_mutex_h
+
+#include "base/lock.h"
+
+namespace base {
+namespace subtle {
+
+extern Lock gAtomicsMutex;
+
+template<typename T>
+T Locked_CAS(volatile T* ptr, T old_value, T new_value) {
+ AutoLock _(gAtomicsMutex);
+
+ T current_value = *ptr;
+ if (current_value == old_value)
+ *ptr = new_value;
+
+ return current_value;
+}
+
+template<typename T>
+T Locked_AtomicExchange(volatile T* ptr, T new_value) {
+ AutoLock _(gAtomicsMutex);
+
+ T current_value = *ptr;
+ *ptr = new_value;
+ return current_value;
+}
+
+template<typename T>
+T Locked_AtomicIncrement(volatile T* ptr, T increment) {
+ AutoLock _(gAtomicsMutex);
+ return *ptr += increment;
+}
+
+template<typename T>
+void Locked_Store(volatile T* ptr, T value) {
+ AutoLock _(gAtomicsMutex);
+ *ptr = value;
+}
+
+template<typename T>
+T Locked_Load(volatile const T* ptr) {
+ AutoLock _(gAtomicsMutex);
+ return *ptr;
+}
+
+inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value,
+ Atomic32 new_value) {
+ return Locked_CAS(ptr, old_value, new_value);
+}
+
+inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
+ Atomic32 new_value) {
+ return Locked_AtomicExchange(ptr, new_value);
+}
+
+inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
+ Atomic32 increment) {
+ return Locked_AtomicIncrement(ptr, increment);
+}
+
+inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
+ Atomic32 increment) {
+ return Locked_AtomicIncrement(ptr, increment);
+}
+
+inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value,
+ Atomic32 new_value) {
+ return Locked_CAS(ptr, old_value, new_value);
+}
+
+inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value,
+ Atomic32 new_value) {
+ return Locked_CAS(ptr, old_value, new_value);
+}
+
+inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
+ return Locked_Store(ptr, value);
+}
+
+inline void MemoryBarrier() {
+ AutoLock _(gAtomicsMutex);
+ // lock/unlock work as a barrier here
+}
+
+inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
+ return Locked_Store(ptr, value);
+}
+
+inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
+ return Locked_Store(ptr, value);
+}
+
+inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
+ return Locked_Load(ptr);
+}
+
+inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
+ return NoBarrier_Load(ptr);
+}
+
+inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
+ return Locked_Load(ptr);
+}
+
+#ifdef ARCH_CPU_64_BITS
+
+inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
+ Atomic64 old_value,
+ Atomic64 new_value) {
+ return Locked_CAS(ptr, old_value, new_value);
+}
+
+inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
+ Atomic64 new_value) {
+ return Locked_AtomicExchange(ptr, new_value);
+}
+
+inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
+ Atomic64 increment) {
+ return Locked_AtomicIncrement(ptr, increment);
+}
+
+inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
+ Atomic64 increment) {
+ return Locked_AtomicIncrement(ptr, increment);
+}
+
+inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
+ return Locked_Store(ptr, value);
+}
+
+inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
+ Atomic64 old_value,
+ Atomic64 new_value) {
+ return Locked_CAS(ptr, old_value, new_value);
+}
+
+inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
+ return Locked_Store(ptr, value);
+}
+
+inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
+ return Locked_Store(ptr, value);
+}
+
+inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
+ return Locked_Load(ptr);
+}
+
+inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
+ return Locked_Load(ptr);
+}
+
+inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
+ return Locked_Load(ptr);
+}
+
+#endif // ARCH_CPU_64_BITS
+
+#ifdef OS_MACOSX
+// From atomicops_internals_x86_macosx.h:
+//
+// MacOS uses long for intptr_t, so AtomicWord and Atomic32 are always
+// different types on the Mac, even when they are the same size. We need
+// to explicitly cast from AtomicWord to Atomic32/64 to implement
+// the AtomicWord interface.
+
+inline AtomicWord NoBarrier_CompareAndSwap(volatile AtomicWord* ptr,
+ AtomicWord old_value,
+ AtomicWord new_value) {
+ return Locked_CAS(ptr, old_value, new_value);
+}
+
+inline AtomicWord NoBarrier_AtomicExchange(volatile AtomicWord* ptr,
+ AtomicWord new_value) {
+ return Locked_AtomicExchange(ptr, new_value);
+}
+
+inline AtomicWord NoBarrier_AtomicIncrement(volatile AtomicWord* ptr,
+ AtomicWord increment) {
+ return Locked_AtomicIncrement(ptr, increment);
+}
+
+inline AtomicWord Barrier_AtomicIncrement(volatile AtomicWord* ptr,
+ AtomicWord increment) {
+ return Locked_AtomicIncrement(ptr, increment);
+}
+
+inline AtomicWord Acquire_CompareAndSwap(volatile AtomicWord* ptr,
+ AtomicWord old_value,
+ AtomicWord new_value) {
+ return Locked_CAS(ptr, old_value, new_value);
+}
+
+inline AtomicWord Release_CompareAndSwap(volatile AtomicWord* ptr,
+ AtomicWord old_value,
+ AtomicWord new_value) {
+ return Locked_CAS(ptr, old_value, new_value);
+}
+
+inline void NoBarrier_Store(volatile AtomicWord *ptr, AtomicWord value) {
+ return Locked_Store(ptr, value);
+}
+
+inline void Acquire_Store(volatile AtomicWord* ptr, AtomicWord value) {
+ return Locked_Store(ptr, value);
+}
+
+inline void Release_Store(volatile AtomicWord* ptr, AtomicWord value) {
+ return Locked_Store(ptr, value);
+}
+
+inline AtomicWord NoBarrier_Load(volatile const AtomicWord *ptr) {
+ return Locked_Load(ptr);
+}
+
+inline AtomicWord Acquire_Load(volatile const AtomicWord* ptr) {
+ return Locked_Load(ptr);
+}
+
+inline AtomicWord Release_Load(volatile const AtomicWord* ptr) {
+ return Locked_Load(ptr);
+}
+
+#endif // OS_MACOSX
+
+} // namespace subtle
+} // namespace base
+
+#endif // base_atomicops_internals_mutex_h
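Code written against base/atomicops.h works unchanged with this fallback; every operation simply serializes on gAtomicsMutex. A small counter sketch (hypothetical class, for illustration only):

#include "base/atomicops.h"

class Counter {
 public:
  Counter() : value_(0) {}

  int Increment() {
    // Returns the incremented value, whichever atomicops backend was selected.
    return static_cast<int>(
        base::subtle::NoBarrier_AtomicIncrement(&value_, 1));
  }

  int Get() const {
    return static_cast<int>(base::subtle::NoBarrier_Load(&value_));
  }

 private:
  base::subtle::Atomic32 value_;
};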
diff --git a/ipc/chromium/src/base/atomicops_internals_ppc_gcc.h b/ipc/chromium/src/base/atomicops_internals_ppc_gcc.h
new file mode 100644
index 000000000..90c01e4a3
--- /dev/null
+++ b/ipc/chromium/src/base/atomicops_internals_ppc_gcc.h
@@ -0,0 +1,134 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// This file is an internal atomic implementation, use atomicops.h instead.
+//
+#ifndef BASE_ATOMICOPS_INTERNALS_PPC_H_
+#define BASE_ATOMICOPS_INTERNALS_PPC_H_
+namespace base {
+namespace subtle {
+inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value,
+ Atomic32 new_value) {
+ return (__sync_val_compare_and_swap(ptr, old_value, new_value));
+}
+inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
+ Atomic32 new_value) {
+ Atomic32 old_value;
+ do {
+ old_value = *ptr;
+ } while (__sync_bool_compare_and_swap(ptr, old_value, new_value) == false);
+ return old_value;
+}
+inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
+ Atomic32 increment) {
+ return Barrier_AtomicIncrement(ptr, increment);
+}
+inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
+ Atomic32 increment) {
+ for (;;) {
+ Atomic32 old_value = *ptr;
+ Atomic32 new_value = old_value + increment;
+ if (__sync_bool_compare_and_swap(ptr, old_value, new_value)) {
+      // The exchange took place as expected.
+      return new_value;
+ }
+ // Otherwise, *ptr changed mid-loop and we need to retry.
+ }
+}
+inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value, Atomic32 new_value) {
+ return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+}
+inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value, Atomic32 new_value) {
+ return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+}
+inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
+ *ptr = value;
+}
+inline void MemoryBarrier() {
+ __asm__ __volatile__("sync" : : : "memory"); }
+inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
+ *ptr = value;
+ MemoryBarrier();
+}
+inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
+ MemoryBarrier();
+ *ptr = value;
+}
+inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) { return *ptr; }
+inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
+ Atomic32 value = *ptr;
+ MemoryBarrier();
+ return value;
+}
+inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
+ MemoryBarrier();
+ return *ptr;
+}
+#ifdef ARCH_CPU_PPC64
+inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
+ Atomic64 old_value,
+ Atomic64 new_value) {
+ return (__sync_val_compare_and_swap(ptr, old_value, new_value));
+}
+inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
+ Atomic64 new_value) {
+ Atomic64 old_value;
+ do {
+ old_value = *ptr;
+ } while (__sync_bool_compare_and_swap(ptr, old_value, new_value) == false);
+ return old_value;
+}
+inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
+ Atomic64 increment) {
+ return Barrier_AtomicIncrement(ptr, increment);
+}
+inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
+ Atomic64 increment) {
+ for (;;) {
+ Atomic64 old_value = *ptr;
+ Atomic64 new_value = old_value + increment;
+ if (__sync_bool_compare_and_swap(ptr, old_value, new_value)) {
+      // The exchange took place as expected.
+      return new_value;
+ }
+ // Otherwise, *ptr changed mid-loop and we need to retry.
+ }
+}
+inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
+ Atomic64 old_value, Atomic64 new_value) {
+ return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+}
+inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
+ Atomic64 old_value, Atomic64 new_value) {
+ return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+}
+inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
+ *ptr = value;
+}
+inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
+ *ptr = value;
+ MemoryBarrier();
+}
+inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
+ MemoryBarrier();
+ *ptr = value;
+}
+inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) { return *ptr; }
+inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
+ Atomic64 value = *ptr;
+ MemoryBarrier();
+ return value;
+}
+inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
+ MemoryBarrier();
+ return *ptr;
+}
+#endif
+} // namespace base::subtle
+} // namespace base
+#endif // BASE_ATOMICOPS_INTERNALS_PPC_GCC_H_
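The PPC backend above builds every primitive out of GCC's __sync builtins plus an explicit "sync" barrier. As an illustrative sketch (not part of the patch; the function and variable names are made up), this is how a caller would typically pair Release_Store with Acquire_Load through the public base/atomicops.h interface to publish data from one thread to another:

// Illustrative sketch only, not part of the patch.
#include "base/atomicops.h"

namespace {

int g_payload = 0;                   // ordinary data published via the flag
base::subtle::Atomic32 g_ready = 0;  // 0 = not published, 1 = published

void Publish() {
  g_payload = 42;                            // write the data first
  base::subtle::Release_Store(&g_ready, 1);  // barrier, then store (see above)
}

bool TryConsume(int* out) {
  if (base::subtle::Acquire_Load(&g_ready) == 0)  // load, then barrier
    return false;                                 // not published yet
  *out = g_payload;  // safe: ordered after the flag load by the acquire barrier
  return true;
}

}  // namespace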
diff --git a/ipc/chromium/src/base/atomicops_internals_x86_gcc.cc b/ipc/chromium/src/base/atomicops_internals_x86_gcc.cc
new file mode 100644
index 000000000..c5b11b31c
--- /dev/null
+++ b/ipc/chromium/src/base/atomicops_internals_x86_gcc.cc
@@ -0,0 +1,106 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This module gets enough CPU information to optimize the
+// atomicops module on x86.
+
+#include <string.h>
+
+#include "base/atomicops.h"
+#include "base/basictypes.h"
+
+// This file only makes sense with atomicops_internals_x86_gcc.h -- it
+// depends on structs that are defined in that file. If atomicops.h
+// doesn't sub-include that file, then we aren't needed, and shouldn't
+// try to do anything.
+#ifdef BASE_ATOMICOPS_INTERNALS_X86_GCC_H_
+
+// Inline cpuid instruction. In PIC compilations, %ebx contains the address
+// of the global offset table. To avoid breaking such executables, this code
+// must preserve that register's value across cpuid instructions.
+#if defined(__i386__)
+#define cpuid(a, b, c, d, inp) \
+ asm ("mov %%ebx, %%edi\n" \
+ "cpuid\n" \
+ "xchg %%edi, %%ebx\n" \
+ : "=a" (a), "=D" (b), "=c" (c), "=d" (d) : "a" (inp))
+#elif defined (__x86_64__)
+#define cpuid(a, b, c, d, inp) \
+ asm ("mov %%rbx, %%rdi\n" \
+ "cpuid\n" \
+ "xchg %%rdi, %%rbx\n" \
+ : "=a" (a), "=D" (b), "=c" (c), "=d" (d) : "a" (inp))
+#endif
+
+#if defined(cpuid) // initialize the struct only on x86
+
+// Set the flags so that code will run correctly and conservatively, so even
+// if we haven't been initialized yet, we're probably single threaded, and our
+// default values should hopefully be pretty safe.
+struct AtomicOps_x86CPUFeatureStruct AtomicOps_Internalx86CPUFeatures = {
+ false, // bug can't exist before process spawns multiple threads
+ false, // no SSE2
+};
+
+// Initialize the AtomicOps_Internalx86CPUFeatures struct.
+static void AtomicOps_Internalx86CPUFeaturesInit() {
+ uint32_t eax;
+ uint32_t ebx;
+ uint32_t ecx;
+ uint32_t edx;
+
+ // Get vendor string (issue CPUID with eax = 0)
+ cpuid(eax, ebx, ecx, edx, 0);
+ char vendor[13];
+ memcpy(vendor, &ebx, 4);
+ memcpy(vendor + 4, &edx, 4);
+ memcpy(vendor + 8, &ecx, 4);
+ vendor[12] = 0;
+
+ // get feature flags in ecx/edx, and family/model in eax
+ cpuid(eax, ebx, ecx, edx, 1);
+
+ int family = (eax >> 8) & 0xf; // family and model fields
+ int model = (eax >> 4) & 0xf;
+ if (family == 0xf) { // use extended family and model fields
+ family += (eax >> 20) & 0xff;
+ model += ((eax >> 16) & 0xf) << 4;
+ }
+
+ // Opteron Rev E has a bug in which on very rare occasions a locked
+ // instruction doesn't act as a read-acquire barrier if followed by a
+ // non-locked read-modify-write instruction. Rev F has this bug in
+ // pre-release versions, but not in versions released to customers,
+ // so we test only for Rev E, which is family 15, model 32..63 inclusive.
+ if (strcmp(vendor, "AuthenticAMD") == 0 && // AMD
+ family == 15 &&
+ 32 <= model && model <= 63) {
+ AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug = true;
+ } else {
+ AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug = false;
+ }
+
+  // edx bit 26 is SSE2, which tells us whether we can use mfence
+ AtomicOps_Internalx86CPUFeatures.has_sse2 = ((edx >> 26) & 1);
+}
+
+namespace {
+
+class AtomicOpsx86Initializer {
+ public:
+ AtomicOpsx86Initializer() {
+ AtomicOps_Internalx86CPUFeaturesInit();
+ }
+};
+
+// A global to get us initialized on startup via static initialization :/
+AtomicOpsx86Initializer g_initer;
+
+} // namespace
+
+#endif // if x86
+
+#endif // ifdef BASE_ATOMICOPS_INTERNALS_X86_GCC_H_
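The family/model arithmetic above is what actually identifies Opteron Rev E parts. A worked example (illustrative only, not part of the patch; the signature value is a hypothetical Rev E-style encoding) of decoding a CPUID leaf-1 eax word the same way the init function does:

// Illustrative sketch only, not part of the patch.
#include <stdint.h>
#include <stdio.h>

int main() {
  uint32_t eax = 0x00020F71;       // hypothetical family/model/stepping word
  int family = (eax >> 8) & 0xf;   // 0xF -> 15
  int model = (eax >> 4) & 0xf;    // 0x7 -> 7
  if (family == 0xf) {             // extended fields only apply to family 15
    family += (eax >> 20) & 0xff;       // extended family 0x00 -> still 15
    model += ((eax >> 16) & 0xf) << 4;  // extended model 0x2 -> 7 + 32 = 39
  }
  // family 15, model 39 falls inside the 32..63 window tested above, so this
  // part would get has_amd_lock_mb_bug = true.
  printf("family=%d model=%d\n", family, model);
  return 0;
}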
diff --git a/ipc/chromium/src/base/atomicops_internals_x86_gcc.h b/ipc/chromium/src/base/atomicops_internals_x86_gcc.h
new file mode 100644
index 000000000..8088e7f6a
--- /dev/null
+++ b/ipc/chromium/src/base/atomicops_internals_x86_gcc.h
@@ -0,0 +1,261 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is an internal atomic implementation, use base/atomicops.h instead.
+
+#ifndef BASE_ATOMICOPS_INTERNALS_X86_GCC_H_
+#define BASE_ATOMICOPS_INTERNALS_X86_GCC_H_
+
+// This struct is not part of the public API of this module; clients may not
+// use it.
+// Features of this x86. Values may not be correct before main() is run,
+// but are set conservatively.
+struct AtomicOps_x86CPUFeatureStruct {
+ bool has_amd_lock_mb_bug; // Processor has AMD memory-barrier bug; do lfence
+ // after acquire compare-and-swap.
+ bool has_sse2; // Processor has SSE2.
+};
+extern struct AtomicOps_x86CPUFeatureStruct AtomicOps_Internalx86CPUFeatures;
+
+#define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory")
+
+namespace base {
+namespace subtle {
+
+// 32-bit low-level operations on any platform.
+
+inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value,
+ Atomic32 new_value) {
+ Atomic32 prev;
+ __asm__ __volatile__("lock; cmpxchgl %1,%2"
+ : "=a" (prev)
+ : "q" (new_value), "m" (*ptr), "0" (old_value)
+ : "memory");
+ return prev;
+}
+
+inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
+ Atomic32 new_value) {
+ __asm__ __volatile__("xchgl %1,%0" // The lock prefix is implicit for xchg.
+ : "=r" (new_value)
+ : "m" (*ptr), "0" (new_value)
+ : "memory");
+ return new_value; // Now it's the previous value.
+}
+
+inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
+ Atomic32 increment) {
+ Atomic32 temp = increment;
+ __asm__ __volatile__("lock; xaddl %0,%1"
+ : "+r" (temp), "+m" (*ptr)
+ : : "memory");
+ // temp now holds the old value of *ptr
+ return temp + increment;
+}
+
+inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
+ Atomic32 increment) {
+ Atomic32 temp = increment;
+ __asm__ __volatile__("lock; xaddl %0,%1"
+ : "+r" (temp), "+m" (*ptr)
+ : : "memory");
+ // temp now holds the old value of *ptr
+ if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
+ __asm__ __volatile__("lfence" : : : "memory");
+ }
+ return temp + increment;
+}
+
+inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value,
+ Atomic32 new_value) {
+ Atomic32 x = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+ if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
+ __asm__ __volatile__("lfence" : : : "memory");
+ }
+ return x;
+}
+
+inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value,
+ Atomic32 new_value) {
+ return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+}
+
+inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
+ *ptr = value;
+}
+
+#if defined(__x86_64__)
+
+// 64-bit implementations of memory barrier can be simpler, because the
+// "mfence" instruction is guaranteed to exist.
+inline void MemoryBarrier() {
+ __asm__ __volatile__("mfence" : : : "memory");
+}
+
+inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
+ *ptr = value;
+ MemoryBarrier();
+}
+
+#else
+
+inline void MemoryBarrier() {
+ if (AtomicOps_Internalx86CPUFeatures.has_sse2) {
+ __asm__ __volatile__("mfence" : : : "memory");
+ } else { // mfence is faster but not present on PIII
+ Atomic32 x = 0;
+ NoBarrier_AtomicExchange(&x, 0); // acts as a barrier on PIII
+ }
+}
+
+inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
+ if (AtomicOps_Internalx86CPUFeatures.has_sse2) {
+ *ptr = value;
+ __asm__ __volatile__("mfence" : : : "memory");
+ } else {
+ NoBarrier_AtomicExchange(ptr, value);
+ // acts as a barrier on PIII
+ }
+}
+#endif
+
+inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
+ ATOMICOPS_COMPILER_BARRIER();
+ *ptr = value; // An x86 store acts as a release barrier.
+ // See comments in Atomic64 version of Release_Store(), below.
+}
+
+inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
+ return *ptr;
+}
+
+inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
+  Atomic32 value = *ptr;  // An x86 load acts as an acquire barrier.
+ // See comments in Atomic64 version of Release_Store(), below.
+ ATOMICOPS_COMPILER_BARRIER();
+ return value;
+}
+
+inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
+ MemoryBarrier();
+ return *ptr;
+}
+
+#if defined(__x86_64__)
+
+// 64-bit low-level operations on 64-bit platform.
+
+inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
+ Atomic64 old_value,
+ Atomic64 new_value) {
+ Atomic64 prev;
+ __asm__ __volatile__("lock; cmpxchgq %1,%2"
+ : "=a" (prev)
+ : "q" (new_value), "m" (*ptr), "0" (old_value)
+ : "memory");
+ return prev;
+}
+
+inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
+ Atomic64 new_value) {
+ __asm__ __volatile__("xchgq %1,%0" // The lock prefix is implicit for xchg.
+ : "=r" (new_value)
+ : "m" (*ptr), "0" (new_value)
+ : "memory");
+ return new_value; // Now it's the previous value.
+}
+
+inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
+ Atomic64 increment) {
+ Atomic64 temp = increment;
+ __asm__ __volatile__("lock; xaddq %0,%1"
+ : "+r" (temp), "+m" (*ptr)
+ : : "memory");
+ // temp now contains the previous value of *ptr
+ return temp + increment;
+}
+
+inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
+ Atomic64 increment) {
+ Atomic64 temp = increment;
+ __asm__ __volatile__("lock; xaddq %0,%1"
+ : "+r" (temp), "+m" (*ptr)
+ : : "memory");
+ // temp now contains the previous value of *ptr
+ if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
+ __asm__ __volatile__("lfence" : : : "memory");
+ }
+ return temp + increment;
+}
+
+inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
+ *ptr = value;
+}
+
+inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
+ Atomic64 old_value,
+ Atomic64 new_value) {
+ Atomic64 x = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+ /* XXX/cjones: no idea if this is necessary... */
+ if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
+ __asm__ __volatile__("lfence" : : : "memory");
+ }
+ return x;
+}
+
+inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
+ *ptr = value;
+ MemoryBarrier();
+}
+
+inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
+ ATOMICOPS_COMPILER_BARRIER();
+
+ *ptr = value; // An x86 store acts as a release barrier
+ // for current AMD/Intel chips as of Jan 2008.
+ // See also Acquire_Load(), below.
+
+ // When new chips come out, check:
+ // IA-32 Intel Architecture Software Developer's Manual, Volume 3:
+  //  System Programming Guide, Chapter 7: Multiple-processor management,
+ // Section 7.2, Memory Ordering.
+ // Last seen at:
+ // http://developer.intel.com/design/pentium4/manuals/index_new.htm
+ //
+ // x86 stores/loads fail to act as barriers for a few instructions (clflush
+ // maskmovdqu maskmovq movntdq movnti movntpd movntps movntq) but these are
+ // not generated by the compiler, and are rare. Users of these instructions
+ // need to know about cache behaviour in any case since all of these involve
+ // either flushing cache lines or non-temporal cache hints.
+}
+
+inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
+ return *ptr;
+}
+
+inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
+  Atomic64 value = *ptr;  // An x86 load acts as an acquire barrier,
+ // for current AMD/Intel chips as of Jan 2008.
+ // See also Release_Store(), above.
+ ATOMICOPS_COMPILER_BARRIER();
+ return value;
+}
+
+inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
+ MemoryBarrier();
+ return *ptr;
+}
+#endif // defined(__x86_64__)
+
+} // namespace base::subtle
+} // namespace base
+
+#undef ATOMICOPS_COMPILER_BARRIER
+
+#endif // BASE_ATOMICOPS_INTERNALS_X86_GCC_H_
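Callers are expected to reach these routines through base/atomicops.h rather than including this header directly. A minimal sketch (not part of the patch; the class is made up) of a spinlock built from the Acquire_CompareAndSwap and Release_Store primitives declared above:

// Illustrative sketch only, not part of the patch.
#include "base/atomicops.h"

class SpinLock {
 public:
  SpinLock() : state_(0) {}

  void Lock() {
    // Swing state_ from 0 (free) to 1 (held); the acquire barrier orders the
    // critical section after the successful swap.
    while (base::subtle::Acquire_CompareAndSwap(&state_, 0, 1) != 0) {
      // Spin until the previous value observed is 0.
    }
  }

  void Unlock() {
    // Release ensures writes made while holding the lock are visible first.
    base::subtle::Release_Store(&state_, 0);
  }

 private:
  volatile base::subtle::Atomic32 state_;
};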
diff --git a/ipc/chromium/src/base/atomicops_internals_x86_macosx.h b/ipc/chromium/src/base/atomicops_internals_x86_macosx.h
new file mode 100644
index 000000000..c8f3a67af
--- /dev/null
+++ b/ipc/chromium/src/base/atomicops_internals_x86_macosx.h
@@ -0,0 +1,281 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is an internal atomic implementation, use base/atomicops.h instead.
+
+#ifndef BASE_ATOMICOPS_INTERNALS_X86_MACOSX_H_
+#define BASE_ATOMICOPS_INTERNALS_X86_MACOSX_H_
+
+#include <libkern/OSAtomic.h>
+
+namespace base {
+namespace subtle {
+
+inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32 *ptr,
+ Atomic32 old_value,
+ Atomic32 new_value) {
+ Atomic32 prev_value;
+ do {
+ if (OSAtomicCompareAndSwap32(old_value, new_value,
+ const_cast<Atomic32*>(ptr))) {
+ return old_value;
+ }
+ prev_value = *ptr;
+ } while (prev_value == old_value);
+ return prev_value;
+}
+
+inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32 *ptr,
+ Atomic32 new_value) {
+ Atomic32 old_value;
+ do {
+ old_value = *ptr;
+ } while (!OSAtomicCompareAndSwap32(old_value, new_value,
+ const_cast<Atomic32*>(ptr)));
+ return old_value;
+}
+
+inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32 *ptr,
+ Atomic32 increment) {
+ return OSAtomicAdd32(increment, const_cast<Atomic32*>(ptr));
+}
+
+inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32 *ptr,
+ Atomic32 increment) {
+ return OSAtomicAdd32Barrier(increment, const_cast<Atomic32*>(ptr));
+}
+
+inline void MemoryBarrier() {
+ OSMemoryBarrier();
+}
+
+inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32 *ptr,
+ Atomic32 old_value,
+ Atomic32 new_value) {
+ Atomic32 prev_value;
+ do {
+ if (OSAtomicCompareAndSwap32Barrier(old_value, new_value,
+ const_cast<Atomic32*>(ptr))) {
+ return old_value;
+ }
+ prev_value = *ptr;
+ } while (prev_value == old_value);
+ return prev_value;
+}
+
+inline Atomic32 Release_CompareAndSwap(volatile Atomic32 *ptr,
+ Atomic32 old_value,
+ Atomic32 new_value) {
+ return Acquire_CompareAndSwap(ptr, old_value, new_value);
+}
+
+inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
+ *ptr = value;
+}
+
+inline void Acquire_Store(volatile Atomic32 *ptr, Atomic32 value) {
+ *ptr = value;
+ MemoryBarrier();
+}
+
+inline void Release_Store(volatile Atomic32 *ptr, Atomic32 value) {
+ MemoryBarrier();
+ *ptr = value;
+}
+
+inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
+ return *ptr;
+}
+
+inline Atomic32 Acquire_Load(volatile const Atomic32 *ptr) {
+ Atomic32 value = *ptr;
+ MemoryBarrier();
+ return value;
+}
+
+inline Atomic32 Release_Load(volatile const Atomic32 *ptr) {
+ MemoryBarrier();
+ return *ptr;
+}
+
+#ifdef __LP64__
+
+// 64-bit implementation on 64-bit platform
+
+inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64 *ptr,
+ Atomic64 old_value,
+ Atomic64 new_value) {
+ Atomic64 prev_value;
+ do {
+ if (OSAtomicCompareAndSwap64(old_value, new_value,
+ const_cast<Atomic64*>(ptr))) {
+ return old_value;
+ }
+ prev_value = *ptr;
+ } while (prev_value == old_value);
+ return prev_value;
+}
+
+inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64 *ptr,
+ Atomic64 new_value) {
+ Atomic64 old_value;
+ do {
+ old_value = *ptr;
+ } while (!OSAtomicCompareAndSwap64(old_value, new_value,
+ const_cast<Atomic64*>(ptr)));
+ return old_value;
+}
+
+inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64 *ptr,
+ Atomic64 increment) {
+ return OSAtomicAdd64(increment, const_cast<Atomic64*>(ptr));
+}
+
+inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64 *ptr,
+ Atomic64 increment) {
+ return OSAtomicAdd64Barrier(increment, const_cast<Atomic64*>(ptr));
+}
+
+inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64 *ptr,
+ Atomic64 old_value,
+ Atomic64 new_value) {
+ Atomic64 prev_value;
+ do {
+ if (OSAtomicCompareAndSwap64Barrier(old_value, new_value,
+ const_cast<Atomic64*>(ptr))) {
+ return old_value;
+ }
+ prev_value = *ptr;
+ } while (prev_value == old_value);
+ return prev_value;
+}
+
+inline Atomic64 Release_CompareAndSwap(volatile Atomic64 *ptr,
+ Atomic64 old_value,
+ Atomic64 new_value) {
+ // The lib kern interface does not distinguish between
+ // Acquire and Release memory barriers; they are equivalent.
+ return Acquire_CompareAndSwap(ptr, old_value, new_value);
+}
+
+inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
+ *ptr = value;
+}
+
+inline void Acquire_Store(volatile Atomic64 *ptr, Atomic64 value) {
+ *ptr = value;
+ MemoryBarrier();
+}
+
+inline void Release_Store(volatile Atomic64 *ptr, Atomic64 value) {
+ MemoryBarrier();
+ *ptr = value;
+}
+
+inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
+ return *ptr;
+}
+
+inline Atomic64 Acquire_Load(volatile const Atomic64 *ptr) {
+ Atomic64 value = *ptr;
+ MemoryBarrier();
+ return value;
+}
+
+inline Atomic64 Release_Load(volatile const Atomic64 *ptr) {
+ MemoryBarrier();
+ return *ptr;
+}
+
+#endif // defined(__LP64__)
+
+// MacOS uses long for intptr_t, so AtomicWord and Atomic32 are always
+// different types on the Mac, even when they have the same size. We need to
+// explicitly cast from AtomicWord to Atomic32/64 to implement the AtomicWord
+// interface.
+#ifdef __LP64__
+#define AtomicWordCastType Atomic64
+#else
+#define AtomicWordCastType Atomic32
+#endif
+
+inline AtomicWord NoBarrier_CompareAndSwap(volatile AtomicWord* ptr,
+ AtomicWord old_value,
+ AtomicWord new_value) {
+ return NoBarrier_CompareAndSwap(
+ reinterpret_cast<volatile AtomicWordCastType*>(ptr),
+ old_value, new_value);
+}
+
+inline AtomicWord NoBarrier_AtomicExchange(volatile AtomicWord* ptr,
+ AtomicWord new_value) {
+ return NoBarrier_AtomicExchange(
+ reinterpret_cast<volatile AtomicWordCastType*>(ptr), new_value);
+}
+
+inline AtomicWord NoBarrier_AtomicIncrement(volatile AtomicWord* ptr,
+ AtomicWord increment) {
+ return NoBarrier_AtomicIncrement(
+ reinterpret_cast<volatile AtomicWordCastType*>(ptr), increment);
+}
+
+inline AtomicWord Barrier_AtomicIncrement(volatile AtomicWord* ptr,
+ AtomicWord increment) {
+ return Barrier_AtomicIncrement(
+ reinterpret_cast<volatile AtomicWordCastType*>(ptr), increment);
+}
+
+inline AtomicWord Acquire_CompareAndSwap(volatile AtomicWord* ptr,
+ AtomicWord old_value,
+ AtomicWord new_value) {
+ return base::subtle::Acquire_CompareAndSwap(
+ reinterpret_cast<volatile AtomicWordCastType*>(ptr),
+ old_value, new_value);
+}
+
+inline AtomicWord Release_CompareAndSwap(volatile AtomicWord* ptr,
+ AtomicWord old_value,
+ AtomicWord new_value) {
+ return base::subtle::Release_CompareAndSwap(
+ reinterpret_cast<volatile AtomicWordCastType*>(ptr),
+ old_value, new_value);
+}
+
+inline void NoBarrier_Store(volatile AtomicWord *ptr, AtomicWord value) {
+ NoBarrier_Store(
+ reinterpret_cast<volatile AtomicWordCastType*>(ptr), value);
+}
+
+inline void Acquire_Store(volatile AtomicWord* ptr, AtomicWord value) {
+ return base::subtle::Acquire_Store(
+ reinterpret_cast<volatile AtomicWordCastType*>(ptr), value);
+}
+
+inline void Release_Store(volatile AtomicWord* ptr, AtomicWord value) {
+ return base::subtle::Release_Store(
+ reinterpret_cast<volatile AtomicWordCastType*>(ptr), value);
+}
+
+inline AtomicWord NoBarrier_Load(volatile const AtomicWord *ptr) {
+ return NoBarrier_Load(
+ reinterpret_cast<volatile const AtomicWordCastType*>(ptr));
+}
+
+inline AtomicWord Acquire_Load(volatile const AtomicWord* ptr) {
+ return base::subtle::Acquire_Load(
+ reinterpret_cast<volatile const AtomicWordCastType*>(ptr));
+}
+
+inline AtomicWord Release_Load(volatile const AtomicWord* ptr) {
+ return base::subtle::Release_Load(
+ reinterpret_cast<volatile const AtomicWordCastType*>(ptr));
+}
+
+#undef AtomicWordCastType
+
+} // namespace base::subtle
+} // namespace base
+
+#endif // BASE_ATOMICOPS_INTERNALS_X86_MACOSX_H_
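The AtomicWord wrappers at the end of this header exist only because of the type mismatch described in the comment above. A small sketch (not part of the patch; the typedefs mirror what base/atomicops.h is assumed to use) of why the reinterpret_cast is needed even when the two types have the same size in a 32-bit build:

// Illustrative sketch only, not part of the patch.
#include <stdint.h>

typedef int32_t Atomic32;     // assumed to match base/atomicops.h
typedef intptr_t AtomicWord;  // assumed to match base/atomicops.h (long on Mac)

void Demo(volatile AtomicWord* word) {
  // volatile Atomic32* p = word;  // would not compile: unrelated pointer types
  volatile Atomic32* p = reinterpret_cast<volatile Atomic32*>(word);
  (void)p;
}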
diff --git a/ipc/chromium/src/base/atomicops_internals_x86_msvc.h b/ipc/chromium/src/base/atomicops_internals_x86_msvc.h
new file mode 100644
index 000000000..524046935
--- /dev/null
+++ b/ipc/chromium/src/base/atomicops_internals_x86_msvc.h
@@ -0,0 +1,178 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is an internal atomic implementation, use base/atomicops.h instead.
+
+#ifndef BASE_ATOMICOPS_INTERNALS_X86_MSVC_H_
+#define BASE_ATOMICOPS_INTERNALS_X86_MSVC_H_
+
+#include <windows.h>
+
+namespace base {
+namespace subtle {
+
+inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value,
+ Atomic32 new_value) {
+ LONG result = InterlockedCompareExchange(
+ reinterpret_cast<volatile LONG*>(ptr),
+ static_cast<LONG>(new_value),
+ static_cast<LONG>(old_value));
+ return static_cast<Atomic32>(result);
+}
+
+inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
+ Atomic32 new_value) {
+ LONG result = InterlockedExchange(
+ reinterpret_cast<volatile LONG*>(ptr),
+ static_cast<LONG>(new_value));
+ return static_cast<Atomic32>(result);
+}
+
+inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
+ Atomic32 increment) {
+ return InterlockedExchangeAdd(
+ reinterpret_cast<volatile LONG*>(ptr),
+ static_cast<LONG>(increment)) + increment;
+}
+
+inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
+ Atomic32 increment) {
+ return Barrier_AtomicIncrement(ptr, increment);
+}
+
+inline void MemoryBarrier() {
+ // We use MemoryBarrier from WinNT.h
+ ::MemoryBarrier();
+}
+
+inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value,
+ Atomic32 new_value) {
+ return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+}
+
+inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value,
+ Atomic32 new_value) {
+ return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+}
+
+inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
+ *ptr = value;
+}
+
+inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
+ NoBarrier_AtomicExchange(ptr, value);
+ // acts as a barrier in this implementation
+}
+
+inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
+ *ptr = value; // works w/o barrier for current Intel chips as of June 2005
+ // See comments in Atomic64 version of Release_Store() below.
+}
+
+inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
+ return *ptr;
+}
+
+inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
+ Atomic32 value = *ptr;
+ return value;
+}
+
+inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
+ MemoryBarrier();
+ return *ptr;
+}
+
+#if defined(_WIN64)
+
+// 64-bit low-level operations on 64-bit platform.
+
+COMPILE_ASSERT(sizeof(Atomic64) == sizeof(PVOID), atomic_word_is_atomic);
+
+inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
+ Atomic64 old_value,
+ Atomic64 new_value) {
+ PVOID result = InterlockedCompareExchangePointer(
+ reinterpret_cast<volatile PVOID*>(ptr),
+ reinterpret_cast<PVOID>(new_value), reinterpret_cast<PVOID>(old_value));
+ return reinterpret_cast<Atomic64>(result);
+}
+
+inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
+ Atomic64 new_value) {
+ PVOID result = InterlockedExchangePointer(
+ reinterpret_cast<volatile PVOID*>(ptr),
+ reinterpret_cast<PVOID>(new_value));
+ return reinterpret_cast<Atomic64>(result);
+}
+
+inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
+ Atomic64 increment) {
+ return InterlockedExchangeAdd64(
+ reinterpret_cast<volatile LONGLONG*>(ptr),
+ static_cast<LONGLONG>(increment)) + increment;
+}
+
+inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
+ Atomic64 increment) {
+ return Barrier_AtomicIncrement(ptr, increment);
+}
+
+inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
+ Atomic64 old_value,
+ Atomic64 new_value) {
+ return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+}
+
+inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
+ Atomic64 old_value,
+ Atomic64 new_value) {
+ return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+}
+
+inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
+ *ptr = value;
+}
+
+inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
+ NoBarrier_AtomicExchange(ptr, value);
+ // acts as a barrier in this implementation
+}
+
+inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
+ *ptr = value; // works w/o barrier for current Intel chips as of June 2005
+
+ // When new chips come out, check:
+ // IA-32 Intel Architecture Software Developer's Manual, Volume 3:
+  //  System Programming Guide, Chapter 7: Multiple-processor management,
+ // Section 7.2, Memory Ordering.
+ // Last seen at:
+ // http://developer.intel.com/design/pentium4/manuals/index_new.htm
+}
+
+inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
+ return *ptr;
+}
+
+inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
+ Atomic64 value = *ptr;
+ return value;
+}
+
+inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
+ MemoryBarrier();
+ return *ptr;
+}
+
+#endif // defined(_WIN64)
+
+} // namespace base::subtle
+} // namespace base
+
+#endif // BASE_ATOMICOPS_INTERNALS_X86_MSVC_H_
diff --git a/ipc/chromium/src/base/basictypes.h b/ipc/chromium/src/base/basictypes.h
new file mode 100644
index 000000000..6afc7e238
--- /dev/null
+++ b/ipc/chromium/src/base/basictypes.h
@@ -0,0 +1,272 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_BASICTYPES_H_
+#define BASE_BASICTYPES_H_
+
+#include <limits.h> // So we can set the bounds of our types
+#include <stddef.h> // For size_t
+#include <string.h> // for memcpy
+
+#include "base/port.h" // Types that only need exist on certain systems
+
+#include "mozilla/Assertions.h"
+#include "mozilla/IntegerPrintfMacros.h"
+
+// A type to represent a Unicode code-point value. As of Unicode 4.0,
+// such values require up to 21 bits.
+// (For type-checking on pointers, make this explicitly signed,
+// and it should always be the signed version of whatever int32_t is.)
+typedef signed int char32;
+
+const uint8_t kuint8max = (( uint8_t) 0xFF);
+const uint16_t kuint16max = ((uint16_t) 0xFFFF);
+const uint32_t kuint32max = ((uint32_t) 0xFFFFFFFF);
+const uint64_t kuint64max = ((uint64_t) GG_LONGLONG(0xFFFFFFFFFFFFFFFF));
+const int8_t kint8min = (( int8_t) 0x80);
+const int8_t kint8max = (( int8_t) 0x7F);
+const int16_t kint16min = (( int16_t) 0x8000);
+const int16_t kint16max = (( int16_t) 0x7FFF);
+const int32_t kint32min = (( int32_t) 0x80000000);
+const int32_t kint32max = (( int32_t) 0x7FFFFFFF);
+const int64_t kint64min = (( int64_t) GG_LONGLONG(0x8000000000000000));
+const int64_t kint64max = (( int64_t) GG_LONGLONG(0x7FFFFFFFFFFFFFFF));
+
+// Platform- and hardware-dependent printf specifiers
+# if defined(OS_POSIX)
+# define PRId64L "I64d"
+# define PRIu64L "I64u"
+# define PRIx64L "I64x"
+# elif defined(OS_WIN)
+# define PRId64L L"I64d"
+# define PRIu64L L"I64u"
+# define PRIx64L L"I64x"
+# endif
+
+// A macro to disallow the copy constructor and operator= functions
+// This should be used in the private: declarations for a class
+#undef DISALLOW_COPY_AND_ASSIGN
+#define DISALLOW_COPY_AND_ASSIGN(TypeName) \
+ TypeName(const TypeName&); \
+ void operator=(const TypeName&)
+
+// An older, deprecated, politically incorrect name for the above.
+#undef DISALLOW_EVIL_CONSTRUCTORS
+#define DISALLOW_EVIL_CONSTRUCTORS(TypeName) DISALLOW_COPY_AND_ASSIGN(TypeName)
+
+// A macro to disallow all the implicit constructors, namely the
+// default constructor, copy constructor and operator= functions.
+//
+// This should be used in the private: declarations for a class
+// that wants to prevent anyone from instantiating it. This is
+// especially useful for classes containing only static methods.
+#undef DISALLOW_IMPLICIT_CONSTRUCTORS
+#define DISALLOW_IMPLICIT_CONSTRUCTORS(TypeName) \
+ TypeName(); \
+ DISALLOW_COPY_AND_ASSIGN(TypeName)
+
+// The arraysize(arr) macro returns the # of elements in an array arr.
+// The expression is a compile-time constant, and therefore can be
+// used in defining new arrays, for example. If you use arraysize on
+// a pointer by mistake, you will get a compile-time error.
+//
+// One caveat is that arraysize() doesn't accept any array of an
+// anonymous type or a type defined inside a function. In these rare
+// cases, you have to use the unsafe ARRAYSIZE_UNSAFE() macro below. This is
+// due to a limitation in C++'s template system. The limitation might
+// eventually be removed, but it hasn't happened yet.
+
+// This template function declaration is used in defining arraysize.
+// Note that the function doesn't need an implementation, as we only
+// use its type.
+template <typename T, size_t N>
+char (&ArraySizeHelper(T (&array)[N]))[N];
+
+// That gcc wants both of these prototypes seems mysterious. VC, for
+// its part, can't decide which to use (another mystery). Matching of
+// template overloads: the final frontier.
+#ifndef _MSC_VER
+template <typename T, size_t N>
+char (&ArraySizeHelper(const T (&array)[N]))[N];
+#endif
+
+#define arraysize(array) (sizeof(ArraySizeHelper(array)))
+
+// ARRAYSIZE_UNSAFE performs essentially the same calculation as arraysize,
+// but can be used on anonymous types or types defined inside
+// functions. It's less safe than arraysize as it accepts some
+// (although not all) pointers. Therefore, you should use arraysize
+// whenever possible.
+//
+// The expression ARRAYSIZE_UNSAFE(a) is a compile-time constant of type
+// size_t.
+//
+// ARRAYSIZE_UNSAFE catches a few type errors. If you see a compiler error
+//
+// "warning: division by zero in ..."
+//
+// when using ARRAYSIZE_UNSAFE, you are (wrongfully) giving it a pointer.
+// You should only use ARRAYSIZE_UNSAFE on statically allocated arrays.
+//
+// The following comments are on the implementation details, and can
+// be ignored by the users.
+//
+// ARRAYSIZE_UNSAFE(arr) works by inspecting sizeof(arr) (the # of bytes in
+// the array) and sizeof(*(arr)) (the # of bytes in one array
+// element). If the former is divisible by the latter, perhaps arr is
+// indeed an array, in which case the division result is the # of
+// elements in the array. Otherwise, arr cannot possibly be an array,
+// and we generate a compiler error to prevent the code from
+// compiling.
+//
+// Since the size of bool is implementation-defined, we need to cast
+// !(sizeof(a) & sizeof(*(a))) to size_t in order to ensure the final
+// result has type size_t.
+//
+// This macro is not perfect as it wrongfully accepts certain
+// pointers, namely where the pointer size is divisible by the pointee
+// size. Since all our code has to go through a 32-bit compiler,
+// where a pointer is 4 bytes, this means all pointers to a type whose
+// size is 3 or greater than 4 will be (righteously) rejected.
+
+#define ARRAYSIZE_UNSAFE(a) \
+ ((sizeof(a) / sizeof(*(a))) / \
+ static_cast<size_t>(!(sizeof(a) % sizeof(*(a)))))
+
+
+// Use implicit_cast as a safe version of static_cast or const_cast
+// for upcasting in the type hierarchy (i.e. casting a pointer to Foo
+// to a pointer to SuperclassOfFoo or casting a pointer to Foo to
+// a const pointer to Foo).
+// When you use implicit_cast, the compiler checks that the cast is safe.
+// Such explicit implicit_casts are necessary in surprisingly many
+// situations where C++ demands an exact type match instead of an
+// argument type convertible to a target type.
+//
+// The From type can be inferred, so the preferred syntax for using
+// implicit_cast is the same as for static_cast etc.:
+//
+// implicit_cast<ToType>(expr)
+//
+// implicit_cast would have been part of the C++ standard library,
+// but the proposal was submitted too late. It will probably make
+// its way into the language in the future.
+template<typename To, typename From>
+inline To implicit_cast(From const &f) {
+ return f;
+}
+
+// The COMPILE_ASSERT macro (below) creates an otherwise-unused typedef. This
+// triggers compiler warnings with gcc 4.8 and higher, so mark the typedef
+// as permissibly-unused to disable the warnings.
+# if defined(__GNUC__)
+# define COMPILE_ASSERT_UNUSED_ATTRIBUTE __attribute__((unused))
+# else
+# define COMPILE_ASSERT_UNUSED_ATTRIBUTE /* nothing */
+# endif
+
+// The COMPILE_ASSERT macro can be used to verify that a compile time
+// expression is true. For example, you could use it to verify the
+// size of a static array:
+//
+// COMPILE_ASSERT(ARRAYSIZE_UNSAFE(content_type_names) == CONTENT_NUM_TYPES,
+// content_type_names_incorrect_size);
+//
+// or to make sure a struct is smaller than a certain size:
+//
+// COMPILE_ASSERT(sizeof(foo) < 128, foo_too_large);
+//
+// The second argument to the macro is the name of the variable. If
+// the expression is false, most compilers will issue a warning/error
+// containing the name of the variable.
+
+// Avoid multiple definitions for webrtc
+#if !defined(COMPILE_ASSERT)
+template <bool>
+struct CompileAssert {
+};
+
+#define COMPILE_ASSERT(expr, msg) \
+ typedef CompileAssert<(bool(expr))> msg[bool(expr) ? 1 : -1] \
+ COMPILE_ASSERT_UNUSED_ATTRIBUTE
+#endif
+
+// Implementation details of COMPILE_ASSERT:
+//
+// - COMPILE_ASSERT works by defining an array type that has -1
+// elements (and thus is invalid) when the expression is false.
+//
+// - The simpler definition
+//
+// #define COMPILE_ASSERT(expr, msg) typedef char msg[(expr) ? 1 : -1]
+//
+// does not work, as gcc supports variable-length arrays whose sizes
+// are determined at run-time (this is gcc's extension and not part
+// of the C++ standard). As a result, gcc fails to reject the
+// following code with the simple definition:
+//
+// int foo;
+// COMPILE_ASSERT(foo, msg); // not supposed to compile as foo is
+// // not a compile-time constant.
+//
+// - By using the type CompileAssert<(bool(expr))>, we ensure that
+// expr is a compile-time constant. (Template arguments must be
+// determined at compile-time.)
+//
+// - The outer parentheses in CompileAssert<(bool(expr))> are necessary
+// to work around a bug in gcc 3.4.4 and 4.0.1. If we had written
+//
+// CompileAssert<bool(expr)>
+//
+// instead, these compilers will refuse to compile
+//
+// COMPILE_ASSERT(5 > 0, some_message);
+//
+// (They seem to think the ">" in "5 > 0" marks the end of the
+// template argument list.)
+//
+// - The array size is (bool(expr) ? 1 : -1), instead of simply
+//
+// ((expr) ? 1 : -1).
+//
+// This is to avoid running into a bug in MS VC 7.1, which
+// causes ((0.0) ? 1 : -1) to incorrectly evaluate to 1.
+
+
+// MetatagId refers to the metatag-id that we assign to
+// each metatag <name, value> pair.
+typedef uint32_t MetatagId;
+
+// Argument type used in interfaces that can optionally take ownership
+// of a passed in argument. If TAKE_OWNERSHIP is passed, the called
+// object takes ownership of the argument. Otherwise it does not.
+enum Ownership {
+ DO_NOT_TAKE_OWNERSHIP,
+ TAKE_OWNERSHIP
+};
+
+// The following enum should be used only as a constructor argument to indicate
+// that the variable has static storage class, and that the constructor should
+// do nothing to its state. It indicates to the reader that it is legal to
+// declare a static instance of the class, provided the constructor is given
+// the base::LINKER_INITIALIZED argument. Normally, it is unsafe to declare a
+// static variable that has a constructor or a destructor because invocation
+// order is undefined. However, IF the type can be initialized by filling with
+// zeroes (which the loader does for static variables), AND the destructor also
+// does nothing to the storage, AND there are no virtual methods, then a
+// constructor declared as
+// explicit MyClass(base::LinkerInitialized x) {}
+// and invoked as
+// static MyClass my_variable_name(base::LINKER_INITIALIZED);
+namespace base {
+enum LinkerInitialized { LINKER_INITIALIZED };
+} // base
+
+
+#include "nscore.h" // pick up mozalloc operator new() etc.
+
+
+#endif // BASE_BASICTYPES_H_
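The arraysize()/ARRAYSIZE_UNSAFE()/COMPILE_ASSERT helpers above are easiest to see in use. A short sketch (not part of the patch; the array and names are made up) showing typical usage and the pointer mistake that arraysize() catches but ARRAYSIZE_UNSAFE() may not:

// Illustrative sketch only, not part of the patch.
#include "base/basictypes.h"

static const char* const kNames[] = { "alpha", "beta", "gamma" };

// Fails to compile if someone changes the array without updating its users.
COMPILE_ASSERT(arraysize(kNames) == 3, kNames_has_unexpected_size);

void Demo(const char* const* ptr) {
  size_t n1 = arraysize(kNames);         // 3, a compile-time constant
  size_t n2 = ARRAYSIZE_UNSAFE(kNames);  // also 3
  // size_t bad = arraysize(ptr);        // compile error: ptr is not an array
  // ARRAYSIZE_UNSAFE(ptr) can compile and silently yield a wrong answer.
  (void)n1;
  (void)n2;
  (void)ptr;
}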
diff --git a/ipc/chromium/src/base/chrome_application_mac.h b/ipc/chromium/src/base/chrome_application_mac.h
new file mode 100644
index 000000000..2f9719438
--- /dev/null
+++ b/ipc/chromium/src/base/chrome_application_mac.h
@@ -0,0 +1,62 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_CHROME_APPLICATION_MAC_H_
+#define BASE_CHROME_APPLICATION_MAC_H_
+
+#import <AppKit/AppKit.h>
+
+#include "base/basictypes.h"
+#include "base/scoped_nsobject.h"
+
+// Event hooks must implement this protocol.
+@protocol CrApplicationEventHookProtocol
+- (void)hookForEvent:(NSEvent*)theEvent;
+@end
+
+
+@interface CrApplication : NSApplication {
+ @private
+ BOOL handlingSendEvent_;
+ // Array of objects implementing the CrApplicationEventHookProtocol
+ scoped_nsobject<NSMutableArray> eventHooks_;
+}
+@property(readonly,
+ getter=isHandlingSendEvent,
+ nonatomic) BOOL handlingSendEvent;
+
+// Add or remove an event hook to be called for every sendEvent:
+// that the application receives. These handlers are called before
+// the normal [NSApplication sendEvent:] call is made.
+
+// This is not a good alternative to a nested event loop. It should
+// be used only when normal event logic and notification breaks down
+// (e.g. when clicking outside a canBecomeKey:NO window to "switch
+// context" out of it).
+- (void)addEventHook:(id<CrApplicationEventHookProtocol>)hook;
+- (void)removeEventHook:(id<CrApplicationEventHookProtocol>)hook;
+
++ (NSApplication*)sharedApplication;
+@end
+
+namespace chrome_application_mac {
+
+// Controls the state of |handlingSendEvent_| in the event loop so that it is
+// reset properly.
+class ScopedSendingEvent {
+ public:
+ ScopedSendingEvent();
+ ~ScopedSendingEvent();
+
+ private:
+ CrApplication* app_;
+ BOOL handling_;
+ DISALLOW_COPY_AND_ASSIGN(ScopedSendingEvent);
+};
+
+} // chrome_application_mac
+
+#endif // BASE_CHROME_APPLICATION_MAC_H_
diff --git a/ipc/chromium/src/base/chrome_application_mac.mm b/ipc/chromium/src/base/chrome_application_mac.mm
new file mode 100644
index 000000000..b9341c6ce
--- /dev/null
+++ b/ipc/chromium/src/base/chrome_application_mac.mm
@@ -0,0 +1,68 @@
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#import "chrome_application_mac.h"
+
+#include "base/logging.h"
+
+@interface CrApplication ()
+@property(readwrite,
+ getter=isHandlingSendEvent,
+ nonatomic) BOOL handlingSendEvent;
+@end
+
+@implementation CrApplication
+@synthesize handlingSendEvent = handlingSendEvent_;
+
+// Initialize NSApplication using the custom subclass. Check whether NSApp
+// was already initialized using another class, because that would break
+// some things.
++ (NSApplication*)sharedApplication {
+ NSApplication* app = [super sharedApplication];
+ if (![NSApp isKindOfClass:self]) {
+ CHROMIUM_LOG(ERROR) << "NSApp should be of type " << [[self className] UTF8String]
+ << ", not " << [[NSApp className] UTF8String];
+ DCHECK(false) << "NSApp is of wrong type";
+ }
+ return app;
+}
+
+- (id)init {
+ if ((self = [super init])) {
+ eventHooks_.reset([[NSMutableArray alloc] init]);
+ }
+ return self;
+}
+
+- (void)sendEvent:(NSEvent*)event {
+ chrome_application_mac::ScopedSendingEvent sendingEventScoper;
+ for (id<CrApplicationEventHookProtocol> handler in eventHooks_.get()) {
+ [handler hookForEvent:event];
+ }
+ [super sendEvent:event];
+}
+
+- (void)addEventHook:(id<CrApplicationEventHookProtocol>)handler {
+ [eventHooks_ addObject:handler];
+}
+
+- (void)removeEventHook:(id<CrApplicationEventHookProtocol>)handler {
+ [eventHooks_ removeObject:handler];
+}
+
+@end
+
+namespace chrome_application_mac {
+
+ScopedSendingEvent::ScopedSendingEvent()
+ : app_(static_cast<CrApplication*>([CrApplication sharedApplication])),
+ handling_([app_ isHandlingSendEvent]) {
+ [app_ setHandlingSendEvent:YES];
+}
+
+ScopedSendingEvent::~ScopedSendingEvent() {
+ [app_ setHandlingSendEvent:handling_];
+}
+
+} // namespace chrome_application_mac
diff --git a/ipc/chromium/src/base/command_line.cc b/ipc/chromium/src/base/command_line.cc
new file mode 100644
index 000000000..85fd953e1
--- /dev/null
+++ b/ipc/chromium/src/base/command_line.cc
@@ -0,0 +1,394 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/command_line.h"
+
+#if defined(OS_WIN)
+#include <windows.h>
+#include <shellapi.h>
+#endif
+
+#include <algorithm>
+
+#include "base/logging.h"
+#include "base/singleton.h"
+#include "base/string_piece.h"
+#include "base/string_util.h"
+#include "base/sys_string_conversions.h"
+
+CommandLine* CommandLine::current_process_commandline_ = NULL;
+
+// Since we use a lazy match, make sure that longer versions (like L"--")
+// are listed before shorter versions (like L"-") of similar prefixes.
+#if defined(OS_WIN)
+const wchar_t* const kSwitchPrefixes[] = {L"--", L"-", L"/"};
+const wchar_t kSwitchTerminator[] = L"--";
+const wchar_t kSwitchValueSeparator[] = L"=";
+#elif defined(OS_POSIX)
+// Unixes don't use slash as a switch.
+const char* const kSwitchPrefixes[] = {"--", "-"};
+const char kSwitchTerminator[] = "--";
+const char kSwitchValueSeparator[] = "=";
+#endif
+
+#if defined(OS_WIN)
+// Lowercase a string. This is used to lowercase switch names.
+// Is this what we really want? It seems crazy to me. I've left it in
+// for backwards compatibility on Windows.
+static void Lowercase(std::wstring* parameter) {
+ transform(parameter->begin(), parameter->end(), parameter->begin(),
+ tolower);
+}
+#endif
+
+#if defined(OS_WIN)
+void CommandLine::ParseFromString(const std::wstring& command_line) {
+ TrimWhitespace(command_line, TRIM_ALL, &command_line_string_);
+
+ if (command_line_string_.empty())
+ return;
+
+ int num_args = 0;
+ wchar_t** args = NULL;
+
+ args = CommandLineToArgvW(command_line_string_.c_str(), &num_args);
+
+ // Populate program_ with the trimmed version of the first arg.
+ TrimWhitespace(args[0], TRIM_ALL, &program_);
+
+ bool parse_switches = true;
+ for (int i = 1; i < num_args; ++i) {
+ std::wstring arg;
+ TrimWhitespace(args[i], TRIM_ALL, &arg);
+
+ if (!parse_switches) {
+ loose_values_.push_back(arg);
+ continue;
+ }
+
+ if (arg == kSwitchTerminator) {
+ parse_switches = false;
+ continue;
+ }
+
+ std::string switch_string;
+ std::wstring switch_value;
+ if (IsSwitch(arg, &switch_string, &switch_value)) {
+ switches_[switch_string] = switch_value;
+ } else {
+ loose_values_.push_back(arg);
+ }
+ }
+
+ if (args)
+ LocalFree(args);
+}
+CommandLine::CommandLine(const std::wstring& program) {
+ if (!program.empty()) {
+ program_ = program;
+ command_line_string_ = L'"' + program + L'"';
+ }
+}
+#elif defined(OS_POSIX)
+CommandLine::CommandLine(int argc, const char* const* argv) {
+ for (int i = 0; i < argc; ++i)
+ argv_.push_back(argv[i]);
+ InitFromArgv();
+}
+CommandLine::CommandLine(const std::vector<std::string>& argv) {
+ argv_ = argv;
+ InitFromArgv();
+}
+
+void CommandLine::InitFromArgv() {
+ bool parse_switches = true;
+ for (size_t i = 1; i < argv_.size(); ++i) {
+ const std::string& arg = argv_[i];
+
+ if (!parse_switches) {
+ loose_values_.push_back(arg);
+ continue;
+ }
+
+ if (arg == kSwitchTerminator) {
+ parse_switches = false;
+ continue;
+ }
+
+ std::string switch_string;
+ std::string switch_value;
+ if (IsSwitch(arg, &switch_string, &switch_value)) {
+ switches_[switch_string] = switch_value;
+ } else {
+ loose_values_.push_back(arg);
+ }
+ }
+}
+
+CommandLine::CommandLine(const std::wstring& program) {
+ argv_.push_back(WideToASCII(program));
+}
+#endif
+
+// static
+bool CommandLine::IsSwitch(const StringType& parameter_string,
+ std::string* switch_string,
+ StringType* switch_value) {
+ switch_string->clear();
+ switch_value->clear();
+
+ for (size_t i = 0; i < arraysize(kSwitchPrefixes); ++i) {
+ StringType prefix(kSwitchPrefixes[i]);
+ if (parameter_string.find(prefix) != 0)
+ continue;
+
+ const size_t switch_start = prefix.length();
+ const size_t equals_position = parameter_string.find(
+ kSwitchValueSeparator, switch_start);
+ StringType switch_native;
+ if (equals_position == StringType::npos) {
+ switch_native = parameter_string.substr(switch_start);
+ } else {
+ switch_native = parameter_string.substr(
+ switch_start, equals_position - switch_start);
+ *switch_value = parameter_string.substr(equals_position + 1);
+ }
+#if defined(OS_WIN)
+ Lowercase(&switch_native);
+ *switch_string = WideToASCII(switch_native);
+#else
+ *switch_string = switch_native;
+#endif
+
+ return true;
+ }
+
+ return false;
+}
+
+// static
+void CommandLine::Init(int argc, const char* const* argv) {
+ DCHECK(current_process_commandline_ == NULL);
+#if defined(OS_WIN)
+ current_process_commandline_ = new CommandLine;
+ current_process_commandline_->ParseFromString(::GetCommandLineW());
+#elif defined(OS_POSIX)
+ current_process_commandline_ = new CommandLine(argc, argv);
+#endif
+}
+
+void CommandLine::Terminate() {
+ DCHECK(current_process_commandline_ != NULL);
+ delete current_process_commandline_;
+ current_process_commandline_ = NULL;
+}
+
+bool CommandLine::HasSwitch(const std::wstring& switch_string) const {
+ std::wstring lowercased_switch(switch_string);
+#if defined(OS_WIN)
+ Lowercase(&lowercased_switch);
+#endif
+ return switches_.find(WideToASCII(lowercased_switch)) != switches_.end();
+}
+
+std::wstring CommandLine::GetSwitchValue(
+ const std::wstring& switch_string) const {
+ std::wstring lowercased_switch(switch_string);
+#if defined(OS_WIN)
+ Lowercase(&lowercased_switch);
+#endif
+
+ std::map<std::string, StringType>::const_iterator result =
+ switches_.find(WideToASCII(lowercased_switch));
+
+ if (result == switches_.end()) {
+ return L"";
+ } else {
+#if defined(OS_WIN)
+ return result->second;
+#else
+ return ASCIIToWide(result->second);
+#endif
+ }
+}
+
+#if defined(OS_WIN)
+std::vector<std::wstring> CommandLine::GetLooseValues() const {
+ return loose_values_;
+}
+std::wstring CommandLine::program() const {
+ return program_;
+}
+#else
+std::vector<std::wstring> CommandLine::GetLooseValues() const {
+ std::vector<std::wstring> values;
+ for (size_t i = 0; i < loose_values_.size(); ++i)
+ values.push_back(ASCIIToWide(loose_values_[i]));
+ return values;
+}
+std::wstring CommandLine::program() const {
+ DCHECK(argv_.size() > 0);
+ return ASCIIToWide(argv_[0]);
+}
+#endif
+
+
+// static
+std::wstring CommandLine::PrefixedSwitchString(
+ const std::wstring& switch_string) {
+ return StringPrintf(L"%ls%ls",
+ kSwitchPrefixes[0],
+ switch_string.c_str());
+}
+
+// static
+std::wstring CommandLine::PrefixedSwitchStringWithValue(
+ const std::wstring& switch_string, const std::wstring& value_string) {
+ if (value_string.empty()) {
+ return PrefixedSwitchString(switch_string);
+ }
+
+ return StringPrintf(L"%ls%ls%ls%ls",
+ kSwitchPrefixes[0],
+ switch_string.c_str(),
+ kSwitchValueSeparator,
+ value_string.c_str());
+}
+
+#if defined(OS_WIN)
+void CommandLine::AppendSwitch(const std::wstring& switch_string) {
+ std::wstring prefixed_switch_string = PrefixedSwitchString(switch_string);
+ command_line_string_.append(L" ");
+ command_line_string_.append(prefixed_switch_string);
+ switches_[WideToASCII(switch_string)] = L"";
+}
+
+// Quote a string if necessary, such that CommandLineToArgvW() will
+// always process it as a single argument.
+static std::wstring WindowsStyleQuote(const std::wstring& arg) {
+ // We follow the quoting rules of CommandLineToArgvW.
+ // http://msdn.microsoft.com/en-us/library/17w5ykft.aspx
+ if (arg.find_first_of(L" \\\"\t") == std::wstring::npos) {
+ // No quoting necessary.
+ return arg;
+ }
+
+ std::wstring out;
+ out.push_back(L'"');
+ for (size_t i = 0; i < arg.size(); ++i) {
+ if (arg[i] == '\\') {
+ // Find the extent of this run of backslashes.
+ size_t start = i, end = start + 1;
+ for (; end < arg.size() && arg[end] == '\\'; ++end)
+ /* empty */;
+ size_t backslash_count = end - start;
+
+ // Backslashes are escapes only if the run is followed by a double quote.
+ // Since we also will end the string with a double quote, we escape for
+ // either a double quote or the end of the string.
+ if (end == arg.size() || arg[end] == '"') {
+ // To quote, we need to output 2x as many backslashes.
+ backslash_count *= 2;
+ }
+ for (size_t j = 0; j < backslash_count; ++j)
+ out.push_back('\\');
+
+ // Advance i to one before the end to balance i++ in loop.
+ i = end - 1;
+ } else if (arg[i] == '"') {
+ out.push_back('\\');
+ out.push_back('"');
+ } else {
+ out.push_back(arg[i]);
+ }
+ }
+ out.push_back('"');
+
+ return out;
+}
+
+void CommandLine::AppendSwitchWithValue(const std::wstring& switch_string,
+ const std::wstring& value_string) {
+ std::wstring quoted_value_string = WindowsStyleQuote(value_string);
+ std::wstring combined_switch_string =
+ PrefixedSwitchStringWithValue(switch_string, quoted_value_string);
+
+ command_line_string_.append(L" ");
+ command_line_string_.append(combined_switch_string);
+
+ switches_[WideToASCII(switch_string)] = value_string;
+}
+
+void CommandLine::AppendLooseValue(const std::wstring& value) {
+ command_line_string_.append(L" ");
+ command_line_string_.append(WindowsStyleQuote(value));
+}
+
+void CommandLine::AppendArguments(const CommandLine& other,
+ bool include_program) {
+ // Verify include_program is used correctly.
+ // Logic could be shorter but this is clearer.
+ DCHECK(include_program ? !other.program().empty() : other.program().empty());
+ command_line_string_ += L" " + other.command_line_string_;
+ std::map<std::string, StringType>::const_iterator i;
+ for (i = other.switches_.begin(); i != other.switches_.end(); ++i)
+ switches_[i->first] = i->second;
+}
+
+void CommandLine::PrependWrapper(const std::wstring& wrapper) {
+ // The wrapper may have embedded arguments (like "gdb --args"). In this case,
+ // we don't pretend to do anything fancy, we just split on spaces.
+ std::vector<std::wstring> wrapper_and_args;
+ SplitString(wrapper, ' ', &wrapper_and_args);
+ program_ = wrapper_and_args[0];
+ command_line_string_ = wrapper + L" " + command_line_string_;
+}
+
+#elif defined(OS_POSIX)
+void CommandLine::AppendSwitch(const std::wstring& switch_string) {
+ std::string ascii_switch = WideToASCII(switch_string);
+ argv_.push_back(kSwitchPrefixes[0] + ascii_switch);
+ switches_[ascii_switch] = "";
+}
+
+void CommandLine::AppendSwitchWithValue(const std::wstring& switch_string,
+ const std::wstring& value_string) {
+ std::string ascii_switch = WideToASCII(switch_string);
+ std::string ascii_value = WideToASCII(value_string);
+
+ argv_.push_back(kSwitchPrefixes[0] + ascii_switch +
+ kSwitchValueSeparator + ascii_value);
+ switches_[ascii_switch] = ascii_value;
+}
+
+void CommandLine::AppendLooseValue(const std::wstring& value) {
+ argv_.push_back(WideToASCII(value));
+}
+
+void CommandLine::AppendArguments(const CommandLine& other,
+ bool include_program) {
+ // Verify include_program is used correctly.
+ // Logic could be shorter but this is clearer.
+ DCHECK(include_program ? !other.program().empty() : other.program().empty());
+
+ size_t first_arg = include_program ? 0 : 1;
+ for (size_t i = first_arg; i < other.argv_.size(); ++i)
+ argv_.push_back(other.argv_[i]);
+ std::map<std::string, StringType>::const_iterator i;
+ for (i = other.switches_.begin(); i != other.switches_.end(); ++i)
+ switches_[i->first] = i->second;
+}
+
+void CommandLine::PrependWrapper(const std::wstring& wrapper_wide) {
+ // The wrapper may have embedded arguments (like "gdb --args"). In this case,
+ // we don't pretend to do anything fancy, we just split on spaces.
+ const std::string wrapper = WideToASCII(wrapper_wide);
+ std::vector<std::string> wrapper_and_args;
+ SplitString(wrapper, ' ', &wrapper_and_args);
+ argv_.insert(argv_.begin(), wrapper_and_args.begin(), wrapper_and_args.end());
+}
+
+#endif
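The backslash-doubling rule in WindowsStyleQuote() is easiest to follow on concrete inputs. A worked example (illustrative only, not part of the patch) of what the function appends and how CommandLineToArgvW() reparses it:

// Illustrative only: raw argument -> quoted form -> what CommandLineToArgvW sees.
//
//   C:\has space\x   ->  "C:\has space\x"   ->  C:\has space\x
//                        (interior backslashes not followed by '"' are literal)
//   C:\dir\          ->  "C:\dir\\"         ->  C:\dir\
//                        (the run before the closing quote is doubled)
//   say "hi"         ->  "say \"hi\""       ->  say "hi"
//                        (embedded quotes are escaped)
//   plain            ->  plain              ->  plain
//                        (no space, tab, quote, or backslash: left unquoted)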
diff --git a/ipc/chromium/src/base/command_line.h b/ipc/chromium/src/base/command_line.h
new file mode 100644
index 000000000..3ce94ed1b
--- /dev/null
+++ b/ipc/chromium/src/base/command_line.h
@@ -0,0 +1,197 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This class works with command lines: building and parsing.
+// Switches can optionally have a value attached using an equals sign,
+// as in "-switch=value". Arguments that aren't prefixed with a
+// switch prefix are considered "loose parameters". Switch names are
+// case-insensitive. An argument of "--" terminates switch
+// parsing, causing everything after it to be treated as loose
+// parameters.
+
+// There is a singleton read-only CommandLine that represents the command
+// line that the current process was started with. It must be initialized
+// in main() (or whatever the platform's equivalent function is).
+
+#ifndef BASE_COMMAND_LINE_H_
+#define BASE_COMMAND_LINE_H_
+
+#include "build/build_config.h"
+
+#include <map>
+#include <string>
+#include <vector>
+
+#include "base/basictypes.h"
+#include "base/logging.h"
+
+class InProcessBrowserTest;
+
+class CommandLine {
+ public:
+#if defined(OS_WIN)
+ // Creates a parsed version of the given command-line string.
+ // The program name is assumed to be the first item in the string.
+ void ParseFromString(const std::wstring& command_line);
+#elif defined(OS_POSIX)
+ // Initialize from an argv vector (or directly from main()'s argv).
+ CommandLine(int argc, const char* const* argv);
+ explicit CommandLine(const std::vector<std::string>& argv);
+#endif
+
+ // Construct a new, empty command line.
+ // |program| is the name of the program to run (aka argv[0]).
+ // TODO(port): should be a FilePath.
+ explicit CommandLine(const std::wstring& program);
+
+ // Initialize the current process CommandLine singleton. On Windows,
+ // ignores its arguments (we instead parse GetCommandLineW()
+ // directly) because we don't trust the CRT's parsing of the command
+ // line, but it still must be called to set up the command line.
+ static void Init(int argc, const char* const* argv);
+
+ // Destroys the current process CommandLine singleton. This is necessary if
+ // you want to reset the base library to its initial state (for example in an
+ // outer library that needs to be able to terminate, and be re-initialized).
+ // If Init is called only once, e.g. in main(), calling Terminate() is not
+ // necessary.
+ static void Terminate();
+
+ // Get the singleton CommandLine representing the current process's
+ // command line.
+ static const CommandLine* ForCurrentProcess() {
+ DCHECK(current_process_commandline_);
+ return current_process_commandline_;
+ }
+
+ static bool IsInitialized() {
+ return !!current_process_commandline_;
+ }
+
+ // Returns true if this command line contains the given switch.
+ // (Switch names are case-insensitive.)
+ bool HasSwitch(const std::wstring& switch_string) const;
+
+ // Returns the value associated with the given switch. If the
+ // switch has no value or isn't present, this method returns
+ // the empty string.
+ std::wstring GetSwitchValue(const std::wstring& switch_string) const;
+
+ // Get the remaining arguments to the command.
+ // WARNING: this is incorrect on POSIX; we must do string conversions.
+ std::vector<std::wstring> GetLooseValues() const;
+
+#if defined(OS_WIN)
+ // Returns the original command line string.
+ const std::wstring& command_line_string() const {
+ return command_line_string_;
+ }
+#elif defined(OS_POSIX)
+ // Returns the original command line string as a vector of strings.
+ const std::vector<std::string>& argv() const {
+ return argv_;
+ }
+#endif
+
+ // Returns the program part of the command line string (the first item).
+ std::wstring program() const;
+
+ // Return a copy of the string prefixed with a switch prefix.
+ // Used internally.
+ static std::wstring PrefixedSwitchString(const std::wstring& switch_string);
+
+ // Return a copy of the string prefixed with a switch prefix,
+ // and appended with the given value. Used internally.
+ static std::wstring PrefixedSwitchStringWithValue(
+ const std::wstring& switch_string,
+ const std::wstring& value_string);
+
+ // Appends the given switch string (preceded by a space and a switch
+ // prefix) to the given string.
+ void AppendSwitch(const std::wstring& switch_string);
+
+ // Appends the given switch string (preceded by a space and a switch
+ // prefix) to the given string, with the given value attached.
+ void AppendSwitchWithValue(const std::wstring& switch_string,
+ const std::wstring& value_string);
+
+ // Append a loose value to the command line.
+ void AppendLooseValue(const std::wstring& value);
+
+#if defined(OS_WIN)
+ void AppendLooseValue(const wchar_t* value) {
+ AppendLooseValue(std::wstring(value));
+ }
+#endif
+
+ // Append the arguments from another command line to this one.
+ // If |include_program| is true, include |other|'s program as well.
+ void AppendArguments(const CommandLine& other,
+ bool include_program);
+
+ // On POSIX systems it's common to run processes via a wrapper (like
+ // "valgrind" or "gdb --args").
+ void PrependWrapper(const std::wstring& wrapper);
+
+ private:
+ friend class InProcessBrowserTest;
+
+ CommandLine() {}
+
+ // Used by InProcessBrowserTest.
+ static CommandLine* ForCurrentProcessMutable() {
+ DCHECK(current_process_commandline_);
+ return current_process_commandline_;
+ }
+
+ // The singleton CommandLine instance representing the current process's
+ // command line.
+ static CommandLine* current_process_commandline_;
+
+ // We store a platform-native version of the command line, used when building
+ // up a new command line to be executed. This ifdef delimits that code.
+
+#if defined(OS_WIN)
+ // The quoted, space-separated command-line string.
+ std::wstring command_line_string_;
+
+ // The name of the program.
+ std::wstring program_;
+
+ // The type of native command line arguments.
+ typedef std::wstring StringType;
+
+#elif defined(OS_POSIX)
+ // The argv array, with the program name in argv_[0].
+ std::vector<std::string> argv_;
+
+ // The type of native command line arguments.
+ typedef std::string StringType;
+
+  // Shared by the two POSIX constructor forms. Initialize from argv_.
+ void InitFromArgv();
+#endif
+
+ // Returns true and fills in |switch_string| and |switch_value|
+ // if |parameter_string| represents a switch.
+ static bool IsSwitch(const StringType& parameter_string,
+ std::string* switch_string,
+ StringType* switch_value);
+
+ // Parsed-out values.
+ std::map<std::string, StringType> switches_;
+
+ // Non-switch command-line arguments.
+ std::vector<StringType> loose_values_;
+
+ // We allow copy constructors, because a common pattern is to grab a
+ // copy of the current process's command line and then add some
+ // flags to it. E.g.:
+ // CommandLine cl(*CommandLine::ForCurrentProcess());
+ // cl.AppendSwitch(...);
+};
+
+#endif // BASE_COMMAND_LINE_H_
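
A short usage sketch for the API declared above (not part of the patch; the switch names and the ExampleCommandLineUse function are invented for illustration):

// Hypothetical sketch, not part of the patch. Switch names are made up.
#include <string>
#include "base/command_line.h"

void ExampleCommandLineUse(int argc, const char* const* argv) {
  CommandLine::Init(argc, argv);  // Once, early in main().

  const CommandLine* cl = CommandLine::ForCurrentProcess();
  if (cl->HasSwitch(L"enable-logging")) {
    // Empty string if the switch is absent or has no value.
    std::wstring level = cl->GetSwitchValue(L"log-level");
    // ... configure logging from |level| ...
  }

  // The copy-and-extend pattern mentioned at the bottom of the header.
  CommandLine child(*cl);
  child.AppendSwitchWithValue(L"channel", L"pipe-name");
  child.AppendLooseValue(L"extra-arg");
}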
diff --git a/ipc/chromium/src/base/compiler_specific.h b/ipc/chromium/src/base/compiler_specific.h
new file mode 100644
index 000000000..509dfa4b4
--- /dev/null
+++ b/ipc/chromium/src/base/compiler_specific.h
@@ -0,0 +1,79 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_COMPILER_SPECIFIC_H_
+#define BASE_COMPILER_SPECIFIC_H_
+
+#include "build/build_config.h"
+
+#if defined(COMPILER_MSVC)
+
+// Macros for suppressing and disabling warnings on MSVC.
+//
+// Warning numbers are enumerated at:
+// http://msdn.microsoft.com/en-us/library/8x5x43k7(VS.80).aspx
+//
+// The warning pragma:
+// http://msdn.microsoft.com/en-us/library/2c8f766e(VS.80).aspx
+//
+// Using __pragma instead of #pragma inside macros:
+// http://msdn.microsoft.com/en-us/library/d9x1s805.aspx
+
+// MSVC_SUPPRESS_WARNING disables warning |n| for the remainder of the line and
+// for the next line of the source file.
+#define MSVC_SUPPRESS_WARNING(n) __pragma(warning(suppress:n))
+
+// MSVC_PUSH_DISABLE_WARNING pushes |n| onto a stack of warnings to be disabled.
+// The warning remains disabled until popped by MSVC_POP_WARNING.
+#define MSVC_PUSH_DISABLE_WARNING(n) __pragma(warning(push)) \
+ __pragma(warning(disable:n))
+
+// MSVC_PUSH_WARNING_LEVEL pushes |n| as the global warning level. The level
+// remains in effect until popped by MSVC_POP_WARNING(). Use 0 to disable all
+// warnings.
+#define MSVC_PUSH_WARNING_LEVEL(n) __pragma(warning(push, n))
+
+// Pop effects of innermost MSVC_PUSH_* macro.
+#define MSVC_POP_WARNING() __pragma(warning(pop))
+
+#define MSVC_DISABLE_OPTIMIZE() __pragma(optimize("", off))
+#define MSVC_ENABLE_OPTIMIZE() __pragma(optimize("", on))
+
+// Allows |this| to be passed as an argument in constructor initializer lists.
+// This uses push/pop instead of the seemingly simpler suppress feature to avoid
+// having the warning be disabled for more than just |code|.
+//
+// Example usage:
+// Foo::Foo() : x(NULL), ALLOW_THIS_IN_INITIALIZER_LIST(y(this)), z(3) {}
+//
+// Compiler warning C4355: 'this': used in base member initializer list:
+// http://msdn.microsoft.com/en-us/library/3c594ae3(VS.80).aspx
+#define ALLOW_THIS_IN_INITIALIZER_LIST(code) MSVC_PUSH_DISABLE_WARNING(4355) \
+ code \
+ MSVC_POP_WARNING()
+
+#else // Not MSVC
+
+#define MSVC_SUPPRESS_WARNING(n)
+#define MSVC_PUSH_DISABLE_WARNING(n)
+#define MSVC_PUSH_WARNING_LEVEL(n)
+#define MSVC_POP_WARNING()
+#define MSVC_DISABLE_OPTIMIZE()
+#define MSVC_ENABLE_OPTIMIZE()
+#define ALLOW_THIS_IN_INITIALIZER_LIST(code) code
+
+#endif // COMPILER_MSVC
+
+
+#if defined(COMPILER_GCC)
+#define ALLOW_UNUSED __attribute__((unused))
+#define WARN_UNUSED_RESULT __attribute__((warn_unused_result))
+#else // Not GCC
+#define ALLOW_UNUSED
+#define WARN_UNUSED_RESULT
+#endif
+
+#endif // BASE_COMPILER_SPECIFIC_H_
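
A hedged illustration of how the macros above are meant to be used (not part of the patch; the Widget, Observer, and TryReserve names are invented for the example):

// Hypothetical example only; Widget, Observer and TryReserve are invented.
#include "base/compiler_specific.h"

// Callers that ignore the return value get a warning on GCC; on other
// compilers WARN_UNUSED_RESULT expands to nothing.
bool TryReserve(int bytes) WARN_UNUSED_RESULT;

class Widget;

class Observer {
 public:
  explicit Observer(Widget* owner) : owner_(owner) {}
 private:
  Widget* owner_;
};

class Widget {
 public:
  // MSVC warning C4355 is disabled only around this one initializer; on
  // non-MSVC compilers the macro passes |code| through unchanged.
  Widget() : ALLOW_THIS_IN_INITIALIZER_LIST(observer_(this)) {}
 private:
  Observer observer_;
};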
diff --git a/ipc/chromium/src/base/condition_variable.h b/ipc/chromium/src/base/condition_variable.h
new file mode 100644
index 000000000..4c95dc0bc
--- /dev/null
+++ b/ipc/chromium/src/base/condition_variable.h
@@ -0,0 +1,176 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// ConditionVariable wraps pthreads condition variable synchronization or, on
+// Windows, simulates it. This functionality is very helpful for having
+// several threads wait for an event, as is common with a thread pool managed
+// by a master. The meaning of such an event in the (worker) thread pool
+// scenario is that additional tasks are now available for processing. It is
+// used in Chrome in the DNS prefetching system to notify worker threads that
+// a queue now has items (tasks) which need to be tended to. A related use
+// would have a pool manager waiting on a ConditionVariable, waiting for a
+// thread in the pool to announce (signal) that there is now more room in a
+// (bounded size) communications queue for the manager to deposit tasks, or,
+// as a second example, that the queue of tasks is completely empty and all
+// workers are waiting.
+//
+// USAGE NOTE 1: spurious signal events are possible with this and
+// most implementations of condition variables. As a result, be
+// *sure* to retest your condition before proceeding. The following
+// is a good example of doing this correctly:
+//
+// while (!work_to_be_done()) Wait(...);
+//
+// In contrast do NOT do the following:
+//
+// if (!work_to_be_done()) Wait(...); // Don't do this.
+//
+// Especially avoid the above if you are relying on some other thread only
+// issuing a signal *if* there is work to do. There can and will
+// be spurious signals. Recheck the state on the waiting thread before
+// assuming the signal was intentional. Caveat caller ;-).
+//
+// USAGE NOTE 2: Broadcast() frees up all waiting threads at once,
+// which leads to contention for the locks they all held when they
+// called Wait(). This results in POOR performance. A much better
+// approach to getting a lot of threads out of Wait() is to have each
+// thread (upon exiting Wait()) call Signal() to free up another
+// Wait'ing thread. Look at condition_variable_unittest.cc for
+// both examples.
+//
+// Broadcast() can be used nicely during teardown, as it gets the job
+// done, and leaves no sleeping threads... and performance is less
+// critical at that point.
+//
+// The semantics of Broadcast() are carefully crafted so that *all*
+// threads that were waiting when the request was made will indeed
+// get signaled. Some implementations mess up, and don't signal them
+// all, while others allow the wait to be effectively turned off (for
+// a time, while waiting threads come around). This implementation
+// appears correct, as it will not "lose" any signals, and will guarantee
+// that all threads get signaled by Broadcast().
+//
+// This implementation offers support for "performance" in its selection of
+// which thread to revive. Performance, in direct contrast with "fairness,"
+// assures that the thread that most recently began to Wait() is selected by
+// Signal to revive. Fairness would (if publicly supported) assure that the
+// thread that has Wait()ed the longest is selected. The default policy
+// may improve performance, as the selected thread may have a greater chance of
+// having some of its stack data in various CPU caches.
+//
+// For a discussion of the many very subtle implementation details, see the FAQ
+// at the end of condition_variable_win.cc.
+
+#ifndef BASE_CONDITION_VARIABLE_H_
+#define BASE_CONDITION_VARIABLE_H_
+
+#include "base/lock.h"
+
+namespace base {
+ class TimeDelta;
+}
+
+class ConditionVariable {
+ public:
+ // Construct a cv for use with ONLY one user lock.
+ explicit ConditionVariable(Lock* user_lock);
+
+ ~ConditionVariable();
+
+ // Wait() releases the caller's critical section atomically as it starts to
+  // sleep, and then reacquires it when it is signaled.
+ void Wait();
+ void TimedWait(const base::TimeDelta& max_time);
+
+ // Broadcast() revives all waiting threads.
+ void Broadcast();
+ // Signal() revives one waiting thread.
+ void Signal();
+
+ private:
+
+#if defined(OS_WIN)
+
+ // Define Event class that is used to form circularly linked lists.
+ // The list container is an element with NULL as its handle_ value.
+ // The actual list elements have a non-zero handle_ value.
+ // All calls to methods MUST be done under protection of a lock so that links
+ // can be validated. Without the lock, some links might asynchronously
+ // change, and the assertions would fail (as would list change operations).
+ class Event {
+ public:
+ // Default constructor with no arguments creates a list container.
+ Event();
+ ~Event();
+
+ // InitListElement transitions an instance from a container, to an element.
+ void InitListElement();
+
+ // Methods for use on lists.
+ bool IsEmpty() const;
+ void PushBack(Event* other);
+ Event* PopFront();
+ Event* PopBack();
+
+ // Methods for use on list elements.
+ // Accessor method.
+ HANDLE handle() const;
+ // Pull an element from a list (if it's in one).
+ Event* Extract();
+
+ // Method for use on a list element or on a list.
+ bool IsSingleton() const;
+
+ private:
+ // Provide pre/post conditions to validate correct manipulations.
+ bool ValidateAsDistinct(Event* other) const;
+ bool ValidateAsItem() const;
+ bool ValidateAsList() const;
+ bool ValidateLinks() const;
+
+ HANDLE handle_;
+ Event* next_;
+ Event* prev_;
+ DISALLOW_COPY_AND_ASSIGN(Event);
+ };
+
+ // Note that RUNNING is an unlikely number to have in RAM by accident.
+ // This helps with defensive destructor coding in the face of user error.
+ enum RunState { SHUTDOWN = 0, RUNNING = 64213 };
+
+ // Internal implementation methods supporting Wait().
+ Event* GetEventForWaiting();
+ void RecycleEvent(Event* used_event);
+
+ RunState run_state_;
+
+ // Private critical section for access to member data.
+ Lock internal_lock_;
+
+ // Lock that is acquired before calling Wait().
+ Lock& user_lock_;
+
+ // Events that threads are blocked on.
+ Event waiting_list_;
+
+ // Free list for old events.
+ Event recycling_list_;
+ int recycling_list_size_;
+
+ // The number of allocated, but not yet deleted events.
+ int allocation_counter_;
+
+#elif defined(OS_POSIX)
+
+ pthread_cond_t condition_;
+ pthread_mutex_t* user_mutex_;
+
+#endif
+
+ DISALLOW_COPY_AND_ASSIGN(ConditionVariable);
+};
+
+#endif // BASE_CONDITION_VARIABLE_H_
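
A minimal sketch of the wait-loop pattern from USAGE NOTE 1 above, assuming the AutoLock RAII helper declared in base/lock.h; the WorkQueue class is hypothetical and not part of the patch:

// Hypothetical sketch; WorkQueue is invented for illustration and assumes the
// AutoLock RAII helper from base/lock.h.
#include <queue>

#include "base/condition_variable.h"
#include "base/lock.h"

class WorkQueue {
 public:
  WorkQueue() : cv_(&lock_) {}

  void Push(int task) {
    AutoLock locked(lock_);
    tasks_.push(task);
    cv_.Signal();  // Wake one waiting worker (see USAGE NOTE 2).
  }

  int PopBlocking() {
    AutoLock locked(lock_);
    // USAGE NOTE 1: re-test the predicate in a loop; spurious wakeups happen.
    while (tasks_.empty())
      cv_.Wait();
    int task = tasks_.front();
    tasks_.pop();
    return task;
  }

 private:
  Lock lock_;             // The one lock this condition variable is bound to.
  ConditionVariable cv_;  // Constructed with &lock_; destroy only after all
                          // waiters have returned.
  std::queue<int> tasks_;
};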
diff --git a/ipc/chromium/src/base/condition_variable_posix.cc b/ipc/chromium/src/base/condition_variable_posix.cc
new file mode 100644
index 000000000..42cedad15
--- /dev/null
+++ b/ipc/chromium/src/base/condition_variable_posix.cc
@@ -0,0 +1,91 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/condition_variable.h"
+
+#include <errno.h>
+#include <sys/time.h>
+
+#include "base/lock.h"
+#include "base/lock_impl.h"
+#include "base/logging.h"
+#include "base/time.h"
+
+using base::Time;
+using base::TimeDelta;
+
+ConditionVariable::ConditionVariable(Lock* user_lock)
+ : user_mutex_(user_lock->lock_impl()->os_lock()) {
+ int rv = 0;
+#if !defined(OS_MACOSX) && \
+ !(defined(OS_ANDROID) && defined(HAVE_PTHREAD_COND_TIMEDWAIT_MONOTONIC))
+ pthread_condattr_t attrs;
+ rv = pthread_condattr_init(&attrs);
+ DCHECK_EQ(0, rv);
+ pthread_condattr_setclock(&attrs, CLOCK_MONOTONIC);
+ rv = pthread_cond_init(&condition_, &attrs);
+ pthread_condattr_destroy(&attrs);
+#else
+ rv = pthread_cond_init(&condition_, NULL);
+#endif
+ DCHECK_EQ(0, rv);
+}
+
+ConditionVariable::~ConditionVariable() {
+ int rv = pthread_cond_destroy(&condition_);
+ DCHECK(rv == 0);
+}
+
+void ConditionVariable::Wait() {
+ int rv = pthread_cond_wait(&condition_, user_mutex_);
+ DCHECK(rv == 0);
+}
+
+void ConditionVariable::TimedWait(const TimeDelta& max_time) {
+ int64_t usecs = max_time.InMicroseconds();
+
+ struct timespec relative_time;
+ relative_time.tv_sec = usecs / Time::kMicrosecondsPerSecond;
+ relative_time.tv_nsec =
+ (usecs % Time::kMicrosecondsPerSecond) * Time::kNanosecondsPerMicrosecond;
+
+#if defined(OS_MACOSX)
+ int rv = pthread_cond_timedwait_relative_np(
+ &condition_, user_mutex_, &relative_time);
+#else
+ // The timeout argument to pthread_cond_timedwait is in absolute time.
+ struct timespec now;
+ clock_gettime(CLOCK_MONOTONIC, &now);
+
+ struct timespec absolute_time;
+ absolute_time.tv_sec = now.tv_sec;
+ absolute_time.tv_nsec = now.tv_nsec;
+ absolute_time.tv_sec += relative_time.tv_sec;
+ absolute_time.tv_nsec += relative_time.tv_nsec;
+ absolute_time.tv_sec += absolute_time.tv_nsec / Time::kNanosecondsPerSecond;
+ absolute_time.tv_nsec %= Time::kNanosecondsPerSecond;
+ DCHECK_GE(absolute_time.tv_sec, now.tv_sec); // Overflow paranoia
+
+#if defined(OS_ANDROID) && defined(HAVE_PTHREAD_COND_TIMEDWAIT_MONOTONIC)
+ int rv = pthread_cond_timedwait_monotonic_np(
+ &condition_, user_mutex_, &absolute_time);
+#else
+ int rv = pthread_cond_timedwait(&condition_, user_mutex_, &absolute_time);
+#endif // OS_ANDROID && HAVE_PTHREAD_COND_TIMEDWAIT_MONOTONIC
+#endif // OS_MACOSX
+
+ DCHECK(rv == 0 || rv == ETIMEDOUT);
+}
+
+void ConditionVariable::Broadcast() {
+ int rv = pthread_cond_broadcast(&condition_);
+ DCHECK(rv == 0);
+}
+
+void ConditionVariable::Signal() {
+ int rv = pthread_cond_signal(&condition_);
+ DCHECK(rv == 0);
+}
diff --git a/ipc/chromium/src/base/condition_variable_win.cc b/ipc/chromium/src/base/condition_variable_win.cc
new file mode 100644
index 000000000..b0a3df5fa
--- /dev/null
+++ b/ipc/chromium/src/base/condition_variable_win.cc
@@ -0,0 +1,448 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/condition_variable.h"
+
+#include <stack>
+
+#include "base/lock.h"
+#include "base/logging.h"
+#include "base/time.h"
+
+using base::TimeDelta;
+
+ConditionVariable::ConditionVariable(Lock* user_lock)
+ : user_lock_(*user_lock),
+ run_state_(RUNNING),
+ allocation_counter_(0),
+ recycling_list_size_(0) {
+ DCHECK(user_lock);
+}
+
+ConditionVariable::~ConditionVariable() {
+ AutoLock auto_lock(internal_lock_);
+ run_state_ = SHUTDOWN; // Prevent any more waiting.
+
+ DCHECK_EQ(recycling_list_size_, allocation_counter_);
+ if (recycling_list_size_ != allocation_counter_) { // Rare shutdown problem.
+ // There are threads of execution still in this->TimedWait() and yet the
+ // caller has instigated the destruction of this instance :-/.
+ // A common reason for such "overly hasty" destruction is that the caller
+ // was not willing to wait for all the threads to terminate. Such hasty
+ // actions are a violation of our usage contract, but we'll give the
+ // waiting thread(s) one last chance to exit gracefully (prior to our
+ // destruction).
+ // Note: waiting_list_ *might* be empty, but recycling is still pending.
+ AutoUnlock auto_unlock(internal_lock_);
+ Broadcast(); // Make sure all waiting threads have been signaled.
+ Sleep(10); // Give threads a chance to grab internal_lock_.
+ // All contained threads should be blocked on user_lock_ by now :-).
+ } // Reacquire internal_lock_.
+
+ DCHECK_EQ(recycling_list_size_, allocation_counter_);
+}
+
+void ConditionVariable::Wait() {
+ // Default to "wait forever" timing, which means have to get a Signal()
+ // or Broadcast() to come out of this wait state.
+ TimedWait(TimeDelta::FromMilliseconds(INFINITE));
+}
+
+void ConditionVariable::TimedWait(const TimeDelta& max_time) {
+ Event* waiting_event;
+ HANDLE handle;
+ {
+ AutoLock auto_lock(internal_lock_);
+ if (RUNNING != run_state_) return; // Destruction in progress.
+ waiting_event = GetEventForWaiting();
+ handle = waiting_event->handle();
+ DCHECK(handle);
+ } // Release internal_lock.
+
+ {
+ AutoUnlock unlock(user_lock_); // Release caller's lock
+ WaitForSingleObject(handle, static_cast<DWORD>(max_time.InMilliseconds()));
+ // Minimize spurious signal creation window by recycling asap.
+ AutoLock auto_lock(internal_lock_);
+ RecycleEvent(waiting_event);
+ // Release internal_lock_
+  } // Reacquire caller's lock to depth at entry.
+}
+
+// Broadcast() is guaranteed to signal all threads that were waiting (i.e., had
+// a cv_event internally allocated for them) before Broadcast() was called.
+void ConditionVariable::Broadcast() {
+ std::stack<HANDLE> handles; // See FAQ-question-10.
+ {
+ AutoLock auto_lock(internal_lock_);
+ if (waiting_list_.IsEmpty())
+ return;
+ while (!waiting_list_.IsEmpty())
+ // This is not a leak from waiting_list_. See FAQ-question 12.
+ handles.push(waiting_list_.PopBack()->handle());
+ } // Release internal_lock_.
+ while (!handles.empty()) {
+ SetEvent(handles.top());
+ handles.pop();
+ }
+}
+
+// Signal() will select one of the waiting threads, and signal it (signal its
+// cv_event). For better performance we signal the thread that went to sleep
+// most recently (LIFO). If we want fairness, then we wake the thread that has
+// been sleeping the longest (FIFO).
+void ConditionVariable::Signal() {
+ HANDLE handle;
+ {
+ AutoLock auto_lock(internal_lock_);
+ if (waiting_list_.IsEmpty())
+ return; // No one to signal.
+    // Only the performance (LIFO) option is implemented.
+ // This is not a leak from waiting_list. See FAQ-question 12.
+ handle = waiting_list_.PopBack()->handle(); // LIFO.
+ } // Release internal_lock_.
+ SetEvent(handle);
+}
+
+// GetEventForWaiting() provides a unique cv_event for any caller that needs to
+// wait. This means that (worst case) we may over time create as many cv_event
+// objects as there are threads simultaneously using this instance's Wait()
+// functionality.
+ConditionVariable::Event* ConditionVariable::GetEventForWaiting() {
+ // We hold internal_lock, courtesy of Wait().
+ Event* cv_event;
+ if (0 == recycling_list_size_) {
+ DCHECK(recycling_list_.IsEmpty());
+ cv_event = new Event();
+ cv_event->InitListElement();
+ allocation_counter_++;
+ // CHECK_NE is not defined in our codebase, so we have to use CHECK
+ CHECK(cv_event->handle());
+ } else {
+ cv_event = recycling_list_.PopFront();
+ recycling_list_size_--;
+ }
+ waiting_list_.PushBack(cv_event);
+ return cv_event;
+}
+
+// RecycleEvent() takes a cv_event that was previously used for Wait()ing, and
+// recycles it for use in future Wait() calls for this or other threads.
+// Note that there is a tiny chance that the cv_event is still signaled when we
+// obtain it, and that can cause spurious signals (if/when we re-use the
+// cv_event), but such is quite rare (see FAQ-question-5).
+void ConditionVariable::RecycleEvent(Event* used_event) {
+ // We hold internal_lock, courtesy of Wait().
+ // If the cv_event timed out, then it is necessary to remove it from
+ // waiting_list_. If it was selected by Broadcast() or Signal(), then it is
+ // already gone.
+ used_event->Extract(); // Possibly redundant
+ recycling_list_.PushBack(used_event);
+ recycling_list_size_++;
+}
+//------------------------------------------------------------------------------
+// The next section provides the implementation for the private Event class.
+//------------------------------------------------------------------------------
+
+// Event provides a doubly-linked-list of events for use exclusively by the
+// ConditionVariable class.
+
+// This custom container was crafted because no simple combination of STL
+// classes appeared to support the functionality required. The specific
+// unusual requirement for a linked-list-class is support for the Extract()
+// method, which can remove an element from a list, potentially for insertion
+// into a second list. Most critically, the Extract() method is idempotent,
+// turning the indicated element into an extracted singleton whether it was
+// contained in a list or not. This functionality allows one (or more) of the
+// threads to do the extraction. The iterator that identifies this extractable
+// element (in this case, a pointer to the list element) can be used after
+// arbitrary manipulation of the (possibly) enclosing list container. In
+// general, STL containers do not provide iterators that can be used across
+// modifications (insertions/extractions) of the enclosing containers, and
+// certainly don't provide iterators that can be used if the identified
+// element is *deleted* (removed) from the container.
+
+// It is possible to use multiple redundant containers, such as an STL list,
+// and an STL map, to achieve similar container semantics. This container has
+// only O(1) methods, while the corresponding (multiple) STL container approach
+// would have more complex O(log(N)) methods (yeah... N isn't that large).
+// Multiple containers also makes correctness more difficult to assert, as
+// data is redundantly stored and maintained, which is generally evil.
+
+ConditionVariable::Event::Event() : handle_(0) {
+ next_ = prev_ = this; // Self referencing circular.
+}
+
+ConditionVariable::Event::~Event() {
+ if (0 == handle_) {
+ // This is the list holder
+ while (!IsEmpty()) {
+ Event* cv_event = PopFront();
+ DCHECK(cv_event->ValidateAsItem());
+ delete cv_event;
+ }
+ }
+ DCHECK(IsSingleton());
+ if (0 != handle_) {
+ int ret_val = CloseHandle(handle_);
+ DCHECK(ret_val);
+ }
+}
+
+// Change a container instance permanently into an element of a list.
+void ConditionVariable::Event::InitListElement() {
+ DCHECK(!handle_);
+ handle_ = CreateEvent(NULL, false, false, NULL);
+ CHECK(handle_);
+}
+
+// Methods for use on lists.
+bool ConditionVariable::Event::IsEmpty() const {
+ DCHECK(ValidateAsList());
+ return IsSingleton();
+}
+
+void ConditionVariable::Event::PushBack(Event* other) {
+ DCHECK(ValidateAsList());
+ DCHECK(other->ValidateAsItem());
+ DCHECK(other->IsSingleton());
+ // Prepare other for insertion.
+ other->prev_ = prev_;
+ other->next_ = this;
+ // Cut into list.
+ prev_->next_ = other;
+ prev_ = other;
+ DCHECK(ValidateAsDistinct(other));
+}
+
+ConditionVariable::Event* ConditionVariable::Event::PopFront() {
+ DCHECK(ValidateAsList());
+ DCHECK(!IsSingleton());
+ return next_->Extract();
+}
+
+ConditionVariable::Event* ConditionVariable::Event::PopBack() {
+ DCHECK(ValidateAsList());
+ DCHECK(!IsSingleton());
+ return prev_->Extract();
+}
+
+// Methods for use on list elements.
+// Accessor method.
+HANDLE ConditionVariable::Event::handle() const {
+ DCHECK(ValidateAsItem());
+ return handle_;
+}
+
+// Pull an element from a list (if it's in one).
+ConditionVariable::Event* ConditionVariable::Event::Extract() {
+ DCHECK(ValidateAsItem());
+ if (!IsSingleton()) {
+ // Stitch neighbors together.
+ next_->prev_ = prev_;
+ prev_->next_ = next_;
+ // Make extractee into a singleton.
+ prev_ = next_ = this;
+ }
+ DCHECK(IsSingleton());
+ return this;
+}
+
+// Method for use on a list element or on a list.
+bool ConditionVariable::Event::IsSingleton() const {
+ DCHECK(ValidateLinks());
+ return next_ == this;
+}
+
+// Provide pre/post conditions to validate correct manipulations.
+bool ConditionVariable::Event::ValidateAsDistinct(Event* other) const {
+ return ValidateLinks() && other->ValidateLinks() && (this != other);
+}
+
+bool ConditionVariable::Event::ValidateAsItem() const {
+ return (0 != handle_) && ValidateLinks();
+}
+
+bool ConditionVariable::Event::ValidateAsList() const {
+ return (0 == handle_) && ValidateLinks();
+}
+
+bool ConditionVariable::Event::ValidateLinks() const {
+ // Make sure both of our neighbors have links that point back to us.
+ // We don't do the O(n) check and traverse the whole loop, and instead only
+ // do a local check to (and returning from) our immediate neighbors.
+ return (next_->prev_ == this) && (prev_->next_ == this);
+}
+
+
+/*
+FAQ On subtle implementation details:
+
+1) What makes this problem subtle? Please take a look at "Strategies
+for Implementing POSIX Condition Variables on Win32" by Douglas
+C. Schmidt and Irfan Pyarali.
+http://www.cs.wustl.edu/~schmidt/win32-cv-1.html It includes
+discussions of numerous flawed strategies for implementing this
+functionality. I'm not convinced that even the final proposed
+implementation has semantics that are as nice as this implementation
+(especially with regard to Broadcast() and the impact on threads that
+try to Wait() after a Broadcast() has been called, but before all the
+original waiting threads have been signaled).
+
+2) Why can't you use a single wait_event for all threads that call
+Wait()? See FAQ-question-1, or consider the following: If a single
+event were used, then numerous threads calling Wait() could release
+their cs locks, and be preempted just before calling
+WaitForSingleObject(). If a call to Broadcast() was then presented on
+a second thread, it would be impossible to actually signal all
+waiting(?) threads. Some number of SetEvent() calls *could* be made,
+but there could be no guarantee that those led to more than one
+signaled thread (SetEvent()'s may be discarded after the first!), and
+there could be no guarantee that the SetEvent() calls didn't just
+awaken "other" threads that hadn't even started waiting yet (oops).
+Without any limit on the number of requisite SetEvent() calls, the
+system would be forced to do many such calls, allowing many new waits
+to receive spurious signals.
+
+3) How does this implementation cause spurious signal events? The
+cause in this implementation involves a race between a signal via
+time-out and a signal via Signal() or Broadcast(). The series of
+actions leading to this are:
+
+a) Timer fires, and a waiting thread exits the line of code:
+
+ WaitForSingleObject(waiting_event, max_time.InMilliseconds());
+
+b) That thread (in (a)) is randomly pre-empted after the above line,
+leaving the waiting_event reset (unsignaled) and still in the
+waiting_list_.
+
+c) A call to Signal() (or Broadcast()) on a second thread proceeds, and
+selects the waiting cv_event (identified in step (b)) as the event to revive
+via a call to SetEvent().
+
+d) The Signal() method (step c) calls SetEvent() on waiting_event (step b).
+
+e) The waiting cv_event (step b) is now signaled, but no thread is
+waiting on it.
+
+f) When that waiting_event (step b) is reused, it will immediately
+be signaled (spuriously).
+
+
+4) Why do you recycle events, and cause spurious signals? First off,
+the spurious events are very rare. They can only (I think) appear
+when the race described in FAQ-question-3 takes place. This should be
+very rare. Most(?) uses will involve only timer expiration, or only
+Signal/Broadcast() actions. When both are used, it will be rare that
+the race will appear, and it would require MANY Wait() and signaling
+activities. If this implementation did not recycle events, then it
+would have to create and destroy events for every call to Wait().
+That allocation/deallocation and associated construction/destruction
+would be costly (per wait), and would only be a rare benefit (when the
+race was "lost" and a spurious signal took place). That would be bad
+(IMO) optimization trade-off. Finally, such spurious events are
+allowed by the specification of condition variables (such as
+implemented in Vista), and hence it is better if any user accommodates
+such spurious events (see usage note in condition_variable.h).
+
+5) Why don't you reset events when you are about to recycle them, or
+about to reuse them, so that the spurious signals don't take place?
+The thread described in FAQ-question-3 step c may be pre-empted for an
+arbitrary length of time before proceeding to step d. As a result,
+the wait_event may actually be re-used *before* step (e) is reached.
+As a result, calling reset would not help significantly.
+
+6) How is it that the callers lock is released atomically with the
+entry into a wait state? We commit to the wait activity when we
+allocate the wait_event for use in a given call to Wait(). This
+allocation takes place before the caller's lock is released (and
+actually before our internal_lock_ is released). That allocation is
+the defining moment when "the wait state has been entered," as that
+thread *can* now be signaled by a call to Broadcast() or Signal().
+Hence we actually "commit to wait" before releasing the lock, making
+the pair effectively atomic.
+
+8) Why do you need to lock your data structures during waiting, as the
+caller is already in possession of a lock? We need to Acquire() and
+Release() our internal lock during Signal() and Broadcast(). If we tried
+to use a caller's lock for this purpose, we might conflict with their
+external use of the lock. For example, the caller may consistently
+hold a lock on one thread while calling Signal() on another, and that would
+block Signal().
+
+9) Couldn't a more efficient implementation be provided if you
+preclude using more than one external lock in conjunction with a
+single ConditionVariable instance? Yes, at least it could be viewed
+as a simpler API (since you don't have to reiterate the lock argument
+in each Wait() call). One of the constructors now takes a specific
+lock as an argument, and there are corresponding Wait() calls that
+don't specify a lock now. It turns out that the resulting implementation
+can't be made more efficient, as the internal lock needs to be used by
+Signal() and Broadcast(), to access internal data structures. As a
+result, I was not able to utilize the user supplied lock (which is
+being used by the user elsewhere presumably) to protect the private
+member access.
+
+9) Since you have a second lock, how can we be sure that there is no
+possible deadlock scenario? Our internal_lock_ is always the last
+lock acquired, and the first one released, and hence a deadlock (due
+to critical section problems) is impossible as a consequence of our
+lock.
+
+10) When doing a Broadcast(), why did you copy all the events into
+an STL stack, rather than making a linked-loop, and iterating over it?
+The iterating during Broadcast() is done outside the protection
+of the internal lock. As a result, other threads, such as the thread
+wherein a related event is waiting, could asynchronously manipulate
+the links around a cv_event. As a result, the link structure cannot
+be used outside a lock. Broadcast() could iterate over waiting
+events by cycling in-and-out of the protection of the internal_lock,
+but that appears more expensive than copying the list into an STL
+stack.
+
+11) Why did the lock.h file need to be modified so much for this
+change? Central to a Condition Variable is the atomic release of a
+lock during a Wait(). This places Wait() functionality exactly
+mid-way between the two classes, Lock and Condition Variable. Given
+that there can be nested Acquire()'s of locks, and Wait() had to
+Release() completely a held lock, it was necessary to augment the Lock
+class with a recursion counter. Even more subtle is the fact that the
+recursion counter (in a Lock) must be protected, as many threads can
+access it asynchronously. As a positive fallout of this, there are
+now some DCHECKS to be sure no one Release()s a Lock more than they
+Acquire()ed it, and there is ifdef'ed functionality that can detect
+nested locks (legal under windows, but not under Posix).
+
+12) Why is it that the cv_events removed from list in Broadcast() and Signal()
+are not leaked? How are they recovered?? The cv_events that appear to leak are
+taken from the waiting_list_. For each element in that list, there is currently
+a thread in or around the WaitForSingleObject() call of Wait(), and those
+threads have references to these otherwise leaked events. They are passed as
+arguments to be recycled just after returning from WaitForSingleObject().
+
+13) Why did you use a custom container class (the linked list), when STL has
+perfectly good containers, such as an STL list? The STL list, as with any
+container, does not guarantee the utility of an iterator across manipulation
+(such as insertions and deletions) of the underlying container. The custom
+double-linked-list container provided that assurance. I don't believe any
+combination of STL containers provided the services that were needed at the same
+O(1) efficiency as the custom linked list. The unusual requirement
+for the container class is that a reference to an item within a container (an
+iterator) needed to be maintained across an arbitrary manipulation of the
+container. This requirement exposes itself in the Wait() method, where a
+waiting_event must be selected prior to the WaitForSingleObject(), and then it
+must be used as part of recycling to remove the related instance from the
+waiting_list. An STL map could be used, but I was embarrassed to
+use a complex and relatively low efficiency container when a doubly linked list
+provided O(1) performance in all required operations. Since other operations
+to provide performance-and/or-fairness required queue (FIFO) and list (LIFO)
+containers, I would also have needed to use an STL list/queue as well as an STL
+map. In the end I decided it would be "fun" to just do it right, and I
+put so many assertions (DCHECKs) into the container class that it is trivial to
+code review and validate its correctness.
+
+*/
diff --git a/ipc/chromium/src/base/cpu.cc b/ipc/chromium/src/base/cpu.cc
new file mode 100644
index 000000000..919b69bd0
--- /dev/null
+++ b/ipc/chromium/src/base/cpu.cc
@@ -0,0 +1,56 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/cpu.h"
+#include <intrin.h>
+#include <string>
+
+namespace base {
+
+CPU::CPU()
+ : type_(0),
+ family_(0),
+ model_(0),
+ stepping_(0),
+ ext_model_(0),
+ ext_family_(0),
+ cpu_vendor_("unknown") {
+ Initialize();
+}
+
+void CPU::Initialize() {
+ int cpu_info[4] = {-1};
+ char cpu_string[0x20];
+
+ // __cpuid with an InfoType argument of 0 returns the number of
+ // valid Ids in CPUInfo[0] and the CPU identification string in
+ // the other three array elements. The CPU identification string is
+ // not in linear order. The code below arranges the information
+ // in a human readable form.
+ //
+ // More info can be found here:
+ // http://msdn.microsoft.com/en-us/library/hskdteyh.aspx
+ __cpuid(cpu_info, 0);
+ int num_ids = cpu_info[0];
+ memset(cpu_string, 0, sizeof(cpu_string));
+ *(reinterpret_cast<int*>(cpu_string)) = cpu_info[1];
+ *(reinterpret_cast<int*>(cpu_string+4)) = cpu_info[3];
+ *(reinterpret_cast<int*>(cpu_string+8)) = cpu_info[2];
+
+ // Interpret CPU feature information.
+ if (num_ids > 0) {
+ __cpuid(cpu_info, 1);
+ stepping_ = cpu_info[0] & 0xf;
+ model_ = (cpu_info[0] >> 4) & 0xf;
+ family_ = (cpu_info[0] >> 8) & 0xf;
+ type_ = (cpu_info[0] >> 12) & 0x3;
+ ext_model_ = (cpu_info[0] >> 16) & 0xf;
+ ext_family_ = (cpu_info[0] >> 20) & 0xff;
+ cpu_vendor_ = cpu_string;
+ }
+}
+
+} // namespace base
diff --git a/ipc/chromium/src/base/cpu.h b/ipc/chromium/src/base/cpu.h
new file mode 100644
index 000000000..8260fdb46
--- /dev/null
+++ b/ipc/chromium/src/base/cpu.h
@@ -0,0 +1,44 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_CPU_H_
+#define BASE_CPU_H_
+
+#include <string>
+
+namespace base {
+
+// Query information about the processor.
+class CPU {
+ public:
+ // Constructor
+ CPU();
+
+ // Accessors for CPU information.
+ const std::string& vendor_name() const { return cpu_vendor_; }
+ int stepping() const { return stepping_; }
+ int model() const { return model_; }
+ int family() const { return family_; }
+ int type() const { return type_; }
+ int extended_model() const { return ext_model_; }
+ int extended_family() const { return ext_family_; }
+
+ private:
+ // Query the processor for CPUID information.
+ void Initialize();
+
+ int type_; // process type
+ int family_; // family of the processor
+ int model_; // model of processor
+ int stepping_; // processor revision number
+ int ext_model_;
+ int ext_family_;
+ std::string cpu_vendor_;
+};
+
+} // namespace base
+
+#endif // BASE_CPU_H_
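
A small usage sketch (not part of the patch) for the accessors above; note that cpu.cc as added here relies on the MSVC __cpuid intrinsic, so the class is only populated on x86 Windows builds:

// Hypothetical sketch; LogCpuInfo is invented for illustration.
#include <stdio.h>

#include "base/cpu.h"

void LogCpuInfo() {
  base::CPU cpu;  // Runs CPUID via Initialize() in the constructor.
  printf("vendor=%s family=%d model=%d stepping=%d\n",
         cpu.vendor_name().c_str(), cpu.family(), cpu.model(), cpu.stepping());
}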
diff --git a/ipc/chromium/src/base/dir_reader_bsd.h b/ipc/chromium/src/base/dir_reader_bsd.h
new file mode 100644
index 000000000..35ba6bd15
--- /dev/null
+++ b/ipc/chromium/src/base/dir_reader_bsd.h
@@ -0,0 +1,103 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// derived from dir_reader_linux.h
+
+#ifndef BASE_DIR_READER_BSD_H_
+#define BASE_DIR_READER_BSD_H_
+#pragma once
+
+#include <dirent.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <stdint.h>
+#include <unistd.h>
+
+#include "base/logging.h"
+#include "base/eintr_wrapper.h"
+
+// See the comments in dir_reader_posix.h about this.
+
+namespace base {
+
+class DirReaderBSD {
+ public:
+ explicit DirReaderBSD(const char* directory_path)
+#ifdef O_DIRECTORY
+ : fd_(open(directory_path, O_RDONLY | O_DIRECTORY)),
+#else
+ : fd_(open(directory_path, O_RDONLY)),
+#endif
+ offset_(0),
+ size_(0) {
+ memset(buf_, 0, sizeof(buf_));
+ }
+
+ ~DirReaderBSD() {
+ if (fd_ >= 0) {
+ if (HANDLE_EINTR(close(fd_)))
+ DLOG(ERROR) << "Failed to close directory handle";
+ }
+ }
+
+ bool IsValid() const {
+ return fd_ >= 0;
+ }
+
+ // Move to the next entry returning false if the iteration is complete.
+ bool Next() {
+ if (size_) {
+ struct dirent* dirent = reinterpret_cast<struct dirent*>(&buf_[offset_]);
+#ifdef OS_DRAGONFLY
+ offset_ += _DIRENT_DIRSIZ(dirent);
+#else
+ offset_ += dirent->d_reclen;
+#endif
+ }
+
+ if (offset_ != size_)
+ return true;
+
+ const int r = getdents(fd_, buf_, sizeof(buf_));
+ if (r == 0)
+ return false;
+ if (r == -1) {
+ DLOG(ERROR) << "getdents returned an error: " << errno;
+ return false;
+ }
+ size_ = r;
+ offset_ = 0;
+ return true;
+ }
+
+ const char* name() const {
+ if (!size_)
+ return NULL;
+
+ const struct dirent* dirent =
+ reinterpret_cast<const struct dirent*>(&buf_[offset_]);
+ return dirent->d_name;
+ }
+
+ int fd() const {
+ return fd_;
+ }
+
+ static bool IsFallback() {
+ return false;
+ }
+
+ private:
+ const int fd_;
+ char buf_[512];
+ size_t offset_, size_;
+
+ DISALLOW_COPY_AND_ASSIGN(DirReaderBSD);
+};
+
+} // namespace base
+
+#endif // BASE_DIR_READER_BSD_H_
diff --git a/ipc/chromium/src/base/dir_reader_fallback.h b/ipc/chromium/src/base/dir_reader_fallback.h
new file mode 100644
index 000000000..788ba32ef
--- /dev/null
+++ b/ipc/chromium/src/base/dir_reader_fallback.h
@@ -0,0 +1,33 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_DIR_READER_FALLBACK_H_
+#define BASE_DIR_READER_FALLBACK_H_
+#pragma once
+
+namespace base {
+
+class DirReaderFallback {
+ public:
+ // Open a directory. If |IsValid| is true, then |Next| can be called to start
+ // the iteration at the beginning of the directory.
+ explicit DirReaderFallback(const char* directory_path) { }
+ // After construction, IsValid returns true iff the directory was
+ // successfully opened.
+ bool IsValid() const { return false; }
+ // Move to the next entry returning false if the iteration is complete.
+ bool Next() { return false; }
+ // Return the name of the current directory entry.
+ const char* name() { return 0;}
+ // Return the file descriptor which is being used.
+ int fd() const { return -1; }
+ // Returns true if this is a no-op fallback class (for testing).
+ static bool IsFallback() { return true; }
+};
+
+} // namespace base
+
+#endif // BASE_DIR_READER_FALLBACK_H_
diff --git a/ipc/chromium/src/base/dir_reader_linux.h b/ipc/chromium/src/base/dir_reader_linux.h
new file mode 100644
index 000000000..530fe2e01
--- /dev/null
+++ b/ipc/chromium/src/base/dir_reader_linux.h
@@ -0,0 +1,101 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_DIR_READER_LINUX_H_
+#define BASE_DIR_READER_LINUX_H_
+#pragma once
+
+#include <errno.h>
+#include <fcntl.h>
+#include <stdint.h>
+#include <sys/syscall.h>
+#include <unistd.h>
+
+#include "base/logging.h"
+#include "base/eintr_wrapper.h"
+
+// See the comments in dir_reader_posix.h about this.
+
+namespace base {
+
+struct linux_dirent {
+ uint64_t d_ino;
+ int64_t d_off;
+ unsigned short d_reclen;
+ unsigned char d_type;
+ char d_name[0];
+};
+
+class DirReaderLinux {
+ public:
+ explicit DirReaderLinux(const char* directory_path)
+ : fd_(open(directory_path, O_RDONLY | O_DIRECTORY)),
+ offset_(0),
+ size_(0) {
+ memset(buf_, 0, sizeof(buf_));
+ }
+
+ ~DirReaderLinux() {
+ if (fd_ >= 0) {
+ if (HANDLE_EINTR(close(fd_)))
+ DLOG(ERROR) << "Failed to close directory handle";
+ }
+ }
+
+ bool IsValid() const {
+ return fd_ >= 0;
+ }
+
+ // Move to the next entry returning false if the iteration is complete.
+ bool Next() {
+ if (size_) {
+ linux_dirent* dirent = reinterpret_cast<linux_dirent*>(&buf_[offset_]);
+ offset_ += dirent->d_reclen;
+ }
+
+ if (offset_ != size_)
+ return true;
+
+ const int r = syscall(__NR_getdents64, fd_, buf_, sizeof(buf_));
+ if (r == 0)
+ return false;
+ if (r == -1) {
+ DLOG(ERROR) << "getdents64 returned an error: " << errno;
+ return false;
+ }
+ size_ = r;
+ offset_ = 0;
+ return true;
+ }
+
+ const char* name() const {
+ if (!size_)
+ return NULL;
+
+ const linux_dirent* dirent =
+ reinterpret_cast<const linux_dirent*>(&buf_[offset_]);
+ return dirent->d_name;
+ }
+
+ int fd() const {
+ return fd_;
+ }
+
+ static bool IsFallback() {
+ return false;
+ }
+
+ private:
+ const int fd_;
+ unsigned char buf_[512];
+ size_t offset_, size_;
+
+ DISALLOW_COPY_AND_ASSIGN(DirReaderLinux);
+};
+
+} // namespace base
+
+#endif // BASE_DIR_READER_LINUX_H_
diff --git a/ipc/chromium/src/base/dir_reader_posix.h b/ipc/chromium/src/base/dir_reader_posix.h
new file mode 100644
index 000000000..4be04bd53
--- /dev/null
+++ b/ipc/chromium/src/base/dir_reader_posix.h
@@ -0,0 +1,43 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_DIR_READER_POSIX_H_
+#define BASE_DIR_READER_POSIX_H_
+#pragma once
+
+#include "build/build_config.h"
+
+// This header provides a class, DirReaderPosix, which allows one to open and
+// read from directories without allocating memory. For the interface, see
+// the generic fallback in dir_reader_fallback.h.
+
+// Mac note: OS X has getdirentries, but it only works if we restrict Chrome to
+// 32-bit inodes. There is a getdirentries64 syscall in 10.6, but it's not
+// wrapped and the direct syscall interface is unstable. Using an unstable API
+// seems worse than falling back to enumerating all file descriptors so we will
+// probably never implement this on the Mac.
+
+#if defined(OS_LINUX)
+#include "base/dir_reader_linux.h"
+#elif defined(OS_BSD) && !defined(__GLIBC__)
+#include "base/dir_reader_bsd.h"
+#else
+#include "base/dir_reader_fallback.h"
+#endif
+
+namespace base {
+
+#if defined(OS_LINUX)
+typedef DirReaderLinux DirReaderPosix;
+#elif defined(OS_BSD) && !defined(__GLIBC__)
+typedef DirReaderBSD DirReaderPosix;
+#else
+typedef DirReaderFallback DirReaderPosix;
+#endif
+
+} // namespace base
+
+#endif // BASE_DIR_READER_POSIX_H_
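
A sketch (not part of the patch) of iterating a directory through the DirReaderPosix typedef above; the ListDirectory wrapper is hypothetical:

// Hypothetical sketch; ListDirectory is invented for illustration.
#include <stdio.h>
#include <string.h>

#include "base/dir_reader_posix.h"

void ListDirectory(const char* path) {
  base::DirReaderPosix reader(path);
  if (!reader.IsValid())
    return;  // open() failed, or we are on the no-op fallback implementation.
  while (reader.Next()) {
    const char* name = reader.name();
    if (!name || strcmp(name, ".") == 0 || strcmp(name, "..") == 0)
      continue;
    printf("%s\n", name);  // The reader itself never allocates.
  }
}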
diff --git a/ipc/chromium/src/base/eintr_wrapper.h b/ipc/chromium/src/base/eintr_wrapper.h
new file mode 100644
index 000000000..ea4546d9d
--- /dev/null
+++ b/ipc/chromium/src/base/eintr_wrapper.h
@@ -0,0 +1,35 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This provides a wrapper around system calls which may be interrupted by a
+// signal and return EINTR. See man 7 signal.
+//
+// On Windows, this wrapper macro does nothing.
+
+#ifndef BASE_EINTR_WRAPPER_H_
+#define BASE_EINTR_WRAPPER_H_
+
+#include "build/build_config.h"
+
+#if defined(OS_POSIX)
+
+#include <errno.h>
+
+#define HANDLE_EINTR(x) ({ \
+ typeof(x) __eintr_result__; \
+ do { \
+ __eintr_result__ = x; \
+ } while (__eintr_result__ == -1 && errno == EINTR); \
+ __eintr_result__;\
+})
+
+#else
+
+#define HANDLE_EINTR(x) x
+
+#endif // OS_POSIX
+
+#endif // !BASE_EINTR_WRAPPER_H_
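
A sketch (not part of the patch) of the intended use: retrying a read() that may be interrupted by a signal. ReadFully is a hypothetical helper:

// Hypothetical helper; the macro retries only on EINTR, so other errors and
// end-of-file still have to be handled by the caller.
#include <stddef.h>
#include <unistd.h>

#include "base/eintr_wrapper.h"

ssize_t ReadFully(int fd, char* buf, size_t len) {
  size_t done = 0;
  while (done < len) {
    const ssize_t n = HANDLE_EINTR(read(fd, buf + done, len - done));
    if (n < 0)
      return -1;  // Real error (errno is something other than EINTR).
    if (n == 0)
      break;      // End-of-file.
    done += static_cast<size_t>(n);
  }
  return static_cast<ssize_t>(done);
}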
diff --git a/ipc/chromium/src/base/file_descriptor_posix.h b/ipc/chromium/src/base/file_descriptor_posix.h
new file mode 100644
index 000000000..adbf6234f
--- /dev/null
+++ b/ipc/chromium/src/base/file_descriptor_posix.h
@@ -0,0 +1,38 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_FILE_DESCRIPTOR_POSIX_H_
+#define BASE_FILE_DESCRIPTOR_POSIX_H_
+
+namespace base {
+
+// -----------------------------------------------------------------------------
+// We introduce a special structure for file descriptors in order that we are
+// able to use template specialisation to special-case their handling.
+//
+// WARNING: (Chromium only) There are subtleties to consider if serialising
+// these objects over IPC. See comments in chrome/common/ipc_message_utils.h
+// above the template specialisation for this structure.
+// -----------------------------------------------------------------------------
+struct FileDescriptor {
+ FileDescriptor()
+ : fd(-1),
+ auto_close(false) { }
+
+ FileDescriptor(int ifd, bool iauto_close)
+ : fd(ifd),
+ auto_close(iauto_close) { }
+
+ int fd;
+ // If true, this file descriptor should be closed after it has been used. For
+ // example an IPC system might interpret this flag as indicating that the
+ // file descriptor it has been given should be closed after use.
+ bool auto_close;
+};
+
+} // namespace base
+
+#endif // BASE_FILE_DESCRIPTOR_POSIX_H_
diff --git a/ipc/chromium/src/base/file_descriptor_shuffle.cc b/ipc/chromium/src/base/file_descriptor_shuffle.cc
new file mode 100644
index 000000000..13f0c1e70
--- /dev/null
+++ b/ipc/chromium/src/base/file_descriptor_shuffle.cc
@@ -0,0 +1,96 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/file_descriptor_shuffle.h"
+
+#include <errno.h>
+#include <unistd.h>
+
+#include "base/eintr_wrapper.h"
+#include "base/logging.h"
+
+namespace base {
+
+bool PerformInjectiveMultimapDestructive(
+ InjectiveMultimap* m, InjectionDelegate* delegate) {
+ static const size_t kMaxExtraFDs = 16;
+ int extra_fds[kMaxExtraFDs];
+ unsigned next_extra_fd = 0;
+
+ // DANGER: this function may not allocate.
+
+ for (InjectiveMultimap::iterator i = m->begin(); i != m->end(); ++i) {
+ int temp_fd = -1;
+
+ // We DCHECK the injectiveness of the mapping.
+ for (InjectiveMultimap::iterator j = i + 1; j != m->end(); ++j) {
+ DCHECK(i->dest != j->dest) << "Both fd " << i->source
+ << " and " << j->source << " map to " << i->dest;
+ }
+
+ const bool is_identity = i->source == i->dest;
+
+ for (InjectiveMultimap::iterator j = i + 1; j != m->end(); ++j) {
+ if (!is_identity && i->dest == j->source) {
+ if (temp_fd == -1) {
+ if (!delegate->Duplicate(&temp_fd, i->dest))
+ return false;
+ if (next_extra_fd < kMaxExtraFDs) {
+ extra_fds[next_extra_fd++] = temp_fd;
+ } else {
+ DLOG(ERROR) << "PerformInjectiveMultimapDestructive overflowed "
+ << "extra_fds. Leaking file descriptors!";
+ }
+ }
+
+ j->source = temp_fd;
+ j->close = false;
+ }
+
+ if (i->close && i->source == j->dest)
+ i->close = false;
+
+ if (i->close && i->source == j->source) {
+ i->close = false;
+ j->close = true;
+ }
+ }
+
+ if (!is_identity) {
+ if (!delegate->Move(i->source, i->dest))
+ return false;
+ }
+
+ if (!is_identity && i->close)
+ delegate->Close(i->source);
+ }
+
+ for (unsigned i = 0; i < next_extra_fd; i++)
+ delegate->Close(extra_fds[i]);
+
+ return true;
+}
+
+bool PerformInjectiveMultimap(const InjectiveMultimap& m_in,
+ InjectionDelegate* delegate) {
+ InjectiveMultimap m(m_in);
+ return PerformInjectiveMultimapDestructive(&m, delegate);
+}
+
+bool FileDescriptorTableInjection::Duplicate(int* result, int fd) {
+ *result = HANDLE_EINTR(dup(fd));
+ return *result >= 0;
+}
+
+bool FileDescriptorTableInjection::Move(int src, int dest) {
+ return HANDLE_EINTR(dup2(src, dest)) != -1;
+}
+
+void FileDescriptorTableInjection::Close(int fd) {
+ HANDLE_EINTR(close(fd));
+}
+
+} // namespace base
diff --git a/ipc/chromium/src/base/file_descriptor_shuffle.h b/ipc/chromium/src/base/file_descriptor_shuffle.h
new file mode 100644
index 000000000..a0a8a325c
--- /dev/null
+++ b/ipc/chromium/src/base/file_descriptor_shuffle.h
@@ -0,0 +1,83 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_FILE_DESCRIPTOR_SHUFFLE_H_
+#define BASE_FILE_DESCRIPTOR_SHUFFLE_H_
+
+#include "mozilla/Attributes.h"
+
+// This code exists to perform the shuffling of file descriptors which is
+// commonly needed when forking subprocesses. The naive approach is very simple
+// (just call dup2 to set up the desired descriptors) but wrong. It's tough to
+// handle the edge cases (like mapping 0 -> 1, 1 -> 0) correctly.
+//
+// In order to unittest this code, it's broken into the abstract action (an
+// injective multimap) and the concrete code for dealing with file descriptors.
+// Users should use the code like this:
+// base::InjectiveMultimap file_descriptor_map;
+// file_descriptor_map.push_back(base::InjectionArc(devnull, 0, true));
+// file_descriptor_map.push_back(base::InjectionArc(devnull, 2, true));
+// file_descriptor_map.push_back(base::InjectionArc(pipe[1], 1, true));
+//   base::ShuffleFileDescriptors(&file_descriptor_map);
+//
+// and trust that the Right Thing will get done.
+
+#include <vector>
+
+namespace base {
+
+// A Delegate which performs the actions required to perform an injective
+// multimapping in place.
+class InjectionDelegate {
+ public:
+ // Duplicate |fd|, an element of the domain, and write a fresh element of the
+ // domain into |result|. Returns true iff successful.
+ virtual bool Duplicate(int* result, int fd) = 0;
+ // Destructively move |src| to |dest|, overwriting |dest|. Returns true iff
+ // successful.
+ virtual bool Move(int src, int dest) = 0;
+ // Delete an element of the domain.
+ virtual void Close(int fd) = 0;
+};
+
+// An implementation of the InjectionDelegate interface using the file
+// descriptor table of the current process as the domain.
+class FileDescriptorTableInjection : public InjectionDelegate {
+ virtual bool Duplicate(int* result, int fd) override;
+ virtual bool Move(int src, int dest) override;
+ virtual void Close(int fd) override;
+};
+
+// A single arc of the directed graph which describes an injective multimapping.
+struct InjectionArc {
+ InjectionArc(int in_source, int in_dest, bool in_close)
+ : source(in_source),
+ dest(in_dest),
+ close(in_close) {
+ }
+
+ int source;
+ int dest;
+ bool close; // if true, delete the source element after performing the
+ // mapping.
+};
+
+typedef std::vector<InjectionArc> InjectiveMultimap;
+
+bool PerformInjectiveMultimap(const InjectiveMultimap& map,
+ InjectionDelegate* delegate);
+bool PerformInjectiveMultimapDestructive(InjectiveMultimap* map,
+ InjectionDelegate* delegate);
+
+// This function will not call malloc but will mutate |map|
+static inline bool ShuffleFileDescriptors(InjectiveMultimap *map) {
+ FileDescriptorTableInjection delegate;
+ return PerformInjectiveMultimapDestructive(map, &delegate);
+}
+
+} // namespace base
+
+#endif // !BASE_FILE_DESCRIPTOR_SHUFFLE_H_
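
A sketch (not part of the patch) matching the header comment above: remapping a child's stdio between fork() and exec(). The SetUpChildStdio function and its descriptor parameters are placeholders.

// Hypothetical sketch; |devnull| and |pipe_write_end| are placeholders for
// descriptors opened by the caller before fork().
#include "base/file_descriptor_shuffle.h"

bool SetUpChildStdio(int devnull, int pipe_write_end) {
  // Building the map uses std::vector and may allocate; in a strict
  // no-malloc-after-fork setting it would be built before fork().
  base::InjectiveMultimap fd_map;
  fd_map.push_back(base::InjectionArc(devnull, 0, true));         // stdin
  fd_map.push_back(base::InjectionArc(pipe_write_end, 1, true));  // stdout
  fd_map.push_back(base::InjectionArc(devnull, 2, true));         // stderr
  // The shuffle itself mutates |fd_map| in place and does not allocate.
  return base::ShuffleFileDescriptors(&fd_map);
}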
diff --git a/ipc/chromium/src/base/file_path.cc b/ipc/chromium/src/base/file_path.cc
new file mode 100644
index 000000000..f901b583d
--- /dev/null
+++ b/ipc/chromium/src/base/file_path.cc
@@ -0,0 +1,332 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright (c) 2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <fstream>
+
+#include "base/file_path.h"
+#include "base/logging.h"
+
+// These includes are just for the *Hack functions, and should be removed
+// when those functions are removed.
+#include "base/string_piece.h"
+#include "base/string_util.h"
+#include "base/sys_string_conversions.h"
+
+#if defined(FILE_PATH_USES_WIN_SEPARATORS)
+const FilePath::CharType FilePath::kSeparators[] = FILE_PATH_LITERAL("\\/");
+#else // FILE_PATH_USES_WIN_SEPARATORS
+const FilePath::CharType FilePath::kSeparators[] = FILE_PATH_LITERAL("/");
+#endif // FILE_PATH_USES_WIN_SEPARATORS
+
+const FilePath::CharType FilePath::kCurrentDirectory[] = FILE_PATH_LITERAL(".");
+const FilePath::CharType FilePath::kParentDirectory[] = FILE_PATH_LITERAL("..");
+
+const FilePath::CharType FilePath::kExtensionSeparator = FILE_PATH_LITERAL('.');
+
+
+namespace {
+
+// If this FilePath contains a drive letter specification, returns the
+// position of the last character of the drive letter specification,
+// otherwise returns npos. This can only be true on Windows, when a pathname
+// begins with a letter followed by a colon. On other platforms, this always
+// returns npos.
+FilePath::StringType::size_type FindDriveLetter(
+ const FilePath::StringType& path) {
+#if defined(FILE_PATH_USES_DRIVE_LETTERS)
+ // This is dependent on an ASCII-based character set, but that's a
+ // reasonable assumption. iswalpha can be too inclusive here.
+ if (path.length() >= 2 && path[1] == L':' &&
+ ((path[0] >= L'A' && path[0] <= L'Z') ||
+ (path[0] >= L'a' && path[0] <= L'z'))) {
+ return 1;
+ }
+#endif // FILE_PATH_USES_DRIVE_LETTERS
+ return FilePath::StringType::npos;
+}
+
+bool IsPathAbsolute(const FilePath::StringType& path) {
+#if defined(FILE_PATH_USES_DRIVE_LETTERS)
+ FilePath::StringType::size_type letter = FindDriveLetter(path);
+ if (letter != FilePath::StringType::npos) {
+ // Look for a separator right after the drive specification.
+ return path.length() > letter + 1 &&
+ FilePath::IsSeparator(path[letter + 1]);
+ }
+ // Look for a pair of leading separators.
+ return path.length() > 1 &&
+ FilePath::IsSeparator(path[0]) && FilePath::IsSeparator(path[1]);
+#else // FILE_PATH_USES_DRIVE_LETTERS
+ // Look for a separator in the first position.
+ return path.length() > 0 && FilePath::IsSeparator(path[0]);
+#endif // FILE_PATH_USES_DRIVE_LETTERS
+}
+
+} // namespace
+
+bool FilePath::IsSeparator(CharType character) {
+ for (size_t i = 0; i < arraysize(kSeparators) - 1; ++i) {
+ if (character == kSeparators[i]) {
+ return true;
+ }
+ }
+
+ return false;
+}
+
+// libgen's dirname and basename aren't guaranteed to be thread-safe and aren't
+// guaranteed to not modify their input strings, and in fact are implemented
+// differently in this regard on different platforms. Don't use them, but
+// adhere to their behavior.
+FilePath FilePath::DirName() const {
+ FilePath new_path(path_);
+ new_path.StripTrailingSeparatorsInternal();
+
+ // The drive letter, if any, always needs to remain in the output. If there
+ // is no drive letter, as will always be the case on platforms which do not
+ // support drive letters, letter will be npos, or -1, so the comparisons and
+ // resizes below using letter will still be valid.
+ StringType::size_type letter = FindDriveLetter(new_path.path_);
+
+ StringType::size_type last_separator =
+ new_path.path_.find_last_of(kSeparators, StringType::npos,
+ arraysize(kSeparators) - 1);
+ if (last_separator == StringType::npos) {
+ // path_ is in the current directory.
+ new_path.path_.resize(letter + 1);
+ } else if (last_separator == letter + 1) {
+ // path_ is in the root directory.
+ new_path.path_.resize(letter + 2);
+ } else if (last_separator == letter + 2 &&
+ IsSeparator(new_path.path_[letter + 1])) {
+ // path_ is in "//" (possibly with a drive letter); leave the double
+ // separator intact indicating alternate root.
+ new_path.path_.resize(letter + 3);
+ } else if (last_separator != 0) {
+ // path_ is somewhere else, trim the basename.
+ new_path.path_.resize(last_separator);
+ }
+
+ new_path.StripTrailingSeparatorsInternal();
+ if (!new_path.path_.length())
+ new_path.path_ = kCurrentDirectory;
+
+ return new_path;
+}
+
+FilePath FilePath::BaseName() const {
+ FilePath new_path(path_);
+ new_path.StripTrailingSeparatorsInternal();
+
+ // The drive letter, if any, is always stripped.
+ StringType::size_type letter = FindDriveLetter(new_path.path_);
+ if (letter != StringType::npos) {
+ new_path.path_.erase(0, letter + 1);
+ }
+
+ // Keep everything after the final separator, but if the pathname is only
+ // one character and it's a separator, leave it alone.
+ StringType::size_type last_separator =
+ new_path.path_.find_last_of(kSeparators, StringType::npos,
+ arraysize(kSeparators) - 1);
+ if (last_separator != StringType::npos &&
+ last_separator < new_path.path_.length() - 1) {
+ new_path.path_.erase(0, last_separator + 1);
+ }
+
+ return new_path;
+}
+
+FilePath::StringType FilePath::Extension() const {
+ // BaseName() calls StripTrailingSeparators, so cases like /foo.baz/// work.
+ StringType base = BaseName().value();
+
+ // Special case "." and ".."
+ if (base == kCurrentDirectory || base == kParentDirectory)
+ return StringType();
+
+ const StringType::size_type last_dot = base.rfind(kExtensionSeparator);
+ if (last_dot == StringType::npos)
+ return StringType();
+ return StringType(base, last_dot);
+}
+
+FilePath FilePath::RemoveExtension() const {
+ StringType ext = Extension();
+ // It's important to check Extension() since that verifies that the
+ // kExtensionSeparator actually appeared in the last path component.
+ if (ext.empty())
+ return FilePath(path_);
+ // Since Extension() verified that the extension is in fact in the last path
+ // component, this substr will effectively strip trailing separators.
+ const StringType::size_type last_dot = path_.rfind(kExtensionSeparator);
+ return FilePath(path_.substr(0, last_dot));
+}
+
+FilePath FilePath::InsertBeforeExtension(const StringType& suffix) const {
+ if (suffix.empty())
+ return FilePath(path_);
+
+ if (path_.empty())
+ return FilePath();
+
+ StringType base = BaseName().value();
+ if (base.empty())
+ return FilePath();
+ if (*(base.end() - 1) == kExtensionSeparator) {
+ // Special case "." and ".."
+ if (base == kCurrentDirectory || base == kParentDirectory) {
+ return FilePath();
+ }
+ }
+
+ StringType ext = Extension();
+ StringType ret = RemoveExtension().value();
+ ret.append(suffix);
+ ret.append(ext);
+ return FilePath(ret);
+}
+
+FilePath FilePath::ReplaceExtension(const StringType& extension) const {
+ if (path_.empty())
+ return FilePath();
+
+ StringType base = BaseName().value();
+ if (base.empty())
+ return FilePath();
+ if (*(base.end() - 1) == kExtensionSeparator) {
+ // Special case "." and ".."
+ if (base == kCurrentDirectory || base == kParentDirectory) {
+ return FilePath();
+ }
+ }
+
+ FilePath no_ext = RemoveExtension();
+ // If the new extension is "" or ".", then just remove the current extension.
+ if (extension.empty() || extension == StringType(1, kExtensionSeparator))
+ return no_ext;
+
+ StringType str = no_ext.value();
+ if (extension[0] != kExtensionSeparator)
+ str.append(1, kExtensionSeparator);
+ str.append(extension);
+ return FilePath(str);
+}
+
+FilePath FilePath::Append(const StringType& component) const {
+ DCHECK(!IsPathAbsolute(component));
+ if (path_.compare(kCurrentDirectory) == 0) {
+ // Append normally doesn't do any normalization, but as a special case,
+ // when appending to kCurrentDirectory, just return a new path for the
+ // component argument. Appending component to kCurrentDirectory would
+ // serve no purpose other than needlessly lengthening the path, and
+ // it's likely in practice to wind up with FilePath objects containing
+ // only kCurrentDirectory when calling DirName on a single relative path
+ // component.
+ return FilePath(component);
+ }
+
+ FilePath new_path(path_);
+ new_path.StripTrailingSeparatorsInternal();
+
+ // Don't append a separator if the path is empty (indicating the current
+ // directory) or if the path component is empty (indicating nothing to
+ // append).
+ if (component.length() > 0 && new_path.path_.length() > 0) {
+
+ // Don't append a separator if the path still ends with a trailing
+ // separator after stripping (indicating the root directory).
+ if (!IsSeparator(new_path.path_[new_path.path_.length() - 1])) {
+
+ // Don't append a separator if the path is just a drive letter.
+ if (FindDriveLetter(new_path.path_) + 1 != new_path.path_.length()) {
+ new_path.path_.append(1, kSeparators[0]);
+ }
+ }
+ }
+
+ new_path.path_.append(component);
+ return new_path;
+}
+
+FilePath FilePath::Append(const FilePath& component) const {
+ return Append(component.value());
+}
+
+FilePath FilePath::AppendASCII(const std::string& component) const {
+ DCHECK(IsStringASCII(component));
+#if defined(OS_WIN)
+ return Append(ASCIIToWide(component));
+#elif defined(OS_POSIX)
+ return Append(component);
+#endif
+}
+
+bool FilePath::IsAbsolute() const {
+ return IsPathAbsolute(path_);
+}
+
+#if defined(OS_POSIX)
+// See file_path.h for a discussion of the encoding of paths on POSIX
+// platforms. These *Hack() functions are not quite correct, but they're
+// only temporary while we fix the remainder of the code.
+// Remember to remove the #includes at the top when you remove these.
+
+// static
+FilePath FilePath::FromWStringHack(const std::wstring& wstring) {
+ return FilePath(base::SysWideToNativeMB(wstring));
+}
+std::wstring FilePath::ToWStringHack() const {
+ return base::SysNativeMBToWide(path_);
+}
+#elif defined(OS_WIN)
+// static
+FilePath FilePath::FromWStringHack(const std::wstring& wstring) {
+ return FilePath(wstring);
+}
+std::wstring FilePath::ToWStringHack() const {
+ return path_;
+}
+#endif
+
+void FilePath::OpenInputStream(std::ifstream& stream) const {
+ stream.open(
+#ifndef __MINGW32__
+ path_.c_str(),
+#else
+ base::SysWideToNativeMB(path_).c_str(),
+#endif
+ std::ios::in | std::ios::binary);
+}
+
+FilePath FilePath::StripTrailingSeparators() const {
+ FilePath new_path(path_);
+ new_path.StripTrailingSeparatorsInternal();
+
+ return new_path;
+}
+
+void FilePath::StripTrailingSeparatorsInternal() {
+ // If there is no drive letter, start will be 1, which will prevent stripping
+ // the leading separator if there is only one separator. If there is a drive
+ // letter, start will be set appropriately to prevent stripping the first
+ // separator following the drive letter, if a separator immediately follows
+ // the drive letter.
+ StringType::size_type start = FindDriveLetter(path_) + 2;
+
+ StringType::size_type last_stripped = StringType::npos;
+ for (StringType::size_type pos = path_.length();
+ pos > start && IsSeparator(path_[pos - 1]);
+ --pos) {
+ // If the string only has two separators and they're at the beginning,
+ // don't strip them, unless the string began with more than two separators.
+ if (pos != start + 1 || last_stripped == start + 2 ||
+ !IsSeparator(path_[start - 1])) {
+ path_.resize(pos - 1);
+ last_stripped = pos;
+ }
+ }
+}
diff --git a/ipc/chromium/src/base/file_path.h b/ipc/chromium/src/base/file_path.h
new file mode 100644
index 000000000..b2e9f9a00
--- /dev/null
+++ b/ipc/chromium/src/base/file_path.h
@@ -0,0 +1,277 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright (c) 2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// FilePath is a container for pathnames stored in a platform's native string
+// type, providing containers for manipulation in accordance with the
+// platform's conventions for pathnames. It supports the following path
+// types:
+//
+// POSIX Windows
+// --------------- ----------------------------------
+// Fundamental type char[] wchar_t[]
+// Encoding unspecified* UTF-16
+// Separator / \, tolerant of /
+// Drive letters no case-insensitive A-Z followed by :
+// Alternate root // (surprise!) \\, for UNC paths
+//
+// * The encoding need not be specified on POSIX systems, although some
+// POSIX-compliant systems do specify an encoding. Mac OS X uses UTF-8.
+// Linux does not specify an encoding, but in practice, the locale's
+// character set may be used.
+//
+// FilePath objects are intended to be used anywhere paths are. An
+// application may pass FilePath objects around internally, masking the
+// underlying differences between systems, only differing in implementation
+// where interfacing directly with the system. For example, a single
+// OpenFile(const FilePath &) function may be made available, allowing all
+// callers to operate without regard to the underlying implementation. On
+// POSIX-like platforms, OpenFile might wrap fopen, and on Windows, it might
+// wrap _wfopen_s, perhaps both by calling file_path.value().c_str(). This
+// allows each platform to pass pathnames around without requiring conversions
+// between encodings, which has an impact on performance, but more importantly,
+// has an impact on correctness on platforms that do not have well-defined
+// encodings for pathnames.
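+//
+// A minimal sketch of such a wrapper (illustrative only; OpenFile here is a
+// hypothetical free function, and ASCIIToWide is the helper from
+// base/string_util.h):
+//
+// | FILE* OpenFile(const FilePath& path, const char* mode) {
+// | #if defined(OS_WIN)
+// |   return _wfopen(path.value().c_str(), ASCIIToWide(mode).c_str());
+// | #else
+// |   return fopen(path.value().c_str(), mode);
+// | #endif
+// | }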
+//
+// Several methods are available to perform common operations on a FilePath
+// object, such as determining the parent directory (DirName), isolating the
+// final path component (BaseName), and appending a relative pathname string
+// to an existing FilePath object (Append). These methods are highly
+// recommended over attempting to split and concatenate strings directly.
+// These methods are based purely on string manipulation and knowledge of
+// platform-specific pathname conventions, and do not consult the filesystem
+// at all, making them safe to use without fear of blocking on I/O operations.
+// These methods do not function as mutators but instead return distinct
+// instances of FilePath objects, and are therefore safe to use on const
+// objects. The objects themselves are safe to share between threads.
+//
+// To aid in initialization of FilePath objects from string literals, a
+// FILE_PATH_LITERAL macro is provided, which accounts for the difference
+// between char[]-based pathnames on POSIX systems and wchar_t[]-based
+// pathnames on Windows.
+//
+// Because a FilePath object should not be instantiated at the global scope,
+// use a FilePath::CharType[] instead and initialize it with
+// FILE_PATH_LITERAL. At runtime, a FilePath object can be created from the
+// character array. Example:
+//
+// | const FilePath::CharType kLogFileName[] = FILE_PATH_LITERAL("log.txt");
+// |
+// | void Function() {
+// | FilePath log_file_path(kLogFileName);
+// | [...]
+// | }
+
+#ifndef BASE_FILE_PATH_H_
+#define BASE_FILE_PATH_H_
+
+#include <string>
+
+#include "base/basictypes.h"
+#include "base/compiler_specific.h"
+#include "base/hash_tables.h"
+
+// Windows-style drive letter support and pathname separator characters can be
+// enabled and disabled independently, to aid testing. These #defines are
+// here so that the same setting can be used in both the implementation and
+// in the unit test.
+#if defined(OS_WIN)
+#define FILE_PATH_USES_DRIVE_LETTERS
+#define FILE_PATH_USES_WIN_SEPARATORS
+#endif // OS_WIN
+
+// An abstraction to isolate users from the differences between native
+// pathnames on different platforms.
+class FilePath {
+ public:
+#if defined(OS_POSIX)
+ // On most platforms, native pathnames are char arrays, and the encoding
+ // may or may not be specified. On Mac OS X, native pathnames are encoded
+ // in UTF-8.
+ typedef std::string StringType;
+#elif defined(OS_WIN)
+ // On Windows, for Unicode-aware applications, native pathnames are wchar_t
+ // arrays encoded in UTF-16.
+ typedef std::wstring StringType;
+#endif // OS_WIN
+
+ typedef StringType::value_type CharType;
+
+ // Null-terminated array of separators used to separate components in
+ // hierarchical paths. Each character in this array is a valid separator,
+ // but kSeparators[0] is treated as the canonical separator and will be used
+ // when composing pathnames.
+ static const CharType kSeparators[];
+
+ // A special path component meaning "this directory."
+ static const CharType kCurrentDirectory[];
+
+ // A special path component meaning "the parent directory."
+ static const CharType kParentDirectory[];
+
+ // The character used to identify a file extension.
+ static const CharType kExtensionSeparator;
+
+ FilePath() {}
+ FilePath(const FilePath& that) : path_(that.path_) {}
+ explicit FilePath(const StringType& path) : path_(path) {}
+
+#if defined(OS_WIN)
+ explicit FilePath(const wchar_t* path) : path_(path) {}
+#endif
+
+ FilePath& operator=(const FilePath& that) {
+ path_ = that.path_;
+ return *this;
+ }
+
+ bool operator==(const FilePath& that) const {
+ return path_ == that.path_;
+ }
+
+ bool operator!=(const FilePath& that) const {
+ return path_ != that.path_;
+ }
+
+ // Required for some STL containers and operations
+ bool operator<(const FilePath& that) const {
+ return path_ < that.path_;
+ }
+
+ const StringType& value() const { return path_; }
+
+ bool empty() const { return path_.empty(); }
+
+ // Returns true if |character| is in kSeparators.
+ static bool IsSeparator(CharType character);
+
+ // Returns a FilePath corresponding to the directory containing the path
+ // named by this object, stripping away the file component. If this object
+ // only contains one component, returns a FilePath identifying
+ // kCurrentDirectory. If this object already refers to the root directory,
+ // returns a FilePath identifying the root directory.
+ FilePath DirName() const;
+
+ // Returns a FilePath corresponding to the last path component of this
+ // object, either a file or a directory. If this object already refers to
+ // the root directory, returns a FilePath identifying the root directory;
+ // this is the only situation in which BaseName will return an absolute path.
+ FilePath BaseName() const;
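+  // Illustrative examples (not part of the original comments): for
+  // FILE_PATH_LITERAL("/foo/bar.txt"), DirName() yields "/foo" and BaseName()
+  // yields "bar.txt"; for a single component such as "bar.txt", DirName()
+  // yields "." and BaseName() returns the path unchanged.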
+
+ // Returns ".jpg" for path "C:\pics\jojo.jpg", or an empty string if
+ // the file has no extension. If non-empty, Extension() will always start
+ // with precisely one ".". The following code should always work regardless
+ // of the value of path.
+ // new_path = path.RemoveExtension().value().append(path.Extension());
+ // ASSERT(new_path == path.value());
+ // NOTE: this is different from the original file_util implementation which
+ // returned the extension without a leading "." ("jpg" instead of ".jpg")
+ StringType Extension() const;
+
+ // Returns "C:\pics\jojo" for path "C:\pics\jojo.jpg"
+ // NOTE: this is slightly different from the similar file_util implementation
+ // which returned simply 'jojo'.
+ FilePath RemoveExtension() const;
+
+ // Inserts |suffix| after the file name portion of |path| but before the
+ // extension. Returns "" if BaseName() == "." or "..".
+ // Examples:
+ // path == "C:\pics\jojo.jpg" suffix == " (1)", returns "C:\pics\jojo (1).jpg"
+ // path == "jojo.jpg" suffix == " (1)", returns "jojo (1).jpg"
+ // path == "C:\pics\jojo" suffix == " (1)", returns "C:\pics\jojo (1)"
+ // path == "C:\pics.old\jojo" suffix == " (1)", returns "C:\pics.old\jojo (1)"
+ FilePath InsertBeforeExtension(const StringType& suffix) const;
+
+ // Replaces the extension of |file_name| with |extension|. If |file_name|
+  // does not have an extension, then |extension| is added. If |extension| is
+ // empty, then the extension is removed from |file_name|.
+ // Returns "" if BaseName() == "." or "..".
+ FilePath ReplaceExtension(const StringType& extension) const;
+
+ // Returns a FilePath by appending a separator and the supplied path
+ // component to this object's path. Append takes care to avoid adding
+ // excessive separators if this object's path already ends with a separator.
+ // If this object's path is kCurrentDirectory, a new FilePath corresponding
+ // only to |component| is returned. |component| must be a relative path;
+ // it is an error to pass an absolute path.
+ FilePath Append(const StringType& component) const WARN_UNUSED_RESULT;
+ FilePath Append(const FilePath& component) const WARN_UNUSED_RESULT;
+
+ // Although Windows StringType is std::wstring, since the encoding it uses for
+ // paths is well defined, it can handle ASCII path components as well.
+ // Mac uses UTF8, and since ASCII is a subset of that, it works there as well.
+ // On Linux, although it can use any 8-bit encoding for paths, we assume that
+ // ASCII is a valid subset, regardless of the encoding, since many operating
+ // system paths will always be ASCII.
+ FilePath AppendASCII(const std::string& component) const WARN_UNUSED_RESULT;
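+  // Illustrative example (assuming a POSIX build, where StringType is
+  // std::string): FilePath(FILE_PATH_LITERAL("/tmp")).Append("cache") yields
+  // "/tmp/cache", while FilePath(FILE_PATH_LITERAL(".")).Append("cache")
+  // yields just "cache".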
+
+ // Returns true if this FilePath contains an absolute path. On Windows, an
+ // absolute path begins with either a drive letter specification followed by
+ // a separator character, or with two separator characters. On POSIX
+ // platforms, an absolute path begins with a separator character.
+ bool IsAbsolute() const;
+
+ // Returns a copy of this FilePath that does not end with a trailing
+ // separator.
+ FilePath StripTrailingSeparators() const;
+
+  // Calls open on the given ifstream instance.
+ void OpenInputStream(std::ifstream &stream) const;
+
+ // Older Chromium code assumes that paths are always wstrings.
+ // This function converts a wstring to a FilePath, and is useful to smooth
+ // porting that old code to the FilePath API.
+ // It has "Hack" in its name so people feel bad about using it.
+ // TODO(port): remove these functions.
+ static FilePath FromWStringHack(const std::wstring& wstring);
+
+ // Older Chromium code assumes that paths are always wstrings.
+ // This function produces a wstring from a FilePath, and is useful to smooth
+ // porting that old code to the FilePath API.
+ // It has "Hack" in its name so people feel bad about using it.
+ // TODO(port): remove these functions.
+ std::wstring ToWStringHack() const;
+
+ private:
+ // Remove trailing separators from this object. If the path is absolute, it
+ // will never be stripped any more than to refer to the absolute root
+ // directory, so "////" will become "/", not "". A leading pair of
+ // separators is never stripped, to support alternate roots. This is used to
+ // support UNC paths on Windows.
+ void StripTrailingSeparatorsInternal();
+
+ StringType path_;
+};
+
+// Macros for string literal initialization of FilePath::CharType[].
+#if defined(OS_POSIX)
+#define FILE_PATH_LITERAL(x) x
+#elif defined(OS_WIN)
+#define FILE_PATH_LITERAL(x) L ## x
+#endif // OS_WIN
+
+// Implement hash function so that we can use FilePaths in hashsets and maps.
+#if defined(COMPILER_GCC) && !defined(ANDROID)
+namespace __gnu_cxx {
+
+template<>
+struct hash<FilePath> {
+ size_t operator()(const FilePath& f) const {
+ return hash<FilePath::StringType>()(f.value());
+ }
+};
+
+} // namespace __gnu_cxx
+#elif defined(COMPILER_MSVC)
+namespace stdext {
+
+inline size_t hash_value(const FilePath& f) {
+ return hash_value(f.value());
+}
+
+} // namespace stdext
+#endif // COMPILER
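+
+// Illustrative use of the hash support above (base::hash_set is assumed to be
+// the container provided by base/hash_tables.h):
+//   base::hash_set<FilePath> visited;
+//   visited.insert(FilePath(FILE_PATH_LITERAL("/tmp")));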
+
+#endif // BASE_FILE_PATH_H_
diff --git a/ipc/chromium/src/base/file_util.cc b/ipc/chromium/src/base/file_util.cc
new file mode 100644
index 000000000..6f7d92f3a
--- /dev/null
+++ b/ipc/chromium/src/base/file_util.cc
@@ -0,0 +1,244 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/file_util.h"
+
+#if defined(OS_WIN)
+#include <io.h>
+#endif
+#include <stdio.h>
+#if defined(ANDROID) || defined(OS_POSIX)
+#include <unistd.h>
+#endif
+
+#include <fstream>
+
+#include "base/file_path.h"
+#include "base/logging.h"
+#include "base/string_util.h"
+
+#include "base/string_piece.h"
+#include "base/sys_string_conversions.h"
+
+namespace {
+
+const FilePath::CharType kExtensionSeparator = FILE_PATH_LITERAL('.');
+
+} // namespace
+
+namespace file_util {
+
+bool EndsWithSeparator(const FilePath& path) {
+ FilePath::StringType value = path.value();
+ if (value.empty())
+ return false;
+
+ return FilePath::IsSeparator(value[value.size() - 1]);
+}
+
+void TrimTrailingSeparator(std::wstring* dir) {
+ while (dir->length() > 1 && EndsWithSeparator(dir))
+ dir->resize(dir->length() - 1);
+}
+
+FilePath::StringType GetFileExtensionFromPath(const FilePath& path) {
+ FilePath::StringType file_name = path.BaseName().value();
+ const FilePath::StringType::size_type last_dot =
+ file_name.rfind(kExtensionSeparator);
+ return FilePath::StringType(last_dot == FilePath::StringType::npos ?
+ FILE_PATH_LITERAL("") :
+ file_name, last_dot+1);
+}
+
+void InsertBeforeExtension(FilePath* path, const FilePath::StringType& suffix) {
+ FilePath::StringType& value =
+ const_cast<FilePath::StringType&>(path->value());
+
+ const FilePath::StringType::size_type last_dot =
+ value.rfind(kExtensionSeparator);
+ const FilePath::StringType::size_type last_separator =
+ value.find_last_of(FilePath::StringType(FilePath::kSeparators));
+
+ if (last_dot == FilePath::StringType::npos ||
+ (last_separator != std::wstring::npos && last_dot < last_separator)) {
+ // The path looks something like "C:\pics.old\jojo" or "C:\pics\jojo".
+ // We should just append the suffix to the entire path.
+ value.append(suffix);
+ return;
+ }
+
+ value.insert(last_dot, suffix);
+}
+
+void ReplaceExtension(FilePath* path, const FilePath::StringType& extension) {
+ FilePath::StringType clean_extension;
+ // If the new extension is "" or ".", then we will just remove the current
+ // extension.
+ if (!extension.empty() &&
+ extension != FilePath::StringType(&kExtensionSeparator, 1)) {
+ if (extension[0] != kExtensionSeparator)
+ clean_extension.append(&kExtensionSeparator, 1);
+ clean_extension.append(extension);
+ }
+
+ FilePath::StringType& value =
+ const_cast<FilePath::StringType&>(path->value());
+ const FilePath::StringType::size_type last_dot =
+ value.rfind(kExtensionSeparator);
+ const FilePath::StringType::size_type last_separator =
+ value.find_last_of(FilePath::StringType(FilePath::kSeparators));
+
+ // Erase the current extension, if any.
+ if ((last_dot > last_separator ||
+ last_separator == FilePath::StringType::npos) &&
+ last_dot != FilePath::StringType::npos)
+ value.erase(last_dot);
+
+ value.append(clean_extension);
+}
+
+FILE* CreateAndOpenTemporaryFile(FilePath* path) {
+ FilePath directory;
+ if (!GetTempDir(&directory))
+ return NULL;
+
+ return CreateAndOpenTemporaryFileInDir(directory, path);
+}
+
+bool GetFileSize(const FilePath& file_path, int64_t* file_size) {
+ FileInfo info;
+ if (!GetFileInfo(file_path, &info))
+ return false;
+ *file_size = info.size;
+ return true;
+}
+
+bool CloseFile(FILE* file) {
+ if (file == NULL)
+ return true;
+ return fclose(file) == 0;
+}
+
+// Deprecated functions ----------------------------------------------------
+
+bool AbsolutePath(std::wstring* path_str) {
+ FilePath path(FilePath::FromWStringHack(*path_str));
+ if (!AbsolutePath(&path))
+ return false;
+ *path_str = path.ToWStringHack();
+ return true;
+}
+void AppendToPath(std::wstring* path, const std::wstring& new_ending) {
+ if (!path) {
+ NOTREACHED();
+ return; // Don't crash in this function in release builds.
+ }
+
+ if (!EndsWithSeparator(path))
+ path->push_back(FilePath::kSeparators[0]);
+ path->append(new_ending);
+}
+bool CopyFile(const std::wstring& from_path, const std::wstring& to_path) {
+ return CopyFile(FilePath::FromWStringHack(from_path),
+ FilePath::FromWStringHack(to_path));
+}
+bool CreateDirectory(const std::wstring& full_path) {
+ return CreateDirectory(FilePath::FromWStringHack(full_path));
+}
+bool CreateNewTempDirectory(const std::wstring& prefix,
+ std::wstring* new_temp_path) {
+#if defined(OS_WIN)
+ FilePath::StringType dir_prefix(prefix);
+#elif defined(OS_POSIX)
+ FilePath::StringType dir_prefix = WideToUTF8(prefix);
+#endif
+ FilePath temp_path;
+ if (!CreateNewTempDirectory(dir_prefix, &temp_path))
+ return false;
+ *new_temp_path = temp_path.ToWStringHack();
+ return true;
+}
+bool CreateTemporaryFileName(std::wstring* temp_file) {
+ FilePath temp_file_path;
+ if (!CreateTemporaryFileName(&temp_file_path))
+ return false;
+ *temp_file = temp_file_path.ToWStringHack();
+ return true;
+}
+bool Delete(const std::wstring& path) {
+ return Delete(FilePath::FromWStringHack(path));
+}
+bool DirectoryExists(const std::wstring& path) {
+ return DirectoryExists(FilePath::FromWStringHack(path));
+}
+bool EndsWithSeparator(std::wstring* path) {
+ return EndsWithSeparator(FilePath::FromWStringHack(*path));
+}
+bool EndsWithSeparator(const std::wstring& path) {
+ return EndsWithSeparator(FilePath::FromWStringHack(path));
+}
+bool GetCurrentDirectory(std::wstring* path_str) {
+ FilePath path;
+ if (!GetCurrentDirectory(&path))
+ return false;
+ *path_str = path.ToWStringHack();
+ return true;
+}
+std::wstring GetFileExtensionFromPath(const std::wstring& path) {
+ FilePath::StringType extension =
+ GetFileExtensionFromPath(FilePath::FromWStringHack(path));
+#if defined(OS_WIN)
+ return extension;
+#elif defined(OS_POSIX)
+ return UTF8ToWide(extension);
+#endif
+}
+bool GetFileInfo(const std::wstring& file_path, FileInfo* results) {
+ return GetFileInfo(FilePath::FromWStringHack(file_path), results);
+}
+std::wstring GetFilenameFromPath(const std::wstring& path) {
+ if (path.empty() || EndsWithSeparator(path))
+ return std::wstring();
+
+ return FilePath::FromWStringHack(path).BaseName().ToWStringHack();
+}
+bool GetFileSize(const std::wstring& file_path, int64_t* file_size) {
+ return GetFileSize(FilePath::FromWStringHack(file_path), file_size);
+}
+bool GetTempDir(std::wstring* path_str) {
+ FilePath path;
+ if (!GetTempDir(&path))
+ return false;
+ *path_str = path.ToWStringHack();
+ return true;
+}
+FILE* OpenFile(const std::wstring& filename, const char* mode) {
+ return OpenFile(FilePath::FromWStringHack(filename), mode);
+}
+bool PathExists(const std::wstring& path) {
+ return PathExists(FilePath::FromWStringHack(path));
+}
+bool PathIsWritable(const std::wstring& path) {
+ return PathIsWritable(FilePath::FromWStringHack(path));
+}
+int ReadFile(const std::wstring& filename, char* data, int size) {
+ return ReadFile(FilePath::FromWStringHack(filename), data, size);
+}
+bool SetCurrentDirectory(const std::wstring& directory) {
+ return SetCurrentDirectory(FilePath::FromWStringHack(directory));
+}
+void UpOneDirectory(std::wstring* dir) {
+ FilePath path = FilePath::FromWStringHack(*dir);
+ FilePath directory = path.DirName();
+ // If there is no separator, we will get back kCurrentDirectory.
+ // In this case don't change |dir|.
+ if (directory.value() != FilePath::kCurrentDirectory)
+ *dir = directory.ToWStringHack();
+}
+int WriteFile(const std::wstring& filename, const char* data, int size) {
+ return WriteFile(FilePath::FromWStringHack(filename), data, size);
+}
+} // namespace
diff --git a/ipc/chromium/src/base/file_util.h b/ipc/chromium/src/base/file_util.h
new file mode 100644
index 000000000..c6bcc1895
--- /dev/null
+++ b/ipc/chromium/src/base/file_util.h
@@ -0,0 +1,229 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file contains utility functions for dealing with the local
+// filesystem.
+
+#ifndef BASE_FILE_UTIL_H_
+#define BASE_FILE_UTIL_H_
+
+#include "build/build_config.h"
+
+#if defined(OS_WIN)
+#include <windows.h>
+#elif defined(ANDROID)
+#include <sys/stat.h>
+#elif defined(OS_POSIX)
+#include <sys/types.h>
+#include <sys/stat.h>
+#endif
+
+#include <stdio.h>
+
+#include <stack>
+#include <string>
+#include <vector>
+
+#include "base/basictypes.h"
+#include "base/file_path.h"
+
+namespace file_util {
+
+//-----------------------------------------------------------------------------
+// Functions that operate purely on a path string w/o touching the filesystem:
+
+// Returns true if the given path ends with a path separator character.
+bool EndsWithSeparator(const FilePath& path);
+// These two versions are both deprecated. TODO(estade): remove them.
+bool EndsWithSeparator(std::wstring* path);
+bool EndsWithSeparator(const std::wstring& path);
+
+// Modifies a string by trimming all trailing separators from the end.
+// Deprecated. FilePath does this automatically, and if it's constructed from a
+// path with a trailing separator, StripTrailingSeparators() may be used.
+void TrimTrailingSeparator(std::wstring* dir);
+
+// Strips the topmost directory from the end of 'dir'. Assumes 'dir' does not
+// refer to a file.
+// If 'dir' is a root directory, it is left unchanged.
+// Deprecated. Use FilePath::DirName instead.
+void UpOneDirectory(std::wstring* dir);
+
+// Returns the filename portion of 'path', without any leading \'s or /'s.
+// Deprecated. Use FilePath::BaseName instead.
+std::wstring GetFilenameFromPath(const std::wstring& path);
+
+// Deprecated compatibility function. Use FilePath::Extension.
+FilePath::StringType GetFileExtensionFromPath(const FilePath& path);
+// Deprecated temporary compatibility function.
+std::wstring GetFileExtensionFromPath(const std::wstring& path);
+
+// Appends new_ending to path, adding a separator between the two if necessary.
+void AppendToPath(std::wstring* path, const std::wstring& new_ending);
+
+// Convert provided relative path into an absolute path. Returns false on
+// error. On POSIX, this function fails if the path does not exist.
+bool AbsolutePath(FilePath* path);
+// Deprecated temporary compatibility function.
+bool AbsolutePath(std::wstring* path);
+
+// Deprecated compatibility function. Use FilePath::InsertBeforeExtension.
+void InsertBeforeExtension(FilePath* path, const FilePath::StringType& suffix);
+
+// Deprecated compatibility function. Use FilePath::ReplaceExtension.
+void ReplaceExtension(FilePath* file_name,
+ const FilePath::StringType& extension);
+
+#if defined(OS_WIN)
+// Deprecated temporary compatibility functions.
+void InsertBeforeExtension(std::wstring* path, const std::wstring& suffix);
+void ReplaceExtension(std::wstring* file_name, const std::wstring& extension);
+#endif
+
+//-----------------------------------------------------------------------------
+// Functions that involve filesystem access or modification:
+
+// Deletes the given path, whether it's a file or a directory.
+// If it's a directory, it's perfectly happy to delete all of the
+// directory's contents.
+// Returns true if successful, false otherwise.
+bool Delete(const FilePath& path);
+// Deprecated temporary compatibility function.
+bool Delete(const std::wstring& path);
+
+// Copies a single file. Use CopyDirectory to copy directories.
+bool CopyFile(const FilePath& from_path, const FilePath& to_path);
+// Deprecated temporary compatibility function.
+bool CopyFile(const std::wstring& from_path, const std::wstring& to_path);
+
+// Returns true if the given path exists on the local filesystem,
+// false otherwise.
+bool PathExists(const FilePath& path);
+// Deprecated temporary compatibility function.
+bool PathExists(const std::wstring& path);
+
+// Returns true if the given path is writable by the user, false otherwise.
+bool PathIsWritable(const FilePath& path);
+// Deprecated temporary compatibility function.
+bool PathIsWritable(const std::wstring& path);
+
+// Returns true if the given path exists and is a directory, false otherwise.
+bool DirectoryExists(const FilePath& path);
+// Deprecated temporary compatibility function.
+bool DirectoryExists(const std::wstring& path);
+
+#if defined(OS_POSIX)
+// Read exactly |bytes| bytes from file descriptor |fd|, storing the result
+// in |buffer|. This function is protected against EINTR and partial reads.
+// Returns true iff |bytes| bytes have been successfully read from |fd|.
+bool ReadFromFD(int fd, char* buffer, size_t bytes);
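+// Illustrative usage (pipe_fd is a hypothetical, already-open descriptor):
+//   char header[8];
+//   if (!file_util::ReadFromFD(pipe_fd, header, sizeof(header)))
+//     return false;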
+#endif // defined(OS_POSIX)
+
+// Get the temporary directory provided by the system.
+bool GetTempDir(FilePath* path);
+// Deprecated temporary compatibility function.
+bool GetTempDir(std::wstring* path);
+// Get a temporary directory for shared memory files.
+// Only useful on POSIX; redirects to GetTempDir() on Windows.
+bool GetShmemTempDir(FilePath* path);
+
+// Creates a temporary file. The full path is placed in |path|, and the
+// function returns true if it was successful in creating the file. The file
+// will
+// be empty and all handles closed after this function returns.
+// TODO(erikkay): rename this function and track down all of the callers.
+// (Clarification of erik's comment: the intent is to rename the BlahFileName()
+// calls into BlahFile(), since they create temp files (not temp filenames).)
+bool CreateTemporaryFileName(FilePath* path);
+// Deprecated temporary compatibility function.
+bool CreateTemporaryFileName(std::wstring* temp_file);
+
+// Create and open a temporary file. File is opened for read/write.
+// The full path is placed in |path|, and the function returns true if
+// it was successful in creating and opening the file.
+FILE* CreateAndOpenTemporaryFile(FilePath* path);
+// Like above but for shmem files. Only useful for POSIX.
+FILE* CreateAndOpenTemporaryShmemFile(FilePath* path);
+
+// Similar to CreateAndOpenTemporaryFile, but the file is created in |dir|.
+FILE* CreateAndOpenTemporaryFileInDir(const FilePath& dir, FilePath* path);
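+
+// Illustrative usage of the temporary-file helpers above (error handling
+// trimmed):
+//   FilePath temp_path;
+//   FILE* file = file_util::CreateAndOpenTemporaryFile(&temp_path);
+//   if (file) {
+//     fputs("scratch data", file);
+//     file_util::CloseFile(file);
+//   }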
+
+// Same as CreateTemporaryFileName but the file is created in |dir|.
+bool CreateTemporaryFileNameInDir(const std::wstring& dir,
+ std::wstring* temp_file);
+
+// Create a new directory under TempPath. If prefix is provided, the new
+// directory name is in the format of prefixyyyy.
+// NOTE: prefix is ignored in the POSIX implementation.
+// TODO(erikkay): is this OK?
+// On success, returns true and outputs the full path of the directory created.
+bool CreateNewTempDirectory(const FilePath::StringType& prefix,
+ FilePath* new_temp_path);
+// Deprecated temporary compatibility function.
+bool CreateNewTempDirectory(const std::wstring& prefix,
+ std::wstring* new_temp_path);
+
+// Creates a directory, as well as creating any parent directories, if they
+// don't exist. Returns 'true' on successful creation, or if the directory
+// already exists.
+bool CreateDirectory(const FilePath& full_path);
+// Deprecated temporary compatibility function.
+bool CreateDirectory(const std::wstring& full_path);
+
+// Returns the file size. Returns true on success.
+bool GetFileSize(const FilePath& file_path, int64_t* file_size);
+// Deprecated temporary compatibility function.
+bool GetFileSize(const std::wstring& file_path, int64_t* file_size);
+
+// Used to hold information about a given file path. See GetFileInfo below.
+struct FileInfo {
+ // The size of the file in bytes. Undefined when is_directory is true.
+ int64_t size;
+
+ // True if the file corresponds to a directory.
+ bool is_directory;
+
+ // Add additional fields here as needed.
+};
+
+// Returns information about the given file path.
+bool GetFileInfo(const FilePath& file_path, FileInfo* info);
+// Deprecated temporary compatibility function.
+bool GetFileInfo(const std::wstring& file_path, FileInfo* info);
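+
+// Illustrative usage of GetFileInfo (ProcessFile is a hypothetical caller):
+//   file_util::FileInfo info;
+//   if (file_util::GetFileInfo(path, &info) && !info.is_directory)
+//     ProcessFile(path, info.size);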
+
+// Wrapper for fopen-like calls. Returns non-NULL FILE* on success.
+FILE* OpenFile(const FilePath& filename, const char* mode);
+// Deprecated temporary compatibility functions.
+FILE* OpenFile(const std::string& filename, const char* mode);
+FILE* OpenFile(const std::wstring& filename, const char* mode);
+
+// Closes file opened by OpenFile. Returns true on success.
+bool CloseFile(FILE* file);
+
+// Reads the given number of bytes from the file into the buffer. Returns
+// the number of bytes read, or -1 on error.
+int ReadFile(const FilePath& filename, char* data, int size);
+// Deprecated temporary compatibility function.
+int ReadFile(const std::wstring& filename, char* data, int size);
+
+// Writes the given buffer into the file, overwriting any data that was
+// previously there. Returns the number of bytes written, or -1 on error.
+int WriteFile(const FilePath& filename, const char* data, int size);
+// Deprecated temporary compatibility function.
+int WriteFile(const std::wstring& filename, const char* data, int size);
+
+// Gets the current working directory for the process.
+bool GetCurrentDirectory(FilePath* path);
+// Deprecated temporary compatibility function.
+bool GetCurrentDirectory(std::wstring* path);
+
+// Sets the current working directory for the process.
+bool SetCurrentDirectory(const FilePath& path);
+// Deprecated temporary compatibility function.
+bool SetCurrentDirectory(const std::wstring& current_directory);
+
+} // namespace file_util
+
+#endif // BASE_FILE_UTIL_H_
diff --git a/ipc/chromium/src/base/file_util_mac.mm b/ipc/chromium/src/base/file_util_mac.mm
new file mode 100644
index 000000000..f801e7d7f
--- /dev/null
+++ b/ipc/chromium/src/base/file_util_mac.mm
@@ -0,0 +1,35 @@
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/file_util.h"
+
+#import <Cocoa/Cocoa.h>
+#include <copyfile.h>
+
+#include "base/file_path.h"
+#include "base/logging.h"
+#include "base/string_util.h"
+#include "base/scoped_nsautorelease_pool.h"
+
+namespace file_util {
+
+bool GetTempDir(FilePath* path) {
+ base::ScopedNSAutoreleasePool autorelease_pool;
+ NSString* tmp = NSTemporaryDirectory();
+ if (tmp == nil)
+ return false;
+ *path = FilePath([tmp fileSystemRepresentation]);
+ return true;
+}
+
+bool GetShmemTempDir(FilePath* path) {
+ return GetTempDir(path);
+}
+
+bool CopyFile(const FilePath& from_path, const FilePath& to_path) {
+ return (copyfile(from_path.value().c_str(),
+ to_path.value().c_str(), NULL, COPYFILE_ALL) == 0);
+}
+
+} // namespace
diff --git a/ipc/chromium/src/base/file_util_posix.cc b/ipc/chromium/src/base/file_util_posix.cc
new file mode 100644
index 000000000..d64897404
--- /dev/null
+++ b/ipc/chromium/src/base/file_util_posix.cc
@@ -0,0 +1,337 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/file_util.h"
+
+#include <dirent.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <fnmatch.h>
+#include <libgen.h>
+#include <stdio.h>
+#include <string.h>
+#include <errno.h>
+#include <sys/mman.h>
+#define _DARWIN_USE_64_BIT_INODE // Use 64-bit inode data structures
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <time.h>
+#include <unistd.h>
+
+#include <fstream>
+#include <string>
+#include <vector>
+
+#include "base/basictypes.h"
+#include "base/eintr_wrapper.h"
+#include "base/file_path.h"
+#include "base/logging.h"
+#include "base/string_util.h"
+#include "base/time.h"
+
+namespace file_util {
+
+#if defined(GOOGLE_CHROME_BUILD)
+static const char* kTempFileName = "com.google.chrome.XXXXXX";
+#else
+static const char* kTempFileName = "org.chromium.XXXXXX";
+#endif
+
+bool AbsolutePath(FilePath* path) {
+ char full_path[PATH_MAX];
+ if (realpath(path->value().c_str(), full_path) == NULL)
+ return false;
+ *path = FilePath(full_path);
+ return true;
+}
+
+// TODO(erikkay): The Windows version of this accepts paths like "foo/bar/*"
+// which works both with and without the recursive flag. I'm not sure we need
+// that functionality. If not, remove from file_util_win.cc, otherwise add it
+// here.
+bool Delete(const FilePath& path) {
+ const char* path_str = path.value().c_str();
+ struct stat file_info;
+ int test = stat(path_str, &file_info);
+ if (test != 0) {
+ // The Windows version defines this condition as success.
+ bool ret = (errno == ENOENT || errno == ENOTDIR);
+ return ret;
+ }
+ if (!S_ISDIR(file_info.st_mode))
+ return (unlink(path_str) == 0);
+
+ return (rmdir(path_str) == 0);
+}
+
+bool PathExists(const FilePath& path) {
+ struct stat file_info;
+ return (stat(path.value().c_str(), &file_info) == 0);
+}
+
+bool PathIsWritable(const FilePath& path) {
+ FilePath test_path(path);
+ struct stat file_info;
+ if (stat(test_path.value().c_str(), &file_info) != 0) {
+ // If the path doesn't exist, test the parent dir.
+ test_path = test_path.DirName();
+ // If the parent dir doesn't exist, then return false (the path is not
+ // directly writable).
+ if (stat(test_path.value().c_str(), &file_info) != 0)
+ return false;
+ }
+ if (S_IWOTH & file_info.st_mode)
+ return true;
+ if (getegid() == file_info.st_gid && (S_IWGRP & file_info.st_mode))
+ return true;
+ if (geteuid() == file_info.st_uid && (S_IWUSR & file_info.st_mode))
+ return true;
+ return false;
+}
+
+bool DirectoryExists(const FilePath& path) {
+ struct stat file_info;
+ if (stat(path.value().c_str(), &file_info) == 0)
+ return S_ISDIR(file_info.st_mode);
+ return false;
+}
+
+bool ReadFromFD(int fd, char* buffer, size_t bytes) {
+ size_t total_read = 0;
+ while (total_read < bytes) {
+ ssize_t bytes_read =
+ HANDLE_EINTR(read(fd, buffer + total_read, bytes - total_read));
+ if (bytes_read <= 0)
+ break;
+ total_read += bytes_read;
+ }
+ return total_read == bytes;
+}
+
+// Creates and opens a temporary file in |directory|, returning the
+// file descriptor. |path| is set to the temporary file path.
+// Note TODO(erikkay) comment in header for BlahFileName() calls; the
+// intent is to rename these files BlahFile() (since they create
+// files, not filenames). This function does NOT unlink() the file.
+int CreateAndOpenFdForTemporaryFile(FilePath directory, FilePath* path) {
+ *path = directory.Append(kTempFileName);
+ const std::string& tmpdir_string = path->value();
+ // this should be OK since mkstemp just replaces characters in place
+ char* buffer = const_cast<char*>(tmpdir_string.c_str());
+
+ return mkstemp(buffer);
+}
+
+bool CreateTemporaryFileName(FilePath* path) {
+ FilePath directory;
+ if (!GetTempDir(&directory))
+ return false;
+ int fd = CreateAndOpenFdForTemporaryFile(directory, path);
+ if (fd < 0)
+ return false;
+ close(fd);
+ return true;
+}
+
+FILE* CreateAndOpenTemporaryShmemFile(FilePath* path) {
+ FilePath directory;
+ if (!GetShmemTempDir(&directory))
+ return NULL;
+
+ return CreateAndOpenTemporaryFileInDir(directory, path);
+}
+
+FILE* CreateAndOpenTemporaryFileInDir(const FilePath& dir, FilePath* path) {
+ int fd = CreateAndOpenFdForTemporaryFile(dir, path);
+ if (fd < 0)
+ return NULL;
+
+ return fdopen(fd, "a+");
+}
+
+bool CreateTemporaryFileNameInDir(const std::wstring& dir,
+ std::wstring* temp_file) {
+ // Not implemented yet.
+ NOTREACHED();
+ return false;
+}
+
+bool CreateNewTempDirectory(const FilePath::StringType& prefix,
+ FilePath* new_temp_path) {
+ FilePath tmpdir;
+ if (!GetTempDir(&tmpdir))
+ return false;
+ tmpdir = tmpdir.Append(kTempFileName);
+ std::string tmpdir_string = tmpdir.value();
+#ifdef ANDROID
+ char* dtemp = NULL;
+#else
+ // this should be OK since mkdtemp just replaces characters in place
+ char* buffer = const_cast<char*>(tmpdir_string.c_str());
+ char* dtemp = mkdtemp(buffer);
+#endif
+ if (!dtemp)
+ return false;
+ *new_temp_path = FilePath(dtemp);
+ return true;
+}
+
+bool CreateDirectory(const FilePath& full_path) {
+ std::vector<FilePath> subpaths;
+
+ // Collect a list of all parent directories.
+ FilePath last_path = full_path;
+ subpaths.push_back(full_path);
+ for (FilePath path = full_path.DirName();
+ path.value() != last_path.value(); path = path.DirName()) {
+ subpaths.push_back(path);
+ last_path = path;
+ }
+
+ // Iterate through the parents and create the missing ones.
+ for (std::vector<FilePath>::reverse_iterator i = subpaths.rbegin();
+ i != subpaths.rend(); ++i) {
+ if (!DirectoryExists(*i)) {
+ if (mkdir(i->value().c_str(), 0777) != 0)
+ return false;
+ }
+ }
+ return true;
+}
+
+bool GetFileInfo(const FilePath& file_path, FileInfo* results) {
+ struct stat file_info;
+ if (stat(file_path.value().c_str(), &file_info) != 0)
+ return false;
+ results->is_directory = S_ISDIR(file_info.st_mode);
+ results->size = file_info.st_size;
+ return true;
+}
+
+FILE* OpenFile(const std::string& filename, const char* mode) {
+ return OpenFile(FilePath(filename), mode);
+}
+
+FILE* OpenFile(const FilePath& filename, const char* mode) {
+ return fopen(filename.value().c_str(), mode);
+}
+
+int ReadFile(const FilePath& filename, char* data, int size) {
+ int fd = open(filename.value().c_str(), O_RDONLY);
+ if (fd < 0)
+ return -1;
+
+ int ret_value = HANDLE_EINTR(read(fd, data, size));
+ HANDLE_EINTR(close(fd));
+ return ret_value;
+}
+
+int WriteFile(const FilePath& filename, const char* data, int size) {
+ int fd = creat(filename.value().c_str(), 0666);
+ if (fd < 0)
+ return -1;
+
+ // Allow for partial writes
+ ssize_t bytes_written_total = 0;
+ do {
+ ssize_t bytes_written_partial =
+ HANDLE_EINTR(write(fd, data + bytes_written_total,
+ size - bytes_written_total));
+ if (bytes_written_partial < 0) {
+ HANDLE_EINTR(close(fd));
+ return -1;
+ }
+ bytes_written_total += bytes_written_partial;
+ } while (bytes_written_total < size);
+
+ HANDLE_EINTR(close(fd));
+ return bytes_written_total;
+}
+
+// Gets the current working directory for the process.
+bool GetCurrentDirectory(FilePath* dir) {
+ char system_buffer[PATH_MAX] = "";
+ if (!getcwd(system_buffer, sizeof(system_buffer))) {
+ NOTREACHED();
+ return false;
+ }
+ *dir = FilePath(system_buffer);
+ return true;
+}
+
+// Sets the current working directory for the process.
+bool SetCurrentDirectory(const FilePath& path) {
+ int ret = chdir(path.value().c_str());
+ return !ret;
+}
+
+#if !defined(OS_MACOSX)
+bool GetTempDir(FilePath* path) {
+ const char* tmp = getenv("TMPDIR");
+ if (tmp)
+ *path = FilePath(tmp);
+ else
+ *path = FilePath("/tmp");
+ return true;
+}
+
+bool GetShmemTempDir(FilePath* path) {
+#if defined(OS_LINUX) && !defined(ANDROID)
+ *path = FilePath("/dev/shm");
+ return true;
+#else
+ return GetTempDir(path);
+#endif
+}
+
+bool CopyFile(const FilePath& from_path, const FilePath& to_path) {
+ int infile = open(from_path.value().c_str(), O_RDONLY);
+ if (infile < 0)
+ return false;
+
+ int outfile = creat(to_path.value().c_str(), 0666);
+ if (outfile < 0) {
+ close(infile);
+ return false;
+ }
+
+ const size_t kBufferSize = 32768;
+ std::vector<char> buffer(kBufferSize);
+ bool result = true;
+
+ while (result) {
+ ssize_t bytes_read = HANDLE_EINTR(read(infile, &buffer[0], buffer.size()));
+ if (bytes_read < 0) {
+ result = false;
+ break;
+ }
+ if (bytes_read == 0)
+ break;
+ // Allow for partial writes
+ ssize_t bytes_written_per_read = 0;
+ do {
+ ssize_t bytes_written_partial = HANDLE_EINTR(write(
+ outfile,
+ &buffer[bytes_written_per_read],
+ bytes_read - bytes_written_per_read));
+ if (bytes_written_partial < 0) {
+ result = false;
+ break;
+ }
+ bytes_written_per_read += bytes_written_partial;
+ } while (bytes_written_per_read < bytes_read);
+ }
+
+ if (HANDLE_EINTR(close(infile)) < 0)
+ result = false;
+ if (HANDLE_EINTR(close(outfile)) < 0)
+ result = false;
+
+ return result;
+}
+#endif // !defined(OS_MACOSX)
+
+} // namespace file_util
diff --git a/ipc/chromium/src/base/file_util_win.cc b/ipc/chromium/src/base/file_util_win.cc
new file mode 100644
index 000000000..fd3fdbeeb
--- /dev/null
+++ b/ipc/chromium/src/base/file_util_win.cc
@@ -0,0 +1,356 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/file_util.h"
+
+#include <windows.h>
+#include <shellapi.h>
+#include <shlobj.h>
+#include <time.h>
+#include <string>
+
+#include "base/file_path.h"
+#include "base/logging.h"
+#include "base/scoped_handle.h"
+#include "base/string_util.h"
+#include "base/time.h"
+#include "base/win_util.h"
+
+namespace file_util {
+
+bool AbsolutePath(FilePath* path) {
+ wchar_t file_path_buf[MAX_PATH];
+ if (!_wfullpath(file_path_buf, path->value().c_str(), MAX_PATH))
+ return false;
+ *path = FilePath(file_path_buf);
+ return true;
+}
+
+bool Delete(const FilePath& path) {
+ if (path.value().length() >= MAX_PATH)
+ return false;
+
+ // Use DeleteFile; it should be faster. DeleteFile
+ // fails if passed a directory though, which is why we fall through on
+ // failure to the SHFileOperation.
+ if (DeleteFile(path.value().c_str()) != 0)
+ return true;
+
+ // SHFILEOPSTRUCT wants the path to be terminated with two NULLs,
+ // so we have to use wcscpy because wcscpy_s writes non-NULLs
+ // into the rest of the buffer.
+ wchar_t double_terminated_path[MAX_PATH + 1] = {0};
+#pragma warning(suppress:4996) // don't complain about wcscpy deprecation
+ wcscpy(double_terminated_path, path.value().c_str());
+
+ SHFILEOPSTRUCT file_operation = {0};
+ file_operation.wFunc = FO_DELETE;
+ file_operation.pFrom = double_terminated_path;
+ file_operation.fFlags = FOF_NOERRORUI | FOF_SILENT | FOF_NOCONFIRMATION;
+ file_operation.fFlags |= FOF_NORECURSION | FOF_FILESONLY;
+ int err = SHFileOperation(&file_operation);
+ // Some versions of Windows return ERROR_FILE_NOT_FOUND when
+ // deleting an empty directory.
+ return (err == 0 || err == ERROR_FILE_NOT_FOUND);
+}
+
+bool CopyFile(const FilePath& from_path, const FilePath& to_path) {
+ // NOTE: I suspect we could support longer paths, but that would involve
+ // analyzing all our usage of files.
+ if (from_path.value().length() >= MAX_PATH ||
+ to_path.value().length() >= MAX_PATH) {
+ return false;
+ }
+ return (::CopyFile(from_path.value().c_str(), to_path.value().c_str(),
+ false) != 0);
+}
+
+bool ShellCopy(const FilePath& from_path, const FilePath& to_path,
+ bool recursive) {
+ // NOTE: I suspect we could support longer paths, but that would involve
+ // analyzing all our usage of files.
+ if (from_path.value().length() >= MAX_PATH ||
+ to_path.value().length() >= MAX_PATH) {
+ return false;
+ }
+
+ // SHFILEOPSTRUCT wants the path to be terminated with two NULLs,
+ // so we have to use wcscpy because wcscpy_s writes non-NULLs
+ // into the rest of the buffer.
+ wchar_t double_terminated_path_from[MAX_PATH + 1] = {0};
+ wchar_t double_terminated_path_to[MAX_PATH + 1] = {0};
+#pragma warning(suppress:4996) // don't complain about wcscpy deprecation
+ wcscpy(double_terminated_path_from, from_path.value().c_str());
+#pragma warning(suppress:4996) // don't complain about wcscpy deprecation
+ wcscpy(double_terminated_path_to, to_path.value().c_str());
+
+ SHFILEOPSTRUCT file_operation = {0};
+ file_operation.wFunc = FO_COPY;
+ file_operation.pFrom = double_terminated_path_from;
+ file_operation.pTo = double_terminated_path_to;
+ file_operation.fFlags = FOF_NOERRORUI | FOF_SILENT | FOF_NOCONFIRMATION |
+ FOF_NOCONFIRMMKDIR;
+ if (!recursive)
+ file_operation.fFlags |= FOF_NORECURSION | FOF_FILESONLY;
+
+ return (SHFileOperation(&file_operation) == 0);
+}
+
+bool PathExists(const FilePath& path) {
+ return (GetFileAttributes(path.value().c_str()) != INVALID_FILE_ATTRIBUTES);
+}
+
+bool PathIsWritable(const FilePath& path) {
+ HANDLE dir =
+ CreateFile(path.value().c_str(), FILE_ADD_FILE,
+ FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE,
+ NULL, OPEN_EXISTING, FILE_FLAG_BACKUP_SEMANTICS, NULL);
+
+ if (dir == INVALID_HANDLE_VALUE)
+ return false;
+
+ CloseHandle(dir);
+ return true;
+}
+
+bool DirectoryExists(const FilePath& path) {
+ DWORD fileattr = GetFileAttributes(path.value().c_str());
+ if (fileattr != INVALID_FILE_ATTRIBUTES)
+ return (fileattr & FILE_ATTRIBUTE_DIRECTORY) != 0;
+ return false;
+}
+
+bool GetTempDir(FilePath* path) {
+ wchar_t temp_path[MAX_PATH + 1];
+ DWORD path_len = ::GetTempPath(MAX_PATH, temp_path);
+ if (path_len >= MAX_PATH || path_len <= 0)
+ return false;
+ // TODO(evanm): the old behavior of this function was to always strip the
+ // trailing slash. We duplicate this here, but it shouldn't be necessary
+ // when everyone is using the appropriate FilePath APIs.
+ std::wstring path_str(temp_path);
+ TrimTrailingSeparator(&path_str);
+ *path = FilePath(path_str);
+ return true;
+}
+
+bool GetShmemTempDir(FilePath* path) {
+ return GetTempDir(path);
+}
+
+bool CreateTemporaryFileName(FilePath* path) {
+ std::wstring temp_path, temp_file;
+
+ if (!GetTempDir(&temp_path))
+ return false;
+
+ if (CreateTemporaryFileNameInDir(temp_path, &temp_file)) {
+ *path = FilePath(temp_file);
+ return true;
+ }
+
+ return false;
+}
+
+FILE* CreateAndOpenTemporaryShmemFile(FilePath* path) {
+ return CreateAndOpenTemporaryFile(path);
+}
+
+// On POSIX we have semantics to create and open a temporary file
+// atomically.
+// TODO(jrg): is there an equivalent call to use on Windows instead of
+// going 2-step?
+FILE* CreateAndOpenTemporaryFileInDir(const FilePath& dir, FilePath* path) {
+ std::wstring wstring_path;
+ if (!CreateTemporaryFileNameInDir(dir.value(), &wstring_path)) {
+ return NULL;
+ }
+ *path = FilePath(wstring_path);
+ // Open file in binary mode, to avoid problems with fwrite. On Windows
+ // it replaces \n's with \r\n's, which may surprise you.
+ // Reference: http://msdn.microsoft.com/en-us/library/h9t88zwz(VS.71).aspx
+ return OpenFile(*path, "wb+");
+}
+
+bool CreateTemporaryFileNameInDir(const std::wstring& dir,
+ std::wstring* temp_file) {
+ wchar_t temp_name[MAX_PATH + 1];
+
+ if (!GetTempFileName(dir.c_str(), L"", 0, temp_name))
+ return false; // fail!
+
+ DWORD path_len = GetLongPathName(temp_name, temp_name, MAX_PATH);
+ if (path_len > MAX_PATH + 1 || path_len == 0)
+ return false; // fail!
+
+ temp_file->assign(temp_name, path_len);
+ return true;
+}
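+// Example (editorial sketch, not part of the original source): typical use of
+// the temporary-file helpers above is the two-step flow
+//   FilePath temp;
+//   if (file_util::CreateTemporaryFileName(&temp)) {
+//     FILE* f = file_util::OpenFile(temp, "wb+");  // OpenFile is defined below.
+//     // ... write to f, then fclose(f) ...
+//   }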
+
+bool CreateNewTempDirectory(const FilePath::StringType& prefix,
+ FilePath* new_temp_path) {
+ FilePath system_temp_dir;
+ if (!GetTempDir(&system_temp_dir))
+ return false;
+
+ FilePath path_to_create;
+ srand(static_cast<uint32_t>(time(NULL)));
+
+ int count = 0;
+ while (count < 50) {
+    // Try to create a new temporary directory with a randomly generated name.
+    // If one already exists, keep trying new names until we hit the retry limit.
+ path_to_create = system_temp_dir;
+ std::wstring new_dir_name;
+ new_dir_name.assign(prefix);
+ new_dir_name.append(IntToWString(rand() % kint16max));
+ path_to_create = path_to_create.Append(new_dir_name);
+
+ if (::CreateDirectory(path_to_create.value().c_str(), NULL))
+ break;
+ count++;
+ }
+
+ if (count == 50) {
+ return false;
+ }
+
+ *new_temp_path = path_to_create;
+ return true;
+}
+
+bool CreateDirectory(const FilePath& full_path) {
+ if (DirectoryExists(full_path))
+ return true;
+ int err = SHCreateDirectoryEx(NULL, full_path.value().c_str(), NULL);
+ return err == ERROR_SUCCESS;
+}
+
+bool GetFileInfo(const FilePath& file_path, FileInfo* results) {
+ WIN32_FILE_ATTRIBUTE_DATA attr;
+ if (!GetFileAttributesEx(file_path.ToWStringHack().c_str(),
+ GetFileExInfoStandard, &attr)) {
+ return false;
+ }
+
+ ULARGE_INTEGER size;
+ size.HighPart = attr.nFileSizeHigh;
+ size.LowPart = attr.nFileSizeLow;
+ results->size = size.QuadPart;
+
+ results->is_directory =
+ (attr.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY) != 0;
+ return true;
+}
+
+FILE* OpenFile(const FilePath& filename, const char* mode) {
+ std::wstring w_mode = ASCIIToWide(std::string(mode));
+ FILE* file;
+ if (_wfopen_s(&file, filename.value().c_str(), w_mode.c_str()) != 0) {
+ return NULL;
+ }
+ return file;
+}
+
+FILE* OpenFile(const std::string& filename, const char* mode) {
+ FILE* file;
+ if (fopen_s(&file, filename.c_str(), mode) != 0) {
+ return NULL;
+ }
+ return file;
+}
+
+int ReadFile(const FilePath& filename, char* data, int size) {
+ ScopedHandle file(CreateFile(filename.value().c_str(),
+ GENERIC_READ,
+ FILE_SHARE_READ | FILE_SHARE_WRITE,
+ NULL,
+ OPEN_EXISTING,
+ FILE_FLAG_SEQUENTIAL_SCAN,
+ NULL));
+ if (file == INVALID_HANDLE_VALUE)
+ return -1;
+
+ int ret_value;
+ DWORD read;
+ if (::ReadFile(file, data, size, &read, NULL) && read == size) {
+ ret_value = static_cast<int>(read);
+ } else {
+ ret_value = -1;
+ }
+
+ return ret_value;
+}
+
+int WriteFile(const FilePath& filename, const char* data, int size) {
+ ScopedHandle file(CreateFile(filename.value().c_str(),
+ GENERIC_WRITE,
+ 0,
+ NULL,
+ CREATE_ALWAYS,
+ 0,
+ NULL));
+ if (file == INVALID_HANDLE_VALUE) {
+ CHROMIUM_LOG(WARNING) << "CreateFile failed for path " << filename.value() <<
+ " error code=" << GetLastError() <<
+ " error text=" << win_util::FormatLastWin32Error();
+ return -1;
+ }
+
+ DWORD written;
+ BOOL result = ::WriteFile(file, data, size, &written, NULL);
+ if (result && written == size)
+ return static_cast<int>(written);
+
+ if (!result) {
+ // WriteFile failed.
+ CHROMIUM_LOG(WARNING) << "writing file " << filename.value() <<
+ " failed, error code=" << GetLastError() <<
+ " description=" << win_util::FormatLastWin32Error();
+ } else {
+ // Didn't write all the bytes.
+    CHROMIUM_LOG(WARNING) << "wrote " << written << " bytes to " <<
+      filename.value() << ", expected " << size;
+ }
+ return -1;
+}
+
+// Gets the current working directory for the process.
+bool GetCurrentDirectory(FilePath* dir) {
+ wchar_t system_buffer[MAX_PATH];
+ system_buffer[0] = 0;
+ DWORD len = ::GetCurrentDirectory(MAX_PATH, system_buffer);
+ if (len == 0 || len > MAX_PATH)
+ return false;
+ // TODO(evanm): the old behavior of this function was to always strip the
+ // trailing slash. We duplicate this here, but it shouldn't be necessary
+ // when everyone is using the appropriate FilePath APIs.
+ std::wstring dir_str(system_buffer);
+ file_util::TrimTrailingSeparator(&dir_str);
+ *dir = FilePath(dir_str);
+ return true;
+}
+
+// Sets the current working directory for the process.
+bool SetCurrentDirectory(const FilePath& directory) {
+ BOOL ret = ::SetCurrentDirectory(directory.value().c_str());
+ return ret != 0;
+}
+
+// Deprecated functions ----------------------------------------------------
+
+void InsertBeforeExtension(std::wstring* path_str,
+ const std::wstring& suffix) {
+ FilePath path(*path_str);
+ InsertBeforeExtension(&path, suffix);
+ path_str->assign(path.value());
+}
+void ReplaceExtension(std::wstring* file_name, const std::wstring& extension) {
+ FilePath path(*file_name);
+ ReplaceExtension(&path, extension);
+ file_name->assign(path.value());
+}
+} // namespace file_util
diff --git a/ipc/chromium/src/base/hash_tables.h b/ipc/chromium/src/base/hash_tables.h
new file mode 100644
index 000000000..956cadb3d
--- /dev/null
+++ b/ipc/chromium/src/base/hash_tables.h
@@ -0,0 +1,136 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+
+//
+// Deal with the differences between Microsoft and GNU implementations
+// of hash_map. Allows all platforms to use |base::hash_map| and
+// |base::hash_set|.
+// e.g.:
+//   base::hash_map<int, int> my_map;
+// base::hash_set<int> my_set;
+//
+
+#ifndef BASE_HASH_TABLES_H_
+#define BASE_HASH_TABLES_H_
+
+#include "build/build_config.h"
+
+#include "base/string16.h"
+
+#if defined(COMPILER_MSVC) || (defined(ANDROID) && defined(_STLP_STD_NAME))
+#ifdef COMPILER_MSVC
+#pragma push_macro("_SILENCE_STDEXT_HASH_DEPRECATION_WARNINGS")
+#define _SILENCE_STDEXT_HASH_DEPRECATION_WARNINGS
+#endif
+
+// Suppress -Wshadow warnings from stlport headers.
+#ifdef __GNUC__
+# pragma GCC diagnostic push
+# pragma GCC diagnostic ignored "-Wshadow"
+# if MOZ_GCC_VERSION_AT_LEAST(4, 9, 0)
+# pragma GCC diagnostic ignored "-Wshadow-local"
+# endif
+#endif
+
+#include <hash_map>
+#include <hash_set>
+
+#ifdef __GNUC__
+# if MOZ_GCC_VERSION_AT_LEAST(4, 9, 0)
+# pragma GCC diagnostic pop // -Wshadow-local
+# endif
+# pragma GCC diagnostic pop // -Wshadow
+#endif
+
+#ifdef COMPILER_MSVC
+#pragma pop_macro("_SILENCE_STDEXT_HASH_DEPRECATION_WARNINGS")
+#endif
+namespace base {
+#ifdef ANDROID
+using _STLP_STD_NAME::hash_map;
+using _STLP_STD_NAME::hash_set;
+#else
+using stdext::hash_map;
+using stdext::hash_set;
+#endif
+}
+#elif defined(COMPILER_GCC)
+// This is a hack to disable the gcc 4.4 warning about hash_map and hash_set
+// being deprecated. We can get rid of this when we upgrade to VS2008 and we
+// can use <tr1/unordered_map> and <tr1/unordered_set>.
+#ifdef __DEPRECATED
+#define CHROME_OLD__DEPRECATED __DEPRECATED
+#undef __DEPRECATED
+#endif
+
+#include <ext/hash_map>
+#include <ext/hash_set>
+#include <string>
+
+#ifdef CHROME_OLD__DEPRECATED
+#define __DEPRECATED CHROME_OLD__DEPRECATED
+#undef CHROME_OLD__DEPRECATED
+#endif
+
+namespace base {
+using __gnu_cxx::hash_map;
+using __gnu_cxx::hash_set;
+} // namespace base
+
+namespace __gnu_cxx {
+
+// The GNU C++ library provides identity hash functions for many integral types,
+// but not for |long long|. This hash function will truncate if |size_t| is
+// narrower than |long long|. This is probably good enough for what we will
+// use it for.
+
+#define DEFINE_TRIVIAL_HASH(integral_type) \
+ template<> \
+ struct hash<integral_type> { \
+ std::size_t operator()(integral_type value) const { \
+ return static_cast<std::size_t>(value); \
+ } \
+ }
+
+DEFINE_TRIVIAL_HASH(long long);
+DEFINE_TRIVIAL_HASH(unsigned long long);
+
+#undef DEFINE_TRIVIAL_HASH
+
+// Implement string hash functions so that strings of various flavors can
+// be used as keys in STL maps and sets. The hash algorithm comes from the
+// GNU C++ library, in <tr1/functional>. It is duplicated here because GCC
+// versions prior to 4.3.2 are unable to compile <tr1/functional> when RTTI
+// is disabled, as it is in our build.
+
+#define DEFINE_STRING_HASH(string_type) \
+ template<> \
+ struct hash<string_type> { \
+ std::size_t operator()(const string_type& s) const { \
+ std::size_t result = 0; \
+ for (string_type::const_iterator i = s.begin(); i != s.end(); ++i) \
+ result = (result * 131) + *i; \
+ return result; \
+ } \
+ }
+
+DEFINE_STRING_HASH(std::string);
+DEFINE_STRING_HASH(std::wstring);
+
+#if defined(WCHAR_T_IS_UTF32)
+// If string16 and std::wstring are not the same type, provide a
+// specialization for string16.
+DEFINE_STRING_HASH(string16);
+#endif // WCHAR_T_IS_UTF32
+
+#undef DEFINE_STRING_HASH
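+
+// Example (editorial sketch, not part of the original Chromium source): with
+// the trivial and string hash specializations above, 64-bit integer and wide
+// string keys can be used directly, e.g.
+//   base::hash_map<long long, int> sizes;
+//   sizes[1LL << 40] = 7;
+//   base::hash_set<std::wstring> names;
+//   names.insert(L"example");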
+
+} // namespace __gnu_cxx
+
+#endif // COMPILER
+
+#endif // BASE_HASH_TABLES_H_
diff --git a/ipc/chromium/src/base/histogram.cc b/ipc/chromium/src/base/histogram.cc
new file mode 100644
index 000000000..49561bf60
--- /dev/null
+++ b/ipc/chromium/src/base/histogram.cc
@@ -0,0 +1,1176 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Histogram is an object that aggregates statistics, and can summarize them in
+// various forms, including ASCII graphical, HTML, and numerically (as a
+// vector of numbers corresponding to each of the aggregating buckets).
+// See header file for details and examples.
+
+#include "base/histogram.h"
+
+#include <math.h>
+
+#include <algorithm>
+#include <string>
+
+#include "base/logging.h"
+#include "base/pickle.h"
+#include "base/string_util.h"
+#include "base/logging.h"
+
+namespace base {
+
+#define DVLOG(x) CHROMIUM_LOG(ERROR)
+#define CHECK_GT DCHECK_GT
+#define CHECK_LT DCHECK_LT
+typedef ::Lock Lock;
+typedef ::AutoLock AutoLock;
+
+// Static table of checksums for all possible 8 bit bytes.
+const uint32_t Histogram::kCrcTable[256] = {0x0, 0x77073096L, 0xee0e612cL,
+0x990951baL, 0x76dc419L, 0x706af48fL, 0xe963a535L, 0x9e6495a3L, 0xedb8832L,
+0x79dcb8a4L, 0xe0d5e91eL, 0x97d2d988L, 0x9b64c2bL, 0x7eb17cbdL, 0xe7b82d07L,
+0x90bf1d91L, 0x1db71064L, 0x6ab020f2L, 0xf3b97148L, 0x84be41deL, 0x1adad47dL,
+0x6ddde4ebL, 0xf4d4b551L, 0x83d385c7L, 0x136c9856L, 0x646ba8c0L, 0xfd62f97aL,
+0x8a65c9ecL, 0x14015c4fL, 0x63066cd9L, 0xfa0f3d63L, 0x8d080df5L, 0x3b6e20c8L,
+0x4c69105eL, 0xd56041e4L, 0xa2677172L, 0x3c03e4d1L, 0x4b04d447L, 0xd20d85fdL,
+0xa50ab56bL, 0x35b5a8faL, 0x42b2986cL, 0xdbbbc9d6L, 0xacbcf940L, 0x32d86ce3L,
+0x45df5c75L, 0xdcd60dcfL, 0xabd13d59L, 0x26d930acL, 0x51de003aL, 0xc8d75180L,
+0xbfd06116L, 0x21b4f4b5L, 0x56b3c423L, 0xcfba9599L, 0xb8bda50fL, 0x2802b89eL,
+0x5f058808L, 0xc60cd9b2L, 0xb10be924L, 0x2f6f7c87L, 0x58684c11L, 0xc1611dabL,
+0xb6662d3dL, 0x76dc4190L, 0x1db7106L, 0x98d220bcL, 0xefd5102aL, 0x71b18589L,
+0x6b6b51fL, 0x9fbfe4a5L, 0xe8b8d433L, 0x7807c9a2L, 0xf00f934L, 0x9609a88eL,
+0xe10e9818L, 0x7f6a0dbbL, 0x86d3d2dL, 0x91646c97L, 0xe6635c01L, 0x6b6b51f4L,
+0x1c6c6162L, 0x856530d8L, 0xf262004eL, 0x6c0695edL, 0x1b01a57bL, 0x8208f4c1L,
+0xf50fc457L, 0x65b0d9c6L, 0x12b7e950L, 0x8bbeb8eaL, 0xfcb9887cL, 0x62dd1ddfL,
+0x15da2d49L, 0x8cd37cf3L, 0xfbd44c65L, 0x4db26158L, 0x3ab551ceL, 0xa3bc0074L,
+0xd4bb30e2L, 0x4adfa541L, 0x3dd895d7L, 0xa4d1c46dL, 0xd3d6f4fbL, 0x4369e96aL,
+0x346ed9fcL, 0xad678846L, 0xda60b8d0L, 0x44042d73L, 0x33031de5L, 0xaa0a4c5fL,
+0xdd0d7cc9L, 0x5005713cL, 0x270241aaL, 0xbe0b1010L, 0xc90c2086L, 0x5768b525L,
+0x206f85b3L, 0xb966d409L, 0xce61e49fL, 0x5edef90eL, 0x29d9c998L, 0xb0d09822L,
+0xc7d7a8b4L, 0x59b33d17L, 0x2eb40d81L, 0xb7bd5c3bL, 0xc0ba6cadL, 0xedb88320L,
+0x9abfb3b6L, 0x3b6e20cL, 0x74b1d29aL, 0xead54739L, 0x9dd277afL, 0x4db2615L,
+0x73dc1683L, 0xe3630b12L, 0x94643b84L, 0xd6d6a3eL, 0x7a6a5aa8L, 0xe40ecf0bL,
+0x9309ff9dL, 0xa00ae27L, 0x7d079eb1L, 0xf00f9344L, 0x8708a3d2L, 0x1e01f268L,
+0x6906c2feL, 0xf762575dL, 0x806567cbL, 0x196c3671L, 0x6e6b06e7L, 0xfed41b76L,
+0x89d32be0L, 0x10da7a5aL, 0x67dd4accL, 0xf9b9df6fL, 0x8ebeeff9L, 0x17b7be43L,
+0x60b08ed5L, 0xd6d6a3e8L, 0xa1d1937eL, 0x38d8c2c4L, 0x4fdff252L, 0xd1bb67f1L,
+0xa6bc5767L, 0x3fb506ddL, 0x48b2364bL, 0xd80d2bdaL, 0xaf0a1b4cL, 0x36034af6L,
+0x41047a60L, 0xdf60efc3L, 0xa867df55L, 0x316e8eefL, 0x4669be79L, 0xcb61b38cL,
+0xbc66831aL, 0x256fd2a0L, 0x5268e236L, 0xcc0c7795L, 0xbb0b4703L, 0x220216b9L,
+0x5505262fL, 0xc5ba3bbeL, 0xb2bd0b28L, 0x2bb45a92L, 0x5cb36a04L, 0xc2d7ffa7L,
+0xb5d0cf31L, 0x2cd99e8bL, 0x5bdeae1dL, 0x9b64c2b0L, 0xec63f226L, 0x756aa39cL,
+0x26d930aL, 0x9c0906a9L, 0xeb0e363fL, 0x72076785L, 0x5005713L, 0x95bf4a82L,
+0xe2b87a14L, 0x7bb12baeL, 0xcb61b38L, 0x92d28e9bL, 0xe5d5be0dL, 0x7cdcefb7L,
+0xbdbdf21L, 0x86d3d2d4L, 0xf1d4e242L, 0x68ddb3f8L, 0x1fda836eL, 0x81be16cdL,
+0xf6b9265bL, 0x6fb077e1L, 0x18b74777L, 0x88085ae6L, 0xff0f6a70L, 0x66063bcaL,
+0x11010b5cL, 0x8f659effL, 0xf862ae69L, 0x616bffd3L, 0x166ccf45L, 0xa00ae278L,
+0xd70dd2eeL, 0x4e048354L, 0x3903b3c2L, 0xa7672661L, 0xd06016f7L, 0x4969474dL,
+0x3e6e77dbL, 0xaed16a4aL, 0xd9d65adcL, 0x40df0b66L, 0x37d83bf0L, 0xa9bcae53L,
+0xdebb9ec5L, 0x47b2cf7fL, 0x30b5ffe9L, 0xbdbdf21cL, 0xcabac28aL, 0x53b39330L,
+0x24b4a3a6L, 0xbad03605L, 0xcdd70693L, 0x54de5729L, 0x23d967bfL, 0xb3667a2eL,
+0xc4614ab8L, 0x5d681b02L, 0x2a6f2b94L, 0xb40bbe37L, 0xc30c8ea1L, 0x5a05df1bL,
+0x2d02ef8dL,
+};
+
+typedef Histogram::Count Count;
+
+// static
+const size_t Histogram::kBucketCount_MAX = 16384u;
+
+Histogram* Histogram::FactoryGet(const std::string& name,
+ Sample minimum,
+ Sample maximum,
+ size_t bucket_count,
+ Flags flags) {
+ Histogram* histogram(NULL);
+
+ // Defensive code.
+ if (minimum < 1)
+ minimum = 1;
+ if (maximum > kSampleType_MAX - 1)
+ maximum = kSampleType_MAX - 1;
+
+ if (!StatisticsRecorder::FindHistogram(name, &histogram)) {
+    // The extra variable is not needed... but this keeps this section basically
+    // identical to other derived classes in this file (and the compiler will
+    // optimize away the extra variable).
+ Histogram* tentative_histogram =
+ new Histogram(name, minimum, maximum, bucket_count);
+ tentative_histogram->InitializeBucketRange();
+ tentative_histogram->SetFlags(flags);
+ histogram =
+ StatisticsRecorder::RegisterOrDeleteDuplicate(tentative_histogram);
+ }
+
+ DCHECK_EQ(HISTOGRAM, histogram->histogram_type());
+ DCHECK(histogram->HasConstructorArguments(minimum, maximum, bucket_count));
+ return histogram;
+}
+
+Histogram* Histogram::FactoryTimeGet(const std::string& name,
+ TimeDelta minimum,
+ TimeDelta maximum,
+ size_t bucket_count,
+ Flags flags) {
+ return FactoryGet(name, minimum.InMilliseconds(), maximum.InMilliseconds(),
+ bucket_count, flags);
+}
+
+void Histogram::Add(int value) {
+ if (value > kSampleType_MAX - 1)
+ value = kSampleType_MAX - 1;
+ if (value < 0)
+ value = 0;
+ size_t index = BucketIndex(value);
+ DCHECK_GE(value, ranges(index));
+ DCHECK_LT(value, ranges(index + 1));
+ Accumulate(value, 1, index);
+}
+
+void Histogram::Subtract(int value) {
+ if (value > kSampleType_MAX - 1)
+ value = kSampleType_MAX - 1;
+ if (value < 0)
+ value = 0;
+ size_t index = BucketIndex(value);
+ DCHECK_GE(value, ranges(index));
+ DCHECK_LT(value, ranges(index + 1));
+ Accumulate(value, -1, index);
+}
+
+void Histogram::AddBoolean(bool value) {
+ DCHECK(false);
+}
+
+void Histogram::AddSampleSet(const SampleSet& sample) {
+ sample_.Add(sample);
+}
+
+void Histogram::Clear() {
+ SampleSet ss;
+ ss.Resize(*this);
+ sample_ = ss;
+}
+
+void Histogram::SetRangeDescriptions(const DescriptionPair descriptions[]) {
+ DCHECK(false);
+}
+
+// The following methods provide a graphical histogram display.
+void Histogram::WriteHTMLGraph(std::string* output) const {
+  // TBD(jar) Write a nice HTML bar chart, with divs and mouse-overs, etc.
+ output->append("<PRE>");
+ WriteAscii(true, "<br>", output);
+ output->append("</PRE>");
+}
+
+void Histogram::WriteAscii(bool graph_it, const std::string& newline,
+ std::string* output) const {
+ // Get local (stack) copies of all effectively volatile class data so that we
+ // are consistent across our output activities.
+ SampleSet snapshot;
+ SnapshotSample(&snapshot);
+
+ Count sample_count = snapshot.TotalCount();
+
+ WriteAsciiHeader(snapshot, sample_count, output);
+ output->append(newline);
+
+ // Prepare to normalize graphical rendering of bucket contents.
+ double max_size = 0;
+ if (graph_it)
+ max_size = GetPeakBucketSize(snapshot);
+
+ // Calculate space needed to print bucket range numbers. Leave room to print
+ // nearly the largest bucket range without sliding over the histogram.
+ size_t largest_non_empty_bucket = bucket_count() - 1;
+ while (0 == snapshot.counts(largest_non_empty_bucket)) {
+ if (0 == largest_non_empty_bucket)
+ break; // All buckets are empty.
+ --largest_non_empty_bucket;
+ }
+
+ // Calculate largest print width needed for any of our bucket range displays.
+ size_t print_width = 1;
+ for (size_t i = 0; i < bucket_count(); ++i) {
+ if (snapshot.counts(i)) {
+ size_t width = GetAsciiBucketRange(i).size() + 1;
+ if (width > print_width)
+ print_width = width;
+ }
+ }
+
+ int64_t remaining = sample_count;
+ int64_t past = 0;
+ // Output the actual histogram graph.
+ for (size_t i = 0; i < bucket_count(); ++i) {
+ Count current = snapshot.counts(i);
+ if (!current && !PrintEmptyBucket(i))
+ continue;
+ remaining -= current;
+ std::string range = GetAsciiBucketRange(i);
+ output->append(range);
+ for (size_t j = 0; range.size() + j < print_width + 1; ++j)
+ output->push_back(' ');
+ if (0 == current &&
+ i < bucket_count() - 1 && 0 == snapshot.counts(i + 1)) {
+ while (i < bucket_count() - 1 && 0 == snapshot.counts(i + 1))
+ ++i;
+ output->append("... ");
+ output->append(newline);
+ continue; // No reason to plot emptiness.
+ }
+ double current_size = GetBucketSize(current, i);
+ if (graph_it)
+ WriteAsciiBucketGraph(current_size, max_size, output);
+ WriteAsciiBucketContext(past, current, remaining, i, output);
+ output->append(newline);
+ past += current;
+ }
+ DCHECK_EQ(sample_count, past);
+}
+
+//------------------------------------------------------------------------------
+// Methods for validating a sample and a related histogram.
+//------------------------------------------------------------------------------
+
+Histogram::Inconsistencies
+Histogram::FindCorruption(const SampleSet& snapshot) const
+{
+ int inconsistencies = NO_INCONSISTENCIES;
+ Sample previous_range = -1; // Bottom range is always 0.
+ int64_t count = 0;
+ for (size_t index = 0; index < bucket_count(); ++index) {
+ count += snapshot.counts(index);
+ int new_range = ranges(index);
+ if (previous_range >= new_range)
+ inconsistencies |= BUCKET_ORDER_ERROR;
+ previous_range = new_range;
+ }
+
+ if (!HasValidRangeChecksum())
+ inconsistencies |= RANGE_CHECKSUM_ERROR;
+
+ int64_t delta64 = snapshot.redundant_count() - count;
+ if (delta64 != 0) {
+ int delta = static_cast<int>(delta64);
+ if (delta != delta64)
+ delta = INT_MAX; // Flag all giant errors as INT_MAX.
+ // Since snapshots of histograms are taken asynchronously relative to
+ // sampling (and snapped from different threads), it is pretty likely that
+ // we'll catch a redundant count that doesn't match the sample count. We
+ // allow for a certain amount of slop before flagging this as an
+    // inconsistency. Even with an inconsistency, we'll snapshot it again (for
+    // UMA in about a half hour), so we'll eventually get the data, if it was
+    // not the result of a corruption. If histograms show that 1 is "too tight",
+    // then we may try to use 2 or 3 for this slop value.
+ const int kCommonRaceBasedCountMismatch = 1;
+ if (delta > 0) {
+ UMA_HISTOGRAM_COUNTS("Histogram.InconsistentCountHigh", delta);
+ if (delta > kCommonRaceBasedCountMismatch)
+ inconsistencies |= COUNT_HIGH_ERROR;
+ } else {
+ DCHECK_GT(0, delta);
+ UMA_HISTOGRAM_COUNTS("Histogram.InconsistentCountLow", -delta);
+ if (-delta > kCommonRaceBasedCountMismatch)
+ inconsistencies |= COUNT_LOW_ERROR;
+ }
+ }
+ return static_cast<Inconsistencies>(inconsistencies);
+}
+
+Histogram::ClassType Histogram::histogram_type() const {
+ return HISTOGRAM;
+}
+
+Histogram::Sample Histogram::ranges(size_t i) const {
+ return ranges_[i];
+}
+
+size_t Histogram::bucket_count() const {
+ return bucket_count_;
+}
+
+void Histogram::SnapshotSample(SampleSet* sample) const {
+ *sample = sample_;
+}
+
+bool Histogram::HasConstructorArguments(Sample minimum,
+ Sample maximum,
+ size_t bucket_count) {
+ return ((minimum == declared_min_) && (maximum == declared_max_) &&
+ (bucket_count == bucket_count_));
+}
+
+bool Histogram::HasConstructorTimeDeltaArguments(TimeDelta minimum,
+ TimeDelta maximum,
+ size_t bucket_count) {
+ return ((minimum.InMilliseconds() == declared_min_) &&
+ (maximum.InMilliseconds() == declared_max_) &&
+ (bucket_count == bucket_count_));
+}
+
+bool Histogram::HasValidRangeChecksum() const {
+ return CalculateRangeChecksum() == range_checksum_;
+}
+
+size_t Histogram::SizeOfIncludingThis(mozilla::MallocSizeOf aMallocSizeOf)
+{
+ size_t n = 0;
+ n += aMallocSizeOf(this);
+ // We're not allowed to do deep dives into STL data structures. This
+ // is as close as we can get to measuring this array.
+ n += aMallocSizeOf(&ranges_[0]);
+ n += sample_.SizeOfExcludingThis(aMallocSizeOf);
+ return n;
+}
+
+size_t
+Histogram::SampleSet::SizeOfExcludingThis(mozilla::MallocSizeOf aMallocSizeOf)
+{
+ // We're not allowed to do deep dives into STL data structures. This
+ // is as close as we can get to measuring this array.
+ return aMallocSizeOf(&counts_[0]);
+}
+
+Histogram::Histogram(const std::string& name, Sample minimum,
+ Sample maximum, size_t bucket_count)
+ : sample_(),
+ histogram_name_(name),
+ declared_min_(minimum),
+ declared_max_(maximum),
+ bucket_count_(bucket_count),
+ flags_(kNoFlags),
+ ranges_(bucket_count + 1, 0),
+ range_checksum_(0),
+ recording_enabled_(true) {
+ Initialize();
+}
+
+Histogram::Histogram(const std::string& name, TimeDelta minimum,
+ TimeDelta maximum, size_t bucket_count)
+ : sample_(),
+ histogram_name_(name),
+ declared_min_(static_cast<int> (minimum.InMilliseconds())),
+ declared_max_(static_cast<int> (maximum.InMilliseconds())),
+ bucket_count_(bucket_count),
+ flags_(kNoFlags),
+ ranges_(bucket_count + 1, 0),
+ range_checksum_(0),
+ recording_enabled_(true) {
+ Initialize();
+}
+
+Histogram::~Histogram() {
+ if (StatisticsRecorder::dump_on_exit()) {
+ std::string output;
+ WriteAscii(true, "\n", &output);
+ CHROMIUM_LOG(INFO) << output;
+ }
+
+  // Just to make sure the most-derived class did this properly...
+ DCHECK(ValidateBucketRanges());
+}
+
+// Calculate what range of values is held in each bucket.
+// We have to be careful that we don't pick a ratio between starting points in
+// consecutive buckets that is so small that the integer bounds are the same
+// (effectively making one bucket get no values). We need to avoid:
+//   ranges_[i] == ranges_[i + 1]
+// To avoid that, we just use a fine-grained (narrow) bucket width for as long
+// as we need to, until we get a ratio that moves us along at least 2 units at
+// a time. From that bucket onward we use exponential growth of buckets.
+void Histogram::InitializeBucketRange() {
+ double log_max = log(static_cast<double>(declared_max()));
+ double log_ratio;
+ double log_next;
+ size_t bucket_index = 1;
+ Sample current = declared_min();
+ SetBucketRange(bucket_index, current);
+ while (bucket_count() > ++bucket_index) {
+ double log_current;
+ log_current = log(static_cast<double>(current));
+ // Calculate the count'th root of the range.
+ log_ratio = (log_max - log_current) / (bucket_count() - bucket_index);
+ // See where the next bucket would start.
+ log_next = log_current + log_ratio;
+ int next;
+ next = static_cast<int>(floor(exp(log_next) + 0.5));
+ if (next > current)
+ current = next;
+ else
+ ++current; // Just do a narrow bucket, and keep trying.
+ SetBucketRange(bucket_index, current);
+ }
+ ResetRangeChecksum();
+
+ DCHECK_EQ(bucket_count(), bucket_index);
+}
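+
+// Worked example (editorial note, not part of the original source): for a
+// histogram constructed with declared_min = 1, declared_max = 64 and
+// bucket_count = 8, the loop above produces
+//   ranges_ = {0, 1, 2, 4, 8, 16, 32, 64, kSampleType_MAX}
+// i.e. exactly the buckets [0,1), [1,2), [2,4), ..., [64,infinity) described
+// in the header comment of histogram.h.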
+
+bool Histogram::PrintEmptyBucket(size_t index) const {
+ return true;
+}
+
+size_t Histogram::BucketIndex(Sample value) const {
+ // Use simple binary search. This is very general, but there are better
+ // approaches if we knew that the buckets were linearly distributed.
+ DCHECK_LE(ranges(0), value);
+ DCHECK_GT(ranges(bucket_count()), value);
+ size_t under = 0;
+ size_t over = bucket_count();
+ size_t mid;
+
+ do {
+ DCHECK_GE(over, under);
+ mid = under + (over - under)/2;
+ if (mid == under)
+ break;
+ if (ranges(mid) <= value)
+ under = mid;
+ else
+ over = mid;
+ } while (true);
+
+ DCHECK_LE(ranges(mid), value);
+ CHECK_GT(ranges(mid+1), value);
+ return mid;
+}
+
+// Use the actual bucket widths (like a linear histogram) until the widths get
+// over some transition value, and then use that transition width. Exponential
+// bucket widths grow very quickly (and we don't expect to see a lot of entries
+// in the large buckets), so we need this normalization to make it possible to
+// see what is going on and not have 0-graphical-height buckets.
+double Histogram::GetBucketSize(Count current, size_t i) const {
+ DCHECK_GT(ranges(i + 1), ranges(i));
+ static const double kTransitionWidth = 5;
+ double denominator = ranges(i + 1) - ranges(i);
+ if (denominator > kTransitionWidth)
+ denominator = kTransitionWidth; // Stop trying to normalize.
+ return current/denominator;
+}
+
+void Histogram::ResetRangeChecksum() {
+ range_checksum_ = CalculateRangeChecksum();
+}
+
+const std::string Histogram::GetAsciiBucketRange(size_t i) const {
+ std::string result;
+ if (kHexRangePrintingFlag & flags_)
+ StringAppendF(&result, "%#x", ranges(i));
+ else
+ StringAppendF(&result, "%d", ranges(i));
+ return result;
+}
+
+// Update histogram data with new sample.
+void Histogram::Accumulate(Sample value, Count count, size_t index) {
+ sample_.Accumulate(value, count, index);
+}
+
+void Histogram::SetBucketRange(size_t i, Sample value) {
+ DCHECK_GT(bucket_count_, i);
+ ranges_[i] = value;
+}
+
+bool Histogram::ValidateBucketRanges() const {
+ // Standard assertions that all bucket ranges should satisfy.
+ DCHECK_EQ(bucket_count_ + 1, ranges_.size());
+ DCHECK_EQ(0, ranges_[0]);
+ DCHECK_EQ(declared_min(), ranges_[1]);
+ DCHECK_EQ(declared_max(), ranges_[bucket_count_ - 1]);
+ DCHECK_EQ(kSampleType_MAX, ranges_[bucket_count_]);
+ return true;
+}
+
+uint32_t Histogram::CalculateRangeChecksum() const {
+ DCHECK_EQ(ranges_.size(), bucket_count() + 1);
+ uint32_t checksum = static_cast<uint32_t>(ranges_.size()); // Seed checksum.
+ for (size_t index = 0; index < bucket_count(); ++index)
+ checksum = Crc32(checksum, ranges(index));
+ return checksum;
+}
+
+void Histogram::Initialize() {
+ sample_.Resize(*this);
+ if (declared_min_ < 1)
+ declared_min_ = 1;
+ if (declared_max_ > kSampleType_MAX - 1)
+ declared_max_ = kSampleType_MAX - 1;
+ DCHECK_LE(declared_min_, declared_max_);
+ DCHECK_GT(bucket_count_, 1u);
+ CHECK_LT(bucket_count_, kBucketCount_MAX);
+ size_t maximal_bucket_count = declared_max_ - declared_min_ + 2;
+ DCHECK_LE(bucket_count_, maximal_bucket_count);
+ DCHECK_EQ(0, ranges_[0]);
+ ranges_[bucket_count_] = kSampleType_MAX;
+}
+
+// We generate the CRC-32 using the low order bits to select whether to XOR in
+// the reversed polynomial 0xedb88320L. This is nice and simple, and allows us
+// to keep the quotient in a uint32_t. Since we're not concerned about the nature
+// of corruptions (i.e., we don't care about bit sequencing, since we are
+// handling memory changes, which are more grotesque), we don't bother to
+// get the CRC correct for big-endian vs. little-endian calculations. All we
+// need is a nice hash, that tends to depend on all the bits of the sample, with
+// very little chance of changes in one place impacting changes in another
+// place.
+uint32_t Histogram::Crc32(uint32_t sum, Histogram::Sample range) {
+ const bool kUseRealCrc = true; // TODO(jar): Switch to false and watch stats.
+ if (kUseRealCrc) {
+ union {
+ Histogram::Sample range;
+ unsigned char bytes[sizeof(Histogram::Sample)];
+ } converter;
+ converter.range = range;
+ for (size_t i = 0; i < sizeof(converter); ++i)
+ sum = kCrcTable[(sum & 0xff) ^ converter.bytes[i]] ^ (sum >> 8);
+ } else {
+ // Use hash techniques provided in ReallyFastHash, except we don't care
+    // about "avalanching" (which would worsen the hash, and add collisions),
+ // and we don't care about edge cases since we have an even number of bytes.
+ union {
+ Histogram::Sample range;
+ uint16_t ints[sizeof(Histogram::Sample) / 2];
+ } converter;
+ DCHECK_EQ(sizeof(Histogram::Sample), sizeof(converter));
+ converter.range = range;
+ sum += converter.ints[0];
+ sum = (sum << 16) ^ sum ^ (static_cast<uint32_t>(converter.ints[1]) << 11);
+ sum += sum >> 11;
+ }
+ return sum;
+}
+
+//------------------------------------------------------------------------------
+// Private methods
+
+double Histogram::GetPeakBucketSize(const SampleSet& snapshot) const {
+ double max = 0;
+ for (size_t i = 0; i < bucket_count() ; ++i) {
+ double current_size
+ = GetBucketSize(snapshot.counts(i), i);
+ if (current_size > max)
+ max = current_size;
+ }
+ return max;
+}
+
+void Histogram::WriteAsciiHeader(const SampleSet& snapshot,
+ Count sample_count,
+ std::string* output) const {
+ StringAppendF(output,
+ "Histogram: %s recorded %d samples",
+ histogram_name().c_str(),
+ sample_count);
+ int64_t snapshot_sum = snapshot.sum();
+ if (0 == sample_count) {
+ DCHECK_EQ(snapshot_sum, 0);
+ } else {
+ double average = static_cast<float>(snapshot_sum) / sample_count;
+
+ StringAppendF(output, ", average = %.1f", average);
+ }
+ if (flags_ & ~kHexRangePrintingFlag)
+ StringAppendF(output, " (flags = 0x%x)", flags_ & ~kHexRangePrintingFlag);
+}
+
+void Histogram::WriteAsciiBucketContext(const int64_t past,
+ const Count current,
+ const int64_t remaining,
+ const size_t i,
+ std::string* output) const {
+ double scaled_sum = (past + current + remaining) / 100.0;
+ WriteAsciiBucketValue(current, scaled_sum, output);
+ if (0 < i) {
+ double percentage = past / scaled_sum;
+ StringAppendF(output, " {%3.1f%%}", percentage);
+ }
+}
+
+void Histogram::WriteAsciiBucketValue(Count current, double scaled_sum,
+ std::string* output) const {
+ StringAppendF(output, " (%d = %3.1f%%)", current, current/scaled_sum);
+}
+
+void Histogram::WriteAsciiBucketGraph(double current_size, double max_size,
+ std::string* output) const {
+ const int k_line_length = 72; // Maximal horizontal width of graph.
+ int x_count = static_cast<int>(k_line_length * (current_size / max_size)
+ + 0.5);
+ int x_remainder = k_line_length - x_count;
+
+ while (0 < x_count--)
+ output->append("-");
+ output->append("O");
+ while (0 < x_remainder--)
+ output->append(" ");
+}
+
+//------------------------------------------------------------------------------
+// Methods for the Histogram::SampleSet class
+//------------------------------------------------------------------------------
+
+Histogram::SampleSet::SampleSet()
+ : counts_(),
+ sum_(0),
+ redundant_count_(0) {
+}
+
+Histogram::SampleSet::~SampleSet() {
+}
+
+void Histogram::SampleSet::Resize(const Histogram& histogram) {
+ counts_.resize(histogram.bucket_count(), 0);
+}
+
+void Histogram::SampleSet::Accumulate(Sample value, Count count,
+ size_t index) {
+ DCHECK(count == 1 || count == -1);
+ counts_[index] += count;
+ redundant_count_ += count;
+ sum_ += static_cast<int64_t>(count) * value;
+ DCHECK_GE(counts_[index], 0);
+ DCHECK_GE(sum_, 0);
+ DCHECK_GE(redundant_count_, 0);
+}
+
+Count Histogram::SampleSet::TotalCount() const {
+ Count total = 0;
+ for (Counts::const_iterator it = counts_.begin();
+ it != counts_.end();
+ ++it) {
+ total += *it;
+ }
+ return total;
+}
+
+void Histogram::SampleSet::Add(const SampleSet& other) {
+ DCHECK_EQ(counts_.size(), other.counts_.size());
+ sum_ += other.sum_;
+ redundant_count_ += other.redundant_count_;
+ for (size_t index = 0; index < counts_.size(); ++index)
+ counts_[index] += other.counts_[index];
+}
+
+//------------------------------------------------------------------------------
+// LinearHistogram: This histogram uses a traditional set of evenly spaced
+// buckets.
+//------------------------------------------------------------------------------
+
+LinearHistogram::~LinearHistogram() {
+}
+
+Histogram* LinearHistogram::FactoryGet(const std::string& name,
+ Sample minimum,
+ Sample maximum,
+ size_t bucket_count,
+ Flags flags) {
+ Histogram* histogram(NULL);
+
+ if (minimum < 1)
+ minimum = 1;
+ if (maximum > kSampleType_MAX - 1)
+ maximum = kSampleType_MAX - 1;
+
+ if (!StatisticsRecorder::FindHistogram(name, &histogram)) {
+ LinearHistogram* tentative_histogram =
+ new LinearHistogram(name, minimum, maximum, bucket_count);
+ tentative_histogram->InitializeBucketRange();
+ tentative_histogram->SetFlags(flags);
+ histogram =
+ StatisticsRecorder::RegisterOrDeleteDuplicate(tentative_histogram);
+ }
+
+ DCHECK_EQ(LINEAR_HISTOGRAM, histogram->histogram_type());
+ DCHECK(histogram->HasConstructorArguments(minimum, maximum, bucket_count));
+ return histogram;
+}
+
+Histogram* LinearHistogram::FactoryTimeGet(const std::string& name,
+ TimeDelta minimum,
+ TimeDelta maximum,
+ size_t bucket_count,
+ Flags flags) {
+ return FactoryGet(name, minimum.InMilliseconds(), maximum.InMilliseconds(),
+ bucket_count, flags);
+}
+
+Histogram::ClassType LinearHistogram::histogram_type() const {
+ return LINEAR_HISTOGRAM;
+}
+
+void LinearHistogram::Accumulate(Sample value, Count count, size_t index) {
+ sample_.Accumulate(value, count, index);
+}
+
+void LinearHistogram::SetRangeDescriptions(
+ const DescriptionPair descriptions[]) {
+  for (int i = 0; descriptions[i].description; ++i) {
+ bucket_description_[descriptions[i].sample] = descriptions[i].description;
+ }
+}
+
+LinearHistogram::LinearHistogram(const std::string& name,
+ Sample minimum,
+ Sample maximum,
+ size_t bucket_count)
+ : Histogram(name, minimum >= 1 ? minimum : 1, maximum, bucket_count) {
+}
+
+LinearHistogram::LinearHistogram(const std::string& name,
+ TimeDelta minimum,
+ TimeDelta maximum,
+ size_t bucket_count)
+ : Histogram(name, minimum >= TimeDelta::FromMilliseconds(1) ?
+ minimum : TimeDelta::FromMilliseconds(1),
+ maximum, bucket_count) {
+}
+
+void LinearHistogram::InitializeBucketRange() {
+ DCHECK_GT(declared_min(), 0); // 0 is the underflow bucket here.
+ double min = declared_min();
+ double max = declared_max();
+ size_t i;
+ for (i = 1; i < bucket_count(); ++i) {
+    double linear_range = (min * (bucket_count() - 1 - i) + max * (i - 1)) /
+ (bucket_count() - 2);
+ SetBucketRange(i, static_cast<int> (linear_range + 0.5));
+ }
+ ResetRangeChecksum();
+}
+
+double LinearHistogram::GetBucketSize(Count current, size_t i) const {
+ DCHECK_GT(ranges(i + 1), ranges(i));
+ // Adjacent buckets with different widths would have "surprisingly" many (few)
+ // samples in a histogram if we didn't normalize this way.
+ double denominator = ranges(i + 1) - ranges(i);
+ return current/denominator;
+}
+
+const std::string LinearHistogram::GetAsciiBucketRange(size_t i) const {
+ int range = ranges(i);
+ BucketDescriptionMap::const_iterator it = bucket_description_.find(range);
+ if (it == bucket_description_.end())
+ return Histogram::GetAsciiBucketRange(i);
+ return it->second;
+}
+
+bool LinearHistogram::PrintEmptyBucket(size_t index) const {
+ return bucket_description_.find(ranges(index)) == bucket_description_.end();
+}
+
+
+//------------------------------------------------------------------------------
+// This section provides implementation for BooleanHistogram.
+//------------------------------------------------------------------------------
+
+Histogram* BooleanHistogram::FactoryGet(const std::string& name, Flags flags) {
+ Histogram* histogram(NULL);
+
+ if (!StatisticsRecorder::FindHistogram(name, &histogram)) {
+ BooleanHistogram* tentative_histogram = new BooleanHistogram(name);
+ tentative_histogram->InitializeBucketRange();
+ tentative_histogram->SetFlags(flags);
+ histogram =
+ StatisticsRecorder::RegisterOrDeleteDuplicate(tentative_histogram);
+ }
+
+ DCHECK_EQ(BOOLEAN_HISTOGRAM, histogram->histogram_type());
+ return histogram;
+}
+
+Histogram::ClassType BooleanHistogram::histogram_type() const {
+ return BOOLEAN_HISTOGRAM;
+}
+
+void BooleanHistogram::AddBoolean(bool value) {
+ Add(value ? 1 : 0);
+}
+
+BooleanHistogram::BooleanHistogram(const std::string& name)
+ : LinearHistogram(name, 1, 2, 3) {
+}
+
+void
+BooleanHistogram::Accumulate(Sample value, Count count, size_t index)
+{
+ // Callers will have computed index based on the non-booleanified value.
+ // So we need to adjust the index manually.
+ LinearHistogram::Accumulate(!!value, count, value ? 1 : 0);
+}
+
+//------------------------------------------------------------------------------
+// FlagHistogram:
+//------------------------------------------------------------------------------
+
+Histogram *
+FlagHistogram::FactoryGet(const std::string &name, Flags flags)
+{
+ Histogram *h(nullptr);
+
+ if (!StatisticsRecorder::FindHistogram(name, &h)) {
+ FlagHistogram *fh = new FlagHistogram(name);
+ fh->InitializeBucketRange();
+ fh->SetFlags(flags);
+ size_t zero_index = fh->BucketIndex(0);
+ fh->LinearHistogram::Accumulate(0, 1, zero_index);
+ h = StatisticsRecorder::RegisterOrDeleteDuplicate(fh);
+ }
+
+ return h;
+}
+
+FlagHistogram::FlagHistogram(const std::string &name)
+ : BooleanHistogram(name), mSwitched(false) {
+}
+
+Histogram::ClassType
+FlagHistogram::histogram_type() const
+{
+ return FLAG_HISTOGRAM;
+}
+
+void
+FlagHistogram::Accumulate(Sample value, Count count, size_t index)
+{
+ if (mSwitched) {
+ return;
+ }
+
+ mSwitched = true;
+ DCHECK_EQ(value, 1);
+ LinearHistogram::Accumulate(value, 1, index);
+ size_t zero_index = BucketIndex(0);
+ LinearHistogram::Accumulate(0, -1, zero_index);
+}
+
+void
+FlagHistogram::AddSampleSet(const SampleSet& sample) {
+ DCHECK_EQ(bucket_count(), sample.size());
+ // We can't be sure the SampleSet provided came from another FlagHistogram,
+ // so we take the following steps:
+ // - If our flag has already been set do nothing.
+ // - Set our flag if the following hold:
+ // - The sum of the counts in the provided SampleSet is 1.
+ // - The bucket index for that single value is the same as the index where we
+ // would place our set flag.
+ // - Otherwise, take no action.
+
+ if (mSwitched) {
+ return;
+ }
+
+ if (sample.sum() != 1) {
+ return;
+ }
+
+ size_t one_index = BucketIndex(1);
+ if (sample.counts(one_index) == 1) {
+ Accumulate(1, 1, one_index);
+ }
+}
+
+void
+FlagHistogram::Clear() {
+ Histogram::Clear();
+
+ mSwitched = false;
+ size_t zero_index = BucketIndex(0);
+ LinearHistogram::Accumulate(0, 1, zero_index);
+}
+
+//------------------------------------------------------------------------------
+// CountHistogram:
+//------------------------------------------------------------------------------
+
+Histogram *
+CountHistogram::FactoryGet(const std::string &name, Flags flags)
+{
+ Histogram *h(nullptr);
+
+ if (!StatisticsRecorder::FindHistogram(name, &h)) {
+ CountHistogram *fh = new CountHistogram(name);
+ fh->InitializeBucketRange();
+ fh->SetFlags(flags);
+ h = StatisticsRecorder::RegisterOrDeleteDuplicate(fh);
+ }
+
+ return h;
+}
+
+CountHistogram::CountHistogram(const std::string &name)
+ : LinearHistogram(name, 1, 2, 3) {
+}
+
+Histogram::ClassType
+CountHistogram::histogram_type() const
+{
+ return COUNT_HISTOGRAM;
+}
+
+void
+CountHistogram::Accumulate(Sample value, Count count, size_t index)
+{
+ size_t zero_index = BucketIndex(0);
+ LinearHistogram::Accumulate(value, 1, zero_index);
+}
+
+void
+CountHistogram::AddSampleSet(const SampleSet& sample) {
+ DCHECK_EQ(bucket_count(), sample.size());
+ // We can't be sure the SampleSet provided came from another CountHistogram,
+ // so we at least check that the unused buckets are empty.
+
+ const size_t indices[] = { BucketIndex(0), BucketIndex(1), BucketIndex(2) };
+
+ if (sample.counts(indices[1]) != 0 || sample.counts(indices[2]) != 0) {
+ return;
+ }
+
+ if (sample.counts(indices[0]) != 0) {
+ Accumulate(1, sample.counts(indices[0]), indices[0]);
+ }
+}
+
+
+//------------------------------------------------------------------------------
+// CustomHistogram:
+//------------------------------------------------------------------------------
+
+Histogram* CustomHistogram::FactoryGet(const std::string& name,
+ const std::vector<Sample>& custom_ranges,
+ Flags flags) {
+ Histogram* histogram(NULL);
+
+ // Remove the duplicates in the custom ranges array.
+ std::vector<int> ranges = custom_ranges;
+ ranges.push_back(0); // Ensure we have a zero value.
+ std::sort(ranges.begin(), ranges.end());
+ ranges.erase(std::unique(ranges.begin(), ranges.end()), ranges.end());
+ if (ranges.size() <= 1) {
+ DCHECK(false);
+    // Note that we pushed a 0 in above, so purely as defensive code:
+ ranges.push_back(1); // Put in some data so we can index to [1].
+ }
+
+ DCHECK_LT(ranges.back(), kSampleType_MAX);
+
+ if (!StatisticsRecorder::FindHistogram(name, &histogram)) {
+ CustomHistogram* tentative_histogram = new CustomHistogram(name, ranges);
+ tentative_histogram->InitializedCustomBucketRange(ranges);
+ tentative_histogram->SetFlags(flags);
+ histogram =
+ StatisticsRecorder::RegisterOrDeleteDuplicate(tentative_histogram);
+ }
+
+ DCHECK_EQ(histogram->histogram_type(), CUSTOM_HISTOGRAM);
+ DCHECK(histogram->HasConstructorArguments(ranges[1], ranges.back(),
+ ranges.size()));
+ return histogram;
+}
+
+Histogram::ClassType CustomHistogram::histogram_type() const {
+ return CUSTOM_HISTOGRAM;
+}
+
+CustomHistogram::CustomHistogram(const std::string& name,
+ const std::vector<Sample>& custom_ranges)
+ : Histogram(name, custom_ranges[1], custom_ranges.back(),
+ custom_ranges.size()) {
+ DCHECK_GT(custom_ranges.size(), 1u);
+ DCHECK_EQ(custom_ranges[0], 0);
+}
+
+void CustomHistogram::InitializedCustomBucketRange(
+ const std::vector<Sample>& custom_ranges) {
+ DCHECK_GT(custom_ranges.size(), 1u);
+ DCHECK_EQ(custom_ranges[0], 0);
+ DCHECK_LE(custom_ranges.size(), bucket_count());
+ for (size_t index = 0; index < custom_ranges.size(); ++index)
+ SetBucketRange(index, custom_ranges[index]);
+ ResetRangeChecksum();
+}
+
+double CustomHistogram::GetBucketSize(Count current, size_t i) const {
+ return 1;
+}
+
+//------------------------------------------------------------------------------
+// The next section handles global (central) support for all histograms, as well
+// as startup/teardown of this service.
+//------------------------------------------------------------------------------
+
+// This singleton instance should be started during the single threaded portion
+// of main(), and hence it is not thread safe. It initializes globals to
+// provide support for all future calls.
+StatisticsRecorder::StatisticsRecorder() {
+ DCHECK(!histograms_);
+ if (lock_ == NULL) {
+ // This will leak on purpose. It's the only way to make sure we won't race
+ // against the static uninitialization of the module while one of our
+    // static methods relying on the lock gets called at an inappropriate time
+ // during the termination phase. Since it's a static data member, we will
+ // leak one per process, which would be similar to the instance allocated
+ // during static initialization and released only on process termination.
+ lock_ = new base::Lock;
+ }
+ base::AutoLock auto_lock(*lock_);
+ histograms_ = new HistogramMap;
+}
+
+StatisticsRecorder::~StatisticsRecorder() {
+ DCHECK(histograms_ && lock_);
+
+ if (dump_on_exit_) {
+ std::string output;
+ WriteGraph("", &output);
+ CHROMIUM_LOG(INFO) << output;
+ }
+ // Clean up.
+ HistogramMap* histograms = NULL;
+ {
+ base::AutoLock auto_lock(*lock_);
+ histograms = histograms_;
+ histograms_ = NULL;
+ for (HistogramMap::iterator it = histograms->begin();
+ histograms->end() != it;
+ ++it) {
+ // No other clients permanently hold Histogram references, so we
+ // have the only one and it is safe to delete it.
+ delete it->second;
+ }
+ }
+ delete histograms;
+ // We don't delete lock_ on purpose to avoid having to properly protect
+ // against it going away after we checked for NULL in the static methods.
+}
+
+// static
+bool StatisticsRecorder::IsActive() {
+ if (lock_ == NULL)
+ return false;
+ base::AutoLock auto_lock(*lock_);
+ return NULL != histograms_;
+}
+
+Histogram* StatisticsRecorder::RegisterOrDeleteDuplicate(Histogram* histogram) {
+ DCHECK(histogram->HasValidRangeChecksum());
+ if (lock_ == NULL)
+ return histogram;
+ base::AutoLock auto_lock(*lock_);
+ if (!histograms_)
+ return histogram;
+ const std::string name = histogram->histogram_name();
+ HistogramMap::iterator it = histograms_->find(name);
+ // Avoid overwriting a previous registration.
+ if (histograms_->end() == it) {
+ (*histograms_)[name] = histogram;
+ } else {
+ delete histogram; // We already have one by this name.
+ histogram = it->second;
+ }
+ return histogram;
+}
+
+// static
+void StatisticsRecorder::WriteHTMLGraph(const std::string& query,
+ std::string* output) {
+ if (!IsActive())
+ return;
+ output->append("<html><head><title>About Histograms");
+ if (!query.empty())
+ output->append(" - " + query);
+ output->append("</title>"
+ // We'd like the following no-cache... but it doesn't work.
+ // "<META HTTP-EQUIV=\"Pragma\" CONTENT=\"no-cache\">"
+ "</head><body>");
+
+ Histograms snapshot;
+ GetSnapshot(query, &snapshot);
+ for (Histograms::iterator it = snapshot.begin();
+ it != snapshot.end();
+ ++it) {
+ (*it)->WriteHTMLGraph(output);
+ output->append("<br><hr><br>");
+ }
+ output->append("</body></html>");
+}
+
+// static
+void StatisticsRecorder::WriteGraph(const std::string& query,
+ std::string* output) {
+ if (!IsActive())
+ return;
+ if (query.length())
+ StringAppendF(output, "Collections of histograms for %s\n", query.c_str());
+ else
+ output->append("Collections of all histograms\n");
+
+ Histograms snapshot;
+ GetSnapshot(query, &snapshot);
+ for (Histograms::iterator it = snapshot.begin();
+ it != snapshot.end();
+ ++it) {
+ (*it)->WriteAscii(true, "\n", output);
+ output->append("\n");
+ }
+}
+
+// static
+void StatisticsRecorder::GetHistograms(Histograms* output) {
+ if (lock_ == NULL)
+ return;
+ base::AutoLock auto_lock(*lock_);
+ if (!histograms_)
+ return;
+ for (HistogramMap::iterator it = histograms_->begin();
+ histograms_->end() != it;
+ ++it) {
+ DCHECK_EQ(it->first, it->second->histogram_name());
+ output->push_back(it->second);
+ }
+}
+
+bool StatisticsRecorder::FindHistogram(const std::string& name,
+ Histogram** histogram) {
+ if (lock_ == NULL)
+ return false;
+ base::AutoLock auto_lock(*lock_);
+ if (!histograms_)
+ return false;
+ HistogramMap::iterator it = histograms_->find(name);
+ if (histograms_->end() == it)
+ return false;
+ *histogram = it->second;
+ return true;
+}
+
+// private static
+void StatisticsRecorder::GetSnapshot(const std::string& query,
+ Histograms* snapshot) {
+ if (lock_ == NULL)
+ return;
+ base::AutoLock auto_lock(*lock_);
+ if (!histograms_)
+ return;
+ for (HistogramMap::iterator it = histograms_->begin();
+ histograms_->end() != it;
+ ++it) {
+ if (it->first.find(query) != std::string::npos)
+ snapshot->push_back(it->second);
+ }
+}
+
+// static
+StatisticsRecorder::HistogramMap* StatisticsRecorder::histograms_ = NULL;
+// static
+base::Lock* StatisticsRecorder::lock_ = NULL;
+// static
+bool StatisticsRecorder::dump_on_exit_ = false;
+
+} // namespace base
diff --git a/ipc/chromium/src/base/histogram.h b/ipc/chromium/src/base/histogram.h
new file mode 100644
index 000000000..20868e9dc
--- /dev/null
+++ b/ipc/chromium/src/base/histogram.h
@@ -0,0 +1,780 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Histogram is an object that aggregates statistics, and can summarize them in
+// various forms, including ASCII graphical, HTML, and numerically (as a
+// vector of numbers corresponding to each of the aggregating buckets).
+
+// It supports calls to accumulate either time intervals (which are processed
+// as integral number of milliseconds), or arbitrary integral units.
+
+// The default layout of buckets is exponential. For example, buckets might
+// contain (sequentially) the count of values in the following intervals:
+// [0,1), [1,2), [2,4), [4,8), [8,16), [16,32), [32,64), [64,infinity)
+// That bucket allocation would actually result from construction of a histogram
+// for values between 1 and 64, with 8 buckets, such as:
+// Histogram count(L"some name", 1, 64, 8);
+// Note that the underflow bucket [0,1) and the overflow bucket [64,infinity)
+// are not counted by the constructor in the user supplied "bucket_count"
+// argument.
+// The above example has an exponential ratio of 2 (doubling the bucket width
+// in each consecutive bucket). The Histogram class automatically calculates
+// the smallest ratio that it can use to construct the number of buckets
+// selected in the constructor. As another example, if you had 50 buckets,
+// and millisecond time values from 1 to 10000, then the ratio between
+// consecutive bucket widths will be approximately the 50th root of 10000
+// (about 1.2). This approach provides very fine grain (narrow) buckets
+// at the low end of the histogram scale, but allows the histogram to cover a
+// gigantic range with the addition of very few buckets.
+
+// Histograms use a pattern involving a function-static variable that is a
+// pointer to a histogram. This static is explicitly initialized on any thread
+// that detects an uninitialized (NULL) pointer. The potentially racy
+// initialization is not a problem as it is always set to point to the same
+// value (i.e., the FactoryGet always returns the same value). FactoryGet
+// is also completely thread safe, which results in a completely thread safe,
+// and relatively fast, set of counters. To avoid races at shutdown, the static
+// pointer is NOT deleted, and we leak the histograms at process termination.
+
+#ifndef BASE_METRICS_HISTOGRAM_H_
+#define BASE_METRICS_HISTOGRAM_H_
+#pragma once
+
+#include "mozilla/Atomics.h"
+#include "mozilla/MemoryReporting.h"
+
+#include <map>
+#include <string>
+#include <vector>
+
+#include "base/time.h"
+#include "base/lock.h"
+
+namespace base {
+
+//------------------------------------------------------------------------------
+// Provide easy general purpose histogram in a macro, just like stats counters.
+// The first four macros use 50 buckets.
+
+#define HISTOGRAM_TIMES(name, sample) HISTOGRAM_CUSTOM_TIMES( \
+ name, sample, base::TimeDelta::FromMilliseconds(1), \
+ base::TimeDelta::FromSeconds(10), 50)
+
+#define HISTOGRAM_COUNTS(name, sample) HISTOGRAM_CUSTOM_COUNTS( \
+ name, sample, 1, 1000000, 50)
+
+#define HISTOGRAM_COUNTS_100(name, sample) HISTOGRAM_CUSTOM_COUNTS( \
+ name, sample, 1, 100, 50)
+
+#define HISTOGRAM_COUNTS_10000(name, sample) HISTOGRAM_CUSTOM_COUNTS( \
+ name, sample, 1, 10000, 50)
+
+#define HISTOGRAM_CUSTOM_COUNTS(name, sample, min, max, bucket_count) do { \
+ static base::Histogram* counter(NULL); \
+ if (!counter) \
+ counter = base::Histogram::FactoryGet(name, min, max, bucket_count, \
+ base::Histogram::kNoFlags); \
+ DCHECK_EQ(name, counter->histogram_name()); \
+ counter->Add(sample); \
+ } while (0)
+
+#define HISTOGRAM_PERCENTAGE(name, under_one_hundred) \
+ HISTOGRAM_ENUMERATION(name, under_one_hundred, 101)
+
+// For folks that need real specific times, use this to select a precise range
+// of times you want plotted, and the number of buckets you want used.
+#define HISTOGRAM_CUSTOM_TIMES(name, sample, min, max, bucket_count) do { \
+ static base::Histogram* counter(NULL); \
+ if (!counter) \
+ counter = base::Histogram::FactoryTimeGet(name, min, max, bucket_count, \
+ base::Histogram::kNoFlags); \
+ DCHECK_EQ(name, counter->histogram_name()); \
+ counter->AddTime(sample); \
+ } while (0)
+
+// DO NOT USE THIS. It is being phased out, in favor of HISTOGRAM_CUSTOM_TIMES.
+#define HISTOGRAM_CLIPPED_TIMES(name, sample, min, max, bucket_count) do { \
+ static base::Histogram* counter(NULL); \
+ if (!counter) \
+ counter = base::Histogram::FactoryTimeGet(name, min, max, bucket_count, \
+ base::Histogram::kNoFlags); \
+ DCHECK_EQ(name, counter->histogram_name()); \
+ if ((sample) < (max)) counter->AddTime(sample); \
+ } while (0)
+
+// Support histogramming of an enumerated value. The samples should always be
+// less than boundary_value.
+
+#define HISTOGRAM_ENUMERATION(name, sample, boundary_value) do { \
+ static base::Histogram* counter(NULL); \
+ if (!counter) \
+ counter = base::LinearHistogram::FactoryGet(name, 1, boundary_value, \
+ boundary_value + 1, base::Histogram::kNoFlags); \
+ DCHECK_EQ(name, counter->histogram_name()); \
+ counter->Add(sample); \
+ } while (0)
+
+#define HISTOGRAM_CUSTOM_ENUMERATION(name, sample, custom_ranges) do { \
+ static base::Histogram* counter(NULL); \
+ if (!counter) \
+ counter = base::CustomHistogram::FactoryGet(name, custom_ranges, \
+ base::Histogram::kNoFlags); \
+ DCHECK_EQ(name, counter->histogram_name()); \
+ counter->Add(sample); \
+ } while (0)
+
+
+//------------------------------------------------------------------------------
+// Define Debug vs non-debug flavors of macros.
+#ifndef NDEBUG
+
+#define DHISTOGRAM_TIMES(name, sample) HISTOGRAM_TIMES(name, sample)
+#define DHISTOGRAM_COUNTS(name, sample) HISTOGRAM_COUNTS(name, sample)
+#define DHISTOGRAM_PERCENTAGE(name, under_one_hundred) HISTOGRAM_PERCENTAGE(\
+ name, under_one_hundred)
+#define DHISTOGRAM_CUSTOM_TIMES(name, sample, min, max, bucket_count) \
+ HISTOGRAM_CUSTOM_TIMES(name, sample, min, max, bucket_count)
+#define DHISTOGRAM_CLIPPED_TIMES(name, sample, min, max, bucket_count) \
+ HISTOGRAM_CLIPPED_TIMES(name, sample, min, max, bucket_count)
+#define DHISTOGRAM_CUSTOM_COUNTS(name, sample, min, max, bucket_count) \
+ HISTOGRAM_CUSTOM_COUNTS(name, sample, min, max, bucket_count)
+#define DHISTOGRAM_ENUMERATION(name, sample, boundary_value) \
+ HISTOGRAM_ENUMERATION(name, sample, boundary_value)
+#define DHISTOGRAM_CUSTOM_ENUMERATION(name, sample, custom_ranges) \
+ HISTOGRAM_CUSTOM_ENUMERATION(name, sample, custom_ranges)
+
+#else // NDEBUG
+
+#define DHISTOGRAM_TIMES(name, sample) do {} while (0)
+#define DHISTOGRAM_COUNTS(name, sample) do {} while (0)
+#define DHISTOGRAM_PERCENTAGE(name, under_one_hundred) do {} while (0)
+#define DHISTOGRAM_CUSTOM_TIMES(name, sample, min, max, bucket_count) \
+ do {} while (0)
+#define DHISTOGRAM_CLIPPED_TIMES(name, sample, min, max, bucket_count) \
+ do {} while (0)
+#define DHISTOGRAM_CUSTOM_COUNTS(name, sample, min, max, bucket_count) \
+ do {} while (0)
+#define DHISTOGRAM_ENUMERATION(name, sample, boundary_value) do {} while (0)
+#define DHISTOGRAM_CUSTOM_ENUMERATION(name, sample, custom_ranges) \
+ do {} while (0)
+
+#endif // NDEBUG
+
+//------------------------------------------------------------------------------
+// The following macros provide typical usage scenarios for callers that wish
+// to record histogram data, and have the data submitted/uploaded via UMA.
+// Not all systems support such UMA, but if they do, the following macros
+// should work with the service.
+
+#define UMA_HISTOGRAM_TIMES(name, sample) UMA_HISTOGRAM_CUSTOM_TIMES( \
+ name, sample, base::TimeDelta::FromMilliseconds(1), \
+ base::TimeDelta::FromSeconds(10), 50)
+
+#define UMA_HISTOGRAM_MEDIUM_TIMES(name, sample) UMA_HISTOGRAM_CUSTOM_TIMES( \
+ name, sample, base::TimeDelta::FromMilliseconds(10), \
+ base::TimeDelta::FromMinutes(3), 50)
+
+// Use this macro when times can routinely be much longer than 10 seconds.
+#define UMA_HISTOGRAM_LONG_TIMES(name, sample) UMA_HISTOGRAM_CUSTOM_TIMES( \
+ name, sample, base::TimeDelta::FromMilliseconds(1), \
+ base::TimeDelta::FromHours(1), 50)
+
+#define UMA_HISTOGRAM_CUSTOM_TIMES(name, sample, min, max, bucket_count) do { \
+ static base::Histogram* counter(NULL); \
+ if (!counter) \
+ counter = base::Histogram::FactoryTimeGet(name, min, max, bucket_count, \
+ base::Histogram::kUmaTargetedHistogramFlag); \
+ DCHECK_EQ(name, counter->histogram_name()); \
+ counter->AddTime(sample); \
+ } while (0)
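+
+// Illustrative timing calls for the UMA_HISTOGRAM_TIMES family above; the
+// histogram name and the timed work are hypothetical:
+//   base::TimeTicks start = base::TimeTicks::Now();
+//   DoExpensiveWork();
+//   UMA_HISTOGRAM_TIMES("Example.ExpensiveWork", base::TimeTicks::Now() - start);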
+
+// DO NOT USE THIS. It is being phased out in favor of HISTOGRAM_CUSTOM_TIMES.
+#define UMA_HISTOGRAM_CLIPPED_TIMES(name, sample, min, max, bucket_count) do { \
+ static base::Histogram* counter(NULL); \
+ if (!counter) \
+ counter = base::Histogram::FactoryTimeGet(name, min, max, bucket_count, \
+ base::Histogram::kUmaTargetedHistogramFlag); \
+ DCHECK_EQ(name, counter->histogram_name()); \
+ if ((sample) < (max)) counter->AddTime(sample); \
+ } while (0)
+
+#define UMA_HISTOGRAM_COUNTS(name, sample) UMA_HISTOGRAM_CUSTOM_COUNTS( \
+ name, sample, 1, 1000000, 50)
+
+#define UMA_HISTOGRAM_COUNTS_100(name, sample) UMA_HISTOGRAM_CUSTOM_COUNTS( \
+ name, sample, 1, 100, 50)
+
+#define UMA_HISTOGRAM_COUNTS_10000(name, sample) UMA_HISTOGRAM_CUSTOM_COUNTS( \
+ name, sample, 1, 10000, 50)
+
+#define UMA_HISTOGRAM_CUSTOM_COUNTS(name, sample, min, max, bucket_count) do { \
+ static base::Histogram* counter(NULL); \
+ if (!counter) \
+ counter = base::Histogram::FactoryGet(name, min, max, bucket_count, \
+ base::Histogram::kUmaTargetedHistogramFlag); \
+ DCHECK_EQ(name, counter->histogram_name()); \
+ counter->Add(sample); \
+ } while (0)
+
+#define UMA_HISTOGRAM_MEMORY_KB(name, sample) UMA_HISTOGRAM_CUSTOM_COUNTS( \
+ name, sample, 1000, 500000, 50)
+
+#define UMA_HISTOGRAM_MEMORY_MB(name, sample) UMA_HISTOGRAM_CUSTOM_COUNTS( \
+ name, sample, 1, 1000, 50)
+
+#define UMA_HISTOGRAM_PERCENTAGE(name, under_one_hundred) \
+ UMA_HISTOGRAM_ENUMERATION(name, under_one_hundred, 101)
+
+#define UMA_HISTOGRAM_BOOLEAN(name, sample) do { \
+ static base::Histogram* counter(NULL); \
+ if (!counter) \
+ counter = base::BooleanHistogram::FactoryGet(name, \
+ base::Histogram::kUmaTargetedHistogramFlag); \
+ DCHECK_EQ(name, counter->histogram_name()); \
+ counter->AddBoolean(sample); \
+ } while (0)
+
+#define UMA_HISTOGRAM_ENUMERATION(name, sample, boundary_value) do { \
+ static base::Histogram* counter(NULL); \
+ if (!counter) \
+ counter = base::LinearHistogram::FactoryGet(name, 1, boundary_value, \
+ boundary_value + 1, base::Histogram::kUmaTargetedHistogramFlag); \
+ DCHECK_EQ(name, counter->histogram_name()); \
+ counter->Add(sample); \
+ } while (0)
+
+#define UMA_HISTOGRAM_CUSTOM_ENUMERATION(name, sample, custom_ranges) do { \
+ static base::Histogram* counter(NULL); \
+ if (!counter) \
+ counter = base::CustomHistogram::FactoryGet(name, custom_ranges, \
+ base::Histogram::kUmaTargetedHistogramFlag); \
+ DCHECK_EQ(name, counter->histogram_name()); \
+ counter->Add(sample); \
+ } while (0)
+
+//------------------------------------------------------------------------------
+
+class BooleanHistogram;
+class CustomHistogram;
+class Histogram;
+class LinearHistogram;
+
+class Histogram {
+ public:
+ typedef int Sample; // Used for samples (and ranges of samples).
+ typedef int Count; // Used to count samples in a bucket.
+ static const Sample kSampleType_MAX = INT_MAX;
+ // Initialize maximum number of buckets in histograms as 16,384.
+ static const size_t kBucketCount_MAX;
+
+ typedef std::vector<Count> Counts;
+ typedef std::vector<Sample> Ranges;
+
+ // These enums are used to facilitate deserialization of renderer histograms
+ // into the browser.
+ enum ClassType {
+ HISTOGRAM,
+ LINEAR_HISTOGRAM,
+ BOOLEAN_HISTOGRAM,
+ FLAG_HISTOGRAM,
+ COUNT_HISTOGRAM,
+ CUSTOM_HISTOGRAM,
+ NOT_VALID_IN_RENDERER
+ };
+
+ enum BucketLayout {
+ EXPONENTIAL,
+ LINEAR,
+ CUSTOM
+ };
+
+ enum Flags {
+ kNoFlags = 0,
+ kUmaTargetedHistogramFlag = 0x1, // Histogram should be UMA uploaded.
+
+ kHexRangePrintingFlag = 0x8000 // Fancy bucket-naming supported.
+ };
+
+ enum Inconsistencies {
+ NO_INCONSISTENCIES = 0x0,
+ RANGE_CHECKSUM_ERROR = 0x1,
+ BUCKET_ORDER_ERROR = 0x2,
+ COUNT_HIGH_ERROR = 0x4,
+ COUNT_LOW_ERROR = 0x8,
+
+ NEVER_EXCEEDED_VALUE = 0x10
+ };
+
+ struct DescriptionPair {
+ Sample sample;
+ const char* description; // Null means end of a list of pairs.
+ };
+
+ size_t SizeOfIncludingThis(mozilla::MallocSizeOf aMallocSizeOf);
+
+ //----------------------------------------------------------------------------
+ // Statistic values, developed over the life of the histogram.
+
+ class SampleSet {
+ public:
+ explicit SampleSet();
+ ~SampleSet();
+
+ // None of the methods in this class are thread-safe. Callers
+ // must deal with locking themselves.
+
+ // Adjust size of counts_ for use with given histogram.
+ void Resize(const Histogram& histogram);
+
+ // Accessor for histogram to make routine additions.
+ void Accumulate(Sample value, Count count, size_t index);
+
+ // Arithmetic manipulation of corresponding elements of the set.
+ void Add(const SampleSet& other);
+
+ size_t SizeOfExcludingThis(mozilla::MallocSizeOf aMallocSizeOf);
+
+ Count counts(size_t i) const {
+ return counts_[i];
+ }
+ Count TotalCount() const;
+ int64_t sum() const {
+ return sum_;
+ }
+ int64_t redundant_count() const {
+ return redundant_count_;
+ }
+ size_t size() const {
+ return counts_.size();
+ }
+
+ protected:
+ // Actual histogram data is stored in buckets, showing the count of values
+ // that fit into each bucket.
+ Counts counts_;
+
+ // Save simple stats locally. Note that this MIGHT get done in base class
+ // without shared memory at some point.
+ int64_t sum_; // sum of samples.
+
+  // To help identify memory corruption, we redundantly save the number of
+  // samples we've accumulated into all of our buckets. We can compare this
+  // count to the sum of the counts in all buckets, and detect problems. Note
+  // that due to races in histogram accumulation (if a histogram is indeed
+  // updated on several threads simultaneously), the tallies might mismatch,
+  // and the snapshotting code may also observe a mismatch asynchronously
+  // (though in practice either race-induced mismatch is VERY rare).
+ int64_t redundant_count_;
+ };
+
+ //----------------------------------------------------------------------------
+ // minimum should start from 1. 0 is invalid as a minimum. 0 is an implicit
+ // default underflow bucket.
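+  //
+  // Illustrative direct factory call (the histogram name and bounds are
+  // hypothetical; the HISTOGRAM_*/UMA_HISTOGRAM_* macros above are the usual
+  // entry point):
+  //   base::Histogram* h = base::Histogram::FactoryGet(
+  //       "Example.PayloadSize", 1, 10000, 50, base::Histogram::kNoFlags);
+  //   h->Add(payload_size);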
+ static Histogram* FactoryGet(const std::string& name,
+ Sample minimum,
+ Sample maximum,
+ size_t bucket_count,
+ Flags flags);
+ static Histogram* FactoryTimeGet(const std::string& name,
+ base::TimeDelta minimum,
+ base::TimeDelta maximum,
+ size_t bucket_count,
+ Flags flags);
+
+ void Add(int value);
+ void Subtract(int value);
+
+ // TODO: Currently recording_enabled_ is not used by any Histogram class, but
+ // rather examined only by the telemetry code (via IsRecordingEnabled).
+ // Move handling to Histogram's Add() etc after simplifying Histogram.
+  void SetRecordingEnabled(bool aEnabled) { recording_enabled_ = aEnabled; }
+  bool IsRecordingEnabled() const { return recording_enabled_; }
+
+ // This method is an interface, used only by BooleanHistogram.
+ virtual void AddBoolean(bool value);
+
+ // Accept a TimeDelta to increment.
+ void AddTime(TimeDelta time) {
+ Add(static_cast<int>(time.InMilliseconds()));
+ }
+
+ virtual void AddSampleSet(const SampleSet& sample);
+
+ virtual void Clear();
+
+ // This method is an interface, used only by LinearHistogram.
+ virtual void SetRangeDescriptions(const DescriptionPair descriptions[]);
+
+ // The following methods provide graphical histogram displays.
+ void WriteHTMLGraph(std::string* output) const;
+ void WriteAscii(bool graph_it, const std::string& newline,
+ std::string* output) const;
+
+ // Support generic flagging of Histograms.
+  // 0x1 is currently used to mark this histogram to be recorded by UMA.
+  // 0x8000 means print ranges in hex.
+  void SetFlags(Flags flags) { flags_ = static_cast<Flags>(flags_ | flags); }
+ void ClearFlags(Flags flags) { flags_ = static_cast<Flags>(flags_ & ~flags); }
+ int flags() const { return flags_; }
+
+ // Check to see if bucket ranges, counts and tallies in the snapshot are
+ // consistent with the bucket ranges and checksums in our histogram. This can
+  // produce a false alarm if a race occurred in the reading of the data during
+ // a SnapShot process, but should otherwise be false at all times (unless we
+ // have memory over-writes, or DRAM failures).
+ virtual Inconsistencies FindCorruption(const SampleSet& snapshot) const;
+
+ //----------------------------------------------------------------------------
+  // Accessors for factory construction, serialization and testing.
+ //----------------------------------------------------------------------------
+ virtual ClassType histogram_type() const;
+ const std::string& histogram_name() const { return histogram_name_; }
+ Sample declared_min() const { return declared_min_; }
+ Sample declared_max() const { return declared_max_; }
+ virtual Sample ranges(size_t i) const;
+ uint32_t range_checksum() const { return range_checksum_; }
+ virtual size_t bucket_count() const;
+
+ // Do a safe atomic snapshot of sample data. The caller is assumed to
+ // have exclusive access to the destination, |*sample|, and no locking
+ // of it is done here.
+ virtual void SnapshotSample(SampleSet* sample) const;
+
+ virtual bool HasConstructorArguments(Sample minimum, Sample maximum,
+ size_t bucket_count);
+
+ virtual bool HasConstructorTimeDeltaArguments(TimeDelta minimum,
+ TimeDelta maximum,
+ size_t bucket_count);
+  // Return true iff the range_checksum_ matches the current ranges_ vector.
+ bool HasValidRangeChecksum() const;
+
+ protected:
+ Histogram(const std::string& name, Sample minimum,
+ Sample maximum, size_t bucket_count);
+ Histogram(const std::string& name, TimeDelta minimum,
+ TimeDelta maximum, size_t bucket_count);
+
+ virtual ~Histogram();
+
+ // Initialize ranges_ mapping.
+ void InitializeBucketRange();
+
+ // Method to override to skip the display of the i'th bucket if it's empty.
+ virtual bool PrintEmptyBucket(size_t index) const;
+
+ //----------------------------------------------------------------------------
+ // Methods to override to create histogram with different bucket widths.
+ //----------------------------------------------------------------------------
+ // Find bucket to increment for sample value.
+ virtual size_t BucketIndex(Sample value) const;
+ // Get normalized size, relative to the ranges_[i].
+ virtual double GetBucketSize(Count current, size_t i) const;
+
+ // Recalculate range_checksum_.
+ void ResetRangeChecksum();
+
+ // Return a string description of what goes in a given bucket.
+ // Most commonly this is the numeric value, but in derived classes it may
+ // be a name (or string description) given to the bucket.
+ virtual const std::string GetAsciiBucketRange(size_t it) const;
+
+ //----------------------------------------------------------------------------
+ // Methods to override to create thread safe histogram.
+ //----------------------------------------------------------------------------
+ // Update all our internal data, including histogram
+ virtual void Accumulate(Sample value, Count count, size_t index);
+
+ //----------------------------------------------------------------------------
+ // Accessors for derived classes.
+ //----------------------------------------------------------------------------
+ void SetBucketRange(size_t i, Sample value);
+
+ // Validate that ranges_ was created sensibly (top and bottom range
+  // values relate properly to the declared_min_ and declared_max_).
+ bool ValidateBucketRanges() const;
+
+ virtual uint32_t CalculateRangeChecksum() const;
+
+ // Finally, provide the state that changes with the addition of each new
+ // sample.
+ SampleSet sample_;
+
+ private:
+ friend class StatisticsRecorder; // To allow it to delete duplicates.
+
+ // Post constructor initialization.
+ void Initialize();
+
+ // Checksum function for accumulating range values into a checksum.
+ static uint32_t Crc32(uint32_t sum, Sample range);
+
+ //----------------------------------------------------------------------------
+  // Helpers for emitting ASCII graphics. Each method appends data to output.
+
+  // Find out how large (graphically) the largest bucket will appear to be.
+ double GetPeakBucketSize(const SampleSet& snapshot) const;
+
+ // Write a common header message describing this histogram.
+ void WriteAsciiHeader(const SampleSet& snapshot,
+ Count sample_count, std::string* output) const;
+
+ // Write information about previous, current, and next buckets.
+ // Information such as cumulative percentage, etc.
+ void WriteAsciiBucketContext(const int64_t past, const Count current,
+ const int64_t remaining, const size_t i,
+ std::string* output) const;
+
+ // Write textual description of the bucket contents (relative to histogram).
+ // Output is the count in the buckets, as well as the percentage.
+ void WriteAsciiBucketValue(Count current, double scaled_sum,
+ std::string* output) const;
+
+  // Produce actual graph (set of blank vs. non-blank chars) for a bucket.
+ void WriteAsciiBucketGraph(double current_size, double max_size,
+ std::string* output) const;
+
+ //----------------------------------------------------------------------------
+ // Table for generating Crc32 values.
+ static const uint32_t kCrcTable[256];
+ //----------------------------------------------------------------------------
+ // Invariant values set at/near construction time
+
+ // ASCII version of original name given to the constructor. All identically
+ // named instances will be coalesced cross-project.
+ const std::string histogram_name_;
+ Sample declared_min_; // Less than this goes into counts_[0]
+ Sample declared_max_; // Over this goes into counts_[bucket_count_ - 1].
+ size_t bucket_count_; // Dimension of counts_[].
+
+ // Flag the histogram for recording by UMA via metric_services.h.
+ Flags flags_;
+
+ // For each index, show the least value that can be stored in the
+ // corresponding bucket. We also append one extra element in this array,
+ // containing kSampleType_MAX, to make calculations easy.
+ // The dimension of ranges_ is bucket_count + 1.
+ Ranges ranges_;
+
+ // For redundancy, we store a checksum of all the sample ranges when ranges
+  // are generated. If there is ever a difference, then the histogram must
+ // have been corrupted.
+ uint32_t range_checksum_;
+
+ // When false, new samples are completely ignored.
+ mozilla::Atomic<bool, mozilla::Relaxed> recording_enabled_;
+
+ DISALLOW_COPY_AND_ASSIGN(Histogram);
+};
+
+//------------------------------------------------------------------------------
+
+// LinearHistogram is a more traditional histogram, with evenly spaced
+// buckets.
+class LinearHistogram : public Histogram {
+ public:
+ virtual ~LinearHistogram();
+
+  /* minimum should start from 1. 0 is invalid as a minimum. 0 is an implicit
+     default underflow bucket. */
+ static Histogram* FactoryGet(const std::string& name,
+ Sample minimum,
+ Sample maximum,
+ size_t bucket_count,
+ Flags flags);
+ static Histogram* FactoryTimeGet(const std::string& name,
+ TimeDelta minimum,
+ TimeDelta maximum,
+ size_t bucket_count,
+ Flags flags);
+
+ // Overridden from Histogram:
+ virtual ClassType histogram_type() const;
+
+ virtual void Accumulate(Sample value, Count count, size_t index);
+
+ // Store a list of number/text values for use in rendering the histogram.
+ // The last element in the array has a null in its "description" slot.
+ virtual void SetRangeDescriptions(const DescriptionPair descriptions[]);
+
+ protected:
+ LinearHistogram(const std::string& name, Sample minimum,
+ Sample maximum, size_t bucket_count);
+
+ LinearHistogram(const std::string& name, TimeDelta minimum,
+ TimeDelta maximum, size_t bucket_count);
+
+ // Initialize ranges_ mapping.
+ void InitializeBucketRange();
+ virtual double GetBucketSize(Count current, size_t i) const;
+
+ // If we have a description for a bucket, then return that. Otherwise
+ // let parent class provide a (numeric) description.
+ virtual const std::string GetAsciiBucketRange(size_t i) const;
+
+ // Skip printing of name for numeric range if we have a name (and if this is
+ // an empty bucket).
+ virtual bool PrintEmptyBucket(size_t index) const;
+
+ private:
+ // For some ranges, we store a printable description of a bucket range.
+  // If there is no description, then GetAsciiBucketRange() uses parent class
+ // to provide a description.
+ typedef std::map<Sample, std::string> BucketDescriptionMap;
+ BucketDescriptionMap bucket_description_;
+
+ DISALLOW_COPY_AND_ASSIGN(LinearHistogram);
+};
+
+//------------------------------------------------------------------------------
+
+// BooleanHistogram is a histogram for booleans.
+class BooleanHistogram : public LinearHistogram {
+ public:
+ static Histogram* FactoryGet(const std::string& name, Flags flags);
+
+ virtual ClassType histogram_type() const;
+
+ virtual void AddBoolean(bool value);
+
+ virtual void Accumulate(Sample value, Count count, size_t index);
+
+ protected:
+ explicit BooleanHistogram(const std::string& name);
+
+ DISALLOW_COPY_AND_ASSIGN(BooleanHistogram);
+};
+
+//------------------------------------------------------------------------------
+
+// FlagHistogram is like a boolean histogram, but only allows a single
+// off/on value.
+class FlagHistogram : public BooleanHistogram
+{
+public:
+ static Histogram *FactoryGet(const std::string &name, Flags flags);
+
+ virtual ClassType histogram_type() const;
+
+ virtual void Accumulate(Sample value, Count count, size_t index);
+
+ virtual void AddSampleSet(const SampleSet& sample);
+
+ virtual void Clear();
+
+private:
+ explicit FlagHistogram(const std::string &name);
+ bool mSwitched;
+
+ DISALLOW_COPY_AND_ASSIGN(FlagHistogram);
+};
+
+// CountHistogram only allows a single monotonic counter value.
+class CountHistogram : public LinearHistogram
+{
+public:
+ static Histogram *FactoryGet(const std::string &name, Flags flags);
+
+ virtual ClassType histogram_type() const;
+
+ virtual void Accumulate(Sample value, Count count, size_t index);
+
+ virtual void AddSampleSet(const SampleSet& sample);
+
+private:
+ explicit CountHistogram(const std::string &name);
+
+ DISALLOW_COPY_AND_ASSIGN(CountHistogram);
+};
+
+//------------------------------------------------------------------------------
+
+// CustomHistogram is a histogram for a set of custom integers.
+class CustomHistogram : public Histogram {
+ public:
+
+ static Histogram* FactoryGet(const std::string& name,
+ const std::vector<Sample>& custom_ranges,
+ Flags flags);
+
+ // Overridden from Histogram:
+ virtual ClassType histogram_type() const;
+
+ protected:
+ CustomHistogram(const std::string& name,
+ const std::vector<Sample>& custom_ranges);
+
+ // Initialize ranges_ mapping.
+ void InitializedCustomBucketRange(const std::vector<Sample>& custom_ranges);
+ virtual double GetBucketSize(Count current, size_t i) const;
+
+ DISALLOW_COPY_AND_ASSIGN(CustomHistogram);
+};
+
+//------------------------------------------------------------------------------
+// StatisticsRecorder handles all histograms in the system. It provides a
+// general place for histograms to register, and supports a global API for
+// accessing (i.e., dumping, or graphing) the data in all the histograms.
+
+class StatisticsRecorder {
+ public:
+ typedef std::vector<Histogram*> Histograms;
+
+ StatisticsRecorder();
+
+ ~StatisticsRecorder();
+
+ // Find out if histograms can now be registered into our list.
+ static bool IsActive();
+
+ // Register, or add a new histogram to the collection of statistics. If an
+ // identically named histogram is already registered, then the argument
+  // |histogram| will be deleted. The returned value is always the registered
+ // histogram (either the argument, or the pre-existing registered histogram).
+ static Histogram* RegisterOrDeleteDuplicate(Histogram* histogram);
+
+  // Methods for printing histograms. Only histograms whose names contain
+  // |query| as a substring are written to |output| (an empty string will
+  // process all registered histograms).
+ static void WriteHTMLGraph(const std::string& query, std::string* output);
+ static void WriteGraph(const std::string& query, std::string* output);
+
+ // Method for extracting histograms which were marked for use by UMA.
+ static void GetHistograms(Histograms* output);
+
+ // Find a histogram by name. It matches the exact name. This method is thread
+  // safe. If a matching histogram is not found, then |histogram| is
+ // not changed.
+ static bool FindHistogram(const std::string& query, Histogram** histogram);
+
+ static bool dump_on_exit() { return dump_on_exit_; }
+
+ static void set_dump_on_exit(bool enable) { dump_on_exit_ = enable; }
+
+  // GetSnapshot copies some of the pointers to registered histograms into the
+  // caller-supplied vector (Histograms). Only histograms with names matching
+  // |query| are returned; |query| must be a substring of a histogram's name
+  // for its pointer to be copied.
+ static void GetSnapshot(const std::string& query, Histograms* snapshot);
+
+
+ private:
+ // We keep all registered histograms in a map, from name to histogram.
+ typedef std::map<std::string, Histogram*> HistogramMap;
+
+ static HistogramMap* histograms_;
+
+ // lock protects access to the above map.
+ static Lock* lock_;
+
+ // Dump all known histograms to log.
+ static bool dump_on_exit_;
+
+ DISALLOW_COPY_AND_ASSIGN(StatisticsRecorder);
+};
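+
+// Illustrative lookup through the recorder; the histogram name is hypothetical
+// and |h| is left untouched when no exact match exists:
+//   Histogram* h = NULL;
+//   if (StatisticsRecorder::FindHistogram("Example.LoadPhase", &h))
+//     h->Add(1);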
+
+} // namespace base
+
+#endif // BASE_METRICS_HISTOGRAM_H_
diff --git a/ipc/chromium/src/base/id_map.h b/ipc/chromium/src/base/id_map.h
new file mode 100644
index 000000000..bdd17effd
--- /dev/null
+++ b/ipc/chromium/src/base/id_map.h
@@ -0,0 +1,107 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ID_MAP_H__
+#define BASE_ID_MAP_H__
+
+#include "base/basictypes.h"
+#include "base/hash_tables.h"
+#include "base/logging.h"
+
+// This object maintains a list of IDs that can be quickly converted to
+// pointers to objects. It is implemented as a hash table, optimized for
+// relatively small data sets (in the common case, there will be exactly one
+// item in the list).
+//
+// Items can be inserted into the container with an arbitrary ID, but the
+// caller must ensure they are unique. Mixing caller-supplied IDs with the
+// automatically generated ones is not allowed, because they can collide.
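+//
+// Illustrative usage; Widget is a hypothetical caller-owned type (the map does
+// not take ownership):
+//   IDMap<Widget> widgets;
+//   int32_t id = widgets.Add(widget);  // widget is a Widget*
+//   Widget* w = widgets.Lookup(id);    // NULL if the ID is unknown
+//   widgets.Remove(id);                // the Widget itself is not deleted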
+template<class T>
+class IDMap {
+ private:
+ typedef base::hash_map<int32_t, T*> HashTable;
+ typedef typename HashTable::iterator iterator;
+
+ public:
+ // support const iterators over the items
+ // Note, use iterator->first to get the ID, iterator->second to get the T*
+ typedef typename HashTable::const_iterator const_iterator;
+
+ IDMap() : next_id_(1) {
+ }
+ IDMap(const IDMap& other) : next_id_(other.next_id_),
+ data_(other.data_) {
+ }
+
+ const_iterator begin() const {
+ return data_.begin();
+ }
+ const_iterator end() const {
+ return data_.end();
+ }
+
+  // Adds an item with an automatically generated unique ID. See AddWithID.
+ int32_t Add(T* data) {
+ int32_t this_id = next_id_;
+ DCHECK(data_.find(this_id) == data_.end()) << "Inserting duplicate item";
+ data_[this_id] = data;
+ next_id_++;
+ return this_id;
+ }
+
+ // Adds a new data member with the specified ID. The ID must not be in
+ // the list. The caller either must generate all unique IDs itself and use
+ // this function, or allow this object to generate IDs and call Add. These
+  // two methods may not be mixed, or duplicate IDs may be generated.
+ void AddWithID(T* data, int32_t id) {
+ DCHECK(data_.find(id) == data_.end()) << "Inserting duplicate item";
+ data_[id] = data;
+ }
+
+ void Remove(int32_t id) {
+ iterator i = data_.find(id);
+ if (i == data_.end()) {
+ NOTREACHED() << "Attempting to remove an item not in the list";
+ return;
+ }
+ data_.erase(i);
+ }
+
+ bool IsEmpty() const {
+ return data_.empty();
+ }
+
+ void Clear() {
+ data_.clear();
+ }
+
+ bool HasData(const T* data) const {
+ // XXX would like to use <algorithm> here ...
+ for (const_iterator it = begin(); it != end(); ++it)
+ if (data == it->second)
+ return true;
+ return false;
+ }
+
+ T* Lookup(int32_t id) const {
+ const_iterator i = data_.find(id);
+ if (i == data_.end())
+ return NULL;
+ return i->second;
+ }
+
+ size_t size() const {
+ return data_.size();
+ }
+
+ protected:
+ // The next ID that we will return from Add()
+ int32_t next_id_;
+
+ HashTable data_;
+};
+
+#endif // BASE_ID_MAP_H__
diff --git a/ipc/chromium/src/base/linked_ptr.h b/ipc/chromium/src/base/linked_ptr.h
new file mode 100644
index 000000000..1484bb4a3
--- /dev/null
+++ b/ipc/chromium/src/base/linked_ptr.h
@@ -0,0 +1,176 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// A "smart" pointer type with reference tracking. Every pointer to a
+// particular object is kept on a circular linked list. When the last pointer
+// to an object is destroyed or reassigned, the object is deleted.
+//
+// Used properly, this deletes the object when the last reference goes away.
+// There are several caveats:
+// - Like all reference counting schemes, cycles lead to leaks.
+// - Each smart pointer is actually two pointers (8 bytes instead of 4).
+// - Every time a pointer is released, the entire list of pointers to that
+// object is traversed. This class is therefore NOT SUITABLE when there
+// will often be more than two or three pointers to a particular object.
+// - References are only tracked as long as linked_ptr<> objects are copied.
+// If a linked_ptr<> is converted to a raw pointer and back, BAD THINGS
+// will happen (double deletion).
+//
+// A good use of this class is storing object references in STL containers.
+// You can safely put linked_ptr<> in a vector<>.
+// Other uses may not be as good.
+//
+// Note: If you use an incomplete type with linked_ptr<>, the class
+// *containing* linked_ptr<> must have a constructor and destructor (even
+// if they do nothing!).
+//
+// Thread Safety:
+// A linked_ptr is NOT thread safe. Copying a linked_ptr object is
+// effectively a read-write operation.
+//
+// An alternative to linked_ptr is shared_ptr, which
+// - is also two pointers in size (8 bytes for 32 bit addresses)
+// - is thread safe for copying and deletion
+// - supports weak_ptrs
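+//
+// Illustrative usage; Foo is a hypothetical type:
+//   std::vector<linked_ptr<Foo> > foos;
+//   foos.push_back(linked_ptr<Foo>(new Foo));
+//   foos[0]->DoSomething();  // the Foo is deleted when the last copy goes away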
+
+#ifndef BASE_LINKED_PTR_H_
+#define BASE_LINKED_PTR_H_
+
+#include "base/logging.h" // for CHECK macros
+
+// This is used internally by all instances of linked_ptr<>. It needs to be
+// a non-template class because different types of linked_ptr<> can refer to
+// the same object (linked_ptr<Superclass>(obj) vs linked_ptr<Subclass>(obj)).
+// Different types of linked_ptr need to be able to participate in the same
+// circular linked list, so we need a single class type here.
+//
+// DO NOT USE THIS CLASS DIRECTLY YOURSELF. Use linked_ptr<T>.
+class linked_ptr_internal {
+ public:
+ // Create a new circle that includes only this instance.
+ void join_new() {
+ next_ = this;
+ }
+
+ // Join an existing circle.
+ void join(linked_ptr_internal const* ptr) {
+ next_ = ptr->next_;
+ ptr->next_ = this;
+ }
+
+ // Leave whatever circle we're part of. Returns true iff we were the
+ // last member of the circle. Once this is done, you can join() another.
+ bool depart() {
+ if (next_ == this) return true;
+ linked_ptr_internal const* p = next_;
+ while (p->next_ != this) p = p->next_;
+ p->next_ = next_;
+ return false;
+ }
+
+ private:
+ mutable linked_ptr_internal const* next_;
+};
+
+template <typename T>
+class linked_ptr {
+ public:
+ typedef T element_type;
+
+ // Take over ownership of a raw pointer. This should happen as soon as
+ // possible after the object is created.
+ explicit linked_ptr(T* ptr = NULL) { capture(ptr); }
+ ~linked_ptr() { depart(); }
+
+ // Copy an existing linked_ptr<>, adding ourselves to the list of references.
+ template <typename U> linked_ptr(linked_ptr<U> const& ptr) { copy(&ptr); }
+ linked_ptr(linked_ptr const& ptr) { DCHECK_NE(&ptr, this); copy(&ptr); }
+
+ // Assignment releases the old value and acquires the new.
+ template <typename U> linked_ptr& operator=(linked_ptr<U> const& ptr) {
+ depart();
+ copy(&ptr);
+ return *this;
+ }
+
+ linked_ptr& operator=(linked_ptr const& ptr) {
+ if (&ptr != this) {
+ depart();
+ copy(&ptr);
+ }
+ return *this;
+ }
+
+ // Smart pointer members.
+ void reset(T* ptr = NULL) { depart(); capture(ptr); }
+ T* get() const { return value_; }
+ T* operator->() const { return value_; }
+ T& operator*() const { return *value_; }
+  // Releases ownership of the pointed-to object and returns it.
+ // Sole ownership by this linked_ptr object is required.
+ T* release() {
+ bool last = link_.depart();
+ CHECK(last);
+ T* v = value_;
+ value_ = NULL;
+ return v;
+ }
+
+ bool operator==(const T* p) const { return value_ == p; }
+ bool operator!=(const T* p) const { return value_ != p; }
+ template <typename U>
+ bool operator==(linked_ptr<U> const& ptr) const {
+ return value_ == ptr.get();
+ }
+ template <typename U>
+ bool operator!=(linked_ptr<U> const& ptr) const {
+ return value_ != ptr.get();
+ }
+
+ private:
+ template <typename U>
+ friend class linked_ptr;
+
+ T* value_;
+ linked_ptr_internal link_;
+
+ void depart() {
+ if (link_.depart()) delete value_;
+ }
+
+ void capture(T* ptr) {
+ value_ = ptr;
+ link_.join_new();
+ }
+
+ template <typename U> void copy(linked_ptr<U> const* ptr) {
+ value_ = ptr->get();
+ if (value_)
+ link_.join(&ptr->link_);
+ else
+ link_.join_new();
+ }
+};
+
+template<typename T> inline
+bool operator==(T* ptr, const linked_ptr<T>& x) {
+ return ptr == x.get();
+}
+
+template<typename T> inline
+bool operator!=(T* ptr, const linked_ptr<T>& x) {
+ return ptr != x.get();
+}
+
+// A function to convert T* into linked_ptr<T>
+// Doing e.g. make_linked_ptr(new FooBarBaz<type>(arg)) is a shorter notation
+// for linked_ptr<FooBarBaz<type> >(new FooBarBaz<type>(arg))
+template <typename T>
+linked_ptr<T> make_linked_ptr(T* ptr) {
+ return linked_ptr<T>(ptr);
+}
+
+#endif // BASE_LINKED_PTR_H_
diff --git a/ipc/chromium/src/base/lock.cc b/ipc/chromium/src/base/lock.cc
new file mode 100644
index 000000000..47d3a4de1
--- /dev/null
+++ b/ipc/chromium/src/base/lock.cc
@@ -0,0 +1,9 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Lock class.
+
+// Deprecated file. See lock_impl_*.cc for platform-specific versions.
diff --git a/ipc/chromium/src/base/lock.h b/ipc/chromium/src/base/lock.h
new file mode 100644
index 000000000..3f8b96a8f
--- /dev/null
+++ b/ipc/chromium/src/base/lock.h
@@ -0,0 +1,77 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_LOCK_H_
+#define BASE_LOCK_H_
+
+#include "base/lock_impl.h"
+
+// A convenient wrapper for an OS specific critical section.
+
+class Lock {
+ public:
+ Lock() : lock_() {}
+ ~Lock() {}
+ void Acquire() { lock_.Lock(); }
+ void Release() { lock_.Unlock(); }
+ // If the lock is not held, take it and return true. If the lock is already
+ // held by another thread, immediately return false.
+ bool Try() { return lock_.Try(); }
+
+ // In debug builds this method checks that the lock has been acquired by the
+ // calling thread. If the lock has not been acquired, then the method
+ // will DCHECK(). In non-debug builds, the LockImpl's implementation of
+ // AssertAcquired() is an empty inline method.
+ void AssertAcquired() const { return lock_.AssertAcquired(); }
+
+ // Return the underlying lock implementation.
+ // TODO(awalker): refactor lock and condition variables so that this is
+ // unnecessary.
+ LockImpl* lock_impl() { return &lock_; }
+
+ private:
+ LockImpl lock_; // Platform specific underlying lock implementation.
+
+ DISALLOW_COPY_AND_ASSIGN(Lock);
+};
+
+// A helper class that acquires the given Lock while the AutoLock is in scope.
+class AutoLock {
+ public:
+ explicit AutoLock(Lock& lock) : lock_(lock) {
+ lock_.Acquire();
+ }
+
+ ~AutoLock() {
+ lock_.AssertAcquired();
+ lock_.Release();
+ }
+
+ private:
+ Lock& lock_;
+ DISALLOW_COPY_AND_ASSIGN(AutoLock);
+};
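+
+// Illustrative usage; the surrounding class and its members are hypothetical:
+//   class Counter {
+//    public:
+//     void Increment() {
+//       AutoLock auto_lock(lock_);  // released automatically at end of scope
+//       ++count_;
+//     }
+//    private:
+//     Lock lock_;
+//     int count_;
+//   };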
+
+// AutoUnlock is a helper that will Release() the |lock| argument in the
+// constructor, and re-Acquire() it in the destructor.
+class AutoUnlock {
+ public:
+ explicit AutoUnlock(Lock& lock) : lock_(lock) {
+ // We require our caller to have the lock.
+ lock_.AssertAcquired();
+ lock_.Release();
+ }
+
+ ~AutoUnlock() {
+ lock_.Acquire();
+ }
+
+ private:
+ Lock& lock_;
+ DISALLOW_COPY_AND_ASSIGN(AutoUnlock);
+};
+
+#endif // BASE_LOCK_H_
diff --git a/ipc/chromium/src/base/lock_impl.h b/ipc/chromium/src/base/lock_impl.h
new file mode 100644
index 000000000..aea01bd88
--- /dev/null
+++ b/ipc/chromium/src/base/lock_impl.h
@@ -0,0 +1,79 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_LOCK_IMPL_H_
+#define BASE_LOCK_IMPL_H_
+
+#include "build/build_config.h"
+
+#if defined(OS_WIN)
+#include <windows.h>
+#elif defined(OS_POSIX)
+#include <pthread.h>
+#endif
+
+#include "base/basictypes.h"
+#include "base/platform_thread.h"
+
+// This class implements the underlying platform-specific spin-lock mechanism
+// used for the Lock class. Most users should not use LockImpl directly, but
+// should instead use Lock.
+class LockImpl {
+ public:
+#if defined(OS_WIN)
+ typedef CRITICAL_SECTION OSLockType;
+#elif defined(OS_POSIX)
+ typedef pthread_mutex_t OSLockType;
+#endif
+
+ LockImpl();
+ ~LockImpl();
+
+ // If the lock is not held, take it and return true. If the lock is already
+ // held by something else, immediately return false.
+ bool Try();
+
+ // Take the lock, blocking until it is available if necessary.
+ void Lock();
+
+ // Release the lock. This must only be called by the lock's holder: after
+ // a successful call to Try, or a call to Lock.
+ void Unlock();
+
+ // Debug-only method that will DCHECK() if the lock is not acquired by the
+ // current thread. In non-debug builds, no check is performed.
+  // Because Linux and Mac condition variables modify the underlying lock
+  // through the os_lock() method, runtime assertions cannot be done on those
+ // builds.
+#if defined(NDEBUG) || !defined(OS_WIN)
+ void AssertAcquired() const {}
+#else
+ void AssertAcquired() const;
+#endif
+
+ // Return the native underlying lock. Not supported for Windows builds.
+ // TODO(awalker): refactor lock and condition variables so that this is
+ // unnecessary.
+#if !defined(OS_WIN)
+ OSLockType* os_lock() { return &os_lock_; }
+#endif
+
+ private:
+ OSLockType os_lock_;
+
+#if !defined(NDEBUG) && defined(OS_WIN)
+ // All private data is implicitly protected by lock_.
+ // Be VERY careful to only access members under that lock.
+ PlatformThreadId owning_thread_id_;
+ int32_t recursion_count_shadow_;
+  bool recursion_used_; // Allow debugging to continue after a DCHECK().
+#endif // NDEBUG
+
+ DISALLOW_COPY_AND_ASSIGN(LockImpl);
+};
+
+
+#endif // BASE_LOCK_IMPL_H_
diff --git a/ipc/chromium/src/base/lock_impl_posix.cc b/ipc/chromium/src/base/lock_impl_posix.cc
new file mode 100644
index 000000000..50fec95db
--- /dev/null
+++ b/ipc/chromium/src/base/lock_impl_posix.cc
@@ -0,0 +1,50 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/lock_impl.h"
+
+#include <errno.h>
+
+#include "base/logging.h"
+
+LockImpl::LockImpl() {
+#ifndef NDEBUG
+ // In debug, setup attributes for lock error checking.
+ pthread_mutexattr_t mta;
+ int rv = pthread_mutexattr_init(&mta);
+ DCHECK_EQ(rv, 0);
+ rv = pthread_mutexattr_settype(&mta, PTHREAD_MUTEX_ERRORCHECK);
+ DCHECK_EQ(rv, 0);
+ rv = pthread_mutex_init(&os_lock_, &mta);
+ DCHECK_EQ(rv, 0);
+ rv = pthread_mutexattr_destroy(&mta);
+ DCHECK_EQ(rv, 0);
+#else
+ // In release, go with the default lock attributes.
+ pthread_mutex_init(&os_lock_, NULL);
+#endif
+}
+
+LockImpl::~LockImpl() {
+ int rv = pthread_mutex_destroy(&os_lock_);
+ DCHECK_EQ(rv, 0);
+}
+
+bool LockImpl::Try() {
+ int rv = pthread_mutex_trylock(&os_lock_);
+ DCHECK(rv == 0 || rv == EBUSY);
+ return rv == 0;
+}
+
+void LockImpl::Lock() {
+ int rv = pthread_mutex_lock(&os_lock_);
+ DCHECK_EQ(rv, 0);
+}
+
+void LockImpl::Unlock() {
+ int rv = pthread_mutex_unlock(&os_lock_);
+ DCHECK_EQ(rv, 0);
+}
diff --git a/ipc/chromium/src/base/lock_impl_win.cc b/ipc/chromium/src/base/lock_impl_win.cc
new file mode 100644
index 000000000..2a50fb234
--- /dev/null
+++ b/ipc/chromium/src/base/lock_impl_win.cc
@@ -0,0 +1,75 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/lock_impl.h"
+#include "base/logging.h"
+
+// NOTE: Although windows critical sections support recursive locks, we do not
+// allow this, and we will commonly fire a DCHECK() if a thread attempts to
+// acquire the lock a second time (while already holding it).
+
+LockImpl::LockImpl() {
+#ifndef NDEBUG
+ recursion_count_shadow_ = 0;
+ recursion_used_ = false;
+ owning_thread_id_ = 0;
+#endif // NDEBUG
+  // The second parameter is the spin count; for short-held locks it keeps the
+  // contending thread from going to sleep, which helps performance greatly.
+ ::InitializeCriticalSectionAndSpinCount(&os_lock_, 2000);
+}
+
+LockImpl::~LockImpl() {
+ ::DeleteCriticalSection(&os_lock_);
+}
+
+bool LockImpl::Try() {
+ if (::TryEnterCriticalSection(&os_lock_) != FALSE) {
+#ifndef NDEBUG
+ // ONLY access data after locking.
+ owning_thread_id_ = PlatformThread::CurrentId();
+ DCHECK_NE(owning_thread_id_, 0);
+ recursion_count_shadow_++;
+ if (2 == recursion_count_shadow_ && !recursion_used_) {
+ recursion_used_ = true;
+ DCHECK(false); // Catch accidental redundant lock acquisition.
+ }
+#endif
+ return true;
+ }
+ return false;
+}
+
+void LockImpl::Lock() {
+ ::EnterCriticalSection(&os_lock_);
+#ifndef NDEBUG
+ // ONLY access data after locking.
+ owning_thread_id_ = PlatformThread::CurrentId();
+ DCHECK_NE(owning_thread_id_, 0);
+ recursion_count_shadow_++;
+ if (2 == recursion_count_shadow_ && !recursion_used_) {
+ recursion_used_ = true;
+ DCHECK(false); // Catch accidental redundant lock acquisition.
+ }
+#endif // NDEBUG
+}
+
+void LockImpl::Unlock() {
+#ifndef NDEBUG
+ --recursion_count_shadow_; // ONLY access while lock is still held.
+ DCHECK(0 <= recursion_count_shadow_);
+ owning_thread_id_ = 0;
+#endif // NDEBUG
+ ::LeaveCriticalSection(&os_lock_);
+}
+
+// In non-debug builds, this method is declared as an empty inline method.
+#ifndef NDEBUG
+void LockImpl::AssertAcquired() const {
+ DCHECK(recursion_count_shadow_ > 0);
+ DCHECK_EQ(owning_thread_id_, PlatformThread::CurrentId());
+}
+#endif
diff --git a/ipc/chromium/src/base/logging.cc b/ipc/chromium/src/base/logging.cc
new file mode 100644
index 000000000..d793990de
--- /dev/null
+++ b/ipc/chromium/src/base/logging.cc
@@ -0,0 +1,99 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/logging.h"
+#include "prmem.h"
+#include "prprf.h"
+#include "base/string_util.h"
+#include "nsXPCOM.h"
+
+namespace mozilla {
+
+Logger::~Logger()
+{
+ LogLevel prlevel = LogLevel::Debug;
+ int xpcomlevel = -1;
+
+ switch (mSeverity) {
+ case LOG_INFO:
+ prlevel = LogLevel::Debug;
+ xpcomlevel = -1;
+ break;
+
+ case LOG_WARNING:
+ prlevel = LogLevel::Warning;
+ xpcomlevel = NS_DEBUG_WARNING;
+ break;
+
+ case LOG_ERROR:
+ prlevel = LogLevel::Error;
+ xpcomlevel = NS_DEBUG_WARNING;
+ break;
+
+ case LOG_ERROR_REPORT:
+ prlevel = LogLevel::Error;
+ xpcomlevel = NS_DEBUG_ASSERTION;
+ break;
+
+ case LOG_FATAL:
+ prlevel = LogLevel::Error;
+ xpcomlevel = NS_DEBUG_ABORT;
+ break;
+ }
+
+ MOZ_LOG(gChromiumPRLog, prlevel, ("%s:%i: %s", mFile, mLine, mMsg ? mMsg : "<no message>"));
+ if (xpcomlevel != -1)
+ NS_DebugBreak(xpcomlevel, mMsg, NULL, mFile, mLine);
+
+ PR_Free(mMsg);
+}
+
+void
+Logger::printf(const char* fmt, ...)
+{
+ va_list args;
+ va_start(args, fmt);
+ mMsg = PR_vsprintf_append(mMsg, fmt, args);
+ va_end(args);
+}
+
+LazyLogModule Logger::gChromiumPRLog("chromium");
+} // namespace mozilla
+
+mozilla::Logger&
+operator<<(mozilla::Logger& log, const char* s)
+{
+ log.printf("%s", s);
+ return log;
+}
+
+mozilla::Logger&
+operator<<(mozilla::Logger& log, const std::string& s)
+{
+ log.printf("%s", s.c_str());
+ return log;
+}
+
+mozilla::Logger&
+operator<<(mozilla::Logger& log, int i)
+{
+ log.printf("%i", i);
+ return log;
+}
+
+mozilla::Logger&
+operator<<(mozilla::Logger& log, const std::wstring& s)
+{
+ log.printf("%s", WideToASCII(s).c_str());
+ return log;
+}
+
+mozilla::Logger&
+operator<<(mozilla::Logger& log, void* p)
+{
+ log.printf("%p", p);
+ return log;
+}
diff --git a/ipc/chromium/src/base/logging.h b/ipc/chromium/src/base/logging.h
new file mode 100644
index 000000000..37523d307
--- /dev/null
+++ b/ipc/chromium/src/base/logging.h
@@ -0,0 +1,130 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_LOGGING_H_
+#define BASE_LOGGING_H_
+
+#include <string>
+#include <cstring>
+
+#include "base/basictypes.h"
+#include "mozilla/Logging.h"
+
+#ifdef NO_CHROMIUM_LOGGING
+#include <sstream>
+#endif
+
+// Replace the Chromium logging code with NSPR-based logging code and
+// some C++ wrappers to emulate std::ostream
+
+namespace mozilla {
+
+enum LogSeverity {
+ LOG_INFO,
+ LOG_WARNING,
+ LOG_ERROR,
+ LOG_ERROR_REPORT,
+ LOG_FATAL,
+ LOG_0 = LOG_ERROR
+};
+
+class Logger
+{
+public:
+ Logger(LogSeverity severity, const char* file, int line)
+ : mSeverity(severity)
+ , mFile(file)
+ , mLine(line)
+ , mMsg(NULL)
+ { }
+
+ ~Logger();
+
+ // not private so that the operator<< overloads can get to it
+ void printf(const char* fmt, ...);
+
+private:
+ static mozilla::LazyLogModule gChromiumPRLog;
+// static PRLogModuleInfo* GetLog();
+
+ LogSeverity mSeverity;
+ const char* mFile;
+ int mLine;
+ char* mMsg;
+
+ DISALLOW_EVIL_CONSTRUCTORS(Logger);
+};
+
+class LogWrapper
+{
+public:
+ LogWrapper(LogSeverity severity, const char* file, int line) :
+ log(severity, file, line) { }
+
+ operator Logger&() const { return log; }
+
+private:
+ mutable Logger log;
+
+ DISALLOW_EVIL_CONSTRUCTORS(LogWrapper);
+};
+
+struct EmptyLog
+{
+};
+
+} // namespace mozilla
+
+mozilla::Logger& operator<<(mozilla::Logger& log, const char* s);
+mozilla::Logger& operator<<(mozilla::Logger& log, const std::string& s);
+mozilla::Logger& operator<<(mozilla::Logger& log, int i);
+mozilla::Logger& operator<<(mozilla::Logger& log, const std::wstring& s);
+mozilla::Logger& operator<<(mozilla::Logger& log, void* p);
+
+template<class T>
+const mozilla::EmptyLog& operator <<(const mozilla::EmptyLog& log, const T&)
+{
+ return log;
+}
+
+#ifdef NO_CHROMIUM_LOGGING
+#define CHROMIUM_LOG(info) std::stringstream()
+#define LOG_IF(info, condition) if (!(condition)) std::stringstream()
+#else
+#define CHROMIUM_LOG(info) mozilla::LogWrapper(mozilla::LOG_ ## info, __FILE__, __LINE__)
+#define LOG_IF(info, condition) \
+ if (!(condition)) mozilla::LogWrapper(mozilla::LOG_ ## info, __FILE__, __LINE__)
+#endif
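+
+// Illustrative use of the stream-style wrappers above; the message text and
+// the |key| / |bytes_read| variables are hypothetical:
+//   CHROMIUM_LOG(WARNING) << "cache miss for key " << key;
+//   LOG_IF(ERROR, bytes_read < 0) << "read failed: " << bytes_read;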
+
+
+#ifdef DEBUG
+#define DLOG(info) CHROMIUM_LOG(info)
+#define DLOG_IF(info, condition) LOG_IF(info, condition)
+#define DCHECK(condition) CHECK(condition)
+#else
+#define DLOG(info) mozilla::EmptyLog()
+#define DLOG_IF(info, condition) mozilla::EmptyLog()
+#define DCHECK(condition) while (false && (condition)) mozilla::EmptyLog()
+#endif
+
+#undef LOG_ASSERT
+#define LOG_ASSERT(cond) CHECK(0)
+#define DLOG_ASSERT(cond) DCHECK(0)
+
+#define NOTREACHED() CHROMIUM_LOG(ERROR)
+#define NOTIMPLEMENTED() CHROMIUM_LOG(ERROR)
+
+#undef CHECK
+#define CHECK(condition) LOG_IF(WARNING, condition)
+
+#define DCHECK_EQ(v1, v2) DCHECK((v1) == (v2))
+#define DCHECK_NE(v1, v2) DCHECK((v1) != (v2))
+#define DCHECK_LE(v1, v2) DCHECK((v1) <= (v2))
+#define DCHECK_LT(v1, v2) DCHECK((v1) < (v2))
+#define DCHECK_GE(v1, v2) DCHECK((v1) >= (v2))
+#define DCHECK_GT(v1, v2) DCHECK((v1) > (v2))
+
+#endif // BASE_LOGGING_H_
diff --git a/ipc/chromium/src/base/mac_util.h b/ipc/chromium/src/base/mac_util.h
new file mode 100644
index 000000000..6af2bd703
--- /dev/null
+++ b/ipc/chromium/src/base/mac_util.h
@@ -0,0 +1,17 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright (c) 2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MAC_UTIL_H_
+#define BASE_MAC_UTIL_H_
+
+namespace mac_util {
+
+// Returns true if the application is running from a bundle
+bool AmIBundled();
+
+} // namespace mac_util
+
+#endif // BASE_MAC_UTIL_H_
diff --git a/ipc/chromium/src/base/mac_util.mm b/ipc/chromium/src/base/mac_util.mm
new file mode 100644
index 000000000..212458367
--- /dev/null
+++ b/ipc/chromium/src/base/mac_util.mm
@@ -0,0 +1,34 @@
+// Copyright (c) 2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/mac_util.h"
+
+#include <Carbon/Carbon.h>
+#import <Cocoa/Cocoa.h>
+
+#include "base/file_path.h"
+#include "base/logging.h"
+#include "base/scoped_cftyperef.h"
+#include "base/sys_string_conversions.h"
+
+namespace mac_util {
+
+// Adapted from http://developer.apple.com/carbon/tipsandtricks.html#AmIBundled
+bool AmIBundled() {
+ ProcessSerialNumber psn = {0, kCurrentProcess};
+
+ FSRef fsref;
+ if (GetProcessBundleLocation(&psn, &fsref) != noErr)
+ return false;
+
+ FSCatalogInfo info;
+ if (FSGetCatalogInfo(&fsref, kFSCatInfoNodeFlags, &info,
+ NULL, NULL, NULL) != noErr) {
+ return false;
+ }
+
+ return info.nodeFlags & kFSNodeIsDirectoryMask;
+}
+
+} // namespace mac_util
diff --git a/ipc/chromium/src/base/message_loop.cc b/ipc/chromium/src/base/message_loop.cc
new file mode 100644
index 000000000..3c794b276
--- /dev/null
+++ b/ipc/chromium/src/base/message_loop.cc
@@ -0,0 +1,579 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/message_loop.h"
+
+#include <algorithm>
+
+#include "mozilla/Atomics.h"
+#include "base/compiler_specific.h"
+#include "base/logging.h"
+#include "base/message_pump_default.h"
+#include "base/string_util.h"
+#include "base/thread_local.h"
+
+#if defined(OS_MACOSX)
+#include "base/message_pump_mac.h"
+#endif
+#if defined(OS_POSIX)
+#include "base/message_pump_libevent.h"
+#endif
+#if defined(OS_LINUX) || defined(OS_BSD)
+#if defined(MOZ_WIDGET_GTK)
+#include "base/message_pump_glib.h"
+#endif
+#endif
+#ifdef ANDROID
+#include "base/message_pump_android.h"
+#endif
+#ifdef MOZ_TASK_TRACER
+#include "GeckoTaskTracer.h"
+#include "TracedTaskCommon.h"
+#endif
+
+#include "MessagePump.h"
+
+using base::Time;
+using base::TimeDelta;
+using base::TimeTicks;
+
+using mozilla::Move;
+using mozilla::Runnable;
+
+static base::ThreadLocalPointer<MessageLoop>& get_tls_ptr() {
+ static base::ThreadLocalPointer<MessageLoop> tls_ptr;
+ return tls_ptr;
+}
+
+//------------------------------------------------------------------------------
+
+// Logical events for Histogram profiling. Run with -message-loop-histogrammer
+// to get an accounting of messages and actions taken on each thread.
+static const int kTaskRunEvent = 0x1;
+static const int kTimerEvent = 0x2;
+
+// Provide range of message IDs for use in histogramming and debug display.
+static const int kLeastNonZeroMessageId = 1;
+static const int kMaxMessageId = 1099;
+static const int kNumberOfDistinctMessagesDisplayed = 1100;
+
+//------------------------------------------------------------------------------
+
+#if defined(OS_WIN)
+
+// Upon a SEH exception in this thread, it restores the original unhandled
+// exception filter.
+static int SEHFilter(LPTOP_LEVEL_EXCEPTION_FILTER old_filter) {
+ ::SetUnhandledExceptionFilter(old_filter);
+ return EXCEPTION_CONTINUE_SEARCH;
+}
+
+// Retrieves a pointer to the current unhandled exception filter. There
+// is no standalone getter method.
+static LPTOP_LEVEL_EXCEPTION_FILTER GetTopSEHFilter() {
+ LPTOP_LEVEL_EXCEPTION_FILTER top_filter = NULL;
+ top_filter = ::SetUnhandledExceptionFilter(0);
+ ::SetUnhandledExceptionFilter(top_filter);
+ return top_filter;
+}
+
+#endif // defined(OS_WIN)
+
+//------------------------------------------------------------------------------
+
+// static
+MessageLoop* MessageLoop::current() {
+ return get_tls_ptr().Get();
+}
+
+static mozilla::Atomic<int32_t> message_loop_id_seq(0);
+
+MessageLoop::MessageLoop(Type type, nsIThread* aThread)
+ : type_(type),
+ id_(++message_loop_id_seq),
+ nestable_tasks_allowed_(true),
+ exception_restoration_(false),
+ state_(NULL),
+ run_depth_base_(1),
+#ifdef OS_WIN
+ os_modal_loop_(false),
+#endif // OS_WIN
+ transient_hang_timeout_(0),
+ permanent_hang_timeout_(0),
+ next_sequence_num_(0) {
+ DCHECK(!current()) << "should only have one message loop per thread";
+ get_tls_ptr().Set(this);
+
+ switch (type_) {
+ case TYPE_MOZILLA_PARENT:
+ MOZ_RELEASE_ASSERT(!aThread);
+ pump_ = new mozilla::ipc::MessagePump(aThread);
+ return;
+ case TYPE_MOZILLA_CHILD:
+ MOZ_RELEASE_ASSERT(!aThread);
+ pump_ = new mozilla::ipc::MessagePumpForChildProcess();
+ // There is a MessageLoop Run call from XRE_InitChildProcess
+ // and another one from MessagePumpForChildProcess. The one
+ // from MessagePumpForChildProcess becomes the base, so we need
+ // to set run_depth_base_ to 2 or we'll never be able to process
+ // Idle tasks.
+ run_depth_base_ = 2;
+ return;
+ case TYPE_MOZILLA_NONMAINTHREAD:
+ pump_ = new mozilla::ipc::MessagePumpForNonMainThreads(aThread);
+ return;
+#if defined(OS_WIN)
+ case TYPE_MOZILLA_NONMAINUITHREAD:
+ pump_ = new mozilla::ipc::MessagePumpForNonMainUIThreads(aThread);
+ return;
+#endif
+ default:
+ // Create one of Chromium's standard MessageLoop types below.
+ break;
+ }
+
+#if defined(OS_WIN)
+ // TODO(rvargas): Get rid of the OS guards.
+ if (type_ == TYPE_DEFAULT) {
+ pump_ = new base::MessagePumpDefault();
+ } else if (type_ == TYPE_IO) {
+ pump_ = new base::MessagePumpForIO();
+ } else {
+ DCHECK(type_ == TYPE_UI);
+ pump_ = new base::MessagePumpForUI();
+ }
+#elif defined(OS_POSIX)
+ if (type_ == TYPE_UI) {
+#if defined(OS_MACOSX)
+ pump_ = base::MessagePumpMac::Create();
+#elif defined(OS_LINUX) || defined(OS_BSD)
+ pump_ = new base::MessagePumpForUI();
+#endif // OS_LINUX
+ } else if (type_ == TYPE_IO) {
+ pump_ = new base::MessagePumpLibevent();
+ } else {
+ pump_ = new base::MessagePumpDefault();
+ }
+#endif // OS_POSIX
+}
+
+MessageLoop::~MessageLoop() {
+ DCHECK(this == current());
+
+ // Let interested parties have one last shot at accessing this.
+ FOR_EACH_OBSERVER(DestructionObserver, destruction_observers_,
+ WillDestroyCurrentMessageLoop());
+
+ DCHECK(!state_);
+
+ // Clean up any unprocessed tasks, but take care: deleting a task could
+ // result in the addition of more tasks (e.g., via DeleteSoon). We set a
+ // limit on the number of times we will allow a deleted task to generate more
+ // tasks. Normally, we should only pass through this loop once or twice. If
+ // we end up hitting the loop limit, then it is probably due to one task that
+ // is being stubborn. Inspect the queues to see who is left.
+ bool did_work;
+ for (int i = 0; i < 100; ++i) {
+ DeletePendingTasks();
+ ReloadWorkQueue();
+ // If we end up with empty queues, then break out of the loop.
+ did_work = DeletePendingTasks();
+ if (!did_work)
+ break;
+ }
+ DCHECK(!did_work);
+
+ // OK, now make it so that no one can find us.
+ get_tls_ptr().Set(NULL);
+}
+
+void MessageLoop::AddDestructionObserver(DestructionObserver *obs) {
+ DCHECK(this == current());
+ destruction_observers_.AddObserver(obs);
+}
+
+void MessageLoop::RemoveDestructionObserver(DestructionObserver *obs) {
+ DCHECK(this == current());
+ destruction_observers_.RemoveObserver(obs);
+}
+
+void MessageLoop::Run() {
+ AutoRunState save_state(this);
+ RunHandler();
+}
+
+// Runs the loop in two different SEH modes:
+// enable_SEH_restoration_ = false : any unhandled exception goes to the last
+// one that calls SetUnhandledExceptionFilter().
+// enable_SEH_restoration_ = true : any unhandled exception goes to the filter
+// that existed before the loop was run.
+void MessageLoop::RunHandler() {
+#if defined(OS_WIN)
+ if (exception_restoration_) {
+ LPTOP_LEVEL_EXCEPTION_FILTER current_filter = GetTopSEHFilter();
+ MOZ_SEH_TRY {
+ RunInternal();
+ } MOZ_SEH_EXCEPT(SEHFilter(current_filter)) {
+ }
+ return;
+ }
+#endif
+
+ RunInternal();
+}
+
+//------------------------------------------------------------------------------
+
+void MessageLoop::RunInternal() {
+ DCHECK(this == current());
+ pump_->Run(this);
+}
+
+//------------------------------------------------------------------------------
+// Wrapper functions for use in above message loop framework.
+
+bool MessageLoop::ProcessNextDelayedNonNestableTask() {
+ if (state_->run_depth > run_depth_base_)
+ return false;
+
+ if (deferred_non_nestable_work_queue_.empty())
+ return false;
+
+ RefPtr<Runnable> task = deferred_non_nestable_work_queue_.front().task.forget();
+ deferred_non_nestable_work_queue_.pop();
+
+ RunTask(task.forget());
+ return true;
+}
+
+//------------------------------------------------------------------------------
+
+void MessageLoop::Quit() {
+ DCHECK(current() == this);
+ if (state_) {
+ state_->quit_received = true;
+ } else {
+ NOTREACHED() << "Must be inside Run to call Quit";
+ }
+}
+
+void MessageLoop::PostTask(already_AddRefed<Runnable> task) {
+ PostTask_Helper(Move(task), 0);
+}
+
+void MessageLoop::PostDelayedTask(already_AddRefed<Runnable> task, int delay_ms) {
+ PostTask_Helper(Move(task), delay_ms);
+}
+
+void MessageLoop::PostIdleTask(already_AddRefed<Runnable> task) {
+ DCHECK(current() == this);
+ MOZ_ASSERT(NS_IsMainThread());
+
+ PendingTask pending_task(Move(task), false);
+ deferred_non_nestable_work_queue_.push(Move(pending_task));
+}
+
+// Possibly called on a background thread!
+void MessageLoop::PostTask_Helper(already_AddRefed<Runnable> task, int delay_ms) {
+ if (nsIEventTarget* target = pump_->GetXPCOMThread()) {
+ nsresult rv;
+ if (delay_ms) {
+ rv = target->DelayedDispatch(Move(task), delay_ms);
+ } else {
+ rv = target->Dispatch(Move(task), 0);
+ }
+ MOZ_ALWAYS_SUCCEEDS(rv);
+ return;
+ }
+
+ PendingTask pending_task(Move(task), true);
+
+ if (delay_ms > 0) {
+ pending_task.delayed_run_time =
+ TimeTicks::Now() + TimeDelta::FromMilliseconds(delay_ms);
+ } else {
+ DCHECK(delay_ms == 0) << "delay should not be negative";
+ }
+
+ // Warning: Don't try to short-circuit, and handle this thread's tasks more
+ // directly, as it could starve handling of foreign threads. Put every task
+ // into this queue.
+
+ RefPtr<base::MessagePump> pump;
+ {
+ AutoLock locked(incoming_queue_lock_);
+ incoming_queue_.push(Move(pending_task));
+ pump = pump_;
+ }
+ // Since the incoming_queue_ may contain a task that destroys this message
+ // loop, we cannot exit incoming_queue_lock_ until we are done with |this|.
+ // We use a stack-based reference to the message pump so that we can call
+ // ScheduleWork outside of incoming_queue_lock_.
+
+ pump->ScheduleWork();
+}
+
+void MessageLoop::SetNestableTasksAllowed(bool allowed) {
+ if (nestable_tasks_allowed_ != allowed) {
+ nestable_tasks_allowed_ = allowed;
+ if (!nestable_tasks_allowed_)
+ return;
+ // Start the native pump if we are not already pumping.
+ pump_->ScheduleWorkForNestedLoop();
+ }
+}
+
+void MessageLoop::ScheduleWork() {
+ // Start the native pump if we are not already pumping.
+ pump_->ScheduleWork();
+}
+
+bool MessageLoop::NestableTasksAllowed() const {
+ return nestable_tasks_allowed_;
+}
+
+//------------------------------------------------------------------------------
+
+void MessageLoop::RunTask(already_AddRefed<Runnable> aTask) {
+ DCHECK(nestable_tasks_allowed_);
+ // Execute the task and assume the worst: It is probably not reentrant.
+ nestable_tasks_allowed_ = false;
+
+ RefPtr<Runnable> task = aTask;
+ task->Run();
+ task = nullptr;
+
+ nestable_tasks_allowed_ = true;
+}
+
+bool MessageLoop::DeferOrRunPendingTask(PendingTask&& pending_task) {
+ if (pending_task.nestable || state_->run_depth <= run_depth_base_) {
+ RunTask(pending_task.task.forget());
+ // Show that we ran a task (Note: a new one might arrive as a
+ // consequence!).
+ return true;
+ }
+
+ // We couldn't run the task now because we're in a nested message loop
+ // and the task isn't nestable.
+ deferred_non_nestable_work_queue_.push(Move(pending_task));
+ return false;
+}
+
+void MessageLoop::AddToDelayedWorkQueue(const PendingTask& pending_task) {
+ // Move to the delayed work queue. Initialize the sequence number
+ // before inserting into the delayed_work_queue_. The sequence number
+ // is used to facilitate FIFO sorting when two tasks have the same
+ // delayed_run_time value.
+ PendingTask new_pending_task(pending_task);
+ new_pending_task.sequence_num = next_sequence_num_++;
+ delayed_work_queue_.push(Move(new_pending_task));
+}
+
+void MessageLoop::ReloadWorkQueue() {
+ // We can improve performance of our loading tasks from incoming_queue_ to
+ // work_queue_ by waiting until the last minute (work_queue_ is empty) to
+ // load. That reduces the number of locks-per-task significantly when our
+ // queues get large.
+ if (!work_queue_.empty())
+ return; // Wait till we *really* need to lock and load.
+
+ // Acquire all we can from the inter-thread queue with one lock acquisition.
+ {
+ AutoLock lock(incoming_queue_lock_);
+ if (incoming_queue_.empty())
+ return;
+ std::swap(incoming_queue_, work_queue_);
+ DCHECK(incoming_queue_.empty());
+ }
+}
+
+bool MessageLoop::DeletePendingTasks() {
+ MOZ_ASSERT(work_queue_.empty());
+ bool did_work = !deferred_non_nestable_work_queue_.empty();
+ while (!deferred_non_nestable_work_queue_.empty()) {
+ deferred_non_nestable_work_queue_.pop();
+ }
+ did_work |= !delayed_work_queue_.empty();
+ while (!delayed_work_queue_.empty()) {
+ delayed_work_queue_.pop();
+ }
+ return did_work;
+}
+
+bool MessageLoop::DoWork() {
+ if (!nestable_tasks_allowed_) {
+ // Task can't be executed right now.
+ return false;
+ }
+
+ for (;;) {
+ ReloadWorkQueue();
+ if (work_queue_.empty())
+ break;
+
+ // Execute oldest task.
+ do {
+ PendingTask pending_task = Move(work_queue_.front());
+ work_queue_.pop();
+ if (!pending_task.delayed_run_time.is_null()) {
+ // NB: Don't move, because we use this later!
+ AddToDelayedWorkQueue(pending_task);
+ // If we changed the topmost task, then it is time to re-schedule.
+ if (delayed_work_queue_.top().task == pending_task.task)
+ pump_->ScheduleDelayedWork(pending_task.delayed_run_time);
+ } else {
+ if (DeferOrRunPendingTask(Move(pending_task)))
+ return true;
+ }
+ } while (!work_queue_.empty());
+ }
+
+ // Nothing happened.
+ return false;
+}
+
+bool MessageLoop::DoDelayedWork(TimeTicks* next_delayed_work_time) {
+ if (!nestable_tasks_allowed_ || delayed_work_queue_.empty()) {
+ *next_delayed_work_time = TimeTicks();
+ return false;
+ }
+
+ if (delayed_work_queue_.top().delayed_run_time > TimeTicks::Now()) {
+ *next_delayed_work_time = delayed_work_queue_.top().delayed_run_time;
+ return false;
+ }
+
+ PendingTask pending_task = delayed_work_queue_.top();
+ delayed_work_queue_.pop();
+
+ if (!delayed_work_queue_.empty())
+ *next_delayed_work_time = delayed_work_queue_.top().delayed_run_time;
+
+ return DeferOrRunPendingTask(Move(pending_task));
+}
+
+bool MessageLoop::DoIdleWork() {
+ if (ProcessNextDelayedNonNestableTask())
+ return true;
+
+ if (state_->quit_received)
+ pump_->Quit();
+
+ return false;
+}
+
+//------------------------------------------------------------------------------
+// MessageLoop::AutoRunState
+
+MessageLoop::AutoRunState::AutoRunState(MessageLoop* loop) : loop_(loop) {
+ // Make the loop reference us.
+ previous_state_ = loop_->state_;
+ if (previous_state_) {
+ run_depth = previous_state_->run_depth + 1;
+ } else {
+ run_depth = 1;
+ }
+ loop_->state_ = this;
+
+ // Initialize the other fields:
+ quit_received = false;
+#if defined(OS_WIN)
+ dispatcher = NULL;
+#endif
+}
+
+MessageLoop::AutoRunState::~AutoRunState() {
+ loop_->state_ = previous_state_;
+}
+
+//------------------------------------------------------------------------------
+// MessageLoop::PendingTask
+
+bool MessageLoop::PendingTask::operator<(const PendingTask& other) const {
+ // Since the top of a priority queue is defined as the "greatest" element, we
+ // need to invert the comparison here. We want the smaller time to be at the
+ // top of the heap.
+
+ if (delayed_run_time < other.delayed_run_time)
+ return false;
+
+ if (delayed_run_time > other.delayed_run_time)
+ return true;
+
+ // If the times happen to match, then we use the sequence number to decide.
+ // Compare the difference to support integer roll-over.
+ return (sequence_num - other.sequence_num) > 0;
+}
+
+//------------------------------------------------------------------------------
+// MessageLoopForUI
+
+#if defined(OS_WIN)
+
+void MessageLoopForUI::Run(Dispatcher* dispatcher) {
+ AutoRunState save_state(this);
+ state_->dispatcher = dispatcher;
+ RunHandler();
+}
+
+void MessageLoopForUI::AddObserver(Observer* observer) {
+ pump_win()->AddObserver(observer);
+}
+
+void MessageLoopForUI::RemoveObserver(Observer* observer) {
+ pump_win()->RemoveObserver(observer);
+}
+
+void MessageLoopForUI::WillProcessMessage(const MSG& message) {
+ pump_win()->WillProcessMessage(message);
+}
+void MessageLoopForUI::DidProcessMessage(const MSG& message) {
+ pump_win()->DidProcessMessage(message);
+}
+void MessageLoopForUI::PumpOutPendingPaintMessages() {
+ pump_ui()->PumpOutPendingPaintMessages();
+}
+
+#endif // defined(OS_WIN)
+
+//------------------------------------------------------------------------------
+// MessageLoopForIO
+
+#if defined(OS_WIN)
+
+void MessageLoopForIO::RegisterIOHandler(HANDLE file, IOHandler* handler) {
+ pump_io()->RegisterIOHandler(file, handler);
+}
+
+bool MessageLoopForIO::WaitForIOCompletion(DWORD timeout, IOHandler* filter) {
+ return pump_io()->WaitForIOCompletion(timeout, filter);
+}
+
+#elif defined(OS_POSIX)
+
+bool MessageLoopForIO::WatchFileDescriptor(int fd,
+ bool persistent,
+ Mode mode,
+ FileDescriptorWatcher *controller,
+ Watcher *delegate) {
+ return pump_libevent()->WatchFileDescriptor(
+ fd,
+ persistent,
+ static_cast<base::MessagePumpLibevent::Mode>(mode),
+ controller,
+ delegate);
+}
+
+bool
+MessageLoopForIO::CatchSignal(int sig,
+ SignalEvent* sigevent,
+ SignalWatcher* delegate)
+{
+ return pump_libevent()->CatchSignal(sig, sigevent, delegate);
+}
+
+#endif
diff --git a/ipc/chromium/src/base/message_loop.h b/ipc/chromium/src/base/message_loop.h
new file mode 100644
index 000000000..c07d7e079
--- /dev/null
+++ b/ipc/chromium/src/base/message_loop.h
@@ -0,0 +1,558 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MESSAGE_LOOP_H_
+#define BASE_MESSAGE_LOOP_H_
+
+#include <deque>
+#include <queue>
+#include <string>
+#include <vector>
+#include <map>
+
+#include "base/lock.h"
+#include "base/message_pump.h"
+#include "base/observer_list.h"
+
+#if defined(OS_WIN)
+// We need this to declare base::MessagePumpWin::Dispatcher, which we should
+// really just eliminate.
+#include "base/message_pump_win.h"
+#elif defined(OS_POSIX)
+#include "base/message_pump_libevent.h"
+#endif
+
+#include "nsAutoPtr.h"
+#include "nsThreadUtils.h"
+
+class nsIThread;
+
+namespace mozilla {
+namespace ipc {
+
+class DoWorkRunnable;
+
+} /* namespace ipc */
+} /* namespace mozilla */
+
+// A MessageLoop is used to process events for a particular thread. There is
+// at most one MessageLoop instance per thread.
+//
+// Events include at a minimum Task instances submitted to PostTask or those
+// managed by TimerManager. Depending on the type of message pump used by the
+// MessageLoop other events such as UI messages may be processed. On Windows
+// APC calls (as time permits) and signals sent to a registered set of HANDLEs
+// may also be processed.
+//
+// NOTE: Unless otherwise specified, a MessageLoop's methods may only be called
+// on the thread where the MessageLoop's Run method executes.
+//
+// NOTE: MessageLoop has task reentrancy protection. This means that if a
+// task is being processed, a second task cannot start until the first task is
+// finished. Reentrancy can happen when processing a task, and an inner
+// message pump is created. That inner pump then processes native messages
+// which could implicitly start an inner task. Inner message pumps are created
+// with dialogs (DialogBox), common dialogs (GetOpenFileName), OLE functions
+// (DoDragDrop), printer functions (StartDoc) and *many* others.
+//
+// Sample workaround when inner task processing is needed:
+// bool old_state = MessageLoop::current()->NestableTasksAllowed();
+// MessageLoop::current()->SetNestableTasksAllowed(true);
+// HRESULT hr = DoDragDrop(...); // Implicitly runs a modal message loop here.
+// MessageLoop::current()->SetNestableTasksAllowed(old_state);
+// // Process hr (the result returned by DoDragDrop()).
+//
+// Please be SURE your task is reentrant (nestable) and all global variables
+// are stable and accessible before calling SetNestableTasksAllowed(true).
+//
+class MessageLoop : public base::MessagePump::Delegate {
+
+ friend class mozilla::ipc::DoWorkRunnable;
+
+public:
+ // A DestructionObserver is notified when the current MessageLoop is being
+ // destroyed. These observers are notified prior to MessageLoop::current()
+ // being changed to return NULL. This gives interested parties the chance to
+ // do final cleanup that depends on the MessageLoop.
+ //
+ // NOTE: Any tasks posted to the MessageLoop during this notification will
+ // not be run. Instead, they will be deleted.
+ //
+ class DestructionObserver {
+ public:
+ virtual ~DestructionObserver() {}
+ virtual void WillDestroyCurrentMessageLoop() = 0;
+ };
+
+ // Add a DestructionObserver, which will start receiving notifications
+ // immediately.
+ void AddDestructionObserver(DestructionObserver* destruction_observer);
+
+ // Remove a DestructionObserver. It is safe to call this method while a
+ // DestructionObserver is receiving a notification callback.
+ void RemoveDestructionObserver(DestructionObserver* destruction_observer);
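+
+ // For example (an illustrative sketch only; MyObserver and my_observer are
+ // hypothetical names, not part of this interface):
+ //
+ //   class MyObserver : public MessageLoop::DestructionObserver {
+ //    public:
+ //     void WillDestroyCurrentMessageLoop() override { /* final cleanup */ }
+ //   };
+ //   ...
+ //   MessageLoop::current()->AddDestructionObserver(&my_observer);
+ //   ...
+ //   MessageLoop::current()->RemoveDestructionObserver(&my_observer);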
+
+ // The "PostTask" family of methods call the task's Run method asynchronously
+ // from within a message loop at some point in the future.
+ //
+ // With the PostTask variant, tasks are invoked in FIFO order, inter-mixed
+ // with normal UI or IO event processing. With the PostDelayedTask variant,
+ // tasks are called after at least approximately 'delay_ms' milliseconds have
+ // elapsed.
+ //
+ // The NonNestable variants work similarly except that they promise never to
+ // dispatch the task from a nested invocation of MessageLoop::Run. Instead,
+ // such tasks get deferred until the top-most MessageLoop::Run is executing.
+ //
+ // The MessageLoop takes ownership of the Task, and deletes it after it has
+ // been Run().
+ //
+ // NOTE: These methods may be called on any thread. The Task will be invoked
+ // on the thread that executes MessageLoop::Run().
+
+ void PostTask(already_AddRefed<mozilla::Runnable> task);
+
+ void PostDelayedTask(already_AddRefed<mozilla::Runnable> task, int delay_ms);
+
+ // PostIdleTask is not thread-safe and must be called on this loop's thread.
+ void PostIdleTask(already_AddRefed<mozilla::Runnable> task);
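+
+ // For example (an illustrative sketch only; MyRunnable is a hypothetical
+ // mozilla::Runnable subclass):
+ //
+ //   RefPtr<mozilla::Runnable> task = new MyRunnable();
+ //   MessageLoop::current()->PostTask(task.forget());
+ //
+ //   RefPtr<mozilla::Runnable> later = new MyRunnable();
+ //   MessageLoop::current()->PostDelayedTask(later.forget(), 500);  // ~500 ms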
+
+ // Run the message loop.
+ void Run();
+
+ // Signals the Run method to return after it is done processing all pending
+ // messages. This method may only be called on the same thread that called
+ // Run, and Run must still be on the call stack.
+ //
+ // Use QuitTask if you need to Quit another thread's MessageLoop, but note
+ // that doing so is fairly dangerous if the target thread makes nested calls
+ // to MessageLoop::Run. The problem is that you won't know which nested
+ // run loop you are quitting, so be careful!
+ //
+ void Quit();
+
+ // Invokes Quit on the current MessageLoop when run. Useful to schedule an
+ // arbitrary MessageLoop to Quit.
+ class QuitTask : public mozilla::Runnable {
+ public:
+ NS_IMETHOD Run() override {
+ MessageLoop::current()->Quit();
+ return NS_OK;
+ }
+ };
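+
+ // For example (illustrative only), to make another loop's Run() return:
+ //
+ //   RefPtr<mozilla::Runnable> quit = new MessageLoop::QuitTask();
+ //   other_loop->PostTask(quit.forget());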
+
+ // A MessageLoop has a particular type, which indicates the set of
+ // asynchronous events it may process in addition to tasks and timers.
+ //
+ // TYPE_DEFAULT
+ // This type of ML only supports tasks and timers.
+ //
+ // TYPE_UI
+ // This type of ML also supports native UI events (e.g., Windows messages).
+ // See also MessageLoopForUI.
+ //
+ // TYPE_IO
+ // This type of ML also supports asynchronous IO. See also
+ // MessageLoopForIO.
+ //
+ // TYPE_MOZILLA_CHILD
+ // This type of ML is used in Mozilla child processes which initialize
+ // XPCOM and use the gecko event loop.
+ //
+ // TYPE_MOZILLA_PARENT
+ // This type of ML is used in Mozilla parent processes which initialize
+ // XPCOM and use the gecko event loop.
+ //
+ // TYPE_MOZILLA_NONMAINTHREAD
+ // This type of ML is used in Mozilla parent processes which initialize
+ // XPCOM and use the nsThread event loop.
+ //
+ // TYPE_MOZILLA_NONMAINUITHREAD
+ // This type of ML is used in Mozilla processes which initialize XPCOM
+ // and use TYPE_UI loop logic.
+ //
+ enum Type {
+ TYPE_DEFAULT,
+ TYPE_UI,
+ TYPE_IO,
+ TYPE_MOZILLA_CHILD,
+ TYPE_MOZILLA_PARENT,
+ TYPE_MOZILLA_NONMAINTHREAD,
+ TYPE_MOZILLA_NONMAINUITHREAD
+ };
+
+ // Normally, it is not necessary to instantiate a MessageLoop. Instead, it
+ // is typical to make use of the current thread's MessageLoop instance.
+ explicit MessageLoop(Type type = TYPE_DEFAULT, nsIThread* aThread = nullptr);
+ ~MessageLoop();
+
+ // Returns the type passed to the constructor.
+ Type type() const { return type_; }
+
+ // Unique, non-repeating ID for this message loop.
+ int32_t id() const { return id_; }
+
+ // Optional call to connect the thread name with this loop.
+ void set_thread_name(const std::string& aThreadName) {
+ DCHECK(thread_name_.empty()) << "Should not rename this thread!";
+ thread_name_ = aThreadName;
+ }
+ const std::string& thread_name() const { return thread_name_; }
+
+ // Returns the MessageLoop object for the current thread, or null if none.
+ static MessageLoop* current();
+
+ // Enables or disables recursive task processing. This happens in the case
+ // of recursive message loops. Some unwanted message loops may occur when
+ // using common controls or printer functions. By default, recursive task
+ // processing is disabled.
+ //
+ // The specific case where tasks get queued is:
+ // - The thread is running a message loop.
+ // - It receives a task #1 and executes it.
+ // - Task #1 implicitly starts a message loop, like a MessageBox in a
+ //   unit test. This can also be StartDoc or GetSaveFileName.
+ // - The thread receives a task #2 before or while in this second message
+ //   loop.
+ // - With NestableTasksAllowed set to true, task #2 will run right away.
+ //   Otherwise, it will get executed right after task #1 completes, back at
+ //   the top-level ("thread message loop level") run loop.
+ void SetNestableTasksAllowed(bool allowed);
+ void ScheduleWork();
+ bool NestableTasksAllowed() const;
+
+ // Enables or disables restoration, during an exception, of the unhandled
+ // exception filter that was active when Run() was called. This can happen
+ // if some third-party code calls SetUnhandledExceptionFilter() and never
+ // restores the previous filter.
+ void set_exception_restoration(bool restore) {
+ exception_restoration_ = restore;
+ }
+
+#if defined(OS_WIN)
+ void set_os_modal_loop(bool os_modal_loop) {
+ os_modal_loop_ = os_modal_loop;
+ }
+
+ bool& os_modal_loop() {
+ return os_modal_loop_;
+ }
+#endif // OS_WIN
+
+ // Set the timeouts for background hang monitoring.
+ // A value of 0 indicates there is no timeout.
+ void set_hang_timeouts(uint32_t transient_timeout_ms,
+ uint32_t permanent_timeout_ms) {
+ transient_hang_timeout_ = transient_timeout_ms;
+ permanent_hang_timeout_ = permanent_timeout_ms;
+ }
+ uint32_t transient_hang_timeout() const {
+ return transient_hang_timeout_;
+ }
+ uint32_t permanent_hang_timeout() const {
+ return permanent_hang_timeout_;
+ }
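+
+ // For example (illustrative values only): report transient hangs after
+ // 128 ms and permanent hangs after 8192 ms, or pass 0 to disable a check:
+ //
+ //   loop->set_hang_timeouts(128, 8192);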
+
+ //----------------------------------------------------------------------------
+ protected:
+ struct RunState {
+ // Used to count how many Run() invocations are on the stack.
+ int run_depth;
+
+ // Used to record that Quit() was called, or that we should quit the pump
+ // once it becomes idle.
+ bool quit_received;
+
+#if defined(OS_WIN)
+ base::MessagePumpWin::Dispatcher* dispatcher;
+#endif
+ };
+
+ class AutoRunState : RunState {
+ public:
+ explicit AutoRunState(MessageLoop* loop);
+ ~AutoRunState();
+ private:
+ MessageLoop* loop_;
+ RunState* previous_state_;
+ };
+
+ // This structure is copied around by value.
+ struct PendingTask {
+ RefPtr<mozilla::Runnable> task; // The task to run.
+ base::TimeTicks delayed_run_time; // The time when the task should be run.
+ int sequence_num; // Secondary sort key for run time.
+ bool nestable; // OK to dispatch from a nested loop.
+
+ PendingTask(already_AddRefed<mozilla::Runnable> aTask, bool aNestable)
+ : task(aTask), sequence_num(0), nestable(aNestable) {
+ }
+
+ PendingTask(PendingTask&& aOther)
+ : task(aOther.task.forget()),
+ delayed_run_time(aOther.delayed_run_time),
+ sequence_num(aOther.sequence_num),
+ nestable(aOther.nestable) {
+ }
+
+ // std::priority_queue<T>::top is dumb, so we have to have this.
+ PendingTask(const PendingTask& aOther)
+ : task(aOther.task),
+ delayed_run_time(aOther.delayed_run_time),
+ sequence_num(aOther.sequence_num),
+ nestable(aOther.nestable) {
+ }
+ PendingTask& operator=(const PendingTask& aOther)
+ {
+ task = aOther.task;
+ delayed_run_time = aOther.delayed_run_time;
+ sequence_num = aOther.sequence_num;
+ nestable = aOther.nestable;
+ return *this;
+ }
+
+ // Used to support sorting.
+ bool operator<(const PendingTask& other) const;
+ };
+
+ typedef std::queue<PendingTask> TaskQueue;
+ typedef std::priority_queue<PendingTask> DelayedTaskQueue;
+
+#if defined(OS_WIN)
+ base::MessagePumpWin* pump_win() {
+ return static_cast<base::MessagePumpWin*>(pump_.get());
+ }
+#elif defined(OS_POSIX)
+ base::MessagePumpLibevent* pump_libevent() {
+ return static_cast<base::MessagePumpLibevent*>(pump_.get());
+ }
+#endif
+
+ // A function to encapsulate all the exception handling capability in the
+ // stacks around the running of a main message loop. It will run the message
+ // loop in a SEH try block or not depending on the set_exception_restoration()
+ // flag.
+ void RunHandler();
+
+ // A surrounding stack frame around the running of the message loop that
+ // supports all saving and restoring of state, as is needed for any/all (ugly)
+ // recursive calls.
+ void RunInternal();
+
+ // Called to process any delayed non-nestable tasks.
+ bool ProcessNextDelayedNonNestableTask();
+
+ //----------------------------------------------------------------------------
+ // Run a work_queue_ task or new_task, and delete it (if it was processed by
+ // PostTask). If there are queued tasks, the oldest one is executed and
+ // new_task is queued. new_task is optional and can be NULL. In this NULL
+ // case, the method will run one pending task (if any exist). Returns true if
+ // it executes a task. Queued tasks accumulate only when there is a
+ // non-nestable task currently processing, in which case the new_task is
+ // appended to the list work_queue_. Such re-entrancy generally happens when
+ // an unrequested message pump (typical of a native dialog) is executing in
+ // the context of a task.
+ bool QueueOrRunTask(already_AddRefed<mozilla::Runnable> new_task);
+
+ // Runs the specified task and deletes it.
+ void RunTask(already_AddRefed<mozilla::Runnable> task);
+
+ // Calls RunTask or queues the pending_task on the deferred task list if it
+ // cannot be run right now. Returns true if the task was run.
+ bool DeferOrRunPendingTask(PendingTask&& pending_task);
+
+ // Adds the pending task to delayed_work_queue_.
+ void AddToDelayedWorkQueue(const PendingTask& pending_task);
+
+ // Load tasks from the incoming_queue_ into work_queue_ if the latter is
+ // empty. The former requires a lock to access, while the latter is directly
+ // accessible on this thread.
+ void ReloadWorkQueue();
+
+ // Delete tasks that haven't run yet without running them. Used in the
+ // destructor to make sure all the task's destructors get called. Returns
+ // true if some work was done.
+ bool DeletePendingTasks();
+
+ // Post a task to our incoming queue.
+ void PostTask_Helper(already_AddRefed<mozilla::Runnable> task, int delay_ms);
+
+ // base::MessagePump::Delegate methods:
+ virtual bool DoWork() override;
+ virtual bool DoDelayedWork(base::TimeTicks* next_delayed_work_time) override;
+ virtual bool DoIdleWork() override;
+
+ Type type_;
+ int32_t id_;
+
+ // A list of tasks that need to be processed by this instance. Note that
+ // this queue is only accessed (push/pop) by our current thread.
+ TaskQueue work_queue_;
+
+ // Contains delayed tasks, sorted by their 'delayed_run_time' property.
+ DelayedTaskQueue delayed_work_queue_;
+
+ // A queue of non-nestable tasks that we had to defer because when it came
+ // time to execute them we were in a nested message loop. They will execute
+ // once we're out of nested message loops.
+ TaskQueue deferred_non_nestable_work_queue_;
+
+ RefPtr<base::MessagePump> pump_;
+
+ base::ObserverList<DestructionObserver> destruction_observers_;
+
+ // A recursion block that prevents accidentally running additional tasks when
+ // inside an (accidentally induced?) nested message pump.
+ bool nestable_tasks_allowed_;
+
+ bool exception_restoration_;
+
+ std::string thread_name_;
+
+ // A queue of tasks, protected by incoming_queue_lock_, that other threads
+ // post to for processing on this instance's thread. These tasks have not
+ // yet been sorted out into items for our work_queue_ vs. items destined for
+ // the delayed_work_queue_.
+ TaskQueue incoming_queue_;
+ // Protect access to incoming_queue_.
+ Lock incoming_queue_lock_;
+
+ RunState* state_;
+ int run_depth_base_;
+
+#if defined(OS_WIN)
+ // Should be set to true before calling Windows APIs like TrackPopupMenu, etc
+ // which enter a modal message loop.
+ bool os_modal_loop_;
+#endif
+
+ // Timeout values for hang monitoring
+ uint32_t transient_hang_timeout_;
+ uint32_t permanent_hang_timeout_;
+
+ // The next sequence number to use for delayed tasks.
+ int next_sequence_num_;
+
+ DISALLOW_COPY_AND_ASSIGN(MessageLoop);
+};
+
+//-----------------------------------------------------------------------------
+// MessageLoopForUI extends MessageLoop with methods that are particular to a
+// MessageLoop instantiated with TYPE_UI.
+//
+// This class is typically used like so:
+// MessageLoopForUI::current()->...call some method...
+//
+class MessageLoopForUI : public MessageLoop {
+ public:
+ explicit MessageLoopForUI(Type aType = TYPE_UI) : MessageLoop(aType) {
+ }
+
+ // Returns the MessageLoopForUI of the current thread.
+ static MessageLoopForUI* current() {
+ MessageLoop* loop = MessageLoop::current();
+ if (!loop)
+ return NULL;
+ Type type = loop->type();
+ DCHECK(type == MessageLoop::TYPE_UI ||
+ type == MessageLoop::TYPE_MOZILLA_PARENT ||
+ type == MessageLoop::TYPE_MOZILLA_CHILD);
+ return static_cast<MessageLoopForUI*>(loop);
+ }
+
+#if defined(OS_WIN)
+ typedef base::MessagePumpWin::Dispatcher Dispatcher;
+ typedef base::MessagePumpWin::Observer Observer;
+
+ // Please see MessagePumpWin for definitions of these methods.
+ void Run(Dispatcher* dispatcher);
+ void AddObserver(Observer* observer);
+ void RemoveObserver(Observer* observer);
+ void WillProcessMessage(const MSG& message);
+ void DidProcessMessage(const MSG& message);
+ void PumpOutPendingPaintMessages();
+
+ protected:
+ // TODO(rvargas): Make this platform independent.
+ base::MessagePumpForUI* pump_ui() {
+ return static_cast<base::MessagePumpForUI*>(pump_.get());
+ }
+#endif // defined(OS_WIN)
+};
+
+// Do not add any member variables to MessageLoopForUI! This is important b/c
+// MessageLoopForUI is often allocated via MessageLoop(TYPE_UI). Any extra
+// data that you need should be stored on the MessageLoop's pump_ instance.
+COMPILE_ASSERT(sizeof(MessageLoop) == sizeof(MessageLoopForUI),
+ MessageLoopForUI_should_not_have_extra_member_variables);
+
+//-----------------------------------------------------------------------------
+// MessageLoopForIO extends MessageLoop with methods that are particular to a
+// MessageLoop instantiated with TYPE_IO.
+//
+// This class is typically used like so:
+// MessageLoopForIO::current()->...call some method...
+//
+class MessageLoopForIO : public MessageLoop {
+ public:
+ MessageLoopForIO() : MessageLoop(TYPE_IO) {
+ }
+
+ // Returns the MessageLoopForIO of the current thread.
+ static MessageLoopForIO* current() {
+ MessageLoop* loop = MessageLoop::current();
+ DCHECK_EQ(MessageLoop::TYPE_IO, loop->type());
+ return static_cast<MessageLoopForIO*>(loop);
+ }
+
+#if defined(OS_WIN)
+ typedef base::MessagePumpForIO::IOHandler IOHandler;
+ typedef base::MessagePumpForIO::IOContext IOContext;
+
+ // Please see MessagePumpWin for definitions of these methods.
+ void RegisterIOHandler(HANDLE file_handle, IOHandler* handler);
+ bool WaitForIOCompletion(DWORD timeout, IOHandler* filter);
+
+ protected:
+ // TODO(rvargas): Make this platform independent.
+ base::MessagePumpForIO* pump_io() {
+ return static_cast<base::MessagePumpForIO*>(pump_.get());
+ }
+
+#elif defined(OS_POSIX)
+ typedef base::MessagePumpLibevent::Watcher Watcher;
+ typedef base::MessagePumpLibevent::FileDescriptorWatcher
+ FileDescriptorWatcher;
+ typedef base::LineWatcher LineWatcher;
+
+ enum Mode {
+ WATCH_READ = base::MessagePumpLibevent::WATCH_READ,
+ WATCH_WRITE = base::MessagePumpLibevent::WATCH_WRITE,
+ WATCH_READ_WRITE = base::MessagePumpLibevent::WATCH_READ_WRITE
+ };
+
+ // Please see MessagePumpLibevent for definition.
+ bool WatchFileDescriptor(int fd,
+ bool persistent,
+ Mode mode,
+ FileDescriptorWatcher *controller,
+ Watcher *delegate);
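+
+ // For example (an illustrative sketch only; MyWatcher is a hypothetical
+ // Watcher subclass and |fd| an already-open file descriptor):
+ //
+ //   MessageLoopForIO::FileDescriptorWatcher controller;
+ //   MyWatcher watcher;
+ //   MessageLoopForIO::current()->WatchFileDescriptor(
+ //       fd, /* persistent */ true, MessageLoopForIO::WATCH_READ,
+ //       &controller, &watcher);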
+
+ typedef base::MessagePumpLibevent::SignalEvent SignalEvent;
+ typedef base::MessagePumpLibevent::SignalWatcher SignalWatcher;
+ bool CatchSignal(int sig,
+ SignalEvent* sigevent,
+ SignalWatcher* delegate);
+
+#endif // defined(OS_POSIX)
+};
+
+// Do not add any member variables to MessageLoopForIO! This is important b/c
+// MessageLoopForIO is often allocated via MessageLoop(TYPE_IO). Any extra
+// data that you need should be stored on the MessageLoop's pump_ instance.
+COMPILE_ASSERT(sizeof(MessageLoop) == sizeof(MessageLoopForIO),
+ MessageLoopForIO_should_not_have_extra_member_variables);
+
+#endif // BASE_MESSAGE_LOOP_H_
diff --git a/ipc/chromium/src/base/message_pump.h b/ipc/chromium/src/base/message_pump.h
new file mode 100644
index 000000000..08bdbe30a
--- /dev/null
+++ b/ipc/chromium/src/base/message_pump.h
@@ -0,0 +1,143 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MESSAGE_PUMP_H_
+#define BASE_MESSAGE_PUMP_H_
+
+#include "nsISupportsImpl.h"
+
+class nsIEventTarget;
+
+namespace base {
+
+class TimeTicks;
+
+class MessagePump {
+ public:
+ NS_INLINE_DECL_THREADSAFE_REFCOUNTING(MessagePump)
+
+ // Please see the comments above the Run method for an illustration of how
+ // these delegate methods are used.
+ class Delegate {
+ public:
+ virtual ~Delegate() {}
+
+ // Called from within Run in response to ScheduleWork or when the message
+ // pump would otherwise call DoDelayedWork. Returns true to indicate that
+ // work was done. DoDelayedWork will not be called if DoWork returns true.
+ virtual bool DoWork() = 0;
+
+ // Called from within Run in response to ScheduleDelayedWork or when the
+ // message pump would otherwise sleep waiting for more work. Returns true
+ // to indicate that delayed work was done. DoIdleWork will not be called
+ // if DoDelayedWork returns true. Upon return |next_delayed_work_time|
+ // indicates the time when DoDelayedWork should be called again. If
+ // |next_delayed_work_time| is null (per Time::is_null), then the queue of
+ // future delayed work (timer events) is currently empty, and no additional
+ // calls to this function need to be scheduled.
+ virtual bool DoDelayedWork(TimeTicks* next_delayed_work_time) = 0;
+
+ // Called from within Run just before the message pump goes to sleep.
+ // Returns true to indicate that idle work was done.
+ virtual bool DoIdleWork() = 0;
+ };
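+
+ // For example (an illustrative sketch only; QueueDelegate is a hypothetical
+ // delegate that drains a simple queue of closures and has no timers):
+ //
+ //   class QueueDelegate : public MessagePump::Delegate {
+ //    public:
+ //     bool DoWork() override {
+ //       if (tasks_.empty())
+ //         return false;
+ //       tasks_.front()();
+ //       tasks_.pop();
+ //       return true;
+ //     }
+ //     bool DoDelayedWork(TimeTicks* next_delayed_work_time) override {
+ //       *next_delayed_work_time = TimeTicks();  // no delayed work queued
+ //       return false;
+ //     }
+ //     bool DoIdleWork() override { return false; }
+ //    private:
+ //     std::queue<std::function<void()>> tasks_;
+ //   };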
+
+ // The Run method is called to enter the message pump's run loop.
+ //
+ // Within the method, the message pump is responsible for processing native
+ // messages as well as for giving cycles to the delegate periodically. The
+ // message pump should take care to mix delegate callbacks with native
+ // message processing so neither type of event starves the other of cycles.
+ //
+ // The anatomy of a typical run loop:
+ //
+ // for (;;) {
+ // bool did_work = DoInternalWork();
+ // if (should_quit_)
+ // break;
+ //
+ // did_work |= delegate_->DoWork();
+ // if (should_quit_)
+ // break;
+ //
+ // did_work |= delegate_->DoDelayedWork();
+ // if (should_quit_)
+ // break;
+ //
+ // if (did_work)
+ // continue;
+ //
+ // did_work = delegate_->DoIdleWork();
+ // if (should_quit_)
+ // break;
+ //
+ // if (did_work)
+ // continue;
+ //
+ // WaitForWork();
+ // }
+ //
+ // Here, DoInternalWork is some private method of the message pump that is
+ // responsible for dispatching the next UI message or notifying the next IO
+ // completion (for example). WaitForWork is a private method that simply
+ // blocks until there is more work of any type to do.
+ //
+ // Notice that the run loop cycles between calling DoInternalWork, DoWork,
+ // and DoDelayedWork methods. This helps ensure that neither work queue
+ // starves the other. This is important for message pumps that are used to
+ // drive animations, for example.
+ //
+ // Notice also that after each callout to foreign code, the run loop checks
+ // to see if it should quit. The Quit method is responsible for setting this
+ // flag. No further work is done once the quit flag is set.
+ //
+ // NOTE: Care must be taken to handle Run being called again from within any
+ // of the callouts to foreign code. Native message pumps may also need to
+ // deal with other native message pumps being run outside their control
+ // (e.g., the MessageBox API on Windows pumps UI messages!). To be specific,
+ // the callouts (DoWork and DoDelayedWork) MUST still be provided even in
+ // nested sub-loops that are "seemingly" outside the control of this message
+ // pump. DoWork in particular must never be starved for time slices unless
+ // it returns false (meaning it has run out of things to do).
+ //
+ virtual void Run(Delegate* delegate) = 0;
+
+ // Quit immediately from the most recently entered run loop. This method may
+ // only be used on the thread that called Run.
+ virtual void Quit() = 0;
+
+ // Schedule a DoWork callback to happen reasonably soon. Does nothing if a
+ // DoWork callback is already scheduled. This method may be called from any
+ // thread. Once this call is made, DoWork should not be "starved" at least
+ // until it returns a value of false.
+ virtual void ScheduleWork() = 0;
+
+ // This method may only be called from the thread that called Run.
+ //
+ // Ensure that DoWork will be called if a nested loop is entered.
+ // If a MessagePump can already guarantee that DoWork will be called
+ // "reasonably soon", this method can be a no-op to avoid expensive
+ // atomic tests and/or syscalls required for ScheduleWork().
+ virtual void ScheduleWorkForNestedLoop() { ScheduleWork(); }
+
+ // Schedule a DoDelayedWork callback to happen at the specified time,
+ // cancelling any pending DoDelayedWork callback. This method may only be
+ // used on the thread that called Run.
+ virtual void ScheduleDelayedWork(const TimeTicks& delayed_work_time) = 0;
+
+ // If this returns a non-null event target, tasks should be dispatched to it
+ // (the underlying nsThread) instead of being queued by the MessageLoop
+ // itself.
+ virtual nsIEventTarget* GetXPCOMThread()
+ {
+ return nullptr;
+ }
+
+protected:
+ virtual ~MessagePump() {}
+};
+
+} // namespace base
+
+#endif // BASE_MESSAGE_PUMP_H_
diff --git a/ipc/chromium/src/base/message_pump_android.cc b/ipc/chromium/src/base/message_pump_android.cc
new file mode 100644
index 000000000..2d8f310d1
--- /dev/null
+++ b/ipc/chromium/src/base/message_pump_android.cc
@@ -0,0 +1,125 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/message_pump_android.h"
+
+#include <fcntl.h>
+#include <math.h>
+
+#include "base/eintr_wrapper.h"
+#include "base/logging.h"
+#include "base/platform_thread.h"
+
+namespace mozilla {
+bool ProcessNextEvent();
+void NotifyEvent();
+}
+
+namespace base {
+
+MessagePumpForUI::MessagePumpForUI()
+  : state_(NULL)
+  , work_scheduled(false)
+  , pump(*this)
+{
+}
+
+MessagePumpForUI::~MessagePumpForUI() {
+}
+
+MessagePumpAndroid::MessagePumpAndroid(MessagePumpForUI &aPump)
+ : pump(aPump)
+{
+}
+
+MessagePumpAndroid::~MessagePumpAndroid()
+{
+}
+
+void MessagePumpForUI::Run(Delegate* delegate) {
+ RunState state;
+ state.delegate = delegate;
+ state.should_quit = false;
+ state.run_depth = state_ ? state_->run_depth + 1 : 1;
+ // We really only do a single task for each iteration of the loop. If we
+ // have done something, assume there is likely something more to do. This
+ // will mean that we don't block on the message pump until there was nothing
+ // more to do. We also set this to true to make sure not to block on the
+ // first iteration of the loop, so RunAllPending() works correctly.
+ state.more_work_is_plausible = true;
+
+ RunState* previous_state = state_;
+ state_ = &state;
+
+ // We run our own loop here rather than relying on an external quit
+ // mechanism. This is so we only quit our own loops, and we don't quit
+ // nested loops run by others. TODO(deanm): Is this what we want?
+
+ while (!state_->should_quit) {
+ mozilla::ProcessNextEvent();
+ if (work_scheduled) {
+ work_scheduled = false;
+ HandleDispatch();
+ }
+ }
+
+ state_ = previous_state;
+}
+
+void MessagePumpForUI::HandleDispatch() {
+ // Dispatch one round of delegate work. This pump has no wakeup pipe; Run()
+ // calls us after mozilla::ProcessNextEvent() observed that work_scheduled
+ // was set by ScheduleWork().
+ if (state_->should_quit)
+ return;
+
+ state_->more_work_is_plausible = false;
+
+ if (state_->delegate->DoWork())
+ state_->more_work_is_plausible = true;
+
+ if (state_->should_quit)
+ return;
+
+ if (state_->delegate->DoDelayedWork(&delayed_work_time_))
+ state_->more_work_is_plausible = true;
+ if (state_->should_quit)
+ return;
+
+ // Don't do idle work if we think there are more important things
+ // that we could be doing.
+ if (state_->more_work_is_plausible)
+ return;
+
+ if (state_->delegate->DoIdleWork())
+ state_->more_work_is_plausible = true;
+ if (state_->should_quit)
+ return;
+}
+
+void MessagePumpForUI::Quit() {
+ if (state_) {
+ state_->should_quit = true;
+ } else {
+ NOTREACHED() << "Quit called outside Run!";
+ }
+}
+
+void MessagePumpForUI::ScheduleWork() {
+ // This can be called on any thread, so we don't want to touch any state
+ // variables as we would then need locks all over. This ensures that if
+ // we are sleeping in a poll that we will wake up.
+ work_scheduled = true;
+ mozilla::NotifyEvent();
+}
+
+void MessagePumpForUI::ScheduleDelayedWork(const TimeTicks& delayed_work_time) {
+ // We need to wake up the loop in case the poll timeout needs to be
+ // adjusted. This will cause us to try to do work, but that's ok.
+ delayed_work_time_ = delayed_work_time;
+ ScheduleWork();
+}
+
+} // namespace base
diff --git a/ipc/chromium/src/base/message_pump_android.h b/ipc/chromium/src/base/message_pump_android.h
new file mode 100644
index 000000000..ac9801843
--- /dev/null
+++ b/ipc/chromium/src/base/message_pump_android.h
@@ -0,0 +1,77 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MESSAGE_PUMP_ANDROID_H_
+#define BASE_MESSAGE_PUMP_ANDROID_H_
+
+#include "base/message_pump.h"
+#include "base/time.h"
+
+namespace base {
+
+class MessagePumpForUI;
+
+class MessagePumpAndroid {
+
+ public:
+ MessagePumpAndroid(MessagePumpForUI &pump);
+ ~MessagePumpAndroid();
+
+ private:
+ base::MessagePumpForUI &pump;
+};
+
+// This class implements a MessagePump needed for TYPE_UI MessageLoops on
+// Android
+class MessagePumpForUI : public MessagePump {
+
+ public:
+ MessagePumpForUI();
+ ~MessagePumpForUI();
+
+ virtual void Run(Delegate* delegate);
+ virtual void Quit();
+ virtual void ScheduleWork();
+ virtual void ScheduleDelayedWork(const TimeTicks& delayed_work_time);
+
+ // Internal methods used for processing the pump callbacks. They are
+ // public for simplicity but should not be used directly.
+ // HandleDispatch is called after the poll has completed.
+ void HandleDispatch();
+
+ private:
+ // We may make recursive calls to Run, so we save state that needs to be
+ // separate between them in this structure type.
+ struct RunState {
+ Delegate* delegate;
+
+ // Used to flag that the current Run() invocation should return ASAP.
+ bool should_quit;
+
+ // Used to count how many Run() invocations are on the stack.
+ int run_depth;
+
+ // Used internally for controlling whether we want a message pump
+ // iteration to be blocking or not.
+ bool more_work_is_plausible;
+ };
+
+ RunState* state_;
+
+ // This is the time when we need to do delayed work.
+ TimeTicks delayed_work_time_;
+
+ bool work_scheduled;
+
+ // MessagePump implementation for Android based on the GLib implementation.
+ MessagePumpAndroid pump;
+
+ DISALLOW_COPY_AND_ASSIGN(MessagePumpForUI);
+};
+
+} // namespace base
+
+#endif // BASE_MESSAGE_PUMP_ANDROID_H_
diff --git a/ipc/chromium/src/base/message_pump_default.cc b/ipc/chromium/src/base/message_pump_default.cc
new file mode 100644
index 000000000..ef37b6296
--- /dev/null
+++ b/ipc/chromium/src/base/message_pump_default.cc
@@ -0,0 +1,105 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/message_pump_default.h"
+
+#include "base/logging.h"
+#include "base/message_loop.h"
+#include "base/scoped_nsautorelease_pool.h"
+#include "GeckoProfiler.h"
+
+#include "mozilla/BackgroundHangMonitor.h"
+
+namespace base {
+
+MessagePumpDefault::MessagePumpDefault()
+ : keep_running_(true),
+ event_(false, false) {
+}
+
+void MessagePumpDefault::Run(Delegate* delegate) {
+ DCHECK(keep_running_) << "Quit must have been called outside of Run!";
+
+ const MessageLoop* const loop = MessageLoop::current();
+ mozilla::BackgroundHangMonitor hangMonitor(
+ loop->thread_name().c_str(),
+ loop->transient_hang_timeout(),
+ loop->permanent_hang_timeout());
+
+ for (;;) {
+ ScopedNSAutoreleasePool autorelease_pool;
+
+ hangMonitor.NotifyActivity();
+ bool did_work = delegate->DoWork();
+ if (!keep_running_)
+ break;
+
+ hangMonitor.NotifyActivity();
+ did_work |= delegate->DoDelayedWork(&delayed_work_time_);
+ if (!keep_running_)
+ break;
+
+ if (did_work)
+ continue;
+
+ hangMonitor.NotifyActivity();
+ did_work = delegate->DoIdleWork();
+ if (!keep_running_)
+ break;
+
+ if (did_work)
+ continue;
+
+ if (delayed_work_time_.is_null()) {
+ hangMonitor.NotifyWait();
+ PROFILER_LABEL("MessagePump", "Wait",
+ js::ProfileEntry::Category::OTHER);
+ {
+ GeckoProfilerSleepRAII profiler_sleep;
+ event_.Wait();
+ }
+ } else {
+ TimeDelta delay = delayed_work_time_ - TimeTicks::Now();
+ if (delay > TimeDelta()) {
+ hangMonitor.NotifyWait();
+ PROFILER_LABEL("MessagePump", "Wait",
+ js::ProfileEntry::Category::OTHER);
+ {
+ GeckoProfilerSleepRAII profiler_sleep;
+ event_.TimedWait(delay);
+ }
+ } else {
+ // It looks like delayed_work_time_ indicates a time in the past, so we
+ // need to call DoDelayedWork now.
+ delayed_work_time_ = TimeTicks();
+ }
+ }
+ // Since event_ is auto-reset, we don't need to do anything special here
+ // other than service each delegate method.
+ }
+
+ keep_running_ = true;
+}
+
+void MessagePumpDefault::Quit() {
+ keep_running_ = false;
+}
+
+void MessagePumpDefault::ScheduleWork() {
+ // Since this can be called on any thread, we need to ensure that our Run
+ // loop wakes up.
+ event_.Signal();
+}
+
+void MessagePumpDefault::ScheduleDelayedWork(
+ const TimeTicks& delayed_work_time) {
+ // We know that we can't be blocked on Wait right now since this method can
+ // only be called on the same thread as Run, so we only need to update our
+ // record of how long to sleep when we do sleep.
+ delayed_work_time_ = delayed_work_time;
+}
+
+} // namespace base
diff --git a/ipc/chromium/src/base/message_pump_default.h b/ipc/chromium/src/base/message_pump_default.h
new file mode 100644
index 000000000..98c204e62
--- /dev/null
+++ b/ipc/chromium/src/base/message_pump_default.h
@@ -0,0 +1,43 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MESSAGE_PUMP_DEFAULT_H_
+#define BASE_MESSAGE_PUMP_DEFAULT_H_
+
+#include "base/message_pump.h"
+#include "base/time.h"
+#include "base/waitable_event.h"
+
+namespace base {
+
+class MessagePumpDefault : public MessagePump {
+ public:
+ MessagePumpDefault();
+ ~MessagePumpDefault() {}
+
+ // MessagePump methods:
+ virtual void Run(Delegate* delegate);
+ virtual void Quit();
+ virtual void ScheduleWork();
+ virtual void ScheduleDelayedWork(const TimeTicks& delayed_work_time);
+
+ protected:
+ // This flag is set to false when Run should return.
+ bool keep_running_;
+
+ // Used to sleep until there is more work to do.
+ WaitableEvent event_;
+
+ // The time at which we should call DoDelayedWork.
+ TimeTicks delayed_work_time_;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(MessagePumpDefault);
+};
+
+} // namespace base
+
+#endif // BASE_MESSAGE_PUMP_DEFAULT_H_
diff --git a/ipc/chromium/src/base/message_pump_glib.cc b/ipc/chromium/src/base/message_pump_glib.cc
new file mode 100644
index 000000000..36ebfdd69
--- /dev/null
+++ b/ipc/chromium/src/base/message_pump_glib.cc
@@ -0,0 +1,341 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright (c) 2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/message_pump_glib.h"
+
+#include <fcntl.h>
+#include <math.h>
+
+#include <gtk/gtk.h>
+#include <glib.h>
+
+#include "base/eintr_wrapper.h"
+#include "base/logging.h"
+#include "base/platform_thread.h"
+
+namespace {
+
+// Return a timeout suitable for the glib loop, -1 to block forever,
+// 0 to return right away, or a timeout in milliseconds from now.
+int GetTimeIntervalMilliseconds(const base::TimeTicks& from) {
+ if (from.is_null())
+ return -1;
+
+ // Be careful here. TimeDelta has a precision of microseconds, but we want a
+ // value in milliseconds. If there are 5.5ms left, should the delay be 5 or
+ // 6? It should be 6 to avoid executing delayed work too early.
+ int delay = static_cast<int>(
+ ceil((from - base::TimeTicks::Now()).InMillisecondsF()));
+
+ // If this value is negative, then we need to run delayed work soon.
+ return delay < 0 ? 0 : delay;
+}
+
+// A brief refresher on GLib:
+// GLib sources have four callbacks: Prepare, Check, Dispatch and Finalize.
+// On each iteration of the GLib pump, it calls each source's Prepare function.
+// This function should return TRUE if it wants GLib to call its Dispatch, and
+// FALSE otherwise. It can also set a timeout in this case for the next time
+// Prepare should be called again (it may be called sooner).
+// After the Prepare calls, GLib does a poll to check for events from the
+// system. File descriptors can be attached to the sources. The poll may block
+// if none of the Prepare calls returned TRUE. It will block indefinitely, or
+// at most for the minimum timeout returned by a source in Prepare.
+// After the poll, GLib calls Check for each source that returned FALSE
+// from Prepare. The return value of Check has the same meaning as for Prepare,
+// making Check a second chance to tell GLib we are ready for Dispatch.
+// Finally, GLib calls Dispatch for each source that is ready. If Dispatch
+// returns FALSE, GLib will destroy the source. Dispatch calls may be recursive
+// (i.e., you can call Run from them), but Prepare and Check cannot.
+// Finalize is called when the source is destroyed.
+// NOTE: It is common for subsystems to want to process pending events while
+// doing intensive work, for example the flash plugin. They usually use the
+// following pattern (recommended by the GTK docs):
+// while (gtk_events_pending()) {
+// gtk_main_iteration();
+// }
+//
+// gtk_events_pending just calls g_main_context_pending, which does the
+// following:
+// - Call prepare on all the sources.
+// - Do the poll with a timeout of 0 (not blocking).
+// - Call check on all the sources.
+// - *Does not* call dispatch on the sources.
+// - Return true if any of prepare() or check() returned true.
+//
+// gtk_main_iteration just calls g_main_context_iteration, which does the whole
+// thing, respecting the timeout for the poll (and may block, although it is
+// expected not to if gtk_events_pending returned true), and calls dispatch.
+//
+// Thus it is important to only return true from prepare or check if we
+// actually have events or work to do. We also need to make sure we keep
+// internal state consistent so that if prepare/check return true when called
+// from gtk_events_pending, they will still return true when called right
+// after, from gtk_main_iteration.
+//
+// For the GLib pump we try to follow the Windows UI pump model:
+// - Whenever we receive a wakeup event or the timer for delayed work expires,
+// we run DoWork and/or DoDelayedWork. That part will also run in the other
+// event pumps.
+// - We also run DoWork, DoDelayedWork, and possibly DoIdleWork in the main
+// loop, around event handling.
+
+struct WorkSource : public GSource {
+ base::MessagePumpForUI* pump;
+};
+
+gboolean WorkSourcePrepare(GSource* source,
+ gint* timeout_ms) {
+ *timeout_ms = static_cast<WorkSource*>(source)->pump->HandlePrepare();
+ // We always return FALSE, so that our timeout is honored. If we were
+ // to return TRUE, the timeout would be considered to be 0 and the poll
+ // would never block. Once the poll is finished, Check will be called.
+ return FALSE;
+}
+
+gboolean WorkSourceCheck(GSource* source) {
+ // Only return TRUE if Dispatch should be called.
+ return static_cast<WorkSource*>(source)->pump->HandleCheck();
+}
+
+gboolean WorkSourceDispatch(GSource* source,
+ GSourceFunc unused_func,
+ gpointer unused_data) {
+
+ static_cast<WorkSource*>(source)->pump->HandleDispatch();
+ // Always return TRUE so our source stays registered.
+ return TRUE;
+}
+
+// I wish these could be const, but g_source_new wants non-const.
+GSourceFuncs WorkSourceFuncs = {
+ WorkSourcePrepare,
+ WorkSourceCheck,
+ WorkSourceDispatch,
+ NULL
+};
+
+} // namespace
+
+
+namespace base {
+
+MessagePumpForUI::MessagePumpForUI()
+ : state_(NULL),
+ context_(g_main_context_default()),
+ wakeup_gpollfd_(new GPollFD),
+ pipe_full_(false) {
+ // Create our wakeup pipe, which is used to flag when work was scheduled.
+ int fds[2];
+ CHECK(pipe(fds) == 0);
+ wakeup_pipe_read_ = fds[0];
+ wakeup_pipe_write_ = fds[1];
+ wakeup_gpollfd_->fd = wakeup_pipe_read_;
+ wakeup_gpollfd_->events = G_IO_IN;
+
+ work_source_ = g_source_new(&WorkSourceFuncs, sizeof(WorkSource));
+ static_cast<WorkSource*>(work_source_)->pump = this;
+ g_source_add_poll(work_source_, wakeup_gpollfd_.get());
+ // Use a low priority so that we let other events in the queue go first.
+ g_source_set_priority(work_source_, G_PRIORITY_DEFAULT_IDLE);
+ // This is needed to allow Run calls inside Dispatch.
+ g_source_set_can_recurse(work_source_, TRUE);
+ g_source_attach(work_source_, context_);
+ gdk_event_handler_set(&EventDispatcher, this, NULL);
+}
+
+MessagePumpForUI::~MessagePumpForUI() {
+ gdk_event_handler_set(reinterpret_cast<GdkEventFunc>(gtk_main_do_event),
+ this, NULL);
+ g_source_destroy(work_source_);
+ g_source_unref(work_source_);
+ close(wakeup_pipe_read_);
+ close(wakeup_pipe_write_);
+}
+
+void MessagePumpForUI::RunWithDispatcher(Delegate* delegate,
+ Dispatcher* dispatcher) {
+#ifndef NDEBUG
+ // Make sure we only run this on one thread. GTK only has one message pump
+ // so we can only have one UI loop per process.
+ static PlatformThreadId thread_id = PlatformThread::CurrentId();
+ DCHECK(thread_id == PlatformThread::CurrentId()) <<
+ "Running MessagePumpForUI on two different threads; "
+ "this is unsupported by GLib!";
+#endif
+
+ RunState state;
+ state.delegate = delegate;
+ state.dispatcher = dispatcher;
+ state.should_quit = false;
+ state.run_depth = state_ ? state_->run_depth + 1 : 1;
+ state.has_work = false;
+
+ RunState* previous_state = state_;
+ state_ = &state;
+
+ // We really only do a single task for each iteration of the loop. If we
+ // have done something, assume there is likely something more to do. This
+ // will mean that we don't block on the message pump until there was nothing
+ // more to do. We also set this to true to make sure not to block on the
+ // first iteration of the loop, so RunAllPending() works correctly.
+ bool more_work_is_plausible = true;
+
+ // We run our own loop instead of using g_main_loop_quit in one of the
+ // callbacks. This is so we only quit our own loops, and we don't quit
+ // nested loops run by others. TODO(deanm): Is this what we want?
+ for (;;) {
+ // Don't block if we think we have more work to do.
+ bool block = !more_work_is_plausible;
+
+ // g_main_context_iteration returns true if events have been dispatched.
+ more_work_is_plausible = g_main_context_iteration(context_, block);
+ if (state_->should_quit)
+ break;
+
+ more_work_is_plausible |= state_->delegate->DoWork();
+ if (state_->should_quit)
+ break;
+
+ more_work_is_plausible |=
+ state_->delegate->DoDelayedWork(&delayed_work_time_);
+ if (state_->should_quit)
+ break;
+
+ if (more_work_is_plausible)
+ continue;
+
+ more_work_is_plausible = state_->delegate->DoIdleWork();
+ if (state_->should_quit)
+ break;
+ }
+
+ state_ = previous_state;
+}
+
+// Return the timeout we want passed to poll.
+int MessagePumpForUI::HandlePrepare() {
+ // We know we have work, but we haven't called HandleDispatch yet. Don't let
+ // the pump block so that we can do some processing.
+ if (state_ && // state_ may be null during tests.
+ state_->has_work)
+ return 0;
+
+ // We don't think we have work to do, but make sure not to block
+ // longer than the next time we need to run delayed work.
+ return GetTimeIntervalMilliseconds(delayed_work_time_);
+}
+
+bool MessagePumpForUI::HandleCheck() {
+ if (!state_) // state_ may be null during tests.
+ return false;
+
+ // We should only ever have a single message on the wakeup pipe since we only
+ // write to the pipe when pipe_full_ is false. The glib poll will tell us
+ // whether there was data, so this read shouldn't block.
+ if (wakeup_gpollfd_->revents & G_IO_IN) {
+ pipe_full_ = false;
+
+ char msg;
+ if (HANDLE_EINTR(read(wakeup_pipe_read_, &msg, 1)) != 1 || msg != '!') {
+ NOTREACHED() << "Error reading from the wakeup pipe.";
+ }
+ // Since we ate the message, we need to record that we have more work,
+ // because HandleCheck() may be called without HandleDispatch being called
+ // afterwards.
+ state_->has_work = true;
+ }
+
+ if (state_->has_work)
+ return true;
+
+ if (GetTimeIntervalMilliseconds(delayed_work_time_) == 0) {
+ // The timer has expired. That condition will stay true until we process
+ // that delayed work, so we don't need to record this differently.
+ return true;
+ }
+
+ return false;
+}
+
+void MessagePumpForUI::HandleDispatch() {
+ state_->has_work = false;
+ if (state_->delegate->DoWork()) {
+ // NOTE: on Windows at this point we would call ScheduleWork (see
+ // MessagePumpForUI::HandleWorkMessage in message_pump_win.cc). But here,
+ // instead of posting a message on the wakeup pipe, we can avoid the
+ // syscalls and just signal that we have more work.
+ state_->has_work = true;
+ }
+
+ if (state_->should_quit)
+ return;
+
+ state_->delegate->DoDelayedWork(&delayed_work_time_);
+}
+
+void MessagePumpForUI::AddObserver(Observer* observer) {
+ observers_.AddObserver(observer);
+}
+
+void MessagePumpForUI::RemoveObserver(Observer* observer) {
+ observers_.RemoveObserver(observer);
+}
+
+void MessagePumpForUI::WillProcessEvent(GdkEvent* event) {
+ FOR_EACH_OBSERVER(Observer, observers_, WillProcessEvent(event));
+}
+
+void MessagePumpForUI::DidProcessEvent(GdkEvent* event) {
+ FOR_EACH_OBSERVER(Observer, observers_, DidProcessEvent(event));
+}
+
+void MessagePumpForUI::Quit() {
+ if (state_) {
+ state_->should_quit = true;
+ } else {
+ NOTREACHED() << "Quit called outside Run!";
+ }
+}
+
+void MessagePumpForUI::ScheduleWork() {
+ bool was_full = pipe_full_.exchange(true);
+ if (was_full) {
+ return;
+ }
+
+ // This can be called on any thread, so we don't want to touch any state
+ // variables as we would then need locks all over. This ensures that if
+ // we are sleeping in a poll that we will wake up.
+ char msg = '!';
+ if (HANDLE_EINTR(write(wakeup_pipe_write_, &msg, 1)) != 1) {
+ NOTREACHED() << "Could not write to the UI message loop wakeup pipe!";
+ }
+}
+
+void MessagePumpForUI::ScheduleDelayedWork(const TimeTicks& delayed_work_time) {
+ // We need to wake up the loop in case the poll timeout needs to be
+ // adjusted. This will cause us to try to do work, but that's ok.
+ delayed_work_time_ = delayed_work_time;
+ ScheduleWork();
+}
+
+// static
+void MessagePumpForUI::EventDispatcher(GdkEvent* event, gpointer data) {
+ MessagePumpForUI* message_pump = reinterpret_cast<MessagePumpForUI*>(data);
+
+ message_pump->WillProcessEvent(event);
+ if (message_pump->state_ && // state_ may be null during tests.
+ message_pump->state_->dispatcher) {
+ if (!message_pump->state_->dispatcher->Dispatch(event))
+ message_pump->state_->should_quit = true;
+ } else {
+ gtk_main_do_event(event);
+ }
+ message_pump->DidProcessEvent(event);
+}
+
+} // namespace base
diff --git a/ipc/chromium/src/base/message_pump_glib.h b/ipc/chromium/src/base/message_pump_glib.h
new file mode 100644
index 000000000..1d0e05709
--- /dev/null
+++ b/ipc/chromium/src/base/message_pump_glib.h
@@ -0,0 +1,147 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright (c) 2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MESSAGE_PUMP_GLIB_H_
+#define BASE_MESSAGE_PUMP_GLIB_H_
+
+#include "base/message_pump.h"
+#include "base/observer_list.h"
+#include "base/time.h"
+#include "mozilla/UniquePtr.h"
+#include "mozilla/Atomics.h"
+
+typedef union _GdkEvent GdkEvent;
+typedef struct _GMainContext GMainContext;
+typedef struct _GPollFD GPollFD;
+typedef struct _GSource GSource;
+
+namespace base {
+
+// This class implements a MessagePump needed for TYPE_UI MessageLoops on
+// OS_LINUX platforms using GLib.
+class MessagePumpForUI : public MessagePump {
+ public:
+  // Observer is notified prior to a GdkEvent being dispatched. As
+ // Observers are notified of every change, they have to be FAST!
+ class Observer {
+ public:
+ virtual ~Observer() {}
+
+ // This method is called before processing a message.
+ virtual void WillProcessEvent(GdkEvent* event) = 0;
+
+ // This method is called after processing a message.
+ virtual void DidProcessEvent(GdkEvent* event) = 0;
+ };
+
+ // Dispatcher is used during a nested invocation of Run to dispatch events.
+ // If Run is invoked with a non-NULL Dispatcher, MessageLoop does not
+ // dispatch events (or invoke gtk_main_do_event), rather every event is
+ // passed to Dispatcher's Dispatch method for dispatch. It is up to the
+ // Dispatcher to dispatch, or not, the event.
+ //
+ // The nested loop is exited by either posting a quit, or returning false
+ // from Dispatch.
+ class Dispatcher {
+ public:
+ virtual ~Dispatcher() {}
+ // Dispatches the event. If true is returned processing continues as
+ // normal. If false is returned, the nested loop exits immediately.
+ virtual bool Dispatch(GdkEvent* event) = 0;
+ };
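+
+  // Illustrative sketch only (not part of this interface): a minimal
+  // Dispatcher that hands every event to GTK and keeps the nested loop
+  // running. The class name is hypothetical.
+  //
+  //   class ForwardingDispatcher : public Dispatcher {
+  //    public:
+  //     virtual bool Dispatch(GdkEvent* event) {
+  //       gtk_main_do_event(event);  // default GTK handling
+  //       return true;               // false would exit the nested loop
+  //     }
+  //   };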
+
+ MessagePumpForUI();
+ virtual ~MessagePumpForUI();
+
+ // Like MessagePump::Run, but GdkEvent objects are routed through dispatcher.
+ virtual void RunWithDispatcher(Delegate* delegate, Dispatcher* dispatcher);
+
+ virtual void Run(Delegate* delegate) { RunWithDispatcher(delegate, NULL); }
+ virtual void Quit();
+ virtual void ScheduleWork();
+ virtual void ScheduleDelayedWork(const TimeTicks& delayed_work_time);
+
+ // Internal methods used for processing the pump callbacks. They are
+ // public for simplicity but should not be used directly. HandlePrepare
+ // is called during the prepare step of glib, and returns a timeout that
+ // will be passed to the poll. HandleCheck is called after the poll
+ // has completed, and returns whether or not HandleDispatch should be called.
+ // HandleDispatch is called if HandleCheck returned true.
+ int HandlePrepare();
+ bool HandleCheck();
+ void HandleDispatch();
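+
+  // For orientation, a rough sketch of how glib drives these three hooks via
+  // a GSourceFuncs table; the actual wiring lives in message_pump_glib.cc and
+  // the static function names below are illustrative only (the pump pointer
+  // would be recovered from the GSource):
+  //
+  //   static gboolean WorkSourcePrepare(GSource* source, gint* timeout_ms) {
+  //     *timeout_ms = pump->HandlePrepare();
+  //     return FALSE;  // always let glib go on to poll
+  //   }
+  //   static gboolean WorkSourceCheck(GSource* source) {
+  //     return pump->HandleCheck();
+  //   }
+  //   static gboolean WorkSourceDispatch(GSource* source, GSourceFunc, gpointer) {
+  //     pump->HandleDispatch();
+  //     return TRUE;  // keep the source installed
+  //   }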
+
+ // Adds an Observer, which will start receiving notifications immediately.
+ void AddObserver(Observer* observer);
+
+ // Removes an Observer. It is safe to call this method while an Observer is
+ // receiving a notification callback.
+ void RemoveObserver(Observer* observer);
+
+ private:
+ // We may make recursive calls to Run, so we save state that needs to be
+ // separate between them in this structure type.
+ struct RunState {
+ Delegate* delegate;
+ Dispatcher* dispatcher;
+
+ // Used to flag that the current Run() invocation should return ASAP.
+ bool should_quit;
+
+ // Used to count how many Run() invocations are on the stack.
+ int run_depth;
+
+ // This keeps the state of whether the pump got signaled that there was new
+ // work to be done. Since we eat the message on the wake up pipe as soon as
+ // we get it, we keep that state here to stay consistent.
+ bool has_work;
+ };
+
+ // Invoked from EventDispatcher. Notifies all observers we're about to
+ // process an event.
+ void WillProcessEvent(GdkEvent* event);
+
+ // Invoked from EventDispatcher. Notifies all observers we processed an
+ // event.
+ void DidProcessEvent(GdkEvent* event);
+
+ // Callback prior to gdk dispatching an event.
+ static void EventDispatcher(GdkEvent* event, void* data);
+
+ RunState* state_;
+
+ // This is a GLib structure that we can add event sources to. We use the
+ // default GLib context, which is the one to which all GTK events are
+ // dispatched.
+ GMainContext* context_;
+
+ // This is the time when we need to do delayed work.
+ TimeTicks delayed_work_time_;
+
+ // The work source. It is shared by all calls to Run and destroyed when
+ // the message pump is destroyed.
+ GSource* work_source_;
+
+ // We use a wakeup pipe to make sure we'll get out of the glib polling phase
+ // when another thread has scheduled us to do some work. There is a glib
+ // mechanism g_main_context_wakeup, but this won't guarantee that our event's
+ // Dispatch() will be called.
+ int wakeup_pipe_read_;
+ int wakeup_pipe_write_;
+ // Use an autoptr to avoid needing the definition of GPollFD in the header.
+ mozilla::UniquePtr<GPollFD> wakeup_gpollfd_;
+
+ mozilla::Atomic<bool> pipe_full_;
+
+ // List of observers.
+ ObserverList<Observer> observers_;
+
+ DISALLOW_COPY_AND_ASSIGN(MessagePumpForUI);
+};
+
+} // namespace base
+
+#endif // BASE_MESSAGE_PUMP_GLIB_H_
diff --git a/ipc/chromium/src/base/message_pump_libevent.cc b/ipc/chromium/src/base/message_pump_libevent.cc
new file mode 100644
index 000000000..3cca238c1
--- /dev/null
+++ b/ipc/chromium/src/base/message_pump_libevent.cc
@@ -0,0 +1,458 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright (c) 2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/message_pump_libevent.h"
+
+#include <errno.h>
+#include <fcntl.h>
+#if defined(ANDROID) || defined(OS_POSIX)
+#include <unistd.h>
+#endif
+
+#include "eintr_wrapper.h"
+#include "base/logging.h"
+#include "base/scoped_nsautorelease_pool.h"
+#include "base/time.h"
+#include "nsDependentSubstring.h"
+#include "event.h"
+#include "mozilla/UniquePtr.h"
+
+// This macro checks that the _EVENT_SIZEOF_* constants defined in
+// ipc/chromium/src/third_party/<platform>/event2/event-config.h are correct.
+#if defined(_EVENT_SIZEOF_SHORT)
+#define CHECK_EVENT_SIZEOF(TYPE, type) \
+ static_assert(_EVENT_SIZEOF_##TYPE == sizeof(type), \
+ "bad _EVENT_SIZEOF_"#TYPE);
+#elif defined(EVENT__SIZEOF_SHORT)
+#define CHECK_EVENT_SIZEOF(TYPE, type) \
+ static_assert(EVENT__SIZEOF_##TYPE == sizeof(type), \
+ "bad EVENT__SIZEOF_"#TYPE);
+#else
+#error Cannot find libevent type sizes
+#endif
+
+CHECK_EVENT_SIZEOF(LONG, long);
+CHECK_EVENT_SIZEOF(LONG_LONG, long long);
+CHECK_EVENT_SIZEOF(PTHREAD_T, pthread_t);
+CHECK_EVENT_SIZEOF(SHORT, short);
+CHECK_EVENT_SIZEOF(SIZE_T, size_t);
+CHECK_EVENT_SIZEOF(VOID_P, void*);
+
+// Lifecycle of struct event
+// Libevent uses two main data structures:
+// struct event_base (of which there is one per message pump), and
+// struct event (of which there is roughly one per socket).
+// The socket's struct event is created in
+// MessagePumpLibevent::WatchFileDescriptor(),
+// is owned by the FileDescriptorWatcher, and is destroyed in
+// StopWatchingFileDescriptor().
+// It is moved into and out of lists in struct event_base by
+// the libevent functions event_add() and event_del().
+//
+// TODO(dkegel):
+// At the moment bad things happen if a FileDescriptorWatcher
+// is active after its MessagePumpLibevent has been destroyed.
+// See MessageLoopTest.FileDescriptorWatcherOutlivesMessageLoop
+// Not clear yet whether that situation occurs in practice,
+// but if it does, we need to fix it.
+
+namespace base {
+
+// Return 0 on success
+// Too small a function to bother putting in a library?
+static int SetNonBlocking(int fd) {
+ int flags = fcntl(fd, F_GETFL, 0);
+ if (flags == -1)
+ flags = 0;
+ return fcntl(fd, F_SETFL, flags | O_NONBLOCK);
+}
+
+MessagePumpLibevent::FileDescriptorWatcher::FileDescriptorWatcher()
+ : is_persistent_(false),
+ event_(NULL) {
+}
+
+MessagePumpLibevent::FileDescriptorWatcher::~FileDescriptorWatcher() {
+ if (event_) {
+ StopWatchingFileDescriptor();
+ }
+}
+
+void MessagePumpLibevent::FileDescriptorWatcher::Init(event *e,
+ bool is_persistent) {
+ DCHECK(e);
+ DCHECK(event_ == NULL);
+
+ is_persistent_ = is_persistent;
+ event_ = e;
+}
+
+event *MessagePumpLibevent::FileDescriptorWatcher::ReleaseEvent() {
+ struct event *e = event_;
+ event_ = NULL;
+ return e;
+}
+
+bool MessagePumpLibevent::FileDescriptorWatcher::StopWatchingFileDescriptor() {
+ event* e = ReleaseEvent();
+ if (e == NULL)
+ return true;
+
+ // event_del() is a no-op if the event isn't active.
+ int rv = event_del(e);
+ delete e;
+ return (rv == 0);
+}
+
+// Called if a byte is received on the wakeup pipe.
+void MessagePumpLibevent::OnWakeup(int socket, short flags, void* context) {
+ base::MessagePumpLibevent* that =
+ static_cast<base::MessagePumpLibevent*>(context);
+ DCHECK(that->wakeup_pipe_out_ == socket);
+
+ // Remove and discard the wakeup byte.
+ char buf;
+ int nread = HANDLE_EINTR(read(socket, &buf, 1));
+ DCHECK_EQ(nread, 1);
+ // Tell libevent to break out of inner loop.
+ event_base_loopbreak(that->event_base_);
+}
+
+MessagePumpLibevent::MessagePumpLibevent()
+ : keep_running_(true),
+ in_run_(false),
+ event_base_(event_base_new()),
+ wakeup_pipe_in_(-1),
+ wakeup_pipe_out_(-1) {
+ if (!Init())
+ NOTREACHED();
+}
+
+bool MessagePumpLibevent::Init() {
+ int fds[2];
+ if (pipe(fds)) {
+ DLOG(ERROR) << "pipe() failed, errno: " << errno;
+ return false;
+ }
+ if (SetNonBlocking(fds[0])) {
+ DLOG(ERROR) << "SetNonBlocking for pipe fd[0] failed, errno: " << errno;
+ return false;
+ }
+ if (SetNonBlocking(fds[1])) {
+ DLOG(ERROR) << "SetNonBlocking for pipe fd[1] failed, errno: " << errno;
+ return false;
+ }
+ wakeup_pipe_out_ = fds[0];
+ wakeup_pipe_in_ = fds[1];
+
+ wakeup_event_ = new event;
+ event_set(wakeup_event_, wakeup_pipe_out_, EV_READ | EV_PERSIST,
+ OnWakeup, this);
+ event_base_set(event_base_, wakeup_event_);
+
+ if (event_add(wakeup_event_, 0))
+ return false;
+ return true;
+}
+
+MessagePumpLibevent::~MessagePumpLibevent() {
+ DCHECK(wakeup_event_);
+ DCHECK(event_base_);
+ event_del(wakeup_event_);
+ delete wakeup_event_;
+ if (wakeup_pipe_in_ >= 0)
+ close(wakeup_pipe_in_);
+ if (wakeup_pipe_out_ >= 0)
+ close(wakeup_pipe_out_);
+ event_base_free(event_base_);
+}
+
+bool MessagePumpLibevent::WatchFileDescriptor(int fd,
+ bool persistent,
+ Mode mode,
+ FileDescriptorWatcher *controller,
+ Watcher *delegate) {
+ DCHECK(fd > 0);
+ DCHECK(controller);
+ DCHECK(delegate);
+ DCHECK(mode == WATCH_READ || mode == WATCH_WRITE || mode == WATCH_READ_WRITE);
+
+ int event_mask = persistent ? EV_PERSIST : 0;
+ if ((mode & WATCH_READ) != 0) {
+ event_mask |= EV_READ;
+ }
+ if ((mode & WATCH_WRITE) != 0) {
+ event_mask |= EV_WRITE;
+ }
+
+ // |should_delete_event| is true if we're modifying an event that's currently
+ // active in |controller|.
+ // If we're modifying an existing event and there's an error then we need to
+  // tell libevent to clean it up via event_del() before returning.
+ bool should_delete_event = true;
+ mozilla::UniquePtr<event> evt(controller->ReleaseEvent());
+ if (evt.get() == NULL) {
+ should_delete_event = false;
+ // Ownership is transferred to the controller.
+ evt = mozilla::MakeUnique<event>();
+ } else {
+ // It's illegal to use this function to listen on 2 separate fds with the
+ // same |controller|.
+ if (EVENT_FD(evt.get()) != fd) {
+ NOTREACHED() << "FDs don't match" << EVENT_FD(evt.get()) << "!=" << fd;
+ return false;
+ }
+
+ // Make sure we don't pick up any funky internal libevent masks.
+ int old_interest_mask = evt.get()->ev_events &
+ (EV_READ | EV_WRITE | EV_PERSIST);
+
+ // Combine old/new event masks.
+ event_mask |= old_interest_mask;
+
+ // Must disarm the event before we can reuse it.
+ event_del(evt.get());
+ }
+
+ // Set current interest mask and message pump for this event.
+ event_set(evt.get(), fd, event_mask, OnLibeventNotification,
+ delegate);
+
+ // Tell libevent which message pump this socket will belong to when we add it.
+ if (event_base_set(event_base_, evt.get()) != 0) {
+ if (should_delete_event) {
+ event_del(evt.get());
+ }
+ return false;
+ }
+
+ // Add this socket to the list of monitored sockets.
+ if (event_add(evt.get(), NULL) != 0) {
+ if (should_delete_event) {
+ event_del(evt.get());
+ }
+ return false;
+ }
+
+ // Transfer ownership of evt to controller.
+ controller->Init(evt.release(), persistent);
+ return true;
+}
+
+
+void MessagePumpLibevent::OnLibeventNotification(int fd, short flags,
+ void* context) {
+ Watcher* watcher = static_cast<Watcher*>(context);
+
+ if (flags & EV_WRITE) {
+ watcher->OnFileCanWriteWithoutBlocking(fd);
+ }
+ if (flags & EV_READ) {
+ watcher->OnFileCanReadWithoutBlocking(fd);
+ }
+}
+
+
+MessagePumpLibevent::SignalEvent::SignalEvent() :
+ event_(NULL)
+{
+}
+
+MessagePumpLibevent::SignalEvent::~SignalEvent()
+{
+ if (event_) {
+ StopCatching();
+ }
+}
+
+void
+MessagePumpLibevent::SignalEvent::Init(event *e)
+{
+ DCHECK(e);
+ DCHECK(event_ == NULL);
+ event_ = e;
+}
+
+bool
+MessagePumpLibevent::SignalEvent::StopCatching()
+{
+ // XXX/cjones: this code could be shared with
+ // FileDescriptorWatcher. ironic that libevent is "more"
+ // object-oriented than this C++
+ event* e = ReleaseEvent();
+ if (e == NULL)
+ return true;
+
+ // event_del() is a no-op if the event isn't active.
+ int rv = event_del(e);
+ delete e;
+ return (rv == 0);
+}
+
+event *
+MessagePumpLibevent::SignalEvent::ReleaseEvent()
+{
+ event *e = event_;
+ event_ = NULL;
+ return e;
+}
+
+bool
+MessagePumpLibevent::CatchSignal(int sig,
+ SignalEvent* sigevent,
+ SignalWatcher* delegate)
+{
+ DCHECK(sig > 0);
+ DCHECK(sigevent);
+ DCHECK(delegate);
+ // TODO if we want to support re-using SignalEvents, this code needs
+ // to jump through the same hoops as WatchFileDescriptor(). Not
+ // needed at present
+ DCHECK(NULL == sigevent->event_);
+
+ mozilla::UniquePtr<event> evt = mozilla::MakeUnique<event>();
+ signal_set(evt.get(), sig, OnLibeventSignalNotification, delegate);
+
+ if (event_base_set(event_base_, evt.get()))
+ return false;
+
+ if (signal_add(evt.get(), NULL))
+ return false;
+
+ // Transfer ownership of evt to controller.
+ sigevent->Init(evt.release());
+ return true;
+}
+
+void
+MessagePumpLibevent::OnLibeventSignalNotification(int sig, short flags,
+ void* context)
+{
+ DCHECK(sig > 0);
+ DCHECK(EV_SIGNAL == flags);
+ DCHECK(context);
+ reinterpret_cast<SignalWatcher*>(context)->OnSignal(sig);
+}
+
+
+// Reentrant!
+void MessagePumpLibevent::Run(Delegate* delegate) {
+ DCHECK(keep_running_) << "Quit must have been called outside of Run!";
+
+ bool old_in_run = in_run_;
+ in_run_ = true;
+
+ for (;;) {
+ ScopedNSAutoreleasePool autorelease_pool;
+
+ bool did_work = delegate->DoWork();
+ if (!keep_running_)
+ break;
+
+ did_work |= delegate->DoDelayedWork(&delayed_work_time_);
+ if (!keep_running_)
+ break;
+
+ if (did_work)
+ continue;
+
+ did_work = delegate->DoIdleWork();
+ if (!keep_running_)
+ break;
+
+ if (did_work)
+ continue;
+
+ // EVLOOP_ONCE tells libevent to only block once,
+ // but to service all pending events when it wakes up.
+ if (delayed_work_time_.is_null()) {
+ event_base_loop(event_base_, EVLOOP_ONCE);
+ } else {
+ TimeDelta delay = delayed_work_time_ - TimeTicks::Now();
+ if (delay > TimeDelta()) {
+ struct timeval poll_tv;
+ poll_tv.tv_sec = delay.InSeconds();
+ poll_tv.tv_usec = delay.InMicroseconds() % Time::kMicrosecondsPerSecond;
+ event_base_loopexit(event_base_, &poll_tv);
+ event_base_loop(event_base_, EVLOOP_ONCE);
+ } else {
+ // It looks like delayed_work_time_ indicates a time in the past, so we
+ // need to call DoDelayedWork now.
+ delayed_work_time_ = TimeTicks();
+ }
+ }
+ }
+
+ keep_running_ = true;
+ in_run_ = old_in_run;
+}
+
+void MessagePumpLibevent::Quit() {
+ DCHECK(in_run_);
+ // Tell both libevent and Run that they should break out of their loops.
+ keep_running_ = false;
+ ScheduleWork();
+}
+
+void MessagePumpLibevent::ScheduleWork() {
+ // Tell libevent (in a threadsafe way) that it should break out of its loop.
+ char buf = 0;
+ int nwrite = HANDLE_EINTR(write(wakeup_pipe_in_, &buf, 1));
+ DCHECK(nwrite == 1 || errno == EAGAIN)
+ << "[nwrite:" << nwrite << "] [errno:" << errno << "]";
+}
+
+void MessagePumpLibevent::ScheduleDelayedWork(
+ const TimeTicks& delayed_work_time) {
+ // We know that we can't be blocked on Wait right now since this method can
+ // only be called on the same thread as Run, so we only need to update our
+ // record of how long to sleep when we do sleep.
+ delayed_work_time_ = delayed_work_time;
+}
+
+void LineWatcher::OnFileCanReadWithoutBlocking(int aFd)
+{
+ ssize_t length = 0;
+
+ while (true) {
+ length = read(aFd, mReceiveBuffer.get(), mBufferSize - mReceivedIndex);
+ DCHECK(length <= ssize_t(mBufferSize - mReceivedIndex));
+ if (length <= 0) {
+ if (length < 0) {
+ if (errno == EINTR) {
+ continue; // retry system call when interrupted
+ }
+ if (errno == EAGAIN || errno == EWOULDBLOCK) {
+ return; // no data available: return and re-poll
+ }
+ DLOG(ERROR) << "Can't read from fd, error " << errno;
+ } else {
+ DLOG(ERROR) << "End of file";
+ }
+ // At this point, assume that we can't actually access
+ // the socket anymore, and indicate an error.
+ OnError();
+ mReceivedIndex = 0;
+ return;
+ }
+
+ while (length-- > 0) {
+ DCHECK(mReceivedIndex < mBufferSize);
+ if (mReceiveBuffer[mReceivedIndex] == mTerminator) {
+ nsDependentCSubstring message(mReceiveBuffer.get(), mReceivedIndex);
+ OnLineRead(aFd, message);
+ if (length > 0) {
+ DCHECK(mReceivedIndex < (mBufferSize - 1));
+ memmove(&mReceiveBuffer[0], &mReceiveBuffer[mReceivedIndex + 1], length);
+ }
+ mReceivedIndex = 0;
+ } else {
+ mReceivedIndex++;
+ }
+ }
+ }
+}
+} // namespace base
diff --git a/ipc/chromium/src/base/message_pump_libevent.h b/ipc/chromium/src/base/message_pump_libevent.h
new file mode 100644
index 000000000..224f70932
--- /dev/null
+++ b/ipc/chromium/src/base/message_pump_libevent.h
@@ -0,0 +1,220 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MESSAGE_PUMP_LIBEVENT_H_
+#define BASE_MESSAGE_PUMP_LIBEVENT_H_
+
+#include "base/message_pump.h"
+#include "base/time.h"
+#include "mozilla/UniquePtr.h"
+
+// Declare structs we need from libevent.h rather than including it
+struct event_base;
+struct event;
+
+class nsDependentCSubstring;
+
+namespace base {
+
+// Class to monitor sockets and issue callbacks when sockets are ready for I/O
+// TODO(dkegel): add support for background file IO somehow
+class MessagePumpLibevent : public MessagePump {
+ public:
+
+ // Object returned by WatchFileDescriptor to manage further watching.
+ class FileDescriptorWatcher {
+ public:
+ FileDescriptorWatcher();
+ ~FileDescriptorWatcher(); // Implicitly calls StopWatchingFileDescriptor.
+
+ // NOTE: These methods aren't called StartWatching()/StopWatching() to
+ // avoid confusion with the win32 ObjectWatcher class.
+
+ // Stop watching the FD, always safe to call. No-op if there's nothing
+ // to do.
+ bool StopWatchingFileDescriptor();
+
+ private:
+ // Called by MessagePumpLibevent, ownership of |e| is transferred to this
+ // object.
+ void Init(event* e, bool is_persistent);
+
+ // Used by MessagePumpLibevent to take ownership of event_.
+ event *ReleaseEvent();
+ friend class MessagePumpLibevent;
+
+ private:
+ bool is_persistent_; // false if this event is one-shot.
+ event* event_;
+ DISALLOW_COPY_AND_ASSIGN(FileDescriptorWatcher);
+ };
+
+  // Used with WatchFileDescriptor to asynchronously monitor the I/O readiness
+  // of a file descriptor.
+ class Watcher {
+ public:
+ virtual ~Watcher() {}
+ // Called from MessageLoop::Run when an FD can be read from/written to
+ // without blocking
+ virtual void OnFileCanReadWithoutBlocking(int fd) = 0;
+ virtual void OnFileCanWriteWithoutBlocking(int fd) = 0;
+ };
+
+ MessagePumpLibevent();
+
+ enum Mode {
+ WATCH_READ = 1 << 0,
+ WATCH_WRITE = 1 << 1,
+ WATCH_READ_WRITE = WATCH_READ | WATCH_WRITE
+ };
+
+  // Have the current thread's message loop watch for a situation in which
+  // reading/writing to the FD can be performed without blocking.
+  // Callers must provide a preallocated FileDescriptorWatcher object which
+  // can later be used to manage the lifetime of this event.
+ // If a FileDescriptorWatcher is passed in which is already attached to
+ // an event, then the effect is cumulative i.e. after the call |controller|
+ // will watch both the previous event and the new one.
+ // If an error occurs while calling this method in a cumulative fashion, the
+ // event previously attached to |controller| is aborted.
+ // Returns true on success.
+ // TODO(dkegel): switch to edge-triggered readiness notification
+ bool WatchFileDescriptor(int fd,
+ bool persistent,
+ Mode mode,
+ FileDescriptorWatcher *controller,
+ Watcher *delegate);
+
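+  // Illustrative usage (a sketch only; MyWatcher and |fd| are hypothetical):
+  //
+  //   class MyWatcher : public MessagePumpLibevent::Watcher {
+  //     virtual void OnFileCanReadWithoutBlocking(int fd) { /* read(fd, ...) */ }
+  //     virtual void OnFileCanWriteWithoutBlocking(int fd) {}
+  //   };
+  //
+  //   MessagePumpLibevent::FileDescriptorWatcher controller;
+  //   MyWatcher watcher;
+  //   pump->WatchFileDescriptor(fd, true /* persistent */,
+  //                             MessagePumpLibevent::WATCH_READ,
+  //                             &controller, &watcher);
+  //   // ... later, or implicitly when |controller| goes out of scope:
+  //   controller.StopWatchingFileDescriptor();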
+
+  // This is analogous to FileDescriptorWatcher above, which really is
+ // just a wrapper around libevent's |struct event|. This class acts
+ // as a sort of "scoped event watcher" in that it guarantees that
+ // when this class is out of scope, the signal-event it wraps is
+ // removed from libevent's guts.
+ //
+ // XXX/cjones: this isn't my favorite API, but preserving it in
+ // order to match code above
+ class SignalEvent {
+ friend class MessagePumpLibevent;
+
+ public:
+ SignalEvent();
+ ~SignalEvent(); // implicitly calls StopCatching()
+
+ // Have libevent forget this event.
+ bool StopCatching();
+
+ private:
+ void Init(event* e);
+ event* ReleaseEvent();
+
+ event* event_;
+
+ DISALLOW_COPY_AND_ASSIGN(SignalEvent);
+ };
+
+ class SignalWatcher {
+ public:
+ virtual ~SignalWatcher() {}
+ // Called from MessageLoop::Run when |sig| has been delivered to
+ // this process
+ virtual void OnSignal(int sig) = 0;
+ };
+
+ // Have the current thread's message loop catch the signal |sig|.
+ // Multiple watchers can catch the same signal; they're all notified
+ // upon its delivery. Callers must provide a preallocated
+ // SignalEvent object which can be used to manage the lifetime of
+ // this event. Returns true on success.
+ bool CatchSignal(int sig,
+ SignalEvent* sigevent,
+ SignalWatcher* delegate);
+
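+  // Illustrative usage (a sketch only; ChildReaper and SIGCHLD are just
+  // examples, not anything this header prescribes):
+  //
+  //   class ChildReaper : public MessagePumpLibevent::SignalWatcher {
+  //     virtual void OnSignal(int sig) { /* e.g. waitpid(...) */ }
+  //   };
+  //
+  //   MessagePumpLibevent::SignalEvent sigevent;
+  //   ChildReaper reaper;
+  //   pump->CatchSignal(SIGCHLD, &sigevent, &reaper);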
+
+ // MessagePump methods:
+ virtual void Run(Delegate* delegate) override;
+ virtual void Quit() override;
+ virtual void ScheduleWork() override;
+ virtual void ScheduleDelayedWork(const TimeTicks& delayed_work_time) override;
+
+ protected:
+
+ virtual ~MessagePumpLibevent();
+
+ private:
+
+ // Risky part of constructor. Returns true on success.
+ bool Init();
+
+ // This flag is set to false when Run should return.
+ bool keep_running_;
+
+ // This flag is set when inside Run.
+ bool in_run_;
+
+ // The time at which we should call DoDelayedWork.
+ TimeTicks delayed_work_time_;
+
+ // Libevent dispatcher. Watches all sockets registered with it, and sends
+ // readiness callbacks when a socket is ready for I/O.
+ event_base* event_base_;
+
+ // Called by libevent to tell us a registered FD can be read/written to.
+ static void OnLibeventNotification(int fd, short flags,
+ void* context);
+
+ // Called by libevent upon receiving a signal
+ static void OnLibeventSignalNotification(int sig, short flags,
+ void* context);
+
+ // Unix pipe used to implement ScheduleWork()
+ // ... callback; called by libevent inside Run() when pipe is ready to read
+ static void OnWakeup(int socket, short flags, void* context);
+ // ... write end; ScheduleWork() writes a single byte to it
+ int wakeup_pipe_in_;
+ // ... read end; OnWakeup reads it and then breaks Run() out of its sleep
+ int wakeup_pipe_out_;
+ // ... libevent wrapper for read end
+ event* wakeup_event_;
+
+ DISALLOW_COPY_AND_ASSIGN(MessagePumpLibevent);
+};
+
+/**
+ * LineWatcher overrides OnFileCanReadWithoutBlocking. It splits the data it
+ * reads on mTerminator and passes each complete line to OnLineRead.
+ */
+class LineWatcher : public MessagePumpLibevent::Watcher
+{
+public:
+ LineWatcher(char aTerminator, int aBufferSize) : mReceivedIndex(0),
+ mBufferSize(aBufferSize),
+ mTerminator(aTerminator)
+ {
+ mReceiveBuffer = mozilla::MakeUnique<char[]>(mBufferSize);
+ }
+
+ ~LineWatcher() {}
+
+protected:
+ /**
+   * OnError will be called when |read| returns an error. Derived classes
+   * should implement this function to handle error cases when needed.
+ */
+ virtual void OnError() {}
+ virtual void OnLineRead(int aFd, nsDependentCSubstring& aMessage) = 0;
+ virtual void OnFileCanWriteWithoutBlocking(int /* aFd */) override {}
+private:
+ virtual void OnFileCanReadWithoutBlocking(int aFd) final override;
+
+ mozilla::UniquePtr<char[]> mReceiveBuffer;
+ int mReceivedIndex;
+ int mBufferSize;
+ char mTerminator;
+};
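+
+// Illustrative sketch of a LineWatcher subclass (hypothetical names): each
+// complete line read from the watched fd, without its terminator, is handed
+// to OnLineRead.
+//
+//   class CommandReader : public LineWatcher {
+//    public:
+//     CommandReader() : LineWatcher('\n', 1024) {}
+//    protected:
+//     virtual void OnLineRead(int aFd, nsDependentCSubstring& aMessage) override {
+//       // parse one command line from |aMessage|
+//     }
+//   };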
+} // namespace base
+
+#endif // BASE_MESSAGE_PUMP_LIBEVENT_H_
diff --git a/ipc/chromium/src/base/message_pump_mac.h b/ipc/chromium/src/base/message_pump_mac.h
new file mode 100644
index 000000000..7c8b3625f
--- /dev/null
+++ b/ipc/chromium/src/base/message_pump_mac.h
@@ -0,0 +1,279 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright (c) 2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// The basis for all native run loops on the Mac is the CFRunLoop. It can be
+// used directly, it can be used as the driving force behind the similar
+// Foundation NSRunLoop, and it can be used to implement higher-level event
+// loops such as the NSApplication event loop.
+//
+// This file introduces a basic CFRunLoop-based implementation of the
+// MessagePump interface called CFRunLoopBase. CFRunLoopBase contains all
+// of the machinery necessary to dispatch events to a delegate, but does not
+// implement the specific run loop. Concrete subclasses must provide their
+// own DoRun and Quit implementations.
+//
+// A concrete subclass that just runs a CFRunLoop loop is provided in
+// MessagePumpCFRunLoop. For an NSRunLoop, the similar MessagePumpNSRunLoop
+// is provided.
+//
+// For the application's event loop, an implementation based on AppKit's
+// NSApplication event system is provided in MessagePumpNSApplication.
+//
+// Typically, MessagePumpNSApplication only makes sense on a Cocoa
+// application's main thread. If a CFRunLoop-based message pump is needed on
+// any other thread, one of the other concrete subclasses is preferable.
+// MessagePumpMac::Create is defined, which returns a new NSApplication-based
+// or NSRunLoop-based MessagePump subclass depending on which thread it is
+// called on.
+
+#ifndef BASE_MESSAGE_PUMP_MAC_H_
+#define BASE_MESSAGE_PUMP_MAC_H_
+
+#include "base/message_pump.h"
+
+#include "base/basictypes.h"
+
+#include <CoreFoundation/CoreFoundation.h>
+#include <IOKit/IOKitLib.h>
+
+#if defined(__OBJC__)
+@class NSAutoreleasePool;
+#else // defined(__OBJC__)
+class NSAutoreleasePool;
+#endif // defined(__OBJC__)
+
+namespace base {
+
+class TimeTicks;
+
+class MessagePumpCFRunLoopBase : public MessagePump {
+ // Needs access to CreateAutoreleasePool.
+ friend class MessagePumpScopedAutoreleasePool;
+ public:
+ MessagePumpCFRunLoopBase();
+ virtual ~MessagePumpCFRunLoopBase();
+
+ // Subclasses should implement the work they need to do in MessagePump::Run
+ // in the DoRun method. MessagePumpCFRunLoopBase::Run calls DoRun directly.
+ // This arrangement is used because MessagePumpCFRunLoopBase needs to set
+ // up and tear down things before and after the "meat" of DoRun.
+ virtual void Run(Delegate* delegate);
+ virtual void DoRun(Delegate* delegate) = 0;
+
+ virtual void ScheduleWork();
+ virtual void ScheduleDelayedWork(const TimeTicks& delayed_work_time);
+
+ protected:
+ // Accessors for private data members to be used by subclasses.
+ CFRunLoopRef run_loop() const { return run_loop_; }
+ int nesting_level() const { return nesting_level_; }
+ int run_nesting_level() const { return run_nesting_level_; }
+
+ // Return an autorelease pool to wrap around any work being performed.
+ // In some cases, CreateAutoreleasePool may return nil intentionally to
+  // prevent an autorelease pool from being created, allowing any
+ // objects autoreleased by work to fall into the current autorelease pool.
+ virtual NSAutoreleasePool* CreateAutoreleasePool();
+
+ private:
+ // Timer callback scheduled by ScheduleDelayedWork. This does not do any
+ // work, but it signals delayed_work_source_ so that delayed work can be
+ // performed within the appropriate priority constraints.
+ static void RunDelayedWorkTimer(CFRunLoopTimerRef timer, void* info);
+
+ // Perform highest-priority work. This is associated with work_source_
+ // signalled by ScheduleWork. The static method calls the instance method;
+ // the instance method returns true if work was done.
+ static void RunWorkSource(void* info);
+ bool RunWork();
+
+ // Perform delayed-priority work. This is associated with
+ // delayed_work_source_ signalled by RunDelayedWorkTimer, and is responsible
+ // for calling ScheduleDelayedWork again if appropriate. The static method
+ // calls the instance method; the instance method returns true if more
+ // delayed work is available.
+ static void RunDelayedWorkSource(void* info);
+ bool RunDelayedWork();
+
+ // Perform idle-priority work. This is normally called by PreWaitObserver,
+ // but is also associated with idle_work_source_. When this function
+ // actually does perform idle work, it will resignal that source. The
+ // static method calls the instance method; the instance method returns
+ // true if idle work was done.
+ static void RunIdleWorkSource(void* info);
+ bool RunIdleWork();
+
+ // Perform work that may have been deferred because it was not runnable
+ // within a nested run loop. This is associated with
+ // nesting_deferred_work_source_ and is signalled by
+ // MaybeScheduleNestingDeferredWork when returning from a nested loop,
+ // so that an outer loop will be able to perform the necessary tasks if it
+ // permits nestable tasks.
+ static void RunNestingDeferredWorkSource(void* info);
+ bool RunNestingDeferredWork();
+
+ // Schedules possible nesting-deferred work to be processed before the run
+ // loop goes to sleep, exits, or begins processing sources at the top of its
+ // loop. If this function detects that a nested loop had run since the
+ // previous attempt to schedule nesting-deferred work, it will schedule a
+ // call to RunNestingDeferredWorkSource.
+ void MaybeScheduleNestingDeferredWork();
+
+ // Observer callback responsible for performing idle-priority work, before
+ // the run loop goes to sleep. Associated with idle_work_observer_.
+ static void PreWaitObserver(CFRunLoopObserverRef observer,
+ CFRunLoopActivity activity, void* info);
+
+ // Observer callback called before the run loop processes any sources.
+ // Associated with pre_source_observer_.
+ static void PreSourceObserver(CFRunLoopObserverRef observer,
+ CFRunLoopActivity activity, void* info);
+
+ // Observer callback called when the run loop starts and stops, at the
+ // beginning and end of calls to CFRunLoopRun. This is used to maintain
+ // nesting_level_. Associated with enter_exit_observer_.
+ static void EnterExitObserver(CFRunLoopObserverRef observer,
+ CFRunLoopActivity activity, void* info);
+
+ // Called by EnterExitObserver after performing maintenance on nesting_level_.
+ // This allows subclasses an opportunity to perform additional processing on
+ // the basis of run loops starting and stopping.
+ virtual void EnterExitRunLoop(CFRunLoopActivity activity);
+
+ // IOKit power state change notification callback, called when the system
+ // enters and leaves the sleep state.
+ static void PowerStateNotification(void* info, io_service_t service,
+ uint32_t message_type,
+ void* message_argument);
+
+ // The thread's run loop.
+ CFRunLoopRef run_loop_;
+
+ // The timer, sources, and observers are described above alongside their
+ // callbacks.
+ CFRunLoopTimerRef delayed_work_timer_;
+ CFRunLoopSourceRef work_source_;
+ CFRunLoopSourceRef delayed_work_source_;
+ CFRunLoopSourceRef idle_work_source_;
+ CFRunLoopSourceRef nesting_deferred_work_source_;
+ CFRunLoopObserverRef pre_wait_observer_;
+ CFRunLoopObserverRef pre_source_observer_;
+ CFRunLoopObserverRef enter_exit_observer_;
+
+ // Objects used for power state notification. See PowerStateNotification.
+ io_connect_t root_power_domain_;
+ IONotificationPortRef power_notification_port_;
+ io_object_t power_notification_object_;
+
+ // (weak) Delegate passed as an argument to the innermost Run call.
+ Delegate* delegate_;
+
+ // The time that delayed_work_timer_ is scheduled to fire. This is tracked
+ // independently of CFRunLoopTimerGetNextFireDate(delayed_work_timer_)
+ // to be able to reset the timer properly after waking from system sleep.
+ // See PowerStateNotification.
+ CFAbsoluteTime delayed_work_fire_time_;
+
+ // The recursion depth of the currently-executing CFRunLoopRun loop on the
+ // run loop's thread. 0 if no run loops are running inside of whatever scope
+ // the object was created in.
+ int nesting_level_;
+
+ // The recursion depth (calculated in the same way as nesting_level_) of the
+ // innermost executing CFRunLoopRun loop started by a call to Run.
+ int run_nesting_level_;
+
+ // The deepest (numerically highest) recursion depth encountered since the
+ // most recent attempt to run nesting-deferred work.
+ int deepest_nesting_level_;
+
+ // "Delegateless" work flags are set when work is ready to be performed but
+ // must wait until a delegate is available to process it. This can happen
+ // when a MessagePumpCFRunLoopBase is instantiated and work arrives without
+ // any call to Run on the stack. The Run method will check for delegateless
+ // work on entry and redispatch it as needed once a delegate is available.
+ bool delegateless_work_;
+ bool delegateless_delayed_work_;
+ bool delegateless_idle_work_;
+
+ DISALLOW_COPY_AND_ASSIGN(MessagePumpCFRunLoopBase);
+};
+
+class MessagePumpCFRunLoop : public MessagePumpCFRunLoopBase {
+ public:
+ MessagePumpCFRunLoop();
+
+ virtual void DoRun(Delegate* delegate);
+ virtual void Quit();
+
+ private:
+ virtual void EnterExitRunLoop(CFRunLoopActivity activity);
+
+ // True if Quit is called to stop the innermost MessagePump
+ // (innermost_quittable_) but some other CFRunLoopRun loop (nesting_level_)
+ // is running inside the MessagePump's innermost Run call.
+ bool quit_pending_;
+
+ DISALLOW_COPY_AND_ASSIGN(MessagePumpCFRunLoop);
+};
+
+class MessagePumpNSRunLoop : public MessagePumpCFRunLoopBase {
+ public:
+ MessagePumpNSRunLoop();
+ virtual ~MessagePumpNSRunLoop();
+
+ virtual void DoRun(Delegate* delegate);
+ virtual void Quit();
+
+ private:
+ // A source that doesn't do anything but provide something signalable
+ // attached to the run loop. This source will be signalled when Quit
+ // is called, to cause the loop to wake up so that it can stop.
+ CFRunLoopSourceRef quit_source_;
+
+ // False after Quit is called.
+ bool keep_running_;
+
+ DISALLOW_COPY_AND_ASSIGN(MessagePumpNSRunLoop);
+};
+
+class MessagePumpNSApplication : public MessagePumpCFRunLoopBase {
+ public:
+ MessagePumpNSApplication();
+
+ virtual void DoRun(Delegate* delegate);
+ virtual void Quit();
+
+ protected:
+ // Returns nil if NSApp is currently in the middle of calling -sendEvent.
+ virtual NSAutoreleasePool* CreateAutoreleasePool();
+
+ private:
+ // False after Quit is called.
+ bool keep_running_;
+
+ // True if DoRun is managing its own run loop as opposed to letting
+ // -[NSApplication run] handle it. The outermost run loop in the application
+ // is managed by -[NSApplication run], inner run loops are handled by a loop
+ // in DoRun.
+ bool running_own_loop_;
+
+ DISALLOW_COPY_AND_ASSIGN(MessagePumpNSApplication);
+};
+
+class MessagePumpMac {
+ public:
+ // Returns a new instance of MessagePumpNSApplication if called on the main
+ // thread. Otherwise, returns a new instance of MessagePumpNSRunLoop.
+ static MessagePump* Create();
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(MessagePumpMac);
+};
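+
+// Illustrative usage (a sketch): the factory picks the appropriate pump for
+// the calling thread, so callers need not name a concrete subclass.
+//
+//   MessagePump* pump = MessagePumpMac::Create();
+//   pump->Run(delegate);  // |delegate| supplied by the owning MessageLoop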
+
+} // namespace base
+
+#endif // BASE_MESSAGE_PUMP_MAC_H_
diff --git a/ipc/chromium/src/base/message_pump_mac.mm b/ipc/chromium/src/base/message_pump_mac.mm
new file mode 100644
index 000000000..703284945
--- /dev/null
+++ b/ipc/chromium/src/base/message_pump_mac.mm
@@ -0,0 +1,752 @@
+// Copyright (c) 2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/message_pump_mac.h"
+
+#import <AppKit/AppKit.h>
+#import <Foundation/Foundation.h>
+#include <IOKit/IOMessage.h>
+#include <IOKit/pwr_mgt/IOPMLib.h>
+
+#include <limits>
+
+#import "base/chrome_application_mac.h"
+#include "base/logging.h"
+#include "base/time.h"
+
+namespace {
+
+void NoOp(void* info) {
+}
+
+const CFTimeInterval kCFTimeIntervalMax =
+ std::numeric_limits<CFTimeInterval>::max();
+
+} // namespace
+
+namespace base {
+
+// A scoper for autorelease pools created from message pump run loops.
+// Avoids dirtying up the ScopedNSAutoreleasePool interface for the rare
+// case where an autorelease pool needs to be passed in.
+class MessagePumpScopedAutoreleasePool {
+ public:
+ explicit MessagePumpScopedAutoreleasePool(MessagePumpCFRunLoopBase* pump) :
+ pool_(pump->CreateAutoreleasePool()) {
+ }
+ ~MessagePumpScopedAutoreleasePool() {
+ [pool_ drain];
+ }
+
+ private:
+ NSAutoreleasePool* pool_;
+ DISALLOW_COPY_AND_ASSIGN(MessagePumpScopedAutoreleasePool);
+};
+
+// Must be called on the run loop thread.
+MessagePumpCFRunLoopBase::MessagePumpCFRunLoopBase()
+ : delegate_(NULL),
+ delayed_work_fire_time_(kCFTimeIntervalMax),
+ nesting_level_(0),
+ run_nesting_level_(0),
+ deepest_nesting_level_(0),
+ delegateless_work_(false),
+ delegateless_delayed_work_(false),
+ delegateless_idle_work_(false) {
+ run_loop_ = CFRunLoopGetCurrent();
+ CFRetain(run_loop_);
+
+ // Set a repeating timer with a preposterous firing time and interval. The
+ // timer will effectively never fire as-is. The firing time will be adjusted
+ // as needed when ScheduleDelayedWork is called.
+ CFRunLoopTimerContext timer_context = CFRunLoopTimerContext();
+ timer_context.info = this;
+ delayed_work_timer_ = CFRunLoopTimerCreate(NULL, // allocator
+ kCFTimeIntervalMax, // fire time
+ kCFTimeIntervalMax, // interval
+ 0, // flags
+ 0, // priority
+ RunDelayedWorkTimer,
+ &timer_context);
+ CFRunLoopAddTimer(run_loop_, delayed_work_timer_, kCFRunLoopCommonModes);
+
+ CFRunLoopSourceContext source_context = CFRunLoopSourceContext();
+ source_context.info = this;
+ source_context.perform = RunWorkSource;
+ work_source_ = CFRunLoopSourceCreate(NULL, // allocator
+ 1, // priority
+ &source_context);
+ CFRunLoopAddSource(run_loop_, work_source_, kCFRunLoopCommonModes);
+
+ source_context.perform = RunDelayedWorkSource;
+ delayed_work_source_ = CFRunLoopSourceCreate(NULL, // allocator
+ 2, // priority
+ &source_context);
+ CFRunLoopAddSource(run_loop_, delayed_work_source_, kCFRunLoopCommonModes);
+
+ source_context.perform = RunIdleWorkSource;
+ idle_work_source_ = CFRunLoopSourceCreate(NULL, // allocator
+ 3, // priority
+ &source_context);
+ CFRunLoopAddSource(run_loop_, idle_work_source_, kCFRunLoopCommonModes);
+
+ source_context.perform = RunNestingDeferredWorkSource;
+ nesting_deferred_work_source_ = CFRunLoopSourceCreate(NULL, // allocator
+ 0, // priority
+ &source_context);
+ CFRunLoopAddSource(run_loop_, nesting_deferred_work_source_,
+ kCFRunLoopCommonModes);
+
+ CFRunLoopObserverContext observer_context = CFRunLoopObserverContext();
+ observer_context.info = this;
+ pre_wait_observer_ = CFRunLoopObserverCreate(NULL, // allocator
+ kCFRunLoopBeforeWaiting,
+ true, // repeat
+ 0, // priority
+ PreWaitObserver,
+ &observer_context);
+ CFRunLoopAddObserver(run_loop_, pre_wait_observer_, kCFRunLoopCommonModes);
+
+ pre_source_observer_ = CFRunLoopObserverCreate(NULL, // allocator
+ kCFRunLoopBeforeSources,
+ true, // repeat
+ 0, // priority
+ PreSourceObserver,
+ &observer_context);
+ CFRunLoopAddObserver(run_loop_, pre_source_observer_, kCFRunLoopCommonModes);
+
+ enter_exit_observer_ = CFRunLoopObserverCreate(NULL, // allocator
+ kCFRunLoopEntry |
+ kCFRunLoopExit,
+ true, // repeat
+ 0, // priority
+ EnterExitObserver,
+ &observer_context);
+ CFRunLoopAddObserver(run_loop_, enter_exit_observer_, kCFRunLoopCommonModes);
+
+ root_power_domain_ = IORegisterForSystemPower(this,
+ &power_notification_port_,
+ PowerStateNotification,
+ &power_notification_object_);
+ if (root_power_domain_ != MACH_PORT_NULL) {
+ CFRunLoopAddSource(
+ run_loop_,
+ IONotificationPortGetRunLoopSource(power_notification_port_),
+ kCFRunLoopCommonModes);
+ }
+}
+
+// Ideally called on the run loop thread. If other run loops were running
+// lower on the run loop thread's stack when this object was created, the
+// same number of run loops must be running when this object is destroyed.
+MessagePumpCFRunLoopBase::~MessagePumpCFRunLoopBase() {
+ if (root_power_domain_ != MACH_PORT_NULL) {
+ CFRunLoopRemoveSource(
+ run_loop_,
+ IONotificationPortGetRunLoopSource(power_notification_port_),
+ kCFRunLoopCommonModes);
+ IODeregisterForSystemPower(&power_notification_object_);
+ IOServiceClose(root_power_domain_);
+ IONotificationPortDestroy(power_notification_port_);
+ }
+
+ CFRunLoopRemoveObserver(run_loop_, enter_exit_observer_,
+ kCFRunLoopCommonModes);
+ CFRelease(enter_exit_observer_);
+
+ CFRunLoopRemoveObserver(run_loop_, pre_source_observer_,
+ kCFRunLoopCommonModes);
+ CFRelease(pre_source_observer_);
+
+ CFRunLoopRemoveObserver(run_loop_, pre_wait_observer_,
+ kCFRunLoopCommonModes);
+ CFRelease(pre_wait_observer_);
+
+ CFRunLoopRemoveSource(run_loop_, nesting_deferred_work_source_,
+ kCFRunLoopCommonModes);
+ CFRelease(nesting_deferred_work_source_);
+
+ CFRunLoopRemoveSource(run_loop_, idle_work_source_, kCFRunLoopCommonModes);
+ CFRelease(idle_work_source_);
+
+ CFRunLoopRemoveSource(run_loop_, delayed_work_source_, kCFRunLoopCommonModes);
+ CFRelease(delayed_work_source_);
+
+ CFRunLoopRemoveSource(run_loop_, work_source_, kCFRunLoopCommonModes);
+ CFRelease(work_source_);
+
+ CFRunLoopRemoveTimer(run_loop_, delayed_work_timer_, kCFRunLoopCommonModes);
+ CFRelease(delayed_work_timer_);
+
+ CFRelease(run_loop_);
+}
+
+// Must be called on the run loop thread.
+void MessagePumpCFRunLoopBase::Run(Delegate* delegate) {
+ // nesting_level_ will be incremented in EnterExitRunLoop, so set
+ // run_nesting_level_ accordingly.
+ int last_run_nesting_level = run_nesting_level_;
+ run_nesting_level_ = nesting_level_ + 1;
+
+ Delegate* last_delegate = delegate_;
+ delegate_ = delegate;
+
+ if (delegate) {
+ // If any work showed up but could not be dispatched for want of a
+ // delegate, set it up for dispatch again now that a delegate is
+ // available.
+ if (delegateless_work_) {
+ CFRunLoopSourceSignal(work_source_);
+ delegateless_work_ = false;
+ }
+ if (delegateless_delayed_work_) {
+ CFRunLoopSourceSignal(delayed_work_source_);
+ delegateless_delayed_work_ = false;
+ }
+ if (delegateless_idle_work_) {
+ CFRunLoopSourceSignal(idle_work_source_);
+ delegateless_idle_work_ = false;
+ }
+ }
+
+ DoRun(delegate);
+
+ // Restore the previous state of the object.
+ delegate_ = last_delegate;
+ run_nesting_level_ = last_run_nesting_level;
+}
+
+// May be called on any thread.
+void MessagePumpCFRunLoopBase::ScheduleWork() {
+ CFRunLoopSourceSignal(work_source_);
+ CFRunLoopWakeUp(run_loop_);
+}
+
+// Must be called on the run loop thread.
+void MessagePumpCFRunLoopBase::ScheduleDelayedWork(
+ const TimeTicks& delayed_work_time) {
+ TimeDelta delta = delayed_work_time - TimeTicks::Now();
+ delayed_work_fire_time_ = CFAbsoluteTimeGetCurrent() + delta.InSecondsF();
+ CFRunLoopTimerSetNextFireDate(delayed_work_timer_, delayed_work_fire_time_);
+}
+
+// Called from the run loop.
+// static
+void MessagePumpCFRunLoopBase::RunDelayedWorkTimer(CFRunLoopTimerRef timer,
+ void* info) {
+ MessagePumpCFRunLoopBase* self = static_cast<MessagePumpCFRunLoopBase*>(info);
+
+ // The timer won't fire again until it's reset.
+ self->delayed_work_fire_time_ = kCFTimeIntervalMax;
+
+ // CFRunLoopTimers fire outside of the priority scheme for CFRunLoopSources.
+ // In order to establish the proper priority where delegate_->DoDelayedWork
+ // can only be called if delegate_->DoWork returns false, the timer used
+ // to schedule delayed work must signal a CFRunLoopSource set at a lower
+ // priority than the one used for delegate_->DoWork.
+ CFRunLoopSourceSignal(self->delayed_work_source_);
+}
+
+// Called from the run loop.
+// static
+void MessagePumpCFRunLoopBase::RunWorkSource(void* info) {
+ MessagePumpCFRunLoopBase* self = static_cast<MessagePumpCFRunLoopBase*>(info);
+ self->RunWork();
+}
+
+// Called by MessagePumpCFRunLoopBase::RunWorkSource.
+bool MessagePumpCFRunLoopBase::RunWork() {
+ if (!delegate_) {
+ // This point can be reached with a NULL delegate_ if Run is not on the
+ // stack but foreign code is spinning the CFRunLoop. Arrange to come back
+ // here when a delegate is available.
+ delegateless_work_ = true;
+ return false;
+ }
+
+ // The NSApplication-based run loop only drains the autorelease pool at each
+ // UI event (NSEvent). The autorelease pool is not drained for each
+ // CFRunLoopSource target that's run. Use a local pool for any autoreleased
+ // objects if the app is not currently handling a UI event to ensure they're
+ // released promptly even in the absence of UI events.
+ MessagePumpScopedAutoreleasePool autorelease_pool(this);
+
+ // Call DoWork once, and if something was done, arrange to come back here
+ // again as long as the loop is still running.
+ bool did_work = delegate_->DoWork();
+ if (did_work) {
+ CFRunLoopSourceSignal(work_source_);
+ }
+
+ return did_work;
+}
+
+// Called from the run loop.
+// static
+void MessagePumpCFRunLoopBase::RunDelayedWorkSource(void* info) {
+ MessagePumpCFRunLoopBase* self = static_cast<MessagePumpCFRunLoopBase*>(info);
+ self->RunDelayedWork();
+}
+
+// Called by MessagePumpCFRunLoopBase::RunDelayedWorkSource.
+bool MessagePumpCFRunLoopBase::RunDelayedWork() {
+ if (!delegate_) {
+ // This point can be reached with a NULL delegate_ if Run is not on the
+ // stack but foreign code is spinning the CFRunLoop. Arrange to come back
+ // here when a delegate is available.
+ delegateless_delayed_work_ = true;
+ return false;
+ }
+
+ // The NSApplication-based run loop only drains the autorelease pool at each
+ // UI event (NSEvent). The autorelease pool is not drained for each
+ // CFRunLoopSource target that's run. Use a local pool for any autoreleased
+ // objects if the app is not currently handling a UI event to ensure they're
+ // released promptly even in the absence of UI events.
+ MessagePumpScopedAutoreleasePool autorelease_pool(this);
+
+ TimeTicks next_time;
+ delegate_->DoDelayedWork(&next_time);
+
+ bool more_work = !next_time.is_null();
+ if (more_work) {
+ TimeDelta delay = next_time - TimeTicks::Now();
+ if (delay > TimeDelta()) {
+ // There's more delayed work to be done in the future.
+ ScheduleDelayedWork(next_time);
+ } else {
+ // There's more delayed work to be done, and its time is in the past.
+ // Arrange to come back here directly as long as the loop is still
+ // running.
+ CFRunLoopSourceSignal(delayed_work_source_);
+ }
+ }
+
+ return more_work;
+}
+
+// Called from the run loop.
+// static
+void MessagePumpCFRunLoopBase::RunIdleWorkSource(void* info) {
+ MessagePumpCFRunLoopBase* self = static_cast<MessagePumpCFRunLoopBase*>(info);
+ self->RunIdleWork();
+}
+
+// Called by MessagePumpCFRunLoopBase::RunIdleWorkSource.
+bool MessagePumpCFRunLoopBase::RunIdleWork() {
+ if (!delegate_) {
+ // This point can be reached with a NULL delegate_ if Run is not on the
+ // stack but foreign code is spinning the CFRunLoop. Arrange to come back
+ // here when a delegate is available.
+ delegateless_idle_work_ = true;
+ return false;
+ }
+
+ // The NSApplication-based run loop only drains the autorelease pool at each
+ // UI event (NSEvent). The autorelease pool is not drained for each
+ // CFRunLoopSource target that's run. Use a local pool for any autoreleased
+ // objects if the app is not currently handling a UI event to ensure they're
+ // released promptly even in the absence of UI events.
+ MessagePumpScopedAutoreleasePool autorelease_pool(this);
+
+ // Call DoIdleWork once, and if something was done, arrange to come back here
+ // again as long as the loop is still running.
+ bool did_work = delegate_->DoIdleWork();
+ if (did_work) {
+ CFRunLoopSourceSignal(idle_work_source_);
+ }
+
+ return did_work;
+}
+
+// Called from the run loop.
+// static
+void MessagePumpCFRunLoopBase::RunNestingDeferredWorkSource(void* info) {
+ MessagePumpCFRunLoopBase* self = static_cast<MessagePumpCFRunLoopBase*>(info);
+ self->RunNestingDeferredWork();
+}
+
+// Called by MessagePumpCFRunLoopBase::RunNestingDeferredWorkSource.
+bool MessagePumpCFRunLoopBase::RunNestingDeferredWork() {
+ if (!delegate_) {
+ // This point can be reached with a NULL delegate_ if Run is not on the
+ // stack but foreign code is spinning the CFRunLoop. There's no sense in
+ // attempting to do any work or signalling the work sources because
+ // without a delegate, work is not possible.
+ return false;
+ }
+
+ // Immediately try work in priority order.
+ if (!RunWork()) {
+ if (!RunDelayedWork()) {
+ if (!RunIdleWork()) {
+ return false;
+ }
+ } else {
+ // There was no work, and delayed work was done. Arrange for the loop
+ // to try non-nestable idle work on a subsequent pass.
+ CFRunLoopSourceSignal(idle_work_source_);
+ }
+ } else {
+ // Work was done. Arrange for the loop to try non-nestable delayed and
+ // idle work on a subsequent pass.
+ CFRunLoopSourceSignal(delayed_work_source_);
+ CFRunLoopSourceSignal(idle_work_source_);
+ }
+
+ return true;
+}
+
+// Called before the run loop goes to sleep or exits, or processes sources.
+void MessagePumpCFRunLoopBase::MaybeScheduleNestingDeferredWork() {
+ // deepest_nesting_level_ is set as run loops are entered. If the deepest
+ // level encountered is deeper than the current level, a nested loop
+ // (relative to the current level) ran since the last time nesting-deferred
+ // work was scheduled. When that situation is encountered, schedule
+ // nesting-deferred work in case any work was deferred because nested work
+ // was disallowed.
+ if (deepest_nesting_level_ > nesting_level_) {
+ deepest_nesting_level_ = nesting_level_;
+ CFRunLoopSourceSignal(nesting_deferred_work_source_);
+ }
+}
+
+// Called from the run loop.
+// static
+void MessagePumpCFRunLoopBase::PreWaitObserver(CFRunLoopObserverRef observer,
+ CFRunLoopActivity activity,
+ void* info) {
+ MessagePumpCFRunLoopBase* self = static_cast<MessagePumpCFRunLoopBase*>(info);
+
+ // Attempt to do some idle work before going to sleep.
+ self->RunIdleWork();
+
+ // The run loop is about to go to sleep. If any of the work done since it
+ // started or woke up resulted in a nested run loop running,
+ // nesting-deferred work may have accumulated. Schedule it for processing
+ // if appropriate.
+ self->MaybeScheduleNestingDeferredWork();
+}
+
+// Called from the run loop.
+// static
+void MessagePumpCFRunLoopBase::PreSourceObserver(CFRunLoopObserverRef observer,
+ CFRunLoopActivity activity,
+ void* info) {
+ MessagePumpCFRunLoopBase* self = static_cast<MessagePumpCFRunLoopBase*>(info);
+
+ // The run loop has reached the top of the loop and is about to begin
+ // processing sources. If the last iteration of the loop at this nesting
+ // level did not sleep or exit, nesting-deferred work may have accumulated
+ // if a nested loop ran. Schedule nesting-deferred work for processing if
+ // appropriate.
+ self->MaybeScheduleNestingDeferredWork();
+}
+
+// Called from the run loop.
+// static
+void MessagePumpCFRunLoopBase::EnterExitObserver(CFRunLoopObserverRef observer,
+ CFRunLoopActivity activity,
+ void* info) {
+ MessagePumpCFRunLoopBase* self = static_cast<MessagePumpCFRunLoopBase*>(info);
+
+ switch (activity) {
+ case kCFRunLoopEntry:
+ ++self->nesting_level_;
+ if (self->nesting_level_ > self->deepest_nesting_level_) {
+ self->deepest_nesting_level_ = self->nesting_level_;
+ }
+ break;
+
+ case kCFRunLoopExit:
+ // Not all run loops go to sleep. If a run loop is stopped before it
+ // goes to sleep due to a CFRunLoopStop call, or if the timeout passed
+ // to CFRunLoopRunInMode expires, the run loop may proceed directly from
+ // handling sources to exiting without any sleep. This most commonly
+ // occurs when CFRunLoopRunInMode is passed a timeout of 0, causing it
+ // to make a single pass through the loop and exit without sleep. Some
+ // native loops use CFRunLoop in this way. Because PreWaitObserver will
+    // not be called in these cases, MaybeScheduleNestingDeferredWork needs
+ // to be called here, as the run loop exits.
+ //
+ // MaybeScheduleNestingDeferredWork consults self->nesting_level_
+ // to determine whether to schedule nesting-deferred work. It expects
+ // the nesting level to be set to the depth of the loop that is going
+ // to sleep or exiting. It must be called before decrementing the
+ // value so that the value still corresponds to the level of the exiting
+ // loop.
+ self->MaybeScheduleNestingDeferredWork();
+ --self->nesting_level_;
+ break;
+
+ default:
+ break;
+ }
+
+ self->EnterExitRunLoop(activity);
+}
+
+// Called from the run loop.
+// static
+void MessagePumpCFRunLoopBase::PowerStateNotification(void* info,
+ io_service_t service,
+ uint32_t message_type,
+ void* message_argument) {
+ // CFRunLoopTimer (NSTimer) is scheduled in terms of CFAbsoluteTime, which
+ // measures the number of seconds since 2001-01-01 00:00:00.0 Z. It is
+ // implemented in terms of kernel ticks, as in mach_absolute_time. While an
+ // offset and scale factor can be applied to convert between the two time
+ // bases at any time after boot, the kernel clock stops while the system is
+ // asleep, altering the offset. (The offset will also change when the
+ // real-time clock is adjusted.) CFRunLoopTimers are not readjusted to take
+ // this into account when the system wakes up, so any timers that were
+ // pending while the system was asleep will be delayed by the sleep
+ // duration.
+ //
+ // The MessagePump interface assumes that scheduled delayed work will be
+ // performed at the time ScheduleDelayedWork was asked to perform it. The
+ // delay caused by the CFRunLoopTimer not firing at the appropriate time
+ // results in a stall of queued delayed work when the system wakes up.
+ // With this limitation, scheduled work would not be performed until
+ // (system wake time + scheduled work time - system sleep time), while it
+ // would be expected to be performed at (scheduled work time).
+ //
+ // To work around this problem, when the system wakes up from sleep, if a
+ // delayed work timer is pending, it is rescheduled to fire at the original
+ // time that it was scheduled to fire.
+ //
+ // This mechanism is not resilient if the real-time clock does not maintain
+ // stable time while the system is sleeping, but it matches the behavior of
+ // the various other MessagePump implementations, and MessageLoop seems to
+ // be limited in the same way.
+ //
+ // References
+ // - Chris Kane, "NSTimer and deep sleep," cocoa-dev@lists.apple.com,
+ // http://lists.apple.com/archives/Cocoa-dev/2002/May/msg01547.html
+ // - Apple Technical Q&A QA1340, "Registering and unregistering for sleep
+ // and wake notifications,"
+ // http://developer.apple.com/mac/library/qa/qa2004/qa1340.html
+ // - Core Foundation source code, CF-550/CFRunLoop.c and CF-550/CFDate.c,
+ // http://www.opensource.apple.com/
+
+ MessagePumpCFRunLoopBase* self = static_cast<MessagePumpCFRunLoopBase*>(info);
+
+ switch (message_type) {
+ case kIOMessageSystemWillPowerOn:
+ if (self->delayed_work_fire_time_ != kCFTimeIntervalMax) {
+ CFRunLoopTimerSetNextFireDate(self->delayed_work_timer_,
+ self->delayed_work_fire_time_);
+ }
+ break;
+
+ case kIOMessageSystemWillSleep:
+ case kIOMessageCanSystemSleep:
+ // The system will wait for 30 seconds before entering sleep if neither
+      // IOAllowPowerChange nor IOCancelPowerChange is called. That would be
+ // pretty antisocial.
+ IOAllowPowerChange(self->root_power_domain_,
+ reinterpret_cast<long>(message_argument));
+ break;
+
+ default:
+ break;
+ }
+}
+
+// Called by MessagePumpCFRunLoopBase::EnterExitRunLoop. The default
+// implementation is a no-op.
+void MessagePumpCFRunLoopBase::EnterExitRunLoop(CFRunLoopActivity activity) {
+}
+
+// Base version returns a standard NSAutoreleasePool.
+NSAutoreleasePool* MessagePumpCFRunLoopBase::CreateAutoreleasePool() {
+ return [[NSAutoreleasePool alloc] init];
+}
+
+MessagePumpCFRunLoop::MessagePumpCFRunLoop()
+ : quit_pending_(false) {
+}
+
+// Called by MessagePumpCFRunLoopBase::DoRun. If other CFRunLoopRun loops were
+// running lower on the run loop thread's stack when this object was created,
+// the same number of CFRunLoopRun loops must be running for the outermost call
+// to Run. Run/DoRun are reentrant after that point.
+void MessagePumpCFRunLoop::DoRun(Delegate* delegate) {
+ // This is completely identical to calling CFRunLoopRun(), except autorelease
+ // pool management is introduced.
+ int result;
+ do {
+ MessagePumpScopedAutoreleasePool autorelease_pool(this);
+ result = CFRunLoopRunInMode(kCFRunLoopDefaultMode,
+ kCFTimeIntervalMax,
+ false);
+ } while (result != kCFRunLoopRunStopped && result != kCFRunLoopRunFinished);
+}
+
+// Must be called on the run loop thread.
+void MessagePumpCFRunLoop::Quit() {
+ // Stop the innermost run loop managed by this MessagePumpCFRunLoop object.
+ if (nesting_level() == run_nesting_level()) {
+ // This object is running the innermost loop, just stop it.
+ CFRunLoopStop(run_loop());
+ } else {
+ // There's another loop running inside the loop managed by this object.
+ // In other words, someone else called CFRunLoopRunInMode on the same
+ // thread, deeper on the stack than the deepest Run call. Don't preempt
+ // other run loops, just mark this object to quit the innermost Run as
+ // soon as the other inner loops not managed by Run are done.
+ quit_pending_ = true;
+ }
+}
+
+// Called by MessagePumpCFRunLoopBase::EnterExitObserver.
+void MessagePumpCFRunLoop::EnterExitRunLoop(CFRunLoopActivity activity) {
+ if (activity == kCFRunLoopExit &&
+ nesting_level() == run_nesting_level() &&
+ quit_pending_) {
+ // Quit was called while loops other than those managed by this object
+ // were running further inside a run loop managed by this object. Now
+ // that all unmanaged inner run loops are gone, stop the loop running
+ // just inside Run.
+ CFRunLoopStop(run_loop());
+ quit_pending_ = false;
+ }
+}
+
+MessagePumpNSRunLoop::MessagePumpNSRunLoop()
+ : keep_running_(true) {
+ CFRunLoopSourceContext source_context = CFRunLoopSourceContext();
+ source_context.perform = NoOp;
+ quit_source_ = CFRunLoopSourceCreate(NULL, // allocator
+ 0, // priority
+ &source_context);
+ CFRunLoopAddSource(run_loop(), quit_source_, kCFRunLoopCommonModes);
+}
+
+MessagePumpNSRunLoop::~MessagePumpNSRunLoop() {
+ CFRunLoopRemoveSource(run_loop(), quit_source_, kCFRunLoopCommonModes);
+ CFRelease(quit_source_);
+}
+
+void MessagePumpNSRunLoop::DoRun(Delegate* delegate) {
+ while (keep_running_) {
+ // NSRunLoop manages autorelease pools itself.
+ [[NSRunLoop currentRunLoop] runMode:NSDefaultRunLoopMode
+ beforeDate:[NSDate distantFuture]];
+ }
+
+ keep_running_ = true;
+}
+
+void MessagePumpNSRunLoop::Quit() {
+ keep_running_ = false;
+ CFRunLoopSourceSignal(quit_source_);
+ CFRunLoopWakeUp(run_loop());
+}
+
+MessagePumpNSApplication::MessagePumpNSApplication()
+ : keep_running_(true),
+ running_own_loop_(false) {
+}
+
+void MessagePumpNSApplication::DoRun(Delegate* delegate) {
+ bool last_running_own_loop_ = running_own_loop_;
+
+ // TODO(dmaclach): Get rid of this gratuitous sharedApplication.
+ // Tests should be setting up their applications on their own.
+ [CrApplication sharedApplication];
+
+ if (![NSApp isRunning]) {
+ running_own_loop_ = false;
+ // NSApplication manages autorelease pools itself when run this way.
+ [NSApp run];
+ } else {
+ running_own_loop_ = true;
+ NSDate* distant_future = [NSDate distantFuture];
+ while (keep_running_) {
+ MessagePumpScopedAutoreleasePool autorelease_pool(this);
+ NSEvent* event = [NSApp nextEventMatchingMask:NSAnyEventMask
+ untilDate:distant_future
+ inMode:NSDefaultRunLoopMode
+ dequeue:YES];
+ if (event) {
+ [NSApp sendEvent:event];
+ }
+ }
+ keep_running_ = true;
+ }
+
+ running_own_loop_ = last_running_own_loop_;
+}
+
+void MessagePumpNSApplication::Quit() {
+ if (!running_own_loop_) {
+ [NSApp stop:nil];
+ } else {
+ keep_running_ = false;
+ }
+
+ // Send a fake event to wake the loop up.
+ [NSApp postEvent:[NSEvent otherEventWithType:NSApplicationDefined
+ location:NSMakePoint(0, 0)
+ modifierFlags:0
+ timestamp:0
+ windowNumber:0
+ context:NULL
+ subtype:0
+ data1:0
+ data2:0]
+ atStart:NO];
+}
+
+// Prevents an autorelease pool from being created if the app is in the midst of
+// handling a UI event because various parts of AppKit depend on objects that
+// are created while handling a UI event to be autoreleased in the event loop.
+// An example of this is NSWindowController. When a window with a window
+// controller is closed it goes through a stack like this:
+// (Several stack frames elided for clarity)
+//
+// #0 [NSWindowController autorelease]
+// #1 DoAClose
+// #2 MessagePumpCFRunLoopBase::DoWork()
+// #3 [NSRunLoop run]
+// #4 [NSButton performClick:]
+// #5 [NSWindow sendEvent:]
+// #6 [NSApp sendEvent:]
+// #7 [NSApp run]
+//
+// -performClick: spins a nested run loop. If the pool created in DoWork was a
+// standard NSAutoreleasePool, it would release the objects that were
+// autoreleased into it once DoWork released it. This would cause the window
+// controller, which autoreleased itself in frame #0, to release itself, and
+// possibly free itself. Unfortunately this window controller controls the
+// window in frame #5. When the stack is unwound to frame #5, the window would
+// no longer exist and crashes may occur. Apple gets around this by never
+// releasing the pool it creates in frame #4, and letting frame #7 clean it up
+// when it cleans up the pool that wraps frame #7. When an autorelease pool is
+// released it releases all other pools that were created after it on the
+// autorelease pool stack.
+//
+// CrApplication is responsible for setting handlingSendEvent to true just
+// before it sends the event through the event handling mechanism, and
+// returning it to its previous value once the event has been sent.
+NSAutoreleasePool* MessagePumpNSApplication::CreateAutoreleasePool() {
+ NSAutoreleasePool* pool = nil;
+ DCHECK([NSApp isKindOfClass:[CrApplication class]]);
+ if (![static_cast<CrApplication*>(NSApp) isHandlingSendEvent]) {
+ pool = MessagePumpCFRunLoopBase::CreateAutoreleasePool();
+ }
+ return pool;
+}
+
+// static
+MessagePump* MessagePumpMac::Create() {
+ if ([NSThread isMainThread]) {
+ return new MessagePumpNSApplication;
+ }
+
+ return new MessagePumpNSRunLoop;
+}
+
+} // namespace base
diff --git a/ipc/chromium/src/base/message_pump_qt.cc b/ipc/chromium/src/base/message_pump_qt.cc
new file mode 100644
index 000000000..0fcd685b9
--- /dev/null
+++ b/ipc/chromium/src/base/message_pump_qt.cc
@@ -0,0 +1,191 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <qabstracteventdispatcher.h>
+#include <qevent.h>
+#include <QCoreApplication>
+#include <QThread>
+#include <qtimer.h>
+
+#include "base/message_pump_qt.h"
+
+#include <fcntl.h>
+#include <limits>
+#include <math.h>
+
+#include "base/eintr_wrapper.h"
+#include "base/logging.h"
+#include "base/platform_thread.h"
+
+namespace {
+// Cached QEvent user type, registered for our event system
+static int sPokeEvent;
+} // namespace
+
+namespace base {
+
+MessagePumpForUI::MessagePumpForUI()
+ : state_(NULL),
+ qt_pump(*this)
+{
+}
+
+MessagePumpForUI::~MessagePumpForUI() {
+}
+
+MessagePumpQt::MessagePumpQt(MessagePumpForUI &aPump)
+ : pump(aPump), mTimer(new QTimer(this))
+{
+ // Register our custom event type, to use in qApp event loop
+ sPokeEvent = QEvent::registerEventType();
+ connect(mTimer, SIGNAL(timeout()), this, SLOT(dispatchDelayed()));
+ mTimer->setSingleShot(true);
+}
+
+MessagePumpQt::~MessagePumpQt()
+{
+ mTimer->stop();
+ delete mTimer;
+}
+
+bool
+MessagePumpQt::event(QEvent *e)
+{
+ if (e->type() == sPokeEvent) {
+ pump.HandleDispatch();
+ return true;
+ }
+ return false;
+}
+
+void
+MessagePumpQt::scheduleDelayedIfNeeded(const TimeTicks& delayed_work_time)
+{
+ if (delayed_work_time.is_null()) {
+ return;
+ }
+
+ if (mTimer->isActive()) {
+ mTimer->stop();
+ }
+
+ TimeDelta later = delayed_work_time - TimeTicks::Now();
+  // later.InMilliseconds() returns an int64_t, but QTimer::start() only accepts
+  // an int, and std::min only works on exactly matching types.
+ int laterMsecs = later.InMilliseconds() > std::numeric_limits<int>::max() ?
+ std::numeric_limits<int>::max() : later.InMilliseconds();
+ mTimer->start(laterMsecs > 0 ? laterMsecs : 0);
+}
+
+void
+MessagePumpQt::dispatchDelayed()
+{
+ pump.HandleDispatch();
+}
+
+void MessagePumpForUI::Run(Delegate* delegate) {
+ RunState state;
+ state.delegate = delegate;
+ state.should_quit = false;
+ state.run_depth = state_ ? state_->run_depth + 1 : 1;
+ // We really only do a single task for each iteration of the loop. If we
+ // have done something, assume there is likely something more to do. This
+  // will mean that we don't block on the message pump until there is nothing
+ // more to do. We also set this to true to make sure not to block on the
+ // first iteration of the loop, so RunAllPending() works correctly.
+ bool more_work_is_plausible = true;
+
+ RunState* previous_state = state_;
+ state_ = &state;
+
+  for (;;) {
+ QEventLoop::ProcessEventsFlags block = QEventLoop::AllEvents;
+ if (!more_work_is_plausible) {
+ block |= QEventLoop::WaitForMoreEvents;
+ }
+
+ QAbstractEventDispatcher* dispatcher =
+ QAbstractEventDispatcher::instance(QThread::currentThread());
+ // An assertion seems too much here, as during startup,
+ // the dispatcher might not be ready yet.
+ if (!dispatcher) {
+ return;
+ }
+
+    // processEvents() returns true if an event has been processed.
+ more_work_is_plausible = dispatcher->processEvents(block);
+
+ if (state_->should_quit) {
+ break;
+ }
+
+ more_work_is_plausible |= state_->delegate->DoWork();
+ if (state_->should_quit) {
+ break;
+ }
+
+ more_work_is_plausible |=
+ state_->delegate->DoDelayedWork(&delayed_work_time_);
+ if (state_->should_quit) {
+ break;
+ }
+
+ qt_pump.scheduleDelayedIfNeeded(delayed_work_time_);
+
+ if (more_work_is_plausible) {
+ continue;
+ }
+
+ more_work_is_plausible = state_->delegate->DoIdleWork();
+ if (state_->should_quit) {
+ break;
+ }
+ }
+
+ state_ = previous_state;
+}
+
+void MessagePumpForUI::HandleDispatch() {
+ if (state_->should_quit) {
+ return;
+ }
+
+ if (state_->delegate->DoWork()) {
+    // There might be more work to do (see the more_work_is_plausible
+    // variable in Run above), so call ScheduleWork() to keep going.
+ ScheduleWork();
+ }
+
+ if (state_->should_quit) {
+ return;
+ }
+
+ state_->delegate->DoDelayedWork(&delayed_work_time_);
+ qt_pump.scheduleDelayedIfNeeded(delayed_work_time_);
+}
+
+void MessagePumpForUI::Quit() {
+ if (state_) {
+ state_->should_quit = true;
+ } else {
+ NOTREACHED() << "Quit called outside Run!";
+ }
+}
+
+void MessagePumpForUI::ScheduleWork() {
+ QCoreApplication::postEvent(&qt_pump,
+ new QEvent((QEvent::Type) sPokeEvent));
+}
+
+void MessagePumpForUI::ScheduleDelayedWork(const TimeTicks& delayed_work_time) {
+  // In the GLib implementation, a work source is defined that explicitly checks
+  // how much time has passed. Here, on Qt, we can use a QTimer that enqueues our
+  // event signal in the event queue.
+ delayed_work_time_ = delayed_work_time;
+ qt_pump.scheduleDelayedIfNeeded(delayed_work_time_);
+}
+
+} // namespace base
diff --git a/ipc/chromium/src/base/message_pump_qt.h b/ipc/chromium/src/base/message_pump_qt.h
new file mode 100644
index 000000000..625b10095
--- /dev/null
+++ b/ipc/chromium/src/base/message_pump_qt.h
@@ -0,0 +1,85 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MESSAGE_PUMP_QT_H_
+#define BASE_MESSAGE_PUMP_QT_H_
+
+#include <qobject.h>
+
+#include "base/message_pump.h"
+#include "base/time.h"
+
+class QTimer;
+
+namespace base {
+
+class MessagePumpForUI;
+
+class MessagePumpQt : public QObject {
+ Q_OBJECT
+
+ public:
+ MessagePumpQt(MessagePumpForUI &pump);
+ ~MessagePumpQt();
+
+  virtual bool event(QEvent *e);
+ void scheduleDelayedIfNeeded(const TimeTicks& delayed_work_time);
+
+ public Q_SLOTS:
+ void dispatchDelayed();
+
+ private:
+ base::MessagePumpForUI &pump;
+ QTimer* mTimer;
+};
+
+// This class implements a MessagePump needed for TYPE_UI MessageLoops on
+// OS_LINUX platforms using the QApplication event loop.
+class MessagePumpForUI : public MessagePump {
+
+ public:
+ MessagePumpForUI();
+ ~MessagePumpForUI();
+
+ virtual void Run(Delegate* delegate);
+ virtual void Quit();
+ virtual void ScheduleWork();
+ virtual void ScheduleDelayedWork(const TimeTicks& delayed_work_time);
+
+ // Internal methods used for processing the pump callbacks. They are
+ // public for simplicity but should not be used directly.
+ // HandleDispatch is called after the poll has completed.
+ void HandleDispatch();
+
+ private:
+ // We may make recursive calls to Run, so we save state that needs to be
+ // separate between them in this structure type.
+ struct RunState {
+ Delegate* delegate;
+
+ // Used to flag that the current Run() invocation should return ASAP.
+ bool should_quit;
+
+ // Used to count how many Run() invocations are on the stack.
+ int run_depth;
+ };
+
+ RunState* state_;
+
+ // This is the time when we need to do delayed work.
+ TimeTicks delayed_work_time_;
+
+  // MessagePump implementation for Qt, based on the GLib implementation.
+  // On Qt we use a QObject-derived helper and the
+ // default qApp in order to process events through QEventLoop.
+ MessagePumpQt qt_pump;
+
+ DISALLOW_COPY_AND_ASSIGN(MessagePumpForUI);
+};
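+
+// Illustrative sketch only (not part of the original code): driving this pump
+// directly with a minimal MessagePump::Delegate. HypotheticalDelegate is an
+// assumed name used for illustration; real code normally goes through
+// MessageLoop rather than using the pump directly.
+//
+//   class HypotheticalDelegate : public MessagePump::Delegate {
+//    public:
+//     virtual bool DoWork() { return false; }            // no immediate work
+//     virtual bool DoDelayedWork(TimeTicks* next_time) {
+//       *next_time = TimeTicks();                        // no delayed work
+//       return false;
+//     }
+//     virtual bool DoIdleWork() { return false; }        // nothing on idle
+//   };
+//
+//   MessagePumpForUI pump;
+//   HypotheticalDelegate delegate;
+//   pump.Run(&delegate);  // processes Qt events until Quit() is called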
+
+} // namespace base
+
+#endif // BASE_MESSAGE_PUMP_QT_H_
diff --git a/ipc/chromium/src/base/message_pump_win.cc b/ipc/chromium/src/base/message_pump_win.cc
new file mode 100644
index 000000000..407a47065
--- /dev/null
+++ b/ipc/chromium/src/base/message_pump_win.cc
@@ -0,0 +1,547 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/message_pump_win.h"
+
+#include <math.h>
+
+#include "base/message_loop.h"
+#include "base/histogram.h"
+#include "base/win_util.h"
+#include "WinUtils.h"
+
+using base::Time;
+
+namespace base {
+
+static const wchar_t kWndClass[] = L"Chrome_MessagePumpWindow";
+
+// Message sent to get an additional time slice for pumping (processing) another
+// task (a series of such messages creates a continuous task pump).
+static const int kMsgHaveWork = WM_USER + 1;
+
+//-----------------------------------------------------------------------------
+// MessagePumpWin public:
+
+void MessagePumpWin::AddObserver(Observer* observer) {
+ observers_.AddObserver(observer);
+}
+
+void MessagePumpWin::RemoveObserver(Observer* observer) {
+ observers_.RemoveObserver(observer);
+}
+
+void MessagePumpWin::WillProcessMessage(const MSG& msg) {
+ FOR_EACH_OBSERVER(Observer, observers_, WillProcessMessage(msg));
+}
+
+void MessagePumpWin::DidProcessMessage(const MSG& msg) {
+ FOR_EACH_OBSERVER(Observer, observers_, DidProcessMessage(msg));
+}
+
+void MessagePumpWin::RunWithDispatcher(
+ Delegate* delegate, Dispatcher* dispatcher) {
+ RunState s;
+ s.delegate = delegate;
+ s.dispatcher = dispatcher;
+ s.should_quit = false;
+ s.run_depth = state_ ? state_->run_depth + 1 : 1;
+
+ RunState* previous_state = state_;
+ state_ = &s;
+
+ DoRunLoop();
+
+ state_ = previous_state;
+}
+
+void MessagePumpWin::Quit() {
+ DCHECK(state_);
+ state_->should_quit = true;
+}
+
+//-----------------------------------------------------------------------------
+// MessagePumpWin protected:
+
+int MessagePumpWin::GetCurrentDelay() const {
+ if (delayed_work_time_.is_null())
+ return -1;
+
+ // Be careful here. TimeDelta has a precision of microseconds, but we want a
+ // value in milliseconds. If there are 5.5ms left, should the delay be 5 or
+ // 6? It should be 6 to avoid executing delayed work too early.
+ double timeout =
+ ceil((delayed_work_time_ - TimeTicks::Now()).InMillisecondsF());
+
+ // If this value is negative, then we need to run delayed work soon.
+ int delay = static_cast<int>(timeout);
+ if (delay < 0)
+ delay = 0;
+
+ return delay;
+}
+
+//-----------------------------------------------------------------------------
+// MessagePumpForUI public:
+
+MessagePumpForUI::MessagePumpForUI() {
+ InitMessageWnd();
+}
+
+MessagePumpForUI::~MessagePumpForUI() {
+ DestroyWindow(message_hwnd_);
+ UnregisterClass(kWndClass, GetModuleHandle(NULL));
+}
+
+void MessagePumpForUI::ScheduleWork() {
+ if (InterlockedExchange(&have_work_, 1))
+ return; // Someone else continued the pumping.
+
+ // Make sure the MessagePump does some work for us.
+ PostMessage(message_hwnd_, kMsgHaveWork, reinterpret_cast<WPARAM>(this), 0);
+
+ // In order to wake up any cross-process COM calls which may currently be
+ // pending on the main thread, we also have to post a UI message.
+ PostMessage(message_hwnd_, WM_NULL, 0, 0);
+}
+
+void MessagePumpForUI::ScheduleDelayedWork(const TimeTicks& delayed_work_time) {
+ //
+ // We would *like* to provide high resolution timers. Windows timers using
+ // SetTimer() have a 10ms granularity. We have to use WM_TIMER as a wakeup
+ // mechanism because the application can enter modal windows loops where it
+ // is not running our MessageLoop; the only way to have our timers fire in
+ // these cases is to post messages there.
+ //
+ // To provide sub-10ms timers, we process timers directly from our run loop.
+ // For the common case, timers will be processed there as the run loop does
+ // its normal work. However, we *also* set the system timer so that WM_TIMER
+ // events fire. This mops up the case of timers not being able to work in
+ // modal message loops. It is possible for the SetTimer to pop and have no
+ // pending timers, because they could have already been processed by the
+ // run loop itself.
+ //
+ // We use a single SetTimer corresponding to the timer that will expire
+ // soonest. As new timers are created and destroyed, we update SetTimer.
+  // Getting a spurious SetTimer event firing is benign, as we'll just be
+ // processing an empty timer queue.
+ //
+ delayed_work_time_ = delayed_work_time;
+
+ int delay_msec = GetCurrentDelay();
+ DCHECK(delay_msec >= 0);
+ if (delay_msec < USER_TIMER_MINIMUM)
+ delay_msec = USER_TIMER_MINIMUM;
+
+ // Create a WM_TIMER event that will wake us up to check for any pending
+ // timers (in case we are running within a nested, external sub-pump).
+ SetTimer(message_hwnd_, reinterpret_cast<UINT_PTR>(this), delay_msec, NULL);
+}
+
+void MessagePumpForUI::PumpOutPendingPaintMessages() {
+ // If we are being called outside of the context of Run, then don't try to do
+ // any work.
+ if (!state_)
+ return;
+
+ // Create a mini-message-pump to force immediate processing of only Windows
+ // WM_PAINT messages. Don't provide an infinite loop, but do enough peeking
+ // to get the job done. Actual common max is 4 peeks, but we'll be a little
+ // safe here.
+ const int kMaxPeekCount = 20;
+ int peek_count;
+ for (peek_count = 0; peek_count < kMaxPeekCount; ++peek_count) {
+ MSG msg;
+ if (!PeekMessage(&msg, NULL, 0, 0, PM_REMOVE | PM_QS_PAINT))
+ break;
+ ProcessMessageHelper(msg);
+ if (state_->should_quit) // Handle WM_QUIT.
+ break;
+ }
+ // Histogram what was really being used, to help to adjust kMaxPeekCount.
+ DHISTOGRAM_COUNTS("Loop.PumpOutPendingPaintMessages Peeks", peek_count);
+}
+
+//-----------------------------------------------------------------------------
+// MessagePumpForUI private:
+
+// static
+LRESULT CALLBACK MessagePumpForUI::WndProcThunk(
+ HWND hwnd, UINT message, WPARAM wparam, LPARAM lparam) {
+ switch (message) {
+ case kMsgHaveWork:
+ reinterpret_cast<MessagePumpForUI*>(wparam)->HandleWorkMessage();
+ break;
+ case WM_TIMER:
+ reinterpret_cast<MessagePumpForUI*>(wparam)->HandleTimerMessage();
+ break;
+ }
+ return DefWindowProc(hwnd, message, wparam, lparam);
+}
+
+void MessagePumpForUI::DoRunLoop() {
+  // If this was just a simple PeekMessage() loop (servicing all possible work
+  // queues), then Windows would try to achieve the following order (according
+  // to MSDN documentation about PeekMessage with no filter):
+ // * Sent messages
+ // * Posted messages
+ // * Sent messages (again)
+ // * WM_PAINT messages
+ // * WM_TIMER messages
+ //
+  // Summary: none of the above classes is starved, and sent messages have twice
+ // the chance of being processed (i.e., reduced service time).
+
+ for (;;) {
+ // If we do any work, we may create more messages etc., and more work may
+ // possibly be waiting in another task group. When we (for example)
+ // ProcessNextWindowsMessage(), there is a good chance there are still more
+ // messages waiting. On the other hand, when any of these methods return
+ // having done no work, then it is pretty unlikely that calling them again
+ // quickly will find any work to do. Finally, if they all say they had no
+ // work, then it is a good time to consider sleeping (waiting) for more
+ // work.
+
+ bool more_work_is_plausible = ProcessNextWindowsMessage();
+ if (state_->should_quit)
+ break;
+
+ more_work_is_plausible |= state_->delegate->DoWork();
+ if (state_->should_quit)
+ break;
+
+ more_work_is_plausible |=
+ state_->delegate->DoDelayedWork(&delayed_work_time_);
+ // If we did not process any delayed work, then we can assume that our
+    // existing WM_TIMER, if any, will fire when delayed work should run. We
+ // don't want to disturb that timer if it is already in flight. However,
+    // if we did do all remaining delayed work, then let's kill the WM_TIMER.
+ if (more_work_is_plausible && delayed_work_time_.is_null())
+ KillTimer(message_hwnd_, reinterpret_cast<UINT_PTR>(this));
+ if (state_->should_quit)
+ break;
+
+ if (more_work_is_plausible)
+ continue;
+
+ more_work_is_plausible = state_->delegate->DoIdleWork();
+ if (state_->should_quit)
+ break;
+
+ if (more_work_is_plausible)
+ continue;
+
+ WaitForWork(); // Wait (sleep) until we have work to do again.
+ }
+}
+
+void MessagePumpForUI::InitMessageWnd() {
+ HINSTANCE hinst = GetModuleHandle(NULL);
+
+ WNDCLASSEX wc = {0};
+ wc.cbSize = sizeof(wc);
+ wc.lpfnWndProc = WndProcThunk;
+ wc.hInstance = hinst;
+ wc.lpszClassName = kWndClass;
+ RegisterClassEx(&wc);
+
+ message_hwnd_ =
+ CreateWindow(kWndClass, 0, 0, 0, 0, 0, 0, HWND_MESSAGE, 0, hinst, 0);
+ DCHECK(message_hwnd_);
+}
+
+void MessagePumpForUI::WaitForWork() {
+ // Wait until a message is available, up to the time needed by the timer
+ // manager to fire the next set of timers.
+ int delay = GetCurrentDelay();
+ if (delay < 0) // Negative value means no timers waiting.
+ delay = INFINITE;
+
+ mozilla::widget::WinUtils::WaitForMessage(delay);
+}
+
+void MessagePumpForUI::HandleWorkMessage() {
+ // If we are being called outside of the context of Run, then don't try to do
+ // any work. This could correspond to a MessageBox call or something of that
+ // sort.
+ if (!state_) {
+ // Since we handled a kMsgHaveWork message, we must still update this flag.
+ InterlockedExchange(&have_work_, 0);
+ return;
+ }
+
+ // Let whatever would have run had we not been putting messages in the queue
+ // run now. This is an attempt to make our dummy message not starve other
+ // messages that may be in the Windows message queue.
+ ProcessPumpReplacementMessage();
+
+  // Now give the delegate a chance to do some work. It will let us know if
+  // it needs to do more work.
+ if (state_->delegate->DoWork())
+ ScheduleWork();
+}
+
+void MessagePumpForUI::HandleTimerMessage() {
+ KillTimer(message_hwnd_, reinterpret_cast<UINT_PTR>(this));
+
+ // If we are being called outside of the context of Run, then don't do
+ // anything. This could correspond to a MessageBox call or something of
+ // that sort.
+ if (!state_)
+ return;
+
+ state_->delegate->DoDelayedWork(&delayed_work_time_);
+ if (!delayed_work_time_.is_null()) {
+ // A bit gratuitous to set delayed_work_time_ again, but oh well.
+ ScheduleDelayedWork(delayed_work_time_);
+ }
+}
+
+bool MessagePumpForUI::ProcessNextWindowsMessage() {
+ // If there are sent messages in the queue then PeekMessage internally
+ // dispatches the message and returns false. We return true in this
+ // case to ensure that the message loop peeks again instead of calling
+ // MsgWaitForMultipleObjectsEx again.
+ bool sent_messages_in_queue = false;
+ DWORD queue_status = GetQueueStatus(QS_SENDMESSAGE);
+ if (HIWORD(queue_status) & QS_SENDMESSAGE)
+ sent_messages_in_queue = true;
+
+ MSG msg;
+ if (PeekMessage(&msg, NULL, 0, 0, PM_REMOVE))
+ return ProcessMessageHelper(msg);
+
+ return sent_messages_in_queue;
+}
+
+bool MessagePumpForUI::ProcessMessageHelper(const MSG& msg) {
+ if (WM_QUIT == msg.message) {
+ // Repost the QUIT message so that it will be retrieved by the primary
+ // GetMessage() loop.
+ state_->should_quit = true;
+ PostQuitMessage(static_cast<int>(msg.wParam));
+ return false;
+ }
+
+ // While running our main message pump, we discard kMsgHaveWork messages.
+ if (msg.message == kMsgHaveWork && msg.hwnd == message_hwnd_)
+ return ProcessPumpReplacementMessage();
+
+ WillProcessMessage(msg);
+
+ if (state_->dispatcher) {
+ if (!state_->dispatcher->Dispatch(msg))
+ state_->should_quit = true;
+ } else {
+ TranslateMessage(&msg);
+ DispatchMessage(&msg);
+ }
+
+ DidProcessMessage(msg);
+ return true;
+}
+
+bool MessagePumpForUI::ProcessPumpReplacementMessage() {
+ // When we encounter a kMsgHaveWork message, this method is called to peek
+ // and process a replacement message, such as a WM_PAINT or WM_TIMER. The
+ // goal is to make the kMsgHaveWork as non-intrusive as possible, even though
+  // a continuous stream of such messages is posted. This method carefully
+ // peeks a message while there is no chance for a kMsgHaveWork to be pending,
+ // then resets the have_work_ flag (allowing a replacement kMsgHaveWork to
+ // possibly be posted), and finally dispatches that peeked replacement. Note
+ // that the re-post of kMsgHaveWork may be asynchronous to this thread!!
+
+ MSG msg;
+ bool have_message = false;
+ if (MessageLoop::current()->os_modal_loop()) {
+ // We only peek out WM_PAINT and WM_TIMER here for reasons mentioned above.
+ have_message = PeekMessage(&msg, NULL, WM_PAINT, WM_PAINT, PM_REMOVE) ||
+ PeekMessage(&msg, NULL, WM_TIMER, WM_TIMER, PM_REMOVE);
+ } else {
+ have_message = (0 != PeekMessage(&msg, NULL, 0, 0, PM_REMOVE));
+
+ if (have_message && msg.message == WM_NULL)
+ have_message = (0 != PeekMessage(&msg, NULL, 0, 0, PM_REMOVE));
+ }
+
+ DCHECK(!have_message || kMsgHaveWork != msg.message ||
+ msg.hwnd != message_hwnd_);
+
+ // Since we discarded a kMsgHaveWork message, we must update the flag.
+ int old_have_work = InterlockedExchange(&have_work_, 0);
+ DCHECK(old_have_work);
+
+  // We don't need a special time slice if there was no message to process.
+ if (!have_message)
+ return false;
+
+ // Guarantee we'll get another time slice in the case where we go into native
+ // windows code. This ScheduleWork() may hurt performance a tiny bit when
+ // tasks appear very infrequently, but when the event queue is busy, the
+ // kMsgHaveWork events get (percentage wise) rarer and rarer.
+ ScheduleWork();
+ return ProcessMessageHelper(msg);
+}
+
+//-----------------------------------------------------------------------------
+// MessagePumpForIO public:
+
+MessagePumpForIO::MessagePumpForIO() {
+ port_.Set(CreateIoCompletionPort(INVALID_HANDLE_VALUE, NULL, 0, 1));
+ DCHECK(port_.IsValid());
+}
+
+void MessagePumpForIO::ScheduleWork() {
+ if (InterlockedExchange(&have_work_, 1))
+ return; // Someone else continued the pumping.
+
+ // Make sure the MessagePump does some work for us.
+ BOOL ret = PostQueuedCompletionStatus(port_, 0,
+ reinterpret_cast<ULONG_PTR>(this),
+ reinterpret_cast<OVERLAPPED*>(this));
+ DCHECK(ret);
+}
+
+void MessagePumpForIO::ScheduleDelayedWork(const TimeTicks& delayed_work_time) {
+ // We know that we can't be blocked right now since this method can only be
+ // called on the same thread as Run, so we only need to update our record of
+ // how long to sleep when we do sleep.
+ delayed_work_time_ = delayed_work_time;
+}
+
+void MessagePumpForIO::RegisterIOHandler(HANDLE file_handle,
+ IOHandler* handler) {
+ ULONG_PTR key = reinterpret_cast<ULONG_PTR>(handler);
+ HANDLE port = CreateIoCompletionPort(file_handle, port_, key, 1);
+ DCHECK(port == port_.Get());
+}
+
+//-----------------------------------------------------------------------------
+// MessagePumpForIO private:
+
+void MessagePumpForIO::DoRunLoop() {
+ for (;;) {
+ // If we do any work, we may create more messages etc., and more work may
+ // possibly be waiting in another task group. When we (for example)
+ // WaitForIOCompletion(), there is a good chance there are still more
+ // messages waiting. On the other hand, when any of these methods return
+ // having done no work, then it is pretty unlikely that calling them
+ // again quickly will find any work to do. Finally, if they all say they
+ // had no work, then it is a good time to consider sleeping (waiting) for
+ // more work.
+
+ bool more_work_is_plausible = state_->delegate->DoWork();
+ if (state_->should_quit)
+ break;
+
+ more_work_is_plausible |= WaitForIOCompletion(0, NULL);
+ if (state_->should_quit)
+ break;
+
+ more_work_is_plausible |=
+ state_->delegate->DoDelayedWork(&delayed_work_time_);
+ if (state_->should_quit)
+ break;
+
+ if (more_work_is_plausible)
+ continue;
+
+ more_work_is_plausible = state_->delegate->DoIdleWork();
+ if (state_->should_quit)
+ break;
+
+ if (more_work_is_plausible)
+ continue;
+
+ WaitForWork(); // Wait (sleep) until we have work to do again.
+ }
+}
+
+// Wait until IO completes, up to the time needed by the timer manager to fire
+// the next set of timers.
+void MessagePumpForIO::WaitForWork() {
+ // We do not support nested IO message loops. This is to avoid messy
+ // recursion problems.
+ DCHECK(state_->run_depth == 1) << "Cannot nest an IO message loop!";
+
+ int timeout = GetCurrentDelay();
+ if (timeout < 0) // Negative value means no timers waiting.
+ timeout = INFINITE;
+
+ WaitForIOCompletion(timeout, NULL);
+}
+
+bool MessagePumpForIO::WaitForIOCompletion(DWORD timeout, IOHandler* filter) {
+ IOItem item;
+ if (completed_io_.empty() || !MatchCompletedIOItem(filter, &item)) {
+ // We have to ask the system for another IO completion.
+ if (!GetIOItem(timeout, &item))
+ return false;
+
+ if (ProcessInternalIOItem(item))
+ return true;
+ }
+
+ if (item.context->handler) {
+ if (filter && item.handler != filter) {
+ // Save this item for later
+ completed_io_.push_back(item);
+ } else {
+ DCHECK(item.context->handler == item.handler);
+ item.handler->OnIOCompleted(item.context, item.bytes_transfered,
+ item.error);
+ }
+ } else {
+ // The handler must be gone by now, just cleanup the mess.
+ delete item.context;
+ }
+ return true;
+}
+
+// Asks the OS for another IO completion result.
+bool MessagePumpForIO::GetIOItem(DWORD timeout, IOItem* item) {
+ memset(item, 0, sizeof(*item));
+ ULONG_PTR key = 0;
+ OVERLAPPED* overlapped = NULL;
+ if (!GetQueuedCompletionStatus(port_.Get(), &item->bytes_transfered, &key,
+ &overlapped, timeout)) {
+ if (!overlapped)
+ return false; // Nothing in the queue.
+ item->error = GetLastError();
+ item->bytes_transfered = 0;
+ }
+
+ item->handler = reinterpret_cast<IOHandler*>(key);
+ item->context = reinterpret_cast<IOContext*>(overlapped);
+ return true;
+}
+
+bool MessagePumpForIO::ProcessInternalIOItem(const IOItem& item) {
+ if (this == reinterpret_cast<MessagePumpForIO*>(item.context) &&
+ this == reinterpret_cast<MessagePumpForIO*>(item.handler)) {
+ // This is our internal completion.
+ DCHECK(!item.bytes_transfered);
+ InterlockedExchange(&have_work_, 0);
+ return true;
+ }
+ return false;
+}
+
+// Returns a completion item that was previously received.
+bool MessagePumpForIO::MatchCompletedIOItem(IOHandler* filter, IOItem* item) {
+ DCHECK(!completed_io_.empty());
+ for (std::list<IOItem>::iterator it = completed_io_.begin();
+ it != completed_io_.end(); ++it) {
+ if (!filter || it->handler == filter) {
+ *item = *it;
+ completed_io_.erase(it);
+ return true;
+ }
+ }
+ return false;
+}
+
+} // namespace base
diff --git a/ipc/chromium/src/base/message_pump_win.h b/ipc/chromium/src/base/message_pump_win.h
new file mode 100644
index 000000000..ddb6e0d45
--- /dev/null
+++ b/ipc/chromium/src/base/message_pump_win.h
@@ -0,0 +1,348 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MESSAGE_PUMP_WIN_H_
+#define BASE_MESSAGE_PUMP_WIN_H_
+
+#include <windows.h>
+
+#include <list>
+
+#include "base/lock.h"
+#include "base/message_pump.h"
+#include "base/observer_list.h"
+#include "base/scoped_handle.h"
+#include "base/time.h"
+
+namespace base {
+
+// MessagePumpWin serves as the base for specialized versions of the MessagePump
+// for Windows. It provides basic functionality like handling of observers and
+// controlling the lifetime of the message pump.
+class MessagePumpWin : public MessagePump {
+ public:
+ // An Observer is an object that receives global notifications from the
+ // MessageLoop.
+ //
+ // NOTE: An Observer implementation should be extremely fast!
+ //
+ class Observer {
+ public:
+ virtual ~Observer() {}
+
+ // This method is called before processing a message.
+ // The message may be undefined in which case msg.message is 0
+ virtual void WillProcessMessage(const MSG& msg) = 0;
+
+ // This method is called when control returns from processing a UI message.
+ // The message may be undefined in which case msg.message is 0
+ virtual void DidProcessMessage(const MSG& msg) = 0;
+ };
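+
+  // Illustrative sketch only (not part of the original code): a minimal
+  // Observer that counts processed messages. MessageCountingObserver is a
+  // hypothetical name used for illustration.
+  //
+  //   class MessageCountingObserver : public MessagePumpWin::Observer {
+  //    public:
+  //     MessageCountingObserver() : count_(0) {}
+  //     virtual void WillProcessMessage(const MSG& msg) {}
+  //     virtual void DidProcessMessage(const MSG& msg) { ++count_; }
+  //     int count() const { return count_; }
+  //    private:
+  //     int count_;
+  //   };
+  //
+  //   pump->AddObserver(&counting_observer);    // start receiving callbacks
+  //   pump->RemoveObserver(&counting_observer); // stop receiving callbacks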
+
+ // Dispatcher is used during a nested invocation of Run to dispatch events.
+ // If Run is invoked with a non-NULL Dispatcher, MessageLoop does not
+  // dispatch events (or invoke TranslateMessage); rather, every message is
+ // passed to Dispatcher's Dispatch method for dispatch. It is up to the
+ // Dispatcher to dispatch, or not, the event.
+ //
+ // The nested loop is exited by either posting a quit, or returning false
+ // from Dispatch.
+ class Dispatcher {
+ public:
+ virtual ~Dispatcher() {}
+ // Dispatches the event. If true is returned processing continues as
+ // normal. If false is returned, the nested loop exits immediately.
+ virtual bool Dispatch(const MSG& msg) = 0;
+ };
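+
+  // Illustrative sketch only (not part of the original code): a Dispatcher
+  // that exits the nested loop when the Escape key is pressed and otherwise
+  // dispatches messages normally. EscapeDispatcher is a hypothetical name
+  // used for illustration.
+  //
+  //   class EscapeDispatcher : public MessagePumpWin::Dispatcher {
+  //    public:
+  //     virtual bool Dispatch(const MSG& msg) {
+  //       if (msg.message == WM_KEYDOWN && msg.wParam == VK_ESCAPE)
+  //         return false;  // Returning false exits the nested Run.
+  //       TranslateMessage(&msg);
+  //       DispatchMessage(&msg);
+  //       return true;
+  //     }
+  //   };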
+
+ MessagePumpWin() : have_work_(0), state_(NULL) {}
+ virtual ~MessagePumpWin() {}
+
+ // Add an Observer, which will start receiving notifications immediately.
+ void AddObserver(Observer* observer);
+
+ // Remove an Observer. It is safe to call this method while an Observer is
+ // receiving a notification callback.
+ void RemoveObserver(Observer* observer);
+
+  // Give code that processes additional messages a chance to notify the
+ // message loop observers that another message has been processed.
+ void WillProcessMessage(const MSG& msg);
+ void DidProcessMessage(const MSG& msg);
+
+ // Like MessagePump::Run, but MSG objects are routed through dispatcher.
+ void RunWithDispatcher(Delegate* delegate, Dispatcher* dispatcher);
+
+ // MessagePump methods:
+ virtual void Run(Delegate* delegate) { RunWithDispatcher(delegate, NULL); }
+ virtual void Quit();
+
+ protected:
+ struct RunState {
+ Delegate* delegate;
+ Dispatcher* dispatcher;
+
+ // Used to flag that the current Run() invocation should return ASAP.
+ bool should_quit;
+
+ // Used to count how many Run() invocations are on the stack.
+ int run_depth;
+ };
+
+ virtual void DoRunLoop() = 0;
+ int GetCurrentDelay() const;
+
+ ObserverList<Observer> observers_;
+
+ // The time at which delayed work should run.
+ TimeTicks delayed_work_time_;
+
+  // A boolean value used to indicate if there is a kMsgHaveWork message pending
+ // in the Windows Message queue. There is at most one such message, and it
+ // can drive execution of tasks when a native message pump is running.
+ LONG have_work_;
+
+ // State for the current invocation of Run.
+ RunState* state_;
+};
+
+//-----------------------------------------------------------------------------
+// MessagePumpForUI extends MessagePumpWin with methods that are particular to a
+// MessageLoop instantiated with TYPE_UI.
+//
+// MessagePumpForUI implements a "traditional" Windows message pump. It contains
+// a nearly infinite loop that peeks out messages, and then dispatches them.
+// Intermixed with those peeks are callouts to DoWork for pending tasks, and
+// DoDelayedWork for pending timers. When there are no events to be serviced,
+// this pump goes into a wait state. In most cases, this message pump handles
+// all processing.
+//
+// However, when a task or Windows event invokes a native dialog box (or the
+// like) on the stack, that window typically provides a bare-bones message
+// pump. That bare-bones message pump generally supports little more than a
+// peek of the Windows message queue, followed by a dispatch of the peeked
+// message. MessageLoop extends that bare-bones message pump to also service
+// Tasks, at the cost of some complexity.
+//
+// The basic structure of the extension (referred to as a sub-pump) is that a
+// special message, kMsgHaveWork, is repeatedly injected into the Windows
+// Message queue. Each time the kMsgHaveWork message is peeked, checks are
+// made for an extended set of events, including the availability of Tasks to
+// run.
+//
+// After running a task, the special message kMsgHaveWork is again posted to
+// the Windows Message queue, ensuring a future time slice for processing a
+// future event. To prevent flooding the Windows Message queue, care is taken
+// to be sure that at most one kMsgHaveWork message is EVER pending in the
+// Window's Message queue.
+//
+// There are a few additional complexities in this system where, when there are
+// no Tasks to run, this otherwise infinite stream of messages which drives the
+// sub-pump is halted. The pump is automatically re-started when Tasks are
+// queued.
+//
+// A second complexity is that the presence of this stream of posted tasks may
+// prevent a bare-bones message pump from ever peeking a WM_PAINT or WM_TIMER.
+// Such paint and timer events always give priority to a posted message, such as
+// kMsgHaveWork messages. As a result, care is taken to do some peeking in
+// between the posting of each kMsgHaveWork message (i.e., after kMsgHaveWork
+// is peeked, and before a replacement kMsgHaveWork is posted).
+//
+// NOTE: Although it may seem odd that messages are used to start and stop this
+// flow (as opposed to signaling objects, etc.), it should be understood that
+// the native message pump will *only* respond to messages. As a result, it is
+// an excellent choice. It is also helpful that the starter messages that are
+// placed in the queue when new tasks arrive also awaken DoRunLoop.
+//
+class MessagePumpForUI : public MessagePumpWin {
+ public:
+ MessagePumpForUI();
+ virtual ~MessagePumpForUI();
+
+ // MessagePump methods:
+ virtual void ScheduleWork();
+ virtual void ScheduleDelayedWork(const TimeTicks& delayed_work_time);
+
+ // Applications can call this to encourage us to process all pending WM_PAINT
+ // messages. This method will process all paint messages the Windows Message
+ // queue can provide, up to some fixed number (to avoid any infinite loops).
+ void PumpOutPendingPaintMessages();
+
+ protected:
+ virtual void DoRunLoop();
+
+ bool ProcessNextWindowsMessage();
+ void InitMessageWnd();
+ void WaitForWork();
+ void HandleWorkMessage();
+ void HandleTimerMessage();
+ bool ProcessMessageHelper(const MSG& msg);
+ bool ProcessPumpReplacementMessage();
+
+ // A hidden message-only window.
+ HWND message_hwnd_;
+
+ private:
+ static LRESULT CALLBACK WndProcThunk(
+ HWND hwnd, UINT message, WPARAM wparam, LPARAM lparam);
+};
+
+//-----------------------------------------------------------------------------
+// MessagePumpForIO extends MessagePumpWin with methods that are particular to a
+// MessageLoop instantiated with TYPE_IO. This version of MessagePump does not
+// deal with Windows messages, and instead has a Run loop based on Completion
+// Ports so it is better suited for IO operations.
+//
+class MessagePumpForIO : public MessagePumpWin {
+ public:
+ struct IOContext;
+
+ // Clients interested in receiving OS notifications when asynchronous IO
+ // operations complete should implement this interface and register themselves
+ // with the message pump.
+ //
+ // Typical use #1:
+ // // Use only when there are no user's buffers involved on the actual IO,
+ // // so that all the cleanup can be done by the message pump.
+ // class MyFile : public IOHandler {
+ // MyFile() {
+ // ...
+ // context_ = new IOContext;
+ // context_->handler = this;
+ // message_pump->RegisterIOHandler(file_, this);
+ // }
+ // ~MyFile() {
+ // if (pending_) {
+ // // By setting the handler to NULL, we're asking for this context
+ // // to be deleted when received, without calling back to us.
+ // context_->handler = NULL;
+ // } else {
+ // delete context_;
+ // }
+ // }
+ // virtual void OnIOCompleted(IOContext* context, DWORD bytes_transfered,
+ // DWORD error) {
+ // pending_ = false;
+ // }
+ // void DoSomeIo() {
+ // ...
+ // // The only buffer required for this operation is the overlapped
+ // // structure.
+ // ConnectNamedPipe(file_, &context_->overlapped);
+ // pending_ = true;
+ // }
+ // bool pending_;
+ // IOContext* context_;
+ // HANDLE file_;
+ // };
+ //
+ // Typical use #2:
+ // class MyFile : public IOHandler {
+ // MyFile() {
+ // ...
+ // message_pump->RegisterIOHandler(file_, this);
+ // }
+ // // Plus some code to make sure that this destructor is not called
+ // // while there are pending IO operations.
+ // ~MyFile() {
+ // }
+ // virtual void OnIOCompleted(IOContext* context, DWORD bytes_transfered,
+ // DWORD error) {
+ // ...
+ // delete context;
+ // }
+ // void DoSomeIo() {
+ // ...
+ // IOContext* context = new IOContext;
+ // // This is not used for anything. It just prevents the context from
+ // // being considered "abandoned".
+ // context->handler = this;
+ // ReadFile(file_, buffer, num_bytes, &read, &context->overlapped);
+ // }
+ // HANDLE file_;
+ // };
+ //
+ // Typical use #3:
+ // Same as the previous example, except that in order to deal with the
+ // requirement stated for the destructor, the class calls WaitForIOCompletion
+ // from the destructor to block until all IO finishes.
+ // ~MyFile() {
+ // while(pending_)
+ // message_pump->WaitForIOCompletion(INFINITE, this);
+ // }
+ //
+ class IOHandler {
+ public:
+ virtual ~IOHandler() {}
+ // This will be called once the pending IO operation associated with
+ // |context| completes. |error| is the Win32 error code of the IO operation
+ // (ERROR_SUCCESS if there was no error). |bytes_transfered| will be zero
+ // on error.
+ virtual void OnIOCompleted(IOContext* context, DWORD bytes_transfered,
+ DWORD error) = 0;
+ };
+
+ // The extended context that should be used as the base structure on every
+ // overlapped IO operation. |handler| must be set to the registered IOHandler
+ // for the given file when the operation is started, and it can be set to NULL
+ // before the operation completes to indicate that the handler should not be
+ // called anymore, and instead, the IOContext should be deleted when the OS
+ // notifies the completion of this operation. Please remember that any buffers
+ // involved with an IO operation should be around until the callback is
+  // received, so this technique can only be used for IO that does not involve
+ // additional buffers (other than the overlapped structure itself).
+ struct IOContext {
+ OVERLAPPED overlapped;
+ IOHandler* handler;
+ };
+
+ MessagePumpForIO();
+ virtual ~MessagePumpForIO() {}
+
+ // MessagePump methods:
+ virtual void ScheduleWork();
+ virtual void ScheduleDelayedWork(const TimeTicks& delayed_work_time);
+
+ // Register the handler to be used when asynchronous IO for the given file
+ // completes. The registration persists as long as |file_handle| is valid, so
+ // |handler| must be valid as long as there is pending IO for the given file.
+ void RegisterIOHandler(HANDLE file_handle, IOHandler* handler);
+
+ // Waits for the next IO completion that should be processed by |filter|, for
+  // up to |timeout| milliseconds. Returns true if any IO operation completed,
+ // regardless of the involved handler, and false if the timeout expired. If
+ // the completion port received any message and the involved IO handler
+ // matches |filter|, the callback is called before returning from this code;
+ // if the handler is not the one that we are looking for, the callback will
+ // be postponed for another time, so reentrancy problems can be avoided.
+ // External use of this method should be reserved for the rare case when the
+ // caller is willing to allow pausing regular task dispatching on this thread.
+ bool WaitForIOCompletion(DWORD timeout, IOHandler* filter);
+
+ private:
+ struct IOItem {
+ IOHandler* handler;
+ IOContext* context;
+ DWORD bytes_transfered;
+ DWORD error;
+ };
+
+ virtual void DoRunLoop();
+ void WaitForWork();
+ bool MatchCompletedIOItem(IOHandler* filter, IOItem* item);
+ bool GetIOItem(DWORD timeout, IOItem* item);
+ bool ProcessInternalIOItem(const IOItem& item);
+
+ // The completion port associated with this thread.
+ ScopedHandle port_;
+ // This list will be empty almost always. It stores IO completions that have
+ // not been delivered yet because somebody was doing cleanup.
+ std::list<IOItem> completed_io_;
+};
+
+} // namespace base
+
+#endif // BASE_MESSAGE_PUMP_WIN_H_
diff --git a/ipc/chromium/src/base/object_watcher.cc b/ipc/chromium/src/base/object_watcher.cc
new file mode 100644
index 000000000..ced911fa2
--- /dev/null
+++ b/ipc/chromium/src/base/object_watcher.cc
@@ -0,0 +1,137 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/object_watcher.h"
+
+#include "base/logging.h"
+
+namespace base {
+
+//-----------------------------------------------------------------------------
+
+class ObjectWatcher::Watch : public mozilla::Runnable {
+public:
+ ObjectWatcher* watcher; // The associated ObjectWatcher instance
+ HANDLE object; // The object being watched
+ HANDLE wait_object; // Returned by RegisterWaitForSingleObject
+ MessageLoop* origin_loop; // Used to get back to the origin thread
+ Delegate* delegate; // Delegate to notify when signaled
+ bool did_signal; // DoneWaiting was called
+
+ NS_IMETHOD Run() override {
+ // The watcher may have already been torn down, in which case we need to
+ // just get out of dodge.
+ if (!watcher)
+ return NS_OK;
+
+ DCHECK(did_signal);
+ watcher->StopWatching();
+
+ delegate->OnObjectSignaled(object);
+
+ return NS_OK;
+ }
+};
+
+//-----------------------------------------------------------------------------
+
+ObjectWatcher::ObjectWatcher() : watch_(nullptr) {
+}
+
+ObjectWatcher::~ObjectWatcher() {
+ StopWatching();
+}
+
+bool ObjectWatcher::StartWatching(HANDLE object, Delegate* delegate) {
+ if (watch_) {
+ NOTREACHED() << "Already watching an object";
+ return false;
+ }
+
+ RefPtr<Watch> watch = new Watch;
+ watch->watcher = this;
+ watch->object = object;
+ watch->origin_loop = MessageLoop::current();
+ watch->delegate = delegate;
+ watch->did_signal = false;
+
+ // Since our job is to just notice when an object is signaled and report the
+ // result back to this thread, we can just run on a Windows wait thread.
+ DWORD wait_flags = WT_EXECUTEDEFAULT | WT_EXECUTEONLYONCE;
+
+ if (!RegisterWaitForSingleObject(&watch->wait_object, object, DoneWaiting,
+ watch.get(), INFINITE, wait_flags)) {
+ NOTREACHED() << "RegisterWaitForSingleObject failed: " << GetLastError();
+ return false;
+ }
+
+ watch_ = watch.forget();
+
+ // We need to know if the current message loop is going away so we can
+ // prevent the wait thread from trying to access a dead message loop.
+ MessageLoop::current()->AddDestructionObserver(this);
+ return true;
+}
+
+bool ObjectWatcher::StopWatching() {
+ if (!watch_)
+ return false;
+
+ // Make sure ObjectWatcher is used in a single-threaded fashion.
+ DCHECK(watch_->origin_loop == MessageLoop::current());
+
+ // If DoneWaiting is in progress, we wait for it to finish. We know whether
+ // DoneWaiting happened or not by inspecting the did_signal flag.
+ if (!UnregisterWaitEx(watch_->wait_object, INVALID_HANDLE_VALUE)) {
+ NOTREACHED() << "UnregisterWaitEx failed: " << GetLastError();
+ return false;
+ }
+
+ // Make sure that we see any mutation to did_signal. This should be a no-op
+ // since we expect that UnregisterWaitEx resulted in a memory barrier, but
+ // just to be sure, we're going to be explicit.
+ MemoryBarrier();
+
+ // If the watch has been posted, then we need to make sure it knows not to do
+ // anything once it is run.
+ watch_->watcher = NULL;
+
+ watch_ = nullptr;
+
+ MessageLoop::current()->RemoveDestructionObserver(this);
+ return true;
+}
+
+HANDLE ObjectWatcher::GetWatchedObject() {
+ if (!watch_)
+ return NULL;
+
+ return watch_->object;
+}
+
+// static
+void CALLBACK ObjectWatcher::DoneWaiting(void* param, BOOLEAN timed_out) {
+ DCHECK(!timed_out);
+
+ Watch* watch = static_cast<Watch*>(param);
+ RefPtr<Watch> addrefedWatch = watch;
+
+ // Record that we ran this function.
+ watch->did_signal = true;
+
+ // We rely on the locking in PostTask() to ensure that a memory barrier is
+ // provided, which in turn ensures our change to did_signal can be observed
+ // on the target thread.
+ watch->origin_loop->PostTask(addrefedWatch.forget());
+}
+
+void ObjectWatcher::WillDestroyCurrentMessageLoop() {
+ // Need to shutdown the watch so that we don't try to access the MessageLoop
+ // after this point.
+ StopWatching();
+}
+
+} // namespace base
diff --git a/ipc/chromium/src/base/object_watcher.h b/ipc/chromium/src/base/object_watcher.h
new file mode 100644
index 000000000..f3ddf7ef8
--- /dev/null
+++ b/ipc/chromium/src/base/object_watcher.h
@@ -0,0 +1,96 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_OBJECT_WATCHER_H_
+#define BASE_OBJECT_WATCHER_H_
+
+#include <windows.h>
+#ifdef GetClassName
+#undef GetClassName
+#endif
+
+#include "base/message_loop.h"
+
+namespace base {
+
+// A class that provides a means to asynchronously wait for a Windows object to
+// become signaled. It is an abstraction around RegisterWaitForSingleObject
+// that provides a notification callback, OnObjectSignaled, that runs back on
+// the origin thread (i.e., the thread that called StartWatching).
+//
+// This class acts like a smart pointer such that when it goes out-of-scope,
+// UnregisterWaitEx is automatically called, and any in-flight notification is
+// suppressed.
+//
+// Typical usage:
+//
+// class MyClass : public base::ObjectWatcher::Delegate {
+// public:
+// void DoStuffWhenSignaled(HANDLE object) {
+// watcher_.StartWatching(object, this);
+// }
+// virtual void OnObjectSignaled(HANDLE object) {
+// // OK, time to do stuff!
+// }
+// private:
+// base::ObjectWatcher watcher_;
+// };
+//
+// In the above example, MyClass wants to "do stuff" when object becomes
+// signaled. ObjectWatcher makes this task easy. When MyClass goes out of
+// scope, the watcher_ will be destroyed, and there is no need to worry about
+// OnObjectSignaled being called on a deleted MyClass pointer. Easy!
+//
+class ObjectWatcher : public MessageLoop::DestructionObserver {
+ public:
+ class Delegate {
+ public:
+ virtual ~Delegate() {}
+ // Called from the MessageLoop when a signaled object is detected. To
+    // continue watching the object, StartWatching must be called again.
+ virtual void OnObjectSignaled(HANDLE object) = 0;
+ };
+
+ ObjectWatcher();
+ ~ObjectWatcher();
+
+ // When the object is signaled, the given delegate is notified on the thread
+ // where StartWatching is called. The ObjectWatcher is not responsible for
+ // deleting the delegate.
+ //
+ // Returns true if the watch was started. Otherwise, false is returned.
+ //
+ bool StartWatching(HANDLE object, Delegate* delegate);
+
+ // Stops watching. Does nothing if the watch has already completed. If the
+ // watch is still active, then it is canceled, and the associated delegate is
+ // not notified.
+ //
+ // Returns true if the watch was canceled. Otherwise, false is returned.
+ //
+ bool StopWatching();
+
+ // Returns the handle of the object being watched, or NULL if the object
+ // watcher is stopped.
+ HANDLE GetWatchedObject();
+
+ private:
+ // Called on a background thread when done waiting.
+ static void CALLBACK DoneWaiting(void* param, BOOLEAN timed_out);
+
+ // MessageLoop::DestructionObserver implementation:
+ virtual void WillDestroyCurrentMessageLoop();
+
+ // Internal state.
+ class Watch;
+ RefPtr<Watch> watch_;
+
+ DISALLOW_COPY_AND_ASSIGN(ObjectWatcher);
+};
+
+} // namespace base
+
+#endif // BASE_OBJECT_WATCHER_H_
diff --git a/ipc/chromium/src/base/observer_list.h b/ipc/chromium/src/base/observer_list.h
new file mode 100644
index 000000000..4dce7ae59
--- /dev/null
+++ b/ipc/chromium/src/base/observer_list.h
@@ -0,0 +1,186 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_OBSERVER_LIST_H__
+#define BASE_OBSERVER_LIST_H__
+
+#include <algorithm>
+#include <limits>
+#include <vector>
+
+#include "base/basictypes.h"
+#include "base/logging.h"
+
+#if defined(ANDROID) && defined(_STLP_STD_NAME)
+using _STLP_STD_NAME::find;
+#endif
+
+namespace base {
+
+///////////////////////////////////////////////////////////////////////////////
+//
+// OVERVIEW:
+//
+// A container for a list of observers. Unlike a normal STL vector or list,
+// this container can be modified during iteration without invalidating the
+// iterator. So, it safely handles the case of an observer removing itself
+// or other observers from the list while observers are being notified.
+//
+// TYPICAL USAGE:
+//
+// class MyWidget {
+// public:
+// ...
+//
+// class Observer {
+// public:
+// virtual void OnFoo(MyWidget* w) = 0;
+// virtual void OnBar(MyWidget* w, int x, int y) = 0;
+// };
+//
+// void AddObserver(Observer* obs) {
+// observer_list_.AddObserver(obs);
+// }
+//
+// void RemoveObserver(Observer* obs) {
+// observer_list_.RemoveObserver(obs);
+// }
+//
+// void NotifyFoo() {
+// FOR_EACH_OBSERVER(Observer, observer_list_, OnFoo(this));
+// }
+//
+// void NotifyBar(int x, int y) {
+// FOR_EACH_OBSERVER(Observer, observer_list_, OnBar(this, x, y));
+// }
+//
+// private:
+// ObserverList<Observer> observer_list_;
+// };
+//
+//
+///////////////////////////////////////////////////////////////////////////////
+
+template <class ObserverType, bool check_empty = false>
+class ObserverList {
+ public:
+ // Enumeration of which observers are notified.
+ enum NotificationType {
+ // Specifies that any observers added during notification are notified.
+    // This is the default type if no type is provided to the constructor.
+ NOTIFY_ALL,
+
+ // Specifies that observers added while sending out notification are not
+ // notified.
+ NOTIFY_EXISTING_ONLY
+ };
+
+ ObserverList() : notify_depth_(0), type_(NOTIFY_ALL) {}
+  explicit ObserverList(NotificationType type)
+      : notify_depth_(0), type_(type) {}
+ ~ObserverList() {
+ // When check_empty is true, assert that the list is empty on destruction.
+ if (check_empty) {
+ Compact();
+ DCHECK_EQ(observers_.size(), 0U);
+ }
+ }
+
+ // Add an observer to the list.
+ void AddObserver(ObserverType* obs) {
+ DCHECK(find(observers_.begin(), observers_.end(), obs) == observers_.end())
+ << "Observers can only be added once!";
+ observers_.push_back(obs);
+ }
+
+ // Remove an observer from the list.
+ void RemoveObserver(ObserverType* obs) {
+ typename ListType::iterator it =
+ std::find(observers_.begin(), observers_.end(), obs);
+ if (it != observers_.end()) {
+ if (notify_depth_) {
+ *it = 0;
+ } else {
+ observers_.erase(it);
+ }
+ }
+ }
+
+ size_t size() const {
+ return observers_.size();
+ }
+
+ ObserverType* GetElementAt(int index) const {
+ return observers_[index];
+ }
+
+ // An iterator class that can be used to access the list of observers. See
+  // also the FOR_EACH_OBSERVER macro defined below.
+ class Iterator {
+ public:
+ explicit Iterator(const ObserverList<ObserverType>& list)
+ : list_(list),
+ index_(0),
+ max_index_(list.type_ == NOTIFY_ALL ?
+ std::numeric_limits<size_t>::max() :
+ list.observers_.size()) {
+ ++list_.notify_depth_;
+ }
+
+ ~Iterator() {
+ if (--list_.notify_depth_ == 0)
+ list_.Compact();
+ }
+
+ ObserverType* GetNext() {
+ ListType& observers = list_.observers_;
+      // Skip over null (removed) observers.
+ size_t max_index = std::min(max_index_, observers.size());
+ while (index_ < max_index && !observers[index_])
+ ++index_;
+ return index_ < max_index ? observers[index_++] : NULL;
+ }
+
+ private:
+ const ObserverList<ObserverType>& list_;
+ size_t index_;
+ size_t max_index_;
+ };
+
+ private:
+ typedef std::vector<ObserverType*> ListType;
+
+ void Compact() const {
+ typename ListType::iterator it = observers_.begin();
+ while (it != observers_.end()) {
+ if (*it) {
+ ++it;
+ } else {
+ it = observers_.erase(it);
+ }
+ }
+ }
+
+  // These are marked mutable so that iteration (which adjusts notify_depth_
+  // and triggers Compact()) can be performed on a const ObserverList.
+ mutable ListType observers_;
+ mutable int notify_depth_;
+ NotificationType type_;
+
+ friend class ObserverList::Iterator;
+
+ DISALLOW_EVIL_CONSTRUCTORS(ObserverList);
+};
+
+} // namespace base
+
+#define FOR_EACH_OBSERVER(ObserverType, observer_list, func) \
+ do { \
+ base::ObserverList<ObserverType>::Iterator it(observer_list); \
+ ObserverType* obs; \
+ while ((obs = it.GetNext()) != NULL) \
+ obs->func; \
+ } while (0)
+
+#endif // BASE_OBSERVER_LIST_H__
diff --git a/ipc/chromium/src/base/pickle.cc b/ipc/chromium/src/base/pickle.cc
new file mode 100644
index 000000000..4422ea125
--- /dev/null
+++ b/ipc/chromium/src/base/pickle.cc
@@ -0,0 +1,588 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/pickle.h"
+
+#include "mozilla/Alignment.h"
+#include "mozilla/CheckedInt.h"
+#include "mozilla/EndianUtils.h"
+#include "mozilla/TypeTraits.h"
+
+#include <stdlib.h>
+
+#include <limits>
+#include <string>
+#include <algorithm>
+
+#include "nsDebug.h"
+
+//------------------------------------------------------------------------------
+
+static_assert(MOZ_ALIGNOF(Pickle::memberAlignmentType) >= MOZ_ALIGNOF(uint32_t),
+ "Insufficient alignment");
+
+static const uint32_t kHeaderSegmentCapacity = 64;
+
+static const uint32_t kDefaultSegmentCapacity = 4096;
+
+static const char kBytePaddingMarker = char(0xbf);
+
+namespace {
+
+// We want to copy data to our payload as efficiently as possible.
+// memcpy fits the bill for copying, but not all compilers or
+// architectures support inlining memcpy from void*, which has unknown
+// static alignment. However, we know that all the members of our
+// payload will be aligned on memberAlignmentType boundaries. We
+// therefore use that knowledge to construct a copier that will copy
+// efficiently (via standard C++ assignment mechanisms) if the datatype
+// needs that alignment or less, and memcpy otherwise. (The compiler
+// may still inline memcpy, of course.)
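+//
+// For example (an illustrative note, not part of the original source): on a
+// 32-bit build where MOZ_ALIGNOF(uint64_t) is 8, Copier<uint64_t, 8, false>
+// is selected and copies the value as two 32-bit halves, while a type whose
+// alignment fits within memberAlignmentType uses Copier<T, sizeof(T), true>,
+// which reduces to a plain assignment.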
+
+template<typename T, size_t size, bool hasSufficientAlignment>
+struct Copier
+{
+ static void Copy(T* dest, const char* iter) {
+ memcpy(dest, iter, sizeof(T));
+ }
+};
+
+// Copying 64-bit quantities happens often enough and can easily be made
+// worthwhile on 32-bit platforms, so handle it specially. Only do it
+// if 64-bit types aren't sufficiently aligned; the alignment
+// requirements for them vary between 32-bit platforms.
+#ifndef HAVE_64BIT_BUILD
+template<typename T>
+struct Copier<T, sizeof(uint64_t), false>
+{
+ static void Copy(T* dest, const char* iter) {
+#if MOZ_LITTLE_ENDIAN
+ static const int loIndex = 0, hiIndex = 1;
+#else
+ static const int loIndex = 1, hiIndex = 0;
+#endif
+ static_assert(MOZ_ALIGNOF(uint32_t*) == MOZ_ALIGNOF(void*),
+ "Pointers have different alignments");
+ const uint32_t* src = reinterpret_cast<const uint32_t*>(iter);
+ uint32_t* uint32dest = reinterpret_cast<uint32_t*>(dest);
+ uint32dest[loIndex] = src[loIndex];
+ uint32dest[hiIndex] = src[hiIndex];
+ }
+};
+#endif
+
+template<typename T, size_t size>
+struct Copier<T, size, true>
+{
+ static void Copy(T* dest, const char* iter) {
+ *dest = *reinterpret_cast<const T*>(iter);
+ }
+};
+
+} // anonymous namespace
+
+PickleIterator::PickleIterator(const Pickle& pickle)
+ : iter_(pickle.buffers_.Iter()) {
+ iter_.Advance(pickle.buffers_, pickle.header_size_);
+}
+
+template<typename T>
+void
+PickleIterator::CopyInto(T* dest) {
+ static_assert(mozilla::IsPod<T>::value, "Copied type must be a POD type");
+ Copier<T, sizeof(T), (MOZ_ALIGNOF(T) <= sizeof(Pickle::memberAlignmentType))>::Copy(dest, iter_.Data());
+}
+
+bool Pickle::IteratorHasRoomFor(const PickleIterator& iter, uint32_t len) const {
+ // Make sure we don't get into trouble where AlignInt(len) == 0.
+ MOZ_RELEASE_ASSERT(len < 64);
+
+ return iter.iter_.HasRoomFor(AlignInt(len));
+}
+
+void Pickle::UpdateIter(PickleIterator* iter, uint32_t bytes) const {
+ // Make sure we don't get into trouble where AlignInt(bytes) == 0.
+ MOZ_RELEASE_ASSERT(bytes < 64);
+
+ iter->iter_.Advance(buffers_, AlignInt(bytes));
+}
+
+// Payload is sizeof(Pickle::memberAlignmentType) aligned.
+
+Pickle::Pickle(uint32_t header_size)
+ : buffers_(AlignInt(header_size), kHeaderSegmentCapacity, kDefaultSegmentCapacity),
+ header_(nullptr),
+ header_size_(AlignInt(header_size)) {
+ DCHECK(static_cast<memberAlignmentType>(header_size) >= sizeof(Header));
+ DCHECK(header_size_ <= kHeaderSegmentCapacity);
+ header_ = reinterpret_cast<Header*>(buffers_.Start());
+ header_->payload_size = 0;
+}
+
+Pickle::Pickle(uint32_t header_size, const char* data, uint32_t length)
+ : buffers_(length, AlignCapacity(length), kDefaultSegmentCapacity),
+ header_(nullptr),
+ header_size_(AlignInt(header_size)) {
+ DCHECK(static_cast<memberAlignmentType>(header_size) >= sizeof(Header));
+ DCHECK(header_size <= kHeaderSegmentCapacity);
+ MOZ_RELEASE_ASSERT(header_size <= length);
+
+ header_ = reinterpret_cast<Header*>(buffers_.Start());
+ memcpy(header_, data, length);
+}
+
+Pickle::Pickle(Pickle&& other)
+ : buffers_(mozilla::Move(other.buffers_)),
+ header_(other.header_),
+ header_size_(other.header_size_) {
+ other.header_ = nullptr;
+}
+
+Pickle::~Pickle() {
+}
+
+Pickle& Pickle::operator=(Pickle&& other) {
+ BufferList tmp = mozilla::Move(other.buffers_);
+ other.buffers_ = mozilla::Move(buffers_);
+ buffers_ = mozilla::Move(tmp);
+
+ //std::swap(buffers_, other.buffers_);
+ std::swap(header_, other.header_);
+ std::swap(header_size_, other.header_size_);
+ return *this;
+}
+
+bool Pickle::ReadBool(PickleIterator* iter, bool* result) const {
+ DCHECK(iter);
+
+ int tmp;
+ if (!ReadInt(iter, &tmp))
+ return false;
+ DCHECK(0 == tmp || 1 == tmp);
+ *result = tmp ? true : false;
+ return true;
+}
+
+bool Pickle::ReadInt16(PickleIterator* iter, int16_t* result) const {
+ DCHECK(iter);
+
+ if (!IteratorHasRoomFor(*iter, sizeof(*result)))
+ return ReadBytesInto(iter, result, sizeof(*result));
+
+ iter->CopyInto(result);
+
+ UpdateIter(iter, sizeof(*result));
+ return true;
+}
+
+bool Pickle::ReadUInt16(PickleIterator* iter, uint16_t* result) const {
+ DCHECK(iter);
+
+ if (!IteratorHasRoomFor(*iter, sizeof(*result)))
+ return ReadBytesInto(iter, result, sizeof(*result));
+
+ iter->CopyInto(result);
+
+ UpdateIter(iter, sizeof(*result));
+ return true;
+}
+
+bool Pickle::ReadInt(PickleIterator* iter, int* result) const {
+ DCHECK(iter);
+
+ if (!IteratorHasRoomFor(*iter, sizeof(*result)))
+ return ReadBytesInto(iter, result, sizeof(*result));
+
+ iter->CopyInto(result);
+
+ UpdateIter(iter, sizeof(*result));
+ return true;
+}
+
+// Always written as a 64-bit value since the size for this type can
+// differ between architectures.
+bool Pickle::ReadLong(PickleIterator* iter, long* result) const {
+ DCHECK(iter);
+
+ int64_t big_result = 0;
+ if (IteratorHasRoomFor(*iter, sizeof(big_result))) {
+ iter->CopyInto(&big_result);
+ UpdateIter(iter, sizeof(big_result));
+ } else {
+ if (!ReadBytesInto(iter, &big_result, sizeof(big_result))) {
+ return false;
+ }
+ }
+ DCHECK(big_result <= LONG_MAX && big_result >= LONG_MIN);
+ *result = static_cast<long>(big_result);
+
+ return true;
+}
+
+// Always written as a 64-bit value since the size for this type can
+// differ between architectures.
+bool Pickle::ReadULong(PickleIterator* iter, unsigned long* result) const {
+ DCHECK(iter);
+
+ uint64_t big_result = 0;
+ if (IteratorHasRoomFor(*iter, sizeof(big_result))) {
+ iter->CopyInto(&big_result);
+ UpdateIter(iter, sizeof(big_result));
+ } else {
+ if (!ReadBytesInto(iter, &big_result, sizeof(big_result))) {
+ return false;
+ }
+ }
+ DCHECK(big_result <= ULONG_MAX);
+ *result = static_cast<unsigned long>(big_result);
+
+ return true;
+}
+
+bool Pickle::ReadLength(PickleIterator* iter, int* result) const {
+ if (!ReadInt(iter, result))
+ return false;
+ return ((*result) >= 0);
+}
+
+// Always written as a 64-bit value since the size for this type can
+// differ between architectures.
+bool Pickle::ReadSize(PickleIterator* iter, size_t* result) const {
+ DCHECK(iter);
+
+ uint64_t big_result = 0;
+ if (IteratorHasRoomFor(*iter, sizeof(big_result))) {
+ iter->CopyInto(&big_result);
+ UpdateIter(iter, sizeof(big_result));
+ } else {
+ if (!ReadBytesInto(iter, &big_result, sizeof(big_result))) {
+ return false;
+ }
+ }
+ DCHECK(big_result <= std::numeric_limits<size_t>::max());
+ *result = static_cast<size_t>(big_result);
+
+ return true;
+}
+
+bool Pickle::ReadInt32(PickleIterator* iter, int32_t* result) const {
+ DCHECK(iter);
+
+ if (!IteratorHasRoomFor(*iter, sizeof(*result)))
+ return ReadBytesInto(iter, result, sizeof(*result));
+
+ iter->CopyInto(result);
+
+ UpdateIter(iter, sizeof(*result));
+ return true;
+}
+
+bool Pickle::ReadUInt32(PickleIterator* iter, uint32_t* result) const {
+ DCHECK(iter);
+
+ if (!IteratorHasRoomFor(*iter, sizeof(*result)))
+ return ReadBytesInto(iter, result, sizeof(*result));
+
+ iter->CopyInto(result);
+
+ UpdateIter(iter, sizeof(*result));
+ return true;
+}
+
+bool Pickle::ReadInt64(PickleIterator* iter, int64_t* result) const {
+ DCHECK(iter);
+
+ if (!IteratorHasRoomFor(*iter, sizeof(*result)))
+ return ReadBytesInto(iter, result, sizeof(*result));
+
+ iter->CopyInto(result);
+
+ UpdateIter(iter, sizeof(*result));
+ return true;
+}
+
+bool Pickle::ReadUInt64(PickleIterator* iter, uint64_t* result) const {
+ DCHECK(iter);
+
+ if (!IteratorHasRoomFor(*iter, sizeof(*result)))
+ return ReadBytesInto(iter, result, sizeof(*result));
+
+ iter->CopyInto(result);
+
+ UpdateIter(iter, sizeof(*result));
+ return true;
+}
+
+bool Pickle::ReadDouble(PickleIterator* iter, double* result) const {
+ DCHECK(iter);
+
+ if (!IteratorHasRoomFor(*iter, sizeof(*result)))
+ return ReadBytesInto(iter, result, sizeof(*result));
+
+ iter->CopyInto(result);
+
+ UpdateIter(iter, sizeof(*result));
+ return true;
+}
+
+// Always written as a 64-bit value since the size for this type can
+// differ between architectures.
+bool Pickle::ReadIntPtr(PickleIterator* iter, intptr_t* result) const {
+ DCHECK(iter);
+
+ int64_t big_result = 0;
+ if (IteratorHasRoomFor(*iter, sizeof(big_result))) {
+ iter->CopyInto(&big_result);
+ UpdateIter(iter, sizeof(big_result));
+ } else {
+ if (!ReadBytesInto(iter, &big_result, sizeof(big_result))) {
+ return false;
+ }
+ }
+
+ DCHECK(big_result <= std::numeric_limits<intptr_t>::max() && big_result >= std::numeric_limits<intptr_t>::min());
+ *result = static_cast<intptr_t>(big_result);
+
+ return true;
+}
+
+bool Pickle::ReadUnsignedChar(PickleIterator* iter, unsigned char* result) const {
+ DCHECK(iter);
+
+ if (!IteratorHasRoomFor(*iter, sizeof(*result)))
+ return ReadBytesInto(iter, result, sizeof(*result));
+
+ iter->CopyInto(result);
+
+ UpdateIter(iter, sizeof(*result));
+ return true;
+}
+
+bool Pickle::ReadString(PickleIterator* iter, std::string* result) const {
+ DCHECK(iter);
+
+ int len;
+ if (!ReadLength(iter, &len))
+ return false;
+
+ auto chars = mozilla::MakeUnique<char[]>(len);
+ if (!ReadBytesInto(iter, chars.get(), len)) {
+ return false;
+ }
+ result->assign(chars.get(), len);
+
+ return true;
+}
+
+bool Pickle::ReadWString(PickleIterator* iter, std::wstring* result) const {
+ DCHECK(iter);
+
+ int len;
+ if (!ReadLength(iter, &len))
+ return false;
+ // Avoid integer multiplication overflow.
+ if (len > INT_MAX / static_cast<int>(sizeof(wchar_t)))
+ return false;
+
+ auto chars = mozilla::MakeUnique<wchar_t[]>(len);
+ if (!ReadBytesInto(iter, chars.get(), len * sizeof(wchar_t))) {
+ return false;
+ }
+ result->assign(chars.get(), len);
+
+ return true;
+}
+
+bool Pickle::ExtractBuffers(PickleIterator* iter, size_t length, BufferList* buffers,
+ uint32_t alignment) const
+{
+ DCHECK(iter);
+ DCHECK(buffers);
+ DCHECK(alignment == 4 || alignment == 8);
+ DCHECK(intptr_t(header_) % alignment == 0);
+
+ if (AlignInt(length) < length) {
+ return false;
+ }
+
+ uint32_t padding_len = intptr_t(iter->iter_.Data()) % alignment;
+ if (!iter->iter_.AdvanceAcrossSegments(buffers_, padding_len)) {
+ return false;
+ }
+
+ bool success;
+ *buffers = const_cast<BufferList*>(&buffers_)->Extract(iter->iter_, length, &success);
+ if (!success) {
+ return false;
+ }
+
+ return iter->iter_.AdvanceAcrossSegments(buffers_, AlignInt(length) - length);
+}
+
+bool Pickle::ReadBytesInto(PickleIterator* iter, void* data, uint32_t length) const {
+ if (AlignInt(length) < length) {
+ return false;
+ }
+
+ if (!buffers_.ReadBytes(iter->iter_, reinterpret_cast<char*>(data), length)) {
+ return false;
+ }
+
+ return iter->iter_.AdvanceAcrossSegments(buffers_, AlignInt(length) - length);
+}
+
+#ifdef MOZ_PICKLE_SENTINEL_CHECKING
+bool Pickle::ReadSentinel(PickleIterator* iter, uint32_t sentinel) const {
+ uint32_t found;
+ if (!ReadUInt32(iter, &found)) {
+ return false;
+ }
+ return found == sentinel;
+}
+
+bool Pickle::WriteSentinel(uint32_t sentinel) {
+ return WriteUInt32(sentinel);
+}
+#endif
+
+void Pickle::EndRead(PickleIterator& iter) const {
+ DCHECK(iter.iter_.Done());
+}
+
+void Pickle::BeginWrite(uint32_t length, uint32_t alignment) {
+ DCHECK(alignment % 4 == 0) << "Must be at least 32-bit aligned!";
+
+ // write at an alignment-aligned offset from the beginning of the header
+ uint32_t offset = AlignInt(header_->payload_size);
+ uint32_t padding = (header_size_ + offset) % alignment;
+ uint32_t new_size = offset + padding + AlignInt(length);
+ MOZ_RELEASE_ASSERT(new_size >= header_->payload_size);
+
+ DCHECK(intptr_t(header_) % alignment == 0);
+
+#ifdef ARCH_CPU_64_BITS
+ DCHECK_LE(length, std::numeric_limits<uint32_t>::max());
+#endif
+
+ if (padding) {
+ MOZ_RELEASE_ASSERT(padding <= 8);
+ static const char padding_data[8] = {
+ kBytePaddingMarker, kBytePaddingMarker, kBytePaddingMarker, kBytePaddingMarker,
+ kBytePaddingMarker, kBytePaddingMarker, kBytePaddingMarker, kBytePaddingMarker,
+ };
+ buffers_.WriteBytes(padding_data, padding);
+ }
+
+ DCHECK((header_size_ + header_->payload_size + padding) % alignment == 0);
+
+ header_->payload_size = new_size;
+}
+
+void Pickle::EndWrite(uint32_t length) {
+  // Pad with marker bytes to keep tools like Purify from complaining about
+  // uninitialized memory.
+ uint32_t padding = AlignInt(length) - length;
+ if (padding) {
+ MOZ_RELEASE_ASSERT(padding <= 4);
+ static const char padding_data[4] = {
+ kBytePaddingMarker, kBytePaddingMarker, kBytePaddingMarker, kBytePaddingMarker,
+ };
+ buffers_.WriteBytes(padding_data, padding);
+ }
+}
+
+bool Pickle::WriteBytes(const void* data, uint32_t data_len, uint32_t alignment) {
+ DCHECK(alignment == 4 || alignment == 8);
+ DCHECK(intptr_t(header_) % alignment == 0);
+
+ BeginWrite(data_len, alignment);
+
+ buffers_.WriteBytes(reinterpret_cast<const char*>(data), data_len);
+
+ EndWrite(data_len);
+ return true;
+}
+
+bool Pickle::WriteString(const std::string& value) {
+#ifdef MOZ_FAULTY
+ std::string v(value);
+ Singleton<mozilla::ipc::Faulty>::get()->FuzzString(v);
+ if (!WriteInt(static_cast<int>(v.size())))
+ return false;
+
+ return WriteBytes(v.data(), static_cast<int>(v.size()));
+#else
+ if (!WriteInt(static_cast<int>(value.size())))
+ return false;
+
+ return WriteBytes(value.data(), static_cast<int>(value.size()));
+#endif
+}
+
+bool Pickle::WriteWString(const std::wstring& value) {
+#ifdef MOZ_FAULTY
+ std::wstring v(value);
+ Singleton<mozilla::ipc::Faulty>::get()->FuzzWString(v);
+ if (!WriteInt(static_cast<int>(v.size())))
+ return false;
+
+ return WriteBytes(v.data(),
+ static_cast<int>(v.size() * sizeof(wchar_t)));
+#else
+ if (!WriteInt(static_cast<int>(value.size())))
+ return false;
+
+ return WriteBytes(value.data(),
+ static_cast<int>(value.size() * sizeof(wchar_t)));
+#endif
+}
+
+bool Pickle::WriteData(const char* data, uint32_t length) {
+#ifdef MOZ_FAULTY
+ std::string v(data, length);
+ Singleton<mozilla::ipc::Faulty>::get()->FuzzData(v, v.size());
+ return WriteInt(v.size()) && WriteBytes(v.data(), v.size());
+#else
+ return WriteInt(length) && WriteBytes(data, length);
+#endif
+}
+
+void Pickle::InputBytes(const char* data, uint32_t length) {
+ buffers_.WriteBytes(data, length);
+}
+
+int32_t* Pickle::GetInt32PtrForTest(uint32_t offset) {
+ size_t pos = buffers_.Size() - offset;
+ BufferList::IterImpl iter(buffers_);
+ MOZ_RELEASE_ASSERT(iter.AdvanceAcrossSegments(buffers_, pos));
+ return reinterpret_cast<int32_t*>(iter.Data());
+}
+
+// static
+uint32_t Pickle::MessageSize(uint32_t header_size,
+ const char* start,
+ const char* end) {
+ DCHECK(header_size == AlignInt(header_size));
+ DCHECK(header_size <= static_cast<memberAlignmentType>(kHeaderSegmentCapacity));
+
+ if (end < start)
+ return 0;
+ size_t length = static_cast<size_t>(end - start);
+ if (length < sizeof(Header))
+ return 0;
+
+ const Header* hdr = reinterpret_cast<const Header*>(start);
+ if (length < header_size)
+ return 0;
+
+ mozilla::CheckedInt<uint32_t> sum(header_size);
+ sum += hdr->payload_size;
+
+ if (!sum.isValid())
+ return 0;
+
+ return sum.value();
+}
diff --git a/ipc/chromium/src/base/pickle.h b/ipc/chromium/src/base/pickle.h
new file mode 100644
index 000000000..d031f6124
--- /dev/null
+++ b/ipc/chromium/src/base/pickle.h
@@ -0,0 +1,326 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_PICKLE_H__
+#define BASE_PICKLE_H__
+
+#include <string>
+
+#include "base/basictypes.h"
+#include "base/logging.h"
+#include "base/string16.h"
+
+#include "mozilla/Attributes.h"
+#include "mozilla/BufferList.h"
+#include "mozilla/mozalloc.h"
+
+#ifdef MOZ_FAULTY
+#include "base/singleton.h"
+#include "mozilla/ipc/Faulty.h"
+#endif
+
+#if !defined(RELEASE_OR_BETA) || defined(DEBUG)
+#define MOZ_PICKLE_SENTINEL_CHECKING
+#endif
+
+class Pickle;
+
+class PickleIterator {
+public:
+ explicit PickleIterator(const Pickle& pickle);
+
+private:
+ friend class Pickle;
+
+ mozilla::BufferList<InfallibleAllocPolicy>::IterImpl iter_;
+
+ template<typename T>
+ void CopyInto(T* dest);
+};
+
+// This class provides facilities for basic binary value packing and unpacking.
+//
+// The Pickle class supports appending primitive values (ints, strings, etc.)
+// to a pickle instance. The Pickle instance grows its internal memory buffer
+// dynamically to hold the sequence of primitive values. The internal memory
+// buffer is exposed as the "data" of the Pickle. This "data" can be passed
+// to a Pickle object to initialize it for reading.
+//
+// When reading from a Pickle object, it is important for the consumer to know
+// what value types to read and in what order to read them as the Pickle does
+// not keep track of the type of data written to it.
+//
+// The Pickle's data has a header which contains the size of the Pickle's
+// payload. It can optionally support additional space in the header. That
+// space is controlled by the header_size parameter passed to the Pickle
+// constructor.
+//
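+// Illustrative usage sketch (not part of the original header; the values are
+// hypothetical). A writer appends values in order and a reader extracts them
+// in the same order:
+//
+//   Pickle pickle(sizeof(Pickle::Header));
+//   pickle.WriteInt(42);
+//   pickle.WriteString(std::string("hello"));
+//
+//   PickleIterator iter(pickle);
+//   int value;
+//   std::string text;
+//   if (pickle.ReadInt(&iter, &value) && pickle.ReadString(&iter, &text)) {
+//     // value == 42 and text == "hello" on success.
+//   }
+//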
+class Pickle {
+ public:
+ ~Pickle();
+
+ Pickle() = delete;
+
+ // Initialize a Pickle object with the specified header size in bytes, which
+ // must be greater-than-or-equal-to sizeof(Pickle::Header). The header size
+  // will be rounded up to ensure that the header size is 32-bit aligned.
+ explicit Pickle(uint32_t header_size);
+
+ Pickle(uint32_t header_size, const char* data, uint32_t length);
+
+ Pickle(const Pickle& other) = delete;
+
+ Pickle(Pickle&& other);
+
+  // Copy assignment is not supported; Pickles can only be moved.
+ Pickle& operator=(const Pickle& other) = delete;
+
+ Pickle& operator=(Pickle&& other);
+
+ // Returns the size of the Pickle's data.
+ uint32_t size() const { return header_size_ + header_->payload_size; }
+
+ typedef mozilla::BufferList<InfallibleAllocPolicy> BufferList;
+
+ const BufferList& Buffers() const { return buffers_; }
+
+ uint32_t CurrentSize() const { return buffers_.Size(); }
+
+  // Methods for reading the payload of the Pickle. To read from the start of
+  // the Pickle, construct a PickleIterator from the Pickle. If successful,
+  // these methods return true. Otherwise, false is returned to indicate that
+  // the result could not be extracted.
+ MOZ_MUST_USE bool ReadBool(PickleIterator* iter, bool* result) const;
+ MOZ_MUST_USE bool ReadInt16(PickleIterator* iter, int16_t* result) const;
+ MOZ_MUST_USE bool ReadUInt16(PickleIterator* iter, uint16_t* result) const;
+ MOZ_MUST_USE bool ReadShort(PickleIterator* iter, short* result) const;
+ MOZ_MUST_USE bool ReadInt(PickleIterator* iter, int* result) const;
+ MOZ_MUST_USE bool ReadLong(PickleIterator* iter, long* result) const;
+ MOZ_MUST_USE bool ReadULong(PickleIterator* iter, unsigned long* result) const;
+ MOZ_MUST_USE bool ReadSize(PickleIterator* iter, size_t* result) const;
+ MOZ_MUST_USE bool ReadInt32(PickleIterator* iter, int32_t* result) const;
+ MOZ_MUST_USE bool ReadUInt32(PickleIterator* iter, uint32_t* result) const;
+ MOZ_MUST_USE bool ReadInt64(PickleIterator* iter, int64_t* result) const;
+ MOZ_MUST_USE bool ReadUInt64(PickleIterator* iter, uint64_t* result) const;
+ MOZ_MUST_USE bool ReadDouble(PickleIterator* iter, double* result) const;
+ MOZ_MUST_USE bool ReadIntPtr(PickleIterator* iter, intptr_t* result) const;
+ MOZ_MUST_USE bool ReadUnsignedChar(PickleIterator* iter, unsigned char* result) const;
+ MOZ_MUST_USE bool ReadString(PickleIterator* iter, std::string* result) const;
+ MOZ_MUST_USE bool ReadWString(PickleIterator* iter, std::wstring* result) const;
+ MOZ_MUST_USE bool ReadBytesInto(PickleIterator* iter, void* data, uint32_t length) const;
+ MOZ_MUST_USE bool ExtractBuffers(PickleIterator* iter, size_t length, BufferList* buffers,
+ uint32_t alignment = sizeof(memberAlignmentType)) const;
+
+ // Safer version of ReadInt() checks for the result not being negative.
+ // Use it for reading the object sizes.
+ MOZ_MUST_USE bool ReadLength(PickleIterator* iter, int* result) const;
+
+ MOZ_MUST_USE bool ReadSentinel(PickleIterator* iter, uint32_t sentinel) const
+#ifdef MOZ_PICKLE_SENTINEL_CHECKING
+ ;
+#else
+ {
+ return true;
+ }
+#endif
+
+ void EndRead(PickleIterator& iter) const;
+
+ // Methods for adding to the payload of the Pickle. These values are
+ // appended to the end of the Pickle's payload. When reading values from a
+ // Pickle, it is important to read them in the order in which they were added
+ // to the Pickle.
+ bool WriteBool(bool value) {
+#ifdef MOZ_FAULTY
+ Singleton<mozilla::ipc::Faulty>::get()->FuzzBool(&value);
+#endif
+ return WriteInt(value ? 1 : 0);
+ }
+ bool WriteInt16(int16_t value) {
+#ifdef MOZ_FAULTY
+ Singleton<mozilla::ipc::Faulty>::get()->FuzzInt16(&value);
+#endif
+ return WriteBytes(&value, sizeof(value));
+ }
+ bool WriteUInt16(uint16_t value) {
+#ifdef MOZ_FAULTY
+ Singleton<mozilla::ipc::Faulty>::get()->FuzzUInt16(&value);
+#endif
+ return WriteBytes(&value, sizeof(value));
+ }
+ bool WriteInt(int value) {
+#ifdef MOZ_FAULTY
+ Singleton<mozilla::ipc::Faulty>::get()->FuzzInt(&value);
+#endif
+ return WriteBytes(&value, sizeof(value));
+ }
+ bool WriteLong(long value) {
+ // Always written as a 64-bit value since the size for this type can
+ // differ between architectures.
+#ifdef MOZ_FAULTY
+ Singleton<mozilla::ipc::Faulty>::get()->FuzzLong(&value);
+#endif
+ return WriteInt64(int64_t(value));
+ }
+ bool WriteULong(unsigned long value) {
+ // Always written as a 64-bit value since the size for this type can
+ // differ between architectures.
+#ifdef MOZ_FAULTY
+ Singleton<mozilla::ipc::Faulty>::get()->FuzzULong(&value);
+#endif
+ return WriteUInt64(uint64_t(value));
+ }
+ bool WriteSize(size_t value) {
+ // Always written as a 64-bit value since the size for this type can
+ // differ between architectures.
+#ifdef MOZ_FAULTY
+ Singleton<mozilla::ipc::Faulty>::get()->FuzzSize(&value);
+#endif
+ return WriteUInt64(uint64_t(value));
+ }
+ bool WriteInt32(int32_t value) {
+#ifdef MOZ_FAULTY
+ Singleton<mozilla::ipc::Faulty>::get()->FuzzInt(&value);
+#endif
+ return WriteBytes(&value, sizeof(value));
+ }
+ bool WriteUInt32(uint32_t value) {
+#ifdef MOZ_FAULTY
+ Singleton<mozilla::ipc::Faulty>::get()->FuzzUInt32(&value);
+#endif
+ return WriteBytes(&value, sizeof(value));
+ }
+ bool WriteInt64(int64_t value) {
+#ifdef MOZ_FAULTY
+ Singleton<mozilla::ipc::Faulty>::get()->FuzzInt64(&value);
+#endif
+ return WriteBytes(&value, sizeof(value));
+ }
+ bool WriteUInt64(uint64_t value) {
+#ifdef MOZ_FAULTY
+ Singleton<mozilla::ipc::Faulty>::get()->FuzzUInt64(&value);
+#endif
+ return WriteBytes(&value, sizeof(value));
+ }
+ bool WriteDouble(double value) {
+#ifdef MOZ_FAULTY
+ Singleton<mozilla::ipc::Faulty>::get()->FuzzDouble(&value);
+#endif
+ return WriteBytes(&value, sizeof(value));
+ }
+ bool WriteIntPtr(intptr_t value) {
+ // Always written as a 64-bit value since the size for this type can
+ // differ between architectures.
+ return WriteInt64(int64_t(value));
+ }
+ bool WriteUnsignedChar(unsigned char value) {
+#ifdef MOZ_FAULTY
+ Singleton<mozilla::ipc::Faulty>::get()->FuzzUChar(&value);
+#endif
+ return WriteBytes(&value, sizeof(value));
+ }
+ bool WriteString(const std::string& value);
+ bool WriteWString(const std::wstring& value);
+ bool WriteData(const char* data, uint32_t length);
+ bool WriteBytes(const void* data, uint32_t data_len,
+ uint32_t alignment = sizeof(memberAlignmentType));
+
+ bool WriteSentinel(uint32_t sentinel)
+#ifdef MOZ_PICKLE_SENTINEL_CHECKING
+ ;
+#else
+ {
+ return true;
+ }
+#endif
+
+ int32_t* GetInt32PtrForTest(uint32_t offset);
+
+ void InputBytes(const char* data, uint32_t length);
+
+ // Payload follows after allocation of Header (header size is customizable).
+ struct Header {
+ uint32_t payload_size; // Specifies the size of the payload.
+ };
+
+ // Returns the header, cast to a user-specified type T. The type T must be a
+ // subclass of Header and its size must correspond to the header_size passed
+ // to the Pickle constructor.
+ template <class T>
+ T* headerT() {
+ DCHECK(sizeof(T) == header_size_);
+ return static_cast<T*>(header_);
+ }
+ template <class T>
+ const T* headerT() const {
+ DCHECK(sizeof(T) == header_size_);
+ return static_cast<const T*>(header_);
+ }
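+
+  // Illustrative sketch (MyHeader is a hypothetical subclass, not part of
+  // this header):
+  //
+  //   struct MyHeader : Pickle::Header {
+  //     uint32_t message_type;
+  //   };
+  //   Pickle pickle(sizeof(MyHeader));
+  //   pickle.headerT<MyHeader>()->message_type = 1;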
+
+ typedef uint32_t memberAlignmentType;
+
+ protected:
+ uint32_t payload_size() const { return header_->payload_size; }
+
+  // Prepares the buffer for writing the specified amount of data at the given
+  // alignment, emitting any padding bytes needed to reach that alignment and
+  // growing the recorded payload size. Call EndWrite with the same length to
+  // pad out to the next aligned boundary after the data is written.
+ void BeginWrite(uint32_t length, uint32_t alignment);
+
+  // Completes the write operation by padding the data with marker bytes up to
+  // the next aligned boundary. Should be paired with BeginWrite, but it does
+  // not necessarily have to be called immediately after the data is written.
+ void EndWrite(uint32_t length);
+
+ // Round 'bytes' up to the next multiple of 'alignment'. 'alignment' must be
+ // a power of 2.
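+  // For example (illustrative): ConstantAligner<8>::align(5) == 8 and
+  // ConstantAligner<4>::align(8) == 8.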
+ template<uint32_t alignment> struct ConstantAligner {
+ static uint32_t align(int bytes) {
+ static_assert((alignment & (alignment - 1)) == 0,
+ "alignment must be a power of two");
+ return (bytes + (alignment - 1)) & ~static_cast<uint32_t>(alignment - 1);
+ }
+ };
+
+ static uint32_t AlignInt(int bytes) {
+ return ConstantAligner<sizeof(memberAlignmentType)>::align(bytes);
+ }
+
+ static uint32_t AlignCapacity(int bytes) {
+ return ConstantAligner<kSegmentAlignment>::align(bytes);
+ }
+
+ // Returns true if the given iterator could point to data with the given
+ // length. If there is no room for the given data before the end of the
+ // payload, returns false.
+ bool IteratorHasRoomFor(const PickleIterator& iter, uint32_t len) const;
+
+  // Moves the iterator forward by the given number of bytes, rounded up to
+  // keep alignment. The pointer (iterator) itself is NOT necessarily aligned,
+  // but the change in the pointer is guaranteed to be a multiple of
+  // sizeof(memberAlignmentType).
+ void UpdateIter(PickleIterator* iter, uint32_t bytes) const;
+
+ // Figure out how big the message starting at range_start is. Returns 0 if
+  // there's not enough data to determine (i.e., if [range_start, range_end) does
+ // not contain enough of the message header to know the size).
+ static uint32_t MessageSize(uint32_t header_size,
+ const char* range_start,
+ const char* range_end);
+
+  // Segment capacities are aligned to 8 bytes to ensure that all reads/writes
+ // at 8-byte aligned offsets will be on 8-byte aligned pointers.
+ static const uint32_t kSegmentAlignment = 8;
+
+ private:
+ friend class PickleIterator;
+
+ BufferList buffers_;
+ Header* header_;
+ uint32_t header_size_;
+};
+
+#endif // BASE_PICKLE_H__
diff --git a/ipc/chromium/src/base/platform_file.h b/ipc/chromium/src/base/platform_file.h
new file mode 100644
index 000000000..326a7a0e1
--- /dev/null
+++ b/ipc/chromium/src/base/platform_file.h
@@ -0,0 +1,48 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_PLATFORM_FILE_H_
+#define BASE_PLATFORM_FILE_H_
+
+#include "build/build_config.h"
+#if defined(OS_WIN)
+#include <windows.h>
+#endif
+
+#include <string>
+
+namespace base {
+
+#if defined(OS_WIN)
+typedef HANDLE PlatformFile;
+const PlatformFile kInvalidPlatformFileValue = INVALID_HANDLE_VALUE;
+#elif defined(OS_POSIX)
+typedef int PlatformFile;
+const PlatformFile kInvalidPlatformFileValue = -1;
+#endif
+
+enum PlatformFileFlags {
+ PLATFORM_FILE_OPEN = 1,
+ PLATFORM_FILE_CREATE = 2,
+ PLATFORM_FILE_OPEN_ALWAYS = 4, // May create a new file.
+ PLATFORM_FILE_CREATE_ALWAYS = 8, // May overwrite an old file.
+ PLATFORM_FILE_READ = 16,
+ PLATFORM_FILE_WRITE = 32,
+ PLATFORM_FILE_EXCLUSIVE_READ = 64, // EXCLUSIVE is opposite of Windows SHARE
+ PLATFORM_FILE_EXCLUSIVE_WRITE = 128,
+ PLATFORM_FILE_ASYNC = 256
+};
+
+// Creates or opens the given file. If PLATFORM_FILE_OPEN_ALWAYS is used, and
+// |created| is provided, |created| will be set to true if the file was created
+// or to false in case the file was just opened.
+PlatformFile CreatePlatformFile(const std::wstring& name,
+ int flags,
+ bool* created);
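+
+// Illustrative call (a sketch, not part of the original header; the file name
+// is hypothetical):
+//
+//   bool created = false;
+//   PlatformFile file = CreatePlatformFile(
+//       L"example.tmp",
+//       PLATFORM_FILE_OPEN_ALWAYS | PLATFORM_FILE_READ | PLATFORM_FILE_WRITE,
+//       &created);
+//   if (file != kInvalidPlatformFileValue) {
+//     // |created| tells us whether a new file was created or an existing one
+//     // was opened.
+//   }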
+
+} // namespace base
+
+#endif // BASE_PLATFORM_FILE_H_
diff --git a/ipc/chromium/src/base/platform_file_posix.cc b/ipc/chromium/src/base/platform_file_posix.cc
new file mode 100644
index 000000000..4289afbc6
--- /dev/null
+++ b/ipc/chromium/src/base/platform_file_posix.cc
@@ -0,0 +1,70 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/platform_file.h"
+
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <errno.h>
+#ifdef ANDROID
+#include <linux/stat.h>
+#endif
+
+#include "base/logging.h"
+#include "base/string_util.h"
+
+namespace base {
+
+// TODO(erikkay): does it make sense to support PLATFORM_FILE_EXCLUSIVE_* here?
+PlatformFile CreatePlatformFile(const std::wstring& name,
+ int flags,
+ bool* created) {
+ int open_flags = 0;
+ if (flags & PLATFORM_FILE_CREATE)
+ open_flags = O_CREAT | O_EXCL;
+
+ if (flags & PLATFORM_FILE_CREATE_ALWAYS) {
+ DCHECK(!open_flags);
+ open_flags = O_CREAT | O_TRUNC;
+ }
+
+ if (!open_flags && !(flags & PLATFORM_FILE_OPEN) &&
+ !(flags & PLATFORM_FILE_OPEN_ALWAYS)) {
+ NOTREACHED();
+ errno = ENOTSUP;
+ return kInvalidPlatformFileValue;
+ }
+
+ if (flags & PLATFORM_FILE_WRITE && flags & PLATFORM_FILE_READ) {
+ open_flags |= O_RDWR;
+ } else if (flags & PLATFORM_FILE_WRITE) {
+ open_flags |= O_WRONLY;
+ } else if (!(flags & PLATFORM_FILE_READ)) {
+ NOTREACHED();
+ }
+
+ DCHECK(O_RDONLY == 0);
+
+ int descriptor = open(WideToUTF8(name).c_str(), open_flags,
+ S_IRUSR | S_IWUSR);
+
+ if (flags & PLATFORM_FILE_OPEN_ALWAYS) {
+ if (descriptor > 0) {
+ if (created)
+ *created = false;
+ } else {
+ open_flags |= O_CREAT;
+ descriptor = open(WideToUTF8(name).c_str(), open_flags,
+ S_IRUSR | S_IWUSR);
+ if (created && descriptor > 0)
+ *created = true;
+ }
+ }
+
+ return descriptor;
+}
+
+} // namespace base
diff --git a/ipc/chromium/src/base/platform_file_win.cc b/ipc/chromium/src/base/platform_file_win.cc
new file mode 100644
index 000000000..a624acd65
--- /dev/null
+++ b/ipc/chromium/src/base/platform_file_win.cc
@@ -0,0 +1,64 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/platform_file.h"
+
+#include "base/logging.h"
+
+namespace base {
+
+PlatformFile CreatePlatformFile(const std::wstring& name,
+ int flags,
+ bool* created) {
+ DWORD disposition = 0;
+
+ if (flags & PLATFORM_FILE_OPEN)
+ disposition = OPEN_EXISTING;
+
+ if (flags & PLATFORM_FILE_CREATE) {
+ DCHECK(!disposition);
+ disposition = CREATE_NEW;
+ }
+
+ if (flags & PLATFORM_FILE_OPEN_ALWAYS) {
+ DCHECK(!disposition);
+ disposition = OPEN_ALWAYS;
+ }
+
+ if (flags & PLATFORM_FILE_CREATE_ALWAYS) {
+ DCHECK(!disposition);
+ disposition = CREATE_ALWAYS;
+ }
+
+ if (!disposition) {
+ NOTREACHED();
+ return NULL;
+ }
+
+ DWORD access = (flags & PLATFORM_FILE_READ) ? GENERIC_READ : 0;
+ if (flags & PLATFORM_FILE_WRITE)
+ access |= GENERIC_WRITE;
+
+ DWORD sharing = (flags & PLATFORM_FILE_EXCLUSIVE_READ) ? 0 : FILE_SHARE_READ;
+ if (!(flags & PLATFORM_FILE_EXCLUSIVE_WRITE))
+ sharing |= FILE_SHARE_WRITE;
+
+ DWORD create_flags = 0;
+ if (flags & PLATFORM_FILE_ASYNC)
+ create_flags |= FILE_FLAG_OVERLAPPED;
+
+ HANDLE file = CreateFile(name.c_str(), access, sharing, NULL, disposition,
+ create_flags, NULL);
+
+ if ((flags & PLATFORM_FILE_OPEN_ALWAYS) && created &&
+ INVALID_HANDLE_VALUE != file) {
+ *created = (ERROR_ALREADY_EXISTS != GetLastError());
+ }
+
+ return file;
+}
+
+} // namespace base
diff --git a/ipc/chromium/src/base/platform_thread.h b/ipc/chromium/src/base/platform_thread.h
new file mode 100644
index 000000000..727a13a84
--- /dev/null
+++ b/ipc/chromium/src/base/platform_thread.h
@@ -0,0 +1,87 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// WARNING: You should *NOT* be using this class directly. PlatformThread is
+// the low-level platform-specific abstraction to the OS's threading interface.
+// You should instead be using a message-loop driven Thread, see thread.h.
+
+#ifndef BASE_PLATFORM_THREAD_H_
+#define BASE_PLATFORM_THREAD_H_
+
+#include "base/basictypes.h"
+
+// PlatformThreadHandle should not be assumed to be a numeric type, since the
+// standard intends to allow pthread_t to be a structure. This means you
+// should not initialize it to a value, like 0. If it's a member variable, the
+// constructor can safely "value initialize" using () in the initializer list.
+#if defined(OS_WIN)
+#include <windows.h>
+typedef DWORD PlatformThreadId;
+typedef void* PlatformThreadHandle; // HANDLE
+#elif defined(OS_POSIX)
+#include <pthread.h>
+typedef pthread_t PlatformThreadHandle;
+#if defined(OS_LINUX) || defined(OS_OPENBSD) || defined(__GLIBC__)
+#include <unistd.h>
+typedef pid_t PlatformThreadId;
+#elif defined(OS_BSD)
+#include <sys/types.h>
+typedef lwpid_t PlatformThreadId;
+#elif defined(OS_MACOSX)
+#include <mach/mach.h>
+typedef mach_port_t PlatformThreadId;
+#endif
+#endif
+
+// A namespace for low-level thread functions.
+class PlatformThread {
+ public:
+ // Gets the current thread id, which may be useful for logging purposes.
+ static PlatformThreadId CurrentId();
+
+ // Yield the current thread so another thread can be scheduled.
+ static void YieldCurrentThread();
+
+ // Sleeps for the specified duration (units are milliseconds).
+ static void Sleep(int duration_ms);
+
+ // Sets the thread name visible to a debugger. This has no effect otherwise.
+ static void SetName(const char* name);
+
+ // Implement this interface to run code on a background thread. Your
+ // ThreadMain method will be called on the newly created thread.
+ class Delegate {
+ public:
+ virtual ~Delegate() {}
+ virtual void ThreadMain() = 0;
+ };
+
+ // Creates a new thread. The |stack_size| parameter can be 0 to indicate
+ // that the default stack size should be used. Upon success,
+ // |*thread_handle| will be assigned a handle to the newly created thread,
+ // and |delegate|'s ThreadMain method will be executed on the newly created
+ // thread.
+ // NOTE: When you are done with the thread handle, you must call Join to
+ // release system resources associated with the thread. You must ensure that
+ // the Delegate object outlives the thread.
+ static bool Create(size_t stack_size, Delegate* delegate,
+ PlatformThreadHandle* thread_handle);
+
+ // CreateNonJoinable() does the same thing as Create() except the thread
+ // cannot be Join()'d. Therefore, it also does not output a
+ // PlatformThreadHandle.
+ static bool CreateNonJoinable(size_t stack_size, Delegate* delegate);
+
+ // Joins with a thread created via the Create function. This function blocks
+ // the caller until the designated thread exits. This will invalidate
+ // |thread_handle|.
+ static void Join(PlatformThreadHandle thread_handle);
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(PlatformThread);
+};
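+
+// Illustrative usage sketch (not part of the original header; MyWorker is a
+// hypothetical delegate):
+//
+//   class MyWorker : public PlatformThread::Delegate {
+//    public:
+//     virtual void ThreadMain() {
+//       // Work runs on the newly created thread.
+//     }
+//   };
+//
+//   MyWorker worker;
+//   PlatformThreadHandle handle;
+//   if (PlatformThread::Create(0 /* default stack size */, &worker, &handle))
+//     PlatformThread::Join(handle);  // |worker| must outlive the thread.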
+
+#endif // BASE_PLATFORM_THREAD_H_
diff --git a/ipc/chromium/src/base/platform_thread_mac.mm b/ipc/chromium/src/base/platform_thread_mac.mm
new file mode 100644
index 000000000..ef558a796
--- /dev/null
+++ b/ipc/chromium/src/base/platform_thread_mac.mm
@@ -0,0 +1,77 @@
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/platform_thread.h"
+
+#import <Foundation/Foundation.h>
+#include <dlfcn.h>
+
+#include "base/logging.h"
+#include "base/scoped_nsautorelease_pool.h"
+
+// A simple class that demonstrates our impressive ability to do nothing.
+@interface NoOp : NSObject
+
+// Does the deed. Or does it?
++ (void)noOp;
+
+@end
+
+@implementation NoOp
+
++ (void)noOp {
+}
+
+@end
+
+namespace base {
+
+// If Cocoa is to be used on more than one thread, it must know that the
+// application is multithreaded. Since it's possible to enter Cocoa code
+// from threads created by pthread_create, Cocoa won't necessarily
+// be aware that the application is multithreaded. Spawning an NSThread is
+// enough to get Cocoa to set up for multithreaded operation, so this is done
+// if necessary before pthread_create spawns any threads.
+//
+// http://developer.apple.com/documentation/Cocoa/Conceptual/Multithreading/CreatingThreads/chapter_4_section_4.html
+void InitThreading() {
+ // this is called early in startup, before the event loop, so provide
+ // an autorelease pool to prevent leaks here
+ ScopedNSAutoreleasePool pool;
+
+ static BOOL multithreaded = [NSThread isMultiThreaded];
+ if (!multithreaded) {
+ [NSThread detachNewThreadSelector:@selector(noOp)
+ toTarget:[NoOp class]
+ withObject:nil];
+ multithreaded = YES;
+
+ DCHECK([NSThread isMultiThreaded]);
+ }
+}
+
+} // namespace base
+
+// static
+void PlatformThread::SetName(const char* name) {
+ // pthread_setname_np is only available in 10.6 or later, so test
+ // for it at runtime.
+ int (*dynamic_pthread_setname_np)(const char*);
+ *reinterpret_cast<void**>(&dynamic_pthread_setname_np) =
+ dlsym(RTLD_DEFAULT, "pthread_setname_np");
+ if (!dynamic_pthread_setname_np)
+ return;
+
+ // Mac OS X does not expose the length limit of the name, so
+ // hardcode it.
+ const int kMaxNameLength = 63;
+ std::string shortened_name = std::string(name).substr(0, kMaxNameLength);
+  // pthread_setname_np() fails (harmlessly) in the sandbox, ignore when it does.
+ // See http://crbug.com/47058
+
+  // The name parameter is copied, thus it's safe to release it after calling.
+ // Checked against the bionic implementation in bionic/libc/bionic/pthread.c
+ dynamic_pthread_setname_np(shortened_name.c_str());
+}
+
diff --git a/ipc/chromium/src/base/platform_thread_posix.cc b/ipc/chromium/src/base/platform_thread_posix.cc
new file mode 100644
index 000000000..6c978336e
--- /dev/null
+++ b/ipc/chromium/src/base/platform_thread_posix.cc
@@ -0,0 +1,164 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/platform_thread.h"
+
+#include <errno.h>
+#include <sched.h>
+
+#if defined(OS_MACOSX)
+#include <mach/mach.h>
+#elif defined(OS_NETBSD)
+#include <lwp.h>
+#elif defined(OS_LINUX)
+#include <sys/syscall.h>
+#include <sys/prctl.h>
+#endif
+
+#if !defined(OS_MACOSX)
+#include <unistd.h>
+#endif
+
+#if defined(OS_BSD) && !defined(OS_NETBSD) && !defined(__GLIBC__)
+#include <pthread_np.h>
+#endif
+
+#if defined(OS_MACOSX)
+namespace base {
+void InitThreading();
+} // namespace base
+#endif
+
+static void* ThreadFunc(void* closure) {
+ PlatformThread::Delegate* delegate =
+ static_cast<PlatformThread::Delegate*>(closure);
+ delegate->ThreadMain();
+ return NULL;
+}
+
+// static
+PlatformThreadId PlatformThread::CurrentId() {
+ // Pthreads doesn't have the concept of a thread ID, so we have to reach down
+ // into the kernel.
+#if defined(OS_MACOSX)
+ mach_port_t port = mach_thread_self();
+ mach_port_deallocate(mach_task_self(), port);
+ return port;
+#elif defined(OS_LINUX)
+#ifdef MOZ_WIDGET_GONK
+ return (intptr_t) (pthread_self());
+#else
+ return syscall(__NR_gettid);
+#endif
+#elif defined(OS_OPENBSD) || defined(__GLIBC__)
+ return (intptr_t) (pthread_self());
+#elif defined(OS_NETBSD)
+ return _lwp_self();
+#elif defined(OS_DRAGONFLY)
+ return lwp_gettid();
+#elif defined(OS_FREEBSD)
+ return pthread_getthreadid_np();
+#endif
+}
+
+// static
+void PlatformThread::YieldCurrentThread() {
+ sched_yield();
+}
+
+// static
+void PlatformThread::Sleep(int duration_ms) {
+ struct timespec sleep_time, remaining;
+
+ // Contains the portion of duration_ms >= 1 sec.
+ sleep_time.tv_sec = duration_ms / 1000;
+ duration_ms -= sleep_time.tv_sec * 1000;
+
+ // Contains the portion of duration_ms < 1 sec.
+ sleep_time.tv_nsec = duration_ms * 1000 * 1000; // nanoseconds.
+
+ while (nanosleep(&sleep_time, &remaining) == -1 && errno == EINTR)
+ sleep_time = remaining;
+}
+
+#ifndef OS_MACOSX
+// Mac is implemented in platform_thread_mac.mm.
+
+// static
+void PlatformThread::SetName(const char* name) {
+  // On Linux we can get the thread names to show up in the debugger by setting
+ // the process name for the LWP. We don't want to do this for the main
+ // thread because that would rename the process, causing tools like killall
+ // to stop working.
+ if (PlatformThread::CurrentId() == getpid())
+ return;
+
+ // http://0pointer.de/blog/projects/name-your-threads.html
+ // Set the name for the LWP (which gets truncated to 15 characters).
+ // Note that glibc also has a 'pthread_setname_np' api, but it may not be
+  // available everywhere and its only benefit over using prctl directly is
+ // that it can set the name of threads other than the current thread.
+#if defined(OS_LINUX)
+ prctl(PR_SET_NAME, reinterpret_cast<uintptr_t>(name), 0, 0, 0);
+#elif defined(OS_NETBSD)
+ pthread_setname_np(pthread_self(), "%s", (void *)name);
+#elif defined(OS_BSD) && !defined(__GLIBC__)
+ pthread_set_name_np(pthread_self(), name);
+#else
+#endif
+}
+#endif // !OS_MACOSX
+
+namespace {
+
+bool CreateThread(size_t stack_size, bool joinable,
+ PlatformThread::Delegate* delegate,
+ PlatformThreadHandle* thread_handle) {
+#if defined(OS_MACOSX)
+ base::InitThreading();
+#endif // OS_MACOSX
+
+ bool success = false;
+ pthread_attr_t attributes;
+ pthread_attr_init(&attributes);
+
+ // Pthreads are joinable by default, so only specify the detached attribute if
+ // the thread should be non-joinable.
+ if (!joinable) {
+ pthread_attr_setdetachstate(&attributes, PTHREAD_CREATE_DETACHED);
+ }
+
+ if (stack_size > 0)
+ pthread_attr_setstacksize(&attributes, stack_size);
+
+ success = !pthread_create(thread_handle, &attributes, ThreadFunc, delegate);
+
+ pthread_attr_destroy(&attributes);
+ return success;
+}
+
+} // anonymous namespace
+
+// static
+bool PlatformThread::Create(size_t stack_size, Delegate* delegate,
+ PlatformThreadHandle* thread_handle) {
+ return CreateThread(stack_size, true /* joinable thread */,
+ delegate, thread_handle);
+}
+
+// static
+bool PlatformThread::CreateNonJoinable(size_t stack_size, Delegate* delegate) {
+ PlatformThreadHandle unused;
+
+ bool result = CreateThread(stack_size, false /* non-joinable thread */,
+ delegate, &unused);
+ return result;
+}
+
+// static
+void PlatformThread::Join(PlatformThreadHandle thread_handle) {
+ pthread_join(thread_handle, NULL);
+}
diff --git a/ipc/chromium/src/base/platform_thread_win.cc b/ipc/chromium/src/base/platform_thread_win.cc
new file mode 100644
index 000000000..2e7614230
--- /dev/null
+++ b/ipc/chromium/src/base/platform_thread_win.cc
@@ -0,0 +1,109 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/platform_thread.h"
+
+#include "base/logging.h"
+#include "base/win_util.h"
+
+namespace {
+
+// The information on how to set the thread name comes from
+// an MSDN article: http://msdn2.microsoft.com/en-us/library/xcb2z8hs.aspx
+const DWORD kVCThreadNameException = 0x406D1388;
+
+typedef struct tagTHREADNAME_INFO {
+ DWORD dwType; // Must be 0x1000.
+ LPCSTR szName; // Pointer to name (in user addr space).
+ DWORD dwThreadID; // Thread ID (-1=caller thread).
+ DWORD dwFlags; // Reserved for future use, must be zero.
+} THREADNAME_INFO;
+
+DWORD __stdcall ThreadFunc(void* closure) {
+ PlatformThread::Delegate* delegate =
+ static_cast<PlatformThread::Delegate*>(closure);
+ delegate->ThreadMain();
+ return 0;
+}
+
+} // namespace
+
+// static
+PlatformThreadId PlatformThread::CurrentId() {
+ return GetCurrentThreadId();
+}
+
+// static
+void PlatformThread::YieldCurrentThread() {
+ ::Sleep(0);
+}
+
+// static
+void PlatformThread::Sleep(int duration_ms) {
+ ::Sleep(duration_ms);
+}
+
+// static
+void PlatformThread::SetName(const char* name) {
+#ifdef HAVE_SEH_EXCEPTIONS
+ // The debugger needs to be around to catch the name in the exception. If
+ // there isn't a debugger, we are just needlessly throwing an exception.
+ if (!::IsDebuggerPresent())
+ return;
+
+ THREADNAME_INFO info;
+ info.dwType = 0x1000;
+ info.szName = name;
+ info.dwThreadID = CurrentId();
+ info.dwFlags = 0;
+
+ MOZ_SEH_TRY {
+ RaiseException(kVCThreadNameException, 0, sizeof(info)/sizeof(DWORD),
+ reinterpret_cast<DWORD_PTR*>(&info));
+ } MOZ_SEH_EXCEPT(EXCEPTION_CONTINUE_EXECUTION) {
+ }
+#endif
+}
+
+// static
+bool PlatformThread::Create(size_t stack_size, Delegate* delegate,
+ PlatformThreadHandle* thread_handle) {
+ unsigned int flags = 0;
+ if (stack_size > 0) {
+ flags = STACK_SIZE_PARAM_IS_A_RESERVATION;
+ } else {
+ stack_size = 0;
+ }
+
+ // Using CreateThread here vs _beginthreadex makes thread creation a bit
+ // faster and doesn't require the loader lock to be available. Our code will
+ // have to work running on CreateThread() threads anyway, since we run code
+ // on the Windows thread pool, etc. For some background on the difference:
+ // http://www.microsoft.com/msj/1099/win32/win321099.aspx
+ *thread_handle = CreateThread(
+ NULL, stack_size, ThreadFunc, delegate, flags, NULL);
+ return *thread_handle != NULL;
+}
+
+// static
+bool PlatformThread::CreateNonJoinable(size_t stack_size, Delegate* delegate) {
+ PlatformThreadHandle thread_handle;
+ bool result = Create(stack_size, delegate, &thread_handle);
+ CloseHandle(thread_handle);
+ return result;
+}
+
+// static
+void PlatformThread::Join(PlatformThreadHandle thread_handle) {
+ DCHECK(thread_handle);
+
+ // Wait for the thread to exit. It should already have terminated but make
+ // sure this assumption is valid.
+ DWORD result = WaitForSingleObject(thread_handle, INFINITE);
+ DCHECK_EQ(WAIT_OBJECT_0, result);
+
+ CloseHandle(thread_handle);
+}
diff --git a/ipc/chromium/src/base/port.h b/ipc/chromium/src/base/port.h
new file mode 100644
index 000000000..d206d2530
--- /dev/null
+++ b/ipc/chromium/src/base/port.h
@@ -0,0 +1,65 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_PORT_H_
+#define BASE_PORT_H_
+
+#include <stdarg.h>
+#include "build/build_config.h"
+
+#ifdef COMPILER_MSVC
+#define GG_LONGLONG(x) x##I64
+#define GG_ULONGLONG(x) x##UI64
+#else
+#define GG_LONGLONG(x) x##LL
+#define GG_ULONGLONG(x) x##ULL
+#endif
+
+// Per C99 7.8.14, define __STDC_CONSTANT_MACROS before including <stdint.h>
+// to get the INTn_C and UINTn_C macros for integer constants. It's difficult
+// to guarantee any specific ordering of header includes, so it's difficult to
+// guarantee that the INTn_C macros can be defined by including <stdint.h> at
+// any specific point. Provide GG_INTn_C macros instead.
+
+#define GG_INT8_C(x) (x)
+#define GG_INT16_C(x) (x)
+#define GG_INT32_C(x) (x)
+#define GG_INT64_C(x) GG_LONGLONG(x)
+
+#define GG_UINT8_C(x) (x ## U)
+#define GG_UINT16_C(x) (x ## U)
+#define GG_UINT32_C(x) (x ## U)
+#define GG_UINT64_C(x) GG_ULONGLONG(x)
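+
+// For example (illustrative), a constant too large for a plain int literal
+// can be written portably as:
+//
+//   const uint64_t kFourGiB = GG_UINT64_C(0x100000000);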
+
+namespace base {
+
+// It's possible for functions that use a va_list, such as StringPrintf, to
+// invalidate the data in it upon use. The fix is to make a copy of the
+// structure before using it and use that copy instead. va_copy is provided
+// for this purpose. MSVC does not provide va_copy, so define an
+// implementation here. It is not guaranteed that assignment is a copy, so the
+// StringUtil.VariableArgsFunc unit test tests this capability.
+
+// The C standard says that va_copy is a "macro", not a function. Trying to
+// use va_list as ref args to a function, as above, breaks some machines.
+# if defined(COMPILER_GCC)
+# define base_va_copy(_a, _b) ::va_copy(_a, _b)
+# elif defined(COMPILER_MSVC)
+# define base_va_copy(_a, _b) (_a = _b)
+# else
+# error No va_copy for your compiler
+# endif
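+
+// Illustrative use inside a varargs helper (a sketch, not part of the
+// original header; assumes <cstdio> for vfprintf):
+//
+//   void LogV(const char* format, va_list args) {
+//     va_list args_copy;
+//     base_va_copy(args_copy, args);
+//     vfprintf(stderr, format, args_copy);
+//     va_end(args_copy);
+//   }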
+
+} // namespace base
+
+// Define an OS-neutral wrapper for shared library entry points
+#if defined(OS_WIN)
+#define API_CALL __stdcall
+#elif defined(OS_LINUX) || defined(OS_MACOSX)
+#define API_CALL
+#endif
+
+#endif // BASE_PORT_H_
diff --git a/ipc/chromium/src/base/process.h b/ipc/chromium/src/base/process.h
new file mode 100644
index 000000000..b562771e6
--- /dev/null
+++ b/ipc/chromium/src/base/process.h
@@ -0,0 +1,81 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_PROCESS_H_
+#define BASE_PROCESS_H_
+
+#include "base/basictypes.h"
+
+#include <sys/types.h>
+#ifdef OS_WIN
+#include <windows.h>
+#endif
+
+namespace base {
+
+// ProcessHandle is a platform specific type which represents the underlying OS
+// handle to a process.
+// ProcessId is a number which identifies the process in the OS.
+#if defined(OS_WIN)
+typedef HANDLE ProcessHandle;
+typedef DWORD ProcessId;
+#elif defined(OS_POSIX)
+// On POSIX, our ProcessHandle will just be the PID.
+typedef pid_t ProcessHandle;
+typedef pid_t ProcessId;
+#endif
+
+class Process {
+ public:
+ Process() : process_(0), last_working_set_size_(0) {}
+ explicit Process(ProcessHandle aHandle) :
+ process_(aHandle), last_working_set_size_(0) {}
+
+ // A handle to the current process.
+ static Process Current();
+
+ // Get/Set the handle for this process. The handle will be 0 if the process
+ // is no longer running.
+ ProcessHandle handle() const { return process_; }
+ void set_handle(ProcessHandle aHandle) { process_ = aHandle; }
+
+ // Get the PID for this process.
+ ProcessId pid() const;
+
+  // Returns true if this process is the current process.
+ bool is_current() const;
+
+ // Close the process handle. This will not terminate the process.
+ void Close();
+
+ // Terminates the process with extreme prejudice. The given result code will
+ // be the exit code of the process. If the process has already exited, this
+ // will do nothing.
+ void Terminate(int result_code);
+
+  // A process is backgrounded when its priority is lower than normal.
+ // Return true if this process is backgrounded, false otherwise.
+ bool IsProcessBackgrounded() const;
+
+  // Set a process as backgrounded. If value is true, the priority
+ // of the process will be lowered. If value is false, the priority
+ // of the process will be made "normal" - equivalent to default
+ // process priority.
+ // Returns true if the priority was changed, false otherwise.
+ bool SetProcessBackgrounded(bool value);
+
+ // Releases as much of the working set back to the OS as possible.
+ // Returns true if successful, false otherwise.
+ bool EmptyWorkingSet();
+
+ private:
+ ProcessHandle process_;
+ size_t last_working_set_size_;
+};
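+
+// Example usage (illustrative; |some_pid| is hypothetical, and
+// OpenProcessHandle / PROCESS_END_KILLED_BY_USER come from process_util.h):
+//
+//   base::ProcessHandle handle;
+//   if (base::OpenProcessHandle(some_pid, &handle)) {
+//     base::Process child(handle);
+//     child.Terminate(base::PROCESS_END_KILLED_BY_USER);
+//     child.Close();  // releases the handle, does not kill the process
+//   }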
+
+} // namespace base
+
+#endif // BASE_PROCESS_H_
diff --git a/ipc/chromium/src/base/process_posix.cc b/ipc/chromium/src/base/process_posix.cc
new file mode 100644
index 000000000..d8ea644c6
--- /dev/null
+++ b/ipc/chromium/src/base/process_posix.cc
@@ -0,0 +1,62 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright (c) 2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/process.h"
+#include "base/process_util.h"
+
+namespace base {
+
+void Process::Close() {
+ process_ = 0;
+  // If the process wasn't terminated (so we waited for it) or its state
+  // wasn't already collected with a wait from process_util, we will end
+  // up with a zombie when it does finally exit.
+}
+
+void Process::Terminate(int result_code) {
+  // result_code isn't supported here; POSIX can't force a child's exit code.
+ if (!process_)
+ return;
+ // We don't wait here. It's the responsibility of other code to reap the
+ // child.
+ KillProcess(process_, result_code, false);
+}
+
+bool Process::IsProcessBackgrounded() const {
+ // http://code.google.com/p/chromium/issues/detail?id=8083
+ return false;
+}
+
+bool Process::SetProcessBackgrounded(bool value) {
+ // http://code.google.com/p/chromium/issues/detail?id=8083
+  // Just say we did it to keep the renderer happy for the moment. This needs
+  // to be cleaned up within higher layers, since Windows is probably the only
+  // platform that can raise priorities without privileges.
+ return true;
+}
+
+bool Process::EmptyWorkingSet() {
+ // http://code.google.com/p/chromium/issues/detail?id=8083
+ return false;
+}
+
+ProcessId Process::pid() const {
+ if (process_ == 0)
+ return 0;
+
+ return GetProcId(process_);
+}
+
+bool Process::is_current() const {
+ return process_ == GetCurrentProcessHandle();
+}
+
+// static
+Process Process::Current() {
+ return Process(GetCurrentProcessHandle());
+}
+
+} // namespace base
diff --git a/ipc/chromium/src/base/process_util.h b/ipc/chromium/src/base/process_util.h
new file mode 100644
index 000000000..4df1f29fb
--- /dev/null
+++ b/ipc/chromium/src/base/process_util.h
@@ -0,0 +1,321 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file/namespace contains utility functions for enumerating, ending and
+// computing statistics of processes.
+
+#ifndef BASE_PROCESS_UTIL_H_
+#define BASE_PROCESS_UTIL_H_
+
+#include "base/basictypes.h"
+
+#if defined(OS_WIN)
+#include <windows.h>
+#include <tlhelp32.h>
+#include <io.h>
+#ifndef STDOUT_FILENO
+#define STDOUT_FILENO 1
+#endif
+#elif defined(OS_LINUX) || defined(__GLIBC__)
+#include <dirent.h>
+#include <limits.h>
+#include <sys/types.h>
+#elif defined(OS_MACOSX)
+#include <mach/mach.h>
+#endif
+
+#include <map>
+#include <string>
+#include <vector>
+#include <stdio.h>
+#include <stdlib.h>
+#ifndef OS_WIN
+#include <unistd.h>
+#endif
+
+#include "base/command_line.h"
+#include "base/process.h"
+
+#if defined(OS_WIN)
+typedef PROCESSENTRY32 ProcessEntry;
+typedef IO_COUNTERS IoCounters;
+#elif defined(OS_POSIX)
+// TODO(port): we should not rely on a Win32 structure.
+struct ProcessEntry {
+ int pid;
+ int ppid;
+ char szExeFile[NAME_MAX + 1];
+};
+
+struct IoCounters {
+ unsigned long long ReadOperationCount;
+ unsigned long long WriteOperationCount;
+ unsigned long long OtherOperationCount;
+ unsigned long long ReadTransferCount;
+ unsigned long long WriteTransferCount;
+ unsigned long long OtherTransferCount;
+};
+
+#include "base/file_descriptor_shuffle.h"
+#endif
+
+#if defined(OS_MACOSX)
+struct kinfo_proc;
+#endif
+
+namespace base {
+
+// These can be used in a 32-bit bitmask.
+enum ProcessArchitecture {
+ PROCESS_ARCH_I386 = 0x1,
+ PROCESS_ARCH_X86_64 = 0x2,
+ PROCESS_ARCH_PPC = 0x4,
+ PROCESS_ARCH_ARM = 0x8,
+ PROCESS_ARCH_MIPS = 0x10,
+ PROCESS_ARCH_ARM64 = 0x20
+};
+
+inline ProcessArchitecture GetCurrentProcessArchitecture()
+{
+ base::ProcessArchitecture currentArchitecture;
+#if defined(ARCH_CPU_X86)
+ currentArchitecture = base::PROCESS_ARCH_I386;
+#elif defined(ARCH_CPU_X86_64)
+ currentArchitecture = base::PROCESS_ARCH_X86_64;
+#elif defined(ARCH_CPU_PPC)
+ currentArchitecture = base::PROCESS_ARCH_PPC;
+#elif defined(ARCH_CPU_ARMEL)
+ currentArchitecture = base::PROCESS_ARCH_ARM;
+#elif defined(ARCH_CPU_MIPS)
+ currentArchitecture = base::PROCESS_ARCH_MIPS;
+#elif defined(ARCH_CPU_ARM64)
+ currentArchitecture = base::PROCESS_ARCH_ARM64;
+#endif
+ return currentArchitecture;
+}
+
+// A minimalistic but hopefully cross-platform set of exit codes.
+// Do not change the enumeration values or you will break third-party
+// installers.
+enum {
+ PROCESS_END_NORMAL_TERMINATON = 0,
+ PROCESS_END_KILLED_BY_USER = 1,
+ PROCESS_END_PROCESS_WAS_HUNG = 2
+};
+
+// Returns the id of the current process.
+ProcessId GetCurrentProcId();
+
+// Returns the ProcessHandle of the current process.
+ProcessHandle GetCurrentProcessHandle();
+
+// Converts a PID to a process handle. This handle must be closed by
+// CloseProcessHandle when you are done with it. Returns true on success.
+bool OpenProcessHandle(ProcessId pid, ProcessHandle* handle);
+
+// Converts a PID to a process handle. On Windows the handle is opened
+// with more access rights and must only be used by trusted code.
+// You have to close the returned handle using CloseProcessHandle. Returns true
+// on success.
+bool OpenPrivilegedProcessHandle(ProcessId pid, ProcessHandle* handle);
+
+// Closes the process handle opened by OpenProcessHandle.
+void CloseProcessHandle(ProcessHandle process);
+
+// Returns the unique ID for the specified process. This is functionally the
+// same as Windows' GetProcessId(), but works on versions of Windows before
+// Win XP SP1 as well.
+ProcessId GetProcId(ProcessHandle process);
+
+#if defined(OS_POSIX)
+// Sets all file descriptors to close on exec except for stdin, stdout
+// and stderr.
+// TODO(agl): remove this function
+// WARNING: do not use. It's inherently race-prone in the face of
+// multi-threading.
+void SetAllFDsToCloseOnExec();
+// Close all file descriptors, except those which are a destination in the
+// given multimap. Only call this function in a child process where you know
+// that there aren't any other threads.
+void CloseSuperfluousFds(const base::InjectiveMultimap& saved_map);
+#endif
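+
+// Example usage (illustrative; |pipe_fd| is hypothetical, and InjectionArc
+// comes from file_descriptor_shuffle.h). Typically called in the forked
+// child just before exec:
+//
+//   base::InjectiveMultimap saved;
+//   saved.push_back(base::InjectionArc(pipe_fd, 3, false));  // fd 3 stays open
+//   base::CloseSuperfluousFds(saved);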
+
+enum ChildPrivileges {
+ PRIVILEGES_DEFAULT,
+ PRIVILEGES_UNPRIVILEGED,
+ PRIVILEGES_INHERIT,
+ PRIVILEGES_LAST
+};
+
+#if defined(OS_WIN)
+// Runs the given application name with the given command line. Normally, the
+// first command line argument should be the path to the process; don't
+// forget to quote it.
+//
+// If wait is true, it will block and wait for the other process to finish,
+// otherwise, it will just continue asynchronously.
+//
+// Example (including literal quotes)
+// cmdline = "c:\windows\explorer.exe" -foo "c:\bar\"
+//
+// If process_handle is non-NULL, the process handle of the launched app will be
+// stored there on a successful launch.
+// NOTE: In this case, the caller is responsible for closing the handle so
+// that it doesn't leak!
+bool LaunchApp(const std::wstring& cmdline,
+ bool wait, bool start_hidden, ProcessHandle* process_handle);
+#elif defined(OS_POSIX)
+// Runs the application specified in argv[0] with the command line argv.
+// Before launching, all FDs open in the parent process will be marked as
+// close-on-exec. |fds_to_remap| defines a mapping of src fd->dest fd to
+// propagate FDs into the child process.
+//
+// As above, if wait is true, execute synchronously. The pid will be stored
+// in process_handle if that pointer is non-null.
+//
+// Note that the first argument in argv must point to the filename,
+// and must be fully specified.
+typedef std::vector<std::pair<int, int> > file_handle_mapping_vector;
+bool LaunchApp(const std::vector<std::string>& argv,
+ const file_handle_mapping_vector& fds_to_remap,
+ bool wait, ProcessHandle* process_handle);
+
+typedef std::map<std::string, std::string> environment_map;
+bool LaunchApp(const std::vector<std::string>& argv,
+ const file_handle_mapping_vector& fds_to_remap,
+ const environment_map& env_vars_to_set,
+ ChildPrivileges privs,
+ bool wait, ProcessHandle* process_handle,
+ ProcessArchitecture arch=GetCurrentProcessArchitecture());
+bool LaunchApp(const std::vector<std::string>& argv,
+ const file_handle_mapping_vector& fds_to_remap,
+ const environment_map& env_vars_to_set,
+ bool wait, ProcessHandle* process_handle,
+ ProcessArchitecture arch=GetCurrentProcessArchitecture());
+#endif
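+
+// Example usage on POSIX (illustrative; the path and fd values below are
+// hypothetical):
+//
+//   std::vector<std::string> args;
+//   args.push_back("/usr/bin/some-helper");        // argv[0]: full path
+//   args.push_back("--child");
+//   base::file_handle_mapping_vector fds;
+//   fds.push_back(std::pair<int, int>(ipc_fd, 3)); // parent fd -> child fd 3
+//   base::ProcessHandle child;
+//   if (base::LaunchApp(args, fds, false /* wait */, &child)) {
+//     // |child| now holds the pid of the new process.
+//   }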
+
+// Adjust the privileges of this process to match |privs|. Only
+// returns if the privileges were successfully adjusted; on failure the
+// process exits.
+void SetCurrentProcessPrivileges(ChildPrivileges privs);
+
+// Executes the application specified by cl. This function delegates to one
+// of the above two platform-specific functions.
+bool LaunchApp(const CommandLine& cl,
+ bool wait, bool start_hidden, ProcessHandle* process_handle);
+
+// Used to filter processes by process ID.
+class ProcessFilter {
+ public:
+ // Returns true to indicate set-inclusion and false otherwise. This method
+ // should not have side-effects and should be idempotent.
+ virtual bool Includes(ProcessId pid, ProcessId parent_pid) const = 0;
+ virtual ~ProcessFilter() { }
+};
+
+// Attempts to kill the process identified by the given process
+// entry structure, giving it the specified exit code. If |wait| is true, wait
+// for the process to be actually terminated before returning.
+// Returns true if this is successful, false otherwise.
+bool KillProcess(ProcessHandle process, int exit_code, bool wait);
+
+// Get the termination status (exit code) of the process and return true if the
+// status indicates the process crashed. |child_exited| is set to true iff the
+// child process has terminated. (|child_exited| may be NULL.)
+//
+// On Windows, it is an error to call this if the process hasn't terminated
+// yet. On POSIX, |child_exited| is set correctly since we detect termination
+// in a different manner on POSIX.
+bool DidProcessCrash(bool* child_exited, ProcessHandle handle);
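+
+// Example usage (illustrative; |child_handle| is hypothetical):
+//
+//   bool exited = false;
+//   bool crashed = base::DidProcessCrash(&exited, child_handle);
+//   if (exited && crashed) {
+//     // record the crash
+//   }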
+
+// Provides performance metrics for a specified process (CPU usage, memory and
+// IO counters). To use it, invoke CreateProcessMetrics() to get an instance
+// for a specific process, then access the information with the different get
+// methods.
+class ProcessMetrics {
+ public:
+ // Creates a ProcessMetrics for the specified process.
+ // The caller owns the returned object.
+ static ProcessMetrics* CreateProcessMetrics(ProcessHandle process);
+
+ ~ProcessMetrics();
+
+ // Returns the CPU usage in percent since the last time this method was
+ // called. The first time this method is called it returns 0 and will return
+ // the actual CPU info on subsequent calls.
+ // Note that on multi-processor machines, the CPU usage value is for all
+ // CPUs. So if you have 2 CPUs and your process is using all the cycles
+ // of 1 CPU and not the other CPU, this method returns 50.
+ int GetCPUUsage();
+
+ private:
+ explicit ProcessMetrics(ProcessHandle process);
+
+ ProcessHandle process_;
+
+ int processor_count_;
+
+ // Used to store the previous times so we can compute the CPU usage.
+ int64_t last_time_;
+ int64_t last_system_time_;
+
+ DISALLOW_EVIL_CONSTRUCTORS(ProcessMetrics);
+};
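+
+// Example usage (illustrative):
+//
+//   base::ProcessMetrics* metrics =
+//       base::ProcessMetrics::CreateProcessMetrics(base::GetCurrentProcessHandle());
+//   metrics->GetCPUUsage();            // first call always returns 0
+//   // ... later ...
+//   int cpu = metrics->GetCPUUsage();  // percent since the previous call
+//   delete metrics;                    // caller owns the object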
+
+} // namespace base
+
+namespace mozilla {
+
+class EnvironmentLog
+{
+public:
+ explicit EnvironmentLog(const char* varname) {
+ const char *e = getenv(varname);
+ if (e && *e) {
+ fname_ = e;
+ }
+ }
+
+ ~EnvironmentLog() {}
+
+ void print(const char* format, ...) {
+ if (!fname_.size())
+ return;
+
+ FILE* f;
+ if (fname_.compare("-") == 0) {
+ f = fdopen(dup(STDOUT_FILENO), "a");
+ } else {
+ f = fopen(fname_.c_str(), "a");
+ }
+
+ if (!f)
+ return;
+
+ va_list a;
+ va_start(a, format);
+ vfprintf(f, format, a);
+ va_end(a);
+ fclose(f);
+ }
+
+private:
+ std::string fname_;
+
+ DISALLOW_EVIL_CONSTRUCTORS(EnvironmentLog);
+};
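+
+// Example usage (illustrative): log to the file named by MOZ_PROCESS_LOG, or
+// to stdout when the variable is set to "-".
+//
+//   static mozilla::EnvironmentLog sLog("MOZ_PROCESS_LOG");
+//   sLog.print("==> process %d did something\n", base::GetCurrentProcId());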
+
+} // namespace mozilla
+
+#if defined(OS_WIN)
+// Undo the windows.h damage
+#undef GetMessage
+#undef CreateEvent
+#undef GetClassName
+#undef GetBinaryType
+#undef RemoveDirectory
+#endif
+
+#endif // BASE_PROCESS_UTIL_H_
diff --git a/ipc/chromium/src/base/process_util_bsd.cc b/ipc/chromium/src/base/process_util_bsd.cc
new file mode 100644
index 000000000..57cf7fef9
--- /dev/null
+++ b/ipc/chromium/src/base/process_util_bsd.cc
@@ -0,0 +1,167 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright (c) 2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// derived from process_util_mac.cc
+
+#include "base/process_util.h"
+
+#include <fcntl.h>
+#include <spawn.h>
+#include <sys/wait.h>
+
+#include <string>
+
+#include "nspr.h"
+#include "base/eintr_wrapper.h"
+
+namespace {
+
+static mozilla::EnvironmentLog gProcessLog("MOZ_PROCESS_LOG");
+
+} // namespace
+
+namespace base {
+
+void FreeEnvVarsArray(char* array[], int length)
+{
+ for (int i = 0; i < length; i++) {
+ free(array[i]);
+ }
+ delete[] array;
+}
+
+bool LaunchApp(const std::vector<std::string>& argv,
+ const file_handle_mapping_vector& fds_to_remap,
+ bool wait, ProcessHandle* process_handle) {
+ return LaunchApp(argv, fds_to_remap, environment_map(),
+ wait, process_handle);
+}
+
+bool LaunchApp(const std::vector<std::string>& argv,
+ const file_handle_mapping_vector& fds_to_remap,
+ const environment_map& env_vars_to_set,
+ bool wait, ProcessHandle* process_handle,
+ ProcessArchitecture arch) {
+ return LaunchApp(argv, fds_to_remap, env_vars_to_set,
+ PRIVILEGES_INHERIT,
+ wait, process_handle);
+}
+
+bool LaunchApp(const std::vector<std::string>& argv,
+ const file_handle_mapping_vector& fds_to_remap,
+ const environment_map& env_vars_to_set,
+ ChildPrivileges privs,
+ bool wait, ProcessHandle* process_handle,
+ ProcessArchitecture arch) {
+ bool retval = true;
+
+ char* argv_copy[argv.size() + 1];
+ for (size_t i = 0; i < argv.size(); i++) {
+ argv_copy[i] = const_cast<char*>(argv[i].c_str());
+ }
+ argv_copy[argv.size()] = NULL;
+
+ // Make sure we don't leak any FDs to the child process by marking all FDs
+ // as close-on-exec.
+ SetAllFDsToCloseOnExec();
+
+ // Copy environment to a new char array and add the variables
+ // in env_vars_to_set.
+ // Existing variables are overwritten by env_vars_to_set.
+ int pos = 0;
+ environment_map combined_env_vars = env_vars_to_set;
+ char **environ = PR_DuplicateEnvironment();
+ while(environ[pos] != NULL) {
+ std::string varString = environ[pos];
+ std::string varName = varString.substr(0, varString.find_first_of('='));
+ std::string varValue = varString.substr(varString.find_first_of('=') + 1);
+ if (combined_env_vars.find(varName) == combined_env_vars.end()) {
+ combined_env_vars[varName] = varValue;
+ }
+ PR_Free(environ[pos++]);
+ }
+ PR_Free(environ);
+ int varsLen = combined_env_vars.size() + 1;
+
+ char** vars = new char*[varsLen];
+ int i = 0;
+ for (environment_map::const_iterator it = combined_env_vars.begin();
+ it != combined_env_vars.end(); ++it) {
+ std::string entry(it->first);
+ entry += "=";
+ entry += it->second;
+ vars[i] = strdup(entry.c_str());
+ i++;
+ }
+ vars[i] = NULL;
+
+ posix_spawn_file_actions_t file_actions;
+ if (posix_spawn_file_actions_init(&file_actions) != 0) {
+ FreeEnvVarsArray(vars, varsLen);
+ return false;
+ }
+
+ // Turn fds_to_remap array into a set of dup2 calls.
+ for (file_handle_mapping_vector::const_iterator it = fds_to_remap.begin();
+ it != fds_to_remap.end();
+ ++it) {
+ int src_fd = it->first;
+ int dest_fd = it->second;
+
+ if (src_fd == dest_fd) {
+ int flags = fcntl(src_fd, F_GETFD);
+ if (flags != -1) {
+ fcntl(src_fd, F_SETFD, flags & ~FD_CLOEXEC);
+ }
+ } else {
+ if (posix_spawn_file_actions_adddup2(&file_actions, src_fd, dest_fd) != 0) {
+ posix_spawn_file_actions_destroy(&file_actions);
+ FreeEnvVarsArray(vars, varsLen);
+ return false;
+ }
+ }
+ }
+
+ pid_t pid = 0;
+ int spawn_succeeded = (posix_spawnp(&pid,
+ argv_copy[0],
+ &file_actions,
+ NULL,
+ argv_copy,
+ vars) == 0);
+
+ FreeEnvVarsArray(vars, varsLen);
+
+ posix_spawn_file_actions_destroy(&file_actions);
+
+ bool process_handle_valid = pid > 0;
+ if (!spawn_succeeded || !process_handle_valid) {
+ retval = false;
+ } else {
+ gProcessLog.print("==> process %d launched child process %d\n",
+ GetCurrentProcId(), pid);
+ if (wait)
+ HANDLE_EINTR(waitpid(pid, 0, 0));
+
+ if (process_handle)
+ *process_handle = pid;
+ }
+
+ return retval;
+}
+
+bool LaunchApp(const CommandLine& cl,
+ bool wait, bool start_hidden, ProcessHandle* process_handle) {
+ // TODO(playmobil): Do we need to respect the start_hidden flag?
+ file_handle_mapping_vector no_files;
+ return LaunchApp(cl.argv(), no_files, wait, process_handle);
+}
+
+void SetCurrentProcessPrivileges(ChildPrivileges privs) {
+
+}
+
+} // namespace base
diff --git a/ipc/chromium/src/base/process_util_linux.cc b/ipc/chromium/src/base/process_util_linux.cc
new file mode 100644
index 000000000..204017db5
--- /dev/null
+++ b/ipc/chromium/src/base/process_util_linux.cc
@@ -0,0 +1,274 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright (c) 2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/process_util.h"
+
+#include <ctype.h>
+#include <fcntl.h>
+#include <memory>
+#include <unistd.h>
+#include <string>
+#include <sys/types.h>
+#include <sys/wait.h>
+
+#include "base/eintr_wrapper.h"
+#include "base/file_util.h"
+#include "base/logging.h"
+#include "base/string_util.h"
+#include "nsLiteralString.h"
+#include "mozilla/UniquePtr.h"
+
+#include "prenv.h"
+#include "prmem.h"
+
+#ifdef MOZ_WIDGET_GONK
+/*
+ * AID_APP is the first application UID used by Android. We're using
+ * it as our unprivilegied UID. This ensure the UID used is not
+ * shared with any other processes than our own childs.
+ */
+# include <private/android_filesystem_config.h>
+# define CHILD_UNPRIVILEGED_UID AID_APP
+# define CHILD_UNPRIVILEGED_GID AID_APP
+#else
+/*
+ * On platforms that are not gonk based, we fall back to an arbitrary
+ * UID. This is generally the UID for the user `nobody', although that
+ * is not always the case.
+ */
+# define CHILD_UNPRIVILEGED_UID 65534
+# define CHILD_UNPRIVILEGED_GID 65534
+#endif
+
+namespace {
+
+enum ParsingState {
+ KEY_NAME,
+ KEY_VALUE
+};
+
+static mozilla::EnvironmentLog gProcessLog("MOZ_PROCESS_LOG");
+
+} // namespace
+
+namespace base {
+
+class EnvironmentEnvp
+{
+public:
+ EnvironmentEnvp()
+ : mEnvp(PR_DuplicateEnvironment()) {}
+
+ explicit EnvironmentEnvp(const environment_map &em)
+ {
+ mEnvp = (char **)PR_Malloc(sizeof(char *) * (em.size() + 1));
+ if (!mEnvp) {
+ return;
+ }
+ char **e = mEnvp;
+ for (environment_map::const_iterator it = em.begin();
+ it != em.end(); ++it, ++e) {
+ std::string str = it->first;
+ str += "=";
+ str += it->second;
+ size_t len = str.length() + 1;
+ *e = static_cast<char*>(PR_Malloc(len));
+ memcpy(*e, str.c_str(), len);
+ }
+ *e = NULL;
+ }
+
+ ~EnvironmentEnvp()
+ {
+ if (!mEnvp) {
+ return;
+ }
+ for (char **e = mEnvp; *e; ++e) {
+ PR_Free(*e);
+ }
+ PR_Free(mEnvp);
+ }
+
+ char * const *AsEnvp() { return mEnvp; }
+
+ void ToMap(environment_map &em)
+ {
+ if (!mEnvp) {
+ return;
+ }
+ em.clear();
+ for (char **e = mEnvp; *e; ++e) {
+ const char *eq;
+ if ((eq = strchr(*e, '=')) != NULL) {
+ std::string varname(*e, eq - *e);
+ em[varname.c_str()] = &eq[1];
+ }
+ }
+ }
+
+private:
+ char **mEnvp;
+};
+
+class Environment : public environment_map
+{
+public:
+ Environment()
+ {
+ EnvironmentEnvp envp;
+ envp.ToMap(*this);
+ }
+
+ char * const *AsEnvp() {
+ mEnvp.reset(new EnvironmentEnvp(*this));
+ return mEnvp->AsEnvp();
+ }
+
+ void Merge(const environment_map &em)
+ {
+ for (const_iterator it = em.begin(); it != em.end(); ++it) {
+ (*this)[it->first] = it->second;
+ }
+ }
+private:
+ std::auto_ptr<EnvironmentEnvp> mEnvp;
+};
+
+bool LaunchApp(const std::vector<std::string>& argv,
+ const file_handle_mapping_vector& fds_to_remap,
+ bool wait, ProcessHandle* process_handle) {
+ return LaunchApp(argv, fds_to_remap, environment_map(),
+ wait, process_handle);
+}
+
+bool LaunchApp(const std::vector<std::string>& argv,
+ const file_handle_mapping_vector& fds_to_remap,
+ const environment_map& env_vars_to_set,
+ bool wait, ProcessHandle* process_handle,
+ ProcessArchitecture arch) {
+ return LaunchApp(argv, fds_to_remap, env_vars_to_set,
+ PRIVILEGES_INHERIT,
+ wait, process_handle);
+}
+
+bool LaunchApp(const std::vector<std::string>& argv,
+ const file_handle_mapping_vector& fds_to_remap,
+ const environment_map& env_vars_to_set,
+ ChildPrivileges privs,
+ bool wait, ProcessHandle* process_handle,
+ ProcessArchitecture arch) {
+ mozilla::UniquePtr<char*[]> argv_cstr(new char*[argv.size() + 1]);
+  // Illegal to allocate memory after fork and before execve
+ InjectiveMultimap fd_shuffle1, fd_shuffle2;
+ fd_shuffle1.reserve(fds_to_remap.size());
+ fd_shuffle2.reserve(fds_to_remap.size());
+
+ Environment env;
+ env.Merge(env_vars_to_set);
+ char * const *envp = env.AsEnvp();
+ if (!envp) {
+ DLOG(ERROR) << "FAILED to duplicate environment for: " << argv_cstr[0];
+ return false;
+ }
+
+ pid_t pid = fork();
+ if (pid < 0)
+ return false;
+
+ if (pid == 0) {
+ for (file_handle_mapping_vector::const_iterator
+ it = fds_to_remap.begin(); it != fds_to_remap.end(); ++it) {
+ fd_shuffle1.push_back(InjectionArc(it->first, it->second, false));
+ fd_shuffle2.push_back(InjectionArc(it->first, it->second, false));
+ }
+
+ if (!ShuffleFileDescriptors(&fd_shuffle1))
+ _exit(127);
+
+ CloseSuperfluousFds(fd_shuffle2);
+
+ for (size_t i = 0; i < argv.size(); i++)
+ argv_cstr[i] = const_cast<char*>(argv[i].c_str());
+ argv_cstr[argv.size()] = NULL;
+
+ SetCurrentProcessPrivileges(privs);
+
+ execve(argv_cstr[0], argv_cstr.get(), envp);
+ // if we get here, we're in serious trouble and should complain loudly
+ // NOTE: This is async signal unsafe; it could deadlock instead. (But
+ // only on debug builds; otherwise it's a signal-safe no-op.)
+ DLOG(ERROR) << "FAILED TO exec() CHILD PROCESS, path: " << argv_cstr[0];
+ _exit(127);
+ } else {
+ gProcessLog.print("==> process %d launched child process %d\n",
+ GetCurrentProcId(), pid);
+ if (wait)
+ HANDLE_EINTR(waitpid(pid, 0, 0));
+
+ if (process_handle)
+ *process_handle = pid;
+ }
+
+ return true;
+}
+
+bool LaunchApp(const CommandLine& cl,
+ bool wait, bool start_hidden,
+ ProcessHandle* process_handle) {
+ file_handle_mapping_vector no_files;
+ return LaunchApp(cl.argv(), no_files, wait, process_handle);
+}
+
+void SetCurrentProcessPrivileges(ChildPrivileges privs) {
+ if (privs == PRIVILEGES_INHERIT) {
+ return;
+ }
+
+ gid_t gid = CHILD_UNPRIVILEGED_GID;
+ uid_t uid = CHILD_UNPRIVILEGED_UID;
+#ifdef MOZ_WIDGET_GONK
+ {
+ static bool checked_pix_max, pix_max_ok;
+ if (!checked_pix_max) {
+ checked_pix_max = true;
+ int fd = open("/proc/sys/kernel/pid_max", O_CLOEXEC | O_RDONLY);
+ if (fd < 0) {
+ DLOG(ERROR) << "Failed to open pid_max";
+ _exit(127);
+ }
+ char buf[PATH_MAX];
+ ssize_t len = read(fd, buf, sizeof(buf) - 1);
+ close(fd);
+ if (len < 0) {
+ DLOG(ERROR) << "Failed to read pid_max";
+ _exit(127);
+ }
+ buf[len] = '\0';
+ int pid_max = atoi(buf);
+ pix_max_ok =
+ (pid_max + CHILD_UNPRIVILEGED_UID > CHILD_UNPRIVILEGED_UID);
+ }
+ if (!pix_max_ok) {
+ DLOG(ERROR) << "Can't safely get unique uid/gid";
+ _exit(127);
+ }
+ gid += getpid();
+ uid += getpid();
+ }
+#endif
+ if (setgid(gid) != 0) {
+ DLOG(ERROR) << "FAILED TO setgid() CHILD PROCESS";
+ _exit(127);
+ }
+ if (setuid(uid) != 0) {
+ DLOG(ERROR) << "FAILED TO setuid() CHILD PROCESS";
+ _exit(127);
+ }
+ if (chdir("/") != 0)
+ gProcessLog.print("==> could not chdir()\n");
+}
+
+} // namespace base
diff --git a/ipc/chromium/src/base/process_util_mac.mm b/ipc/chromium/src/base/process_util_mac.mm
new file mode 100644
index 000000000..7d06411b2
--- /dev/null
+++ b/ipc/chromium/src/base/process_util_mac.mm
@@ -0,0 +1,202 @@
+// Copyright (c) 2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+
+#include "base/process_util.h"
+
+#import <Cocoa/Cocoa.h>
+#include <crt_externs.h>
+#include <spawn.h>
+#include <sys/wait.h>
+
+#include <string>
+
+#include "base/eintr_wrapper.h"
+#include "base/logging.h"
+#include "base/rand_util.h"
+#include "base/string_util.h"
+#include "base/time.h"
+
+namespace {
+
+static mozilla::EnvironmentLog gProcessLog("MOZ_PROCESS_LOG");
+
+} // namespace
+
+namespace base {
+
+void FreeEnvVarsArray(char* array[], int length)
+{
+ for (int i = 0; i < length; i++) {
+ free(array[i]);
+ }
+ delete[] array;
+}
+
+bool LaunchApp(const std::vector<std::string>& argv,
+ const file_handle_mapping_vector& fds_to_remap,
+ bool wait, ProcessHandle* process_handle) {
+ return LaunchApp(argv, fds_to_remap, environment_map(),
+ wait, process_handle);
+}
+
+bool LaunchApp(const std::vector<std::string>& argv,
+ const file_handle_mapping_vector& fds_to_remap,
+ const environment_map& env_vars_to_set,
+ bool wait, ProcessHandle* process_handle,
+ ProcessArchitecture arch) {
+ return LaunchApp(argv, fds_to_remap, env_vars_to_set,
+ PRIVILEGES_INHERIT,
+ wait, process_handle);
+}
+
+bool LaunchApp(const std::vector<std::string>& argv,
+ const file_handle_mapping_vector& fds_to_remap,
+ const environment_map& env_vars_to_set,
+ ChildPrivileges privs,
+ bool wait, ProcessHandle* process_handle,
+ ProcessArchitecture arch) {
+ bool retval = true;
+
+ char* argv_copy[argv.size() + 1];
+ for (size_t i = 0; i < argv.size(); i++) {
+ argv_copy[i] = const_cast<char*>(argv[i].c_str());
+ }
+ argv_copy[argv.size()] = NULL;
+
+ // Make sure we don't leak any FDs to the child process by marking all FDs
+ // as close-on-exec.
+ SetAllFDsToCloseOnExec();
+
+ // Copy _NSGetEnviron() to a new char array and add the variables
+ // in env_vars_to_set.
+ // Existing variables are overwritten by env_vars_to_set.
+ int pos = 0;
+ environment_map combined_env_vars = env_vars_to_set;
+ while((*_NSGetEnviron())[pos] != NULL) {
+ std::string varString = (*_NSGetEnviron())[pos];
+ std::string varName = varString.substr(0, varString.find_first_of('='));
+ std::string varValue = varString.substr(varString.find_first_of('=') + 1);
+ if (combined_env_vars.find(varName) == combined_env_vars.end()) {
+ combined_env_vars[varName] = varValue;
+ }
+ pos++;
+ }
+ int varsLen = combined_env_vars.size() + 1;
+
+ char** vars = new char*[varsLen];
+ int i = 0;
+ for (environment_map::const_iterator it = combined_env_vars.begin();
+ it != combined_env_vars.end(); ++it) {
+ std::string entry(it->first);
+ entry += "=";
+ entry += it->second;
+ vars[i] = strdup(entry.c_str());
+ i++;
+ }
+ vars[i] = NULL;
+
+ posix_spawn_file_actions_t file_actions;
+ if (posix_spawn_file_actions_init(&file_actions) != 0) {
+ FreeEnvVarsArray(vars, varsLen);
+ return false;
+ }
+
+ // Turn fds_to_remap array into a set of dup2 calls.
+ for (file_handle_mapping_vector::const_iterator it = fds_to_remap.begin();
+ it != fds_to_remap.end();
+ ++it) {
+ int src_fd = it->first;
+ int dest_fd = it->second;
+
+ if (src_fd == dest_fd) {
+ int flags = fcntl(src_fd, F_GETFD);
+ if (flags != -1) {
+ fcntl(src_fd, F_SETFD, flags & ~FD_CLOEXEC);
+ }
+ } else {
+ if (posix_spawn_file_actions_adddup2(&file_actions, src_fd, dest_fd) != 0) {
+ posix_spawn_file_actions_destroy(&file_actions);
+ FreeEnvVarsArray(vars, varsLen);
+ return false;
+ }
+ }
+ }
+
+ // Set up the CPU preference array.
+ cpu_type_t cpu_types[1];
+ switch (arch) {
+ case PROCESS_ARCH_I386:
+ cpu_types[0] = CPU_TYPE_X86;
+ break;
+ case PROCESS_ARCH_X86_64:
+ cpu_types[0] = CPU_TYPE_X86_64;
+ break;
+ case PROCESS_ARCH_PPC:
+ cpu_types[0] = CPU_TYPE_POWERPC;
+ break;
+ default:
+ cpu_types[0] = CPU_TYPE_ANY;
+ break;
+ }
+
+ // Initialize spawn attributes.
+ posix_spawnattr_t spawnattr;
+ if (posix_spawnattr_init(&spawnattr) != 0) {
+ FreeEnvVarsArray(vars, varsLen);
+ return false;
+ }
+
+ // Set spawn attributes.
+ size_t attr_count = 1;
+ size_t attr_ocount = 0;
+ if (posix_spawnattr_setbinpref_np(&spawnattr, attr_count, cpu_types, &attr_ocount) != 0 ||
+ attr_ocount != attr_count) {
+ FreeEnvVarsArray(vars, varsLen);
+ posix_spawnattr_destroy(&spawnattr);
+ return false;
+ }
+
+ int pid = 0;
+ int spawn_succeeded = (posix_spawnp(&pid,
+ argv_copy[0],
+ &file_actions,
+ &spawnattr,
+ argv_copy,
+ vars) == 0);
+
+ FreeEnvVarsArray(vars, varsLen);
+
+ posix_spawn_file_actions_destroy(&file_actions);
+
+ posix_spawnattr_destroy(&spawnattr);
+
+ bool process_handle_valid = pid > 0;
+ if (!spawn_succeeded || !process_handle_valid) {
+ retval = false;
+ } else {
+ gProcessLog.print("==> process %d launched child process %d\n",
+ GetCurrentProcId(), pid);
+ if (wait)
+ HANDLE_EINTR(waitpid(pid, 0, 0));
+
+ if (process_handle)
+ *process_handle = pid;
+ }
+
+ return retval;
+}
+
+bool LaunchApp(const CommandLine& cl,
+ bool wait, bool start_hidden, ProcessHandle* process_handle) {
+ // TODO(playmobil): Do we need to respect the start_hidden flag?
+ file_handle_mapping_vector no_files;
+ return LaunchApp(cl.argv(), no_files, wait, process_handle);
+}
+
+void SetCurrentProcessPrivileges(ChildPrivileges privs) {
+
+}
+
+} // namespace base
diff --git a/ipc/chromium/src/base/process_util_posix.cc b/ipc/chromium/src/base/process_util_posix.cc
new file mode 100644
index 000000000..ade3ebe5e
--- /dev/null
+++ b/ipc/chromium/src/base/process_util_posix.cc
@@ -0,0 +1,348 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <dirent.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <signal.h>
+#include <stdlib.h>
+#include <sys/resource.h>
+#include <sys/time.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <unistd.h>
+
+#include <limits>
+#include <set>
+
+#include "base/basictypes.h"
+#include "base/eintr_wrapper.h"
+#include "base/logging.h"
+#include "base/platform_thread.h"
+#include "base/process_util.h"
+#include "base/sys_info.h"
+#include "base/time.h"
+#include "base/waitable_event.h"
+#include "base/dir_reader_posix.h"
+
+#include "mozilla/UniquePtr.h"
+
+const int kMicrosecondsPerSecond = 1000000;
+
+namespace base {
+
+ProcessId GetCurrentProcId() {
+ return getpid();
+}
+
+ProcessHandle GetCurrentProcessHandle() {
+ return GetCurrentProcId();
+}
+
+bool OpenProcessHandle(ProcessId pid, ProcessHandle* handle) {
+ // On Posix platforms, process handles are the same as PIDs, so we
+ // don't need to do anything.
+ *handle = pid;
+ return true;
+}
+
+bool OpenPrivilegedProcessHandle(ProcessId pid, ProcessHandle* handle) {
+ // On POSIX permissions are checked for each operation on process,
+ // not when opening a "handle".
+ return OpenProcessHandle(pid, handle);
+}
+
+void CloseProcessHandle(ProcessHandle process) {
+ // See OpenProcessHandle, nothing to do.
+ return;
+}
+
+ProcessId GetProcId(ProcessHandle process) {
+ return process;
+}
+
+// Attempts to kill the process identified by the given process
+// entry structure. Ignores specified exit_code; posix can't force that.
+// Returns true if this is successful, false otherwise.
+bool KillProcess(ProcessHandle process_id, int exit_code, bool wait) {
+ bool result = kill(process_id, SIGTERM) == 0;
+
+ if (result && wait) {
+ int tries = 60;
+ bool exited = false;
+ // The process may not end immediately due to pending I/O
+ while (tries-- > 0) {
+ int pid = HANDLE_EINTR(waitpid(process_id, NULL, WNOHANG));
+ if (pid == process_id) {
+ exited = true;
+ break;
+ }
+
+ sleep(1);
+ }
+
+ if (!exited) {
+ result = kill(process_id, SIGKILL) == 0;
+ }
+ }
+
+ if (!result)
+ DLOG(ERROR) << "Unable to terminate process.";
+
+ return result;
+}
+
+#ifdef ANDROID
+typedef unsigned long int rlim_t;
+#endif
+
+// A class to handle auto-closing of DIR*'s.
+class ScopedDIRClose {
+ public:
+ inline void operator()(DIR* x) const {
+ if (x) {
+ closedir(x);
+ }
+ }
+};
+typedef mozilla::UniquePtr<DIR, ScopedDIRClose> ScopedDIR;
+
+
+void CloseSuperfluousFds(const base::InjectiveMultimap& saved_mapping) {
+ // DANGER: no calls to malloc are allowed from now on:
+ // http://crbug.com/36678
+#if defined(ANDROID)
+ static const rlim_t kSystemDefaultMaxFds = 1024;
+ static const char kFDDir[] = "/proc/self/fd";
+#elif defined(OS_LINUX)
+ static const rlim_t kSystemDefaultMaxFds = 8192;
+ static const char kFDDir[] = "/proc/self/fd";
+#elif defined(OS_MACOSX)
+ static const rlim_t kSystemDefaultMaxFds = 256;
+ static const char kFDDir[] = "/dev/fd";
+#elif defined(OS_BSD)
+ // the getrlimit below should never fail, so whatever ..
+ static const rlim_t kSystemDefaultMaxFds = 1024;
+ // at least /dev/fd will exist
+ static const char kFDDir[] = "/dev/fd";
+#endif
+
+ // Get the maximum number of FDs possible.
+ struct rlimit nofile;
+ rlim_t max_fds;
+ if (getrlimit(RLIMIT_NOFILE, &nofile)) {
+ // getrlimit failed. Take a best guess.
+ max_fds = kSystemDefaultMaxFds;
+ DLOG(ERROR) << "getrlimit(RLIMIT_NOFILE) failed: " << errno;
+ } else {
+ max_fds = nofile.rlim_cur;
+ }
+
+ if (max_fds > INT_MAX)
+ max_fds = INT_MAX;
+
+ DirReaderPosix fd_dir(kFDDir);
+
+ if (!fd_dir.IsValid()) {
+ // Fallback case: Try every possible fd.
+ for (rlim_t i = 0; i < max_fds; ++i) {
+ const int fd = static_cast<int>(i);
+ if (fd == STDIN_FILENO || fd == STDOUT_FILENO || fd == STDERR_FILENO)
+ continue;
+ InjectiveMultimap::const_iterator j;
+ for (j = saved_mapping.begin(); j != saved_mapping.end(); j++) {
+ if (fd == j->dest)
+ break;
+ }
+ if (j != saved_mapping.end())
+ continue;
+
+ // Since we're just trying to close anything we can find,
+ // ignore any error return values of close().
+ HANDLE_EINTR(close(fd));
+ }
+ return;
+ }
+
+ const int dir_fd = fd_dir.fd();
+
+ for ( ; fd_dir.Next(); ) {
+ // Skip . and .. entries.
+ if (fd_dir.name()[0] == '.')
+ continue;
+
+ char *endptr;
+ errno = 0;
+ const long int fd = strtol(fd_dir.name(), &endptr, 10);
+ if (fd_dir.name()[0] == 0 || *endptr || fd < 0 || errno)
+ continue;
+ if (fd == STDIN_FILENO || fd == STDOUT_FILENO || fd == STDERR_FILENO)
+ continue;
+ InjectiveMultimap::const_iterator i;
+ for (i = saved_mapping.begin(); i != saved_mapping.end(); i++) {
+ if (fd == i->dest)
+ break;
+ }
+ if (i != saved_mapping.end())
+ continue;
+ if (fd == dir_fd)
+ continue;
+
+ // When running under Valgrind, Valgrind opens several FDs for its
+ // own use and will complain if we try to close them. All of
+ // these FDs are >= |max_fds|, so we can check against that here
+ // before closing. See https://bugs.kde.org/show_bug.cgi?id=191758
+ if (fd < static_cast<int>(max_fds)) {
+ int ret = HANDLE_EINTR(close(fd));
+ if (ret != 0) {
+ DLOG(ERROR) << "Problem closing fd";
+ }
+ }
+ }
+}
+
+// Sets all file descriptors to close on exec except for stdin, stdout
+// and stderr.
+// TODO(agl): Remove this function. It's fundamentally broken for multithreaded
+// apps.
+void SetAllFDsToCloseOnExec() {
+#if defined(OS_LINUX)
+ const char fd_dir[] = "/proc/self/fd";
+#elif defined(OS_MACOSX) || defined(OS_BSD)
+ const char fd_dir[] = "/dev/fd";
+#endif
+ ScopedDIR dir_closer(opendir(fd_dir));
+ DIR *dir = dir_closer.get();
+ if (NULL == dir) {
+ DLOG(ERROR) << "Unable to open " << fd_dir;
+ return;
+ }
+
+ struct dirent *ent;
+ while ((ent = readdir(dir))) {
+ // Skip . and .. entries.
+ if (ent->d_name[0] == '.')
+ continue;
+ int i = atoi(ent->d_name);
+ // We don't close stdin, stdout or stderr.
+ if (i <= STDERR_FILENO)
+ continue;
+
+ int flags = fcntl(i, F_GETFD);
+ if ((flags == -1) || (fcntl(i, F_SETFD, flags | FD_CLOEXEC) == -1)) {
+ DLOG(ERROR) << "fcntl failure.";
+ }
+ }
+}
+
+ProcessMetrics::ProcessMetrics(ProcessHandle process) : process_(process),
+ last_time_(0),
+ last_system_time_(0) {
+ processor_count_ = base::SysInfo::NumberOfProcessors();
+}
+
+// static
+ProcessMetrics* ProcessMetrics::CreateProcessMetrics(ProcessHandle process) {
+ return new ProcessMetrics(process);
+}
+
+ProcessMetrics::~ProcessMetrics() { }
+
+bool DidProcessCrash(bool* child_exited, ProcessHandle handle) {
+ int status;
+ const int result = HANDLE_EINTR(waitpid(handle, &status, WNOHANG));
+ if (result == -1) {
+ // This shouldn't happen, but sometimes it does. The error is
+ // probably ECHILD and the reason is probably that a pid was
+ // waited on again after a previous wait reclaimed its zombie.
+ // (It could also occur if the process isn't a direct child, but
+ // don't do that.) This is bad, because it risks interfering with
+ // an unrelated child process if the pid is reused.
+ //
+ // So, lacking reliable information, we indicate that the process
+ // is dead, in the hope that the caller will give up and stop
+ // calling us. See also bug 943174 and bug 933680.
+ CHROMIUM_LOG(ERROR) << "waitpid failed pid:" << handle << " errno:" << errno;
+ if (child_exited)
+ *child_exited = true;
+ return false;
+ } else if (result == 0) {
+ // the child hasn't exited yet.
+ if (child_exited)
+ *child_exited = false;
+ return false;
+ }
+
+ if (child_exited)
+ *child_exited = true;
+
+ if (WIFSIGNALED(status)) {
+ switch(WTERMSIG(status)) {
+ case SIGSYS:
+ case SIGSEGV:
+ case SIGILL:
+ case SIGABRT:
+ case SIGFPE:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ if (WIFEXITED(status))
+ return WEXITSTATUS(status) != 0;
+
+ return false;
+}
+
+namespace {
+
+int64_t TimeValToMicroseconds(const struct timeval& tv) {
+ return tv.tv_sec * kMicrosecondsPerSecond + tv.tv_usec;
+}
+
+}
+
+int ProcessMetrics::GetCPUUsage() {
+ struct timeval now;
+ struct rusage usage;
+
+ int retval = gettimeofday(&now, NULL);
+ if (retval)
+ return 0;
+ retval = getrusage(RUSAGE_SELF, &usage);
+ if (retval)
+ return 0;
+
+ int64_t system_time = (TimeValToMicroseconds(usage.ru_stime) +
+ TimeValToMicroseconds(usage.ru_utime)) /
+ processor_count_;
+ int64_t time = TimeValToMicroseconds(now);
+
+ if ((last_system_time_ == 0) || (last_time_ == 0)) {
+ // First call, just set the last values.
+ last_system_time_ = system_time;
+ last_time_ = time;
+ return 0;
+ }
+
+ int64_t system_time_delta = system_time - last_system_time_;
+ int64_t time_delta = time - last_time_;
+ DCHECK(time_delta != 0);
+ if (time_delta == 0)
+ return 0;
+
+ // We add time_delta / 2 so the result is rounded.
+ int cpu = static_cast<int>((system_time_delta * 100 + time_delta / 2) /
+ time_delta);
+
+ last_system_time_ = system_time;
+ last_time_ = time;
+
+ return cpu;
+}
+
+} // namespace base
diff --git a/ipc/chromium/src/base/process_util_win.cc b/ipc/chromium/src/base/process_util_win.cc
new file mode 100644
index 000000000..12fbc23a5
--- /dev/null
+++ b/ipc/chromium/src/base/process_util_win.cc
@@ -0,0 +1,489 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// We need extended process and thread attribute support
+#undef _WIN32_WINNT
+#define _WIN32_WINNT 0x0600
+
+#include "base/process_util.h"
+
+#include <windows.h>
+#include <winternl.h>
+#include <psapi.h>
+
+#include "base/histogram.h"
+#include "base/logging.h"
+#include "base/win_util.h"
+
+#include <algorithm>
+#include "prenv.h"
+
+#include "mozilla/WindowsVersion.h"
+
+namespace {
+
+// System pagesize. This value remains constant on x86/64 architectures.
+const int PAGESIZE_KB = 4;
+
+// HeapSetInformation function pointer.
+typedef BOOL (WINAPI* HeapSetFn)(HANDLE, HEAP_INFORMATION_CLASS, PVOID, SIZE_T);
+
+typedef BOOL (WINAPI * InitializeProcThreadAttributeListFn)(
+ LPPROC_THREAD_ATTRIBUTE_LIST lpAttributeList,
+ DWORD dwAttributeCount,
+ DWORD dwFlags,
+ PSIZE_T lpSize
+);
+
+typedef BOOL (WINAPI * DeleteProcThreadAttributeListFn)(
+ LPPROC_THREAD_ATTRIBUTE_LIST lpAttributeList
+);
+
+typedef BOOL (WINAPI * UpdateProcThreadAttributeFn)(
+ LPPROC_THREAD_ATTRIBUTE_LIST lpAttributeList,
+ DWORD dwFlags,
+ DWORD_PTR Attribute,
+ PVOID lpValue,
+ SIZE_T cbSize,
+ PVOID lpPreviousValue,
+ PSIZE_T lpReturnSize
+);
+
+static InitializeProcThreadAttributeListFn InitializeProcThreadAttributeListPtr;
+static DeleteProcThreadAttributeListFn DeleteProcThreadAttributeListPtr;
+static UpdateProcThreadAttributeFn UpdateProcThreadAttributePtr;
+
+static mozilla::EnvironmentLog gProcessLog("MOZ_PROCESS_LOG");
+
+} // namespace
+
+namespace base {
+
+ProcessId GetCurrentProcId() {
+ return ::GetCurrentProcessId();
+}
+
+ProcessHandle GetCurrentProcessHandle() {
+ return ::GetCurrentProcess();
+}
+
+bool OpenProcessHandle(ProcessId pid, ProcessHandle* handle) {
+ // TODO(phajdan.jr): Take even more permissions out of this list.
+ ProcessHandle result = OpenProcess(PROCESS_DUP_HANDLE |
+ PROCESS_TERMINATE |
+ PROCESS_QUERY_INFORMATION |
+ SYNCHRONIZE,
+ FALSE, pid);
+
+ if (result == NULL) {
+ return false;
+ }
+
+ *handle = result;
+ return true;
+}
+
+bool OpenPrivilegedProcessHandle(ProcessId pid, ProcessHandle* handle) {
+ ProcessHandle result = OpenProcess(PROCESS_DUP_HANDLE |
+ PROCESS_TERMINATE |
+ PROCESS_QUERY_INFORMATION |
+ PROCESS_VM_READ |
+ SYNCHRONIZE,
+ FALSE, pid);
+
+ if (result == NULL) {
+ return false;
+ }
+
+ *handle = result;
+ return true;
+}
+
+void CloseProcessHandle(ProcessHandle process) {
+  // Closing a handle twice on Windows can be catastrophic - after the first
+  // close the handle value may be reused, so the second close would destroy
+  // whatever new handle has taken that value.
+ BOOL ok = CloseHandle(process);
+ DCHECK(ok);
+}
+
+// Helper for GetProcId()
+bool GetProcIdViaGetProcessId(ProcessHandle process, DWORD* id) {
+ // Dynamically get a pointer to GetProcessId().
+ typedef DWORD (WINAPI *GetProcessIdFunction)(HANDLE);
+ static GetProcessIdFunction GetProcessIdPtr = NULL;
+ static bool initialize_get_process_id = true;
+ if (initialize_get_process_id) {
+ initialize_get_process_id = false;
+ HMODULE kernel32_handle = GetModuleHandle(L"kernel32.dll");
+ if (!kernel32_handle) {
+ NOTREACHED();
+ return false;
+ }
+ GetProcessIdPtr = reinterpret_cast<GetProcessIdFunction>(GetProcAddress(
+ kernel32_handle, "GetProcessId"));
+ }
+ if (!GetProcessIdPtr)
+ return false;
+ // Ask for the process ID.
+ *id = (*GetProcessIdPtr)(process);
+ return true;
+}
+
+// Helper for GetProcId()
+bool GetProcIdViaNtQueryInformationProcess(ProcessHandle process, DWORD* id) {
+ // Dynamically get a pointer to NtQueryInformationProcess().
+ typedef NTSTATUS (WINAPI *NtQueryInformationProcessFunction)(
+ HANDLE, PROCESSINFOCLASS, PVOID, ULONG, PULONG);
+ static NtQueryInformationProcessFunction NtQueryInformationProcessPtr = NULL;
+ static bool initialize_query_information_process = true;
+ if (initialize_query_information_process) {
+ initialize_query_information_process = false;
+ // According to nsylvain, ntdll.dll is guaranteed to be loaded, even though
+ // the Windows docs seem to imply that you should LoadLibrary() it.
+ HMODULE ntdll_handle = GetModuleHandle(L"ntdll.dll");
+ if (!ntdll_handle) {
+ NOTREACHED();
+ return false;
+ }
+ NtQueryInformationProcessPtr =
+ reinterpret_cast<NtQueryInformationProcessFunction>(GetProcAddress(
+ ntdll_handle, "NtQueryInformationProcess"));
+ }
+ if (!NtQueryInformationProcessPtr)
+ return false;
+ // Ask for the process ID.
+ PROCESS_BASIC_INFORMATION info;
+ ULONG bytes_returned;
+ NTSTATUS status = (*NtQueryInformationProcessPtr)(process,
+ ProcessBasicInformation,
+ &info, sizeof info,
+ &bytes_returned);
+ if (!SUCCEEDED(status) || (bytes_returned != (sizeof info)))
+ return false;
+
+ *id = static_cast<DWORD>(info.UniqueProcessId);
+ return true;
+}
+
+ProcessId GetProcId(ProcessHandle process) {
+ // Get a handle to |process| that has PROCESS_QUERY_INFORMATION rights.
+ HANDLE current_process = GetCurrentProcess();
+ HANDLE process_with_query_rights;
+ if (DuplicateHandle(current_process, process, current_process,
+ &process_with_query_rights, PROCESS_QUERY_INFORMATION,
+ false, 0)) {
+ // Try to use GetProcessId(), if it exists. Fall back on
+ // NtQueryInformationProcess() otherwise (< Win XP SP1).
+ DWORD id;
+ bool success =
+ GetProcIdViaGetProcessId(process_with_query_rights, &id) ||
+ GetProcIdViaNtQueryInformationProcess(process_with_query_rights, &id);
+ CloseHandle(process_with_query_rights);
+ if (success)
+ return id;
+ }
+
+ // We're screwed.
+ NOTREACHED();
+ return 0;
+}
+
+// from sandbox_policy_base.cc in a later version of the chromium ipc code...
+bool IsInheritableHandle(HANDLE handle) {
+ if (!handle)
+ return false;
+ if (handle == INVALID_HANDLE_VALUE)
+ return false;
+ // File handles (FILE_TYPE_DISK) and pipe handles are known to be
+ // inheritable. Console handles (FILE_TYPE_CHAR) are not
+ // inheritable via PROC_THREAD_ATTRIBUTE_HANDLE_LIST.
+ DWORD handle_type = GetFileType(handle);
+ return handle_type == FILE_TYPE_DISK || handle_type == FILE_TYPE_PIPE;
+}
+
+void LoadThreadAttributeFunctions() {
+ HMODULE kernel32 = GetModuleHandle(L"kernel32.dll");
+ InitializeProcThreadAttributeListPtr =
+ reinterpret_cast<InitializeProcThreadAttributeListFn>
+ (GetProcAddress(kernel32, "InitializeProcThreadAttributeList"));
+ DeleteProcThreadAttributeListPtr =
+ reinterpret_cast<DeleteProcThreadAttributeListFn>
+ (GetProcAddress(kernel32, "DeleteProcThreadAttributeList"));
+ UpdateProcThreadAttributePtr =
+ reinterpret_cast<UpdateProcThreadAttributeFn>
+ (GetProcAddress(kernel32, "UpdateProcThreadAttribute"));
+}
+
+// Creates and returns a "thread attribute list" to pass to the child process.
+// The return value is a pointer to a PROC_THREAD_ATTRIBUTE_LIST, or NULL if
+// either the functions we need aren't available (eg, XP or earlier) or the
+// functions we need failed.
+// The result of this function must be passed to FreeThreadAttributeList.
+// Note that the pointer to the HANDLE array ends up embedded in the result of
+// this function and must stay alive until FreeThreadAttributeList is called,
+// hence it is passed in so the owner is the caller of this function.
+LPPROC_THREAD_ATTRIBUTE_LIST CreateThreadAttributeList(HANDLE *handlesToInherit,
+ int handleCount) {
+ if (!InitializeProcThreadAttributeListPtr ||
+ !DeleteProcThreadAttributeListPtr ||
+ !UpdateProcThreadAttributePtr)
+ LoadThreadAttributeFunctions();
+ // shouldn't happen as we are only called for Vista+, but better safe than sorry...
+ if (!InitializeProcThreadAttributeListPtr ||
+ !DeleteProcThreadAttributeListPtr ||
+ !UpdateProcThreadAttributePtr)
+ return NULL;
+
+ SIZE_T threadAttrSize;
+  LPPROC_THREAD_ATTRIBUTE_LIST lpAttributeList = NULL;  // so the fail: path can test it safely
+
+ if (!(*InitializeProcThreadAttributeListPtr)(NULL, 1, 0, &threadAttrSize) &&
+ GetLastError() != ERROR_INSUFFICIENT_BUFFER)
+ goto fail;
+ lpAttributeList = reinterpret_cast<LPPROC_THREAD_ATTRIBUTE_LIST>
+ (malloc(threadAttrSize));
+ if (!lpAttributeList ||
+ !(*InitializeProcThreadAttributeListPtr)(lpAttributeList, 1, 0, &threadAttrSize))
+ goto fail;
+
+ if (!(*UpdateProcThreadAttributePtr)(lpAttributeList,
+ 0, PROC_THREAD_ATTRIBUTE_HANDLE_LIST,
+ handlesToInherit,
+ sizeof(handlesToInherit[0]) * handleCount,
+ NULL, NULL)) {
+ (*DeleteProcThreadAttributeListPtr)(lpAttributeList);
+ goto fail;
+ }
+ return lpAttributeList;
+
+fail:
+ if (lpAttributeList)
+ free(lpAttributeList);
+ return NULL;
+}
+
+// Frees the data returned by CreateThreadAttributeList.
+void FreeThreadAttributeList(LPPROC_THREAD_ATTRIBUTE_LIST lpAttributeList) {
+  // DeleteProcThreadAttributeListPtr can't be NULL here, as we already
+  // checked it existed when creating the data we are now freeing.
+ (*DeleteProcThreadAttributeListPtr)(lpAttributeList);
+ free(lpAttributeList);
+}
+
+bool LaunchApp(const std::wstring& cmdline,
+ bool wait, bool start_hidden, ProcessHandle* process_handle) {
+
+ // We want to inherit the std handles so dump() statements and assertion
+ // messages in the child process can be seen - but we *do not* want to
+ // blindly have all handles inherited. Vista and later has a technique
+ // where only specified handles are inherited - so we use this technique if
+ // we can. If that technique isn't available (or it fails), we just don't
+ // inherit anything. This can cause us a problem for Windows XP testing,
+ // because we sometimes need the handles to get inherited for test logging to
+ // work. So we also inherit when a specific environment variable is set.
+ DWORD dwCreationFlags = 0;
+ BOOL bInheritHandles = FALSE;
+ // We use a STARTUPINFOEX, but if we can't do the thread attribute thing, we
+ // just pass the size of a STARTUPINFO.
+ STARTUPINFOEX startup_info_ex;
+ ZeroMemory(&startup_info_ex, sizeof(startup_info_ex));
+ STARTUPINFO &startup_info = startup_info_ex.StartupInfo;
+ startup_info.cb = sizeof(startup_info);
+ startup_info.dwFlags = STARTF_USESHOWWINDOW;
+ startup_info.wShowWindow = start_hidden ? SW_HIDE : SW_SHOW;
+
+ LPPROC_THREAD_ATTRIBUTE_LIST lpAttributeList = NULL;
+ // Don't even bother trying pre-Vista...
+ if (mozilla::IsVistaOrLater()) {
+ // setup our handle array first - if we end up with no handles that can
+ // be inherited we can avoid trying to do the ThreadAttributeList dance...
+ HANDLE handlesToInherit[2];
+ int handleCount = 0;
+ HANDLE stdOut = ::GetStdHandle(STD_OUTPUT_HANDLE);
+ HANDLE stdErr = ::GetStdHandle(STD_ERROR_HANDLE);
+
+ if (IsInheritableHandle(stdOut))
+ handlesToInherit[handleCount++] = stdOut;
+ if (stdErr != stdOut && IsInheritableHandle(stdErr))
+ handlesToInherit[handleCount++] = stdErr;
+
+ if (handleCount) {
+ lpAttributeList = CreateThreadAttributeList(handlesToInherit, handleCount);
+ if (lpAttributeList) {
+ // it's safe to inherit handles, so arrange for that...
+ startup_info.cb = sizeof(startup_info_ex);
+ startup_info.dwFlags |= STARTF_USESTDHANDLES;
+ startup_info.hStdOutput = stdOut;
+ startup_info.hStdError = stdErr;
+ startup_info.hStdInput = INVALID_HANDLE_VALUE;
+ startup_info_ex.lpAttributeList = lpAttributeList;
+ dwCreationFlags |= EXTENDED_STARTUPINFO_PRESENT;
+ bInheritHandles = TRUE;
+ }
+ }
+ } else if (PR_GetEnv("MOZ_WIN_INHERIT_STD_HANDLES_PRE_VISTA")) {
+ // Even if we can't limit what gets inherited, we sometimes want to inherit
+ // stdout/err for testing purposes.
+ startup_info.dwFlags |= STARTF_USESTDHANDLES;
+ startup_info.hStdOutput = ::GetStdHandle(STD_OUTPUT_HANDLE);
+ startup_info.hStdError = ::GetStdHandle(STD_ERROR_HANDLE);
+ startup_info.hStdInput = INVALID_HANDLE_VALUE;
+ bInheritHandles = TRUE;
+ }
+
+ PROCESS_INFORMATION process_info;
+ BOOL createdOK = CreateProcess(NULL,
+ const_cast<wchar_t*>(cmdline.c_str()), NULL, NULL,
+ bInheritHandles, dwCreationFlags, NULL, NULL,
+ &startup_info, &process_info);
+ if (lpAttributeList)
+ FreeThreadAttributeList(lpAttributeList);
+ if (!createdOK)
+ return false;
+
+ gProcessLog.print("==> process %d launched child process %d (%S)\n",
+ GetCurrentProcId(),
+ process_info.dwProcessId,
+ cmdline.c_str());
+
+ // Handles must be closed or they will leak
+ CloseHandle(process_info.hThread);
+
+ if (wait)
+ WaitForSingleObject(process_info.hProcess, INFINITE);
+
+ // If the caller wants the process handle, we won't close it.
+ if (process_handle) {
+ *process_handle = process_info.hProcess;
+ } else {
+ CloseHandle(process_info.hProcess);
+ }
+ return true;
+}
+
+bool LaunchApp(const CommandLine& cl,
+ bool wait, bool start_hidden, ProcessHandle* process_handle) {
+ return LaunchApp(cl.command_line_string(), wait,
+ start_hidden, process_handle);
+}
+
+bool KillProcess(ProcessHandle process, int exit_code, bool wait) {
+ bool result = (TerminateProcess(process, exit_code) != FALSE);
+ if (result && wait) {
+ // The process may not end immediately due to pending I/O
+ if (WAIT_OBJECT_0 != WaitForSingleObject(process, 60 * 1000))
+ DLOG(ERROR) << "Error waiting for process exit: " << GetLastError();
+ } else if (!result) {
+ DLOG(ERROR) << "Unable to terminate process: " << GetLastError();
+ }
+ return result;
+}
+
+bool DidProcessCrash(bool* child_exited, ProcessHandle handle) {
+ DWORD exitcode = 0;
+
+ if (child_exited)
+    *child_exited = true; // On Windows it is an error to call this function
+                          // if the child hasn't already exited.
+ if (!::GetExitCodeProcess(handle, &exitcode)) {
+ NOTREACHED();
+ return false;
+ }
+ if (exitcode == STILL_ACTIVE) {
+ // The process is likely not dead or it used 0x103 as exit code.
+ NOTREACHED();
+ return false;
+ }
+
+ // Warning, this is not generic code; it heavily depends on the way
+ // the rest of the code kills a process.
+
+ if (exitcode == PROCESS_END_NORMAL_TERMINATON ||
+ exitcode == PROCESS_END_KILLED_BY_USER ||
+ exitcode == PROCESS_END_PROCESS_WAS_HUNG ||
+ exitcode == 0xC0000354 || // STATUS_DEBUGGER_INACTIVE.
+ exitcode == 0xC000013A || // Control-C/end session.
+ exitcode == 0x40010004) { // Debugger terminated process/end session.
+ return false;
+ }
+
+ return true;
+}
+
+void SetCurrentProcessPrivileges(ChildPrivileges privs) {
+
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// ProcessMetrics
+
+ProcessMetrics::ProcessMetrics(ProcessHandle process) : process_(process),
+ last_time_(0),
+ last_system_time_(0) {
+ SYSTEM_INFO system_info;
+ GetSystemInfo(&system_info);
+ processor_count_ = system_info.dwNumberOfProcessors;
+}
+
+// static
+ProcessMetrics* ProcessMetrics::CreateProcessMetrics(ProcessHandle process) {
+ return new ProcessMetrics(process);
+}
+
+ProcessMetrics::~ProcessMetrics() { }
+
+static uint64_t FileTimeToUTC(const FILETIME& ftime) {
+ LARGE_INTEGER li;
+ li.LowPart = ftime.dwLowDateTime;
+ li.HighPart = ftime.dwHighDateTime;
+ return li.QuadPart;
+}
+
+int ProcessMetrics::GetCPUUsage() {
+ FILETIME now;
+ FILETIME creation_time;
+ FILETIME exit_time;
+ FILETIME kernel_time;
+ FILETIME user_time;
+
+ GetSystemTimeAsFileTime(&now);
+
+ if (!GetProcessTimes(process_, &creation_time, &exit_time,
+ &kernel_time, &user_time)) {
+ // We don't assert here because in some cases (such as in the Task Manager)
+ // we may call this function on a process that has just exited but we have
+ // not yet received the notification.
+ return 0;
+ }
+ int64_t system_time = (FileTimeToUTC(kernel_time) + FileTimeToUTC(user_time)) /
+ processor_count_;
+ int64_t time = FileTimeToUTC(now);
+
+ if ((last_system_time_ == 0) || (last_time_ == 0)) {
+ // First call, just set the last values.
+ last_system_time_ = system_time;
+ last_time_ = time;
+ return 0;
+ }
+
+ int64_t system_time_delta = system_time - last_system_time_;
+ int64_t time_delta = time - last_time_;
+ DCHECK(time_delta != 0);
+ if (time_delta == 0)
+ return 0;
+
+ // We add time_delta / 2 so the result is rounded.
+ int cpu = static_cast<int>((system_time_delta * 100 + time_delta / 2) /
+ time_delta);
+
+ last_system_time_ = system_time;
+ last_time_ = time;
+
+ return cpu;
+}
+
+} // namespace base
diff --git a/ipc/chromium/src/base/process_win.cc b/ipc/chromium/src/base/process_win.cc
new file mode 100644
index 000000000..4d0f137c4
--- /dev/null
+++ b/ipc/chromium/src/base/process_win.cc
@@ -0,0 +1,64 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/process.h"
+#include "base/logging.h"
+#include "base/process_util.h"
+
+namespace base {
+
+void Process::Close() {
+ if (!process_)
+ return;
+ CloseProcessHandle(process_);
+ process_ = NULL;
+}
+
+void Process::Terminate(int result_code) {
+ if (!process_)
+ return;
+ ::TerminateProcess(process_, result_code);
+}
+
+bool Process::IsProcessBackgrounded() const {
+ DCHECK(process_);
+ DWORD priority = GetPriorityClass(process_);
+ if (priority == 0)
+ return false; // Failure case.
+ return priority == BELOW_NORMAL_PRIORITY_CLASS;
+}
+
+bool Process::SetProcessBackgrounded(bool value) {
+ DCHECK(process_);
+ DWORD priority = value ? BELOW_NORMAL_PRIORITY_CLASS : NORMAL_PRIORITY_CLASS;
+ return (SetPriorityClass(process_, priority) != 0);
+}
+
+bool Process::EmptyWorkingSet() {
+ if (!process_)
+ return false;
+
+ BOOL rv = SetProcessWorkingSetSize(process_, -1, -1);
+ return rv == TRUE;
+}
+
+ProcessId Process::pid() const {
+ if (process_ == 0)
+ return 0;
+
+ return GetProcId(process_);
+}
+
+bool Process::is_current() const {
+ return process_ == GetCurrentProcess();
+}
+
+// static
+Process Process::Current() {
+ return Process(GetCurrentProcess());
+}
+
+} // namespace base
diff --git a/ipc/chromium/src/base/rand_util.cc b/ipc/chromium/src/base/rand_util.cc
new file mode 100644
index 000000000..912978616
--- /dev/null
+++ b/ipc/chromium/src/base/rand_util.cc
@@ -0,0 +1,42 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright (c) 2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/rand_util.h"
+
+#include <math.h>
+
+#include <limits>
+
+#include "base/basictypes.h"
+#include "base/logging.h"
+
+namespace base {
+
+int RandInt(int min, int max) {
+ DCHECK(min <= max);
+
+ uint64_t range = static_cast<int64_t>(max) - min + 1;
+ uint64_t number = base::RandUint64();
+ int result = min + static_cast<int>(number % range);
+ DCHECK(result >= min && result <= max);
+ return result;
+}
+
+double RandDouble() {
+ // We try to get maximum precision by masking out as many bits as will fit
+ // in the target type's mantissa, and raising it to an appropriate power to
+ // produce output in the range [0, 1). For IEEE 754 doubles, the mantissa
+ // is expected to accommodate 53 bits.
+
+ COMPILE_ASSERT(std::numeric_limits<double>::radix == 2, otherwise_use_scalbn);
+ static const int kBits = std::numeric_limits<double>::digits;
+ uint64_t random_bits = base::RandUint64() & ((GG_UINT64_C(1) << kBits) - 1);
+ double result = ldexp(static_cast<double>(random_bits), -1 * kBits);
+ DCHECK(result >= 0.0 && result < 1.0);
+ return result;
+}
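+
+// Worked sketch of the math above (editorial note): with kBits == 53,
+// random_bits is uniform over [0, 2^53), and ldexp(random_bits, -53) equals
+// random_bits / 2^53, i.e. a uniformly distributed multiple of 2^-53 in the
+// half-open interval [0, 1).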
+
+} // namespace base
diff --git a/ipc/chromium/src/base/rand_util.h b/ipc/chromium/src/base/rand_util.h
new file mode 100644
index 000000000..fd014441f
--- /dev/null
+++ b/ipc/chromium/src/base/rand_util.h
@@ -0,0 +1,32 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright (c) 2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_RAND_UTIL_H_
+#define BASE_RAND_UTIL_H_
+
+#include "base/basictypes.h"
+
+namespace base {
+
+// Returns a random number in range [0, kuint64max]. Thread-safe.
+uint64_t RandUint64();
+
+// Returns a random number between min and max (inclusive). Thread-safe.
+int RandInt(int min, int max);
+
+// Returns a random double in range [0, 1). Thread-safe.
+double RandDouble();
+
+// Fills |output_length| bytes of |output| with random data.
+//
+// WARNING:
+// Do not use for security-sensitive purposes.
+// See crypto/ for cryptographically secure random number generation APIs.
+void RandBytes(void* output, size_t output_length);
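+
+// Usage sketch (editorial addition; illustrative only):
+//   int die = base::RandInt(1, 6);      // uniform in [1, 6]
+//   double p = base::RandDouble();      // uniform in [0, 1)
+//   char buf[16];
+//   base::RandBytes(buf, sizeof(buf));  // fills |buf| with random bytes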
+
+} // namespace base
+
+#endif // BASE_RAND_UTIL_H_
diff --git a/ipc/chromium/src/base/rand_util_posix.cc b/ipc/chromium/src/base/rand_util_posix.cc
new file mode 100644
index 000000000..9a6b0f0ef
--- /dev/null
+++ b/ipc/chromium/src/base/rand_util_posix.cc
@@ -0,0 +1,31 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright (c) 2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/rand_util.h"
+
+#include <fcntl.h>
+#include <unistd.h>
+
+#include "base/file_util.h"
+#include "base/logging.h"
+
+namespace base {
+
+uint64_t RandUint64() {
+ uint64_t number;
+
+ int urandom_fd = open("/dev/urandom", O_RDONLY);
+ CHECK(urandom_fd >= 0);
+ bool success = file_util::ReadFromFD(urandom_fd,
+ reinterpret_cast<char*>(&number),
+ sizeof(number));
+ CHECK(success);
+ close(urandom_fd);
+
+ return number;
+}
+
+} // namespace base
diff --git a/ipc/chromium/src/base/rand_util_win.cc b/ipc/chromium/src/base/rand_util_win.cc
new file mode 100644
index 000000000..ee189f28e
--- /dev/null
+++ b/ipc/chromium/src/base/rand_util_win.cc
@@ -0,0 +1,46 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/rand_util.h"
+#include <windows.h>
+#include <stddef.h>
+#include <stdint.h>
+
+// #define needed to link in RtlGenRandom(), a.k.a. SystemFunction036. See the
+// "Community Additions" comment on MSDN here:
+// http://msdn.microsoft.com/en-us/library/windows/desktop/aa387694.aspx
+#define SystemFunction036 NTAPI SystemFunction036
+#include <ntsecapi.h>
+#undef SystemFunction036
+
+#include <algorithm>
+#include <limits>
+
+#include "base/logging.h"
+
+namespace base {
+
+// NOTE: This function must be cryptographically secure. http://crbug.com/140076
+uint64_t RandUint64() {
+ uint64_t number;
+ RandBytes(&number, sizeof(number));
+ return number;
+}
+
+void RandBytes(void* output, size_t output_length) {
+ char* output_ptr = static_cast<char*>(output);
+ while (output_length > 0) {
+ const ULONG output_bytes_this_pass = static_cast<ULONG>(std::min(
+ output_length, static_cast<size_t>(std::numeric_limits<ULONG>::max())));
+ const bool success =
+ RtlGenRandom(output_ptr, output_bytes_this_pass) != FALSE;
+ CHECK(success);
+ output_length -= output_bytes_this_pass;
+ output_ptr += output_bytes_this_pass;
+ }
+}
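+
+// Editorial note with a usage sketch: the chunking above only matters for
+// requests larger than ULONG_MAX bytes; a typical call such as
+//   uint64_t nonce;
+//   base::RandBytes(&nonce, sizeof(nonce));
+// is satisfied by a single RtlGenRandom pass.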
+
+} // namespace base
diff --git a/ipc/chromium/src/base/revocable_store.cc b/ipc/chromium/src/base/revocable_store.cc
new file mode 100644
index 000000000..9a586a5a6
--- /dev/null
+++ b/ipc/chromium/src/base/revocable_store.cc
@@ -0,0 +1,49 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/revocable_store.h"
+
+#include "base/logging.h"
+
+RevocableStore::Revocable::Revocable(RevocableStore* store)
+ : store_reference_(store->owning_reference_) {
+ // We AddRef() the owning reference.
+ DCHECK(store_reference_->store());
+ store_reference_->store()->Add(this);
+}
+
+RevocableStore::Revocable::~Revocable() {
+ if (!revoked()) {
+ // Notify the store of our destruction.
+ --(store_reference_->store()->count_);
+ }
+}
+
+RevocableStore::RevocableStore() : count_(0) {
+ // Create a new owning reference.
+ owning_reference_ = new StoreRef(this);
+}
+
+RevocableStore::~RevocableStore() {
+ // Revoke all the items in the store.
+ owning_reference_->set_store(NULL);
+}
+
+void RevocableStore::Add(Revocable* item) {
+ DCHECK(!item->revoked());
+ ++count_;
+}
+
+void RevocableStore::RevokeAll() {
+ // We revoke all the existing items in the store and reset our count.
+ owning_reference_->set_store(NULL);
+ count_ = 0;
+
+ // Then we create a new owning reference for new items that get added.
+ // This Release()s the old owning reference, allowing it to be freed after
+ // all the items that were in the store are eventually destroyed.
+ owning_reference_ = new StoreRef(this);
+}
diff --git a/ipc/chromium/src/base/revocable_store.h b/ipc/chromium/src/base/revocable_store.h
new file mode 100644
index 000000000..13095728b
--- /dev/null
+++ b/ipc/chromium/src/base/revocable_store.h
@@ -0,0 +1,81 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_REVOCABLE_STORE_H_
+#define BASE_REVOCABLE_STORE_H_
+
+#include "base/basictypes.h"
+#include "nsISupportsImpl.h"
+#include "nsAutoPtr.h"
+
+// |RevocableStore| is a container of items that can be removed from the store.
+class RevocableStore {
+ public:
+ // A |StoreRef| is used to link the |RevocableStore| to its items. There is
+ // one StoreRef per store, and each item holds a reference to it. If the
+ // store wishes to revoke its items, it sets |store_| to null. Items are
+ // permitted to release their reference to the |StoreRef| when they no longer
+ // require the store.
+ class StoreRef final {
+ public:
+ NS_INLINE_DECL_THREADSAFE_REFCOUNTING(StoreRef)
+ explicit StoreRef(RevocableStore* aStore) : store_(aStore) { }
+
+ void set_store(RevocableStore* aStore) { store_ = aStore; }
+ RevocableStore* store() const { return store_; }
+
+ protected:
+ ~StoreRef() {}
+ private:
+ RevocableStore* store_;
+
+ DISALLOW_EVIL_CONSTRUCTORS(StoreRef);
+ };
+
+ // An item in the store. On construction, the object adds itself to the
+ // store.
+ class Revocable {
+ public:
+ explicit Revocable(RevocableStore* store);
+ ~Revocable();
+
+ // This item has been revoked if it no longer has a pointer to the store.
+ bool revoked() const { return !store_reference_->store(); }
+
+ private:
+ // We hold a reference to the store through this ref pointer. We release
+ // this reference on destruction.
+ RefPtr<StoreRef> store_reference_;
+
+ DISALLOW_EVIL_CONSTRUCTORS(Revocable);
+ };
+
+ RevocableStore();
+ ~RevocableStore();
+
+ // Revokes all the items in the store.
+ void RevokeAll();
+
+ // Returns true if there are no items in the store.
+ bool empty() const { return count_ == 0; }
+
+ private:
+ friend class Revocable;
+
+ // Adds an item to the store. To add an item to the store, construct it
+ // with a pointer to the store.
+ void Add(Revocable* item);
+
+ // This is the reference the unrevoked items in the store hold.
+ RefPtr<StoreRef> owning_reference_;
+
+ // The number of unrevoked items in the store.
+ int count_;
+
+ DISALLOW_EVIL_CONSTRUCTORS(RevocableStore);
+};
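+
+// Usage sketch (editorial addition; |PendingWork| is a hypothetical item type):
+//
+//   class PendingWork : public RevocableStore::Revocable {
+//    public:
+//     explicit PendingWork(RevocableStore* store)
+//         : RevocableStore::Revocable(store) { }
+//     void Run() { if (!revoked()) { /* do the work */ } }
+//   };
+//
+//   RevocableStore store;
+//   PendingWork work(&store);
+//   store.RevokeAll();  // work.revoked() is now true, so Run() does nothing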
+
+#endif // BASE_REVOCABLE_STORE_H_
diff --git a/ipc/chromium/src/base/scoped_bstr_win.h b/ipc/chromium/src/base/scoped_bstr_win.h
new file mode 100644
index 000000000..86cfb0254
--- /dev/null
+++ b/ipc/chromium/src/base/scoped_bstr_win.h
@@ -0,0 +1,145 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_SCOPED_BSTR_WIN_H_
+#define BASE_SCOPED_BSTR_WIN_H_
+
+#include "base/basictypes.h" // needed to pick up OS_WIN
+
+#include "base/logging.h"
+
+#include <windows.h>
+#include <oleauto.h>
+
+// Manages a BSTR string pointer.
+// The class interface is based on scoped_ptr.
+class ScopedBstr {
+ public:
+ ScopedBstr() : bstr_(NULL) {
+ }
+
+ // Constructor to create a new BSTR.
+ // NOTE: Do not pass a BSTR to this constructor expecting ownership to
+ // be transferred - even though it compiles! ;-)
+ explicit ScopedBstr(const wchar_t* non_bstr);
+ ~ScopedBstr();
+
+ // Give ScopedBstr ownership over an already allocated BSTR or NULL.
+ // If you need to allocate a new BSTR instance, use |allocate| instead.
+ void Reset(BSTR bstr = NULL);
+
+ // Releases ownership of the BSTR to the caller.
+ BSTR Release();
+
+ // Creates a new BSTR from a wide string.
+ // If you already have a BSTR and want to transfer ownership to the
+ // ScopedBstr instance, call |reset| instead.
+ // Returns a pointer to the new BSTR, or NULL if allocation failed.
+ BSTR Allocate(const wchar_t* wide_str);
+
+ // Allocates a new BSTR with the specified number of bytes.
+ // Returns a pointer to the new BSTR, or NULL if allocation failed.
+ BSTR AllocateBytes(int bytes);
+
+ // Sets the allocated length field of the already-allocated BSTR to be
+ // |bytes|. This is useful when the BSTR was preallocated with e.g.
+ // SysAllocStringLen or SysAllocStringByteLen (call |AllocateBytes|) and
+ // then not all the bytes are being used.
+ // Note that if you want to set the length to a specific number of characters,
+ // you need to multiply by sizeof(wchar_t). Oddly, there's no public API to
+ // set the length, so we do this ourselves by hand.
+ //
+ // NOTE: The actual allocated size of the BSTR MUST be >= bytes.
+ // That responsibility is with the caller.
+ void SetByteLen(uint32_t bytes);
+
+ // Swap values of two ScopedBstr's.
+ void Swap(ScopedBstr& bstr2);
+
+ // Retrieves the pointer address.
+ // Used to receive BSTRs as out arguments (and take ownership).
+ // The function DCHECKs on the current value being NULL.
+ // Usage: GetBstr(bstr.Receive());
+ BSTR* Receive();
+
+ // Returns number of chars in the BSTR.
+ uint32_t Length() const;
+
+ // Returns the number of bytes allocated for the BSTR.
+ uint32_t ByteLength() const;
+
+ operator BSTR() const {
+ return bstr_;
+ }
+
+ protected:
+ BSTR bstr_;
+
+ private:
+ // Forbid comparison of ScopedBstr types. You should never have the same
+ // BSTR owned by two different scoped_ptrs.
+ bool operator==(const ScopedBstr& bstr2) const;
+ bool operator!=(const ScopedBstr& bstr2) const;
+ DISALLOW_COPY_AND_ASSIGN(ScopedBstr);
+};
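+
+// Usage sketch (editorial addition; SomeComMethod is a hypothetical function
+// taking a BSTR argument):
+//   ScopedBstr name;
+//   name.Allocate(L"document");
+//   SomeComMethod(name);  // implicit conversion to BSTR
+//   // The owned BSTR is freed when |name| goes out of scope.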
+
+// Template class to generate a BSTR from a static wide string
+// without touching the heap. Use this class via the StackBstrVar and
+// StackBstr macros.
+template <uint32_t string_bytes>
+class StackBstrT {
+ public:
+ // Try to stay as const as we can in an attempt to avoid someone
+ // using the class incorrectly (e.g. by supplying a variable instead
+  // of a verbatim string). We also have an assert in the constructor
+ // as an extra runtime check since the const-ness only catches one case.
+ explicit StackBstrT(const wchar_t* const str) {
+ // The BSTR API uses UINT, but we prefer uint32_t.
+ // Make sure we'll know about it if these types don't match.
+ COMPILE_ASSERT(sizeof(uint32_t) == sizeof(UINT), UintToUint32);
+ COMPILE_ASSERT(sizeof(wchar_t) == sizeof(OLECHAR), WcharToOlechar);
+
+ // You shouldn't pass string pointers to this constructor since
+ // there's no way for the compiler to calculate the length of the
+ // string (string_bytes will be equal to pointer size in those cases).
+ DCHECK(lstrlenW(str) == (string_bytes / sizeof(bstr_.str_[0])) - 1) <<
+ "not expecting a string pointer";
+ memcpy(bstr_.str_, str, string_bytes);
+ bstr_.len_ = string_bytes - sizeof(wchar_t);
+ }
+
+ operator BSTR() {
+ return bstr_.str_;
+ }
+
+ protected:
+ struct BstrInternal {
+ uint32_t len_;
+ wchar_t str_[string_bytes / sizeof(wchar_t)];
+ } bstr_;
+};
+
+// Use this macro to generate an inline BSTR from a wide string.
+// This is about 6 times faster than using the SysAllocXxx functions to
+// allocate a BSTR and helps with keeping heap fragmentation down.
+// Example:
+// DoBstrStuff(StackBstr(L"This is my BSTR"));
+// Where DoBstrStuff is:
+// HRESULT DoBstrStuff(BSTR bstr) { ... }
+#define StackBstr(str) \
+ static_cast<BSTR>(StackBstrT<sizeof(str)>(str))
+
+// If you need a named BSTR variable that's based on a fixed string
+// (e.g. if the BSTR is used inside a loop or more than one place),
+// use StackBstrVar to declare a variable.
+// Example:
+// StackBstrVar(L"my_property", myprop);
+// for (int i = 0; i < objects.length(); ++i)
+// ProcessValue(objects[i].GetProp(myprop)); // GetProp accepts BSTR
+#define StackBstrVar(str, var) \
+ StackBstrT<sizeof(str)> var(str)
+
+#endif // BASE_SCOPED_BSTR_WIN_H_
diff --git a/ipc/chromium/src/base/scoped_cftyperef.h b/ipc/chromium/src/base/scoped_cftyperef.h
new file mode 100644
index 000000000..703a5130b
--- /dev/null
+++ b/ipc/chromium/src/base/scoped_cftyperef.h
@@ -0,0 +1,80 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_SCOPED_CFTYPEREF_H_
+#define BASE_SCOPED_CFTYPEREF_H_
+
+#include <CoreFoundation/CoreFoundation.h>
+#include "base/basictypes.h"
+
+// scoped_cftyperef<> is patterned after scoped_ptr<>, but maintains ownership
+// of a CoreFoundation object: any object that can be represented as a
+// CFTypeRef. Style deviations here are solely for compatibility with
+// scoped_ptr<>'s interface, with which everyone is already familiar.
+//
+// When scoped_cftyperef<> takes ownership of an object (in the constructor or
+// in reset()), it takes over the caller's existing ownership claim. The
+// caller must own the object it gives to scoped_cftyperef<>, and relinquishes
+// an ownership claim to that object. scoped_cftyperef<> does not call
+// CFRetain().
+template<typename CFT>
+class scoped_cftyperef {
+ public:
+ typedef CFT element_type;
+
+ explicit scoped_cftyperef(CFT object = NULL)
+ : object_(object) {
+ }
+
+ ~scoped_cftyperef() {
+ if (object_)
+ CFRelease(object_);
+ }
+
+ void reset(CFT object = NULL) {
+ if (object_)
+ CFRelease(object_);
+ object_ = object;
+ }
+
+ bool operator==(CFT that) const {
+ return object_ == that;
+ }
+
+ bool operator!=(CFT that) const {
+ return object_ != that;
+ }
+
+ operator CFT() const {
+ return object_;
+ }
+
+ CFT get() const {
+ return object_;
+ }
+
+ void swap(scoped_cftyperef& that) {
+ CFT temp = that.object_;
+ that.object_ = object_;
+ object_ = temp;
+ }
+
+ // scoped_cftyperef<>::release() is like scoped_ptr<>::release. It is NOT
+ // a wrapper for CFRelease(). To force a scoped_cftyperef<> object to call
+ // CFRelease(), use scoped_cftyperef<>::reset().
+ CFT release() {
+ CFT temp = object_;
+ object_ = NULL;
+ return temp;
+ }
+
+ private:
+ CFT object_;
+
+ DISALLOW_COPY_AND_ASSIGN(scoped_cftyperef);
+};
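+
+// Usage sketch (editorial addition; illustrative only):
+//   scoped_cftyperef<CFStringRef> str(
+//       CFStringCreateWithCString(NULL, "hello", kCFStringEncodingUTF8));
+//   CFIndex length = CFStringGetLength(str.get());
+//   // CFRelease is called automatically when |str| goes out of scope.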
+
+#endif // BASE_SCOPED_CFTYPEREF_H_
diff --git a/ipc/chromium/src/base/scoped_handle.h b/ipc/chromium/src/base/scoped_handle.h
new file mode 100644
index 000000000..f4fc29b66
--- /dev/null
+++ b/ipc/chromium/src/base/scoped_handle.h
@@ -0,0 +1,56 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_SCOPED_HANDLE_H_
+#define BASE_SCOPED_HANDLE_H_
+
+#include <stdio.h>
+
+#include "base/basictypes.h"
+
+#if defined(OS_WIN)
+#include "base/scoped_handle_win.h"
+#endif
+
+class ScopedStdioHandle {
+ public:
+ ScopedStdioHandle()
+ : handle_(NULL) { }
+
+ explicit ScopedStdioHandle(FILE* handle)
+ : handle_(handle) { }
+
+ ~ScopedStdioHandle() {
+ Close();
+ }
+
+ void Close() {
+ if (handle_) {
+ fclose(handle_);
+ handle_ = NULL;
+ }
+ }
+
+ FILE* get() const { return handle_; }
+
+ FILE* Take() {
+ FILE* temp = handle_;
+ handle_ = NULL;
+ return temp;
+ }
+
+ void Set(FILE* newhandle) {
+ Close();
+ handle_ = newhandle;
+ }
+
+ private:
+ FILE* handle_;
+
+ DISALLOW_EVIL_CONSTRUCTORS(ScopedStdioHandle);
+};
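+
+// Usage sketch (editorial addition; illustrative only):
+//   ScopedStdioHandle file(fopen("settings.cfg", "r"));
+//   if (file.get()) {
+//     char buf[256];
+//     fread(buf, 1, sizeof(buf), file.get());
+//   }
+//   // fclose() is called automatically when |file| goes out of scope.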
+
+#endif // BASE_SCOPED_HANDLE_H_
diff --git a/ipc/chromium/src/base/scoped_handle_win.h b/ipc/chromium/src/base/scoped_handle_win.h
new file mode 100644
index 000000000..d4d8fccc4
--- /dev/null
+++ b/ipc/chromium/src/base/scoped_handle_win.h
@@ -0,0 +1,219 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_SCOPED_HANDLE_WIN_H_
+#define BASE_SCOPED_HANDLE_WIN_H_
+
+#include <windows.h>
+
+#include "base/basictypes.h"
+#include "base/logging.h"
+
+// Used so we always remember to close the handle.
+// The class interface matches that of ScopedStdioHandle, with the addition of
+// an IsValid() method, since invalid handles on Windows can be either NULL or
+// INVALID_HANDLE_VALUE (-1).
+//
+// Example:
+// ScopedHandle hfile(CreateFile(...));
+// if (!hfile.Get())
+// ...process error
+// ReadFile(hfile.Get(), ...);
+//
+// To squirrel the handle away somewhere else:
+// secret_handle_ = hfile.Take();
+//
+// To explicitly close the handle:
+// hfile.Close();
+class ScopedHandle {
+ public:
+ ScopedHandle() : handle_(NULL) {
+ }
+
+ explicit ScopedHandle(HANDLE h) : handle_(NULL) {
+ Set(h);
+ }
+
+ ~ScopedHandle() {
+ Close();
+ }
+
+ // Use this instead of comparing to INVALID_HANDLE_VALUE to pick up our NULL
+ // usage for errors.
+ bool IsValid() const {
+ return handle_ != NULL;
+ }
+
+ void Set(HANDLE new_handle) {
+ Close();
+
+ // Windows is inconsistent about invalid handles, so we always use NULL
+ if (new_handle != INVALID_HANDLE_VALUE)
+ handle_ = new_handle;
+ }
+
+ HANDLE Get() {
+ return handle_;
+ }
+
+ operator HANDLE() { return handle_; }
+
+ HANDLE Take() {
+ // transfers ownership away from this object
+ HANDLE h = handle_;
+ handle_ = NULL;
+ return h;
+ }
+
+ void Close() {
+ if (handle_) {
+ if (!::CloseHandle(handle_)) {
+ NOTREACHED();
+ }
+ handle_ = NULL;
+ }
+ }
+
+ private:
+ HANDLE handle_;
+ DISALLOW_EVIL_CONSTRUCTORS(ScopedHandle);
+};
+
+// Like ScopedHandle, but for HANDLEs returned from FindFirstFile().
+class ScopedFindFileHandle {
+ public:
+ explicit ScopedFindFileHandle(HANDLE handle) : handle_(handle) {
+ // Windows is inconsistent about invalid handles, so we always use NULL
+ if (handle_ == INVALID_HANDLE_VALUE)
+ handle_ = NULL;
+ }
+
+ ~ScopedFindFileHandle() {
+ if (handle_)
+ FindClose(handle_);
+ }
+
+ // Use this instead of comparing to INVALID_HANDLE_VALUE to pick up our NULL
+ // usage for errors.
+ bool IsValid() const { return handle_ != NULL; }
+
+ operator HANDLE() { return handle_; }
+
+ private:
+ HANDLE handle_;
+
+ DISALLOW_EVIL_CONSTRUCTORS(ScopedFindFileHandle);
+};
+
+// Like ScopedHandle but for HDC. Only use this on HDCs returned from
+// CreateCompatibleDC. For an HDC returned by GetDC, use ReleaseDC instead.
+class ScopedHDC {
+ public:
+ ScopedHDC() : hdc_(NULL) { }
+ explicit ScopedHDC(HDC h) : hdc_(h) { }
+
+ ~ScopedHDC() {
+ Close();
+ }
+
+ HDC Get() {
+ return hdc_;
+ }
+
+ void Set(HDC h) {
+ Close();
+ hdc_ = h;
+ }
+
+ operator HDC() { return hdc_; }
+
+ private:
+ void Close() {
+#ifdef NOGDI
+ assert(false);
+#else
+ if (hdc_)
+ DeleteDC(hdc_);
+#endif // NOGDI
+ }
+
+ HDC hdc_;
+ DISALLOW_EVIL_CONSTRUCTORS(ScopedHDC);
+};
+
+// Like ScopedHandle but for GDI objects.
+template<class T>
+class ScopedGDIObject {
+ public:
+ ScopedGDIObject() : object_(NULL) {}
+ explicit ScopedGDIObject(T object) : object_(object) {}
+
+ ~ScopedGDIObject() {
+ Close();
+ }
+
+ T Get() {
+ return object_;
+ }
+
+ void Set(T object) {
+ if (object_ && object != object_)
+ Close();
+ object_ = object;
+ }
+
+ ScopedGDIObject& operator=(T object) {
+ Set(object);
+ return *this;
+ }
+
+ operator T() { return object_; }
+
+ private:
+ void Close() {
+ if (object_)
+ DeleteObject(object_);
+ }
+
+ T object_;
+ DISALLOW_COPY_AND_ASSIGN(ScopedGDIObject);
+};
+
+// Typedefs for some common use cases.
+typedef ScopedGDIObject<HBITMAP> ScopedBitmap;
+typedef ScopedGDIObject<HRGN> ScopedHRGN;
+typedef ScopedGDIObject<HFONT> ScopedHFONT;
+
+
+// Like ScopedHandle except for HGLOBAL.
+template<class T>
+class ScopedHGlobal {
+ public:
+ explicit ScopedHGlobal(HGLOBAL glob) : glob_(glob) {
+ data_ = static_cast<T*>(GlobalLock(glob_));
+ }
+ ~ScopedHGlobal() {
+ GlobalUnlock(glob_);
+ }
+
+ T* get() { return data_; }
+
+ size_t Size() const { return GlobalSize(glob_); }
+
+ T* operator->() const {
+ assert(data_ != 0);
+ return data_;
+ }
+
+ private:
+ HGLOBAL glob_;
+
+ T* data_;
+
+ DISALLOW_EVIL_CONSTRUCTORS(ScopedHGlobal);
+};
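+
+// Usage sketch (editorial addition; |hglobal| is assumed to come from a call
+// such as GetClipboardData):
+//   ScopedHGlobal<char> data(hglobal);
+//   size_t size = data.Size();
+//   // Read data.get()[0..size); GlobalUnlock runs when |data| goes out of
+//   // scope.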
+
+#endif // BASE_SCOPED_HANDLE_WIN_H_
diff --git a/ipc/chromium/src/base/scoped_nsautorelease_pool.h b/ipc/chromium/src/base/scoped_nsautorelease_pool.h
new file mode 100644
index 000000000..2136b9127
--- /dev/null
+++ b/ipc/chromium/src/base/scoped_nsautorelease_pool.h
@@ -0,0 +1,54 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright (c) 2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_SCOPED_NSAUTORELEASE_POOL_H_
+#define BASE_SCOPED_NSAUTORELEASE_POOL_H_
+
+#include "base/basictypes.h"
+
+#if defined(OS_MACOSX)
+#if defined(__OBJC__)
+@class NSAutoreleasePool;
+#else // __OBJC__
+class NSAutoreleasePool;
+#endif // __OBJC__
+#endif // OS_MACOSX
+
+namespace base {
+
+// On the Mac, ScopedNSAutoreleasePool allocates an NSAutoreleasePool when
+// instantiated and sends it a -drain message when destroyed. This allows an
+// autorelease pool to be maintained in ordinary C++ code without bringing in
+// any direct Objective-C dependency.
+//
+// On other platforms, ScopedNSAutoreleasePool is an empty object with no
+// effects. This allows it to be used directly in cross-platform code without
+// ugly #ifdefs.
+class ScopedNSAutoreleasePool {
+ public:
+#if !defined(OS_MACOSX)
+ ScopedNSAutoreleasePool() {}
+ void Recycle() { }
+#else // OS_MACOSX
+ ScopedNSAutoreleasePool();
+ ~ScopedNSAutoreleasePool();
+
+ // Clear out the pool in case its position on the stack causes it to be
+ // alive for long periods of time (such as the entire length of the app).
+  // Only use this when you're certain the items currently in the pool are
+ // no longer needed.
+ void Recycle();
+ private:
+ NSAutoreleasePool* autorelease_pool_;
+#endif // OS_MACOSX
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(ScopedNSAutoreleasePool);
+};
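+
+// Usage sketch (editorial addition; illustrative only):
+//   {
+//     base::ScopedNSAutoreleasePool pool;
+//     // ... code that may autorelease Objective-C objects on the Mac ...
+//     pool.Recycle();  // optional: drain early within a long-lived scope
+//   }  // drained on destruction; a no-op on non-Mac platforms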
+
+} // namespace base
+
+#endif // BASE_SCOPED_NSAUTORELEASE_POOL_H_
diff --git a/ipc/chromium/src/base/scoped_nsautorelease_pool.mm b/ipc/chromium/src/base/scoped_nsautorelease_pool.mm
new file mode 100644
index 000000000..174fb821d
--- /dev/null
+++ b/ipc/chromium/src/base/scoped_nsautorelease_pool.mm
@@ -0,0 +1,30 @@
+// Copyright (c) 2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/scoped_nsautorelease_pool.h"
+
+#import <Foundation/Foundation.h>
+
+#include "base/logging.h"
+
+namespace base {
+
+ScopedNSAutoreleasePool::ScopedNSAutoreleasePool()
+ : autorelease_pool_([[NSAutoreleasePool alloc] init]) {
+ DCHECK(autorelease_pool_);
+}
+
+ScopedNSAutoreleasePool::~ScopedNSAutoreleasePool() {
+ [autorelease_pool_ drain];
+}
+
+// Cycle the internal pool, allowing everything there to get cleaned up and
+// start anew.
+void ScopedNSAutoreleasePool::Recycle() {
+ [autorelease_pool_ drain];
+ autorelease_pool_ = [[NSAutoreleasePool alloc] init];
+ DCHECK(autorelease_pool_);
+}
+
+} // namespace base
diff --git a/ipc/chromium/src/base/scoped_nsobject.h b/ipc/chromium/src/base/scoped_nsobject.h
new file mode 100644
index 000000000..204114021
--- /dev/null
+++ b/ipc/chromium/src/base/scoped_nsobject.h
@@ -0,0 +1,78 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_SCOPED_NSOBJECT_H_
+#define BASE_SCOPED_NSOBJECT_H_
+
+#import <Foundation/Foundation.h>
+#include "base/basictypes.h"
+
+// scoped_nsobject<> is patterned after scoped_ptr<>, but maintains ownership
+// of an NSObject subclass object. Style deviations here are solely for
+// compatibility with scoped_ptr<>'s interface, with which everyone is already
+// familiar.
+//
+// When scoped_nsobject<> takes ownership of an object (in the constructor or
+// in reset()), it takes over the caller's existing ownership claim. The
+// caller must own the object it gives to scoped_nsobject<>, and relinquishes
+// an ownership claim to that object. scoped_nsobject<> does not call
+// -retain.
+template<typename NST>
+class scoped_nsobject {
+ public:
+ typedef NST* element_type;
+
+ explicit scoped_nsobject(NST* object = nil)
+ : object_(object) {
+ }
+
+ ~scoped_nsobject() {
+ [object_ release];
+ }
+
+ void reset(NST* object = nil) {
+ [object_ release];
+ object_ = object;
+ }
+
+ bool operator==(NST* that) const {
+ return object_ == that;
+ }
+
+ bool operator!=(NST* that) const {
+ return object_ != that;
+ }
+
+ operator NST*() const {
+ return object_;
+ }
+
+ NST* get() const {
+ return object_;
+ }
+
+ void swap(scoped_nsobject& that) {
+ NST* temp = that.object_;
+ that.object_ = object_;
+ object_ = temp;
+ }
+
+ // scoped_nsobject<>::release() is like scoped_ptr<>::release. It is NOT
+ // a wrapper for [object_ release]. To force a scoped_nsobject<> object to
+ // call [object_ release], use scoped_nsobject<>::reset().
+ NST* release() {
+ NST* temp = object_;
+ object_ = nil;
+ return temp;
+ }
+
+ private:
+ NST* object_;
+
+ DISALLOW_COPY_AND_ASSIGN(scoped_nsobject);
+};
+
+#endif // BASE_SCOPED_NSOBJECT_H_
diff --git a/ipc/chromium/src/base/shared_memory.h b/ipc/chromium/src/base/shared_memory.h
new file mode 100644
index 000000000..15a0a95fe
--- /dev/null
+++ b/ipc/chromium/src/base/shared_memory.h
@@ -0,0 +1,209 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_SHARED_MEMORY_H_
+#define BASE_SHARED_MEMORY_H_
+
+#include "build/build_config.h"
+
+#if defined(OS_POSIX)
+#include <sys/types.h>
+#include <semaphore.h>
+#include "base/file_descriptor_posix.h"
+#endif
+#include <string>
+
+#include "base/basictypes.h"
+#include "base/process.h"
+
+namespace base {
+
+// SharedMemoryHandle is a platform specific type which represents
+// the underlying OS handle to a shared memory segment.
+#if defined(OS_WIN)
+typedef HANDLE SharedMemoryHandle;
+typedef HANDLE SharedMemoryLock;
+#elif defined(OS_POSIX)
+// A SharedMemoryId is sufficient to identify a given shared memory segment on a
+// system, but insufficient to map it.
+typedef FileDescriptor SharedMemoryHandle;
+typedef ino_t SharedMemoryId;
+// On POSIX, the lock is implemented as a lockf() on the mapped file,
+// so no additional member (or definition of SharedMemoryLock) is
+// needed.
+#endif
+
+// Platform abstraction for shared memory. Provides a C++ wrapper
+// around the OS primitive for a memory mapped file.
+class SharedMemory {
+ public:
+ // Create a new SharedMemory object.
+ SharedMemory();
+
+ // Create a new SharedMemory object from an existing, open
+ // shared memory file.
+ SharedMemory(SharedMemoryHandle init_handle, bool read_only)
+ : SharedMemory() {
+ SetHandle(init_handle, read_only);
+ }
+
+ // Destructor. Will close any open files.
+ ~SharedMemory();
+
+ // Initialize a new SharedMemory object from an existing, open
+ // shared memory file.
+ bool SetHandle(SharedMemoryHandle handle, bool read_only);
+
+  // Return true iff the given handle is valid (i.e. not the distinguished
+ // invalid value; NULL for a HANDLE and -1 for a file descriptor)
+ static bool IsHandleValid(const SharedMemoryHandle& handle);
+
+ // Return invalid handle (see comment above for exact definition).
+ static SharedMemoryHandle NULLHandle();
+
+ // Creates or opens a shared memory segment based on a name.
+ // If read_only is true, opens the memory as read-only.
+ // If open_existing is true, and the shared memory already exists,
+ // opens the existing shared memory and ignores the size parameter.
+ // If name is the empty string, use a unique name.
+ // Returns true on success, false on failure.
+ bool Create(const std::string& name, bool read_only, bool open_existing,
+ size_t size);
+
+ // Deletes resources associated with a shared memory segment based on name.
+ // Not all platforms require this call.
+ bool Delete(const std::wstring& name);
+
+ // Opens a shared memory segment based on a name.
+ // If read_only is true, opens for read-only access.
+ // If name is the empty string, use a unique name.
+ // Returns true on success, false on failure.
+ bool Open(const std::wstring& name, bool read_only);
+
+ // Maps the shared memory into the caller's address space.
+ // Returns true on success, false otherwise. The memory address
+ // is accessed via the memory() accessor.
+ bool Map(size_t bytes);
+
+ // Unmaps the shared memory from the caller's address space.
+ // Returns true if successful; returns false on error or if the
+ // memory is not mapped.
+ bool Unmap();
+
+ // Get the size of the opened shared memory backing file.
+ // Note: This size is only available to the creator of the
+ // shared memory, and not to those that opened shared memory
+ // created externally.
+ // Returns 0 if not opened or unknown.
+ size_t max_size() const { return max_size_; }
+
+ // Gets a pointer to the opened memory space if it has been
+ // Mapped via Map(). Returns NULL if it is not mapped.
+ void *memory() const { return memory_; }
+
+ // Get access to the underlying OS handle for this segment.
+ // Use of this handle for anything other than an opaque
+ // identifier is not portable.
+ SharedMemoryHandle handle() const;
+
+#if defined(OS_POSIX)
+ // Return a unique identifier for this shared memory segment. Inode numbers
+ // are technically only unique to a single filesystem. However, we always
+ // allocate shared memory backing files from the same directory, so will end
+  // allocate shared memory backing files from the same directory, so they will
+  // end up on the same filesystem.
+#endif
+
+ // Closes the open shared memory segment.
+ // It is safe to call Close repeatedly.
+ void Close(bool unmap_view = true);
+
+ // Share the shared memory to another process. Attempts
+ // to create a platform-specific new_handle which can be
+ // used in a remote process to access the shared memory
+  // file. new_handle is an output parameter to receive
+ // the handle for use in the remote process.
+ // Returns true on success, false otherwise.
+ bool ShareToProcess(base::ProcessId target_pid,
+ SharedMemoryHandle* new_handle) {
+ return ShareToProcessCommon(target_pid, new_handle, false);
+ }
+
+ // Logically equivalent to:
+ // bool ok = ShareToProcess(process, new_handle);
+ // Close();
+ // return ok;
+ // Note that the memory is unmapped by calling this method, regardless of the
+ // return value.
+ bool GiveToProcess(ProcessId target_pid,
+ SharedMemoryHandle* new_handle) {
+ return ShareToProcessCommon(target_pid, new_handle, true);
+ }
+
+ // Lock the shared memory.
+ // This is a cross-process lock which may be recursively
+ // locked by the same thread.
+ // TODO(port):
+ // WARNING: on POSIX the lock only works across processes, not
+ // across threads. 2 threads in the same process can both grab the
+ // lock at the same time. There are several solutions for this
+ // (futex, lockf+anon_semaphore) but none are both clean and common
+ // across Mac and Linux.
+ void Lock();
+
+ // Release the shared memory lock.
+ void Unlock();
+
+ private:
+#if defined(OS_POSIX)
+ bool CreateOrOpen(const std::wstring &name, int posix_flags, size_t size);
+ bool FilenameForMemoryName(const std::wstring &memname,
+ std::wstring *filename);
+ void LockOrUnlockCommon(int function);
+
+#endif
+ bool ShareToProcessCommon(ProcessId target_pid,
+ SharedMemoryHandle* new_handle,
+ bool close_self);
+
+#if defined(OS_WIN)
+ std::wstring name_;
+ HANDLE mapped_file_;
+#elif defined(OS_POSIX)
+ int mapped_file_;
+ ino_t inode_;
+#endif
+ void* memory_;
+ bool read_only_;
+ size_t max_size_;
+#if !defined(OS_POSIX)
+ SharedMemoryLock lock_;
+#endif
+
+ DISALLOW_EVIL_CONSTRUCTORS(SharedMemory);
+};
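+
+// Usage sketch (editorial addition; error handling omitted):
+//   base::SharedMemory shm;
+//   if (shm.Create("", false /* read_only */, false /* open_existing */,
+//                  4096) &&
+//       shm.Map(4096)) {
+//     memset(shm.memory(), 0, 4096);
+//   }
+//   // The mapping and the handle are released when |shm| is destroyed.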
+
+// A helper class that acquires the shared memory lock while
+// the SharedMemoryAutoLock is in scope.
+class SharedMemoryAutoLock {
+ public:
+ explicit SharedMemoryAutoLock(SharedMemory* shared_memory)
+ : shared_memory_(shared_memory) {
+ shared_memory_->Lock();
+ }
+
+ ~SharedMemoryAutoLock() {
+ shared_memory_->Unlock();
+ }
+
+ private:
+ SharedMemory* shared_memory_;
+ DISALLOW_EVIL_CONSTRUCTORS(SharedMemoryAutoLock);
+};
+
+} // namespace base
+
+#endif // BASE_SHARED_MEMORY_H_
diff --git a/ipc/chromium/src/base/shared_memory_posix.cc b/ipc/chromium/src/base/shared_memory_posix.cc
new file mode 100644
index 000000000..b02326df9
--- /dev/null
+++ b/ipc/chromium/src/base/shared_memory_posix.cc
@@ -0,0 +1,338 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/shared_memory.h"
+
+#include <errno.h>
+#include <fcntl.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <unistd.h>
+
+#include "base/file_util.h"
+#include "base/logging.h"
+#include "base/platform_thread.h"
+#include "base/string_util.h"
+#include "mozilla/UniquePtr.h"
+
+namespace base {
+
+SharedMemory::SharedMemory()
+ : mapped_file_(-1),
+ inode_(0),
+ memory_(NULL),
+ read_only_(false),
+ max_size_(0) {
+}
+
+SharedMemory::~SharedMemory() {
+ Close();
+}
+
+bool SharedMemory::SetHandle(SharedMemoryHandle handle, bool read_only) {
+ DCHECK(mapped_file_ == -1);
+
+ struct stat st;
+ if (fstat(handle.fd, &st) < 0) {
+ return false;
+ }
+
+ mapped_file_ = handle.fd;
+ inode_ = st.st_ino;
+ read_only_ = read_only;
+ return true;
+}
+
+// static
+bool SharedMemory::IsHandleValid(const SharedMemoryHandle& handle) {
+ return handle.fd >= 0;
+}
+
+// static
+SharedMemoryHandle SharedMemory::NULLHandle() {
+ return SharedMemoryHandle();
+}
+
+bool SharedMemory::Create(const std::string &cname, bool read_only,
+ bool open_existing, size_t size) {
+ read_only_ = read_only;
+
+ std::wstring name = UTF8ToWide(cname);
+
+ int posix_flags = 0;
+ posix_flags |= read_only ? O_RDONLY : O_RDWR;
+ if (!open_existing || mapped_file_ <= 0)
+ posix_flags |= O_CREAT;
+
+ if (!CreateOrOpen(name, posix_flags, size))
+ return false;
+
+ max_size_ = size;
+ return true;
+}
+
+// Our current implementation of shmem is with mmap()ing of files.
+// These files need to be deleted explicitly.
+// In practice this call is only needed for unit tests.
+bool SharedMemory::Delete(const std::wstring& name) {
+ std::wstring mem_filename;
+ if (FilenameForMemoryName(name, &mem_filename) == false)
+ return false;
+
+ FilePath path(WideToUTF8(mem_filename));
+ if (file_util::PathExists(path)) {
+ return file_util::Delete(path);
+ }
+
+ // Doesn't exist, so success.
+ return true;
+}
+
+bool SharedMemory::Open(const std::wstring &name, bool read_only) {
+ read_only_ = read_only;
+
+ int posix_flags = 0;
+ posix_flags |= read_only ? O_RDONLY : O_RDWR;
+
+ return CreateOrOpen(name, posix_flags, 0);
+}
+
+// For the given shmem named |memname|, return a filename to mmap()
+// (and possibly create). Modifies |filename|. Return false on
+// error, or true if we are happy.
+bool SharedMemory::FilenameForMemoryName(const std::wstring &memname,
+ std::wstring *filename) {
+ std::wstring mem_filename;
+
+  // |memname| will be used for a filename; make sure it doesn't
+ // contain anything which will confuse us.
+ DCHECK(memname.find_first_of(L"/") == std::string::npos);
+ DCHECK(memname.find_first_of(L"\0") == std::string::npos);
+
+ FilePath temp_dir;
+ if (file_util::GetShmemTempDir(&temp_dir) == false)
+ return false;
+
+ mem_filename = UTF8ToWide(temp_dir.value());
+ file_util::AppendToPath(&mem_filename, L"com.google.chrome.shmem." + memname);
+ *filename = mem_filename;
+ return true;
+}
+
+namespace {
+
+// A class to handle auto-closing of FILE*'s.
+class ScopedFILEClose {
+ public:
+ inline void operator()(FILE* x) const {
+ if (x) {
+ fclose(x);
+ }
+ }
+};
+
+typedef mozilla::UniquePtr<FILE, ScopedFILEClose> ScopedFILE;
+
+}
+
+// Chromium mostly uses only the unique/private shmem, as specified by
+// name == L"". The exception is in the StatsTable.
+// TODO(jrg): there is no way to "clean up" all unused named shmem if
+// we restart from a crash. (That isn't a new problem, but it is a problem.)
+// In case we want to delete it later, it may be useful to save the value
+// of mem_filename after FilenameForMemoryName().
+bool SharedMemory::CreateOrOpen(const std::wstring &name,
+ int posix_flags, size_t size) {
+ DCHECK(mapped_file_ == -1);
+
+ ScopedFILE file_closer;
+ FILE *fp;
+
+ if (name == L"") {
+ // It doesn't make sense to have a read-only private piece of shmem
+ DCHECK(posix_flags & (O_RDWR | O_WRONLY));
+
+ FilePath path;
+ fp = file_util::CreateAndOpenTemporaryShmemFile(&path);
+
+ // Deleting the file prevents anyone else from mapping it in
+ // (making it private), and prevents the need for cleanup (once
+ // the last fd is closed, it is truly freed).
+ file_util::Delete(path);
+ } else {
+ std::wstring mem_filename;
+ if (FilenameForMemoryName(name, &mem_filename) == false)
+ return false;
+
+ std::string mode;
+ switch (posix_flags) {
+ case (O_RDWR | O_CREAT):
+ // Careful: "w+" will truncate if it already exists.
+ mode = "a+";
+ break;
+ case O_RDWR:
+ mode = "r+";
+ break;
+ case O_RDONLY:
+ mode = "r";
+ break;
+ default:
+ NOTIMPLEMENTED();
+ break;
+ }
+
+ fp = file_util::OpenFile(mem_filename, mode.c_str());
+ }
+
+ if (fp == NULL)
+ return false;
+ file_closer.reset(fp); // close when we go out of scope
+
+ // Make sure the (new) file is the right size.
+ // According to the man page, "Use of truncate() to extend a file is
+ // not portable."
+ if (size && (posix_flags & (O_RDWR | O_CREAT))) {
+ // Get current size.
+ struct stat stat;
+ if (fstat(fileno(fp), &stat) != 0)
+ return false;
+ size_t current_size = stat.st_size;
+ if (current_size != size) {
+ if (ftruncate(fileno(fp), size) != 0)
+ return false;
+ if (fseeko(fp, size, SEEK_SET) != 0)
+ return false;
+ }
+ }
+
+ mapped_file_ = dup(fileno(fp));
+ DCHECK(mapped_file_ >= 0);
+
+ struct stat st;
+ if (fstat(mapped_file_, &st))
+ NOTREACHED();
+ inode_ = st.st_ino;
+
+ return true;
+}
+
+bool SharedMemory::Map(size_t bytes) {
+ if (mapped_file_ == -1)
+ return false;
+
+ memory_ = mmap(NULL, bytes, PROT_READ | (read_only_ ? 0 : PROT_WRITE),
+ MAP_SHARED, mapped_file_, 0);
+
+ if (memory_)
+ max_size_ = bytes;
+
+ bool mmap_succeeded = (memory_ != (void*)-1);
+ DCHECK(mmap_succeeded) << "Call to mmap failed, errno=" << errno;
+ return mmap_succeeded;
+}
+
+bool SharedMemory::Unmap() {
+ if (memory_ == NULL)
+ return false;
+
+ munmap(memory_, max_size_);
+ memory_ = NULL;
+ max_size_ = 0;
+ return true;
+}
+
+bool SharedMemory::ShareToProcessCommon(ProcessId processId,
+ SharedMemoryHandle *new_handle,
+ bool close_self) {
+ const int new_fd = dup(mapped_file_);
+  DCHECK(new_fd >= 0);
+ new_handle->fd = new_fd;
+ new_handle->auto_close = true;
+
+ if (close_self)
+ Close();
+
+ return true;
+}
+
+
+void SharedMemory::Close(bool unmap_view) {
+ if (unmap_view) {
+ Unmap();
+ }
+
+ if (mapped_file_ >= 0) {
+ close(mapped_file_);
+ mapped_file_ = -1;
+ }
+}
+
+#ifdef ANDROID
+void SharedMemory::LockOrUnlockCommon(int function) {
+ DCHECK(mapped_file_ >= 0);
+ struct flock lockreq;
+ lockreq.l_type = function;
+ lockreq.l_whence = SEEK_SET;
+ lockreq.l_start = 0;
+ lockreq.l_len = 0;
+ while (fcntl(mapped_file_, F_SETLKW, &lockreq) < 0) {
+ if (errno == EINTR) {
+ continue;
+ } else if (errno == ENOLCK) {
+      // temporary kernel resource exhaustion
+ PlatformThread::Sleep(500);
+ continue;
+ } else {
+ NOTREACHED() << "lockf() failed."
+ << " function:" << function
+ << " fd:" << mapped_file_
+ << " errno:" << errno
+ << " msg:" << strerror(errno);
+ }
+ }
+}
+
+void SharedMemory::Lock() {
+ LockOrUnlockCommon(F_WRLCK);
+}
+
+void SharedMemory::Unlock() {
+ LockOrUnlockCommon(F_UNLCK);
+}
+#else
+void SharedMemory::LockOrUnlockCommon(int function) {
+ DCHECK(mapped_file_ >= 0);
+ while (lockf(mapped_file_, function, 0) < 0) {
+ if (errno == EINTR) {
+ continue;
+ } else if (errno == ENOLCK) {
+      // temporary kernel resource exhaustion
+ PlatformThread::Sleep(500);
+ continue;
+ } else {
+ NOTREACHED() << "lockf() failed."
+ << " function:" << function
+ << " fd:" << mapped_file_
+ << " errno:" << errno
+ << " msg:" << strerror(errno);
+ }
+ }
+}
+
+void SharedMemory::Lock() {
+ LockOrUnlockCommon(F_LOCK);
+}
+
+void SharedMemory::Unlock() {
+ LockOrUnlockCommon(F_ULOCK);
+}
+#endif
+
+SharedMemoryHandle SharedMemory::handle() const {
+ return FileDescriptor(mapped_file_, false);
+}
+
+} // namespace base
diff --git a/ipc/chromium/src/base/shared_memory_win.cc b/ipc/chromium/src/base/shared_memory_win.cc
new file mode 100644
index 000000000..042adc834
--- /dev/null
+++ b/ipc/chromium/src/base/shared_memory_win.cc
@@ -0,0 +1,176 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/shared_memory.h"
+
+#include "base/logging.h"
+#include "base/win_util.h"
+#include "base/string_util.h"
+#include "mozilla/ipc/ProtocolUtils.h"
+
+namespace base {
+
+SharedMemory::SharedMemory()
+ : mapped_file_(NULL),
+ memory_(NULL),
+ read_only_(false),
+ max_size_(0),
+ lock_(NULL) {
+}
+
+SharedMemory::~SharedMemory() {
+ Close();
+ if (lock_ != NULL)
+ CloseHandle(lock_);
+}
+
+bool SharedMemory::SetHandle(SharedMemoryHandle handle, bool read_only) {
+ DCHECK(mapped_file_ == NULL);
+
+ mapped_file_ = handle;
+ read_only_ = read_only;
+ return true;
+}
+
+// static
+bool SharedMemory::IsHandleValid(const SharedMemoryHandle& handle) {
+ return handle != NULL;
+}
+
+// static
+SharedMemoryHandle SharedMemory::NULLHandle() {
+ return NULL;
+}
+
+bool SharedMemory::Create(const std::string &cname, bool read_only,
+ bool open_existing, size_t size) {
+ DCHECK(mapped_file_ == NULL);
+ std::wstring name = UTF8ToWide(cname);
+ name_ = name;
+ read_only_ = read_only;
+ mapped_file_ = CreateFileMapping(INVALID_HANDLE_VALUE, NULL,
+ read_only_ ? PAGE_READONLY : PAGE_READWRITE, 0, static_cast<DWORD>(size),
+ name.empty() ? NULL : name.c_str());
+ if (!mapped_file_)
+ return false;
+
+ // Check if the shared memory pre-exists.
+ if (GetLastError() == ERROR_ALREADY_EXISTS && !open_existing) {
+ Close();
+ return false;
+ }
+ max_size_ = size;
+ return true;
+}
+
+bool SharedMemory::Delete(const std::wstring& name) {
+ // intentionally empty -- there is nothing for us to do on Windows.
+ return true;
+}
+
+bool SharedMemory::Open(const std::wstring &name, bool read_only) {
+ DCHECK(mapped_file_ == NULL);
+
+ name_ = name;
+ read_only_ = read_only;
+ mapped_file_ = OpenFileMapping(
+ read_only_ ? FILE_MAP_READ : FILE_MAP_ALL_ACCESS, false,
+ name.empty() ? NULL : name.c_str());
+ if (mapped_file_ != NULL) {
+    // Note: max_size_ is not set in this case.
+ return true;
+ }
+ return false;
+}
+
+bool SharedMemory::Map(size_t bytes) {
+ if (mapped_file_ == NULL)
+ return false;
+
+ memory_ = MapViewOfFile(mapped_file_,
+ read_only_ ? FILE_MAP_READ : FILE_MAP_ALL_ACCESS, 0, 0, bytes);
+ if (memory_ != NULL) {
+ return true;
+ }
+ return false;
+}
+
+bool SharedMemory::Unmap() {
+ if (memory_ == NULL)
+ return false;
+
+ UnmapViewOfFile(memory_);
+ memory_ = NULL;
+ return true;
+}
+
+bool SharedMemory::ShareToProcessCommon(ProcessId processId,
+ SharedMemoryHandle *new_handle,
+ bool close_self) {
+ *new_handle = 0;
+ DWORD access = STANDARD_RIGHTS_REQUIRED | FILE_MAP_READ;
+ DWORD options = 0;
+ HANDLE mapped_file = mapped_file_;
+ HANDLE result;
+ if (!read_only_)
+ access |= FILE_MAP_WRITE;
+ if (close_self) {
+ // DUPLICATE_CLOSE_SOURCE causes DuplicateHandle to close mapped_file.
+ options = DUPLICATE_CLOSE_SOURCE;
+ mapped_file_ = NULL;
+ Unmap();
+ }
+
+ if (processId == GetCurrentProcId() && close_self) {
+ *new_handle = mapped_file;
+ return true;
+ }
+
+ if (!mozilla::ipc::DuplicateHandle(mapped_file, processId, &result, access,
+ options)) {
+ return false;
+ }
+
+ *new_handle = result;
+ return true;
+}
+
+
+void SharedMemory::Close(bool unmap_view) {
+ if (unmap_view) {
+ Unmap();
+ }
+
+ if (mapped_file_ != NULL) {
+ CloseHandle(mapped_file_);
+ mapped_file_ = NULL;
+ }
+}
+
+void SharedMemory::Lock() {
+ if (lock_ == NULL) {
+ std::wstring name = name_;
+ name.append(L"lock");
+ lock_ = CreateMutex(NULL, FALSE, name.c_str());
+ DCHECK(lock_ != NULL);
+ if (lock_ == NULL) {
+      DLOG(ERROR) << "Could not create mutex: " << GetLastError();
+ return; // there is nothing good we can do here.
+ }
+ }
+ WaitForSingleObject(lock_, INFINITE);
+}
+
+void SharedMemory::Unlock() {
+ DCHECK(lock_ != NULL);
+ ReleaseMutex(lock_);
+}
+
+SharedMemoryHandle SharedMemory::handle() const {
+ return mapped_file_;
+}
+
+} // namespace base
diff --git a/ipc/chromium/src/base/singleton.h b/ipc/chromium/src/base/singleton.h
new file mode 100644
index 000000000..9f333af97
--- /dev/null
+++ b/ipc/chromium/src/base/singleton.h
@@ -0,0 +1,183 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_SINGLETON_H_
+#define BASE_SINGLETON_H_
+
+#include "base/at_exit.h"
+#include "base/atomicops.h"
+#include "base/platform_thread.h"
+
+// Default traits for Singleton<Type>. Calls operator new and operator delete on
+// the object. Registers automatic deletion at process exit.
+// Overload if you need arguments or another memory allocation function.
+template<typename Type>
+struct DefaultSingletonTraits {
+ // Allocates the object.
+ static Type* New() {
+    // The parentheses are very important here; they force POD type
+ // initialization.
+ return new Type();
+ }
+
+ // Destroys the object.
+ static void Delete(Type* x) {
+ delete x;
+ }
+
+ // Set to true to automatically register deletion of the object on process
+ // exit. See below for the required call that makes this happen.
+ static const bool kRegisterAtExit = true;
+};
+
+
+// Alternate traits for use with the Singleton<Type>. Identical to
+// DefaultSingletonTraits except that the Singleton will not be cleaned up
+// at exit.
+template<typename Type>
+struct LeakySingletonTraits : public DefaultSingletonTraits<Type> {
+ static const bool kRegisterAtExit = false;
+};
+
+
+// The Singleton<Type, Traits, DifferentiatingType> class manages a single
+// instance of Type which will be created on first use and will be destroyed at
+// normal process exit. The Traits::Delete function will not be called on
+// abnormal process exit.
+//
+// DifferentiatingType is used as a key to differentiate two different
+// singletons having the same memory allocation functions but serving a
+// different purpose. This is mainly used for Locks serving different purposes.
+//
+// Example usages: (none are preferred, they all result in the same code)
+// 1. FooClass* ptr = Singleton<FooClass>::get();
+// ptr->Bar();
+// 2. Singleton<FooClass>()->Bar();
+// 3. Singleton<FooClass>::get()->Bar();
+//
+// Singleton<> has no non-static members and doesn't need to actually be
+// instantiated. It does no harm to instantiate it and use it as a class member
+// or at global level since it is acting as a POD type.
+//
+// This class is itself thread-safe. The underlying Type must of course be
+// thread-safe if you want to use it concurrently. Two parameters may be tuned
+// depending on the user's requirements.
+//
+// Glossary:
+// RAE = kRegisterAtExit
+//
+// On every platform, if Traits::RAE is true, the singleton will be destroyed at
+// process exit. More precisely it uses base::AtExitManager which requires an
+// object of this type to be instantiated. AtExitManager mimics the semantics
+// of atexit(), such as LIFO order, but is safer to call under Windows. For more
+// information see at_exit.h.
+//
+// If Traits::RAE is false, the singleton will not be freed at process exit,
+// thus the singleton will be leaked if it is ever accessed. Traits::RAE
+// shouldn't be false unless absolutely necessary. Remember that the heap where
+// the object is allocated may be destroyed by the CRT anyway.
+//
+// If you want to ensure that your class can only exist as a singleton, make
+// its constructors private, and make DefaultSingletonTraits<> a friend:
+//
+// #include "base/singleton.h"
+// class FooClass {
+// public:
+// void Bar() { ... }
+// private:
+// FooClass() { ... }
+// friend struct DefaultSingletonTraits<FooClass>;
+//
+// DISALLOW_EVIL_CONSTRUCTORS(FooClass);
+// };
+//
+// Caveats:
+// (a) Every call to get(), operator->() and operator*() incurs some overhead
+// (16ns on my P4/2.8GHz) to check whether the object has already been
+// initialized. You may wish to cache the result of get(); it will not
+// change.
+//
+// (b) Your factory function must never throw an exception. This class is not
+// exception-safe.
+//
+template <typename Type,
+ typename Traits = DefaultSingletonTraits<Type>,
+ typename DifferentiatingType = Type>
+class Singleton {
+ public:
+ // This class is safe to be constructed and copy-constructed since it has no
+ // member.
+
+ // Return a pointer to the one true instance of the class.
+ static Type* get() {
+ // Our AtomicWord doubles as a spinlock, where a value of
+ // kBeingCreatedMarker means the spinlock is being held for creation.
+ static const base::subtle::AtomicWord kBeingCreatedMarker = 1;
+
+ base::subtle::AtomicWord value = base::subtle::NoBarrier_Load(&instance_);
+ if (value != 0 && value != kBeingCreatedMarker)
+ return reinterpret_cast<Type*>(value);
+
+ // Object isn't created yet, maybe we will get to create it, let's try...
+ if (base::subtle::Acquire_CompareAndSwap(&instance_,
+ 0,
+ kBeingCreatedMarker) == 0) {
+ // instance_ was NULL and is now kBeingCreatedMarker. Only one thread
+ // will ever get here. Threads might be spinning on us, and they will
+ // stop right after we do this store.
+ Type* newval = Traits::New();
+ base::subtle::Release_Store(
+ &instance_, reinterpret_cast<base::subtle::AtomicWord>(newval));
+
+ if (Traits::kRegisterAtExit)
+ base::AtExitManager::RegisterCallback(OnExit, NULL);
+
+ return newval;
+ }
+
+ // We hit a race. Another thread beat us and either:
+ // - Has the object in BeingCreated state
+ // - Already has the object created...
+ // We know value != NULL. It could be kBeingCreatedMarker, or a valid ptr.
+ // Unless your constructor can be very time consuming, it is very unlikely
+ // to hit this race. When it does, we just spin and yield the thread until
+ // the object has been created.
+ while (true) {
+ value = base::subtle::NoBarrier_Load(&instance_);
+ if (value != kBeingCreatedMarker)
+ break;
+ PlatformThread::YieldCurrentThread();
+ }
+
+ return reinterpret_cast<Type*>(value);
+ }
+
+ // Shortcuts.
+ Type& operator*() {
+ return *get();
+ }
+
+ Type* operator->() {
+ return get();
+ }
+
+ private:
+ // Adapter function for use with AtExit(). This should be called single
+ // threaded, but we might as well take the precautions anyway.
+ static void OnExit(void* unused) {
+    // AtExit should only ever be registered after the singleton instance was
+ // created. We should only ever get here with a valid instance_ pointer.
+ Traits::Delete(reinterpret_cast<Type*>(
+ base::subtle::NoBarrier_AtomicExchange(&instance_, 0)));
+ }
+ static base::subtle::AtomicWord instance_;
+};
+
+template <typename Type, typename Traits, typename DifferentiatingType>
+base::subtle::AtomicWord Singleton<Type, Traits, DifferentiatingType>::
+ instance_ = 0;
+
+#endif // BASE_SINGLETON_H_
diff --git a/ipc/chromium/src/base/singleton_objc.h b/ipc/chromium/src/base/singleton_objc.h
new file mode 100644
index 000000000..07fc092ab
--- /dev/null
+++ b/ipc/chromium/src/base/singleton_objc.h
@@ -0,0 +1,62 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright (c) 2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Support for using the Singleton<T> pattern with Objective-C objects. A
+// SingletonObjC is the same as a Singleton, except the default traits are
+// appropriate for Objective-C objects. A typical Objective-C object of type
+// NSExampleType can be maintained as a singleton and accessed with:
+//
+// NSExampleType* exampleSingleton = SingletonObjC<NSExampleType>::get();
+//
+// The first time this is used, it will create exampleSingleton as the result
+// of [[NSExampleType alloc] init]. Subsequent calls will return the same
+// NSExampleType* object. The object will be released by calling
+// -[NSExampleType release] when Singleton's atexit routines run
+// (see singleton.h).
+//
+// For Objective-C objects initialized through means other than the
+// no-parameter -init selector, DefaultSingletonObjCTraits may be extended
+// as needed:
+//
+// struct FooSingletonTraits : public DefaultSingletonObjCTraits<Foo> {
+// static Foo* New() {
+// return [[Foo alloc] initWithName:@"selecty"];
+// }
+// };
+// ...
+// Foo* widgetSingleton = SingletonObjC<Foo, FooSingletonTraits>::get();
+
+#ifndef BASE_SINGLETON_OBJC_H_
+#define BASE_SINGLETON_OBJC_H_
+
+#import <Foundation/Foundation.h>
+#include "base/singleton.h"
+
+// Singleton traits usable to manage traditional Objective-C objects, which
+// are instantiated by sending |alloc| and |init| messages, and are deallocated
+// in a memory-managed environment when their retain counts drop to 0 by
+// sending |release| messages.
+template<typename Type>
+struct DefaultSingletonObjCTraits : public DefaultSingletonTraits<Type> {
+ static Type* New() {
+ return [[Type alloc] init];
+ }
+
+ static void Delete(Type* object) {
+ [object release];
+ }
+};
+
+// Exactly like Singleton, but with DefaultSingletonObjCTraits as the
+// default trait class. This makes it straightforward for Objective-C++ code
+// to hold Objective-C objects as singletons.
+template<typename Type,
+ typename Traits = DefaultSingletonObjCTraits<Type>,
+ typename DifferentiatingType = Type>
+class SingletonObjC : public Singleton<Type, Traits, DifferentiatingType> {
+};
+
+#endif // BASE_SINGLETON_OBJC_H_
diff --git a/ipc/chromium/src/base/stack_container.h b/ipc/chromium/src/base/stack_container.h
new file mode 100644
index 000000000..23991c2f9
--- /dev/null
+++ b/ipc/chromium/src/base/stack_container.h
@@ -0,0 +1,255 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_STACK_CONTAINER_H_
+#define BASE_STACK_CONTAINER_H_
+
+#include <string>
+#include <vector>
+
+#include "base/basictypes.h"
+
+// This allocator can be used with STL containers to provide a stack buffer
+// from which to allocate memory, overflowing onto the heap when necessary.
+// The stack buffer is allocated on the stack and allows us to avoid heap
+// operations in
+// some situations.
+//
+// STL likes to make copies of allocators, so the allocator itself can't hold
+// the data. Instead, we make the creator responsible for creating a
+// StackAllocator::Source which contains the data. Copying the allocator
+// merely copies the pointer to this shared source, so all allocators created
+// based on our allocator will share the same stack buffer.
+//
+// This stack buffer implementation is very simple. The first allocation that
+// fits in the stack buffer will use the stack buffer. Any subsequent
+// allocations will not use the stack buffer, even if there is unused room.
+// This makes it appropriate for array-like containers, but the caller should
+// be sure to reserve() in the container up to the stack buffer size. Otherwise
+// the container will allocate a small array which will "use up" the stack
+// buffer.
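+//
+// A minimal sketch of using the allocator directly (StackContainer below
+// automates the reserve() step):
+//
+//   StackAllocator<int, 16>::Source source;
+//   StackAllocator<int, 16> allocator(&source);
+//   std::vector<int, StackAllocator<int, 16> > buf(allocator);
+//   buf.reserve(16);  // First allocation fits, so it comes from the stack
+//                     // buffer; later growth falls back to the heap.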
+template<typename T, size_t stack_capacity>
+class StackAllocator : public std::allocator<T> {
+ public:
+ typedef typename std::allocator<T>::pointer pointer;
+ typedef typename std::allocator<T>::size_type size_type;
+
+ // Backing store for the allocator. The container owner is responsible for
+ // maintaining this for as long as any containers using this allocator are
+ // live.
+ struct Source {
+ Source() : used_stack_buffer_(false) {
+ }
+
+ // Casts the buffer to its right type.
+ T* stack_buffer() { return reinterpret_cast<T*>(stack_buffer_); }
+ const T* stack_buffer() const {
+ return reinterpret_cast<const T*>(stack_buffer_);
+ }
+
+ //
+ // IMPORTANT: Take care to ensure that stack_buffer_ is aligned
+ // since it is used to mimic an array of T.
+ // Be careful while declaring any unaligned types (like bool)
+ // before stack_buffer_.
+ //
+
+ // The buffer itself. It is not of type T because we don't want the
+ // constructors and destructors to be automatically called. Define a POD
+ // buffer of the right size instead.
+ char stack_buffer_[sizeof(T[stack_capacity])];
+
+ // Set when the stack buffer is used for an allocation. We do not track
+ // how much of the buffer is used, only that somebody is using it.
+ bool used_stack_buffer_;
+ };
+
+ // Used by containers when they want to refer to an allocator of type U.
+ template<typename U>
+ struct rebind {
+ typedef StackAllocator<U, stack_capacity> other;
+ };
+
+ // For the straight up copy c-tor, we can share storage.
+ StackAllocator(const StackAllocator<T, stack_capacity>& rhs)
+ : source_(rhs.source_) {
+ }
+
+ // ISO C++ requires the following constructor to be defined,
+ // and std::vector in VC++2008SP1 Release fails with an error
+ // in the class _Container_base_aux_alloc_real (from <xutility>)
+ // if the constructor does not exist.
+ // For this constructor, we cannot share storage; there's
+ // no guarantee that the Source buffer of Ts is large enough
+ // for Us.
+ // TODO: If we were fancy pants, perhaps we could share storage
+ // iff sizeof(T) == sizeof(U).
+ template<typename U, size_t other_capacity>
+ explicit StackAllocator(const StackAllocator<U, other_capacity>& other)
+ : source_(NULL) {
+ }
+
+ explicit StackAllocator(Source* source) : source_(source) {
+ }
+
+ // Actually do the allocation. Use the stack buffer if nobody has used it yet
+ // and the size requested fits. Otherwise, fall through to the standard
+ // allocator.
+ pointer allocate(size_type n, void* hint = 0) {
+ if (source_ != NULL && !source_->used_stack_buffer_
+ && n <= stack_capacity) {
+ source_->used_stack_buffer_ = true;
+ return source_->stack_buffer();
+ } else {
+ return std::allocator<T>::allocate(n, hint);
+ }
+ }
+
+ // Free: when trying to free the stack buffer, just mark it as free. For
+ // non-stack-buffer pointers, just fall though to the standard allocator.
+ void deallocate(pointer p, size_type n) {
+ if (source_ != NULL && p == source_->stack_buffer())
+ source_->used_stack_buffer_ = false;
+ else
+ std::allocator<T>::deallocate(p, n);
+ }
+
+ private:
+ Source* source_;
+};
+
+// A wrapper around STL containers that maintains a stack-sized buffer that the
+// initial capacity of the container is based on. Growing the container beyond the
+// stack capacity will transparently overflow onto the heap. The container must
+// support reserve().
+//
+// WATCH OUT: the ContainerType MUST use the proper StackAllocator for this
+// type. This object is really intended to be used only internally. You'll want
+// to use the wrappers below for different types.
+template<typename TContainerType, int stack_capacity>
+class StackContainer {
+ public:
+ typedef TContainerType ContainerType;
+ typedef typename ContainerType::value_type ContainedType;
+ typedef StackAllocator<ContainedType, stack_capacity> Allocator;
+
+ // Allocator must be constructed before the container!
+ StackContainer() : allocator_(&stack_data_), container_(allocator_) {
+ // Make the container use the stack allocation by reserving our buffer size
+ // before doing anything else.
+ container_.reserve(stack_capacity);
+ }
+
+ // Getters for the actual container.
+ //
+ // Danger: any copies of this made using the copy constructor must have
+ // shorter lifetimes than the source. The copy will share the same allocator
+ // and therefore the same stack buffer as the original. Use std::copy to
+ // copy into a "real" container for longer-lived objects.
+ ContainerType& container() { return container_; }
+ const ContainerType& container() const { return container_; }
+
+ // Support operator-> to get to the container. This allows nicer syntax like:
+ // StackContainer<...> foo;
+ // std::sort(foo->begin(), foo->end());
+ ContainerType* operator->() { return &container_; }
+ const ContainerType* operator->() const { return &container_; }
+
+#ifdef UNIT_TEST
+ // Retrieves the stack source so that unit tests can verify that the
+ // buffer is being used properly.
+ const typename Allocator::Source& stack_data() const {
+ return stack_data_;
+ }
+#endif
+
+ protected:
+ typename Allocator::Source stack_data_;
+ Allocator allocator_;
+ ContainerType container_;
+
+ DISALLOW_EVIL_CONSTRUCTORS(StackContainer);
+};
+
+// StackString
+template<size_t stack_capacity>
+class StackString : public StackContainer<
+ std::basic_string<char,
+ std::char_traits<char>,
+ StackAllocator<char, stack_capacity> >,
+ stack_capacity> {
+ public:
+ StackString() : StackContainer<
+ std::basic_string<char,
+ std::char_traits<char>,
+ StackAllocator<char, stack_capacity> >,
+ stack_capacity>() {
+ }
+
+ private:
+ DISALLOW_EVIL_CONSTRUCTORS(StackString);
+};
+
+// StackWString
+template<size_t stack_capacity>
+class StackWString : public StackContainer<
+ std::basic_string<wchar_t,
+ std::char_traits<wchar_t>,
+ StackAllocator<wchar_t, stack_capacity> >,
+ stack_capacity> {
+ public:
+ StackWString() : StackContainer<
+ std::basic_string<wchar_t,
+ std::char_traits<wchar_t>,
+ StackAllocator<wchar_t, stack_capacity> >,
+ stack_capacity>() {
+ }
+
+ private:
+ DISALLOW_EVIL_CONSTRUCTORS(StackWString);
+};
+
+// StackVector
+//
+// Example:
+// StackVector<int, 16> foo;
+// foo->push_back(22); // we have overloaded operator->
+// foo[0] = 10; // as well as operator[]
+template<typename T, size_t stack_capacity>
+class StackVector : public StackContainer<
+ std::vector<T, StackAllocator<T, stack_capacity> >,
+ stack_capacity> {
+ public:
+ StackVector() : StackContainer<
+ std::vector<T, StackAllocator<T, stack_capacity> >,
+ stack_capacity>() {
+ }
+
+ // We need to put this in STL containers sometimes, which requires a copy
+ // constructor. We can't call the regular copy constructor because that will
+ // take the stack buffer from the original. Here, we create an empty object
+ // with its own stack buffer and copy the elements into it.
+ StackVector(const StackVector<T, stack_capacity>& other)
+ : StackContainer<
+ std::vector<T, StackAllocator<T, stack_capacity> >,
+ stack_capacity>() {
+ this->container().assign(other->begin(), other->end());
+ }
+
+ StackVector<T, stack_capacity>& operator=(
+ const StackVector<T, stack_capacity>& other) {
+ this->container().assign(other->begin(), other->end());
+ return *this;
+ }
+
+ // Vectors are commonly indexed, which isn't very convenient even with
+ // operator-> (using "->at()" does exception stuff we don't want).
+ T& operator[](size_t i) { return this->container().operator[](i); }
+ const T& operator[](size_t i) const {
+ return this->container().operator[](i);
+ }
+};
+
+#endif // BASE_STACK_CONTAINER_H_
diff --git a/ipc/chromium/src/base/stl_util-inl.h b/ipc/chromium/src/base/stl_util-inl.h
new file mode 100644
index 000000000..994263277
--- /dev/null
+++ b/ipc/chromium/src/base/stl_util-inl.h
@@ -0,0 +1,452 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// STL utility functions. Usually, these replace built-in, but slow(!),
+// STL functions with more efficient versions.
+
+#ifndef BASE_STL_UTIL_INL_H_
+#define BASE_STL_UTIL_INL_H_
+
+#include <string.h> // for memcpy
+#include <functional>
+#include <set>
+#include <string>
+#include <vector>
+#include <cassert>
+
+// Clear internal memory of an STL object.
+// STL clear()/reserve(0) does not always free the internally allocated memory.
+// This function uses swap/destructor to ensure the internal memory is freed.
+template<class T> void STLClearObject(T* obj) {
+ T tmp;
+ tmp.swap(*obj);
+ obj->reserve(0); // sometimes "T tmp" itself allocates memory (arena
+ // implementation?), so also call reserve() to try to
+ // clear it, even though that doesn't always work.
+}
+
+// Reduce memory usage on behalf of an object if it is using more than
+// |limit| bytes of space. By default, we clear objects over 1MB.
+template <class T> inline void STLClearIfBig(T* obj, size_t limit = 1<<20) {
+ if (obj->capacity() >= limit) {
+ STLClearObject(obj);
+ } else {
+ obj->clear();
+ }
+}
+
+// Reserve space for an STL object.
+// STL's reserve() will always copy.
+// This function avoids the copy if we already have enough capacity.
+template<class T> void STLReserveIfNeeded(T* obj, int new_size) {
+ if (obj->capacity() < new_size) // increase capacity
+ obj->reserve(new_size);
+ else if (obj->size() > new_size) // reduce size
+ obj->resize(new_size);
+}
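+
+// Illustrative usage of the helpers above (|buffer| is a hypothetical name):
+//
+//   std::vector<char> buffer;
+//   // ... fill |buffer| with several megabytes of data, then ...
+//   STLClearIfBig(&buffer);           // frees the backing store if >= 1MB
+//   STLReserveIfNeeded(&buffer, 64);  // grows capacity only when needed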
+
+// STLDeleteContainerPointers()
+// For a range within a container of pointers, calls delete
+// (non-array version) on these pointers.
+// NOTE: for these functions, we could just implement a DeleteObject
+// functor and then call for_each() on the range and functor, but this
+// requires us to pull in all of algorithm.h, which seems expensive.
+// For hash_[multi]set, it is important that this deletes behind the iterator
+// because the hash_set may call the hash function on the iterator when it is
+// advanced, which could result in the hash function trying to dereference a
+// stale pointer.
+template <class ForwardIterator>
+void STLDeleteContainerPointers(ForwardIterator begin,
+ ForwardIterator end) {
+ while (begin != end) {
+ ForwardIterator temp = begin;
+ ++begin;
+ delete *temp;
+ }
+}
+
+// STLDeleteContainerPairPointers()
+// For a range within a container of pairs, calls delete
+// (non-array version) on BOTH items in the pairs.
+// NOTE: Like STLDeleteContainerPointers, it is important that this deletes
+// behind the iterator because if both the key and value are deleted, the
+// container may call the hash function on the iterator when it is advanced,
+// which could result in the hash function trying to dereference a stale
+// pointer.
+template <class ForwardIterator>
+void STLDeleteContainerPairPointers(ForwardIterator begin,
+ ForwardIterator end) {
+ while (begin != end) {
+ ForwardIterator temp = begin;
+ ++begin;
+ delete temp->first;
+ delete temp->second;
+ }
+}
+
+// STLDeleteContainerPairFirstPointers()
+// For a range within a container of pairs, calls delete (non-array version)
+// on the FIRST item in the pairs.
+// NOTE: Like STLDeleteContainerPointers, this deletes behind the iterator.
+template <class ForwardIterator>
+void STLDeleteContainerPairFirstPointers(ForwardIterator begin,
+ ForwardIterator end) {
+ while (begin != end) {
+ ForwardIterator temp = begin;
+ ++begin;
+ delete temp->first;
+ }
+}
+
+// STLDeleteContainerPairSecondPointers()
+// For a range within a container of pairs, calls delete
+// (non-array version) on the SECOND item in the pairs.
+template <class ForwardIterator>
+void STLDeleteContainerPairSecondPointers(ForwardIterator begin,
+ ForwardIterator end) {
+ while (begin != end) {
+ delete begin->second;
+ ++begin;
+ }
+}
+
+template<typename T>
+inline void STLAssignToVector(std::vector<T>* vec,
+ const T* ptr,
+ size_t n) {
+ vec->resize(n);
+ memcpy(&vec->front(), ptr, n*sizeof(T));
+}
+
+/***** Hack to allow faster assignment to a vector *****/
+
+// This routine speeds up an assignment of 32 bytes to a vector from
+// about 250 cycles per assignment to about 140 cycles.
+//
+// Usage:
+// STLAssignToVectorChar(&vec, ptr, size);
+// STLAssignToString(&str, ptr, size);
+
+inline void STLAssignToVectorChar(std::vector<char>* vec,
+ const char* ptr,
+ size_t n) {
+ STLAssignToVector(vec, ptr, n);
+}
+
+inline void STLAssignToString(std::string* str, const char* ptr, size_t n) {
+ str->resize(n);
+ memcpy(&*str->begin(), ptr, n);
+}
+
+// To treat a possibly-empty vector as an array, use these functions.
+// If you know the array will never be empty, you can use &*v.begin()
+// directly, but that is allowed to dump core if v is empty. This
+// function is the most efficient code that will work, taking into
+// account how our STL is actually implemented. THIS IS NON-PORTABLE
+// CODE, so call us instead of repeating the nonportable code
+// everywhere. If our STL implementation changes, we will need to
+// change this as well.
+
+template<typename T>
+inline T* vector_as_array(std::vector<T>* v) {
+# ifdef NDEBUG
+ return &*v->begin();
+# else
+ return v->empty() ? NULL : &*v->begin();
+# endif
+}
+
+template<typename T>
+inline const T* vector_as_array(const std::vector<T>* v) {
+# ifdef NDEBUG
+ return &*v->begin();
+# else
+ return v->empty() ? NULL : &*v->begin();
+# endif
+}
+
+// Return a mutable char* pointing to a string's internal buffer,
+// which may not be null-terminated. Writing through this pointer will
+// modify the string.
+//
+// string_as_array(&str)[i] is valid for 0 <= i < str.size() until the
+// next call to a string method that invalidates iterators.
+//
+// As of 2006-04, there is no standard-blessed way of getting a
+// mutable reference to a string's internal buffer. However, issue 530
+// (http://www.open-std.org/JTC1/SC22/WG21/docs/lwg-active.html#530)
+// proposes this as the method. According to Matt Austern, this should
+// already work on all current implementations.
+inline char* string_as_array(std::string* str) {
+ // DO NOT USE const_cast<char*>(str->data())! See the unittest for why.
+ return str->empty() ? NULL : &*str->begin();
+}
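+
+// Illustrative usage sketch (|fd| is a hypothetical file descriptor):
+//
+//   std::string buf(4096, '\0');
+//   ssize_t n = read(fd, string_as_array(&buf), buf.size());
+//   if (n > 0)
+//     buf.resize(n);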
+
+// These are methods that test two hash maps/sets for equality. These exist
+// because the == operator in the STL can return false when the maps/sets
+// contain identical elements. This is because it compares the internal hash
+// tables which may be different if the order of insertions and deletions
+// differed.
+
+template <class HashSet>
+inline bool
+HashSetEquality(const HashSet& set_a,
+ const HashSet& set_b) {
+ if (set_a.size() != set_b.size()) return false;
+ for (typename HashSet::const_iterator i = set_a.begin();
+ i != set_a.end();
+ ++i) {
+ if (set_b.find(*i) == set_b.end())
+ return false;
+ }
+ return true;
+}
+
+template <class HashMap>
+inline bool
+HashMapEquality(const HashMap& map_a,
+ const HashMap& map_b) {
+ if (map_a.size() != map_b.size()) return false;
+ for (typename HashMap::const_iterator i = map_a.begin();
+ i != map_a.end(); ++i) {
+ typename HashMap::const_iterator j = map_b.find(i->first);
+ if (j == map_b.end()) return false;
+ if (i->second != j->second) return false;
+ }
+ return true;
+}
+
+// The following functions are useful for cleaning up STL containers
+// whose elements point to allocated memory.
+
+// STLDeleteElements() deletes all the elements in an STL container and clears
+// the container. This function is suitable for use with a vector, set,
+// hash_set, or any other STL container which defines sensible begin(), end(),
+// and clear() methods.
+//
+// If container is NULL, this function is a no-op.
+//
+// As an alternative to calling STLDeleteElements() directly, consider
+// STLElementDeleter (defined below), which ensures that your container's
+// elements are deleted when the STLElementDeleter goes out of scope.
+template <class T>
+void STLDeleteElements(T *container) {
+ if (!container) return;
+ STLDeleteContainerPointers(container->begin(), container->end());
+ container->clear();
+}
+
+// Given an STL container consisting of (key, value) pairs, STLDeleteValues
+// deletes all the "value" components and clears the container. Does nothing
+// in the case it's given a NULL pointer.
+
+template <class T>
+void STLDeleteValues(T *v) {
+ if (!v) return;
+ for (typename T::iterator i = v->begin(); i != v->end(); ++i) {
+ delete i->second;
+ }
+ v->clear();
+}
+
+
+// The following classes provide a convenient way to delete all elements or
+// values from STL containers when they go out of scope. This greatly
+// simplifies code that creates temporary objects and has multiple return
+// statements. Example:
+//
+// vector<MyProto *> tmp_proto;
+// STLElementDeleter<vector<MyProto *> > d(&tmp_proto);
+// if (...) return false;
+// ...
+// return success;
+
+// Given a pointer to an STL container this class will delete all the element
+// pointers when it goes out of scope.
+
+template<class STLContainer> class STLElementDeleter {
+ public:
+ explicit STLElementDeleter(STLContainer *ptr) : container_ptr_(ptr) {}
+ ~STLElementDeleter() { STLDeleteElements(container_ptr_); }
+ private:
+ STLContainer *container_ptr_;
+};
+
+// Given a pointer to an STL container this class will delete all the value
+// pointers when it goes out of scope.
+
+template<class STLContainer> class STLValueDeleter {
+ public:
+ explicit STLValueDeleter(STLContainer *ptr) : container_ptr_(ptr) {}
+ ~STLValueDeleter() { STLDeleteValues(container_ptr_); }
+ private:
+ STLContainer *container_ptr_;
+};
+
+
+// Forward declare some callback classes in callback.h for STLBinaryFunction
+template <class R, class T1, class T2>
+class ResultCallback2;
+
+// STLBinaryFunction is a wrapper for the ResultCallback2 class in callback.h
+// It provides an operator () method instead of a Run method, so it may be
+// passed to STL functions in <algorithm>.
+//
+// The client should create callback with NewPermanentCallback, and should
+// delete callback after it is done using the STLBinaryFunction.
+
+template <class Result, class Arg1, class Arg2>
+class STLBinaryFunction : public std::binary_function<Arg1, Arg2, Result> {
+ public:
+ typedef ResultCallback2<Result, Arg1, Arg2> Callback;
+
+ explicit STLBinaryFunction(Callback* callback)
+ : callback_(callback) {
+ assert(callback_);
+ }
+
+ Result operator() (Arg1 arg1, Arg2 arg2) {
+ return callback_->Run(arg1, arg2);
+ }
+
+ private:
+ Callback* callback_;
+};
+
+// STLBinaryPredicate is a specialized version of STLBinaryFunction, where the
+// return type is bool and both arguments have type Arg. It can be used
+// wherever STL requires a StrictWeakOrdering, such as in sort() or
+// lower_bound().
+//
+// Templated typedefs are not supported, so we use inheritance instead.
+
+template <class Arg>
+class STLBinaryPredicate : public STLBinaryFunction<bool, Arg, Arg> {
+ public:
+ typedef typename STLBinaryPredicate<Arg>::Callback Callback;
+ explicit STLBinaryPredicate(Callback* callback)
+ : STLBinaryFunction<bool, Arg, Arg>(callback) {
+ }
+};
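+
+// Illustrative usage sketch. It assumes a NewPermanentCallback() factory from
+// callback.h and a hypothetical free function bool WidgetLess(Widget*, Widget*):
+//
+//   STLBinaryPredicate<Widget*>::Callback* cmp =
+//       NewPermanentCallback(&WidgetLess);
+//   std::sort(widgets.begin(), widgets.end(), STLBinaryPredicate<Widget*>(cmp));
+//   delete cmp;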
+
+// Functors that compose arbitrary unary and binary functions with a
+// function that "projects" one of the members of a pair.
+// Specifically, if p1 and p2, respectively, are the functions that
+// map a pair to its first and second, respectively, members, the
+// table below summarizes the functions that can be constructed:
+//
+// * UnaryOperate1st<pair>(f) returns the function x -> f(p1(x))
+// * UnaryOperate2nd<pair>(f) returns the function x -> f(p2(x))
+// * BinaryOperate1st<pair>(f) returns the function (x,y) -> f(p1(x),p1(y))
+// * BinaryOperate2nd<pair>(f) returns the function (x,y) -> f(p2(x),p2(y))
+//
+// A typical usage for these functions would be when iterating over
+// the contents of an STL map. For other sample usage, see the unittest.
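+//
+// For example (a sketch; |counts| is a hypothetical vector of pairs), sorting
+// by the second member of each pair:
+//
+//   std::vector<std::pair<std::string, int> > counts;
+//   std::sort(counts.begin(), counts.end(),
+//             BinaryOperate2nd<std::pair<std::string, int> >(
+//                 std::less<int>()));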
+
+template<typename Pair, typename UnaryOp>
+class UnaryOperateOnFirst
+ : public std::unary_function<Pair, typename UnaryOp::result_type> {
+ public:
+ UnaryOperateOnFirst() {
+ }
+
+ explicit UnaryOperateOnFirst(const UnaryOp& f) : f_(f) {
+ }
+
+ typename UnaryOp::result_type operator()(const Pair& p) const {
+ return f_(p.first);
+ }
+
+ private:
+ UnaryOp f_;
+};
+
+template<typename Pair, typename UnaryOp>
+UnaryOperateOnFirst<Pair, UnaryOp> UnaryOperate1st(const UnaryOp& f) {
+ return UnaryOperateOnFirst<Pair, UnaryOp>(f);
+}
+
+template<typename Pair, typename UnaryOp>
+class UnaryOperateOnSecond
+ : public std::unary_function<Pair, typename UnaryOp::result_type> {
+ public:
+ UnaryOperateOnSecond() {
+ }
+
+ explicit UnaryOperateOnSecond(const UnaryOp& f) : f_(f) {
+ }
+
+ typename UnaryOp::result_type operator()(const Pair& p) const {
+ return f_(p.second);
+ }
+
+ private:
+ UnaryOp f_;
+};
+
+template<typename Pair, typename UnaryOp>
+UnaryOperateOnSecond<Pair, UnaryOp> UnaryOperate2nd(const UnaryOp& f) {
+ return UnaryOperateOnSecond<Pair, UnaryOp>(f);
+}
+
+template<typename Pair, typename BinaryOp>
+class BinaryOperateOnFirst
+ : public std::binary_function<Pair, Pair, typename BinaryOp::result_type> {
+ public:
+ BinaryOperateOnFirst() {
+ }
+
+ explicit BinaryOperateOnFirst(const BinaryOp& f) : f_(f) {
+ }
+
+ typename BinaryOp::result_type operator()(const Pair& p1,
+ const Pair& p2) const {
+ return f_(p1.first, p2.first);
+ }
+
+ private:
+ BinaryOp f_;
+};
+
+template<typename Pair, typename BinaryOp>
+BinaryOperateOnFirst<Pair, BinaryOp> BinaryOperate1st(const BinaryOp& f) {
+ return BinaryOperateOnFirst<Pair, BinaryOp>(f);
+}
+
+template<typename Pair, typename BinaryOp>
+class BinaryOperateOnSecond
+ : public std::binary_function<Pair, Pair, typename BinaryOp::result_type> {
+ public:
+ BinaryOperateOnSecond() {
+ }
+
+ explicit BinaryOperateOnSecond(const BinaryOp& f) : f_(f) {
+ }
+
+ typename BinaryOp::result_type operator()(const Pair& p1,
+ const Pair& p2) const {
+ return f_(p1.second, p2.second);
+ }
+
+ private:
+ BinaryOp f_;
+};
+
+template<typename Pair, typename BinaryOp>
+BinaryOperateOnSecond<Pair, BinaryOp> BinaryOperate2nd(const BinaryOp& f) {
+ return BinaryOperateOnSecond<Pair, BinaryOp>(f);
+}
+
+// Translates a set into a vector.
+template<typename T>
+std::vector<T> SetToVector(const std::set<T>& values) {
+ std::vector<T> result;
+ result.reserve(values.size());
+ result.insert(result.begin(), values.begin(), values.end());
+ return result;
+}
+
+#endif // BASE_STL_UTIL_INL_H_
diff --git a/ipc/chromium/src/base/string16.cc b/ipc/chromium/src/base/string16.cc
new file mode 100644
index 000000000..3d4efb0f0
--- /dev/null
+++ b/ipc/chromium/src/base/string16.cc
@@ -0,0 +1,74 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/string16.h"
+
+#if defined(WCHAR_T_IS_UTF16)
+
+#error This file should not be used on 2-byte wchar_t systems
+// If this winds up being needed on 2-byte wchar_t systems, either the
+// definitions below can be used, or the host system's wide character
+// functions like wmemcmp can be wrapped.
+
+#elif defined(WCHAR_T_IS_UTF32)
+
+#include "base/string_util.h"
+
+namespace base {
+
+int c16memcmp(const char16* s1, const char16* s2, size_t n) {
+ // We cannot call memcmp because that changes the semantics.
+ while (n-- > 0) {
+ if (*s1 != *s2) {
+ // We cannot use (*s1 - *s2) because char16 is unsigned.
+ return ((*s1 < *s2) ? -1 : 1);
+ }
+ ++s1;
+ ++s2;
+ }
+ return 0;
+}
+
+size_t c16len(const char16* s) {
+ const char16 *s_orig = s;
+ while (*s) {
+ ++s;
+ }
+ return s - s_orig;
+}
+
+const char16* c16memchr(const char16* s, char16 c, size_t n) {
+ while (n-- > 0) {
+ if (*s == c) {
+ return s;
+ }
+ ++s;
+ }
+ return 0;
+}
+
+char16* c16memmove(char16* s1, const char16* s2, size_t n) {
+ return reinterpret_cast<char16*>(memmove(s1, s2, n * sizeof(char16)));
+}
+
+char16* c16memcpy(char16* s1, const char16* s2, size_t n) {
+ return reinterpret_cast<char16*>(memcpy(s1, s2, n * sizeof(char16)));
+}
+
+char16* c16memset(char16* s, char16 c, size_t n) {
+ char16 *s_orig = s;
+ while (n-- > 0) {
+ *s = c;
+ ++s;
+ }
+ return s_orig;
+}
+
+} // namespace base
+
+template class std::basic_string<char16, base::string16_char_traits>;
+
+#endif // WCHAR_T_IS_UTF32
diff --git a/ipc/chromium/src/base/string16.h b/ipc/chromium/src/base/string16.h
new file mode 100644
index 000000000..f38311480
--- /dev/null
+++ b/ipc/chromium/src/base/string16.h
@@ -0,0 +1,175 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_STRING16_H_
+#define BASE_STRING16_H_
+
+// WHAT:
+// A version of std::basic_string that provides 2-byte characters even when
+// wchar_t is not implemented as a 2-byte type. You can access this class as
+// string16. We also define char16, which string16 is based upon.
+//
+// WHY:
+// On Windows, wchar_t is 2 bytes, and it can conveniently handle UTF-16/UCS-2
+// data. Plenty of existing code operates on strings encoded as UTF-16.
+//
+// On many other platforms, sizeof(wchar_t) is 4 bytes by default. We can make
+// it 2 bytes by using the GCC flag -fshort-wchar. But then std::wstring fails
+// at run time, because it calls some functions (like wcslen) that come from
+// the system's native C library -- which was built with a 4-byte wchar_t!
+// It's wasteful to use 4-byte wchar_t strings to carry UTF-16 data, and it's
+// entirely improper on those systems where the encoding of wchar_t is defined
+// as UTF-32.
+//
+// Here, we define string16, which is similar to std::wstring but replaces all
+// libc functions with custom, 2-byte-char compatible routines. It is capable
+// of carrying UTF-16-encoded data.
+
+#include <stdio.h>
+#include <string>
+
+#include "base/basictypes.h"
+
+#if defined(WCHAR_T_IS_UTF16)
+
+typedef wchar_t char16;
+typedef std::wstring string16;
+
+#elif defined(WCHAR_T_IS_UTF32)
+
+typedef uint16_t char16;
+
+namespace base {
+
+// char16 versions of the functions required by string16_char_traits; these
+// are based on the wide character functions of similar names ("w" or "wcs"
+// instead of "c16").
+int c16memcmp(const char16* s1, const char16* s2, size_t n);
+size_t c16len(const char16* s);
+const char16* c16memchr(const char16* s, char16 c, size_t n);
+char16* c16memmove(char16* s1, const char16* s2, size_t n);
+char16* c16memcpy(char16* s1, const char16* s2, size_t n);
+char16* c16memset(char16* s, char16 c, size_t n);
+
+struct string16_char_traits {
+ typedef char16 char_type;
+ typedef int int_type;
+
+ // int_type needs to be able to hold each possible value of char_type, and in
+ // addition, the distinct value of eof().
+ COMPILE_ASSERT(sizeof(int_type) > sizeof(char_type), unexpected_type_width);
+
+ typedef std::streamoff off_type;
+ typedef mbstate_t state_type;
+ typedef std::fpos<state_type> pos_type;
+
+ static void assign(char_type& c1, const char_type& c2) {
+ c1 = c2;
+ }
+
+ static bool eq(const char_type& c1, const char_type& c2) {
+ return c1 == c2;
+ }
+ static bool lt(const char_type& c1, const char_type& c2) {
+ return c1 < c2;
+ }
+
+ static int compare(const char_type* s1, const char_type* s2, size_t n) {
+ return c16memcmp(s1, s2, n);
+ }
+
+ static size_t length(const char_type* s) {
+ return c16len(s);
+ }
+
+ static const char_type* find(const char_type* s, size_t n,
+ const char_type& a) {
+ return c16memchr(s, a, n);
+ }
+
+ static char_type* move(char_type* s1, const char_type* s2, int_type n) {
+ return c16memmove(s1, s2, n);
+ }
+
+ static char_type* copy(char_type* s1, const char_type* s2, size_t n) {
+ return c16memcpy(s1, s2, n);
+ }
+
+ static char_type* assign(char_type* s, size_t n, char_type a) {
+ return c16memset(s, a, n);
+ }
+
+ static int_type not_eof(const int_type& c) {
+ return eq_int_type(c, eof()) ? 0 : c;
+ }
+
+ static char_type to_char_type(const int_type& c) {
+ return char_type(c);
+ }
+
+ static int_type to_int_type(const char_type& c) {
+ return int_type(c);
+ }
+
+ static bool eq_int_type(const int_type& c1, const int_type& c2) {
+ return c1 == c2;
+ }
+
+ static int_type eof() {
+ return static_cast<int_type>(EOF);
+ }
+};
+
+} // namespace base
+
+// The string class will be explicitly instantiated only once, in string16.cc.
+//
+// std::basic_string<> in GNU libstdc++ contains a static data member,
+// _S_empty_rep_storage, to represent empty strings. When an operation such
+// as assignment or destruction is performed on a string, causing its existing
+// data member to be invalidated, it must not be freed if this static data
+// member is being used. Otherwise, it counts as an attempt to free static
+// (and not allocated) data, which is a memory error.
+//
+// Generally, due to C++ template magic, _S_empty_rep_storage will be marked
+// as a coalesced symbol, meaning that the linker will combine multiple
+// instances into a single one when generating output.
+//
+// If a string class is used by multiple shared libraries, a problem occurs.
+// Each library will get its own copy of _S_empty_rep_storage. When strings
+// are passed across a library boundary for alteration or destruction, memory
+// errors will result. GNU libstdc++ contains a configuration option,
+// --enable-fully-dynamic-string (_GLIBCXX_FULLY_DYNAMIC_STRING), which
+// disables the static data member optimization, but it's a good optimization
+// and non-STL code is generally at the mercy of the system's STL
+// configuration. Fully-dynamic strings are not the default for GNU libstdc++
+// libstdc++ itself or for the libstdc++ installations on the systems we care
+// about, such as Mac OS X and relevant flavors of Linux.
+//
+// See also http://gcc.gnu.org/bugzilla/show_bug.cgi?id=24196 .
+//
+// To avoid problems, string classes need to be explicitly instantiated only
+// once, in exactly one library. All other string users see it via an "extern"
+// declaration. This is precisely how GNU libstdc++ handles
+// std::basic_string<char> (string) and std::basic_string<wchar_t> (wstring).
+//
+// This also works around a Mac OS X linker bug in ld64-85.2.1 (Xcode 3.1.2),
+// in which the linker does not fully coalesce symbols when dead code
+// stripping is enabled. This bug causes the memory errors described above
+// to occur even when a std::basic_string<> does not cross shared library
+// boundaries, such as in statically-linked executables.
+//
+// TODO(mark): File this bug with Apple and update this note with a bug number.
+
+extern template class std::basic_string<char16, base::string16_char_traits>;
+
+typedef std::basic_string<char16, base::string16_char_traits> string16;
+
+extern std::ostream& operator<<(std::ostream& out, const string16& str);
+
+#endif // WCHAR_T_IS_UTF32
+
+#endif // BASE_STRING16_H_
diff --git a/ipc/chromium/src/base/string_piece.cc b/ipc/chromium/src/base/string_piece.cc
new file mode 100644
index 000000000..169d6a49f
--- /dev/null
+++ b/ipc/chromium/src/base/string_piece.cc
@@ -0,0 +1,217 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Copied from strings/stringpiece.cc with modifications
+
+#include <algorithm>
+#include <ostream>
+
+#include "base/string_piece.h"
+
+typedef StringPiece::size_type size_type;
+
+std::ostream& operator<<(std::ostream& o, const StringPiece& piece) {
+ o.write(piece.data(), static_cast<std::streamsize>(piece.size()));
+ return o;
+}
+
+bool operator==(const StringPiece& x, const StringPiece& y) {
+ if (x.size() != y.size())
+ return false;
+
+ return StringPiece::wordmemcmp(x.data(), y.data(), x.size()) == 0;
+}
+
+void StringPiece::CopyToString(std::string* target) const {
+ target->assign(!empty() ? data() : "", size());
+}
+
+void StringPiece::AppendToString(std::string* target) const {
+ if (!empty())
+ target->append(data(), size());
+}
+
+size_type StringPiece::copy(char* buf, size_type n, size_type pos) const {
+ size_type ret = std::min(length_ - pos, n);
+ memcpy(buf, ptr_ + pos, ret);
+ return ret;
+}
+
+size_type StringPiece::find(const StringPiece& s, size_type pos) const {
+ if (pos > length_)
+ return npos;
+
+ const char* result = std::search(ptr_ + pos, ptr_ + length_,
+ s.ptr_, s.ptr_ + s.length_);
+ const size_type xpos = result - ptr_;
+ return xpos + s.length_ <= length_ ? xpos : npos;
+}
+
+size_type StringPiece::find(char c, size_type pos) const {
+ if (pos >= length_)
+ return npos;
+
+ const char* result = std::find(ptr_ + pos, ptr_ + length_, c);
+ return result != ptr_ + length_ ? result - ptr_ : npos;
+}
+
+size_type StringPiece::rfind(const StringPiece& s, size_type pos) const {
+ if (length_ < s.length_)
+ return npos;
+
+ if (s.empty())
+ return std::min(length_, pos);
+
+ const char* last = ptr_ + std::min(length_ - s.length_, pos) + s.length_;
+ const char* result = std::find_end(ptr_, last, s.ptr_, s.ptr_ + s.length_);
+ return result != last ? result - ptr_ : npos;
+}
+
+size_type StringPiece::rfind(char c, size_type pos) const {
+ if (length_ == 0)
+ return npos;
+
+ for (size_type i = std::min(pos, length_ - 1); ; --i) {
+ if (ptr_[i] == c)
+ return i;
+ if (i == 0)
+ break;
+ }
+ return npos;
+}
+
+// For each character in characters_wanted, sets the index corresponding
+// to the ASCII code of that character to 1 in table. This is used by
+// the find_.*_of methods below to tell whether or not a character is in
+// the lookup table in constant time.
+// The argument `table' must be an array that is large enough to hold all
+// the possible values of an unsigned char. Thus it should be declared
+// as follows:
+// bool table[UCHAR_MAX + 1]
+static inline void BuildLookupTable(const StringPiece& characters_wanted,
+ bool* table) {
+ const size_type length = characters_wanted.length();
+ const char* const data = characters_wanted.data();
+ for (size_type i = 0; i < length; ++i) {
+ table[static_cast<unsigned char>(data[i])] = true;
+ }
+}
+
+size_type StringPiece::find_first_of(const StringPiece& s,
+ size_type pos) const {
+ if (length_ == 0 || s.length_ == 0)
+ return npos;
+
+ // Avoid the cost of BuildLookupTable() for a single-character search.
+ if (s.length_ == 1)
+ return find_first_of(s.ptr_[0], pos);
+
+ bool lookup[UCHAR_MAX + 1] = { false };
+ BuildLookupTable(s, lookup);
+ for (size_type i = pos; i < length_; ++i) {
+ if (lookup[static_cast<unsigned char>(ptr_[i])]) {
+ return i;
+ }
+ }
+ return npos;
+}
+
+size_type StringPiece::find_first_not_of(const StringPiece& s,
+ size_type pos) const {
+ if (length_ == 0)
+ return npos;
+
+ if (s.length_ == 0)
+ return 0;
+
+ // Avoid the cost of BuildLookupTable() for a single-character search.
+ if (s.length_ == 1)
+ return find_first_not_of(s.ptr_[0], pos);
+
+ bool lookup[UCHAR_MAX + 1] = { false };
+ BuildLookupTable(s, lookup);
+ for (size_type i = pos; i < length_; ++i) {
+ if (!lookup[static_cast<unsigned char>(ptr_[i])]) {
+ return i;
+ }
+ }
+ return npos;
+}
+
+size_type StringPiece::find_first_not_of(char c, size_type pos) const {
+ if (length_ == 0)
+ return npos;
+
+ for (; pos < length_; ++pos) {
+ if (ptr_[pos] != c) {
+ return pos;
+ }
+ }
+ return npos;
+}
+
+size_type StringPiece::find_last_of(const StringPiece& s, size_type pos) const {
+ if (length_ == 0 || s.length_ == 0)
+ return npos;
+
+ // Avoid the cost of BuildLookupTable() for a single-character search.
+ if (s.length_ == 1)
+ return find_last_of(s.ptr_[0], pos);
+
+ bool lookup[UCHAR_MAX + 1] = { false };
+ BuildLookupTable(s, lookup);
+ for (size_type i = std::min(pos, length_ - 1); ; --i) {
+ if (lookup[static_cast<unsigned char>(ptr_[i])])
+ return i;
+ if (i == 0)
+ break;
+ }
+ return npos;
+}
+
+size_type StringPiece::find_last_not_of(const StringPiece& s,
+ size_type pos) const {
+ if (length_ == 0)
+ return npos;
+
+ size_type i = std::min(pos, length_ - 1);
+ if (s.length_ == 0)
+ return i;
+
+ // Avoid the cost of BuildLookupTable() for a single-character search.
+ if (s.length_ == 1)
+ return find_last_not_of(s.ptr_[0], pos);
+
+ bool lookup[UCHAR_MAX + 1] = { false };
+ BuildLookupTable(s, lookup);
+ for (; ; --i) {
+ if (!lookup[static_cast<unsigned char>(ptr_[i])])
+ return i;
+ if (i == 0)
+ break;
+ }
+ return npos;
+}
+
+size_type StringPiece::find_last_not_of(char c, size_type pos) const {
+ if (length_ == 0)
+ return npos;
+
+ for (size_type i = std::min(pos, length_ - 1); ; --i) {
+ if (ptr_[i] != c)
+ return i;
+ if (i == 0)
+ break;
+ }
+ return npos;
+}
+
+StringPiece StringPiece::substr(size_type pos, size_type n) const {
+ if (pos > length_) pos = length_;
+ if (n > length_ - pos) n = length_ - pos;
+ return StringPiece(ptr_ + pos, n);
+}
+
+const StringPiece::size_type StringPiece::npos = size_type(-1);
diff --git a/ipc/chromium/src/base/string_piece.h b/ipc/chromium/src/base/string_piece.h
new file mode 100644
index 000000000..36c39009c
--- /dev/null
+++ b/ipc/chromium/src/base/string_piece.h
@@ -0,0 +1,186 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Copied from strings/stringpiece.h with modifications
+//
+// A string-like object that points to a sized piece of memory.
+//
+// Functions or methods may use const StringPiece& parameters to accept either
+// a "const char*" or a "string" value that will be implicitly converted to
+// a StringPiece. The implicit conversion means that it is often appropriate
+// to include this .h file in other files rather than forward-declaring
+// StringPiece as would be appropriate for most other Google classes.
+//
+// Systematic usage of StringPiece is encouraged as it will reduce unnecessary
+// conversions from "const char*" to "string" and back again.
+//
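+// For example (sketch):
+//
+//   bool HasPrefix(const StringPiece& s, const StringPiece& prefix) {
+//     return s.starts_with(prefix);
+//   }
+//
+//   HasPrefix("foobar", "foo");               // const char* converts implicitly
+//   HasPrefix(std::string("foobar"), "foo");  // and so does std::string
+//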
+
+#ifndef BASE_STRING_PIECE_H_
+#define BASE_STRING_PIECE_H_
+
+#include <algorithm>
+#include <iosfwd>
+#include <string>
+
+#include "base/basictypes.h"
+
+class StringPiece {
+ public:
+ typedef size_t size_type;
+
+ private:
+ const char* ptr_;
+ size_type length_;
+
+ public:
+ // We provide non-explicit singleton constructors so users can pass
+ // in a "const char*" or a "string" wherever a "StringPiece" is
+ // expected.
+ StringPiece() : ptr_(NULL), length_(0) { }
+ MOZ_IMPLICIT StringPiece(const char* str)
+ : ptr_(str), length_((str == NULL) ? 0 : strlen(str)) { }
+ MOZ_IMPLICIT StringPiece(const std::string& str)
+ : ptr_(str.data()), length_(str.size()) { }
+ StringPiece(const char* offset, size_type len)
+ : ptr_(offset), length_(len) { }
+
+ // data() may return a pointer to a buffer with embedded NULs, and the
+ // returned buffer may or may not be null terminated. Therefore it is
+ // typically a mistake to pass data() to a routine that expects a NUL
+ // terminated string.
+ const char* data() const { return ptr_; }
+ size_type size() const { return length_; }
+ size_type length() const { return length_; }
+ bool empty() const { return length_ == 0; }
+
+ void clear() { ptr_ = NULL; length_ = 0; }
+ void set(const char* aData, size_type aLen) { ptr_ = aData; length_ = aLen; }
+ void set(const char* str) {
+ ptr_ = str;
+ length_ = str ? strlen(str) : 0;
+ }
+ void set(const void* aData, size_type aLen) {
+ ptr_ = reinterpret_cast<const char*>(aData);
+ length_ = aLen;
+ }
+
+ char operator[](size_type i) const { return ptr_[i]; }
+
+ void remove_prefix(size_type n) {
+ ptr_ += n;
+ length_ -= n;
+ }
+
+ void remove_suffix(size_type n) {
+ length_ -= n;
+ }
+
+ int compare(const StringPiece& x) const {
+ int r = wordmemcmp(ptr_, x.ptr_, std::min(length_, x.length_));
+ if (r == 0) {
+ if (length_ < x.length_) r = -1;
+ else if (length_ > x.length_) r = +1;
+ }
+ return r;
+ }
+
+ std::string as_string() const {
+ // std::string doesn't like to take a NULL pointer even with a 0 size.
+ return std::string(!empty() ? data() : "", size());
+ }
+
+ void CopyToString(std::string* target) const;
+ void AppendToString(std::string* target) const;
+
+ // Does "this" start with "x"
+ bool starts_with(const StringPiece& x) const {
+ return ((length_ >= x.length_) &&
+ (wordmemcmp(ptr_, x.ptr_, x.length_) == 0));
+ }
+
+ // Does "this" end with "x"
+ bool ends_with(const StringPiece& x) const {
+ return ((length_ >= x.length_) &&
+ (wordmemcmp(ptr_ + (length_-x.length_), x.ptr_, x.length_) == 0));
+ }
+
+ // standard STL container boilerplate
+ typedef char value_type;
+ typedef const char* pointer;
+ typedef const char& reference;
+ typedef const char& const_reference;
+ typedef ptrdiff_t difference_type;
+ static const size_type npos;
+ typedef const char* const_iterator;
+ typedef const char* iterator;
+ typedef std::reverse_iterator<const_iterator> const_reverse_iterator;
+ typedef std::reverse_iterator<iterator> reverse_iterator;
+ iterator begin() const { return ptr_; }
+ iterator end() const { return ptr_ + length_; }
+ const_reverse_iterator rbegin() const {
+ return const_reverse_iterator(ptr_ + length_);
+ }
+ const_reverse_iterator rend() const {
+ return const_reverse_iterator(ptr_);
+ }
+
+ size_type max_size() const { return length_; }
+ size_type capacity() const { return length_; }
+
+ size_type copy(char* buf, size_type n, size_type pos = 0) const;
+
+ size_type find(const StringPiece& s, size_type pos = 0) const;
+ size_type find(char c, size_type pos = 0) const;
+ size_type rfind(const StringPiece& s, size_type pos = npos) const;
+ size_type rfind(char c, size_type pos = npos) const;
+
+ size_type find_first_of(const StringPiece& s, size_type pos = 0) const;
+ size_type find_first_of(char c, size_type pos = 0) const {
+ return find(c, pos);
+ }
+ size_type find_first_not_of(const StringPiece& s, size_type pos = 0) const;
+ size_type find_first_not_of(char c, size_type pos = 0) const;
+ size_type find_last_of(const StringPiece& s, size_type pos = npos) const;
+ size_type find_last_of(char c, size_type pos = npos) const {
+ return rfind(c, pos);
+ }
+ size_type find_last_not_of(const StringPiece& s, size_type pos = npos) const;
+ size_type find_last_not_of(char c, size_type pos = npos) const;
+
+ StringPiece substr(size_type pos, size_type n = npos) const;
+
+ static int wordmemcmp(const char* p, const char* p2, size_type N) {
+ return memcmp(p, p2, N);
+ }
+};
+
+bool operator==(const ::StringPiece& x, const ::StringPiece& y);
+
+inline bool operator!=(const ::StringPiece& x, const ::StringPiece& y) {
+ return !(x == y);
+}
+
+inline bool operator<(const ::StringPiece& x, const ::StringPiece& y) {
+ const int r = ::StringPiece::wordmemcmp(x.data(), y.data(),
+ std::min(x.size(), y.size()));
+ return ((r < 0) || ((r == 0) && (x.size() < y.size())));
+}
+
+inline bool operator>(const ::StringPiece& x, const ::StringPiece& y) {
+ return y < x;
+}
+
+inline bool operator<=(const ::StringPiece& x, const ::StringPiece& y) {
+ return !(x > y);
+}
+
+inline bool operator>=(const ::StringPiece& x, const ::StringPiece& y) {
+ return !(x < y);
+}
+
+// allow StringPiece to be logged (needed for unit testing).
+extern std::ostream& operator<<(std::ostream& o, const ::StringPiece& piece);
+
+#endif // BASE_STRING_PIECE_H_
diff --git a/ipc/chromium/src/base/string_util.cc b/ipc/chromium/src/base/string_util.cc
new file mode 100644
index 000000000..339b12271
--- /dev/null
+++ b/ipc/chromium/src/base/string_util.cc
@@ -0,0 +1,780 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/string_util.h"
+
+#include "build/build_config.h"
+
+#include <ctype.h>
+#include <errno.h>
+#include <math.h>
+#include <stdarg.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <time.h>
+#include <wchar.h>
+#include <wctype.h>
+
+#include <algorithm>
+#include <vector>
+
+#include "base/basictypes.h"
+#include "base/logging.h"
+#include "base/singleton.h"
+
+namespace {
+
+// Force the singleton used by Empty[W]String[16] to be a unique type. This
+// prevents other code that might accidentally use Singleton<string> from
+// getting our internal one.
+struct EmptyStrings {
+ EmptyStrings() {}
+ const std::string s;
+ const std::wstring ws;
+ const string16 s16;
+};
+
+// Hack to convert any char-like type to its unsigned counterpart.
+// For example, it will convert char, signed char and unsigned char to unsigned
+// char.
+template<typename T>
+struct ToUnsigned {
+ typedef T Unsigned;
+};
+
+template<>
+struct ToUnsigned<char> {
+ typedef unsigned char Unsigned;
+};
+template<>
+struct ToUnsigned<signed char> {
+ typedef unsigned char Unsigned;
+};
+template<>
+struct ToUnsigned<wchar_t> {
+#if defined(WCHAR_T_IS_UTF16)
+ typedef unsigned short Unsigned;
+#elif defined(WCHAR_T_IS_UTF32)
+ typedef uint32_t Unsigned;
+#endif
+};
+template<>
+struct ToUnsigned<short> {
+ typedef unsigned short Unsigned;
+};
+
+// Generalized string-to-number conversion.
+//
+// StringToNumberTraits should provide:
+// - a typedef for string_type, the STL string type used as input.
+// - a typedef for value_type, the target numeric type.
+// - a static function, convert_func, which dispatches to an appropriate
+// strtol-like function and returns type value_type.
+// - a static function, valid_func, which validates |input| and returns a bool
+// indicating whether it is in proper form. This is used to check for
+// conditions that convert_func tolerates but should result in
+// StringToNumber returning false. For strtol-like functions, valid_func
+// should check for leading whitespace.
+template<typename StringToNumberTraits>
+bool StringToNumber(const typename StringToNumberTraits::string_type& input,
+ typename StringToNumberTraits::value_type* output) {
+ typedef StringToNumberTraits traits;
+
+ errno = 0; // Thread-safe? It is on at least Mac, Linux, and Windows.
+ typename traits::string_type::value_type* endptr = NULL;
+ typename traits::value_type value = traits::convert_func(input.c_str(),
+ &endptr);
+ *output = value;
+
+ // Cases to return false:
+ // - If errno is ERANGE, there was an overflow or underflow.
+ // - If the input string is empty, there was nothing to parse.
+ // - If endptr does not point to the end of the string, there are either
+ // characters remaining in the string after a parsed number, or the string
+ // does not begin with a parseable number. endptr is compared to the
+ // expected end given the string's stated length to correctly catch cases
+ // where the string contains embedded NUL characters.
+ // - valid_func determines that the input is not in preferred form.
+ return errno == 0 &&
+ !input.empty() &&
+ input.c_str() + input.length() == endptr &&
+ traits::valid_func(input);
+}
+
+class StringToLongTraits {
+ public:
+ typedef std::string string_type;
+ typedef long value_type;
+ static const int kBase = 10;
+ static inline value_type convert_func(const string_type::value_type* str,
+ string_type::value_type** endptr) {
+ return strtol(str, endptr, kBase);
+ }
+ static inline bool valid_func(const string_type& str) {
+ return !str.empty() && !isspace(str[0]);
+ }
+};
+
+class String16ToLongTraits {
+ public:
+ typedef string16 string_type;
+ typedef long value_type;
+ static const int kBase = 10;
+ static inline value_type convert_func(const string_type::value_type* str,
+ string_type::value_type** endptr) {
+#if defined(WCHAR_T_IS_UTF16)
+ return wcstol(str, endptr, kBase);
+#elif defined(WCHAR_T_IS_UTF32)
+ std::string ascii_string = UTF16ToASCII(string16(str));
+ char* ascii_end = NULL;
+ value_type ret = strtol(ascii_string.c_str(), &ascii_end, kBase);
+ if (ascii_string.c_str() + ascii_string.length() == ascii_end) {
+ *endptr =
+ const_cast<string_type::value_type*>(str) + ascii_string.length();
+ }
+ return ret;
+#endif
+ }
+ static inline bool valid_func(const string_type& str) {
+ return !str.empty() && !iswspace(str[0]);
+ }
+};
+
+class StringToInt64Traits {
+ public:
+ typedef std::string string_type;
+ typedef int64_t value_type;
+ static const int kBase = 10;
+ static inline value_type convert_func(const string_type::value_type* str,
+ string_type::value_type** endptr) {
+#ifdef OS_WIN
+ return _strtoi64(str, endptr, kBase);
+#else // assume OS_POSIX
+ return strtoll(str, endptr, kBase);
+#endif
+ }
+ static inline bool valid_func(const string_type& str) {
+ return !str.empty() && !isspace(str[0]);
+ }
+};
+
+class String16ToInt64Traits {
+ public:
+ typedef string16 string_type;
+ typedef int64_t value_type;
+ static const int kBase = 10;
+ static inline value_type convert_func(const string_type::value_type* str,
+ string_type::value_type** endptr) {
+#ifdef OS_WIN
+ return _wcstoi64(str, endptr, kBase);
+#else // assume OS_POSIX
+ std::string ascii_string = UTF16ToASCII(string16(str));
+ char* ascii_end = NULL;
+ value_type ret = strtoll(ascii_string.c_str(), &ascii_end, kBase);
+ if (ascii_string.c_str() + ascii_string.length() == ascii_end) {
+ *endptr =
+ const_cast<string_type::value_type*>(str) + ascii_string.length();
+ }
+ return ret;
+#endif
+ }
+ static inline bool valid_func(const string_type& str) {
+ return !str.empty() && !iswspace(str[0]);
+ }
+};
+
+} // namespace
+
+
+namespace base {
+
+bool IsWprintfFormatPortable(const wchar_t* format) {
+ for (const wchar_t* position = format; *position != '\0'; ++position) {
+
+ if (*position == '%') {
+ bool in_specification = true;
+ bool modifier_l = false;
+ while (in_specification) {
+ // Eat up characters until reaching a known specifier.
+ if (*++position == '\0') {
+ // The format string ended in the middle of a specification. Call
+ // it portable because no unportable specifications were found. The
+ // string is equally broken on all platforms.
+ return true;
+ }
+
+ if (*position == 'l') {
+ // 'l' is the only thing that can save the 's' and 'c' specifiers.
+ modifier_l = true;
+ } else if (((*position == 's' || *position == 'c') && !modifier_l) ||
+ *position == 'S' || *position == 'C' || *position == 'F' ||
+ *position == 'D' || *position == 'O' || *position == 'U') {
+ // Not portable.
+ return false;
+ }
+
+ if (wcschr(L"diouxXeEfgGaAcspn%", *position)) {
+ // Portable, keep scanning the rest of the format string.
+ in_specification = false;
+ }
+ }
+ }
+
+ }
+
+ return true;
+}
+
+
+} // namespace base
+
+static const wchar_t kWhitespaceWide[] = {
+ 0x0009, // <control-0009> to <control-000D>
+ 0x000A,
+ 0x000B,
+ 0x000C,
+ 0x000D,
+ 0x0020, // Space
+ 0x0085, // <control-0085>
+ 0x00A0, // No-Break Space
+ 0x1680, // Ogham Space Mark
+ 0x180E, // Mongolian Vowel Separator
+ 0x2000, // En Quad to Hair Space
+ 0x2001,
+ 0x2002,
+ 0x2003,
+ 0x2004,
+ 0x2005,
+ 0x2006,
+ 0x2007,
+ 0x2008,
+ 0x2009,
+ 0x200A,
+ 0x200C, // Zero Width Non-Joiner
+ 0x2028, // Line Separator
+ 0x2029, // Paragraph Separator
+ 0x202F, // Narrow No-Break Space
+ 0x205F, // Medium Mathematical Space
+ 0x3000, // Ideographic Space
+ 0
+};
+static const char kWhitespaceASCII[] = {
+ 0x09, // <control-0009> to <control-000D>
+ 0x0A,
+ 0x0B,
+ 0x0C,
+ 0x0D,
+ 0x20, // Space
+ 0
+};
+
+template<typename STR>
+TrimPositions TrimStringT(const STR& input,
+ const typename STR::value_type trim_chars[],
+ TrimPositions positions,
+ STR* output) {
+ // Find the edges of leading/trailing whitespace as desired.
+ const typename STR::size_type last_char = input.length() - 1;
+ const typename STR::size_type first_good_char = (positions & TRIM_LEADING) ?
+ input.find_first_not_of(trim_chars) : 0;
+ const typename STR::size_type last_good_char = (positions & TRIM_TRAILING) ?
+ input.find_last_not_of(trim_chars) : last_char;
+
+ // When the string was all whitespace, report that we stripped off whitespace
+ // from whichever position the caller was interested in. For empty input, we
+ // stripped no whitespace, but we still need to clear |output|.
+ if (input.empty() ||
+ (first_good_char == STR::npos) || (last_good_char == STR::npos)) {
+ bool input_was_empty = input.empty(); // in case output == &input
+ output->clear();
+ return input_was_empty ? TRIM_NONE : positions;
+ }
+
+ // Trim the whitespace.
+ *output =
+ input.substr(first_good_char, last_good_char - first_good_char + 1);
+
+ // Return where we trimmed from.
+ return static_cast<TrimPositions>(
+ ((first_good_char == 0) ? TRIM_NONE : TRIM_LEADING) |
+ ((last_good_char == last_char) ? TRIM_NONE : TRIM_TRAILING));
+}
+
+TrimPositions TrimWhitespace(const std::wstring& input,
+ TrimPositions positions,
+ std::wstring* output) {
+ return TrimStringT(input, kWhitespaceWide, positions, output);
+}
+
+TrimPositions TrimWhitespaceASCII(const std::string& input,
+ TrimPositions positions,
+ std::string* output) {
+ return TrimStringT(input, kWhitespaceASCII, positions, output);
+}
+
+// This function is only for backward-compatibility.
+// To be removed when all callers are updated.
+TrimPositions TrimWhitespace(const std::string& input,
+ TrimPositions positions,
+ std::string* output) {
+ return TrimWhitespaceASCII(input, positions, output);
+}
+
+std::string WideToASCII(const std::wstring& wide) {
+ DCHECK(IsStringASCII(wide));
+ return std::string(wide.begin(), wide.end());
+}
+
+std::wstring ASCIIToWide(const std::string& ascii) {
+ DCHECK(IsStringASCII(ascii));
+ return std::wstring(ascii.begin(), ascii.end());
+}
+
+std::string UTF16ToASCII(const string16& utf16) {
+ DCHECK(IsStringASCII(utf16));
+ return std::string(utf16.begin(), utf16.end());
+}
+
+string16 ASCIIToUTF16(const std::string& ascii) {
+ DCHECK(IsStringASCII(ascii));
+ return string16(ascii.begin(), ascii.end());
+}
+
+template<class STR>
+static bool DoIsStringASCII(const STR& str) {
+ for (size_t i = 0; i < str.length(); i++) {
+ typename ToUnsigned<typename STR::value_type>::Unsigned c = str[i];
+ if (c > 0x7F)
+ return false;
+ }
+ return true;
+}
+
+bool IsStringASCII(const std::wstring& str) {
+ return DoIsStringASCII(str);
+}
+
+#if !defined(WCHAR_T_IS_UTF16)
+bool IsStringASCII(const string16& str) {
+ return DoIsStringASCII(str);
+}
+#endif
+
+bool IsStringASCII(const std::string& str) {
+ return DoIsStringASCII(str);
+}
+
+// Overloaded wrappers around vsnprintf and vswprintf. The buf_size parameter
+// is the size of the buffer. These return the number of characters in the
+// formatted string excluding the NUL terminator. If the buffer is not
+// large enough to accommodate the formatted string without truncation, they
+// return the number of characters that would be in the fully-formatted string
+// (vsnprintf, and vswprintf on Windows), or -1 (vswprintf on POSIX platforms).
+inline int vsnprintfT(char* buffer,
+ size_t buf_size,
+ const char* format,
+ va_list argptr) {
+ return base::vsnprintf(buffer, buf_size, format, argptr);
+}
+
+inline int vsnprintfT(wchar_t* buffer,
+ size_t buf_size,
+ const wchar_t* format,
+ va_list argptr) {
+ return base::vswprintf(buffer, buf_size, format, argptr);
+}
+
+// Templatized backend for StringPrintF/StringAppendF. This does not finalize
+// the va_list; the caller is expected to do that.
+template <class StringType>
+static void StringAppendVT(StringType* dst,
+ const typename StringType::value_type* format,
+ va_list ap) {
+ // First try with a small fixed size buffer.
+ // This buffer size should be kept in sync with StringUtilTest.GrowBoundary
+ // and StringUtilTest.StringPrintfBounds.
+ typename StringType::value_type stack_buf[1024];
+
+ va_list backup_ap;
+ base_va_copy(backup_ap, ap);
+
+#if !defined(OS_WIN)
+ errno = 0;
+#endif
+ int result = vsnprintfT(stack_buf, arraysize(stack_buf), format, backup_ap);
+ va_end(backup_ap);
+
+ if (result >= 0 && result < static_cast<int>(arraysize(stack_buf))) {
+ // It fit.
+ dst->append(stack_buf, result);
+ return;
+ }
+
+ // Repeatedly increase buffer size until it fits.
+ int mem_length = arraysize(stack_buf);
+ while (true) {
+ if (result < 0) {
+#if !defined(OS_WIN)
+ // On Windows, vsnprintfT always returns the number of characters in a
+ // fully-formatted string, so if we reach this point, something else is
+ // wrong and no amount of buffer-doubling is going to fix it.
+ if (errno != 0 && errno != EOVERFLOW)
+#endif
+ {
+ // If an error other than overflow occurred, it's never going to work.
+ DLOG(WARNING) << "Unable to printf the requested string due to error.";
+ return;
+ }
+ // Try doubling the buffer size.
+ mem_length *= 2;
+ } else {
+ // We need exactly "result + 1" characters.
+ mem_length = result + 1;
+ }
+
+ if (mem_length > 32 * 1024 * 1024) {
+ // That should be plenty, don't try anything larger. This protects
+ // against huge allocations when using vsnprintfT implementations that
+ // return -1 for reasons other than overflow without setting errno.
+ DLOG(WARNING) << "Unable to printf the requested string due to size.";
+ return;
+ }
+
+ std::vector<typename StringType::value_type> mem_buf(mem_length);
+
+ // Restore the va_list before we use it again.
+ base_va_copy(backup_ap, ap);
+
+    result = vsnprintfT(&mem_buf[0], mem_length, format, backup_ap);
+ va_end(backup_ap);
+
+ if ((result >= 0) && (result < mem_length)) {
+ // It fit.
+ dst->append(&mem_buf[0], result);
+ return;
+ }
+ }
+}
+
+namespace {
+
+template <typename STR, typename INT, typename UINT, bool NEG>
+struct IntToStringT {
+
+ // This is to avoid a compiler warning about unary minus on unsigned type.
+ // For example, say you had the following code:
+ // template <typename INT>
+ // INT abs(INT value) { return value < 0 ? -value : value; }
+  // Even though value < 0 is impossible when INT is unsigned, so the unary
+  // minus will never be taken, the compiler will still generate a warning.
+  // We do a little specialization dance...
+ template <typename INT2, typename UINT2, bool NEG2>
+ struct ToUnsignedT { };
+
+ template <typename INT2, typename UINT2>
+ struct ToUnsignedT<INT2, UINT2, false> {
+ static UINT2 ToUnsigned(INT2 value) {
+ return static_cast<UINT2>(value);
+ }
+ };
+
+ template <typename INT2, typename UINT2>
+ struct ToUnsignedT<INT2, UINT2, true> {
+ static UINT2 ToUnsigned(INT2 value) {
+ return static_cast<UINT2>(value < 0 ? -value : value);
+ }
+ };
+
+ // This set of templates is very similar to the above templates, but
+ // for testing whether an integer is negative.
+ template <typename INT2, bool NEG2>
+ struct TestNegT {};
+ template <typename INT2>
+ struct TestNegT<INT2, false> {
+ static bool TestNeg(INT2 value) {
+ // value is unsigned, and can never be negative.
+ return false;
+ }
+ };
+ template <typename INT2>
+ struct TestNegT<INT2, true> {
+ static bool TestNeg(INT2 value) {
+ return value < 0;
+ }
+ };
+
+ static STR IntToString(INT value) {
+    // log10(2) ~= 0.3, so about 0.3 decimal digits are needed per bit; per
+    // byte that is log10(2**8) ~= 2.4 digits. Round up and allocate 3 output
+    // characters per byte, plus 1 for a possible '-'.
+ const int kOutputBufSize = 3 * sizeof(INT) + 1;
+
+    // Allocate the whole string right away; we will write back to front, and
+ // then return the substr of what we ended up using.
+ STR outbuf(kOutputBufSize, 0);
+
+ bool is_neg = TestNegT<INT, NEG>::TestNeg(value);
+ // Even though is_neg will never be true when INT is parameterized as
+    // unsigned, the mere presence of the unary minus causes a warning.
+ UINT res = ToUnsignedT<INT, UINT, NEG>::ToUnsigned(value);
+
+ for (typename STR::iterator it = outbuf.end();;) {
+ --it;
+ DCHECK(it != outbuf.begin());
+ *it = static_cast<typename STR::value_type>((res % 10) + '0');
+ res /= 10;
+
+      // We're done.
+ if (res == 0) {
+ if (is_neg) {
+ --it;
+ DCHECK(it != outbuf.begin());
+ *it = static_cast<typename STR::value_type>('-');
+ }
+ return STR(it, outbuf.end());
+ }
+ }
+ NOTREACHED();
+ return STR();
+ }
+};
+
+}  // namespace
+
+std::string IntToString(int value) {
+ return IntToStringT<std::string, int, unsigned int, true>::
+ IntToString(value);
+}
+std::wstring IntToWString(int value) {
+ return IntToStringT<std::wstring, int, unsigned int, true>::
+ IntToString(value);
+}
+std::string UintToString(unsigned int value) {
+ return IntToStringT<std::string, unsigned int, unsigned int, false>::
+ IntToString(value);
+}
+std::wstring UintToWString(unsigned int value) {
+ return IntToStringT<std::wstring, unsigned int, unsigned int, false>::
+ IntToString(value);
+}
+std::string Int64ToString(int64_t value) {
+ return IntToStringT<std::string, int64_t, uint64_t, true>::
+ IntToString(value);
+}
+std::wstring Int64ToWString(int64_t value) {
+ return IntToStringT<std::wstring, int64_t, uint64_t, true>::
+ IntToString(value);
+}
+std::string Uint64ToString(uint64_t value) {
+ return IntToStringT<std::string, uint64_t, uint64_t, false>::
+ IntToString(value);
+}
+std::wstring Uint64ToWString(uint64_t value) {
+ return IntToStringT<std::wstring, uint64_t, uint64_t, false>::
+ IntToString(value);
+}
+
+// Lower-level routine that takes a va_list and appends to a specified
+// string. All other routines are just convenience wrappers around it.
+static void StringAppendV(std::string* dst, const char* format, va_list ap) {
+ StringAppendVT(dst, format, ap);
+}
+
+static void StringAppendV(std::wstring* dst, const wchar_t* format, va_list ap) {
+ StringAppendVT(dst, format, ap);
+}
+
+std::string StringPrintf(const char* format, ...) {
+ va_list ap;
+ va_start(ap, format);
+ std::string result;
+ StringAppendV(&result, format, ap);
+ va_end(ap);
+ return result;
+}
+
+std::wstring StringPrintf(const wchar_t* format, ...) {
+ va_list ap;
+ va_start(ap, format);
+ std::wstring result;
+ StringAppendV(&result, format, ap);
+ va_end(ap);
+ return result;
+}
+
+const std::string& SStringPrintf(std::string* dst, const char* format, ...) {
+ va_list ap;
+ va_start(ap, format);
+ dst->clear();
+ StringAppendV(dst, format, ap);
+ va_end(ap);
+ return *dst;
+}
+
+const std::wstring& SStringPrintf(std::wstring* dst,
+ const wchar_t* format, ...) {
+ va_list ap;
+ va_start(ap, format);
+ dst->clear();
+ StringAppendV(dst, format, ap);
+ va_end(ap);
+ return *dst;
+}
+
+void StringAppendF(std::string* dst, const char* format, ...) {
+ va_list ap;
+ va_start(ap, format);
+ StringAppendV(dst, format, ap);
+ va_end(ap);
+}
+
+void StringAppendF(std::wstring* dst, const wchar_t* format, ...) {
+ va_list ap;
+ va_start(ap, format);
+ StringAppendV(dst, format, ap);
+ va_end(ap);
+}
+
+template<typename STR>
+static void SplitStringT(const STR& str,
+ const typename STR::value_type s,
+ bool trim_whitespace,
+ std::vector<STR>* r) {
+ size_t last = 0;
+ size_t i;
+ size_t c = str.size();
+ for (i = 0; i <= c; ++i) {
+ if (i == c || str[i] == s) {
+ size_t len = i - last;
+ STR tmp = str.substr(last, len);
+ if (trim_whitespace) {
+ STR t_tmp;
+ TrimWhitespace(tmp, TRIM_ALL, &t_tmp);
+ r->push_back(t_tmp);
+ } else {
+ r->push_back(tmp);
+ }
+ last = i + 1;
+ }
+ }
+}
+
+void SplitString(const std::wstring& str,
+ wchar_t s,
+ std::vector<std::wstring>* r) {
+ SplitStringT(str, s, true, r);
+}
+
+void SplitString(const std::string& str,
+ char s,
+ std::vector<std::string>* r) {
+ SplitStringT(str, s, true, r);
+}
+
+// For the various *ToInt conversions, there are no *ToIntTraits classes to use
+// because there's no such thing as strtoi. Use *ToLongTraits through a cast
+// instead, requiring that long and int are compatible and equal-width. They
+// are on our target platforms.
+
+// XXX Sigh.
+
+#if !defined(ARCH_CPU_64_BITS)
+bool StringToInt(const std::string& input, int* output) {
+ COMPILE_ASSERT(sizeof(int) == sizeof(long), cannot_strtol_to_int);
+ return StringToNumber<StringToLongTraits>(input,
+ reinterpret_cast<long*>(output));
+}
+
+bool StringToInt(const string16& input, int* output) {
+ COMPILE_ASSERT(sizeof(int) == sizeof(long), cannot_wcstol_to_int);
+ return StringToNumber<String16ToLongTraits>(input,
+ reinterpret_cast<long*>(output));
+}
+
+#else
+bool StringToInt(const std::string& input, int* output) {
+ long tmp;
+ bool ok = StringToNumber<StringToLongTraits>(input, &tmp);
+ if (!ok || tmp > kint32max) {
+ return false;
+ }
+ *output = static_cast<int>(tmp);
+ return true;
+}
+
+bool StringToInt(const string16& input, int* output) {
+ long tmp;
+ bool ok = StringToNumber<String16ToLongTraits>(input, &tmp);
+ if (!ok || tmp > kint32max) {
+ return false;
+ }
+ *output = static_cast<int>(tmp);
+ return true;
+}
+#endif // !defined(ARCH_CPU_64_BITS)
+
+bool StringToInt64(const std::string& input, int64_t* output) {
+ return StringToNumber<StringToInt64Traits>(input, output);
+}
+
+bool StringToInt64(const string16& input, int64_t* output) {
+ return StringToNumber<String16ToInt64Traits>(input, output);
+}
+
+int StringToInt(const std::string& value) {
+ int result;
+ StringToInt(value, &result);
+ return result;
+}
+
+int StringToInt(const string16& value) {
+ int result;
+ StringToInt(value, &result);
+ return result;
+}
+
+int64_t StringToInt64(const std::string& value) {
+ int64_t result;
+ StringToInt64(value, &result);
+ return result;
+}
+
+int64_t StringToInt64(const string16& value) {
+ int64_t result;
+ StringToInt64(value, &result);
+ return result;
+}
+
+// The following code is compatible with the OpenBSD lcpy interface. See:
+// http://www.gratisoft.us/todd/papers/strlcpy.html
+// ftp://ftp.openbsd.org/pub/OpenBSD/src/lib/libc/string/{wcs,str}lcpy.c
+
+namespace {
+
+template <typename CHAR>
+size_t lcpyT(CHAR* dst, const CHAR* src, size_t dst_size) {
+ for (size_t i = 0; i < dst_size; ++i) {
+ if ((dst[i] = src[i]) == 0) // We hit and copied the terminating NULL.
+ return i;
+ }
+
+  // We ran out of room in |dst| before reaching the terminating null, so the
+  // copy was truncated. Null-terminate |dst|.
+ if (dst_size != 0)
+ dst[dst_size - 1] = 0;
+
+  // Count the rest of |src| and return its length in characters.
+ while (src[dst_size]) ++dst_size;
+ return dst_size;
+}
+
+} // namespace
+
+size_t base::strlcpy(char* dst, const char* src, size_t dst_size) {
+ return lcpyT<char>(dst, src, dst_size);
+}
+size_t base::wcslcpy(wchar_t* dst, const wchar_t* src, size_t dst_size) {
+ return lcpyT<wchar_t>(dst, src, dst_size);
+}
diff --git a/ipc/chromium/src/base/string_util.h b/ipc/chromium/src/base/string_util.h
new file mode 100644
index 000000000..430b4a86b
--- /dev/null
+++ b/ipc/chromium/src/base/string_util.h
@@ -0,0 +1,237 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// This file defines utility functions for working with strings.
+
+#ifndef BASE_STRING_UTIL_H_
+#define BASE_STRING_UTIL_H_
+
+#include <stdarg.h> // va_list
+#include <ctype.h>
+
+#include <string>
+#include <vector>
+
+#include "base/basictypes.h"
+#include "base/string16.h"
+#include "base/string_piece.h" // For implicit conversions.
+
+// Safe standard library wrappers for all platforms.
+
+namespace base {
+
+// C standard-library functions like "strncasecmp" and "snprintf" that aren't
+// cross-platform are provided as "base::strncasecmp", and their prototypes
+// are listed below. These functions are then implemented as inline calls
+// to the platform-specific equivalents in the platform-specific headers.
+
+// Compare the two strings s1 and s2 without regard to case using
+// the current locale; returns 0 if they are equal, a positive value if
+// s1 > s2, and a negative value if s1 < s2 in a lexicographic comparison.
+int strcasecmp(const char* s1, const char* s2);
+
+// Compare up to count characters of s1 and s2 without regard to case using
+// the current locale; returns 0 if they are equal, a positive value if
+// s1 > s2, and a negative value if s1 < s2 in a lexicographic comparison.
+int strncasecmp(const char* s1, const char* s2, size_t count);
+
+// Wrapper for vsnprintf that always null-terminates and always returns the
+// number of characters that would be in an untruncated formatted
+// string, even when truncation occurs.
+int vsnprintf(char* buffer, size_t size, const char* format, va_list arguments);
+
+// vswprintf always null-terminates, but when truncation occurs, it will either
+// return -1 or the number of characters that would be in an untruncated
+// formatted string. The actual return value depends on the underlying
+// C library's vswprintf implementation.
+int vswprintf(wchar_t* buffer, size_t size,
+ const wchar_t* format, va_list arguments);
+
+// Some of these implementations need to be inlined.
+
+inline int snprintf(char* buffer, size_t size, const char* format, ...) {
+ va_list arguments;
+ va_start(arguments, format);
+ int result = vsnprintf(buffer, size, format, arguments);
+ va_end(arguments);
+ return result;
+}
+
+inline int swprintf(wchar_t* buffer, size_t size, const wchar_t* format, ...) {
+ va_list arguments;
+ va_start(arguments, format);
+ int result = vswprintf(buffer, size, format, arguments);
+ va_end(arguments);
+ return result;
+}
+
+// BSD-style safe and consistent string copy functions.
+// Copies |src| to |dst|, where |dst_size| is the total allocated size of |dst|.
+// Copies at most |dst_size|-1 characters, and always NULL terminates |dst|, as
+// long as |dst_size| is not 0. Returns the length of |src| in characters.
+// If the return value is >= dst_size, then the output was truncated.
+// NOTE: All sizes are in number of characters, NOT in bytes.
+size_t strlcpy(char* dst, const char* src, size_t dst_size);
+size_t wcslcpy(wchar_t* dst, const wchar_t* src, size_t dst_size);
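+
+// Illustrative sketch (not part of the original API notes): because the
+// return value is the length of |src|, truncation can be detected like this:
+//
+//   char buf[8];
+//   size_t src_len = base::strlcpy(buf, "hello, world", sizeof(buf));
+//   if (src_len >= sizeof(buf)) {
+//     // |buf| holds the truncated, null-terminated prefix "hello, ".
+//   }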
+
+// Scan a wprintf format string to determine whether it's portable across a
+// variety of systems. This function only checks that the conversion
+// specifiers used by the format string are supported and have the same meaning
+// on a variety of systems. It doesn't check for other errors that might occur
+// within a format string.
+//
+// Nonportable conversion specifiers for wprintf are:
+// - 's' and 'c' without an 'l' length modifier. %s and %c operate on char
+// data on all systems except Windows, which treat them as wchar_t data.
+// Use %ls and %lc for wchar_t data instead.
+// - 'S' and 'C', which operate on wchar_t data on all systems except Windows,
+// which treat them as char data. Use %ls and %lc for wchar_t data
+// instead.
+// - 'F', which is not identified by Windows wprintf documentation.
+// - 'D', 'O', and 'U', which are deprecated and not available on all systems.
+// Use %ld, %lo, and %lu instead.
+//
+// Note that there is no portable conversion specifier for char data when
+// working with wprintf.
+//
+// This function is intended to be called from base::vswprintf.
+bool IsWprintfFormatPortable(const wchar_t* format);
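+
+// Illustrative examples of the specifier rules listed above (not exhaustive):
+//
+//   IsWprintfFormatPortable(L"%d bytes, %ls");  // true: %d and %ls are portable
+//   IsWprintfFormatPortable(L"%s");             // false: %s needs the 'l' modifier
+//   IsWprintfFormatPortable(L"%S");             // false: %S differs per platform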
+
+} // namespace base
+
+#if defined(OS_WIN)
+#include "base/string_util_win.h"
+#elif defined(OS_POSIX)
+#include "base/string_util_posix.h"
+#else
+#error Define string operations appropriately for your platform
+#endif
+
+// Trims any whitespace from either end of the input string. Returns where
+// whitespace was found.
+// The non-wide version provided here is TrimWhitespaceASCII(), which is for
+// ASCII strings and only looks for ASCII whitespace.
+// NOTE: Safe to use the same variable for both input and output.
+enum TrimPositions {
+ TRIM_NONE = 0,
+ TRIM_LEADING = 1 << 0,
+ TRIM_TRAILING = 1 << 1,
+ TRIM_ALL = TRIM_LEADING | TRIM_TRAILING
+};
+TrimPositions TrimWhitespace(const std::wstring& input,
+ TrimPositions positions,
+ std::wstring* output);
+TrimPositions TrimWhitespaceASCII(const std::string& input,
+ TrimPositions positions,
+ std::string* output);
+
+// Deprecated. This function is only for backward compatibility and calls
+// TrimWhitespaceASCII().
+TrimPositions TrimWhitespace(const std::string& input,
+ TrimPositions positions,
+ std::string* output);
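+
+// Illustrative example of the trimming behavior described above:
+//
+//   std::string trimmed;
+//   TrimPositions where = TrimWhitespaceASCII("  abc ", TRIM_ALL, &trimmed);
+//   // trimmed == "abc", where == TRIM_ALL (whitespace removed at both ends).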
+
+// Searches for CR or LF characters. Removes all contiguous whitespace
+// strings that contain them. This is useful when trying to deal with text
+// copied from terminals.
+// Returns |text| with the following three transformations:
+// (1) Leading and trailing whitespace is trimmed.
+// (2) If |trim_sequences_with_line_breaks| is true, any other whitespace
+// sequences containing a CR or LF are trimmed.
+// (3) All other whitespace sequences are converted to single spaces.
+std::wstring CollapseWhitespace(const std::wstring& text,
+ bool trim_sequences_with_line_breaks);
+
+// These convert between ASCII (7-bit) and Wide/UTF16 strings.
+std::string WideToASCII(const std::wstring& wide);
+std::wstring ASCIIToWide(const std::string& ascii);
+std::string UTF16ToASCII(const string16& utf16);
+string16 ASCIIToUTF16(const std::string& ascii);
+
+// These convert between UTF-8, -16, and -32 strings. They are potentially slow,
+// so avoid unnecessary conversions. The low-level versions return a boolean
+// indicating whether the conversion was 100% valid; even when it was not,
+// they still do the best they can and put the result in the output buffer.
+// The versions that return strings ignore this error and just return the
+// best conversion possible.
+bool WideToUTF8(const wchar_t* src, size_t src_len, std::string* output);
+std::string WideToUTF8(const std::wstring& wide);
+bool UTF8ToWide(const char* src, size_t src_len, std::wstring* output);
+std::wstring UTF8ToWide(const ::StringPiece& utf8);
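+
+// Illustrative sketch of the boolean form (|wide_text| is a hypothetical
+// std::wstring):
+//
+//   std::string utf8;
+//   bool all_valid = WideToUTF8(wide_text.data(), wide_text.length(), &utf8);
+//   // |utf8| holds the best-effort conversion even when |all_valid| is false.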
+
+bool IsStringASCII(const std::wstring& str);
+bool IsStringASCII(const std::string& str);
+bool IsStringASCII(const string16& str);
+
+// Specialized string-conversion functions.
+std::string IntToString(int value);
+std::wstring IntToWString(int value);
+std::string UintToString(unsigned int value);
+std::wstring UintToWString(unsigned int value);
+std::string Int64ToString(int64_t value);
+std::wstring Int64ToWString(int64_t value);
+std::string Uint64ToString(uint64_t value);
+std::wstring Uint64ToWString(uint64_t value);
+// The DoubleToString methods convert the double to a string format that
+// ignores the locale. If you want to use locale specific formatting, use ICU.
+std::string DoubleToString(double value);
+std::wstring DoubleToWString(double value);
+
+// Perform a best-effort conversion of the input string to a numeric type,
+// setting |*output| to the result of the conversion. Returns true for
+// "perfect" conversions; returns false in the following cases:
+// - Overflow/underflow. |*output| will be set to the maximum value supported
+// by the data type.
+// - Trailing characters in the string after parsing the number. |*output|
+// will be set to the value of the number that was parsed.
+// - No characters parseable as a number at the beginning of the string.
+// |*output| will be set to 0.
+// - Empty string. |*output| will be set to 0.
+bool StringToInt(const std::string& input, int* output);
+bool StringToInt(const string16& input, int* output);
+bool StringToInt64(const std::string& input, int64_t* output);
+bool StringToInt64(const string16& input, int64_t* output);
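+
+// Illustrative examples of the return-value semantics described above:
+//
+//   int n = 0;
+//   StringToInt("42", &n);     // returns true,  n == 42
+//   StringToInt("42abc", &n);  // returns false (trailing characters)
+//   StringToInt("abc", &n);    // returns false (nothing parseable)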
+
+// Convenience forms of the above, when the caller is uninterested in the
+// boolean return value. These return only the |*output| value from the
+// above conversions: a best-effort conversion when possible, otherwise, 0.
+int StringToInt(const std::string& value);
+int StringToInt(const string16& value);
+int64_t StringToInt64(const std::string& value);
+int64_t StringToInt64(const string16& value);
+
+// Return a C++ string given printf-like input.
+std::string StringPrintf(const char* format, ...);
+std::wstring StringPrintf(const wchar_t* format, ...);
+
+// Store result into a supplied string and return it
+const std::string& SStringPrintf(std::string* dst, const char* format, ...);
+const std::wstring& SStringPrintf(std::wstring* dst,
+ const wchar_t* format, ...);
+
+// Append result to a supplied string
+void StringAppendF(std::string* dst, const char* format, ...);
+void StringAppendF(std::wstring* dst, const wchar_t* format, ...);
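+
+// Illustrative usage (|name|, |attempt|, and |elapsed_ms| are hypothetical
+// variables):
+//
+//   std::string msg = StringPrintf("retry %d for %s", attempt, name.c_str());
+//   StringAppendF(&msg, " (%d ms elapsed)", elapsed_ms);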
+
+//-----------------------------------------------------------------------------
+
+// Splits |str| into a vector of strings delimited by |s|. Append the results
+// into |r| as they appear. If several instances of |s| are contiguous, or if
+// |str| begins with or ends with |s|, then an empty string is inserted.
+//
+// Every substring is trimmed of any leading or trailing white space.
+void SplitString(const std::wstring& str,
+ wchar_t s,
+ std::vector<std::wstring>* r);
+void SplitString(const std::string& str,
+ char s,
+ std::vector<std::string>* r);
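+
+// Illustrative example:
+//
+//   std::vector<std::string> parts;
+//   SplitString("a, b,,c ", ',', &parts);
+//   // parts == {"a", "b", "", "c"}: empty pieces are kept and each piece is
+//   // trimmed of surrounding whitespace.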
+
+#endif // BASE_STRING_UTIL_H_
diff --git a/ipc/chromium/src/base/string_util_posix.h b/ipc/chromium/src/base/string_util_posix.h
new file mode 100644
index 000000000..60c2d58b6
--- /dev/null
+++ b/ipc/chromium/src/base/string_util_posix.h
@@ -0,0 +1,46 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_STRING_UTIL_POSIX_H_
+#define BASE_STRING_UTIL_POSIX_H_
+
+#include <stdarg.h>
+#include <stdio.h>
+#include <string.h>
+#include <wchar.h>
+
+#include "base/logging.h"
+
+namespace base {
+
+// Chromium code style is to not use malloc'd strings; this is provided only
+// for interaction with APIs that require it.
+inline char* strdup(const char* str) {
+ return ::strdup(str);
+}
+
+inline int strcasecmp(const char* string1, const char* string2) {
+ return ::strcasecmp(string1, string2);
+}
+
+inline int strncasecmp(const char* string1, const char* string2, size_t count) {
+ return ::strncasecmp(string1, string2, count);
+}
+
+inline int vsnprintf(char* buffer, size_t size,
+ const char* format, va_list arguments) {
+ return ::vsnprintf(buffer, size, format, arguments);
+}
+
+inline int vswprintf(wchar_t* buffer, size_t size,
+ const wchar_t* format, va_list arguments) {
+ DCHECK(IsWprintfFormatPortable(format));
+ return ::vswprintf(buffer, size, format, arguments);
+}
+
+} // namespace base
+
+#endif // BASE_STRING_UTIL_POSIX_H_
diff --git a/ipc/chromium/src/base/string_util_win.h b/ipc/chromium/src/base/string_util_win.h
new file mode 100644
index 000000000..218bd6466
--- /dev/null
+++ b/ipc/chromium/src/base/string_util_win.h
@@ -0,0 +1,53 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_STRING_UTIL_WIN_H_
+#define BASE_STRING_UTIL_WIN_H_
+
+#include <stdarg.h>
+#include <stdio.h>
+#include <string.h>
+#include <wchar.h>
+
+#include "base/logging.h"
+
+namespace base {
+
+// Chromium code style is to not use malloc'd strings; this is provided only
+// for interaction with APIs that require it.
+inline char* strdup(const char* str) {
+ return _strdup(str);
+}
+
+inline int strcasecmp(const char* s1, const char* s2) {
+ return _stricmp(s1, s2);
+}
+
+inline int strncasecmp(const char* s1, const char* s2, size_t count) {
+ return _strnicmp(s1, s2, count);
+}
+
+inline int vsnprintf(char* buffer, size_t size,
+ const char* format, va_list arguments) {
+ int length = vsnprintf_s(buffer, size, size - 1, format, arguments);
+ if (length < 0)
+ return _vscprintf(format, arguments);
+ return length;
+}
+
+inline int vswprintf(wchar_t* buffer, size_t size,
+ const wchar_t* format, va_list arguments) {
+ DCHECK(IsWprintfFormatPortable(format));
+
+ int length = _vsnwprintf_s(buffer, size, size - 1, format, arguments);
+ if (length < 0)
+ return _vscwprintf(format, arguments);
+ return length;
+}
+
+} // namespace base
+
+#endif // BASE_STRING_UTIL_WIN_H_
diff --git a/ipc/chromium/src/base/sys_info.h b/ipc/chromium/src/base/sys_info.h
new file mode 100644
index 000000000..8c2e116f4
--- /dev/null
+++ b/ipc/chromium/src/base/sys_info.h
@@ -0,0 +1,65 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright (c) 2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_SYS_INFO_H_
+#define BASE_SYS_INFO_H_
+
+#include "base/basictypes.h"
+
+#include <string>
+
+namespace base {
+
+class SysInfo {
+ public:
+ // Return the number of logical processors/cores on the current machine.
+ // WARNING: On POSIX, this method uses static variables and is not threadsafe
+ // until it's been initialized by being called once without a race.
+ static int NumberOfProcessors();
+
+ // Return the number of bytes of physical memory on the current machine.
+ static int64_t AmountOfPhysicalMemory();
+
+ // Return the number of megabytes of physical memory on the current machine.
+ static int AmountOfPhysicalMemoryMB() {
+ return static_cast<int>(AmountOfPhysicalMemory() / 1024 / 1024);
+ }
+
+ // Return the available disk space in bytes on the volume containing |path|,
+ // or -1 on failure.
+ static int64_t AmountOfFreeDiskSpace(const std::wstring& path);
+
+ // Return true if the given environment variable is defined.
+ // TODO: find a better place for HasEnvVar.
+ static bool HasEnvVar(const wchar_t* var);
+
+ // Return the value of the given environment variable
+ // or an empty string if not defined.
+ // TODO: find a better place for GetEnvVar.
+ static std::wstring GetEnvVar(const wchar_t* var);
+
+ // Returns the name of the host operating system.
+ static std::string OperatingSystemName();
+
+ // Returns the CPU architecture of the system. Exact return value may differ
+ // across platforms.
+ static std::string CPUArchitecture();
+
+ // Returns the pixel dimensions of the primary display via the
+ // width and height parameters.
+ static void GetPrimaryDisplayDimensions(int* width, int* height);
+
+ // Return the number of displays.
+ static int DisplayCount();
+
+ // Return the smallest amount of memory (in bytes) which the VM system will
+ // allocate.
+ static size_t VMAllocationGranularity();
+};
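+
+// Illustrative usage sketch (caller-side, hypothetical policy):
+//
+//   if (SysInfo::NumberOfProcessors() > 1 &&
+//       SysInfo::AmountOfPhysicalMemoryMB() >= 512) {
+//     // Enable a more parallel or memory-hungry configuration.
+//   }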
+
+} // namespace base
+
+#endif // BASE_SYS_INFO_H_
diff --git a/ipc/chromium/src/base/sys_info_mac.cc b/ipc/chromium/src/base/sys_info_mac.cc
new file mode 100644
index 000000000..a0f82d021
--- /dev/null
+++ b/ipc/chromium/src/base/sys_info_mac.cc
@@ -0,0 +1,13 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/sys_info.h"
+
+#include <CoreServices/CoreServices.h>
+
+namespace base {
+
+} // namespace base
diff --git a/ipc/chromium/src/base/sys_info_posix.cc b/ipc/chromium/src/base/sys_info_posix.cc
new file mode 100644
index 000000000..81ec78053
--- /dev/null
+++ b/ipc/chromium/src/base/sys_info_posix.cc
@@ -0,0 +1,158 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright (c) 2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/sys_info.h"
+#include "base/basictypes.h"
+
+#include <errno.h>
+#include <string.h>
+#ifndef ANDROID
+#include <sys/statvfs.h>
+#endif
+#include <sys/utsname.h>
+#include <unistd.h>
+
+#if defined(OS_MACOSX)
+#include <mach/mach_host.h>
+#include <mach/mach_init.h>
+#endif
+
+#if defined(OS_NETBSD)
+#include <sys/param.h>
+#include <sys/sysctl.h>
+#endif
+
+#include "base/logging.h"
+#include "base/string_util.h"
+
+namespace base {
+
+int SysInfo::NumberOfProcessors() {
+ // It seems that sysconf returns the number of "logical" processors on both
+ // mac and linux. So we get the number of "online logical" processors.
+#ifdef _SC_NPROCESSORS_ONLN
+ static long res = sysconf(_SC_NPROCESSORS_ONLN);
+#else
+ static long res = 1;
+#endif
+ if (res == -1) {
+ NOTREACHED();
+ return 1;
+ }
+
+ return static_cast<int>(res);
+}
+
+// static
+int64_t SysInfo::AmountOfPhysicalMemory() {
+ // _SC_PHYS_PAGES is not part of POSIX and not available on OS X
+#if defined(OS_MACOSX)
+ struct host_basic_info hostinfo;
+ mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;
+ int result = host_info(mach_host_self(),
+ HOST_BASIC_INFO,
+ reinterpret_cast<host_info_t>(&hostinfo),
+ &count);
+ DCHECK_EQ(HOST_BASIC_INFO_COUNT, count);
+ if (result != KERN_SUCCESS) {
+ NOTREACHED();
+ return 0;
+ }
+
+ return static_cast<int64_t>(hostinfo.max_mem);
+#elif defined(OS_NETBSD)
+ int mib[2];
+ int rc;
+ int64_t memSize;
+ size_t len = sizeof(memSize);
+
+ mib[0] = CTL_HW;
+ mib[1] = HW_PHYSMEM64;
+  rc = sysctl(mib, 2, &memSize, &len, NULL, 0);
+ if (-1 != rc) {
+ return memSize;
+ }
+ return 0;
+
+#else
+ long pages = sysconf(_SC_PHYS_PAGES);
+ long page_size = sysconf(_SC_PAGE_SIZE);
+ if (pages == -1 || page_size == -1) {
+ NOTREACHED();
+ return 0;
+ }
+
+ return static_cast<int64_t>(pages) * page_size;
+#endif
+}
+
+// static
+int64_t SysInfo::AmountOfFreeDiskSpace(const std::wstring& path) {
+#ifndef ANDROID
+ struct statvfs stats;
+ if (statvfs(WideToUTF8(path).c_str(), &stats) != 0) {
+ return -1;
+ }
+ return static_cast<int64_t>(stats.f_bavail) * stats.f_frsize;
+#else
+ return -1;
+#endif
+}
+
+// static
+bool SysInfo::HasEnvVar(const wchar_t* var) {
+ std::string var_utf8 = WideToUTF8(std::wstring(var));
+ return getenv(var_utf8.c_str()) != NULL;
+}
+
+// static
+std::wstring SysInfo::GetEnvVar(const wchar_t* var) {
+ std::string var_utf8 = WideToUTF8(std::wstring(var));
+ char* value = getenv(var_utf8.c_str());
+ if (!value) {
+ return L"";
+ } else {
+ return UTF8ToWide(value);
+ }
+}
+
+// static
+std::string SysInfo::OperatingSystemName() {
+ utsname info;
+ if (uname(&info) < 0) {
+ NOTREACHED();
+ return "";
+ }
+ return std::string(info.sysname);
+}
+
+// static
+std::string SysInfo::CPUArchitecture() {
+ utsname info;
+ if (uname(&info) < 0) {
+ NOTREACHED();
+ return "";
+ }
+ return std::string(info.machine);
+}
+
+// static
+void SysInfo::GetPrimaryDisplayDimensions(int* width, int* height) {
+ NOTIMPLEMENTED();
+}
+
+// static
+int SysInfo::DisplayCount() {
+ NOTIMPLEMENTED();
+ return 1;
+}
+
+// static
+size_t SysInfo::VMAllocationGranularity() {
+ return getpagesize();
+}
+
+} // namespace base
diff --git a/ipc/chromium/src/base/sys_info_win.cc b/ipc/chromium/src/base/sys_info_win.cc
new file mode 100644
index 000000000..5fffd18e3
--- /dev/null
+++ b/ipc/chromium/src/base/sys_info_win.cc
@@ -0,0 +1,100 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright (c) 2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/sys_info.h"
+
+#include <windows.h>
+
+#include "base/logging.h"
+#include "base/string_util.h"
+#include "mozilla/UniquePtr.h"
+
+namespace base {
+
+// static
+int SysInfo::NumberOfProcessors() {
+ SYSTEM_INFO info;
+ GetSystemInfo(&info);
+ return static_cast<int>(info.dwNumberOfProcessors);
+}
+
+// static
+int64_t SysInfo::AmountOfPhysicalMemory() {
+ MEMORYSTATUSEX memory_info;
+ memory_info.dwLength = sizeof(memory_info);
+ if (!GlobalMemoryStatusEx(&memory_info)) {
+ NOTREACHED();
+ return 0;
+ }
+
+ int64_t rv = static_cast<int64_t>(memory_info.ullTotalPhys);
+ if (rv < 0)
+ rv = kint64max;
+ return rv;
+}
+
+// static
+int64_t SysInfo::AmountOfFreeDiskSpace(const std::wstring& path) {
+ ULARGE_INTEGER available, total, free;
+ if (!GetDiskFreeSpaceExW(path.c_str(), &available, &total, &free)) {
+ return -1;
+ }
+ int64_t rv = static_cast<int64_t>(available.QuadPart);
+ if (rv < 0)
+ rv = kint64max;
+ return rv;
+}
+
+// static
+bool SysInfo::HasEnvVar(const wchar_t* var) {
+ return GetEnvironmentVariable(var, NULL, 0) != 0;
+}
+
+// static
+std::wstring SysInfo::GetEnvVar(const wchar_t* var) {
+ DWORD value_length = GetEnvironmentVariable(var, NULL, 0);
+ if (value_length == 0) {
+ return L"";
+ }
+ mozilla::UniquePtr<wchar_t[]> value(new wchar_t[value_length]);
+ GetEnvironmentVariable(var, value.get(), value_length);
+ return std::wstring(value.get());
+}
+
+// static
+std::string SysInfo::OperatingSystemName() {
+ return "Windows NT";
+}
+
+// static
+std::string SysInfo::CPUArchitecture() {
+ // TODO: Make this vary when we support any other architectures.
+ return "x86";
+}
+
+// static
+void SysInfo::GetPrimaryDisplayDimensions(int* width, int* height) {
+ if (width)
+ *width = GetSystemMetrics(SM_CXSCREEN);
+
+ if (height)
+ *height = GetSystemMetrics(SM_CYSCREEN);
+}
+
+// static
+int SysInfo::DisplayCount() {
+ return GetSystemMetrics(SM_CMONITORS);
+}
+
+// static
+size_t SysInfo::VMAllocationGranularity() {
+ SYSTEM_INFO sysinfo;
+ GetSystemInfo(&sysinfo);
+
+ return sysinfo.dwAllocationGranularity;
+}
+
+} // namespace base
diff --git a/ipc/chromium/src/base/sys_string_conversions.h b/ipc/chromium/src/base/sys_string_conversions.h
new file mode 100644
index 000000000..e3a51964a
--- /dev/null
+++ b/ipc/chromium/src/base/sys_string_conversions.h
@@ -0,0 +1,35 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_SYS_STRING_CONVERSIONS_H_
+#define BASE_SYS_STRING_CONVERSIONS_H_
+
+// Provides system-dependent string type conversions for cases where it's
+// necessary to not use ICU. Generally, you should not need this in Chrome,
+// but it is used in some shared code. Dependencies should be minimal.
+
+#include <string>
+#include "base/basictypes.h"
+#include "base/string16.h"
+
+class StringPiece;
+
+namespace base {
+
+// Converts between wide and UTF-8 representations of a string. On error, the
+// result is system-dependent.
+std::string SysWideToUTF8(const std::wstring& wide);
+std::wstring SysUTF8ToWide(const StringPiece& utf8);
+
+// Converts between wide and the system multi-byte representations of a string.
+// DANGER: This will lose information and can change (on Windows, this can
+// change between reboots).
+std::string SysWideToNativeMB(const std::wstring& wide);
+std::wstring SysNativeMBToWide(const StringPiece& native_mb);
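+
+// Illustrative round trip (|wide_path| is a hypothetical std::wstring):
+//
+//   std::string utf8_path = SysWideToUTF8(wide_path);
+//   std::wstring back = SysUTF8ToWide(utf8_path);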
+
+} // namespace base
+
+#endif // BASE_SYS_STRING_CONVERSIONS_H_
diff --git a/ipc/chromium/src/base/sys_string_conversions_mac.mm b/ipc/chromium/src/base/sys_string_conversions_mac.mm
new file mode 100644
index 000000000..cc5f58b70
--- /dev/null
+++ b/ipc/chromium/src/base/sys_string_conversions_mac.mm
@@ -0,0 +1,145 @@
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/sys_string_conversions.h"
+
+#import <Foundation/Foundation.h>
+
+#include <vector>
+
+#include "base/scoped_cftyperef.h"
+#include "base/string_piece.h"
+
+namespace base {
+
+namespace {
+
+// Convert the supplied CFString into the specified encoding, and return it as
+// an STL string of the template type. Returns an empty string on failure.
+//
+// Do not assert in this function since it is used by the assertion code!
+template<typename StringType>
+static StringType CFStringToSTLStringWithEncodingT(CFStringRef cfstring,
+ CFStringEncoding encoding) {
+ CFIndex length = CFStringGetLength(cfstring);
+ if (length == 0)
+ return StringType();
+
+ CFRange whole_string = CFRangeMake(0, length);
+ CFIndex out_size;
+ CFIndex converted = CFStringGetBytes(cfstring,
+ whole_string,
+ encoding,
+ 0, // lossByte
+ false, // isExternalRepresentation
+ NULL, // buffer
+ 0, // maxBufLen
+ &out_size);
+ if (converted == 0 || out_size == 0)
+ return StringType();
+
+ // out_size is the number of UInt8-sized units needed in the destination.
+ // A buffer allocated as UInt8 units might not be properly aligned to
+ // contain elements of StringType::value_type. Use a container for the
+ // proper value_type, and convert out_size by figuring the number of
+ // value_type elements per UInt8. Leave room for a NUL terminator.
+ typename StringType::size_type elements =
+ out_size * sizeof(UInt8) / sizeof(typename StringType::value_type) + 1;
+
+ std::vector<typename StringType::value_type> out_buffer(elements);
+ converted = CFStringGetBytes(cfstring,
+ whole_string,
+ encoding,
+ 0, // lossByte
+ false, // isExternalRepresentation
+ reinterpret_cast<UInt8*>(&out_buffer[0]),
+ out_size,
+ NULL); // usedBufLen
+ if (converted == 0)
+ return StringType();
+
+ out_buffer[elements - 1] = '\0';
+ return StringType(&out_buffer[0], elements - 1);
+}
+
+// Given an STL string |in| with an encoding specified by |in_encoding|,
+// convert it to |out_encoding| and return it as an STL string of the
+// |OutStringType| template type. Returns an empty string on failure.
+//
+// Do not assert in this function since it is used by the assertion code!
+template<typename InStringType, typename OutStringType>
+static OutStringType STLStringToSTLStringWithEncodingsT(
+ const InStringType& in,
+ CFStringEncoding in_encoding,
+ CFStringEncoding out_encoding) {
+ typename InStringType::size_type in_length = in.length();
+ if (in_length == 0)
+ return OutStringType();
+
+ scoped_cftyperef<CFStringRef> cfstring(
+ CFStringCreateWithBytesNoCopy(NULL,
+ reinterpret_cast<const UInt8*>(in.data()),
+ in_length *
+ sizeof(typename InStringType::value_type),
+ in_encoding,
+ false,
+ kCFAllocatorNull));
+ if (!cfstring)
+ return OutStringType();
+
+ return CFStringToSTLStringWithEncodingT<OutStringType>(cfstring,
+ out_encoding);
+}
+
+// Given an STL string |in| with an encoding specified by |in_encoding|,
+// return it as a CFStringRef. Returns NULL on failure.
+template<typename StringType>
+static CFStringRef STLStringToCFStringWithEncodingsT(
+ const StringType& in,
+ CFStringEncoding in_encoding) {
+ typename StringType::size_type in_length = in.length();
+ if (in_length == 0)
+ return CFSTR("");
+
+ return CFStringCreateWithBytes(kCFAllocatorDefault,
+ reinterpret_cast<const UInt8*>(in.data()),
+ in_length *
+ sizeof(typename StringType::value_type),
+ in_encoding,
+ false);
+}
+
+// Specify the byte ordering explicitly, otherwise CFString will be confused
+// when strings don't carry BOMs, as they typically won't.
+static const CFStringEncoding kNarrowStringEncoding = kCFStringEncodingUTF8;
+
+#ifdef __BIG_ENDIAN__
+static const CFStringEncoding kWideStringEncoding = kCFStringEncodingUTF32BE;
+#elif defined(__LITTLE_ENDIAN__)
+static const CFStringEncoding kWideStringEncoding = kCFStringEncodingUTF32LE;
+#endif // __LITTLE_ENDIAN__
+
+} // namespace
+
+// Do not assert in this function since it is used by the assertion code!
+std::string SysWideToUTF8(const std::wstring& wide) {
+ return STLStringToSTLStringWithEncodingsT<std::wstring, std::string>(
+ wide, kWideStringEncoding, kNarrowStringEncoding);
+}
+
+// Do not assert in this function since it is used by the assertion code!
+std::wstring SysUTF8ToWide(const StringPiece& utf8) {
+ return STLStringToSTLStringWithEncodingsT<StringPiece, std::wstring>(
+ utf8, kNarrowStringEncoding, kWideStringEncoding);
+}
+
+std::string SysWideToNativeMB(const std::wstring& wide) {
+ return SysWideToUTF8(wide);
+}
+
+std::wstring SysNativeMBToWide(const StringPiece& native_mb) {
+ return SysUTF8ToWide(native_mb);
+}
+
+} // namespace base
diff --git a/ipc/chromium/src/base/sys_string_conversions_win.cc b/ipc/chromium/src/base/sys_string_conversions_win.cc
new file mode 100644
index 000000000..198a14d21
--- /dev/null
+++ b/ipc/chromium/src/base/sys_string_conversions_win.cc
@@ -0,0 +1,72 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/sys_string_conversions.h"
+
+#include <windows.h>
+
+#include "base/string_piece.h"
+
+namespace base {
+
+// Do not assert in this function since it is used by the assertion code!
+std::wstring SysMultiByteToWide(const StringPiece& mb, uint32_t code_page) {
+ if (mb.empty())
+ return std::wstring();
+
+ int mb_length = static_cast<int>(mb.length());
+ // Compute the length of the buffer.
+ int charcount = MultiByteToWideChar(code_page, 0,
+ mb.data(), mb_length, NULL, 0);
+ if (charcount == 0)
+ return std::wstring();
+
+ std::wstring wide;
+ wide.resize(charcount);
+ MultiByteToWideChar(code_page, 0, mb.data(), mb_length, &wide[0], charcount);
+
+ return wide;
+}
+
+// Do not assert in this function since it is used by the assertion code!
+std::string SysWideToMultiByte(const std::wstring& wide, uint32_t code_page) {
+ int wide_length = static_cast<int>(wide.length());
+ if (wide_length == 0)
+ return std::string();
+
+ // Compute the length of the buffer we'll need.
+ int charcount = WideCharToMultiByte(code_page, 0, wide.data(), wide_length,
+ NULL, 0, NULL, NULL);
+ if (charcount == 0)
+ return std::string();
+
+ std::string mb;
+ mb.resize(charcount);
+ WideCharToMultiByte(code_page, 0, wide.data(), wide_length,
+ &mb[0], charcount, NULL, NULL);
+
+ return mb;
+}
+
+// Do not assert in this function since it is used by the assertion code!
+std::string SysWideToUTF8(const std::wstring& wide) {
+ return SysWideToMultiByte(wide, CP_UTF8);
+}
+
+// Do not assert in this function since it is used by the assertion code!
+std::wstring SysUTF8ToWide(const StringPiece& utf8) {
+ return SysMultiByteToWide(utf8, CP_UTF8);
+}
+
+std::string SysWideToNativeMB(const std::wstring& wide) {
+ return SysWideToMultiByte(wide, CP_ACP);
+}
+
+std::wstring SysNativeMBToWide(const StringPiece& native_mb) {
+ return SysMultiByteToWide(native_mb, CP_ACP);
+}
+
+} // namespace base
diff --git a/ipc/chromium/src/base/task.h b/ipc/chromium/src/base/task.h
new file mode 100644
index 000000000..10dc9c488
--- /dev/null
+++ b/ipc/chromium/src/base/task.h
@@ -0,0 +1,369 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TASK_H_
+#define BASE_TASK_H_
+
+#include "base/revocable_store.h"
+#include "base/tuple.h"
+#include "mozilla/IndexSequence.h"
+#include "mozilla/Tuple.h"
+#include "nsISupportsImpl.h"
+#include "nsThreadUtils.h"
+
+// Helper functions so that we can call a function and pass it arguments that
+// come from a Tuple.
+
+namespace details {
+
+// Call the given method on the given object. Arguments are passed by move
+// semantics from the given tuple. If the tuple has length N, the sequence must
+// be IndexSequence<0, 1, ..., N-1>.
+template<size_t... Indices, class ObjT, class Method, typename... Args>
+void CallMethod(mozilla::IndexSequence<Indices...>, ObjT* obj, Method method,
+ mozilla::Tuple<Args...>& arg)
+{
+ (obj->*method)(mozilla::Move(mozilla::Get<Indices>(arg))...);
+}
+
+// Same as above, but call a function.
+template<size_t... Indices, typename Function, typename... Args>
+void CallFunction(mozilla::IndexSequence<Indices...>, Function function,
+ mozilla::Tuple<Args...>& arg)
+{
+ (*function)(mozilla::Move(mozilla::Get<Indices>(arg))...);
+}
+
+} // namespace details
+
+// Call a method on the given object. Arguments are passed by move semantics
+// from the given tuple.
+template<class ObjT, class Method, typename... Args>
+void DispatchTupleToMethod(ObjT* obj, Method method, mozilla::Tuple<Args...>& arg)
+{
+ details::CallMethod(typename mozilla::IndexSequenceFor<Args...>::Type(),
+ obj, method, arg);
+}
+
+// Same as above, but call a function.
+template<typename Function, typename... Args>
+void DispatchTupleToFunction(Function function, mozilla::Tuple<Args...>& arg)
+{
+ details::CallFunction(typename mozilla::IndexSequenceFor<Args...>::Type(),
+ function, arg);
+}
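+
+// Illustrative example: given a hypothetical |Widget* widget| with a method
+// void Resize(int w, int h),
+//
+//   mozilla::Tuple<int, int> args = mozilla::MakeTuple(640, 480);
+//   DispatchTupleToMethod(widget, &Widget::Resize, args);
+//
+// is equivalent to widget->Resize(640, 480), with each tuple element moved
+// into the corresponding parameter.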
+
+// Scoped Factories ------------------------------------------------------------
+//
+// These scoped factory objects can be used by non-refcounted objects to safely
+// place tasks in a message loop. Each factory guarantees that the tasks it
+// produces will not run after the factory is destroyed. Commonly, factories
+// are declared as class members, so the class' tasks will automatically cancel
+// when the class instance is destroyed.
+//
+// Example Usage:
+//
+// class MyClass {
+// private:
+// // This factory will be used to schedule invocations of SomeMethod.
+// ScopedRunnableMethodFactory<MyClass> some_method_factory_;
+//
+// public:
+// // It is safe to suppress warning 4355 here.
+// MyClass() : some_method_factory_(this) { }
+//
+// void SomeMethod() {
+// // If this function might be called directly, you might want to revoke
+// // any outstanding runnable methods scheduled to call it. If it's not
+// // referenced other than by the factory, this is unnecessary.
+// some_method_factory_.RevokeAll();
+// ...
+// }
+//
+// void ScheduleSomeMethod() {
+//     // If you'd like to have only one pending task at a time, test for
+// // |empty| before manufacturing another task.
+// if (!some_method_factory_.empty())
+// return;
+//
+// // The factories are not thread safe, so always invoke on
+// // |MessageLoop::current()|.
+// MessageLoop::current()->PostDelayedTask(
+// some_method_factory_.NewRunnableMethod(&MyClass::SomeMethod),
+// kSomeMethodDelayMS);
+// }
+// };
+
+// A ScopedTaskFactory produces tasks of type |TaskType| and prevents them from
+// running after it is destroyed.
+template<class TaskType>
+class ScopedTaskFactory : public RevocableStore {
+ public:
+ ScopedTaskFactory() { }
+
+ // Create a new task.
+ inline TaskType* NewTask() {
+ return new TaskWrapper(this);
+ }
+
+ class TaskWrapper : public TaskType {
+ public:
+ explicit TaskWrapper(RevocableStore* store) : revocable_(store) { }
+
+ NS_IMETHOD Run() override {
+ if (!revocable_.revoked())
+ TaskType::Run();
+ return NS_OK;
+ }
+
+ ~TaskWrapper() {
+ NS_ASSERT_OWNINGTHREAD(TaskWrapper);
+ }
+
+ private:
+ Revocable revocable_;
+
+ NS_DECL_OWNINGTHREAD
+
+ DISALLOW_EVIL_CONSTRUCTORS(TaskWrapper);
+ };
+
+ private:
+ DISALLOW_EVIL_CONSTRUCTORS(ScopedTaskFactory);
+};
+
+// A ScopedRunnableMethodFactory creates runnable methods for a specified
+// object. This is particularly useful for generating callbacks for
+// non-reference counted objects when the factory is a member of the object.
+template<class T>
+class ScopedRunnableMethodFactory : public RevocableStore {
+ public:
+ explicit ScopedRunnableMethodFactory(T* object) : object_(object) { }
+
+ template <class Method, typename... Elements>
+ inline already_AddRefed<mozilla::Runnable>
+ NewRunnableMethod(Method method, Elements&&... elements) {
+ typedef mozilla::Tuple<typename mozilla::Decay<Elements>::Type...> ArgsTuple;
+ typedef RunnableMethod<Method, ArgsTuple> Runnable;
+ typedef typename ScopedTaskFactory<Runnable>::TaskWrapper TaskWrapper;
+
+ RefPtr<TaskWrapper> task = new TaskWrapper(this);
+ task->Init(object_, method, mozilla::MakeTuple(mozilla::Forward<Elements>(elements)...));
+ return task.forget();
+ }
+
+ protected:
+ template <class Method, class Params>
+ class RunnableMethod : public mozilla::Runnable {
+ public:
+ RunnableMethod() { }
+
+ void Init(T* obj, Method meth, Params&& params) {
+ obj_ = obj;
+ meth_ = meth;
+ params_ = mozilla::Forward<Params>(params);
+ }
+
+ NS_IMETHOD Run() override {
+ DispatchTupleToMethod(obj_, meth_, params_);
+ return NS_OK;
+ }
+
+ private:
+ T* MOZ_UNSAFE_REF("The validity of this pointer must be enforced by "
+ "external factors.") obj_;
+ Method meth_;
+ Params params_;
+
+ DISALLOW_EVIL_CONSTRUCTORS(RunnableMethod);
+ };
+
+ private:
+ T* object_;
+
+ DISALLOW_EVIL_CONSTRUCTORS(ScopedRunnableMethodFactory);
+};
+
+// General task implementations ------------------------------------------------
+
+// Task to delete an object
+template<class T>
+class DeleteTask : public mozilla::CancelableRunnable {
+ public:
+ explicit DeleteTask(T* obj) : obj_(obj) {
+ }
+ NS_IMETHOD Run() override {
+ delete obj_;
+ return NS_OK;
+ }
+ virtual nsresult Cancel() override {
+ obj_ = NULL;
+ return NS_OK;
+ }
+ private:
+ T* MOZ_UNSAFE_REF("The validity of this pointer must be enforced by "
+ "external factors.") obj_;
+};
+
+// RunnableMethodTraits --------------------------------------------------------
+//
+// This traits-class is used by RunnableMethod to manage the lifetime of the
+// callee object. By default, it is assumed that the callee supports AddRef
+// and Release methods. A particular class can specialize this template to
+// define other lifetime management. For example, if the callee is known to
+// live longer than the RunnableMethod object, then a RunnableMethodTraits
+// struct could be defined with empty RetainCallee and ReleaseCallee methods.
+
+template <class T>
+struct RunnableMethodTraits {
+ static void RetainCallee(T* obj) {
+ obj->AddRef();
+ }
+ static void ReleaseCallee(T* obj) {
+ obj->Release();
+ }
+};
+
+// This allows using the NewRunnableMethod() functions with a const pointer
+// to the callee object. See the similar support in nsRefPtr for a rationale
+// of why this is reasonable.
+template <class T>
+struct RunnableMethodTraits<const T> {
+ static void RetainCallee(const T* obj) {
+ const_cast<T*>(obj)->AddRef();
+ }
+ static void ReleaseCallee(const T* obj) {
+ const_cast<T*>(obj)->Release();
+ }
+};
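+
+// For example, a type whose instances are guaranteed to outlive any posted
+// tasks could opt out of refcounting with a specialization along these lines
+// (illustrative only; |LongLivedObject| is hypothetical):
+//
+//   template <>
+//   struct RunnableMethodTraits<LongLivedObject> {
+//     static void RetainCallee(LongLivedObject*) {}
+//     static void ReleaseCallee(LongLivedObject*) {}
+//   };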
+
+// RunnableMethod and RunnableFunction -----------------------------------------
+//
+// Runnable methods are a type of task that call a function on an object when
+// they are run. We implement both an object and a set of NewRunnableMethod and
+// NewRunnableFunction functions for convenience. These functions are
+// overloaded and will infer the template types, simplifying calling code.
+//
+// The template definitions all use the following names:
+// T - the class type of the object you're supplying
+// this is not needed for the Static version of the call
+// Method/Function - the signature of a pointer to the method or function you
+// want to call
+// Param - the parameter(s) to the method, possibly packed as a Tuple
+// A - the first parameter (if any) to the method
+//   B - the second parameter (if any) to the method
+//
+// Put these all together and you get an object that can call a method whose
+// signature is:
+// R T::MyFunction([A[, B]])
+//
+// Usage:
+// PostTask(NewRunnableMethod(object, &Object::method[, a[, b]])
+// PostTask(NewRunnableFunction(&function[, a[, b]])
+
+// RunnableMethod and NewRunnableMethod implementation -------------------------
+
+template <class T, class Method, class Params>
+class RunnableMethod : public mozilla::CancelableRunnable,
+ public RunnableMethodTraits<T> {
+ public:
+ RunnableMethod(T* obj, Method meth, Params&& params)
+ : obj_(obj), meth_(meth), params_(mozilla::Forward<Params>(params)) {
+ this->RetainCallee(obj_);
+ }
+ ~RunnableMethod() {
+ ReleaseCallee();
+ }
+
+ NS_IMETHOD Run() override {
+ if (obj_)
+ DispatchTupleToMethod(obj_, meth_, params_);
+ return NS_OK;
+ }
+
+ virtual nsresult Cancel() override {
+ ReleaseCallee();
+ return NS_OK;
+ }
+
+ private:
+ void ReleaseCallee() {
+ if (obj_) {
+ RunnableMethodTraits<T>::ReleaseCallee(obj_);
+ obj_ = nullptr;
+ }
+ }
+
+ // This is owning because of the RetainCallee and ReleaseCallee calls in the
+ // constructor and destructor.
+ T* MOZ_OWNING_REF obj_;
+ Method meth_;
+ Params params_;
+};
+
+namespace dont_add_new_uses_of_this {
+
+// Don't add new uses of this!!!!
+template <class T, class Method, typename... Args>
+inline already_AddRefed<mozilla::Runnable>
+NewRunnableMethod(T* object, Method method, Args&&... args) {
+ typedef mozilla::Tuple<typename mozilla::Decay<Args>::Type...> ArgsTuple;
+ RefPtr<mozilla::Runnable> t =
+ new RunnableMethod<T, Method, ArgsTuple>(object, method,
+ mozilla::MakeTuple(mozilla::Forward<Args>(args)...));
+ return t.forget();
+}
+
+} // namespace dont_add_new_uses_of_this
+
+// RunnableFunction and NewRunnableFunction implementation ---------------------
+
+template <class Function, class Params>
+class RunnableFunction : public mozilla::CancelableRunnable {
+ public:
+ RunnableFunction(Function function, Params&& params)
+ : function_(function), params_(mozilla::Forward<Params>(params)) {
+ }
+
+ ~RunnableFunction() {
+ }
+
+ NS_IMETHOD Run() override {
+ if (function_)
+ DispatchTupleToFunction(function_, params_);
+ return NS_OK;
+ }
+
+ virtual nsresult Cancel() override {
+ function_ = nullptr;
+ return NS_OK;
+ }
+
+ Function function_;
+ Params params_;
+};
+
+template <class Function, typename... Args>
+inline already_AddRefed<mozilla::CancelableRunnable>
+NewCancelableRunnableFunction(Function function, Args&&... args) {
+ typedef mozilla::Tuple<typename mozilla::Decay<Args>::Type...> ArgsTuple;
+ RefPtr<mozilla::CancelableRunnable> t =
+ new RunnableFunction<Function, ArgsTuple>(function,
+ mozilla::MakeTuple(mozilla::Forward<Args>(args)...));
+ return t.forget();
+}
+
+template <class Function, typename... Args>
+inline already_AddRefed<mozilla::Runnable>
+NewRunnableFunction(Function function, Args&&... args) {
+ typedef mozilla::Tuple<typename mozilla::Decay<Args>::Type...> ArgsTuple;
+ RefPtr<mozilla::Runnable> t =
+ new RunnableFunction<Function, ArgsTuple>(function,
+ mozilla::MakeTuple(mozilla::Forward<Args>(args)...));
+ return t.forget();
+}
+
+#endif // BASE_TASK_H_
diff --git a/ipc/chromium/src/base/thread.cc b/ipc/chromium/src/base/thread.cc
new file mode 100644
index 000000000..feec3aedf
--- /dev/null
+++ b/ipc/chromium/src/base/thread.cc
@@ -0,0 +1,200 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/thread.h"
+
+#include "base/string_util.h"
+#include "base/thread_local.h"
+#include "base/waitable_event.h"
+#include "GeckoProfiler.h"
+#include "mozilla/IOInterposer.h"
+#include "nsThreadUtils.h"
+
+#ifdef MOZ_TASK_TRACER
+#include "GeckoTaskTracer.h"
+#endif
+
+namespace base {
+
+// This task is used to trigger the message loop to exit.
+class ThreadQuitTask : public mozilla::Runnable {
+ public:
+ NS_IMETHOD Run() override {
+ MessageLoop::current()->Quit();
+ Thread::SetThreadWasQuitProperly(true);
+ return NS_OK;
+ }
+};
+
+// Used to pass data to ThreadMain. This structure is allocated on the stack
+// from within StartWithOptions.
+struct Thread::StartupData {
+ // We get away with a const reference here because of how we are allocated.
+ const Thread::Options& options;
+
+ // Used to synchronize thread startup.
+ WaitableEvent event;
+
+ explicit StartupData(const Options& opt)
+ : options(opt),
+ event(false, false) {}
+};
+
+Thread::Thread(const char *name)
+ : startup_data_(NULL),
+ thread_(0),
+ message_loop_(NULL),
+ thread_id_(0),
+ name_(name) {
+ MOZ_COUNT_CTOR(base::Thread);
+}
+
+Thread::~Thread() {
+ MOZ_COUNT_DTOR(base::Thread);
+ Stop();
+}
+
+namespace {
+
+// We use this thread-local variable to record whether or not a thread exited
+// because its Stop method was called. This allows us to catch cases where
+// MessageLoop::Quit() is called directly, which is unexpected when using a
+// Thread to set up and run a MessageLoop.
+
+static base::ThreadLocalBoolean& get_tls_bool() {
+ static base::ThreadLocalBoolean tls_ptr;
+ return tls_ptr;
+}
+
+} // namespace
+
+void Thread::SetThreadWasQuitProperly(bool flag) {
+ get_tls_bool().Set(flag);
+}
+
+bool Thread::GetThreadWasQuitProperly() {
+ bool quit_properly = true;
+#ifndef NDEBUG
+ quit_properly = get_tls_bool().Get();
+#endif
+ return quit_properly;
+}
+
+bool Thread::Start() {
+ return StartWithOptions(Options());
+}
+
+bool Thread::StartWithOptions(const Options& options) {
+ DCHECK(!message_loop_);
+
+ SetThreadWasQuitProperly(false);
+
+ StartupData startup_data(options);
+ startup_data_ = &startup_data;
+
+ if (!PlatformThread::Create(options.stack_size, this, &thread_)) {
+ DLOG(ERROR) << "failed to create thread";
+ startup_data_ = NULL; // Record that we failed to start.
+ return false;
+ }
+
+ // Wait for the thread to start and initialize message_loop_
+ startup_data.event.Wait();
+
+ DCHECK(message_loop_);
+ return true;
+}
+
+void Thread::Stop() {
+ if (!thread_was_started())
+ return;
+
+  // Must not be called on the thread itself; call it from the owning thread.
+ DCHECK_NE(thread_id_, PlatformThread::CurrentId());
+
+ // StopSoon may have already been called.
+ if (message_loop_) {
+ RefPtr<ThreadQuitTask> task = new ThreadQuitTask();
+ message_loop_->PostTask(task.forget());
+ }
+
+ // Wait for the thread to exit. It should already have terminated but make
+ // sure this assumption is valid.
+ //
+ // TODO(darin): Unfortunately, we need to keep message_loop_ around until
+ // the thread exits. Some consumers are abusing the API. Make them stop.
+ //
+ PlatformThread::Join(thread_);
+
+ // The thread can't receive messages anymore.
+ message_loop_ = NULL;
+
+ // The thread no longer needs to be joined.
+ startup_data_ = NULL;
+}
+
+void Thread::StopSoon() {
+ if (!message_loop_)
+ return;
+
+  // Must not be called on the thread itself; call it from the owning thread.
+ DCHECK_NE(thread_id_, PlatformThread::CurrentId());
+
+ // We had better have a message loop at this point! If we do not, then it
+ // most likely means that the thread terminated unexpectedly, probably due
+ // to someone calling Quit() on our message loop directly.
+ DCHECK(message_loop_);
+
+ RefPtr<ThreadQuitTask> task = new ThreadQuitTask();
+ message_loop_->PostTask(task.forget());
+}
+
+void Thread::ThreadMain() {
+ char aLocal;
+ profiler_register_thread(name_.c_str(), &aLocal);
+ mozilla::IOInterposer::RegisterCurrentThread();
+
+ // The message loop for this thread.
+ MessageLoop message_loop(startup_data_->options.message_loop_type,
+ NS_GetCurrentThread());
+
+ // Complete the initialization of our Thread object.
+ thread_id_ = PlatformThread::CurrentId();
+ PlatformThread::SetName(name_.c_str());
+ message_loop.set_thread_name(name_);
+ message_loop.set_hang_timeouts(startup_data_->options.transient_hang_timeout,
+ startup_data_->options.permanent_hang_timeout);
+ message_loop_ = &message_loop;
+
+ // Let the thread do extra initialization.
+ // Let's do this before signaling we are started.
+ Init();
+
+ startup_data_->event.Signal();
+ // startup_data_ can't be touched anymore since the starting thread is now
+ // unlocked.
+
+ message_loop.Run();
+
+ // Let the thread do extra cleanup.
+ CleanUp();
+
+ // Assert that MessageLoop::Quit was called by ThreadQuitTask.
+ DCHECK(GetThreadWasQuitProperly());
+
+ mozilla::IOInterposer::UnregisterCurrentThread();
+ profiler_unregister_thread();
+
+#ifdef MOZ_TASK_TRACER
+ mozilla::tasktracer::FreeTraceInfo();
+#endif
+
+ // We can't receive messages anymore.
+ message_loop_ = NULL;
+ thread_id_ = 0;
+}
+
+} // namespace base
diff --git a/ipc/chromium/src/base/thread.h b/ipc/chromium/src/base/thread.h
new file mode 100644
index 000000000..9e2a9bd18
--- /dev/null
+++ b/ipc/chromium/src/base/thread.h
@@ -0,0 +1,176 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_THREAD_H_
+#define BASE_THREAD_H_
+
+#include <stdint.h>
+#include <string>
+
+#include "base/message_loop.h"
+#include "base/platform_thread.h"
+
+namespace base {
+
+// A simple thread abstraction that establishes a MessageLoop on a new thread.
+// The consumer uses the MessageLoop of the thread to cause code to execute on
+// the thread. When this object is destroyed the thread is terminated. All
+// pending tasks queued on the thread's message loop will run to completion
+// before the thread is terminated.
+class Thread : PlatformThread::Delegate {
+ public:
+ struct Options {
+ // Specifies the type of message loop that will be allocated on the thread.
+ MessageLoop::Type message_loop_type;
+
+ // Specifies the maximum stack size that the thread is allowed to use.
+ // This does not necessarily correspond to the thread's initial stack size.
+ // A value of 0 indicates that the default maximum should be used.
+ size_t stack_size;
+
+ // Specifies the transient and permanent hang timeouts for background hang
+ // monitoring. A value of 0 indicates there is no timeout.
+ uint32_t transient_hang_timeout;
+ uint32_t permanent_hang_timeout;
+
+ Options()
+ : message_loop_type(MessageLoop::TYPE_DEFAULT)
+ , stack_size(0)
+ , transient_hang_timeout(0)
+ , permanent_hang_timeout(0) {}
+ Options(MessageLoop::Type type, size_t size)
+ : message_loop_type(type)
+ , stack_size(size)
+ , transient_hang_timeout(0)
+ , permanent_hang_timeout(0) {}
+ };
+
+ // Constructor.
+ // name is a display string to identify the thread.
+ explicit Thread(const char *name);
+
+ // Destroys the thread, stopping it if necessary.
+ //
+ // NOTE: If you are subclassing from Thread, and you wish for your CleanUp
+ // method to be called, then you need to call Stop() from your destructor.
+ //
+ virtual ~Thread();
+
+ // Starts the thread. Returns true if the thread was successfully started;
+ // otherwise, returns false. Upon successful return, the message_loop()
+ // getter will return non-null.
+ //
+ // Note: This function can't be called on Windows with the loader lock held;
+ // i.e. during a DllMain, global object construction or destruction, atexit()
+ // callback.
+ bool Start();
+
+  // Starts the thread. Behaves exactly like Start, except that it allows the
+  // caller to override the default options.
+ //
+ // Note: This function can't be called on Windows with the loader lock held;
+ // i.e. during a DllMain, global object construction or destruction, atexit()
+ // callback.
+ bool StartWithOptions(const Options& options);
+
+ // Signals the thread to exit and returns once the thread has exited. After
+ // this method returns, the Thread object is completely reset and may be used
+ // as if it were newly constructed (i.e., Start may be called again).
+ //
+ // Stop may be called multiple times and is simply ignored if the thread is
+ // already stopped.
+ //
+ // NOTE: This method is optional. It is not strictly necessary to call this
+ // method as the Thread's destructor will take care of stopping the thread if
+ // necessary.
+ //
+ void Stop();
+
+ // Signals the thread to exit in the near future.
+ //
+ // WARNING: This function is not meant to be commonly used. Use at your own
+ // risk. Calling this function will cause message_loop() to become invalid in
+  // the near future. This function was created to work around a specific
+  // deadlock on Windows with the printer worker thread. In any other case,
+  // Stop() should be used.
+ //
+ // StopSoon should not be called multiple times as it is risky to do so. It
+ // could cause a timing issue in message_loop() access. Call Stop() to reset
+ // the thread object once it is known that the thread has quit.
+ void StopSoon();
+
+ // Returns the message loop for this thread. Use the MessageLoop's
+ // PostTask methods to execute code on the thread. This only returns
+ // non-null after a successful call to Start. After Stop has been called,
+ // this will return NULL.
+ //
+ // NOTE: You must not call this MessageLoop's Quit method directly. Use
+ // the Thread's Stop method instead.
+ //
+ MessageLoop* message_loop() const { return message_loop_; }
+
+  // Returns the name of this thread (for display in the debugger too).
+ const std::string &thread_name() { return name_; }
+
+ // The native thread handle.
+ PlatformThreadHandle thread_handle() { return thread_; }
+
+ // The thread ID.
+ PlatformThreadId thread_id() const { return thread_id_; }
+
+  // Resets the thread ID to that of the current thread.
+ PlatformThreadId reset_thread_id() {
+ thread_id_ = PlatformThread::CurrentId();
+ return thread_id_;
+ }
+
+ // Returns true if the thread has been started, and not yet stopped.
+ // When a thread is running, the thread_id_ is non-zero.
+ bool IsRunning() const { return thread_id_ != 0; }
+
+ protected:
+ // Called just prior to starting the message loop
+ virtual void Init() {}
+
+ // Called just after the message loop ends
+ virtual void CleanUp() {}
+
+ static void SetThreadWasQuitProperly(bool flag);
+ static bool GetThreadWasQuitProperly();
+
+ private:
+ // PlatformThread::Delegate methods:
+ virtual void ThreadMain();
+
+ // We piggy-back on the startup_data_ member to know if we successfully
+ // started the thread. This way we know that we need to call Join.
+ bool thread_was_started() const { return startup_data_ != NULL; }
+
+ // Used to pass data to ThreadMain.
+ struct StartupData;
+ StartupData* startup_data_;
+
+ // The thread's handle.
+ PlatformThreadHandle thread_;
+
+ // The thread's message loop. Valid only while the thread is alive. Set
+ // by the created thread.
+ MessageLoop* message_loop_;
+
+ // Our thread's ID.
+ PlatformThreadId thread_id_;
+
+ // The name of the thread. Used for debugging purposes.
+ std::string name_;
+
+ friend class ThreadQuitTask;
+
+ DISALLOW_COPY_AND_ASSIGN(Thread);
+};
+
+} // namespace base
+
+#endif // BASE_THREAD_H_
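Pulling this header's pieces together, a hedged usage sketch (WorkerThread and DoWork are invented names; base/task.h is assumed to be included for NewRunnableFunction):

    class WorkerThread : public base::Thread {
     public:
      WorkerThread() : base::Thread("Worker") {}
      // Per the destructor note above: call Stop() here so CleanUp() runs.
      ~WorkerThread() { Stop(); }

     protected:
      void Init() override { /* per-thread setup before the loop starts */ }
      void CleanUp() override { /* per-thread teardown after the loop ends */ }
    };

    static void DoWork() { /* hypothetical work item */ }

    WorkerThread worker;
    if (worker.StartWithOptions(
            base::Thread::Options(MessageLoop::TYPE_DEFAULT, 0))) {
      worker.message_loop()->PostTask(NewRunnableFunction(&DoWork));
    }
    // worker.Stop() (or the destructor) drains pending tasks and joins.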
diff --git a/ipc/chromium/src/base/thread_local.h b/ipc/chromium/src/base/thread_local.h
new file mode 100644
index 000000000..d5c95a8bc
--- /dev/null
+++ b/ipc/chromium/src/base/thread_local.h
@@ -0,0 +1,124 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// WARNING: Thread local storage is a bit tricky to get right. Please make
+// sure that this is really the proper solution for what you're trying to
+// achieve. Don't prematurely optimize, most likely you can just use a Lock.
+//
+// These classes implement a wrapper around the platform's TLS storage
+// mechanism. On construction, they will allocate a TLS slot, and free the
+// TLS slot on destruction. No memory management (creation or destruction) is
+// handled. This means for uses of ThreadLocalPointer, you must correctly
+// manage the memory yourself, these classes will not destroy the pointer for
+// you. There are no at-thread-exit actions taken by these classes.
+//
+// ThreadLocalPointer<Type> wraps a Type*. It performs no creation or
+// destruction, so memory management must be handled elsewhere. The first call
+// to Get() on a thread will return NULL. You can update the pointer with a
+// call to Set().
+//
+// ThreadLocalBoolean wraps a bool. It will default to false if it has never
+// been set otherwise with Set().
+//
+// Thread Safety: An instance of ThreadLocalStorage is completely thread safe
+// once it has been created. If you want to dynamically create an instance,
+// you must of course properly deal with safety and race conditions. This
+// means a function-level static initializer is generally inappropriate.
+//
+// Example usage:
+// // My class is logically attached to a single thread. We cache a pointer
+// // on the thread it was created on, so we can implement current().
+// MyClass::MyClass() {
+// DCHECK(Singleton<ThreadLocalPointer<MyClass> >::get()->Get() == NULL);
+// Singleton<ThreadLocalPointer<MyClass> >::get()->Set(this);
+// }
+//
+// MyClass::~MyClass() {
+// DCHECK(Singleton<ThreadLocalPointer<MyClass> >::get()->Get() != NULL);
+// Singleton<ThreadLocalPointer<MyClass> >::get()->Set(NULL);
+// }
+//
+// // Return the current MyClass associated with the calling thread, can be
+// // NULL if there isn't a MyClass associated.
+// MyClass* MyClass::current() {
+// return Singleton<ThreadLocalPointer<MyClass> >::get()->Get();
+// }
+
+#ifndef BASE_THREAD_LOCAL_H_
+#define BASE_THREAD_LOCAL_H_
+
+#include "base/basictypes.h"
+
+#if defined(OS_POSIX)
+#include <pthread.h>
+#endif
+
+namespace base {
+
+// Helper functions that abstract the cross-platform APIs. Do not use directly.
+struct ThreadLocalPlatform {
+#if defined(OS_WIN)
+ typedef int SlotType;
+#elif defined(OS_POSIX)
+ typedef pthread_key_t SlotType;
+#endif
+
+ static void AllocateSlot(SlotType& slot);
+ static void FreeSlot(SlotType& slot);
+ static void* GetValueFromSlot(SlotType& slot);
+ static void SetValueInSlot(SlotType& slot, void* value);
+};
+
+template <typename Type>
+class ThreadLocalPointer {
+ public:
+ ThreadLocalPointer() : slot_() {
+ ThreadLocalPlatform::AllocateSlot(slot_);
+ }
+
+ ~ThreadLocalPointer() {
+ ThreadLocalPlatform::FreeSlot(slot_);
+ }
+
+ Type* Get() {
+ return static_cast<Type*>(ThreadLocalPlatform::GetValueFromSlot(slot_));
+ }
+
+ void Set(Type* ptr) {
+ ThreadLocalPlatform::SetValueInSlot(slot_, ptr);
+ }
+
+ private:
+ typedef ThreadLocalPlatform::SlotType SlotType;
+
+ SlotType slot_;
+
+ DISALLOW_COPY_AND_ASSIGN(ThreadLocalPointer<Type>);
+};
+
+class ThreadLocalBoolean {
+ public:
+ ThreadLocalBoolean() { }
+ ~ThreadLocalBoolean() { }
+
+ bool Get() {
+ return tlp_.Get() != NULL;
+ }
+
+ void Set(bool val) {
+ uintptr_t intVal = val ? 1 : 0;
+ tlp_.Set(reinterpret_cast<void*>(intVal));
+ }
+
+ private:
+ ThreadLocalPointer<void> tlp_;
+
+ DISALLOW_COPY_AND_ASSIGN(ThreadLocalBoolean);
+};
+
+} // namespace base
+
+#endif // BASE_THREAD_LOCAL_H_
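A short hedged sketch of ThreadLocalBoolean, mirroring how thread.cc uses it for its quit-properly flag (the names here are invented for illustration):

    static base::ThreadLocalBoolean& InWorkerFlag() {
      // Function-level static; see the thread-safety caveat above before
      // copying this pattern into code where first use may race.
      static base::ThreadLocalBoolean flag;
      return flag;
    }

    void MarkCurrentThreadAsWorker() { InWorkerFlag().Set(true); }

    bool IsCurrentThreadWorker() {
      // Defaults to false on any thread that never called Set().
      return InWorkerFlag().Get();
    }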
diff --git a/ipc/chromium/src/base/thread_local_posix.cc b/ipc/chromium/src/base/thread_local_posix.cc
new file mode 100644
index 000000000..050a6e4b9
--- /dev/null
+++ b/ipc/chromium/src/base/thread_local_posix.cc
@@ -0,0 +1,38 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/thread_local.h"
+
+#include <pthread.h>
+
+#include "base/logging.h"
+
+namespace base {
+
+// static
+void ThreadLocalPlatform::AllocateSlot(SlotType& slot) {
+ int error = pthread_key_create(&slot, NULL);
+ CHECK(error == 0);
+}
+
+// static
+void ThreadLocalPlatform::FreeSlot(SlotType& slot) {
+ int error = pthread_key_delete(slot);
+ DCHECK(error == 0);
+}
+
+// static
+void* ThreadLocalPlatform::GetValueFromSlot(SlotType& slot) {
+ return pthread_getspecific(slot);
+}
+
+// static
+void ThreadLocalPlatform::SetValueInSlot(SlotType& slot, void* value) {
+ int error = pthread_setspecific(slot, value);
+ CHECK(error == 0);
+}
+
+} // namespace base
diff --git a/ipc/chromium/src/base/thread_local_storage.h b/ipc/chromium/src/base/thread_local_storage.h
new file mode 100644
index 000000000..58d4e6356
--- /dev/null
+++ b/ipc/chromium/src/base/thread_local_storage.h
@@ -0,0 +1,96 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_THREAD_LOCAL_STORAGE_H_
+#define BASE_THREAD_LOCAL_STORAGE_H_
+
+#include "base/basictypes.h"
+
+#if defined(OS_POSIX)
+#include <pthread.h>
+#endif
+
+// Wrapper for thread local storage. This class doesn't do much except provide
+// an API for portability.
+class ThreadLocalStorage {
+ public:
+
+ // Prototype for the TLS destructor function, which can be optionally used to
+ // cleanup thread local storage on thread exit. 'value' is the data that is
+ // stored in thread local storage.
+ typedef void (*TLSDestructorFunc)(void* value);
+
+ // A key representing one value stored in TLS.
+ class Slot {
+ public:
+ explicit Slot(TLSDestructorFunc destructor = NULL);
+
+ // This constructor should be used for statics.
+ // It returns an uninitialized Slot.
+ explicit Slot(base::LinkerInitialized x) {}
+
+ // Set up the TLS slot. Called by the constructor.
+ // 'destructor' is a pointer to a function to perform per-thread cleanup of
+ // this object. If set to NULL, no cleanup is done for this TLS slot.
+ // Returns false on error.
+ bool Initialize(TLSDestructorFunc destructor);
+
+ // Free a previously allocated TLS 'slot'.
+ // If a destructor was set for this slot, removes
+ // the destructor so that remaining threads exiting
+ // will not free data.
+ void Free();
+
+ // Get the thread-local value stored in slot 'slot'.
+ // Values are guaranteed to initially be zero.
+ void* Get() const;
+
+ // Set the thread-local value stored in slot 'slot' to
+ // value 'value'.
+ void Set(void* value);
+
+ bool initialized() const { return initialized_; }
+
+ private:
+ // The internals of this struct should be considered private.
+ bool initialized_;
+#if defined(OS_WIN)
+ int slot_;
+#elif defined(OS_POSIX)
+ pthread_key_t key_;
+#endif
+
+ DISALLOW_COPY_AND_ASSIGN(Slot);
+ };
+
+#if defined(OS_WIN)
+  // Function called on thread exit to call TLS
+ // destructor functions. This function is used internally.
+ static void ThreadExit();
+
+ private:
+ // Function to lazily initialize our thread local storage.
+ static void **Initialize();
+
+ private:
+ // The maximum number of 'slots' in our thread local storage stack.
+ // For now, this is fixed. We could either increase statically, or
+ // we could make it dynamic in the future.
+ static const int kThreadLocalStorageSize = 64;
+
+ static long tls_key_;
+ static long tls_max_;
+ static TLSDestructorFunc tls_destructors_[kThreadLocalStorageSize];
+#endif // OS_WIN
+
+ DISALLOW_COPY_AND_ASSIGN(ThreadLocalStorage);
+};
+
+// Temporary backwards-compatible name.
+// TODO(evanm): replace all usage of TLSSlot.
+typedef ThreadLocalStorage::Slot TLSSlot;
+
+#endif // BASE_THREAD_LOCAL_STORAGE_H_
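A hedged sketch of the Slot API with a per-thread destructor (CacheEntry and the helper functions are invented; error handling is elided):

    struct CacheEntry { int hits = 0; };

    static void DeleteCacheEntry(void* value) {
      // Invoked once per exiting thread that set a non-NULL value.
      delete static_cast<CacheEntry*>(value);
    }

    static ThreadLocalStorage::Slot& CacheSlot() {
      static ThreadLocalStorage::Slot slot(&DeleteCacheEntry);
      return slot;
    }

    CacheEntry* GetThreadCacheEntry() {
      CacheEntry* entry = static_cast<CacheEntry*>(CacheSlot().Get());
      if (!entry) {
        entry = new CacheEntry();
        CacheSlot().Set(entry);  // Values start out as NULL per the comment above.
      }
      return entry;
    }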
diff --git a/ipc/chromium/src/base/thread_local_storage_posix.cc b/ipc/chromium/src/base/thread_local_storage_posix.cc
new file mode 100644
index 000000000..6f8367b66
--- /dev/null
+++ b/ipc/chromium/src/base/thread_local_storage_posix.cc
@@ -0,0 +1,46 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/thread_local_storage.h"
+
+#include "base/logging.h"
+
+ThreadLocalStorage::Slot::Slot(TLSDestructorFunc destructor)
+ : initialized_(false) {
+ Initialize(destructor);
+}
+
+bool ThreadLocalStorage::Slot::Initialize(TLSDestructorFunc destructor) {
+ DCHECK(!initialized_);
+ int error = pthread_key_create(&key_, destructor);
+ if (error) {
+ NOTREACHED();
+ return false;
+ }
+
+ initialized_ = true;
+ return true;
+}
+
+void ThreadLocalStorage::Slot::Free() {
+ DCHECK(initialized_);
+ int error = pthread_key_delete(key_);
+ if (error)
+ NOTREACHED();
+ initialized_ = false;
+}
+
+void* ThreadLocalStorage::Slot::Get() const {
+ DCHECK(initialized_);
+ return pthread_getspecific(key_);
+}
+
+void ThreadLocalStorage::Slot::Set(void* value) {
+ DCHECK(initialized_);
+ int error = pthread_setspecific(key_, value);
+ if (error)
+ NOTREACHED();
+}
diff --git a/ipc/chromium/src/base/thread_local_storage_win.cc b/ipc/chromium/src/base/thread_local_storage_win.cc
new file mode 100644
index 000000000..edd7a0540
--- /dev/null
+++ b/ipc/chromium/src/base/thread_local_storage_win.cc
@@ -0,0 +1,188 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/thread_local_storage.h"
+
+#include <windows.h>
+
+#include "base/logging.h"
+
+// In order to make TLS destructors work, we need to keep function
+// pointers to the destructor for each TLS that we allocate.
+// We make this work by allocating a single OS-level TLS, which
+// contains an array of slots for the application to use. In
+// parallel, we also allocate an array of destructors, which we
+// keep track of and call when threads terminate.
+
+// tls_key_ is the one native TLS that we use. It stores our
+// table.
+long ThreadLocalStorage::tls_key_ = TLS_OUT_OF_INDEXES;
+
+// tls_max_ is the high-water-mark of allocated thread local storage.
+// We intentionally skip 0 so that it is not confused with an
+// unallocated TLS slot.
+long ThreadLocalStorage::tls_max_ = 1;
+
+// An array of destructor function pointers for the slots. If
+// a slot has a destructor, it will be stored in its corresponding
+// entry in this array.
+ThreadLocalStorage::TLSDestructorFunc
+ ThreadLocalStorage::tls_destructors_[kThreadLocalStorageSize];
+
+void** ThreadLocalStorage::Initialize() {
+ if (tls_key_ == TLS_OUT_OF_INDEXES) {
+ long value = TlsAlloc();
+ DCHECK(value != TLS_OUT_OF_INDEXES);
+
+ // Atomically test-and-set the tls_key. If the key is TLS_OUT_OF_INDEXES,
+ // go ahead and set it. Otherwise, do nothing, as another
+ // thread already did our dirty work.
+ if (InterlockedCompareExchange(&tls_key_, value, TLS_OUT_OF_INDEXES) !=
+ TLS_OUT_OF_INDEXES) {
+ // We've been shortcut. Another thread replaced tls_key_ first so we need
+ // to destroy our index and use the one the other thread got first.
+ TlsFree(value);
+ }
+ }
+ DCHECK(TlsGetValue(tls_key_) == NULL);
+
+ // Create an array to store our data.
+ void** tls_data = new void*[kThreadLocalStorageSize];
+ memset(tls_data, 0, sizeof(void*[kThreadLocalStorageSize]));
+ TlsSetValue(tls_key_, tls_data);
+ return tls_data;
+}
+
+ThreadLocalStorage::Slot::Slot(TLSDestructorFunc destructor)
+ : initialized_(false) {
+ Initialize(destructor);
+}
+
+bool ThreadLocalStorage::Slot::Initialize(TLSDestructorFunc destructor) {
+ if (tls_key_ == TLS_OUT_OF_INDEXES || !TlsGetValue(tls_key_))
+ ThreadLocalStorage::Initialize();
+
+ // Grab a new slot.
+ slot_ = InterlockedIncrement(&tls_max_) - 1;
+ if (slot_ >= kThreadLocalStorageSize) {
+ NOTREACHED();
+ return false;
+ }
+
+ // Setup our destructor.
+ tls_destructors_[slot_] = destructor;
+ initialized_ = true;
+ return true;
+}
+
+void ThreadLocalStorage::Slot::Free() {
+ // At this time, we don't reclaim old indices for TLS slots.
+ // So all we need to do is wipe the destructor.
+ tls_destructors_[slot_] = NULL;
+ initialized_ = false;
+}
+
+void* ThreadLocalStorage::Slot::Get() const {
+ void** tls_data = static_cast<void**>(TlsGetValue(tls_key_));
+ if (!tls_data)
+ tls_data = ThreadLocalStorage::Initialize();
+ DCHECK(slot_ >= 0 && slot_ < kThreadLocalStorageSize);
+ return tls_data[slot_];
+}
+
+void ThreadLocalStorage::Slot::Set(void* value) {
+ void** tls_data = static_cast<void**>(TlsGetValue(tls_key_));
+ if (!tls_data)
+ tls_data = ThreadLocalStorage::Initialize();
+ DCHECK(slot_ >= 0 && slot_ < kThreadLocalStorageSize);
+ tls_data[slot_] = value;
+}
+
+void ThreadLocalStorage::ThreadExit() {
+ if (tls_key_ == TLS_OUT_OF_INDEXES)
+ return;
+
+ void** tls_data = static_cast<void**>(TlsGetValue(tls_key_));
+
+ // Maybe we have never initialized TLS for this thread.
+ if (!tls_data)
+ return;
+
+ for (int slot = 0; slot < tls_max_; slot++) {
+ if (tls_destructors_[slot] != NULL) {
+ void* value = tls_data[slot];
+ tls_destructors_[slot](value);
+ }
+ }
+
+ delete[] tls_data;
+
+ // In case there are other "onexit" handlers...
+ TlsSetValue(tls_key_, NULL);
+}
+
+// Thread Termination Callbacks.
+// Windows doesn't support a per-thread destructor with its
+// TLS primitives. So, we build it manually by inserting a
+// function to be called on each thread's exit.
+// This magic is from http://www.codeproject.com/threads/tls.asp
+// and it works for VC++ 7.0 and later.
+
+#ifdef _WIN64
+
+// This makes the linker create the TLS directory if it's not already
+// there. (e.g. if __declspec(thread) is not used).
+#pragma comment(linker, "/INCLUDE:_tls_used")
+
+#else // _WIN64
+
+// This makes the linker create the TLS directory if it's not already
+// there. (e.g. if __declspec(thread) is not used).
+#pragma comment(linker, "/INCLUDE:__tls_used")
+
+#endif // _WIN64
+
+// Static callback function to call with each thread termination.
+void NTAPI OnThreadExit(PVOID module, DWORD reason, PVOID reserved)
+{
+ // On XP SP0 & SP1, the DLL_PROCESS_ATTACH is never seen. It is sent on SP2+
+ // and on W2K and W2K3. So don't assume it is sent.
+ if (DLL_THREAD_DETACH == reason || DLL_PROCESS_DETACH == reason)
+ ThreadLocalStorage::ThreadExit();
+}
+
+// .CRT$XLA to .CRT$XLZ is an array of PIMAGE_TLS_CALLBACK pointers that are
+// called automatically by the OS loader code (not the CRT) when the module is
+// loaded and on thread creation. They are NOT called if the module has been
+// loaded by a LoadLibrary() call. It must have implicitly been loaded at
+// process startup.
+// By implicitly loaded, I mean that it is directly referenced by the main EXE
+// or by one of its dependent DLLs. Delay-loaded DLL doesn't count as being
+// implicitly loaded.
+//
+// See VC\crt\src\tlssup.c for reference.
+#ifdef _WIN64
+
+// .CRT section is merged with .rdata on x64 so it must be constant data.
+#pragma const_seg(".CRT$XLB")
+// When defining a const variable, it must have external linkage to be sure the
+// linker doesn't discard it. If this value is discarded, the OnThreadExit
+// function will never be called.
+extern const PIMAGE_TLS_CALLBACK p_thread_callback;
+const PIMAGE_TLS_CALLBACK p_thread_callback = OnThreadExit;
+
+// Reset the default section.
+#pragma const_seg()
+
+#else // _WIN64
+
+#pragma data_seg(".CRT$XLB")
+PIMAGE_TLS_CALLBACK p_thread_callback = OnThreadExit;
+
+// Reset the default section.
+#pragma data_seg()
+
+#endif // _WIN64
diff --git a/ipc/chromium/src/base/thread_local_win.cc b/ipc/chromium/src/base/thread_local_win.cc
new file mode 100644
index 000000000..3f8c465d0
--- /dev/null
+++ b/ipc/chromium/src/base/thread_local_win.cc
@@ -0,0 +1,40 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/thread_local.h"
+
+#include <windows.h>
+
+#include "base/logging.h"
+
+namespace base {
+
+// static
+void ThreadLocalPlatform::AllocateSlot(SlotType& slot) {
+ slot = TlsAlloc();
+ CHECK(slot != TLS_OUT_OF_INDEXES);
+}
+
+// static
+void ThreadLocalPlatform::FreeSlot(SlotType& slot) {
+ if (!TlsFree(slot)) {
+ NOTREACHED() << "Failed to deallocate tls slot with TlsFree().";
+ }
+}
+
+// static
+void* ThreadLocalPlatform::GetValueFromSlot(SlotType& slot) {
+ return TlsGetValue(slot);
+}
+
+// static
+void ThreadLocalPlatform::SetValueInSlot(SlotType& slot, void* value) {
+ if (!TlsSetValue(slot, value)) {
+ CHECK(false) << "Failed to TlsSetValue().";
+ }
+}
+
+} // namespace base
diff --git a/ipc/chromium/src/base/time.cc b/ipc/chromium/src/base/time.cc
new file mode 100644
index 000000000..dca9c83d9
--- /dev/null
+++ b/ipc/chromium/src/base/time.cc
@@ -0,0 +1,104 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/time.h"
+#include "base/string_util.h"
+#include "base/sys_string_conversions.h"
+#include "prtime.h"
+
+#include "base/logging.h"
+
+namespace base {
+
+// TimeDelta ------------------------------------------------------------------
+
+int TimeDelta::InDays() const {
+ return static_cast<int>(delta_ / Time::kMicrosecondsPerDay);
+}
+
+int TimeDelta::InHours() const {
+ return static_cast<int>(delta_ / Time::kMicrosecondsPerHour);
+}
+
+int TimeDelta::InMinutes() const {
+ return static_cast<int>(delta_ / Time::kMicrosecondsPerMinute);
+}
+
+double TimeDelta::InSecondsF() const {
+ return static_cast<double>(delta_) / Time::kMicrosecondsPerSecond;
+}
+
+int64_t TimeDelta::InSeconds() const {
+ return delta_ / Time::kMicrosecondsPerSecond;
+}
+
+double TimeDelta::InMillisecondsF() const {
+ return static_cast<double>(delta_) / Time::kMicrosecondsPerMillisecond;
+}
+
+int64_t TimeDelta::InMilliseconds() const {
+ return delta_ / Time::kMicrosecondsPerMillisecond;
+}
+
+int64_t TimeDelta::InMicroseconds() const {
+ return delta_;
+}
+
+// Time -----------------------------------------------------------------------
+
+// static
+Time Time::FromTimeT(time_t tt) {
+ if (tt == 0)
+ return Time(); // Preserve 0 so we can tell it doesn't exist.
+ return Time((tt * kMicrosecondsPerSecond) + kTimeTToMicrosecondsOffset);
+}
+
+time_t Time::ToTimeT() const {
+ if (us_ == 0)
+ return 0; // Preserve 0 so we can tell it doesn't exist.
+ return (us_ - kTimeTToMicrosecondsOffset) / kMicrosecondsPerSecond;
+}
+
+// static
+Time Time::FromDoubleT(double dt) {
+ return Time((dt * static_cast<double>(kMicrosecondsPerSecond)) +
+ kTimeTToMicrosecondsOffset);
+}
+
+double Time::ToDoubleT() const {
+ if (us_ == 0)
+ return 0; // Preserve 0 so we can tell it doesn't exist.
+ return (static_cast<double>(us_ - kTimeTToMicrosecondsOffset) /
+ static_cast<double>(kMicrosecondsPerSecond));
+}
+
+Time Time::LocalMidnight() const {
+ Exploded exploded;
+ LocalExplode(&exploded);
+ exploded.hour = 0;
+ exploded.minute = 0;
+ exploded.second = 0;
+ exploded.millisecond = 0;
+ return FromLocalExploded(exploded);
+}
+
+// static
+bool Time::FromString(const wchar_t* time_string, Time* parsed_time) {
+ DCHECK((time_string != NULL) && (parsed_time != NULL));
+ std::string ascii_time_string = SysWideToUTF8(time_string);
+ if (ascii_time_string.length() == 0)
+ return false;
+ PRTime result_time = 0;
+ PRStatus result = PR_ParseTimeString(ascii_time_string.c_str(), PR_FALSE,
+ &result_time);
+ if (PR_SUCCESS != result)
+ return false;
+ result_time += kTimeTToMicrosecondsOffset;
+ *parsed_time = Time(result_time);
+ return true;
+}
+
+} // namespace base
diff --git a/ipc/chromium/src/base/time.h b/ipc/chromium/src/base/time.h
new file mode 100644
index 000000000..55bc7b451
--- /dev/null
+++ b/ipc/chromium/src/base/time.h
@@ -0,0 +1,486 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Time represents an absolute point in time, internally represented as
+// microseconds (s/1,000,000) since a platform-dependent epoch. Each
+// platform's epoch, along with other system-dependent clock interface
+// routines, is defined in time_PLATFORM.cc.
+//
+// TimeDelta represents a duration of time, internally represented in
+// microseconds.
+//
+// TimeTicks represents an abstract time that is always incrementing for use
+// in measuring time durations. It is internally represented in microseconds.
+// It can not be converted to a human-readable time, but is guaranteed not to
+// decrease (if the user changes the computer clock, Time::Now() may actually
+// decrease or jump).
+//
+// These classes are represented as only a 64-bit value, so they can be
+// efficiently passed by value.
+
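As a quick orientation to the three classes described above, a hedged sketch of typical arithmetic (DoSomethingExpensive is an invented placeholder):

    base::TimeTicks start = base::TimeTicks::Now();
    DoSomethingExpensive();  // hypothetical work being timed
    base::TimeDelta elapsed = base::TimeTicks::Now() - start;
    int64_t elapsed_ms = elapsed.InMilliseconds();

    // Wall-clock arithmetic: one hour from now, then back to time_t.
    base::Time deadline = base::Time::Now() + base::TimeDelta::FromHours(1);
    time_t deadline_tt = deadline.ToTimeT();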
+#ifndef BASE_TIME_H_
+#define BASE_TIME_H_
+
+#include <time.h>
+
+#include "base/basictypes.h"
+
+#if defined(OS_WIN)
+// For FILETIME in FromFileTime, until it moves to a new converter class.
+// See TODO(iyengar) below.
+#include <windows.h>
+#endif
+
+namespace base {
+
+class Time;
+class TimeTicks;
+
+// This unit test does a lot of manual time manipulation.
+class PageLoadTrackerUnitTest;
+
+// TimeDelta ------------------------------------------------------------------
+
+class TimeDelta {
+ public:
+ TimeDelta() : delta_(0) {
+ }
+
+ // Converts units of time to TimeDeltas.
+ static TimeDelta FromDays(int64_t days);
+ static TimeDelta FromHours(int64_t hours);
+ static TimeDelta FromMinutes(int64_t minutes);
+ static TimeDelta FromSeconds(int64_t secs);
+ static TimeDelta FromMilliseconds(int64_t ms);
+ static TimeDelta FromMicroseconds(int64_t us);
+
+ // Returns the internal numeric value of the TimeDelta object. Please don't
+ // use this and do arithmetic on it, as it is more error prone than using the
+ // provided operators.
+ int64_t ToInternalValue() const {
+ return delta_;
+ }
+
+ // Returns the time delta in some unit. The F versions return a floating
+ // point value, the "regular" versions return a rounded-down value.
+ int InDays() const;
+ int InHours() const;
+ int InMinutes() const;
+ double InSecondsF() const;
+ int64_t InSeconds() const;
+ double InMillisecondsF() const;
+ int64_t InMilliseconds() const;
+ int64_t InMicroseconds() const;
+
+ TimeDelta& operator=(TimeDelta other) {
+ delta_ = other.delta_;
+ return *this;
+ }
+
+ // Computations with other deltas.
+ TimeDelta operator+(TimeDelta other) const {
+ return TimeDelta(delta_ + other.delta_);
+ }
+ TimeDelta operator-(TimeDelta other) const {
+ return TimeDelta(delta_ - other.delta_);
+ }
+
+ TimeDelta& operator+=(TimeDelta other) {
+ delta_ += other.delta_;
+ return *this;
+ }
+ TimeDelta& operator-=(TimeDelta other) {
+ delta_ -= other.delta_;
+ return *this;
+ }
+ TimeDelta operator-() const {
+ return TimeDelta(-delta_);
+ }
+
+  // Computations with ints. Note that we only allow multiplicative operations
+ // with ints, and additive operations with other deltas.
+ TimeDelta operator*(int64_t a) const {
+ return TimeDelta(delta_ * a);
+ }
+ TimeDelta operator/(int64_t a) const {
+ return TimeDelta(delta_ / a);
+ }
+ TimeDelta& operator*=(int64_t a) {
+ delta_ *= a;
+ return *this;
+ }
+ TimeDelta& operator/=(int64_t a) {
+ delta_ /= a;
+ return *this;
+ }
+ int64_t operator/(TimeDelta a) const {
+ return delta_ / a.delta_;
+ }
+
+ // Defined below because it depends on the definition of the other classes.
+ Time operator+(Time t) const;
+ TimeTicks operator+(TimeTicks t) const;
+
+ // Comparison operators.
+ bool operator==(TimeDelta other) const {
+ return delta_ == other.delta_;
+ }
+ bool operator!=(TimeDelta other) const {
+ return delta_ != other.delta_;
+ }
+ bool operator<(TimeDelta other) const {
+ return delta_ < other.delta_;
+ }
+ bool operator<=(TimeDelta other) const {
+ return delta_ <= other.delta_;
+ }
+ bool operator>(TimeDelta other) const {
+ return delta_ > other.delta_;
+ }
+ bool operator>=(TimeDelta other) const {
+ return delta_ >= other.delta_;
+ }
+
+ private:
+ friend class Time;
+ friend class TimeTicks;
+ friend TimeDelta operator*(int64_t a, TimeDelta td);
+
+ // Constructs a delta given the duration in microseconds. This is private
+ // to avoid confusion by callers with an integer constructor. Use
+ // FromSeconds, FromMilliseconds, etc. instead.
+ explicit TimeDelta(int64_t delta_us) : delta_(delta_us) {
+ }
+
+ // Delta in microseconds.
+ int64_t delta_;
+};
+
+inline TimeDelta operator*(int64_t a, TimeDelta td) {
+ return TimeDelta(a * td.delta_);
+}
+
+// Time -----------------------------------------------------------------------
+
+// Represents a wall clock time.
+class Time {
+ public:
+ static const int64_t kMillisecondsPerSecond = 1000;
+ static const int64_t kMicrosecondsPerMillisecond = 1000;
+ static const int64_t kMicrosecondsPerSecond = kMicrosecondsPerMillisecond *
+ kMillisecondsPerSecond;
+ static const int64_t kMicrosecondsPerMinute = kMicrosecondsPerSecond * 60;
+ static const int64_t kMicrosecondsPerHour = kMicrosecondsPerMinute * 60;
+ static const int64_t kMicrosecondsPerDay = kMicrosecondsPerHour * 24;
+ static const int64_t kMicrosecondsPerWeek = kMicrosecondsPerDay * 7;
+ static const int64_t kNanosecondsPerMicrosecond = 1000;
+ static const int64_t kNanosecondsPerSecond = kNanosecondsPerMicrosecond *
+ kMicrosecondsPerSecond;
+
+ // Represents an exploded time that can be formatted nicely. This is kind of
+ // like the Win32 SYSTEMTIME structure or the Unix "struct tm" with a few
+ // additions and changes to prevent errors.
+ struct Exploded {
+ int year; // Four digit year "2007"
+ signed char month; // 1-based month (values 1 = January, etc.)
+ signed char day_of_week; // 0-based day of week (0 = Sunday, etc.)
+ signed char day_of_month; // 1-based day of month (1-31)
+ signed char hour; // Hour within the current day (0-23)
+ signed char minute; // Minute within the current hour (0-59)
+ signed char second; // Second within the current minute (0-59 plus
+ // leap seconds which may take it up to 60).
+ int millisecond; // Milliseconds within the current second (0-999)
+ };
+
+ // Contains the NULL time. Use Time::Now() to get the current time.
+ explicit Time() : us_(0) {
+ }
+
+ // Returns true if the time object has not been initialized.
+ bool is_null() const {
+ return us_ == 0;
+ }
+
+ // Returns the current time. Watch out, the system might adjust its clock
+ // in which case time will actually go backwards. We don't guarantee that
+ // times are increasing, or that two calls to Now() won't be the same.
+ static Time Now();
+
+ // Returns the current time. Same as Now() except that this function always
+ // uses system time so that there are no discrepancies between the returned
+ // time and system time even on virtual environments including our test bot.
+ // For timing sensitive unittests, this function should be used.
+ static Time NowFromSystemTime();
+
+ // Converts to/from time_t in UTC and a Time class.
+ // TODO(brettw) this should be removed once everybody starts using the |Time|
+ // class.
+ static Time FromTimeT(time_t tt);
+ time_t ToTimeT() const;
+
+ // Converts time to/from a double which is the number of seconds since epoch
+ // (Jan 1, 1970). Webkit uses this format to represent time.
+ static Time FromDoubleT(double dt);
+ double ToDoubleT() const;
+
+
+#if defined(OS_WIN)
+ static Time FromFileTime(FILETIME ft);
+ FILETIME ToFileTime() const;
+#endif
+
+ // Converts an exploded structure representing either the local time or UTC
+ // into a Time class.
+ static Time FromUTCExploded(const Exploded& exploded) {
+ return FromExploded(false, exploded);
+ }
+ static Time FromLocalExploded(const Exploded& exploded) {
+ return FromExploded(true, exploded);
+ }
+
+ // Converts an integer value representing Time to a class. This is used
+ // when deserializing a |Time| structure, using a value known to be
+ // compatible. It is not provided as a constructor because the integer type
+ // may be unclear from the perspective of a caller.
+ static Time FromInternalValue(int64_t us) {
+ return Time(us);
+ }
+
+ // Converts a string representation of time to a Time object.
+  // An example of a time string which can be converted:
+ // "Tue, 15 Nov 1994 12:45:26 GMT". If the timezone is not specified
+ // in the input string, we assume local time.
+ // TODO(iyengar) Move the FromString/FromTimeT/ToTimeT/FromFileTime to
+ // a new time converter class.
+ static bool FromString(const wchar_t* time_string, Time* parsed_time);
+
+ // For serializing, use FromInternalValue to reconstitute. Please don't use
+ // this and do arithmetic on it, as it is more error prone than using the
+ // provided operators.
+ int64_t ToInternalValue() const {
+ return us_;
+ }
+
+ // Fills the given exploded structure with either the local time or UTC from
+ // this time structure (containing UTC).
+ void UTCExplode(Exploded* exploded) const {
+ return Explode(false, exploded);
+ }
+ void LocalExplode(Exploded* exploded) const {
+ return Explode(true, exploded);
+ }
+
+ // Rounds this time down to the nearest day in local time. It will represent
+ // midnight on that day.
+ Time LocalMidnight() const;
+
+ Time& operator=(Time other) {
+ us_ = other.us_;
+ return *this;
+ }
+
+ // Compute the difference between two times.
+ TimeDelta operator-(Time other) const {
+ return TimeDelta(us_ - other.us_);
+ }
+
+ // Modify by some time delta.
+ Time& operator+=(TimeDelta delta) {
+ us_ += delta.delta_;
+ return *this;
+ }
+ Time& operator-=(TimeDelta delta) {
+ us_ -= delta.delta_;
+ return *this;
+ }
+
+ // Return a new time modified by some delta.
+ Time operator+(TimeDelta delta) const {
+ return Time(us_ + delta.delta_);
+ }
+ Time operator-(TimeDelta delta) const {
+ return Time(us_ - delta.delta_);
+ }
+
+ // Comparison operators
+ bool operator==(Time other) const {
+ return us_ == other.us_;
+ }
+ bool operator!=(Time other) const {
+ return us_ != other.us_;
+ }
+ bool operator<(Time other) const {
+ return us_ < other.us_;
+ }
+ bool operator<=(Time other) const {
+ return us_ <= other.us_;
+ }
+ bool operator>(Time other) const {
+ return us_ > other.us_;
+ }
+ bool operator>=(Time other) const {
+ return us_ >= other.us_;
+ }
+
+ private:
+ friend class TimeDelta;
+
+ // Explodes the given time to either local time |is_local = true| or UTC
+ // |is_local = false|.
+ void Explode(bool is_local, Exploded* exploded) const;
+
+ // Unexplodes a given time assuming the source is either local time
+ // |is_local = true| or UTC |is_local = false|.
+ static Time FromExploded(bool is_local, const Exploded& exploded);
+
+ explicit Time(int64_t us) : us_(us) {
+ }
+
+ // The representation of Jan 1, 1970 UTC in microseconds since the
+ // platform-dependent epoch.
+ static const int64_t kTimeTToMicrosecondsOffset;
+
+ // Time in microseconds in UTC.
+ int64_t us_;
+};
+
+inline Time TimeDelta::operator+(Time t) const {
+ return Time(t.us_ + delta_);
+}
+
+// Inline the TimeDelta factory methods, for fast TimeDelta construction.
+
+// static
+inline TimeDelta TimeDelta::FromDays(int64_t days) {
+ return TimeDelta(days * Time::kMicrosecondsPerDay);
+}
+
+// static
+inline TimeDelta TimeDelta::FromHours(int64_t hours) {
+ return TimeDelta(hours * Time::kMicrosecondsPerHour);
+}
+
+// static
+inline TimeDelta TimeDelta::FromMinutes(int64_t minutes) {
+ return TimeDelta(minutes * Time::kMicrosecondsPerMinute);
+}
+
+// static
+inline TimeDelta TimeDelta::FromSeconds(int64_t secs) {
+ return TimeDelta(secs * Time::kMicrosecondsPerSecond);
+}
+
+// static
+inline TimeDelta TimeDelta::FromMilliseconds(int64_t ms) {
+ return TimeDelta(ms * Time::kMicrosecondsPerMillisecond);
+}
+
+// static
+inline TimeDelta TimeDelta::FromMicroseconds(int64_t us) {
+ return TimeDelta(us);
+}
+
+// TimeTicks ------------------------------------------------------------------
+
+class TimeTicks {
+ public:
+ TimeTicks() : ticks_(0) {
+ }
+
+ // Platform-dependent tick count representing "right now."
+ // The resolution of this clock is ~1-15ms. Resolution varies depending
+ // on hardware/operating system configuration.
+ static TimeTicks Now();
+
+ // Returns a platform-dependent high-resolution tick count. Implementation
+ // is hardware dependent and may or may not return sub-millisecond
+ // resolution. THIS CALL IS GENERALLY MUCH MORE EXPENSIVE THAN Now() AND
+ // SHOULD ONLY BE USED WHEN IT IS REALLY NEEDED.
+ static TimeTicks HighResNow();
+
+ // Returns true if this object has not been initialized.
+ bool is_null() const {
+ return ticks_ == 0;
+ }
+
+ // Returns the internal numeric value of the TimeTicks object.
+ int64_t ToInternalValue() const {
+ return ticks_;
+ }
+
+ TimeTicks& operator=(TimeTicks other) {
+ ticks_ = other.ticks_;
+ return *this;
+ }
+
+ // Compute the difference between two times.
+ TimeDelta operator-(TimeTicks other) const {
+ return TimeDelta(ticks_ - other.ticks_);
+ }
+
+ // Modify by some time delta.
+ TimeTicks& operator+=(TimeDelta delta) {
+ ticks_ += delta.delta_;
+ return *this;
+ }
+ TimeTicks& operator-=(TimeDelta delta) {
+ ticks_ -= delta.delta_;
+ return *this;
+ }
+
+ // Return a new TimeTicks modified by some delta.
+ TimeTicks operator+(TimeDelta delta) const {
+ return TimeTicks(ticks_ + delta.delta_);
+ }
+ TimeTicks operator-(TimeDelta delta) const {
+ return TimeTicks(ticks_ - delta.delta_);
+ }
+
+ // Comparison operators
+ bool operator==(TimeTicks other) const {
+ return ticks_ == other.ticks_;
+ }
+ bool operator!=(TimeTicks other) const {
+ return ticks_ != other.ticks_;
+ }
+ bool operator<(TimeTicks other) const {
+ return ticks_ < other.ticks_;
+ }
+ bool operator<=(TimeTicks other) const {
+ return ticks_ <= other.ticks_;
+ }
+ bool operator>(TimeTicks other) const {
+ return ticks_ > other.ticks_;
+ }
+ bool operator>=(TimeTicks other) const {
+ return ticks_ >= other.ticks_;
+ }
+
+ protected:
+ friend class TimeDelta;
+ friend class PageLoadTrackerUnitTest;
+
+ // Please use Now() to create a new object. This is for internal use
+ // and testing. Ticks is in microseconds.
+ explicit TimeTicks(int64_t ticks) : ticks_(ticks) {
+ }
+
+ // Tick count in microseconds.
+ int64_t ticks_;
+
+#if defined(OS_WIN)
+ typedef DWORD (*TickFunctionType)(void);
+ static TickFunctionType SetMockTickFunction(TickFunctionType ticker);
+#endif
+};
+
+inline TimeTicks TimeDelta::operator+(TimeTicks t) const {
+ return TimeTicks(t.ticks_ + delta_);
+}
+
+} // namespace base
+
+#endif // BASE_TIME_H_
diff --git a/ipc/chromium/src/base/time_mac.cc b/ipc/chromium/src/base/time_mac.cc
new file mode 100644
index 000000000..a5a9a2a6e
--- /dev/null
+++ b/ipc/chromium/src/base/time_mac.cc
@@ -0,0 +1,127 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright (c) 2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/time.h"
+
+#include <CoreFoundation/CFDate.h>
+#include <CoreFoundation/CFTimeZone.h>
+#include <mach/mach_time.h>
+#include <sys/time.h>
+#include <time.h>
+
+#include "base/basictypes.h"
+#include "base/logging.h"
+#include "base/scoped_cftyperef.h"
+
+namespace base {
+
+// The Time routines in this file use Mach and CoreFoundation APIs, since the
+// POSIX definition of time_t in Mac OS X wraps around after 2038--and
+// there are already cookie expiration dates, etc., past that time out in
+// the field. Using CFDate prevents that problem, and using mach_absolute_time
+// for TimeTicks gives us nice high-resolution interval timing.
+
+// Time -----------------------------------------------------------------------
+
+// The internal representation of Time uses a 64-bit microsecond count
+// from 1970-01-01 00:00:00 UTC. Core Foundation uses a double second count
+// since 2001-01-01 00:00:00 UTC.
+
+// Some functions in time.cc use time_t directly, so we provide a zero offset
+// for them. The epoch is 1970-01-01 00:00:00 UTC.
+// static
+const int64_t Time::kTimeTToMicrosecondsOffset = GG_INT64_C(0);
+
+// static
+Time Time::Now() {
+ CFAbsoluteTime now =
+ CFAbsoluteTimeGetCurrent() + kCFAbsoluteTimeIntervalSince1970;
+ return Time(static_cast<int64_t>(now * kMicrosecondsPerSecond));
+}
+
+// static
+Time Time::NowFromSystemTime() {
+ // Just use Now() because Now() returns the system time.
+ return Now();
+}
+
+// static
+Time Time::FromExploded(bool is_local, const Exploded& exploded) {
+ CFGregorianDate date;
+ date.second = exploded.second +
+ exploded.millisecond / static_cast<double>(kMillisecondsPerSecond);
+ date.minute = exploded.minute;
+ date.hour = exploded.hour;
+ date.day = exploded.day_of_month;
+ date.month = exploded.month;
+ date.year = exploded.year;
+
+ scoped_cftyperef<CFTimeZoneRef>
+ time_zone(is_local ? CFTimeZoneCopySystem() : NULL);
+ CFAbsoluteTime seconds = CFGregorianDateGetAbsoluteTime(date, time_zone) +
+ kCFAbsoluteTimeIntervalSince1970;
+ return Time(static_cast<int64_t>(seconds * kMicrosecondsPerSecond));
+}
+
+void Time::Explode(bool is_local, Exploded* exploded) const {
+ CFAbsoluteTime seconds =
+ (static_cast<double>(us_) / kMicrosecondsPerSecond) -
+ kCFAbsoluteTimeIntervalSince1970;
+
+ scoped_cftyperef<CFTimeZoneRef>
+ time_zone(is_local ? CFTimeZoneCopySystem() : NULL);
+ CFGregorianDate date = CFAbsoluteTimeGetGregorianDate(seconds, time_zone);
+
+ exploded->year = date.year;
+ exploded->month = date.month;
+ exploded->day_of_month = date.day;
+ exploded->hour = date.hour;
+ exploded->minute = date.minute;
+ exploded->second = date.second;
+ exploded->millisecond =
+ static_cast<int>(date.second * kMillisecondsPerSecond) %
+ kMillisecondsPerSecond;
+}
+
+// TimeTicks ------------------------------------------------------------------
+
+// static
+TimeTicks TimeTicks::Now() {
+ uint64_t absolute_micro;
+
+ static mach_timebase_info_data_t timebase_info;
+ if (timebase_info.denom == 0) {
+ // Zero-initialization of statics guarantees that denom will be 0 before
+ // calling mach_timebase_info. mach_timebase_info will never set denom to
+ // 0 as that would be invalid, so the zero-check can be used to determine
+ // whether mach_timebase_info has already been called. This is
+ // recommended by Apple's QA1398.
+ kern_return_t kr = mach_timebase_info(&timebase_info);
+ DCHECK(kr == KERN_SUCCESS);
+ }
+
+ // mach_absolute_time is it when it comes to ticks on the Mac. Other calls
+ // with less precision (such as TickCount) just call through to
+ // mach_absolute_time.
+
+ // timebase_info converts absolute time tick units into nanoseconds. Convert
+ // to microseconds up front to stave off overflows.
+ absolute_micro = mach_absolute_time() / Time::kNanosecondsPerMicrosecond *
+ timebase_info.numer / timebase_info.denom;
+
+ // Don't bother with the rollover handling that the Windows version does.
+ // With numer and denom = 1 (the expected case), the 64-bit absolute time
+ // reported in nanoseconds is enough to last nearly 585 years.
+
+ return TimeTicks(absolute_micro);
+}
+
+// static
+TimeTicks TimeTicks::HighResNow() {
+ return Now();
+}
+
+} // namespace base
diff --git a/ipc/chromium/src/base/time_posix.cc b/ipc/chromium/src/base/time_posix.cc
new file mode 100644
index 000000000..2eb76a989
--- /dev/null
+++ b/ipc/chromium/src/base/time_posix.cc
@@ -0,0 +1,203 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/time.h"
+
+#ifdef OS_MACOSX
+#include <mach/mach_time.h>
+#endif
+#include <sys/time.h>
+#if defined(ANDROID) && !defined(__LP64__)
+#include <time64.h>
+#else
+#include <time.h>
+#endif
+#if defined(ANDROID) || defined(OS_POSIX)
+#include <unistd.h>
+#endif
+
+#include <limits>
+
+#include "base/basictypes.h"
+#include "base/logging.h"
+
+namespace base {
+
+// The Time routines in this file use standard POSIX routines, or almost-
+// standard routines in the case of timegm. We need to use a Mach-specific
+// function for TimeTicks::Now() on Mac OS X.
+
+// Time -----------------------------------------------------------------------
+
+// Some functions in time.cc use time_t directly, so we provide a zero offset
+// for them. The epoch is 1970-01-01 00:00:00 UTC.
+// static
+const int64_t Time::kTimeTToMicrosecondsOffset = GG_INT64_C(0);
+
+// static
+Time Time::Now() {
+ struct timeval tv;
+ struct timezone tz = { 0, 0 }; // UTC
+ if (gettimeofday(&tv, &tz) != 0) {
+ DCHECK(0) << "Could not determine time of day";
+ }
+ // Combine seconds and microseconds in a 64-bit field containing microseconds
+ // since the epoch. That's enough for nearly 600 centuries.
+ return Time(tv.tv_sec * kMicrosecondsPerSecond + tv.tv_usec);
+}
+
+// static
+Time Time::NowFromSystemTime() {
+ // Just use Now() because Now() returns the system time.
+ return Now();
+}
+
+// static
+Time Time::FromExploded(bool is_local, const Exploded& exploded) {
+ struct tm timestruct;
+ timestruct.tm_sec = exploded.second;
+ timestruct.tm_min = exploded.minute;
+ timestruct.tm_hour = exploded.hour;
+ timestruct.tm_mday = exploded.day_of_month;
+ timestruct.tm_mon = exploded.month - 1;
+ timestruct.tm_year = exploded.year - 1900;
+ timestruct.tm_wday = exploded.day_of_week; // mktime/timegm ignore this
+ timestruct.tm_yday = 0; // mktime/timegm ignore this
+ timestruct.tm_isdst = -1; // attempt to figure it out
+ timestruct.tm_gmtoff = 0; // not a POSIX field, so mktime/timegm ignore
+ timestruct.tm_zone = NULL; // not a POSIX field, so mktime/timegm ignore
+
+ time_t seconds;
+#ifdef ANDROID
+ seconds = mktime(&timestruct);
+#else
+ if (is_local)
+ seconds = mktime(&timestruct);
+ else
+ seconds = timegm(&timestruct);
+#endif
+
+ int64_t milliseconds;
+ // Handle overflow. Clamping the range to what mktime and timegm might
+ // return is the best that can be done here. It's not ideal, but it's better
+ // than failing here or ignoring the overflow case and treating each time
+ // overflow as one second prior to the epoch.
+ if (seconds == -1 &&
+ (exploded.year < 1969 || exploded.year > 1970)) {
+ // If exploded.year is 1969 or 1970, take -1 as correct, with the
+ // time indicating 1 second prior to the epoch. (1970 is allowed to handle
+ // time zone and DST offsets.) Otherwise, return the most future or past
+ // time representable. Assumes the time_t epoch is 1970-01-01 00:00:00 UTC.
+ //
+    // The minimum and maximum representable times that mktime and timegm could
+ // return are used here instead of values outside that range to allow for
+ // proper round-tripping between exploded and counter-type time
+ // representations in the presence of possible truncation to time_t by
+ // division and use with other functions that accept time_t.
+ //
+ // When representing the most distant time in the future, add in an extra
+ // 999ms to avoid the time being less than any other possible value that
+ // this function can return.
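+    //
+    // For example (illustrative numbers): with a 32-bit time_t, a request for
+    // exploded.year == 3000 makes mktime/timegm return -1, and the code below
+    // clamps the result to INT32_MAX seconds plus 999 ms; a request for year
+    // 1800 clamps to INT32_MIN seconds.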
+
+ // Take care to avoid overflows when time_t is int64_t.
+ if (exploded.year < 1969) {
+ int64_t min_seconds = (sizeof(time_t) < sizeof(int64_t))
+ ? std::numeric_limits<time_t>::min()
+ : std::numeric_limits<int32_t>::min();
+ milliseconds = min_seconds * kMillisecondsPerSecond;
+ } else {
+ int64_t max_seconds = (sizeof(time_t) < sizeof(int64_t))
+ ? std::numeric_limits<time_t>::max()
+ : std::numeric_limits<int32_t>::max();
+ milliseconds = max_seconds * kMillisecondsPerSecond;
+ milliseconds += kMillisecondsPerSecond - 1;
+ }
+ } else {
+ milliseconds = seconds * kMillisecondsPerSecond + exploded.millisecond;
+ }
+
+ return Time(milliseconds * kMicrosecondsPerMillisecond);
+}
+
+void Time::Explode(bool is_local, Exploded* exploded) const {
+ // Time stores times with microsecond resolution, but Exploded only carries
+ // millisecond resolution, so begin by being lossy.
+ int64_t milliseconds = us_ / kMicrosecondsPerMillisecond;
+ time_t seconds = milliseconds / kMillisecondsPerSecond;
+
+ struct tm timestruct;
+ if (is_local)
+ localtime_r(&seconds, &timestruct);
+ else
+ gmtime_r(&seconds, &timestruct);
+
+ exploded->year = timestruct.tm_year + 1900;
+ exploded->month = timestruct.tm_mon + 1;
+ exploded->day_of_week = timestruct.tm_wday;
+ exploded->day_of_month = timestruct.tm_mday;
+ exploded->hour = timestruct.tm_hour;
+ exploded->minute = timestruct.tm_min;
+ exploded->second = timestruct.tm_sec;
+ exploded->millisecond = milliseconds % kMillisecondsPerSecond;
+}
+
+// TimeTicks ------------------------------------------------------------------
+
+// static
+TimeTicks TimeTicks::Now() {
+ uint64_t absolute_micro;
+
+#if defined(OS_MACOSX)
+ static mach_timebase_info_data_t timebase_info;
+ if (timebase_info.denom == 0) {
+ // Zero-initialization of statics guarantees that denom will be 0 before
+ // calling mach_timebase_info. mach_timebase_info will never set denom to
+ // 0 as that would be invalid, so the zero-check can be used to determine
+ // whether mach_timebase_info has already been called. This is
+ // recommended by Apple's QA1398.
+ kern_return_t kr = mach_timebase_info(&timebase_info);
+ DCHECK(kr == KERN_SUCCESS);
+ }
+
+ // mach_absolute_time is it when it comes to ticks on the Mac. Other calls
+ // with less precision (such as TickCount) just call through to
+ // mach_absolute_time.
+
+ // timebase_info converts absolute time tick units into nanoseconds. Convert
+ // to microseconds up front to stave off overflows.
+ absolute_micro = mach_absolute_time() / Time::kNanosecondsPerMicrosecond *
+ timebase_info.numer / timebase_info.denom;
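+
+  // For example (hypothetical timebase): with numer/denom = 125/3, a tick
+  // count of 3,000,000 becomes 3,000,000 / 1000 * 125 / 3 = 125,000 us.
+  // Dividing by 1000 first keeps the intermediate product well below 2^64.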
+
+ // Don't bother with the rollover handling that the Windows version does.
+ // With numer and denom = 1 (the expected case), the 64-bit absolute time
+ // reported in nanoseconds is enough to last nearly 585 years.
+
+#elif defined(OS_OPENBSD) || defined(OS_POSIX) && \
+ defined(_POSIX_MONOTONIC_CLOCK) && _POSIX_MONOTONIC_CLOCK >= 0
+
+ struct timespec ts;
+ if (clock_gettime(CLOCK_MONOTONIC, &ts) != 0) {
+ NOTREACHED() << "clock_gettime(CLOCK_MONOTONIC) failed.";
+ return TimeTicks();
+ }
+
+ absolute_micro =
+ (static_cast<int64_t>(ts.tv_sec) * Time::kMicrosecondsPerSecond) +
+ (static_cast<int64_t>(ts.tv_nsec) / Time::kNanosecondsPerMicrosecond);
+
+#else // _POSIX_MONOTONIC_CLOCK
+#error No usable tick clock function on this platform.
+#endif // _POSIX_MONOTONIC_CLOCK
+
+ return TimeTicks(absolute_micro);
+}
+
+// static
+TimeTicks TimeTicks::HighResNow() {
+ return Now();
+}
+
+} // namespace base
diff --git a/ipc/chromium/src/base/time_win.cc b/ipc/chromium/src/base/time_win.cc
new file mode 100644
index 000000000..54252b1b4
--- /dev/null
+++ b/ipc/chromium/src/base/time_win.cc
@@ -0,0 +1,376 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+
+// Windows Timer Primer
+//
+// A good article: http://www.ddj.com/windows/184416651
+// A good mozilla bug: http://bugzilla.mozilla.org/show_bug.cgi?id=363258
+//
+// The default windows timer, GetSystemTimeAsFileTime is not very precise.
+// It is only good to ~15.5ms.
+//
+// QueryPerformanceCounter is the logical choice for a high-precision timer.
+// However, it is known to be buggy on some hardware. Specifically, it can
+// sometimes "jump". On laptops, QPC can also be very expensive to call.
+// It's 3-4x slower than timeGetTime() on desktops, but can be 10x slower
+// on laptops. A unittest exists which will show the relative cost of various
+// timers on any system.
+//
+// The next logical choice is timeGetTime(). timeGetTime has a precision of
+// 1ms, but only if you call APIs (timeBeginPeriod()) which affect all other
+// applications on the system. By default, precision is only 15.5ms.
+// Unfortunately, we don't want to call timeBeginPeriod because we don't
+// want to affect other applications. Further, on mobile platforms, use of
+// faster multimedia timers can hurt battery life. See the intel
+// article about this here:
+// http://softwarecommunity.intel.com/articles/eng/1086.htm
+//
+// To work around all this, we're going to generally use timeGetTime(). We
+// will only increase the system-wide timer if we're not running on battery
+// power. Using timeBeginPeriod(1) is a requirement in order to make our
+// message loop waits have the same resolution that our time measurements
+// do. Otherwise, WaitForSingleObject(..., 1) will wait no less than 15ms when
+// there is nothing else to wake the Wait.
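+//
+// For illustration, raising and later restoring the global timer resolution
+// uses the winmm API pair (sketch only; when to do this is the policy decision
+// discussed above):
+//
+//   timeBeginPeriod(1);  // request ~1ms timer granularity system-wide
+//   // ... timing-sensitive work ...
+//   timeEndPeriod(1);    // restore the default ~15.5ms granularity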
+
+#include "base/time.h"
+
+#pragma comment(lib, "winmm.lib")
+#include <windows.h>
+#include <mmsystem.h>
+
+#include "base/basictypes.h"
+#include "base/lock.h"
+#include "base/logging.h"
+#include "base/cpu.h"
+#include "base/singleton.h"
+#include "mozilla/Casting.h"
+
+using base::Time;
+using base::TimeDelta;
+using base::TimeTicks;
+using mozilla::BitwiseCast;
+
+namespace {
+
+// From MSDN, FILETIME "Contains a 64-bit value representing the number of
+// 100-nanosecond intervals since January 1, 1601 (UTC)."
+int64_t FileTimeToMicroseconds(const FILETIME& ft) {
+ // Need to BitwiseCast to fix alignment, then divide by 10 to convert
+  // 100-nanosecond intervals to microseconds. This only works on little-endian
+ // machines.
+ return BitwiseCast<int64_t>(ft) / 10;
+}
+
+void MicrosecondsToFileTime(int64_t us, FILETIME* ft) {
+ DCHECK(us >= 0) << "Time is less than 0, negative values are not "
+ "representable in FILETIME";
+
+  // Multiply by 10 to convert microseconds to 100-nanosecond intervals.
+  // BitwiseCast will handle alignment problems. This only works on
+  // little-endian machines.
+ *ft = BitwiseCast<FILETIME>(us * 10);
+}
+
+int64_t CurrentWallclockMicroseconds() {
+ FILETIME ft;
+ ::GetSystemTimeAsFileTime(&ft);
+ return FileTimeToMicroseconds(ft);
+}
+
+// Time between resampling the un-granular clock for this API. 60 seconds.
+const int kMaxMillisecondsToAvoidDrift = 60 * Time::kMillisecondsPerSecond;
+
+int64_t initial_time = 0;
+TimeTicks initial_ticks;
+
+void InitializeClock() {
+ initial_ticks = TimeTicks::Now();
+ initial_time = CurrentWallclockMicroseconds();
+}
+
+} // namespace
+
+// Time -----------------------------------------------------------------------
+
+// The internal representation of Time uses FILETIME, whose epoch is 1601-01-01
+// 00:00:00 UTC. ((1970-1601)*365+89)*24*60*60*1000*1000, where 89 is the
+// number of leap year days between 1601 and 1970: (1970-1601)/4 excluding
+// 1700, 1800, and 1900.
+// static
+const int64_t Time::kTimeTToMicrosecondsOffset = GG_INT64_C(11644473600000000);
+
+// static
+Time Time::Now() {
+ if (initial_time == 0)
+ InitializeClock();
+
+ // We implement time using the high-resolution timers so that we can get
+ // timeouts which are smaller than 10-15ms. If we just used
+ // CurrentWallclockMicroseconds(), we'd have the less-granular timer.
+ //
+  // To make this work, we initialize the clock (initial_time) and the
+  // counter (initial_ticks). To compute the current time, we check how many
+  // ticks have elapsed since initialization and add that delta to initial_time.
+ //
+ // To avoid any drift, we periodically resync the counters to the system
+ // clock.
+  while (true) {
+ TimeTicks ticks = TimeTicks::Now();
+
+ // Calculate the time elapsed since we started our timer
+ TimeDelta elapsed = ticks - initial_ticks;
+
+ // Check if enough time has elapsed that we need to resync the clock.
+ if (elapsed.InMilliseconds() > kMaxMillisecondsToAvoidDrift) {
+ InitializeClock();
+ continue;
+ }
+
+ return Time(elapsed + Time(initial_time));
+ }
+}
+
+// static
+Time Time::NowFromSystemTime() {
+ // Force resync.
+ InitializeClock();
+ return Time(initial_time);
+}
+
+// static
+Time Time::FromFileTime(FILETIME ft) {
+ return Time(FileTimeToMicroseconds(ft));
+}
+
+FILETIME Time::ToFileTime() const {
+ FILETIME utc_ft;
+ MicrosecondsToFileTime(us_, &utc_ft);
+ return utc_ft;
+}
+
+// static
+Time Time::FromExploded(bool is_local, const Exploded& exploded) {
+ // Create the system struct representing our exploded time. It will either be
+ // in local time or UTC.
+ SYSTEMTIME st;
+ st.wYear = exploded.year;
+ st.wMonth = exploded.month;
+ st.wDayOfWeek = exploded.day_of_week;
+ st.wDay = exploded.day_of_month;
+ st.wHour = exploded.hour;
+ st.wMinute = exploded.minute;
+ st.wSecond = exploded.second;
+ st.wMilliseconds = exploded.millisecond;
+
+ // Convert to FILETIME.
+ FILETIME ft;
+ if (!SystemTimeToFileTime(&st, &ft)) {
+ NOTREACHED() << "Unable to convert time";
+ return Time(0);
+ }
+
+ // Ensure that it's in UTC.
+ if (is_local) {
+ FILETIME utc_ft;
+ LocalFileTimeToFileTime(&ft, &utc_ft);
+ return Time(FileTimeToMicroseconds(utc_ft));
+ }
+ return Time(FileTimeToMicroseconds(ft));
+}
+
+void Time::Explode(bool is_local, Exploded* exploded) const {
+ // FILETIME in UTC.
+ FILETIME utc_ft;
+ MicrosecondsToFileTime(us_, &utc_ft);
+
+ // FILETIME in local time if necessary.
+ BOOL success = TRUE;
+ FILETIME ft;
+ if (is_local)
+ success = FileTimeToLocalFileTime(&utc_ft, &ft);
+ else
+ ft = utc_ft;
+
+ // FILETIME in SYSTEMTIME (exploded).
+ SYSTEMTIME st;
+ if (!success || !FileTimeToSystemTime(&ft, &st)) {
+ NOTREACHED() << "Unable to convert time, don't know why";
+ ZeroMemory(exploded, sizeof(*exploded));
+ return;
+ }
+
+ exploded->year = st.wYear;
+ exploded->month = st.wMonth;
+ exploded->day_of_week = st.wDayOfWeek;
+ exploded->day_of_month = st.wDay;
+ exploded->hour = st.wHour;
+ exploded->minute = st.wMinute;
+ exploded->second = st.wSecond;
+ exploded->millisecond = st.wMilliseconds;
+}
+
+// TimeTicks ------------------------------------------------------------------
+namespace {
+
+// We define a wrapper to adapt between the __stdcall calling convention of
+// timeGetTime and the __cdecl convention expected of the mock tick function,
+// and to avoid a static constructor. Assigning an import directly to a
+// function pointer would require setup code to fetch it from the IAT.
+DWORD timeGetTimeWrapper() {
+ return timeGetTime();
+}
+
+
+DWORD (*tick_function)(void) = &timeGetTimeWrapper;
+
+// We use timeGetTime() to implement TimeTicks::Now(). This can be problematic
+// because it returns the number of milliseconds since Windows has started,
+// which will roll over the 32-bit value every ~49 days. We try to track
+// rollover ourselves, which works if TimeTicks::Now() is called at least every
+// 49 days.
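+//
+// For example (illustrative values): if last_seen_ is 0xFFFFFF00 and the next
+// timeGetTime() call returns 0x00000100, now < last_seen_, so rollover_ grows
+// by 2^32 ms and the reported tick value stays monotonic.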
+class NowSingleton {
+ public:
+ NowSingleton()
+ : rollover_(TimeDelta::FromMilliseconds(0)),
+ last_seen_(0) {
+ }
+
+ TimeDelta Now() {
+ AutoLock locked(lock_);
+    // Hold the lock while calling tick_function so that last_seen_ stays
+    // correctly in sync.
+ DWORD now = tick_function();
+ if (now < last_seen_)
+ rollover_ += TimeDelta::FromMilliseconds(GG_LONGLONG(0x100000000)); // ~49.7 days.
+ last_seen_ = now;
+ return TimeDelta::FromMilliseconds(now) + rollover_;
+ }
+
+ private:
+  Lock lock_; // To protect last_seen_ and rollover_.
+ TimeDelta rollover_; // Accumulation of time lost due to rollover.
+ DWORD last_seen_; // The last timeGetTime value we saw, to detect rollover.
+
+ DISALLOW_COPY_AND_ASSIGN(NowSingleton);
+};
+
+// Overview of time counters:
+// (1) CPU cycle counter. (Retrieved via RDTSC)
+// The CPU counter provides the highest resolution time stamp and is the least
+// expensive to retrieve. However, the CPU counter is unreliable and should not
+// be used in production. Its biggest issue is that it is per processor and it
+// is not synchronized between processors. Also, on some computers, the counters
+// will change frequency due to thermal and power changes, and stop in some
+// states.
+//
+// (2) QueryPerformanceCounter (QPC). The QPC counter provides a high-
+// resolution (100 nanoseconds) time stamp but is comparatively more expensive
+// to retrieve. What QueryPerformanceCounter actually does is up to the HAL
+// (with some help from ACPI).
+// According to http://blogs.msdn.com/oldnewthing/archive/2005/09/02/459952.aspx
+// in the worst case, it gets the counter from the rollover interrupt on the
+// programmable interrupt timer. In best cases, the HAL may conclude that the
+// RDTSC counter runs at a constant frequency, then it uses that instead. On
+// multiprocessor machines, it will try to verify the values returned from
+// RDTSC on each processor are consistent with each other, and apply a handful
+// of workarounds for known buggy hardware. In other words, QPC is supposed to
+// give consistent results on a multiprocessor computer, but it is unreliable
+// in practice due to bugs in the BIOS or HAL on some (especially older)
+// computers.
+// With recent updates on HAL and newer BIOS, QPC is getting more reliable but
+// it should be used with caution.
+//
+// (3) System time. The system time provides a low-resolution (typically 10 to
+// 55 milliseconds) time stamp but is comparatively less expensive to
+// retrieve and more reliable.
+class HighResNowSingleton {
+ public:
+ HighResNowSingleton()
+ : ticks_per_microsecond_(0.0),
+ skew_(0) {
+ InitializeClock();
+
+ // On Athlon X2 CPUs (e.g. model 15) QueryPerformanceCounter is
+ // unreliable. Fallback to low-res clock.
+ base::CPU cpu;
+ if (cpu.vendor_name() == "AuthenticAMD" && cpu.family() == 15)
+ DisableHighResClock();
+ }
+
+ bool IsUsingHighResClock() {
+ return ticks_per_microsecond_ != 0.0;
+ }
+
+ void DisableHighResClock() {
+ ticks_per_microsecond_ = 0.0;
+ }
+
+ TimeDelta Now() {
+ // Our maximum tolerance for QPC drifting.
+ const int kMaxTimeDrift = 50 * Time::kMicrosecondsPerMillisecond;
+
+ if (IsUsingHighResClock()) {
+ int64_t now = UnreliableNow();
+
+ // Verify that QPC does not seem to drift.
+ DCHECK(now - ReliableNow() - skew_ < kMaxTimeDrift);
+
+ return TimeDelta::FromMicroseconds(now);
+ }
+
+ // Just fallback to the slower clock.
+ return Singleton<NowSingleton>::get()->Now();
+ }
+
+ private:
+ // Synchronize the QPC clock with GetSystemTimeAsFileTime.
+ void InitializeClock() {
+ LARGE_INTEGER ticks_per_sec = {{0}};
+ if (!QueryPerformanceFrequency(&ticks_per_sec))
+ return; // Broken, we don't guarantee this function works.
+ ticks_per_microsecond_ = static_cast<float>(ticks_per_sec.QuadPart) /
+ static_cast<float>(Time::kMicrosecondsPerSecond);
+
+ skew_ = UnreliableNow() - ReliableNow();
+ }
+
+  // Get the number of microseconds since boot in an unreliable fashion (QPC).
+ int64_t UnreliableNow() {
+ LARGE_INTEGER now;
+ QueryPerformanceCounter(&now);
+ return static_cast<int64_t>(now.QuadPart / ticks_per_microsecond_);
+ }
+
+ // Get the number of microseconds since boot in a reliable fashion
+ int64_t ReliableNow() {
+ return Singleton<NowSingleton>::get()->Now().InMicroseconds();
+ }
+
+  // Cached clock frequency -> microseconds. This assumes that the clock ticks
+  // faster than once per microsecond (i.e., above 1 MHz), which should be OK.
+ float ticks_per_microsecond_; // 0 indicates QPF failed and we're broken.
+ int64_t skew_; // Skew between lo-res and hi-res clocks (for debugging).
+
+ DISALLOW_COPY_AND_ASSIGN(HighResNowSingleton);
+};
+
+} // namespace
+
+// static
+TimeTicks::TickFunctionType TimeTicks::SetMockTickFunction(
+ TickFunctionType ticker) {
+ TickFunctionType old = tick_function;
+ tick_function = ticker;
+ return old;
+}
+
+// static
+TimeTicks TimeTicks::Now() {
+ return TimeTicks() + Singleton<NowSingleton>::get()->Now();
+}
+
+// static
+TimeTicks TimeTicks::HighResNow() {
+ return TimeTicks() + Singleton<HighResNowSingleton>::get()->Now();
+}
diff --git a/ipc/chromium/src/base/timer.cc b/ipc/chromium/src/base/timer.cc
new file mode 100644
index 000000000..b2d880797
--- /dev/null
+++ b/ipc/chromium/src/base/timer.cc
@@ -0,0 +1,31 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/timer.h"
+
+#include "base/message_loop.h"
+
+namespace base {
+
+void BaseTimer_Helper::OrphanDelayedTask() {
+ if (delayed_task_) {
+ delayed_task_->timer_ = nullptr;
+ delayed_task_ = nullptr;
+ }
+}
+
+void BaseTimer_Helper::InitiateDelayedTask(TimerTask* timer_task) {
+ OrphanDelayedTask();
+
+ delayed_task_ = timer_task;
+ delayed_task_->timer_ = this;
+ RefPtr<TimerTask> addrefedTask = timer_task;
+ MessageLoop::current()->PostDelayedTask(
+ addrefedTask.forget(),
+ static_cast<int>(timer_task->delay_.InMilliseconds()));
+}
+
+} // namespace base
diff --git a/ipc/chromium/src/base/timer.h b/ipc/chromium/src/base/timer.h
new file mode 100644
index 000000000..b65f89392
--- /dev/null
+++ b/ipc/chromium/src/base/timer.h
@@ -0,0 +1,274 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// OneShotTimer and RepeatingTimer provide a simple timer API. As the names
+// suggest, OneShotTimer calls you back once after a time delay expires.
+// RepeatingTimer on the other hand calls you back periodically with the
+// prescribed time interval.
+//
+// OneShotTimer and RepeatingTimer both cancel the timer when they go out of
+// scope, which makes it easy to ensure that you do not get called when your
+// object has gone out of scope. Just instantiate a OneShotTimer or
+// RepeatingTimer as a member variable of the class for which you wish to
+// receive timer events.
+//
+// Sample RepeatingTimer usage:
+//
+// class MyClass {
+// public:
+// void StartDoingStuff() {
+// timer_.Start(TimeDelta::FromSeconds(1), this, &MyClass::DoStuff);
+// }
+// void StopDoingStuff() {
+// timer_.Stop();
+// }
+// private:
+// void DoStuff() {
+// // This method is called every second to do stuff.
+// ...
+// }
+// base::RepeatingTimer<MyClass> timer_;
+// };
+//
+// Both OneShotTimer and RepeatingTimer also support a Reset method, which
+// allows you to easily defer the timer event until the timer delay passes once
+// again. So, in the above example, if 0.5 seconds have already passed,
+// calling Reset on timer_ would postpone DoStuff by another 1 second. In
+// other words, Reset is shorthand for calling Stop and then Start again with
+// the same arguments.
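+//
+// A OneShotTimer is used the same way. Illustrative sketch, mirroring the
+// RepeatingTimer example above (class and method names are hypothetical):
+//
+//   class MyClass {
+//    public:
+//     void ScheduleCleanup() {
+//       timer_.Start(TimeDelta::FromSeconds(5), this, &MyClass::DoCleanup);
+//     }
+//    private:
+//     void DoCleanup() {
+//       // Runs once, 5 seconds after ScheduleCleanup(), unless Stop() is
+//       // called or this object is destroyed first.
+//     }
+//     base::OneShotTimer<MyClass> timer_;
+//   };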
+
+#ifndef BASE_TIMER_H_
+#define BASE_TIMER_H_
+
+// IMPORTANT: If you change timer code, make sure that all tests (including
+// disabled ones) from timer_unittests.cc pass locally. Some are disabled
+// because they're flaky on the buildbot, but when you run them locally you
+// should be able to tell the difference.
+
+#include "base/logging.h"
+#include "base/task.h"
+#include "base/time.h"
+
+class MessageLoop;
+
+namespace base {
+
+//-----------------------------------------------------------------------------
+// This class is an implementation detail of OneShotTimer and RepeatingTimer.
+// Please do not use this class directly.
+//
+// This class exists to share code between BaseTimer<T> template instantiations.
+//
+class BaseTimer_Helper {
+ public:
+ // Stops the timer.
+ ~BaseTimer_Helper() {
+ OrphanDelayedTask();
+ }
+
+ // Returns true if the timer is running (i.e., not stopped).
+ bool IsRunning() const {
+ return !!delayed_task_;
+ }
+
+  // Returns the current delay for this timer. This method may only be called
+  // while the timer is running!
+ TimeDelta GetCurrentDelay() const {
+ DCHECK(IsRunning());
+ return delayed_task_->delay_;
+ }
+
+ protected:
+ BaseTimer_Helper() {}
+
+ // We have access to the timer_ member so we can orphan this task.
+ class TimerTask : public mozilla::Runnable {
+ public:
+ explicit TimerTask(TimeDelta delay) : delay_(delay) {
+ // timer_ is set in InitiateDelayedTask.
+ }
+ virtual ~TimerTask() {}
+ BaseTimer_Helper* timer_;
+ TimeDelta delay_;
+ };
+
+ // Used to orphan delayed_task_ so that when it runs it does nothing.
+ void OrphanDelayedTask();
+
+  // Used to initiate a new delayed task. This has the side-effect of
+ // orphaning delayed_task_ if it is non-null.
+ void InitiateDelayedTask(TimerTask* timer_task);
+
+ RefPtr<TimerTask> delayed_task_;
+
+ DISALLOW_COPY_AND_ASSIGN(BaseTimer_Helper);
+};
+
+//-----------------------------------------------------------------------------
+// This class is an implementation detail of OneShotTimer and RepeatingTimer.
+// Please do not use this class directly.
+template <class Receiver, bool kIsRepeating>
+class BaseTimer : public BaseTimer_Helper {
+ public:
+ typedef void (Receiver::*ReceiverMethod)();
+
+ // Call this method to start the timer. It is an error to call this method
+ // while the timer is already running.
+ void Start(TimeDelta delay, Receiver* receiver, ReceiverMethod method) {
+ DCHECK(!IsRunning());
+ InitiateDelayedTask(new TimerTask(delay, receiver, method));
+ }
+
+ // Call this method to stop the timer. It is a no-op if the timer is not
+ // running.
+ void Stop() {
+ OrphanDelayedTask();
+ }
+
+ // Call this method to reset the timer delay of an already running timer.
+ void Reset() {
+ DCHECK(IsRunning());
+ InitiateDelayedTask(static_cast<TimerTask*>(delayed_task_.get())->Clone());
+ }
+
+ private:
+ typedef BaseTimer<Receiver, kIsRepeating> SelfType;
+
+ class TimerTask : public BaseTimer_Helper::TimerTask {
+ public:
+ TimerTask(TimeDelta delay, Receiver* receiver, ReceiverMethod method)
+ : BaseTimer_Helper::TimerTask(delay),
+ receiver_(receiver),
+ method_(method) {
+ }
+
+ virtual ~TimerTask() {
+ // This task may be getting cleared because the MessageLoop has been
+ // destructed. If so, don't leave the Timer with a dangling pointer
+ // to this now-defunct task.
+ ClearBaseTimer();
+ }
+
+ NS_IMETHOD Run() override {
+ if (!timer_) // timer_ is null if we were orphaned.
+ return NS_OK;
+ if (kIsRepeating)
+ ResetBaseTimer();
+ else
+ ClearBaseTimer();
+ DispatchToMethod(receiver_, method_, Tuple0());
+ return NS_OK;
+ }
+
+ TimerTask* Clone() const {
+ return new TimerTask(delay_, receiver_, method_);
+ }
+
+ private:
+ // Inform the Base that the timer is no longer active.
+ void ClearBaseTimer() {
+ if (timer_) {
+ SelfType* self = static_cast<SelfType*>(timer_);
+ // It is possible that the Timer has already been reset, and that this
+ // Task is old. So, if the Timer points to a different task, assume
+ // that the Timer has already taken care of properly setting the task.
+ if (self->delayed_task_ == this)
+ self->delayed_task_ = nullptr;
+ // By now the delayed_task_ in the Timer does not point to us anymore.
+ // We should reset our own timer_ because the Timer can not do this
+ // for us in its destructor.
+        timer_ = nullptr;
+ }
+ }
+
+ // Inform the Base that we're resetting the timer.
+ void ResetBaseTimer() {
+ DCHECK(timer_);
+ DCHECK(kIsRepeating);
+ SelfType* self = static_cast<SelfType*>(timer_);
+ self->Reset();
+ }
+
+ Receiver* receiver_;
+ ReceiverMethod method_;
+ };
+};
+
+//-----------------------------------------------------------------------------
+// A simple, one-shot timer. See usage notes at the top of the file.
+template <class Receiver>
+class OneShotTimer : public BaseTimer<Receiver, false> {};
+
+//-----------------------------------------------------------------------------
+// A simple, repeating timer. See usage notes at the top of the file.
+template <class Receiver>
+class RepeatingTimer : public BaseTimer<Receiver, true> {};
+
+//-----------------------------------------------------------------------------
+// A Delay timer is like The Button from Lost. Once started, you have to keep
+// calling Reset otherwise it will call the given method in the MessageLoop
+// thread.
+//
+// Once created, it is inactive until Reset is called. Once |delay| has elapsed
+// since the last call to Reset, the callback is made. Once the callback has
+// been made, it's inactive until Reset is called again.
+//
+// If destroyed, the timeout is canceled and will not occur even if it is
+// already in flight.
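+//
+// Illustrative sketch (class and method names are hypothetical):
+//
+//   class QuietDetector {
+//    public:
+//     QuietDetector()
+//         : timer_(TimeDelta::FromSeconds(30), this, &QuietDetector::OnQuiet) {}
+//     void OnActivity() { timer_.Reset(); }  // keeps postponing OnQuiet()
+//    private:
+//     void OnQuiet() { /* no activity seen for 30 seconds */ }
+//     base::DelayTimer<QuietDetector> timer_;
+//   };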
+template <class Receiver>
+class DelayTimer {
+ public:
+ typedef void (Receiver::*ReceiverMethod)();
+
+ DelayTimer(TimeDelta delay, Receiver* receiver, ReceiverMethod method)
+ : receiver_(receiver),
+ method_(method),
+ delay_(delay) {
+ }
+
+ void Reset() {
+ DelayFor(delay_);
+ }
+
+ private:
+ void DelayFor(TimeDelta delay) {
+ trigger_time_ = Time::Now() + delay;
+
+ // If we already have a timer that will expire at or before the given delay,
+ // then we have nothing more to do now.
+ if (timer_.IsRunning() && timer_.GetCurrentDelay() <= delay)
+ return;
+
+ // The timer isn't running, or will expire too late, so restart it.
+ timer_.Stop();
+ timer_.Start(delay, this, &DelayTimer<Receiver>::Check);
+ }
+
+ void Check() {
+ if (trigger_time_.is_null())
+ return;
+
+ // If we have not waited long enough, then wait some more.
+ const Time now = Time::Now();
+ if (now < trigger_time_) {
+ DelayFor(trigger_time_ - now);
+ return;
+ }
+
+ (receiver_->*method_)();
+ }
+
+ Receiver *const receiver_;
+ const ReceiverMethod method_;
+ const TimeDelta delay_;
+
+ OneShotTimer<DelayTimer<Receiver> > timer_;
+ Time trigger_time_;
+};
+
+} // namespace base
+
+#endif // BASE_TIMER_H_
diff --git a/ipc/chromium/src/base/tuple.h b/ipc/chromium/src/base/tuple.h
new file mode 100644
index 000000000..d7c5a008f
--- /dev/null
+++ b/ipc/chromium/src/base/tuple.h
@@ -0,0 +1,884 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// A Tuple is a generic templatized container, similar in concept to std::pair.
+// There are classes Tuple0 through Tuple7, corresponding to the number of
+// elements each contains. The convenient MakeTuple() function takes 0 to 7
+// arguments,
+// and will construct and return the appropriate Tuple object. The functions
+// DispatchToMethod and DispatchToFunction take a function pointer or instance
+// and method pointer, and unpack a tuple into arguments to the call.
+//
+// Tuple elements are copied by value, and stored in the tuple. See the unit
+// tests for more details of how/when the values are copied.
+//
+// Example usage:
+// // These two methods of creating a Tuple are identical.
+// Tuple2<int, const char*> tuple_a(1, "wee");
+// Tuple2<int, const char*> tuple_b = MakeTuple(1, "wee");
+//
+// void SomeFunc(int a, const char* b) { }
+// DispatchToFunction(&SomeFunc, tuple_a); // SomeFunc(1, "wee")
+// DispatchToFunction(
+// &SomeFunc, MakeTuple(10, "foo")); // SomeFunc(10, "foo")
+//
+//   struct Foo { void SomeMeth(int a, int b, int c) { } } foo;
+//   DispatchToMethod(&foo, &Foo::SomeMeth, MakeTuple(1, 2, 3));
+//   // foo.SomeMeth(1, 2, 3);
+
+#ifndef BASE_TUPLE_H__
+#define BASE_TUPLE_H__
+
+// Traits ----------------------------------------------------------------------
+//
+// A simple traits class for tuple arguments.
+//
+// ValueType: the bare, nonref version of a type (same as the type for nonrefs).
+// RefType: the ref version of a type (same as the type for refs).
+// ParamType: what type to pass to functions (refs should not be constified).
+
+template <class P>
+struct TupleTraits {
+ typedef P ValueType;
+ typedef P& RefType;
+ typedef const P& ParamType;
+};
+
+template <class P>
+struct TupleTraits<P&> {
+ typedef P ValueType;
+ typedef P& RefType;
+ typedef P& ParamType;
+};
+
+// Tuple -----------------------------------------------------------------------
+//
+// This set of classes is useful for bundling 0 or more heterogeneous data types
+// into a single variable. The advantage of this is that it greatly simplifies
+// function objects that need to take an arbitrary number of parameters; see
+// RunnableMethod and IPC::MessageWithTuple.
+//
+// Tuple0 is supplied to act as a 'void' type. It can be used, for example,
+// when dispatching to a function that accepts no arguments (see the
+// Dispatchers below).
+// Tuple1<A> is rarely useful. One such use is when A is a non-const ref that
+// you want filled by the dispatchee, and the tuple is merely a container for
+// that output (a "tier"). See MakeRefTuple and its usages.
+
+struct Tuple0 {
+ typedef Tuple0 ValueTuple;
+ typedef Tuple0 RefTuple;
+};
+
+template <class A>
+struct Tuple1 {
+ public:
+ typedef A TypeA;
+ typedef Tuple1<typename TupleTraits<A>::ValueType> ValueTuple;
+ typedef Tuple1<typename TupleTraits<A>::RefType> RefTuple;
+
+ Tuple1() {}
+ explicit Tuple1(typename TupleTraits<A>::ParamType aA) : a(aA) {}
+
+ A a;
+};
+
+template <class A, class B>
+struct Tuple2 {
+ public:
+ typedef A TypeA;
+ typedef B TypeB;
+ typedef Tuple2<typename TupleTraits<A>::ValueType,
+ typename TupleTraits<B>::ValueType> ValueTuple;
+ typedef Tuple2<typename TupleTraits<A>::RefType,
+ typename TupleTraits<B>::RefType> RefTuple;
+
+ Tuple2() {}
+ Tuple2(typename TupleTraits<A>::ParamType aA,
+ typename TupleTraits<B>::ParamType aB)
+ : a(aA), b(aB) {
+ }
+
+ A a;
+ B b;
+};
+
+template <class A, class B, class C>
+struct Tuple3 {
+ public:
+ typedef A TypeA;
+ typedef B TypeB;
+ typedef C TypeC;
+ typedef Tuple3<typename TupleTraits<A>::ValueType,
+ typename TupleTraits<B>::ValueType,
+ typename TupleTraits<C>::ValueType> ValueTuple;
+ typedef Tuple3<typename TupleTraits<A>::RefType,
+ typename TupleTraits<B>::RefType,
+ typename TupleTraits<C>::RefType> RefTuple;
+
+ Tuple3() {}
+ Tuple3(typename TupleTraits<A>::ParamType aA,
+ typename TupleTraits<B>::ParamType aB,
+ typename TupleTraits<C>::ParamType aC)
+ : a(aA), b(aB), c(aC) {
+ }
+
+ A a;
+ B b;
+ C c;
+};
+
+template <class A, class B, class C, class D>
+struct Tuple4 {
+ public:
+ typedef A TypeA;
+ typedef B TypeB;
+ typedef C TypeC;
+ typedef D TypeD;
+ typedef Tuple4<typename TupleTraits<A>::ValueType,
+ typename TupleTraits<B>::ValueType,
+ typename TupleTraits<C>::ValueType,
+ typename TupleTraits<D>::ValueType> ValueTuple;
+ typedef Tuple4<typename TupleTraits<A>::RefType,
+ typename TupleTraits<B>::RefType,
+ typename TupleTraits<C>::RefType,
+ typename TupleTraits<D>::RefType> RefTuple;
+
+ Tuple4() {}
+ Tuple4(typename TupleTraits<A>::ParamType aA,
+ typename TupleTraits<B>::ParamType aB,
+ typename TupleTraits<C>::ParamType aC,
+ typename TupleTraits<D>::ParamType aD)
+ : a(aA), b(aB), c(aC), d(aD) {
+ }
+
+ A a;
+ B b;
+ C c;
+ D d;
+};
+
+template <class A, class B, class C, class D, class E>
+struct Tuple5 {
+public:
+ typedef A TypeA;
+ typedef B TypeB;
+ typedef C TypeC;
+ typedef D TypeD;
+ typedef E TypeE;
+ typedef Tuple5<typename TupleTraits<A>::ValueType,
+ typename TupleTraits<B>::ValueType,
+ typename TupleTraits<C>::ValueType,
+ typename TupleTraits<D>::ValueType,
+ typename TupleTraits<E>::ValueType> ValueTuple;
+ typedef Tuple5<typename TupleTraits<A>::RefType,
+ typename TupleTraits<B>::RefType,
+ typename TupleTraits<C>::RefType,
+ typename TupleTraits<D>::RefType,
+ typename TupleTraits<E>::RefType> RefTuple;
+
+ Tuple5() {}
+ Tuple5(typename TupleTraits<A>::ParamType aA,
+ typename TupleTraits<B>::ParamType aB,
+ typename TupleTraits<C>::ParamType aC,
+ typename TupleTraits<D>::ParamType aD,
+ typename TupleTraits<E>::ParamType aE)
+ : a(aA), b(aB), c(aC), d(aD), e(aE) {
+ }
+
+ A a;
+ B b;
+ C c;
+ D d;
+ E e;
+};
+
+template <class A, class B, class C, class D, class E, class F>
+struct Tuple6 {
+public:
+ typedef A TypeA;
+ typedef B TypeB;
+ typedef C TypeC;
+ typedef D TypeD;
+ typedef E TypeE;
+ typedef F TypeF;
+ typedef Tuple6<typename TupleTraits<A>::ValueType,
+ typename TupleTraits<B>::ValueType,
+ typename TupleTraits<C>::ValueType,
+ typename TupleTraits<D>::ValueType,
+ typename TupleTraits<E>::ValueType,
+ typename TupleTraits<F>::ValueType> ValueTuple;
+ typedef Tuple6<typename TupleTraits<A>::RefType,
+ typename TupleTraits<B>::RefType,
+ typename TupleTraits<C>::RefType,
+ typename TupleTraits<D>::RefType,
+ typename TupleTraits<E>::RefType,
+ typename TupleTraits<F>::RefType> RefTuple;
+
+ Tuple6() {}
+ Tuple6(typename TupleTraits<A>::ParamType aA,
+ typename TupleTraits<B>::ParamType aB,
+ typename TupleTraits<C>::ParamType aC,
+ typename TupleTraits<D>::ParamType aD,
+ typename TupleTraits<E>::ParamType aE,
+ typename TupleTraits<F>::ParamType aF)
+ : a(aA), b(aB), c(aC), d(aD), e(aE), f(aF) {
+ }
+
+ A a;
+ B b;
+ C c;
+ D d;
+ E e;
+ F f;
+};
+
+template <class A, class B, class C, class D, class E, class F, class G>
+struct Tuple7 {
+public:
+ typedef A TypeA;
+ typedef B TypeB;
+ typedef C TypeC;
+ typedef D TypeD;
+ typedef E TypeE;
+ typedef F TypeF;
+ typedef G TypeG;
+ typedef Tuple7<typename TupleTraits<A>::ValueType,
+ typename TupleTraits<B>::ValueType,
+ typename TupleTraits<C>::ValueType,
+ typename TupleTraits<D>::ValueType,
+ typename TupleTraits<E>::ValueType,
+ typename TupleTraits<F>::ValueType,
+ typename TupleTraits<G>::ValueType> ValueTuple;
+ typedef Tuple7<typename TupleTraits<A>::RefType,
+ typename TupleTraits<B>::RefType,
+ typename TupleTraits<C>::RefType,
+ typename TupleTraits<D>::RefType,
+ typename TupleTraits<E>::RefType,
+ typename TupleTraits<F>::RefType,
+ typename TupleTraits<G>::RefType> RefTuple;
+
+ Tuple7() {}
+ Tuple7(typename TupleTraits<A>::ParamType aA,
+ typename TupleTraits<B>::ParamType aB,
+ typename TupleTraits<C>::ParamType aC,
+ typename TupleTraits<D>::ParamType aD,
+ typename TupleTraits<E>::ParamType aE,
+ typename TupleTraits<F>::ParamType aF,
+ typename TupleTraits<G>::ParamType aG)
+ : a(aA), b(aB), c(aC), d(aD), e(aE), f(aF), g(aG) {
+ }
+
+ A a;
+ B b;
+ C c;
+ D d;
+ E e;
+ F f;
+ G g;
+};
+
+// Tuple creators -------------------------------------------------------------
+//
+// Helper functions for constructing tuples while inferring the template
+// argument types.
+
+namespace base {
+
+inline Tuple0 MakeTuple() {
+ return Tuple0();
+}
+
+template <class A>
+inline Tuple1<A> MakeTuple(const A& a) {
+ return Tuple1<A>(a);
+}
+
+template <class A, class B>
+inline Tuple2<A, B> MakeTuple(const A& a, const B& b) {
+ return Tuple2<A, B>(a, b);
+}
+
+template <class A, class B, class C>
+inline Tuple3<A, B, C> MakeTuple(const A& a, const B& b, const C& c) {
+ return Tuple3<A, B, C>(a, b, c);
+}
+
+template <class A, class B, class C, class D>
+inline Tuple4<A, B, C, D> MakeTuple(const A& a, const B& b, const C& c,
+ const D& d) {
+ return Tuple4<A, B, C, D>(a, b, c, d);
+}
+
+template <class A, class B, class C, class D, class E>
+inline Tuple5<A, B, C, D, E> MakeTuple(const A& a, const B& b, const C& c,
+ const D& d, const E& e) {
+ return Tuple5<A, B, C, D, E>(a, b, c, d, e);
+}
+
+template <class A, class B, class C, class D, class E, class F>
+inline Tuple6<A, B, C, D, E, F> MakeTuple(const A& a, const B& b, const C& c,
+ const D& d, const E& e, const F& f) {
+ return Tuple6<A, B, C, D, E, F>(a, b, c, d, e, f);
+}
+
+template <class A, class B, class C, class D, class E, class F, class G>
+inline Tuple7<A, B, C, D, E, F, G> MakeTuple(const A& a, const B& b, const C& c,
+ const D& d, const E& e, const F& f,
+ const G& g) {
+ return Tuple7<A, B, C, D, E, F, G>(a, b, c, d, e, f, g);
+}
+
+} // end namespace base
+
+// The following set of helpers make what Boost refers to as "Tiers" - a tuple
+// of references.
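+//
+// Illustrative sketch (GetSize is a hypothetical function): a ref tuple lets a
+// dispatched call fill caller-owned variables in place.
+//
+//   void GetSize(int& width, int& height);
+//   int width = 0, height = 0;
+//   DispatchToFunction(&GetSize, MakeRefTuple(width, height));
+//   // width and height now hold whatever GetSize wrote into them.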
+
+template <class A>
+inline Tuple1<A&> MakeRefTuple(A& a) {
+ return Tuple1<A&>(a);
+}
+
+template <class A, class B>
+inline Tuple2<A&, B&> MakeRefTuple(A& a, B& b) {
+ return Tuple2<A&, B&>(a, b);
+}
+
+template <class A, class B, class C>
+inline Tuple3<A&, B&, C&> MakeRefTuple(A& a, B& b, C& c) {
+ return Tuple3<A&, B&, C&>(a, b, c);
+}
+
+template <class A, class B, class C, class D>
+inline Tuple4<A&, B&, C&, D&> MakeRefTuple(A& a, B& b, C& c, D& d) {
+ return Tuple4<A&, B&, C&, D&>(a, b, c, d);
+}
+
+template <class A, class B, class C, class D, class E>
+inline Tuple5<A&, B&, C&, D&, E&> MakeRefTuple(A& a, B& b, C& c, D& d, E& e) {
+ return Tuple5<A&, B&, C&, D&, E&>(a, b, c, d, e);
+}
+
+template <class A, class B, class C, class D, class E, class F>
+inline Tuple6<A&, B&, C&, D&, E&, F&> MakeRefTuple(A& a, B& b, C& c, D& d, E& e,
+ F& f) {
+ return Tuple6<A&, B&, C&, D&, E&, F&>(a, b, c, d, e, f);
+}
+
+template <class A, class B, class C, class D, class E, class F, class G>
+inline Tuple7<A&, B&, C&, D&, E&, F&, G&> MakeRefTuple(A& a, B& b, C& c, D& d,
+ E& e, F& f, G& g) {
+ return Tuple7<A&, B&, C&, D&, E&, F&, G&>(a, b, c, d, e, f, g);
+}
+
+// Dispatchers ----------------------------------------------------------------
+//
+// Helper functions that call the given method on an object, with the unpacked
+// tuple arguments. Notice that they all have the same number of arguments,
+// so you need only write:
+// DispatchToMethod(object, &Object::method, args);
+// This is very useful for templated dispatchers, since they don't need to know
+// what type |args| is.
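+//
+// The out-param overloads further down additionally take a pointer to a tuple
+// whose fields receive the method's trailing output arguments. Illustrative
+// sketch (Math::Divide is hypothetical):
+//
+//   // void Math::Divide(int a, int b, int* quotient, int* remainder);
+//   Tuple2<int, int> result;
+//   DispatchToMethod(&math, &Math::Divide, MakeTuple(7, 2), &result);
+//   // result.a == 3, result.b == 1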
+
+// Non-Static Dispatchers with no out params.
+
+template <class ObjT, class Method>
+inline void DispatchToMethod(ObjT* obj, Method method, const Tuple0& arg) {
+ (obj->*method)();
+}
+
+template <class ObjT, class Method, class A>
+inline void DispatchToMethod(ObjT* obj, Method method, const A& arg) {
+ (obj->*method)(arg);
+}
+
+template <class ObjT, class Method, class A>
+inline void DispatchToMethod(ObjT* obj, Method method, const Tuple1<A>& arg) {
+ (obj->*method)(arg.a);
+}
+
+template<class ObjT, class Method, class A, class B>
+inline void DispatchToMethod(ObjT* obj,
+ Method method,
+ const Tuple2<A, B>& arg) {
+ (obj->*method)(arg.a, arg.b);
+}
+
+template<class ObjT, class Method, class A, class B, class C>
+inline void DispatchToMethod(ObjT* obj, Method method,
+ const Tuple3<A, B, C>& arg) {
+ (obj->*method)(arg.a, arg.b, arg.c);
+}
+
+template<class ObjT, class Method, class A, class B, class C, class D>
+inline void DispatchToMethod(ObjT* obj, Method method,
+ const Tuple4<A, B, C, D>& arg) {
+ (obj->*method)(arg.a, arg.b, arg.c, arg.d);
+}
+
+template<class ObjT, class Method, class A, class B, class C, class D, class E>
+inline void DispatchToMethod(ObjT* obj, Method method,
+ const Tuple5<A, B, C, D, E>& arg) {
+ (obj->*method)(arg.a, arg.b, arg.c, arg.d, arg.e);
+}
+
+template<class ObjT, class Method, class A, class B, class C, class D, class E,
+ class F>
+inline void DispatchToMethod(ObjT* obj, Method method,
+ const Tuple6<A, B, C, D, E, F>& arg) {
+ (obj->*method)(arg.a, arg.b, arg.c, arg.d, arg.e, arg.f);
+}
+
+template<class ObjT, class Method, class A, class B, class C, class D, class E,
+ class F, class G>
+inline void DispatchToMethod(ObjT* obj, Method method,
+ const Tuple7<A, B, C, D, E, F, G>& arg) {
+ (obj->*method)(arg.a, arg.b, arg.c, arg.d, arg.e, arg.f, arg.g);
+}
+
+// Static Dispatchers with no out params.
+
+template <class Function>
+inline void DispatchToFunction(Function function, const Tuple0& arg) {
+ (*function)();
+}
+
+template <class Function, class A>
+inline void DispatchToFunction(Function function, const A& arg) {
+ (*function)(arg);
+}
+
+template <class Function, class A>
+inline void DispatchToFunction(Function function, const Tuple1<A>& arg) {
+ (*function)(arg.a);
+}
+
+template<class Function, class A, class B>
+inline void DispatchToFunction(Function function, const Tuple2<A, B>& arg) {
+ (*function)(arg.a, arg.b);
+}
+
+template<class Function, class A, class B, class C>
+inline void DispatchToFunction(Function function, const Tuple3<A, B, C>& arg) {
+ (*function)(arg.a, arg.b, arg.c);
+}
+
+template<class Function, class A, class B, class C, class D>
+inline void DispatchToFunction(Function function,
+ const Tuple4<A, B, C, D>& arg) {
+ (*function)(arg.a, arg.b, arg.c, arg.d);
+}
+
+template<class Function, class A, class B, class C, class D, class E>
+inline void DispatchToFunction(Function function,
+ const Tuple5<A, B, C, D, E>& arg) {
+ (*function)(arg.a, arg.b, arg.c, arg.d, arg.e);
+}
+
+template<class Function, class A, class B, class C, class D, class E, class F>
+inline void DispatchToFunction(Function function,
+ const Tuple6<A, B, C, D, E, F>& arg) {
+ (*function)(arg.a, arg.b, arg.c, arg.d, arg.e, arg.f);
+}
+
+// Dispatchers with 0 out param (as a Tuple0).
+
+template <class ObjT, class Method>
+inline void DispatchToMethod(ObjT* obj,
+ Method method,
+ const Tuple0& arg, Tuple0*) {
+ (obj->*method)();
+}
+
+template <class ObjT, class Method, class A>
+inline void DispatchToMethod(ObjT* obj, Method method, const A& arg, Tuple0*) {
+ (obj->*method)(arg);
+}
+
+template <class ObjT, class Method, class A>
+inline void DispatchToMethod(ObjT* obj,
+ Method method,
+ const Tuple1<A>& arg, Tuple0*) {
+ (obj->*method)(arg.a);
+}
+
+template<class ObjT, class Method, class A, class B>
+inline void DispatchToMethod(ObjT* obj,
+ Method method,
+ const Tuple2<A, B>& arg, Tuple0*) {
+ (obj->*method)(arg.a, arg.b);
+}
+
+template<class ObjT, class Method, class A, class B, class C>
+inline void DispatchToMethod(ObjT* obj, Method method,
+ const Tuple3<A, B, C>& arg, Tuple0*) {
+ (obj->*method)(arg.a, arg.b, arg.c);
+}
+
+template<class ObjT, class Method, class A, class B, class C, class D>
+inline void DispatchToMethod(ObjT* obj, Method method,
+ const Tuple4<A, B, C, D>& arg, Tuple0*) {
+ (obj->*method)(arg.a, arg.b, arg.c, arg.d);
+}
+
+template<class ObjT, class Method, class A, class B, class C, class D, class E>
+inline void DispatchToMethod(ObjT* obj, Method method,
+ const Tuple5<A, B, C, D, E>& arg, Tuple0*) {
+ (obj->*method)(arg.a, arg.b, arg.c, arg.d, arg.e);
+}
+
+template<class ObjT, class Method, class A, class B, class C, class D, class E,
+ class F>
+inline void DispatchToMethod(ObjT* obj, Method method,
+ const Tuple6<A, B, C, D, E, F>& arg, Tuple0*) {
+ (obj->*method)(arg.a, arg.b, arg.c, arg.d, arg.e, arg.f);
+}
+
+// Dispatchers with 1 out param.
+
+template<class ObjT, class Method,
+ class OutA>
+inline void DispatchToMethod(ObjT* obj, Method method,
+ const Tuple0& in,
+ Tuple1<OutA>* out) {
+ (obj->*method)(&out->a);
+}
+
+template<class ObjT, class Method, class InA,
+ class OutA>
+inline void DispatchToMethod(ObjT* obj, Method method,
+ const InA& in,
+ Tuple1<OutA>* out) {
+ (obj->*method)(in, &out->a);
+}
+
+template<class ObjT, class Method, class InA,
+ class OutA>
+inline void DispatchToMethod(ObjT* obj, Method method,
+ const Tuple1<InA>& in,
+ Tuple1<OutA>* out) {
+ (obj->*method)(in.a, &out->a);
+}
+
+template<class ObjT, class Method, class InA, class InB,
+ class OutA>
+inline void DispatchToMethod(ObjT* obj, Method method,
+ const Tuple2<InA, InB>& in,
+ Tuple1<OutA>* out) {
+ (obj->*method)(in.a, in.b, &out->a);
+}
+
+template<class ObjT, class Method, class InA, class InB, class InC,
+ class OutA>
+inline void DispatchToMethod(ObjT* obj, Method method,
+ const Tuple3<InA, InB, InC>& in,
+ Tuple1<OutA>* out) {
+ (obj->*method)(in.a, in.b, in.c, &out->a);
+}
+
+template<class ObjT, class Method, class InA, class InB, class InC, class InD,
+ class OutA>
+inline void DispatchToMethod(ObjT* obj, Method method,
+ const Tuple4<InA, InB, InC, InD>& in,
+ Tuple1<OutA>* out) {
+ (obj->*method)(in.a, in.b, in.c, in.d, &out->a);
+}
+
+template<class ObjT, class Method,
+ class InA, class InB, class InC, class InD, class InE,
+ class OutA>
+inline void DispatchToMethod(ObjT* obj, Method method,
+ const Tuple5<InA, InB, InC, InD, InE>& in,
+ Tuple1<OutA>* out) {
+ (obj->*method)(in.a, in.b, in.c, in.d, in.e, &out->a);
+}
+
+template<class ObjT, class Method,
+ class InA, class InB, class InC, class InD, class InE, class InF,
+ class OutA>
+inline void DispatchToMethod(ObjT* obj, Method method,
+ const Tuple6<InA, InB, InC, InD, InE, InF>& in,
+ Tuple1<OutA>* out) {
+ (obj->*method)(in.a, in.b, in.c, in.d, in.e, in.f, &out->a);
+}
+
+// Dispatchers with 2 out params.
+
+template<class ObjT, class Method,
+ class OutA, class OutB>
+inline void DispatchToMethod(ObjT* obj, Method method,
+ const Tuple0& in,
+ Tuple2<OutA, OutB>* out) {
+ (obj->*method)(&out->a, &out->b);
+}
+
+template<class ObjT, class Method, class InA,
+ class OutA, class OutB>
+inline void DispatchToMethod(ObjT* obj, Method method,
+ const InA& in,
+ Tuple2<OutA, OutB>* out) {
+ (obj->*method)(in, &out->a, &out->b);
+}
+
+template<class ObjT, class Method, class InA,
+ class OutA, class OutB>
+inline void DispatchToMethod(ObjT* obj, Method method,
+ const Tuple1<InA>& in,
+ Tuple2<OutA, OutB>* out) {
+ (obj->*method)(in.a, &out->a, &out->b);
+}
+
+template<class ObjT, class Method, class InA, class InB,
+ class OutA, class OutB>
+inline void DispatchToMethod(ObjT* obj, Method method,
+ const Tuple2<InA, InB>& in,
+ Tuple2<OutA, OutB>* out) {
+ (obj->*method)(in.a, in.b, &out->a, &out->b);
+}
+
+template<class ObjT, class Method, class InA, class InB, class InC,
+ class OutA, class OutB>
+inline void DispatchToMethod(ObjT* obj, Method method,
+ const Tuple3<InA, InB, InC>& in,
+ Tuple2<OutA, OutB>* out) {
+ (obj->*method)(in.a, in.b, in.c, &out->a, &out->b);
+}
+
+template<class ObjT, class Method, class InA, class InB, class InC, class InD,
+ class OutA, class OutB>
+inline void DispatchToMethod(ObjT* obj, Method method,
+ const Tuple4<InA, InB, InC, InD>& in,
+ Tuple2<OutA, OutB>* out) {
+ (obj->*method)(in.a, in.b, in.c, in.d, &out->a, &out->b);
+}
+
+template<class ObjT, class Method,
+ class InA, class InB, class InC, class InD, class InE,
+ class OutA, class OutB>
+inline void DispatchToMethod(ObjT* obj, Method method,
+ const Tuple5<InA, InB, InC, InD, InE>& in,
+ Tuple2<OutA, OutB>* out) {
+ (obj->*method)(in.a, in.b, in.c, in.d, in.e, &out->a, &out->b);
+}
+
+template<class ObjT, class Method,
+ class InA, class InB, class InC, class InD, class InE, class InF,
+ class OutA, class OutB>
+inline void DispatchToMethod(ObjT* obj, Method method,
+ const Tuple6<InA, InB, InC, InD, InE, InF>& in,
+ Tuple2<OutA, OutB>* out) {
+ (obj->*method)(in.a, in.b, in.c, in.d, in.e, in.f, &out->a, &out->b);
+}
+
+// Dispatchers with 3 out params.
+
+template<class ObjT, class Method,
+ class OutA, class OutB, class OutC>
+inline void DispatchToMethod(ObjT* obj, Method method,
+ const Tuple0& in,
+ Tuple3<OutA, OutB, OutC>* out) {
+ (obj->*method)(&out->a, &out->b, &out->c);
+}
+
+template<class ObjT, class Method, class InA,
+ class OutA, class OutB, class OutC>
+inline void DispatchToMethod(ObjT* obj, Method method,
+ const InA& in,
+ Tuple3<OutA, OutB, OutC>* out) {
+ (obj->*method)(in, &out->a, &out->b, &out->c);
+}
+
+template<class ObjT, class Method, class InA,
+ class OutA, class OutB, class OutC>
+inline void DispatchToMethod(ObjT* obj, Method method,
+ const Tuple1<InA>& in,
+ Tuple3<OutA, OutB, OutC>* out) {
+ (obj->*method)(in.a, &out->a, &out->b, &out->c);
+}
+
+template<class ObjT, class Method, class InA, class InB,
+ class OutA, class OutB, class OutC>
+inline void DispatchToMethod(ObjT* obj, Method method,
+ const Tuple2<InA, InB>& in,
+ Tuple3<OutA, OutB, OutC>* out) {
+ (obj->*method)(in.a, in.b, &out->a, &out->b, &out->c);
+}
+
+template<class ObjT, class Method, class InA, class InB, class InC,
+ class OutA, class OutB, class OutC>
+inline void DispatchToMethod(ObjT* obj, Method method,
+ const Tuple3<InA, InB, InC>& in,
+ Tuple3<OutA, OutB, OutC>* out) {
+ (obj->*method)(in.a, in.b, in.c, &out->a, &out->b, &out->c);
+}
+
+template<class ObjT, class Method, class InA, class InB, class InC, class InD,
+ class OutA, class OutB, class OutC>
+inline void DispatchToMethod(ObjT* obj, Method method,
+ const Tuple4<InA, InB, InC, InD>& in,
+ Tuple3<OutA, OutB, OutC>* out) {
+ (obj->*method)(in.a, in.b, in.c, in.d, &out->a, &out->b, &out->c);
+}
+
+template<class ObjT, class Method,
+ class InA, class InB, class InC, class InD, class InE,
+ class OutA, class OutB, class OutC>
+inline void DispatchToMethod(ObjT* obj, Method method,
+ const Tuple5<InA, InB, InC, InD, InE>& in,
+ Tuple3<OutA, OutB, OutC>* out) {
+ (obj->*method)(in.a, in.b, in.c, in.d, in.e, &out->a, &out->b, &out->c);
+}
+
+template<class ObjT, class Method,
+ class InA, class InB, class InC, class InD, class InE, class InF,
+ class OutA, class OutB, class OutC>
+inline void DispatchToMethod(ObjT* obj, Method method,
+ const Tuple6<InA, InB, InC, InD, InE, InF>& in,
+ Tuple3<OutA, OutB, OutC>* out) {
+ (obj->*method)(in.a, in.b, in.c, in.d, in.e, in.f, &out->a, &out->b, &out->c);
+}
+
+// Dispatchers with 4 out params.
+
+template<class ObjT, class Method,
+ class OutA, class OutB, class OutC, class OutD>
+inline void DispatchToMethod(ObjT* obj, Method method,
+ const Tuple0& in,
+ Tuple4<OutA, OutB, OutC, OutD>* out) {
+ (obj->*method)(&out->a, &out->b, &out->c, &out->d);
+}
+
+template<class ObjT, class Method, class InA,
+ class OutA, class OutB, class OutC, class OutD>
+inline void DispatchToMethod(ObjT* obj, Method method,
+ const InA& in,
+ Tuple4<OutA, OutB, OutC, OutD>* out) {
+ (obj->*method)(in, &out->a, &out->b, &out->c, &out->d);
+}
+
+template<class ObjT, class Method, class InA,
+ class OutA, class OutB, class OutC, class OutD>
+inline void DispatchToMethod(ObjT* obj, Method method,
+ const Tuple1<InA>& in,
+ Tuple4<OutA, OutB, OutC, OutD>* out) {
+ (obj->*method)(in.a, &out->a, &out->b, &out->c, &out->d);
+}
+
+template<class ObjT, class Method, class InA, class InB,
+ class OutA, class OutB, class OutC, class OutD>
+inline void DispatchToMethod(ObjT* obj, Method method,
+ const Tuple2<InA, InB>& in,
+ Tuple4<OutA, OutB, OutC, OutD>* out) {
+ (obj->*method)(in.a, in.b, &out->a, &out->b, &out->c, &out->d);
+}
+
+template<class ObjT, class Method, class InA, class InB, class InC,
+ class OutA, class OutB, class OutC, class OutD>
+inline void DispatchToMethod(ObjT* obj, Method method,
+ const Tuple3<InA, InB, InC>& in,
+ Tuple4<OutA, OutB, OutC, OutD>* out) {
+ (obj->*method)(in.a, in.b, in.c, &out->a, &out->b, &out->c, &out->d);
+}
+
+template<class ObjT, class Method, class InA, class InB, class InC, class InD,
+ class OutA, class OutB, class OutC, class OutD>
+inline void DispatchToMethod(ObjT* obj, Method method,
+ const Tuple4<InA, InB, InC, InD>& in,
+ Tuple4<OutA, OutB, OutC, OutD>* out) {
+ (obj->*method)(in.a, in.b, in.c, in.d, &out->a, &out->b, &out->c, &out->d);
+}
+
+template<class ObjT, class Method,
+ class InA, class InB, class InC, class InD, class InE,
+ class OutA, class OutB, class OutC, class OutD>
+inline void DispatchToMethod(ObjT* obj, Method method,
+ const Tuple5<InA, InB, InC, InD, InE>& in,
+ Tuple4<OutA, OutB, OutC, OutD>* out) {
+ (obj->*method)(in.a, in.b, in.c, in.d, in.e,
+ &out->a, &out->b, &out->c, &out->d);
+}
+
+template<class ObjT, class Method,
+ class InA, class InB, class InC, class InD, class InE, class InF,
+ class OutA, class OutB, class OutC, class OutD>
+inline void DispatchToMethod(ObjT* obj, Method method,
+ const Tuple6<InA, InB, InC, InD, InE, InF>& in,
+ Tuple4<OutA, OutB, OutC, OutD>* out) {
+ (obj->*method)(in.a, in.b, in.c, in.d, in.e, in.f,
+ &out->a, &out->b, &out->c, &out->d);
+}
+
+// Dispatchers with 5 out params.
+
+template<class ObjT, class Method,
+ class OutA, class OutB, class OutC, class OutD, class OutE>
+inline void DispatchToMethod(ObjT* obj, Method method,
+ const Tuple0& in,
+ Tuple5<OutA, OutB, OutC, OutD, OutE>* out) {
+ (obj->*method)(&out->a, &out->b, &out->c, &out->d, &out->e);
+}
+
+template<class ObjT, class Method, class InA,
+ class OutA, class OutB, class OutC, class OutD, class OutE>
+inline void DispatchToMethod(ObjT* obj, Method method,
+ const InA& in,
+ Tuple5<OutA, OutB, OutC, OutD, OutE>* out) {
+ (obj->*method)(in, &out->a, &out->b, &out->c, &out->d, &out->e);
+}
+
+template<class ObjT, class Method, class InA,
+ class OutA, class OutB, class OutC, class OutD, class OutE>
+inline void DispatchToMethod(ObjT* obj, Method method,
+ const Tuple1<InA>& in,
+ Tuple5<OutA, OutB, OutC, OutD, OutE>* out) {
+ (obj->*method)(in.a, &out->a, &out->b, &out->c, &out->d, &out->e);
+}
+
+template<class ObjT, class Method, class InA, class InB,
+ class OutA, class OutB, class OutC, class OutD, class OutE>
+inline void DispatchToMethod(ObjT* obj, Method method,
+ const Tuple2<InA, InB>& in,
+ Tuple5<OutA, OutB, OutC, OutD, OutE>* out) {
+ (obj->*method)(in.a, in.b, &out->a, &out->b, &out->c, &out->d, &out->e);
+}
+
+template<class ObjT, class Method, class InA, class InB, class InC,
+ class OutA, class OutB, class OutC, class OutD, class OutE>
+inline void DispatchToMethod(ObjT* obj, Method method,
+ const Tuple3<InA, InB, InC>& in,
+ Tuple5<OutA, OutB, OutC, OutD, OutE>* out) {
+ (obj->*method)(in.a, in.b, in.c, &out->a, &out->b, &out->c, &out->d, &out->e);
+}
+
+template<class ObjT, class Method, class InA, class InB, class InC, class InD,
+ class OutA, class OutB, class OutC, class OutD, class OutE>
+inline void DispatchToMethod(ObjT* obj, Method method,
+ const Tuple4<InA, InB, InC, InD>& in,
+ Tuple5<OutA, OutB, OutC, OutD, OutE>* out) {
+ (obj->*method)(in.a, in.b, in.c, in.d, &out->a, &out->b, &out->c, &out->d,
+ &out->e);
+}
+
+template<class ObjT, class Method,
+ class InA, class InB, class InC, class InD, class InE,
+ class OutA, class OutB, class OutC, class OutD, class OutE>
+inline void DispatchToMethod(ObjT* obj, Method method,
+ const Tuple5<InA, InB, InC, InD, InE>& in,
+ Tuple5<OutA, OutB, OutC, OutD, OutE>* out) {
+ (obj->*method)(in.a, in.b, in.c, in.d, in.e,
+ &out->a, &out->b, &out->c, &out->d, &out->e);
+}
+
+template<class ObjT, class Method,
+ class InA, class InB, class InC, class InD, class InE, class InF,
+ class OutA, class OutB, class OutC, class OutD, class OutE>
+inline void DispatchToMethod(ObjT* obj, Method method,
+ const Tuple6<InA, InB, InC, InD, InE, InF>& in,
+ Tuple5<OutA, OutB, OutC, OutD, OutE>* out) {
+ (obj->*method)(in.a, in.b, in.c, in.d, in.e, in.f,
+ &out->a, &out->b, &out->c, &out->d, &out->e);
+}
+
+#endif // BASE_TUPLE_H__
diff --git a/ipc/chromium/src/base/waitable_event.h b/ipc/chromium/src/base/waitable_event.h
new file mode 100644
index 000000000..87eaff7c7
--- /dev/null
+++ b/ipc/chromium/src/base/waitable_event.h
@@ -0,0 +1,175 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_WAITABLE_EVENT_H_
+#define BASE_WAITABLE_EVENT_H_
+
+#include "base/basictypes.h"
+
+#if defined(OS_WIN)
+#include <windows.h>
+#endif
+
+#if defined(OS_POSIX)
+#include <list>
+#include <utility>
+#include "base/condition_variable.h"
+#include "base/lock.h"
+#include "nsISupportsImpl.h"
+#include "nsAutoPtr.h"
+#endif
+
+#include "base/message_loop.h"
+
+namespace base {
+
+// This replaces INFINITE from Win32
+static const int kNoTimeout = -1;
+
+class TimeDelta;
+
+// A WaitableEvent can be a useful thread synchronization tool when you want to
+// allow one thread to wait for another thread to finish some work. For
+// non-Windows systems, this can only be used from within a single address
+// space.
+//
+// Use a WaitableEvent when you would otherwise use a Lock+ConditionVariable to
+// protect a simple boolean value. However, if you find yourself using a
+// WaitableEvent in conjunction with a Lock to wait for a more complex state
+// change (e.g., for an item to be added to a queue), then you should probably
+// be using a ConditionVariable instead of a WaitableEvent.
+//
+// NOTE: On Windows, this class provides a subset of the functionality afforded
+// by a Windows event object. This is intentional. If you are writing
+// Windows-specific code and you need other features of a Windows event, then
+// you might be better off just using a Windows event directly.
+class WaitableEvent {
+ public:
+ // If manual_reset is true, then to set the event state to non-signaled, a
+ // consumer must call the Reset method. If this parameter is false, then the
+ // system automatically resets the event state to non-signaled after a single
+ // waiting thread has been released.
+ WaitableEvent(bool manual_reset, bool initially_signaled);
+
+ ~WaitableEvent();
+
+ // Put the event in the un-signaled state.
+ void Reset();
+
+  // Put the event in the signaled state, causing any thread blocked on Wait
+ // to be woken up.
+ void Signal();
+
+ // Returns true if the event is in the signaled state, else false. If this
+ // is not a manual reset event, then this test will cause a reset.
+ bool IsSignaled();
+
+  // Wait indefinitely for the event to be signaled. Returns true if the event
+  // was signaled, or false if the wait failed.
+ bool Wait();
+
+  // Wait for up to max_time for the event to be signaled. Returns true if the
+  // event was signaled. A false return does not necessarily mean that
+  // max_time was exceeded.
+ bool TimedWait(const TimeDelta& max_time);
+
+#if defined(OS_WIN)
+ HANDLE handle() const { return handle_; }
+#endif
+
+ // Wait, synchronously, on multiple events.
+ // waitables: an array of WaitableEvent pointers
+ // count: the number of elements in @waitables
+ //
+ // returns: the index of a WaitableEvent which has been signaled.
+ //
+ // You MUST NOT delete any of the WaitableEvent objects while this wait is
+ // happening.
+ static size_t WaitMany(WaitableEvent** waitables, size_t count);
+
+ // For asynchronous waiting, see WaitableEventWatcher
+
+ // This is a private helper class. It's here because it's used by friends of
+ // this class (such as WaitableEventWatcher) to be able to enqueue elements
+  // of the wait-list.
+ class Waiter {
+ public:
+ // Signal the waiter to wake up.
+ //
+    // Consider the case of a Waiter which is in multiple WaitableEvents'
+    // wait-lists. Each WaitableEvent is auto-reset and two of them are
+    // signaled at the same time. Now, each will wake only the first waiter in
+    // its wait-list before resetting. However, if those two waiters happen to
+ // be the same object (as can happen if another thread didn't have a chance
+ // to dequeue the waiter from the other wait-list in time), two auto-resets
+ // will have happened, but only one waiter has been signaled!
+ //
+ // Because of this, a Waiter may "reject" a wake by returning false. In
+ // this case, the auto-reset WaitableEvent shouldn't act as if anything has
+ // been notified.
+ virtual bool Fire(WaitableEvent* signaling_event) = 0;
+
+ // Waiters may implement this in order to provide an extra condition for
+ // two Waiters to be considered equal. In WaitableEvent::Dequeue, if the
+ // pointers match then this function is called as a final check. See the
+ // comments in ~Handle for why.
+ virtual bool Compare(void* tag) = 0;
+ };
+
+ private:
+ friend class WaitableEventWatcher;
+
+#if defined(OS_WIN)
+ HANDLE handle_;
+#else
+ // On Windows, one can close a HANDLE which is currently being waited on. The
+ // MSDN documentation says that the resulting behaviour is 'undefined', but
+ // it doesn't crash. However, if we were to include the following members
+ // directly then, on POSIX, one couldn't use WaitableEventWatcher to watch an
+ // event which gets deleted. This mismatch has bitten us several times now,
+ // so we have a kernel of the WaitableEvent, which is reference counted.
+ // WaitableEventWatchers may then take a reference and thus match the Windows
+ // behaviour.
+ struct WaitableEventKernel final {
+ public:
+ NS_INLINE_DECL_THREADSAFE_REFCOUNTING(WaitableEventKernel)
+ WaitableEventKernel(bool manual_reset, bool initially_signaled)
+ : manual_reset_(manual_reset),
+ signaled_(initially_signaled) {
+ }
+
+ bool Dequeue(Waiter* waiter, void* tag);
+
+ Lock lock_;
+ const bool manual_reset_;
+ bool signaled_;
+ std::list<Waiter*> waiters_;
+ protected:
+ ~WaitableEventKernel() {}
+ };
+
+ RefPtr<WaitableEventKernel> kernel_;
+
+ bool SignalAll();
+ bool SignalOne();
+ void Enqueue(Waiter* waiter);
+
+ // When dealing with arrays of WaitableEvent*, we want to sort by the address
+ // of the WaitableEvent in order to have a globally consistent locking order.
+ // In that case we keep them, in sorted order, in an array of pairs where the
+ // second element is the index of the WaitableEvent in the original,
+ // unsorted, array.
+ typedef std::pair<WaitableEvent*, size_t> WaiterAndIndex;
+ static size_t EnqueueMany(WaiterAndIndex* waitables,
+ size_t count, Waiter* waiter);
+#endif
+
+ DISALLOW_COPY_AND_ASSIGN(WaitableEvent);
+};
+
+} // namespace base
+
+#endif // BASE_WAITABLE_EVENT_H_
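To make the header comment above concrete, here is a minimal usage sketch; it is not part of the patch, and the thread that runs Producer is assumed to be created by whatever threading facility the caller already has.

#include "base/waitable_event.h"

// An auto-reset event that starts unsignaled: one waiter is released per
// Signal(), matching the manual_reset=false constructor argument above.
void Producer(base::WaitableEvent* done) {
  // ... do the work the other thread is waiting for ...
  done->Signal();   // wakes the thread blocked in Wait()
}

void Consumer() {
  base::WaitableEvent done(false /* manual_reset */,
                           false /* initially_signaled */);
  // Hand &done to another thread that will run Producer(&done), then:
  done.Wait();      // blocks until Producer() signals
}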
diff --git a/ipc/chromium/src/base/waitable_event_posix.cc b/ipc/chromium/src/base/waitable_event_posix.cc
new file mode 100644
index 000000000..ed137c845
--- /dev/null
+++ b/ipc/chromium/src/base/waitable_event_posix.cc
@@ -0,0 +1,392 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/waitable_event.h"
+
+#include "base/condition_variable.h"
+#include "base/lock.h"
+#include "base/message_loop.h"
+
+// -----------------------------------------------------------------------------
+// A WaitableEvent on POSIX is implemented as a wait-list. Currently we don't
+// support cross-process events (where one process can signal an event which
+// others are waiting on). Because of this, we can avoid having one thread per
+// listener in several cases.
+//
+// The WaitableEvent maintains a list of waiters, protected by a lock. Each
+// waiter is either an async wait, in which case we have a Task and the
+// MessageLoop to run it on, or a blocking wait, in which case we have the
+// condition variable to signal.
+//
+// Waiting involves grabbing the lock and adding oneself to the wait list. Async
+// waits can be canceled, which means grabbing the lock and removing oneself
+// from the list.
+//
+// Waiting on multiple events is handled by adding a single, synchronous wait to
+// the wait-list of many events. An event passes a pointer to itself when
+// firing a waiter and so we can store that pointer to find out which event
+// triggered.
+// -----------------------------------------------------------------------------
+
+namespace base {
+
+// -----------------------------------------------------------------------------
+// The Waiter interface (declared in the header) is just an abstract base
+// class for waking the two types of waiters.
+// -----------------------------------------------------------------------------
+WaitableEvent::WaitableEvent(bool manual_reset, bool initially_signaled)
+ : kernel_(new WaitableEventKernel(manual_reset, initially_signaled)) {
+}
+
+WaitableEvent::~WaitableEvent() {
+}
+
+void WaitableEvent::Reset() {
+ AutoLock locked(kernel_->lock_);
+ kernel_->signaled_ = false;
+}
+
+void WaitableEvent::Signal() {
+ AutoLock locked(kernel_->lock_);
+
+ if (kernel_->signaled_)
+ return;
+
+ if (kernel_->manual_reset_) {
+ SignalAll();
+ kernel_->signaled_ = true;
+ } else {
+    // In the auto-reset case, if no waiter was woken, leave the event
+    // signaled so that the next call to Wait will return immediately.
+ if (!SignalOne())
+ kernel_->signaled_ = true;
+ }
+}
+
+bool WaitableEvent::IsSignaled() {
+ AutoLock locked(kernel_->lock_);
+
+ const bool result = kernel_->signaled_;
+ if (result && !kernel_->manual_reset_)
+ kernel_->signaled_ = false;
+ return result;
+}
+
+// -----------------------------------------------------------------------------
+// Synchronous waits
+
+// -----------------------------------------------------------------------------
+// This is a synchronous waiter. The thread is waiting on the given condition
+// variable and the fired flag in this object.
+// -----------------------------------------------------------------------------
+class SyncWaiter : public WaitableEvent::Waiter {
+ public:
+ SyncWaiter(ConditionVariable* cv, Lock* lock)
+ : fired_(false),
+ cv_(cv),
+ lock_(lock),
+ signaling_event_(NULL) {
+ }
+
+ bool Fire(WaitableEvent *signaling_event) {
+ lock_->Acquire();
+ const bool previous_value = fired_;
+ fired_ = true;
+ if (!previous_value)
+ signaling_event_ = signaling_event;
+ lock_->Release();
+
+ if (previous_value)
+ return false;
+
+ cv_->Broadcast();
+
+    // SyncWaiters are allocated on the stack of the blocking thread.
+ return true;
+ }
+
+ WaitableEvent* signaled_event() const {
+ return signaling_event_;
+ }
+
+ // ---------------------------------------------------------------------------
+  // These waiters are always stack-allocated and don't delete themselves.
+  // Thus there's no ABA problem, and the tag is simply the object pointer.
+ // ---------------------------------------------------------------------------
+ bool Compare(void* tag) {
+ return this == tag;
+ }
+
+ // ---------------------------------------------------------------------------
+ // Called with lock held.
+ // ---------------------------------------------------------------------------
+ bool fired() const {
+ return fired_;
+ }
+
+ // ---------------------------------------------------------------------------
+ // During a TimedWait, we need a way to make sure that an auto-reset
+ // WaitableEvent doesn't think that this event has been signaled between
+ // unlocking it and removing it from the wait-list. Called with lock held.
+ // ---------------------------------------------------------------------------
+ void Disable() {
+ fired_ = true;
+ }
+
+ private:
+ bool fired_;
+ ConditionVariable *const cv_;
+ Lock *const lock_;
+ WaitableEvent* signaling_event_; // The WaitableEvent which woke us
+};
+
+bool WaitableEvent::TimedWait(const TimeDelta& max_time) {
+ const TimeTicks end_time(TimeTicks::Now() + max_time);
+ const bool finite_time = max_time.ToInternalValue() >= 0;
+
+ kernel_->lock_.Acquire();
+ if (kernel_->signaled_) {
+ if (!kernel_->manual_reset_) {
+ // In this case we were signaled when we had no waiters. Now that
+ // someone has waited upon us, we can automatically reset.
+ kernel_->signaled_ = false;
+ }
+
+ kernel_->lock_.Release();
+ return true;
+ }
+
+ Lock lock;
+ lock.Acquire();
+ ConditionVariable cv(&lock);
+ SyncWaiter sw(&cv, &lock);
+
+ Enqueue(&sw);
+ kernel_->lock_.Release();
+  // We are violating locking order here by holding the SyncWaiter lock but not
+  // the WaitableEvent lock. However, this is safe because we never acquire
+  // @lock_ while still holding @lock.
+
+ for (;;) {
+ const TimeTicks current_time(TimeTicks::Now());
+
+ if (sw.fired() || (finite_time && current_time >= end_time)) {
+ const bool return_value = sw.fired();
+
+      // We can't acquire @lock_ before releasing @lock (because of locking
+      // order). In between the two, a signal could fire and @sw would accept
+      // it, yet we would still return false, so the signal would be lost on
+      // an auto-reset WaitableEvent. Thus we call Disable, which makes
+      // SyncWaiter::Fire return false.
+ sw.Disable();
+ lock.Release();
+
+ kernel_->lock_.Acquire();
+ kernel_->Dequeue(&sw, &sw);
+ kernel_->lock_.Release();
+
+ return return_value;
+ }
+
+ if (finite_time) {
+ const TimeDelta max_wait(end_time - current_time);
+ cv.TimedWait(max_wait);
+ } else {
+ cv.Wait();
+ }
+ }
+}
+
+bool WaitableEvent::Wait() {
+ return TimedWait(TimeDelta::FromSeconds(-1));
+}
+
+// -----------------------------------------------------------------------------
+
+
+// -----------------------------------------------------------------------------
+// Synchronous waiting on multiple objects.
+
+static bool // StrictWeakOrdering
+cmp_fst_addr(const std::pair<WaitableEvent*, unsigned> &a,
+ const std::pair<WaitableEvent*, unsigned> &b) {
+ return a.first < b.first;
+}
+
+// static
+size_t WaitableEvent::WaitMany(WaitableEvent** raw_waitables,
+ size_t count) {
+ DCHECK(count) << "Cannot wait on no events";
+
+ // We need to acquire the locks in a globally consistent order. Thus we sort
+  // the array of waitables by address. We actually sort pairs so that we can
+ // map back to the original index values later.
+ std::vector<std::pair<WaitableEvent*, size_t> > waitables;
+ waitables.reserve(count);
+ for (size_t i = 0; i < count; ++i)
+ waitables.push_back(std::make_pair(raw_waitables[i], i));
+
+ DCHECK_EQ(count, waitables.size());
+
+ sort(waitables.begin(), waitables.end(), cmp_fst_addr);
+
+ // The set of waitables must be distinct. Since we have just sorted by
+ // address, we can check this cheaply by comparing pairs of consecutive
+ // elements.
+ for (size_t i = 0; i < waitables.size() - 1; ++i) {
+ DCHECK(waitables[i].first != waitables[i+1].first);
+ }
+
+ Lock lock;
+ ConditionVariable cv(&lock);
+ SyncWaiter sw(&cv, &lock);
+
+ const size_t r = EnqueueMany(&waitables[0], count, &sw);
+ if (r) {
+ // One of the events is already signaled. The SyncWaiter has not been
+ // enqueued anywhere. EnqueueMany returns the count of remaining waitables
+ // when the signaled one was seen, so the index of the signaled event is
+ // @count - @r.
+ return waitables[count - r].second;
+ }
+
+ // At this point, we hold the locks on all the WaitableEvents and we have
+ // enqueued our waiter in them all.
+ lock.Acquire();
+ // Release the WaitableEvent locks in the reverse order
+ for (size_t i = 0; i < count; ++i) {
+ waitables[count - (1 + i)].first->kernel_->lock_.Release();
+ }
+
+ for (;;) {
+ if (sw.fired())
+ break;
+
+ cv.Wait();
+ }
+ lock.Release();
+
+ // The address of the WaitableEvent which fired is stored in the SyncWaiter.
+ WaitableEvent *const signaled_event = sw.signaled_event();
+ // This will store the index of the raw_waitables which fired.
+ size_t signaled_index = 0;
+
+ // Take the locks of each WaitableEvent in turn (except the signaled one) and
+ // remove our SyncWaiter from the wait-list
+ for (size_t i = 0; i < count; ++i) {
+ if (raw_waitables[i] != signaled_event) {
+ raw_waitables[i]->kernel_->lock_.Acquire();
+ // There's no possible ABA issue with the address of the SyncWaiter here
+ // because it lives on the stack. Thus the tag value is just the pointer
+ // value again.
+ raw_waitables[i]->kernel_->Dequeue(&sw, &sw);
+ raw_waitables[i]->kernel_->lock_.Release();
+ } else {
+ signaled_index = i;
+ }
+ }
+
+ return signaled_index;
+}
+
+// -----------------------------------------------------------------------------
+// If return value == 0:
+// The locks of the WaitableEvents have been taken in order and the Waiter has
+// been enqueued in the wait-list of each. None of the WaitableEvents are
+// currently signaled
+// else:
+// None of the WaitableEvent locks are held. The Waiter has not been enqueued
+//   in any of them, and the return value is the index of the first signaled
+//   WaitableEvent, counted from the end of the array.
+// -----------------------------------------------------------------------------
+// static
+size_t WaitableEvent::EnqueueMany
+ (std::pair<WaitableEvent*, size_t>* waitables,
+ size_t count, Waiter* waiter) {
+ if (!count)
+ return 0;
+
+ waitables[0].first->kernel_->lock_.Acquire();
+ if (waitables[0].first->kernel_->signaled_) {
+ if (!waitables[0].first->kernel_->manual_reset_)
+ waitables[0].first->kernel_->signaled_ = false;
+ waitables[0].first->kernel_->lock_.Release();
+ return count;
+ }
+
+ const size_t r = EnqueueMany(waitables + 1, count - 1, waiter);
+ if (r) {
+ waitables[0].first->kernel_->lock_.Release();
+ } else {
+ waitables[0].first->Enqueue(waiter);
+ }
+
+ return r;
+}
+
+// -----------------------------------------------------------------------------
+
+
+// -----------------------------------------------------------------------------
+// Private functions...
+
+// -----------------------------------------------------------------------------
+// Wake all waiting waiters. Called with lock held.
+// -----------------------------------------------------------------------------
+bool WaitableEvent::SignalAll() {
+ bool signaled_at_least_one = false;
+
+ for (std::list<Waiter*>::iterator
+ i = kernel_->waiters_.begin(); i != kernel_->waiters_.end(); ++i) {
+ if ((*i)->Fire(this))
+ signaled_at_least_one = true;
+ }
+
+ kernel_->waiters_.clear();
+ return signaled_at_least_one;
+}
+
+// ---------------------------------------------------------------------------
+// Try to wake a single waiter. Return true if one was woken. Called with lock
+// held.
+// ---------------------------------------------------------------------------
+bool WaitableEvent::SignalOne() {
+ for (;;) {
+ if (kernel_->waiters_.empty())
+ return false;
+
+ const bool r = (*kernel_->waiters_.begin())->Fire(this);
+ kernel_->waiters_.pop_front();
+ if (r)
+ return true;
+ }
+}
+
+// -----------------------------------------------------------------------------
+// Add a waiter to the list of those waiting. Called with lock held.
+// -----------------------------------------------------------------------------
+void WaitableEvent::Enqueue(Waiter* waiter) {
+ kernel_->waiters_.push_back(waiter);
+}
+
+// -----------------------------------------------------------------------------
+// Remove a waiter from the list of those waiting. Return true if the waiter was
+// actually removed. Called with lock held.
+// -----------------------------------------------------------------------------
+bool WaitableEvent::WaitableEventKernel::Dequeue(Waiter* waiter, void* tag) {
+ for (std::list<Waiter*>::iterator
+ i = waiters_.begin(); i != waiters_.end(); ++i) {
+ if (*i == waiter && (*i)->Compare(tag)) {
+ waiters_.erase(i);
+ return true;
+ }
+ }
+
+ return false;
+}
+
+// -----------------------------------------------------------------------------
+
+} // namespace base
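A small sketch of the WaitMany contract described above (hypothetical events; the real callers live elsewhere): the call blocks until any one of the events fires and reports the index into the caller's original, unsorted array.

#include "base/waitable_event.h"

void WaitForEither(base::WaitableEvent* quit, base::WaitableEvent* work) {
  base::WaitableEvent* events[] = { quit, work };
  // Neither event may be deleted while this call is in progress.
  size_t index = base::WaitableEvent::WaitMany(events, 2);
  if (index == 0) {
    // quit was signaled
  } else {
    // work was signaled
  }
}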
diff --git a/ipc/chromium/src/base/waitable_event_win.cc b/ipc/chromium/src/base/waitable_event_win.cc
new file mode 100644
index 000000000..77a1f2056
--- /dev/null
+++ b/ipc/chromium/src/base/waitable_event_win.cc
@@ -0,0 +1,88 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/waitable_event.h"
+
+#include <math.h>
+#include <windows.h>
+
+#include "base/logging.h"
+#include "base/time.h"
+
+namespace base {
+
+WaitableEvent::WaitableEvent(bool manual_reset, bool signaled)
+ : handle_(CreateEvent(NULL, manual_reset, signaled, NULL)) {
+  // We're probably going to crash anyway if this is ever NULL, so we might as
+ // well make our stack reports more informative by crashing here.
+ CHECK(handle_);
+}
+
+WaitableEvent::~WaitableEvent() {
+ CloseHandle(handle_);
+}
+
+void WaitableEvent::Reset() {
+ ResetEvent(handle_);
+}
+
+void WaitableEvent::Signal() {
+ SetEvent(handle_);
+}
+
+bool WaitableEvent::IsSignaled() {
+ return TimedWait(TimeDelta::FromMilliseconds(0));
+}
+
+bool WaitableEvent::Wait() {
+ DWORD result = WaitForSingleObject(handle_, INFINITE);
+  // It is most unexpected that this should ever fail; help consumers learn
+  // about it if it does.
+ DCHECK(result == WAIT_OBJECT_0) << "WaitForSingleObject failed";
+ return result == WAIT_OBJECT_0;
+}
+
+bool WaitableEvent::TimedWait(const TimeDelta& max_time) {
+ DCHECK(max_time >= TimeDelta::FromMicroseconds(0));
+ // Be careful here. TimeDelta has a precision of microseconds, but this API
+ // is in milliseconds. If there are 5.5ms left, should the delay be 5 or 6?
+ // It should be 6 to avoid returning too early.
+ double timeout = ceil(max_time.InMillisecondsF());
+ DWORD result = WaitForSingleObject(handle_, static_cast<DWORD>(timeout));
+ switch (result) {
+ case WAIT_OBJECT_0:
+ return true;
+ case WAIT_TIMEOUT:
+ return false;
+ }
+  // It is most unexpected that this should ever fail; help consumers learn
+  // about it if it does.
+ NOTREACHED() << "WaitForSingleObject failed";
+ return false;
+}
+
+// static
+size_t WaitableEvent::WaitMany(WaitableEvent** events, size_t count) {
+ HANDLE handles[MAXIMUM_WAIT_OBJECTS];
+ CHECK(count <= MAXIMUM_WAIT_OBJECTS)
+ << "Can only wait on " << MAXIMUM_WAIT_OBJECTS << " with WaitMany";
+
+ for (size_t i = 0; i < count; ++i)
+ handles[i] = events[i]->handle();
+
+ DWORD result =
+ WaitForMultipleObjects(count, handles,
+ FALSE, // don't wait for all the objects
+ INFINITE); // no timeout
+ if (result >= WAIT_OBJECT_0 + count) {
+ NOTREACHED() << "WaitForMultipleObjects failed: " << GetLastError();
+ return 0;
+ }
+
+ return result - WAIT_OBJECT_0;
+}
+
+} // namespace base
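The millisecond rounding in TimedWait above can be spelled out with a quick sketch, assuming only TimeDelta::FromMicroseconds and InMillisecondsF, both already used in this file: a 5.5 ms deadline must become a 6 ms Win32 wait so the call cannot return before the requested time has elapsed.

#include <math.h>
#include <windows.h>
#include "base/time.h"

void TimeoutRoundingExample() {
  base::TimeDelta remaining = base::TimeDelta::FromMicroseconds(5500);  // 5.5 ms
  double ms = ceil(remaining.InMillisecondsF());   // rounds up to 6.0
  DWORD timeout = static_cast<DWORD>(ms);          // 6, handed to WaitForSingleObject
}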
diff --git a/ipc/chromium/src/base/win_util.cc b/ipc/chromium/src/base/win_util.cc
new file mode 100644
index 000000000..9b709ec98
--- /dev/null
+++ b/ipc/chromium/src/base/win_util.cc
@@ -0,0 +1,40 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/win_util.h"
+
+#include <map>
+#include <sddl.h>
+
+#include "base/logging.h"
+#include "base/singleton.h"
+#include "base/string_util.h"
+
+namespace win_util {
+
+std::wstring FormatMessage(unsigned messageid) {
+ wchar_t* string_buffer = NULL;
+ unsigned string_length = ::FormatMessage(
+ FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM |
+ FORMAT_MESSAGE_IGNORE_INSERTS, NULL, messageid, 0,
+ reinterpret_cast<wchar_t *>(&string_buffer), 0, NULL);
+
+ std::wstring formatted_string;
+ if (string_buffer) {
+ formatted_string = string_buffer;
+ LocalFree(reinterpret_cast<HLOCAL>(string_buffer));
+ } else {
+    // The formatting failed; simply convert the message value into a string.
+ SStringPrintf(&formatted_string, L"message number %d", messageid);
+ }
+ return formatted_string;
+}
+
+std::wstring FormatLastWin32Error() {
+ return FormatMessage(GetLastError());
+}
+
+} // namespace win_util
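A minimal sketch of a call site (hypothetical; not part of the patch) for the helper above: convert the last Win32 error into readable text right after a failing API call.

#include <windows.h>
#include <string>
#include "base/win_util.h"

void ReportDeleteFailure() {
  if (!::DeleteFileW(L"C:\\tmp\\does-not-exist.txt")) {
    // e.g. L"The system cannot find the file specified.\r\n"
    std::wstring reason = win_util::FormatLastWin32Error();
    // ... hand `reason` to whatever error reporting the caller uses ...
  }
}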
diff --git a/ipc/chromium/src/base/win_util.h b/ipc/chromium/src/base/win_util.h
new file mode 100644
index 000000000..b37411f60
--- /dev/null
+++ b/ipc/chromium/src/base/win_util.h
@@ -0,0 +1,22 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_WIN_UTIL_H__
+#define BASE_WIN_UTIL_H__
+
+#include <windows.h>
+#include <aclapi.h>
+
+#include <string>
+
+namespace win_util {
+
+// Uses the last Win32 error to generate a human-readable message string.
+std::wstring FormatLastWin32Error();
+
+} // namespace win_util
+
+#endif // BASE_WIN_UTIL_H__