path: root/xpcom/threads
author     Matt A. Tobin <mattatobin@localhost.localdomain>   2018-02-02 04:16:08 -0500
committer  Matt A. Tobin <mattatobin@localhost.localdomain>   2018-02-02 04:16:08 -0500
commit     5f8de423f190bbb79a62f804151bc24824fa32d8 (patch)
tree       10027f336435511475e392454359edea8e25895d /xpcom/threads
parent     49ee0794b5d912db1f95dce6eb52d781dc210db5 (diff)
Add m-esr52 at 52.6.0
Diffstat (limited to 'xpcom/threads')
-rw-r--r--  xpcom/threads/AbstractThread.cpp  192
-rw-r--r--  xpcom/threads/AbstractThread.h  111
-rw-r--r--  xpcom/threads/BackgroundHangMonitor.cpp  734
-rw-r--r--  xpcom/threads/BackgroundHangMonitor.h  246
-rw-r--r--  xpcom/threads/HangAnnotations.cpp  262
-rw-r--r--  xpcom/threads/HangAnnotations.h  104
-rw-r--r--  xpcom/threads/HangMonitor.cpp  434
-rw-r--r--  xpcom/threads/HangMonitor.h  58
-rw-r--r--  xpcom/threads/LazyIdleThread.cpp  624
-rw-r--r--  xpcom/threads/LazyIdleThread.h  226
-rw-r--r--  xpcom/threads/LeakRefPtr.h  52
-rw-r--r--  xpcom/threads/MainThreadIdlePeriod.cpp  76
-rw-r--r--  xpcom/threads/MainThreadIdlePeriod.h  28
-rw-r--r--  xpcom/threads/MozPromise.h  1067
-rw-r--r--  xpcom/threads/SharedThreadPool.cpp  224
-rw-r--r--  xpcom/threads/SharedThreadPool.h  129
-rw-r--r--  xpcom/threads/StateMirroring.h  378
-rw-r--r--  xpcom/threads/StateWatching.h  317
-rw-r--r--  xpcom/threads/SyncRunnable.h  129
-rw-r--r--  xpcom/threads/TaskDispatcher.h  276
-rw-r--r--  xpcom/threads/TaskQueue.cpp  271
-rw-r--r--  xpcom/threads/TaskQueue.h  203
-rw-r--r--  xpcom/threads/ThreadStackHelper.cpp  726
-rw-r--r--  xpcom/threads/ThreadStackHelper.h  147
-rw-r--r--  xpcom/threads/ThrottledEventQueue.cpp  446
-rw-r--r--  xpcom/threads/ThrottledEventQueue.h  94
-rw-r--r--  xpcom/threads/TimerThread.cpp  752
-rw-r--r--  xpcom/threads/TimerThread.h  115
-rw-r--r--  xpcom/threads/moz.build  89
-rw-r--r--  xpcom/threads/nsEnvironment.cpp  163
-rw-r--r--  xpcom/threads/nsEnvironment.h  36
-rw-r--r--  xpcom/threads/nsEventQueue.cpp  155
-rw-r--r--  xpcom/threads/nsEventQueue.h  123
-rw-r--r--  xpcom/threads/nsICancelableRunnable.h  38
-rw-r--r--  xpcom/threads/nsIEnvironment.idl  55
-rw-r--r--  xpcom/threads/nsIEventTarget.idl  127
-rw-r--r--  xpcom/threads/nsIIdlePeriod.idl  32
-rw-r--r--  xpcom/threads/nsIIncrementalRunnable.h  41
-rw-r--r--  xpcom/threads/nsIProcess.idl  99
-rw-r--r--  xpcom/threads/nsIRunnable.idl  27
-rw-r--r--  xpcom/threads/nsISupportsPriority.idl  45
-rw-r--r--  xpcom/threads/nsIThread.idl  149
-rw-r--r--  xpcom/threads/nsIThreadInternal.idl  135
-rw-r--r--  xpcom/threads/nsIThreadManager.idl  68
-rw-r--r--  xpcom/threads/nsIThreadPool.idl  88
-rw-r--r--  xpcom/threads/nsITimer.idl  244
-rw-r--r--  xpcom/threads/nsMemoryPressure.cpp  54
-rw-r--r--  xpcom/threads/nsMemoryPressure.h  77
-rw-r--r--  xpcom/threads/nsProcess.h  82
-rw-r--r--  xpcom/threads/nsProcessCommon.cpp  663
-rw-r--r--  xpcom/threads/nsThread.cpp  1500
-rw-r--r--  xpcom/threads/nsThread.h  284
-rw-r--r--  xpcom/threads/nsThreadManager.cpp  342
-rw-r--r--  xpcom/threads/nsThreadManager.h  89
-rw-r--r--  xpcom/threads/nsThreadPool.cpp  449
-rw-r--r--  xpcom/threads/nsThreadPool.h  65
-rw-r--r--  xpcom/threads/nsThreadSyncDispatch.h  50
-rw-r--r--  xpcom/threads/nsTimerImpl.cpp  658
-rw-r--r--  xpcom/threads/nsTimerImpl.h  207
59 files changed, 14655 insertions, 0 deletions
diff --git a/xpcom/threads/AbstractThread.cpp b/xpcom/threads/AbstractThread.cpp
new file mode 100644
index 000000000..451b317d8
--- /dev/null
+++ b/xpcom/threads/AbstractThread.cpp
@@ -0,0 +1,192 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/AbstractThread.h"
+
+#include "mozilla/ClearOnShutdown.h"
+#include "mozilla/Maybe.h"
+#include "mozilla/MozPromise.h" // We initialize the MozPromise logging in this file.
+#include "mozilla/StaticPtr.h"
+#include "mozilla/StateWatching.h" // We initialize the StateWatching logging in this file.
+#include "mozilla/TaskQueue.h"
+#include "mozilla/TaskDispatcher.h"
+#include "mozilla/Unused.h"
+
+#include "nsThreadUtils.h"
+#include "nsContentUtils.h"
+#include "nsServiceManagerUtils.h"
+
+
+namespace mozilla {
+
+LazyLogModule gMozPromiseLog("MozPromise");
+LazyLogModule gStateWatchingLog("StateWatching");
+
+StaticRefPtr<AbstractThread> sMainThread;
+MOZ_THREAD_LOCAL(AbstractThread*) AbstractThread::sCurrentThreadTLS;
+
+class XPCOMThreadWrapper : public AbstractThread
+{
+public:
+ explicit XPCOMThreadWrapper(nsIThread* aTarget, bool aRequireTailDispatch)
+ : AbstractThread(aRequireTailDispatch)
+ , mTarget(aTarget)
+ {
+ // Our current mechanism of implementing tail dispatch is appshell-specific.
+ // This is because a very similar mechanism already exists on the main
+ // thread, and we want to avoid making event dispatch on the main thread
+ // more complicated than it already is.
+ //
+ // If you need to use tail dispatch on other XPCOM threads, you'll need to
+ // implement an nsIThreadObserver to fire the tail dispatcher at the
+ // appropriate times.
+ MOZ_ASSERT_IF(aRequireTailDispatch,
+ NS_IsMainThread() && NS_GetCurrentThread() == aTarget);
+ }
+
+ virtual void Dispatch(already_AddRefed<nsIRunnable> aRunnable,
+ DispatchFailureHandling aFailureHandling = AssertDispatchSuccess,
+ DispatchReason aReason = NormalDispatch) override
+ {
+ nsCOMPtr<nsIRunnable> r = aRunnable;
+ AbstractThread* currentThread;
+ if (aReason != TailDispatch && (currentThread = GetCurrent()) && RequiresTailDispatch(currentThread)) {
+ currentThread->TailDispatcher().AddTask(this, r.forget(), aFailureHandling);
+ return;
+ }
+
+ nsresult rv = mTarget->Dispatch(r, NS_DISPATCH_NORMAL);
+ MOZ_DIAGNOSTIC_ASSERT(aFailureHandling == DontAssertDispatchSuccess || NS_SUCCEEDED(rv));
+ Unused << rv;
+ }
+
+ virtual bool IsCurrentThreadIn() override
+ {
+ // Compare NSPR threads so that this works after shutdown when
+ // NS_GetCurrentThread starts returning null.
+ PRThread* thread = nullptr;
+ mTarget->GetPRThread(&thread);
+ bool in = PR_GetCurrentThread() == thread;
+ return in;
+ }
+
+ void FireTailDispatcher()
+ {
+ MOZ_DIAGNOSTIC_ASSERT(mTailDispatcher.isSome());
+ mTailDispatcher.ref().DrainDirectTasks();
+ mTailDispatcher.reset();
+ }
+
+ virtual TaskDispatcher& TailDispatcher() override
+ {
+ MOZ_ASSERT(this == sMainThread); // See the comment in the constructor.
+ MOZ_ASSERT(IsCurrentThreadIn());
+ if (!mTailDispatcher.isSome()) {
+ mTailDispatcher.emplace(/* aIsTailDispatcher = */ true);
+
+ nsCOMPtr<nsIRunnable> event = NewRunnableMethod(this, &XPCOMThreadWrapper::FireTailDispatcher);
+ nsContentUtils::RunInStableState(event.forget());
+ }
+
+ return mTailDispatcher.ref();
+ }
+
+ virtual bool MightHaveTailTasks() override
+ {
+ return mTailDispatcher.isSome();
+ }
+
+ virtual nsIThread* AsXPCOMThread() override { return mTarget; }
+
+private:
+ RefPtr<nsIThread> mTarget;
+ Maybe<AutoTaskDispatcher> mTailDispatcher;
+};
+
+void
+AbstractThread::TailDispatchTasksFor(AbstractThread* aThread)
+{
+ if (MightHaveTailTasks()) {
+ TailDispatcher().DispatchTasksFor(aThread);
+ }
+}
+
+bool
+AbstractThread::HasTailTasksFor(AbstractThread* aThread)
+{
+ if (!MightHaveTailTasks()) {
+ return false;
+ }
+ return TailDispatcher().HasTasksFor(aThread);
+}
+
+bool
+AbstractThread::RequiresTailDispatch(AbstractThread* aThread) const
+{
+ MOZ_ASSERT(aThread);
+ // We require tail dispatch if both the source and destination
+ // threads support it.
+ return SupportsTailDispatch() && aThread->SupportsTailDispatch();
+}
+
+bool
+AbstractThread::RequiresTailDispatchFromCurrentThread() const
+{
+ AbstractThread* current = GetCurrent();
+ return current && RequiresTailDispatch(current);
+}
+
+AbstractThread*
+AbstractThread::MainThread()
+{
+ MOZ_ASSERT(sMainThread);
+ return sMainThread;
+}
+
+void
+AbstractThread::InitStatics()
+{
+ MOZ_ASSERT(NS_IsMainThread());
+ MOZ_ASSERT(!sMainThread);
+ nsCOMPtr<nsIThread> mainThread;
+ NS_GetMainThread(getter_AddRefs(mainThread));
+ MOZ_DIAGNOSTIC_ASSERT(mainThread);
+ sMainThread = new XPCOMThreadWrapper(mainThread.get(), /* aRequireTailDispatch = */ true);
+ ClearOnShutdown(&sMainThread);
+
+ if (!sCurrentThreadTLS.init()) {
+ MOZ_CRASH();
+ }
+ sCurrentThreadTLS.set(sMainThread);
+}
+
+void
+AbstractThread::DispatchStateChange(already_AddRefed<nsIRunnable> aRunnable)
+{
+ GetCurrent()->TailDispatcher().AddStateChangeTask(this, Move(aRunnable));
+}
+
+/* static */ void
+AbstractThread::DispatchDirectTask(already_AddRefed<nsIRunnable> aRunnable)
+{
+ GetCurrent()->TailDispatcher().AddDirectTask(Move(aRunnable));
+}
+
+/* static */
+already_AddRefed<AbstractThread>
+AbstractThread::CreateXPCOMThreadWrapper(nsIThread* aThread, bool aRequireTailDispatch)
+{
+ RefPtr<XPCOMThreadWrapper> wrapper = new XPCOMThreadWrapper(aThread, aRequireTailDispatch);
+ // Set the thread-local sCurrentThreadTLS to point to the wrapper on the
+ // target thread. This ensures that sCurrentThreadTLS is as expected by
+ // AbstractThread::GetCurrent() on the target thread.
+ nsCOMPtr<nsIRunnable> r =
+ NS_NewRunnableFunction([wrapper]() { sCurrentThreadTLS.set(wrapper); });
+ aThread->Dispatch(r.forget(), NS_DISPATCH_NORMAL);
+ return wrapper.forget();
+}
+
+} // namespace mozilla
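
A usage sketch (illustrative, not part of this patch) of CreateXPCOMThreadWrapper as defined above. NS_NewThread and the lambda body are assumptions for the example; tail dispatch is requested as false because, per the XPCOMThreadWrapper constructor, it is only supported for the main-thread wrapper.

// Sketch: wrap a freshly created XPCOM thread so MozPromise / state-watching
// code can target it as an AbstractThread.
#include "mozilla/AbstractThread.h"
#include "nsThreadUtils.h"

void ExampleWrapThread()
{
  nsCOMPtr<nsIThread> rawThread;
  if (NS_FAILED(NS_NewThread(getter_AddRefs(rawThread)))) {
    return;
  }
  RefPtr<mozilla::AbstractThread> wrapped =
    mozilla::AbstractThread::CreateXPCOMThreadWrapper(
      rawThread, /* aRequireTailDispatch = */ false);
  // With tail dispatch disabled, this goes straight to nsIThread::Dispatch.
  wrapped->Dispatch(NS_NewRunnableFunction([]() {
    // Work that must never run in parallel with other tasks on this target.
  }));
}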
diff --git a/xpcom/threads/AbstractThread.h b/xpcom/threads/AbstractThread.h
new file mode 100644
index 000000000..ca6ec1b84
--- /dev/null
+++ b/xpcom/threads/AbstractThread.h
@@ -0,0 +1,111 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#if !defined(AbstractThread_h_)
+#define AbstractThread_h_
+
+#include "nscore.h"
+#include "nsIRunnable.h"
+#include "nsISupportsImpl.h"
+#include "nsIThread.h"
+#include "mozilla/RefPtr.h"
+
+#include "mozilla/ThreadLocal.h"
+
+namespace mozilla {
+
+class TaskQueue;
+class TaskDispatcher;
+
+/*
+ * We often want to run tasks on a target that guarantees that events will never
+ * run in parallel. There are various target types that achieve this - namely
+ * nsIThread and TaskQueue. Note that nsIThreadPool (which implements
+ * nsIEventTarget) does not have this property, so we do not want to use
+ * nsIEventTarget for this purpose. This class encapsulates the specifics of
+ * the structures we might use here and provides a consistent interface.
+ *
+ * At present, the supported AbstractThread implementations are TaskQueue
+ * and AbstractThread::MainThread. If you add support for another thread that is
+ * not the MainThread, you'll need to figure out how to make it unique such that
+ * comparing AbstractThread pointers is equivalent to comparing nsIThread pointers.
+ */
+class AbstractThread
+{
+public:
+ // Returns the AbstractThread that the caller is currently running in, or null
+ // if the caller is not running in an AbstractThread.
+ static AbstractThread* GetCurrent() { return sCurrentThreadTLS.get(); }
+
+ AbstractThread(bool aSupportsTailDispatch) : mSupportsTailDispatch(aSupportsTailDispatch) {}
+
+ static already_AddRefed<AbstractThread>
+ CreateXPCOMThreadWrapper(nsIThread* aThread, bool aRequireTailDispatch);
+
+ NS_INLINE_DECL_THREADSAFE_REFCOUNTING(AbstractThread);
+
+ enum DispatchFailureHandling { AssertDispatchSuccess, DontAssertDispatchSuccess };
+ enum DispatchReason { NormalDispatch, TailDispatch };
+ virtual void Dispatch(already_AddRefed<nsIRunnable> aRunnable,
+ DispatchFailureHandling aHandling = AssertDispatchSuccess,
+ DispatchReason aReason = NormalDispatch) = 0;
+
+ virtual bool IsCurrentThreadIn() = 0;
+
+ // Returns true if dispatch is generally reliable. This is used to guard
+ // against FlushableTaskQueues, which should go away.
+ virtual bool IsDispatchReliable() { return true; }
+
+  // Returns a TaskDispatcher that will dispatch its tasks when the currently-
+  // running task pops off the stack.
+  //
+  // May only be called when running within the thread it is invoked on, and
+  // only on threads which support it.
+ virtual TaskDispatcher& TailDispatcher() = 0;
+
+ // Returns true if we have tail tasks scheduled, or if this isn't known.
+ // Returns false if we definitely don't have any tail tasks.
+ virtual bool MightHaveTailTasks() { return true; }
+
+  // Helper functions for methods on the tail TaskDispatcher. These check
+  // MightHaveTailTasks to avoid allocating a TailDispatcher if it isn't
+  // needed.
+ void TailDispatchTasksFor(AbstractThread* aThread);
+ bool HasTailTasksFor(AbstractThread* aThread);
+
+ // Returns true if this supports the tail dispatcher.
+ bool SupportsTailDispatch() const { return mSupportsTailDispatch; }
+
+ // Returns true if this thread requires all dispatches originating from
+ // aThread go through the tail dispatcher.
+ bool RequiresTailDispatch(AbstractThread* aThread) const;
+ bool RequiresTailDispatchFromCurrentThread() const;
+
+ virtual TaskQueue* AsTaskQueue() { MOZ_CRASH("Not a task queue!"); }
+ virtual nsIThread* AsXPCOMThread() { MOZ_CRASH("Not an XPCOM thread!"); }
+
+ // Convenience method for getting an AbstractThread for the main thread.
+ static AbstractThread* MainThread();
+
+ // Must be called exactly once during startup.
+ static void InitStatics();
+
+ void DispatchStateChange(already_AddRefed<nsIRunnable> aRunnable);
+
+ static void DispatchDirectTask(already_AddRefed<nsIRunnable> aRunnable);
+
+protected:
+ virtual ~AbstractThread() {}
+ static MOZ_THREAD_LOCAL(AbstractThread*) sCurrentThreadTLS;
+
+ // True if we want to require that every task dispatched from tasks running in
+ // this queue go through our queue's tail dispatcher.
+ const bool mSupportsTailDispatch;
+};
+
+} // namespace mozilla
+
+#endif
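
A minimal sketch of the API declared above, assuming AbstractThread::InitStatics() has already run during XPCOM startup (as it does in practice); the lambda body is illustrative.

// Sketch: dispatch work to the main thread through the AbstractThread
// abstraction. The failure-handling argument is optional and defaults to
// AssertDispatchSuccess.
#include "mozilla/AbstractThread.h"
#include "nsThreadUtils.h"

void ExampleDispatchToMain()
{
  mozilla::AbstractThread* main = mozilla::AbstractThread::MainThread();
  main->Dispatch(NS_NewRunnableFunction([]() {
                   MOZ_ASSERT(NS_IsMainThread());
                   // Main-thread-only work goes here.
                 }),
                 mozilla::AbstractThread::DontAssertDispatchSuccess);
}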
diff --git a/xpcom/threads/BackgroundHangMonitor.cpp b/xpcom/threads/BackgroundHangMonitor.cpp
new file mode 100644
index 000000000..ac65d9f37
--- /dev/null
+++ b/xpcom/threads/BackgroundHangMonitor.cpp
@@ -0,0 +1,734 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/ArrayUtils.h"
+#include "mozilla/BackgroundHangMonitor.h"
+#include "mozilla/LinkedList.h"
+#include "mozilla/Monitor.h"
+#include "mozilla/Move.h"
+#include "mozilla/Preferences.h"
+#include "mozilla/StaticPtr.h"
+#include "mozilla/Telemetry.h"
+#include "mozilla/ThreadHangStats.h"
+#include "mozilla/ThreadLocal.h"
+
+#include "prinrval.h"
+#include "prthread.h"
+#include "ThreadStackHelper.h"
+#include "nsIObserverService.h"
+#include "nsIObserver.h"
+#include "mozilla/Services.h"
+#include "nsXULAppAPI.h"
+
+#include <algorithm>
+
+// Activate BHR only for one in every BHR_BETA_MOD users.
+// This is currently 100% of the Beta population for the Beta 45/46 e10s A/B trials.
+// It can be scaled back again in the future.
+#define BHR_BETA_MOD 1
+
+// Maximum depth of the call stack in the reported thread hangs. This value represents
+// the 99.9th percentile of the thread hangs stack depths reported by Telemetry.
+static const size_t kMaxThreadHangStackDepth = 30;
+
+// A utility comparator function used by std::unique to collapse "(* script)" entries in
+// a vector representing a call stack.
+bool StackScriptEntriesCollapser(const char* aStackEntry, const char *aAnotherStackEntry)
+{
+ return !strcmp(aStackEntry, aAnotherStackEntry) &&
+ (!strcmp(aStackEntry, "(chrome script)") || !strcmp(aStackEntry, "(content script)"));
+}
+
+namespace mozilla {
+
+/**
+ * BackgroundHangManager is the global object that
+ * manages all instances of BackgroundHangThread.
+ */
+class BackgroundHangManager : public nsIObserver
+{
+private:
+ // Background hang monitor thread function
+ static void MonitorThread(void* aData)
+ {
+ PR_SetCurrentThreadName("BgHangManager");
+
+ /* We do not hold a reference to BackgroundHangManager here
+ because the monitor thread only exists as long as the
+ BackgroundHangManager instance exists. We stop the monitor
+ thread in the BackgroundHangManager destructor, and we can
+ only get to the destructor if we don't hold a reference here. */
+ static_cast<BackgroundHangManager*>(aData)->RunMonitorThread();
+ }
+
+ // Hang monitor thread
+ PRThread* mHangMonitorThread;
+ // Stop hang monitoring
+ bool mShutdown;
+
+ BackgroundHangManager(const BackgroundHangManager&);
+ BackgroundHangManager& operator=(const BackgroundHangManager&);
+ void RunMonitorThread();
+
+public:
+ NS_DECL_THREADSAFE_ISUPPORTS
+ NS_DECL_NSIOBSERVER
+ static StaticRefPtr<BackgroundHangManager> sInstance;
+ static bool sDisabled;
+
+ // Lock for access to members of this class
+ Monitor mLock;
+ // Current time as seen by hang monitors
+ PRIntervalTime mIntervalNow;
+ // List of BackgroundHangThread instances associated with each thread
+ LinkedList<BackgroundHangThread> mHangThreads;
+
+ void Shutdown()
+ {
+ MonitorAutoLock autoLock(mLock);
+ mShutdown = true;
+ autoLock.Notify();
+ }
+
+ void Wakeup()
+ {
+ // PR_CreateThread could have failed earlier
+ if (mHangMonitorThread) {
+ // Use PR_Interrupt to avoid potentially taking a lock
+ PR_Interrupt(mHangMonitorThread);
+ }
+ }
+
+ BackgroundHangManager();
+private:
+ virtual ~BackgroundHangManager();
+};
+
+NS_IMPL_ISUPPORTS(BackgroundHangManager, nsIObserver)
+
+NS_IMETHODIMP
+BackgroundHangManager::Observe(nsISupports* aSubject, const char* aTopic, const char16_t* aData) {
+ NS_ENSURE_TRUE(!strcmp(aTopic, "profile-after-change"), NS_ERROR_UNEXPECTED);
+ BackgroundHangMonitor::DisableOnBeta();
+
+ nsCOMPtr<nsIObserverService> observerService = mozilla::services::GetObserverService();
+ MOZ_ASSERT(observerService);
+ observerService->RemoveObserver(this, "profile-after-change");
+
+ return NS_OK;
+}
+
+/**
+ * BackgroundHangThread is a per-thread object that is used
+ * by all instances of BackgroundHangMonitor to monitor hangs.
+ */
+class BackgroundHangThread : public LinkedListElement<BackgroundHangThread>
+{
+private:
+ static MOZ_THREAD_LOCAL(BackgroundHangThread*) sTlsKey;
+ static bool sTlsKeyInitialized;
+
+ BackgroundHangThread(const BackgroundHangThread&);
+ BackgroundHangThread& operator=(const BackgroundHangThread&);
+ ~BackgroundHangThread();
+
+ /* Keep a reference to the manager, so we can keep going even
+ after BackgroundHangManager::Shutdown is called. */
+ const RefPtr<BackgroundHangManager> mManager;
+ // Unique thread ID for identification
+ const PRThread* mThreadID;
+
+ void Update();
+
+public:
+ NS_INLINE_DECL_REFCOUNTING(BackgroundHangThread)
+ /**
+ * Returns the BackgroundHangThread associated with the
+ * running thread. Note that this will not find private
+ * BackgroundHangThread threads.
+ *
+ * @return BackgroundHangThread*, or nullptr if no thread
+ * is found.
+ */
+ static BackgroundHangThread* FindThread();
+
+ static void Startup()
+ {
+ /* We can tolerate init() failing. */
+ sTlsKeyInitialized = sTlsKey.init();
+ }
+
+ // Hang timeout in ticks
+ const PRIntervalTime mTimeout;
+ // PermaHang timeout in ticks
+ const PRIntervalTime mMaxTimeout;
+ // Time at last activity
+ PRIntervalTime mInterval;
+ // Time when a hang started
+ PRIntervalTime mHangStart;
+ // Is the thread in a hang
+ bool mHanging;
+ // Is the thread in a waiting state
+ bool mWaiting;
+ // Is the thread dedicated to a single BackgroundHangMonitor
+ BackgroundHangMonitor::ThreadType mThreadType;
+ // Platform-specific helper to get hang stacks
+ ThreadStackHelper mStackHelper;
+ // Stack of current hang
+ Telemetry::HangStack mHangStack;
+ // Statistics for telemetry
+ Telemetry::ThreadHangStats mStats;
+ // Annotations for the current hang
+ UniquePtr<HangMonitor::HangAnnotations> mAnnotations;
+ // Annotators registered for this thread
+ HangMonitor::Observer::Annotators mAnnotators;
+
+ BackgroundHangThread(const char* aName,
+ uint32_t aTimeoutMs,
+ uint32_t aMaxTimeoutMs,
+ BackgroundHangMonitor::ThreadType aThreadType = BackgroundHangMonitor::THREAD_SHARED);
+
+ // Report a hang; aManager->mLock IS locked
+ Telemetry::HangHistogram& ReportHang(PRIntervalTime aHangTime);
+ // Report a permanent hang; aManager->mLock IS locked
+ void ReportPermaHang();
+ // Called by BackgroundHangMonitor::NotifyActivity
+ void NotifyActivity()
+ {
+ MonitorAutoLock autoLock(mManager->mLock);
+ Update();
+ }
+ // Called by BackgroundHangMonitor::NotifyWait
+ void NotifyWait()
+ {
+ MonitorAutoLock autoLock(mManager->mLock);
+
+ if (mWaiting) {
+ return;
+ }
+
+ Update();
+ mWaiting = true;
+ }
+
+ // Returns true if this thread is (or might be) shared between other
+ // BackgroundHangMonitors for the monitored thread.
+ bool IsShared() {
+ return mThreadType == BackgroundHangMonitor::THREAD_SHARED;
+ }
+};
+
+
+StaticRefPtr<BackgroundHangManager> BackgroundHangManager::sInstance;
+bool BackgroundHangManager::sDisabled = false;
+
+MOZ_THREAD_LOCAL(BackgroundHangThread*) BackgroundHangThread::sTlsKey;
+bool BackgroundHangThread::sTlsKeyInitialized;
+
+BackgroundHangManager::BackgroundHangManager()
+ : mShutdown(false)
+ , mLock("BackgroundHangManager")
+ , mIntervalNow(0)
+{
+ // Lock so we don't race against the new monitor thread
+ MonitorAutoLock autoLock(mLock);
+ mHangMonitorThread = PR_CreateThread(
+ PR_USER_THREAD, MonitorThread, this,
+ PR_PRIORITY_LOW, PR_GLOBAL_THREAD, PR_JOINABLE_THREAD, 0);
+
+ MOZ_ASSERT(mHangMonitorThread, "Failed to create monitor thread");
+}
+
+BackgroundHangManager::~BackgroundHangManager()
+{
+ MOZ_ASSERT(mShutdown, "Destruction without Shutdown call");
+ MOZ_ASSERT(mHangThreads.isEmpty(), "Destruction with outstanding monitors");
+ MOZ_ASSERT(mHangMonitorThread, "No monitor thread");
+
+ // PR_CreateThread could have failed above due to resource limitation
+ if (mHangMonitorThread) {
+ // The monitor thread can only live as long as the instance lives
+ PR_JoinThread(mHangMonitorThread);
+ }
+}
+
+void
+BackgroundHangManager::RunMonitorThread()
+{
+ // Keep us locked except when waiting
+ MonitorAutoLock autoLock(mLock);
+
+ /* mIntervalNow is updated at various intervals determined by waitTime.
+ However, if an update latency is too long (due to CPU scheduling, system
+ sleep, etc.), we don't update mIntervalNow at all. This is done so that
+ long latencies in our timing are not detected as hangs. systemTime is
+ used to track PR_IntervalNow() and determine our latency. */
+
+ PRIntervalTime systemTime = PR_IntervalNow();
+ // Default values for the first iteration of thread loop
+ PRIntervalTime waitTime = PR_INTERVAL_NO_WAIT;
+ PRIntervalTime recheckTimeout = PR_INTERVAL_NO_WAIT;
+
+ while (!mShutdown) {
+
+ PR_ClearInterrupt();
+ nsresult rv = autoLock.Wait(waitTime);
+
+ PRIntervalTime newTime = PR_IntervalNow();
+ PRIntervalTime systemInterval = newTime - systemTime;
+ systemTime = newTime;
+
+ /* waitTime is a quarter of the shortest timeout value; If our timing
+ latency is low enough (less than half the shortest timeout value),
+ we can update mIntervalNow. */
+ if (MOZ_LIKELY(waitTime != PR_INTERVAL_NO_TIMEOUT &&
+ systemInterval < 2 * waitTime)) {
+ mIntervalNow += systemInterval;
+ }
+
+ /* If it's before the next recheck timeout, and our wait did not
+ get interrupted (either through Notify or PR_Interrupt), we can
+ keep the current waitTime and skip iterating through hang monitors. */
+ if (MOZ_LIKELY(systemInterval < recheckTimeout &&
+ systemInterval >= waitTime &&
+ rv == NS_OK)) {
+ recheckTimeout -= systemInterval;
+ continue;
+ }
+
+ /* We are in one of the following scenarios,
+ - Hang or permahang recheck timeout
+ - Thread added/removed
+ - Thread wait or hang ended
+ In all cases, we want to go through our list of hang
+ monitors and update waitTime and recheckTimeout. */
+ waitTime = PR_INTERVAL_NO_TIMEOUT;
+ recheckTimeout = PR_INTERVAL_NO_TIMEOUT;
+
+ // Locally hold mIntervalNow
+ PRIntervalTime intervalNow = mIntervalNow;
+
+ // iterate through hang monitors
+ for (BackgroundHangThread* currentThread = mHangThreads.getFirst();
+ currentThread; currentThread = currentThread->getNext()) {
+
+ if (currentThread->mWaiting) {
+ // Thread is waiting, not hanging
+ continue;
+ }
+ PRIntervalTime interval = currentThread->mInterval;
+ PRIntervalTime hangTime = intervalNow - interval;
+ if (MOZ_UNLIKELY(hangTime >= currentThread->mMaxTimeout)) {
+ // A permahang started
+ // Skip subsequent iterations and tolerate a race on mWaiting here
+ currentThread->mWaiting = true;
+ currentThread->mHanging = false;
+ currentThread->ReportPermaHang();
+ continue;
+ }
+
+ if (MOZ_LIKELY(!currentThread->mHanging)) {
+ if (MOZ_UNLIKELY(hangTime >= currentThread->mTimeout)) {
+ // A hang started
+ currentThread->mStackHelper.GetStack(currentThread->mHangStack);
+ currentThread->mHangStart = interval;
+ currentThread->mHanging = true;
+ currentThread->mAnnotations =
+ currentThread->mAnnotators.GatherAnnotations();
+ }
+ } else {
+ if (MOZ_LIKELY(interval != currentThread->mHangStart)) {
+ // A hang ended
+ currentThread->ReportHang(intervalNow - currentThread->mHangStart);
+ currentThread->mHanging = false;
+ }
+ }
+
+ /* If we are hanging, the next time we check for hang status is when
+ the hang turns into a permahang. If we're not hanging, the next
+ recheck timeout is when we may be entering a hang. */
+ PRIntervalTime nextRecheck;
+ if (currentThread->mHanging) {
+ nextRecheck = currentThread->mMaxTimeout;
+ } else {
+ nextRecheck = currentThread->mTimeout;
+ }
+ recheckTimeout = std::min(recheckTimeout, nextRecheck - hangTime);
+
+ if (currentThread->mTimeout != PR_INTERVAL_NO_TIMEOUT) {
+ /* We wait for a quarter of the shortest timeout
+ value to give mIntervalNow enough granularity. */
+ waitTime = std::min(waitTime, currentThread->mTimeout / 4);
+ }
+ }
+ }
+
+ /* We are shutting down now.
+ Wait for all outstanding monitors to unregister. */
+ while (!mHangThreads.isEmpty()) {
+ autoLock.Wait(PR_INTERVAL_NO_TIMEOUT);
+ }
+}
+
+
+BackgroundHangThread::BackgroundHangThread(const char* aName,
+ uint32_t aTimeoutMs,
+ uint32_t aMaxTimeoutMs,
+ BackgroundHangMonitor::ThreadType aThreadType)
+ : mManager(BackgroundHangManager::sInstance)
+ , mThreadID(PR_GetCurrentThread())
+ , mTimeout(aTimeoutMs == BackgroundHangMonitor::kNoTimeout
+ ? PR_INTERVAL_NO_TIMEOUT
+ : PR_MillisecondsToInterval(aTimeoutMs))
+ , mMaxTimeout(aMaxTimeoutMs == BackgroundHangMonitor::kNoTimeout
+ ? PR_INTERVAL_NO_TIMEOUT
+ : PR_MillisecondsToInterval(aMaxTimeoutMs))
+ , mInterval(mManager->mIntervalNow)
+ , mHangStart(mInterval)
+ , mHanging(false)
+ , mWaiting(true)
+ , mThreadType(aThreadType)
+ , mStats(aName)
+{
+ if (sTlsKeyInitialized && IsShared()) {
+ sTlsKey.set(this);
+ }
+ // Lock here because LinkedList is not thread-safe
+ MonitorAutoLock autoLock(mManager->mLock);
+ // Add to thread list
+ mManager->mHangThreads.insertBack(this);
+ // Wake up monitor thread to process new thread
+ autoLock.Notify();
+}
+
+BackgroundHangThread::~BackgroundHangThread()
+{
+ // Lock here because LinkedList is not thread-safe
+ MonitorAutoLock autoLock(mManager->mLock);
+ // Remove from thread list
+ remove();
+ // Wake up monitor thread to process removed thread
+ autoLock.Notify();
+
+ // We no longer have a thread
+ if (sTlsKeyInitialized && IsShared()) {
+ sTlsKey.set(nullptr);
+ }
+
+ // Move our copy of ThreadHangStats to Telemetry storage
+ Telemetry::RecordThreadHangStats(mStats);
+}
+
+Telemetry::HangHistogram&
+BackgroundHangThread::ReportHang(PRIntervalTime aHangTime)
+{
+ // Recovered from a hang; called on the monitor thread
+ // mManager->mLock IS locked
+
+ // Remove unwanted "js::RunScript" frame from the stack
+ for (size_t i = 0; i < mHangStack.length(); ) {
+ const char** f = mHangStack.begin() + i;
+ if (!mHangStack.IsInBuffer(*f) && !strcmp(*f, "js::RunScript")) {
+ mHangStack.erase(f);
+ } else {
+ i++;
+ }
+ }
+
+ // Collapse duplicated "(chrome script)" and "(content script)" entries in the stack.
+ auto it = std::unique(mHangStack.begin(), mHangStack.end(), StackScriptEntriesCollapser);
+ mHangStack.erase(it, mHangStack.end());
+
+ // Limit the depth of the reported stack if greater than our limit. Only keep its
+ // last entries, since the most recent frames are at the end of the vector.
+ if (mHangStack.length() > kMaxThreadHangStackDepth) {
+ const int elementsToRemove = mHangStack.length() - kMaxThreadHangStackDepth;
+ // Replace the oldest frame with a known label so that we can tell this stack
+ // was limited.
+ mHangStack[0] = "(reduced stack)";
+ mHangStack.erase(mHangStack.begin() + 1, mHangStack.begin() + elementsToRemove);
+ }
+
+ Telemetry::HangHistogram newHistogram(Move(mHangStack));
+ for (Telemetry::HangHistogram* oldHistogram = mStats.mHangs.begin();
+ oldHistogram != mStats.mHangs.end(); oldHistogram++) {
+ if (newHistogram == *oldHistogram) {
+ // New histogram matches old one
+ oldHistogram->Add(aHangTime, Move(mAnnotations));
+ return *oldHistogram;
+ }
+ }
+ // Add new histogram
+ newHistogram.Add(aHangTime, Move(mAnnotations));
+ if (!mStats.mHangs.append(Move(newHistogram))) {
+ MOZ_CRASH();
+ }
+ return mStats.mHangs.back();
+}
+
+void
+BackgroundHangThread::ReportPermaHang()
+{
+ // Permanently hanged; called on the monitor thread
+ // mManager->mLock IS locked
+
+ Telemetry::HangHistogram& hang = ReportHang(mMaxTimeout);
+ Telemetry::HangStack& stack = hang.GetNativeStack();
+ if (stack.empty()) {
+ mStackHelper.GetNativeStack(stack);
+ }
+}
+
+MOZ_ALWAYS_INLINE void
+BackgroundHangThread::Update()
+{
+ PRIntervalTime intervalNow = mManager->mIntervalNow;
+ if (mWaiting) {
+ mInterval = intervalNow;
+ mWaiting = false;
+ /* We have to wake up the manager thread because when all threads
+ are waiting, the manager thread waits indefinitely as well. */
+ mManager->Wakeup();
+ } else {
+ PRIntervalTime duration = intervalNow - mInterval;
+ mStats.mActivity.Add(duration);
+ if (MOZ_UNLIKELY(duration >= mTimeout)) {
+ /* Wake up the manager thread to tell it that a hang ended */
+ mManager->Wakeup();
+ }
+ mInterval = intervalNow;
+ }
+}
+
+BackgroundHangThread*
+BackgroundHangThread::FindThread()
+{
+#ifdef MOZ_ENABLE_BACKGROUND_HANG_MONITOR
+ if (BackgroundHangManager::sInstance == nullptr) {
+ MOZ_ASSERT(BackgroundHangManager::sDisabled,
+               "BackgroundHangManager is not initialized");
+ return nullptr;
+ }
+
+ if (sTlsKeyInitialized) {
+ // Use TLS if available
+ return sTlsKey.get();
+ }
+ // If TLS is unavailable, we can search through the thread list
+ RefPtr<BackgroundHangManager> manager(BackgroundHangManager::sInstance);
+ MOZ_ASSERT(manager, "Creating BackgroundHangMonitor after shutdown");
+
+ PRThread* threadID = PR_GetCurrentThread();
+ // Lock thread list for traversal
+ MonitorAutoLock autoLock(manager->mLock);
+ for (BackgroundHangThread* thread = manager->mHangThreads.getFirst();
+ thread; thread = thread->getNext()) {
+ if (thread->mThreadID == threadID && thread->IsShared()) {
+ return thread;
+ }
+ }
+#endif
+ // Current thread is not initialized
+ return nullptr;
+}
+
+bool
+BackgroundHangMonitor::ShouldDisableOnBeta(const nsCString &clientID) {
+ MOZ_ASSERT(clientID.Length() == 36, "clientID is invalid");
+ const char *suffix = clientID.get() + clientID.Length() - 4;
+ return strtol(suffix, NULL, 16) % BHR_BETA_MOD;
+}
+
+bool
+BackgroundHangMonitor::IsDisabled() {
+#ifdef MOZ_ENABLE_BACKGROUND_HANG_MONITOR
+ return BackgroundHangManager::sDisabled;
+#else
+ return true;
+#endif
+}
+
+bool
+BackgroundHangMonitor::DisableOnBeta() {
+ nsAdoptingCString clientID = Preferences::GetCString("toolkit.telemetry.cachedClientID");
+ bool telemetryEnabled = Preferences::GetBool("toolkit.telemetry.enabled");
+
+ if (!telemetryEnabled || !clientID || BackgroundHangMonitor::ShouldDisableOnBeta(clientID)) {
+ if (XRE_IsParentProcess()) {
+ BackgroundHangMonitor::Shutdown();
+ } else {
+ BackgroundHangManager::sDisabled = true;
+ }
+ return true;
+ }
+
+ return false;
+}
+
+void
+BackgroundHangMonitor::Startup()
+{
+#ifdef MOZ_ENABLE_BACKGROUND_HANG_MONITOR
+ MOZ_ASSERT(!BackgroundHangManager::sInstance, "Already initialized");
+
+ if (!strcmp(NS_STRINGIFY(MOZ_UPDATE_CHANNEL), "beta")) {
+ if (XRE_IsParentProcess()) { // cached ClientID hasn't been read yet
+ ThreadStackHelper::Startup();
+ BackgroundHangThread::Startup();
+ BackgroundHangManager::sInstance = new BackgroundHangManager();
+
+ nsCOMPtr<nsIObserverService> observerService = mozilla::services::GetObserverService();
+ MOZ_ASSERT(observerService);
+
+ observerService->AddObserver(BackgroundHangManager::sInstance, "profile-after-change", false);
+ return;
+    } else if (DisableOnBeta()) {
+ return;
+ }
+ }
+
+ ThreadStackHelper::Startup();
+ BackgroundHangThread::Startup();
+ BackgroundHangManager::sInstance = new BackgroundHangManager();
+#endif
+}
+
+void
+BackgroundHangMonitor::Shutdown()
+{
+#ifdef MOZ_ENABLE_BACKGROUND_HANG_MONITOR
+ if (BackgroundHangManager::sDisabled) {
+ MOZ_ASSERT(!BackgroundHangManager::sInstance, "Initialized");
+ return;
+ }
+
+ MOZ_ASSERT(BackgroundHangManager::sInstance, "Not initialized");
+ /* Scope our lock inside Shutdown() because the sInstance object can
+ be destroyed as soon as we set sInstance to nullptr below, and
+ we don't want to hold the lock when it's being destroyed. */
+ BackgroundHangManager::sInstance->Shutdown();
+ BackgroundHangManager::sInstance = nullptr;
+ ThreadStackHelper::Shutdown();
+ BackgroundHangManager::sDisabled = true;
+#endif
+}
+
+BackgroundHangMonitor::BackgroundHangMonitor(const char* aName,
+ uint32_t aTimeoutMs,
+ uint32_t aMaxTimeoutMs,
+ ThreadType aThreadType)
+ : mThread(aThreadType == THREAD_SHARED ? BackgroundHangThread::FindThread() : nullptr)
+{
+#ifdef MOZ_ENABLE_BACKGROUND_HANG_MONITOR
+ if (!BackgroundHangManager::sDisabled && !mThread) {
+ mThread = new BackgroundHangThread(aName, aTimeoutMs, aMaxTimeoutMs,
+ aThreadType);
+ }
+#endif
+}
+
+BackgroundHangMonitor::BackgroundHangMonitor()
+ : mThread(BackgroundHangThread::FindThread())
+{
+#ifdef MOZ_ENABLE_BACKGROUND_HANG_MONITOR
+ if (BackgroundHangManager::sDisabled) {
+ return;
+ }
+#endif
+}
+
+BackgroundHangMonitor::~BackgroundHangMonitor()
+{
+}
+
+void
+BackgroundHangMonitor::NotifyActivity()
+{
+#ifdef MOZ_ENABLE_BACKGROUND_HANG_MONITOR
+ if (mThread == nullptr) {
+ MOZ_ASSERT(BackgroundHangManager::sDisabled,
+ "This thread is not initialized for hang monitoring");
+ return;
+ }
+
+ if (Telemetry::CanRecordExtended()) {
+ mThread->NotifyActivity();
+ }
+#endif
+}
+
+void
+BackgroundHangMonitor::NotifyWait()
+{
+#ifdef MOZ_ENABLE_BACKGROUND_HANG_MONITOR
+ if (mThread == nullptr) {
+ MOZ_ASSERT(BackgroundHangManager::sDisabled,
+ "This thread is not initialized for hang monitoring");
+ return;
+ }
+
+ if (Telemetry::CanRecordExtended()) {
+ mThread->NotifyWait();
+ }
+#endif
+}
+
+bool
+BackgroundHangMonitor::RegisterAnnotator(HangMonitor::Annotator& aAnnotator)
+{
+#ifdef MOZ_ENABLE_BACKGROUND_HANG_MONITOR
+ BackgroundHangThread* thisThread = BackgroundHangThread::FindThread();
+ if (!thisThread) {
+ return false;
+ }
+ return thisThread->mAnnotators.Register(aAnnotator);
+#else
+ return false;
+#endif
+}
+
+bool
+BackgroundHangMonitor::UnregisterAnnotator(HangMonitor::Annotator& aAnnotator)
+{
+#ifdef MOZ_ENABLE_BACKGROUND_HANG_MONITOR
+ BackgroundHangThread* thisThread = BackgroundHangThread::FindThread();
+ if (!thisThread) {
+ return false;
+ }
+ return thisThread->mAnnotators.Unregister(aAnnotator);
+#else
+ return false;
+#endif
+}
+
+/* Because we are iterating through the BackgroundHangThread linked list,
+ we need to take a lock. Using MonitorAutoLock as a base class makes
+ sure all of that is taken care of for us. */
+BackgroundHangMonitor::ThreadHangStatsIterator::ThreadHangStatsIterator()
+ : MonitorAutoLock(BackgroundHangManager::sInstance->mLock)
+ , mThread(BackgroundHangManager::sInstance ?
+ BackgroundHangManager::sInstance->mHangThreads.getFirst() :
+ nullptr)
+{
+#ifdef MOZ_ENABLE_BACKGROUND_HANG_MONITOR
+ MOZ_ASSERT(BackgroundHangManager::sInstance ||
+ BackgroundHangManager::sDisabled,
+ "Inconsistent state");
+#endif
+}
+
+Telemetry::ThreadHangStats*
+BackgroundHangMonitor::ThreadHangStatsIterator::GetNext()
+{
+ if (!mThread) {
+ return nullptr;
+ }
+ Telemetry::ThreadHangStats* stats = &mThread->mStats;
+ mThread = mThread->getNext();
+ return stats;
+}
+
+} // namespace mozilla
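
To make the sampling in ShouldDisableOnBeta above concrete: the last four hex digits of the 36-character telemetry client ID are parsed and taken modulo BHR_BETA_MOD. With the current value of 1 the remainder is always 0, so no Beta client is disabled, matching the "100% of Beta population" comment. The standalone snippet below is a worked example only; the client ID and the alternative modulus of 100 are hypothetical.

// Worked example of the BHR_BETA_MOD sampling arithmetic (illustrative only).
#include <cstdio>
#include <cstdlib>
#include <cstring>

int main()
{
  const char* clientID = "01234567-89ab-4def-8123-456789ab00c8"; // 36 chars, hypothetical
  const char* suffix = clientID + strlen(clientID) - 4;          // "00c8" == 0xc8 == 200
  long digits = strtol(suffix, nullptr, 16);
  // BHR_BETA_MOD == 1 (current value): remainder is always 0, so
  // ShouldDisableOnBeta() returns false and BHR stays on for all of Beta.
  printf("%% 1   -> %ld\n", digits % 1);
  // A hypothetical BHR_BETA_MOD of 100 would keep BHR only for clients whose
  // remainder is 0, i.e. roughly 1 in 100.
  printf("%% 100 -> %ld\n", digits % 100);
  return 0;
}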
diff --git a/xpcom/threads/BackgroundHangMonitor.h b/xpcom/threads/BackgroundHangMonitor.h
new file mode 100644
index 000000000..698cf2305
--- /dev/null
+++ b/xpcom/threads/BackgroundHangMonitor.h
@@ -0,0 +1,246 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef mozilla_BackgroundHangMonitor_h
+#define mozilla_BackgroundHangMonitor_h
+
+#include "mozilla/HangAnnotations.h"
+#include "mozilla/Monitor.h"
+#include "mozilla/RefPtr.h"
+
+#include "nsString.h"
+
+#include <stdint.h>
+
+namespace mozilla {
+
+namespace Telemetry {
+class ThreadHangStats;
+} // namespace Telemetry
+
+class BackgroundHangThread;
+class BackgroundHangManager;
+
+/**
+ * The background hang monitor is responsible for detecting and reporting
+ * hangs in main and background threads. A thread registers itself using
+ * the BackgroundHangMonitor object and periodically calls its methods to
+ * inform the hang monitor of the thread's activity. Each thread is given
+ * a thread name, a timeout, and a maximum timeout. If one of the thread's
+ * tasks runs for longer than the timeout duration but shorter than the
+ * maximum timeout, a (transient) hang is reported. On the other hand, if
+ * a task runs for longer than the maximum timeout duration or never
+ * finishes (e.g. in a deadlock), a permahang is reported.
+ *
+ * Tasks are defined arbitrarily, but are typically represented by events
+ * in an event loop -- processing one event is equivalent to running one
+ * task. To ensure responsiveness, tasks in a thread often have a target
+ * running time. This is a good starting point for determining the timeout
+ * and maximum timeout values. For example, the Compositor thread has a
+ * responsiveness goal of 60Hz or 17ms, so a starting timeout could be
+ * 100ms. Considering some platforms (e.g. Android) can terminate the app
+ * when a critical thread hangs for longer than a few seconds, a good
+ * starting maximum timeout is 4 or 5 seconds.
+ *
+ * A thread registers itself through the BackgroundHangMonitor constructor.
+ * Multiple BackgroundHangMonitor objects can be used in one thread. The
+ * constructor without arguments can be used when it is known that the thread
+ * already has a BackgroundHangMonitor registered. When all instances of
+ * BackgroundHangMonitor are destroyed, the thread is unregistered.
+ *
+ * The thread then uses two methods to inform BackgroundHangMonitor of the
+ * thread's activity:
+ *
+ * > BackgroundHangMonitor::NotifyActivity should be called *before*
+ * starting a task. The task run time is determined by the interval
+ * between this call and the next NotifyActivity call.
+ *
+ * > BackgroundHangMonitor::NotifyWait should be called *before* the
+ * thread enters a wait state (e.g. to wait for a new event). This
+ * prevents a waiting thread from being detected as hanging. The wait
+ * state is automatically cleared at the next NotifyActivity call.
+ *
+ * The following example shows hang monitoring in a simple event loop:
+ *
+ * void thread_main()
+ * {
+ * mozilla::BackgroundHangMonitor hangMonitor("example1", 100, 1000);
+ * while (!exiting) {
+ * hangMonitor.NotifyActivity();
+ * process_next_event();
+ * hangMonitor.NotifyWait();
+ * wait_for_next_event();
+ * }
+ * }
+ *
+ * The following example shows reentrancy in nested event loops:
+ *
+ * void thread_main()
+ * {
+ * mozilla::BackgroundHangMonitor hangMonitor("example2", 100, 1000);
+ * while (!exiting) {
+ * hangMonitor.NotifyActivity();
+ * process_next_event();
+ * hangMonitor.NotifyWait();
+ * wait_for_next_event();
+ * }
+ * }
+ *
+ * void process_next_event()
+ * {
+ *   mozilla::BackgroundHangMonitor hangMonitor;
+ * if (is_sync_event) {
+ * while (!finished_event) {
+ * hangMonitor.NotifyActivity();
+ * process_next_event();
+ * hangMonitor.NotifyWait();
+ * wait_for_next_event();
+ * }
+ * } else {
+ * process_nonsync_event();
+ * }
+ * }
+ */
+class BackgroundHangMonitor
+{
+private:
+ friend BackgroundHangManager;
+
+ RefPtr<BackgroundHangThread> mThread;
+
+ static bool ShouldDisableOnBeta(const nsCString &);
+ static bool DisableOnBeta();
+
+public:
+ static const uint32_t kNoTimeout = 0;
+ enum ThreadType {
+ // For a new BackgroundHangMonitor for thread T, only create a new
+ // monitoring thread for T if one doesn't already exist. If one does,
+ // share that pre-existing monitoring thread.
+ THREAD_SHARED,
+ // For a new BackgroundHangMonitor for thread T, create a new
+ // monitoring thread for T even if there are other, pre-existing
+ // monitoring threads for T.
+ THREAD_PRIVATE
+ };
+
+ /**
+ * ThreadHangStatsIterator is used to iterate through the ThreadHangStats
+ * associated with each active monitored thread. Because of an internal
+ * lock while this object is alive, a thread must use only one instance
+ * of this class at a time and must iterate through the list as fast as
+ * possible. The following example shows using the iterator:
+ *
+ * {
+ * // Scope the iter variable so it's destroyed as soon as we're done
+ * BackgroundHangMonitor::ThreadHangStatsIterator iter;
+ * for (ThreadHangStats* histogram = iter.GetNext();
+ * histogram; histogram = iter.GetNext()) {
+ * // Process histogram
+ * }
+ * }
+ */
+ class ThreadHangStatsIterator : public MonitorAutoLock
+ {
+ private:
+ BackgroundHangThread* mThread;
+
+ ThreadHangStatsIterator(const ThreadHangStatsIterator&);
+ ThreadHangStatsIterator& operator=(const ThreadHangStatsIterator&);
+
+ public:
+ /**
+     * Create a ThreadHangStatsIterator instance and take the internal lock.
+ * Internal lock is released on destruction.
+ */
+ ThreadHangStatsIterator();
+
+ /**
+ * Get the next item in the list; the first call returns the first item.
+ * Returns nullptr at the end of the list.
+ */
+ Telemetry::ThreadHangStats* GetNext();
+ };
+
+ /**
+ * Enable hang monitoring.
+   * Startup must return before any BackgroundHangMonitor is used.
+ */
+ static void Startup();
+
+ /**
+ * Disable hang monitoring.
+ * Can be called without destroying all BackgroundHangMonitors first.
+ */
+ static void Shutdown();
+
+ /**
+ * Returns true if BHR is disabled.
+ */
+ static bool IsDisabled();
+
+ /**
+ * Start monitoring hangs for the current thread.
+ *
+ * @param aName Name to identify the thread with
+ * @param aTimeoutMs Amount of time in milliseconds without
+ * activity before registering a hang
+ * @param aMaxTimeoutMs Amount of time in milliseconds without
+ * activity before registering a permanent hang
+ * @param aThreadType
+ * The ThreadType type of monitoring thread that should be created
+ * for this monitor. See the documentation for ThreadType.
+ */
+ BackgroundHangMonitor(const char* aName,
+ uint32_t aTimeoutMs,
+ uint32_t aMaxTimeoutMs,
+ ThreadType aThreadType = THREAD_SHARED);
+
+ /**
+ * Monitor hangs using an existing monitor
+ * associated with the current thread.
+ */
+ BackgroundHangMonitor();
+
+ /**
+ * Destroys the hang monitor; hang monitoring for a thread stops
+ * when all monitors associated with the thread are destroyed.
+ */
+ ~BackgroundHangMonitor();
+
+ /**
+ * Notify the hang monitor of pending current thread activity.
+ * Call this method before starting an "activity" or after
+ * exiting from a wait state.
+ */
+ void NotifyActivity();
+
+ /**
+ * Notify the hang monitor of current thread wait.
+ * Call this method before entering a wait state; call
+ * NotifyActivity when subsequently exiting the wait state.
+ */
+ void NotifyWait();
+
+ /**
+ * Register an annotator with BHR for the current thread.
+ * @param aAnnotator annotator to register
+ * @return true if the annotator was registered, otherwise false.
+ */
+ static bool RegisterAnnotator(HangMonitor::Annotator& aAnnotator);
+
+ /**
+ * Unregister an annotator that was previously registered via
+ * RegisterAnnotator.
+ * @param aAnnotator annotator to unregister
+   * @return true if no annotators remain registered for this thread
+ */
+ static bool UnregisterAnnotator(HangMonitor::Annotator& aAnnotator);
+};
+
+} // namespace mozilla
+
+#endif // mozilla_BackgroundHangMonitor_h
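
A hedged sketch of the constructor parameters documented above, using a dedicated (THREAD_PRIVATE) monitoring thread; the thread name, the timeout values, and the ProcessNextTask/WaitForNextTask helpers are assumptions for illustration.

// Sketch: monitor a worker loop with its own private monitoring state.
// 128 ms flags a transient hang, 8192 ms a permahang; both are illustrative.
#include "mozilla/BackgroundHangMonitor.h"

// Hypothetical helpers, assumed to be defined elsewhere for this sketch.
void ProcessNextTask();
void WaitForNextTask();

void IllustrativeWorkerLoop(const bool& aExiting)
{
  mozilla::BackgroundHangMonitor hangMonitor(
    "IllustrativeWorker",
    /* aTimeoutMs    = */ 128,
    /* aMaxTimeoutMs = */ 8192,
    mozilla::BackgroundHangMonitor::THREAD_PRIVATE);
  while (!aExiting) {
    hangMonitor.NotifyActivity();  // a task is about to run
    ProcessNextTask();             // hypothetical task runner
    hangMonitor.NotifyWait();      // about to block waiting for more work
    WaitForNextTask();             // hypothetical wait
  }
}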
diff --git a/xpcom/threads/HangAnnotations.cpp b/xpcom/threads/HangAnnotations.cpp
new file mode 100644
index 000000000..529b57b8e
--- /dev/null
+++ b/xpcom/threads/HangAnnotations.cpp
@@ -0,0 +1,262 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/HangAnnotations.h"
+
+#include <vector>
+
+#include "MainThreadUtils.h"
+#include "mozilla/DebugOnly.h"
+#include "nsXULAppAPI.h"
+
+namespace mozilla {
+namespace HangMonitor {
+
+// Chrome hang annotators. This can go away once BHR has completely replaced
+// ChromeHangs.
+static StaticAutoPtr<Observer::Annotators> gChromehangAnnotators;
+
+class BrowserHangAnnotations : public HangAnnotations
+{
+public:
+ BrowserHangAnnotations();
+ ~BrowserHangAnnotations();
+
+ void AddAnnotation(const nsAString& aName, const int32_t aData) override;
+ void AddAnnotation(const nsAString& aName, const double aData) override;
+ void AddAnnotation(const nsAString& aName, const nsAString& aData) override;
+ void AddAnnotation(const nsAString& aName, const nsACString& aData) override;
+ void AddAnnotation(const nsAString& aName, const bool aData) override;
+
+ size_t SizeOfIncludingThis(mozilla::MallocSizeOf aMallocSizeOf) const override;
+ bool IsEmpty() const override;
+ UniquePtr<Enumerator> GetEnumerator() override;
+
+ typedef std::pair<nsString, nsString> AnnotationType;
+ typedef std::vector<AnnotationType> VectorType;
+ typedef VectorType::const_iterator IteratorType;
+
+private:
+ VectorType mAnnotations;
+};
+
+BrowserHangAnnotations::BrowserHangAnnotations()
+{
+ MOZ_COUNT_CTOR(BrowserHangAnnotations);
+}
+
+BrowserHangAnnotations::~BrowserHangAnnotations()
+{
+ MOZ_COUNT_DTOR(BrowserHangAnnotations);
+}
+
+void
+BrowserHangAnnotations::AddAnnotation(const nsAString& aName, const int32_t aData)
+{
+ nsString dataString;
+ dataString.AppendInt(aData);
+ AnnotationType annotation = std::make_pair(nsString(aName), dataString);
+ mAnnotations.push_back(annotation);
+}
+
+void
+BrowserHangAnnotations::AddAnnotation(const nsAString& aName, const double aData)
+{
+ nsString dataString;
+ dataString.AppendFloat(aData);
+ AnnotationType annotation = std::make_pair(nsString(aName), dataString);
+ mAnnotations.push_back(annotation);
+}
+
+void
+BrowserHangAnnotations::AddAnnotation(const nsAString& aName, const nsAString& aData)
+{
+ AnnotationType annotation = std::make_pair(nsString(aName), nsString(aData));
+ mAnnotations.push_back(annotation);
+}
+
+void
+BrowserHangAnnotations::AddAnnotation(const nsAString& aName, const nsACString& aData)
+{
+ nsString dataString;
+ AppendUTF8toUTF16(aData, dataString);
+ AnnotationType annotation = std::make_pair(nsString(aName), dataString);
+ mAnnotations.push_back(annotation);
+}
+
+void
+BrowserHangAnnotations::AddAnnotation(const nsAString& aName, const bool aData)
+{
+ nsString dataString;
+ dataString += aData ? NS_LITERAL_STRING("true") : NS_LITERAL_STRING("false");
+ AnnotationType annotation = std::make_pair(nsString(aName), dataString);
+ mAnnotations.push_back(annotation);
+}
+
+/**
+ * This class itself does not use synchronization but it (and its parent object)
+ * should be protected by mutual exclusion in some way. In Telemetry the chrome
+ * hang data is protected via TelemetryImpl::mHangReportsMutex.
+ */
+class ChromeHangAnnotationEnumerator : public HangAnnotations::Enumerator
+{
+public:
+ explicit ChromeHangAnnotationEnumerator(const BrowserHangAnnotations::VectorType& aAnnotations);
+ ~ChromeHangAnnotationEnumerator();
+
+ virtual bool Next(nsAString& aOutName, nsAString& aOutValue);
+
+private:
+ BrowserHangAnnotations::IteratorType mIterator;
+ BrowserHangAnnotations::IteratorType mEnd;
+};
+
+ChromeHangAnnotationEnumerator::ChromeHangAnnotationEnumerator(
+ const BrowserHangAnnotations::VectorType& aAnnotations)
+ : mIterator(aAnnotations.begin())
+ , mEnd(aAnnotations.end())
+{
+ MOZ_COUNT_CTOR(ChromeHangAnnotationEnumerator);
+}
+
+ChromeHangAnnotationEnumerator::~ChromeHangAnnotationEnumerator()
+{
+ MOZ_COUNT_DTOR(ChromeHangAnnotationEnumerator);
+}
+
+bool
+ChromeHangAnnotationEnumerator::Next(nsAString& aOutName, nsAString& aOutValue)
+{
+ aOutName.Truncate();
+ aOutValue.Truncate();
+ if (mIterator == mEnd) {
+ return false;
+ }
+ aOutName = mIterator->first;
+ aOutValue = mIterator->second;
+ ++mIterator;
+ return true;
+}
+
+bool
+BrowserHangAnnotations::IsEmpty() const
+{
+ return mAnnotations.empty();
+}
+
+size_t
+BrowserHangAnnotations::SizeOfIncludingThis(mozilla::MallocSizeOf aMallocSizeOf) const
+{
+ size_t result = sizeof(mAnnotations) +
+ mAnnotations.capacity() * sizeof(AnnotationType);
+ for (IteratorType i = mAnnotations.begin(), e = mAnnotations.end(); i != e;
+ ++i) {
+ result += i->first.SizeOfExcludingThisIfUnshared(aMallocSizeOf);
+ result += i->second.SizeOfExcludingThisIfUnshared(aMallocSizeOf);
+ }
+
+ return result;
+}
+
+UniquePtr<HangAnnotations::Enumerator>
+BrowserHangAnnotations::GetEnumerator()
+{
+ if (mAnnotations.empty()) {
+ return nullptr;
+ }
+ return MakeUnique<ChromeHangAnnotationEnumerator>(mAnnotations);
+}
+
+namespace Observer {
+
+Annotators::Annotators()
+ : mMutex("HangMonitor::Annotators::mMutex")
+{
+ MOZ_COUNT_CTOR(Annotators);
+}
+
+Annotators::~Annotators()
+{
+ MOZ_ASSERT(mAnnotators.empty());
+ MOZ_COUNT_DTOR(Annotators);
+}
+
+bool
+Annotators::Register(Annotator& aAnnotator)
+{
+ MutexAutoLock lock(mMutex);
+ auto result = mAnnotators.insert(&aAnnotator);
+ return result.second;
+}
+
+bool
+Annotators::Unregister(Annotator& aAnnotator)
+{
+ MutexAutoLock lock(mMutex);
+ DebugOnly<std::set<Annotator*>::size_type> numErased;
+ numErased = mAnnotators.erase(&aAnnotator);
+ MOZ_ASSERT(numErased == 1);
+ return mAnnotators.empty();
+}
+
+UniquePtr<HangAnnotations>
+Annotators::GatherAnnotations()
+{
+ auto annotations = MakeUnique<BrowserHangAnnotations>();
+ { // Scope for lock
+ MutexAutoLock lock(mMutex);
+ for (std::set<Annotator*>::iterator i = mAnnotators.begin(),
+ e = mAnnotators.end();
+ i != e; ++i) {
+ (*i)->AnnotateHang(*annotations);
+ }
+ }
+ if (annotations->IsEmpty()) {
+ return nullptr;
+ }
+ return Move(annotations);
+}
+
+} // namespace Observer
+
+void
+RegisterAnnotator(Annotator& aAnnotator)
+{
+ BackgroundHangMonitor::RegisterAnnotator(aAnnotator);
+ // We still register annotators for ChromeHangs
+ if (NS_IsMainThread() &&
+ GeckoProcessType_Default == XRE_GetProcessType()) {
+ if (!gChromehangAnnotators) {
+ gChromehangAnnotators = new Observer::Annotators();
+ }
+ gChromehangAnnotators->Register(aAnnotator);
+ }
+}
+
+void
+UnregisterAnnotator(Annotator& aAnnotator)
+{
+ BackgroundHangMonitor::UnregisterAnnotator(aAnnotator);
+  // We still unregister annotators for ChromeHangs
+ if (NS_IsMainThread() &&
+ GeckoProcessType_Default == XRE_GetProcessType()) {
+ if (gChromehangAnnotators->Unregister(aAnnotator)) {
+ gChromehangAnnotators = nullptr;
+ }
+ }
+}
+
+UniquePtr<HangAnnotations>
+ChromeHangAnnotatorCallout()
+{
+ if (!gChromehangAnnotators) {
+ return nullptr;
+ }
+ return gChromehangAnnotators->GatherAnnotations();
+}
+
+} // namespace HangMonitor
+} // namespace mozilla
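
A sketch (illustrative, not part of this patch) of how a consumer might drain a HangAnnotations set produced by Annotators::GatherAnnotations above; the printf stands in for whatever the real consumer (Telemetry) does with each name/value pair.

// Sketch: walk the annotations via the Enumerator interface.
#include <cstdio>
#include "mozilla/HangAnnotations.h"
#include "nsString.h"

void DumpAnnotations(mozilla::HangMonitor::HangAnnotations* aAnnotations)
{
  if (!aAnnotations) {
    return; // GatherAnnotations() returns nullptr when nothing was annotated.
  }
  auto enumerator = aAnnotations->GetEnumerator();
  if (!enumerator) {
    return; // GetEnumerator() also returns nullptr for an empty set.
  }
  nsAutoString name, value;
  while (enumerator->Next(name, value)) {
    printf("%s = %s\n",
           NS_ConvertUTF16toUTF8(name).get(),
           NS_ConvertUTF16toUTF8(value).get());
  }
}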
diff --git a/xpcom/threads/HangAnnotations.h b/xpcom/threads/HangAnnotations.h
new file mode 100644
index 000000000..6dddbf4bb
--- /dev/null
+++ b/xpcom/threads/HangAnnotations.h
@@ -0,0 +1,104 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef mozilla_HangAnnotations_h
+#define mozilla_HangAnnotations_h
+
+#include <set>
+
+#include "mozilla/MemoryReporting.h"
+#include "mozilla/Mutex.h"
+#include "mozilla/UniquePtr.h"
+#include "mozilla/Vector.h"
+#include "nsString.h"
+
+namespace mozilla {
+namespace HangMonitor {
+
+/**
+ * This class declares an abstraction for a data type that encapsulates all
+ * of the annotations being reported by a registered hang Annotator.
+ */
+class HangAnnotations
+{
+public:
+ virtual ~HangAnnotations() {}
+
+ virtual void AddAnnotation(const nsAString& aName, const int32_t aData) = 0;
+ virtual void AddAnnotation(const nsAString& aName, const double aData) = 0;
+ virtual void AddAnnotation(const nsAString& aName, const nsAString& aData) = 0;
+ virtual void AddAnnotation(const nsAString& aName, const nsACString& aData) = 0;
+ virtual void AddAnnotation(const nsAString& aName, const bool aData) = 0;
+
+ class Enumerator
+ {
+ public:
+ virtual ~Enumerator() {}
+ virtual bool Next(nsAString& aOutName, nsAString& aOutValue) = 0;
+ };
+
+ virtual size_t SizeOfIncludingThis(mozilla::MallocSizeOf aMallocSizeOf) const = 0;
+ virtual bool IsEmpty() const = 0;
+ virtual UniquePtr<Enumerator> GetEnumerator() = 0;
+};
+
+typedef UniquePtr<HangAnnotations> HangAnnotationsPtr;
+typedef Vector<HangAnnotationsPtr> HangAnnotationsVector;
+
+class Annotator
+{
+public:
+ /**
+ * NB: This function is always called by the HangMonitor thread.
+ * Plan accordingly.
+ */
+ virtual void AnnotateHang(HangAnnotations& aAnnotations) = 0;
+};
+
+/**
+ * Registers an Annotator to be called when a hang is detected.
+ * @param aAnnotator Reference to an object that implements the
+ * HangMonitor::Annotator interface.
+ */
+void RegisterAnnotator(Annotator& aAnnotator);
+
+/**
+ * Unregisters an Annotator that was previously registered via RegisterAnnotator.
+ * @param aAnnotator Reference to an object that implements the
+ * HangMonitor::Annotator interface.
+ */
+void UnregisterAnnotator(Annotator& aAnnotator);
+
+/**
+ * Gathers annotations. This function should be called by ChromeHangs.
+ * @return UniquePtr to HangAnnotations object or nullptr if none.
+ */
+HangAnnotationsPtr ChromeHangAnnotatorCallout();
+
+namespace Observer {
+
+class Annotators
+{
+public:
+ Annotators();
+ ~Annotators();
+
+ bool Register(Annotator& aAnnotator);
+ bool Unregister(Annotator& aAnnotator);
+
+ HangAnnotationsPtr GatherAnnotations();
+
+private:
+ Mutex mMutex;
+ std::set<Annotator*> mAnnotators;
+};
+
+} // namespace Observer
+
+} // namespace HangMonitor
+} // namespace mozilla
+
+#endif // mozilla_HangAnnotations_h
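
A minimal sketch of implementing the Annotator interface declared above. The class name, the PendingTasks counter, and the annotation keys are hypothetical; as the header notes, AnnotateHang runs on the hang-monitor thread, so anything it reads must be safe to access from off the owning thread.

// Sketch: a hypothetical annotator that records a counter when a hang is
// detected on the registered thread.
#include "mozilla/Atomics.h"
#include "mozilla/HangAnnotations.h"

class PendingTaskAnnotator final : public mozilla::HangMonitor::Annotator
{
public:
  void AnnotateHang(mozilla::HangMonitor::HangAnnotations& aAnnotations) override
  {
    // mPendingTasks is Atomic, so reading it from the monitor thread is safe.
    int32_t pending = mPendingTasks;
    aAnnotations.AddAnnotation(NS_LITERAL_STRING("PendingTasks"), pending);
    aAnnotations.AddAnnotation(NS_LITERAL_STRING("IsIdle"), pending == 0);
  }

  mozilla::Atomic<int32_t> mPendingTasks{0};
};

// Registration sketch, run on the thread being monitored:
//   static PendingTaskAnnotator sAnnotator;
//   mozilla::HangMonitor::RegisterAnnotator(sAnnotator);
//   ...
//   mozilla::HangMonitor::UnregisterAnnotator(sAnnotator);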
diff --git a/xpcom/threads/HangMonitor.cpp b/xpcom/threads/HangMonitor.cpp
new file mode 100644
index 000000000..71cc67ca4
--- /dev/null
+++ b/xpcom/threads/HangMonitor.cpp
@@ -0,0 +1,434 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/HangMonitor.h"
+
+#include "mozilla/Atomics.h"
+#include "mozilla/BackgroundHangMonitor.h"
+#include "mozilla/Monitor.h"
+#include "mozilla/Preferences.h"
+#include "mozilla/ProcessedStack.h"
+#include "mozilla/Telemetry.h"
+#include "mozilla/StaticPtr.h"
+#include "mozilla/UniquePtr.h"
+#include "nsReadableUtils.h"
+#include "mozilla/StackWalk.h"
+#ifdef _WIN64
+#include "mozilla/StackWalk_windows.h"
+#endif
+#include "nsThreadUtils.h"
+#include "nsXULAppAPI.h"
+
+#ifdef MOZ_CRASHREPORTER
+#include "nsExceptionHandler.h"
+#endif
+
+#ifdef XP_WIN
+#include <windows.h>
+#endif
+
+#if defined(MOZ_ENABLE_PROFILER_SPS) && defined(MOZ_PROFILING) && defined(XP_WIN)
+ #define REPORT_CHROME_HANGS
+#endif
+
+namespace mozilla {
+namespace HangMonitor {
+
+/**
+ * A flag which may be set from within a debugger to disable the hang
+ * monitor.
+ */
+volatile bool gDebugDisableHangMonitor = false;
+
+const char kHangMonitorPrefName[] = "hangmonitor.timeout";
+
+#ifdef REPORT_CHROME_HANGS
+const char kTelemetryPrefName[] = "toolkit.telemetry.enabled";
+#endif
+
+// Monitor protects gShutdown and gTimeout, but not gTimestamp, which relies on
+// being atomically set by the processor; synchronization doesn't really matter
+// in this use case.
+Monitor* gMonitor;
+
+// The timeout preference, in seconds.
+int32_t gTimeout;
+
+PRThread* gThread;
+
+// Set when shutdown begins to signal the thread to exit immediately.
+bool gShutdown;
+
+// The timestamp of the last event notification, or PR_INTERVAL_NO_WAIT if
+// we're currently not processing events.
+Atomic<PRIntervalTime> gTimestamp(PR_INTERVAL_NO_WAIT);
+
+#ifdef REPORT_CHROME_HANGS
+// Main thread ID used in reporting chrome hangs under Windows
+static HANDLE winMainThreadHandle = nullptr;
+
+// Default timeout for reporting chrome hangs to Telemetry (5 seconds)
+static const int32_t DEFAULT_CHROME_HANG_INTERVAL = 5;
+
+// Maximum number of PCs to gather from the stack
+static const int32_t MAX_CALL_STACK_PCS = 400;
+#endif
+
+// PrefChangedFunc
+void
+PrefChanged(const char*, void*)
+{
+ int32_t newval = Preferences::GetInt(kHangMonitorPrefName);
+#ifdef REPORT_CHROME_HANGS
+  // Monitor chrome hangs on the profiling branch if Telemetry is enabled
+ if (newval == 0) {
+ bool telemetryEnabled = Preferences::GetBool(kTelemetryPrefName);
+ if (telemetryEnabled) {
+ newval = DEFAULT_CHROME_HANG_INTERVAL;
+ }
+ }
+#endif
+ MonitorAutoLock lock(*gMonitor);
+ if (newval != gTimeout) {
+ gTimeout = newval;
+ lock.Notify();
+ }
+}
+
+void
+Crash()
+{
+ if (gDebugDisableHangMonitor) {
+ return;
+ }
+
+#ifdef XP_WIN
+ if (::IsDebuggerPresent()) {
+ return;
+ }
+#endif
+
+#ifdef MOZ_CRASHREPORTER
+ // If you change this, you must also deal with the threadsafety of AnnotateCrashReport in
+ // non-chrome processes!
+ if (GeckoProcessType_Default == XRE_GetProcessType()) {
+ CrashReporter::AnnotateCrashReport(NS_LITERAL_CSTRING("Hang"),
+ NS_LITERAL_CSTRING("1"));
+ }
+#endif
+
+ NS_RUNTIMEABORT("HangMonitor triggered");
+}
+
+#ifdef REPORT_CHROME_HANGS
+
+static void
+ChromeStackWalker(uint32_t aFrameNumber, void* aPC, void* aSP, void* aClosure)
+{
+ MOZ_ASSERT(aClosure);
+ std::vector<uintptr_t>* stack =
+ static_cast<std::vector<uintptr_t>*>(aClosure);
+ if (stack->size() == MAX_CALL_STACK_PCS) {
+ return;
+ }
+ MOZ_ASSERT(stack->size() < MAX_CALL_STACK_PCS);
+ stack->push_back(reinterpret_cast<uintptr_t>(aPC));
+}
+
+static void
+GetChromeHangReport(Telemetry::ProcessedStack& aStack,
+ int32_t& aSystemUptime,
+ int32_t& aFirefoxUptime)
+{
+ MOZ_ASSERT(winMainThreadHandle);
+
+ // The thread we're about to suspend might have the alloc lock
+ // so allocate ahead of time
+ std::vector<uintptr_t> rawStack;
+ rawStack.reserve(MAX_CALL_STACK_PCS);
+
+ // Workaround possible deadlock where the main thread is running a
+ // long-standing JS job, and happens to be in the JIT allocator when we
+ // suspend it. Since, on win 64, this requires holding a process lock that
+ // MozStackWalk requires, take this "workaround lock" to avoid deadlock.
+#ifdef _WIN64
+ AcquireStackWalkWorkaroundLock();
+#endif
+ DWORD ret = ::SuspendThread(winMainThreadHandle);
+ bool suspended = false;
+ if (ret != -1) {
+ // SuspendThread is asynchronous, so the thread may still be running. Use
+ // GetThreadContext to ensure it's really suspended.
+ // See https://blogs.msdn.microsoft.com/oldnewthing/20150205-00/?p=44743.
+ CONTEXT context;
+ context.ContextFlags = CONTEXT_CONTROL;
+ if (::GetThreadContext(winMainThreadHandle, &context)) {
+ suspended = true;
+ }
+ }
+
+#ifdef _WIN64
+ ReleaseStackWalkWorkaroundLock();
+#endif
+
+ if (!suspended) {
+ if (ret != -1) {
+ MOZ_ALWAYS_TRUE(::ResumeThread(winMainThreadHandle) != DWORD(-1));
+ }
+ return;
+ }
+
+ MozStackWalk(ChromeStackWalker, /* skipFrames */ 0, /* maxFrames */ 0,
+ reinterpret_cast<void*>(&rawStack),
+ reinterpret_cast<uintptr_t>(winMainThreadHandle), nullptr);
+ ret = ::ResumeThread(winMainThreadHandle);
+ if (ret == -1) {
+ return;
+ }
+ aStack = Telemetry::GetStackAndModules(rawStack);
+
+ // Record system uptime (in minutes) at the time of the hang
+ aSystemUptime = ((GetTickCount() / 1000) - (gTimeout * 2)) / 60;
+
+ // Record Firefox uptime (in minutes) at the time of the hang
+ bool error;
+ TimeStamp processCreation = TimeStamp::ProcessCreation(error);
+ if (!error) {
+ TimeDuration td = TimeStamp::Now() - processCreation;
+ aFirefoxUptime = (static_cast<int32_t>(td.ToSeconds()) - (gTimeout * 2)) / 60;
+ } else {
+ aFirefoxUptime = -1;
+ }
+}
+
+#endif
+
+void
+ThreadMain(void*)
+{
+ PR_SetCurrentThreadName("Hang Monitor");
+
+ MonitorAutoLock lock(*gMonitor);
+
+ // In order to avoid issues with the hang monitor incorrectly triggering
+ // during a general system stop such as sleeping, the monitor thread must
+ // run twice to trigger hang protection.
+ PRIntervalTime lastTimestamp = 0;
+ int waitCount = 0;
+
+#ifdef REPORT_CHROME_HANGS
+ Telemetry::ProcessedStack stack;
+ int32_t systemUptime = -1;
+ int32_t firefoxUptime = -1;
+ UniquePtr<HangAnnotations> annotations;
+#endif
+
+ while (true) {
+ if (gShutdown) {
+ return; // Exit the thread
+ }
+
+ // avoid rereading the volatile value in this loop
+ PRIntervalTime timestamp = gTimestamp;
+
+ PRIntervalTime now = PR_IntervalNow();
+
+ if (timestamp != PR_INTERVAL_NO_WAIT &&
+ now < timestamp) {
+ // 32-bit overflow, reset for another waiting period
+ timestamp = 1; // lowest legal PRInterval value
+ }
+
+ if (timestamp != PR_INTERVAL_NO_WAIT &&
+ timestamp == lastTimestamp &&
+ gTimeout > 0) {
+ ++waitCount;
+#ifdef REPORT_CHROME_HANGS
+ // Capture the chrome-hang stack + Firefox & system uptimes after
+ // the minimum hang duration has been reached (not when the hang ends)
+ if (waitCount == 2) {
+ GetChromeHangReport(stack, systemUptime, firefoxUptime);
+ annotations = ChromeHangAnnotatorCallout();
+ }
+#else
+ // This is the crash-on-hang feature.
+ // See bug 867313 for the quirk in the waitCount comparison
+ if (waitCount >= 2) {
+ int32_t delay =
+ int32_t(PR_IntervalToSeconds(now - timestamp));
+ if (delay >= gTimeout) {
+ MonitorAutoUnlock unlock(*gMonitor);
+ Crash();
+ }
+ }
+#endif
+ } else {
+#ifdef REPORT_CHROME_HANGS
+ if (waitCount >= 2) {
+ uint32_t hangDuration = PR_IntervalToSeconds(now - lastTimestamp);
+ Telemetry::RecordChromeHang(hangDuration, stack, systemUptime,
+ firefoxUptime, Move(annotations));
+ stack.Clear();
+ }
+#endif
+ lastTimestamp = timestamp;
+ waitCount = 0;
+ }
+
+ PRIntervalTime timeout;
+ if (gTimeout <= 0) {
+ timeout = PR_INTERVAL_NO_TIMEOUT;
+ } else {
+ timeout = PR_MillisecondsToInterval(gTimeout * 500);
+ }
+ lock.Wait(timeout);
+ }
+}
+
+void
+Startup()
+{
+ if (GeckoProcessType_Default != XRE_GetProcessType() &&
+ GeckoProcessType_Content != XRE_GetProcessType()) {
+ return;
+ }
+
+ MOZ_ASSERT(!gMonitor, "Hang monitor already initialized");
+ gMonitor = new Monitor("HangMonitor");
+
+ Preferences::RegisterCallback(PrefChanged, kHangMonitorPrefName, nullptr);
+ PrefChanged(nullptr, nullptr);
+
+#ifdef REPORT_CHROME_HANGS
+ Preferences::RegisterCallback(PrefChanged, kTelemetryPrefName, nullptr);
+ winMainThreadHandle =
+ OpenThread(THREAD_ALL_ACCESS, FALSE, GetCurrentThreadId());
+ if (!winMainThreadHandle) {
+ return;
+ }
+#endif
+
+ // Don't actually start measuring hangs until we hit the main event loop.
+ // This potentially misses a small class of really early startup hangs,
+ // but avoids dealing with some xpcshell tests and other situations which
+ // start XPCOM but don't ever start the event loop.
+ Suspend();
+
+ gThread = PR_CreateThread(PR_USER_THREAD,
+ ThreadMain,
+ nullptr, PR_PRIORITY_LOW, PR_GLOBAL_THREAD,
+ PR_JOINABLE_THREAD, 0);
+}
+
+void
+Shutdown()
+{
+ if (GeckoProcessType_Default != XRE_GetProcessType() &&
+ GeckoProcessType_Content != XRE_GetProcessType()) {
+ return;
+ }
+
+ MOZ_ASSERT(gMonitor, "Hang monitor not started");
+
+ {
+ // Scope the lock we're going to delete later
+ MonitorAutoLock lock(*gMonitor);
+ gShutdown = true;
+ lock.Notify();
+ }
+
+ // thread creation could theoretically fail
+ if (gThread) {
+ PR_JoinThread(gThread);
+ gThread = nullptr;
+ }
+
+ delete gMonitor;
+ gMonitor = nullptr;
+}
+
+static bool
+IsUIMessageWaiting()
+{
+#ifndef XP_WIN
+ return false;
+#else
+#define NS_WM_IMEFIRST WM_IME_SETCONTEXT
+#define NS_WM_IMELAST WM_IME_KEYUP
+ BOOL haveUIMessageWaiting = FALSE;
+ MSG msg;
+ haveUIMessageWaiting |= ::PeekMessageW(&msg, nullptr, WM_KEYFIRST,
+ WM_IME_KEYLAST, PM_NOREMOVE);
+ haveUIMessageWaiting |= ::PeekMessageW(&msg, nullptr, NS_WM_IMEFIRST,
+ NS_WM_IMELAST, PM_NOREMOVE);
+ haveUIMessageWaiting |= ::PeekMessageW(&msg, nullptr, WM_MOUSEFIRST,
+ WM_MOUSELAST, PM_NOREMOVE);
+ return haveUIMessageWaiting;
+#endif
+}
+
+void
+NotifyActivity(ActivityType aActivityType)
+{
+ MOZ_ASSERT(NS_IsMainThread(),
+ "HangMonitor::Notify called from off the main thread.");
+
+ // Determine the activity type more specifically
+ if (aActivityType == kGeneralActivity) {
+ aActivityType = IsUIMessageWaiting() ? kActivityUIAVail :
+ kActivityNoUIAVail;
+ }
+
+ // Calculate the cumulative amount of lag time since the last UI message
+ static uint32_t cumulativeUILagMS = 0;
+ switch (aActivityType) {
+ case kActivityNoUIAVail:
+ cumulativeUILagMS = 0;
+ break;
+ case kActivityUIAVail:
+ case kUIActivity:
+ if (gTimestamp != PR_INTERVAL_NO_WAIT) {
+ cumulativeUILagMS += PR_IntervalToMilliseconds(PR_IntervalNow() -
+ gTimestamp);
+ }
+ break;
+ default:
+ break;
+ }
+
+  // This write is not done under the lock because gTimestamp is a 32-bit
+  // Atomic<PRIntervalTime> which can be read/written atomically, and we don't
+  // want to pay locking penalties here.
+ gTimestamp = PR_IntervalNow();
+
+ // If we have UI activity we should reset the timer and report it
+ if (aActivityType == kUIActivity) {
+ mozilla::Telemetry::Accumulate(mozilla::Telemetry::EVENTLOOP_UI_ACTIVITY_EXP_MS,
+ cumulativeUILagMS);
+ cumulativeUILagMS = 0;
+ }
+
+ if (gThread && !gShutdown) {
+ mozilla::BackgroundHangMonitor().NotifyActivity();
+ }
+}
+
+void
+Suspend()
+{
+ MOZ_ASSERT(NS_IsMainThread(),
+ "HangMonitor::Suspend called from off the main thread.");
+
+  // Because gTimestamp changes, this resets the wait count.
+ gTimestamp = PR_INTERVAL_NO_WAIT;
+
+ if (gThread && !gShutdown) {
+ mozilla::BackgroundHangMonitor().NotifyWait();
+ }
+}
+
+} // namespace HangMonitor
+} // namespace mozilla
diff --git a/xpcom/threads/HangMonitor.h b/xpcom/threads/HangMonitor.h
new file mode 100644
index 000000000..fd0e6ff83
--- /dev/null
+++ b/xpcom/threads/HangMonitor.h
@@ -0,0 +1,58 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef mozilla_HangMonitor_h
+#define mozilla_HangMonitor_h
+
+namespace mozilla {
+namespace HangMonitor {
+
+/**
+ * Signifies the type of activity in question.
+ */
+enum ActivityType
+{
+ /* There is activity and it is known to be UI related activity. */
+ kUIActivity,
+
+ /* There is non UI activity and no UI activity is pending */
+ kActivityNoUIAVail,
+
+ /* There is non UI activity and UI activity is known to be pending */
+ kActivityUIAVail,
+
+ /* There is non UI activity and UI activity pending is unknown */
+ kGeneralActivity
+};
+
+/**
+ * Start monitoring hangs. Should be called by the XPCOM startup process only.
+ */
+void Startup();
+
+/**
+ * Stop monitoring hangs and join the thread.
+ */
+void Shutdown();
+
+/**
+ * Notify the hang monitor of activity which will reset its internal timer.
+ *
+ * @param activityType The type of activity being reported.
+ * @see ActivityType
+ */
+void NotifyActivity(ActivityType activityType = kGeneralActivity);
+
+/**
+ * Notify the hang monitor that the browser is now idle and no detection should
+ * be done.
+ */
+void Suspend();
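+
+// Illustrative sketch only (the surrounding event pump is hypothetical): a
+// main-thread event loop typically reports activity before handling an event
+// and suspends the monitor before blocking to wait for the next one.
+//
+//   mozilla::HangMonitor::NotifyActivity(mozilla::HangMonitor::kUIActivity);
+//   // ... process the pending event ...
+//   mozilla::HangMonitor::Suspend();  // about to block; don't treat the wait as a hang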
+
+} // namespace HangMonitor
+} // namespace mozilla
+
+#endif // mozilla_HangMonitor_h
diff --git a/xpcom/threads/LazyIdleThread.cpp b/xpcom/threads/LazyIdleThread.cpp
new file mode 100644
index 000000000..527cc6819
--- /dev/null
+++ b/xpcom/threads/LazyIdleThread.cpp
@@ -0,0 +1,624 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "LazyIdleThread.h"
+
+#include "nsIObserverService.h"
+
+#include "GeckoProfiler.h"
+#include "nsComponentManagerUtils.h"
+#include "nsIIdlePeriod.h"
+#include "nsServiceManagerUtils.h"
+#include "nsThreadUtils.h"
+#include "mozilla/Services.h"
+
+#ifdef DEBUG
+#define ASSERT_OWNING_THREAD() \
+ PR_BEGIN_MACRO \
+ nsIThread* currentThread = NS_GetCurrentThread(); \
+ if (currentThread) { \
+ nsCOMPtr<nsISupports> current(do_QueryInterface(currentThread)); \
+ nsCOMPtr<nsISupports> test(do_QueryInterface(mOwningThread)); \
+ MOZ_ASSERT(current == test, "Wrong thread!"); \
+ } \
+ PR_END_MACRO
+#else
+#define ASSERT_OWNING_THREAD() /* nothing */
+#endif
+
+namespace mozilla {
+
+LazyIdleThread::LazyIdleThread(uint32_t aIdleTimeoutMS,
+ const nsCSubstring& aName,
+ ShutdownMethod aShutdownMethod,
+ nsIObserver* aIdleObserver)
+ : mMutex("LazyIdleThread::mMutex")
+ , mOwningThread(NS_GetCurrentThread())
+ , mIdleObserver(aIdleObserver)
+ , mQueuedRunnables(nullptr)
+ , mIdleTimeoutMS(aIdleTimeoutMS)
+ , mPendingEventCount(0)
+ , mIdleNotificationCount(0)
+ , mShutdownMethod(aShutdownMethod)
+ , mShutdown(false)
+ , mThreadIsShuttingDown(false)
+ , mIdleTimeoutEnabled(true)
+ , mName(aName)
+{
+ MOZ_ASSERT(mOwningThread, "Need owning thread!");
+}
+
+LazyIdleThread::~LazyIdleThread()
+{
+ ASSERT_OWNING_THREAD();
+
+ Shutdown();
+}
+
+void
+LazyIdleThread::SetWeakIdleObserver(nsIObserver* aObserver)
+{
+ ASSERT_OWNING_THREAD();
+
+ if (mShutdown) {
+ NS_WARNING_ASSERTION(!aObserver,
+ "Setting an observer after Shutdown was called!");
+ return;
+ }
+
+ mIdleObserver = aObserver;
+}
+
+void
+LazyIdleThread::DisableIdleTimeout()
+{
+ ASSERT_OWNING_THREAD();
+ if (!mIdleTimeoutEnabled) {
+ return;
+ }
+ mIdleTimeoutEnabled = false;
+
+ if (mIdleTimer && NS_FAILED(mIdleTimer->Cancel())) {
+ NS_WARNING("Failed to cancel timer!");
+ }
+
+ MutexAutoLock lock(mMutex);
+
+ // Pretend we have a pending event to keep the idle timer from firing.
+ MOZ_ASSERT(mPendingEventCount < UINT32_MAX, "Way too many!");
+ mPendingEventCount++;
+}
+
+void
+LazyIdleThread::EnableIdleTimeout()
+{
+ ASSERT_OWNING_THREAD();
+ if (mIdleTimeoutEnabled) {
+ return;
+ }
+ mIdleTimeoutEnabled = true;
+
+ {
+ MutexAutoLock lock(mMutex);
+
+ MOZ_ASSERT(mPendingEventCount, "Mismatched calls to observer methods!");
+ --mPendingEventCount;
+ }
+
+ if (mThread) {
+ nsCOMPtr<nsIRunnable> runnable(new Runnable());
+ if (NS_FAILED(Dispatch(runnable.forget(), NS_DISPATCH_NORMAL))) {
+ NS_WARNING("Failed to dispatch!");
+ }
+ }
+}
+
+void
+LazyIdleThread::PreDispatch()
+{
+ MutexAutoLock lock(mMutex);
+
+ MOZ_ASSERT(mPendingEventCount < UINT32_MAX, "Way too many!");
+ mPendingEventCount++;
+}
+
+nsresult
+LazyIdleThread::EnsureThread()
+{
+ ASSERT_OWNING_THREAD();
+
+ if (mShutdown) {
+ return NS_ERROR_UNEXPECTED;
+ }
+
+ if (mThread) {
+ return NS_OK;
+ }
+
+ MOZ_ASSERT(!mPendingEventCount, "Shouldn't have events yet!");
+ MOZ_ASSERT(!mIdleNotificationCount, "Shouldn't have idle events yet!");
+ MOZ_ASSERT(!mIdleTimer, "Should have killed this long ago!");
+ MOZ_ASSERT(!mThreadIsShuttingDown, "Should have cleared that!");
+
+ nsresult rv;
+
+ if (mShutdownMethod == AutomaticShutdown && NS_IsMainThread()) {
+ nsCOMPtr<nsIObserverService> obs =
+ do_GetService(NS_OBSERVERSERVICE_CONTRACTID, &rv);
+ if (NS_WARN_IF(NS_FAILED(rv))) {
+ return rv;
+ }
+
+ rv = obs->AddObserver(this, "xpcom-shutdown-threads", false);
+ if (NS_WARN_IF(NS_FAILED(rv))) {
+ return rv;
+ }
+ }
+
+ mIdleTimer = do_CreateInstance(NS_TIMER_CONTRACTID, &rv);
+ if (NS_WARN_IF(!mIdleTimer)) {
+ return NS_ERROR_UNEXPECTED;
+ }
+
+ nsCOMPtr<nsIRunnable> runnable =
+ NewRunnableMethod(this, &LazyIdleThread::InitThread);
+ if (NS_WARN_IF(!runnable)) {
+ return NS_ERROR_UNEXPECTED;
+ }
+
+ rv = NS_NewThread(getter_AddRefs(mThread), runnable);
+ if (NS_WARN_IF(NS_FAILED(rv))) {
+ return rv;
+ }
+
+ return NS_OK;
+}
+
+void
+LazyIdleThread::InitThread()
+{
+ char aLocal;
+ profiler_register_thread(mName.get(), &aLocal);
+
+ PR_SetCurrentThreadName(mName.get());
+
+ // Happens on mThread but mThread may not be set yet...
+
+ nsCOMPtr<nsIThreadInternal> thread(do_QueryInterface(NS_GetCurrentThread()));
+ MOZ_ASSERT(thread, "This should always succeed!");
+
+ if (NS_FAILED(thread->SetObserver(this))) {
+ NS_WARNING("Failed to set thread observer!");
+ }
+}
+
+void
+LazyIdleThread::CleanupThread()
+{
+ nsCOMPtr<nsIThreadInternal> thread(do_QueryInterface(NS_GetCurrentThread()));
+ MOZ_ASSERT(thread, "This should always succeed!");
+
+ if (NS_FAILED(thread->SetObserver(nullptr))) {
+ NS_WARNING("Failed to set thread observer!");
+ }
+
+ {
+ MutexAutoLock lock(mMutex);
+
+ MOZ_ASSERT(!mThreadIsShuttingDown, "Shouldn't be true ever!");
+ mThreadIsShuttingDown = true;
+ }
+
+ profiler_unregister_thread();
+}
+
+void
+LazyIdleThread::ScheduleTimer()
+{
+ ASSERT_OWNING_THREAD();
+
+ bool shouldSchedule;
+ {
+ MutexAutoLock lock(mMutex);
+
+ MOZ_ASSERT(mIdleNotificationCount, "Should have at least one!");
+ --mIdleNotificationCount;
+
+ shouldSchedule = !mIdleNotificationCount && !mPendingEventCount;
+ }
+
+ if (mIdleTimer) {
+ if (NS_FAILED(mIdleTimer->Cancel())) {
+ NS_WARNING("Failed to cancel timer!");
+ }
+
+ if (shouldSchedule &&
+ NS_FAILED(mIdleTimer->InitWithCallback(this, mIdleTimeoutMS,
+ nsITimer::TYPE_ONE_SHOT))) {
+ NS_WARNING("Failed to schedule timer!");
+ }
+ }
+}
+
+nsresult
+LazyIdleThread::ShutdownThread()
+{
+ ASSERT_OWNING_THREAD();
+
+ // Before calling Shutdown() on the real thread we need to put a queue in
+ // place in case a runnable is posted to the thread while it's in the
+ // process of shutting down. This will be our queue.
+ AutoTArray<nsCOMPtr<nsIRunnable>, 10> queuedRunnables;
+
+ nsresult rv;
+
+ // Make sure to cancel the shutdown timer before spinning the event loop
+ // during |mThread->Shutdown()| below. Otherwise the timer might fire and we
+ // could reenter here.
+ if (mIdleTimer) {
+ rv = mIdleTimer->Cancel();
+ if (NS_WARN_IF(NS_FAILED(rv))) {
+ return rv;
+ }
+
+ mIdleTimer = nullptr;
+ }
+
+ if (mThread) {
+ if (mShutdownMethod == AutomaticShutdown && NS_IsMainThread()) {
+ nsCOMPtr<nsIObserverService> obs =
+ mozilla::services::GetObserverService();
+ NS_WARNING_ASSERTION(obs, "Failed to get observer service!");
+
+ if (obs &&
+ NS_FAILED(obs->RemoveObserver(this, "xpcom-shutdown-threads"))) {
+ NS_WARNING("Failed to remove observer!");
+ }
+ }
+
+ if (mIdleObserver) {
+ mIdleObserver->Observe(static_cast<nsIThread*>(this), IDLE_THREAD_TOPIC,
+ nullptr);
+ }
+
+#ifdef DEBUG
+ {
+ MutexAutoLock lock(mMutex);
+ MOZ_ASSERT(!mThreadIsShuttingDown, "Huh?!");
+ }
+#endif
+
+ nsCOMPtr<nsIRunnable> runnable =
+ NewRunnableMethod(this, &LazyIdleThread::CleanupThread);
+ if (NS_WARN_IF(!runnable)) {
+ return NS_ERROR_UNEXPECTED;
+ }
+
+ PreDispatch();
+
+ rv = mThread->Dispatch(runnable.forget(), NS_DISPATCH_NORMAL);
+ if (NS_WARN_IF(NS_FAILED(rv))) {
+ return rv;
+ }
+
+ // Put the temporary queue in place before calling Shutdown().
+ mQueuedRunnables = &queuedRunnables;
+
+ if (NS_FAILED(mThread->Shutdown())) {
+ NS_ERROR("Failed to shutdown the thread!");
+ }
+
+ // Now unset the queue.
+ mQueuedRunnables = nullptr;
+
+ mThread = nullptr;
+
+ {
+ MutexAutoLock lock(mMutex);
+
+ MOZ_ASSERT(!mPendingEventCount, "Huh?!");
+ MOZ_ASSERT(!mIdleNotificationCount, "Huh?!");
+ MOZ_ASSERT(mThreadIsShuttingDown, "Huh?!");
+ mThreadIsShuttingDown = false;
+ }
+ }
+
+ // If our temporary queue has any runnables then we need to dispatch them.
+ if (queuedRunnables.Length()) {
+ // If the thread manager has gone away then these runnables will never run.
+ if (mShutdown) {
+ NS_ERROR("Runnables dispatched to LazyIdleThread will never run!");
+ return NS_OK;
+ }
+
+ // Re-dispatch the queued runnables.
+ for (uint32_t index = 0; index < queuedRunnables.Length(); index++) {
+ nsCOMPtr<nsIRunnable> runnable;
+ runnable.swap(queuedRunnables[index]);
+ MOZ_ASSERT(runnable, "Null runnable?!");
+
+ if (NS_FAILED(Dispatch(runnable.forget(), NS_DISPATCH_NORMAL))) {
+ NS_ERROR("Failed to re-dispatch queued runnable!");
+ }
+ }
+ }
+
+ return NS_OK;
+}
+
+void
+LazyIdleThread::SelfDestruct()
+{
+ MOZ_ASSERT(mRefCnt == 1, "Bad refcount!");
+ delete this;
+}
+
+NS_IMPL_ADDREF(LazyIdleThread)
+
+NS_IMETHODIMP_(MozExternalRefCountType)
+LazyIdleThread::Release()
+{
+ nsrefcnt count = --mRefCnt;
+ NS_LOG_RELEASE(this, count, "LazyIdleThread");
+
+ if (!count) {
+ // Stabilize refcount.
+ mRefCnt = 1;
+
+ nsCOMPtr<nsIRunnable> runnable =
+ NewNonOwningRunnableMethod(this, &LazyIdleThread::SelfDestruct);
+ NS_WARNING_ASSERTION(runnable, "Couldn't make runnable!");
+
+ if (NS_FAILED(NS_DispatchToCurrentThread(runnable))) {
+ MOZ_ASSERT(NS_IsMainThread(), "Wrong thread!");
+ // The only way this could fail is if we're in shutdown, and in that case
+ // threads should have been joined already. Deleting here isn't dangerous
+ // anymore because we won't spin the event loop waiting to join the
+ // thread.
+ SelfDestruct();
+ }
+ }
+
+ return count;
+}
+
+NS_IMPL_QUERY_INTERFACE(LazyIdleThread, nsIThread,
+ nsIEventTarget,
+ nsITimerCallback,
+ nsIThreadObserver,
+ nsIObserver)
+
+NS_IMETHODIMP
+LazyIdleThread::DispatchFromScript(nsIRunnable* aEvent, uint32_t aFlags)
+{
+ nsCOMPtr<nsIRunnable> event(aEvent);
+ return Dispatch(event.forget(), aFlags);
+}
+
+NS_IMETHODIMP
+LazyIdleThread::Dispatch(already_AddRefed<nsIRunnable> aEvent,
+ uint32_t aFlags)
+{
+ ASSERT_OWNING_THREAD();
+ nsCOMPtr<nsIRunnable> event(aEvent); // avoid leaks
+
+ // LazyIdleThread can't always support synchronous dispatch currently.
+ if (NS_WARN_IF(aFlags != NS_DISPATCH_NORMAL)) {
+ return NS_ERROR_NOT_IMPLEMENTED;
+ }
+
+ if (NS_WARN_IF(mShutdown)) {
+ return NS_ERROR_UNEXPECTED;
+ }
+
+ // If our thread is shutting down then we can't actually dispatch right now.
+ // Queue this runnable for later.
+ if (UseRunnableQueue()) {
+ mQueuedRunnables->AppendElement(event);
+ return NS_OK;
+ }
+
+ nsresult rv = EnsureThread();
+ if (NS_WARN_IF(NS_FAILED(rv))) {
+ return rv;
+ }
+
+ PreDispatch();
+
+ return mThread->Dispatch(event.forget(), aFlags);
+}
+
+NS_IMETHODIMP
+LazyIdleThread::DelayedDispatch(already_AddRefed<nsIRunnable>, uint32_t)
+{
+ return NS_ERROR_NOT_IMPLEMENTED;
+}
+
+NS_IMETHODIMP
+LazyIdleThread::IsOnCurrentThread(bool* aIsOnCurrentThread)
+{
+ if (mThread) {
+ return mThread->IsOnCurrentThread(aIsOnCurrentThread);
+ }
+
+ *aIsOnCurrentThread = false;
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+LazyIdleThread::GetPRThread(PRThread** aPRThread)
+{
+ if (mThread) {
+ return mThread->GetPRThread(aPRThread);
+ }
+
+ *aPRThread = nullptr;
+ return NS_ERROR_NOT_AVAILABLE;
+}
+
+NS_IMETHODIMP
+LazyIdleThread::GetCanInvokeJS(bool* aCanInvokeJS)
+{
+ *aCanInvokeJS = false;
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+LazyIdleThread::SetCanInvokeJS(bool aCanInvokeJS)
+{
+ return NS_ERROR_NOT_IMPLEMENTED;
+}
+
+NS_IMETHODIMP
+LazyIdleThread::AsyncShutdown()
+{
+ ASSERT_OWNING_THREAD();
+ return NS_ERROR_NOT_IMPLEMENTED;
+}
+
+NS_IMETHODIMP
+LazyIdleThread::Shutdown()
+{
+ ASSERT_OWNING_THREAD();
+
+ mShutdown = true;
+
+ nsresult rv = ShutdownThread();
+ MOZ_ASSERT(!mThread, "Should have destroyed this by now!");
+
+ mIdleObserver = nullptr;
+
+ if (NS_WARN_IF(NS_FAILED(rv))) {
+ return rv;
+ }
+
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+LazyIdleThread::HasPendingEvents(bool* aHasPendingEvents)
+{
+ // This is only supposed to be called from the thread itself so it's not
+ // implemented here.
+ NS_NOTREACHED("Shouldn't ever call this!");
+ return NS_ERROR_UNEXPECTED;
+}
+
+NS_IMETHODIMP
+LazyIdleThread::IdleDispatch(already_AddRefed<nsIRunnable> aEvent)
+{
+ return NS_ERROR_NOT_IMPLEMENTED;
+}
+
+NS_IMETHODIMP
+LazyIdleThread::RegisterIdlePeriod(already_AddRefed<nsIIdlePeriod> aIdlePeriod)
+{
+ return NS_ERROR_NOT_IMPLEMENTED;
+}
+
+NS_IMETHODIMP
+LazyIdleThread::ProcessNextEvent(bool aMayWait,
+ bool* aEventWasProcessed)
+{
+ // This is only supposed to be called from the thread itself so it's not
+ // implemented here.
+ NS_NOTREACHED("Shouldn't ever call this!");
+ return NS_ERROR_UNEXPECTED;
+}
+
+NS_IMETHODIMP
+LazyIdleThread::Notify(nsITimer* aTimer)
+{
+ ASSERT_OWNING_THREAD();
+
+ {
+ MutexAutoLock lock(mMutex);
+
+ if (mPendingEventCount || mIdleNotificationCount) {
+ // Another event was scheduled since this timer was set. Don't do
+ // anything and wait for the timer to fire again.
+ return NS_OK;
+ }
+ }
+
+ nsresult rv = ShutdownThread();
+ if (NS_WARN_IF(NS_FAILED(rv))) {
+ return rv;
+ }
+
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+LazyIdleThread::OnDispatchedEvent(nsIThreadInternal* /*aThread */)
+{
+ MOZ_ASSERT(NS_GetCurrentThread() == mOwningThread, "Wrong thread!");
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+LazyIdleThread::OnProcessNextEvent(nsIThreadInternal* /* aThread */,
+ bool /* aMayWait */)
+{
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+LazyIdleThread::AfterProcessNextEvent(nsIThreadInternal* /* aThread */,
+ bool aEventWasProcessed)
+{
+ bool shouldNotifyIdle;
+ {
+ MutexAutoLock lock(mMutex);
+
+ if (aEventWasProcessed) {
+ MOZ_ASSERT(mPendingEventCount, "Mismatched calls to observer methods!");
+ --mPendingEventCount;
+ }
+
+ if (mThreadIsShuttingDown) {
+ // We're shutting down, no need to fire any timer.
+ return NS_OK;
+ }
+
+ shouldNotifyIdle = !mPendingEventCount;
+ if (shouldNotifyIdle) {
+ MOZ_ASSERT(mIdleNotificationCount < UINT32_MAX, "Way too many!");
+ mIdleNotificationCount++;
+ }
+ }
+
+ if (shouldNotifyIdle) {
+ nsCOMPtr<nsIRunnable> runnable =
+ NewRunnableMethod(this, &LazyIdleThread::ScheduleTimer);
+ if (NS_WARN_IF(!runnable)) {
+ return NS_ERROR_UNEXPECTED;
+ }
+
+ nsresult rv = mOwningThread->Dispatch(runnable.forget(), NS_DISPATCH_NORMAL);
+ if (NS_WARN_IF(NS_FAILED(rv))) {
+ return rv;
+ }
+ }
+
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+LazyIdleThread::Observe(nsISupports* /* aSubject */,
+ const char* aTopic,
+ const char16_t* /* aData */)
+{
+ MOZ_ASSERT(NS_IsMainThread(), "Wrong thread!");
+ MOZ_ASSERT(mShutdownMethod == AutomaticShutdown,
+ "Should not receive notifications if not AutomaticShutdown!");
+ MOZ_ASSERT(!strcmp("xpcom-shutdown-threads", aTopic), "Bad topic!");
+
+ Shutdown();
+ return NS_OK;
+}
+
+} // namespace mozilla
diff --git a/xpcom/threads/LazyIdleThread.h b/xpcom/threads/LazyIdleThread.h
new file mode 100644
index 000000000..6bf8e8e81
--- /dev/null
+++ b/xpcom/threads/LazyIdleThread.h
@@ -0,0 +1,226 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef mozilla_lazyidlethread_h__
+#define mozilla_lazyidlethread_h__
+
+#ifndef MOZILLA_INTERNAL_API
+#error "This header is only usable from within libxul (MOZILLA_INTERNAL_API)."
+#endif
+
+#include "nsIObserver.h"
+#include "nsIThreadInternal.h"
+#include "nsITimer.h"
+
+#include "mozilla/Mutex.h"
+#include "nsCOMPtr.h"
+#include "nsTArray.h"
+#include "nsString.h"
+#include "mozilla/Attributes.h"
+
+#define IDLE_THREAD_TOPIC "thread-shutting-down"
+
+namespace mozilla {
+
+/**
+ * This class provides a basic event target that creates its thread lazily and
+ * destroys its thread after a period of inactivity. It may be created on any
+ * thread but it may only be used from the thread on which it is created. If it
+ * is created on the main thread then it will automatically join its thread on
+ * XPCOM shutdown using the Observer Service.
+ */
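+//
+// A minimal usage sketch (illustrative only; DoWork stands in for a
+// caller-provided nsIRunnable):
+//
+//   RefPtr<LazyIdleThread> lazyThread =
+//     new LazyIdleThread(3000 /* idle timeout in ms */,
+//                        NS_LITERAL_CSTRING("ExampleIO"));
+//   nsCOMPtr<nsIRunnable> work = new DoWork();
+//   lazyThread->Dispatch(work.forget(), NS_DISPATCH_NORMAL);
+//   // The real thread is created on first dispatch and joined after it has
+//   // been idle for the timeout (or at xpcom-shutdown-threads when created
+//   // on the main thread with AutomaticShutdown).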
+class LazyIdleThread final
+ : public nsIThread
+ , public nsITimerCallback
+ , public nsIThreadObserver
+ , public nsIObserver
+{
+public:
+ NS_DECL_THREADSAFE_ISUPPORTS
+ NS_DECL_NSIEVENTTARGET
+ NS_DECL_NSITHREAD
+ NS_DECL_NSITIMERCALLBACK
+ NS_DECL_NSITHREADOBSERVER
+ NS_DECL_NSIOBSERVER
+ using nsIEventTarget::Dispatch;
+
+ enum ShutdownMethod
+ {
+ AutomaticShutdown = 0,
+ ManualShutdown
+ };
+
+ /**
+   * Create a new LazyIdleThread that will destroy its thread after the given
+   * number of milliseconds of inactivity.
+ */
+ LazyIdleThread(uint32_t aIdleTimeoutMS,
+ const nsCSubstring& aName,
+ ShutdownMethod aShutdownMethod = AutomaticShutdown,
+ nsIObserver* aIdleObserver = nullptr);
+
+ /**
+ * Add an observer that will be notified when the thread is idle and about to
+ * be shut down. The aSubject argument can be QueryInterface'd to an nsIThread
+ * that can be used to post cleanup events. The aTopic argument will be
+   * IDLE_THREAD_TOPIC, and aData will be null. To avoid circular references,
+   * the LazyIdleThread does not add a reference to the observer, which is
+   * assumed to be the owner. It is the caller's responsibility to clear this
+   * observer if the pointer becomes invalid.
+ */
+ void SetWeakIdleObserver(nsIObserver* aObserver);
+
+ /**
+ * Disable the idle timeout for this thread. No effect if the timeout is
+ * already disabled.
+ */
+ void DisableIdleTimeout();
+
+ /**
+ * Enable the idle timeout. No effect if the timeout is already enabled.
+ */
+ void EnableIdleTimeout();
+
+private:
+ /**
+ * Calls Shutdown().
+ */
+ ~LazyIdleThread();
+
+ /**
+ * Called just before dispatching to mThread.
+ */
+ void PreDispatch();
+
+ /**
+ * Makes sure a valid thread lives in mThread.
+ */
+ nsresult EnsureThread();
+
+ /**
+ * Called on mThread to set up the thread observer.
+ */
+ void InitThread();
+
+ /**
+ * Called on mThread to clean up the thread observer.
+ */
+ void CleanupThread();
+
+ /**
+ * Called on the main thread when mThread believes itself to be idle. Sets up
+ * the idle timer.
+ */
+ void ScheduleTimer();
+
+ /**
+ * Called when we are shutting down mThread.
+ */
+ nsresult ShutdownThread();
+
+ /**
+ * Deletes this object. Used to delay calling mThread->Shutdown() during the
+ * final release (during a GC, for instance).
+ */
+ void SelfDestruct();
+
+ /**
+ * Returns true if events should be queued rather than immediately dispatched
+ * to mThread. Currently only happens when the thread is shutting down.
+ */
+ bool UseRunnableQueue()
+ {
+ return !!mQueuedRunnables;
+ }
+
+ /**
+ * Protects data that is accessed on both threads.
+ */
+ mozilla::Mutex mMutex;
+
+ /**
+ * Touched on both threads but set before mThread is created. Used to direct
+ * timer events to the owning thread.
+ */
+ nsCOMPtr<nsIThread> mOwningThread;
+
+ /**
+ * Only accessed on the owning thread. Set by EnsureThread().
+ */
+ nsCOMPtr<nsIThread> mThread;
+
+ /**
+ * Protected by mMutex. Created when mThread has no pending events and fired
+ * at mOwningThread. Any thread that dispatches to mThread will take ownership
+ * of the timer and fire a separate cancel event to the owning thread.
+ */
+ nsCOMPtr<nsITimer> mIdleTimer;
+
+ /**
+ * Idle observer. Called when the thread is about to be shut down. Released
+ * only when Shutdown() is called.
+ */
+ nsIObserver* MOZ_UNSAFE_REF("See the documentation for SetWeakIdleObserver for "
+ "how the owner of LazyIdleThread should manage the "
+ "lifetime information of this field") mIdleObserver;
+
+ /**
+ * Temporary storage for events that happen to be dispatched while we're in
+ * the process of shutting down our real thread.
+ */
+ nsTArray<nsCOMPtr<nsIRunnable>>* mQueuedRunnables;
+
+ /**
+ * The number of milliseconds a thread should be idle before dying.
+ */
+ const uint32_t mIdleTimeoutMS;
+
+ /**
+ * The number of events that are pending on mThread. A nonzero value means
+ * that the thread cannot be cleaned up.
+ */
+ uint32_t mPendingEventCount;
+
+ /**
+ * The number of times that mThread has dispatched an idle notification. Any
+ * timer that fires while this count is nonzero can safely be ignored as
+ * another timer will be on the way.
+ */
+ uint32_t mIdleNotificationCount;
+
+ /**
+ * Whether or not the thread should automatically shutdown. If the owner
+ * specified ManualShutdown at construction time then the owner should take
+ * care to call Shutdown() manually when appropriate.
+ */
+ ShutdownMethod mShutdownMethod;
+
+ /**
+ * Only accessed on the owning thread. Set to true when Shutdown() has been
+ * called and prevents EnsureThread() from recreating mThread.
+ */
+ bool mShutdown;
+
+ /**
+   * Set from CleanupThread and remains set until the thread has shut down.
+   * Prevents further idle notifications during the shutdown process.
+ */
+ bool mThreadIsShuttingDown;
+
+ /**
+ * Whether or not the idle timeout is enabled.
+ */
+ bool mIdleTimeoutEnabled;
+
+ /**
+ * Name of the thread, set on the actual thread after it gets created.
+ */
+ nsCString mName;
+};
+
+} // namespace mozilla
+
+#endif // mozilla_lazyidlethread_h__
diff --git a/xpcom/threads/LeakRefPtr.h b/xpcom/threads/LeakRefPtr.h
new file mode 100644
index 000000000..56f5d90af
--- /dev/null
+++ b/xpcom/threads/LeakRefPtr.h
@@ -0,0 +1,52 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* Smart pointer which, by default, leaks the refcounted object it owns. */
+
+#ifndef LeakRefPtr_h
+#define LeakRefPtr_h
+
+#include "mozilla/AlreadyAddRefed.h"
+
+namespace mozilla {
+
+/**
+ * An instance of this class behaves like a raw pointer that leaks the
+ * resource it owns unless it is explicitly released.
+ */
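+//
+// Illustrative sketch only (aEvent, canDispatch and PutEvent are hypothetical):
+// adopt a reference, hand it off with take(), and call release() only on paths
+// where dropping the reference on this thread is known to be safe. Otherwise
+// the reference is leaked by design instead of being released somewhere unsafe.
+//
+//   LeakRefPtr<nsIRunnable> event(Move(aEvent));
+//   if (!canDispatch) {
+//     event.release();       // safe to drop the reference here
+//     return NS_ERROR_UNEXPECTED;
+//   }
+//   PutEvent(event.take());  // ownership transferred to the queue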
+template<class T>
+class LeakRefPtr
+{
+public:
+ explicit LeakRefPtr(already_AddRefed<T>&& aPtr)
+ : mRawPtr(aPtr.take()) { }
+
+ explicit operator bool() const { return !!mRawPtr; }
+
+ LeakRefPtr<T>& operator=(already_AddRefed<T>&& aPtr)
+ {
+ mRawPtr = aPtr.take();
+ return *this;
+ }
+
+ T* get() const { return mRawPtr; }
+
+ already_AddRefed<T> take()
+ {
+ T* rawPtr = mRawPtr;
+ mRawPtr = nullptr;
+ return already_AddRefed<T>(rawPtr);
+ }
+
+ void release() { NS_RELEASE(mRawPtr); }
+
+private:
+ T* MOZ_OWNING_REF mRawPtr;
+};
+
+} // namespace mozilla
+
+#endif // LeakRefPtr_h
diff --git a/xpcom/threads/MainThreadIdlePeriod.cpp b/xpcom/threads/MainThreadIdlePeriod.cpp
new file mode 100644
index 000000000..4a5f99dd7
--- /dev/null
+++ b/xpcom/threads/MainThreadIdlePeriod.cpp
@@ -0,0 +1,76 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "MainThreadIdlePeriod.h"
+
+#include "mozilla/Maybe.h"
+#include "mozilla/Preferences.h"
+#include "nsRefreshDriver.h"
+
+#define DEFAULT_LONG_IDLE_PERIOD 50.0f
+#define DEFAULT_MIN_IDLE_PERIOD 3.0f
+
+namespace mozilla {
+
+NS_IMETHODIMP
+MainThreadIdlePeriod::GetIdlePeriodHint(TimeStamp* aIdleDeadline)
+{
+ MOZ_ASSERT(NS_IsMainThread());
+ MOZ_ASSERT(aIdleDeadline);
+
+ Maybe<TimeStamp> deadline = nsRefreshDriver::GetIdleDeadlineHint();
+
+ if (deadline.isSome()) {
+ // If the idle period is too small, then just return a null time
+ // to indicate we are busy. Otherwise return the actual deadline.
+ TimeDuration minIdlePeriod =
+ TimeDuration::FromMilliseconds(GetMinIdlePeriod());
+ bool busySoon = deadline.value().IsNull() ||
+ (TimeStamp::Now() >= (deadline.value() - minIdlePeriod));
+ *aIdleDeadline = busySoon ? TimeStamp() : deadline.value();
+ } else {
+ *aIdleDeadline =
+ TimeStamp::Now() + TimeDuration::FromMilliseconds(GetLongIdlePeriod());
+ }
+
+ return NS_OK;
+}
+
+/* static */ float
+MainThreadIdlePeriod::GetLongIdlePeriod()
+{
+ MOZ_ASSERT(NS_IsMainThread());
+
+ static float sLongIdlePeriod = DEFAULT_LONG_IDLE_PERIOD;
+ static bool sInitialized = false;
+
+ if (!sInitialized && Preferences::IsServiceAvailable()) {
+ sInitialized = true;
+ Preferences::AddFloatVarCache(&sLongIdlePeriod, "idle_queue.long_period",
+ DEFAULT_LONG_IDLE_PERIOD);
+ }
+
+ return sLongIdlePeriod;
+}
+
+/* static */ float
+MainThreadIdlePeriod::GetMinIdlePeriod()
+{
+ MOZ_ASSERT(NS_IsMainThread());
+
+ static float sMinIdlePeriod = DEFAULT_MIN_IDLE_PERIOD;
+ static bool sInitialized = false;
+
+ if (!sInitialized && Preferences::IsServiceAvailable()) {
+ sInitialized = true;
+ Preferences::AddFloatVarCache(&sMinIdlePeriod, "idle_queue.min_period",
+ DEFAULT_MIN_IDLE_PERIOD);
+ }
+
+ return sMinIdlePeriod;
+}
+
+} // namespace mozilla
diff --git a/xpcom/threads/MainThreadIdlePeriod.h b/xpcom/threads/MainThreadIdlePeriod.h
new file mode 100644
index 000000000..2b773551c
--- /dev/null
+++ b/xpcom/threads/MainThreadIdlePeriod.h
@@ -0,0 +1,28 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef mozilla_dom_mainthreadidleperiod_h
+#define mozilla_dom_mainthreadidleperiod_h
+
+#include "mozilla/TimeStamp.h"
+#include "nsThreadUtils.h"
+
+namespace mozilla {
+
+class MainThreadIdlePeriod final : public IdlePeriod
+{
+public:
+ NS_DECL_NSIIDLEPERIOD
+
+ static float GetLongIdlePeriod();
+ static float GetMinIdlePeriod();
+private:
+ virtual ~MainThreadIdlePeriod() {}
+};
+
+} // namespace mozilla
+
+#endif // mozilla_dom_mainthreadidleperiod_h
diff --git a/xpcom/threads/MozPromise.h b/xpcom/threads/MozPromise.h
new file mode 100644
index 000000000..7a2921d2a
--- /dev/null
+++ b/xpcom/threads/MozPromise.h
@@ -0,0 +1,1067 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#if !defined(MozPromise_h_)
+#define MozPromise_h_
+
+#include "mozilla/AbstractThread.h"
+#include "mozilla/IndexSequence.h"
+#include "mozilla/Logging.h"
+#include "mozilla/Maybe.h"
+#include "mozilla/Mutex.h"
+#include "mozilla/Monitor.h"
+#include "mozilla/Tuple.h"
+#include "mozilla/TypeTraits.h"
+
+#include "nsTArray.h"
+#include "nsThreadUtils.h"
+
+#if defined(DEBUG) || !defined(RELEASE_OR_BETA)
+#define PROMISE_DEBUG
+#endif
+
+#ifdef PROMISE_DEBUG
+#define PROMISE_ASSERT MOZ_RELEASE_ASSERT
+#else
+#define PROMISE_ASSERT(...) do { } while (0)
+#endif
+
+namespace mozilla {
+
+extern LazyLogModule gMozPromiseLog;
+
+#define PROMISE_LOG(x, ...) \
+ MOZ_LOG(gMozPromiseLog, mozilla::LogLevel::Debug, (x, ##__VA_ARGS__))
+
+namespace detail {
+template<typename ThisType, typename Ret, typename ArgType>
+static TrueType TakesArgumentHelper(Ret (ThisType::*)(ArgType));
+template<typename ThisType, typename Ret, typename ArgType>
+static TrueType TakesArgumentHelper(Ret (ThisType::*)(ArgType) const);
+template<typename ThisType, typename Ret>
+static FalseType TakesArgumentHelper(Ret (ThisType::*)());
+template<typename ThisType, typename Ret>
+static FalseType TakesArgumentHelper(Ret (ThisType::*)() const);
+
+template<typename ThisType, typename Ret, typename ArgType>
+static Ret ReturnTypeHelper(Ret (ThisType::*)(ArgType));
+template<typename ThisType, typename Ret, typename ArgType>
+static Ret ReturnTypeHelper(Ret (ThisType::*)(ArgType) const);
+template<typename ThisType, typename Ret>
+static Ret ReturnTypeHelper(Ret (ThisType::*)());
+template<typename ThisType, typename Ret>
+static Ret ReturnTypeHelper(Ret (ThisType::*)() const);
+
+template<typename MethodType>
+struct ReturnType {
+ typedef decltype(detail::ReturnTypeHelper(DeclVal<MethodType>())) Type;
+};
+
+} // namespace detail
+
+template<typename MethodType>
+struct TakesArgument {
+ static const bool value = decltype(detail::TakesArgumentHelper(DeclVal<MethodType>()))::value;
+};
+
+template<typename MethodType, typename TargetType>
+struct ReturnTypeIs {
+ static const bool value = IsConvertible<typename detail::ReturnType<MethodType>::Type, TargetType>::value;
+};
+
+/*
+ * A promise manages an asynchronous request that may or may not be able to be
+ * fulfilled immediately. When an API returns a promise, the consumer may attach
+ * callbacks to be invoked (asynchronously, on a specified thread) when the
+ * request is either completed (resolved) or cannot be completed (rejected).
+ * Whereas JS promise callbacks are dispatched from Microtask checkpoints,
+ * MozPromise resolution/rejection makes a normal round-trip through the event
+ * loop, which simplifies ordering semantics relative to other native code.
+ *
+ * MozPromises attempt to mirror the spirit of JS Promises to the extent that
+ * is possible (and desirable) in C++. While the intent is that MozPromises
+ * feel familiar to programmers who are accustomed to their JS-implemented cousin,
+ * we don't shy away from imposing restrictions and adding features that make
+ * sense for the use cases we encounter.
+ *
+ * A MozPromise is ThreadSafe, and may be ->Then()ed on any thread. The Then()
+ * call accepts resolve and reject callbacks, and returns a MozPromise::Request.
+ * The Request object serves several purposes for the consumer.
+ *
+ * (1) It allows the caller to cancel the delivery of the resolve/reject value
+ * if it has not already occurred, via Disconnect() (this must be done on
+ * the target thread to avoid racing).
+ *
+ * (2) It provides access to a "Completion Promise", which is roughly analogous
+ * to the Promise returned directly by ->then() calls on JS promises. If
+ * the resolve/reject callback returns a new MozPromise, that promise is
+ * chained to the completion promise, such that its resolve/reject value
+ * will be forwarded along when it arrives. If the resolve/reject callback
+ * returns void, the completion promise is resolved/rejected with the same
+ * value that was passed to the callback.
+ *
+ * The MozPromise APIs skirt traditional XPCOM convention by returning RefPtrs
+ * (rather than already_AddRefed) from various methods. This is done to allow elegant
+ * chaining of calls without cluttering up the code with intermediate variables, and
+ * without introducing separate API variants for callers that want a return value
+ * (from, say, ->Then()) from those that don't.
+ *
+ * When IsExclusive is true, the MozPromise does a release-mode assertion that
+ * there is at most one call to either Then(...) or ChainTo(...).
+ */
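+//
+// A consumer-side sketch, illustrative only (MyPromise, mClient and the
+// callback bodies are hypothetical, not part of this file):
+//
+//   typedef MozPromise<bool, nsresult, /* IsExclusive = */ true> MyPromise;
+//
+//   RefPtr<MyPromise> promise = mClient->StartSomethingAsync();
+//   promise->Then(AbstractThread::MainThread(), __func__,
+//                 [](bool aValue) { /* resolved */ },
+//                 [](nsresult aError) { /* rejected */ });
+//
+// The producer side typically keeps a MozPromiseHolder<MyPromise> and calls
+// Resolve() or Reject() on it when the underlying operation completes.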
+
+class MozPromiseRefcountable
+{
+public:
+ NS_INLINE_DECL_THREADSAFE_REFCOUNTING(MozPromiseRefcountable)
+protected:
+ virtual ~MozPromiseRefcountable() {}
+};
+
+template<typename T> class MozPromiseHolder;
+template<typename ResolveValueT, typename RejectValueT, bool IsExclusive>
+class MozPromise : public MozPromiseRefcountable
+{
+ static const uint32_t sMagic = 0xcecace11;
+
+public:
+ typedef ResolveValueT ResolveValueType;
+ typedef RejectValueT RejectValueType;
+ class ResolveOrRejectValue
+ {
+ public:
+ template<typename ResolveValueType_>
+ void SetResolve(ResolveValueType_&& aResolveValue)
+ {
+ MOZ_ASSERT(IsNothing());
+ mResolveValue.emplace(Forward<ResolveValueType_>(aResolveValue));
+ }
+
+ template<typename RejectValueType_>
+ void SetReject(RejectValueType_&& aRejectValue)
+ {
+ MOZ_ASSERT(IsNothing());
+ mRejectValue.emplace(Forward<RejectValueType_>(aRejectValue));
+ }
+
+ template<typename ResolveValueType_>
+ static ResolveOrRejectValue MakeResolve(ResolveValueType_&& aResolveValue)
+ {
+ ResolveOrRejectValue val;
+ val.SetResolve(Forward<ResolveValueType_>(aResolveValue));
+ return val;
+ }
+
+ template<typename RejectValueType_>
+ static ResolveOrRejectValue MakeReject(RejectValueType_&& aRejectValue)
+ {
+ ResolveOrRejectValue val;
+ val.SetReject(Forward<RejectValueType_>(aRejectValue));
+ return val;
+ }
+
+ bool IsResolve() const { return mResolveValue.isSome(); }
+ bool IsReject() const { return mRejectValue.isSome(); }
+ bool IsNothing() const { return mResolveValue.isNothing() && mRejectValue.isNothing(); }
+
+ const ResolveValueType& ResolveValue() const { return mResolveValue.ref(); }
+ const RejectValueType& RejectValue() const { return mRejectValue.ref(); }
+
+ private:
+ Maybe<ResolveValueType> mResolveValue;
+ Maybe<RejectValueType> mRejectValue;
+ };
+
+protected:
+ // MozPromise is the public type, and never constructed directly. Construct
+ // a MozPromise::Private, defined below.
+ MozPromise(const char* aCreationSite, bool aIsCompletionPromise)
+ : mCreationSite(aCreationSite)
+ , mMutex("MozPromise Mutex")
+ , mHaveRequest(false)
+ , mIsCompletionPromise(aIsCompletionPromise)
+#ifdef PROMISE_DEBUG
+ , mMagic4(mMutex.mLock)
+#endif
+ {
+ PROMISE_LOG("%s creating MozPromise (%p)", mCreationSite, this);
+ }
+
+public:
+ // MozPromise::Private allows us to separate the public interface (upon which
+ // consumers of the promise may invoke methods like Then()) from the private
+ // interface (upon which the creator of the promise may invoke Resolve() or
+ // Reject()). APIs should create and store a MozPromise::Private (usually
+ // via a MozPromiseHolder), and return a MozPromise to consumers.
+ //
+ // NB: We can include the definition of this class inline once B2G ICS is gone.
+ class Private;
+
+ template<typename ResolveValueType_>
+ static RefPtr<MozPromise>
+ CreateAndResolve(ResolveValueType_&& aResolveValue, const char* aResolveSite)
+ {
+ RefPtr<typename MozPromise::Private> p = new MozPromise::Private(aResolveSite);
+ p->Resolve(Forward<ResolveValueType_>(aResolveValue), aResolveSite);
+ return p.forget();
+ }
+
+ template<typename RejectValueType_>
+ static RefPtr<MozPromise>
+ CreateAndReject(RejectValueType_&& aRejectValue, const char* aRejectSite)
+ {
+ RefPtr<typename MozPromise::Private> p = new MozPromise::Private(aRejectSite);
+ p->Reject(Forward<RejectValueType_>(aRejectValue), aRejectSite);
+ return p.forget();
+ }
+
+ typedef MozPromise<nsTArray<ResolveValueType>, RejectValueType, IsExclusive> AllPromiseType;
+private:
+ class AllPromiseHolder : public MozPromiseRefcountable
+ {
+ public:
+ explicit AllPromiseHolder(size_t aDependentPromises)
+ : mPromise(new typename AllPromiseType::Private(__func__))
+ , mOutstandingPromises(aDependentPromises)
+ {
+ mResolveValues.SetLength(aDependentPromises);
+ }
+
+ void Resolve(size_t aIndex, const ResolveValueType& aResolveValue)
+ {
+ if (!mPromise) {
+ // Already rejected.
+ return;
+ }
+
+ mResolveValues[aIndex].emplace(aResolveValue);
+ if (--mOutstandingPromises == 0) {
+ nsTArray<ResolveValueType> resolveValues;
+ resolveValues.SetCapacity(mResolveValues.Length());
+ for (size_t i = 0; i < mResolveValues.Length(); ++i) {
+ resolveValues.AppendElement(mResolveValues[i].ref());
+ }
+
+ mPromise->Resolve(resolveValues, __func__);
+ mPromise = nullptr;
+ mResolveValues.Clear();
+ }
+ }
+
+ void Reject(const RejectValueType& aRejectValue)
+ {
+ if (!mPromise) {
+ // Already rejected.
+ return;
+ }
+
+ mPromise->Reject(aRejectValue, __func__);
+ mPromise = nullptr;
+ mResolveValues.Clear();
+ }
+
+ AllPromiseType* Promise() { return mPromise; }
+
+ private:
+ nsTArray<Maybe<ResolveValueType>> mResolveValues;
+ RefPtr<typename AllPromiseType::Private> mPromise;
+ size_t mOutstandingPromises;
+ };
+public:
+
+ static RefPtr<AllPromiseType> All(AbstractThread* aProcessingThread, nsTArray<RefPtr<MozPromise>>& aPromises)
+ {
+ RefPtr<AllPromiseHolder> holder = new AllPromiseHolder(aPromises.Length());
+ for (size_t i = 0; i < aPromises.Length(); ++i) {
+ aPromises[i]->Then(aProcessingThread, __func__,
+ [holder, i] (ResolveValueType aResolveValue) -> void { holder->Resolve(i, aResolveValue); },
+ [holder] (RejectValueType aRejectValue) -> void { holder->Reject(aRejectValue); }
+ );
+ }
+ return holder->Promise();
+ }
+
+ class Request : public MozPromiseRefcountable
+ {
+ public:
+ virtual void Disconnect() = 0;
+
+ virtual MozPromise* CompletionPromise() = 0;
+
+ virtual void AssertIsDead() = 0;
+
+ protected:
+ Request() : mComplete(false), mDisconnected(false) {}
+ virtual ~Request() {}
+
+ bool mComplete;
+ bool mDisconnected;
+ };
+
+protected:
+
+ /*
+ * A ThenValue tracks a single consumer waiting on the promise. When a consumer
+ * invokes promise->Then(...), a ThenValue is created. Once the Promise is
+ * resolved or rejected, a {Resolve,Reject}Runnable is dispatched, which
+ * invokes the resolve/reject method and then deletes the ThenValue.
+ */
+ class ThenValueBase : public Request
+ {
+ static const uint32_t sMagic = 0xfadece11;
+
+ public:
+ class ResolveOrRejectRunnable : public Runnable
+ {
+ public:
+ ResolveOrRejectRunnable(ThenValueBase* aThenValue, MozPromise* aPromise)
+ : mThenValue(aThenValue)
+ , mPromise(aPromise)
+ {
+ MOZ_DIAGNOSTIC_ASSERT(!mPromise->IsPending());
+ }
+
+ ~ResolveOrRejectRunnable()
+ {
+ if (mThenValue) {
+ mThenValue->AssertIsDead();
+ }
+ }
+
+ NS_IMETHOD Run() override
+ {
+ PROMISE_LOG("ResolveOrRejectRunnable::Run() [this=%p]", this);
+ mThenValue->DoResolveOrReject(mPromise->Value());
+ mThenValue = nullptr;
+ mPromise = nullptr;
+ return NS_OK;
+ }
+
+ private:
+ RefPtr<ThenValueBase> mThenValue;
+ RefPtr<MozPromise> mPromise;
+ };
+
+ explicit ThenValueBase(AbstractThread* aResponseTarget, const char* aCallSite)
+ : mResponseTarget(aResponseTarget), mCallSite(aCallSite) {}
+
+#ifdef PROMISE_DEBUG
+ ~ThenValueBase()
+ {
+ mMagic1 = 0;
+ mMagic2 = 0;
+ }
+#endif
+
+ MozPromise* CompletionPromise() override
+ {
+ MOZ_DIAGNOSTIC_ASSERT(mResponseTarget->IsCurrentThreadIn());
+ MOZ_DIAGNOSTIC_ASSERT(!Request::mComplete);
+ if (!mCompletionPromise) {
+ mCompletionPromise = new MozPromise::Private(
+ "<completion promise>", true /* aIsCompletionPromise */);
+ }
+ return mCompletionPromise;
+ }
+
+ void AssertIsDead() override
+ {
+ PROMISE_ASSERT(mMagic1 == sMagic && mMagic2 == sMagic);
+      // We want to assert that this ThenValue is dead - that is to say, that
+ // there are no consumers waiting for the result. In the case of a normal
+ // ThenValue, we check that it has been disconnected, which is the way
+ // that the consumer signals that it no longer wishes to hear about the
+ // result. If this ThenValue has a completion promise (which is mutually
+ // exclusive with being disconnectable), we recursively assert that every
+ // ThenValue associated with the completion promise is dead.
+ if (mCompletionPromise) {
+ mCompletionPromise->AssertIsDead();
+ } else {
+ MOZ_DIAGNOSTIC_ASSERT(Request::mDisconnected);
+ }
+ }
+
+ void Dispatch(MozPromise *aPromise)
+ {
+ PROMISE_ASSERT(mMagic1 == sMagic && mMagic2 == sMagic);
+ aPromise->mMutex.AssertCurrentThreadOwns();
+ MOZ_ASSERT(!aPromise->IsPending());
+
+ RefPtr<Runnable> runnable =
+ static_cast<Runnable*>(new (typename ThenValueBase::ResolveOrRejectRunnable)(this, aPromise));
+ PROMISE_LOG("%s Then() call made from %s [Runnable=%p, Promise=%p, ThenValue=%p]",
+ aPromise->mValue.IsResolve() ? "Resolving" : "Rejecting", ThenValueBase::mCallSite,
+ runnable.get(), aPromise, this);
+
+ // Promise consumers are allowed to disconnect the Request object and
+ // then shut down the thread or task queue that the promise result would
+ // be dispatched on. So we unfortunately can't assert that promise
+ // dispatch succeeds. :-(
+ mResponseTarget->Dispatch(runnable.forget(), AbstractThread::DontAssertDispatchSuccess);
+ }
+
+ virtual void Disconnect() override
+ {
+ MOZ_DIAGNOSTIC_ASSERT(ThenValueBase::mResponseTarget->IsCurrentThreadIn());
+ MOZ_DIAGNOSTIC_ASSERT(!Request::mComplete);
+ Request::mDisconnected = true;
+
+ // We could support rejecting the completion promise on disconnection, but
+ // then we'd need to have some sort of default reject value. The use cases
+ // of disconnection and completion promise chaining seem pretty orthogonal,
+      // so let's just assert against it.
+ MOZ_DIAGNOSTIC_ASSERT(!mCompletionPromise);
+ }
+
+ protected:
+ virtual already_AddRefed<MozPromise> DoResolveOrRejectInternal(const ResolveOrRejectValue& aValue) = 0;
+
+ void DoResolveOrReject(const ResolveOrRejectValue& aValue)
+ {
+ PROMISE_ASSERT(mMagic1 == sMagic && mMagic2 == sMagic);
+ MOZ_DIAGNOSTIC_ASSERT(mResponseTarget->IsCurrentThreadIn());
+ Request::mComplete = true;
+ if (Request::mDisconnected) {
+ PROMISE_LOG("ThenValue::DoResolveOrReject disconnected - bailing out [this=%p]", this);
+ return;
+ }
+
+ // Invoke the resolve or reject method.
+ RefPtr<MozPromise> p = DoResolveOrRejectInternal(aValue);
+
+ // If there's a completion promise, resolve it appropriately with the
+ // result of the method.
+ //
+ // We jump through some hoops to cast to MozPromise::Private here. This
+ // can go away when we can just declare mCompletionPromise as
+ // MozPromise::Private. See the declaration below.
+ RefPtr<MozPromise::Private> completionPromise =
+ dont_AddRef(static_cast<MozPromise::Private*>(mCompletionPromise.forget().take()));
+ if (completionPromise) {
+ if (p) {
+ p->ChainTo(completionPromise.forget(), "<chained completion promise>");
+ } else {
+ completionPromise->ResolveOrReject(aValue, "<completion of non-promise-returning method>");
+ }
+ }
+ }
+
+ RefPtr<AbstractThread> mResponseTarget; // May be released on any thread.
+#ifdef PROMISE_DEBUG
+ uint32_t mMagic1 = sMagic;
+#endif
+ // Declaring RefPtr<MozPromise::Private> here causes build failures
+ // on MSVC because MozPromise::Private is only forward-declared at this
+ // point. This hack can go away when we inline-declare MozPromise::Private,
+ // which is blocked on the B2G ICS compiler being too old.
+ RefPtr<MozPromise> mCompletionPromise;
+#ifdef PROMISE_DEBUG
+ uint32_t mMagic2 = sMagic;
+#endif
+ const char* mCallSite;
+ };
+
+ /*
+ * We create two overloads for invoking Resolve/Reject Methods so as to
+ * make the resolve/reject value argument "optional".
+ */
+
+ template<typename ThisType, typename MethodType, typename ValueType>
+ static typename EnableIf<ReturnTypeIs<MethodType, RefPtr<MozPromise>>::value &&
+ TakesArgument<MethodType>::value,
+ already_AddRefed<MozPromise>>::Type
+ InvokeCallbackMethod(ThisType* aThisVal, MethodType aMethod, ValueType&& aValue)
+ {
+ return ((*aThisVal).*aMethod)(Forward<ValueType>(aValue)).forget();
+ }
+
+ template<typename ThisType, typename MethodType, typename ValueType>
+ static typename EnableIf<ReturnTypeIs<MethodType, void>::value &&
+ TakesArgument<MethodType>::value,
+ already_AddRefed<MozPromise>>::Type
+ InvokeCallbackMethod(ThisType* aThisVal, MethodType aMethod, ValueType&& aValue)
+ {
+ ((*aThisVal).*aMethod)(Forward<ValueType>(aValue));
+ return nullptr;
+ }
+
+ template<typename ThisType, typename MethodType, typename ValueType>
+ static typename EnableIf<ReturnTypeIs<MethodType, RefPtr<MozPromise>>::value &&
+ !TakesArgument<MethodType>::value,
+ already_AddRefed<MozPromise>>::Type
+ InvokeCallbackMethod(ThisType* aThisVal, MethodType aMethod, ValueType&& aValue)
+ {
+ return ((*aThisVal).*aMethod)().forget();
+ }
+
+ template<typename ThisType, typename MethodType, typename ValueType>
+ static typename EnableIf<ReturnTypeIs<MethodType, void>::value &&
+ !TakesArgument<MethodType>::value,
+ already_AddRefed<MozPromise>>::Type
+ InvokeCallbackMethod(ThisType* aThisVal, MethodType aMethod, ValueType&& aValue)
+ {
+ ((*aThisVal).*aMethod)();
+ return nullptr;
+ }
+
+ template<typename ThisType, typename ResolveMethodType, typename RejectMethodType>
+ class MethodThenValue : public ThenValueBase
+ {
+ public:
+ MethodThenValue(AbstractThread* aResponseTarget, ThisType* aThisVal,
+ ResolveMethodType aResolveMethod, RejectMethodType aRejectMethod,
+ const char* aCallSite)
+ : ThenValueBase(aResponseTarget, aCallSite)
+ , mThisVal(aThisVal)
+ , mResolveMethod(aResolveMethod)
+ , mRejectMethod(aRejectMethod) {}
+
+ virtual void Disconnect() override
+ {
+ ThenValueBase::Disconnect();
+
+ // If a Request has been disconnected, we don't guarantee that the
+ // resolve/reject runnable will be dispatched. Null out our refcounted
+ // this-value now so that it's released predictably on the dispatch thread.
+ mThisVal = nullptr;
+ }
+
+ protected:
+ virtual already_AddRefed<MozPromise> DoResolveOrRejectInternal(const ResolveOrRejectValue& aValue) override
+ {
+ RefPtr<MozPromise> completion;
+ if (aValue.IsResolve()) {
+ completion = InvokeCallbackMethod(mThisVal.get(), mResolveMethod, aValue.ResolveValue());
+ } else {
+ completion = InvokeCallbackMethod(mThisVal.get(), mRejectMethod, aValue.RejectValue());
+ }
+
+ // Null out mThisVal after invoking the callback so that any references are
+ // released predictably on the dispatch thread. Otherwise, it would be
+ // released on whatever thread last drops its reference to the ThenValue,
+ // which may or may not be ok.
+ mThisVal = nullptr;
+
+ return completion.forget();
+ }
+
+ private:
+ RefPtr<ThisType> mThisVal; // Only accessed and refcounted on dispatch thread.
+ ResolveMethodType mResolveMethod;
+ RejectMethodType mRejectMethod;
+ };
+
+ // NB: We could use std::function here instead of a template if it were supported. :-(
+ template<typename ResolveFunction, typename RejectFunction>
+ class FunctionThenValue : public ThenValueBase
+ {
+ public:
+ FunctionThenValue(AbstractThread* aResponseTarget,
+ ResolveFunction&& aResolveFunction,
+ RejectFunction&& aRejectFunction,
+ const char* aCallSite)
+ : ThenValueBase(aResponseTarget, aCallSite)
+ {
+ mResolveFunction.emplace(Move(aResolveFunction));
+ mRejectFunction.emplace(Move(aRejectFunction));
+ }
+
+ virtual void Disconnect() override
+ {
+ ThenValueBase::Disconnect();
+
+ // If a Request has been disconnected, we don't guarantee that the
+ // resolve/reject runnable will be dispatched. Destroy our callbacks
+      // now so that any references in closures are released predictably on
+ // the dispatch thread.
+ mResolveFunction.reset();
+ mRejectFunction.reset();
+ }
+
+ protected:
+ virtual already_AddRefed<MozPromise> DoResolveOrRejectInternal(const ResolveOrRejectValue& aValue) override
+ {
+ // Note: The usage of InvokeCallbackMethod here requires that
+ // ResolveFunction/RejectFunction are capture-lambdas (i.e. anonymous
+ // classes with ::operator()), since it allows us to share code more easily.
+ // We could fix this if need be, though it's quite easy to work around by
+ // just capturing something.
+ RefPtr<MozPromise> completion;
+ if (aValue.IsResolve()) {
+ completion = InvokeCallbackMethod(mResolveFunction.ptr(), &ResolveFunction::operator(), aValue.ResolveValue());
+ } else {
+ completion = InvokeCallbackMethod(mRejectFunction.ptr(), &RejectFunction::operator(), aValue.RejectValue());
+ }
+
+ // Destroy callbacks after invocation so that any references in closures are
+ // released predictably on the dispatch thread. Otherwise, they would be
+ // released on whatever thread last drops its reference to the ThenValue,
+ // which may or may not be ok.
+ mResolveFunction.reset();
+ mRejectFunction.reset();
+
+ return completion.forget();
+ }
+
+ private:
+ Maybe<ResolveFunction> mResolveFunction; // Only accessed and deleted on dispatch thread.
+ Maybe<RejectFunction> mRejectFunction; // Only accessed and deleted on dispatch thread.
+ };
+
+public:
+ void ThenInternal(AbstractThread* aResponseThread, ThenValueBase* aThenValue,
+ const char* aCallSite)
+ {
+ PROMISE_ASSERT(mMagic1 == sMagic && mMagic2 == sMagic && mMagic3 == sMagic && mMagic4 == mMutex.mLock);
+ MutexAutoLock lock(mMutex);
+ MOZ_ASSERT(aResponseThread->IsDispatchReliable());
+ MOZ_DIAGNOSTIC_ASSERT(!IsExclusive || !mHaveRequest);
+ mHaveRequest = true;
+ PROMISE_LOG("%s invoking Then() [this=%p, aThenValue=%p, isPending=%d]",
+ aCallSite, this, aThenValue, (int) IsPending());
+ if (!IsPending()) {
+ aThenValue->Dispatch(this);
+ } else {
+ mThenValues.AppendElement(aThenValue);
+ }
+ }
+
+public:
+
+ template<typename ThisType, typename ResolveMethodType, typename RejectMethodType>
+ RefPtr<Request> Then(AbstractThread* aResponseThread, const char* aCallSite, ThisType* aThisVal,
+ ResolveMethodType aResolveMethod, RejectMethodType aRejectMethod)
+ {
+ RefPtr<ThenValueBase> thenValue = new MethodThenValue<ThisType, ResolveMethodType, RejectMethodType>(
+ aResponseThread, aThisVal, aResolveMethod, aRejectMethod, aCallSite);
+ ThenInternal(aResponseThread, thenValue, aCallSite);
+ return thenValue.forget(); // Implicit conversion from already_AddRefed<ThenValueBase> to RefPtr<Request>.
+ }
+
+ template<typename ResolveFunction, typename RejectFunction>
+ RefPtr<Request> Then(AbstractThread* aResponseThread, const char* aCallSite,
+ ResolveFunction&& aResolveFunction, RejectFunction&& aRejectFunction)
+ {
+ RefPtr<ThenValueBase> thenValue = new FunctionThenValue<ResolveFunction, RejectFunction>(aResponseThread,
+ Move(aResolveFunction), Move(aRejectFunction), aCallSite);
+ ThenInternal(aResponseThread, thenValue, aCallSite);
+ return thenValue.forget(); // Implicit conversion from already_AddRefed<ThenValueBase> to RefPtr<Request>.
+ }
+
+ void ChainTo(already_AddRefed<Private> aChainedPromise, const char* aCallSite)
+ {
+ MutexAutoLock lock(mMutex);
+ MOZ_DIAGNOSTIC_ASSERT(!IsExclusive || !mHaveRequest);
+ mHaveRequest = true;
+ RefPtr<Private> chainedPromise = aChainedPromise;
+ PROMISE_LOG("%s invoking Chain() [this=%p, chainedPromise=%p, isPending=%d]",
+ aCallSite, this, chainedPromise.get(), (int) IsPending());
+ if (!IsPending()) {
+ ForwardTo(chainedPromise);
+ } else {
+ mChainedPromises.AppendElement(chainedPromise);
+ }
+ }
+
+ // Note we expose the function AssertIsDead() instead of IsDead() since
+ // checking IsDead() is a data race in the situation where the request is not
+ // dead. Therefore we enforce the form |Assert(IsDead())| by exposing
+ // AssertIsDead() only.
+ void AssertIsDead()
+ {
+ PROMISE_ASSERT(mMagic1 == sMagic && mMagic2 == sMagic && mMagic3 == sMagic && mMagic4 == mMutex.mLock);
+ MutexAutoLock lock(mMutex);
+ for (auto&& then : mThenValues) {
+ then->AssertIsDead();
+ }
+ for (auto&& chained : mChainedPromises) {
+ chained->AssertIsDead();
+ }
+ }
+
+protected:
+ bool IsPending() const { return mValue.IsNothing(); }
+ const ResolveOrRejectValue& Value() const
+ {
+ // This method should only be called once the value has stabilized. As
+ // such, we don't need to acquire the lock here.
+ MOZ_DIAGNOSTIC_ASSERT(!IsPending());
+ return mValue;
+ }
+
+ void DispatchAll()
+ {
+ mMutex.AssertCurrentThreadOwns();
+ for (size_t i = 0; i < mThenValues.Length(); ++i) {
+ mThenValues[i]->Dispatch(this);
+ }
+ mThenValues.Clear();
+
+ for (size_t i = 0; i < mChainedPromises.Length(); ++i) {
+ ForwardTo(mChainedPromises[i]);
+ }
+ mChainedPromises.Clear();
+ }
+
+ void ForwardTo(Private* aOther)
+ {
+ MOZ_ASSERT(!IsPending());
+ if (mValue.IsResolve()) {
+ aOther->Resolve(mValue.ResolveValue(), "<chained promise>");
+ } else {
+ aOther->Reject(mValue.RejectValue(), "<chained promise>");
+ }
+ }
+
+ virtual ~MozPromise()
+ {
+ PROMISE_LOG("MozPromise::~MozPromise [this=%p]", this);
+ AssertIsDead();
+    // We can't guarantee a completion promise will always be resolved or
+ // rejected since ResolveOrRejectRunnable might not run when dispatch fails.
+ if (!mIsCompletionPromise) {
+ MOZ_ASSERT(!IsPending());
+ MOZ_ASSERT(mThenValues.IsEmpty());
+ MOZ_ASSERT(mChainedPromises.IsEmpty());
+ }
+#ifdef PROMISE_DEBUG
+ mMagic1 = 0;
+ mMagic2 = 0;
+ mMagic3 = 0;
+ mMagic4 = nullptr;
+#endif
+  }
+
+ const char* mCreationSite; // For logging
+ Mutex mMutex;
+ ResolveOrRejectValue mValue;
+#ifdef PROMISE_DEBUG
+ uint32_t mMagic1 = sMagic;
+#endif
+ nsTArray<RefPtr<ThenValueBase>> mThenValues;
+#ifdef PROMISE_DEBUG
+ uint32_t mMagic2 = sMagic;
+#endif
+ nsTArray<RefPtr<Private>> mChainedPromises;
+#ifdef PROMISE_DEBUG
+ uint32_t mMagic3 = sMagic;
+#endif
+ bool mHaveRequest;
+ const bool mIsCompletionPromise;
+#ifdef PROMISE_DEBUG
+ void* mMagic4;
+#endif
+};
+
+template<typename ResolveValueT, typename RejectValueT, bool IsExclusive>
+class MozPromise<ResolveValueT, RejectValueT, IsExclusive>::Private
+ : public MozPromise<ResolveValueT, RejectValueT, IsExclusive>
+{
+public:
+ explicit Private(const char* aCreationSite, bool aIsCompletionPromise = false)
+ : MozPromise(aCreationSite, aIsCompletionPromise) {}
+
+ template<typename ResolveValueT_>
+ void Resolve(ResolveValueT_&& aResolveValue, const char* aResolveSite)
+ {
+ PROMISE_ASSERT(mMagic1 == sMagic && mMagic2 == sMagic && mMagic3 == sMagic && mMagic4 == mMutex.mLock);
+ MutexAutoLock lock(mMutex);
+ MOZ_ASSERT(IsPending());
+ PROMISE_LOG("%s resolving MozPromise (%p created at %s)", aResolveSite, this, mCreationSite);
+ mValue.SetResolve(Forward<ResolveValueT_>(aResolveValue));
+ DispatchAll();
+ }
+
+ template<typename RejectValueT_>
+ void Reject(RejectValueT_&& aRejectValue, const char* aRejectSite)
+ {
+ PROMISE_ASSERT(mMagic1 == sMagic && mMagic2 == sMagic && mMagic3 == sMagic && mMagic4 == mMutex.mLock);
+ MutexAutoLock lock(mMutex);
+ MOZ_ASSERT(IsPending());
+ PROMISE_LOG("%s rejecting MozPromise (%p created at %s)", aRejectSite, this, mCreationSite);
+ mValue.SetReject(Forward<RejectValueT_>(aRejectValue));
+ DispatchAll();
+ }
+
+ template<typename ResolveOrRejectValue_>
+ void ResolveOrReject(ResolveOrRejectValue_&& aValue, const char* aSite)
+ {
+ PROMISE_ASSERT(mMagic1 == sMagic && mMagic2 == sMagic && mMagic3 == sMagic && mMagic4 == mMutex.mLock);
+ MutexAutoLock lock(mMutex);
+ MOZ_ASSERT(IsPending());
+ PROMISE_LOG("%s resolveOrRejecting MozPromise (%p created at %s)", aSite, this, mCreationSite);
+ mValue = Forward<ResolveOrRejectValue_>(aValue);
+ DispatchAll();
+ }
+};
+
+// A generic promise type that does the trick for simple use cases.
+typedef MozPromise<bool, nsresult, /* IsExclusive = */ false> GenericPromise;
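+
+// Illustrative sketch (hypothetical names, not part of this file): a promise is
+// typically created via its Private subclass, observed with Then(), and later
+// settled by the producer, e.g.
+//
+//   RefPtr<GenericPromise::Private> p =
+//     new GenericPromise::Private("MyModule::StartWork");
+//   p->Then(mOwnerThread, __func__, this,
+//           &MyClass::OnWorkResolved,   // e.g. void OnWorkResolved(bool)
+//           &MyClass::OnWorkRejected);  // e.g. void OnWorkRejected(nsresult)
+//   ...
+//   p->Resolve(true, __func__);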
+
+/*
+ * Class to encapsulate a promise for a particular role. Use this as the member
+ * variable for a class whose method returns a promise.
+ */
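+//
+// Illustrative sketch (hypothetical names, not part of this file):
+//
+//   typedef MozPromise<bool, nsresult, /* IsExclusive = */ true> SeekPromise;
+//
+//   class MyDecoder {
+//     RefPtr<SeekPromise> Seek() { return mSeekPromise.Ensure(__func__); }
+//     void OnSeekCompleted()     { mSeekPromise.ResolveIfExists(true, __func__); }
+//     void Shutdown()            { mSeekPromise.RejectIfExists(NS_ERROR_ABORT, __func__); }
+//     MozPromiseHolder<SeekPromise> mSeekPromise;
+//   };
+//
+// The holder asserts that it has been resolved, rejected, or Steal()-ed by the
+// time it is destroyed.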
+template<typename PromiseType>
+class MozPromiseHolder
+{
+public:
+ MozPromiseHolder()
+ : mMonitor(nullptr) {}
+
+ // Move semantics.
+ MozPromiseHolder& operator=(MozPromiseHolder&& aOther)
+ {
+ MOZ_ASSERT(!mMonitor && !aOther.mMonitor);
+ MOZ_DIAGNOSTIC_ASSERT(!mPromise);
+ mPromise = aOther.mPromise;
+ aOther.mPromise = nullptr;
+ return *this;
+ }
+
+ ~MozPromiseHolder() { MOZ_ASSERT(!mPromise); }
+
+ already_AddRefed<PromiseType> Ensure(const char* aMethodName) {
+ if (mMonitor) {
+ mMonitor->AssertCurrentThreadOwns();
+ }
+ if (!mPromise) {
+ mPromise = new (typename PromiseType::Private)(aMethodName);
+ }
+ RefPtr<PromiseType> p = mPromise.get();
+ return p.forget();
+ }
+
+ // Provide a Monitor that should always be held when accessing this instance.
+ void SetMonitor(Monitor* aMonitor) { mMonitor = aMonitor; }
+
+ bool IsEmpty() const
+ {
+ if (mMonitor) {
+ mMonitor->AssertCurrentThreadOwns();
+ }
+ return !mPromise;
+ }
+
+ already_AddRefed<typename PromiseType::Private> Steal()
+ {
+ if (mMonitor) {
+ mMonitor->AssertCurrentThreadOwns();
+ }
+
+ RefPtr<typename PromiseType::Private> p = mPromise;
+ mPromise = nullptr;
+ return p.forget();
+ }
+
+ void Resolve(typename PromiseType::ResolveValueType aResolveValue,
+ const char* aMethodName)
+ {
+ if (mMonitor) {
+ mMonitor->AssertCurrentThreadOwns();
+ }
+ MOZ_ASSERT(mPromise);
+ mPromise->Resolve(aResolveValue, aMethodName);
+ mPromise = nullptr;
+ }
+
+
+ void ResolveIfExists(typename PromiseType::ResolveValueType aResolveValue,
+ const char* aMethodName)
+ {
+ if (!IsEmpty()) {
+ Resolve(aResolveValue, aMethodName);
+ }
+ }
+
+ void Reject(typename PromiseType::RejectValueType aRejectValue,
+ const char* aMethodName)
+ {
+ if (mMonitor) {
+ mMonitor->AssertCurrentThreadOwns();
+ }
+ MOZ_ASSERT(mPromise);
+ mPromise->Reject(aRejectValue, aMethodName);
+ mPromise = nullptr;
+ }
+
+
+ void RejectIfExists(typename PromiseType::RejectValueType aRejectValue,
+ const char* aMethodName)
+ {
+ if (!IsEmpty()) {
+ Reject(aRejectValue, aMethodName);
+ }
+ }
+
+private:
+ Monitor* mMonitor;
+ RefPtr<typename PromiseType::Private> mPromise;
+};
+
+/*
+ * Class to encapsulate a MozPromise::Request reference. Use this as the member
+ * variable for a class waiting on a MozPromise.
+ */
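+//
+// Illustrative sketch (hypothetical names, not part of this file):
+//
+//   MozPromiseRequestHolder<SeekPromise> mSeekRequest;
+//
+//   mSeekRequest.Begin(mDecoder->Seek()->Then(mOwnerThread, __func__, this,
+//                                             &MyClass::OnSeekResolved,
+//                                             &MyClass::OnSeekRejected));
+//
+//   // In OnSeekResolved/OnSeekRejected:   mSeekRequest.Complete();
+//   // If the operation is cancelled:      mSeekRequest.DisconnectIfExists();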
+template<typename PromiseType>
+class MozPromiseRequestHolder
+{
+public:
+ MozPromiseRequestHolder() {}
+ ~MozPromiseRequestHolder() { MOZ_ASSERT(!mRequest); }
+
+ void Begin(RefPtr<typename PromiseType::Request>&& aRequest)
+ {
+ MOZ_DIAGNOSTIC_ASSERT(!Exists());
+ mRequest = Move(aRequest);
+ }
+
+ void Begin(typename PromiseType::Request* aRequest)
+ {
+ MOZ_DIAGNOSTIC_ASSERT(!Exists());
+ mRequest = aRequest;
+ }
+
+ void Complete()
+ {
+ MOZ_DIAGNOSTIC_ASSERT(Exists());
+ mRequest = nullptr;
+ }
+
+ // Disconnects and forgets an outstanding promise. The resolve/reject methods
+ // will never be called.
+ void Disconnect() {
+ MOZ_ASSERT(Exists());
+ mRequest->Disconnect();
+ mRequest = nullptr;
+ }
+
+ void DisconnectIfExists() {
+ if (Exists()) {
+ Disconnect();
+ }
+ }
+
+ bool Exists() const { return !!mRequest; }
+
+private:
+ RefPtr<typename PromiseType::Request> mRequest;
+};
+
+// Asynchronous Potentially-Cross-Thread Method Calls.
+//
+// This machinery allows callers to schedule a promise-returning method to be
+// invoked asynchronously on a given thread, while at the same time receiving
+// a promise upon which to invoke Then() immediately. InvokeAsync dispatches
+// a task to invoke the method on the proper thread and also chain the resulting
+// promise to the one that the caller received, so that resolve/reject values
+// are forwarded through.
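+//
+// Illustrative sketch (hypothetical names, not part of this file), where
+// MyDecoder::Decode() runs on mDecodeThread, takes its arguments by value and
+// returns a RefPtr<DecodePromise>:
+//
+//   RefPtr<DecodePromise> p =
+//     InvokeAsync(mDecodeThread, mDecoder.get(), __func__,
+//                 &MyDecoder::Decode, aSample);
+//   p->Then(mOwnerThread, __func__, this,
+//           &MyClass::OnDecoded, &MyClass::OnDecodeFailed);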
+
+namespace detail {
+
+template<typename ReturnType, typename ThisType, typename... ArgTypes, size_t... Indices>
+ReturnType
+MethodCallInvokeHelper(ReturnType(ThisType::*aMethod)(ArgTypes...), ThisType* aThisVal,
+ Tuple<ArgTypes...>& aArgs, IndexSequence<Indices...>)
+{
+ return ((*aThisVal).*aMethod)(Get<Indices>(aArgs)...);
+}
+
+// Non-templated base class to allow us to use MOZ_COUNT_{C,D}TOR, which cause
+// assertions when used on templated types.
+class MethodCallBase
+{
+public:
+ MethodCallBase() { MOZ_COUNT_CTOR(MethodCallBase); }
+ virtual ~MethodCallBase() { MOZ_COUNT_DTOR(MethodCallBase); }
+};
+
+template<typename PromiseType, typename ThisType, typename... ArgTypes>
+class MethodCall : public MethodCallBase
+{
+public:
+ typedef RefPtr<PromiseType>(ThisType::*MethodType)(ArgTypes...);
+ MethodCall(MethodType aMethod, ThisType* aThisVal, ArgTypes... aArgs)
+ : mMethod(aMethod)
+ , mThisVal(aThisVal)
+ , mArgs(Forward<ArgTypes>(aArgs)...)
+ {}
+
+ RefPtr<PromiseType> Invoke()
+ {
+ return MethodCallInvokeHelper(mMethod, mThisVal.get(), mArgs, typename IndexSequenceFor<ArgTypes...>::Type());
+ }
+
+private:
+ MethodType mMethod;
+ RefPtr<ThisType> mThisVal;
+ Tuple<ArgTypes...> mArgs;
+};
+
+template<typename PromiseType, typename ThisType, typename ...ArgTypes>
+class ProxyRunnable : public Runnable
+{
+public:
+ ProxyRunnable(typename PromiseType::Private* aProxyPromise, MethodCall<PromiseType, ThisType, ArgTypes...>* aMethodCall)
+ : mProxyPromise(aProxyPromise), mMethodCall(aMethodCall) {}
+
+ NS_IMETHOD Run() override
+ {
+ RefPtr<PromiseType> p = mMethodCall->Invoke();
+ mMethodCall = nullptr;
+ p->ChainTo(mProxyPromise.forget(), "<Proxy Promise>");
+ return NS_OK;
+ }
+
+private:
+ RefPtr<typename PromiseType::Private> mProxyPromise;
+ nsAutoPtr<MethodCall<PromiseType, ThisType, ArgTypes...>> mMethodCall;
+};
+
+constexpr bool Any()
+{
+ return false;
+}
+
+template <typename T1>
+constexpr bool Any(T1 a)
+{
+ return static_cast<bool>(a);
+}
+
+template <typename T1, typename... Ts>
+constexpr bool Any(T1 a, Ts... aOthers)
+{
+ return a || Any(aOthers...);
+}
+
+} // namespace detail
+
+template<typename PromiseType, typename ThisType, typename ...ArgTypes, typename ...ActualArgTypes>
+static RefPtr<PromiseType>
+InvokeAsync(AbstractThread* aTarget, ThisType* aThisVal, const char* aCallerName,
+ RefPtr<PromiseType>(ThisType::*aMethod)(ArgTypes...), ActualArgTypes&&... aArgs)
+{
+ static_assert(!detail::Any(IsReference<ArgTypes>::value...),
+ "Cannot pass reference types through InvokeAsync, see bug 1313497 if you require it");
+ typedef detail::MethodCall<PromiseType, ThisType, ArgTypes...> MethodCallType;
+ typedef detail::ProxyRunnable<PromiseType, ThisType, ArgTypes...> ProxyRunnableType;
+
+ MethodCallType* methodCall = new MethodCallType(aMethod, aThisVal, Forward<ActualArgTypes>(aArgs)...);
+ RefPtr<typename PromiseType::Private> p = new (typename PromiseType::Private)(aCallerName);
+ RefPtr<ProxyRunnableType> r = new ProxyRunnableType(p, methodCall);
+ MOZ_ASSERT(aTarget->IsDispatchReliable());
+ aTarget->Dispatch(r.forget());
+ return p.forget();
+}
+
+#undef PROMISE_LOG
+#undef PROMISE_ASSERT
+#undef PROMISE_DEBUG
+
+} // namespace mozilla
+
+#endif
diff --git a/xpcom/threads/SharedThreadPool.cpp b/xpcom/threads/SharedThreadPool.cpp
new file mode 100644
index 000000000..9adf6449e
--- /dev/null
+++ b/xpcom/threads/SharedThreadPool.cpp
@@ -0,0 +1,224 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/SharedThreadPool.h"
+#include "mozilla/Monitor.h"
+#include "mozilla/ReentrantMonitor.h"
+#include "mozilla/Services.h"
+#include "mozilla/StaticPtr.h"
+#include "nsDataHashtable.h"
+#include "nsXPCOMCIDInternal.h"
+#include "nsComponentManagerUtils.h"
+#include "nsIObserver.h"
+#include "nsIObserverService.h"
+#ifdef XP_WIN
+#include "ThreadPoolCOMListener.h"
+#endif
+
+namespace mozilla {
+
+// Created and destroyed on the main thread.
+static StaticAutoPtr<ReentrantMonitor> sMonitor;
+
+// Hashtable, maps thread pool name to SharedThreadPool instance.
+// Modified only on the main thread.
+static StaticAutoPtr<nsDataHashtable<nsCStringHashKey, SharedThreadPool*>> sPools;
+
+static already_AddRefed<nsIThreadPool>
+CreateThreadPool(const nsCString& aName);
+
+class SharedThreadPoolShutdownObserver : public nsIObserver
+{
+public:
+ NS_DECL_ISUPPORTS
+ NS_DECL_NSIOBSERVER
+protected:
+ virtual ~SharedThreadPoolShutdownObserver() {}
+};
+
+NS_IMPL_ISUPPORTS(SharedThreadPoolShutdownObserver, nsIObserver, nsISupports)
+
+NS_IMETHODIMP
+SharedThreadPoolShutdownObserver::Observe(nsISupports* aSubject, const char *aTopic,
+ const char16_t *aData)
+{
+ MOZ_RELEASE_ASSERT(!strcmp(aTopic, "xpcom-shutdown-threads"));
+ SharedThreadPool::SpinUntilEmpty();
+ sMonitor = nullptr;
+ sPools = nullptr;
+ return NS_OK;
+}
+
+void
+SharedThreadPool::InitStatics()
+{
+ MOZ_ASSERT(NS_IsMainThread());
+ MOZ_ASSERT(!sMonitor && !sPools);
+ sMonitor = new ReentrantMonitor("SharedThreadPool");
+ sPools = new nsDataHashtable<nsCStringHashKey, SharedThreadPool*>();
+ nsCOMPtr<nsIObserverService> obsService = mozilla::services::GetObserverService();
+ nsCOMPtr<nsIObserver> obs = new SharedThreadPoolShutdownObserver();
+ obsService->AddObserver(obs, "xpcom-shutdown-threads", false);
+}
+
+/* static */
+bool
+SharedThreadPool::IsEmpty()
+{
+ ReentrantMonitorAutoEnter mon(*sMonitor);
+ return !sPools->Count();
+}
+
+/* static */
+void
+SharedThreadPool::SpinUntilEmpty()
+{
+ MOZ_ASSERT(NS_IsMainThread());
+ while (!IsEmpty()) {
+ sMonitor->AssertNotCurrentThreadIn();
+ NS_ProcessNextEvent(NS_GetCurrentThread(), true);
+ }
+}
+
+already_AddRefed<SharedThreadPool>
+SharedThreadPool::Get(const nsCString& aName, uint32_t aThreadLimit)
+{
+ MOZ_ASSERT(sMonitor && sPools);
+ ReentrantMonitorAutoEnter mon(*sMonitor);
+ SharedThreadPool* pool = nullptr;
+ nsresult rv;
+ if (!sPools->Get(aName, &pool)) {
+ nsCOMPtr<nsIThreadPool> threadPool(CreateThreadPool(aName));
+ NS_ENSURE_TRUE(threadPool, nullptr);
+ pool = new SharedThreadPool(aName, threadPool);
+
+ // Set the thread and idle limits. Note that we don't rely on the
+ // EnsureThreadLimitIsAtLeast() call below, as the default thread limit
+    // is 4, and if aThreadLimit is less than 4 we'd otherwise end up with a
+    // pool of 4 threads rather than the number requested, which would be
+    // unexpected behaviour.
+ rv = pool->SetThreadLimit(aThreadLimit);
+ NS_ENSURE_SUCCESS(rv, nullptr);
+
+ rv = pool->SetIdleThreadLimit(aThreadLimit);
+ NS_ENSURE_SUCCESS(rv, nullptr);
+
+ sPools->Put(aName, pool);
+ } else if (NS_FAILED(pool->EnsureThreadLimitIsAtLeast(aThreadLimit))) {
+ NS_WARNING("Failed to set limits on thread pool");
+ }
+
+ MOZ_ASSERT(pool);
+ RefPtr<SharedThreadPool> instance(pool);
+ return instance.forget();
+}
+
+NS_IMETHODIMP_(MozExternalRefCountType) SharedThreadPool::AddRef(void)
+{
+ MOZ_ASSERT(sMonitor);
+ ReentrantMonitorAutoEnter mon(*sMonitor);
+ MOZ_ASSERT(int32_t(mRefCnt) >= 0, "illegal refcnt");
+ nsrefcnt count = ++mRefCnt;
+ NS_LOG_ADDREF(this, count, "SharedThreadPool", sizeof(*this));
+ return count;
+}
+
+NS_IMETHODIMP_(MozExternalRefCountType) SharedThreadPool::Release(void)
+{
+ MOZ_ASSERT(sMonitor);
+ ReentrantMonitorAutoEnter mon(*sMonitor);
+ nsrefcnt count = --mRefCnt;
+ NS_LOG_RELEASE(this, count, "SharedThreadPool");
+ if (count) {
+ return count;
+ }
+
+ // Remove SharedThreadPool from table of pools.
+ sPools->Remove(mName);
+ MOZ_ASSERT(!sPools->Get(mName));
+
+ // Dispatch an event to the main thread to call Shutdown() on
+ // the nsIThreadPool. The Runnable here will add a refcount to the pool,
+ // and when the Runnable releases the nsIThreadPool it will be deleted.
+ NS_DispatchToMainThread(NewRunnableMethod(mPool, &nsIThreadPool::Shutdown));
+
+ // Stabilize refcount, so that if something in the dtor QIs, it won't explode.
+ mRefCnt = 1;
+ delete this;
+ return 0;
+}
+
+NS_IMPL_QUERY_INTERFACE(SharedThreadPool, nsIThreadPool, nsIEventTarget)
+
+SharedThreadPool::SharedThreadPool(const nsCString& aName,
+ nsIThreadPool* aPool)
+ : mName(aName)
+ , mPool(aPool)
+ , mRefCnt(0)
+{
+ MOZ_COUNT_CTOR(SharedThreadPool);
+ mEventTarget = do_QueryInterface(aPool);
+}
+
+SharedThreadPool::~SharedThreadPool()
+{
+ MOZ_COUNT_DTOR(SharedThreadPool);
+}
+
+nsresult
+SharedThreadPool::EnsureThreadLimitIsAtLeast(uint32_t aLimit)
+{
+ // We limit the number of threads that we use. Note that we
+ // set the thread limit to the same as the idle limit so that we're not
+ // constantly creating and destroying threads (see Bug 881954). When the
+  // thread pool threads shut down they dispatch an event to the main thread
+  // to call nsIThread::Shutdown(), and if we're very busy that can take a
+  // while to run, leaving us with dozens of extra threads in the meantime. Note
+  // that threads that are idle for 60 seconds are shut down naturally.
+ uint32_t existingLimit = 0;
+ nsresult rv;
+
+ rv = mPool->GetThreadLimit(&existingLimit);
+ NS_ENSURE_SUCCESS(rv, rv);
+ if (aLimit > existingLimit) {
+ rv = mPool->SetThreadLimit(aLimit);
+ NS_ENSURE_SUCCESS(rv, rv);
+ }
+
+ rv = mPool->GetIdleThreadLimit(&existingLimit);
+ NS_ENSURE_SUCCESS(rv, rv);
+ if (aLimit > existingLimit) {
+ rv = mPool->SetIdleThreadLimit(aLimit);
+ NS_ENSURE_SUCCESS(rv, rv);
+ }
+
+ return NS_OK;
+}
+
+static already_AddRefed<nsIThreadPool>
+CreateThreadPool(const nsCString& aName)
+{
+ nsresult rv;
+ nsCOMPtr<nsIThreadPool> pool = do_CreateInstance(NS_THREADPOOL_CONTRACTID, &rv);
+ NS_ENSURE_SUCCESS(rv, nullptr);
+
+ rv = pool->SetName(aName);
+ NS_ENSURE_SUCCESS(rv, nullptr);
+
+ rv = pool->SetThreadStackSize(SharedThreadPool::kStackSize);
+ NS_ENSURE_SUCCESS(rv, nullptr);
+
+#ifdef XP_WIN
+ // Ensure MSCOM is initialized on the thread pools threads.
+ nsCOMPtr<nsIThreadPoolListener> listener = new MSCOMInitThreadPoolListener();
+ rv = pool->SetListener(listener);
+ NS_ENSURE_SUCCESS(rv, nullptr);
+#endif
+
+ return pool.forget();
+}
+
+} // namespace mozilla
diff --git a/xpcom/threads/SharedThreadPool.h b/xpcom/threads/SharedThreadPool.h
new file mode 100644
index 000000000..185b9e76f
--- /dev/null
+++ b/xpcom/threads/SharedThreadPool.h
@@ -0,0 +1,129 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef SharedThreadPool_h_
+#define SharedThreadPool_h_
+
+#include <queue>
+#include "mozilla/RefPtr.h"
+#include "nsThreadUtils.h"
+#include "nsIThreadManager.h"
+#include "nsIThreadPool.h"
+#include "nsISupports.h"
+#include "nsISupportsImpl.h"
+#include "nsCOMPtr.h"
+
+namespace mozilla {
+
+// Wrapper that makes an nsIThreadPool a singleton, and provides a
+// consistent threadsafe interface to get instances. Callers simply get a
+// SharedThreadPool by the name of its nsIThreadPool. All get requests of
+// the same name get the same SharedThreadPool. Users must store a reference
+// to the pool, and when the last reference to a SharedThreadPool is dropped
+// the pool is shut down and deleted. Users aren't required to manually shut
+// down the pool, and can release references on any thread. This can make
+// it significantly easier to use thread pools, because the caller doesn't need
+// to worry about joining and tearing it down.
+//
+// On Windows all threads in the pool have MSCOM initialized with
+// COINIT_MULTITHREADED. Note that not all users of MSCOM use this mode (see
+// [1]), and mixing MSCOM objects between the two modes is terrible for
+// performance and can cause some functions to fail. So be careful when using
+// Win32 APIs on a SharedThreadPool, and avoid sharing objects if at all
+// possible.
+//
+// [1] https://dxr.mozilla.org/mozilla-central/search?q=coinitialize&redirect=false
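+//
+// Illustrative sketch (hypothetical pool name and runnable, not part of this
+// file):
+//
+//   RefPtr<SharedThreadPool> pool =
+//     SharedThreadPool::Get(NS_LITERAL_CSTRING("MediaDecode"), 4);
+//   pool->Dispatch(runnable.forget(), NS_DISPATCH_NORMAL);
+//
+// The pool is shut down automatically once the last SharedThreadPool reference
+// is dropped.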
+class SharedThreadPool : public nsIThreadPool
+{
+public:
+
+ // Gets (possibly creating) the shared thread pool singleton instance with
+ // thread pool named aName.
+ static already_AddRefed<SharedThreadPool> Get(const nsCString& aName,
+ uint32_t aThreadLimit = 4);
+
+  // We implement a custom threadsafe AddRef/Release pair that destroys the
+  // shared pool singleton when the refcount drops to 0. AddRef/Release are
+  // implemented using locking, so it's not recommended that you use them
+ // in a tight loop.
+ NS_IMETHOD QueryInterface(REFNSIID aIID, void** aInstancePtr) override;
+ NS_IMETHOD_(MozExternalRefCountType) AddRef(void) override;
+ NS_IMETHOD_(MozExternalRefCountType) Release(void) override;
+
+ // Forward behaviour to wrapped thread pool implementation.
+ NS_FORWARD_SAFE_NSITHREADPOOL(mPool);
+
+ // Call this when dispatching from an event on the same
+ // threadpool that is about to complete. We should not create a new thread
+ // in that case since a thread is about to become idle.
+ nsresult DispatchFromEndOfTaskInThisPool(nsIRunnable *event)
+ {
+ return Dispatch(event, NS_DISPATCH_AT_END);
+ }
+
+ NS_IMETHOD DispatchFromScript(nsIRunnable *event, uint32_t flags) override {
+ return Dispatch(event, flags);
+ }
+
+ NS_IMETHOD Dispatch(already_AddRefed<nsIRunnable> event, uint32_t flags) override
+ { return !mEventTarget ? NS_ERROR_NULL_POINTER : mEventTarget->Dispatch(Move(event), flags); }
+
+ NS_IMETHOD DelayedDispatch(already_AddRefed<nsIRunnable>, uint32_t) override
+ { return NS_ERROR_NOT_IMPLEMENTED; }
+
+ using nsIEventTarget::Dispatch;
+
+ NS_IMETHOD IsOnCurrentThread(bool *_retval) override { return !mEventTarget ? NS_ERROR_NULL_POINTER : mEventTarget->IsOnCurrentThread(_retval); }
+
+ // Creates necessary statics. Called once at startup.
+ static void InitStatics();
+
+ // Spins the event loop until all thread pools are shutdown.
+ // *Must* be called on the main thread.
+ static void SpinUntilEmpty();
+
+#if defined(MOZ_ASAN)
+ // Use the system default in ASAN builds, because the default is assumed to be
+ // larger than the size we want to use and is hopefully sufficient for ASAN.
+ static const uint32_t kStackSize = nsIThreadManager::DEFAULT_STACK_SIZE;
+#elif defined(XP_WIN) || defined(XP_MACOSX) || defined(LINUX)
+ static const uint32_t kStackSize = (256 * 1024);
+#else
+ // All other platforms use their system defaults.
+ static const uint32_t kStackSize = nsIThreadManager::DEFAULT_STACK_SIZE;
+#endif
+
+private:
+
+ // Returns whether there are no pools in existence at the moment.
+ static bool IsEmpty();
+
+ // Creates a singleton SharedThreadPool wrapper around aPool.
+ // aName is the name of the aPool, and is used to lookup the
+ // SharedThreadPool in the hash table of all created pools.
+ SharedThreadPool(const nsCString& aName,
+ nsIThreadPool* aPool);
+ virtual ~SharedThreadPool();
+
+ nsresult EnsureThreadLimitIsAtLeast(uint32_t aThreadLimit);
+
+ // Name of mPool.
+ const nsCString mName;
+
+ // Thread pool being wrapped.
+ nsCOMPtr<nsIThreadPool> mPool;
+
+ // Refcount. We implement custom ref counting so that the thread pool is
+ // shutdown in a threadsafe manner and singletonness is preserved.
+ nsrefcnt mRefCnt;
+
+ // mPool QI'd to nsIEventTarget. We cache this, so that we can use
+ // NS_FORWARD_SAFE_NSIEVENTTARGET above.
+ nsCOMPtr<nsIEventTarget> mEventTarget;
+};
+
+} // namespace mozilla
+
+#endif // SharedThreadPool_h_
diff --git a/xpcom/threads/StateMirroring.h b/xpcom/threads/StateMirroring.h
new file mode 100644
index 000000000..87d94ba74
--- /dev/null
+++ b/xpcom/threads/StateMirroring.h
@@ -0,0 +1,378 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#if !defined(StateMirroring_h_)
+#define StateMirroring_h_
+
+#include "mozilla/Maybe.h"
+#include "mozilla/MozPromise.h"
+#include "mozilla/StateWatching.h"
+#include "mozilla/TaskDispatcher.h"
+#include "mozilla/UniquePtr.h"
+#include "mozilla/Unused.h"
+
+#include "mozilla/Logging.h"
+#include "nsISupportsImpl.h"
+
+/*
+ * The state-mirroring machinery allows pieces of interesting state to be
+ * observed on multiple threads without locking. The basic strategy is to track
+ * changes in a canonical value and post updates to other threads that hold
+ * mirrors for that value.
+ *
+ * One problem with the naive implementation of such a system is that some pieces
+ * of state need to be updated atomically, and certain other operations need to
+ * wait for these atomic updates to complete before executing. The state-mirroring
+ * machinery solves this problem by requiring that its owner thread uses tail
+ * dispatch, and posting state update events (which should always be run first by
+ * TaskDispatcher implementations) to that tail dispatcher. This ensures that
+ * state changes are always atomic from the perspective of observing threads.
+ *
+ * Given that state-mirroring is an automatic background process, we try to avoid
+ * burdening the caller with worrying too much about teardown. To that end, we
+ * don't assert dispatch success for any of the notifications, and assume that
+ * any canonical or mirror owned by a thread for whom dispatch fails will soon
+ * be disconnected by its holder anyway.
+ *
+ * Given that semantics may change and comments tend to go out of date, we
+ * deliberately don't provide usage examples here. Grep around to find them.
+ */
+
+namespace mozilla {
+
+// Mirror<T> and Canonical<T> inherit WatchTarget, so we piggy-back on the
+// logging that WatchTarget already does. Given that, it makes sense to share
+// the same log module.
+#define MIRROR_LOG(x, ...) \
+ MOZ_ASSERT(gStateWatchingLog); \
+ MOZ_LOG(gStateWatchingLog, LogLevel::Debug, (x, ##__VA_ARGS__))
+
+template<typename T> class AbstractMirror;
+
+/*
+ * AbstractCanonical is a superclass from which all Canonical values must
+ * inherit. It serves as the interface of operations which may be performed (via
+ * asynchronous dispatch) by other threads, in particular by the corresponding
+ * Mirror value.
+ */
+template<typename T>
+class AbstractCanonical
+{
+public:
+ NS_INLINE_DECL_THREADSAFE_REFCOUNTING(AbstractCanonical)
+ AbstractCanonical(AbstractThread* aThread) : mOwnerThread(aThread) {}
+ virtual void AddMirror(AbstractMirror<T>* aMirror) = 0;
+ virtual void RemoveMirror(AbstractMirror<T>* aMirror) = 0;
+
+ AbstractThread* OwnerThread() const { return mOwnerThread; }
+protected:
+ virtual ~AbstractCanonical() {}
+ RefPtr<AbstractThread> mOwnerThread;
+};
+
+/*
+ * AbstractMirror is a superclass from which all Mirror values must
+ * inherit. It serves as the interface of operations which may be performed (via
+ * asynchronous dispatch) by other threads, in particular by the corresponding
+ * Canonical value.
+ */
+template<typename T>
+class AbstractMirror
+{
+public:
+ NS_INLINE_DECL_THREADSAFE_REFCOUNTING(AbstractMirror)
+ AbstractMirror(AbstractThread* aThread) : mOwnerThread(aThread) {}
+ virtual void UpdateValue(const T& aNewValue) = 0;
+ virtual void NotifyDisconnected() = 0;
+
+ AbstractThread* OwnerThread() const { return mOwnerThread; }
+protected:
+ virtual ~AbstractMirror() {}
+ RefPtr<AbstractThread> mOwnerThread;
+};
+
+/*
+ * Canonical<T> is a wrapper class that allows a given value to be mirrored by other
+ * threads. It maintains a list of active mirrors, and queues updates for them
+ * when the internal value changes. When changing the value, the caller needs to
+ * pass a TaskDispatcher object, which fires the updates at the appropriate time.
+ * Canonical<T> is also a WatchTarget, and may be set up to trigger other routines
+ * (on the same thread) when the canonical value changes.
+ *
+ * Canonical<T> is intended to be used as a member variable, so it doesn't actually
+ * inherit AbstractCanonical<T> (a refcounted type). Rather, it contains an inner
+ * class called |Impl| that implements most of the interesting logic.
+ */
+template<typename T>
+class Canonical
+{
+public:
+ Canonical(AbstractThread* aThread, const T& aInitialValue, const char* aName)
+ {
+ mImpl = new Impl(aThread, aInitialValue, aName);
+ }
+
+
+ ~Canonical() {}
+
+private:
+ class Impl : public AbstractCanonical<T>, public WatchTarget
+ {
+ public:
+ using AbstractCanonical<T>::OwnerThread;
+
+ Impl(AbstractThread* aThread, const T& aInitialValue, const char* aName)
+ : AbstractCanonical<T>(aThread), WatchTarget(aName), mValue(aInitialValue)
+ {
+ MIRROR_LOG("%s [%p] initialized", mName, this);
+ MOZ_ASSERT(aThread->SupportsTailDispatch(), "Can't get coherency without tail dispatch");
+ }
+
+ void AddMirror(AbstractMirror<T>* aMirror) override
+ {
+ MIRROR_LOG("%s [%p] adding mirror %p", mName, this, aMirror);
+ MOZ_ASSERT(OwnerThread()->IsCurrentThreadIn());
+ MOZ_ASSERT(!mMirrors.Contains(aMirror));
+ mMirrors.AppendElement(aMirror);
+ aMirror->OwnerThread()->DispatchStateChange(MakeNotifier(aMirror));
+ }
+
+ void RemoveMirror(AbstractMirror<T>* aMirror) override
+ {
+ MIRROR_LOG("%s [%p] removing mirror %p", mName, this, aMirror);
+ MOZ_ASSERT(OwnerThread()->IsCurrentThreadIn());
+ MOZ_ASSERT(mMirrors.Contains(aMirror));
+ mMirrors.RemoveElement(aMirror);
+ }
+
+ void DisconnectAll()
+ {
+ MIRROR_LOG("%s [%p] Disconnecting all mirrors", mName, this);
+ for (size_t i = 0; i < mMirrors.Length(); ++i) {
+ mMirrors[i]->OwnerThread()->Dispatch(NewRunnableMethod(mMirrors[i],
+ &AbstractMirror<T>::NotifyDisconnected),
+ AbstractThread::DontAssertDispatchSuccess);
+ }
+ mMirrors.Clear();
+ }
+
+ operator const T&()
+ {
+ MOZ_ASSERT(OwnerThread()->IsCurrentThreadIn());
+ return mValue;
+ }
+
+ void Set(const T& aNewValue)
+ {
+ MOZ_ASSERT(OwnerThread()->IsCurrentThreadIn());
+
+ if (aNewValue == mValue) {
+ return;
+ }
+
+ // Notify same-thread watchers. The state watching machinery will make sure
+ // that notifications run at the right time.
+ NotifyWatchers();
+
+ // Check if we've already got a pending update. If so we won't schedule another
+ // one.
+ bool alreadyNotifying = mInitialValue.isSome();
+
+ // Stash the initial value if needed, then update to the new value.
+ if (mInitialValue.isNothing()) {
+ mInitialValue.emplace(mValue);
+ }
+ mValue = aNewValue;
+
+      // We wait until things have stabilized before sending state updates so that
+ // we can avoid sending multiple updates, and possibly avoid sending any
+ // updates at all if the value ends up where it started.
+ if (!alreadyNotifying) {
+ AbstractThread::DispatchDirectTask(NewRunnableMethod(this, &Impl::DoNotify));
+ }
+ }
+
+ Impl& operator=(const T& aNewValue) { Set(aNewValue); return *this; }
+ Impl& operator=(const Impl& aOther) { Set(aOther); return *this; }
+ Impl(const Impl& aOther) = delete;
+
+ protected:
+ ~Impl() { MOZ_DIAGNOSTIC_ASSERT(mMirrors.IsEmpty()); }
+
+ private:
+ void DoNotify()
+ {
+ MOZ_ASSERT(OwnerThread()->IsCurrentThreadIn());
+ MOZ_ASSERT(mInitialValue.isSome());
+ bool same = mInitialValue.ref() == mValue;
+ mInitialValue.reset();
+
+ if (same) {
+ MIRROR_LOG("%s [%p] unchanged - not sending update", mName, this);
+ return;
+ }
+
+ for (size_t i = 0; i < mMirrors.Length(); ++i) {
+ mMirrors[i]->OwnerThread()->DispatchStateChange(MakeNotifier(mMirrors[i]));
+ }
+ }
+
+ already_AddRefed<nsIRunnable> MakeNotifier(AbstractMirror<T>* aMirror)
+ {
+      return NewRunnableMethod<T>(aMirror, &AbstractMirror<T>::UpdateValue, mValue);
+ }
+
+ T mValue;
+ Maybe<T> mInitialValue;
+ nsTArray<RefPtr<AbstractMirror<T>>> mMirrors;
+ };
+public:
+
+ // NB: Because mirror-initiated disconnection can race with canonical-
+ // initiated disconnection, a canonical should never be reinitialized.
+ // Forward control operations to the Impl.
+ void DisconnectAll() { return mImpl->DisconnectAll(); }
+
+ // Access to the Impl.
+ operator Impl&() { return *mImpl; }
+ Impl* operator&() { return mImpl; }
+
+ // Access to the T.
+ const T& Ref() const { return *mImpl; }
+ operator const T&() const { return Ref(); }
+ void Set(const T& aNewValue) { mImpl->Set(aNewValue); }
+ Canonical& operator=(const T& aNewValue) { Set(aNewValue); return *this; }
+ Canonical& operator=(const Canonical& aOther) { Set(aOther); return *this; }
+ Canonical(const Canonical& aOther) = delete;
+
+private:
+ RefPtr<Impl> mImpl;
+};
+
+/*
+ * Mirror<T> is a wrapper class that allows a given value to mirror that of a
+ * Canonical<T> owned by another thread. It registers itself with a Canonical<T>,
+ * and is periodically updated with new values. Mirror<T> is also a WatchTarget,
+ * and may be set up to trigger other routines (on the same thread) when the
+ * mirrored value changes.
+ *
+ * Mirror<T> is intended to be used as a member variable, so it doesn't actually
+ * inherit AbstractMirror<T> (a refcounted type). Rather, it contains an inner
+ * class called |Impl| that implements most of the interesting logic.
+ */
+template<typename T>
+class Mirror
+{
+public:
+ Mirror(AbstractThread* aThread, const T& aInitialValue, const char* aName)
+ {
+ mImpl = new Impl(aThread, aInitialValue, aName);
+ }
+
+ ~Mirror()
+ {
+ // As a member of complex objects, a Mirror<T> may be destroyed on a
+ // different thread than its owner, or late in shutdown during CC. Given
+ // that, we require manual disconnection so that callers can put things in
+ // the right place.
+ MOZ_DIAGNOSTIC_ASSERT(!mImpl->IsConnected());
+ }
+
+private:
+ class Impl : public AbstractMirror<T>, public WatchTarget
+ {
+ public:
+ using AbstractMirror<T>::OwnerThread;
+
+ Impl(AbstractThread* aThread, const T& aInitialValue, const char* aName)
+ : AbstractMirror<T>(aThread), WatchTarget(aName), mValue(aInitialValue)
+ {
+ MIRROR_LOG("%s [%p] initialized", mName, this);
+ MOZ_ASSERT(aThread->SupportsTailDispatch(), "Can't get coherency without tail dispatch");
+ }
+
+ operator const T&()
+ {
+ MOZ_ASSERT(OwnerThread()->IsCurrentThreadIn());
+ return mValue;
+ }
+
+ virtual void UpdateValue(const T& aNewValue) override
+ {
+ MOZ_ASSERT(OwnerThread()->IsCurrentThreadIn());
+ if (mValue != aNewValue) {
+ mValue = aNewValue;
+ WatchTarget::NotifyWatchers();
+ }
+ }
+
+ virtual void NotifyDisconnected() override
+ {
+ MIRROR_LOG("%s [%p] Notifed of disconnection from %p", mName, this, mCanonical.get());
+ MOZ_ASSERT(OwnerThread()->IsCurrentThreadIn());
+ mCanonical = nullptr;
+ }
+
+ bool IsConnected() const { return !!mCanonical; }
+
+ void Connect(AbstractCanonical<T>* aCanonical)
+ {
+ MIRROR_LOG("%s [%p] Connecting to %p", mName, this, aCanonical);
+ MOZ_ASSERT(OwnerThread()->IsCurrentThreadIn());
+ MOZ_ASSERT(!IsConnected());
+ MOZ_ASSERT(OwnerThread()->RequiresTailDispatch(aCanonical->OwnerThread()), "Can't get coherency without tail dispatch");
+
+ nsCOMPtr<nsIRunnable> r = NewRunnableMethod<StorensRefPtrPassByPtr<AbstractMirror<T>>>
+ (aCanonical, &AbstractCanonical<T>::AddMirror, this);
+ aCanonical->OwnerThread()->Dispatch(r.forget(), AbstractThread::DontAssertDispatchSuccess);
+ mCanonical = aCanonical;
+ }
+ public:
+
+ void DisconnectIfConnected()
+ {
+ MOZ_ASSERT(OwnerThread()->IsCurrentThreadIn());
+ if (!IsConnected()) {
+ return;
+ }
+
+ MIRROR_LOG("%s [%p] Disconnecting from %p", mName, this, mCanonical.get());
+ nsCOMPtr<nsIRunnable> r = NewRunnableMethod<StorensRefPtrPassByPtr<AbstractMirror<T>>>
+ (mCanonical, &AbstractCanonical<T>::RemoveMirror, this);
+ mCanonical->OwnerThread()->Dispatch(r.forget(), AbstractThread::DontAssertDispatchSuccess);
+ mCanonical = nullptr;
+ }
+
+ protected:
+ ~Impl() { MOZ_DIAGNOSTIC_ASSERT(!IsConnected()); }
+
+ private:
+ T mValue;
+ RefPtr<AbstractCanonical<T>> mCanonical;
+ };
+public:
+
+ // Forward control operations to the Impl<T>.
+ void Connect(AbstractCanonical<T>* aCanonical) { mImpl->Connect(aCanonical); }
+ void DisconnectIfConnected() { mImpl->DisconnectIfConnected(); }
+
+ // Access to the Impl<T>.
+ operator Impl&() { return *mImpl; }
+ Impl* operator&() { return mImpl; }
+
+ // Access to the T.
+ const T& Ref() const { return *mImpl; }
+ operator const T&() const { return Ref(); }
+
+private:
+ RefPtr<Impl> mImpl;
+};
+
+#undef MIRROR_LOG
+
+} // namespace mozilla
+
+#endif
diff --git a/xpcom/threads/StateWatching.h b/xpcom/threads/StateWatching.h
new file mode 100644
index 000000000..99d521603
--- /dev/null
+++ b/xpcom/threads/StateWatching.h
@@ -0,0 +1,317 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#if !defined(StateWatching_h_)
+#define StateWatching_h_
+
+#include "mozilla/AbstractThread.h"
+#include "mozilla/Logging.h"
+#include "mozilla/TaskDispatcher.h"
+#include "mozilla/UniquePtr.h"
+#include "mozilla/Unused.h"
+
+#include "nsISupportsImpl.h"
+
+/*
+ * The state-watching machinery automates the process of responding to changes
+ * in various pieces of state.
+ *
+ * A standard programming pattern is as follows:
+ *
+ * mFoo = ...;
+ * NotifyStuffChanged();
+ * ...
+ * mBar = ...;
+ * NotifyStuffChanged();
+ *
+ * This pattern is error-prone and difficult to audit because it requires the
+ * programmer to manually trigger the update routine. This can be especially
+ * problematic when the update routine depends on numerous pieces of state, and
+ * when that state is modified across a variety of helper methods. In these
+ * cases the responsibility for invoking the routine is often unclear, causing
+ * developers to scatter calls to it like pixie dust. This can result in
+ * duplicate invocations (which is wasteful) and missing invocations in corner-
+ * cases (which is a source of bugs).
+ *
+ * This file provides a set of primitives that automatically handle updates and
+ * allow the programmers to explicitly construct a graph of state dependencies.
+ * When used correctly, it eliminates the guess-work and wasted cycles described
+ * above.
+ *
+ * There are two basic pieces:
+ * (1) Objects that can be watched for updates. These inherit WatchTarget.
+ * (2) Objects that receive notifications and trigger processing. These inherit
+ * AbstractWatcher. In the current machinery, these exist only internally
+ * within the WatchManager, though that could change.
+ *
+ * Note that none of this machinery is thread-safe - it must all happen on the
+ * same owning thread. To solve multi-threaded use-cases, use state mirroring
+ * and watch the mirrored value.
+ *
+ * Given that semantics may change and comments tend to go out of date, we
+ * deliberately don't provide usage examples here. Grep around to find them.
+ */
+
+namespace mozilla {
+
+extern LazyLogModule gStateWatchingLog;
+
+#define WATCH_LOG(x, ...) \
+ MOZ_LOG(gStateWatchingLog, LogLevel::Debug, (x, ##__VA_ARGS__))
+
+/*
+ * AbstractWatcher is a superclass from which all watchers must inherit.
+ */
+class AbstractWatcher
+{
+public:
+ NS_INLINE_DECL_THREADSAFE_REFCOUNTING(AbstractWatcher)
+ AbstractWatcher() : mDestroyed(false) {}
+ bool IsDestroyed() { return mDestroyed; }
+ virtual void Notify() = 0;
+
+protected:
+ virtual ~AbstractWatcher() { MOZ_ASSERT(mDestroyed); }
+ bool mDestroyed;
+};
+
+/*
+ * WatchTarget is a superclass from which all watchable things must inherit.
+ * Unlike AbstractWatcher, it is a fully-implemented Mix-in, and the subclass
+ * needs only to invoke NotifyWatchers when something changes.
+ *
+ * The functionality that this class provides is not threadsafe, and should only
+ * be used on the thread that owns the WatchTarget.
+ */
+class WatchTarget
+{
+public:
+ explicit WatchTarget(const char* aName) : mName(aName) {}
+
+ void AddWatcher(AbstractWatcher* aWatcher)
+ {
+ MOZ_ASSERT(!mWatchers.Contains(aWatcher));
+ mWatchers.AppendElement(aWatcher);
+ }
+
+ void RemoveWatcher(AbstractWatcher* aWatcher)
+ {
+ MOZ_ASSERT(mWatchers.Contains(aWatcher));
+ mWatchers.RemoveElement(aWatcher);
+ }
+
+protected:
+ void NotifyWatchers()
+ {
+ WATCH_LOG("%s[%p] notifying watchers\n", mName, this);
+ PruneWatchers();
+ for (size_t i = 0; i < mWatchers.Length(); ++i) {
+ mWatchers[i]->Notify();
+ }
+ }
+
+private:
+ // We don't have Watchers explicitly unregister themselves when they die,
+ // because then they'd need back-references to all the WatchTargets they're
+ // subscribed to, and WatchTargets aren't reference-counted. So instead we
+ // just prune dead ones at appropriate times, which works just fine.
+ void PruneWatchers()
+ {
+ for (int i = mWatchers.Length() - 1; i >= 0; --i) {
+ if (mWatchers[i]->IsDestroyed()) {
+ mWatchers.RemoveElementAt(i);
+ }
+ }
+ }
+
+ nsTArray<RefPtr<AbstractWatcher>> mWatchers;
+
+protected:
+ const char* mName;
+};
+
+/*
+ * Watchable is a wrapper class that turns any primitive into a WatchTarget.
+ */
+template<typename T>
+class Watchable : public WatchTarget
+{
+public:
+ Watchable(const T& aInitialValue, const char* aName)
+ : WatchTarget(aName), mValue(aInitialValue) {}
+
+ const T& Ref() const { return mValue; }
+ operator const T&() const { return Ref(); }
+ Watchable& operator=(const T& aNewValue)
+ {
+ if (aNewValue != mValue) {
+ mValue = aNewValue;
+ NotifyWatchers();
+ }
+
+ return *this;
+ }
+
+private:
+ Watchable(const Watchable& aOther); // Not implemented
+ Watchable& operator=(const Watchable& aOther); // Not implemented
+
+ T mValue;
+};
+
+// Manager class for state-watching. Declare one of these in any class for which
+// you want to invoke method callbacks.
+//
+// Internally, WatchManager maintains one AbstractWatcher per callback method.
+// Consumers invoke Watch/Unwatch on a particular (WatchTarget, Callback) tuple.
+// This causes an AbstractWatcher for |Callback| to be instantiated if it doesn't
+// already exist, and registers it with |WatchTarget|.
+//
+// Using Direct Tasks on the TailDispatcher, WatchManager ensures that we fire
+// watch callbacks no more than once per task, once all other operations for that
+// task have been completed.
+//
+// WatchManager<OwnerType> is intended to be declared as a member of |OwnerType|
+// objects. Given that, it and its owned objects can't hold permanent strong refs to
+// the owner, since that would keep the owner alive indefinitely. Instead, it
+// _only_ holds strong refs while waiting for Direct Tasks to fire. This ensures
+// that everything is kept alive just long enough.
+template <typename OwnerType>
+class WatchManager
+{
+public:
+ typedef void(OwnerType::*CallbackMethod)();
+ explicit WatchManager(OwnerType* aOwner, AbstractThread* aOwnerThread)
+ : mOwner(aOwner), mOwnerThread(aOwnerThread) {}
+
+ ~WatchManager()
+ {
+ if (!IsShutdown()) {
+ Shutdown();
+ }
+ }
+
+ bool IsShutdown() const { return !mOwner; }
+
+ // Shutdown needs to happen on mOwnerThread. If the WatchManager will be
+ // destroyed on a different thread, Shutdown() must be called manually.
+ void Shutdown()
+ {
+ MOZ_ASSERT(mOwnerThread->IsCurrentThreadIn());
+ for (size_t i = 0; i < mWatchers.Length(); ++i) {
+ mWatchers[i]->Destroy();
+ }
+ mWatchers.Clear();
+ mOwner = nullptr;
+ }
+
+ void Watch(WatchTarget& aTarget, CallbackMethod aMethod)
+ {
+ MOZ_ASSERT(mOwnerThread->IsCurrentThreadIn());
+ aTarget.AddWatcher(&EnsureWatcher(aMethod));
+ }
+
+ void Unwatch(WatchTarget& aTarget, CallbackMethod aMethod)
+ {
+ MOZ_ASSERT(mOwnerThread->IsCurrentThreadIn());
+ PerCallbackWatcher* watcher = GetWatcher(aMethod);
+ MOZ_ASSERT(watcher);
+ aTarget.RemoveWatcher(watcher);
+ }
+
+ void ManualNotify(CallbackMethod aMethod)
+ {
+ MOZ_ASSERT(mOwnerThread->IsCurrentThreadIn());
+ PerCallbackWatcher* watcher = GetWatcher(aMethod);
+ MOZ_ASSERT(watcher);
+ watcher->Notify();
+ }
+
+private:
+ class PerCallbackWatcher : public AbstractWatcher
+ {
+ public:
+ PerCallbackWatcher(OwnerType* aOwner, AbstractThread* aOwnerThread, CallbackMethod aMethod)
+ : mOwner(aOwner), mOwnerThread(aOwnerThread), mCallbackMethod(aMethod) {}
+
+ void Destroy()
+ {
+ MOZ_ASSERT(mOwnerThread->IsCurrentThreadIn());
+ mDestroyed = true;
+ mOwner = nullptr;
+ }
+
+ void Notify() override
+ {
+ MOZ_ASSERT(mOwnerThread->IsCurrentThreadIn());
+ MOZ_DIAGNOSTIC_ASSERT(mOwner, "mOwner is only null after destruction, "
+ "at which point we shouldn't be notified");
+ if (mStrongRef) {
+ // We've already got a notification job in the pipe.
+ return;
+ }
+ mStrongRef = mOwner; // Hold the owner alive while notifying.
+
+ // Queue up our notification jobs to run in a stable state.
+ mOwnerThread->TailDispatcher().AddDirectTask(NewRunnableMethod(this, &PerCallbackWatcher::DoNotify));
+ }
+
+ bool CallbackMethodIs(CallbackMethod aMethod) const
+ {
+ return mCallbackMethod == aMethod;
+ }
+
+ private:
+ ~PerCallbackWatcher() {}
+
+ void DoNotify()
+ {
+ MOZ_ASSERT(mOwnerThread->IsCurrentThreadIn());
+ MOZ_ASSERT(mStrongRef);
+ RefPtr<OwnerType> ref = mStrongRef.forget();
+ if (!mDestroyed) {
+ ((*ref).*mCallbackMethod)();
+ }
+ }
+
+    OwnerType* mOwner; // Nulled out (only) by Destroy().
+ RefPtr<OwnerType> mStrongRef; // Only non-null when notifying.
+ RefPtr<AbstractThread> mOwnerThread;
+ CallbackMethod mCallbackMethod;
+ };
+
+ PerCallbackWatcher* GetWatcher(CallbackMethod aMethod)
+ {
+ MOZ_ASSERT(mOwnerThread->IsCurrentThreadIn());
+ for (size_t i = 0; i < mWatchers.Length(); ++i) {
+ if (mWatchers[i]->CallbackMethodIs(aMethod)) {
+ return mWatchers[i];
+ }
+ }
+ return nullptr;
+ }
+
+ PerCallbackWatcher& EnsureWatcher(CallbackMethod aMethod)
+ {
+ MOZ_ASSERT(mOwnerThread->IsCurrentThreadIn());
+ PerCallbackWatcher* watcher = GetWatcher(aMethod);
+ if (watcher) {
+ return *watcher;
+ }
+ watcher = mWatchers.AppendElement(new PerCallbackWatcher(mOwner, mOwnerThread, aMethod))->get();
+ return *watcher;
+ }
+
+ nsTArray<RefPtr<PerCallbackWatcher>> mWatchers;
+ OwnerType* mOwner;
+ RefPtr<AbstractThread> mOwnerThread;
+};
+
+#undef WATCH_LOG
+
+} // namespace mozilla
+
+#endif
diff --git a/xpcom/threads/SyncRunnable.h b/xpcom/threads/SyncRunnable.h
new file mode 100644
index 000000000..d96bac7ba
--- /dev/null
+++ b/xpcom/threads/SyncRunnable.h
@@ -0,0 +1,129 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef mozilla_SyncRunnable_h
+#define mozilla_SyncRunnable_h
+
+#include "nsThreadUtils.h"
+#include "mozilla/AbstractThread.h"
+#include "mozilla/Monitor.h"
+#include "mozilla/Move.h"
+
+namespace mozilla {
+
+/**
+ * This class will wrap an nsIRunnable and dispatch it to a target thread
+ * synchronously. This is different from nsIEventTarget.DISPATCH_SYNC:
+ * this class does not spin the event loop waiting for the event to be
+ * dispatched. This means that you don't risk reentrance from pending
+ * messages, but you must be sure that the target thread does not ever block
+ * on this thread, or else you will deadlock.
+ *
+ * Typical usage:
+ * RefPtr<SyncRunnable> sr = new SyncRunnable(new myrunnable...());
+ * sr->DispatchToThread(t);
+ *
+ * We also provide a convenience wrapper:
+ * SyncRunnable::DispatchToThread(new myrunnable...());
+ *
+ */
+class SyncRunnable : public Runnable
+{
+public:
+ explicit SyncRunnable(nsIRunnable* aRunnable)
+ : mRunnable(aRunnable)
+ , mMonitor("SyncRunnable")
+ , mDone(false)
+ {
+ }
+
+ explicit SyncRunnable(already_AddRefed<nsIRunnable> aRunnable)
+ : mRunnable(Move(aRunnable))
+ , mMonitor("SyncRunnable")
+ , mDone(false)
+ {
+ }
+
+ void DispatchToThread(nsIEventTarget* aThread, bool aForceDispatch = false)
+ {
+ nsresult rv;
+ bool on;
+
+ if (!aForceDispatch) {
+ rv = aThread->IsOnCurrentThread(&on);
+ MOZ_ASSERT(NS_SUCCEEDED(rv));
+ if (NS_SUCCEEDED(rv) && on) {
+ mRunnable->Run();
+ return;
+ }
+ }
+
+ rv = aThread->Dispatch(this, NS_DISPATCH_NORMAL);
+ if (NS_SUCCEEDED(rv)) {
+ mozilla::MonitorAutoLock lock(mMonitor);
+ while (!mDone) {
+ lock.Wait();
+ }
+ }
+ }
+
+ void DispatchToThread(AbstractThread* aThread, bool aForceDispatch = false)
+ {
+ if (!aForceDispatch && aThread->IsCurrentThreadIn()) {
+ mRunnable->Run();
+ return;
+ }
+
+    // Check that we don't have tail dispatching here. Otherwise we will
+    // deadlock ourselves when waiting in the loop below.
+ MOZ_ASSERT(!aThread->RequiresTailDispatchFromCurrentThread());
+
+ aThread->Dispatch(RefPtr<nsIRunnable>(this).forget());
+ mozilla::MonitorAutoLock lock(mMonitor);
+ while (!mDone) {
+ lock.Wait();
+ }
+ }
+
+ static void DispatchToThread(nsIEventTarget* aThread,
+ nsIRunnable* aRunnable,
+ bool aForceDispatch = false)
+ {
+ RefPtr<SyncRunnable> s(new SyncRunnable(aRunnable));
+ s->DispatchToThread(aThread, aForceDispatch);
+ }
+
+ static void DispatchToThread(AbstractThread* aThread,
+ nsIRunnable* aRunnable,
+ bool aForceDispatch = false)
+ {
+ RefPtr<SyncRunnable> s(new SyncRunnable(aRunnable));
+ s->DispatchToThread(aThread, aForceDispatch);
+ }
+
+protected:
+ NS_IMETHOD Run() override
+ {
+ mRunnable->Run();
+
+ mozilla::MonitorAutoLock lock(mMonitor);
+ MOZ_ASSERT(!mDone);
+
+ mDone = true;
+ mMonitor.Notify();
+
+ return NS_OK;
+ }
+
+private:
+ nsCOMPtr<nsIRunnable> mRunnable;
+ mozilla::Monitor mMonitor;
+ bool mDone;
+};
+
+} // namespace mozilla
+
+#endif // mozilla_SyncRunnable_h
diff --git a/xpcom/threads/TaskDispatcher.h b/xpcom/threads/TaskDispatcher.h
new file mode 100644
index 000000000..405c3acfe
--- /dev/null
+++ b/xpcom/threads/TaskDispatcher.h
@@ -0,0 +1,276 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#if !defined(TaskDispatcher_h_)
+#define TaskDispatcher_h_
+
+#include "mozilla/AbstractThread.h"
+#include "mozilla/Maybe.h"
+#include "mozilla/UniquePtr.h"
+#include "mozilla/Unused.h"
+
+#include "nsISupportsImpl.h"
+#include "nsTArray.h"
+#include "nsThreadUtils.h"
+
+#include <queue>
+
+namespace mozilla {
+
+/*
+ * A classic approach to cross-thread communication is to dispatch asynchronous
+ * runnables to perform updates on other threads. This generally works well, but
+ * there are sometimes reasons why we might want to delay the actual dispatch of
+ * these tasks until a specified moment. At present, this is primarily useful to
+ * ensure that mirrored state gets updated atomically - but there may be other
+ * applications as well.
+ *
+ * TaskDispatcher is a general abstract class that accepts tasks and dispatches
+ * them at some later point. These groups of tasks are per-target-thread, and
+ * contain separate queues for several kinds of tasks (see comments below):
+ * "state change tasks", which run first and are intended to be used to update
+ * the value held by mirrors, and regular tasks, which are other arbitrary
+ * operations that are gated to run after all the state changes have completed.
+ */
+class TaskDispatcher
+{
+public:
+ TaskDispatcher() {}
+ virtual ~TaskDispatcher() {}
+
+ // Direct tasks are run directly (rather than dispatched asynchronously) when
+ // the tail dispatcher fires. A direct task may cause other tasks to be added
+ // to the tail dispatcher.
+ virtual void AddDirectTask(already_AddRefed<nsIRunnable> aRunnable) = 0;
+
+ // State change tasks are dispatched asynchronously and always run before
+ // regular tasks. They are intended to be used to update the value held by
+ // mirrors before any other dispatched tasks are run on the target thread.
+ virtual void AddStateChangeTask(AbstractThread* aThread,
+ already_AddRefed<nsIRunnable> aRunnable) = 0;
+
+ // Regular tasks are dispatched asynchronously, and run after state change
+ // tasks.
+ virtual void AddTask(AbstractThread* aThread,
+ already_AddRefed<nsIRunnable> aRunnable,
+ AbstractThread::DispatchFailureHandling aFailureHandling = AbstractThread::AssertDispatchSuccess) = 0;
+
+ virtual void DispatchTasksFor(AbstractThread* aThread) = 0;
+ virtual bool HasTasksFor(AbstractThread* aThread) = 0;
+ virtual void DrainDirectTasks() = 0;
+};
+
+/*
+ * AutoTaskDispatcher is a stack-scoped TaskDispatcher implementation that fires
+ * its queued tasks when it is popped off the stack.
+ */
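+/*
+ * An illustrative sketch (not part of the original file) of how the queues
+ * described above are typically populated; `thread`, `UpdateMirror` and
+ * `DoWork` are hypothetical placeholders:
+ *
+ *   {
+ *     AutoTaskDispatcher dispatcher;
+ *     dispatcher.AddStateChangeTask(thread, NS_NewRunnableFunction(UpdateMirror));
+ *     dispatcher.AddTask(thread, NS_NewRunnableFunction(DoWork),
+ *                        AbstractThread::AssertDispatchSuccess);
+ *   }
+ *   // Popping `dispatcher` off the stack dispatches both groups to `thread`;
+ *   // the state change task is guaranteed to run before the regular task.
+ */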
+class AutoTaskDispatcher : public TaskDispatcher
+{
+public:
+ explicit AutoTaskDispatcher(bool aIsTailDispatcher = false)
+ : mIsTailDispatcher(aIsTailDispatcher)
+ {}
+
+ ~AutoTaskDispatcher()
+ {
+ // Given that direct tasks may trigger other code that uses the tail
+ // dispatcher, it's better to avoid processing them in the tail dispatcher's
+ // destructor. So we require TailDispatchers to manually invoke
+ // DrainDirectTasks before the AutoTaskDispatcher gets destroyed. In truth,
+ // this is only necessary in the case where this AutoTaskDispatcher can be
+ // accessed by the direct tasks it dispatches (true for TailDispatchers, but
+ // potentially not true for other hypothetical AutoTaskDispatchers). Feel
+ // free to loosen this restriction to apply only to mIsTailDispatcher if a
+ // use-case requires it.
+ MOZ_ASSERT(!HaveDirectTasks());
+
+ for (size_t i = 0; i < mTaskGroups.Length(); ++i) {
+ DispatchTaskGroup(Move(mTaskGroups[i]));
+ }
+ }
+
+ bool HaveDirectTasks() const
+ {
+ return mDirectTasks.isSome() && !mDirectTasks->empty();
+ }
+
+ void DrainDirectTasks() override
+ {
+ while (HaveDirectTasks()) {
+ nsCOMPtr<nsIRunnable> r = mDirectTasks->front();
+ mDirectTasks->pop();
+ r->Run();
+ }
+ }
+
+ void AddDirectTask(already_AddRefed<nsIRunnable> aRunnable) override
+ {
+ if (mDirectTasks.isNothing()) {
+ mDirectTasks.emplace();
+ }
+ mDirectTasks->push(Move(aRunnable));
+ }
+
+ void AddStateChangeTask(AbstractThread* aThread,
+ already_AddRefed<nsIRunnable> aRunnable) override
+ {
+ EnsureTaskGroup(aThread).mStateChangeTasks.AppendElement(aRunnable);
+ }
+
+ void AddTask(AbstractThread* aThread,
+ already_AddRefed<nsIRunnable> aRunnable,
+ AbstractThread::DispatchFailureHandling aFailureHandling) override
+ {
+ PerThreadTaskGroup& group = EnsureTaskGroup(aThread);
+ group.mRegularTasks.AppendElement(aRunnable);
+
+ // The task group needs to assert dispatch success if any of the runnables
+ // it's dispatching want to assert it.
+ if (aFailureHandling == AbstractThread::AssertDispatchSuccess) {
+ group.mFailureHandling = AbstractThread::AssertDispatchSuccess;
+ }
+ }
+
+ bool HasTasksFor(AbstractThread* aThread) override
+ {
+ return !!GetTaskGroup(aThread) ||
+ (aThread == AbstractThread::GetCurrent() && HaveDirectTasks());
+ }
+
+ void DispatchTasksFor(AbstractThread* aThread) override
+ {
+ for (size_t i = 0; i < mTaskGroups.Length(); ++i) {
+ if (mTaskGroups[i]->mThread == aThread) {
+ DispatchTaskGroup(Move(mTaskGroups[i]));
+ mTaskGroups.RemoveElementAt(i);
+ return;
+ }
+ }
+ }
+
+private:
+
+ struct PerThreadTaskGroup
+ {
+ public:
+ explicit PerThreadTaskGroup(AbstractThread* aThread)
+ : mThread(aThread), mFailureHandling(AbstractThread::DontAssertDispatchSuccess)
+ {
+ MOZ_COUNT_CTOR(PerThreadTaskGroup);
+ }
+
+ ~PerThreadTaskGroup() { MOZ_COUNT_DTOR(PerThreadTaskGroup); }
+
+ RefPtr<AbstractThread> mThread;
+ nsTArray<nsCOMPtr<nsIRunnable>> mStateChangeTasks;
+ nsTArray<nsCOMPtr<nsIRunnable>> mRegularTasks;
+ AbstractThread::DispatchFailureHandling mFailureHandling;
+ };
+
+ class TaskGroupRunnable : public Runnable
+ {
+ public:
+ explicit TaskGroupRunnable(UniquePtr<PerThreadTaskGroup>&& aTasks) : mTasks(Move(aTasks)) {}
+
+ NS_IMETHOD Run() override
+ {
+ // State change tasks get run all together before any code is run, so
+ // that all state changes are made in an atomic unit.
+ for (size_t i = 0; i < mTasks->mStateChangeTasks.Length(); ++i) {
+ mTasks->mStateChangeTasks[i]->Run();
+ }
+
+ // Once the state changes have completed, drain any direct tasks
+ // generated by those state changes (i.e. watcher notification tasks).
+ // This needs to be outside the loop because we don't want to run code
+ // that might observe intermediate states.
+ MaybeDrainDirectTasks();
+
+ for (size_t i = 0; i < mTasks->mRegularTasks.Length(); ++i) {
+ mTasks->mRegularTasks[i]->Run();
+
+ // Scope direct tasks tightly to the task that generated them.
+ MaybeDrainDirectTasks();
+ }
+
+ return NS_OK;
+ }
+
+ private:
+ void MaybeDrainDirectTasks()
+ {
+ AbstractThread* currentThread = AbstractThread::GetCurrent();
+ if (currentThread) {
+ currentThread->TailDispatcher().DrainDirectTasks();
+ }
+ }
+
+ UniquePtr<PerThreadTaskGroup> mTasks;
+ };
+
+ PerThreadTaskGroup& EnsureTaskGroup(AbstractThread* aThread)
+ {
+ PerThreadTaskGroup* existing = GetTaskGroup(aThread);
+ if (existing) {
+ return *existing;
+ }
+
+ mTaskGroups.AppendElement(new PerThreadTaskGroup(aThread));
+ return *mTaskGroups.LastElement();
+ }
+
+ PerThreadTaskGroup* GetTaskGroup(AbstractThread* aThread)
+ {
+ for (size_t i = 0; i < mTaskGroups.Length(); ++i) {
+ if (mTaskGroups[i]->mThread == aThread) {
+ return mTaskGroups[i].get();
+ }
+ }
+
+ // Not found.
+ return nullptr;
+ }
+
+ void DispatchTaskGroup(UniquePtr<PerThreadTaskGroup> aGroup)
+ {
+ RefPtr<AbstractThread> thread = aGroup->mThread;
+
+ AbstractThread::DispatchFailureHandling failureHandling = aGroup->mFailureHandling;
+ AbstractThread::DispatchReason reason = mIsTailDispatcher ? AbstractThread::TailDispatch
+ : AbstractThread::NormalDispatch;
+ nsCOMPtr<nsIRunnable> r = new TaskGroupRunnable(Move(aGroup));
+ thread->Dispatch(r.forget(), failureHandling, reason);
+ }
+
+ // Direct tasks. We use a Maybe<> because (a) this class is hot, (b)
+ // mDirectTasks often doesn't get anything put into it, and (c) the
+ // std::queue implementation in GNU libstdc++ does two largish heap
+ // allocations when creating a new std::queue.
+ mozilla::Maybe<std::queue<nsCOMPtr<nsIRunnable>>> mDirectTasks;
+
+ // Task groups, organized by thread.
+ nsTArray<UniquePtr<PerThreadTaskGroup>> mTaskGroups;
+
+ // True if this TaskDispatcher represents the tail dispatcher for the thread
+ // upon which it runs.
+ const bool mIsTailDispatcher;
+};
+
+// Little utility class to allow declaring AutoTaskDispatcher as a default
+// parameter for methods that take a TaskDispatcher&.
+template<typename T>
+class PassByRef
+{
+public:
+ PassByRef() {}
+ operator T&() { return mVal; }
+private:
+ T mVal;
+};
+
+} // namespace mozilla
+
+#endif
diff --git a/xpcom/threads/TaskQueue.cpp b/xpcom/threads/TaskQueue.cpp
new file mode 100644
index 000000000..2e593a773
--- /dev/null
+++ b/xpcom/threads/TaskQueue.cpp
@@ -0,0 +1,271 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/TaskQueue.h"
+
+#include "nsIEventTarget.h"
+#include "nsThreadUtils.h"
+
+namespace mozilla {
+
+class TaskQueue::EventTargetWrapper final : public nsIEventTarget
+{
+ RefPtr<TaskQueue> mTaskQueue;
+
+ ~EventTargetWrapper()
+ {
+ }
+
+public:
+ explicit EventTargetWrapper(TaskQueue* aTaskQueue)
+ : mTaskQueue(aTaskQueue)
+ {
+ MOZ_ASSERT(mTaskQueue);
+ }
+
+ NS_IMETHOD
+ DispatchFromScript(nsIRunnable* aEvent, uint32_t aFlags) override
+ {
+ nsCOMPtr<nsIRunnable> ref = aEvent;
+ return Dispatch(ref.forget(), aFlags);
+ }
+
+ NS_IMETHOD
+ Dispatch(already_AddRefed<nsIRunnable> aEvent, uint32_t aFlags) override
+ {
+ nsCOMPtr<nsIRunnable> runnable = aEvent;
+ MonitorAutoLock mon(mTaskQueue->mQueueMonitor);
+ return mTaskQueue->DispatchLocked(/* passed by ref */runnable,
+ DontAssertDispatchSuccess,
+ NormalDispatch);
+ }
+
+ NS_IMETHOD
+ DelayedDispatch(already_AddRefed<nsIRunnable>, uint32_t aFlags) override
+ {
+ return NS_ERROR_NOT_IMPLEMENTED;
+ }
+
+ NS_IMETHOD
+ IsOnCurrentThread(bool* aResult) override
+ {
+ *aResult = mTaskQueue->IsCurrentThreadIn();
+ return NS_OK;
+ }
+
+ NS_DECL_THREADSAFE_ISUPPORTS
+};
+
+NS_IMPL_ISUPPORTS(TaskQueue::EventTargetWrapper, nsIEventTarget)
+
+TaskQueue::TaskQueue(already_AddRefed<nsIEventTarget> aTarget,
+ bool aRequireTailDispatch)
+ : AbstractThread(aRequireTailDispatch)
+ , mTarget(aTarget)
+ , mQueueMonitor("TaskQueue::Queue")
+ , mTailDispatcher(nullptr)
+ , mIsRunning(false)
+ , mIsShutdown(false)
+{
+ MOZ_COUNT_CTOR(TaskQueue);
+}
+
+TaskQueue::~TaskQueue()
+{
+ MonitorAutoLock mon(mQueueMonitor);
+ MOZ_ASSERT(mIsShutdown);
+ MOZ_COUNT_DTOR(TaskQueue);
+}
+
+TaskDispatcher&
+TaskQueue::TailDispatcher()
+{
+ MOZ_ASSERT(IsCurrentThreadIn());
+ MOZ_ASSERT(mTailDispatcher);
+ return *mTailDispatcher;
+}
+
+// Note aRunnable is passed by ref to support conditional ownership transfer.
+// See Dispatch() in TaskQueue.h for more details.
+nsresult
+TaskQueue::DispatchLocked(nsCOMPtr<nsIRunnable>& aRunnable,
+ DispatchFailureHandling aFailureHandling,
+ DispatchReason aReason)
+{
+ mQueueMonitor.AssertCurrentThreadOwns();
+ if (mIsShutdown) {
+ return NS_ERROR_FAILURE;
+ }
+
+ AbstractThread* currentThread;
+ if (aReason != TailDispatch && (currentThread = GetCurrent()) && RequiresTailDispatch(currentThread)) {
+ currentThread->TailDispatcher().AddTask(this, aRunnable.forget(), aFailureHandling);
+ return NS_OK;
+ }
+
+ mTasks.push(aRunnable.forget());
+ if (mIsRunning) {
+ return NS_OK;
+ }
+ RefPtr<nsIRunnable> runner(new Runner(this));
+ nsresult rv = mTarget->Dispatch(runner.forget(), NS_DISPATCH_NORMAL);
+ if (NS_FAILED(rv)) {
+ NS_WARNING("Failed to dispatch runnable to run TaskQueue");
+ return rv;
+ }
+ mIsRunning = true;
+
+ return NS_OK;
+}
+
+void
+TaskQueue::AwaitIdle()
+{
+ MonitorAutoLock mon(mQueueMonitor);
+ AwaitIdleLocked();
+}
+
+void
+TaskQueue::AwaitIdleLocked()
+{
+ // Make sure there are no tasks for this queue waiting in the caller's tail
+ // dispatcher.
+ MOZ_ASSERT_IF(AbstractThread::GetCurrent(),
+ !AbstractThread::GetCurrent()->HasTailTasksFor(this));
+
+ mQueueMonitor.AssertCurrentThreadOwns();
+ MOZ_ASSERT(mIsRunning || mTasks.empty());
+ while (mIsRunning) {
+ mQueueMonitor.Wait();
+ }
+}
+
+void
+TaskQueue::AwaitShutdownAndIdle()
+{
+ MOZ_ASSERT(!IsCurrentThreadIn());
+ // Make sure there are no tasks for this queue waiting in the caller's tail
+ // dispatcher.
+ MOZ_ASSERT_IF(AbstractThread::GetCurrent(),
+ !AbstractThread::GetCurrent()->HasTailTasksFor(this));
+
+ MonitorAutoLock mon(mQueueMonitor);
+ while (!mIsShutdown) {
+ mQueueMonitor.Wait();
+ }
+ AwaitIdleLocked();
+}
+
+RefPtr<ShutdownPromise>
+TaskQueue::BeginShutdown()
+{
+ // Dispatch any tasks for this queue waiting in the caller's tail dispatcher,
+ // since this is the last opportunity to do so.
+ if (AbstractThread* currentThread = AbstractThread::GetCurrent()) {
+ currentThread->TailDispatchTasksFor(this);
+ }
+
+ MonitorAutoLock mon(mQueueMonitor);
+ mIsShutdown = true;
+ RefPtr<ShutdownPromise> p = mShutdownPromise.Ensure(__func__);
+ MaybeResolveShutdown();
+ mon.NotifyAll();
+ return p;
+}
+
+bool
+TaskQueue::IsEmpty()
+{
+ MonitorAutoLock mon(mQueueMonitor);
+ return mTasks.empty();
+}
+
+uint32_t
+TaskQueue::ImpreciseLengthForHeuristics()
+{
+ MonitorAutoLock mon(mQueueMonitor);
+ return mTasks.size();
+}
+
+bool
+TaskQueue::IsCurrentThreadIn()
+{
+ bool in = NS_GetCurrentThread() == mRunningThread;
+ return in;
+}
+
+already_AddRefed<nsIEventTarget>
+TaskQueue::WrapAsEventTarget()
+{
+ nsCOMPtr<nsIEventTarget> ref = new EventTargetWrapper(this);
+ return ref.forget();
+}
+
+nsresult
+TaskQueue::Runner::Run()
+{
+ RefPtr<nsIRunnable> event;
+ {
+ MonitorAutoLock mon(mQueue->mQueueMonitor);
+ MOZ_ASSERT(mQueue->mIsRunning);
+ if (mQueue->mTasks.size() == 0) {
+ mQueue->mIsRunning = false;
+ mQueue->MaybeResolveShutdown();
+ mon.NotifyAll();
+ return NS_OK;
+ }
+ event = mQueue->mTasks.front().forget();
+ mQueue->mTasks.pop();
+ }
+ MOZ_ASSERT(event);
+
+ // Note that dropping the queue monitor before running the task, and
+ // taking the monitor again after the task has run ensures we have memory
+ // fences enforced. This means that if the object we're calling wasn't
+ // designed to be threadsafe, it will be, provided we're only calling it
+ // in this task queue.
+ {
+ AutoTaskGuard g(mQueue);
+ event->Run();
+ }
+
+ // Drop the reference to event. The event will hold a reference to the
+ // object it's calling, and we don't want to keep it alive; it may be
+ // making assumptions about what holds references to it. This is especially
+ // the case if the object is waiting for us to shut down, so that it
+ // can shut down (like in the MediaDecoderStateMachine's SHUTDOWN case).
+ event = nullptr;
+
+ {
+ MonitorAutoLock mon(mQueue->mQueueMonitor);
+ if (mQueue->mTasks.size() == 0) {
+ // No more events to run. Exit the task runner.
+ mQueue->mIsRunning = false;
+ mQueue->MaybeResolveShutdown();
+ mon.NotifyAll();
+ return NS_OK;
+ }
+ }
+
+ // There's at least one more event that we can run. Dispatch this Runner
+ // to the target again to ensure it runs again. Note that we don't just
+ // run in a loop here so that we don't hog the target. This means we may
+ // run on another thread next time, but we rely on the memory fences from
+ // mQueueMonitor for thread safety of non-threadsafe tasks.
+ nsresult rv = mQueue->mTarget->Dispatch(this, NS_DISPATCH_AT_END);
+ if (NS_FAILED(rv)) {
+ // Failed to dispatch, shutdown!
+ MonitorAutoLock mon(mQueue->mQueueMonitor);
+ mQueue->mIsRunning = false;
+ mQueue->mIsShutdown = true;
+ mQueue->MaybeResolveShutdown();
+ mon.NotifyAll();
+ }
+
+ return NS_OK;
+}
+
+} // namespace mozilla
diff --git a/xpcom/threads/TaskQueue.h b/xpcom/threads/TaskQueue.h
new file mode 100644
index 000000000..aafd206a7
--- /dev/null
+++ b/xpcom/threads/TaskQueue.h
@@ -0,0 +1,203 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef TaskQueue_h_
+#define TaskQueue_h_
+
+#include "mozilla/Monitor.h"
+#include "mozilla/MozPromise.h"
+#include "mozilla/RefPtr.h"
+#include "mozilla/TaskDispatcher.h"
+#include "mozilla/Unused.h"
+
+#include <queue>
+
+#include "nsThreadUtils.h"
+
+class nsIEventTarget;
+class nsIRunnable;
+
+namespace mozilla {
+
+typedef MozPromise<bool, bool, false> ShutdownPromise;
+
+// Abstracts executing runnables in order on an arbitrary event target. The
+// runnables dispatched to the TaskQueue will be executed in the order in which
+// they're received, and are guaranteed to not be executed concurrently.
+// They may be executed on different threads, and a memory barrier is used
+// to make this threadsafe for objects that aren't already threadsafe.
+//
+// Note, since a TaskQueue can also be converted to an nsIEventTarget using
+// WrapAsEventTarget(), it's possible to construct a hierarchy of TaskQueues.
+// Consider these three TaskQueues:
+//
+// TQ1 dispatches to the main thread
+// TQ2 dispatches to TQ1
+// TQ3 dispatches to TQ1
+//
+// This ensures there is only ever a single runnable from the entire chain on
+// the main thread. It also ensures that TQ2 and TQ3 only have a single runnable
+// in TQ1 at any time.
+//
+// This arrangement lets you prioritize work by dispatching runnables directly
+// to TQ1. You can issue many runnables for important work. Meanwhile the TQ2
+// and TQ3 work will always execute at most one runnable and then yield.
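+//
+// An illustrative sketch (not part of the original file); `pool` is a
+// hypothetical nsCOMPtr<nsIEventTarget>, e.g. obtained from SharedThreadPool:
+//
+//   RefPtr<TaskQueue> queue = new TaskQueue(pool.forget());
+//   queue->Dispatch(NS_NewRunnableFunction([] { /* runs in FIFO order */ }));
+//   RefPtr<ShutdownPromise> done = queue->BeginShutdown();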
+class TaskQueue : public AbstractThread
+{
+ class EventTargetWrapper;
+
+public:
+ explicit TaskQueue(already_AddRefed<nsIEventTarget> aTarget,
+ bool aSupportsTailDispatch = false);
+
+ TaskDispatcher& TailDispatcher() override;
+
+ TaskQueue* AsTaskQueue() override { return this; }
+
+ void Dispatch(already_AddRefed<nsIRunnable> aRunnable,
+ DispatchFailureHandling aFailureHandling = AssertDispatchSuccess,
+ DispatchReason aReason = NormalDispatch) override
+ {
+ nsCOMPtr<nsIRunnable> r = aRunnable;
+ {
+ MonitorAutoLock mon(mQueueMonitor);
+ nsresult rv = DispatchLocked(/* passed by ref */r, aFailureHandling, aReason);
+ MOZ_DIAGNOSTIC_ASSERT(aFailureHandling == DontAssertDispatchSuccess || NS_SUCCEEDED(rv));
+ Unused << rv;
+ }
+ // If the ownership of |r| is not transferred in DispatchLocked() due to
+ // dispatch failure, it will be deleted here outside the lock. We do so
+ // since the destructor of the runnable might access TaskQueue and result
+ // in deadlocks.
+ }
+
+ // Puts the queue in a shutdown state and returns immediately. The queue will
+ // remain alive at least until all the events are drained, because the Runners
+ // hold a strong reference to the task queue, and one of them is always held
+ // by the target event queue when the task queue is non-empty.
+ //
+ // The returned promise is resolved when the queue goes empty.
+ RefPtr<ShutdownPromise> BeginShutdown();
+
+ // Blocks until all tasks finish executing.
+ void AwaitIdle();
+
+ // Blocks until the queue is flagged for shutdown and all tasks have finished
+ // executing.
+ void AwaitShutdownAndIdle();
+
+ bool IsEmpty();
+ uint32_t ImpreciseLengthForHeuristics();
+
+ // Returns true if the current thread is currently running a Runnable in
+ // the task queue.
+ bool IsCurrentThreadIn() override;
+
+ // Create a new nsIEventTarget wrapper object that dispatches to this
+ // TaskQueue.
+ already_AddRefed<nsIEventTarget> WrapAsEventTarget();
+
+protected:
+ virtual ~TaskQueue();
+
+
+ // Blocks until all tasks finish executing. Called internally by methods
+ // that need to wait until the task queue is idle.
+ // mQueueMonitor must be held.
+ void AwaitIdleLocked();
+
+ nsresult DispatchLocked(nsCOMPtr<nsIRunnable>& aRunnable,
+ DispatchFailureHandling aFailureHandling,
+ DispatchReason aReason = NormalDispatch);
+
+ void MaybeResolveShutdown()
+ {
+ mQueueMonitor.AssertCurrentThreadOwns();
+ if (mIsShutdown && !mIsRunning) {
+ mShutdownPromise.ResolveIfExists(true, __func__);
+ mTarget = nullptr;
+ }
+ }
+
+ nsCOMPtr<nsIEventTarget> mTarget;
+
+ // Monitor that protects the queue and mIsRunning.
+ Monitor mQueueMonitor;
+
+ // Queue of tasks to run.
+ std::queue<nsCOMPtr<nsIRunnable>> mTasks;
+
+ // The thread currently running the task queue. We store a reference
+ // to this so that IsCurrentThreadIn() can tell if the current thread
+ // is the thread currently running in the task queue.
+ //
+ // This may be read on any thread, but may only be written on mRunningThread.
+ // The thread can't die while we're running in it, and we only use it for
+ // pointer-comparison with the current thread anyway - so we make it atomic
+ // and don't refcount it.
+ Atomic<nsIThread*> mRunningThread;
+
+ // RAII class that gets instantiated for each dispatched task.
+ class AutoTaskGuard : public AutoTaskDispatcher
+ {
+ public:
+ explicit AutoTaskGuard(TaskQueue* aQueue)
+ : AutoTaskDispatcher(/* aIsTailDispatcher = */ true), mQueue(aQueue)
+ , mLastCurrentThread(nullptr)
+ {
+ // NB: We don't hold the lock to aQueue here. Don't do anything that
+ // might require it.
+ MOZ_ASSERT(!mQueue->mTailDispatcher);
+ mQueue->mTailDispatcher = this;
+
+ mLastCurrentThread = sCurrentThreadTLS.get();
+ sCurrentThreadTLS.set(aQueue);
+
+ MOZ_ASSERT(mQueue->mRunningThread == nullptr);
+ mQueue->mRunningThread = NS_GetCurrentThread();
+ }
+
+ ~AutoTaskGuard()
+ {
+ DrainDirectTasks();
+
+ MOZ_ASSERT(mQueue->mRunningThread == NS_GetCurrentThread());
+ mQueue->mRunningThread = nullptr;
+
+ sCurrentThreadTLS.set(mLastCurrentThread);
+ mQueue->mTailDispatcher = nullptr;
+ }
+
+ private:
+ TaskQueue* mQueue;
+ AbstractThread* mLastCurrentThread;
+ };
+
+ TaskDispatcher* mTailDispatcher;
+
+ // True if we've dispatched an event to the target to execute events from
+ // the queue.
+ bool mIsRunning;
+
+ // True if we've started our shutdown process.
+ bool mIsShutdown;
+ MozPromiseHolder<ShutdownPromise> mShutdownPromise;
+
+ class Runner : public Runnable {
+ public:
+ explicit Runner(TaskQueue* aQueue)
+ : mQueue(aQueue)
+ {
+ }
+ NS_IMETHOD Run() override;
+ private:
+ RefPtr<TaskQueue> mQueue;
+ };
+};
+
+} // namespace mozilla
+
+#endif // TaskQueue_h_
diff --git a/xpcom/threads/ThreadStackHelper.cpp b/xpcom/threads/ThreadStackHelper.cpp
new file mode 100644
index 000000000..d31bf6359
--- /dev/null
+++ b/xpcom/threads/ThreadStackHelper.cpp
@@ -0,0 +1,726 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "ThreadStackHelper.h"
+#include "MainThreadUtils.h"
+#include "nsJSPrincipals.h"
+#include "nsScriptSecurityManager.h"
+#include "jsfriendapi.h"
+#ifdef MOZ_THREADSTACKHELPER_NATIVE
+#include "shared-libraries.h"
+#endif
+
+#include "mozilla/Assertions.h"
+#include "mozilla/Attributes.h"
+#include "mozilla/IntegerPrintfMacros.h"
+#include "mozilla/Move.h"
+#include "mozilla/Scoped.h"
+#include "mozilla/UniquePtr.h"
+#include "mozilla/MemoryChecking.h"
+#include "mozilla/Sprintf.h"
+
+#ifdef __GNUC__
+# pragma GCC diagnostic push
+# pragma GCC diagnostic ignored "-Wshadow"
+#endif
+
+#if defined(MOZ_VALGRIND)
+# include <valgrind/valgrind.h>
+#endif
+
+#include <string.h>
+#include <vector>
+#include <cstdlib>
+
+#ifdef XP_LINUX
+#include <ucontext.h>
+#include <unistd.h>
+#include <sys/syscall.h>
+#endif
+
+#ifdef __GNUC__
+# pragma GCC diagnostic pop // -Wshadow
+#endif
+
+#if defined(XP_LINUX) || defined(XP_MACOSX)
+#include <pthread.h>
+#endif
+
+#ifdef ANDROID
+#ifndef SYS_gettid
+#define SYS_gettid __NR_gettid
+#endif
+#if defined(__arm__) && !defined(__NR_rt_tgsigqueueinfo)
+// Some NDKs don't define this constant even though the kernel supports it.
+#define __NR_rt_tgsigqueueinfo (__NR_SYSCALL_BASE+363)
+#endif
+#ifndef SYS_rt_tgsigqueueinfo
+#define SYS_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
+#endif
+#endif
+
+#ifdef MOZ_THREADSTACKHELPER_NATIVE
+#if defined(MOZ_THREADSTACKHELPER_X86) || \
+ defined(MOZ_THREADSTACKHELPER_X64) || \
+ defined(MOZ_THREADSTACKHELPER_ARM)
+// On these architectures, the stack grows downwards (toward lower addresses).
+#define MOZ_THREADSTACKHELPER_STACK_GROWS_DOWN
+#else
+#error "Unsupported architecture"
+#endif
+#endif // MOZ_THREADSTACKHELPER_NATIVE
+
+namespace mozilla {
+
+void
+ThreadStackHelper::Startup()
+{
+#if defined(XP_LINUX)
+ MOZ_ASSERT(NS_IsMainThread());
+ if (!sInitialized) {
+ // TODO: centralize signal number allocation
+ sFillStackSignum = SIGRTMIN + 4;
+ if (sFillStackSignum > SIGRTMAX) {
+ // Leave uninitialized
+ MOZ_ASSERT(false);
+ return;
+ }
+ struct sigaction sigact = {};
+ sigact.sa_sigaction = FillStackHandler;
+ sigemptyset(&sigact.sa_mask);
+ sigact.sa_flags = SA_SIGINFO | SA_RESTART;
+ MOZ_ALWAYS_TRUE(!::sigaction(sFillStackSignum, &sigact, nullptr));
+ }
+ sInitialized++;
+#endif
+}
+
+void
+ThreadStackHelper::Shutdown()
+{
+#if defined(XP_LINUX)
+ MOZ_ASSERT(NS_IsMainThread());
+ if (sInitialized == 1) {
+ struct sigaction sigact = {};
+ sigact.sa_handler = SIG_DFL;
+ MOZ_ALWAYS_TRUE(!::sigaction(sFillStackSignum, &sigact, nullptr));
+ }
+ sInitialized--;
+#endif
+}
+
+ThreadStackHelper::ThreadStackHelper()
+ : mStackToFill(nullptr)
+#ifdef MOZ_THREADSTACKHELPER_PSEUDO
+ , mPseudoStack(mozilla_get_pseudo_stack())
+#ifdef MOZ_THREADSTACKHELPER_NATIVE
+ , mContextToFill(nullptr)
+#endif
+ , mMaxStackSize(Stack::sMaxInlineStorage)
+ , mMaxBufferSize(512)
+#endif
+{
+#if defined(XP_LINUX)
+ MOZ_ALWAYS_TRUE(!::sem_init(&mSem, 0, 0));
+ mThreadID = ::syscall(SYS_gettid);
+#elif defined(XP_WIN)
+ mInitialized = !!::DuplicateHandle(
+ ::GetCurrentProcess(), ::GetCurrentThread(),
+ ::GetCurrentProcess(), &mThreadID,
+ THREAD_SUSPEND_RESUME
+#ifdef MOZ_THREADSTACKHELPER_NATIVE
+ | THREAD_GET_CONTEXT | THREAD_QUERY_INFORMATION
+#endif
+ , FALSE, 0);
+ MOZ_ASSERT(mInitialized);
+#elif defined(XP_MACOSX)
+ mThreadID = mach_thread_self();
+#endif
+
+#ifdef MOZ_THREADSTACKHELPER_NATIVE
+ GetThreadStackBase();
+#endif
+}
+
+ThreadStackHelper::~ThreadStackHelper()
+{
+#if defined(XP_LINUX)
+ MOZ_ALWAYS_TRUE(!::sem_destroy(&mSem));
+#elif defined(XP_WIN)
+ if (mInitialized) {
+ MOZ_ALWAYS_TRUE(!!::CloseHandle(mThreadID));
+ }
+#endif
+}
+
+#ifdef MOZ_THREADSTACKHELPER_NATIVE
+void ThreadStackHelper::GetThreadStackBase()
+{
+ mThreadStackBase = 0;
+
+#if defined(XP_LINUX)
+ void* stackAddr;
+ size_t stackSize;
+ ::pthread_t pthr = ::pthread_self();
+ ::pthread_attr_t pthr_attr;
+ NS_ENSURE_TRUE_VOID(!::pthread_getattr_np(pthr, &pthr_attr));
+ if (!::pthread_attr_getstack(&pthr_attr, &stackAddr, &stackSize)) {
+#ifdef MOZ_THREADSTACKHELPER_STACK_GROWS_DOWN
+ mThreadStackBase = intptr_t(stackAddr) + stackSize;
+#else
+ mThreadStackBase = intptr_t(stackAddr);
+#endif
+ }
+ MOZ_ALWAYS_TRUE(!::pthread_attr_destroy(&pthr_attr));
+
+#elif defined(XP_WIN)
+ ::MEMORY_BASIC_INFORMATION meminfo = {};
+ NS_ENSURE_TRUE_VOID(::VirtualQuery(&meminfo, &meminfo, sizeof(meminfo)));
+#ifdef MOZ_THREADSTACKHELPER_STACK_GROWS_DOWN
+ mThreadStackBase = intptr_t(meminfo.BaseAddress) + meminfo.RegionSize;
+#else
+ mThreadStackBase = intptr_t(meminfo.AllocationBase);
+#endif
+
+#elif defined(XP_MACOSX)
+ ::pthread_t pthr = ::pthread_self();
+ mThreadStackBase = intptr_t(::pthread_get_stackaddr_np(pthr));
+
+#else
+ #error "Unsupported platform"
+#endif // platform
+}
+#endif // MOZ_THREADSTACKHELPER_NATIVE
+
+namespace {
+template<typename T>
+class ScopedSetPtr
+{
+private:
+ T*& mPtr;
+public:
+ ScopedSetPtr(T*& p, T* val) : mPtr(p) { mPtr = val; }
+ ~ScopedSetPtr() { mPtr = nullptr; }
+};
+} // namespace
+
+void
+ThreadStackHelper::GetStack(Stack& aStack)
+{
+ // Always run PrepareStackBuffer first to clear aStack
+ if (!PrepareStackBuffer(aStack)) {
+ // Skip and return empty aStack
+ return;
+ }
+
+ ScopedSetPtr<Stack> stackPtr(mStackToFill, &aStack);
+
+#if defined(XP_LINUX)
+ if (!sInitialized) {
+ MOZ_ASSERT(false);
+ return;
+ }
+ siginfo_t uinfo = {};
+ uinfo.si_signo = sFillStackSignum;
+ uinfo.si_code = SI_QUEUE;
+ uinfo.si_pid = getpid();
+ uinfo.si_uid = getuid();
+ uinfo.si_value.sival_ptr = this;
+ if (::syscall(SYS_rt_tgsigqueueinfo, uinfo.si_pid,
+ mThreadID, sFillStackSignum, &uinfo)) {
+ // rt_tgsigqueueinfo was added in Linux 2.6.31.
+ // Could have failed because the syscall did not exist.
+ return;
+ }
+ MOZ_ALWAYS_TRUE(!::sem_wait(&mSem));
+
+#elif defined(XP_WIN)
+ if (!mInitialized) {
+ MOZ_ASSERT(false);
+ return;
+ }
+ if (::SuspendThread(mThreadID) == DWORD(-1)) {
+ MOZ_ASSERT(false);
+ return;
+ }
+
+ // SuspendThread is asynchronous, so the thread may still be running. Use
+ // GetThreadContext to ensure it's really suspended.
+ // See https://blogs.msdn.microsoft.com/oldnewthing/20150205-00/?p=44743.
+ CONTEXT context;
+ context.ContextFlags = CONTEXT_CONTROL;
+ if (::GetThreadContext(mThreadID, &context)) {
+ FillStackBuffer();
+ FillThreadContext();
+ }
+
+ MOZ_ALWAYS_TRUE(::ResumeThread(mThreadID) != DWORD(-1));
+
+#elif defined(XP_MACOSX)
+# if defined(MOZ_VALGRIND) && defined(RUNNING_ON_VALGRIND)
+ if (RUNNING_ON_VALGRIND) {
+ /* thread_suspend and thread_resume sometimes hang when running on Valgrind,
+ for unknown reasons. So, just avoid them. See bug 1100911. */
+ return;
+ }
+# endif
+
+ if (::thread_suspend(mThreadID) != KERN_SUCCESS) {
+ MOZ_ASSERT(false);
+ return;
+ }
+
+ FillStackBuffer();
+ FillThreadContext();
+
+ MOZ_ALWAYS_TRUE(::thread_resume(mThreadID) == KERN_SUCCESS);
+
+#endif
+}
+
+#ifdef MOZ_THREADSTACKHELPER_NATIVE
+class ThreadStackHelper::ThreadContext final
+{
+public:
+ // TODO: provide per-platform definition of Context.
+ typedef struct {} Context;
+
+ // Limit copied stack to 4kB
+ static const size_t kMaxStackSize = 0x1000;
+ // Limit unwound stack to 32 frames
+ static const unsigned int kMaxStackFrames = 32;
+ // Whether this structure contains valid data
+ bool mValid;
+ // Processor context
+ Context mContext;
+ // Stack area
+ UniquePtr<uint8_t[]> mStack;
+ // Start of stack area
+ uintptr_t mStackBase;
+ // Size of stack area
+ size_t mStackSize;
+ // End of stack area
+ const void* mStackEnd;
+
+ ThreadContext()
+ : mValid(false)
+ , mStackBase(0)
+ , mStackSize(0)
+ , mStackEnd(nullptr) {}
+};
+#endif // MOZ_THREADSTACKHELPER_NATIVE
+
+void
+ThreadStackHelper::GetNativeStack(Stack& aStack)
+{
+#ifdef MOZ_THREADSTACKHELPER_NATIVE
+ ThreadContext context;
+ context.mStack = MakeUnique<uint8_t[]>(ThreadContext::kMaxStackSize);
+
+ ScopedSetPtr<ThreadContext> contextPtr(mContextToFill, &context);
+
+ // Get pseudostack first and fill the thread context.
+ GetStack(aStack);
+ NS_ENSURE_TRUE_VOID(context.mValid);
+
+ // TODO: walk the saved stack frames.
+#endif // MOZ_THREADSTACKHELPER_NATIVE
+}
+
+#ifdef XP_LINUX
+
+int ThreadStackHelper::sInitialized;
+int ThreadStackHelper::sFillStackSignum;
+
+void
+ThreadStackHelper::FillStackHandler(int aSignal, siginfo_t* aInfo,
+ void* aContext)
+{
+ ThreadStackHelper* const helper =
+ reinterpret_cast<ThreadStackHelper*>(aInfo->si_value.sival_ptr);
+ helper->FillStackBuffer();
+ helper->FillThreadContext(aContext);
+ ::sem_post(&helper->mSem);
+}
+
+#endif // XP_LINUX
+
+bool
+ThreadStackHelper::PrepareStackBuffer(Stack& aStack)
+{
+ // Return false to skip getting the stack and return an empty stack
+ aStack.clear();
+#ifdef MOZ_THREADSTACKHELPER_PSEUDO
+ /* Normally, provided the profiler is enabled, it would be an error if we
+ don't have a pseudostack here (the thread probably forgot to call
+ profiler_register_thread). However, on B2G, profiling secondary threads
+ may be disabled despite the profiler being enabled. This is by design
+ and is not an error. */
+#ifdef MOZ_WIDGET_GONK
+ if (!mPseudoStack) {
+ return false;
+ }
+#endif
+ MOZ_ASSERT(mPseudoStack);
+ if (!aStack.reserve(mMaxStackSize) ||
+ !aStack.reserve(aStack.capacity()) || // reserve up to the capacity
+ !aStack.EnsureBufferCapacity(mMaxBufferSize)) {
+ return false;
+ }
+ return true;
+#else
+ return false;
+#endif
+}
+
+#ifdef MOZ_THREADSTACKHELPER_PSEUDO
+
+namespace {
+
+bool
+IsChromeJSScript(JSScript* aScript)
+{
+ // May be called from another thread or inside a signal handler.
+ // We assume querying the script is safe but we must not manipulate it.
+
+ nsIScriptSecurityManager* const secman =
+ nsScriptSecurityManager::GetScriptSecurityManager();
+ NS_ENSURE_TRUE(secman, false);
+
+ JSPrincipals* const principals = JS_GetScriptPrincipals(aScript);
+ return secman->IsSystemPrincipal(nsJSPrincipals::get(principals));
+}
+
+// Get the full path after the URI scheme, if the URI matches the scheme.
+// For example, GetFullPathForScheme("a://b/c/d/e", "a://") returns "b/c/d/e".
+template <size_t LEN>
+const char*
+GetFullPathForScheme(const char* filename, const char (&scheme)[LEN]) {
+ // Account for the null terminator included in LEN.
+ if (!strncmp(filename, scheme, LEN - 1)) {
+ return filename + LEN - 1;
+ }
+ return nullptr;
+}
+
+// Get the full path after a URI component, if the URI contains the component.
+// For example, GetPathAfterComponent("a://b/c/d/e", "/c/") returns "d/e".
+template <size_t LEN>
+const char*
+GetPathAfterComponent(const char* filename, const char (&component)[LEN]) {
+ const char* found = nullptr;
+ const char* next = strstr(filename, component);
+ while (next) {
+ // Move 'found' to end of the component, after the separator '/'.
+ // 'LEN - 1' accounts for the null terminator included in LEN,
+ found = next + LEN - 1;
+ // Resume searching before the separator '/'.
+ next = strstr(found - 1, component);
+ }
+ return found;
+}
+
+} // namespace
+
+const char*
+ThreadStackHelper::AppendJSEntry(const volatile StackEntry* aEntry,
+ intptr_t& aAvailableBufferSize,
+ const char* aPrevLabel)
+{
+ // May be called from another thread or inside a signal handler.
+ // We assume querying the script is safe but we must not manipulate it.
+ // Also, we must not allocate any memory from the heap.
+ MOZ_ASSERT(aEntry->isJs());
+
+ const char* label;
+ JSScript* script = aEntry->script();
+ if (!script) {
+ label = "(profiling suppressed)";
+ } else if (IsChromeJSScript(aEntry->script())) {
+ const char* filename = JS_GetScriptFilename(aEntry->script());
+ const unsigned lineno = JS_PCToLineNumber(aEntry->script(), aEntry->pc());
+ MOZ_ASSERT(filename);
+
+ char buffer[128]; // Enough to fit longest js file name from the tree
+
+ // Some script names are in the form "foo -> bar -> baz".
+ // Here we find the origin of these redirected scripts.
+ const char* basename = GetPathAfterComponent(filename, " -> ");
+ if (basename) {
+ filename = basename;
+ }
+
+ basename = GetFullPathForScheme(filename, "chrome://");
+ if (!basename) {
+ basename = GetFullPathForScheme(filename, "resource://");
+ }
+ if (!basename) {
+ // If the (add-on) script is located under the {profile}/extensions
+ // directory, extract the path after the /extensions/ part.
+ basename = GetPathAfterComponent(filename, "/extensions/");
+ }
+ if (!basename) {
+ // Only keep the file base name for paths outside the above formats.
+ basename = strrchr(filename, '/');
+ basename = basename ? basename + 1 : filename;
+ // Look for Windows path separator as well.
+ filename = strrchr(basename, '\\');
+ if (filename) {
+ basename = filename + 1;
+ }
+ }
+
+ size_t len = SprintfLiteral(buffer, "%s:%u", basename, lineno);
+ if (len < sizeof(buffer)) {
+ if (mStackToFill->IsSameAsEntry(aPrevLabel, buffer)) {
+ return aPrevLabel;
+ }
+
+ // Keep track of the required buffer size
+ aAvailableBufferSize -= (len + 1);
+ if (aAvailableBufferSize >= 0) {
+ // Buffer is big enough.
+ return mStackToFill->InfallibleAppendViaBuffer(buffer, len);
+ }
+ // Buffer is not big enough; fall through to using static label below.
+ }
+ // snprintf failed or buffer is not big enough.
+ label = "(chrome script)";
+ } else {
+ label = "(content script)";
+ }
+
+ if (mStackToFill->IsSameAsEntry(aPrevLabel, label)) {
+ return aPrevLabel;
+ }
+ mStackToFill->infallibleAppend(label);
+ return label;
+}
+
+#endif // MOZ_THREADSTACKHELPER_PSEUDO
+
+void
+ThreadStackHelper::FillStackBuffer()
+{
+ MOZ_ASSERT(mStackToFill->empty());
+
+#ifdef MOZ_THREADSTACKHELPER_PSEUDO
+ size_t reservedSize = mStackToFill->capacity();
+ size_t reservedBufferSize = mStackToFill->AvailableBufferSize();
+ intptr_t availableBufferSize = intptr_t(reservedBufferSize);
+
+ // Go from front to back
+ const volatile StackEntry* entry = mPseudoStack->mStack;
+ const volatile StackEntry* end = entry + mPseudoStack->stackSize();
+ // Deduplicate identical, consecutive frames
+ const char* prevLabel = nullptr;
+ for (; reservedSize-- && entry != end; entry++) {
+ /* We only accept non-copy labels, including js::RunScript,
+ because we only want static labels in the hang stack. */
+ if (entry->isCopyLabel()) {
+ continue;
+ }
+ if (entry->isJs()) {
+ prevLabel = AppendJSEntry(entry, availableBufferSize, prevLabel);
+ continue;
+ }
+#ifdef MOZ_THREADSTACKHELPER_NATIVE
+ if (mContextToFill) {
+ mContextToFill->mStackEnd = entry->stackAddress();
+ }
+#endif
+ const char* const label = entry->label();
+ if (mStackToFill->IsSameAsEntry(prevLabel, label)) {
+ // Avoid duplicate labels to save space in the stack.
+ continue;
+ }
+ mStackToFill->infallibleAppend(label);
+ prevLabel = label;
+ }
+
+ // end != entry if we exited early due to not enough reserved frames.
+ // Expand the number of reserved frames for next time.
+ mMaxStackSize = mStackToFill->capacity() + (end - entry);
+
+ // availableBufferSize < 0 if we needed a larger buffer than we reserved.
+ // Calculate a new reserve size for next time.
+ if (availableBufferSize < 0) {
+ mMaxBufferSize = reservedBufferSize - availableBufferSize;
+ }
+#endif
+}
+
+MOZ_ASAN_BLACKLIST void
+ThreadStackHelper::FillThreadContext(void* aContext)
+{
+#ifdef MOZ_THREADSTACKHELPER_NATIVE
+ if (!mContextToFill) {
+ return;
+ }
+
+#if 0 // TODO: remove dependency on Breakpad structs.
+#if defined(XP_LINUX)
+ const ucontext_t& context = *reinterpret_cast<ucontext_t*>(aContext);
+#if defined(MOZ_THREADSTACKHELPER_X86)
+ mContextToFill->mContext.context_flags = MD_CONTEXT_X86_FULL;
+ mContextToFill->mContext.edi = context.uc_mcontext.gregs[REG_EDI];
+ mContextToFill->mContext.esi = context.uc_mcontext.gregs[REG_ESI];
+ mContextToFill->mContext.ebx = context.uc_mcontext.gregs[REG_EBX];
+ mContextToFill->mContext.edx = context.uc_mcontext.gregs[REG_EDX];
+ mContextToFill->mContext.ecx = context.uc_mcontext.gregs[REG_ECX];
+ mContextToFill->mContext.eax = context.uc_mcontext.gregs[REG_EAX];
+ mContextToFill->mContext.ebp = context.uc_mcontext.gregs[REG_EBP];
+ mContextToFill->mContext.eip = context.uc_mcontext.gregs[REG_EIP];
+ mContextToFill->mContext.eflags = context.uc_mcontext.gregs[REG_EFL];
+ mContextToFill->mContext.esp = context.uc_mcontext.gregs[REG_ESP];
+#elif defined(MOZ_THREADSTACKHELPER_X64)
+ mContextToFill->mContext.context_flags = MD_CONTEXT_AMD64_FULL;
+ mContextToFill->mContext.eflags = uint32_t(context.uc_mcontext.gregs[REG_EFL]);
+ mContextToFill->mContext.rax = context.uc_mcontext.gregs[REG_RAX];
+ mContextToFill->mContext.rcx = context.uc_mcontext.gregs[REG_RCX];
+ mContextToFill->mContext.rdx = context.uc_mcontext.gregs[REG_RDX];
+ mContextToFill->mContext.rbx = context.uc_mcontext.gregs[REG_RBX];
+ mContextToFill->mContext.rsp = context.uc_mcontext.gregs[REG_RSP];
+ mContextToFill->mContext.rbp = context.uc_mcontext.gregs[REG_RBP];
+ mContextToFill->mContext.rsi = context.uc_mcontext.gregs[REG_RSI];
+ mContextToFill->mContext.rdi = context.uc_mcontext.gregs[REG_RDI];
+ memcpy(&mContextToFill->mContext.r8,
+ &context.uc_mcontext.gregs[REG_R8], 8 * sizeof(int64_t));
+ mContextToFill->mContext.rip = context.uc_mcontext.gregs[REG_RIP];
+#elif defined(MOZ_THREADSTACKHELPER_ARM)
+ mContextToFill->mContext.context_flags = MD_CONTEXT_ARM_FULL;
+ memcpy(&mContextToFill->mContext.iregs[0],
+ &context.uc_mcontext.arm_r0, 17 * sizeof(int32_t));
+#else
+ #error "Unsupported architecture"
+#endif // architecture
+
+#elif defined(XP_WIN)
+ // Breakpad context struct is based off of the Windows CONTEXT struct,
+ // so we assume they are the same; do some sanity checks to make sure.
+ static_assert(sizeof(ThreadContext::Context) == sizeof(::CONTEXT),
+ "Context struct mismatch");
+ static_assert(offsetof(ThreadContext::Context, context_flags) ==
+ offsetof(::CONTEXT, ContextFlags),
+ "Context struct mismatch");
+ mContextToFill->mContext.context_flags = CONTEXT_FULL;
+ NS_ENSURE_TRUE_VOID(::GetThreadContext(mThreadID,
+ reinterpret_cast<::CONTEXT*>(&mContextToFill->mContext)));
+
+#elif defined(XP_MACOSX)
+#if defined(MOZ_THREADSTACKHELPER_X86)
+ const thread_state_flavor_t flavor = x86_THREAD_STATE32;
+ x86_thread_state32_t state = {};
+ mach_msg_type_number_t count = x86_THREAD_STATE32_COUNT;
+#elif defined(MOZ_THREADSTACKHELPER_X64)
+ const thread_state_flavor_t flavor = x86_THREAD_STATE64;
+ x86_thread_state64_t state = {};
+ mach_msg_type_number_t count = x86_THREAD_STATE64_COUNT;
+#elif defined(MOZ_THREADSTACKHELPER_ARM)
+ const thread_state_flavor_t flavor = ARM_THREAD_STATE;
+ arm_thread_state_t state = {};
+ mach_msg_type_number_t count = ARM_THREAD_STATE_COUNT;
+#endif
+ NS_ENSURE_TRUE_VOID(KERN_SUCCESS == ::thread_get_state(
+ mThreadID, flavor, reinterpret_cast<thread_state_t>(&state), &count));
+#if __DARWIN_UNIX03
+#define GET_REGISTER(s, r) ((s).__##r)
+#else
+#define GET_REGISTER(s, r) ((s).r)
+#endif
+#if defined(MOZ_THREADSTACKHELPER_X86)
+ mContextToFill->mContext.context_flags = MD_CONTEXT_X86_FULL;
+ mContextToFill->mContext.edi = GET_REGISTER(state, edi);
+ mContextToFill->mContext.esi = GET_REGISTER(state, esi);
+ mContextToFill->mContext.ebx = GET_REGISTER(state, ebx);
+ mContextToFill->mContext.edx = GET_REGISTER(state, edx);
+ mContextToFill->mContext.ecx = GET_REGISTER(state, ecx);
+ mContextToFill->mContext.eax = GET_REGISTER(state, eax);
+ mContextToFill->mContext.ebp = GET_REGISTER(state, ebp);
+ mContextToFill->mContext.eip = GET_REGISTER(state, eip);
+ mContextToFill->mContext.eflags = GET_REGISTER(state, eflags);
+ mContextToFill->mContext.esp = GET_REGISTER(state, esp);
+#elif defined(MOZ_THREADSTACKHELPER_X64)
+ mContextToFill->mContext.context_flags = MD_CONTEXT_AMD64_FULL;
+ mContextToFill->mContext.eflags = uint32_t(GET_REGISTER(state, rflags));
+ mContextToFill->mContext.rax = GET_REGISTER(state, rax);
+ mContextToFill->mContext.rcx = GET_REGISTER(state, rcx);
+ mContextToFill->mContext.rdx = GET_REGISTER(state, rdx);
+ mContextToFill->mContext.rbx = GET_REGISTER(state, rbx);
+ mContextToFill->mContext.rsp = GET_REGISTER(state, rsp);
+ mContextToFill->mContext.rbp = GET_REGISTER(state, rbp);
+ mContextToFill->mContext.rsi = GET_REGISTER(state, rsi);
+ mContextToFill->mContext.rdi = GET_REGISTER(state, rdi);
+ memcpy(&mContextToFill->mContext.r8,
+ &GET_REGISTER(state, r8), 8 * sizeof(int64_t));
+ mContextToFill->mContext.rip = GET_REGISTER(state, rip);
+#elif defined(MOZ_THREADSTACKHELPER_ARM)
+ mContextToFill->mContext.context_flags = MD_CONTEXT_ARM_FULL;
+ memcpy(mContextToFill->mContext.iregs,
+ GET_REGISTER(state, r), 17 * sizeof(int32_t));
+#else
+ #error "Unsupported architecture"
+#endif // architecture
+#undef GET_REGISTER
+
+#else
+ #error "Unsupported platform"
+#endif // platform
+
+ intptr_t sp = 0;
+#if defined(MOZ_THREADSTACKHELPER_X86)
+ sp = mContextToFill->mContext.esp;
+#elif defined(MOZ_THREADSTACKHELPER_X64)
+ sp = mContextToFill->mContext.rsp;
+#elif defined(MOZ_THREADSTACKHELPER_ARM)
+ sp = mContextToFill->mContext.iregs[13];
+#else
+ #error "Unsupported architecture"
+#endif // architecture
+ NS_ENSURE_TRUE_VOID(sp);
+ NS_ENSURE_TRUE_VOID(mThreadStackBase);
+
+ size_t stackSize = std::min(intptr_t(ThreadContext::kMaxStackSize),
+ std::abs(sp - mThreadStackBase));
+
+ if (mContextToFill->mStackEnd) {
+ // Limit the start of stack to a certain location if specified.
+ stackSize = std::min(intptr_t(stackSize),
+ std::abs(sp - intptr_t(mContextToFill->mStackEnd)));
+ }
+
+#ifndef MOZ_THREADSTACKHELPER_STACK_GROWS_DOWN
+ // If the stack grows upwards, we need to recalculate our
+ // stack copy's base address. Subtract sizeof(void*) so that the
+ // location pointed to by sp is included.
+ sp -= stackSize - sizeof(void*);
+#endif
+
+#ifndef MOZ_ASAN
+ memcpy(mContextToFill->mStack.get(), reinterpret_cast<void*>(sp), stackSize);
+ // Valgrind doesn't care about the access outside the stack frame, but
+ // the presence of uninitialised values on the stack does cause it to
+ // later report a lot of false errors when Breakpad comes to unwind it.
+ // So mark the extracted data as defined.
+ MOZ_MAKE_MEM_DEFINED(mContextToFill->mStack.get(), stackSize);
+#else
+ // ASan will flag memcpy for access outside of stack frames,
+ // so roll our own memcpy here.
+ intptr_t* dst = reinterpret_cast<intptr_t*>(&mContextToFill->mStack[0]);
+ const intptr_t* src = reinterpret_cast<intptr_t*>(sp);
+ for (intptr_t len = stackSize; len > 0; len -= sizeof(*src)) {
+ *(dst++) = *(src++);
+ }
+#endif
+
+ mContextToFill->mStackBase = uintptr_t(sp);
+ mContextToFill->mStackSize = stackSize;
+ mContextToFill->mValid = true;
+#endif
+#endif // MOZ_THREADSTACKHELPER_NATIVE
+}
+
+} // namespace mozilla
diff --git a/xpcom/threads/ThreadStackHelper.h b/xpcom/threads/ThreadStackHelper.h
new file mode 100644
index 000000000..9c40ad5e2
--- /dev/null
+++ b/xpcom/threads/ThreadStackHelper.h
@@ -0,0 +1,147 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef mozilla_ThreadStackHelper_h
+#define mozilla_ThreadStackHelper_h
+
+#include "mozilla/ThreadHangStats.h"
+
+#include "GeckoProfiler.h"
+
+#include <stddef.h>
+
+#if defined(XP_LINUX)
+#include <signal.h>
+#include <semaphore.h>
+#include <sys/types.h>
+#elif defined(XP_WIN)
+#include <windows.h>
+#elif defined(XP_MACOSX)
+#include <mach/mach.h>
+#endif
+
+// Support pseudostack on these platforms.
+#if defined(XP_LINUX) || defined(XP_WIN) || defined(XP_MACOSX)
+# ifdef MOZ_ENABLE_PROFILER_SPS
+# define MOZ_THREADSTACKHELPER_PSEUDO
+# endif
+#endif
+
+#ifdef MOZ_THREADSTACKHELPER_PSEUDO
+# define MOZ_THREADSTACKHELPER_NATIVE
+# if defined(__i386__) || defined(_M_IX86)
+# define MOZ_THREADSTACKHELPER_X86
+# elif defined(__x86_64__) || defined(_M_X64)
+# define MOZ_THREADSTACKHELPER_X64
+# elif defined(__arm__) || defined(_M_ARM)
+# define MOZ_THREADSTACKHELPER_ARM
+# else
+ // Unsupported architecture
+# undef MOZ_THREADSTACKHELPER_NATIVE
+# endif
+#endif
+
+namespace mozilla {
+
+/**
+ * ThreadStackHelper is used to retrieve the profiler pseudo-stack of a
+ * thread, as an alternative to using the profiler to take a profile.
+ * The target thread first declares a ThreadStackHelper instance;
+ * then another thread can call ThreadStackHelper::GetStack to retrieve
+ * the pseudo-stack of the target thread at that instant.
+ *
+ * Only non-copying labels are included in the stack, which means labels
+ * with custom text and markers are not included.
+ */
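+/*
+ * An illustrative sketch (not part of the original file); in practice the
+ * GetStack() caller is the BackgroundHangMonitor's watchdog thread:
+ *
+ *   // On the thread being monitored:
+ *   ThreadStackHelper helper;
+ *
+ *   // Later, from another thread:
+ *   ThreadStackHelper::Stack stack;
+ *   helper.GetStack(stack);
+ *   // `stack` now holds the pseudo-stack labels captured at that instant.
+ */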
+class ThreadStackHelper
+{
+public:
+ typedef Telemetry::HangStack Stack;
+
+private:
+ Stack* mStackToFill;
+#ifdef MOZ_THREADSTACKHELPER_PSEUDO
+ const PseudoStack* const mPseudoStack;
+#ifdef MOZ_THREADSTACKHELPER_NATIVE
+ class ThreadContext;
+ // Set to non-null if GetStack should get the thread context.
+ ThreadContext* mContextToFill;
+ intptr_t mThreadStackBase;
+#endif
+ size_t mMaxStackSize;
+ size_t mMaxBufferSize;
+#endif
+
+ bool PrepareStackBuffer(Stack& aStack);
+ void FillStackBuffer();
+ void FillThreadContext(void* aContext = nullptr);
+#ifdef MOZ_THREADSTACKHELPER_PSEUDO
+ const char* AppendJSEntry(const volatile StackEntry* aEntry,
+ intptr_t& aAvailableBufferSize,
+ const char* aPrevLabel);
+#endif
+#ifdef MOZ_THREADSTACKHELPER_NATIVE
+ void GetThreadStackBase();
+#endif
+
+public:
+ /**
+ * Initialize ThreadStackHelper. Must be called from main thread.
+ */
+ static void Startup();
+ /**
+ * Uninitialize ThreadStackHelper. Must be called from main thread.
+ */
+ static void Shutdown();
+
+ /**
+ * Create a ThreadStackHelper instance targeting the current thread.
+ */
+ ThreadStackHelper();
+
+ ~ThreadStackHelper();
+
+ /**
+ * Retrieve the current pseudostack of the thread associated
+ * with this ThreadStackHelper.
+ *
+ * @param aStack Stack instance to be filled.
+ */
+ void GetStack(Stack& aStack);
+
+ /**
+ * Retrieve the current native stack of the thread associated
+ * with this ThreadStackHelper.
+ *
+ * @param aStack Stack instance to be filled.
+ */
+ void GetNativeStack(Stack& aStack);
+
+#if defined(XP_LINUX)
+private:
+ static int sInitialized;
+ static int sFillStackSignum;
+
+ static void FillStackHandler(int aSignal, siginfo_t* aInfo, void* aContext);
+
+ sem_t mSem;
+ pid_t mThreadID;
+
+#elif defined(XP_WIN)
+private:
+ bool mInitialized;
+ HANDLE mThreadID;
+
+#elif defined(XP_MACOSX)
+private:
+ thread_act_t mThreadID;
+
+#endif
+};
+
+} // namespace mozilla
+
+#endif // mozilla_ThreadStackHelper_h
diff --git a/xpcom/threads/ThrottledEventQueue.cpp b/xpcom/threads/ThrottledEventQueue.cpp
new file mode 100644
index 000000000..941566ef2
--- /dev/null
+++ b/xpcom/threads/ThrottledEventQueue.cpp
@@ -0,0 +1,446 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "ThrottledEventQueue.h"
+
+#include "mozilla/Atomics.h"
+#include "mozilla/ClearOnShutdown.h"
+#include "mozilla/Mutex.h"
+#include "mozilla/Unused.h"
+#include "nsEventQueue.h"
+
+namespace mozilla {
+
+using mozilla::services::GetObserverService;
+
+namespace {
+
+static const char kShutdownTopic[] = "xpcom-shutdown";
+
+} // anonymous namespace
+
+// The ThrottledEventQueue is designed with inner and outer objects:
+//
+// XPCOM code nsObserverService
+// | |
+// | |
+// v |
+// +-------+ |
+// | Outer | |
+// +-------+ |
+// | |
+// | +-------+ |
+// +-->| Inner |<--+
+// +-------+
+//
+// Client code references the outer nsIEventTarget which in turn references
+// an inner object. The inner object is also held alive by the observer
+// service.
+//
+// If the outer object is dereferenced and destroyed, it will trigger a
+// shutdown operation on the inner object. Similarly if the observer
+// service notifies that the browser is shutting down, then the inner
+// object also starts shutting down.
+//
+// Once the queue has drained we unregister from the observer service. If
+// the outer object is already gone, then the inner object is free'd at this
+// point. If the outer object still exists then calls fall back to the
+// ThrottledEventQueue's base target. We just don't queue things
+// any more. The inner is then released once the outer object is released.
+//
+// Note, we must keep the inner object alive and attached to the observer
+// service until the TaskQueue is fully shutdown and idle. We must delay
+// xpcom shutdown if the TaskQueue is in the middle of draining.
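+//
+// An illustrative sketch (not part of the original file); `aBaseTarget` is a
+// hypothetical nsIEventTarget such as the main thread:
+//
+//   RefPtr<ThrottledEventQueue> queue = ThrottledEventQueue::Create(aBaseTarget);
+//   nsCOMPtr<nsIRunnable> r = NS_NewRunnableFunction([] { /* work */ });
+//   queue->Dispatch(r.forget(), NS_DISPATCH_NORMAL);
+//   // Events still run on aBaseTarget, but only one executor runnable is
+//   // queued there at a time, so the base target is never flooded.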
+class ThrottledEventQueue::Inner final : public nsIObserver
+{
+ // The runnable which is dispatched to the underlying base target. Since
+ // we only execute one event at a time we just re-use a single instance
+ // of this class while there are events left in the queue.
+ class Executor final : public Runnable
+ {
+ RefPtr<Inner> mInner;
+
+ public:
+ explicit Executor(Inner* aInner)
+ : mInner(aInner)
+ { }
+
+ NS_IMETHODIMP
+ Run()
+ {
+ mInner->ExecuteRunnable();
+ return NS_OK;
+ }
+ };
+
+ mutable Mutex mMutex;
+ mutable CondVar mIdleCondVar;
+
+ mozilla::CondVar mEventsAvailable;
+
+ // any thread, protected by mutex
+ nsEventQueue mEventQueue;
+
+ // written on main thread, read on any thread
+ nsCOMPtr<nsIEventTarget> mBaseTarget;
+
+ // any thread, protected by mutex
+ nsCOMPtr<nsIRunnable> mExecutor;
+
+ // any thread, atomic
+ Atomic<uint32_t> mExecutionDepth;
+
+ // any thread, protected by mutex
+ bool mShutdownStarted;
+
+ explicit Inner(nsIEventTarget* aBaseTarget)
+ : mMutex("ThrottledEventQueue")
+ , mIdleCondVar(mMutex, "ThrottledEventQueue:Idle")
+ , mEventsAvailable(mMutex, "[ThrottledEventQueue::Inner.mEventsAvailable]")
+ , mEventQueue(mEventsAvailable, nsEventQueue::eNormalQueue)
+ , mBaseTarget(aBaseTarget)
+ , mExecutionDepth(0)
+ , mShutdownStarted(false)
+ {
+ }
+
+ ~Inner()
+ {
+ MOZ_ASSERT(!mExecutor);
+ MOZ_ASSERT(mShutdownStarted);
+ }
+
+ void
+ ExecuteRunnable()
+ {
+ // Any thread
+ nsCOMPtr<nsIRunnable> event;
+ bool shouldShutdown = false;
+
+#ifdef DEBUG
+ bool currentThread = false;
+ mBaseTarget->IsOnCurrentThread(&currentThread);
+ MOZ_ASSERT(currentThread);
+#endif
+
+ {
+ MutexAutoLock lock(mMutex);
+
+ // We only dispatch an executor runnable when we know there is something
+ // in the queue, so this should never fail.
+ MOZ_ALWAYS_TRUE(mEventQueue.GetPendingEvent(getter_AddRefs(event), lock));
+
+ // If there are more events in the queue, then dispatch the next
+ // executor. We do this now, before running the event, because
+ // the event might spin the event loop and we don't want to stall
+ // the queue.
+ if (mEventQueue.HasPendingEvent(lock)) {
+ // Dispatch the next base target runnable to attempt to execute
+ // the next throttled event. We must do this before executing
+ // the event in case the event spins the event loop.
+ MOZ_ALWAYS_SUCCEEDS(
+ mBaseTarget->Dispatch(mExecutor, NS_DISPATCH_NORMAL));
+ }
+
+ // Otherwise the queue is empty and we can stop dispatching the
+ // executor. We might also need to shutdown after running the
+ // last event.
+ else {
+ shouldShutdown = mShutdownStarted;
+ // Note, this breaks a ref cycle.
+ mExecutor = nullptr;
+ mIdleCondVar.NotifyAll();
+ }
+ }
+
+ // Execute the event now that we have unlocked.
+ ++mExecutionDepth;
+ Unused << event->Run();
+ --mExecutionDepth;
+
+ // If shutdown was started and the queue is now empty we can now
+ // finalize the shutdown. This is performed separately at the end
+ // of the method in order to wait for the event to finish running.
+ if (shouldShutdown) {
+ MOZ_ASSERT(IsEmpty());
+ NS_DispatchToMainThread(NewRunnableMethod(this, &Inner::ShutdownComplete));
+ }
+ }
+
+ void
+ ShutdownComplete()
+ {
+ MOZ_ASSERT(NS_IsMainThread());
+ MOZ_ASSERT(IsEmpty());
+ nsCOMPtr<nsIObserverService> obs = GetObserverService();
+ obs->RemoveObserver(this, kShutdownTopic);
+ }
+
+public:
+ static already_AddRefed<Inner>
+ Create(nsIEventTarget* aBaseTarget)
+ {
+ MOZ_ASSERT(NS_IsMainThread());
+
+ if (ClearOnShutdown_Internal::sCurrentShutdownPhase != ShutdownPhase::NotInShutdown) {
+ return nullptr;
+ }
+
+ nsCOMPtr<nsIObserverService> obs = GetObserverService();
+ if (NS_WARN_IF(!obs)) {
+ return nullptr;
+ }
+
+ RefPtr<Inner> ref = new Inner(aBaseTarget);
+
+ nsresult rv = obs->AddObserver(ref, kShutdownTopic,
+ false /* means the observer service will hold a strong ref */);
+ if (NS_WARN_IF(NS_FAILED(rv))) {
+ ref->MaybeStartShutdown();
+ MOZ_ASSERT(ref->IsEmpty());
+ return nullptr;
+ }
+
+ return ref.forget();
+ }
+
+ NS_IMETHOD
+ Observe(nsISupports*, const char* aTopic, const char16_t*) override
+ {
+ MOZ_ASSERT(NS_IsMainThread());
+ MOZ_ASSERT(!strcmp(aTopic, kShutdownTopic));
+
+ MaybeStartShutdown();
+
+ // Once shutdown begins we set the mShutdownStarted flag under the mutex.
+ // This prevents any new runnables from being queued into this
+ // ThrottledEventQueue, so this loop should be finite.
+ while (!IsEmpty()) {
+ MOZ_ALWAYS_TRUE(NS_ProcessNextEvent());
+ }
+
+ return NS_OK;
+ }
+
+ void
+ MaybeStartShutdown()
+ {
+ // Any thread
+ MutexAutoLock lock(mMutex);
+
+ if (mShutdownStarted) {
+ return;
+ }
+ mShutdownStarted = true;
+
+ // We are marked for shutdown now, but we are still processing runnables.
+ // Return for now. The shutdown will be completed once the queue is
+ // drained.
+ if (mExecutor) {
+ return;
+ }
+
+ // The queue is empty, so we can complete immediately.
+ NS_DispatchToMainThread(NewRunnableMethod(this, &Inner::ShutdownComplete));
+ }
+
+ bool
+ IsEmpty() const
+ {
+ // Any thread
+ return Length() == 0;
+ }
+
+ uint32_t
+ Length() const
+ {
+ // Any thread
+ MutexAutoLock lock(mMutex);
+ return mEventQueue.Count(lock);
+ }
+
+ void
+ AwaitIdle() const
+ {
+ // Any thread, except the main thread or our base target. Blocking the
+ // main thread is forbidden. Blocking the base target is guaranteed to
+ // produce a deadlock.
+ MOZ_ASSERT(!NS_IsMainThread());
+#ifdef DEBUG
+ bool onBaseTarget = false;
+ Unused << mBaseTarget->IsOnCurrentThread(&onBaseTarget);
+ MOZ_ASSERT(!onBaseTarget);
+#endif
+
+ MutexAutoLock lock(mMutex);
+ while (mExecutor) {
+ mIdleCondVar.Wait();
+ }
+ }
+
+ nsresult
+ DispatchFromScript(nsIRunnable* aEvent, uint32_t aFlags)
+ {
+ // Any thread
+ nsCOMPtr<nsIRunnable> r = aEvent;
+ return Dispatch(r.forget(), aFlags);
+ }
+
+ nsresult
+ Dispatch(already_AddRefed<nsIRunnable> aEvent, uint32_t aFlags)
+ {
+ MOZ_ASSERT(aFlags == NS_DISPATCH_NORMAL ||
+ aFlags == NS_DISPATCH_AT_END);
+
+ // Any thread
+ MutexAutoLock lock(mMutex);
+
+ // If we are shutting down, just fall back to our base target
+ // directly.
+ if (mShutdownStarted) {
+ return mBaseTarget->Dispatch(Move(aEvent), aFlags);
+ }
+
+ // We are not currently processing events, so we must start
+ // operating on our base target. This is fallible, so do
+ // it first. Our lock will prevent the executor from accessing
+ // the event queue before we add the event below.
+ if (!mExecutor) {
+ // Note, this creates a ref cycle keeping the inner alive
+ // until the queue is drained.
+ mExecutor = new Executor(this);
+ nsresult rv = mBaseTarget->Dispatch(mExecutor, NS_DISPATCH_NORMAL);
+ if (NS_WARN_IF(NS_FAILED(rv))) {
+ mExecutor = nullptr;
+ return rv;
+ }
+ }
+
+ // Only add the event to the underlying queue if we are able to
+ // dispatch to our base target.
+ mEventQueue.PutEvent(Move(aEvent), lock);
+ return NS_OK;
+ }
+
+ nsresult
+ DelayedDispatch(already_AddRefed<nsIRunnable> aEvent, uint32_t aDelay)
+ {
+ // The base target may implement this, but we don't. We always fail
+ // so that the behavior is consistent regardless of the base target.
+ return NS_ERROR_NOT_IMPLEMENTED;
+ }
+
+ nsresult
+ IsOnCurrentThread(bool* aResult)
+ {
+ // Any thread
+
+ bool shutdownAndIdle = false;
+ {
+ MutexAutoLock lock(mMutex);
+ shutdownAndIdle = mShutdownStarted && mEventQueue.Count(lock) == 0;
+ }
+
+ bool onBaseTarget = false;
+ nsresult rv = mBaseTarget->IsOnCurrentThread(&onBaseTarget);
+ if (NS_FAILED(rv)) {
+ return rv;
+ }
+
+ // We consider the current stack to be on this event target if we
+ // are on the base target and one of the following is true:
+ // 1) We are currently running an event OR
+ // 2) We are both shutting down and the queue is idle
+ *aResult = onBaseTarget && (mExecutionDepth || shutdownAndIdle);
+
+ return NS_OK;
+ }
+
+ NS_DECL_THREADSAFE_ISUPPORTS
+};
+
+NS_IMPL_ISUPPORTS(ThrottledEventQueue::Inner, nsIObserver);
+
+NS_IMPL_ISUPPORTS(ThrottledEventQueue, nsIEventTarget);
+
+ThrottledEventQueue::ThrottledEventQueue(already_AddRefed<Inner> aInner)
+ : mInner(aInner)
+{
+ MOZ_ASSERT(mInner);
+}
+
+ThrottledEventQueue::~ThrottledEventQueue()
+{
+ mInner->MaybeStartShutdown();
+}
+
+void
+ThrottledEventQueue::MaybeStartShutdown()
+{
+ return mInner->MaybeStartShutdown();
+}
+
+already_AddRefed<ThrottledEventQueue>
+ThrottledEventQueue::Create(nsIEventTarget* aBaseTarget)
+{
+ MOZ_ASSERT(NS_IsMainThread());
+ MOZ_ASSERT(aBaseTarget);
+
+ RefPtr<Inner> inner = Inner::Create(aBaseTarget);
+ if (NS_WARN_IF(!inner)) {
+ return nullptr;
+ }
+
+ RefPtr<ThrottledEventQueue> ref =
+ new ThrottledEventQueue(inner.forget());
+ return ref.forget();
+}
+
+bool
+ThrottledEventQueue::IsEmpty() const
+{
+ return mInner->IsEmpty();
+}
+
+uint32_t
+ThrottledEventQueue::Length() const
+{
+ return mInner->Length();
+}
+
+void
+ThrottledEventQueue::AwaitIdle() const
+{
+ return mInner->AwaitIdle();
+}
+
+NS_IMETHODIMP
+ThrottledEventQueue::DispatchFromScript(nsIRunnable* aEvent, uint32_t aFlags)
+{
+ return mInner->DispatchFromScript(aEvent, aFlags);
+}
+
+NS_IMETHODIMP
+ThrottledEventQueue::Dispatch(already_AddRefed<nsIRunnable> aEvent,
+ uint32_t aFlags)
+{
+ return mInner->Dispatch(Move(aEvent), aFlags);
+}
+
+NS_IMETHODIMP
+ThrottledEventQueue::DelayedDispatch(already_AddRefed<nsIRunnable> aEvent,
+ uint32_t aFlags)
+{
+ return mInner->DelayedDispatch(Move(aEvent), aFlags);
+}
+
+NS_IMETHODIMP
+ThrottledEventQueue::IsOnCurrentThread(bool* aResult)
+{
+ return mInner->IsOnCurrentThread(aResult);
+}
+
+} // namespace mozilla
diff --git a/xpcom/threads/ThrottledEventQueue.h b/xpcom/threads/ThrottledEventQueue.h
new file mode 100644
index 000000000..e0762bcce
--- /dev/null
+++ b/xpcom/threads/ThrottledEventQueue.h
@@ -0,0 +1,94 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+// nsIEventTarget wrapper for throttling event dispatch.
+
+#ifndef mozilla_ThrottledEventQueue_h
+#define mozilla_ThrottledEventQueue_h
+
+#include "nsIEventTarget.h"
+
+namespace mozilla {
+
+// A ThrottledEventQueue is an event target that can be used to throttle
+// events being dispatched to another base target. It maintains its
+// own queue of events and only dispatches one at a time to the wrapped
+// target. This can be used to avoid flooding the base target.
+//
+// Flooding is avoided via a very simple principle. Runnables dispatched
+// to the ThrottledEventQueue are only dispatched to the base target
+// one at a time. Only once that runnable has executed will we dispatch
+// the next runnable to the base target. This in effect makes all
+// runnables passing through the ThrottledEventQueue yield to other work
+// on the base target.
+//
+// ThrottledEventQueue keeps runnables waiting to be dispatched to the
+// base in its own internal queue. Code can query the length of this
+// queue using IsEmpty() and Length(). Further, code can implement back
+// pressure by checking the depth of the queue and deciding to stop
+// issuing runnables if it sees that the ThrottledEventQueue is backed up.
+// Code running on other threads could even use AwaitIdle() to block
+// all operation until the ThrottledEventQueue drains.
+//
+// Note, this class is similar to TaskQueue, but also differs in a few
+// ways. First, it is a very simple nsIEventTarget implementation. It
+// does not use the AbstractThread API.
+//
+// In addition, ThrottledEventQueue currently dispatches its next
+// runnable to the base target *before* running the current event. This
+// allows the event code to spin the event loop without stalling the
+// ThrottledEventQueue. In contrast, TaskQueue only dispatches its next
+// runnable after running the current event. That approach is necessary
+// for TaskQueue in order to work with thread pool targets.
+//
+// So, if you are targeting a thread pool you probably want a TaskQueue.
+// If you are targeting a single thread or other non-concurrent event
+// target, you probably want a ThrottledEventQueue.
+//
+// ThrottledEventQueue also implements an automatic shutdown mechanism.
+// Destroying the queue (releasing its last reference) or browser shutdown
+// will automatically begin shutdown.
+//
+// Once shutdown begins all events will bypass the queue and be dispatched
+// straight to the underlying base target.
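+//
+// A minimal usage sketch (illustrative only; SomeRunnable is a hypothetical
+// runnable type, not part of this patch):
+//
+//   nsCOMPtr<nsIThread> mainThread = do_GetMainThread();
+//   RefPtr<ThrottledEventQueue> queue = ThrottledEventQueue::Create(mainThread);
+//   if (queue) {
+//     nsCOMPtr<nsIRunnable> task = new SomeRunnable();
+//     queue->Dispatch(task.forget(), NS_DISPATCH_NORMAL);
+//     // Producers may stop issuing work while queue->Length() is large, and
+//     // a non-main thread may call queue->AwaitIdle() to block until the
+//     // queue drains.
+//   }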
+class ThrottledEventQueue final : public nsIEventTarget
+{
+ class Inner;
+ RefPtr<Inner> mInner;
+
+ explicit ThrottledEventQueue(already_AddRefed<Inner> aInner);
+ ~ThrottledEventQueue();
+
+ // Begin shutdown of the event queue. This has no effect if shutdown
+ // is already in process. After this is called nsIEventTarget methods
+ // will bypass the queue and operate directly on the base target.
+ // Note, this could be made public if code needs to explicitly shutdown
+ // for some reason.
+ void MaybeStartShutdown();
+
+public:
+ // Attempt to create a ThrottledEventQueue for the given target. This
+ // may return nullptr if the browser is already shutting down.
+ static already_AddRefed<ThrottledEventQueue>
+ Create(nsIEventTarget* aBaseTarget);
+
+ // Determine if there are any events pending in the queue.
+ bool IsEmpty() const;
+
+ // Determine how many events are pending in the queue.
+ uint32_t Length() const;
+
+ // Block the current thread until the queue is empty. This may not
+ // be called on the main thread or the base target.
+ void AwaitIdle() const;
+
+ NS_DECL_THREADSAFE_ISUPPORTS
+ NS_DECL_NSIEVENTTARGET
+};
+
+} // namespace mozilla
+
+#endif // mozilla_ThrottledEventQueue_h
diff --git a/xpcom/threads/TimerThread.cpp b/xpcom/threads/TimerThread.cpp
new file mode 100644
index 000000000..0127e2dd1
--- /dev/null
+++ b/xpcom/threads/TimerThread.cpp
@@ -0,0 +1,752 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "nsTimerImpl.h"
+#include "TimerThread.h"
+
+#include "nsThreadUtils.h"
+#include "plarena.h"
+#include "pratom.h"
+
+#include "nsIObserverService.h"
+#include "nsIServiceManager.h"
+#include "mozilla/Services.h"
+#include "mozilla/ChaosMode.h"
+#include "mozilla/ArrayUtils.h"
+#include "mozilla/BinarySearch.h"
+
+#include <math.h>
+
+using namespace mozilla;
+#ifdef MOZ_TASK_TRACER
+#include "GeckoTaskTracerImpl.h"
+using namespace mozilla::tasktracer;
+#endif
+
+NS_IMPL_ISUPPORTS(TimerThread, nsIRunnable, nsIObserver)
+
+TimerThread::TimerThread() :
+ mInitInProgress(false),
+ mInitialized(false),
+ mMonitor("TimerThread.mMonitor"),
+ mShutdown(false),
+ mWaiting(false),
+ mNotified(false),
+ mSleeping(false)
+{
+}
+
+TimerThread::~TimerThread()
+{
+ mThread = nullptr;
+
+ NS_ASSERTION(mTimers.IsEmpty(), "Timers remain in TimerThread::~TimerThread");
+}
+
+nsresult
+TimerThread::InitLocks()
+{
+ return NS_OK;
+}
+
+namespace {
+
+class TimerObserverRunnable : public Runnable
+{
+public:
+ explicit TimerObserverRunnable(nsIObserver* aObserver)
+ : mObserver(aObserver)
+ {
+ }
+
+ NS_DECL_NSIRUNNABLE
+
+private:
+ nsCOMPtr<nsIObserver> mObserver;
+};
+
+NS_IMETHODIMP
+TimerObserverRunnable::Run()
+{
+ nsCOMPtr<nsIObserverService> observerService =
+ mozilla::services::GetObserverService();
+ if (observerService) {
+ observerService->AddObserver(mObserver, "sleep_notification", false);
+ observerService->AddObserver(mObserver, "wake_notification", false);
+ observerService->AddObserver(mObserver, "suspend_process_notification", false);
+ observerService->AddObserver(mObserver, "resume_process_notification", false);
+ }
+ return NS_OK;
+}
+
+} // namespace
+
+namespace {
+
+// TimerEventAllocator is a thread-safe allocator used only for nsTimerEvents.
+// It's needed to avoid contention over the default allocator lock when
+// firing timer events (see bug 733277). The thread-safety is required because
+// nsTimerEvent objects are allocated on the timer thread, and freed on another
+// thread. Because TimerEventAllocator has its own lock, contention over that
+// lock is limited to the allocation and deallocation of nsTimerEvent objects.
+//
+// Because this allocator is layered over PLArenaPool, it never shrinks -- even
+// "freed" nsTimerEvents aren't truly freed, they're just put onto a free-list
+// for later recycling. So the amount of memory consumed will always be equal
+// to the high-water mark consumption. But nsTimerEvents are small and it's
+// unusual to have more than a few hundred of them, so this shouldn't be a
+// problem in practice.
+
+class TimerEventAllocator
+{
+private:
+ struct FreeEntry
+ {
+ FreeEntry* mNext;
+ };
+
+ PLArenaPool mPool;
+ FreeEntry* mFirstFree;
+ mozilla::Monitor mMonitor;
+
+public:
+ TimerEventAllocator()
+ : mFirstFree(nullptr)
+ , mMonitor("TimerEventAllocator")
+ {
+ PL_InitArenaPool(&mPool, "TimerEventPool", 4096, /* align = */ 0);
+ }
+
+ ~TimerEventAllocator()
+ {
+ PL_FinishArenaPool(&mPool);
+ }
+
+ void* Alloc(size_t aSize);
+ void Free(void* aPtr);
+};
+
+} // namespace
+
+// This is a nsICancelableRunnable because we can dispatch it to Workers and
+// those can be shut down at any time, and in these cases, Cancel() is called
+// instead of Run().
+class nsTimerEvent final : public CancelableRunnable
+{
+public:
+ NS_IMETHOD Run() override;
+
+ nsresult Cancel() override
+ {
+ // Since nsTimerImpl is not thread-safe, we should release |mTimer|
+ // here on the target thread to avoid a race condition. Otherwise,
+ // ~nsTimerEvent(), which calls nsTimerImpl::Release(), could run on the
+ // timer thread and result in a race condition.
+ mTimer = nullptr;
+ return NS_OK;
+ }
+
+ nsTimerEvent()
+ : mTimer()
+ , mGeneration(0)
+ {
+ // Note: We override operator new for this class, and the override is
+ // fallible!
+ sAllocatorUsers++;
+ }
+
+ TimeStamp mInitTime;
+
+ static void Init();
+ static void Shutdown();
+ static void DeleteAllocatorIfNeeded();
+
+ static void* operator new(size_t aSize) CPP_THROW_NEW
+ {
+ return sAllocator->Alloc(aSize);
+ }
+ void operator delete(void* aPtr)
+ {
+ sAllocator->Free(aPtr);
+ DeleteAllocatorIfNeeded();
+ }
+
+ already_AddRefed<nsTimerImpl> ForgetTimer()
+ {
+ return mTimer.forget();
+ }
+
+ void SetTimer(already_AddRefed<nsTimerImpl> aTimer)
+ {
+ mTimer = aTimer;
+ mGeneration = mTimer->GetGeneration();
+ }
+
+private:
+ nsTimerEvent(const nsTimerEvent&) = delete;
+ nsTimerEvent& operator=(const nsTimerEvent&) = delete;
+ nsTimerEvent& operator=(const nsTimerEvent&&) = delete;
+
+ ~nsTimerEvent()
+ {
+ MOZ_ASSERT(!sCanDeleteAllocator || sAllocatorUsers > 0,
+ "This will result in us attempting to deallocate the nsTimerEvent allocator twice");
+ sAllocatorUsers--;
+ }
+
+ RefPtr<nsTimerImpl> mTimer;
+ int32_t mGeneration;
+
+ static TimerEventAllocator* sAllocator;
+ static Atomic<int32_t> sAllocatorUsers;
+ static bool sCanDeleteAllocator;
+};
+
+TimerEventAllocator* nsTimerEvent::sAllocator = nullptr;
+Atomic<int32_t> nsTimerEvent::sAllocatorUsers;
+bool nsTimerEvent::sCanDeleteAllocator = false;
+
+namespace {
+
+void*
+TimerEventAllocator::Alloc(size_t aSize)
+{
+ MOZ_ASSERT(aSize == sizeof(nsTimerEvent));
+
+ mozilla::MonitorAutoLock lock(mMonitor);
+
+ void* p;
+ if (mFirstFree) {
+ p = mFirstFree;
+ mFirstFree = mFirstFree->mNext;
+ } else {
+ PL_ARENA_ALLOCATE(p, &mPool, aSize);
+ if (!p) {
+ return nullptr;
+ }
+ }
+
+ return p;
+}
+
+void
+TimerEventAllocator::Free(void* aPtr)
+{
+ mozilla::MonitorAutoLock lock(mMonitor);
+
+ FreeEntry* entry = reinterpret_cast<FreeEntry*>(aPtr);
+
+ entry->mNext = mFirstFree;
+ mFirstFree = entry;
+}
+
+} // namespace
+
+void
+nsTimerEvent::Init()
+{
+ sAllocator = new TimerEventAllocator();
+}
+
+void
+nsTimerEvent::Shutdown()
+{
+ sCanDeleteAllocator = true;
+ DeleteAllocatorIfNeeded();
+}
+
+void
+nsTimerEvent::DeleteAllocatorIfNeeded()
+{
+ if (sCanDeleteAllocator && sAllocatorUsers == 0) {
+ delete sAllocator;
+ sAllocator = nullptr;
+ }
+}
+
+NS_IMETHODIMP
+nsTimerEvent::Run()
+{
+ if (!mTimer) {
+ MOZ_ASSERT(false);
+ return NS_OK;
+ }
+
+ if (MOZ_LOG_TEST(GetTimerLog(), LogLevel::Debug)) {
+ TimeStamp now = TimeStamp::Now();
+ MOZ_LOG(GetTimerLog(), LogLevel::Debug,
+ ("[this=%p] time between PostTimerEvent() and Fire(): %fms\n",
+ this, (now - mInitTime).ToMilliseconds()));
+ }
+
+ mTimer->Fire(mGeneration);
+
+ // We call Cancel() to correctly release mTimer.
+ // Read more in the Cancel() implementation.
+ return Cancel();
+}
+
+nsresult
+TimerThread::Init()
+{
+ MOZ_LOG(GetTimerLog(), LogLevel::Debug,
+ ("TimerThread::Init [%d]\n", mInitialized));
+
+ if (mInitialized) {
+ if (!mThread) {
+ return NS_ERROR_FAILURE;
+ }
+
+ return NS_OK;
+ }
+
+ nsTimerEvent::Init();
+
+ if (mInitInProgress.exchange(true) == false) {
+ // We hold on to mThread to keep the thread alive.
+ nsresult rv = NS_NewThread(getter_AddRefs(mThread), this);
+ if (NS_FAILED(rv)) {
+ mThread = nullptr;
+ } else {
+ RefPtr<TimerObserverRunnable> r = new TimerObserverRunnable(this);
+ if (NS_IsMainThread()) {
+ r->Run();
+ } else {
+ NS_DispatchToMainThread(r);
+ }
+ }
+
+ {
+ MonitorAutoLock lock(mMonitor);
+ mInitialized = true;
+ mMonitor.NotifyAll();
+ }
+ } else {
+ MonitorAutoLock lock(mMonitor);
+ while (!mInitialized) {
+ mMonitor.Wait();
+ }
+ }
+
+ if (!mThread) {
+ return NS_ERROR_FAILURE;
+ }
+
+ return NS_OK;
+}
+
+nsresult
+TimerThread::Shutdown()
+{
+ MOZ_LOG(GetTimerLog(), LogLevel::Debug, ("TimerThread::Shutdown begin\n"));
+
+ if (!mThread) {
+ return NS_ERROR_NOT_INITIALIZED;
+ }
+
+ nsTArray<nsTimerImpl*> timers;
+ {
+ // lock scope
+ MonitorAutoLock lock(mMonitor);
+
+ mShutdown = true;
+
+ // notify the cond var so that Run() can return
+ if (mWaiting) {
+ mNotified = true;
+ mMonitor.Notify();
+ }
+
+ // We need to copy the contents of the mTimers array to a local array
+ // because calling the timers' Cancel() (and releasing the timers
+ // themselves) must not be done under the lock. The destructor of a
+ // callback might potentially call code that re-enters the same lock,
+ // which leads to unexpected behavior or deadlock.
+ // See bug 422472.
+ timers.AppendElements(mTimers);
+ mTimers.Clear();
+ }
+
+ uint32_t timersCount = timers.Length();
+ for (uint32_t i = 0; i < timersCount; i++) {
+ nsTimerImpl* timer = timers[i];
+ timer->Cancel();
+ ReleaseTimerInternal(timer);
+ }
+
+ mThread->Shutdown(); // wait for the thread to die
+
+ nsTimerEvent::Shutdown();
+
+ MOZ_LOG(GetTimerLog(), LogLevel::Debug, ("TimerThread::Shutdown end\n"));
+ return NS_OK;
+}
+
+namespace {
+
+struct MicrosecondsToInterval
+{
+ PRIntervalTime operator[](size_t aMs) const {
+ return PR_MicrosecondsToInterval(aMs);
+ }
+};
+
+struct IntervalComparator
+{
+ int operator()(PRIntervalTime aInterval) const {
+ return (0 < aInterval) ? -1 : 1;
+ }
+};
+
+} // namespace
+
+NS_IMETHODIMP
+TimerThread::Run()
+{
+ PR_SetCurrentThreadName("Timer");
+
+ MonitorAutoLock lock(mMonitor);
+
+ // We need to know how many microseconds give a positive PRIntervalTime. This
+ // is platform-dependent and we calculate it at runtime, finding a value |v|
+ // such that |PR_MicrosecondsToInterval(v) > 0| and then binary-searching in
+ // the range [0, v) to find the microseconds-to-interval scale.
+ uint32_t usForPosInterval = 1;
+ while (PR_MicrosecondsToInterval(usForPosInterval) == 0) {
+ usForPosInterval <<= 1;
+ }
+
+ size_t usIntervalResolution;
+ BinarySearchIf(MicrosecondsToInterval(), 0, usForPosInterval, IntervalComparator(), &usIntervalResolution);
+ MOZ_ASSERT(PR_MicrosecondsToInterval(usIntervalResolution - 1) == 0);
+ MOZ_ASSERT(PR_MicrosecondsToInterval(usIntervalResolution) == 1);
+
+ // Half of the amount of microseconds needed to get positive PRIntervalTime.
+ // We use this to decide how to round our wait times later
+ int32_t halfMicrosecondsIntervalResolution = usIntervalResolution / 2;
+ bool forceRunNextTimer = false;
+
+ while (!mShutdown) {
+ // Have to use PRIntervalTime here, since PR_WaitCondVar takes it
+ PRIntervalTime waitFor;
+ bool forceRunThisTimer = forceRunNextTimer;
+ forceRunNextTimer = false;
+
+ if (mSleeping) {
+ // Sleep for 0.1 seconds while not firing timers.
+ uint32_t milliseconds = 100;
+ if (ChaosMode::isActive(ChaosFeature::TimerScheduling)) {
+ milliseconds = ChaosMode::randomUint32LessThan(200);
+ }
+ waitFor = PR_MillisecondsToInterval(milliseconds);
+ } else {
+ waitFor = PR_INTERVAL_NO_TIMEOUT;
+ TimeStamp now = TimeStamp::Now();
+ nsTimerImpl* timer = nullptr;
+
+ if (!mTimers.IsEmpty()) {
+ timer = mTimers[0];
+
+ if (now >= timer->mTimeout || forceRunThisTimer) {
+ next:
+ // NB: AddRef before the Release under RemoveTimerInternal to avoid
+ // mRefCnt passing through zero, in case all other refs than the one
+ // from mTimers have gone away (the last non-mTimers[i]-ref's Release
+ // must be racing with us, blocked in gThread->RemoveTimer waiting
+ // for TimerThread::mMonitor, under nsTimerImpl::Release).
+
+ RefPtr<nsTimerImpl> timerRef(timer);
+ RemoveTimerInternal(timer);
+ timer = nullptr;
+
+ MOZ_LOG(GetTimerLog(), LogLevel::Debug,
+ ("Timer thread woke up %fms from when it was supposed to\n",
+ fabs((now - timerRef->mTimeout).ToMilliseconds())));
+
+ // We are going to let the call to PostTimerEvent here handle the
+ // release of the timer so that we don't end up releasing the timer
+ // on the TimerThread instead of on the thread it targets.
+ timerRef = PostTimerEvent(timerRef.forget());
+
+ if (timerRef) {
+ // We got our reference back due to an error.
+ // Unhook the nsRefPtr, and release manually so we can get the
+ // refcount.
+ nsrefcnt rc = timerRef.forget().take()->Release();
+ (void)rc;
+
+ // The nsITimer interface requires that its users keep a reference
+ // to the timers they use while those timers are initialized but
+ // have not yet fired. If this ever happens, it is a bug in the
+ // code that created and used the timer.
+ //
+ // Further, note that this should never happen even with a
+ // misbehaving user, because nsTimerImpl::Release checks for a
+ // refcount of 1 with an armed timer (a timer whose only reference
+ // is from the timer thread) and when it hits this will remove the
+ // timer from the timer thread and thus destroy the last reference,
+ // preventing this situation from occurring.
+ MOZ_ASSERT(rc != 0, "destroyed timer off its target thread!");
+ }
+
+ if (mShutdown) {
+ break;
+ }
+
+ // Update now, as PostTimerEvent plus the locking may have taken a
+ // tick or two, and we may goto next below.
+ now = TimeStamp::Now();
+ }
+ }
+
+ if (!mTimers.IsEmpty()) {
+ timer = mTimers[0];
+
+ TimeStamp timeout = timer->mTimeout;
+
+ // Don't wait at all (even for PR_INTERVAL_NO_WAIT) if the next timer
+ // is due now or overdue.
+ //
+ // Note that we can only sleep for integer values of a certain
+ // resolution. We use halfMicrosecondsIntervalResolution, calculated
+ // before, to do the optimal rounding (i.e., of how to decide what
+ // interval is so small we should not wait at all).
+ double microseconds = (timeout - now).ToMilliseconds() * 1000;
+
+ if (ChaosMode::isActive(ChaosFeature::TimerScheduling)) {
+ // The mean value of sFractions must be 1 to ensure that
+ // the average of a long sequence of timeouts converges to the
+ // actual sum of their times.
+ static const float sFractions[] = {
+ 0.0f, 0.25f, 0.5f, 0.75f, 1.0f, 1.75f, 2.75f
+ };
+ microseconds *=
+ sFractions[ChaosMode::randomUint32LessThan(ArrayLength(sFractions))];
+ forceRunNextTimer = true;
+ }
+
+ if (microseconds < halfMicrosecondsIntervalResolution) {
+ forceRunNextTimer = false;
+ goto next; // round down; execute event now
+ }
+ waitFor = PR_MicrosecondsToInterval(
+ static_cast<uint32_t>(microseconds)); // Floor is accurate enough.
+ if (waitFor == 0) {
+ waitFor = 1; // round up, wait the minimum time we can wait
+ }
+ }
+
+ if (MOZ_LOG_TEST(GetTimerLog(), LogLevel::Debug)) {
+ if (waitFor == PR_INTERVAL_NO_TIMEOUT)
+ MOZ_LOG(GetTimerLog(), LogLevel::Debug,
+ ("waiting for PR_INTERVAL_NO_TIMEOUT\n"));
+ else
+ MOZ_LOG(GetTimerLog(), LogLevel::Debug,
+ ("waiting for %u\n", PR_IntervalToMilliseconds(waitFor)));
+ }
+ }
+
+ mWaiting = true;
+ mNotified = false;
+ mMonitor.Wait(waitFor);
+ if (mNotified) {
+ forceRunNextTimer = false;
+ }
+ mWaiting = false;
+ }
+
+ return NS_OK;
+}
+
+nsresult
+TimerThread::AddTimer(nsTimerImpl* aTimer)
+{
+ MonitorAutoLock lock(mMonitor);
+
+ if (!aTimer->mEventTarget) {
+ return NS_ERROR_NOT_INITIALIZED;
+ }
+
+ // Add the timer to our list.
+ int32_t i = AddTimerInternal(aTimer);
+ if (i < 0) {
+ return NS_ERROR_OUT_OF_MEMORY;
+ }
+
+ // Awaken the timer thread.
+ if (mWaiting && i == 0) {
+ mNotified = true;
+ mMonitor.Notify();
+ }
+
+ return NS_OK;
+}
+
+nsresult
+TimerThread::RemoveTimer(nsTimerImpl* aTimer)
+{
+ MonitorAutoLock lock(mMonitor);
+
+ // Remove the timer from our array. Tell callers that aTimer was not found
+ // by returning NS_ERROR_NOT_AVAILABLE.
+
+ if (!RemoveTimerInternal(aTimer)) {
+ return NS_ERROR_NOT_AVAILABLE;
+ }
+
+ // Awaken the timer thread.
+ if (mWaiting) {
+ mNotified = true;
+ mMonitor.Notify();
+ }
+
+ return NS_OK;
+}
+
+// This function must be called from within a lock
+int32_t
+TimerThread::AddTimerInternal(nsTimerImpl* aTimer)
+{
+ mMonitor.AssertCurrentThreadOwns();
+ if (mShutdown) {
+ return -1;
+ }
+
+ TimeStamp now = TimeStamp::Now();
+
+ TimerAdditionComparator c(now, aTimer);
+ nsTimerImpl** insertSlot = mTimers.InsertElementSorted(aTimer, c);
+
+ if (!insertSlot) {
+ return -1;
+ }
+
+ NS_ADDREF(aTimer);
+
+#ifdef MOZ_TASK_TRACER
+ // Caller of AddTimer is the parent task of its timer event, so we store the
+ // TraceInfo here for later use.
+ aTimer->GetTLSTraceInfo();
+#endif
+
+ return insertSlot - mTimers.Elements();
+}
+
+bool
+TimerThread::RemoveTimerInternal(nsTimerImpl* aTimer)
+{
+ mMonitor.AssertCurrentThreadOwns();
+ if (!mTimers.RemoveElement(aTimer)) {
+ return false;
+ }
+
+ ReleaseTimerInternal(aTimer);
+ return true;
+}
+
+void
+TimerThread::ReleaseTimerInternal(nsTimerImpl* aTimer)
+{
+ if (!mShutdown) {
+ // copied to a local array before releasing in shutdown
+ mMonitor.AssertCurrentThreadOwns();
+ }
+ NS_RELEASE(aTimer);
+}
+
+already_AddRefed<nsTimerImpl>
+TimerThread::PostTimerEvent(already_AddRefed<nsTimerImpl> aTimerRef)
+{
+ mMonitor.AssertCurrentThreadOwns();
+
+ RefPtr<nsTimerImpl> timer(aTimerRef);
+ if (!timer->mEventTarget) {
+ NS_ERROR("Attempt to post timer event to NULL event target");
+ return timer.forget();
+ }
+
+ // XXX we may want to reuse this nsTimerEvent in the case of repeating timers.
+
+ // Since we already addref'd 'timer', we don't need to addref here.
+ // We will release either in ~nsTimerEvent(), or pass the reference back to
+ // the caller. We need to copy the generation number from this timer into the
+ // event, so we can avoid firing a timer that was re-initialized after being
+ // canceled.
+
+ RefPtr<nsTimerEvent> event = new nsTimerEvent;
+ if (!event) {
+ return timer.forget();
+ }
+
+ if (MOZ_LOG_TEST(GetTimerLog(), LogLevel::Debug)) {
+ event->mInitTime = TimeStamp::Now();
+ }
+
+#ifdef MOZ_TASK_TRACER
+ // During the dispatch of TimerEvent, we overwrite the current TraceInfo
+ // partially with the info saved in timer earlier, and restore it back by
+ // AutoSaveCurTraceInfo.
+ AutoSaveCurTraceInfo saveCurTraceInfo;
+ (timer->GetTracedTask()).SetTLSTraceInfo();
+#endif
+
+ nsCOMPtr<nsIEventTarget> target = timer->mEventTarget;
+ event->SetTimer(timer.forget());
+
+ nsresult rv;
+ {
+ // We release mMonitor around the Dispatch because if this timer is targeted
+ // at the TimerThread we'll deadlock.
+ MonitorAutoUnlock unlock(mMonitor);
+ rv = target->Dispatch(event, NS_DISPATCH_NORMAL);
+ }
+
+ if (NS_FAILED(rv)) {
+ timer = event->ForgetTimer();
+ RemoveTimerInternal(timer);
+ return timer.forget();
+ }
+
+ return nullptr;
+}
+
+void
+TimerThread::DoBeforeSleep()
+{
+ // Mainthread
+ MonitorAutoLock lock(mMonitor);
+ mSleeping = true;
+}
+
+// Note: wake may be notified without preceding sleep notification
+void
+TimerThread::DoAfterSleep()
+{
+ // Mainthread
+ MonitorAutoLock lock(mMonitor);
+ mSleeping = false;
+
+ // Wake up the timer thread to re-process the array to ensure the sleep delay is correct,
+ // and fire any expired timers (perhaps quite a few)
+ mNotified = true;
+ mMonitor.Notify();
+}
+
+
+NS_IMETHODIMP
+TimerThread::Observe(nsISupports* /* aSubject */, const char* aTopic,
+ const char16_t* /* aData */)
+{
+ if (strcmp(aTopic, "sleep_notification") == 0 ||
+ strcmp(aTopic, "suspend_process_notification") == 0) {
+ DoBeforeSleep();
+ } else if (strcmp(aTopic, "wake_notification") == 0 ||
+ strcmp(aTopic, "resume_process_notification") == 0) {
+ DoAfterSleep();
+ }
+
+ return NS_OK;
+}
diff --git a/xpcom/threads/TimerThread.h b/xpcom/threads/TimerThread.h
new file mode 100644
index 000000000..a7204810a
--- /dev/null
+++ b/xpcom/threads/TimerThread.h
@@ -0,0 +1,115 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef TimerThread_h___
+#define TimerThread_h___
+
+#include "nsIObserver.h"
+#include "nsIRunnable.h"
+#include "nsIThread.h"
+
+#include "nsTimerImpl.h"
+#include "nsThreadUtils.h"
+
+#include "nsTArray.h"
+
+#include "mozilla/Atomics.h"
+#include "mozilla/Attributes.h"
+#include "mozilla/Monitor.h"
+
+namespace mozilla {
+class TimeStamp;
+} // namespace mozilla
+
+class TimerThread final
+ : public nsIRunnable
+ , public nsIObserver
+{
+public:
+ typedef mozilla::Monitor Monitor;
+ typedef mozilla::TimeStamp TimeStamp;
+ typedef mozilla::TimeDuration TimeDuration;
+
+ TimerThread();
+ nsresult InitLocks();
+
+ NS_DECL_THREADSAFE_ISUPPORTS
+ NS_DECL_NSIRUNNABLE
+ NS_DECL_NSIOBSERVER
+
+ nsresult Init();
+ nsresult Shutdown();
+
+ nsresult AddTimer(nsTimerImpl* aTimer);
+ nsresult RemoveTimer(nsTimerImpl* aTimer);
+
+ void DoBeforeSleep();
+ void DoAfterSleep();
+
+ bool IsOnTimerThread() const
+ {
+ return mThread == NS_GetCurrentThread();
+ }
+
+private:
+ ~TimerThread();
+
+ mozilla::Atomic<bool> mInitInProgress;
+ bool mInitialized;
+
+ // These two internal helper methods must be called while mMonitor is held.
+ // AddTimerInternal returns the position where the timer was added in the
+ // list, or -1 if it failed.
+ int32_t AddTimerInternal(nsTimerImpl* aTimer);
+ bool RemoveTimerInternal(nsTimerImpl* aTimer);
+ void ReleaseTimerInternal(nsTimerImpl* aTimer);
+
+ already_AddRefed<nsTimerImpl> PostTimerEvent(already_AddRefed<nsTimerImpl> aTimerRef);
+
+ nsCOMPtr<nsIThread> mThread;
+ Monitor mMonitor;
+
+ bool mShutdown;
+ bool mWaiting;
+ bool mNotified;
+ bool mSleeping;
+
+ nsTArray<nsTimerImpl*> mTimers;
+};
+
+struct TimerAdditionComparator
+{
+ TimerAdditionComparator(const mozilla::TimeStamp& aNow,
+ nsTimerImpl* aTimerToInsert) :
+ now(aNow)
+#ifdef DEBUG
+ , timerToInsert(aTimerToInsert)
+#endif
+ {
+ }
+
+ bool LessThan(nsTimerImpl* aFromArray, nsTimerImpl* aNewTimer) const
+ {
+ MOZ_ASSERT(aNewTimer == timerToInsert, "Unexpected timer ordering");
+
+ // Skip any overdue timers.
+ return aFromArray->mTimeout <= now ||
+ aFromArray->mTimeout <= aNewTimer->mTimeout;
+ }
+
+ bool Equals(nsTimerImpl* aFromArray, nsTimerImpl* aNewTimer) const
+ {
+ return false;
+ }
+
+private:
+ const mozilla::TimeStamp& now;
+#ifdef DEBUG
+ const nsTimerImpl* const timerToInsert;
+#endif
+};
+
+#endif /* TimerThread_h___ */
diff --git a/xpcom/threads/moz.build b/xpcom/threads/moz.build
new file mode 100644
index 000000000..5d54a4bf4
--- /dev/null
+++ b/xpcom/threads/moz.build
@@ -0,0 +1,89 @@
+# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*-
+# vim: set filetype=python:
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+XPIDL_SOURCES += [
+ 'nsIEnvironment.idl',
+ 'nsIEventTarget.idl',
+ 'nsIIdlePeriod.idl',
+ 'nsIProcess.idl',
+ 'nsIRunnable.idl',
+ 'nsISupportsPriority.idl',
+ 'nsIThread.idl',
+ 'nsIThreadInternal.idl',
+ 'nsIThreadManager.idl',
+ 'nsIThreadPool.idl',
+ 'nsITimer.idl',
+]
+
+XPIDL_MODULE = 'xpcom_threads'
+
+EXPORTS += [
+ 'nsEventQueue.h',
+ 'nsICancelableRunnable.h',
+ 'nsIIncrementalRunnable.h',
+ 'nsMemoryPressure.h',
+ 'nsProcess.h',
+ 'nsThread.h',
+]
+
+EXPORTS.mozilla += [
+ 'AbstractThread.h',
+ 'BackgroundHangMonitor.h',
+ 'HangAnnotations.h',
+ 'HangMonitor.h',
+ 'LazyIdleThread.h',
+ 'MainThreadIdlePeriod.h',
+ 'MozPromise.h',
+ 'SharedThreadPool.h',
+ 'StateMirroring.h',
+ 'StateWatching.h',
+ 'SyncRunnable.h',
+ 'TaskDispatcher.h',
+ 'TaskQueue.h',
+ 'ThrottledEventQueue.h',
+]
+
+UNIFIED_SOURCES += [
+ 'AbstractThread.cpp',
+ 'BackgroundHangMonitor.cpp',
+ 'HangAnnotations.cpp',
+ 'HangMonitor.cpp',
+ 'LazyIdleThread.cpp',
+ 'MainThreadIdlePeriod.cpp',
+ 'nsEnvironment.cpp',
+ 'nsEventQueue.cpp',
+ 'nsMemoryPressure.cpp',
+ 'nsProcessCommon.cpp',
+ 'nsThread.cpp',
+ 'nsThreadManager.cpp',
+ 'nsThreadPool.cpp',
+ 'nsTimerImpl.cpp',
+ 'SharedThreadPool.cpp',
+ 'TaskQueue.cpp',
+ 'ThreadStackHelper.cpp',
+ 'ThrottledEventQueue.cpp',
+ 'TimerThread.cpp',
+]
+
+LOCAL_INCLUDES += [
+ '../build',
+ '/caps',
+ '/tools/profiler',
+]
+
+# BHR disabled for Release builds because of bug 965392.
+# BHR disabled for debug builds because of bug 979069.
+# BHR disabled on gonk because of bug 1180533
+# BHR disabled for TSan builds because of bug 1121216.
+if CONFIG['MOZ_UPDATE_CHANNEL'] not in ('release',) and \
+ not CONFIG['MOZ_DEBUG'] and \
+ not CONFIG['MOZ_WIDGET_TOOLKIT'] == 'gonk' and \
+ not CONFIG['MOZ_TSAN']:
+ DEFINES['MOZ_ENABLE_BACKGROUND_HANG_MONITOR'] = 1
+
+FINAL_LIBRARY = 'xul'
+
+include('/ipc/chromium/chromium-config.mozbuild')
diff --git a/xpcom/threads/nsEnvironment.cpp b/xpcom/threads/nsEnvironment.cpp
new file mode 100644
index 000000000..0de56675e
--- /dev/null
+++ b/xpcom/threads/nsEnvironment.cpp
@@ -0,0 +1,163 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "nsEnvironment.h"
+#include "prenv.h"
+#include "prprf.h"
+#include "nsBaseHashtable.h"
+#include "nsHashKeys.h"
+#include "nsPromiseFlatString.h"
+#include "nsDependentString.h"
+#include "nsNativeCharsetUtils.h"
+
+using namespace mozilla;
+
+NS_IMPL_ISUPPORTS(nsEnvironment, nsIEnvironment)
+
+nsresult
+nsEnvironment::Create(nsISupports* aOuter, REFNSIID aIID, void** aResult)
+{
+ nsresult rv;
+ *aResult = nullptr;
+
+ if (aOuter) {
+ return NS_ERROR_NO_AGGREGATION;
+ }
+
+ nsEnvironment* obj = new nsEnvironment();
+
+ rv = obj->QueryInterface(aIID, aResult);
+ if (NS_FAILED(rv)) {
+ delete obj;
+ }
+ return rv;
+}
+
+nsEnvironment::~nsEnvironment()
+{
+}
+
+NS_IMETHODIMP
+nsEnvironment::Exists(const nsAString& aName, bool* aOutValue)
+{
+ nsAutoCString nativeName;
+ nsresult rv = NS_CopyUnicodeToNative(aName, nativeName);
+ if (NS_WARN_IF(NS_FAILED(rv))) {
+ return rv;
+ }
+
+ nsAutoCString nativeVal;
+#if defined(XP_UNIX)
+ /* For Unix/Linux platforms we follow the Unix definition:
+ * An environment variable exists when |getenv()| returns a non-nullptr
+ * value. An environment variable does not exist when |getenv()| returns
+ * nullptr.
+ */
+ const char* value = PR_GetEnv(nativeName.get());
+ *aOutValue = value && *value;
+#else
+ /* For non-Unix/Linux platforms we have to fall back to a
+ * "portable" definition (which is incorrect for Unix/Linux!!!!)
+ * which simply checks whether the string returned by |Get()| is empty
+ * or not.
+ */
+ nsAutoString value;
+ Get(aName, value);
+ *aOutValue = !value.IsEmpty();
+#endif /* XP_UNIX */
+
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsEnvironment::Get(const nsAString& aName, nsAString& aOutValue)
+{
+ nsAutoCString nativeName;
+ nsresult rv = NS_CopyUnicodeToNative(aName, nativeName);
+ if (NS_WARN_IF(NS_FAILED(rv))) {
+ return rv;
+ }
+
+ nsAutoCString nativeVal;
+ const char* value = PR_GetEnv(nativeName.get());
+ if (value && *value) {
+ rv = NS_CopyNativeToUnicode(nsDependentCString(value), aOutValue);
+ } else {
+ aOutValue.Truncate();
+ rv = NS_OK;
+ }
+
+ return rv;
+}
+
+/* Environment strings must have static duration; we deliberately leak all of
+ * this at shutdown. This is by design, due to how Unix/Linux implement
+ * environment variables.
+ */
+
+typedef nsBaseHashtableET<nsCharPtrHashKey, char*> EnvEntryType;
+typedef nsTHashtable<EnvEntryType> EnvHashType;
+
+static EnvHashType* gEnvHash = nullptr;
+
+static bool
+EnsureEnvHash()
+{
+ if (gEnvHash) {
+ return true;
+ }
+
+ gEnvHash = new EnvHashType;
+ if (!gEnvHash) {
+ return false;
+ }
+
+ return true;
+}
+
+NS_IMETHODIMP
+nsEnvironment::Set(const nsAString& aName, const nsAString& aValue)
+{
+ nsAutoCString nativeName;
+ nsAutoCString nativeVal;
+
+ nsresult rv = NS_CopyUnicodeToNative(aName, nativeName);
+ if (NS_WARN_IF(NS_FAILED(rv))) {
+ return rv;
+ }
+
+ rv = NS_CopyUnicodeToNative(aValue, nativeVal);
+ if (NS_WARN_IF(NS_FAILED(rv))) {
+ return rv;
+ }
+
+ MutexAutoLock lock(mLock);
+
+ if (!EnsureEnvHash()) {
+ return NS_ERROR_UNEXPECTED;
+ }
+
+ EnvEntryType* entry = gEnvHash->PutEntry(nativeName.get());
+ if (!entry) {
+ return NS_ERROR_OUT_OF_MEMORY;
+ }
+
+ char* newData = PR_smprintf("%s=%s",
+ nativeName.get(),
+ nativeVal.get());
+ if (!newData) {
+ return NS_ERROR_OUT_OF_MEMORY;
+ }
+
+ PR_SetEnv(newData);
+ if (entry->mData) {
+ PR_smprintf_free(entry->mData);
+ }
+ entry->mData = newData;
+ return NS_OK;
+}
+
+
diff --git a/xpcom/threads/nsEnvironment.h b/xpcom/threads/nsEnvironment.h
new file mode 100644
index 000000000..234055a07
--- /dev/null
+++ b/xpcom/threads/nsEnvironment.h
@@ -0,0 +1,36 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef nsEnvironment_h__
+#define nsEnvironment_h__
+
+#include "mozilla/Attributes.h"
+#include "mozilla/Mutex.h"
+#include "nsIEnvironment.h"
+
+#define NS_ENVIRONMENT_CID \
+ { 0X3D68F92UL, 0X9513, 0X4E25, \
+ { 0X9B, 0XE9, 0X7C, 0XB2, 0X39, 0X87, 0X41, 0X72 } }
+#define NS_ENVIRONMENT_CONTRACTID "@mozilla.org/process/environment;1"
+
+class nsEnvironment final : public nsIEnvironment
+{
+public:
+ NS_DECL_THREADSAFE_ISUPPORTS
+ NS_DECL_NSIENVIRONMENT
+
+ static nsresult Create(nsISupports* aOuter, REFNSIID aIID, void** aResult);
+
+private:
+ nsEnvironment() : mLock("nsEnvironment.mLock")
+ {
+ }
+ ~nsEnvironment();
+
+ mozilla::Mutex mLock;
+};
+
+#endif /* !nsEnvironment_h__ */
diff --git a/xpcom/threads/nsEventQueue.cpp b/xpcom/threads/nsEventQueue.cpp
new file mode 100644
index 000000000..4ca2f11ea
--- /dev/null
+++ b/xpcom/threads/nsEventQueue.cpp
@@ -0,0 +1,155 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "nsEventQueue.h"
+#include "nsAutoPtr.h"
+#include "mozilla/Logging.h"
+#include "nsThreadUtils.h"
+#include "prthread.h"
+#include "mozilla/ChaosMode.h"
+
+using namespace mozilla;
+
+static LazyLogModule sEventQueueLog("nsEventQueue");
+#ifdef LOG
+#undef LOG
+#endif
+#define LOG(args) MOZ_LOG(sEventQueueLog, mozilla::LogLevel::Debug, args)
+
+nsEventQueue::nsEventQueue(mozilla::CondVar& aCondVar, EventQueueType aType)
+ : mHead(nullptr)
+ , mTail(nullptr)
+ , mOffsetHead(0)
+ , mOffsetTail(0)
+ , mEventsAvailable(aCondVar)
+ , mType(aType)
+{
+}
+
+nsEventQueue::~nsEventQueue()
+{
+ // It'd be nice to be able to assert that no one else is holding the lock,
+ // but NSPR doesn't really expose APIs for it.
+ NS_ASSERTION(IsEmpty(),
+ "Non-empty event queue being destroyed; events being leaked.");
+
+ if (mHead) {
+ FreePage(mHead);
+ }
+}
+
+bool
+nsEventQueue::GetEvent(bool aMayWait, nsIRunnable** aResult,
+ MutexAutoLock& aProofOfLock)
+{
+ if (aResult) {
+ *aResult = nullptr;
+ }
+
+ while (IsEmpty()) {
+ if (!aMayWait) {
+ return false;
+ }
+ LOG(("EVENTQ(%p): wait begin\n", this));
+ mEventsAvailable.Wait();
+ LOG(("EVENTQ(%p): wait end\n", this));
+
+ if (mType == eSharedCondVarQueue) {
+ if (IsEmpty()) {
+ return false;
+ }
+ break;
+ }
+ }
+
+ if (aResult) {
+ MOZ_ASSERT(mOffsetHead < EVENTS_PER_PAGE);
+ MOZ_ASSERT_IF(mHead == mTail, mOffsetHead <= mOffsetTail);
+ *aResult = mHead->mEvents[mOffsetHead++];
+
+ MOZ_ASSERT(*aResult);
+ MOZ_ASSERT(mOffsetHead <= EVENTS_PER_PAGE);
+
+ // Check if mHead points to empty Page
+ if (mOffsetHead == EVENTS_PER_PAGE) {
+ Page* dead = mHead;
+ mHead = mHead->mNext;
+ FreePage(dead);
+ mOffsetHead = 0;
+ }
+ }
+
+ return true;
+}
+
+void
+nsEventQueue::PutEvent(already_AddRefed<nsIRunnable>&& aRunnable,
+ MutexAutoLock& aProofOfLock)
+{
+ if (!mHead) {
+ mHead = NewPage();
+ MOZ_ASSERT(mHead);
+
+ mTail = mHead;
+ mOffsetHead = 0;
+ mOffsetTail = 0;
+ } else if (mOffsetTail == EVENTS_PER_PAGE) {
+ Page* page = NewPage();
+ MOZ_ASSERT(page);
+
+ mTail->mNext = page;
+ mTail = page;
+ mOffsetTail = 0;
+ }
+
+ nsIRunnable*& queueLocation = mTail->mEvents[mOffsetTail];
+ MOZ_ASSERT(!queueLocation);
+ queueLocation = aRunnable.take();
+ ++mOffsetTail;
+ LOG(("EVENTQ(%p): notify\n", this));
+ mEventsAvailable.Notify();
+}
+
+void
+nsEventQueue::PutEvent(nsIRunnable* aRunnable, MutexAutoLock& aProofOfLock)
+{
+ nsCOMPtr<nsIRunnable> event(aRunnable);
+ PutEvent(event.forget(), aProofOfLock);
+}
+
+size_t
+nsEventQueue::Count(MutexAutoLock& aProofOfLock) const
+{
+ // The count is obviously 0 when the queue is empty.
+ if (!mHead) {
+ return 0;
+ }
+
+ /* How we count the number of events in the queue:
+ * 1. Let pageCount(x, y) denote the number of pages excluding the tail page
+ * where x is the index of head page and y is the index of the tail page.
+ * 2. Then we have pageCount(x, y) = y - x.
+ *
+ * Ex: pageCount(0, 0) = 0 where both head and tail pages point to page 0.
+ * pageCount(0, 1) = 1 where head points to page 0 and tail points page 1.
+ *
+ * 3. number of events = (EVENTS_PER_PAGE * pageCount(x, y))
+ * - (empty slots in head page) + (non-empty slots in tail page)
+ * = (EVENTS_PER_PAGE * pageCount(x, y)) - mOffsetHead + mOffsetTail
+ */
+
+ int count = -mOffsetHead;
+
+ // Compute (EVENTS_PER_PAGE * pageCount(x, y))
+ for (Page* page = mHead; page != mTail; page = page->mNext) {
+ count += EVENTS_PER_PAGE;
+ }
+
+ count += mOffsetTail;
+ MOZ_ASSERT(count >= 0);
+
+ return count;
+}
diff --git a/xpcom/threads/nsEventQueue.h b/xpcom/threads/nsEventQueue.h
new file mode 100644
index 000000000..23b55e63d
--- /dev/null
+++ b/xpcom/threads/nsEventQueue.h
@@ -0,0 +1,123 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef nsEventQueue_h__
+#define nsEventQueue_h__
+
+#include <stdlib.h>
+#include "mozilla/CondVar.h"
+#include "mozilla/Mutex.h"
+#include "nsIRunnable.h"
+#include "nsCOMPtr.h"
+#include "mozilla/AlreadyAddRefed.h"
+#include "mozilla/UniquePtr.h"
+
+class nsThreadPool;
+
+// A threadsafe FIFO event queue...
+class nsEventQueue
+{
+public:
+ typedef mozilla::MutexAutoLock MutexAutoLock;
+
+ enum EventQueueType
+ {
+ eNormalQueue,
+ eSharedCondVarQueue
+ };
+
+ nsEventQueue(mozilla::CondVar& aCondVar, EventQueueType aType);
+ ~nsEventQueue();
+
+ // This method adds a new event to the pending event queue. The queue holds
+ // a strong reference to the event after this method returns. This method
+ // cannot fail.
+ void PutEvent(nsIRunnable* aEvent, MutexAutoLock& aProofOfLock);
+ void PutEvent(already_AddRefed<nsIRunnable>&& aEvent,
+ MutexAutoLock& aProofOfLock);
+
+ // This method gets an event from the event queue. If mayWait is true, then
+ // the method will block the calling thread until an event is available. If
+ // aEvent is null, then the method returns immediately, indicating whether
+ // or not an event is pending. When the resulting event is non-null, the
+ // caller is responsible for releasing the event object. This method does
+ // not alter the reference count of the resulting event.
+ bool GetEvent(bool aMayWait, nsIRunnable** aEvent,
+ MutexAutoLock& aProofOfLock);
+
+ // This method returns true if there is a pending event.
+ bool HasPendingEvent(MutexAutoLock& aProofOfLock)
+ {
+ return GetEvent(false, nullptr, aProofOfLock);
+ }
+
+ // This method retrieves the next pending event, if any, without waiting.
+ bool GetPendingEvent(nsIRunnable** aRunnable, MutexAutoLock& aProofOfLock)
+ {
+ return GetEvent(false, aRunnable, aProofOfLock);
+ }
+
+ size_t Count(MutexAutoLock&) const;
+
+private:
+ bool IsEmpty()
+ {
+ return !mHead || (mHead == mTail && mOffsetHead == mOffsetTail);
+ }
+
+ enum
+ {
+ EVENTS_PER_PAGE = 255
+ };
+
+ // Page objects are linked together to form a simple deque.
+
+ struct Page
+ {
+ struct Page* mNext;
+ nsIRunnable* mEvents[EVENTS_PER_PAGE];
+ };
+
+ static_assert((sizeof(Page) & (sizeof(Page) - 1)) == 0,
+ "sizeof(Page) should be a power of two to avoid heap slop.");
+
+ static Page* NewPage()
+ {
+ return static_cast<Page*>(moz_xcalloc(1, sizeof(Page)));
+ }
+
+ static void FreePage(Page* aPage)
+ {
+ free(aPage);
+ }
+
+ Page* mHead;
+ Page* mTail;
+
+ uint16_t mOffsetHead; // offset into mHead where next item is removed
+ uint16_t mOffsetTail; // offset into mTail where next item is added
+ mozilla::CondVar& mEventsAvailable;
+
+ EventQueueType mType;
+
+ // These methods are made available to nsThreadPool as a hack, since
+ // nsThreadPool needs to have its threads sleep for fixed amounts of
+ // time as well as being able to wake up all threads when thread
+ // limits change.
+ friend class nsThreadPool;
+ void Wait(PRIntervalTime aInterval)
+ {
+ MOZ_ASSERT(mType == eNormalQueue);
+ mEventsAvailable.Wait(aInterval);
+ }
+ void NotifyAll()
+ {
+ MOZ_ASSERT(mType == eNormalQueue);
+ mEventsAvailable.NotifyAll();
+ }
+};
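+
+// A minimal usage sketch (illustrative only; the mutex, condvar, and runnable
+// names are hypothetical). The queue is always used under an external mutex
+// whose MutexAutoLock serves as the proof-of-lock parameter:
+//
+//   mozilla::Mutex mutex("Example.mMutex");
+//   mozilla::CondVar condVar(mutex, "Example.mCondVar");
+//   nsEventQueue queue(condVar, nsEventQueue::eNormalQueue);
+//   {
+//     mozilla::MutexAutoLock lock(mutex);
+//     queue.PutEvent(someRunnable, lock);                  // queue holds a ref
+//     nsCOMPtr<nsIRunnable> event;
+//     queue.GetEvent(false, getter_AddRefs(event), lock);  // ref moves out
+//   }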
+
+#endif // nsEventQueue_h__
diff --git a/xpcom/threads/nsICancelableRunnable.h b/xpcom/threads/nsICancelableRunnable.h
new file mode 100644
index 000000000..5ae9f5b14
--- /dev/null
+++ b/xpcom/threads/nsICancelableRunnable.h
@@ -0,0 +1,38 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef nsICancelableRunnable_h__
+#define nsICancelableRunnable_h__
+
+#include "nsISupports.h"
+
+#define NS_ICANCELABLERUNNABLE_IID \
+{ 0xde93dc4c, 0x5eea, 0x4eb7, \
+{ 0xb6, 0xd1, 0xdb, 0xf1, 0xe0, 0xce, 0xf6, 0x5c } }
+
+class nsICancelableRunnable : public nsISupports
+{
+public:
+ NS_DECLARE_STATIC_IID_ACCESSOR(NS_ICANCELABLERUNNABLE_IID)
+
+ /*
+ * Cancels a pending task. If the task has already been executed this will
+ * be a no-op. Calling this method twice is considered an error.
+ *
+ * @throws NS_ERROR_UNEXPECTED
+ * Indicates that the runnable has already been canceled.
+ */
+ virtual nsresult Cancel() = 0;
+
+protected:
+ nsICancelableRunnable() { }
+ virtual ~nsICancelableRunnable() {}
+};
+
+NS_DEFINE_STATIC_IID_ACCESSOR(nsICancelableRunnable,
+ NS_ICANCELABLERUNNABLE_IID)
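+
+// A minimal sketch of an implementation (illustrative only; in practice most
+// code derives from mozilla::CancelableRunnable in nsThreadUtils.h, which
+// already inherits this interface, rather than implementing it directly):
+//
+//   class ExampleTask final : public mozilla::CancelableRunnable
+//   {
+//   public:
+//     NS_IMETHOD Run() override { /* do the work */ return NS_OK; }
+//     nsresult Cancel() override { /* drop any resources */ return NS_OK; }
+//   };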
+
+#endif // nsICancelableRunnable_h__
diff --git a/xpcom/threads/nsIEnvironment.idl b/xpcom/threads/nsIEnvironment.idl
new file mode 100644
index 000000000..afbc3eb7c
--- /dev/null
+++ b/xpcom/threads/nsIEnvironment.idl
@@ -0,0 +1,55 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "nsISupports.idl"
+
+/**
+ * Scriptable access to the current process environment.
+ *
+ */
+[scriptable, uuid(101d5941-d820-4e85-a266-9a3469940807)]
+interface nsIEnvironment : nsISupports
+{
+ /**
+ * Set the value of an environment variable.
+ *
+ * @param aName the variable name to set.
+ * @param aValue the value to set.
+ */
+ void set(in AString aName, in AString aValue);
+
+ /**
+ * Get the value of an environment variable.
+ *
+ * @param aName the variable name to retrieve.
+ * @return returns the value of the env variable. An empty string
+ * will be returned when the env variable does not exist or
+ * when the value itself is an empty string - please use
+ * |exists()| to probe whether the env variable exists
+ * or not.
+ */
+ AString get(in AString aName);
+
+ /**
+ * Check the existence of an environment variable.
+ * This method checks whether an environment variable is present in
+ * the environment or not.
+ *
+ * - For Unix/Linux platforms we follow the Unix definition:
+ * An environment variable exists when |getenv()| returns a non-NULL value.
+ * An environment variable does not exist when |getenv()| returns NULL.
+ * - For non-Unix/Linux platforms we have to fall back to a
+ * "portable" definition (which is incorrect for Unix/Linux!!!!)
+ * which simply checks whether the string returned by |Get()| is empty
+ * or not.
+ *
+ * @param aName the variable name to probe.
+ * @return if the variable has been set, the value returned is
+ * PR_TRUE. If the variable was not defined in the
+ * environment PR_FALSE will be returned.
+ */
+ boolean exists(in AString aName);
+};
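+
+// A minimal C++ usage sketch (illustrative only; MOZ_EXAMPLE is a hypothetical
+// variable name). The service is registered under the contract id defined in
+// nsEnvironment.h:
+//
+//   nsCOMPtr<nsIEnvironment> env =
+//     do_GetService("@mozilla.org/process/environment;1");
+//   if (env) {
+//     env->Set(NS_LITERAL_STRING("MOZ_EXAMPLE"), NS_LITERAL_STRING("1"));
+//     nsAutoString value;
+//     env->Get(NS_LITERAL_STRING("MOZ_EXAMPLE"), value);
+//   }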
+
diff --git a/xpcom/threads/nsIEventTarget.idl b/xpcom/threads/nsIEventTarget.idl
new file mode 100644
index 000000000..a6f9068dc
--- /dev/null
+++ b/xpcom/threads/nsIEventTarget.idl
@@ -0,0 +1,127 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "nsISupports.idl"
+#include "nsIRunnable.idl"
+%{C++
+#include "nsCOMPtr.h"
+#include "mozilla/AlreadyAddRefed.h"
+%}
+
+native alreadyAddRefed_nsIRunnable(already_AddRefed<nsIRunnable>);
+
+[scriptable, uuid(88145945-3278-424e-9f37-d874cbdd9f6f)]
+interface nsIEventTarget : nsISupports
+{
+ /* until we can get rid of all uses, keep the non-alreadyAddRefed<> version */
+%{C++
+ nsresult Dispatch(nsIRunnable* aEvent, uint32_t aFlags) {
+ return Dispatch(nsCOMPtr<nsIRunnable>(aEvent).forget(), aFlags);
+ }
+%}
+
+ /**
+ * This flag specifies the default mode of event dispatch, whereby the event
+ * is simply queued for later processing. When this flag is specified,
+ * dispatch returns immediately after the event is queued.
+ */
+ const unsigned long DISPATCH_NORMAL = 0;
+
+ /**
+ * This flag specifies the synchronous mode of event dispatch, in which the
+ * dispatch method does not return until the event has been processed.
+ *
+ * NOTE: passing this flag to dispatch may have the side-effect of causing
+ * other events on the current thread to be processed while waiting for the
+ * given event to be processed.
+ */
+ const unsigned long DISPATCH_SYNC = 1;
+
+ /**
+ * This flag specifies that the dispatch is occurring from a running event
+ * that was dispatched to the same event target, and that event is about to
+ * finish.
+ *
+ * A thread pool can use this as an optimization hint to not spin up
+ * another thread, since the current thread is about to become idle.
+ *
+ * These events are always async.
+ */
+ const unsigned long DISPATCH_AT_END = 2;
+
+ /**
+ * Check to see if this event target is associated with the current thread.
+ *
+ * @returns
+ * A boolean value that if "true" indicates that events dispatched to this
+ * event target will run on the current thread (i.e., the thread calling
+ * this method).
+ */
+ boolean isOnCurrentThread();
+
+ /**
+ * Dispatch an event to this event target. This function may be called from
+ * any thread, and it may be called re-entrantly.
+ *
+ * @param event
+ * The alreadyAddRefed<> event to dispatch.
+ * NOTE that the event will be leaked if it fails to dispatch.
+ * @param flags
+ * The flags modifying event dispatch. The flags are described in detail
+ * below.
+ *
+ * @throws NS_ERROR_INVALID_ARG
+ * Indicates that event is null.
+ * @throws NS_ERROR_UNEXPECTED
+ * Indicates that the thread is shutting down and has finished processing
+ * events, so this event would never run and has not been dispatched.
+ */
+ [noscript, binaryname(Dispatch)] void dispatchFromC(in alreadyAddRefed_nsIRunnable event, in unsigned long flags);
+ /**
+ * Version of Dispatch to expose to JS, which doesn't require an alreadyAddRefed<>
+ * (it will be converted to that internally)
+ *
+ * @param event
+ * The (raw) event to dispatch.
+ * @param flags
+ * The flags modifying event dispatch. The flags are described in detail
+ * below.
+ *
+ * @throws NS_ERROR_INVALID_ARG
+ * Indicates that event is null.
+ * @throws NS_ERROR_UNEXPECTED
+ * Indicates that the thread is shutting down and has finished processing
+ * events, so this event would never run and has not been dispatched.
+ */
+ [binaryname(DispatchFromScript)] void dispatch(in nsIRunnable event, in unsigned long flags);
+ /**
+ * Dispatch an event to this event target, but do not run it before delay
+ * milliseconds have passed. This function may be called from any thread.
+ *
+ * @param event
+ * The alreadyAddrefed<> event to dispatch.
+ * @param delay
+ * The delay (in ms) before running the event. If event does not rise to
+ * the top of the event queue before the delay has passed, it will be set
+ * aside to execute once the delay has passed. Otherwise, it will be
+ * executed immediately.
+ *
+ * @throws NS_ERROR_INVALID_ARG
+ * Indicates that event is null.
+ * @throws NS_ERROR_UNEXPECTED
+ * Indicates that the thread is shutting down and has finished processing
+ * events, so this event would never run and has not been dispatched, or
+ * that delay is zero.
+ */
+ [noscript] void delayedDispatch(in alreadyAddRefed_nsIRunnable event, in unsigned long delay);
+};
+
+%{C++
+// convenient aliases:
+#define NS_DISPATCH_NORMAL nsIEventTarget::DISPATCH_NORMAL
+#define NS_DISPATCH_SYNC nsIEventTarget::DISPATCH_SYNC
+#define NS_DISPATCH_AT_END nsIEventTarget::DISPATCH_AT_END
+%}
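+
+// A minimal C++ usage sketch (illustrative only; SomeRunnable is a
+// hypothetical runnable type): dispatch an event asynchronously to a target.
+//
+//   nsCOMPtr<nsIThread> target = do_GetMainThread();
+//   nsCOMPtr<nsIRunnable> event = new SomeRunnable();
+//   target->Dispatch(event.forget(), nsIEventTarget::DISPATCH_NORMAL);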
diff --git a/xpcom/threads/nsIIdlePeriod.idl b/xpcom/threads/nsIIdlePeriod.idl
new file mode 100644
index 000000000..aa72b2171
--- /dev/null
+++ b/xpcom/threads/nsIIdlePeriod.idl
@@ -0,0 +1,32 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "nsISupports.idl"
+
+%{C++
+namespace mozilla {
+class TimeStamp;
+}
+%}
+
+native TimeStamp(mozilla::TimeStamp);
+
+/**
+ * An instance implementing nsIIdlePeriod is used by an associated
+ * nsIThread to estimate when it is likely that it will receive an
+ * event.
+ */
+[builtinclass, uuid(21dd35a2-eae9-4bd8-b470-0dfa35a0e3b9)]
+interface nsIIdlePeriod : nsISupports
+{
+ /**
+ * Return an estimate of a point in time in the future when we
+ * think that the associated thread will become busy. Should
+ * return TimeStamp() (i.e. the null time) or a time less than
+ * TimeStamp::Now() if the thread is currently busy or will become
+ * busy very soon.
+ */
+ TimeStamp getIdlePeriodHint();
+};
diff --git a/xpcom/threads/nsIIncrementalRunnable.h b/xpcom/threads/nsIIncrementalRunnable.h
new file mode 100644
index 000000000..526bc165e
--- /dev/null
+++ b/xpcom/threads/nsIIncrementalRunnable.h
@@ -0,0 +1,41 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef nsIIncrementalRunnable_h__
+#define nsIIncrementalRunnable_h__
+
+#include "nsISupports.h"
+#include "mozilla/TimeStamp.h"
+
+#define NS_IINCREMENTALRUNNABLE_IID \
+{ 0x688be92e, 0x7ade, 0x4fdc, \
+{ 0x9d, 0x83, 0x74, 0xcb, 0xef, 0xf4, 0xa5, 0x2c } }
+
+
+/**
+ * A task interface for tasks that can schedule their work to happen
+ * in increments bounded by a deadline.
+ */
+class nsIIncrementalRunnable : public nsISupports
+{
+public:
+ NS_DECLARE_STATIC_IID_ACCESSOR(NS_IINCREMENTALRUNNABLE_IID)
+
+ /**
+ * Notify the task of a point in time in the future when the task
+ * should stop executing.
+ */
+ virtual void SetDeadline(mozilla::TimeStamp aDeadline) = 0;
+
+protected:
+ nsIIncrementalRunnable() { }
+ virtual ~nsIIncrementalRunnable() {}
+};
+
+NS_DEFINE_STATIC_IID_ACCESSOR(nsIIncrementalRunnable,
+ NS_IINCREMENTALRUNNABLE_IID)
+
+#endif // nsIIncrementalRunnable_h__
diff --git a/xpcom/threads/nsIProcess.idl b/xpcom/threads/nsIProcess.idl
new file mode 100644
index 000000000..2c7dcd55e
--- /dev/null
+++ b/xpcom/threads/nsIProcess.idl
@@ -0,0 +1,99 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "nsISupports.idl"
+
+interface nsIFile;
+interface nsIObserver;
+
+[scriptable, uuid(609610de-9954-4a63-8a7c-346350a86403)]
+interface nsIProcess : nsISupports
+{
+ /**
+ * Initialises the process with an executable to be run. Call the run method
+ * to run the executable.
+ * @param executable The executable to run.
+ */
+ void init(in nsIFile executable);
+
+ /**
+   * Kills the running process. When this method returns, the process will
+   * either have been killed or a failure will have been returned.
+ */
+ void kill();
+
+ /**
+ * Executes the file this object was initialized with
+ * @param blocking Whether to wait until the process terminates before
+   *                 returning or not.
+ * @param args An array of arguments to pass to the process in the
+ * native character set.
+ * @param count The length of the args array.
+ */
+ void run(in boolean blocking, [array, size_is(count)] in string args,
+ in unsigned long count);
+
+ /**
+ * Executes the file this object was initialized with optionally calling
+ * an observer after the process has finished running.
+ * @param args An array of arguments to pass to the process in the
+ * native character set.
+ * @param count The length of the args array.
+ * @param observer An observer to notify when the process has completed. It
+ * will receive this process instance as the subject and
+ * "process-finished" or "process-failed" as the topic. The
+ * observer will be notified on the main thread.
+ * @param holdWeak Whether to use a weak reference to hold the observer.
+ */
+ void runAsync([array, size_is(count)] in string args, in unsigned long count,
+ [optional] in nsIObserver observer, [optional] in boolean holdWeak);
+
+ /**
+ * Executes the file this object was initialized with
+ * @param blocking Whether to wait until the process terminates before
+   *                 returning or not.
+ * @param args An array of arguments to pass to the process in UTF-16
+ * @param count The length of the args array.
+ */
+ void runw(in boolean blocking, [array, size_is(count)] in wstring args,
+ in unsigned long count);
+
+ /**
+ * Executes the file this object was initialized with optionally calling
+ * an observer after the process has finished running.
+ * @param args An array of arguments to pass to the process in UTF-16
+ * @param count The length of the args array.
+ * @param observer An observer to notify when the process has completed. It
+ * will receive this process instance as the subject and
+ * "process-finished" or "process-failed" as the topic. The
+ * observer will be notified on the main thread.
+ * @param holdWeak Whether to use a weak reference to hold the observer.
+ */
+ void runwAsync([array, size_is(count)] in wstring args,
+ in unsigned long count,
+ [optional] in nsIObserver observer, [optional] in boolean holdWeak);
+
+ /**
+ * The process identifier of the currently running process. This will only
+ * be available after the process has started and may not be available on
+ * some platforms.
+ */
+ readonly attribute unsigned long pid;
+
+ /**
+ * The exit value of the process. This is only valid after the process has
+ * exited.
+ */
+ readonly attribute long exitValue;
+
+ /**
+ * Returns whether the process is currently running or not.
+ */
+ readonly attribute boolean isRunning;
+};
+
+%{C++
+
+#define NS_PROCESS_CONTRACTID "@mozilla.org/process/util;1"
+%}
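
A typical C++ caller creates the component through the contract ID defined just above and launches it non-blocking. This is only a sketch; LaunchHelper and the "--version" argument are invented:

    #include "nsCOMPtr.h"
    #include "nsComponentManagerUtils.h"
    #include "nsIFile.h"
    #include "nsIProcess.h"

    nsresult LaunchHelper(nsIFile* aExecutable)
    {
      nsresult rv;
      nsCOMPtr<nsIProcess> process =
        do_CreateInstance(NS_PROCESS_CONTRACTID, &rv);
      NS_ENSURE_SUCCESS(rv, rv);

      rv = process->Init(aExecutable);
      NS_ENSURE_SUCCESS(rv, rv);

      // Non-blocking, no observer: fire and forget.
      const char* args[] = { "--version" };
      return process->RunAsync(args, 1, nullptr, false);
    }
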
diff --git a/xpcom/threads/nsIRunnable.idl b/xpcom/threads/nsIRunnable.idl
new file mode 100644
index 000000000..4d26f72d9
--- /dev/null
+++ b/xpcom/threads/nsIRunnable.idl
@@ -0,0 +1,27 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "nsISupports.idl"
+
+/**
+ * Represents a task which can be dispatched to a thread for execution.
+ */
+
+[scriptable, function, uuid(4a2abaf0-6886-11d3-9382-00104ba0fd40)]
+interface nsIRunnable : nsISupports
+{
+ /**
+ * The function implementing the task to be run.
+ */
+ void run();
+};
+
+[uuid(e75aa42a-80a9-11e6-afb5-e89d87348e2c)]
+interface nsIRunnablePriority : nsISupports
+{
+ const unsigned short PRIORITY_NORMAL = 0;
+ const unsigned short PRIORITY_HIGH = 1;
+ readonly attribute unsigned long priority;
+};
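
In C++ the usual way to get an nsIRunnable is to subclass mozilla::Runnable (declared in nsThreadUtils.h), which supplies the XPCOM boilerplate. A minimal sketch, with an invented class name:

    #include <stdio.h>
    #include "nsThreadUtils.h"

    class HelloTask final : public mozilla::Runnable
    {
    public:
      NS_IMETHOD Run() override
      {
        printf("HelloTask::Run on some thread\n");
        return NS_OK;
      }
    };

    // Queue it, e.g. on the main thread:
    //   NS_DispatchToMainThread(new HelloTask());
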
diff --git a/xpcom/threads/nsISupportsPriority.idl b/xpcom/threads/nsISupportsPriority.idl
new file mode 100644
index 000000000..579c280cf
--- /dev/null
+++ b/xpcom/threads/nsISupportsPriority.idl
@@ -0,0 +1,45 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "nsISupports.idl"
+
+/**
+ * This interface exposes the general notion of a scheduled object with an
+ * integral priority value. Following UNIX conventions, smaller (and possibly
+ * negative) values have higher priority.
+ *
+ * This interface does not strictly define what happens when the priority of an
+ * object is changed. An implementation of this interface is free to define
+ * the side-effects of changing the priority of an object. In some cases,
+ * changing the priority of an object may be disallowed (resulting in an
+ * exception being thrown) or may simply be ignored.
+ */
+[scriptable, uuid(aa578b44-abd5-4c19-8b14-36d4de6fdc36)]
+interface nsISupportsPriority : nsISupports
+{
+ /**
+ * Typical priority values.
+ */
+ const long PRIORITY_HIGHEST = -20;
+ const long PRIORITY_HIGH = -10;
+ const long PRIORITY_NORMAL = 0;
+ const long PRIORITY_LOW = 10;
+ const long PRIORITY_LOWEST = 20;
+
+ /**
+ * This attribute may be modified to change the priority of this object. The
+ * implementation of this interface is free to truncate a given priority
+ * value to whatever limits are appropriate. Typically, this attribute is
+ * initialized to PRIORITY_NORMAL, but implementations may choose to assign a
+ * different initial value.
+ */
+ attribute long priority;
+
+ /**
+ * This method adjusts the priority attribute by a given delta. It helps
+ * reduce the amount of coding required to increment or decrement the value
+ * of the priority attribute.
+ */
+ void adjustPriority(in long delta);
+};
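
nsThread (later in this patch) is one implementer, so a caller can lower a thread's priority through QueryInterface. A hedged sketch; Deprioritize is an invented helper:

    #include "nsCOMPtr.h"
    #include "nsISupportsPriority.h"

    void Deprioritize(nsISupports* aObject)
    {
      nsCOMPtr<nsISupportsPriority> priority = do_QueryInterface(aObject);
      if (priority) {
        // Absolute: move to the "low" bucket.
        priority->SetPriority(nsISupportsPriority::PRIORITY_LOW);
      }
    }

    // Relative changes use adjustPriority(), e.g.
    //   priority->AdjustPriority(10);  // larger delta == lower priority
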
diff --git a/xpcom/threads/nsIThread.idl b/xpcom/threads/nsIThread.idl
new file mode 100644
index 000000000..fbfc8d4fb
--- /dev/null
+++ b/xpcom/threads/nsIThread.idl
@@ -0,0 +1,149 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "nsIEventTarget.idl"
+#include "nsIIdlePeriod.idl"
+
+%{C++
+#include "mozilla/AlreadyAddRefed.h"
+%}
+
+[ptr] native PRThread(PRThread);
+
+native alreadyAddRefed_nsIIdlePeriod(already_AddRefed<nsIIdlePeriod>);
+
+/**
+ * This interface provides a high-level abstraction for an operating system
+ * thread.
+ *
+ * Threads have a built-in event queue, and a thread is an event target that
+ * can receive nsIRunnable objects (events) to be processed on the thread.
+ *
+ * See nsIThreadManager for the API used to create and locate threads.
+ */
+[scriptable, uuid(5801d193-29d1-4964-a6b7-70eb697ddf2b)]
+interface nsIThread : nsIEventTarget
+{
+ /**
+ * @returns
+ * The NSPR thread object corresponding to this nsIThread.
+ */
+ [noscript] readonly attribute PRThread PRThread;
+
+ /**
+ * @returns
+ * Whether or not this thread may call into JS. Used in the profiler
+ * to avoid some unnecessary locking.
+ */
+ [noscript] attribute boolean CanInvokeJS;
+
+
+ /**
+ * Shutdown the thread. This method prevents further dispatch of events to
+ * the thread, and it causes any pending events to run to completion before
+ * the thread joins (see PR_JoinThread) with the current thread. During this
+ * method call, events for the current thread may be processed.
+ *
+ * This method MAY NOT be executed from the thread itself. Instead, it is
+ * meant to be executed from another thread (usually the thread that created
+ * this thread or the main application thread). When this function returns,
+ * the thread will be shutdown, and it will no longer be possible to dispatch
+ * events to the thread.
+ *
+ * @throws NS_ERROR_UNEXPECTED
+ * Indicates that this method was erroneously called when this thread was
+ * the current thread, that this thread was not created with a call to
+ * nsIThreadManager::NewThread, or if this method was called more than once
+ * on the thread object.
+ */
+ void shutdown();
+
+ /**
+ * This method may be called to determine if there are any events ready to be
+ * processed. It may only be called when this thread is the current thread.
+ *
+ * Because events may be added to this thread by another thread, a "false"
+ * result does not mean that this thread has no pending events. It only
+ * means that there were no pending events when this method was called.
+ *
+ * @returns
+ * A boolean value that if "true" indicates that this thread has one or
+ * more pending events.
+ *
+ * @throws NS_ERROR_UNEXPECTED
+ * Indicates that this method was erroneously called when this thread was
+ * not the current thread.
+ */
+ boolean hasPendingEvents();
+
+ /**
+ * Process the next event. If there are no pending events, then this method
+ * may wait -- depending on the value of the mayWait parameter -- until an
+ * event is dispatched to this thread. This method is re-entrant but may
+ * only be called if this thread is the current thread.
+ *
+ * @param mayWait
+ * A boolean parameter that if "true" indicates that the method may block
+ * the calling thread to wait for a pending event.
+ *
+ * @returns
+ * A boolean value that if "true" indicates that an event was processed.
+ *
+ * @throws NS_ERROR_UNEXPECTED
+ * Indicates that this method was erroneously called when this thread was
+ * not the current thread.
+ */
+ boolean processNextEvent(in boolean mayWait);
+
+ /**
+ * Shutdown the thread asynchronously. This method immediately prevents
+ * further dispatch of events to the thread, and it causes any pending events
+ * to run to completion before this thread joins with the current thread.
+ *
+ * UNLIKE shutdown() this does not process events on the current thread.
+ * Instead it merely ensures that the current thread continues running until
+ * this thread has shut down.
+ *
+ * This method MAY NOT be executed from the thread itself. Instead, it is
+ * meant to be executed from another thread (usually the thread that created
+ * this thread or the main application thread). When this function returns,
+ * the thread will continue running until it exhausts its event queue.
+ *
+ * @throws NS_ERROR_UNEXPECTED
+ * Indicates that this method was erroneously called when this thread was
+ * the current thread, that this thread was not created with a call to
+ * nsIThreadManager::NewThread, or if this method was called more than once
+ * on the thread object.
+ */
+ void asyncShutdown();
+
+ /**
+ * Register an instance of nsIIdlePeriod which works as a facade of
+ * the abstract notion of a "current idle period". The
+ * nsIIdlePeriod should always represent the "current" idle period
+ * with regard to expected work for the thread. The thread is free
+ * to use this when there are no higher prioritized tasks to process
+ * to determine if it is reasonable to schedule tasks that could run
+ * when the thread is idle. The responsibility of the registered
+ * nsIIdlePeriod is to answer with an estimated deadline at which
+ * the thread should expect that it could receive new higher
+ * priority tasks.
+ */
+ [noscript] void registerIdlePeriod(in alreadyAddRefed_nsIIdlePeriod aIdlePeriod);
+
+ /**
+ * Dispatch an event to the thread's idle queue. This function may be called
+ * from any thread, and it may be called re-entrantly.
+ *
+ * @param event
+ * The alreadyAddRefed<> event to dispatch.
+ * NOTE that the event will be leaked if it fails to dispatch.
+ *
+ * @throws NS_ERROR_INVALID_ARG
+ * Indicates that event is null.
+ */
+ [noscript] void idleDispatch(in alreadyAddRefed_nsIRunnable event);
+};
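
Putting the pieces together from C++, a background thread is usually created with the NS_NewThread helper from nsThreadUtils.h, handed work through its nsIEventTarget side, and then shut down from the creating thread. A sketch (RunOnBackgroundThread is invented):

    #include "nsCOMPtr.h"
    #include "nsIThread.h"
    #include "nsThreadUtils.h"

    nsresult RunOnBackgroundThread(nsIRunnable* aTask)
    {
      nsCOMPtr<nsIThread> thread;
      nsresult rv = NS_NewThread(getter_AddRefs(thread));
      NS_ENSURE_SUCCESS(rv, rv);

      rv = thread->Dispatch(aTask, NS_DISPATCH_NORMAL);
      NS_ENSURE_SUCCESS(rv, rv);

      // Blocks until the dispatched event has run; must NOT be called on
      // `thread` itself (see the shutdown() documentation above).
      return thread->Shutdown();
    }
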
diff --git a/xpcom/threads/nsIThreadInternal.idl b/xpcom/threads/nsIThreadInternal.idl
new file mode 100644
index 000000000..e5d7cc83f
--- /dev/null
+++ b/xpcom/threads/nsIThreadInternal.idl
@@ -0,0 +1,135 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "nsIThread.idl"
+
+interface nsIRunnable;
+interface nsIThreadObserver;
+
+/**
+ * The XPCOM thread object implements this interface, which allows a consumer
+ * to observe dispatch activity on the thread.
+ */
+[scriptable, uuid(a3a72e5f-71d9-4add-8f30-59a78fb6d5eb)]
+interface nsIThreadInternal : nsIThread
+{
+ /**
+ * Get/set the current thread observer (may be null). This attribute may be
+ * read from any thread, but must only be set on the thread corresponding to
+ * this thread object. The observer will be released on the thread
+ * corresponding to this thread object after all other events have been
+ * processed during a call to Shutdown.
+ */
+ attribute nsIThreadObserver observer;
+
+ /**
+   * Add an observer that will *only* receive onProcessNextEvent and
+   * afterProcessNextEvent callbacks. Always called
+ * on the target thread, and the implementation does not have to be
+ * threadsafe. Order of callbacks is not guaranteed (i.e.
+ * afterProcessNextEvent may be called first depending on whether or not the
+ * observer is added in a nested loop). Holds a strong ref.
+ */
+ void addObserver(in nsIThreadObserver observer);
+
+ /**
+ * Remove an observer added via the addObserver call. Once removed the
+ * observer will never be called again by the thread.
+ */
+ void removeObserver(in nsIThreadObserver observer);
+
+ /**
+ * This method causes any events currently enqueued on the thread to be
+ * suppressed until PopEventQueue is called, and any event dispatched to this
+ * thread's nsIEventTarget will queue as well. Calls to PushEventQueue may be
+ * nested and must each be paired with a call to PopEventQueue in order to
+ * restore the original state of the thread. The returned nsIEventTarget may
+ * be used to push events onto the nested queue. Dispatching will be disabled
+ * once the event queue is popped. The thread will only ever process pending
+ * events for the innermost event queue. Must only be called on the target
+ * thread.
+ */
+ [noscript] nsIEventTarget pushEventQueue();
+
+ /**
+ * Revert a call to PushEventQueue. When an event queue is popped, any events
+ * remaining in the queue are appended to the elder queue. This also causes
+ * the nsIEventTarget returned from PushEventQueue to stop dispatching events.
+ * Must only be called on the target thread, and with the innermost event
+ * queue.
+ */
+ [noscript] void popEventQueue(in nsIEventTarget aInnermostTarget);
+};
+
+/**
+ * This interface provides the observer with hooks to implement a layered
+ * event queue. For example, it is possible to overlay processing events
+ * for a GUI toolkit on top of the events for a thread:
+ *
+ * var NativeQueue;
+ * Observer = {
+ * onDispatchedEvent(thread) {
+ * NativeQueue.signal();
+ * }
+ * onProcessNextEvent(thread, mayWait) {
+ * if (NativeQueue.hasNextEvent())
+ * NativeQueue.processNextEvent();
+ * while (mayWait && !thread.hasPendingEvent()) {
+ * NativeQueue.wait();
+ * NativeQueue.processNextEvent();
+ * }
+ * }
+ * };
+ *
+ * NOTE: The implementation of this interface must be threadsafe.
+ *
+ * NOTE: It is valid to change the thread's observer during a call to an
+ * observer method.
+ *
+ * NOTE: Will be split into two interfaces soon: one for onProcessNextEvent and
+ * afterProcessNextEvent, then another that inherits the first and adds
+ * onDispatchedEvent.
+ */
+[uuid(cc8da053-1776-44c2-9199-b5a629d0a19d)]
+interface nsIThreadObserver : nsISupports
+{
+ /**
+ * This method is called after an event has been dispatched to the thread.
+ * This method may be called from any thread.
+ *
+ * @param thread
+ * The thread where the event is being dispatched.
+ */
+ void onDispatchedEvent(in nsIThreadInternal thread);
+
+ /**
+ * This method is called when nsIThread::ProcessNextEvent is called. It does
+ * not guarantee that an event is actually going to be processed. This method
+ * is only called on the target thread.
+ *
+ * @param thread
+ * The thread being asked to process another event.
+ * @param mayWait
+ * Indicates whether or not the method is allowed to block the calling
+ * thread. For example, this parameter is false during thread shutdown.
+ */
+ void onProcessNextEvent(in nsIThreadInternal thread, in boolean mayWait);
+
+ /**
+ * This method is called (from nsIThread::ProcessNextEvent) after an event
+ * is processed. It does not guarantee that an event was actually processed
+   * (depending on the value of |eventWasProcessed|). This method is only called
+ * on the target thread. DO NOT EVER RUN SCRIPT FROM THIS CALLBACK!!!
+ *
+ * @param thread
+ * The thread that processed another event.
+ * @param eventWasProcessed
+ * Indicates whether an event was actually processed. May be false if the
+ * |mayWait| flag was false when calling nsIThread::ProcessNextEvent().
+ */
+ void afterProcessNextEvent(in nsIThreadInternal thread,
+ in bool eventWasProcessed);
+};
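
The comment above sketches the idea in pseudo-JS; in C++ an observer is an ordinary threadsafe XPCOM object that can be attached with nsIThreadInternal::addObserver(). A rough sketch, with an invented class name:

    #include "mozilla/Atomics.h"
    #include "nsIThreadInternal.h"

    class EventCounter final : public nsIThreadObserver
    {
    public:
      NS_DECL_THREADSAFE_ISUPPORTS

      // May be called from any thread.
      NS_IMETHOD OnDispatchedEvent(nsIThreadInternal* aThread) override
      {
        ++mDispatched;
        return NS_OK;
      }

      // Called on the target thread around each event.
      NS_IMETHOD OnProcessNextEvent(nsIThreadInternal* aThread,
                                    bool aMayWait) override
      {
        return NS_OK;
      }
      NS_IMETHOD AfterProcessNextEvent(nsIThreadInternal* aThread,
                                       bool aEventWasProcessed) override
      {
        return NS_OK;
      }

    private:
      ~EventCounter() {}
      mozilla::Atomic<uint32_t> mDispatched{0};
    };

    NS_IMPL_ISUPPORTS(EventCounter, nsIThreadObserver)
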
diff --git a/xpcom/threads/nsIThreadManager.idl b/xpcom/threads/nsIThreadManager.idl
new file mode 100644
index 000000000..9b4fc126f
--- /dev/null
+++ b/xpcom/threads/nsIThreadManager.idl
@@ -0,0 +1,68 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "nsISupports.idl"
+
+[ptr] native PRThread(PRThread);
+
+interface nsIThread;
+
+/**
+ * An interface for creating and locating nsIThread instances.
+ */
+[scriptable, uuid(1be89eca-e2f7-453b-8d38-c11ba247f6f3)]
+interface nsIThreadManager : nsISupports
+{
+ /**
+ * Default number of bytes reserved for a thread's stack, if no stack size
+ * is specified in newThread(). 0 means use platform default.
+ */
+ const unsigned long DEFAULT_STACK_SIZE = 0;
+
+ /**
+ * Create a new thread (a global, user PRThread).
+ *
+ * @param creationFlags
+ * Reserved for future use. Pass 0.
+ * @param stackSize
+ * Number of bytes to reserve for the thread's stack.
+ *
+ * @returns
+ * The newly created nsIThread object.
+ */
+ nsIThread newThread(in unsigned long creationFlags, [optional] in unsigned long stackSize);
+
+ /**
+ * Get the nsIThread object (if any) corresponding to the given PRThread.
+ * This method returns null if there is no corresponding nsIThread.
+ *
+ * @param prthread
+ * The PRThread of the nsIThread being requested.
+ *
+ * @returns
+ * The nsIThread object corresponding to the given PRThread or null if no
+ * such nsIThread exists.
+ */
+ [noscript] nsIThread getThreadFromPRThread(in PRThread prthread);
+
+ /**
+ * Get the main thread.
+ */
+ readonly attribute nsIThread mainThread;
+
+ /**
+ * Get the current thread. If the calling thread does not already have a
+ * nsIThread associated with it, then a new nsIThread will be created and
+ * associated with the current PRThread.
+ */
+ readonly attribute nsIThread currentThread;
+
+ /**
+ * This attribute is true if the calling thread is the main thread of the
+ * application process.
+ */
+ readonly attribute boolean isMainThread;
+};
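
From C++, the thread manager is reachable as a service; the contract ID "@mozilla.org/thread-manager;1" is an assumption here (nsThreadUtils.h also provides NS_GetCurrentThread()/NS_GetMainThread() shortcuts). A sketch:

    #include "nsCOMPtr.h"
    #include "nsIThreadManager.h"
    #include "nsServiceManagerUtils.h"

    nsresult GetMainThread(nsIThread** aResult)
    {
      nsCOMPtr<nsIThreadManager> tm =
        do_GetService("@mozilla.org/thread-manager;1");
      NS_ENSURE_TRUE(tm, NS_ERROR_UNEXPECTED);
      return tm->GetMainThread(aResult);
    }
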
diff --git a/xpcom/threads/nsIThreadPool.idl b/xpcom/threads/nsIThreadPool.idl
new file mode 100644
index 000000000..d04e8504a
--- /dev/null
+++ b/xpcom/threads/nsIThreadPool.idl
@@ -0,0 +1,88 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "nsIEventTarget.idl"
+
+[scriptable, uuid(ef194cab-3f86-4b61-b132-e5e96a79e5d1)]
+interface nsIThreadPoolListener : nsISupports
+{
+ /**
+ * Called when a new thread is created by the thread pool. The notification
+ * happens on the newly-created thread.
+ */
+ void onThreadCreated();
+
+ /**
+ * Called when a thread is about to be destroyed by the thread pool. The
+ * notification happens on the thread that is about to be destroyed.
+ */
+ void onThreadShuttingDown();
+};
+
+/**
+ * An interface to a thread pool. A thread pool creates a limited number of
+ * anonymous (unnamed) worker threads. An event dispatched to the thread pool
+ * will be run on the next available worker thread.
+ */
+[scriptable, uuid(76ce99c9-8e43-489a-9789-f27cc4424965)]
+interface nsIThreadPool : nsIEventTarget
+{
+ /**
+ * Shutdown the thread pool. This method may not be executed from any thread
+ * in the thread pool. Instead, it is meant to be executed from another
+ * thread (usually the thread that created this thread pool). When this
+ * function returns, the thread pool and all of its threads will be shutdown,
+ * and it will no longer be possible to dispatch tasks to the thread pool.
+ *
+ * As a side effect, events on the current thread will be processed.
+ */
+ void shutdown();
+
+ /**
+ * Get/set the maximum number of threads allowed at one time in this pool.
+ */
+ attribute unsigned long threadLimit;
+
+ /**
+ * Get/set the maximum number of idle threads kept alive.
+ */
+ attribute unsigned long idleThreadLimit;
+
+ /**
+ * Get/set the amount of time in milliseconds before an idle thread is
+ * destroyed.
+ */
+ attribute unsigned long idleThreadTimeout;
+
+ /**
+ * Get/set the number of bytes reserved for the stack of all threads in
+ * the pool. By default this is nsIThreadManager::DEFAULT_STACK_SIZE.
+ */
+ attribute unsigned long threadStackSize;
+
+ /**
+ * An optional listener that will be notified when a thread is created or
+ * destroyed in the course of the thread pool's operation.
+ *
+ * A listener will only receive notifications about threads created after the
+ * listener is set so it is recommended that the consumer set the listener
+ * before dispatching the first event. A listener that receives an
+ * onThreadCreated() notification is guaranteed to always receive the
+ * corresponding onThreadShuttingDown() notification.
+ *
+ * The thread pool takes ownership of the listener and releases it when the
+ * shutdown() method is called. Threads created after the listener is set will
+ * also take ownership of the listener so that the listener will be kept alive
+ * long enough to receive the guaranteed onThreadShuttingDown() notification.
+ */
+ attribute nsIThreadPoolListener listener;
+
+ /**
+ * Set the label for threads in the pool. All threads will be named
+ * "<aName> #<n>", where <n> is a serial number.
+ */
+ void setName(in ACString aName);
+};
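
Typical C++ usage configures the pool before the first dispatch and shuts it down from outside the pool. A sketch; the contract ID "@mozilla.org/thread-pool;1" and the limits chosen are assumptions:

    #include "nsCOMPtr.h"
    #include "nsComponentManagerUtils.h"
    #include "nsIThreadPool.h"
    #include "nsString.h"

    nsresult RunOnPool(nsIRunnable* aTask)
    {
      nsresult rv;
      nsCOMPtr<nsIThreadPool> pool =
        do_CreateInstance("@mozilla.org/thread-pool;1", &rv);
      NS_ENSURE_SUCCESS(rv, rv);

      pool->SetThreadLimit(4);
      pool->SetIdleThreadLimit(1);
      pool->SetIdleThreadTimeout(30000);  // ms
      pool->SetName(NS_LITERAL_CSTRING("Example"));

      rv = pool->Dispatch(aTask, NS_DISPATCH_NORMAL);
      NS_ENSURE_SUCCESS(rv, rv);

      // Waits for dispatched events to finish; must not be called from a
      // thread inside the pool.
      return pool->Shutdown();
    }
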
diff --git a/xpcom/threads/nsITimer.idl b/xpcom/threads/nsITimer.idl
new file mode 100644
index 000000000..ade2168f2
--- /dev/null
+++ b/xpcom/threads/nsITimer.idl
@@ -0,0 +1,244 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "nsISupports.idl"
+
+interface nsIObserver;
+interface nsIEventTarget;
+
+%{C++
+#include "mozilla/MemoryReporting.h"
+
+/**
+ * The signature of the timer callback function passed to initWithFuncCallback.
+ * This is the function that will get called when the timer expires if the
+ * timer is initialized via initWithFuncCallback.
+ *
+ * @param aTimer the timer which has expired
+ * @param aClosure opaque parameter passed to initWithFuncCallback
+ */
+class nsITimer;
+typedef void (*nsTimerCallbackFunc) (nsITimer *aTimer, void *aClosure);
+
+/**
+ * The signature of the timer name callback function passed to
+ * initWithNameableFuncCallback.
+ * This is the function that will get called when timer profiling is enabled
+ * via the "TimerFirings" log module.
+ *
+ * @param aTimer the timer which has expired
+ * @param aClosure opaque parameter passed to initWithFuncCallback
+ * @param aBuf a buffer in which to put the name
+ * @param aLen the length of the buffer
+ */
+typedef void (*nsTimerNameCallbackFunc) (nsITimer *aTimer, void *aClosure,
+ char *aBuf, size_t aLen);
+%}
+
+native nsTimerCallbackFunc(nsTimerCallbackFunc);
+native nsTimerNameCallbackFunc(nsTimerNameCallbackFunc);
+
+/**
+ * The callback interface for timers.
+ */
+interface nsITimer;
+
+[function, scriptable, uuid(a796816d-7d47-4348-9ab8-c7aeb3216a7d)]
+interface nsITimerCallback : nsISupports
+{
+ /**
+   * @param timer the timer which has expired
+ */
+ void notify(in nsITimer timer);
+};
+
+%{C++
+// Two timer deadlines must differ by less than half the PRIntervalTime domain.
+#define DELAY_INTERVAL_LIMIT PR_BIT(8 * sizeof(PRIntervalTime) - 1)
+%}
+
+/**
+ * nsITimer instances must be initialized by calling one of the "init" methods
+ * documented below. You may also re-initialize (using one of the init()
+ * methods) an existing instance to avoid the overhead of destroying and
+ * creating a timer. It is not necessary to cancel the timer in that case.
+ *
+ * By default a timer will fire on the thread that created it. Set the .target
+ * attribute to fire on a different thread. Once you have set a timer's .target
+ * and called one of its init functions, any further interactions with the timer
+ * (calling cancel(), changing member fields, etc) should only be done by the
+ * target thread, or races may occur with bad results like timers firing after
+ * they've been canceled, and/or not firing after re-initialization.
+ */
+[scriptable, uuid(3de4b105-363c-482c-a409-baac83a01bfc)]
+interface nsITimer : nsISupports
+{
+ /* Timer types */
+
+ /**
+ * Type of a timer that fires once only.
+ */
+ const short TYPE_ONE_SHOT = 0;
+
+ /**
+ * After firing, a TYPE_REPEATING_SLACK timer is stopped and not restarted
+   * until its callback completes. The specified timer period will be at
+   * least the time between when processing for the last firing of the
+   * callback completes and when the next firing occurs.
+ *
+ * This is the preferable repeating type for most situations.
+ */
+ const short TYPE_REPEATING_SLACK = 1;
+
+ /**
+ * TYPE_REPEATING_PRECISE is just a synonym for
+ * TYPE_REPEATING_PRECISE_CAN_SKIP. They used to be distinct, but the old
+ * TYPE_REPEATING_PRECISE kind was similar to TYPE_REPEATING_PRECISE_CAN_SKIP
+ * while also being less useful. So the distinction was removed.
+ */
+ const short TYPE_REPEATING_PRECISE = 2;
+
+ /**
+ * A TYPE_REPEATING_PRECISE_CAN_SKIP repeating timer aims to have constant
+ * period between firings. The processing time for each timer callback
+ * should not influence the timer period. However this timer type
+ * guarantees that it will not queue up new events to fire the callback
+ * until the previous callback event finishes firing. If the callback
+ * takes a long time, then the next callback will be scheduled immediately
+ * afterward, but only once. This is the only non-slack timer available.
+ */
+ const short TYPE_REPEATING_PRECISE_CAN_SKIP = 3;
+
+ /**
+ * Initialize a timer that will fire after the said delay.
+   * A user must keep a reference to this timer till it is
+   * no longer needed or has been cancelled.
+ *
+ * @param aObserver the callback object that observes the
+ * ``timer-callback'' topic with the subject being
+ * the timer itself when the timer fires:
+ *
+ * observe(nsISupports aSubject, => nsITimer
+ * string aTopic, => ``timer-callback''
+ * wstring data => null
+ *
+ * @param aDelay delay in milliseconds for timer to fire
+ * @param aType timer type per TYPE* consts defined above
+ */
+ void init(in nsIObserver aObserver, in unsigned long aDelay,
+ in unsigned long aType);
+
+
+ /**
+ * Initialize a timer to fire after the given millisecond interval.
+ * This version takes a function to call and a closure to pass to
+ * that function.
+ *
+ * @param aFunc The function to invoke
+ * @param aClosure An opaque pointer to pass to that function
+ * @param aDelay The millisecond interval
+ * @param aType Timer type per TYPE* consts defined above
+ */
+ [noscript] void initWithFuncCallback(in nsTimerCallbackFunc aCallback,
+ in voidPtr aClosure,
+ in unsigned long aDelay,
+ in unsigned long aType);
+
+ /**
+ * Initialize a timer to fire after the given millisecond interval.
+ * This version takes a function to call.
+ *
+ * @param aFunc nsITimerCallback interface to call when timer expires
+ * @param aDelay The millisecond interval
+ * @param aType Timer type per TYPE* consts defined above
+ */
+ void initWithCallback(in nsITimerCallback aCallback,
+ in unsigned long aDelay,
+ in unsigned long aType);
+
+ /**
+ * Cancel the timer. This method works on all types, not just on repeating
+ * timers -- you might want to cancel a TYPE_ONE_SHOT timer, and even reuse
+ * it by re-initializing it (to avoid object destruction and creation costs
+ * by conserving one timer instance).
+ */
+ void cancel();
+
+ /**
+ * Like initWithFuncCallback, but also takes a name for the timer; the name
+ * will be used when timer profiling is enabled via the "TimerFirings" log
+ * module.
+ *
+ * @param aFunc The function to invoke
+ * @param aClosure An opaque pointer to pass to that function
+ * @param aDelay The millisecond interval
+ * @param aType Timer type per TYPE* consts defined above
+ * @param aName The timer's name
+ */
+ [noscript] void initWithNamedFuncCallback(in nsTimerCallbackFunc aCallback,
+ in voidPtr aClosure,
+ in unsigned long aDelay,
+ in unsigned long aType,
+ in string aName);
+
+ /**
+ * Like initWithNamedFuncCallback, but instead of a timer name it takes a
+ * callback that will provide a name when the timer fires.
+ *
+ * @param aFunc The function to invoke
+ * @param aClosure An opaque pointer to pass to that function
+ * @param aDelay The millisecond interval
+ * @param aType Timer type per TYPE* consts defined above
+ * @param aNameCallback The callback function
+ */
+ [noscript] void initWithNameableFuncCallback(
+ in nsTimerCallbackFunc aCallback,
+ in voidPtr aClosure,
+ in unsigned long aDelay,
+ in unsigned long aType,
+ in nsTimerNameCallbackFunc aNameCallback);
+
+ /**
+ * The millisecond delay of the timeout.
+ *
+ * NOTE: Re-setting the delay on a one-shot timer that has already fired
+ * doesn't restart the timer. Call one of the init() methods to restart
+ * a one-shot timer.
+ */
+ attribute unsigned long delay;
+
+ /**
+ * The timer type - one of the above TYPE_* constants.
+ */
+ attribute unsigned long type;
+
+ /**
+   * The opaque pointer passed to initWithFuncCallback.
+ */
+ [noscript] readonly attribute voidPtr closure;
+
+ /**
+ * The nsITimerCallback object passed to initWithCallback.
+ */
+ readonly attribute nsITimerCallback callback;
+
+ /**
+ * The nsIEventTarget where the callback will be dispatched. Note that this
+ * target may only be set before the call to one of the init methods above.
+ *
+ * By default the target is the thread that created the timer.
+ */
+ attribute nsIEventTarget target;
+
+%{C++
+ virtual size_t SizeOfIncludingThis(mozilla::MallocSizeOf aMallocSizeOf) const = 0;
+%}
+};
+
+%{C++
+#define NS_TIMER_CONTRACTID "@mozilla.org/timer;1"
+#define NS_TIMER_CALLBACK_TOPIC "timer-callback"
+%}
+
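
Tying the pieces together, a one-shot timer is usually created through NS_TIMER_CONTRACTID and armed with one of the init methods. A sketch (TimerFired and StartOneShot are invented, and the 1000 ms delay is arbitrary):

    #include <stdio.h>
    #include "nsCOMPtr.h"
    #include "nsComponentManagerUtils.h"
    #include "nsITimer.h"

    static void TimerFired(nsITimer* aTimer, void* aClosure)
    {
      printf("timer fired, closure=%p\n", aClosure);
    }

    // The caller must keep `aTimer` alive until it fires or is cancelled.
    nsresult StartOneShot(nsCOMPtr<nsITimer>& aTimer)
    {
      nsresult rv;
      aTimer = do_CreateInstance(NS_TIMER_CONTRACTID, &rv);
      NS_ENSURE_SUCCESS(rv, rv);
      return aTimer->InitWithFuncCallback(TimerFired, nullptr,
                                          1000 /* ms */,
                                          nsITimer::TYPE_ONE_SHOT);
    }
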
diff --git a/xpcom/threads/nsMemoryPressure.cpp b/xpcom/threads/nsMemoryPressure.cpp
new file mode 100644
index 000000000..fea9b0437
--- /dev/null
+++ b/xpcom/threads/nsMemoryPressure.cpp
@@ -0,0 +1,54 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "nsMemoryPressure.h"
+#include "mozilla/Assertions.h"
+#include "mozilla/Atomics.h"
+
+#include "nsThreadUtils.h"
+
+using namespace mozilla;
+
+static Atomic<int32_t, Relaxed> sMemoryPressurePending;
+static_assert(MemPressure_None == 0,
+ "Bad static initialization with the default constructor.");
+
+MemoryPressureState
+NS_GetPendingMemoryPressure()
+{
+ int32_t value = sMemoryPressurePending.exchange(MemPressure_None);
+ return MemoryPressureState(value);
+}
+
+void
+NS_DispatchEventualMemoryPressure(MemoryPressureState aState)
+{
+ /*
+ * A new memory pressure event erases an ongoing memory pressure, but an
+ * existing "new" memory pressure event takes precedence over a new "ongoing"
+ * memory pressure event.
+ */
+ switch (aState) {
+ case MemPressure_None:
+ sMemoryPressurePending = MemPressure_None;
+ break;
+ case MemPressure_New:
+ sMemoryPressurePending = MemPressure_New;
+ break;
+ case MemPressure_Ongoing:
+ sMemoryPressurePending.compareExchange(MemPressure_None,
+ MemPressure_Ongoing);
+ break;
+ }
+}
+
+nsresult
+NS_DispatchMemoryPressure(MemoryPressureState aState)
+{
+ NS_DispatchEventualMemoryPressure(aState);
+ nsCOMPtr<nsIRunnable> event = new Runnable;
+ return NS_DispatchToMainThread(event);
+}
diff --git a/xpcom/threads/nsMemoryPressure.h b/xpcom/threads/nsMemoryPressure.h
new file mode 100644
index 000000000..a05728c61
--- /dev/null
+++ b/xpcom/threads/nsMemoryPressure.h
@@ -0,0 +1,77 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef nsMemoryPressure_h__
+#define nsMemoryPressure_h__
+
+#include "nscore.h"
+
+enum MemoryPressureState
+{
+ /*
+ * No memory pressure.
+ */
+ MemPressure_None = 0,
+
+ /*
+   * New memory pressure detected.
+   *
+   * On a new memory pressure, we stop everything and start cleaning up
+   * memory aggressively, in order to free as much memory as possible.
+ */
+ MemPressure_New,
+
+ /*
+ * Repeated memory pressure.
+ *
+   * A repeated memory pressure implies a softer clean-up of recent
+   * allocations. It is expected to happen after a new memory pressure,
+   * which has already cleaned up aggressively, so there is no need to hurt
+   * the reactivity of Gecko by stopping the world again.
+   *
+   * In case of conflict with a new memory pressure, the new memory pressure
+   * takes precedence over an ongoing memory pressure. The reason is that
+   * if no events are processed between two notifications (new followed by
+   * ongoing, or ongoing followed by new) we want to be as aggressive as
+   * possible in cleaning up memory. After all, we are trying to keep Gecko
+   * alive as long as possible.
+ */
+ MemPressure_Ongoing
+};
+
+/**
+ * Return and erase the latest state of the memory pressure event set by any of
+ * the corresponding dispatch functions.
+ */
+MemoryPressureState
+NS_GetPendingMemoryPressure();
+
+/**
+ * This function causes the main thread to fire a memory pressure event
+ * before processing the next event, but if there are no events pending in
+ * the main thread's event queue, the memory pressure event would not be
+ * dispatched until one is enqueued. It is infallible and does not allocate
+ * any memory.
+ *
+ * You may call this function from any thread.
+ */
+void
+NS_DispatchEventualMemoryPressure(MemoryPressureState aState);
+
+/**
+ * This function causes the main thread to fire a memory pressure event
+ * before processing the next event. We wake up the main thread by adding a
+ * dummy event to its event loop, so, unlike with
+ * NS_DispatchEventualMemoryPressure, this memory-pressure event is always
+ * fired relatively quickly, even if the event loop is otherwise empty.
+ *
+ * You may call this function from any thread.
+ */
+nsresult
+NS_DispatchMemoryPressure(MemoryPressureState aState);
+
+#endif // nsMemoryPressure_h__
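
As a usage sketch (NotifyLowMemory is an invented caller), the two dispatch flavours declared above differ only in whether the main thread is actively woken:

    #include "nsMemoryPressure.h"

    void NotifyLowMemory(bool aUrgent)
    {
      if (aUrgent) {
        // Wakes the main thread with a dummy event so the pressure event
        // fires promptly even if the event loop is idle.
        NS_DispatchMemoryPressure(MemPressure_New);
      } else {
        // Cheap and infallible; piggybacks on the next main-thread event.
        NS_DispatchEventualMemoryPressure(MemPressure_Ongoing);
      }
    }
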
diff --git a/xpcom/threads/nsProcess.h b/xpcom/threads/nsProcess.h
new file mode 100644
index 000000000..140944415
--- /dev/null
+++ b/xpcom/threads/nsProcess.h
@@ -0,0 +1,82 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef _nsPROCESSWIN_H_
+#define _nsPROCESSWIN_H_
+
+#if defined(XP_WIN)
+#define PROCESSMODEL_WINAPI
+#endif
+
+#include "mozilla/Attributes.h"
+#include "mozilla/Mutex.h"
+#include "nsIProcess.h"
+#include "nsIFile.h"
+#include "nsIThread.h"
+#include "nsIObserver.h"
+#include "nsIWeakReferenceUtils.h"
+#include "nsIObserver.h"
+#include "nsString.h"
+#ifndef XP_MACOSX
+#include "prproces.h"
+#endif
+#if defined(PROCESSMODEL_WINAPI)
+#include <windows.h>
+#include <shellapi.h>
+#endif
+
+#define NS_PROCESS_CID \
+{0x7b4eeb20, 0xd781, 0x11d4, \
+ {0x8A, 0x83, 0x00, 0x10, 0xa4, 0xe0, 0xc9, 0xca}}
+
+class nsProcess final
+ : public nsIProcess
+ , public nsIObserver
+{
+public:
+
+ NS_DECL_THREADSAFE_ISUPPORTS
+ NS_DECL_NSIPROCESS
+ NS_DECL_NSIOBSERVER
+
+ nsProcess();
+
+private:
+ ~nsProcess();
+ static void Monitor(void* aArg);
+ void ProcessComplete();
+ nsresult CopyArgsAndRunProcess(bool aBlocking, const char** aArgs,
+ uint32_t aCount, nsIObserver* aObserver,
+ bool aHoldWeak);
+ nsresult CopyArgsAndRunProcessw(bool aBlocking, const char16_t** aArgs,
+ uint32_t aCount, nsIObserver* aObserver,
+ bool aHoldWeak);
+ // The 'args' array is null-terminated.
+ nsresult RunProcess(bool aBlocking, char** aArgs, nsIObserver* aObserver,
+ bool aHoldWeak, bool aArgsUTF8);
+
+ PRThread* mThread;
+ mozilla::Mutex mLock;
+ bool mShutdown;
+ bool mBlocking;
+
+ nsCOMPtr<nsIFile> mExecutable;
+ nsString mTargetPath;
+ int32_t mPid;
+ nsCOMPtr<nsIObserver> mObserver;
+ nsWeakPtr mWeakObserver;
+
+ // These members are modified by multiple threads, any accesses should be
+ // protected with mLock.
+ int32_t mExitValue;
+#if defined(PROCESSMODEL_WINAPI)
+ HANDLE mProcess;
+#elif !defined(XP_MACOSX)
+ PRProcess* mProcess;
+#endif
+};
+
+#endif
diff --git a/xpcom/threads/nsProcessCommon.cpp b/xpcom/threads/nsProcessCommon.cpp
new file mode 100644
index 000000000..709865a09
--- /dev/null
+++ b/xpcom/threads/nsProcessCommon.cpp
@@ -0,0 +1,663 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*****************************************************************************
+ *
+ * nsProcess is used to execute new processes and specify if you want to
+ * wait (blocking) or continue (non-blocking).
+ *
+ *****************************************************************************
+ */
+
+#include "mozilla/ArrayUtils.h"
+
+#include "nsCOMPtr.h"
+#include "nsAutoPtr.h"
+#include "nsMemory.h"
+#include "nsProcess.h"
+#include "prio.h"
+#include "prenv.h"
+#include "nsCRT.h"
+#include "nsThreadUtils.h"
+#include "nsIObserverService.h"
+#include "nsXULAppAPI.h"
+#include "mozilla/Services.h"
+
+#include <stdlib.h>
+
+#if defined(PROCESSMODEL_WINAPI)
+#include "prmem.h"
+#include "nsString.h"
+#include "nsLiteralString.h"
+#include "nsReadableUtils.h"
+#else
+#ifdef XP_MACOSX
+#include <crt_externs.h>
+#include <spawn.h>
+#include <sys/wait.h>
+#include <sys/errno.h>
+#endif
+#include <sys/types.h>
+#include <signal.h>
+#endif
+
+using namespace mozilla;
+
+#ifdef XP_MACOSX
+cpu_type_t pref_cpu_types[2] = {
+#if defined(__i386__)
+ CPU_TYPE_X86,
+#elif defined(__x86_64__)
+ CPU_TYPE_X86_64,
+#elif defined(__ppc__)
+ CPU_TYPE_POWERPC,
+#endif
+ CPU_TYPE_ANY
+};
+#endif
+
+//-------------------------------------------------------------------//
+// nsIProcess implementation
+//-------------------------------------------------------------------//
+NS_IMPL_ISUPPORTS(nsProcess, nsIProcess,
+ nsIObserver)
+
+//Constructor
+nsProcess::nsProcess()
+ : mThread(nullptr)
+ , mLock("nsProcess.mLock")
+ , mShutdown(false)
+ , mBlocking(false)
+ , mPid(-1)
+ , mObserver(nullptr)
+ , mWeakObserver(nullptr)
+ , mExitValue(-1)
+#if !defined(XP_MACOSX)
+ , mProcess(nullptr)
+#endif
+{
+}
+
+//Destructor
+nsProcess::~nsProcess()
+{
+}
+
+NS_IMETHODIMP
+nsProcess::Init(nsIFile* aExecutable)
+{
+ if (mExecutable) {
+ return NS_ERROR_ALREADY_INITIALIZED;
+ }
+
+ if (NS_WARN_IF(!aExecutable)) {
+ return NS_ERROR_INVALID_ARG;
+ }
+ bool isFile;
+
+ //First make sure the file exists
+ nsresult rv = aExecutable->IsFile(&isFile);
+ if (NS_FAILED(rv)) {
+ return rv;
+ }
+ if (!isFile) {
+ return NS_ERROR_FAILURE;
+ }
+
+ //Store the nsIFile in mExecutable
+ mExecutable = aExecutable;
+ //Get the path because it is needed by the NSPR process creation
+#ifdef XP_WIN
+ rv = mExecutable->GetTarget(mTargetPath);
+ if (NS_FAILED(rv) || mTargetPath.IsEmpty())
+#endif
+ rv = mExecutable->GetPath(mTargetPath);
+
+ return rv;
+}
+
+
+#if defined(XP_WIN)
+// Out param `aWideCmdLine` must be PR_Freed by the caller.
+static int
+assembleCmdLine(char* const* aArgv, wchar_t** aWideCmdLine, UINT aCodePage)
+{
+ char* const* arg;
+ char* p;
+ char* q;
+ char* cmdLine;
+ int cmdLineSize;
+ int numBackslashes;
+ int i;
+ int argNeedQuotes;
+
+ /*
+ * Find out how large the command line buffer should be.
+ */
+ cmdLineSize = 0;
+ for (arg = aArgv; *arg; ++arg) {
+ /*
+ * \ and " need to be escaped by a \. In the worst case,
+ * every character is a \ or ", so the string of length
+     * every character is a \ or ", so the string length
+ * Finally, we need a space between arguments, and
+ * a null byte at the end of command line.
+ */
+ cmdLineSize += 2 * strlen(*arg) /* \ and " need to be escaped */
+ + 2 /* we quote every argument */
+ + 1; /* space in between, or final null */
+ }
+ p = cmdLine = (char*)PR_MALLOC(cmdLineSize * sizeof(char));
+ if (!p) {
+ return -1;
+ }
+
+ for (arg = aArgv; *arg; ++arg) {
+    /* Add a space to separate the arguments */
+ if (arg != aArgv) {
+ *p++ = ' ';
+ }
+ q = *arg;
+ numBackslashes = 0;
+ argNeedQuotes = 0;
+
+ /* If the argument contains white space, it needs to be quoted. */
+ if (strpbrk(*arg, " \f\n\r\t\v")) {
+ argNeedQuotes = 1;
+ }
+
+ if (argNeedQuotes) {
+ *p++ = '"';
+ }
+ while (*q) {
+ if (*q == '\\') {
+ numBackslashes++;
+ q++;
+ } else if (*q == '"') {
+ if (numBackslashes) {
+ /*
+ * Double the backslashes since they are followed
+ * by a quote
+ */
+ for (i = 0; i < 2 * numBackslashes; i++) {
+ *p++ = '\\';
+ }
+ numBackslashes = 0;
+ }
+ /* To escape the quote */
+ *p++ = '\\';
+ *p++ = *q++;
+ } else {
+ if (numBackslashes) {
+ /*
+ * Backslashes are not followed by a quote, so
+ * don't need to double the backslashes.
+ */
+ for (i = 0; i < numBackslashes; i++) {
+ *p++ = '\\';
+ }
+ numBackslashes = 0;
+ }
+ *p++ = *q++;
+ }
+ }
+
+ /* Now we are at the end of this argument */
+ if (numBackslashes) {
+ /*
+ * Double the backslashes if we have a quote string
+ * delimiter at the end.
+ */
+ if (argNeedQuotes) {
+ numBackslashes *= 2;
+ }
+ for (i = 0; i < numBackslashes; i++) {
+ *p++ = '\\';
+ }
+ }
+ if (argNeedQuotes) {
+ *p++ = '"';
+ }
+ }
+
+ *p = '\0';
+ int32_t numChars = MultiByteToWideChar(aCodePage, 0, cmdLine, -1, nullptr, 0);
+ *aWideCmdLine = (wchar_t*)PR_MALLOC(numChars * sizeof(wchar_t));
+ MultiByteToWideChar(aCodePage, 0, cmdLine, -1, *aWideCmdLine, numChars);
+ PR_Free(cmdLine);
+ return 0;
+}
+#endif
+
+void
+nsProcess::Monitor(void* aArg)
+{
+ RefPtr<nsProcess> process = dont_AddRef(static_cast<nsProcess*>(aArg));
+
+ if (!process->mBlocking) {
+ PR_SetCurrentThreadName("RunProcess");
+ }
+
+#if defined(PROCESSMODEL_WINAPI)
+ DWORD dwRetVal;
+ unsigned long exitCode = -1;
+
+ dwRetVal = WaitForSingleObject(process->mProcess, INFINITE);
+ if (dwRetVal != WAIT_FAILED) {
+ if (GetExitCodeProcess(process->mProcess, &exitCode) == FALSE) {
+ exitCode = -1;
+ }
+ }
+
+ // Lock in case Kill or GetExitCode are called during this
+ {
+ MutexAutoLock lock(process->mLock);
+ CloseHandle(process->mProcess);
+ process->mProcess = nullptr;
+ process->mExitValue = exitCode;
+ if (process->mShutdown) {
+ return;
+ }
+ }
+#else
+#ifdef XP_MACOSX
+ int exitCode = -1;
+ int status = 0;
+ pid_t result;
+ do {
+ result = waitpid(process->mPid, &status, 0);
+ } while (result == -1 && errno == EINTR);
+ if (result == process->mPid) {
+ if (WIFEXITED(status)) {
+ exitCode = WEXITSTATUS(status);
+ } else if (WIFSIGNALED(status)) {
+ exitCode = 256; // match NSPR's signal exit status
+ }
+ }
+#else
+ int32_t exitCode = -1;
+ if (PR_WaitProcess(process->mProcess, &exitCode) != PR_SUCCESS) {
+ exitCode = -1;
+ }
+#endif
+
+ // Lock in case Kill or GetExitCode are called during this
+ {
+ MutexAutoLock lock(process->mLock);
+#if !defined(XP_MACOSX)
+ process->mProcess = nullptr;
+#endif
+ process->mExitValue = exitCode;
+ if (process->mShutdown) {
+ return;
+ }
+ }
+#endif
+
+ // If we ran a background thread for the monitor then notify on the main
+ // thread
+ if (NS_IsMainThread()) {
+ process->ProcessComplete();
+ } else {
+ NS_DispatchToMainThread(NewRunnableMethod(process, &nsProcess::ProcessComplete));
+ }
+}
+
+void
+nsProcess::ProcessComplete()
+{
+ if (mThread) {
+ nsCOMPtr<nsIObserverService> os =
+ mozilla::services::GetObserverService();
+ if (os) {
+ os->RemoveObserver(this, "xpcom-shutdown");
+ }
+ PR_JoinThread(mThread);
+ mThread = nullptr;
+ }
+
+ const char* topic;
+ if (mExitValue < 0) {
+ topic = "process-failed";
+ } else {
+ topic = "process-finished";
+ }
+
+ mPid = -1;
+ nsCOMPtr<nsIObserver> observer;
+ if (mWeakObserver) {
+ observer = do_QueryReferent(mWeakObserver);
+ } else if (mObserver) {
+ observer = mObserver;
+ }
+ mObserver = nullptr;
+ mWeakObserver = nullptr;
+
+ if (observer) {
+ observer->Observe(NS_ISUPPORTS_CAST(nsIProcess*, this), topic, nullptr);
+ }
+}
+
+// XXXldb |aArgs| has the wrong const-ness
+NS_IMETHODIMP
+nsProcess::Run(bool aBlocking, const char** aArgs, uint32_t aCount)
+{
+ return CopyArgsAndRunProcess(aBlocking, aArgs, aCount, nullptr, false);
+}
+
+// XXXldb |aArgs| has the wrong const-ness
+NS_IMETHODIMP
+nsProcess::RunAsync(const char** aArgs, uint32_t aCount,
+ nsIObserver* aObserver, bool aHoldWeak)
+{
+ return CopyArgsAndRunProcess(false, aArgs, aCount, aObserver, aHoldWeak);
+}
+
+nsresult
+nsProcess::CopyArgsAndRunProcess(bool aBlocking, const char** aArgs,
+ uint32_t aCount, nsIObserver* aObserver,
+ bool aHoldWeak)
+{
+ // Add one to the aCount for the program name and one for null termination.
+ char** my_argv = nullptr;
+ my_argv = (char**)moz_xmalloc(sizeof(char*) * (aCount + 2));
+ if (!my_argv) {
+ return NS_ERROR_OUT_OF_MEMORY;
+ }
+
+ my_argv[0] = ToNewUTF8String(mTargetPath);
+
+ for (uint32_t i = 0; i < aCount; ++i) {
+ my_argv[i + 1] = const_cast<char*>(aArgs[i]);
+ }
+
+ my_argv[aCount + 1] = nullptr;
+
+ nsresult rv = RunProcess(aBlocking, my_argv, aObserver, aHoldWeak, false);
+
+ free(my_argv[0]);
+ free(my_argv);
+ return rv;
+}
+
+// XXXldb |aArgs| has the wrong const-ness
+NS_IMETHODIMP
+nsProcess::Runw(bool aBlocking, const char16_t** aArgs, uint32_t aCount)
+{
+ return CopyArgsAndRunProcessw(aBlocking, aArgs, aCount, nullptr, false);
+}
+
+// XXXldb |aArgs| has the wrong const-ness
+NS_IMETHODIMP
+nsProcess::RunwAsync(const char16_t** aArgs, uint32_t aCount,
+ nsIObserver* aObserver, bool aHoldWeak)
+{
+ return CopyArgsAndRunProcessw(false, aArgs, aCount, aObserver, aHoldWeak);
+}
+
+nsresult
+nsProcess::CopyArgsAndRunProcessw(bool aBlocking, const char16_t** aArgs,
+ uint32_t aCount, nsIObserver* aObserver,
+ bool aHoldWeak)
+{
+ // Add one to the aCount for the program name and one for null termination.
+ char** my_argv = nullptr;
+ my_argv = (char**)moz_xmalloc(sizeof(char*) * (aCount + 2));
+ if (!my_argv) {
+ return NS_ERROR_OUT_OF_MEMORY;
+ }
+
+ my_argv[0] = ToNewUTF8String(mTargetPath);
+
+ for (uint32_t i = 0; i < aCount; i++) {
+ my_argv[i + 1] = ToNewUTF8String(nsDependentString(aArgs[i]));
+ }
+
+ my_argv[aCount + 1] = nullptr;
+
+ nsresult rv = RunProcess(aBlocking, my_argv, aObserver, aHoldWeak, true);
+
+ for (uint32_t i = 0; i <= aCount; ++i) {
+ free(my_argv[i]);
+ }
+ free(my_argv);
+ return rv;
+}
+
+nsresult
+nsProcess::RunProcess(bool aBlocking, char** aMyArgv, nsIObserver* aObserver,
+ bool aHoldWeak, bool aArgsUTF8)
+{
+ NS_WARNING_ASSERTION(!XRE_IsContentProcess(),
+ "No launching of new processes in the content process");
+
+ if (NS_WARN_IF(!mExecutable)) {
+ return NS_ERROR_NOT_INITIALIZED;
+ }
+ if (NS_WARN_IF(mThread)) {
+ return NS_ERROR_ALREADY_INITIALIZED;
+ }
+
+ if (aObserver) {
+ if (aHoldWeak) {
+ mWeakObserver = do_GetWeakReference(aObserver);
+ if (!mWeakObserver) {
+ return NS_NOINTERFACE;
+ }
+ } else {
+ mObserver = aObserver;
+ }
+ }
+
+ mExitValue = -1;
+ mPid = -1;
+
+#if defined(PROCESSMODEL_WINAPI)
+ BOOL retVal;
+ wchar_t* cmdLine = nullptr;
+
+ // |aMyArgv| is null-terminated and always starts with the program path. If
+ // the second slot is non-null then arguments are being passed.
+ if (aMyArgv[1] && assembleCmdLine(aMyArgv + 1, &cmdLine,
+ aArgsUTF8 ? CP_UTF8 : CP_ACP) == -1) {
+ return NS_ERROR_FILE_EXECUTION_FAILED;
+ }
+
+ /* The SEE_MASK_NO_CONSOLE flag is important to prevent console windows
+ * from appearing. This makes behavior the same on all platforms. The flag
+ * will not have any effect on non-console applications.
+ */
+
+ // The program name in aMyArgv[0] is always UTF-8
+ NS_ConvertUTF8toUTF16 wideFile(aMyArgv[0]);
+
+ SHELLEXECUTEINFOW sinfo;
+ memset(&sinfo, 0, sizeof(SHELLEXECUTEINFOW));
+ sinfo.cbSize = sizeof(SHELLEXECUTEINFOW);
+ sinfo.hwnd = nullptr;
+ sinfo.lpFile = wideFile.get();
+ sinfo.nShow = SW_SHOWNORMAL;
+ sinfo.fMask = SEE_MASK_FLAG_DDEWAIT |
+ SEE_MASK_NO_CONSOLE |
+ SEE_MASK_NOCLOSEPROCESS;
+
+ if (cmdLine) {
+ sinfo.lpParameters = cmdLine;
+ }
+
+ retVal = ShellExecuteExW(&sinfo);
+ if (!retVal) {
+ return NS_ERROR_FILE_EXECUTION_FAILED;
+ }
+
+ mProcess = sinfo.hProcess;
+
+ if (cmdLine) {
+ PR_Free(cmdLine);
+ }
+
+ mPid = GetProcessId(mProcess);
+#elif defined(XP_MACOSX)
+ // Initialize spawn attributes.
+ posix_spawnattr_t spawnattr;
+ if (posix_spawnattr_init(&spawnattr) != 0) {
+ return NS_ERROR_FAILURE;
+ }
+
+ // Set spawn attributes.
+ size_t attr_count = ArrayLength(pref_cpu_types);
+ size_t attr_ocount = 0;
+ if (posix_spawnattr_setbinpref_np(&spawnattr, attr_count, pref_cpu_types,
+ &attr_ocount) != 0 ||
+ attr_ocount != attr_count) {
+ posix_spawnattr_destroy(&spawnattr);
+ return NS_ERROR_FAILURE;
+ }
+
+ // Note: |aMyArgv| is already null-terminated as required by posix_spawnp.
+ pid_t newPid = 0;
+ int result = posix_spawnp(&newPid, aMyArgv[0], nullptr, &spawnattr, aMyArgv,
+ *_NSGetEnviron());
+ mPid = static_cast<int32_t>(newPid);
+
+ posix_spawnattr_destroy(&spawnattr);
+
+ if (result != 0) {
+ return NS_ERROR_FAILURE;
+ }
+#else
+ mProcess = PR_CreateProcess(aMyArgv[0], aMyArgv, nullptr, nullptr);
+ if (!mProcess) {
+ return NS_ERROR_FAILURE;
+ }
+ struct MYProcess
+ {
+ uint32_t pid;
+ };
+ MYProcess* ptrProc = (MYProcess*)mProcess;
+ mPid = ptrProc->pid;
+#endif
+
+ NS_ADDREF_THIS();
+ mBlocking = aBlocking;
+ if (aBlocking) {
+ Monitor(this);
+ if (mExitValue < 0) {
+ return NS_ERROR_FILE_EXECUTION_FAILED;
+ }
+ } else {
+ mThread = PR_CreateThread(PR_SYSTEM_THREAD, Monitor, this,
+ PR_PRIORITY_NORMAL, PR_GLOBAL_THREAD,
+ PR_JOINABLE_THREAD, 0);
+ if (!mThread) {
+ NS_RELEASE_THIS();
+ return NS_ERROR_FAILURE;
+ }
+
+ // It isn't a failure if we just can't watch for shutdown
+ nsCOMPtr<nsIObserverService> os =
+ mozilla::services::GetObserverService();
+ if (os) {
+ os->AddObserver(this, "xpcom-shutdown", false);
+ }
+ }
+
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsProcess::GetIsRunning(bool* aIsRunning)
+{
+ if (mThread) {
+ *aIsRunning = true;
+ } else {
+ *aIsRunning = false;
+ }
+
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsProcess::GetPid(uint32_t* aPid)
+{
+ if (!mThread) {
+ return NS_ERROR_FAILURE;
+ }
+ if (mPid < 0) {
+ return NS_ERROR_NOT_IMPLEMENTED;
+ }
+ *aPid = mPid;
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsProcess::Kill()
+{
+ if (!mThread) {
+ return NS_ERROR_FAILURE;
+ }
+
+ {
+ MutexAutoLock lock(mLock);
+#if defined(PROCESSMODEL_WINAPI)
+ if (TerminateProcess(mProcess, 0) == 0) {
+ return NS_ERROR_FAILURE;
+ }
+#elif defined(XP_MACOSX)
+ if (kill(mPid, SIGKILL) != 0) {
+ return NS_ERROR_FAILURE;
+ }
+#else
+ if (!mProcess || (PR_KillProcess(mProcess) != PR_SUCCESS)) {
+ return NS_ERROR_FAILURE;
+ }
+#endif
+ }
+
+ // We must null out mThread if we want IsRunning to return false immediately
+ // after this call.
+ nsCOMPtr<nsIObserverService> os = mozilla::services::GetObserverService();
+ if (os) {
+ os->RemoveObserver(this, "xpcom-shutdown");
+ }
+ PR_JoinThread(mThread);
+ mThread = nullptr;
+
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsProcess::GetExitValue(int32_t* aExitValue)
+{
+ MutexAutoLock lock(mLock);
+
+ *aExitValue = mExitValue;
+
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsProcess::Observe(nsISupports* aSubject, const char* aTopic,
+ const char16_t* aData)
+{
+ // Shutting down, drop all references
+ if (mThread) {
+ nsCOMPtr<nsIObserverService> os =
+ mozilla::services::GetObserverService();
+ if (os) {
+ os->RemoveObserver(this, "xpcom-shutdown");
+ }
+ mThread = nullptr;
+ }
+
+ mObserver = nullptr;
+ mWeakObserver = nullptr;
+
+ MutexAutoLock lock(mLock);
+ mShutdown = true;
+
+ return NS_OK;
+}
diff --git a/xpcom/threads/nsThread.cpp b/xpcom/threads/nsThread.cpp
new file mode 100644
index 000000000..63bd28ca3
--- /dev/null
+++ b/xpcom/threads/nsThread.cpp
@@ -0,0 +1,1500 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "nsThread.h"
+
+#include "base/message_loop.h"
+
+// Chromium's logging can sometimes leak through...
+#ifdef LOG
+#undef LOG
+#endif
+
+#include "mozilla/ReentrantMonitor.h"
+#include "nsMemoryPressure.h"
+#include "nsThreadManager.h"
+#include "nsIClassInfoImpl.h"
+#include "nsAutoPtr.h"
+#include "nsCOMPtr.h"
+#include "nsQueryObject.h"
+#include "pratom.h"
+#include "mozilla/CycleCollectedJSContext.h"
+#include "mozilla/Logging.h"
+#include "nsIObserverService.h"
+#include "mozilla/HangMonitor.h"
+#include "mozilla/IOInterposer.h"
+#include "mozilla/ipc/MessageChannel.h"
+#include "mozilla/ipc/BackgroundChild.h"
+#include "mozilla/Services.h"
+#include "nsXPCOMPrivate.h"
+#include "mozilla/ChaosMode.h"
+#include "mozilla/TimeStamp.h"
+#include "mozilla/Unused.h"
+#include "mozilla/dom/ScriptSettings.h"
+#include "nsIIdlePeriod.h"
+#include "nsIIncrementalRunnable.h"
+#include "nsThreadSyncDispatch.h"
+#include "LeakRefPtr.h"
+
+#ifdef MOZ_CRASHREPORTER
+#include "nsServiceManagerUtils.h"
+#include "nsICrashReporter.h"
+#include "mozilla/dom/ContentChild.h"
+#endif
+
+#ifdef XP_LINUX
+#include <sys/time.h>
+#include <sys/resource.h>
+#include <sched.h>
+#endif
+
+#define HAVE_UALARM _BSD_SOURCE || (_XOPEN_SOURCE >= 500 || \
+ _XOPEN_SOURCE && _XOPEN_SOURCE_EXTENDED) && \
+ !(_POSIX_C_SOURCE >= 200809L || _XOPEN_SOURCE >= 700)
+
+#if defined(XP_LINUX) && !defined(ANDROID) && defined(_GNU_SOURCE)
+#define HAVE_SCHED_SETAFFINITY
+#endif
+
+#ifdef XP_MACOSX
+#include <mach/mach.h>
+#include <mach/thread_policy.h>
+#endif
+
+#ifdef MOZ_CANARY
+# include <unistd.h>
+# include <execinfo.h>
+# include <signal.h>
+# include <fcntl.h>
+# include "nsXULAppAPI.h"
+#endif
+
+#if defined(NS_FUNCTION_TIMER) && defined(_MSC_VER)
+#include "nsTimerImpl.h"
+#include "mozilla/StackWalk.h"
+#endif
+#ifdef NS_FUNCTION_TIMER
+#include "nsCRT.h"
+#endif
+
+#ifdef MOZ_TASK_TRACER
+#include "GeckoTaskTracer.h"
+#include "TracedTaskCommon.h"
+using namespace mozilla::tasktracer;
+#endif
+
+using namespace mozilla;
+
+static LazyLogModule sThreadLog("nsThread");
+#ifdef LOG
+#undef LOG
+#endif
+#define LOG(args) MOZ_LOG(sThreadLog, mozilla::LogLevel::Debug, args)
+
+NS_DECL_CI_INTERFACE_GETTER(nsThread)
+
+//-----------------------------------------------------------------------------
+// Because we do not have our own nsIFactory, we have to implement nsIClassInfo
+// somewhat manually.
+
+class nsThreadClassInfo : public nsIClassInfo
+{
+public:
+ NS_DECL_ISUPPORTS_INHERITED // no mRefCnt
+ NS_DECL_NSICLASSINFO
+
+ nsThreadClassInfo()
+ {
+ }
+};
+
+NS_IMETHODIMP_(MozExternalRefCountType)
+nsThreadClassInfo::AddRef()
+{
+ return 2;
+}
+NS_IMETHODIMP_(MozExternalRefCountType)
+nsThreadClassInfo::Release()
+{
+ return 1;
+}
+NS_IMPL_QUERY_INTERFACE(nsThreadClassInfo, nsIClassInfo)
+
+NS_IMETHODIMP
+nsThreadClassInfo::GetInterfaces(uint32_t* aCount, nsIID*** aArray)
+{
+ return NS_CI_INTERFACE_GETTER_NAME(nsThread)(aCount, aArray);
+}
+
+NS_IMETHODIMP
+nsThreadClassInfo::GetScriptableHelper(nsIXPCScriptable** aResult)
+{
+ *aResult = nullptr;
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsThreadClassInfo::GetContractID(char** aResult)
+{
+ *aResult = nullptr;
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsThreadClassInfo::GetClassDescription(char** aResult)
+{
+ *aResult = nullptr;
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsThreadClassInfo::GetClassID(nsCID** aResult)
+{
+ *aResult = nullptr;
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsThreadClassInfo::GetFlags(uint32_t* aResult)
+{
+ *aResult = THREADSAFE;
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsThreadClassInfo::GetClassIDNoAlloc(nsCID* aResult)
+{
+ return NS_ERROR_NOT_AVAILABLE;
+}
+
+//-----------------------------------------------------------------------------
+
+NS_IMPL_ADDREF(nsThread)
+NS_IMPL_RELEASE(nsThread)
+NS_INTERFACE_MAP_BEGIN(nsThread)
+ NS_INTERFACE_MAP_ENTRY(nsIThread)
+ NS_INTERFACE_MAP_ENTRY(nsIThreadInternal)
+ NS_INTERFACE_MAP_ENTRY(nsIEventTarget)
+ NS_INTERFACE_MAP_ENTRY(nsISupportsPriority)
+ NS_INTERFACE_MAP_ENTRY_AMBIGUOUS(nsISupports, nsIThread)
+ if (aIID.Equals(NS_GET_IID(nsIClassInfo))) {
+ static nsThreadClassInfo sThreadClassInfo;
+ foundInterface = static_cast<nsIClassInfo*>(&sThreadClassInfo);
+ } else
+NS_INTERFACE_MAP_END
+NS_IMPL_CI_INTERFACE_GETTER(nsThread, nsIThread, nsIThreadInternal,
+ nsIEventTarget, nsISupportsPriority)
+
+//-----------------------------------------------------------------------------
+
+class nsThreadStartupEvent : public Runnable
+{
+public:
+ nsThreadStartupEvent()
+ : mMon("nsThreadStartupEvent.mMon")
+ , mInitialized(false)
+ {
+ }
+
+ // This method does not return until the thread startup object is in the
+ // completion state.
+ void Wait()
+ {
+ ReentrantMonitorAutoEnter mon(mMon);
+ while (!mInitialized) {
+ mon.Wait();
+ }
+ }
+
+ // This method needs to be public to support older compilers (xlC_r on AIX).
+ // It should not be called directly, as this class type is reference counted.
+ virtual ~nsThreadStartupEvent() {}
+
+private:
+ NS_IMETHOD Run() override
+ {
+ ReentrantMonitorAutoEnter mon(mMon);
+ mInitialized = true;
+ mon.Notify();
+ return NS_OK;
+ }
+
+ ReentrantMonitor mMon;
+ bool mInitialized;
+};
+//-----------------------------------------------------------------------------
+
+namespace {
+class DelayedRunnable : public Runnable,
+ public nsITimerCallback
+{
+public:
+ DelayedRunnable(already_AddRefed<nsIThread> aTargetThread,
+ already_AddRefed<nsIRunnable> aRunnable,
+ uint32_t aDelay)
+ : mTargetThread(aTargetThread),
+ mWrappedRunnable(aRunnable),
+ mDelayedFrom(TimeStamp::NowLoRes()),
+ mDelay(aDelay)
+ { }
+
+ NS_DECL_ISUPPORTS_INHERITED
+
+ nsresult Init()
+ {
+ nsresult rv;
+ mTimer = do_CreateInstance(NS_TIMER_CONTRACTID, &rv);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ MOZ_ASSERT(mTimer);
+ rv = mTimer->SetTarget(mTargetThread);
+
+ NS_ENSURE_SUCCESS(rv, rv);
+ return mTimer->InitWithCallback(this, mDelay, nsITimer::TYPE_ONE_SHOT);
+ }
+
+ nsresult DoRun()
+ {
+ nsCOMPtr<nsIRunnable> r = mWrappedRunnable.forget();
+ return r->Run();
+ }
+
+ NS_IMETHOD Run() override
+ {
+ // Already ran?
+ if (!mWrappedRunnable) {
+ return NS_OK;
+ }
+
+ // Are we too early?
+ if ((TimeStamp::NowLoRes() - mDelayedFrom).ToMilliseconds() < mDelay) {
+ return NS_OK; // Let the nsITimer run us.
+ }
+
+ mTimer->Cancel();
+ return DoRun();
+ }
+
+ NS_IMETHOD Notify(nsITimer* aTimer) override
+ {
+ // If we already ran, the timer should have been canceled.
+ MOZ_ASSERT(mWrappedRunnable);
+ MOZ_ASSERT(aTimer == mTimer);
+
+ return DoRun();
+ }
+
+private:
+ ~DelayedRunnable() {}
+
+ nsCOMPtr<nsIThread> mTargetThread;
+ nsCOMPtr<nsIRunnable> mWrappedRunnable;
+ nsCOMPtr<nsITimer> mTimer;
+ TimeStamp mDelayedFrom;
+ uint32_t mDelay;
+};
+
+NS_IMPL_ISUPPORTS_INHERITED(DelayedRunnable, Runnable, nsITimerCallback)
+
+} // anonymous namespace
+
+//-----------------------------------------------------------------------------
+
+struct nsThreadShutdownContext
+{
+ nsThreadShutdownContext(NotNull<nsThread*> aTerminatingThread,
+ NotNull<nsThread*> aJoiningThread,
+ bool aAwaitingShutdownAck)
+ : mTerminatingThread(aTerminatingThread)
+ , mJoiningThread(aJoiningThread)
+ , mAwaitingShutdownAck(aAwaitingShutdownAck)
+ {
+ MOZ_COUNT_CTOR(nsThreadShutdownContext);
+ }
+ ~nsThreadShutdownContext()
+ {
+ MOZ_COUNT_DTOR(nsThreadShutdownContext);
+ }
+
+ // NB: This will be the last reference.
+ NotNull<RefPtr<nsThread>> mTerminatingThread;
+ NotNull<nsThread*> mJoiningThread;
+ bool mAwaitingShutdownAck;
+};
+
+// This event is responsible for notifying nsThread::Shutdown that it is time
+// to call PR_JoinThread. It implements nsICancelableRunnable so that it can
+// run on a DOM Worker thread (where all events must implement
+// nsICancelableRunnable.)
+class nsThreadShutdownAckEvent : public CancelableRunnable
+{
+public:
+ explicit nsThreadShutdownAckEvent(NotNull<nsThreadShutdownContext*> aCtx)
+ : mShutdownContext(aCtx)
+ {
+ }
+ NS_IMETHOD Run() override
+ {
+ mShutdownContext->mTerminatingThread->ShutdownComplete(mShutdownContext);
+ return NS_OK;
+ }
+ nsresult Cancel() override
+ {
+ return Run();
+ }
+private:
+ virtual ~nsThreadShutdownAckEvent() { }
+
+ NotNull<nsThreadShutdownContext*> mShutdownContext;
+};
+
+// This event is responsible for setting mShutdownContext
+class nsThreadShutdownEvent : public Runnable
+{
+public:
+ nsThreadShutdownEvent(NotNull<nsThread*> aThr,
+ NotNull<nsThreadShutdownContext*> aCtx)
+ : mThread(aThr)
+ , mShutdownContext(aCtx)
+ {
+ }
+ NS_IMETHOD Run() override
+ {
+ mThread->mShutdownContext = mShutdownContext;
+ MessageLoop::current()->Quit();
+ return NS_OK;
+ }
+private:
+ NotNull<RefPtr<nsThread>> mThread;
+ NotNull<nsThreadShutdownContext*> mShutdownContext;
+};
+
+//-----------------------------------------------------------------------------
+
+static void
+SetThreadAffinity(unsigned int cpu)
+{
+#ifdef HAVE_SCHED_SETAFFINITY
+ cpu_set_t cpus;
+ CPU_ZERO(&cpus);
+ CPU_SET(cpu, &cpus);
+ sched_setaffinity(0, sizeof(cpus), &cpus);
+ // Don't assert sched_setaffinity's return value because it intermittently (?)
+ // fails with EINVAL on Linux x64 try runs.
+#elif defined(XP_MACOSX)
+ // OS X does not provide APIs to pin threads to specific processors, but you
+ // can tag threads as belonging to the same "affinity set" and the OS will try
+ // to run them on the same processor. To run threads on different processors,
+ // tag them as belonging to different affinity sets. Tag 0, the default, means
+ // "no affinity" so let's pretend each CPU has its own tag `cpu+1`.
+ thread_affinity_policy_data_t policy;
+ policy.affinity_tag = cpu + 1;
+ MOZ_ALWAYS_TRUE(thread_policy_set(mach_thread_self(), THREAD_AFFINITY_POLICY,
+ &policy.affinity_tag, 1) == KERN_SUCCESS);
+#elif defined(XP_WIN)
+ MOZ_ALWAYS_TRUE(SetThreadIdealProcessor(GetCurrentThread(), cpu) != -1);
+#endif
+}
+
+static void
+SetupCurrentThreadForChaosMode()
+{
+ if (!ChaosMode::isActive(ChaosFeature::ThreadScheduling)) {
+ return;
+ }
+
+#ifdef XP_LINUX
+ // PR_SetThreadPriority doesn't really work since priorities >
+ // PR_PRIORITY_NORMAL can't be set by non-root users. Instead we'll just use
+ // setpriority(2) to set random 'nice values'. In regular Linux this is only
+ // a dynamic adjustment so it still doesn't really do what we want, but tools
+ // like 'rr' can be more aggressive about honoring these values.
+ // Some of these calls may fail due to trying to lower the priority
+ // (e.g. something may have already called setpriority() for this thread).
+ // This makes it hard to have non-main threads with higher priority than the
+ // main thread, but that's hard to fix. Tools like rr can choose to honor the
+ // requested values anyway.
+ // Use just 4 priorities so there's a reasonable chance of any two threads
+ // having equal priority.
+ setpriority(PRIO_PROCESS, 0, ChaosMode::randomUint32LessThan(4));
+#else
+ // We should set the affinity here but NSPR doesn't provide a way to expose it.
+ uint32_t priority = ChaosMode::randomUint32LessThan(PR_PRIORITY_LAST + 1);
+ PR_SetThreadPriority(PR_GetCurrentThread(), PRThreadPriority(priority));
+#endif
+
+ // Force half the threads to CPU 0 so they compete for CPU
+ if (ChaosMode::randomUint32LessThan(2)) {
+ SetThreadAffinity(0);
+ }
+}
+
+/*static*/ void
+nsThread::ThreadFunc(void* aArg)
+{
+ using mozilla::ipc::BackgroundChild;
+
+ nsThread* self = static_cast<nsThread*>(aArg); // strong reference
+ self->mThread = PR_GetCurrentThread();
+ SetupCurrentThreadForChaosMode();
+
+ // Inform the ThreadManager
+ nsThreadManager::get().RegisterCurrentThread(*self);
+
+ mozilla::IOInterposer::RegisterCurrentThread();
+
+ // Wait for and process startup event
+ nsCOMPtr<nsIRunnable> event;
+ {
+ MutexAutoLock lock(self->mLock);
+ if (!self->mEvents->GetEvent(true, getter_AddRefs(event), lock)) {
+ NS_WARNING("failed waiting for thread startup event");
+ return;
+ }
+ }
+ event->Run(); // unblocks nsThread::Init
+ event = nullptr;
+
+ {
+ // Scope for MessageLoop.
+ nsAutoPtr<MessageLoop> loop(
+ new MessageLoop(MessageLoop::TYPE_MOZILLA_NONMAINTHREAD, self));
+
+ // Now, process incoming events...
+ loop->Run();
+
+ BackgroundChild::CloseForCurrentThread();
+
+ // NB: The main thread does not shut down here! It shuts down via
+ // nsThreadManager::Shutdown.
+
+ // Do NS_ProcessPendingEvents but with special handling to set
+ // mEventsAreDoomed atomically with the removal of the last event. The key
+ // invariant here is that we will never permit PutEvent to succeed if the
+ // event would be left in the queue after our final call to
+ // NS_ProcessPendingEvents. We also have to keep processing events as long
+ // as we have outstanding mRequestedShutdownContexts.
+ while (true) {
+ // Check and see if we're waiting on any threads.
+ self->WaitForAllAsynchronousShutdowns();
+
+ {
+ MutexAutoLock lock(self->mLock);
+ if (!self->mEvents->HasPendingEvent(lock)) {
+ // No events in the queue, so we will stop now. Don't let any more
+ // events be added, since they won't be processed. It is critical
+ // that no PutEvent can occur between testing that the event queue is
+ // empty and setting mEventsAreDoomed!
+ self->mEventsAreDoomed = true;
+ break;
+ }
+ }
+ NS_ProcessPendingEvents(self);
+ }
+ }
+
+ mozilla::IOInterposer::UnregisterCurrentThread();
+
+ // Inform the threadmanager that this thread is going away
+ nsThreadManager::get().UnregisterCurrentThread(*self);
+
+ // Dispatch shutdown ACK
+ NotNull<nsThreadShutdownContext*> context =
+ WrapNotNull(self->mShutdownContext);
+ MOZ_ASSERT(context->mTerminatingThread == self);
+ event = do_QueryObject(new nsThreadShutdownAckEvent(context));
+ context->mJoiningThread->Dispatch(event, NS_DISPATCH_NORMAL);
+
+ // Release any observer of the thread here.
+ self->SetObserver(nullptr);
+
+#ifdef MOZ_TASK_TRACER
+ FreeTraceInfo();
+#endif
+
+ NS_RELEASE(self);
+}
+
+//-----------------------------------------------------------------------------
+
+#ifdef MOZ_CRASHREPORTER
+// Tell the crash reporter to save a memory report if our heuristics determine
+// that an OOM failure is likely to occur soon.
+// Memory usage will not be checked more than every 30 seconds or saved more
+// than every 3 minutes.
+// If |aShouldSave == kForceReport|, a report will be saved regardless of
+// whether the process is low on memory or not. However, it will still not be
+// saved if a report was saved less than 3 minutes ago.
+bool
+nsThread::SaveMemoryReportNearOOM(ShouldSaveMemoryReport aShouldSave)
+{
+ // Keep an eye on memory usage (cheap, ~7ms) somewhat frequently,
+ // but save memory reports (expensive, ~75ms) less frequently.
+ const size_t kLowMemoryCheckSeconds = 30;
+ const size_t kLowMemorySaveSeconds = 3 * 60;
+
+ static TimeStamp nextCheck = TimeStamp::NowLoRes()
+ + TimeDuration::FromSeconds(kLowMemoryCheckSeconds);
+ static bool recentlySavedReport = false; // Keeps track of whether a report
+ // was saved last time we checked
+
+ // Are we checking again too soon?
+ TimeStamp now = TimeStamp::NowLoRes();
+ if ((aShouldSave == ShouldSaveMemoryReport::kMaybeReport ||
+ recentlySavedReport) && now < nextCheck) {
+ return false;
+ }
+
+ bool needMemoryReport = (aShouldSave == ShouldSaveMemoryReport::kForceReport);
+#ifdef XP_WIN // XXX implement on other platforms as needed
+ // If the report is forced there is no need to check whether it is necessary
+ if (aShouldSave != ShouldSaveMemoryReport::kForceReport) {
+ const size_t LOWMEM_THRESHOLD_VIRTUAL = 200 * 1024 * 1024;
+ MEMORYSTATUSEX statex;
+ statex.dwLength = sizeof(statex);
+ if (GlobalMemoryStatusEx(&statex)) {
+ if (statex.ullAvailVirtual < LOWMEM_THRESHOLD_VIRTUAL) {
+ needMemoryReport = true;
+ }
+ }
+ }
+#endif
+
+ if (needMemoryReport) {
+ if (XRE_IsContentProcess()) {
+ dom::ContentChild* cc = dom::ContentChild::GetSingleton();
+ if (cc) {
+ cc->SendNotifyLowMemory();
+ }
+ } else {
+ nsCOMPtr<nsICrashReporter> cr =
+ do_GetService("@mozilla.org/toolkit/crash-reporter;1");
+ if (cr) {
+ cr->SaveMemoryReport();
+ }
+ }
+ recentlySavedReport = true;
+ nextCheck = now + TimeDuration::FromSeconds(kLowMemorySaveSeconds);
+ } else {
+ recentlySavedReport = false;
+ nextCheck = now + TimeDuration::FromSeconds(kLowMemoryCheckSeconds);
+ }
+
+ return recentlySavedReport;
+}
+#endif
+
+#ifdef MOZ_CANARY
+int sCanaryOutputFD = -1;
+#endif
+
+nsThread::nsThread(MainThreadFlag aMainThread, uint32_t aStackSize)
+ : mLock("nsThread.mLock")
+ , mScriptObserver(nullptr)
+ , mEvents(WrapNotNull(&mEventsRoot))
+ , mEventsRoot(mLock)
+ , mIdleEventsAvailable(mLock, "[nsThread.mEventsAvailable]")
+ , mIdleEvents(mIdleEventsAvailable, nsEventQueue::eNormalQueue)
+ , mPriority(PRIORITY_NORMAL)
+ , mThread(nullptr)
+ , mNestedEventLoopDepth(0)
+ , mStackSize(aStackSize)
+ , mShutdownContext(nullptr)
+ , mShutdownRequired(false)
+ , mEventsAreDoomed(false)
+ , mIsMainThread(aMainThread)
+ , mCanInvokeJS(false)
+{
+}
+
+nsThread::~nsThread()
+{
+ NS_ASSERTION(mRequestedShutdownContexts.IsEmpty(),
+ "shouldn't be waiting on other threads to shutdown");
+#ifdef DEBUG
+ // We deliberately leak these so they can be tracked by the leak checker.
+ // If you're having nsThreadShutdownContext leaks, you can set:
+ // XPCOM_MEM_LOG_CLASSES=nsThreadShutdownContext
+ // during a test run and that will at least tell you what thread is
+ // requesting shutdown on another, which can be helpful for diagnosing
+ // the leak.
+ for (size_t i = 0; i < mRequestedShutdownContexts.Length(); ++i) {
+ Unused << mRequestedShutdownContexts[i].forget();
+ }
+#endif
+}
+
+nsresult
+nsThread::Init()
+{
+ // spawn thread and wait until it is fully setup
+ RefPtr<nsThreadStartupEvent> startup = new nsThreadStartupEvent();
+
+ NS_ADDREF_THIS();
+
+ mIdlePeriod = new IdlePeriod();
+
+ mShutdownRequired = true;
+
+ // ThreadFunc is responsible for setting mThread
+ if (!PR_CreateThread(PR_USER_THREAD, ThreadFunc, this,
+ PR_PRIORITY_NORMAL, PR_GLOBAL_THREAD,
+ PR_JOINABLE_THREAD, mStackSize)) {
+ NS_RELEASE_THIS();
+ return NS_ERROR_OUT_OF_MEMORY;
+ }
+
+ // ThreadFunc will wait for this event to be run before it tries to access
+ // mThread. By delaying insertion of this event into the queue, we ensure
+ // that mThread is set properly.
+ {
+ MutexAutoLock lock(mLock);
+ mEventsRoot.PutEvent(startup, lock); // retain a reference
+ }
+
+ // Wait for thread to call ThreadManager::SetupCurrentThread, which completes
+ // initialization of ThreadFunc.
+ startup->Wait();
+ return NS_OK;
+}
+
+nsresult
+nsThread::InitCurrentThread()
+{
+ mThread = PR_GetCurrentThread();
+ SetupCurrentThreadForChaosMode();
+
+ mIdlePeriod = new IdlePeriod();
+
+ nsThreadManager::get().RegisterCurrentThread(*this);
+ return NS_OK;
+}
+
+nsresult
+nsThread::PutEvent(nsIRunnable* aEvent, nsNestedEventTarget* aTarget)
+{
+ nsCOMPtr<nsIRunnable> event(aEvent);
+ return PutEvent(event.forget(), aTarget);
+}
+
+nsresult
+nsThread::PutEvent(already_AddRefed<nsIRunnable> aEvent, nsNestedEventTarget* aTarget)
+{
+ // We want to leak the reference when we fail to dispatch it, so that
+ // we won't release the event in a wrong thread.
+ LeakRefPtr<nsIRunnable> event(Move(aEvent));
+ nsCOMPtr<nsIThreadObserver> obs;
+
+ {
+ MutexAutoLock lock(mLock);
+ nsChainedEventQueue* queue = aTarget ? aTarget->mQueue : &mEventsRoot;
+ if (!queue || (queue == &mEventsRoot && mEventsAreDoomed)) {
+ NS_WARNING("An event was posted to a thread that will never run it (rejected)");
+ return NS_ERROR_UNEXPECTED;
+ }
+ queue->PutEvent(event.take(), lock);
+
+ // Make sure to grab the observer before dropping the lock, otherwise the
+ // event that we just placed into the queue could run and eventually delete
+ // this nsThread before the calling thread is scheduled again. We would then
+ // crash while trying to access a dead nsThread.
+ obs = mObserver;
+ }
+
+ if (obs) {
+ obs->OnDispatchedEvent(this);
+ }
+
+ return NS_OK;
+}
+
+nsresult
+nsThread::DispatchInternal(already_AddRefed<nsIRunnable> aEvent, uint32_t aFlags,
+ nsNestedEventTarget* aTarget)
+{
+ // We want to leak the reference when we fail to dispatch it, so that
+ // we won't release the event in a wrong thread.
+ LeakRefPtr<nsIRunnable> event(Move(aEvent));
+ if (NS_WARN_IF(!event)) {
+ return NS_ERROR_INVALID_ARG;
+ }
+
+ if (gXPCOMThreadsShutDown && MAIN_THREAD != mIsMainThread && !aTarget) {
+ NS_ASSERTION(false, "Failed Dispatch after xpcom-shutdown-threads");
+ return NS_ERROR_ILLEGAL_DURING_SHUTDOWN;
+ }
+
+#ifdef MOZ_TASK_TRACER
+ nsCOMPtr<nsIRunnable> tracedRunnable = CreateTracedRunnable(event.take());
+ (static_cast<TracedRunnable*>(tracedRunnable.get()))->DispatchTask();
+ // XXX tracedRunnable will always be leaked when we fail to dispatch.
+ event = tracedRunnable.forget();
+#endif
+
+ if (aFlags & DISPATCH_SYNC) {
+ nsThread* thread = nsThreadManager::get().GetCurrentThread();
+ if (NS_WARN_IF(!thread)) {
+ return NS_ERROR_NOT_AVAILABLE;
+ }
+
+ // XXX we should be able to do something better here... we should
+ // be able to monitor the slot occupied by this event and use
+ // that to tell us when the event has been processed.
+
+ RefPtr<nsThreadSyncDispatch> wrapper =
+ new nsThreadSyncDispatch(thread, event.take());
+ nsresult rv = PutEvent(wrapper, aTarget); // hold a ref
+ // Don't wait for the event to finish if we didn't dispatch it...
+ if (NS_FAILED(rv)) {
+ // PutEvent leaked the wrapper runnable object on failure, so we
+ // explicitly release this object once for that. Note that this
+ // object will be released again soon when it goes out of scope.
+ wrapper.get()->Release();
+ return rv;
+ }
+
+ // Allows waiting; ensure no locks are held that would deadlock us!
+ while (wrapper->IsPending()) {
+ NS_ProcessNextEvent(thread, true);
+ }
+ return NS_OK;
+ }
+
+ NS_ASSERTION(aFlags == NS_DISPATCH_NORMAL ||
+ aFlags == NS_DISPATCH_AT_END, "unexpected dispatch flags");
+ return PutEvent(event.take(), aTarget);
+}
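+
+// Caller-side sketch of the DISPATCH_SYNC path above (hedged: `target` and
+// `job` are hypothetical). The nsThreadSyncDispatch wrapper keeps the calling
+// thread's event loop spinning until the target thread has run the event, so
+// the caller must not hold locks that the dispatched work might also need:
+//
+//   nsCOMPtr<nsIRunnable> job = /* some runnable */;
+//   target->Dispatch(job.forget(), NS_DISPATCH_SYNC);  // blocks, re-enters event loop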
+
+bool
+nsThread::nsChainedEventQueue::GetEvent(bool aMayWait, nsIRunnable** aEvent,
+ mozilla::MutexAutoLock& aProofOfLock)
+{
+ bool retVal = false;
+ do {
+ if (mProcessSecondaryQueueRunnable) {
+ MOZ_ASSERT(mSecondaryQueue->HasPendingEvent(aProofOfLock));
+ retVal = mSecondaryQueue->GetEvent(aMayWait, aEvent, aProofOfLock);
+ MOZ_ASSERT(*aEvent);
+ mProcessSecondaryQueueRunnable = false;
+ return retVal;
+ }
+
+ // We don't want to wait if mSecondaryQueue has some events.
+ bool reallyMayWait =
+ aMayWait && !mSecondaryQueue->HasPendingEvent(aProofOfLock);
+ retVal =
+ mNormalQueue->GetEvent(reallyMayWait, aEvent, aProofOfLock);
+
+ // Let's see if we should next time process an event from the secondary
+ // queue.
+ mProcessSecondaryQueueRunnable =
+ mSecondaryQueue->HasPendingEvent(aProofOfLock);
+
+ if (*aEvent) {
+ // We got an event, return early.
+ return retVal;
+ }
+ } while (aMayWait || mProcessSecondaryQueueRunnable);
+
+ return retVal;
+}
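+
+// Worked example of the interleaving above, assuming the secondary-queue flag
+// starts out false: with N1, N2, N3 pending in the normal queue and H1, H2 in
+// the secondary (high-priority) queue, successive GetEvent calls return
+// N1, H1, N2, H2, N3 -- at most one secondary event per normal event, so
+// neither queue starves the other.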
+
+//-----------------------------------------------------------------------------
+// nsIEventTarget
+
+NS_IMETHODIMP
+nsThread::DispatchFromScript(nsIRunnable* aEvent, uint32_t aFlags)
+{
+ nsCOMPtr<nsIRunnable> event(aEvent);
+ return Dispatch(event.forget(), aFlags);
+}
+
+NS_IMETHODIMP
+nsThread::Dispatch(already_AddRefed<nsIRunnable> aEvent, uint32_t aFlags)
+{
+ LOG(("THRD(%p) Dispatch [%p %x]\n", this, /* XXX aEvent */nullptr, aFlags));
+
+ return DispatchInternal(Move(aEvent), aFlags, nullptr);
+}
+
+NS_IMETHODIMP
+nsThread::DelayedDispatch(already_AddRefed<nsIRunnable> aEvent, uint32_t aDelayMs)
+{
+ NS_ENSURE_TRUE(!!aDelayMs, NS_ERROR_UNEXPECTED);
+
+ RefPtr<DelayedRunnable> r = new DelayedRunnable(Move(do_AddRef(this)),
+ Move(aEvent),
+ aDelayMs);
+ nsresult rv = r->Init();
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ return DispatchInternal(r.forget(), 0, nullptr);
+}
+
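+// Illustrative caller-side sketch for DelayedDispatch (hedged: `worker` and
+// the lambda body are hypothetical, and NS_NewRunnableFunction is assumed to
+// come from nsThreadUtils.h). The DelayedRunnable above guarantees the task
+// runs no earlier than the requested delay, re-arming via its nsITimer if it
+// is delivered too soon:
+//
+//   nsCOMPtr<nsIRunnable> task =
+//     NS_NewRunnableFunction([]() { /* runs on `worker`, >= 500ms later */ });
+//   worker->DelayedDispatch(task.forget(), 500);
+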
+NS_IMETHODIMP
+nsThread::IsOnCurrentThread(bool* aResult)
+{
+ *aResult = (PR_GetCurrentThread() == mThread);
+ return NS_OK;
+}
+
+//-----------------------------------------------------------------------------
+// nsIThread
+
+NS_IMETHODIMP
+nsThread::GetPRThread(PRThread** aResult)
+{
+ *aResult = mThread;
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsThread::GetCanInvokeJS(bool* aResult)
+{
+ *aResult = mCanInvokeJS;
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsThread::SetCanInvokeJS(bool aCanInvokeJS)
+{
+ mCanInvokeJS = aCanInvokeJS;
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsThread::AsyncShutdown()
+{
+ LOG(("THRD(%p) async shutdown\n", this));
+
+ // XXX If we make this warn, then we hit that warning at xpcom shutdown while
+ // shutting down a thread in a thread pool. That happens b/c the thread
+ // in the thread pool is already shutdown by the thread manager.
+ if (!mThread) {
+ return NS_OK;
+ }
+
+ return !!ShutdownInternal(/* aSync = */ false) ? NS_OK : NS_ERROR_UNEXPECTED;
+}
+
+nsThreadShutdownContext*
+nsThread::ShutdownInternal(bool aSync)
+{
+ MOZ_ASSERT(mThread);
+ MOZ_ASSERT(mThread != PR_GetCurrentThread());
+ if (NS_WARN_IF(mThread == PR_GetCurrentThread())) {
+ return nullptr;
+ }
+
+ // Prevent multiple calls to this method
+ {
+ MutexAutoLock lock(mLock);
+ if (!mShutdownRequired) {
+ return nullptr;
+ }
+ mShutdownRequired = false;
+ }
+
+ NotNull<nsThread*> currentThread =
+ WrapNotNull(nsThreadManager::get().GetCurrentThread());
+
+ nsAutoPtr<nsThreadShutdownContext>& context =
+ *currentThread->mRequestedShutdownContexts.AppendElement();
+ context = new nsThreadShutdownContext(WrapNotNull(this), currentThread, aSync);
+
+ // Set mShutdownContext and wake up the thread in case it is waiting for
+ // events to process.
+ nsCOMPtr<nsIRunnable> event =
+ new nsThreadShutdownEvent(WrapNotNull(this), WrapNotNull(context.get()));
+ // XXXroc What if posting the event fails due to OOM?
+ PutEvent(event.forget(), nullptr);
+
+ // We could still end up with other events being added after the shutdown
+ // task, but that's okay because we process pending events in ThreadFunc
+ // after setting mShutdownContext just before exiting.
+ return context;
+}
+
+void
+nsThread::ShutdownComplete(NotNull<nsThreadShutdownContext*> aContext)
+{
+ MOZ_ASSERT(mThread);
+ MOZ_ASSERT(aContext->mTerminatingThread == this);
+
+ if (aContext->mAwaitingShutdownAck) {
+ // We're in a synchronous shutdown, so tell whatever is up the stack that
+ // we're done and unwind the stack so it can call us again.
+ aContext->mAwaitingShutdownAck = false;
+ return;
+ }
+
+ // Now, it should be safe to join without fear of dead-locking.
+
+ PR_JoinThread(mThread);
+ mThread = nullptr;
+
+ // We hold strong references to our event observers, and once the thread is
+ // shut down the observers can't easily unregister themselves. Do it here
+ // to avoid leaking.
+ ClearObservers();
+
+#ifdef DEBUG
+ {
+ MutexAutoLock lock(mLock);
+ MOZ_ASSERT(!mObserver, "Should have been cleared at shutdown!");
+ }
+#endif
+
+ // Delete aContext.
+ MOZ_ALWAYS_TRUE(
+ aContext->mJoiningThread->mRequestedShutdownContexts.RemoveElement(aContext));
+}
+
+void
+nsThread::WaitForAllAsynchronousShutdowns()
+{
+ while (mRequestedShutdownContexts.Length()) {
+ NS_ProcessNextEvent(this, true);
+ }
+}
+
+NS_IMETHODIMP
+nsThread::Shutdown()
+{
+ LOG(("THRD(%p) sync shutdown\n", this));
+
+ // XXX If we make this warn, then we hit that warning at xpcom shutdown while
+ // shutting down a thread in a thread pool. That happens b/c the thread
+ // in the thread pool is already shutdown by the thread manager.
+ if (!mThread) {
+ return NS_OK;
+ }
+
+ nsThreadShutdownContext* maybeContext = ShutdownInternal(/* aSync = */ true);
+ NS_ENSURE_TRUE(maybeContext, NS_ERROR_UNEXPECTED);
+ NotNull<nsThreadShutdownContext*> context = WrapNotNull(maybeContext);
+
+ // Process events on the current thread until we receive a shutdown ACK.
+ // Allows waiting; ensure no locks are held that would deadlock us!
+ while (context->mAwaitingShutdownAck) {
+ NS_ProcessNextEvent(context->mJoiningThread, true);
+ }
+
+ ShutdownComplete(context);
+
+ return NS_OK;
+}
+
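+// Caller-side sketch (hedged: `worker` and `job` are hypothetical, with
+// `worker` created via nsIThreadManager::NewThread or the NS_NewThread
+// helper). Shutdown() must be called from a different thread than `worker`,
+// and it keeps processing events on the calling thread until the ACK arrives:
+//
+//   nsCOMPtr<nsIThread> worker;
+//   NS_NewThread(getter_AddRefs(worker));
+//   worker->Dispatch(job.forget(), NS_DISPATCH_NORMAL);
+//   // ... later, typically at teardown:
+//   worker->Shutdown();  // spins this thread's event loop until joined
+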
+NS_IMETHODIMP
+nsThread::HasPendingEvents(bool* aResult)
+{
+ if (NS_WARN_IF(PR_GetCurrentThread() != mThread)) {
+ return NS_ERROR_NOT_SAME_THREAD;
+ }
+
+ {
+ MutexAutoLock lock(mLock);
+ *aResult = mEvents->HasPendingEvent(lock);
+ }
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsThread::RegisterIdlePeriod(already_AddRefed<nsIIdlePeriod> aIdlePeriod)
+{
+ if (NS_WARN_IF(PR_GetCurrentThread() != mThread)) {
+ return NS_ERROR_NOT_SAME_THREAD;
+ }
+
+ MutexAutoLock lock(mLock);
+ mIdlePeriod = aIdlePeriod;
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsThread::IdleDispatch(already_AddRefed<nsIRunnable> aEvent)
+{
+ // Currently the only supported idle dispatch is from the same
+ // thread. To support idle dispatch from another thread we need to
+ // support waking threads that are waiting for an event queue that
+ // isn't mIdleEvents.
+ MOZ_ASSERT(PR_GetCurrentThread() == mThread);
+
+ MutexAutoLock lock(mLock);
+ LeakRefPtr<nsIRunnable> event(Move(aEvent));
+
+ if (NS_WARN_IF(!event)) {
+ return NS_ERROR_INVALID_ARG;
+ }
+
+ if (mEventsAreDoomed) {
+ NS_WARNING("An idle event was posted to a thread that will never run it (rejected)");
+ return NS_ERROR_UNEXPECTED;
+ }
+
+ mIdleEvents.PutEvent(event.take(), lock);
+ return NS_OK;
+}
+
+#ifdef MOZ_CANARY
+void canary_alarm_handler(int signum);
+
+class Canary
+{
+ //XXX ToDo: support nested loops
+public:
+ Canary()
+ {
+ if (sCanaryOutputFD > 0 && EventLatencyIsImportant()) {
+ signal(SIGALRM, canary_alarm_handler);
+ ualarm(15000, 0);
+ }
+ }
+
+ ~Canary()
+ {
+ if (sCanaryOutputFD != 0 && EventLatencyIsImportant()) {
+ ualarm(0, 0);
+ }
+ }
+
+ static bool EventLatencyIsImportant()
+ {
+ return NS_IsMainThread() && XRE_IsParentProcess();
+ }
+};
+
+void canary_alarm_handler(int signum)
+{
+ void* array[30];
+ const char msg[29] = "event took too long to run:\n";
+ // use write to be safe in the signal handler
+ write(sCanaryOutputFD, msg, sizeof(msg));
+ backtrace_symbols_fd(array, backtrace(array, 30), sCanaryOutputFD);
+}
+
+#endif
+
+#define NOTIFY_EVENT_OBSERVERS(func_, params_) \
+ PR_BEGIN_MACRO \
+ if (!mEventObservers.IsEmpty()) { \
+ nsAutoTObserverArray<NotNull<nsCOMPtr<nsIThreadObserver>>, 2>::ForwardIterator \
+ iter_(mEventObservers); \
+ nsCOMPtr<nsIThreadObserver> obs_; \
+ while (iter_.HasMore()) { \
+ obs_ = iter_.GetNext(); \
+ obs_ -> func_ params_ ; \
+ } \
+ } \
+ PR_END_MACRO
+
+void
+nsThread::GetIdleEvent(nsIRunnable** aEvent, MutexAutoLock& aProofOfLock)
+{
+ MOZ_ASSERT(PR_GetCurrentThread() == mThread);
+ MOZ_ASSERT(aEvent);
+
+ TimeStamp idleDeadline;
+ {
+ MutexAutoUnlock unlock(mLock);
+ mIdlePeriod->GetIdlePeriodHint(&idleDeadline);
+ }
+
+ if (!idleDeadline || idleDeadline < TimeStamp::Now()) {
+ *aEvent = nullptr;
+ return;
+ }
+
+ mIdleEvents.GetEvent(false, aEvent, aProofOfLock);
+
+ if (*aEvent) {
+ nsCOMPtr<nsIIncrementalRunnable> incrementalEvent(do_QueryInterface(*aEvent));
+ if (incrementalEvent) {
+ incrementalEvent->SetDeadline(idleDeadline);
+ }
+ }
+}
+
+void
+nsThread::GetEvent(bool aWait, nsIRunnable** aEvent, MutexAutoLock& aProofOfLock)
+{
+ MOZ_ASSERT(PR_GetCurrentThread() == mThread);
+ MOZ_ASSERT(aEvent);
+
+ // We'll try to get an event to execute in three stages.
+ // [1] First we just try to get it from the regular queue without waiting.
+ mEvents->GetEvent(false, aEvent, aProofOfLock);
+
+ // [2] If we didn't get an event from the regular queue, try to
+ // get one from the idle queue
+ if (!*aEvent) {
+ // Since events in mEvents have higher priority than idle
+ // events, we will only consider idle events when there are no
+ // pending events in mEvents. We will for the same reason never
+ // wait for an idle event, since a higher priority event might
+ // appear at any time.
+ GetIdleEvent(aEvent, aProofOfLock);
+ }
+
+ // [3] If we neither got an event from the regular queue nor the
+ // idle queue, then if we should wait for events we block on the
+ // main queue until an event is available.
+ // If we are shutting down, then do not wait for new events.
+ if (!*aEvent && aWait) {
+ mEvents->GetEvent(aWait, aEvent, aProofOfLock);
+ }
+}
+
+NS_IMETHODIMP
+nsThread::ProcessNextEvent(bool aMayWait, bool* aResult)
+{
+ LOG(("THRD(%p) ProcessNextEvent [%u %u]\n", this, aMayWait,
+ mNestedEventLoopDepth));
+
+ if (NS_WARN_IF(PR_GetCurrentThread() != mThread)) {
+ return NS_ERROR_NOT_SAME_THREAD;
+ }
+
+ // The toplevel event loop normally blocks waiting for the next event, but
+ // if we're trying to shut this thread down, we must exit the event loop when
+ // the event queue is empty.
+ // This only applies to the toplevel event loop! Nested event loops (e.g.
+ // during sync dispatch) are waiting for some state change and must be able
+ // to block even if something has requested shutdown of the thread. Otherwise
+ // we'll just busywait as we endlessly look for an event, fail to find one,
+ // and repeat the nested event loop since its state change hasn't happened yet.
+ bool reallyWait = aMayWait && (mNestedEventLoopDepth > 0 || !ShuttingDown());
+
+ if (mIsMainThread == MAIN_THREAD) {
+ DoMainThreadSpecificProcessing(reallyWait);
+ }
+
+ ++mNestedEventLoopDepth;
+
+ // We only want to create an AutoNoJSAPI on threads that actually do DOM stuff
+ // (including workers). Those are exactly the threads that have an
+ // mScriptObserver.
+ Maybe<dom::AutoNoJSAPI> noJSAPI;
+ bool callScriptObserver = !!mScriptObserver;
+ if (callScriptObserver) {
+ noJSAPI.emplace();
+ mScriptObserver->BeforeProcessTask(reallyWait);
+ }
+
+ nsCOMPtr<nsIThreadObserver> obs = mObserver;
+ if (obs) {
+ obs->OnProcessNextEvent(this, reallyWait);
+ }
+
+ NOTIFY_EVENT_OBSERVERS(OnProcessNextEvent, (this, reallyWait));
+
+#ifdef MOZ_CANARY
+ Canary canary;
+#endif
+ nsresult rv = NS_OK;
+
+ {
+ // Scope for |event| to make sure that its destructor fires while
+ // mNestedEventLoopDepth has been incremented, since that destructor can
+ // also do work.
+ nsCOMPtr<nsIRunnable> event;
+ {
+ MutexAutoLock lock(mLock);
+ GetEvent(reallyWait, getter_AddRefs(event), lock);
+ }
+
+ *aResult = (event.get() != nullptr);
+
+ if (event) {
+ LOG(("THRD(%p) running [%p]\n", this, event.get()));
+ if (MAIN_THREAD == mIsMainThread) {
+ HangMonitor::NotifyActivity();
+ }
+ event->Run();
+ } else if (aMayWait) {
+ MOZ_ASSERT(ShuttingDown(),
+ "This should only happen when shutting down");
+ rv = NS_ERROR_UNEXPECTED;
+ }
+ }
+
+ NOTIFY_EVENT_OBSERVERS(AfterProcessNextEvent, (this, *aResult));
+
+ if (obs) {
+ obs->AfterProcessNextEvent(this, *aResult);
+ }
+
+ if (callScriptObserver) {
+ if (mScriptObserver) {
+ mScriptObserver->AfterProcessTask(mNestedEventLoopDepth);
+ }
+ noJSAPI.reset();
+ }
+
+ --mNestedEventLoopDepth;
+
+ return rv;
+}
+
+//-----------------------------------------------------------------------------
+// nsISupportsPriority
+
+NS_IMETHODIMP
+nsThread::GetPriority(int32_t* aPriority)
+{
+ *aPriority = mPriority;
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsThread::SetPriority(int32_t aPriority)
+{
+ if (NS_WARN_IF(!mThread)) {
+ return NS_ERROR_NOT_INITIALIZED;
+ }
+
+ // NSPR defines the following four thread priorities:
+ // PR_PRIORITY_LOW
+ // PR_PRIORITY_NORMAL
+ // PR_PRIORITY_HIGH
+ // PR_PRIORITY_URGENT
+ // We map the priority values defined on nsISupportsPriority to these values.
+
+ mPriority = aPriority;
+
+ PRThreadPriority pri;
+ if (mPriority <= PRIORITY_HIGHEST) {
+ pri = PR_PRIORITY_URGENT;
+ } else if (mPriority < PRIORITY_NORMAL) {
+ pri = PR_PRIORITY_HIGH;
+ } else if (mPriority > PRIORITY_NORMAL) {
+ pri = PR_PRIORITY_LOW;
+ } else {
+ pri = PR_PRIORITY_NORMAL;
+ }
+ // If chaos mode is active, retain the randomly chosen priority
+ if (!ChaosMode::isActive(ChaosFeature::ThreadScheduling)) {
+ PR_SetThreadPriority(mThread, pri);
+ }
+
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsThread::AdjustPriority(int32_t aDelta)
+{
+ return SetPriority(mPriority + aDelta);
+}
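+
+// Worked mapping for the bucketing above, using the constants from
+// nsISupportsPriority.idl (PRIORITY_HIGHEST = -20, PRIORITY_HIGH = -10,
+// PRIORITY_NORMAL = 0, PRIORITY_LOW = 10, PRIORITY_LOWEST = 20):
+//   SetPriority(PRIORITY_HIGHEST) -> PR_PRIORITY_URGENT
+//   SetPriority(PRIORITY_HIGH)    -> PR_PRIORITY_HIGH
+//   SetPriority(PRIORITY_NORMAL)  -> PR_PRIORITY_NORMAL
+//   SetPriority(PRIORITY_LOWEST)  -> PR_PRIORITY_LOW
+//   AdjustPriority(-5) from NORMAL gives mPriority = -5 -> PR_PRIORITY_HIGH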
+
+//-----------------------------------------------------------------------------
+// nsIThreadInternal
+
+NS_IMETHODIMP
+nsThread::GetObserver(nsIThreadObserver** aObs)
+{
+ MutexAutoLock lock(mLock);
+ NS_IF_ADDREF(*aObs = mObserver);
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsThread::SetObserver(nsIThreadObserver* aObs)
+{
+ if (NS_WARN_IF(PR_GetCurrentThread() != mThread)) {
+ return NS_ERROR_NOT_SAME_THREAD;
+ }
+
+ MutexAutoLock lock(mLock);
+ mObserver = aObs;
+ return NS_OK;
+}
+
+uint32_t
+nsThread::RecursionDepth() const
+{
+ MOZ_ASSERT(PR_GetCurrentThread() == mThread);
+ return mNestedEventLoopDepth;
+}
+
+NS_IMETHODIMP
+nsThread::AddObserver(nsIThreadObserver* aObserver)
+{
+ if (NS_WARN_IF(!aObserver)) {
+ return NS_ERROR_INVALID_ARG;
+ }
+ if (NS_WARN_IF(PR_GetCurrentThread() != mThread)) {
+ return NS_ERROR_NOT_SAME_THREAD;
+ }
+
+ NS_WARNING_ASSERTION(!mEventObservers.Contains(aObserver),
+ "Adding an observer twice!");
+
+ if (!mEventObservers.AppendElement(WrapNotNull(aObserver))) {
+ NS_WARNING("Out of memory!");
+ return NS_ERROR_OUT_OF_MEMORY;
+ }
+
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsThread::RemoveObserver(nsIThreadObserver* aObserver)
+{
+ if (NS_WARN_IF(PR_GetCurrentThread() != mThread)) {
+ return NS_ERROR_NOT_SAME_THREAD;
+ }
+
+ if (aObserver && !mEventObservers.RemoveElement(aObserver)) {
+ NS_WARNING("Removing an observer that was never added!");
+ }
+
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsThread::PushEventQueue(nsIEventTarget** aResult)
+{
+ if (NS_WARN_IF(PR_GetCurrentThread() != mThread)) {
+ return NS_ERROR_NOT_SAME_THREAD;
+ }
+
+ NotNull<nsChainedEventQueue*> queue =
+ WrapNotNull(new nsChainedEventQueue(mLock));
+ queue->mEventTarget = new nsNestedEventTarget(WrapNotNull(this), queue);
+
+ {
+ MutexAutoLock lock(mLock);
+ queue->mNext = mEvents;
+ mEvents = queue;
+ }
+
+ NS_ADDREF(*aResult = queue->mEventTarget);
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsThread::PopEventQueue(nsIEventTarget* aInnermostTarget)
+{
+ if (NS_WARN_IF(PR_GetCurrentThread() != mThread)) {
+ return NS_ERROR_NOT_SAME_THREAD;
+ }
+
+ if (NS_WARN_IF(!aInnermostTarget)) {
+ return NS_ERROR_NULL_POINTER;
+ }
+
+ // Don't delete or release anything while holding the lock.
+ nsAutoPtr<nsChainedEventQueue> queue;
+ RefPtr<nsNestedEventTarget> target;
+
+ {
+ MutexAutoLock lock(mLock);
+
+ // Make sure we're popping the innermost event target.
+ if (NS_WARN_IF(mEvents->mEventTarget != aInnermostTarget)) {
+ return NS_ERROR_UNEXPECTED;
+ }
+
+ MOZ_ASSERT(mEvents != &mEventsRoot);
+
+ queue = mEvents;
+ mEvents = WrapNotNull(mEvents->mNext);
+
+ nsCOMPtr<nsIRunnable> event;
+ while (queue->GetEvent(false, getter_AddRefs(event), lock)) {
+ mEvents->PutEvent(event.forget(), lock);
+ }
+
+ // Don't let the event target post any more events.
+ queue->mEventTarget.swap(target);
+ target->mQueue = nullptr;
+ }
+
+ return NS_OK;
+}
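+
+// Pairing sketch for the two methods above (hedged: `thread` and `runnable`
+// are hypothetical). Push before entering a nested loop that should only see
+// events posted to the returned target; Pop afterwards, which drains any
+// leftover events into the next-outer queue:
+//
+//   nsCOMPtr<nsIEventTarget> nested;
+//   thread->PushEventQueue(getter_AddRefs(nested));
+//   nested->Dispatch(runnable.forget(), NS_DISPATCH_NORMAL);
+//   // ... spin a nested event loop on `thread` ...
+//   thread->PopEventQueue(nested);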
+
+void
+nsThread::SetScriptObserver(mozilla::CycleCollectedJSContext* aScriptObserver)
+{
+ if (!aScriptObserver) {
+ mScriptObserver = nullptr;
+ return;
+ }
+
+ MOZ_ASSERT(!mScriptObserver);
+ mScriptObserver = aScriptObserver;
+}
+
+void
+nsThread::DoMainThreadSpecificProcessing(bool aReallyWait)
+{
+ MOZ_ASSERT(mIsMainThread == MAIN_THREAD);
+
+ ipc::CancelCPOWs();
+
+ if (aReallyWait) {
+ HangMonitor::Suspend();
+ }
+
+ // Fire a memory pressure notification, if one is pending.
+ if (!ShuttingDown()) {
+ MemoryPressureState mpPending = NS_GetPendingMemoryPressure();
+ if (mpPending != MemPressure_None) {
+ nsCOMPtr<nsIObserverService> os = services::GetObserverService();
+
+ // Use no-forward to prevent the notifications from being transferred to
+ // the children of this process.
+ NS_NAMED_LITERAL_STRING(lowMem, "low-memory-no-forward");
+ NS_NAMED_LITERAL_STRING(lowMemOngoing, "low-memory-ongoing-no-forward");
+
+ if (os) {
+ os->NotifyObservers(nullptr, "memory-pressure",
+ mpPending == MemPressure_New ? lowMem.get() :
+ lowMemOngoing.get());
+ } else {
+ NS_WARNING("Can't get observer service!");
+ }
+ }
+ }
+
+#ifdef MOZ_CRASHREPORTER
+ if (!ShuttingDown()) {
+ SaveMemoryReportNearOOM(ShouldSaveMemoryReport::kMaybeReport);
+ }
+#endif
+}
+
+//-----------------------------------------------------------------------------
+
+NS_IMPL_ISUPPORTS(nsThread::nsNestedEventTarget, nsIEventTarget)
+
+NS_IMETHODIMP
+nsThread::nsNestedEventTarget::DispatchFromScript(nsIRunnable* aEvent, uint32_t aFlags)
+{
+ nsCOMPtr<nsIRunnable> event(aEvent);
+ return Dispatch(event.forget(), aFlags);
+}
+
+NS_IMETHODIMP
+nsThread::nsNestedEventTarget::Dispatch(already_AddRefed<nsIRunnable> aEvent, uint32_t aFlags)
+{
+ LOG(("THRD(%p) Dispatch [%p %x] to nested loop %p\n", mThread.get().get(),
+ /*XXX aEvent*/ nullptr, aFlags, this));
+
+ return mThread->DispatchInternal(Move(aEvent), aFlags, this);
+}
+
+NS_IMETHODIMP
+nsThread::nsNestedEventTarget::DelayedDispatch(already_AddRefed<nsIRunnable>, uint32_t)
+{
+ return NS_ERROR_NOT_IMPLEMENTED;
+}
+
+NS_IMETHODIMP
+nsThread::nsNestedEventTarget::IsOnCurrentThread(bool* aResult)
+{
+ return mThread->IsOnCurrentThread(aResult);
+}
diff --git a/xpcom/threads/nsThread.h b/xpcom/threads/nsThread.h
new file mode 100644
index 000000000..836123747
--- /dev/null
+++ b/xpcom/threads/nsThread.h
@@ -0,0 +1,284 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef nsThread_h__
+#define nsThread_h__
+
+#include "mozilla/Mutex.h"
+#include "nsIIdlePeriod.h"
+#include "nsIThreadInternal.h"
+#include "nsISupportsPriority.h"
+#include "nsEventQueue.h"
+#include "nsThreadUtils.h"
+#include "nsString.h"
+#include "nsTObserverArray.h"
+#include "mozilla/Attributes.h"
+#include "mozilla/NotNull.h"
+#include "nsAutoPtr.h"
+#include "mozilla/AlreadyAddRefed.h"
+#include "mozilla/UniquePtr.h"
+
+namespace mozilla {
+class CycleCollectedJSContext;
+}
+
+using mozilla::NotNull;
+
+// A native thread
+class nsThread
+ : public nsIThreadInternal
+ , public nsISupportsPriority
+{
+public:
+ NS_DECL_THREADSAFE_ISUPPORTS
+ NS_DECL_NSIEVENTTARGET
+ NS_DECL_NSITHREAD
+ NS_DECL_NSITHREADINTERNAL
+ NS_DECL_NSISUPPORTSPRIORITY
+ using nsIEventTarget::Dispatch;
+
+ enum MainThreadFlag
+ {
+ MAIN_THREAD,
+ NOT_MAIN_THREAD
+ };
+
+ nsThread(MainThreadFlag aMainThread, uint32_t aStackSize);
+
+ // Initialize this as a wrapper for a new PRThread.
+ nsresult Init();
+
+ // Initialize this as a wrapper for the current PRThread.
+ nsresult InitCurrentThread();
+
+ // The PRThread corresponding to this thread.
+ PRThread* GetPRThread()
+ {
+ return mThread;
+ }
+
+ // If this flag is true, then the nsThread was created using
+ // nsIThreadManager::NewThread.
+ bool ShutdownRequired()
+ {
+ return mShutdownRequired;
+ }
+
+ // Clear the observer list.
+ void ClearObservers()
+ {
+ mEventObservers.Clear();
+ }
+
+ void
+ SetScriptObserver(mozilla::CycleCollectedJSContext* aScriptObserver);
+
+ uint32_t
+ RecursionDepth() const;
+
+ void ShutdownComplete(NotNull<struct nsThreadShutdownContext*> aContext);
+
+ void WaitForAllAsynchronousShutdowns();
+
+#ifdef MOZ_CRASHREPORTER
+ enum class ShouldSaveMemoryReport
+ {
+ kMaybeReport,
+ kForceReport
+ };
+
+ static bool SaveMemoryReportNearOOM(ShouldSaveMemoryReport aShouldSave);
+#endif
+
+private:
+ void DoMainThreadSpecificProcessing(bool aReallyWait);
+
+ void GetIdleEvent(nsIRunnable** aEvent, mozilla::MutexAutoLock& aProofOfLock);
+ void GetEvent(bool aWait, nsIRunnable** aEvent,
+ mozilla::MutexAutoLock& aProofOfLock);
+
+protected:
+ class nsChainedEventQueue;
+
+ class nsNestedEventTarget;
+ friend class nsNestedEventTarget;
+
+ friend class nsThreadShutdownEvent;
+
+ virtual ~nsThread();
+
+ bool ShuttingDown()
+ {
+ return mShutdownContext != nullptr;
+ }
+
+ static void ThreadFunc(void* aArg);
+
+ // Helper
+ already_AddRefed<nsIThreadObserver> GetObserver()
+ {
+ nsIThreadObserver* obs;
+ nsThread::GetObserver(&obs);
+ return already_AddRefed<nsIThreadObserver>(obs);
+ }
+
+ // Wrappers for event queue methods:
+ nsresult PutEvent(nsIRunnable* aEvent, nsNestedEventTarget* aTarget);
+ nsresult PutEvent(already_AddRefed<nsIRunnable> aEvent,
+ nsNestedEventTarget* aTarget);
+
+ nsresult DispatchInternal(already_AddRefed<nsIRunnable> aEvent,
+ uint32_t aFlags, nsNestedEventTarget* aTarget);
+
+ struct nsThreadShutdownContext* ShutdownInternal(bool aSync);
+
+ // Wrapper for nsEventQueue that supports chaining.
+ class nsChainedEventQueue
+ {
+ public:
+ explicit nsChainedEventQueue(mozilla::Mutex& aLock)
+ : mNext(nullptr)
+ , mEventsAvailable(aLock, "[nsChainedEventQueue.mEventsAvailable]")
+ , mProcessSecondaryQueueRunnable(false)
+ {
+ mNormalQueue =
+ mozilla::MakeUnique<nsEventQueue>(mEventsAvailable,
+ nsEventQueue::eSharedCondVarQueue);
+ // Both queues need to use the same CondVar!
+ mSecondaryQueue =
+ mozilla::MakeUnique<nsEventQueue>(mEventsAvailable,
+ nsEventQueue::eSharedCondVarQueue);
+ }
+
+ bool GetEvent(bool aMayWait, nsIRunnable** aEvent,
+ mozilla::MutexAutoLock& aProofOfLock);
+
+ void PutEvent(nsIRunnable* aEvent, mozilla::MutexAutoLock& aProofOfLock)
+ {
+ RefPtr<nsIRunnable> event(aEvent);
+ PutEvent(event.forget(), aProofOfLock);
+ }
+
+ void PutEvent(already_AddRefed<nsIRunnable> aEvent,
+ mozilla::MutexAutoLock& aProofOfLock)
+ {
+ RefPtr<nsIRunnable> event(aEvent);
+ nsCOMPtr<nsIRunnablePriority> runnablePrio =
+ do_QueryInterface(event);
+ uint32_t prio = nsIRunnablePriority::PRIORITY_NORMAL;
+ if (runnablePrio) {
+ runnablePrio->GetPriority(&prio);
+ }
+ MOZ_ASSERT(prio == nsIRunnablePriority::PRIORITY_NORMAL ||
+ prio == nsIRunnablePriority::PRIORITY_HIGH);
+ if (prio == nsIRunnablePriority::PRIORITY_NORMAL) {
+ mNormalQueue->PutEvent(event.forget(), aProofOfLock);
+ } else {
+ mSecondaryQueue->PutEvent(event.forget(), aProofOfLock);
+ }
+ }
+
+ bool HasPendingEvent(mozilla::MutexAutoLock& aProofOfLock)
+ {
+ return mNormalQueue->HasPendingEvent(aProofOfLock) ||
+ mSecondaryQueue->HasPendingEvent(aProofOfLock);
+ }
+
+ nsChainedEventQueue* mNext;
+ RefPtr<nsNestedEventTarget> mEventTarget;
+
+ private:
+ mozilla::CondVar mEventsAvailable;
+ mozilla::UniquePtr<nsEventQueue> mNormalQueue;
+ mozilla::UniquePtr<nsEventQueue> mSecondaryQueue;
+
+ // Try to process one high priority runnable after each normal
+ // priority runnable. This matches the processing model the HTML spec has
+ // for 'Update the rendering' when only vsync messages are in the
+ // secondary queue, and prevents starving the normal queue.
+ bool mProcessSecondaryQueueRunnable;
+ };
+
+ class nsNestedEventTarget final : public nsIEventTarget
+ {
+ public:
+ NS_DECL_THREADSAFE_ISUPPORTS
+ NS_DECL_NSIEVENTTARGET
+
+ nsNestedEventTarget(NotNull<nsThread*> aThread,
+ NotNull<nsChainedEventQueue*> aQueue)
+ : mThread(aThread)
+ , mQueue(aQueue)
+ {
+ }
+
+ NotNull<RefPtr<nsThread>> mThread;
+
+ // This is protected by mThread->mLock.
+ nsChainedEventQueue* mQueue;
+
+ private:
+ ~nsNestedEventTarget()
+ {
+ }
+ };
+
+ // This lock protects access to mObserver, mEvents, mIdleEvents,
+ // mIdlePeriod and mEventsAreDoomed. All of those fields are only
+ // modified on the thread itself (never from another thread). This
+ // means that we can avoid holding the lock while using mObserver
+ // and mEvents on the thread itself. When calling PutEvent on
+ // mEvents, we have to hold the lock to synchronize with
+ // PopEventQueue.
+ mozilla::Mutex mLock;
+
+ nsCOMPtr<nsIThreadObserver> mObserver;
+ mozilla::CycleCollectedJSContext* mScriptObserver;
+
+ // Only accessed on the target thread.
+ nsAutoTObserverArray<NotNull<nsCOMPtr<nsIThreadObserver>>, 2> mEventObservers;
+
+ NotNull<nsChainedEventQueue*> mEvents; // never null
+ nsChainedEventQueue mEventsRoot;
+
+ // mIdlePeriod keeps track of the current idle period. If at any
+ // time the main event queue is empty, calling
+ // mIdlePeriod->GetIdlePeriodHint() will give an estimate of when
+ // the current idle period will end.
+ nsCOMPtr<nsIIdlePeriod> mIdlePeriod;
+ mozilla::CondVar mIdleEventsAvailable;
+ nsEventQueue mIdleEvents;
+
+ int32_t mPriority;
+ PRThread* mThread;
+ uint32_t mNestedEventLoopDepth;
+ uint32_t mStackSize;
+
+ // The shutdown context for ourselves.
+ struct nsThreadShutdownContext* mShutdownContext;
+ // The shutdown contexts for any other threads we've asked to shut down.
+ nsTArray<nsAutoPtr<struct nsThreadShutdownContext>> mRequestedShutdownContexts;
+
+ bool mShutdownRequired;
+ // Set to true when events posted to this thread will never run.
+ bool mEventsAreDoomed;
+ MainThreadFlag mIsMainThread;
+
+ // Set to true if this thread creates a JSRuntime.
+ bool mCanInvokeJS;
+};
+
+#if defined(XP_UNIX) && !defined(ANDROID) && !defined(DEBUG) && HAVE_UALARM \
+ && defined(_GNU_SOURCE)
+# define MOZ_CANARY
+
+extern int sCanaryOutputFD;
+#endif
+
+#endif // nsThread_h__
diff --git a/xpcom/threads/nsThreadManager.cpp b/xpcom/threads/nsThreadManager.cpp
new file mode 100644
index 000000000..d1eb84b8f
--- /dev/null
+++ b/xpcom/threads/nsThreadManager.cpp
@@ -0,0 +1,342 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "nsThreadManager.h"
+#include "nsThread.h"
+#include "nsThreadUtils.h"
+#include "nsIClassInfoImpl.h"
+#include "nsTArray.h"
+#include "nsAutoPtr.h"
+#include "mozilla/ThreadLocal.h"
+#ifdef MOZ_CANARY
+#include <fcntl.h>
+#include <unistd.h>
+#endif
+
+#include "MainThreadIdlePeriod.h"
+
+using namespace mozilla;
+
+static MOZ_THREAD_LOCAL(bool) sTLSIsMainThread;
+
+bool
+NS_IsMainThread()
+{
+ return sTLSIsMainThread.get();
+}
+
+void
+NS_SetMainThread()
+{
+ if (!sTLSIsMainThread.init()) {
+ MOZ_CRASH();
+ }
+ sTLSIsMainThread.set(true);
+ MOZ_ASSERT(NS_IsMainThread());
+}
+
+typedef nsTArray<NotNull<RefPtr<nsThread>>> nsThreadArray;
+
+//-----------------------------------------------------------------------------
+
+static void
+ReleaseObject(void* aData)
+{
+ static_cast<nsISupports*>(aData)->Release();
+}
+
+// statically allocated instance
+NS_IMETHODIMP_(MozExternalRefCountType)
+nsThreadManager::AddRef()
+{
+ return 2;
+}
+NS_IMETHODIMP_(MozExternalRefCountType)
+nsThreadManager::Release()
+{
+ return 1;
+}
+NS_IMPL_CLASSINFO(nsThreadManager, nullptr,
+ nsIClassInfo::THREADSAFE | nsIClassInfo::SINGLETON,
+ NS_THREADMANAGER_CID)
+NS_IMPL_QUERY_INTERFACE_CI(nsThreadManager, nsIThreadManager)
+NS_IMPL_CI_INTERFACE_GETTER(nsThreadManager, nsIThreadManager)
+
+//-----------------------------------------------------------------------------
+
+nsresult
+nsThreadManager::Init()
+{
+ // Child processes need to initialize the thread manager before they
+ // initialize XPCOM in order to set up the crash reporter. This leads to
+ // situations where we get initialized twice.
+ if (mInitialized) {
+ return NS_OK;
+ }
+
+ if (PR_NewThreadPrivateIndex(&mCurThreadIndex, ReleaseObject) == PR_FAILURE) {
+ return NS_ERROR_FAILURE;
+ }
+
+#ifdef MOZ_CANARY
+ const int flags = O_WRONLY | O_APPEND | O_CREAT | O_NONBLOCK;
+ const mode_t mode = S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH;
+ char* env_var_flag = getenv("MOZ_KILL_CANARIES");
+ sCanaryOutputFD =
+ env_var_flag ? (env_var_flag[0] ? open(env_var_flag, flags, mode) :
+ STDERR_FILENO) :
+ 0;
+#endif
+
+ // Setup "main" thread
+ mMainThread = new nsThread(nsThread::MAIN_THREAD, 0);
+
+ nsresult rv = mMainThread->InitCurrentThread();
+ if (NS_FAILED(rv)) {
+ mMainThread = nullptr;
+ return rv;
+ }
+
+ {
+ nsCOMPtr<nsIIdlePeriod> idlePeriod = new MainThreadIdlePeriod();
+ mMainThread->RegisterIdlePeriod(idlePeriod.forget());
+ }
+
+ // We need to keep a pointer to the main thread's PRThread, so we can
+ // satisfy GetIsMainThread calls that occur post-Shutdown.
+ mMainThread->GetPRThread(&mMainPRThread);
+
+ mInitialized = true;
+ return NS_OK;
+}
+
+void
+nsThreadManager::Shutdown()
+{
+ MOZ_ASSERT(NS_IsMainThread(), "shutdown not called from main thread");
+
+ // Prevent further access to the thread manager (no more new threads!)
+ //
+ // What happens if shutdown happens before NewThread completes?
+ // We Shutdown() the new thread, and return error if we've started Shutdown
+ // between when NewThread started, and when the thread finished initializing
+ // and registering with ThreadManager.
+ //
+ mInitialized = false;
+
+ // Empty the main thread event queue before we begin shutting down threads.
+ NS_ProcessPendingEvents(mMainThread);
+
+ // We gather the threads from the hashtable into a list, so that we avoid
+ // holding the hashtable lock while calling nsIThread::Shutdown.
+ nsThreadArray threads;
+ {
+ OffTheBooksMutexAutoLock lock(mLock);
+ for (auto iter = mThreadsByPRThread.Iter(); !iter.Done(); iter.Next()) {
+ RefPtr<nsThread>& thread = iter.Data();
+ threads.AppendElement(WrapNotNull(thread));
+ iter.Remove();
+ }
+ }
+
+ // It's tempting to walk the list of threads here and tell them each to stop
+ // accepting new events, but that could lead to badness if one of those
+ // threads is stuck waiting for a response from another thread. To do it
+ // right, we'd need some way to interrupt the threads.
+ //
+ // Instead, we process events on the current thread while waiting for threads
+ // to shutdown. This means that we have to preserve a mostly functioning
+ // world until such time as the threads exit.
+
+ // Shutdown all threads that require it (join with threads that we created).
+ for (uint32_t i = 0; i < threads.Length(); ++i) {
+ NotNull<nsThread*> thread = threads[i];
+ if (thread->ShutdownRequired()) {
+ thread->Shutdown();
+ }
+ }
+
+ // NB: It's possible that there are events in the queue that want to *start*
+ // an asynchronous shutdown. But we have already shutdown the threads above,
+ // so there's no need to worry about them. We only have to wait for all
+ // in-flight asynchronous thread shutdowns to complete.
+ mMainThread->WaitForAllAsynchronousShutdowns();
+
+ // In case there are any more events somehow...
+ NS_ProcessPendingEvents(mMainThread);
+
+ // There are no more background threads at this point.
+
+ // Clear the table of threads.
+ {
+ OffTheBooksMutexAutoLock lock(mLock);
+ mThreadsByPRThread.Clear();
+ }
+
+ // Normally thread shutdown clears the observer for the thread, but since the
+ // main thread is special we do it manually here after we're sure all events
+ // have been processed.
+ mMainThread->SetObserver(nullptr);
+ mMainThread->ClearObservers();
+
+ // Release main thread object.
+ mMainThread = nullptr;
+
+ // Remove the TLS entry for the main thread.
+ PR_SetThreadPrivate(mCurThreadIndex, nullptr);
+}
+
+void
+nsThreadManager::RegisterCurrentThread(nsThread& aThread)
+{
+ MOZ_ASSERT(aThread.GetPRThread() == PR_GetCurrentThread(), "bad aThread");
+
+ OffTheBooksMutexAutoLock lock(mLock);
+
+ ++mCurrentNumberOfThreads;
+ if (mCurrentNumberOfThreads > mHighestNumberOfThreads) {
+ mHighestNumberOfThreads = mCurrentNumberOfThreads;
+ }
+
+ mThreadsByPRThread.Put(aThread.GetPRThread(), &aThread); // XXX check OOM?
+
+ aThread.AddRef(); // for TLS entry
+ PR_SetThreadPrivate(mCurThreadIndex, &aThread);
+}
+
+void
+nsThreadManager::UnregisterCurrentThread(nsThread& aThread)
+{
+ MOZ_ASSERT(aThread.GetPRThread() == PR_GetCurrentThread(), "bad aThread");
+
+ OffTheBooksMutexAutoLock lock(mLock);
+
+ --mCurrentNumberOfThreads;
+ mThreadsByPRThread.Remove(aThread.GetPRThread());
+
+ PR_SetThreadPrivate(mCurThreadIndex, nullptr);
+ // Ref-count balanced via ReleaseObject
+}
+
+nsThread*
+nsThreadManager::GetCurrentThread()
+{
+ // read thread local storage
+ void* data = PR_GetThreadPrivate(mCurThreadIndex);
+ if (data) {
+ return static_cast<nsThread*>(data);
+ }
+
+ if (!mInitialized) {
+ return nullptr;
+ }
+
+ // OK, that's fine. We'll dynamically create one :-)
+ RefPtr<nsThread> thread = new nsThread(nsThread::NOT_MAIN_THREAD, 0);
+ if (!thread || NS_FAILED(thread->InitCurrentThread())) {
+ return nullptr;
+ }
+
+ return thread.get(); // reference held in TLS
+}
+
+NS_IMETHODIMP
+nsThreadManager::NewThread(uint32_t aCreationFlags,
+ uint32_t aStackSize,
+ nsIThread** aResult)
+{
+ // Note: can be called from arbitrary threads
+
+ // No new threads during Shutdown
+ if (NS_WARN_IF(!mInitialized)) {
+ return NS_ERROR_NOT_INITIALIZED;
+ }
+
+ RefPtr<nsThread> thr = new nsThread(nsThread::NOT_MAIN_THREAD, aStackSize);
+ nsresult rv = thr->Init(); // Note: blocks until the new thread has been set up
+ if (NS_FAILED(rv)) {
+ return rv;
+ }
+
+ // At this point, we expect that the thread has been registered in
+ // mThreadsByPRThread; however, it is possible that it could have also been
+ // replaced by now, so we cannot really assert that it was added. Instead,
+ // kill it if we entered Shutdown() during/before Init().
+
+ if (NS_WARN_IF(!mInitialized)) {
+ if (thr->ShutdownRequired()) {
+ thr->Shutdown(); // ok if it happens multiple times
+ }
+ return NS_ERROR_NOT_INITIALIZED;
+ }
+
+ thr.forget(aResult);
+ return NS_OK;
+}
+
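+// Sketch of the usual route into NewThread above (hedged: `job` is
+// hypothetical; callers normally go through the NS_NewThread/NS_NewNamedThread
+// helpers in nsThreadUtils rather than hitting the thread manager directly):
+//
+//   nsCOMPtr<nsIThread> thread;
+//   nsresult rv = nsThreadManager::get().NewThread(
+//     0 /* creation flags */, nsIThreadManager::DEFAULT_STACK_SIZE,
+//     getter_AddRefs(thread));
+//   if (NS_SUCCEEDED(rv)) {
+//     thread->Dispatch(job.forget(), NS_DISPATCH_NORMAL);
+//   }
+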
+NS_IMETHODIMP
+nsThreadManager::GetThreadFromPRThread(PRThread* aThread, nsIThread** aResult)
+{
+ // Keep this functioning during Shutdown
+ if (NS_WARN_IF(!mMainThread)) {
+ return NS_ERROR_NOT_INITIALIZED;
+ }
+ if (NS_WARN_IF(!aThread)) {
+ return NS_ERROR_INVALID_ARG;
+ }
+
+ RefPtr<nsThread> temp;
+ {
+ OffTheBooksMutexAutoLock lock(mLock);
+ mThreadsByPRThread.Get(aThread, getter_AddRefs(temp));
+ }
+
+ NS_IF_ADDREF(*aResult = temp);
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsThreadManager::GetMainThread(nsIThread** aResult)
+{
+ // Keep this functioning during Shutdown
+ if (NS_WARN_IF(!mMainThread)) {
+ return NS_ERROR_NOT_INITIALIZED;
+ }
+ NS_ADDREF(*aResult = mMainThread);
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsThreadManager::GetCurrentThread(nsIThread** aResult)
+{
+ // Keep this functioning during Shutdown
+ if (NS_WARN_IF(!mMainThread)) {
+ return NS_ERROR_NOT_INITIALIZED;
+ }
+ *aResult = GetCurrentThread();
+ if (!*aResult) {
+ return NS_ERROR_OUT_OF_MEMORY;
+ }
+ NS_ADDREF(*aResult);
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsThreadManager::GetIsMainThread(bool* aResult)
+{
+ // This method may be called post-Shutdown
+
+ *aResult = (PR_GetCurrentThread() == mMainPRThread);
+ return NS_OK;
+}
+
+uint32_t
+nsThreadManager::GetHighestNumberOfThreads()
+{
+ OffTheBooksMutexAutoLock lock(mLock);
+ return mHighestNumberOfThreads;
+}
diff --git a/xpcom/threads/nsThreadManager.h b/xpcom/threads/nsThreadManager.h
new file mode 100644
index 000000000..64ccc9bc9
--- /dev/null
+++ b/xpcom/threads/nsThreadManager.h
@@ -0,0 +1,89 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef nsThreadManager_h__
+#define nsThreadManager_h__
+
+#include "mozilla/Mutex.h"
+#include "nsIThreadManager.h"
+#include "nsRefPtrHashtable.h"
+#include "nsThread.h"
+
+class nsIRunnable;
+
+class nsThreadManager : public nsIThreadManager
+{
+public:
+ NS_DECL_ISUPPORTS
+ NS_DECL_NSITHREADMANAGER
+
+ static nsThreadManager& get()
+ {
+ static nsThreadManager sInstance;
+ return sInstance;
+ }
+
+ nsresult Init();
+
+ // Shutdown all threads. This function should only be called on the main
+ // thread of the application process.
+ void Shutdown();
+
+ // Called by nsThread to inform the ThreadManager it exists. This method
+ // must be called when the given thread is the current thread.
+ void RegisterCurrentThread(nsThread& aThread);
+
+ // Called by nsThread to inform the ThreadManager it is going away. This
+ // method must be called when the given thread is the current thread.
+ void UnregisterCurrentThread(nsThread& aThread);
+
+ // Returns the current thread. Returns null if OOM or if ThreadManager isn't
+ // initialized.
+ nsThread* GetCurrentThread();
+
+ // Returns the maximal number of threads that have been in existence
+ // simultaneously during the execution of the thread manager.
+ uint32_t GetHighestNumberOfThreads();
+
+ // This needs to be public in order to support static instantiation of this
+ // class with older compilers (e.g., egcs-2.91.66).
+ ~nsThreadManager()
+ {
+ }
+
+private:
+ nsThreadManager()
+ : mCurThreadIndex(0)
+ , mMainPRThread(nullptr)
+ , mLock("nsThreadManager.mLock")
+ , mInitialized(false)
+ , mCurrentNumberOfThreads(1)
+ , mHighestNumberOfThreads(1)
+ {
+ }
+
+ nsRefPtrHashtable<nsPtrHashKey<PRThread>, nsThread> mThreadsByPRThread;
+ unsigned mCurThreadIndex; // thread-local-storage index
+ RefPtr<nsThread> mMainThread;
+ PRThread* mMainPRThread;
+ mozilla::OffTheBooksMutex mLock; // protects tables
+ mozilla::Atomic<bool> mInitialized;
+
+ // The current number of threads
+ uint32_t mCurrentNumberOfThreads;
+ // The highest number of threads encountered so far during the session
+ uint32_t mHighestNumberOfThreads;
+};
+
+#define NS_THREADMANAGER_CID \
+{ /* 7a4204c6-e45a-4c37-8ebb-6709a22c917c */ \
+ 0x7a4204c6, \
+ 0xe45a, \
+ 0x4c37, \
+ {0x8e, 0xbb, 0x67, 0x09, 0xa2, 0x2c, 0x91, 0x7c} \
+}
+
+#endif // nsThreadManager_h__
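
Callers rarely touch this singleton directly; a small sketch (illustrative
helper, not part of this patch) of the nsThreadUtils.h wrappers that sit on top
of it:

    #include "nsThreadUtils.h"

    // Run a task on the main thread, hopping there only if necessary.
    // NS_IsMainThread() answers the same question as GetIsMainThread() above,
    // and NS_DispatchToMainThread() targets the manager's cached main thread.
    static nsresult
    RunOnMainThread(already_AddRefed<nsIRunnable> aTask)
    {
      nsCOMPtr<nsIRunnable> task(aTask);
      if (NS_IsMainThread()) {
        return task->Run();
      }
      return NS_DispatchToMainThread(task.forget());
    }
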
diff --git a/xpcom/threads/nsThreadPool.cpp b/xpcom/threads/nsThreadPool.cpp
new file mode 100644
index 000000000..241fad39d
--- /dev/null
+++ b/xpcom/threads/nsThreadPool.cpp
@@ -0,0 +1,449 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "nsIClassInfoImpl.h"
+#include "nsThreadPool.h"
+#include "nsThreadManager.h"
+#include "nsThread.h"
+#include "nsMemory.h"
+#include "nsAutoPtr.h"
+#include "prinrval.h"
+#include "mozilla/Logging.h"
+#include "nsThreadSyncDispatch.h"
+
+using namespace mozilla;
+
+static LazyLogModule sThreadPoolLog("nsThreadPool");
+#ifdef LOG
+#undef LOG
+#endif
+#define LOG(args) MOZ_LOG(sThreadPoolLog, mozilla::LogLevel::Debug, args)
+
+// DESIGN:
+// o Allocate anonymous threads.
+// o Use nsThreadPool::Run as the main routine for each thread.
+// o Each thread waits on the event queue's monitor, checking for
+// pending events and rescheduling itself as an idle thread.
+
+#define DEFAULT_THREAD_LIMIT 4
+#define DEFAULT_IDLE_THREAD_LIMIT 1
+#define DEFAULT_IDLE_THREAD_TIMEOUT PR_SecondsToInterval(60)
+
+NS_IMPL_ADDREF(nsThreadPool)
+NS_IMPL_RELEASE(nsThreadPool)
+NS_IMPL_CLASSINFO(nsThreadPool, nullptr, nsIClassInfo::THREADSAFE,
+ NS_THREADPOOL_CID)
+NS_IMPL_QUERY_INTERFACE_CI(nsThreadPool, nsIThreadPool, nsIEventTarget,
+ nsIRunnable)
+NS_IMPL_CI_INTERFACE_GETTER(nsThreadPool, nsIThreadPool, nsIEventTarget)
+
+nsThreadPool::nsThreadPool()
+ : mMutex("[nsThreadPool.mMutex]")
+ , mEventsAvailable(mMutex, "[nsThreadPool.mEventsAvailable]")
+ , mEvents(mEventsAvailable, nsEventQueue::eNormalQueue)
+ , mThreadLimit(DEFAULT_THREAD_LIMIT)
+ , mIdleThreadLimit(DEFAULT_IDLE_THREAD_LIMIT)
+ , mIdleThreadTimeout(DEFAULT_IDLE_THREAD_TIMEOUT)
+ , mIdleCount(0)
+ , mStackSize(nsIThreadManager::DEFAULT_STACK_SIZE)
+ , mShutdown(false)
+{
+ LOG(("THRD-P(%p) constructor!!!\n", this));
+}
+
+nsThreadPool::~nsThreadPool()
+{
+ // Threads keep a reference to the nsThreadPool until they return from Run()
+ // after removing themselves from mThreads.
+ MOZ_ASSERT(mThreads.IsEmpty());
+}
+
+nsresult
+nsThreadPool::PutEvent(nsIRunnable* aEvent)
+{
+ nsCOMPtr<nsIRunnable> event(aEvent);
+ return PutEvent(event.forget(), 0);
+}
+
+nsresult
+nsThreadPool::PutEvent(already_AddRefed<nsIRunnable> aEvent, uint32_t aFlags)
+{
+ // Avoid spawning a new thread while holding the event queue lock...
+
+ bool spawnThread = false;
+ uint32_t stackSize = 0;
+ {
+ MutexAutoLock lock(mMutex);
+
+ if (NS_WARN_IF(mShutdown)) {
+ return NS_ERROR_NOT_AVAILABLE;
+ }
+ LOG(("THRD-P(%p) put [%d %d %d]\n", this, mIdleCount, mThreads.Count(),
+ mThreadLimit));
+ MOZ_ASSERT(mIdleCount <= (uint32_t)mThreads.Count(), "oops");
+
+ // Make sure we have a thread to service this event.
+ if (mThreads.Count() < (int32_t)mThreadLimit &&
+ !(aFlags & NS_DISPATCH_AT_END) &&
+ // Spawn a new thread if we don't have enough idle threads to serve
+ // pending events immediately.
+ mEvents.Count(lock) >= mIdleCount) {
+ spawnThread = true;
+ }
+
+ mEvents.PutEvent(Move(aEvent), lock);
+ stackSize = mStackSize;
+ }
+
+ LOG(("THRD-P(%p) put [spawn=%d]\n", this, spawnThread));
+ if (!spawnThread) {
+ return NS_OK;
+ }
+
+ nsCOMPtr<nsIThread> thread;
+ nsThreadManager::get().NewThread(0, stackSize, getter_AddRefs(thread));
+ if (NS_WARN_IF(!thread)) {
+ return NS_ERROR_UNEXPECTED;
+ }
+
+ bool killThread = false;
+ {
+ MutexAutoLock lock(mMutex);
+ if (mThreads.Count() < (int32_t)mThreadLimit) {
+ mThreads.AppendObject(thread);
+ } else {
+ killThread = true; // okay, we don't need this thread anymore
+ }
+ }
+ LOG(("THRD-P(%p) put [%p kill=%d]\n", this, thread.get(), killThread));
+ if (killThread) {
+ // We never dispatched any events to the thread, so we can shut it down
+ // asynchronously without worrying about anything.
+ ShutdownThread(thread);
+ } else {
+ thread->Dispatch(this, NS_DISPATCH_NORMAL);
+ }
+
+ return NS_OK;
+}
+
+void
+nsThreadPool::ShutdownThread(nsIThread* aThread)
+{
+ LOG(("THRD-P(%p) shutdown async [%p]\n", this, aThread));
+
+ // This is either called by a threadpool thread that is out of work, or
+ // a thread that attempted to create a threadpool thread and raced in
+ // such a way that the newly created thread is no longer necessary.
+ // In the first case, we must go to another thread to shut aThread down
+ // (because it is the current thread). In the second case, we cannot
+ // synchronously shut down the current thread (because then Dispatch() would
+ // spin the event loop, and that could blow up the world), and asynchronous
+  // shutdown requires that this thread have an event loop (and it may not, see
+  // bug 10204784). The simplest way to cover all cases is to asynchronously
+  // shut down aThread from the main thread.
+ NS_DispatchToMainThread(NewRunnableMethod(aThread,
+ &nsIThread::AsyncShutdown));
+}
+
+NS_IMETHODIMP
+nsThreadPool::Run()
+{
+ mThreadNaming.SetThreadPoolName(mName);
+
+ LOG(("THRD-P(%p) enter %s\n", this, mName.BeginReading()));
+
+ nsCOMPtr<nsIThread> current;
+ nsThreadManager::get().GetCurrentThread(getter_AddRefs(current));
+
+ bool shutdownThreadOnExit = false;
+ bool exitThread = false;
+ bool wasIdle = false;
+ PRIntervalTime idleSince;
+
+ nsCOMPtr<nsIThreadPoolListener> listener;
+ {
+ MutexAutoLock lock(mMutex);
+ listener = mListener;
+ }
+
+ if (listener) {
+ listener->OnThreadCreated();
+ }
+
+ do {
+ nsCOMPtr<nsIRunnable> event;
+ {
+ MutexAutoLock lock(mMutex);
+
+ if (!mEvents.GetPendingEvent(getter_AddRefs(event), lock)) {
+ PRIntervalTime now = PR_IntervalNow();
+ PRIntervalTime timeout = PR_MillisecondsToInterval(mIdleThreadTimeout);
+
+ // If we are shutting down, then don't keep any idle threads
+ if (mShutdown) {
+ exitThread = true;
+ } else {
+ if (wasIdle) {
+ // if too many idle threads or idle for too long, then bail.
+ if (mIdleCount > mIdleThreadLimit ||
+ (mIdleThreadTimeout != UINT32_MAX && (now - idleSince) >= timeout)) {
+ exitThread = true;
+ }
+ } else {
+ // if would be too many idle threads...
+ if (mIdleCount == mIdleThreadLimit) {
+ exitThread = true;
+ } else {
+ ++mIdleCount;
+ idleSince = now;
+ wasIdle = true;
+ }
+ }
+ }
+
+ if (exitThread) {
+ if (wasIdle) {
+ --mIdleCount;
+ }
+ shutdownThreadOnExit = mThreads.RemoveObject(current);
+ } else {
+ PRIntervalTime delta = timeout - (now - idleSince);
+ LOG(("THRD-P(%p) %s waiting [%d]\n", this, mName.BeginReading(), delta));
+ mEvents.Wait(delta);
+ LOG(("THRD-P(%p) done waiting\n", this));
+ }
+ } else if (wasIdle) {
+ wasIdle = false;
+ --mIdleCount;
+ }
+ }
+ if (event) {
+ LOG(("THRD-P(%p) %s running [%p]\n", this, mName.BeginReading(), event.get()));
+ event->Run();
+ }
+ } while (!exitThread);
+
+ if (listener) {
+ listener->OnThreadShuttingDown();
+ }
+
+ if (shutdownThreadOnExit) {
+ ShutdownThread(current);
+ }
+
+ LOG(("THRD-P(%p) leave\n", this));
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsThreadPool::DispatchFromScript(nsIRunnable* aEvent, uint32_t aFlags)
+{
+ nsCOMPtr<nsIRunnable> event(aEvent);
+ return Dispatch(event.forget(), aFlags);
+}
+
+NS_IMETHODIMP
+nsThreadPool::Dispatch(already_AddRefed<nsIRunnable> aEvent, uint32_t aFlags)
+{
+ LOG(("THRD-P(%p) dispatch [%p %x]\n", this, /* XXX aEvent*/ nullptr, aFlags));
+
+ if (NS_WARN_IF(mShutdown)) {
+ return NS_ERROR_NOT_AVAILABLE;
+ }
+
+ if (aFlags & DISPATCH_SYNC) {
+ nsCOMPtr<nsIThread> thread;
+ nsThreadManager::get().GetCurrentThread(getter_AddRefs(thread));
+ if (NS_WARN_IF(!thread)) {
+ return NS_ERROR_NOT_AVAILABLE;
+ }
+
+ RefPtr<nsThreadSyncDispatch> wrapper =
+ new nsThreadSyncDispatch(thread, Move(aEvent));
+ PutEvent(wrapper);
+
+ while (wrapper->IsPending()) {
+ NS_ProcessNextEvent(thread);
+ }
+ } else {
+ NS_ASSERTION(aFlags == NS_DISPATCH_NORMAL ||
+ aFlags == NS_DISPATCH_AT_END, "unexpected dispatch flags");
+ PutEvent(Move(aEvent), aFlags);
+ }
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsThreadPool::DelayedDispatch(already_AddRefed<nsIRunnable>, uint32_t)
+{
+ return NS_ERROR_NOT_IMPLEMENTED;
+}
+
+NS_IMETHODIMP
+nsThreadPool::IsOnCurrentThread(bool* aResult)
+{
+ MutexAutoLock lock(mMutex);
+ if (NS_WARN_IF(mShutdown)) {
+ return NS_ERROR_NOT_AVAILABLE;
+ }
+
+ nsIThread* thread = NS_GetCurrentThread();
+ for (uint32_t i = 0; i < static_cast<uint32_t>(mThreads.Count()); ++i) {
+ if (mThreads[i] == thread) {
+ *aResult = true;
+ return NS_OK;
+ }
+ }
+ *aResult = false;
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsThreadPool::Shutdown()
+{
+ nsCOMArray<nsIThread> threads;
+ nsCOMPtr<nsIThreadPoolListener> listener;
+ {
+ MutexAutoLock lock(mMutex);
+ mShutdown = true;
+ mEvents.NotifyAll();
+
+ threads.AppendObjects(mThreads);
+ mThreads.Clear();
+
+    // Swap in a null listener so that we release the listener at the end of
+    // this method. The listener will be kept alive by any threads that were
+    // created while it was set.
+ mListener.swap(listener);
+ }
+
+  // It's important that we shut down the threads while outside the event
+  // queue monitor. Otherwise, we could end up deadlocking.
+
+ for (int32_t i = 0; i < threads.Count(); ++i) {
+ threads[i]->Shutdown();
+ }
+
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsThreadPool::GetThreadLimit(uint32_t* aValue)
+{
+ *aValue = mThreadLimit;
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsThreadPool::SetThreadLimit(uint32_t aValue)
+{
+ MutexAutoLock lock(mMutex);
+ LOG(("THRD-P(%p) thread limit [%u]\n", this, aValue));
+ mThreadLimit = aValue;
+ if (mIdleThreadLimit > mThreadLimit) {
+ mIdleThreadLimit = mThreadLimit;
+ }
+
+ if (static_cast<uint32_t>(mThreads.Count()) > mThreadLimit) {
+ mEvents.NotifyAll(); // wake up threads so they observe this change
+ }
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsThreadPool::GetIdleThreadLimit(uint32_t* aValue)
+{
+ *aValue = mIdleThreadLimit;
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsThreadPool::SetIdleThreadLimit(uint32_t aValue)
+{
+ MutexAutoLock lock(mMutex);
+ LOG(("THRD-P(%p) idle thread limit [%u]\n", this, aValue));
+ mIdleThreadLimit = aValue;
+ if (mIdleThreadLimit > mThreadLimit) {
+ mIdleThreadLimit = mThreadLimit;
+ }
+
+ // Do we need to kill some idle threads?
+ if (mIdleCount > mIdleThreadLimit) {
+ mEvents.NotifyAll(); // wake up threads so they observe this change
+ }
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsThreadPool::GetIdleThreadTimeout(uint32_t* aValue)
+{
+ *aValue = mIdleThreadTimeout;
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsThreadPool::SetIdleThreadTimeout(uint32_t aValue)
+{
+ MutexAutoLock lock(mMutex);
+ uint32_t oldTimeout = mIdleThreadTimeout;
+ mIdleThreadTimeout = aValue;
+
+ // Do we need to notify any idle threads that their sleep time has shortened?
+ if (mIdleThreadTimeout < oldTimeout && mIdleCount > 0) {
+ mEvents.NotifyAll(); // wake up threads so they observe this change
+ }
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsThreadPool::GetThreadStackSize(uint32_t* aValue)
+{
+ MutexAutoLock lock(mMutex);
+ *aValue = mStackSize;
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsThreadPool::SetThreadStackSize(uint32_t aValue)
+{
+ MutexAutoLock lock(mMutex);
+ mStackSize = aValue;
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsThreadPool::GetListener(nsIThreadPoolListener** aListener)
+{
+ MutexAutoLock lock(mMutex);
+ NS_IF_ADDREF(*aListener = mListener);
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsThreadPool::SetListener(nsIThreadPoolListener* aListener)
+{
+ nsCOMPtr<nsIThreadPoolListener> swappedListener(aListener);
+ {
+ MutexAutoLock lock(mMutex);
+ mListener.swap(swappedListener);
+ }
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsThreadPool::SetName(const nsACString& aName)
+{
+ {
+ MutexAutoLock lock(mMutex);
+ if (mThreads.Count()) {
+ return NS_ERROR_NOT_AVAILABLE;
+ }
+ }
+
+ mName = aName;
+ return NS_OK;
+}
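
A hedged usage sketch tying the setters above back to the DESIGN comment at the
top of this file; the pool name, limits, and helper functions are assumptions
for illustration:

    #include "mozilla/Move.h"
    #include "nsCOMPtr.h"
    #include "nsComponentManagerUtils.h"  // do_CreateInstance
    #include "nsIThreadPool.h"
    #include "nsString.h"
    #include "nsThreadUtils.h"

    static already_AddRefed<nsIThreadPool>
    CreateDecodePool()
    {
      nsresult rv;
      nsCOMPtr<nsIThreadPool> pool =
        do_CreateInstance("@mozilla.org/thread-pool;1", &rv);
      if (NS_FAILED(rv)) {
        return nullptr;
      }
      pool->SetName(NS_LITERAL_CSTRING("DecodePool"));  // must be set before any workers exist
      pool->SetThreadLimit(4);            // at most 4 workers
      pool->SetIdleThreadLimit(1);        // keep one idle worker alive
      pool->SetIdleThreadTimeout(30000);  // retire idle workers after 30 s
      return pool.forget();
    }

    // PutEvent() only spawns a worker when no idle thread can pick the event
    // up immediately and the thread limit has not been reached.
    static void
    QueueWork(nsIThreadPool* aPool, already_AddRefed<nsIRunnable> aTask)
    {
      aPool->Dispatch(mozilla::Move(aTask), NS_DISPATCH_NORMAL);
    }
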
diff --git a/xpcom/threads/nsThreadPool.h b/xpcom/threads/nsThreadPool.h
new file mode 100644
index 000000000..47a4bd1ff
--- /dev/null
+++ b/xpcom/threads/nsThreadPool.h
@@ -0,0 +1,65 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef nsThreadPool_h__
+#define nsThreadPool_h__
+
+#include "nsIThreadPool.h"
+#include "nsIThread.h"
+#include "nsIRunnable.h"
+#include "nsEventQueue.h"
+#include "nsCOMArray.h"
+#include "nsCOMPtr.h"
+#include "nsThreadUtils.h"
+#include "mozilla/Attributes.h"
+#include "mozilla/AlreadyAddRefed.h"
+#include "mozilla/Mutex.h"
+#include "mozilla/Monitor.h"
+
+class nsThreadPool final
+ : public nsIThreadPool
+ , public nsIRunnable
+{
+public:
+ NS_DECL_THREADSAFE_ISUPPORTS
+ NS_DECL_NSIEVENTTARGET
+ NS_DECL_NSITHREADPOOL
+ NS_DECL_NSIRUNNABLE
+ using nsIEventTarget::Dispatch;
+
+ nsThreadPool();
+
+private:
+ ~nsThreadPool();
+
+ void ShutdownThread(nsIThread* aThread);
+ nsresult PutEvent(nsIRunnable* aEvent);
+ nsresult PutEvent(already_AddRefed<nsIRunnable> aEvent, uint32_t aFlags);
+
+ nsCOMArray<nsIThread> mThreads;
+ mozilla::Mutex mMutex;
+ mozilla::CondVar mEventsAvailable;
+ nsEventQueue mEvents;
+ uint32_t mThreadLimit;
+ uint32_t mIdleThreadLimit;
+ uint32_t mIdleThreadTimeout;
+ uint32_t mIdleCount;
+ uint32_t mStackSize;
+ nsCOMPtr<nsIThreadPoolListener> mListener;
+ bool mShutdown;
+ nsCString mName;
+ nsThreadPoolNaming mThreadNaming;
+};
+
+#define NS_THREADPOOL_CID \
+{ /* 547ec2a8-315e-4ec4-888e-6e4264fe90eb */ \
+ 0x547ec2a8, \
+ 0x315e, \
+ 0x4ec4, \
+ {0x88, 0x8e, 0x6e, 0x42, 0x64, 0xfe, 0x90, 0xeb} \
+}
+
+#endif // nsThreadPool_h__
diff --git a/xpcom/threads/nsThreadSyncDispatch.h b/xpcom/threads/nsThreadSyncDispatch.h
new file mode 100644
index 000000000..ae5e85464
--- /dev/null
+++ b/xpcom/threads/nsThreadSyncDispatch.h
@@ -0,0 +1,50 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef nsThreadSyncDispatch_h_
+#define nsThreadSyncDispatch_h_
+
+#include "nsThreadUtils.h"
+#include "LeakRefPtr.h"
+#include "mozilla/DebugOnly.h"
+
+class nsThreadSyncDispatch : public mozilla::Runnable
+{
+public:
+ nsThreadSyncDispatch(nsIThread* aOrigin, already_AddRefed<nsIRunnable>&& aTask)
+ : mOrigin(aOrigin)
+ , mSyncTask(mozilla::Move(aTask))
+ {
+ }
+
+ bool IsPending()
+ {
+ return !!mSyncTask;
+ }
+
+private:
+ NS_IMETHOD Run() override
+ {
+ if (nsIRunnable* task = mSyncTask.get()) {
+ mozilla::DebugOnly<nsresult> result = task->Run();
+ MOZ_ASSERT(NS_SUCCEEDED(result),
+ "task in sync dispatch should not fail");
+ // We must release the task here to ensure that when the original
+ // thread is unblocked, this task has been released.
+ mSyncTask.release();
+ // unblock the origin thread
+ mOrigin->Dispatch(this, NS_DISPATCH_NORMAL);
+ }
+ return NS_OK;
+ }
+
+ nsCOMPtr<nsIThread> mOrigin;
+  // The task is leaked by default when Run() is not called, because
+  // otherwise we might release it on the wrong thread.
+ mozilla::LeakRefPtr<nsIRunnable> mSyncTask;
+};
+
+#endif // nsThreadSyncDispatch_h_
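
A condensed sketch of the handshake that nsThreadPool::Dispatch(DISPATCH_SYNC)
performs with this class; the surrounding helper is illustrative, while the
class and free functions it calls are the ones from this patch:

    #include "mozilla/Move.h"
    #include "nsThreadManager.h"
    #include "nsThreadSyncDispatch.h"
    #include "nsThreadUtils.h"

    static void
    SyncDispatchSketch(nsIEventTarget* aTarget, already_AddRefed<nsIRunnable> aTask)
    {
      nsCOMPtr<nsIThread> origin;
      nsThreadManager::get().GetCurrentThread(getter_AddRefs(origin));

      RefPtr<nsThreadSyncDispatch> wrapper =
        new nsThreadSyncDispatch(origin, mozilla::Move(aTask));
      aTarget->Dispatch(wrapper, NS_DISPATCH_NORMAL);

      // Spin the origin thread's event loop until the wrapper has run the task
      // on the target and released it, at which point IsPending() turns false.
      while (wrapper->IsPending()) {
        NS_ProcessNextEvent(origin);
      }
    }
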
diff --git a/xpcom/threads/nsTimerImpl.cpp b/xpcom/threads/nsTimerImpl.cpp
new file mode 100644
index 000000000..bc2d338e0
--- /dev/null
+++ b/xpcom/threads/nsTimerImpl.cpp
@@ -0,0 +1,658 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "nsTimerImpl.h"
+#include "TimerThread.h"
+#include "nsAutoPtr.h"
+#include "nsThreadManager.h"
+#include "nsThreadUtils.h"
+#include "pratom.h"
+#include "GeckoProfiler.h"
+#include "mozilla/Atomics.h"
+#include "mozilla/IntegerPrintfMacros.h"
+#include "mozilla/Logging.h"
+#ifdef MOZ_TASK_TRACER
+#include "GeckoTaskTracerImpl.h"
+using namespace mozilla::tasktracer;
+#endif
+
+#ifdef XP_WIN
+#include <process.h>
+#ifndef getpid
+#define getpid _getpid
+#endif
+#else
+#include <unistd.h>
+#endif
+
+using mozilla::Atomic;
+using mozilla::LogLevel;
+using mozilla::TimeDuration;
+using mozilla::TimeStamp;
+
+static TimerThread* gThread = nullptr;
+
+// This module prints info about the precision of timers.
+static mozilla::LazyLogModule sTimerLog("nsTimerImpl");
+
+mozilla::LogModule*
+GetTimerLog()
+{
+ return sTimerLog;
+}
+
+// This module prints info about which timers are firing, which is useful for
+// wakeups for the purposes of power profiling. Set the following environment
+// variable before starting the browser.
+//
+// MOZ_LOG=TimerFirings:4
+//
+// Then a line will be printed for every timer that fires. The name used for a
+// |Callback::Type::Function| timer depends on the circumstances.
+//
+// - If it was explicitly named (e.g. it was initialized with
+// InitWithNamedFuncCallback()) then that explicit name will be shown.
+//
+// - Otherwise, if we are on a platform that supports function name lookup
+// (Mac or Linux) then the looked-up name will be shown with a
+// "[from dladdr]" annotation. On Mac the looked-up name will be immediately
+// useful. On Linux it'll need post-processing with
+// tools/rb/fix_linux_stack.py.
+//
+// - Otherwise, no name will be printed. If many timers hit this case then
+// you'll need to re-run the workload on a Mac to find out which timers they
+// are, and then give them explicit names.
+//
+// If you redirect this output to a file called "out", you can then
+// post-process it with a command something like the following.
+//
+// cat out | grep timer | sort | uniq -c | sort -r -n
+//
+// This will show how often each unique line appears, with the most common ones
+// first.
+//
+// More detailed docs are here:
+// https://developer.mozilla.org/en-US/docs/Mozilla/Performance/TimerFirings_logging
+//
+static mozilla::LazyLogModule sTimerFiringsLog("TimerFirings");
+
+mozilla::LogModule*
+GetTimerFiringsLog()
+{
+ return sTimerFiringsLog;
+}
+
+#include <math.h>
+
+double nsTimerImpl::sDeltaSumSquared = 0;
+double nsTimerImpl::sDeltaSum = 0;
+double nsTimerImpl::sDeltaNum = 0;
+
+static void
+myNS_MeanAndStdDev(double n, double sumOfValues, double sumOfSquaredValues,
+ double* meanResult, double* stdDevResult)
+{
+ double mean = 0.0, var = 0.0, stdDev = 0.0;
+ if (n > 0.0 && sumOfValues >= 0) {
+ mean = sumOfValues / n;
+ double temp = (n * sumOfSquaredValues) - (sumOfValues * sumOfValues);
+ if (temp < 0.0 || n <= 1) {
+ var = 0.0;
+ } else {
+ var = temp / (n * (n - 1));
+ }
+ // for some reason, Windows says sqrt(0.0) is "-1.#J" (?!) so do this:
+ stdDev = var != 0.0 ? sqrt(var) : 0.0;
+ }
+ *meanResult = mean;
+ *stdDevResult = stdDev;
+}
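
As a quick check of the running-sum formula used above,
var = (n * sumOfSquaredValues - sumOfValues^2) / (n * (n - 1)): for the samples
{2, 4, 6} we have n = 3, sumOfValues = 12, sumOfSquaredValues = 56, so
var = (168 - 144) / 6 = 4 and the reported standard deviation is 2, matching
the textbook sample statistics.
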
+
+NS_IMPL_QUERY_INTERFACE(nsTimer, nsITimer)
+NS_IMPL_ADDREF(nsTimer)
+
+NS_IMETHODIMP_(MozExternalRefCountType)
+nsTimer::Release(void)
+{
+ nsrefcnt count = --mRefCnt;
+ NS_LOG_RELEASE(this, count, "nsTimer");
+
+ if (count == 1) {
+ // Last ref, held by nsTimerImpl. Make sure the cycle is broken.
+    // If there is an nsTimerEvent in a queue for this timer, the nsTimer will
+    // live until that event pops; otherwise the nsTimerImpl will go away and
+    // the nsTimer along with it.
+ mImpl->Cancel();
+ mImpl = nullptr;
+ } else if (count == 0) {
+ delete this;
+ }
+
+ return count;
+}
+
+nsTimerImpl::nsTimerImpl(nsITimer* aTimer) :
+ mGeneration(0),
+ mDelay(0),
+ mITimer(aTimer),
+ mMutex("nsTimerImpl::mMutex")
+{
+ // XXXbsmedberg: shouldn't this be in Init()?
+ mEventTarget = static_cast<nsIEventTarget*>(NS_GetCurrentThread());
+}
+
+//static
+nsresult
+nsTimerImpl::Startup()
+{
+ nsresult rv;
+
+ gThread = new TimerThread();
+
+ NS_ADDREF(gThread);
+ rv = gThread->InitLocks();
+
+ if (NS_FAILED(rv)) {
+ NS_RELEASE(gThread);
+ }
+
+ return rv;
+}
+
+void
+nsTimerImpl::Shutdown()
+{
+ if (MOZ_LOG_TEST(GetTimerLog(), LogLevel::Debug)) {
+ double mean = 0, stddev = 0;
+ myNS_MeanAndStdDev(sDeltaNum, sDeltaSum, sDeltaSumSquared, &mean, &stddev);
+
+ MOZ_LOG(GetTimerLog(), LogLevel::Debug,
+ ("sDeltaNum = %f, sDeltaSum = %f, sDeltaSumSquared = %f\n",
+ sDeltaNum, sDeltaSum, sDeltaSumSquared));
+ MOZ_LOG(GetTimerLog(), LogLevel::Debug,
+ ("mean: %fms, stddev: %fms\n", mean, stddev));
+ }
+
+ if (!gThread) {
+ return;
+ }
+
+ gThread->Shutdown();
+ NS_RELEASE(gThread);
+}
+
+
+nsresult
+nsTimerImpl::InitCommon(uint32_t aDelay, uint32_t aType)
+{
+ mMutex.AssertCurrentThreadOwns();
+ nsresult rv;
+
+ if (NS_WARN_IF(!gThread)) {
+ return NS_ERROR_NOT_INITIALIZED;
+ }
+ if (!mEventTarget) {
+ NS_ERROR("mEventTarget is NULL");
+ return NS_ERROR_NOT_INITIALIZED;
+ }
+
+ rv = gThread->Init();
+ if (NS_WARN_IF(NS_FAILED(rv))) {
+ return rv;
+ }
+
+ gThread->RemoveTimer(this);
+ ++mGeneration;
+
+ mType = (uint8_t)aType;
+ mDelay = aDelay;
+ mTimeout = TimeStamp::Now() + TimeDuration::FromMilliseconds(mDelay);
+
+ return gThread->AddTimer(this);
+}
+
+nsresult
+nsTimerImpl::InitWithFuncCallbackCommon(nsTimerCallbackFunc aFunc,
+ void* aClosure,
+ uint32_t aDelay,
+ uint32_t aType,
+ Callback::Name aName)
+{
+ if (NS_WARN_IF(!aFunc)) {
+ return NS_ERROR_INVALID_ARG;
+ }
+
+ Callback cb; // Goes out of scope after the unlock, prevents deadlock
+ cb.mType = Callback::Type::Function;
+ cb.mCallback.c = aFunc;
+ cb.mClosure = aClosure;
+ cb.mName = aName;
+
+ MutexAutoLock lock(mMutex);
+ cb.swap(mCallback);
+
+ return InitCommon(aDelay, aType);
+}
+
+NS_IMETHODIMP
+nsTimerImpl::InitWithFuncCallback(nsTimerCallbackFunc aFunc,
+ void* aClosure,
+ uint32_t aDelay,
+ uint32_t aType)
+{
+ Callback::Name name(Callback::Nothing);
+ return InitWithFuncCallbackCommon(aFunc, aClosure, aDelay, aType, name);
+}
+
+NS_IMETHODIMP
+nsTimerImpl::InitWithNamedFuncCallback(nsTimerCallbackFunc aFunc,
+ void* aClosure,
+ uint32_t aDelay,
+ uint32_t aType,
+ const char* aNameString)
+{
+ Callback::Name name(aNameString);
+ return InitWithFuncCallbackCommon(aFunc, aClosure, aDelay, aType, name);
+}
+
+NS_IMETHODIMP
+nsTimerImpl::InitWithNameableFuncCallback(nsTimerCallbackFunc aFunc,
+ void* aClosure,
+ uint32_t aDelay,
+ uint32_t aType,
+ nsTimerNameCallbackFunc aNameFunc)
+{
+ Callback::Name name(aNameFunc);
+ return InitWithFuncCallbackCommon(aFunc, aClosure, aDelay, aType, name);
+}
+
+NS_IMETHODIMP
+nsTimerImpl::InitWithCallback(nsITimerCallback* aCallback,
+ uint32_t aDelay,
+ uint32_t aType)
+{
+ if (NS_WARN_IF(!aCallback)) {
+ return NS_ERROR_INVALID_ARG;
+ }
+
+ Callback cb; // Goes out of scope after the unlock, prevents deadlock
+ cb.mType = Callback::Type::Interface;
+ cb.mCallback.i = aCallback;
+ NS_ADDREF(cb.mCallback.i);
+
+ MutexAutoLock lock(mMutex);
+ cb.swap(mCallback);
+
+ return InitCommon(aDelay, aType);
+}
+
+NS_IMETHODIMP
+nsTimerImpl::Init(nsIObserver* aObserver, uint32_t aDelay, uint32_t aType)
+{
+ if (NS_WARN_IF(!aObserver)) {
+ return NS_ERROR_INVALID_ARG;
+ }
+
+ Callback cb; // Goes out of scope after the unlock, prevents deadlock
+ cb.mType = Callback::Type::Observer;
+ cb.mCallback.o = aObserver;
+ NS_ADDREF(cb.mCallback.o);
+
+ MutexAutoLock lock(mMutex);
+ cb.swap(mCallback);
+
+ return InitCommon(aDelay, aType);
+}
+
+NS_IMETHODIMP
+nsTimerImpl::Cancel()
+{
+ Callback cb;
+
+ MutexAutoLock lock(mMutex);
+
+ if (gThread) {
+ gThread->RemoveTimer(this);
+ }
+
+ cb.swap(mCallback);
+ ++mGeneration;
+
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsTimerImpl::SetDelay(uint32_t aDelay)
+{
+ MutexAutoLock lock(mMutex);
+ if (GetCallback().mType == Callback::Type::Unknown && !IsRepeating()) {
+ // This may happen if someone tries to re-use a one-shot timer
+ // by re-setting delay instead of reinitializing the timer.
+ NS_ERROR("nsITimer->SetDelay() called when the "
+ "one-shot timer is not set up.");
+ return NS_ERROR_NOT_INITIALIZED;
+ }
+
+ bool reAdd = false;
+ if (gThread) {
+ reAdd = NS_SUCCEEDED(gThread->RemoveTimer(this));
+ }
+
+ mDelay = aDelay;
+ mTimeout = TimeStamp::Now() + TimeDuration::FromMilliseconds(mDelay);
+
+ if (reAdd) {
+ gThread->AddTimer(this);
+ }
+
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsTimerImpl::GetDelay(uint32_t* aDelay)
+{
+ MutexAutoLock lock(mMutex);
+ *aDelay = mDelay;
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsTimerImpl::SetType(uint32_t aType)
+{
+ MutexAutoLock lock(mMutex);
+ mType = (uint8_t)aType;
+  // XXX If this is called, we should change the actual type... this could
+  // affect repeating timers. We need to ensure in Fire() that if mType has
+  // changed during the callback, we don't end up with the timer in the queue
+  // twice.
+ return NS_OK;
+}
+
+NS_IMETHODIMP
+nsTimerImpl::GetType(uint32_t* aType)
+{
+ MutexAutoLock lock(mMutex);
+ *aType = mType;
+ return NS_OK;
+}
+
+
+NS_IMETHODIMP
+nsTimerImpl::GetClosure(void** aClosure)
+{
+ MutexAutoLock lock(mMutex);
+ *aClosure = GetCallback().mClosure;
+ return NS_OK;
+}
+
+
+NS_IMETHODIMP
+nsTimerImpl::GetCallback(nsITimerCallback** aCallback)
+{
+ MutexAutoLock lock(mMutex);
+ if (GetCallback().mType == Callback::Type::Interface) {
+ NS_IF_ADDREF(*aCallback = GetCallback().mCallback.i);
+ } else {
+ *aCallback = nullptr;
+ }
+
+ return NS_OK;
+}
+
+
+NS_IMETHODIMP
+nsTimerImpl::GetTarget(nsIEventTarget** aTarget)
+{
+ MutexAutoLock lock(mMutex);
+ NS_IF_ADDREF(*aTarget = mEventTarget);
+ return NS_OK;
+}
+
+
+NS_IMETHODIMP
+nsTimerImpl::SetTarget(nsIEventTarget* aTarget)
+{
+ MutexAutoLock lock(mMutex);
+ if (NS_WARN_IF(mCallback.mType != Callback::Type::Unknown)) {
+ return NS_ERROR_ALREADY_INITIALIZED;
+ }
+
+ if (aTarget) {
+ mEventTarget = aTarget;
+ } else {
+ mEventTarget = static_cast<nsIEventTarget*>(NS_GetCurrentThread());
+ }
+ return NS_OK;
+}
+
+
+void
+nsTimerImpl::Fire(int32_t aGeneration)
+{
+ uint8_t oldType;
+ uint32_t oldDelay;
+ TimeStamp oldTimeout;
+
+ {
+ // Don't fire callbacks or fiddle with refcounts when the mutex is locked.
+ // If some other thread Cancels/Inits after this, they're just too late.
+ MutexAutoLock lock(mMutex);
+ if (aGeneration != mGeneration) {
+ return;
+ }
+
+ mCallbackDuringFire.swap(mCallback);
+ oldType = mType;
+ oldDelay = mDelay;
+ oldTimeout = mTimeout;
+ }
+
+ PROFILER_LABEL("Timer", "Fire",
+ js::ProfileEntry::Category::OTHER);
+
+ TimeStamp now = TimeStamp::Now();
+ if (MOZ_LOG_TEST(GetTimerLog(), LogLevel::Debug)) {
+ TimeDuration delta = now - oldTimeout;
+ int32_t d = delta.ToMilliseconds(); // delta in ms
+ sDeltaSum += abs(d);
+ sDeltaSumSquared += double(d) * double(d);
+ sDeltaNum++;
+
+ MOZ_LOG(GetTimerLog(), LogLevel::Debug,
+ ("[this=%p] expected delay time %4ums\n", this, oldDelay));
+ MOZ_LOG(GetTimerLog(), LogLevel::Debug,
+ ("[this=%p] actual delay time %4dms\n", this, oldDelay + d));
+ MOZ_LOG(GetTimerLog(), LogLevel::Debug,
+ ("[this=%p] (mType is %d) -------\n", this, oldType));
+ MOZ_LOG(GetTimerLog(), LogLevel::Debug,
+ ("[this=%p] delta %4dms\n", this, d));
+ }
+
+ if (MOZ_LOG_TEST(GetTimerFiringsLog(), LogLevel::Debug)) {
+ LogFiring(mCallbackDuringFire, oldType, oldDelay);
+ }
+
+ switch (mCallbackDuringFire.mType) {
+ case Callback::Type::Function:
+ mCallbackDuringFire.mCallback.c(mITimer, mCallbackDuringFire.mClosure);
+ break;
+ case Callback::Type::Interface:
+ mCallbackDuringFire.mCallback.i->Notify(mITimer);
+ break;
+ case Callback::Type::Observer:
+ mCallbackDuringFire.mCallback.o->Observe(mITimer, NS_TIMER_CALLBACK_TOPIC,
+ nullptr);
+ break;
+ default:
+ ;
+ }
+
+ Callback trash; // Swap into here to dispose of callback after the unlock
+ MutexAutoLock lock(mMutex);
+ if (aGeneration == mGeneration && IsRepeating()) {
+ // Repeating timer has not been re-init or canceled; reschedule
+ mCallbackDuringFire.swap(mCallback);
+ TimeDuration delay = TimeDuration::FromMilliseconds(mDelay);
+ if (mType == nsITimer::TYPE_REPEATING_SLACK) {
+ mTimeout = TimeStamp::Now() + delay;
+ } else {
+ mTimeout = mTimeout + delay;
+ }
+ if (gThread) {
+ gThread->AddTimer(this);
+ }
+ }
+
+ mCallbackDuringFire.swap(trash);
+
+ MOZ_LOG(GetTimerLog(), LogLevel::Debug,
+ ("[this=%p] Took %fms to fire timer callback\n",
+ this, (TimeStamp::Now() - now).ToMilliseconds()));
+}
+
+#if defined(HAVE_DLADDR) && defined(HAVE___CXA_DEMANGLE)
+#define USE_DLADDR 1
+#endif
+
+#ifdef USE_DLADDR
+#include <cxxabi.h>
+#include <dlfcn.h>
+#endif
+
+// See the big comment above GetTimerFiringsLog() to understand this code.
+void
+nsTimerImpl::LogFiring(const Callback& aCallback, uint8_t aType, uint32_t aDelay)
+{
+ const char* typeStr;
+ switch (aType) {
+ case nsITimer::TYPE_ONE_SHOT: typeStr = "ONE_SHOT"; break;
+ case nsITimer::TYPE_REPEATING_SLACK: typeStr = "SLACK "; break;
+ case nsITimer::TYPE_REPEATING_PRECISE: /* fall through */
+ case nsITimer::TYPE_REPEATING_PRECISE_CAN_SKIP: typeStr = "PRECISE "; break;
+ default: MOZ_CRASH("bad type");
+ }
+
+ switch (aCallback.mType) {
+ case Callback::Type::Function: {
+ bool needToFreeName = false;
+ const char* annotation = "";
+ const char* name;
+ static const size_t buflen = 1024;
+ char buf[buflen];
+
+ if (aCallback.mName.is<Callback::NameString>()) {
+ name = aCallback.mName.as<Callback::NameString>();
+
+ } else if (aCallback.mName.is<Callback::NameFunc>()) {
+ aCallback.mName.as<Callback::NameFunc>()(
+ mITimer, aCallback.mClosure, buf, buflen);
+ name = buf;
+
+ } else {
+ MOZ_ASSERT(aCallback.mName.is<Callback::NameNothing>());
+#ifdef USE_DLADDR
+ annotation = "[from dladdr] ";
+
+ Dl_info info;
+ void* addr = reinterpret_cast<void*>(aCallback.mCallback.c);
+ if (dladdr(addr, &info) == 0) {
+ name = "???[dladdr: failed]";
+
+ } else if (info.dli_sname) {
+ int status;
+ name = abi::__cxa_demangle(info.dli_sname, nullptr, nullptr, &status);
+ if (status == 0) {
+ // Success. Because we didn't pass in a buffer to __cxa_demangle it
+ // allocates its own one with malloc() which we must free() later.
+ MOZ_ASSERT(name);
+ needToFreeName = true;
+ } else if (status == -1) {
+ name = "???[__cxa_demangle: OOM]";
+ } else if (status == -2) {
+ name = "???[__cxa_demangle: invalid mangled name]";
+ } else if (status == -3) {
+ name = "???[__cxa_demangle: invalid argument]";
+ } else {
+ name = "???[__cxa_demangle: unexpected status value]";
+ }
+
+ } else if (info.dli_fname) {
+ // The "#0: " prefix is necessary for fix_linux_stack.py to interpret
+ // this string as something to convert.
+ snprintf(buf, buflen, "#0: ???[%s +0x%" PRIxPTR "]\n",
+ info.dli_fname, uintptr_t(addr) - uintptr_t(info.dli_fbase));
+ name = buf;
+
+ } else {
+ name = "???[dladdr: no symbol or shared object obtained]";
+ }
+#else
+ name = "???[dladdr is unimplemented or doesn't work well on this OS]";
+#endif
+ }
+
+ MOZ_LOG(GetTimerFiringsLog(), LogLevel::Debug,
+ ("[%d] fn timer (%s %5d ms): %s%s\n",
+ getpid(), typeStr, aDelay, annotation, name));
+
+ if (needToFreeName) {
+ free(const_cast<char*>(name));
+ }
+
+ break;
+ }
+
+ case Callback::Type::Interface: {
+ MOZ_LOG(GetTimerFiringsLog(), LogLevel::Debug,
+ ("[%d] iface timer (%s %5d ms): %p\n",
+ getpid(), typeStr, aDelay, aCallback.mCallback.i));
+ break;
+ }
+
+ case Callback::Type::Observer: {
+ MOZ_LOG(GetTimerFiringsLog(), LogLevel::Debug,
+ ("[%d] obs timer (%s %5d ms): %p\n",
+ getpid(), typeStr, aDelay, aCallback.mCallback.o));
+ break;
+ }
+
+ case Callback::Type::Unknown:
+ default: {
+ MOZ_LOG(GetTimerFiringsLog(), LogLevel::Debug,
+ ("[%d] ??? timer (%s, %5d ms)\n",
+ getpid(), typeStr, aDelay));
+ break;
+ }
+ }
+}
+
+nsTimer::~nsTimer()
+{
+}
+
+size_t
+nsTimer::SizeOfIncludingThis(mozilla::MallocSizeOf aMallocSizeOf) const
+{
+ return aMallocSizeOf(this);
+}
+
+/* static */
+const nsTimerImpl::Callback::NameNothing nsTimerImpl::Callback::Nothing = 0;
+
+#ifdef MOZ_TASK_TRACER
+void
+nsTimerImpl::GetTLSTraceInfo()
+{
+ mTracedTask.GetTLSTraceInfo();
+}
+
+TracedTaskCommon
+nsTimerImpl::GetTracedTask()
+{
+ return mTracedTask;
+}
+
+#endif
+
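
Tying the TimerFirings logging described near the top of this file to the init
paths above, a hedged sketch; the callback, closure, and delay are assumptions,
and only the nsITimer calls come from this patch:

    #include "nsCOMPtr.h"
    #include "nsComponentManagerUtils.h"  // do_CreateInstance
    #include "nsITimer.h"

    // With MOZ_LOG=TimerFirings:4 the explicit name below is printed directly,
    // so no dladdr lookup is needed for this timer.
    static void
    FlushCacheCallback(nsITimer* aTimer, void* aClosure)
    {
      // ... periodic work ...
    }

    static already_AddRefed<nsITimer>
    StartFlushTimer(void* aClosure)
    {
      nsCOMPtr<nsITimer> timer = do_CreateInstance("@mozilla.org/timer;1");
      if (!timer) {
        return nullptr;
      }
      timer->InitWithNamedFuncCallback(FlushCacheCallback, aClosure,
                                       5000,  // ms
                                       nsITimer::TYPE_REPEATING_SLACK,
                                       "FlushCacheCallback");
      return timer.forget();
    }
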
diff --git a/xpcom/threads/nsTimerImpl.h b/xpcom/threads/nsTimerImpl.h
new file mode 100644
index 000000000..5c731fbb4
--- /dev/null
+++ b/xpcom/threads/nsTimerImpl.h
@@ -0,0 +1,207 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef nsTimerImpl_h___
+#define nsTimerImpl_h___
+
+#include "nsITimer.h"
+#include "nsIEventTarget.h"
+#include "nsIObserver.h"
+
+#include "nsCOMPtr.h"
+
+#include "mozilla/Attributes.h"
+#include "mozilla/Logging.h"
+#include "mozilla/Mutex.h"
+#include "mozilla/TimeStamp.h"
+#include "mozilla/Variant.h"
+
+#ifdef MOZ_TASK_TRACER
+#include "TracedTaskCommon.h"
+#endif
+
+extern mozilla::LogModule* GetTimerLog();
+
+#define NS_TIMER_CID \
+{ /* 5ff24248-1dd2-11b2-8427-fbab44f29bc8 */ \
+ 0x5ff24248, \
+ 0x1dd2, \
+ 0x11b2, \
+ {0x84, 0x27, 0xfb, 0xab, 0x44, 0xf2, 0x9b, 0xc8} \
+}
+
+// TimerThread, nsTimerEvent, and nsTimer hold references to nsTimerImpl
+// objects. nsTimer has a separate lifecycle so we can Cancel() the underlying
+// timer when the user of the nsTimer has let go of its last reference.
+class nsTimerImpl
+{
+ ~nsTimerImpl() {}
+public:
+ typedef mozilla::TimeStamp TimeStamp;
+
+ explicit nsTimerImpl(nsITimer* aTimer);
+ NS_INLINE_DECL_THREADSAFE_REFCOUNTING(nsTimerImpl)
+ NS_DECL_NON_VIRTUAL_NSITIMER
+
+ static nsresult Startup();
+ static void Shutdown();
+
+ void Fire(int32_t aGeneration);
+
+#ifdef MOZ_TASK_TRACER
+ void GetTLSTraceInfo();
+ mozilla::tasktracer::TracedTaskCommon GetTracedTask();
+#endif
+
+ int32_t GetGeneration()
+ {
+ return mGeneration;
+ }
+
+ nsresult InitCommon(uint32_t aDelay, uint32_t aType);
+
+ struct Callback {
+ Callback() :
+ mType(Type::Unknown),
+ mName(Nothing),
+ mClosure(nullptr)
+ {
+ mCallback.c = nullptr;
+ }
+
+ Callback(const Callback& other) = delete;
+ Callback& operator=(const Callback& other) = delete;
+
+ ~Callback()
+ {
+ if (mType == Type::Interface) {
+ NS_RELEASE(mCallback.i);
+ } else if (mType == Type::Observer) {
+ NS_RELEASE(mCallback.o);
+ }
+ }
+
+ void swap(Callback& other)
+ {
+ std::swap(mType, other.mType);
+ std::swap(mCallback, other.mCallback);
+ std::swap(mName, other.mName);
+ std::swap(mClosure, other.mClosure);
+ }
+
+ enum class Type : uint8_t {
+ Unknown = 0,
+ Interface = 1,
+ Function = 2,
+ Observer = 3,
+ };
+ Type mType;
+
+ union CallbackUnion
+ {
+ nsTimerCallbackFunc c;
+ // These refcounted references are managed manually, as they are in a union
+ nsITimerCallback* MOZ_OWNING_REF i;
+ nsIObserver* MOZ_OWNING_REF o;
+ } mCallback;
+
+ // |Name| is a tagged union type representing one of (a) nothing, (b) a
+ // string, or (c) a function. mozilla::Variant doesn't naturally handle the
+ // "nothing" case, so we define a dummy type and value (which is unused and
+ // so the exact value doesn't matter) for it.
+ typedef const int NameNothing;
+ typedef const char* NameString;
+ typedef nsTimerNameCallbackFunc NameFunc;
+ typedef mozilla::Variant<NameNothing, NameString, NameFunc> Name;
+ static const NameNothing Nothing;
+ Name mName;
+
+ void* mClosure;
+ };
+
+ Callback& GetCallback()
+ {
+ mMutex.AssertCurrentThreadOwns();
+ if (mCallback.mType == Callback::Type::Unknown) {
+ return mCallbackDuringFire;
+ }
+
+ return mCallback;
+ }
+
+ bool IsRepeating() const
+ {
+ static_assert(nsITimer::TYPE_ONE_SHOT < nsITimer::TYPE_REPEATING_SLACK,
+ "invalid ordering of timer types!");
+ static_assert(
+ nsITimer::TYPE_REPEATING_SLACK < nsITimer::TYPE_REPEATING_PRECISE,
+ "invalid ordering of timer types!");
+ static_assert(
+ nsITimer::TYPE_REPEATING_PRECISE <
+ nsITimer::TYPE_REPEATING_PRECISE_CAN_SKIP,
+ "invalid ordering of timer types!");
+ return mType >= nsITimer::TYPE_REPEATING_SLACK;
+ }
+
+ nsCOMPtr<nsIEventTarget> mEventTarget;
+
+ void LogFiring(const Callback& aCallback, uint8_t aType, uint32_t aDelay);
+
+ nsresult InitWithFuncCallbackCommon(nsTimerCallbackFunc aFunc,
+ void* aClosure,
+ uint32_t aDelay,
+ uint32_t aType,
+ Callback::Name aName);
+
+ // These members are set by the initiating thread, when the timer's type is
+ // changed and during the period where it fires on that thread.
+ uint8_t mType;
+
+ // The generation number of this timer, re-generated each time the timer is
+ // initialized so one-shot timers can be canceled and re-initialized by the
+ // arming thread without any bad race conditions.
+ // Updated only after this timer has been removed from the timer thread.
+ int32_t mGeneration;
+
+ uint32_t mDelay;
+ // Updated only after this timer has been removed from the timer thread.
+ TimeStamp mTimeout;
+
+#ifdef MOZ_TASK_TRACER
+ mozilla::tasktracer::TracedTaskCommon mTracedTask;
+#endif
+
+ static double sDeltaSum;
+ static double sDeltaSumSquared;
+ static double sDeltaNum;
+ const RefPtr<nsITimer> mITimer;
+ mozilla::Mutex mMutex;
+ Callback mCallback;
+ Callback mCallbackDuringFire;
+};
+
+class nsTimer final : public nsITimer
+{
+ virtual ~nsTimer();
+public:
+ nsTimer() : mImpl(new nsTimerImpl(this)) {}
+
+ friend class TimerThread;
+ friend class nsTimerEvent;
+ friend struct TimerAdditionComparator;
+
+ NS_DECL_THREADSAFE_ISUPPORTS
+ NS_FORWARD_SAFE_NSITIMER(mImpl);
+
+ virtual size_t SizeOfIncludingThis(mozilla::MallocSizeOf aMallocSizeOf) const override;
+
+private:
+ // nsTimerImpl holds a strong ref to us. When our refcount goes to 1, we will
+ // null this to break the cycle.
+ RefPtr<nsTimerImpl> mImpl;
+};
+
+#endif /* nsTimerImpl_h___ */
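
For the Callback::Type::Interface path, a short illustrative sketch (the class
and interval are assumptions) showing why IsRepeating() matters: with
TYPE_REPEATING_SLACK the impl reschedules relative to "now" in Fire(), so a
slow Notify() makes the timer drift rather than pile up.

    #include "nsCOMPtr.h"
    #include "nsITimer.h"

    class CacheFlusher final : public nsITimerCallback
    {
    public:
      NS_DECL_THREADSAFE_ISUPPORTS

      NS_IMETHOD Notify(nsITimer* aTimer) override
      {
        // ... periodic work; held by nsTimerImpl as Callback::Type::Interface ...
        return NS_OK;
      }

    private:
      ~CacheFlusher() = default;
    };

    NS_IMPL_ISUPPORTS(CacheFlusher, nsITimerCallback)

    // timer->InitWithCallback(new CacheFlusher(), 1000,
    //                         nsITimer::TYPE_REPEATING_SLACK);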