summaryrefslogtreecommitdiffstats
path: root/js/src/ds
diff options
context:
space:
mode:
Diffstat (limited to 'js/src/ds')
-rw-r--r--js/src/ds/BitArray.h82
-rw-r--r--js/src/ds/Fifo.h156
-rw-r--r--js/src/ds/FixedSizeHash.h128
-rw-r--r--js/src/ds/IdValuePair.h44
-rw-r--r--js/src/ds/InlineTable.h687
-rw-r--r--js/src/ds/LifoAlloc.cpp171
-rw-r--r--js/src/ds/LifoAlloc.h635
-rw-r--r--js/src/ds/MemoryProtectionExceptionHandler.cpp760
-rw-r--r--js/src/ds/MemoryProtectionExceptionHandler.h45
-rw-r--r--js/src/ds/OrderedHashTable.h841
-rw-r--r--js/src/ds/PageProtectingVector.h257
-rw-r--r--js/src/ds/PriorityQueue.h135
-rw-r--r--js/src/ds/Sort.h137
-rw-r--r--js/src/ds/SplayTree.h308
-rw-r--r--js/src/ds/TraceableFifo.h128
15 files changed, 4514 insertions, 0 deletions
diff --git a/js/src/ds/BitArray.h b/js/src/ds/BitArray.h
new file mode 100644
index 000000000..821296e5b
--- /dev/null
+++ b/js/src/ds/BitArray.h
@@ -0,0 +1,82 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef ds_BitArray_h
+#define ds_BitArray_h
+
+#include "mozilla/TemplateLib.h"
+
+#include <limits.h>
+
+#include "jstypes.h"
+
+namespace js {
+
template <size_t nbits>
class BitArray
{
  private:
    // Use a 32 bit word to make it easier to access a BitArray from JIT code.
    using WordT = uint32_t;

    static const size_t bitsPerElement = sizeof(WordT) * CHAR_BIT;
    static const size_t numSlots = nbits / bitsPerElement + (nbits % bitsPerElement == 0 ? 0 : 1);
    static const size_t paddingBits = (numSlots * bitsPerElement) - nbits;
    static_assert(paddingBits < bitsPerElement, "More padding bits than expected.");
    static const WordT paddingMask = WordT(-1) >> paddingBits;

    WordT map[numSlots];

  public:
    // Set every bit to |value|. When filling with ones, the padding bits in
    // the final word are kept clear.
    void clear(bool value) {
        const WordT fill = value ? WordT(-1) : WordT(0);
        for (size_t i = 0; i < numSlots; i++)
            map[i] = fill;
        if (value)
            map[numSlots - 1] &= paddingMask;
    }

    // Return whether the bit at |offset| is set.
    inline bool get(size_t offset) const {
        const WordT word = map[offset / bitsPerElement];
        return (word & (WordT(1) << (offset % bitsPerElement))) != 0;
    }

    // Set the bit at |offset|.
    void set(size_t offset) {
        map[offset / bitsPerElement] |= WordT(1) << (offset % bitsPerElement);
    }

    // Clear the bit at |offset|.
    void unset(size_t offset) {
        map[offset / bitsPerElement] &= ~(WordT(1) << (offset % bitsPerElement));
    }

    // Return true iff no bit is set.
    bool isAllClear() const {
        for (WordT word : map) {
            if (word != 0)
                return false;
        }
        return true;
    }

    // Translate a bit offset into the index of the containing word and a
    // single-bit mask within that word. Kept public so JIT code generators
    // can mirror the layout.
    static void getIndexAndMask(size_t offset, size_t* indexp, WordT* maskp) {
        static_assert(bitsPerElement == 32, "unexpected bitsPerElement value");
        *indexp = offset / bitsPerElement;
        *maskp = WordT(1) << (offset % bitsPerElement);
    }

    // Byte offset of the bit storage, for direct access from JIT code.
    static size_t offsetOfMap() {
        return offsetof(BitArray<nbits>, map);
    }
};
+
+} /* namespace js */
+
+#endif /* ds_BitArray_h */
diff --git a/js/src/ds/Fifo.h b/js/src/ds/Fifo.h
new file mode 100644
index 000000000..84851ac94
--- /dev/null
+++ b/js/src/ds/Fifo.h
@@ -0,0 +1,156 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef js_Fifo_h
+#define js_Fifo_h
+
+#include "mozilla/Move.h"
+
+#include "js/Utility.h"
+#include "js/Vector.h"
+
+namespace js {
+
+// A first-in-first-out queue container type. Fifo calls constructors and
+// destructors of all elements added so non-PODs may be used safely. |Fifo|
+// stores the first |MinInlineCapacity| elements in-place before resorting to
+// dynamic allocation.
+//
+// T requirements:
+// - Either movable or copyable.
+// MinInlineCapacity requirements:
+// - Must be even.
+// AllocPolicy:
+// - see "Allocation policies" in AllocPolicy.h
template <typename T,
          size_t MinInlineCapacity = 0,
          class AllocPolicy = TempAllocPolicy>
class Fifo
{
    static_assert(MinInlineCapacity % 2 == 0, "MinInlineCapacity must be even!");

  protected:
    // An element A is "younger" than an element B if B was inserted into the
    // |Fifo| before A was.
    //
    // Invariant 1: Every element within |front_| is older than every element
    // within |rear_|.
    // Invariant 2: Entries within |front_| are sorted from younger to older.
    // Invariant 3: Entries within |rear_| are sorted from older to younger.
    // Invariant 4: If the |Fifo| is not empty, then |front_| is not empty.
    Vector<T, MinInlineCapacity / 2, AllocPolicy> front_;
    Vector<T, MinInlineCapacity / 2, AllocPolicy> rear_;

  private:
    // Maintain invariants after adding or removing entries. If |front_| has
    // drained, refill it by moving |rear_|'s entries over; moving from the
    // back of |rear_| to the back of |front_| reverses their order, restoring
    // Invariants 1, 2, and 4. Returns false on OOM without moving anything.
    bool fixup() {
        if (!front_.empty())
            return true;

        if (!front_.reserve(rear_.length()))
            return false;

        while (!rear_.empty()) {
            front_.infallibleAppend(mozilla::Move(rear_.back()));
            rear_.popBack();
        }

        return true;
    }

  public:
    explicit Fifo(AllocPolicy alloc = AllocPolicy())
      : front_(alloc)
      , rear_(alloc)
    { }

    Fifo(Fifo&& rhs)
      : front_(mozilla::Move(rhs.front_))
      , rear_(mozilla::Move(rhs.rear_))
    { }

    // Move-assign by destroying in place and re-constructing from |rhs|.
    Fifo& operator=(Fifo&& rhs) {
        MOZ_ASSERT(&rhs != this, "self-move disallowed");
        this->~Fifo();
        new (this) Fifo(mozilla::Move(rhs));
        return *this;
    }

    Fifo(const Fifo&) = delete;
    Fifo& operator=(const Fifo&) = delete;

    // Return the number of elements currently in the queue.
    size_t length() const {
        MOZ_ASSERT_IF(rear_.length() > 0, front_.length() > 0); // Invariant 4.
        return front_.length() + rear_.length();
    }

    // Invariant 4 lets us test only |front_| here.
    bool empty() const {
        MOZ_ASSERT_IF(rear_.length() > 0, front_.length() > 0); // Invariant 4.
        return front_.empty();
    }

    // Push an element to the back of the queue. This method can take either a
    // |const T&| or a |T&&|. Returns false on OOM, leaving the queue as it
    // was before the call.
    template <typename U>
    MOZ_MUST_USE bool pushBack(U&& u) {
        if (!rear_.append(mozilla::Forward<U>(u)))
            return false;
        if (!fixup()) {
            // Undo the append so a failed push leaves the queue untouched.
            rear_.popBack();
            return false;
        }
        return true;
    }

    // Construct a T in-place at the back of the queue. Returns false on OOM,
    // leaving the queue as it was before the call.
    template <typename... Args>
    MOZ_MUST_USE bool emplaceBack(Args&&... args) {
        if (!rear_.emplaceBack(mozilla::Forward<Args>(args)...))
            return false;
        if (!fixup()) {
            // Undo the emplace so a failed call leaves the queue untouched.
            rear_.popBack();
            return false;
        }
        return true;
    }

    // Access the element at the front of the queue. Per Invariant 2, the
    // oldest element lives at the *back* of |front_|.
    T& front() {
        MOZ_ASSERT(!empty());
        return front_.back();
    }
    const T& front() const {
        MOZ_ASSERT(!empty());
        return front_.back();
    }

    // Remove the front element from the queue. Returns false on OOM during
    // the rebalance, restoring the popped element first (or crashing if even
    // that fails).
    MOZ_MUST_USE bool popFront() {
        MOZ_ASSERT(!empty());
        T t(mozilla::Move(front()));
        front_.popBack();
        if (!fixup()) {
            // Attempt to remain in a valid state by reinserting the element
            // back at the front. If we can't remain in a valid state in the
            // face of OOMs, crash.
            AutoEnterOOMUnsafeRegion oomUnsafe;
            if (!front_.append(mozilla::Move(t)))
                oomUnsafe.crash("js::Fifo::popFront");
            return false;
        }
        return true;
    }

    // Clear all elements from the queue.
    void clear() {
        front_.clear();
        rear_.clear();
    }
};
+
+} // namespace js
+
+#endif /* js_Fifo_h */
diff --git a/js/src/ds/FixedSizeHash.h b/js/src/ds/FixedSizeHash.h
new file mode 100644
index 000000000..9b4752a4f
--- /dev/null
+++ b/js/src/ds/FixedSizeHash.h
@@ -0,0 +1,128 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jsfixedsizehash_h_
+#define jsfixedsizehash_h_
+
+#include "ds/LifoAlloc.h"
+
+namespace js {
+
+/*
+ * Class representing a hash set with fixed capacity, with newer entries
+ * evicting older entries. Each entry has several hashes and can be stored in
+ * different buckets, with the choice of which to evict on insertion being
+ * managed via LRU. For tables with a relatively small size, using different
+ * hashes increases utilization and makes it less likely that entries will keep
+ * evicting each other due to wanting to use the same bucket.
+ *
+ * T indicates the type of hash elements, HashPolicy must have the following
+ * contents:
+ *
+ * Lookup - As for HashMap / HashSet.
+ *
+ * bool match(T, Lookup) - As for HashMap / HashSet.
+ *
+ * NumHashes - Number of different hashes generated for each entry.
+ *
+ * void hash(Lookup, HashNumber[NumHashes]) - Compute all hashes for an entry.
+ *
+ * void clear(T*) - Clear an entry, such that isCleared() holds afterwards.
+ *
+ * bool isCleared(T) - Test whether an entry has been cleared.
+ */
+template <class T, class HashPolicy, size_t Capacity>
+class FixedSizeHashSet
+{
+ T entries[Capacity];
+ uint32_t lastOperations[Capacity];
+ uint32_t numOperations;
+
+ static const size_t NumHashes = HashPolicy::NumHashes;
+
+ static_assert(Capacity > 0, "an empty fixed-size hash set is meaningless");
+
+ public:
+ typedef typename HashPolicy::Lookup Lookup;
+
+ FixedSizeHashSet()
+ : entries(), lastOperations(), numOperations(0)
+ {
+ MOZ_ASSERT(HashPolicy::isCleared(entries[0]));
+ }
+
+ bool lookup(const Lookup& lookup, T* pentry)
+ {
+ size_t bucket;
+ if (lookupReference(lookup, &bucket)) {
+ *pentry = entries[bucket];
+ lastOperations[bucket] = numOperations++;
+ return true;
+ }
+ return false;
+ }
+
+ void insert(const Lookup& lookup, const T& entry)
+ {
+ size_t buckets[NumHashes];
+ getBuckets(lookup, buckets);
+
+ size_t min = buckets[0];
+ for (size_t i = 0; i < NumHashes; i++) {
+ const T& entry = entries[buckets[i]];
+ if (HashPolicy::isCleared(entry)) {
+ entries[buckets[i]] = entry;
+ lastOperations[buckets[i]] = numOperations++;
+ return;
+ }
+ if (i && lastOperations[min] > lastOperations[buckets[i]])
+ min = buckets[i];
+ }
+
+ entries[min] = entry;
+ lastOperations[min] = numOperations++;
+ }
+
+ template <typename S>
+ void remove(const S& s)
+ {
+ size_t bucket;
+ if (lookupReference(s, &bucket))
+ HashPolicy::clear(&entries[bucket]);
+ }
+
+ private:
+ template <typename S>
+ bool lookupReference(const S& s, size_t* pbucket)
+ {
+ size_t buckets[NumHashes];
+ getBuckets(s, buckets);
+
+ for (size_t i = 0; i < NumHashes; i++) {
+ const T& entry = entries[buckets[i]];
+ if (!HashPolicy::isCleared(entry) && HashPolicy::match(entry, s)) {
+ *pbucket = buckets[i];
+ return true;
+ }
+ }
+
+ return false;
+ }
+
+ template <typename S>
+ void getBuckets(const S& s, size_t buckets[NumHashes])
+ {
+ HashNumber hashes[NumHashes];
+ HashPolicy::hash(s, hashes);
+
+ for (size_t i = 0; i < NumHashes; i++)
+ buckets[i] = hashes[i] % Capacity;
+ }
+};
+
+} /* namespace js */
+
+#endif /* jsfixedsizehash_h_ */
diff --git a/js/src/ds/IdValuePair.h b/js/src/ds/IdValuePair.h
new file mode 100644
index 000000000..000fdcf66
--- /dev/null
+++ b/js/src/ds/IdValuePair.h
@@ -0,0 +1,44 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef ds_IdValuePair_h
+#define ds_IdValuePair_h
+
+#include "jsapi.h"
+
+#include "NamespaceImports.h"
+#include "gc/Tracer.h"
+#include "js/GCVector.h"
+#include "js/Id.h"
+
+namespace js {
+
// A (jsid, Value) pair, traceable so lists of property id/value pairs can be
// rooted (see IdValueVector below).
struct IdValuePair
{
    Value value;
    jsid id;

    // Default: empty id paired with the undefined value.
    IdValuePair()
      : value(UndefinedValue()), id(JSID_EMPTY)
    {}
    // Pair |idArg| with the undefined value.
    explicit IdValuePair(jsid idArg)
      : value(UndefinedValue()), id(idArg)
    {}
    IdValuePair(jsid idArg, const Value& valueArg)
      : value(valueArg), id(idArg)
    {}

    // Trace both members as GC roots.
    void trace(JSTracer* trc) {
        TraceRoot(trc, &value, "IdValuePair::value");
        TraceRoot(trc, &id, "IdValuePair::id");
    }
};
+
+using IdValueVector = JS::GCVector<IdValuePair>;
+
+} /* namespace js */
+
+#endif /* ds_IdValuePair_h */
diff --git a/js/src/ds/InlineTable.h b/js/src/ds/InlineTable.h
new file mode 100644
index 000000000..5b648735f
--- /dev/null
+++ b/js/src/ds/InlineTable.h
@@ -0,0 +1,687 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef ds_InlineTable_h
+#define ds_InlineTable_h
+
+#include "mozilla/Move.h"
+
+#include "jsalloc.h"
+
+#include "js/HashTable.h"
+
+namespace js {
+
+namespace detail {
+
// Shared implementation for InlineMap and InlineSet: stores up to
// InlineEntries entries in a flat array, switching to a hash |Table| once the
// inline space is exhausted. A zero key marks an empty/removed inline slot.
template <typename InlineEntry,
          typename Entry,
          typename Table,
          typename HashPolicy,
          typename AllocPolicy,
          size_t InlineEntries>
class InlineTable
{
  private:
    using TablePtr = typename Table::Ptr;
    using TableAddPtr = typename Table::AddPtr;
    using TableRange = typename Table::Range;
    using Lookup = typename HashPolicy::Lookup;

    // |inlNext_| doubles as the mode flag: values <= InlineEntries mean we
    // are in inline mode and it is the next free slot index; the sentinel
    // InlineEntries + 1 (set by switchToTable) means we are in table mode.
    size_t inlNext_;
    // Number of live (non-tombstone) inline entries; only meaningful in
    // inline mode.
    size_t inlCount_;
    InlineEntry inl_[InlineEntries];
    Table table_;

#ifdef DEBUG
    template <typename Key>
    static bool keyNonZero(const Key& key) {
        // Zero as tombstone means zero keys are invalid.
        return !!key;
    }
#endif

    InlineEntry* inlineStart() {
        MOZ_ASSERT(!usingTable());
        return inl_;
    }

    const InlineEntry* inlineStart() const {
        MOZ_ASSERT(!usingTable());
        return inl_;
    }

    // One past the last slot ever written; removed slots in between have a
    // zero key and must be skipped by callers.
    InlineEntry* inlineEnd() {
        MOZ_ASSERT(!usingTable());
        return inl_ + inlNext_;
    }

    const InlineEntry* inlineEnd() const {
        MOZ_ASSERT(!usingTable());
        return inl_ + inlNext_;
    }

    bool usingTable() const {
        return inlNext_ > InlineEntries;
    }

    // Move every live inline entry into |table_| and enter table mode.
    // Returns false on OOM.
    MOZ_MUST_USE bool switchToTable() {
        MOZ_ASSERT(inlNext_ == InlineEntries);

        if (table_.initialized()) {
            table_.clear();
        } else {
            if (!table_.init(count()))
                return false;
            MOZ_ASSERT(table_.initialized());
        }

        InlineEntry* end = inlineEnd();
        for (InlineEntry* it = inlineStart(); it != end; ++it) {
            // Skip tombstones (zero keys) left by remove().
            if (it->key && !it->moveTo(table_))
                return false;
        }

        // Enter table mode by setting the sentinel value.
        inlNext_ = InlineEntries + 1;
        MOZ_ASSERT(table_.count() == inlCount_);
        MOZ_ASSERT(usingTable());
        return true;
    }

    MOZ_NEVER_INLINE
    MOZ_MUST_USE bool switchAndAdd(const InlineEntry& entry) {
        if (!switchToTable())
            return false;

        return entry.putNew(table_);
    }

  public:
    static const size_t SizeOfInlineEntries = sizeof(InlineEntry) * InlineEntries;

    explicit InlineTable(AllocPolicy a = AllocPolicy())
      : inlNext_(0),
        inlCount_(0),
        table_(a)
    { }

    // Result of a lookup: points at either an inline entry or a table entry.
    class Ptr
    {
        friend class InlineTable;

      protected:
        Entry entry_;
        TablePtr tablePtr_;
        InlineEntry* inlPtr_;
        bool isInlinePtr_;

        explicit Ptr(TablePtr p)
          : entry_(p.found() ? &*p : nullptr),
            tablePtr_(p),
            isInlinePtr_(false)
        { }

        explicit Ptr(InlineEntry* inlineEntry)
          : entry_(inlineEntry),
            inlPtr_(inlineEntry),
            isInlinePtr_(true)
        { }

        // Deliberately left unimplemented.
        void operator==(const Ptr& other);

      public:
        // Leaves Ptr uninitialized.
        Ptr() {
#ifdef DEBUG
            inlPtr_ = (InlineEntry*) 0xbad;
            isInlinePtr_ = true;
#endif
        }

        // Default copy constructor works for this structure.

        bool found() const {
            return isInlinePtr_ ? bool(inlPtr_) : tablePtr_.found();
        }

        explicit operator bool() const {
            return found();
        }

        bool operator==(const Ptr& other) const {
            MOZ_ASSERT(found() && other.found());
            if (isInlinePtr_ != other.isInlinePtr_)
                return false;
            if (isInlinePtr_)
                return inlPtr_ == other.inlPtr_;
            return tablePtr_ == other.tablePtr_;
        }

        bool operator!=(const Ptr& other) const {
            return !(*this == other);
        }

        Entry& operator*() {
            MOZ_ASSERT(found());
            return entry_;
        }

        Entry* operator->() {
            MOZ_ASSERT(found());
            return &entry_;
        }
    };

    // Result of lookupForAdd: either a found entry or a position where a new
    // entry may be added via add().
    class AddPtr
    {
        friend class InlineTable;

      protected:
        Entry entry_;
        TableAddPtr tableAddPtr_;
        InlineEntry* inlAddPtr_;
        bool isInlinePtr_;
        // Indicates whether inlAddPtr is a found result or an add pointer.
        bool inlPtrFound_;

        AddPtr(InlineEntry* ptr, bool found)
          : entry_(ptr),
            inlAddPtr_(ptr),
            isInlinePtr_(true),
            inlPtrFound_(found)
        {}

        explicit AddPtr(const TableAddPtr& p)
          : entry_(p.found() ? &*p : nullptr),
            tableAddPtr_(p),
            isInlinePtr_(false)
        { }

      public:
        AddPtr() {}

        bool found() const {
            return isInlinePtr_ ? inlPtrFound_ : tableAddPtr_.found();
        }

        explicit operator bool() const {
            return found();
        }

        bool operator==(const AddPtr& other) const {
            MOZ_ASSERT(found() && other.found());
            if (isInlinePtr_ != other.isInlinePtr_)
                return false;
            if (isInlinePtr_)
                return inlAddPtr_ == other.inlAddPtr_;
            return tableAddPtr_ == other.tableAddPtr_;
        }

        bool operator!=(const AddPtr& other) const {
            return !(*this == other);
        }

        Entry& operator*() {
            MOZ_ASSERT(found());
            return entry_;
        }

        Entry* operator->() {
            MOZ_ASSERT(found());
            return &entry_;
        }
    };

    size_t count() const {
        return usingTable() ? table_.count() : inlCount_;
    }

    bool empty() const {
        return usingTable() ? table_.empty() : !inlCount_;
    }

    // Reset to empty inline mode. The hash table, if initialized, keeps its
    // (now unreachable) contents until the next switchToTable clears it.
    void clear() {
        inlNext_ = 0;
        inlCount_ = 0;
    }

    MOZ_ALWAYS_INLINE
    Ptr lookup(const Lookup& l) {
        MOZ_ASSERT(keyNonZero(l));

        if (usingTable())
            return Ptr(table_.lookup(l));

        // Inline mode: linear scan over the written slots, skipping
        // tombstones.
        InlineEntry* end = inlineEnd();
        for (InlineEntry* it = inlineStart(); it != end; ++it) {
            if (it->key && HashPolicy::match(it->key, l))
                return Ptr(it);
        }

        return Ptr(nullptr);
    }

    MOZ_ALWAYS_INLINE
    AddPtr lookupForAdd(const Lookup& l) {
        MOZ_ASSERT(keyNonZero(l));

        if (usingTable())
            return AddPtr(table_.lookupForAdd(l));

        InlineEntry* end = inlineEnd();
        for (InlineEntry* it = inlineStart(); it != end; ++it) {
            if (it->key && HashPolicy::match(it->key, l))
                return AddPtr(it, true);
        }

        // The add pointer that's returned here may indicate the limit entry of
        // the linear space, in which case the |add| operation will initialize
        // the table if necessary and add the entry there.
        return AddPtr(inlineEnd(), false);
    }

    // Add a new entry at |p|, which must be a not-found AddPtr obtained from
    // lookupForAdd with the same key. May switch to table mode. Returns
    // false on OOM.
    template <typename KeyInput,
              typename... Args>
    MOZ_ALWAYS_INLINE
    MOZ_MUST_USE bool add(AddPtr& p, KeyInput&& key, Args&&... args) {
        MOZ_ASSERT(!p);
        MOZ_ASSERT(keyNonZero(key));

        if (p.isInlinePtr_) {
            InlineEntry* addPtr = p.inlAddPtr_;
            MOZ_ASSERT(addPtr == inlineEnd());

            // Switching to table mode before we add this pointer.
            if (addPtr == inlineStart() + InlineEntries) {
                if (!switchToTable())
                    return false;
                return table_.putNew(mozilla::Forward<KeyInput>(key),
                                     mozilla::Forward<Args>(args)...);
            }

            MOZ_ASSERT(!p.found());
            MOZ_ASSERT(uintptr_t(inlineEnd()) == uintptr_t(p.inlAddPtr_));
            addPtr->update(mozilla::Forward<KeyInput>(key),
                           mozilla::Forward<Args>(args)...);
            ++inlCount_;
            ++inlNext_;
            return true;
        }

        return table_.add(p.tableAddPtr_,
                          mozilla::Forward<KeyInput>(key),
                          mozilla::Forward<Args>(args)...);
    }

    // Remove the entry at |p|. In inline mode this zeroes the key, leaving a
    // tombstone slot that scans must skip.
    void remove(Ptr& p) {
        MOZ_ASSERT(p);
        if (p.isInlinePtr_) {
            MOZ_ASSERT(inlCount_ > 0);
            MOZ_ASSERT(p.inlPtr_->key != nullptr);
            p.inlPtr_->key = nullptr;
            --inlCount_;
            return;
        }
        MOZ_ASSERT(table_.initialized() && usingTable());
        table_.remove(p.tablePtr_);
    }

    void remove(const Lookup& l) {
        if (Ptr p = lookup(l))
            remove(p);
    }

    // Iterator over all live entries, in either mode.
    class Range
    {
        friend class InlineTable;

      protected:
        TableRange tableRange_;
        InlineEntry* cur_;
        InlineEntry* end_;
        bool isInline_;

        explicit Range(TableRange r)
          : cur_(nullptr),
            end_(nullptr),
            isInline_(false)
        {
            tableRange_ = r;
            MOZ_ASSERT(!isInlineRange());
        }

        Range(const InlineEntry* begin, const InlineEntry* end)
          : cur_(const_cast<InlineEntry*>(begin)),
            end_(const_cast<InlineEntry*>(end)),
            isInline_(true)
        {
            // Skip any leading tombstones so front() is always live.
            advancePastNulls(cur_);
            MOZ_ASSERT(isInlineRange());
        }

        bool assertInlineRangeInvariants() const {
            MOZ_ASSERT(uintptr_t(cur_) <= uintptr_t(end_));
            MOZ_ASSERT_IF(cur_ != end_, cur_->key != nullptr);
            return true;
        }

        bool isInlineRange() const {
            MOZ_ASSERT_IF(isInline_, assertInlineRangeInvariants());
            return isInline_;
        }

        // Advance |cur_| past any zero-key tombstone slots.
        void advancePastNulls(InlineEntry* begin) {
            InlineEntry* newCur = begin;
            while (newCur < end_ && nullptr == newCur->key)
                ++newCur;
            MOZ_ASSERT(uintptr_t(newCur) <= uintptr_t(end_));
            cur_ = newCur;
        }

        void bumpCurPtr() {
            MOZ_ASSERT(isInlineRange());
            advancePastNulls(cur_ + 1);
        }

      public:
        bool empty() const {
            return isInlineRange() ? cur_ == end_ : tableRange_.empty();
        }

        Entry front() {
            MOZ_ASSERT(!empty());
            if (isInlineRange())
                return Entry(cur_);
            return Entry(&tableRange_.front());
        }

        void popFront() {
            MOZ_ASSERT(!empty());
            if (isInlineRange())
                bumpCurPtr();
            else
                tableRange_.popFront();
        }
    };

    Range all() const {
        return usingTable() ? Range(table_.all()) : Range(inlineStart(), inlineEnd());
    }
};
+
+} // namespace detail
+
+// A map with InlineEntries number of entries kept inline in an array.
+//
+// The Key type must be zeroable as zeros are used as tombstone keys.
+// The Value type must have a default constructor.
+//
+// The API is very much like HashMap's.
template <typename Key,
          typename Value,
          size_t InlineEntries,
          typename HashPolicy = DefaultHasher<Key>,
          typename AllocPolicy = TempAllocPolicy>
class InlineMap
{
    using Map = HashMap<Key, Value, HashPolicy, AllocPolicy>;

    // Storage for one inline (key, value) slot; a zero key marks an
    // empty/removed slot.
    struct InlineEntry
    {
        Key key;
        Value value;

        template <typename KeyInput, typename ValueInput>
        void update(KeyInput&& key, ValueInput&& value) {
            this->key = mozilla::Forward<KeyInput>(key);
            this->value = mozilla::Forward<ValueInput>(value);
        }

        // Move this entry into |map| when switching to table mode.
        MOZ_MUST_USE bool moveTo(Map& map) {
            return map.putNew(mozilla::Move(key), mozilla::Move(value));
        }
    };

    // Uniform view over either a HashMap entry or an inline entry; exactly
    // one of the two pointers is non-null.
    class Entry
    {
        using MapEntry = typename Map::Entry;

        MapEntry* mapEntry_;
        InlineEntry* inlineEntry_;

      public:
        Entry() = default;

        explicit Entry(MapEntry* mapEntry)
          : mapEntry_(mapEntry),
            inlineEntry_(nullptr)
        { }

        explicit Entry(InlineEntry* inlineEntry)
          : mapEntry_(nullptr),
            inlineEntry_(inlineEntry)
        { }

        const Key& key() const {
            MOZ_ASSERT(!!mapEntry_ != !!inlineEntry_);
            if (mapEntry_)
                return mapEntry_->key();
            return inlineEntry_->key;
        }

        Value& value() {
            MOZ_ASSERT(!!mapEntry_ != !!inlineEntry_);
            if (mapEntry_)
                return mapEntry_->value();
            return inlineEntry_->value;
        }
    };

    using Impl = detail::InlineTable<InlineEntry, Entry,
                                     Map, HashPolicy, AllocPolicy,
                                     InlineEntries>;

    Impl impl_;

  public:
    using Table = Map;
    using Ptr = typename Impl::Ptr;
    using AddPtr = typename Impl::AddPtr;
    using Range = typename Impl::Range;
    using Lookup = typename HashPolicy::Lookup;

    static const size_t SizeOfInlineEntries = Impl::SizeOfInlineEntries;

    explicit InlineMap(AllocPolicy a = AllocPolicy())
      : impl_(a)
    { }

    size_t count() const {
        return impl_.count();
    }

    bool empty() const {
        return impl_.empty();
    }

    void clear() {
        impl_.clear();
    }

    Range all() const {
        return impl_.all();
    }

    MOZ_ALWAYS_INLINE
    Ptr lookup(const Lookup& l) {
        return impl_.lookup(l);
    }

    MOZ_ALWAYS_INLINE
    bool has(const Lookup& l) const {
        return const_cast<InlineMap*>(this)->lookup(l).found();
    }

    MOZ_ALWAYS_INLINE
    AddPtr lookupForAdd(const Lookup& l) {
        return impl_.lookupForAdd(l);
    }

    // Add a new entry at |p|, which must be a not-found AddPtr for |key|.
    template <typename KeyInput, typename ValueInput>
    MOZ_ALWAYS_INLINE
    MOZ_MUST_USE bool add(AddPtr& p, KeyInput&& key, ValueInput&& value) {
        return impl_.add(p, mozilla::Forward<KeyInput>(key), mozilla::Forward<ValueInput>(value));
    }

    // Insert a new entry for |key|, or overwrite the existing one.
    template <typename KeyInput, typename ValueInput>
    MOZ_MUST_USE bool put(KeyInput&& key, ValueInput&& value) {
        AddPtr p = lookupForAdd(key);
        if (p) {
            p->value() = mozilla::Forward<ValueInput>(value);
            return true;
        }
        return add(p, mozilla::Forward<KeyInput>(key), mozilla::Forward<ValueInput>(value));
    }

    void remove(Ptr& p) {
        impl_.remove(p);
    }

    void remove(const Lookup& l) {
        impl_.remove(l);
    }
};
+
+// A set with InlineEntries number of entries kept inline in an array.
+//
+// The T type must be zeroable as zeros are used as tombstone keys.
+// The T type must have a default constructor.
+//
+// The API is very much like HashMap's.
template <typename T,
          size_t InlineEntries,
          typename HashPolicy = DefaultHasher<T>,
          typename AllocPolicy = TempAllocPolicy>
class InlineSet
{
    using Set = HashSet<T, HashPolicy, AllocPolicy>;

    // Storage for one inline element; a zero key marks an empty/removed slot.
    struct InlineEntry
    {
        T key;

        template <typename TInput>
        void update(TInput&& key) {
            this->key = mozilla::Forward<TInput>(key);
        }

        // Move this entry into |set| when switching to table mode.
        MOZ_MUST_USE bool moveTo(Set& set) {
            return set.putNew(mozilla::Move(key));
        }
    };

    // Uniform view over either a HashSet entry or an inline entry; exactly
    // one of the two pointers is non-null.
    class Entry
    {
        using SetEntry = typename Set::Entry;

        SetEntry* setEntry_;
        InlineEntry* inlineEntry_;

      public:
        Entry() = default;

        explicit Entry(const SetEntry* setEntry)
          : setEntry_(const_cast<SetEntry*>(setEntry)),
            inlineEntry_(nullptr)
        { }

        explicit Entry(InlineEntry* inlineEntry)
          : setEntry_(nullptr),
            inlineEntry_(inlineEntry)
        { }

        operator T() const {
            MOZ_ASSERT(!!setEntry_ != !!inlineEntry_);
            if (setEntry_)
                return *setEntry_;
            return inlineEntry_->key;
        }
    };

    using Impl = detail::InlineTable<InlineEntry, Entry,
                                     Set, HashPolicy, AllocPolicy,
                                     InlineEntries>;

    Impl impl_;

  public:
    using Table = Set;
    using Ptr = typename Impl::Ptr;
    using AddPtr = typename Impl::AddPtr;
    using Range = typename Impl::Range;
    using Lookup = typename HashPolicy::Lookup;

    static const size_t SizeOfInlineEntries = Impl::SizeOfInlineEntries;

    explicit InlineSet(AllocPolicy a = AllocPolicy())
      : impl_(a)
    { }

    size_t count() const {
        return impl_.count();
    }

    bool empty() const {
        return impl_.empty();
    }

    void clear() {
        impl_.clear();
    }

    Range all() const {
        return impl_.all();
    }

    MOZ_ALWAYS_INLINE
    Ptr lookup(const Lookup& l) {
        return impl_.lookup(l);
    }

    MOZ_ALWAYS_INLINE
    bool has(const Lookup& l) const {
        return const_cast<InlineSet*>(this)->lookup(l).found();
    }

    MOZ_ALWAYS_INLINE
    AddPtr lookupForAdd(const Lookup& l) {
        return impl_.lookupForAdd(l);
    }

    // Add a new element at |p|, which must be a not-found AddPtr for |key|.
    template <typename TInput>
    MOZ_ALWAYS_INLINE
    MOZ_MUST_USE bool add(AddPtr& p, TInput&& key) {
        return impl_.add(p, mozilla::Forward<TInput>(key));
    }

    // Insert |key| if not already present.
    template <typename TInput>
    MOZ_MUST_USE bool put(TInput&& key) {
        AddPtr p = lookupForAdd(key);
        return p ? true : add(p, mozilla::Forward<TInput>(key));
    }

    void remove(Ptr& p) {
        impl_.remove(p);
    }

    void remove(const Lookup& l) {
        impl_.remove(l);
    }
};
+
+} // namespace js
+
+#endif // ds_InlineTable_h
diff --git a/js/src/ds/LifoAlloc.cpp b/js/src/ds/LifoAlloc.cpp
new file mode 100644
index 000000000..bd1cf7c5e
--- /dev/null
+++ b/js/src/ds/LifoAlloc.cpp
@@ -0,0 +1,171 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "ds/LifoAlloc.h"
+
+#include "mozilla/MathAlgorithms.h"
+
+using namespace js;
+
+using mozilla::RoundUpPow2;
+using mozilla::tl::BitSize;
+
+namespace js {
+namespace detail {
+
// Allocate a chunk of |chunkSize| bytes (which must already be a power of
// two) and placement-construct the BumpChunk header at its start. Returns
// nullptr on OOM.
BumpChunk*
BumpChunk::new_(size_t chunkSize)
{
    MOZ_ASSERT(RoundUpPow2(chunkSize) == chunkSize);
    void* mem = js_malloc(chunkSize);
    if (!mem)
        return nullptr;
    BumpChunk* result = new (mem) BumpChunk(chunkSize - sizeof(BumpChunk));

    // We assume that the alignment of sAlign is less than that of
    // the underlying memory allocator -- creating a new BumpChunk should
    // always satisfy the sAlign alignment constraint.
    MOZ_ASSERT(AlignPtr(result->bump) == result->bump);
    return result;
}
+
// Tear down a chunk created by |new_|. In DEBUG builds the whole chunk is
// first un-poisoned and then filled with 0xcd to make use-after-free visible.
void
BumpChunk::delete_(BumpChunk* chunk)
{
#ifdef DEBUG
    // Part of the chunk may have been marked as poisoned/noaccess. Undo that
    // before writing the 0xcd bytes.
    size_t size = sizeof(*chunk) + chunk->bumpSpaceSize;
    MOZ_MAKE_MEM_UNDEFINED(chunk, size);
    memset(chunk, 0xcd, size);
#endif
    js_free(chunk);
}
+
// Return whether |n| bytes fit in this chunk after aligning the bump pointer.
// NOTE(review): the |bumped > headerBase()| test presumably rejects requests
// so large that |aligned + n| wraps around — confirm against callers.
bool
BumpChunk::canAlloc(size_t n)
{
    char* aligned = AlignPtr(bump);
    char* bumped = aligned + n;
    return bumped <= limit && bumped > headerBase();
}
+
+} // namespace detail
+} // namespace js
+
// Release every chunk on the chain and reset this LifoAlloc to empty.
void
LifoAlloc::freeAll()
{
    while (first) {
        BumpChunk* victim = first;
        first = first->next();
        decrementCurSize(victim->computedSizeOfIncludingThis());
        BumpChunk::delete_(victim);
    }
    first = latest = last = nullptr;

    // Nb: maintaining curSize_ correctly isn't easy. Fortunately, this is an
    // excellent sanity check.
    MOZ_ASSERT(curSize_ == 0);
}
+
// Return a chunk with room for |n| bytes, preferring an unused chunk already
// on the chain and otherwise allocating a new one (default-sized, or a
// rounded-up power of two for oversized requests). Returns nullptr on OOM or
// if the size computation would overflow.
LifoAlloc::BumpChunk*
LifoAlloc::getOrCreateChunk(size_t n)
{
    if (first) {
        // Look for existing, unused BumpChunks to satisfy the request.
        while (latest->next()) {
            latest = latest->next();
            latest->resetBump(); // This was an unused BumpChunk on the chain.
            if (latest->canAlloc(n))
                return latest;
        }
    }

    size_t defaultChunkFreeSpace = defaultChunkSize_ - sizeof(BumpChunk);
    size_t chunkSize;
    if (n > defaultChunkFreeSpace) {
        size_t allocSizeWithHeader = n + sizeof(BumpChunk);

        // Guard for overflow: the addition itself must not wrap, and the top
        // bit must be clear so RoundUpPow2 below cannot overflow.
        if (allocSizeWithHeader < n ||
            (allocSizeWithHeader & (size_t(1) << (BitSize<size_t>::value - 1)))) {
            return nullptr;
        }

        chunkSize = RoundUpPow2(allocSizeWithHeader);
    } else {
        chunkSize = defaultChunkSize_;
    }

    // If we get here, we couldn't find an existing BumpChunk to fill the request.
    MOZ_ASSERT(fallibleScope_, "[OOM] Cannot allocate a new chunk in an infallible scope.");
    BumpChunk* newChunk = BumpChunk::new_(chunkSize);
    if (!newChunk)
        return nullptr;
    if (!first) {
        latest = first = last = newChunk;
    } else {
        MOZ_ASSERT(latest && !latest->next());
        latest->setNext(newChunk);
        latest = last = newChunk;
    }

    size_t computedChunkSize = newChunk->computedSizeOfIncludingThis();
    MOZ_ASSERT(computedChunkSize == chunkSize);
    incrementCurSize(computedChunkSize);

    return newChunk;
}
+
// Steal all of |other|'s chunks — used and unused — appending them to this
// LifoAlloc and leaving |other| empty. Neither alloc may have outstanding
// marks.
void
LifoAlloc::transferFrom(LifoAlloc* other)
{
    MOZ_ASSERT(!markCount);
    MOZ_ASSERT(!other->markCount);

    if (!other->first)
        return;

    incrementCurSize(other->curSize_);
    if (other->isEmpty())
        appendUnused(other->first, other->last);
    else
        appendUsed(other->first, other->latest, other->last);
    other->first = other->last = other->latest = nullptr;
    other->curSize_ = 0;
}
+
// Steal only |other|'s unused chunks (everything after |other->latest|),
// appending them to this LifoAlloc. This alloc's current chunk must be its
// first chunk; a no-op if |other| has marks or no chunks.
void
LifoAlloc::transferUnusedFrom(LifoAlloc* other)
{
    MOZ_ASSERT(!markCount);
    MOZ_ASSERT(latest == first);

    if (other->markCount || !other->first)
        return;

    // Transfer all chunks *after* |latest|.

    if (other->latest->next()) {
        if (other->latest == other->first) {
            // We're transferring everything except the first chunk.
            size_t delta = other->curSize_ - other->first->computedSizeOfIncludingThis();
            other->decrementCurSize(delta);
            incrementCurSize(delta);
        } else {
            // Move each transferred chunk's size from |other| to this alloc.
            for (BumpChunk* chunk = other->latest->next(); chunk; chunk = chunk->next()) {
                size_t size = chunk->computedSizeOfIncludingThis();
                incrementCurSize(size);
                other->decrementCurSize(size);
            }
        }

        appendUnused(other->latest->next(), other->last);
        other->latest->setNext(nullptr);
        other->last = other->latest;
    }
}
diff --git a/js/src/ds/LifoAlloc.h b/js/src/ds/LifoAlloc.h
new file mode 100644
index 000000000..f349cd476
--- /dev/null
+++ b/js/src/ds/LifoAlloc.h
@@ -0,0 +1,635 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef ds_LifoAlloc_h
+#define ds_LifoAlloc_h
+
+#include "mozilla/Attributes.h"
+#include "mozilla/MathAlgorithms.h"
+#include "mozilla/MemoryChecking.h"
+#include "mozilla/MemoryReporting.h"
+#include "mozilla/PodOperations.h"
+#include "mozilla/TemplateLib.h"
+#include "mozilla/TypeTraits.h"
+
+// This data structure supports stacky LIFO allocation (mark/release and
+// LifoAllocScope). It does not maintain one contiguous segment; instead, it
+// maintains a bunch of linked memory segments. In order to prevent malloc/free
+// thrashing, unused segments are deallocated when garbage collection occurs.
+
+#include "jsutil.h"
+
+namespace js {
+
+namespace detail {
+
+static const size_t LIFO_ALLOC_ALIGN = 8;
+
+// Round |orig| up to the next LIFO_ALLOC_ALIGN boundary (identity if it is
+// already aligned).  Requires the alignment to be a power of two.
+MOZ_ALWAYS_INLINE
+char*
+AlignPtr(void* orig)
+{
+    static_assert(mozilla::tl::FloorLog2<LIFO_ALLOC_ALIGN>::value ==
+                  mozilla::tl::CeilingLog2<LIFO_ALLOC_ALIGN>::value,
+                  "LIFO_ALLOC_ALIGN must be a power of two");
+
+    // |~LIFO_ALLOC_ALIGN + 1| is the two's-complement negation of the
+    // alignment, i.e. the same bit pattern as ~(LIFO_ALLOC_ALIGN - 1): a
+    // mask that clears the low log2(align) bits after rounding up.
+    char* result = (char*) ((uintptr_t(orig) + (LIFO_ALLOC_ALIGN - 1)) & (~LIFO_ALLOC_ALIGN + 1));
+    MOZ_ASSERT(uintptr_t(result) % LIFO_ALLOC_ALIGN == 0);
+    return result;
+}
+
+// Header for a chunk of memory wrangled by the LifoAlloc.
+class BumpChunk
+{
+    char* bump;           // start of the available data
+    char* limit;          // end of the data
+    BumpChunk* next_;     // the next BumpChunk
+    size_t bumpSpaceSize; // size of the data area
+
+    // The header occupies the front of the allocation; data follows it.
+    char* headerBase() { return reinterpret_cast<char*>(this); }
+    char* bumpBase() const { return limit - bumpSpaceSize; }
+
+    // Private: chunks are created through new_() below so the header and
+    // data area share a single malloc block.
+    explicit BumpChunk(size_t bumpSpaceSize)
+      : bump(reinterpret_cast<char*>(this) + sizeof(BumpChunk)),
+        limit(bump + bumpSpaceSize),
+        next_(nullptr), bumpSpaceSize(bumpSpaceSize)
+    {
+        MOZ_ASSERT(bump == AlignPtr(bump));
+    }
+
+    // Move the bump pointer, clobbering freed bytes in DEBUG builds and
+    // updating memory-checker (e.g. valgrind/ASan) poisoning either way.
+    void setBump(void* ptr) {
+        MOZ_ASSERT(bumpBase() <= ptr);
+        MOZ_ASSERT(ptr <= limit);
+#if defined(DEBUG) || defined(MOZ_HAVE_MEM_CHECKS)
+        char* prevBump = bump;
+#endif
+        bump = static_cast<char*>(ptr);
+#ifdef DEBUG
+        MOZ_ASSERT(contains(prevBump));
+
+        // Clobber the now-free space.
+        if (prevBump > bump)
+            memset(bump, 0xcd, prevBump - bump);
+#endif
+
+        // Poison/Unpoison memory that we just free'd/allocated.
+#if defined(MOZ_HAVE_MEM_CHECKS)
+        if (prevBump > bump)
+            MOZ_MAKE_MEM_NOACCESS(bump, prevBump - bump);
+        else if (bump > prevBump)
+            MOZ_MAKE_MEM_UNDEFINED(prevBump, bump - prevBump);
+#endif
+    }
+
+  public:
+    BumpChunk* next() const { return next_; }
+    void setNext(BumpChunk* succ) { next_ = succ; }
+
+    // Bytes currently allocated out of this chunk's data area.
+    size_t used() const { return bump - bumpBase(); }
+
+    void* start() const { return bumpBase(); }
+    void* end() const { return limit; }
+
+    size_t sizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) {
+        return mallocSizeOf(this);
+    }
+
+    // Header plus data size, computed without asking the allocator.
+    size_t computedSizeOfIncludingThis() {
+        return limit - headerBase();
+    }
+
+    // Release every allocation in this chunk.
+    void resetBump() {
+        setBump(headerBase() + sizeof(BumpChunk));
+    }
+
+    void* mark() const { return bump; }
+
+    // Roll the bump pointer back to a previously-taken mark().
+    void release(void* mark) {
+        MOZ_ASSERT(contains(mark));
+        MOZ_ASSERT(mark <= bump);
+        setBump(mark);
+    }
+
+    // True if |mark| points into this chunk's data area (limit inclusive,
+    // so a one-past-the-end mark is accepted).
+    bool contains(void* mark) const {
+        return bumpBase() <= mark && mark <= limit;
+    }
+
+    bool canAlloc(size_t n);
+
+    // Bytes still available, counted from the next aligned position.
+    size_t unused() {
+        return limit - AlignPtr(bump);
+    }
+
+    // Try to perform an allocation of size |n|, return null if not possible.
+    MOZ_ALWAYS_INLINE
+    void* tryAlloc(size_t n) {
+        char* aligned = AlignPtr(bump);
+        char* newBump = aligned + n;
+
+        if (newBump > limit)
+            return nullptr;
+
+        // Check for overflow.
+        if (MOZ_UNLIKELY(newBump < bump))
+            return nullptr;
+
+        MOZ_ASSERT(canAlloc(n)); // Ensure consistency between "can" and "try".
+        setBump(newBump);
+        return aligned;
+    }
+
+    // Allocate/free a chunk with |chunkSize| total bytes (header included).
+    static BumpChunk* new_(size_t chunkSize);
+    static void delete_(BumpChunk* chunk);
+};
+
+} // namespace detail
+
+// LIFO bump allocator: used for phase-oriented and fast LIFO allocations.
+//
+// Note: |latest| is not necessary "last". We leave BumpChunks latent in the
+// chain after they've been released to avoid thrashing before a GC.
+class LifoAlloc
+{
+    typedef detail::BumpChunk BumpChunk;
+
+    // Chunk chain invariants: |first| heads the list, |last| is the final
+    // chunk, and |latest| is the chunk currently being allocated from.
+    // Chunks after |latest| are retained free space (see release()).
+    BumpChunk* first;
+    BumpChunk* latest;
+    BumpChunk* last;
+    size_t markCount;          // number of outstanding Marks
+    size_t defaultChunkSize_;  // preferred size for new chunks (power of two)
+    size_t curSize_;           // total bytes in all chunks, headers included
+    size_t peakSize_;          // high-water mark of curSize_
+#if defined(DEBUG) || defined(JS_OOM_BREAKPOINT)
+    bool fallibleScope_;       // whether simulated-OOM may fail allocations
+#endif
+
+    void operator=(const LifoAlloc&) = delete;
+    LifoAlloc(const LifoAlloc&) = delete;
+
+    // Return a BumpChunk that can perform an allocation of at least size |n|
+    // and add it to the chain appropriately.
+    //
+    // Side effect: if retval is non-null, |first| and |latest| are initialized
+    // appropriately.
+    BumpChunk* getOrCreateChunk(size_t n);
+
+    // Reinitialize bookkeeping; note that |peakSize_| is deliberately left
+    // alone so it survives steal()/reset cycles.
+    void reset(size_t defaultChunkSize) {
+        MOZ_ASSERT(mozilla::RoundUpPow2(defaultChunkSize) == defaultChunkSize);
+        first = latest = last = nullptr;
+        defaultChunkSize_ = defaultChunkSize;
+        markCount = 0;
+        curSize_ = 0;
+    }
+
+    // Append unused chunks to the end of this LifoAlloc.
+    void appendUnused(BumpChunk* start, BumpChunk* end) {
+        MOZ_ASSERT(start && end);
+        if (last)
+            last->setNext(start);
+        else
+            first = latest = start;
+        last = end;
+    }
+
+    // Append used chunks to the end of this LifoAlloc. We act as if all the
+    // chunks in |this| are used, even if they're not, so memory may be wasted.
+    void appendUsed(BumpChunk* otherFirst, BumpChunk* otherLatest, BumpChunk* otherLast) {
+        MOZ_ASSERT(otherFirst && otherLatest && otherLast);
+        if (last)
+            last->setNext(otherFirst);
+        else
+            first = otherFirst;
+        latest = otherLatest;
+        last = otherLast;
+    }
+
+    void incrementCurSize(size_t size) {
+        curSize_ += size;
+        if (curSize_ > peakSize_)
+            peakSize_ = curSize_;
+    }
+    void decrementCurSize(size_t size) {
+        MOZ_ASSERT(curSize_ >= size);
+        curSize_ -= size;
+    }
+
+    // Common allocation path: try the current chunk, then fall back to
+    // finding/creating one large enough.
+    MOZ_ALWAYS_INLINE
+    void* allocImpl(size_t n) {
+        void* result;
+        if (latest && (result = latest->tryAlloc(n)))
+            return result;
+
+        if (!getOrCreateChunk(n))
+            return nullptr;
+
+        // Since we just created a large enough chunk, this can't fail.
+        result = latest->tryAlloc(n);
+        MOZ_ASSERT(result);
+        return result;
+    }
+
+  public:
+    explicit LifoAlloc(size_t defaultChunkSize)
+      : peakSize_(0)
+#if defined(DEBUG) || defined(JS_OOM_BREAKPOINT)
+      , fallibleScope_(true)
+#endif
+    {
+        reset(defaultChunkSize);
+    }
+
+    // Steal allocated chunks from |other|.
+    void steal(LifoAlloc* other) {
+        MOZ_ASSERT(!other->markCount);
+        MOZ_ASSERT(!latest);
+
+        // Copy everything from |other| to |this| except for |peakSize_|, which
+        // requires some care.
+        // NOTE(review): PodAssign does a raw memcpy of *this; that is only
+        // valid while LifoAlloc stays trivially copyable — confirm if members
+        // are ever added that have non-trivial copy semantics.
+        size_t oldPeakSize = peakSize_;
+        mozilla::PodAssign(this, other);
+        peakSize_ = Max(oldPeakSize, curSize_);
+
+        other->reset(defaultChunkSize_);
+    }
+
+    // Append all chunks from |other|. They are removed from |other|.
+    void transferFrom(LifoAlloc* other);
+
+    // Append unused chunks from |other|. They are removed from |other|.
+    void transferUnusedFrom(LifoAlloc* other);
+
+    ~LifoAlloc() { freeAll(); }
+
+    size_t defaultChunkSize() const { return defaultChunkSize_; }
+
+    // Frees all held memory.
+    void freeAll();
+
+    static const unsigned HUGE_ALLOCATION = 50 * 1024 * 1024;
+    // Drop everything if nothing is live and we are sitting on a huge
+    // amount of retained memory.
+    void freeAllIfHugeAndUnused() {
+        if (markCount == 0 && curSize_ > HUGE_ALLOCATION)
+            freeAll();
+    }
+
+    MOZ_ALWAYS_INLINE
+    void* alloc(size_t n) {
+#if defined(DEBUG) || defined(JS_OOM_BREAKPOINT)
+        // Only simulate OOMs when we are not using the LifoAlloc as an
+        // infallible allocator.
+        if (fallibleScope_)
+            JS_OOM_POSSIBLY_FAIL();
+#endif
+        return allocImpl(n);
+    }
+
+    // Like alloc(), but crashes the process instead of returning null.
+    MOZ_ALWAYS_INLINE
+    void* allocInfallible(size_t n) {
+        AutoEnterOOMUnsafeRegion oomUnsafe;
+        if (void* result = allocImpl(n))
+            return result;
+        oomUnsafe.crash("LifoAlloc::allocInfallible");
+        return nullptr;
+    }
+
+    // Ensures that enough space exists to satisfy N bytes worth of
+    // allocation requests, not necessarily contiguous. Note that this does
+    // not guarantee a successful single allocation of N bytes.
+    MOZ_ALWAYS_INLINE
+    MOZ_MUST_USE bool ensureUnusedApproximate(size_t n) {
+        AutoFallibleScope fallibleAllocator(this);
+        size_t total = 0;
+        for (BumpChunk* chunk = latest; chunk; chunk = chunk->next()) {
+            total += chunk->unused();
+            if (total >= n)
+                return true;
+        }
+        BumpChunk* latestBefore = latest;
+        if (!getOrCreateChunk(n))
+            return false;
+        // getOrCreateChunk advanced |latest| to the new chunk; rewind it so
+        // the reserved space stays "unused" rather than becoming current.
+        if (latestBefore)
+            latest = latestBefore;
+        return true;
+    }
+
+    MOZ_ALWAYS_INLINE
+    void setAsInfallibleByDefault() {
+#if defined(DEBUG) || defined(JS_OOM_BREAKPOINT)
+        fallibleScope_ = false;
+#endif
+    }
+
+    // RAII helper that re-enables simulated-OOM failure for its lifetime,
+    // restoring the previous fallibility on destruction.
+    class MOZ_NON_TEMPORARY_CLASS AutoFallibleScope {
+#if defined(DEBUG) || defined(JS_OOM_BREAKPOINT)
+        LifoAlloc* lifoAlloc_;
+        bool prevFallibleScope_;
+        MOZ_DECL_USE_GUARD_OBJECT_NOTIFIER
+
+      public:
+        explicit AutoFallibleScope(LifoAlloc* lifoAlloc MOZ_GUARD_OBJECT_NOTIFIER_PARAM) {
+            MOZ_GUARD_OBJECT_NOTIFIER_INIT;
+            lifoAlloc_ = lifoAlloc;
+            prevFallibleScope_ = lifoAlloc->fallibleScope_;
+            lifoAlloc->fallibleScope_ = true;
+        }
+
+        ~AutoFallibleScope() {
+            lifoAlloc_->fallibleScope_ = prevFallibleScope_;
+        }
+#else
+      public:
+        explicit AutoFallibleScope(LifoAlloc*) {}
+#endif
+    };
+
+    template <typename T>
+    T* newArray(size_t count) {
+        static_assert(mozilla::IsPod<T>::value,
+                      "T must be POD so that constructors (and destructors, "
+                      "when the LifoAlloc is freed) need not be called");
+        return newArrayUninitialized<T>(count);
+    }
+
+    // Create an array with uninitialized elements of type |T|.
+    // The caller is responsible for initialization.
+    template <typename T>
+    T* newArrayUninitialized(size_t count) {
+        size_t bytes;
+        if (MOZ_UNLIKELY(!CalculateAllocSize<T>(count, &bytes)))
+            return nullptr;
+        return static_cast<T*>(alloc(bytes));
+    }
+
+    // A position in the allocator: a chunk plus an offset within it.
+    // A default-constructed Mark denotes the empty allocator.
+    class Mark {
+        BumpChunk* chunk;
+        void* markInChunk;
+        friend class LifoAlloc;
+        Mark(BumpChunk* chunk, void* markInChunk) : chunk(chunk), markInChunk(markInChunk) {}
+      public:
+        Mark() : chunk(nullptr), markInChunk(nullptr) {}
+    };
+
+    Mark mark() {
+        markCount++;
+        return latest ? Mark(latest, latest->mark()) : Mark();
+    }
+
+    // Release everything allocated since |mark| was taken.  Chunks after
+    // the mark's chunk are kept on the chain for reuse, not freed.
+    void release(Mark mark) {
+        markCount--;
+        if (!mark.chunk) {
+            latest = first;
+            if (latest)
+                latest->resetBump();
+        } else {
+            latest = mark.chunk;
+            latest->release(mark.markInChunk);
+        }
+    }
+
+    // Release every allocation but keep all chunks for reuse.
+    void releaseAll() {
+        MOZ_ASSERT(!markCount);
+        latest = first;
+        if (latest)
+            latest->resetBump();
+    }
+
+    // Get the total "used" (occupied bytes) count for the arena chunks.
+    size_t used() const {
+        size_t accum = 0;
+        for (BumpChunk* chunk = first; chunk; chunk = chunk->next()) {
+            accum += chunk->used();
+            if (chunk == latest)
+                break;
+        }
+        return accum;
+    }
+
+    // Return true if the LifoAlloc does not currently contain any allocations.
+    bool isEmpty() const {
+        return !latest || !latest->used();
+    }
+
+    // Return the number of bytes remaining to allocate in the current chunk.
+    // e.g. How many bytes we can allocate before needing a new block.
+    size_t availableInCurrentChunk() const {
+        if (!latest)
+            return 0;
+        return latest->unused();
+    }
+
+    // Get the total size of the arena chunks (including unused space).
+    size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const {
+        size_t n = 0;
+        for (BumpChunk* chunk = first; chunk; chunk = chunk->next())
+            n += chunk->sizeOfIncludingThis(mallocSizeOf);
+        return n;
+    }
+
+    // Get the total size of the arena chunks (including unused space).
+    size_t computedSizeOfExcludingThis() const {
+        size_t n = 0;
+        for (BumpChunk* chunk = first; chunk; chunk = chunk->next())
+            n += chunk->computedSizeOfIncludingThis();
+        return n;
+    }
+
+    // Like sizeOfExcludingThis(), but includes the size of the LifoAlloc itself.
+    size_t sizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) const {
+        return mallocSizeOf(this) + sizeOfExcludingThis(mallocSizeOf);
+    }
+
+    // Get the peak size of the arena chunks (including unused space and
+    // bookkeeping space).
+    size_t peakSizeOfExcludingThis() const { return peakSize_; }
+
+    // Doesn't perform construction; useful for lazily-initialized POD types.
+    template <typename T>
+    MOZ_ALWAYS_INLINE
+    T* pod_malloc() {
+        return static_cast<T*>(alloc(sizeof(T)));
+    }
+
+    JS_DECLARE_NEW_METHODS(new_, alloc, MOZ_ALWAYS_INLINE)
+    JS_DECLARE_NEW_METHODS(newInfallible, allocInfallible, MOZ_ALWAYS_INLINE)
+
+#ifdef DEBUG
+    bool contains(void* ptr) const {
+        for (BumpChunk* chunk = first; chunk; chunk = chunk->next()) {
+            if (chunk->contains(ptr))
+                return true;
+        }
+        return false;
+    }
+#endif
+
+    // A mutable enumeration of the allocated data.
+    class Enum
+    {
+        friend class LifoAlloc;
+        friend class detail::BumpChunk;
+
+        LifoAlloc* alloc_;  // The LifoAlloc being traversed.
+        BumpChunk* chunk_;  // The current chunk.
+        char* position_;    // The current position (must be within chunk_).
+
+        // If there is not enough room in the remaining block for |size|,
+        // advance to the next block and update the position.
+        // (chunk->start() is always aligned, so no re-alignment is needed
+        // after advancing.)
+        void ensureSpaceAndAlignment(size_t size) {
+            MOZ_ASSERT(!empty());
+            char* aligned = detail::AlignPtr(position_);
+            if (aligned + size > chunk_->end()) {
+                chunk_ = chunk_->next();
+                position_ = static_cast<char*>(chunk_->start());
+            } else {
+                position_ = aligned;
+            }
+            MOZ_ASSERT(uintptr_t(position_) + size <= uintptr_t(chunk_->end()));
+        }
+
+      public:
+        explicit Enum(LifoAlloc& alloc)
+          : alloc_(&alloc),
+            chunk_(alloc.first),
+            position_(static_cast<char*>(alloc.first ? alloc.first->start() : nullptr))
+        {}
+
+        // Return true if there are no more bytes to enumerate.
+        bool empty() {
+            return !chunk_ || (chunk_ == alloc_->latest && position_ >= chunk_->mark());
+        }
+
+        // Move the read position forward by the size of one T.
+        template <typename T>
+        void popFront() {
+            popFront(sizeof(T));
+        }
+
+        // Move the read position forward by |size| bytes.
+        void popFront(size_t size) {
+            ensureSpaceAndAlignment(size);
+            position_ = position_ + size;
+        }
+
+        // Update the bytes at the current position with a new value.
+        template <typename T>
+        void updateFront(const T& t) {
+            ensureSpaceAndAlignment(sizeof(T));
+            memmove(position_, &t, sizeof(T));
+        }
+
+        // Return a pointer to the item at the current position. This
+        // returns a pointer to the inline storage, not a copy.
+        template <typename T>
+        T* get(size_t size = sizeof(T)) {
+            ensureSpaceAndAlignment(size);
+            return reinterpret_cast<T*>(position_);
+        }
+
+        // Return a Mark at the current position of the Enum.
+        Mark mark() {
+            alloc_->markCount++;
+            return Mark(chunk_, position_);
+        }
+    };
+};
+
+// RAII scope: takes a Mark on construction and releases back to it on
+// destruction, so everything allocated inside the scope is reclaimed.
+// Also forces the allocator into a fallible scope for its lifetime.
+class MOZ_NON_TEMPORARY_CLASS LifoAllocScope
+{
+    LifoAlloc* lifoAlloc;
+    LifoAlloc::Mark mark;
+    LifoAlloc::AutoFallibleScope fallibleScope;
+    bool shouldRelease;  // false once releaseEarly() has run
+    MOZ_DECL_USE_GUARD_OBJECT_NOTIFIER
+
+  public:
+    explicit LifoAllocScope(LifoAlloc* lifoAlloc
+                            MOZ_GUARD_OBJECT_NOTIFIER_PARAM)
+      : lifoAlloc(lifoAlloc),
+        mark(lifoAlloc->mark()),
+        fallibleScope(lifoAlloc),
+        shouldRelease(true)
+    {
+        MOZ_GUARD_OBJECT_NOTIFIER_INIT;
+    }
+
+    ~LifoAllocScope() {
+        if (shouldRelease)
+            lifoAlloc->release(mark);
+    }
+
+    LifoAlloc& alloc() {
+        return *lifoAlloc;
+    }
+
+    // Release the mark before the scope ends; the destructor then does
+    // nothing.
+    void releaseEarly() {
+        MOZ_ASSERT(shouldRelease);
+        lifoAlloc->release(mark);
+        shouldRelease = false;
+    }
+};
+
+// Selects whether LifoAllocPolicy allocations may return null (Fallible)
+// or crash on OOM (Infallible).
+enum Fallibility {
+    Fallible,
+    Infallible
+};
+
+// An allocation policy (for Vector, HashMap, etc.) backed by a LifoAlloc.
+// free_/pod_realloc never return memory to the LifoAlloc: freeing is a
+// no-op and realloc always allocates a fresh block and copies.
+template <Fallibility fb>
+class LifoAllocPolicy
+{
+    LifoAlloc& alloc_;
+
+  public:
+    MOZ_IMPLICIT LifoAllocPolicy(LifoAlloc& alloc)
+      : alloc_(alloc)
+    {}
+    // Allocate |numElems| T's without initialization; null on overflow/OOM.
+    template <typename T>
+    T* maybe_pod_malloc(size_t numElems) {
+        size_t bytes;
+        if (MOZ_UNLIKELY(!CalculateAllocSize<T>(numElems, &bytes)))
+            return nullptr;
+        void* p = fb == Fallible ? alloc_.alloc(bytes) : alloc_.allocInfallible(bytes);
+        return static_cast<T*>(p);
+    }
+    // As above, but zero-filled.
+    template <typename T>
+    T* maybe_pod_calloc(size_t numElems) {
+        T* p = maybe_pod_malloc<T>(numElems);
+        if (MOZ_UNLIKELY(!p))
+            return nullptr;
+        memset(p, 0, numElems * sizeof(T));
+        return p;
+    }
+    // "Realloc" by allocating anew and copying; the old block is leaked
+    // into the LifoAlloc (reclaimed only when the arena is released).
+    template <typename T>
+    T* maybe_pod_realloc(T* p, size_t oldSize, size_t newSize) {
+        T* n = maybe_pod_malloc<T>(newSize);
+        if (MOZ_UNLIKELY(!n))
+            return nullptr;
+        MOZ_ASSERT(!(oldSize & mozilla::tl::MulOverflowMask<sizeof(T)>::value));
+        memcpy(n, p, Min(oldSize * sizeof(T), newSize * sizeof(T)));
+        return n;
+    }
+    template <typename T>
+    T* pod_malloc(size_t numElems) {
+        return maybe_pod_malloc<T>(numElems);
+    }
+    template <typename T>
+    T* pod_calloc(size_t numElems) {
+        return maybe_pod_calloc<T>(numElems);
+    }
+    template <typename T>
+    T* pod_realloc(T* p, size_t oldSize, size_t newSize) {
+        return maybe_pod_realloc<T>(p, oldSize, newSize);
+    }
+    // Individual frees are no-ops; memory lives until the LifoAlloc is
+    // released as a whole.
+    void free_(void* p) {
+    }
+    void reportAllocOverflow() const {
+    }
+    MOZ_MUST_USE bool checkSimulatedOOM() const {
+        return fb == Infallible || !js::oom::ShouldFailWithOOM();
+    }
+};
+
+} // namespace js
+
+#endif /* ds_LifoAlloc_h */
diff --git a/js/src/ds/MemoryProtectionExceptionHandler.cpp b/js/src/ds/MemoryProtectionExceptionHandler.cpp
new file mode 100644
index 000000000..7c6646a9a
--- /dev/null
+++ b/js/src/ds/MemoryProtectionExceptionHandler.cpp
@@ -0,0 +1,760 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "ds/MemoryProtectionExceptionHandler.h"
+
+#include "mozilla/Assertions.h"
+#include "mozilla/Atomics.h"
+
+#if defined(XP_WIN)
+# include "jswin.h"
+#elif defined(XP_UNIX) && !defined(XP_DARWIN)
+# include <signal.h>
+# include <sys/types.h>
+# include <unistd.h>
+#elif defined(XP_DARWIN)
+# include <mach/mach.h>
+# include <unistd.h>
+#endif
+
+#ifdef ANDROID
+# include <android/log.h>
+#endif
+
+#include "ds/SplayTree.h"
+
+#include "threading/LockGuard.h"
+#include "threading/Thread.h"
+#include "vm/MutexIDs.h"
+
+namespace js {
+
+/*
+ * A class to store the addresses of the regions recognized as protected
+ * by this exception handler. We use a splay tree to store these addresses.
+ */
+class ProtectedRegionTree
+{
+    // Inclusive byte range [first, last].  compare() treats any two
+    // overlapping ranges as equal, so looking up a 1-byte Region finds the
+    // stored region that contains that address.
+    struct Region
+    {
+        uintptr_t first;
+        uintptr_t last;
+
+        Region(uintptr_t addr, size_t size) : first(addr),
+                                              last(addr + (size - 1)) {}
+
+        static int compare(const Region& A, const Region& B) {
+            if (A.last < B.first)
+                return -1;
+            if (A.first > B.last)
+                return 1;
+            return 0;
+        }
+    };
+
+    Mutex lock;               // guards |tree| (and its backing |alloc|)
+    LifoAlloc alloc;          // node storage for the splay tree
+    SplayTree<Region, Region> tree;
+
+  public:
+    ProtectedRegionTree() : lock(mutexid::ProtectedRegionTree),
+                            alloc(4096),
+                            tree(&alloc) {}
+
+    ~ProtectedRegionTree() { MOZ_ASSERT(tree.empty()); }
+
+    // Record [addr, addr+size) as protected; crashes on OOM since failing
+    // to track a region would silently disable the annotation.
+    void insert(uintptr_t addr, size_t size) {
+        MOZ_ASSERT(addr && size);
+        LockGuard<Mutex> guard(lock);
+        AutoEnterOOMUnsafeRegion oomUnsafe;
+        if (!tree.insert(Region(addr, size)))
+            oomUnsafe.crash("Failed to store allocation ID.");
+    }
+
+    // Remove the region containing |addr| (matched via overlap compare).
+    void remove(uintptr_t addr) {
+        MOZ_ASSERT(addr);
+        LockGuard<Mutex> guard(lock);
+        tree.remove(Region(addr, 1));
+    }
+
+    // True if |addr| falls inside any recorded region.
+    bool isProtected(uintptr_t addr) {
+        if (!addr)
+            return false;
+        LockGuard<Mutex> guard(lock);
+        return tree.maybeLookup(Region(addr, 1));
+    }
+};
+
+static bool sExceptionHandlerInstalled = false;
+
+static ProtectedRegionTree sProtectedRegions;
+
+// Whether the handler is globally disabled; install() becomes a no-op
+// (but still reports success) when this returns true.
+bool
+MemoryProtectionExceptionHandler::isDisabled()
+{
+    // Disabled everywhere for this release.
+    return true;
+}
+
+// Start treating [addr, addr+size) as deliberately protected memory;
+// silently ignored when no exception handler is installed.
+void
+MemoryProtectionExceptionHandler::addRegion(void* addr, size_t size)
+{
+    if (sExceptionHandlerInstalled)
+        sProtectedRegions.insert(uintptr_t(addr), size);
+}
+
+// Stop treating the region containing |addr| as protected; silently
+// ignored when no exception handler is installed.
+void
+MemoryProtectionExceptionHandler::removeRegion(void* addr)
+{
+    if (sExceptionHandlerInstalled)
+        sProtectedRegions.remove(uintptr_t(addr));
+}
+
+/* -------------------------------------------------------------------------- */
+
+/*
+ * This helper function attempts to replicate the functionality of
+ * mozilla::MOZ_ReportCrash() in an async-signal-safe way.
+ */
+static MOZ_COLD MOZ_ALWAYS_INLINE void
+ReportCrashIfDebug(const char* aStr)
+    MOZ_PRETEND_NORETURN_FOR_STATIC_ANALYSIS
+{
+#ifdef DEBUG
+# if defined(XP_WIN)
+    // Write straight to the stderr handle and break into the debugger;
+    // avoids CRT machinery that is unsafe inside an exception handler.
+    DWORD bytesWritten;
+    BOOL ret = WriteFile(GetStdHandle(STD_ERROR_HANDLE), aStr,
+                         strlen(aStr) + 1, &bytesWritten, nullptr);
+    ::__debugbreak();
+# elif defined(ANDROID)
+    int ret = __android_log_write(ANDROID_LOG_FATAL, "MOZ_CRASH", aStr);
+# else
+    // write(2) is async-signal-safe, unlike fprintf.
+    ssize_t ret = write(STDERR_FILENO, aStr, strlen(aStr) + 1);
+# endif
+    (void)ret; // Ignore failures; we're already crashing anyway.
+#endif
+}
+
+/* -------------------------------------------------------------------------- */
+
+#if defined(XP_WIN)
+
+static void* sVectoredExceptionHandler = nullptr;
+
+/*
+ * We can only handle one exception. To guard against races and reentrancy,
+ * we set this value the first time we enter the exception handler and never
+ * touch it again.
+ */
+static mozilla::Atomic<bool> sHandlingException(false);
+
+// First-chance vectored handler: annotates access violations that touch a
+// protected region, then always lets the exception continue to the next
+// handler (debugger, crash reporter, ...).
+static long __stdcall
+VectoredExceptionHandler(EXCEPTION_POINTERS* ExceptionInfo)
+{
+    EXCEPTION_RECORD* ExceptionRecord = ExceptionInfo->ExceptionRecord;
+
+    // We only handle one kind of exception; ignore all others.
+    if (ExceptionRecord->ExceptionCode == EXCEPTION_ACCESS_VIOLATION) {
+        // Make absolutely sure we can only get here once.
+        if (sHandlingException.compareExchange(false, true)) {
+            // Restore the previous handler. We're going to forward to it
+            // anyway, and if we crash while doing so we don't want to hang.
+            MOZ_ALWAYS_TRUE(RemoveVectoredExceptionHandler(sVectoredExceptionHandler));
+
+            // Get the address that the offending code tried to access.
+            // (ExceptionInformation[1] is the faulting address for access
+            // violations; [0] is the read/write flag.)
+            uintptr_t address = uintptr_t(ExceptionRecord->ExceptionInformation[1]);
+
+            // If the faulting address is in one of our protected regions, we
+            // want to annotate the crash to make it stand out from the crowd.
+            if (sProtectedRegions.isProtected(address)) {
+                ReportCrashIfDebug("Hit MOZ_CRASH(Tried to access a protected region!)\n");
+                MOZ_CRASH_ANNOTATE("MOZ_CRASH(Tried to access a protected region!)");
+            }
+        }
+    }
+
+    // Forward to the previous handler which may be a debugger,
+    // the crash reporter or something else entirely.
+    return EXCEPTION_CONTINUE_SEARCH;
+}
+
+// Install the vectored exception handler as the process's first handler.
+// Returns true on success (or when the handler is disabled entirely).
+bool
+MemoryProtectionExceptionHandler::install()
+{
+    MOZ_ASSERT(!sExceptionHandlerInstalled);
+
+    // If the exception handler is disabled, report success anyway.
+    if (MemoryProtectionExceptionHandler::isDisabled())
+        return true;
+
+    // Install our new exception handler.
+    sVectoredExceptionHandler = AddVectoredExceptionHandler(/* FirstHandler = */ true,
+                                                            VectoredExceptionHandler);
+
+    sExceptionHandlerInstalled = sVectoredExceptionHandler != nullptr;
+    return sExceptionHandlerInstalled;
+}
+
+// Remove the vectored handler installed by install(); must not race with
+// an in-flight exception (asserted via sHandlingException).
+void
+MemoryProtectionExceptionHandler::uninstall()
+{
+    if (sExceptionHandlerInstalled) {
+        MOZ_ASSERT(!sHandlingException);
+
+        // Restore the previous exception handler.
+        MOZ_ALWAYS_TRUE(RemoveVectoredExceptionHandler(sVectoredExceptionHandler));
+
+        sExceptionHandlerInstalled = false;
+    }
+}
+
+#elif defined(XP_UNIX) && !defined(XP_DARWIN)
+
+static struct sigaction sPrevSEGVHandler = {};
+
+/*
+ * We can only handle one exception. To guard against races and reentrancy,
+ * we set this value the first time we enter the exception handler and never
+ * touch it again.
+ */
+static mozilla::Atomic<bool> sHandlingException(false);
+
+// SIGSEGV handler: annotates faults on protected regions, then forwards
+// the signal to whatever handler was installed before us.
+static void
+UnixExceptionHandler(int signum, siginfo_t* info, void* context)
+{
+    // Make absolutely sure we can only get here once.
+    if (sHandlingException.compareExchange(false, true)) {
+        // Restore the previous handler. We're going to forward to it
+        // anyway, and if we crash while doing so we don't want to hang.
+        MOZ_ALWAYS_FALSE(sigaction(SIGSEGV, &sPrevSEGVHandler, nullptr));
+
+        MOZ_ASSERT(signum == SIGSEGV && info->si_signo == SIGSEGV);
+
+        // SEGV_ACCERR means the mapping exists but permissions forbade the
+        // access — the signature of hitting a mprotect'd guard region.
+        if (info->si_code == SEGV_ACCERR) {
+            // Get the address that the offending code tried to access.
+            uintptr_t address = uintptr_t(info->si_addr);
+
+            // If the faulting address is in one of our protected regions, we
+            // want to annotate the crash to make it stand out from the crowd.
+            if (sProtectedRegions.isProtected(address)) {
+                ReportCrashIfDebug("Hit MOZ_CRASH(Tried to access a protected region!)\n");
+                MOZ_CRASH_ANNOTATE("MOZ_CRASH(Tried to access a protected region!)");
+            }
+        }
+    }
+
+    // Forward to the previous handler which may be a debugger,
+    // the crash reporter or something else entirely.
+    if (sPrevSEGVHandler.sa_flags & SA_SIGINFO)
+        sPrevSEGVHandler.sa_sigaction(signum, info, context);
+    else if (sPrevSEGVHandler.sa_handler == SIG_DFL || sPrevSEGVHandler.sa_handler == SIG_IGN)
+        // Re-install the default/ignore disposition and fall through: the
+        // fault re-triggers on return and the kernel applies it for us.
+        sigaction(SIGSEGV, &sPrevSEGVHandler, nullptr);
+    else
+        sPrevSEGVHandler.sa_handler(signum);
+
+    // If we reach here, we're returning to let the default signal handler deal
+    // with the exception. This is technically undefined behavior, but
+    // everything seems to do it, and it removes us from the crash stack.
+}
+
+// Install the SIGSEGV handler, saving the previous disposition so it can
+// be forwarded to and restored later.  Returns true on success (or when
+// the handler is disabled entirely).
+bool
+MemoryProtectionExceptionHandler::install()
+{
+    MOZ_ASSERT(!sExceptionHandlerInstalled);
+
+    // If the exception handler is disabled, report success anyway.
+    if (MemoryProtectionExceptionHandler::isDisabled())
+        return true;
+
+    // Install our new exception handler and save the previous one.
+    // SA_NODEFER leaves SIGSEGV unblocked inside the handler so a crash
+    // while handling still terminates instead of deadlocking.
+    struct sigaction faultHandler = {};
+    faultHandler.sa_flags = SA_SIGINFO | SA_NODEFER;
+    faultHandler.sa_sigaction = UnixExceptionHandler;
+    sigemptyset(&faultHandler.sa_mask);
+    sExceptionHandlerInstalled = !sigaction(SIGSEGV, &faultHandler, &sPrevSEGVHandler);
+
+    return sExceptionHandlerInstalled;
+}
+
+// Restore the SIGSEGV disposition saved by install(); must not race with
+// an in-flight signal (asserted via sHandlingException).
+void
+MemoryProtectionExceptionHandler::uninstall()
+{
+    if (sExceptionHandlerInstalled) {
+        MOZ_ASSERT(!sHandlingException);
+
+        // Restore the previous exception handler.
+        MOZ_ALWAYS_FALSE(sigaction(SIGSEGV, &sPrevSEGVHandler, nullptr));
+
+        sExceptionHandlerInstalled = false;
+    }
+}
+
+#elif defined(XP_DARWIN)
+
+/*
+ * The fact that we need to be able to forward to other exception handlers
+ * makes this code much more complicated. The forwarding logic and the
+ * structures required to make it work are heavily based on the code at
+ * www.ravenbrook.com/project/mps/prototype/2013-06-24/machtest/machtest/main.c
+ */
+
+/* -------------------------------------------------------------------------- */
+/* Begin Mach definitions and helper functions */
+/* -------------------------------------------------------------------------- */
+
+/*
+ * These are the message IDs associated with each exception type.
+ * We'll be using sIDRequest64, but we need the others for forwarding.
+ */
+static const mach_msg_id_t sIDRequest32 = 2401;
+static const mach_msg_id_t sIDRequestState32 = 2402;
+static const mach_msg_id_t sIDRequestStateIdentity32 = 2403;
+
+static const mach_msg_id_t sIDRequest64 = 2405;
+static const mach_msg_id_t sIDRequestState64 = 2406;
+static const mach_msg_id_t sIDRequestStateIdentity64 = 2407;
+
+/*
+ * Each message ID has an associated Mach message structure.
+ * We use the preprocessor to make defining them a little less arduous.
+ */
+# define REQUEST_HEADER_FIELDS\
+ mach_msg_header_t header;
+
+# define REQUEST_IDENTITY_FIELDS\
+ mach_msg_body_t msgh_body;\
+ mach_msg_port_descriptor_t thread;\
+ mach_msg_port_descriptor_t task;
+
+# define REQUEST_GENERAL_FIELDS(bits)\
+ NDR_record_t NDR;\
+ exception_type_t exception;\
+ mach_msg_type_number_t code_count;\
+ int##bits##_t code[2];
+
+# define REQUEST_STATE_FIELDS\
+ int flavor;\
+ mach_msg_type_number_t old_state_count;\
+ natural_t old_state[THREAD_STATE_MAX];
+
+# define REQUEST_TRAILER_FIELDS\
+ mach_msg_trailer_t trailer;
+
+# define EXCEPTION_REQUEST(bits)\
+ struct ExceptionRequest##bits\
+ {\
+ REQUEST_HEADER_FIELDS\
+ REQUEST_IDENTITY_FIELDS\
+ REQUEST_GENERAL_FIELDS(bits)\
+ REQUEST_TRAILER_FIELDS\
+ };\
+
+# define EXCEPTION_REQUEST_STATE(bits)\
+ struct ExceptionRequestState##bits\
+ {\
+ REQUEST_HEADER_FIELDS\
+ REQUEST_GENERAL_FIELDS(bits)\
+ REQUEST_STATE_FIELDS\
+ REQUEST_TRAILER_FIELDS\
+ };\
+
+# define EXCEPTION_REQUEST_STATE_IDENTITY(bits)\
+ struct ExceptionRequestStateIdentity##bits\
+ {\
+ REQUEST_HEADER_FIELDS\
+ REQUEST_IDENTITY_FIELDS\
+ REQUEST_GENERAL_FIELDS(bits)\
+ REQUEST_STATE_FIELDS\
+ REQUEST_TRAILER_FIELDS\
+ };\
+
+/* This is needed because not all fields are naturally aligned on 64-bit. */
+# ifdef __MigPackStructs
+# pragma pack(4)
+# endif
+
+EXCEPTION_REQUEST(32)
+EXCEPTION_REQUEST(64)
+EXCEPTION_REQUEST_STATE(32)
+EXCEPTION_REQUEST_STATE(64)
+EXCEPTION_REQUEST_STATE_IDENTITY(32)
+EXCEPTION_REQUEST_STATE_IDENTITY(64)
+
+/* We use this as a common base when forwarding to the previous handler. */
+union ExceptionRequestUnion {
+ mach_msg_header_t header;
+ ExceptionRequest32 r32;
+ ExceptionRequest64 r64;
+ ExceptionRequestState32 rs32;
+ ExceptionRequestState64 rs64;
+ ExceptionRequestStateIdentity32 rsi32;
+ ExceptionRequestStateIdentity64 rsi64;
+};
+
+/* This isn't really a full Mach message, but it's all we need to send. */
+struct ExceptionReply
+{
+ mach_msg_header_t header;
+ NDR_record_t NDR;
+ kern_return_t RetCode;
+};
+
+# ifdef __MigPackStructs
+# pragma pack()
+# endif
+
+# undef EXCEPTION_REQUEST_STATE_IDENTITY
+# undef EXCEPTION_REQUEST_STATE
+# undef EXCEPTION_REQUEST
+# undef REQUEST_STATE_FIELDS
+# undef REQUEST_GENERAL_FIELDS
+# undef REQUEST_IDENTITY_FIELDS
+# undef REQUEST_HEADER_FIELDS
+
+/*
+ * The exception handler we're forwarding to may not have the same behavior
+ * or thread state flavor as what we're using. These macros help populate
+ * the fields of the message we're about to send to the previous handler.
+ */
+# define COPY_REQUEST_COMMON(bits, id)\
+ dst.header = src.header;\
+ dst.header.msgh_id = id;\
+ dst.header.msgh_size = static_cast<mach_msg_size_t>(sizeof(dst) - sizeof(dst.trailer));\
+ dst.NDR = src.NDR;\
+ dst.exception = src.exception;\
+ dst.code_count = src.code_count;\
+ dst.code[0] = int##bits##_t(src.code[0]);\
+ dst.code[1] = int##bits##_t(src.code[1]);
+
+# define COPY_REQUEST_IDENTITY\
+ dst.msgh_body = src.msgh_body;\
+ dst.thread = src.thread;\
+ dst.task = src.task;
+
+# define COPY_REQUEST_STATE(flavor, stateCount, state)\
+ mach_msg_size_t stateSize = stateCount * sizeof(natural_t);\
+ dst.header.msgh_size = static_cast<mach_msg_size_t>(sizeof(dst) - sizeof(dst.trailer) -\
+ sizeof(dst.old_state) + stateSize);\
+ dst.flavor = flavor;\
+ dst.old_state_count = stateCount;\
+ memcpy(dst.old_state, state, stateSize);
+
+# define COPY_EXCEPTION_REQUEST(bits)\
+ static void\
+ CopyExceptionRequest##bits(ExceptionRequest64& src,\
+ ExceptionRequest##bits& dst)\
+ {\
+ COPY_REQUEST_COMMON(bits, sIDRequest##bits)\
+ COPY_REQUEST_IDENTITY\
+ }
+
+# define COPY_EXCEPTION_REQUEST_STATE(bits)\
+ static void\
+ CopyExceptionRequestState##bits(ExceptionRequest64& src,\
+ ExceptionRequestState##bits& dst,\
+ thread_state_flavor_t flavor,\
+ mach_msg_type_number_t stateCount,\
+ thread_state_t state)\
+ {\
+ COPY_REQUEST_COMMON(bits, sIDRequestState##bits)\
+ COPY_REQUEST_STATE(flavor, stateCount, state)\
+ }
+
+# define COPY_EXCEPTION_REQUEST_STATE_IDENTITY(bits)\
+ static void\
+ CopyExceptionRequestStateIdentity##bits(ExceptionRequest64& src,\
+ ExceptionRequestStateIdentity##bits& dst,\
+ thread_state_flavor_t flavor,\
+ mach_msg_type_number_t stateCount,\
+ thread_state_t state)\
+ {\
+ COPY_REQUEST_COMMON(bits, sIDRequestStateIdentity##bits)\
+ COPY_REQUEST_IDENTITY\
+ COPY_REQUEST_STATE(flavor, stateCount, state)\
+ }
+
+COPY_EXCEPTION_REQUEST(32)
+COPY_EXCEPTION_REQUEST_STATE(32)
+COPY_EXCEPTION_REQUEST_STATE_IDENTITY(32)
+COPY_EXCEPTION_REQUEST(64)
+COPY_EXCEPTION_REQUEST_STATE(64)
+COPY_EXCEPTION_REQUEST_STATE_IDENTITY(64)
+
+# undef COPY_EXCEPTION_REQUEST_STATE_IDENTITY
+# undef COPY_EXCEPTION_REQUEST_STATE
+# undef COPY_EXCEPTION_REQUEST
+# undef COPY_REQUEST_STATE
+# undef COPY_REQUEST_IDENTITY
+# undef COPY_REQUEST_COMMON
+
+/* -------------------------------------------------------------------------- */
+/* End Mach definitions and helper functions */
+/* -------------------------------------------------------------------------- */
+
+/* Every Mach exception handler is parameterized by these four properties. */
+struct MachExceptionParameters
+{
+    exception_mask_t mask;          // which exception types to intercept (e.g. EXC_MASK_BAD_ACCESS)
+    mach_port_t port;               // port that receives the exception messages
+    exception_behavior_t behavior;  // message format the handler expects
+    thread_state_flavor_t flavor;   // which thread state to include, if any
+};
+
+struct ExceptionHandlerState
+{
+    MachExceptionParameters current;   // the handler this file installed
+    MachExceptionParameters previous;  // the displaced handler, restored on uninstall()
+
+    /* Each Mach exception handler runs in its own thread. */
+    Thread handlerThread;
+};
+
+/* This choice of ID is arbitrary, but must not match our exception ID. */
+static const mach_msg_id_t sIDQuit = 42;
+
+static ExceptionHandlerState sMachExceptionState;  // shared with the handler thread
+
+/*
+ * The meat of our exception handler. This thread waits for an exception
+ * message, annotates the exception if needed, then forwards it to the
+ * previously installed handler (which will likely terminate the process).
+ */
+static void
+MachExceptionHandler()
+{
+    kern_return_t ret;
+    MachExceptionParameters& current = sMachExceptionState.current;
+    MachExceptionParameters& previous = sMachExceptionState.previous;
+
+    // We use the simplest kind of 64-bit exception message here.
+    ExceptionRequest64 request = {};
+    request.header.msgh_local_port = current.port;
+    request.header.msgh_size = static_cast<mach_msg_size_t>(sizeof(request));
+    ret = mach_msg(&request.header, MACH_RCV_MSG, 0, request.header.msgh_size,
+                   current.port, MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
+
+    // Restore the previous handler. We're going to forward to it
+    // anyway, and if we crash while doing so we don't want to hang.
+    task_set_exception_ports(mach_task_self(), previous.mask, previous.port,
+                             previous.behavior, previous.flavor);
+
+    // If we failed even receiving the message, just give up.
+    if (ret != MACH_MSG_SUCCESS)
+        MOZ_CRASH("MachExceptionHandler: mach_msg failed to receive a message!");
+
+    // Terminate the thread if we're shutting down.
+    if (request.header.msgh_id == sIDQuit)
+        return;
+
+    // The only other valid message ID is the one associated with the
+    // EXCEPTION_DEFAULT | MACH_EXCEPTION_CODES behavior we chose.
+    if (request.header.msgh_id != sIDRequest64)
+        MOZ_CRASH("MachExceptionHandler: Unexpected Message ID!");
+
+    // Make sure we can understand the exception we received.
+    if (request.exception != EXC_BAD_ACCESS || request.code_count != 2)
+        MOZ_CRASH("MachExceptionHandler: Unexpected exception type!");
+
+    // Get the address that the offending code tried to access.
+    uintptr_t address = uintptr_t(request.code[1]);
+
+    // If the faulting address is inside one of our protected regions, we
+    // want to annotate the crash to make it stand out from the crowd.
+    if (sProtectedRegions.isProtected(address)) {
+        ReportCrashIfDebug("Hit MOZ_CRASH(Tried to access a protected region!)\n");
+        MOZ_CRASH_ANNOTATE("MOZ_CRASH(Tried to access a protected region!)");
+    }
+
+    // Forward to the previous handler which may be a debugger, the unix
+    // signal handler, the crash reporter or something else entirely.
+    if (previous.port != MACH_PORT_NULL) {
+        mach_msg_type_number_t stateCount;  // initialized below iff the previous handler wants thread state
+        thread_state_data_t state;
+        if ((uint32_t(previous.behavior) & ~MACH_EXCEPTION_CODES) != EXCEPTION_DEFAULT) {
+            // If the previous handler requested thread state, get it here.
+            stateCount = THREAD_STATE_MAX;
+            ret = thread_get_state(request.thread.name, previous.flavor, state, &stateCount);
+            if (ret != KERN_SUCCESS)
+                MOZ_CRASH("MachExceptionHandler: Could not get the thread state to forward!");
+        }
+
+        // Depending on the behavior of the previous handler, the forwarded
+        // exception message will have a different set of fields.
+        // Of particular note is that exception handlers that lack
+        // MACH_EXCEPTION_CODES will get 32-bit fields even on 64-bit
+        // systems. It appears that OSX simply truncates these fields.
+        ExceptionRequestUnion forward;  // laid out to match previous.behavior below
+        switch (uint32_t(previous.behavior)) {
+          case EXCEPTION_DEFAULT:
+            CopyExceptionRequest32(request, forward.r32);
+            break;
+          case EXCEPTION_DEFAULT | MACH_EXCEPTION_CODES:
+            CopyExceptionRequest64(request, forward.r64);
+            break;
+          case EXCEPTION_STATE:
+            CopyExceptionRequestState32(request, forward.rs32,
+                                        previous.flavor, stateCount, state);
+            break;
+          case EXCEPTION_STATE | MACH_EXCEPTION_CODES:
+            CopyExceptionRequestState64(request, forward.rs64,
+                                        previous.flavor, stateCount, state);
+            break;
+          case EXCEPTION_STATE_IDENTITY:
+            CopyExceptionRequestStateIdentity32(request, forward.rsi32,
+                                                previous.flavor, stateCount, state);
+            break;
+          case EXCEPTION_STATE_IDENTITY | MACH_EXCEPTION_CODES:
+            CopyExceptionRequestStateIdentity64(request, forward.rsi64,
+                                                previous.flavor, stateCount, state);
+            break;
+          default:
+            MOZ_CRASH("MachExceptionHandler: Unknown previous handler behavior!");
+        }
+
+        // Forward the generated message to the old port. The local and remote
+        // port fields *and their rights* are swapped on arrival, so we need to
+        // swap them back first.
+        forward.header.msgh_bits = (request.header.msgh_bits & ~MACH_MSGH_BITS_PORTS_MASK) |
+                                   MACH_MSGH_BITS(MACH_MSGH_BITS_LOCAL(request.header.msgh_bits),
+                                                  MACH_MSGH_BITS_REMOTE(request.header.msgh_bits));
+        forward.header.msgh_local_port = forward.header.msgh_remote_port;
+        forward.header.msgh_remote_port = previous.port;
+        ret = mach_msg(&forward.header, MACH_SEND_MSG, forward.header.msgh_size, 0,
+                       MACH_PORT_NULL, MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
+        if (ret != MACH_MSG_SUCCESS)
+            MOZ_CRASH("MachExceptionHandler: Failed to forward to the previous handler!");
+    } else {
+        // There was no previous task-level exception handler, so defer to the
+        // host level one instead. We set the return code to KERN_FAILURE to
+        // indicate that we did not handle the exception.
+        // The reply message ID is always the request ID + 100.
+        ExceptionReply reply = {};
+        reply.header.msgh_bits =
+            MACH_MSGH_BITS(MACH_MSGH_BITS_REMOTE(request.header.msgh_bits), 0);
+        reply.header.msgh_size = static_cast<mach_msg_size_t>(sizeof(reply));
+        reply.header.msgh_remote_port = request.header.msgh_remote_port;
+        reply.header.msgh_local_port = MACH_PORT_NULL;
+        reply.header.msgh_id = request.header.msgh_id + 100;
+        reply.NDR = request.NDR;
+        reply.RetCode = KERN_FAILURE;
+        ret = mach_msg(&reply.header, MACH_SEND_MSG, reply.header.msgh_size, 0,
+                       MACH_PORT_NULL, MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
+        if (ret != MACH_MSG_SUCCESS)
+            MOZ_CRASH("MachExceptionHandler: Failed to forward to the host level!");
+    }
+}
+
+static void
+TerminateMachExceptionHandlerThread()
+{
+    // Ask the handler thread to exit by sending sIDQuit to our exception port.
+    mach_msg_header_t msg;
+    msg.msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND, 0);  // send right only; no reply expected
+    msg.msgh_size = static_cast<mach_msg_size_t>(sizeof(msg));
+    msg.msgh_remote_port = sMachExceptionState.current.port;
+    msg.msgh_local_port = MACH_PORT_NULL;
+    msg.msgh_reserved = 0;
+    msg.msgh_id = sIDQuit;
+    kern_return_t ret = mach_msg(&msg, MACH_SEND_MSG, sizeof(msg), 0, MACH_PORT_NULL,
+                                 MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
+
+    if (ret == MACH_MSG_SUCCESS)
+        sMachExceptionState.handlerThread.join();  // wait for the thread to see sIDQuit and return
+    else
+        MOZ_CRASH("MachExceptionHandler: Handler thread failed to terminate!");
+}
+
+bool
+MemoryProtectionExceptionHandler::install()
+{
+    MOZ_ASSERT(!sExceptionHandlerInstalled);
+
+    // If the exception handler is disabled, report success anyway.
+    if (MemoryProtectionExceptionHandler::isDisabled())
+        return true;
+
+    kern_return_t ret;
+    mach_port_t task = mach_task_self();
+
+    // Allocate a new exception port with receive rights.
+    sMachExceptionState.current = {};
+    MachExceptionParameters& current = sMachExceptionState.current;
+    ret = mach_port_allocate(task, MACH_PORT_RIGHT_RECEIVE, &current.port);
+    if (ret != KERN_SUCCESS)
+        return false;
+
+    // Give the new port send rights as well.
+    ret = mach_port_insert_right(task, current.port, current.port, MACH_MSG_TYPE_MAKE_SEND);
+    if (ret != KERN_SUCCESS) {
+        mach_port_deallocate(task, current.port);  // unwind the allocation above
+        current = {};
+        return false;
+    }
+
+    // Start the thread that will receive the messages from our exception port.
+    if (!sMachExceptionState.handlerThread.init(MachExceptionHandler)) {
+        mach_port_deallocate(task, current.port);
+        current = {};
+        return false;
+    }
+
+    // Set the other properties of our new exception handler.
+    current.mask = EXC_MASK_BAD_ACCESS;
+    current.behavior = exception_behavior_t(EXCEPTION_DEFAULT | MACH_EXCEPTION_CODES);
+    current.flavor = THREAD_STATE_NONE;  // we do not request thread state in our messages
+
+    // Tell the task to use our exception handler, and save the previous one.
+    sMachExceptionState.previous = {};
+    MachExceptionParameters& previous = sMachExceptionState.previous;
+    mach_msg_type_number_t previousCount = 1;
+    ret = task_swap_exception_ports(task, current.mask, current.port, current.behavior,
+                                    current.flavor, &previous.mask, &previousCount,
+                                    &previous.port, &previous.behavior, &previous.flavor);
+    if (ret != KERN_SUCCESS) {
+        TerminateMachExceptionHandlerThread();  // thread was started above; stop it before unwinding
+        mach_port_deallocate(task, current.port);
+        previous = {};
+        current = {};
+        return false;
+    }
+
+    // We should have info on the previous exception handler, even if it's null.
+    MOZ_ASSERT(previousCount == 1);
+
+    sExceptionHandlerInstalled = true;
+    return sExceptionHandlerInstalled;  // always true here
+}
+
+void
+MemoryProtectionExceptionHandler::uninstall()
+{
+    if (sExceptionHandlerInstalled) {
+        mach_port_t task = mach_task_self();
+
+        // Restore the previous exception handler.
+        MachExceptionParameters& previous = sMachExceptionState.previous;
+        task_set_exception_ports(task, previous.mask, previous.port,
+                                 previous.behavior, previous.flavor);
+
+        TerminateMachExceptionHandlerThread();  // joins the handler thread
+
+        // Release the Mach IPC port we used.
+        mach_port_deallocate(task, sMachExceptionState.current.port);
+
+        sMachExceptionState.current = {};
+        sMachExceptionState.previous = {};
+
+        sExceptionHandlerInstalled = false;
+    }
+}
+
+#else
+
+#error "This platform is not supported!"
+
+#endif
+
+} /* namespace js */
diff --git a/js/src/ds/MemoryProtectionExceptionHandler.h b/js/src/ds/MemoryProtectionExceptionHandler.h
new file mode 100644
index 000000000..c8b96679c
--- /dev/null
+++ b/js/src/ds/MemoryProtectionExceptionHandler.h
@@ -0,0 +1,45 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef ds_MemoryProtectionExceptionHandler_h
+#define ds_MemoryProtectionExceptionHandler_h
+
+#include "jstypes.h"
+
+namespace js {
+
+/*
+ * This structure allows you to annotate crashes resulting from unauthorized
+ * access to protected memory in regions of interest, to make them stand out
+ * from other heap corruption crashes.
+ */
+
+struct MemoryProtectionExceptionHandler
+{
+    /* Installs the exception handler; called early during initialization. Returns false on failure. */
+    static bool install();
+
+    /* If the exception handler is disabled, it won't be installed (install() still returns true). */
+    static bool isDisabled();
+
+    /*
+     * Marks a region of memory as important; if something tries to access this
+     * region in an unauthorized way (e.g. writing to read-only memory),
+     * the resulting crash will be annotated to stand out from other random
+     * heap corruption.
+     */
+    static void addRegion(void* addr, size_t size);
+
+    /* Removes a previously added region; pass the same |addr| given to addRegion(). */
+    static void removeRegion(void* addr);
+
+    /* Uninstalls the exception handler; called late during shutdown. */
+    static void uninstall();
+};
+
+} /* namespace js */
+
+#endif /* ds_MemoryProtectionExceptionHandler_h */
diff --git a/js/src/ds/OrderedHashTable.h b/js/src/ds/OrderedHashTable.h
new file mode 100644
index 000000000..8d26c5546
--- /dev/null
+++ b/js/src/ds/OrderedHashTable.h
@@ -0,0 +1,841 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef ds_OrderedHashTable_h
+#define ds_OrderedHashTable_h
+
+/*
+ * Define two collection templates, js::OrderedHashMap and js::OrderedHashSet.
+ * They are like js::HashMap and js::HashSet except that:
+ *
+ * - Iterating over an Ordered hash table visits the entries in the order in
+ * which they were inserted. This means that unlike a HashMap, the behavior
+ * of an OrderedHashMap is deterministic (as long as the HashPolicy methods
+ * are effect-free and consistent); the hashing is a pure performance
+ * optimization.
+ *
+ * - Range objects over Ordered tables remain valid even when entries are
+ * added or removed or the table is resized. (However in the case of
+ * removing entries, note the warning on class Range below.)
+ *
+ * - The API is a little different, so it's not a drop-in replacement.
+ * In particular, the hash policy is a little different.
+ * Also, the Ordered templates lack the Ptr and AddPtr types.
+ *
+ * Hash policies
+ *
+ * See the comment about "Hash policy" in HashTable.h for general features that
+ * hash policy classes must provide. Hash policies for OrderedHashMaps and Sets
+ * differ in that the hash() method takes an extra argument:
+ * static js::HashNumber hash(Lookup, const HashCodeScrambler&);
+ * They must additionally provide a distinguished "empty" key value and the
+ * following static member functions:
+ * bool isEmpty(const Key&);
+ * void makeEmpty(Key*);
+ */
+
+#include "mozilla/HashFunctions.h"
+#include "mozilla/Move.h"
+
+using mozilla::Forward;
+using mozilla::Move;
+
+namespace js {
+
+namespace detail {
+
+/*
+ * detail::OrderedHashTable is the underlying data structure used to implement both
+ * OrderedHashMap and OrderedHashSet. Programs should use one of those two
+ * templates rather than OrderedHashTable.
+ */
+template <class T, class Ops, class AllocPolicy>
+class OrderedHashTable
+{
+  public:
+    typedef typename Ops::KeyType Key;
+    typedef typename Ops::Lookup Lookup;
+
+    struct Data
+    {
+        T element;
+        Data* chain;
+
+        Data(const T& e, Data* c) : element(e), chain(c) {}
+        Data(T&& e, Data* c) : element(Move(e)), chain(c) {}
+    };
+
+    class Range;
+    friend class Range;
+
+  private:
+    Data** hashTable;           // hash table (has hashBuckets() elements)
+    Data* data;                 // data vector, an array of Data objects
+                                // data[0:dataLength] are constructed
+    uint32_t dataLength;        // number of constructed elements in data
+    uint32_t dataCapacity;      // size of data, in elements
+    uint32_t liveCount;         // dataLength less empty (removed) entries
+    uint32_t hashShift;         // multiplicative hash shift
+    Range* ranges;              // list of all live Ranges on this table
+    AllocPolicy alloc;
+    mozilla::HashCodeScrambler hcs;     // don't reveal pointer hash codes
+
+  public:
+    OrderedHashTable(AllocPolicy& ap, mozilla::HashCodeScrambler hcs)  // dataCapacity/liveCount/hashShift are assigned by init()
+      : hashTable(nullptr), data(nullptr), dataLength(0), ranges(nullptr), alloc(ap), hcs(hcs) {}
+
+    MOZ_MUST_USE bool init() {
+        MOZ_ASSERT(!hashTable, "init must be called at most once");
+
+        uint32_t buckets = initialBuckets();
+        Data** tableAlloc = alloc.template pod_malloc<Data*>(buckets);
+        if (!tableAlloc)
+            return false;
+        for (uint32_t i = 0; i < buckets; i++)
+            tableAlloc[i] = nullptr;
+
+        uint32_t capacity = uint32_t(buckets * fillFactor());
+        Data* dataAlloc = alloc.template pod_malloc<Data>(capacity);
+        if (!dataAlloc) {
+            alloc.free_(tableAlloc);
+            return false;
+        }
+
+        // clear() requires that members are assigned only after all allocation
+        // has succeeded, and that this->ranges is left untouched.
+        hashTable = tableAlloc;
+        data = dataAlloc;
+        dataLength = 0;
+        dataCapacity = capacity;
+        liveCount = 0;
+        hashShift = HashNumberSizeBits - initialBucketsLog2();
+        MOZ_ASSERT(hashBuckets() == buckets);
+        return true;
+    }
+
+    ~OrderedHashTable() {
+        for (Range* r = ranges; r; ) {
+            Range* next = r->next;  // save: onTableDestroyed() overwrites r->next
+            r->onTableDestroyed();
+            r = next;
+        }
+        alloc.free_(hashTable);
+        freeData(data, dataLength);
+    }
+
+    /* Return the number of elements in the table. */
+    uint32_t count() const { return liveCount; }
+
+    /* True if any element matches l. */
+    bool has(const Lookup& l) const {
+        return lookup(l) != nullptr;
+    }
+
+    /* Return a pointer to the element, if any, that matches l, or nullptr. */
+    T* get(const Lookup& l) {
+        Data* e = lookup(l, prepareHash(l));
+        return e ? &e->element : nullptr;
+    }
+
+    /* Return a pointer to the element, if any, that matches l, or nullptr. */
+    const T* get(const Lookup& l) const {
+        return const_cast<OrderedHashTable*>(this)->get(l);
+    }
+
+    /*
+     * If the table already contains an entry that matches |element|,
+     * replace that entry with |element|. Otherwise add a new entry.
+     *
+     * On success, return true, whether there was already a matching element or
+     * not. On allocation failure, return false. If this returns false, it
+     * means the element was not added to the table.
+     */
+    template <typename ElementInput>
+    MOZ_MUST_USE bool put(ElementInput&& element) {
+        HashNumber h = prepareHash(Ops::getKey(element));
+        if (Data* e = lookup(Ops::getKey(element), h)) {
+            e->element = Forward<ElementInput>(element);
+            return true;
+        }
+
+        if (dataLength == dataCapacity) {
+            // If the data vector is more than 1/4 deleted entries, simply
+            // rehash in place to free up some space. Otherwise, grow the table.
+            uint32_t newHashShift = liveCount >= dataCapacity * 0.75 ? hashShift - 1 : hashShift;
+            if (!rehash(newHashShift))
+                return false;
+        }
+
+        h >>= hashShift;  // convert the hash code to a bucket index
+        liveCount++;
+        Data* e = &data[dataLength++];
+        new (e) Data(Forward<ElementInput>(element), hashTable[h]);
+        hashTable[h] = e;
+        return true;
+    }
+
+    /*
+     * If the table contains an element matching l, remove it and set *foundp
+     * to true. Otherwise set *foundp to false.
+     *
+     * Return true on success, false if we tried to shrink the table and hit an
+     * allocation failure. Even if this returns false, *foundp is set correctly
+     * and the matching element was removed. Shrinking is an optimization and
+     * it's OK for it to fail.
+     */
+    bool remove(const Lookup& l, bool* foundp) {
+        // Note: This could be optimized so that removing the last entry,
+        // data[dataLength - 1], decrements dataLength. LIFO use cases would
+        // benefit.
+
+        // If a matching entry exists, empty it.
+        Data* e = lookup(l, prepareHash(l));
+        if (e == nullptr) {
+            *foundp = false;
+            return true;
+        }
+
+        *foundp = true;
+        liveCount--;
+        Ops::makeEmpty(&e->element);
+
+        // Update active Ranges.
+        uint32_t pos = e - data;
+        for (Range* r = ranges; r; r = r->next)
+            r->onRemove(pos);
+
+        // If many entries have been removed, try to shrink the table.
+        if (hashBuckets() > initialBuckets() && liveCount < dataLength * minDataFill()) {
+            if (!rehash(hashShift + 1))
+                return false;
+        }
+        return true;
+    }
+
+    /*
+     * Remove all entries.
+     *
+     * Returns false on OOM, leaving the OrderedHashTable and any live Ranges
+     * in the old state.
+     *
+     * The effect on live Ranges is the same as removing all entries; in
+     * particular, those Ranges are still live and will see any entries added
+     * after a successful clear().
+     */
+    MOZ_MUST_USE bool clear() {
+        if (dataLength != 0) {
+            Data** oldHashTable = hashTable;
+            Data* oldData = data;
+            uint32_t oldDataLength = dataLength;
+
+            hashTable = nullptr;  // satisfy init()'s precondition
+            if (!init()) {
+                // init() only mutates members on success; see comment above.
+                hashTable = oldHashTable;
+                return false;
+            }
+
+            alloc.free_(oldHashTable);
+            freeData(oldData, oldDataLength);
+            for (Range* r = ranges; r; r = r->next)
+                r->onClear();
+        }
+
+        MOZ_ASSERT(hashTable);
+        MOZ_ASSERT(data);
+        MOZ_ASSERT(dataLength == 0);
+        MOZ_ASSERT(liveCount == 0);
+        return true;
+    }
+
+    /*
+     * Ranges are used to iterate over OrderedHashTables.
+     *
+     * Suppose 'Map' is some instance of OrderedHashMap, and 'map' is a Map.
+     * Then you can walk all the key-value pairs like this:
+     *
+     *     for (Map::Range r = map.all(); !r.empty(); r.popFront()) {
+     *         Map::Entry& pair = r.front();
+     *         ... do something with pair ...
+     *     }
+     *
+     * Ranges remain valid for the lifetime of the OrderedHashTable, even if
+     * entries are added or removed or the table is resized. Don't do anything
+     * to a Range, except destroy it, after the OrderedHashTable has been
+     * destroyed. (We support destroying the two objects in either order to
+     * humor the GC, bless its nondeterministic heart.)
+     *
+     * Warning: The behavior when the current front() entry is removed from the
+     * table is subtly different from js::HashTable<>::Enum::removeFront()!
+     * HashTable::Enum doesn't skip any entries when you removeFront() and then
+     * popFront(). OrderedHashTable::Range does! (This is useful for using a
+     * Range to implement JS Map.prototype.iterator.)
+     *
+     * The workaround is to call popFront() as soon as possible,
+     * before there's any possibility of modifying the table:
+     *
+     *     for (Map::Range r = map.all(); !r.empty(); ) {
+     *         Key key = r.front().key;         // this won't modify map
+     *         Value val = r.front().value;     // this won't modify map
+     *         r.popFront();
+     *         // ...do things that might modify map...
+     *     }
+     */
+    class Range
+    {
+        friend class OrderedHashTable;
+
+        // Cannot be a reference since we need to be able to do
+        // |offsetof(Range, ht)|.
+        OrderedHashTable* ht;
+
+        /* The index of front() within ht->data. */
+        uint32_t i;
+
+        /*
+         * The number of nonempty entries in ht->data to the left of front().
+         * This is used when the table is resized or compacted.
+         */
+        uint32_t count;
+
+        /*
+         * Links in the doubly-linked list of active Ranges on ht.
+         *
+         * prevp points to the previous Range's .next field;
+         *     or to ht->ranges if this is the first Range in the list.
+         * next points to the next Range;
+         *     or nullptr if this is the last Range in the list.
+         *
+         * Invariant: *prevp == this.
+         */
+        Range** prevp;
+        Range* next;
+
+        /*
+         * Create a Range over all the entries in ht.
+         * (This is private on purpose. End users must use ht->all().)
+         */
+        explicit Range(OrderedHashTable* ht) : ht(ht), i(0), count(0), prevp(&ht->ranges), next(ht->ranges) {
+            *prevp = this;
+            if (next)
+                next->prevp = &next;
+            seek();
+        }
+
+      public:
+        Range(const Range& other)
+          : ht(other.ht), i(other.i), count(other.count), prevp(&ht->ranges), next(ht->ranges)
+        {
+            *prevp = this;  // link the copy at the head of ht's Range list
+            if (next)
+                next->prevp = &next;
+        }
+
+        ~Range() {
+            *prevp = next;
+            if (next)
+                next->prevp = prevp;
+        }
+
+      private:
+        // Prohibit copy assignment.
+        Range& operator=(const Range& other) = delete;
+
+        void seek() {
+            while (i < ht->dataLength && Ops::isEmpty(Ops::getKey(ht->data[i].element)))
+                i++;
+        }
+
+        /*
+         * The hash table calls this when an entry is removed.
+         * j is the index of the removed entry.
+         */
+        void onRemove(uint32_t j) {
+            MOZ_ASSERT(valid());
+            if (j < i)
+                count--;
+            if (j == i)
+                seek();
+        }
+
+        /*
+         * The hash table calls this when the table is resized or compacted.
+         * Since |count| is the number of nonempty entries to the left of
+         * front(), discarding the empty entries will not affect count, and it
+         * will make i and count equal.
+         */
+        void onCompact() {
+            MOZ_ASSERT(valid());
+            i = count;
+        }
+
+        /* The hash table calls this when cleared. */
+        void onClear() {
+            MOZ_ASSERT(valid());
+            i = count = 0;
+        }
+
+        bool valid() const {
+            return next != this;  // onTableDestroyed() marks a dead Range with next == this
+        }
+
+        void onTableDestroyed() {
+            MOZ_ASSERT(valid());
+            prevp = &next;
+            next = this;
+        }
+
+      public:
+        bool empty() const {
+            MOZ_ASSERT(valid());
+            return i >= ht->dataLength;
+        }
+
+        /*
+         * Return the first element in the range. This must not be called if
+         * this->empty().
+         *
+         * Warning: Removing an entry from the table also removes it from any
+         * live Ranges, and a Range can become empty that way, rendering
+         * front() invalid. If in doubt, check empty() before calling front().
+         */
+        T& front() {
+            MOZ_ASSERT(valid());
+            MOZ_ASSERT(!empty());
+            return ht->data[i].element;
+        }
+
+        /*
+         * Remove the first element from this range.
+         * This must not be called if this->empty().
+         *
+         * Warning: Removing an entry from the table also removes it from any
+         * live Ranges, and a Range can become empty that way, rendering
+         * popFront() invalid. If in doubt, check empty() before calling
+         * popFront().
+         */
+        void popFront() {
+            MOZ_ASSERT(valid());
+            MOZ_ASSERT(!empty());
+            MOZ_ASSERT(!Ops::isEmpty(Ops::getKey(ht->data[i].element)));
+            count++;
+            i++;
+            seek();
+        }
+
+        /*
+         * Change the key of the front entry.
+         *
+         * This calls Ops::hash on both the current key and the new key.
+         * Ops::hash on the current key must return the same hash code as
+         * when the entry was added to the table.
+         */
+        void rekeyFront(const Key& k) {
+            MOZ_ASSERT(valid());
+            Data& entry = ht->data[i];
+            HashNumber oldHash = ht->prepareHash(Ops::getKey(entry.element)) >> ht->hashShift;
+            HashNumber newHash = ht->prepareHash(k) >> ht->hashShift;
+            Ops::setKey(entry.element, k);
+            if (newHash != oldHash) {
+                // Remove this entry from its old hash chain. (If this crashes
+                // reading nullptr, it would mean we did not find this entry on
+                // the hash chain where we expected it. That probably means the
+                // key's hash code changed since it was inserted, breaking the
+                // hash code invariant.)
+                Data** ep = &ht->hashTable[oldHash];
+                while (*ep != &entry)
+                    ep = &(*ep)->chain;
+                *ep = entry.chain;
+
+                // Add it to the new hash chain. We could just insert it at the
+                // beginning of the chain. Instead, we do a bit of work to
+                // preserve the invariant that hash chains always go in reverse
+                // insertion order (descending memory order). No code currently
+                // depends on this invariant, so it's fine to kill it if
+                // needed.
+                ep = &ht->hashTable[newHash];
+                while (*ep && *ep > &entry)
+                    ep = &(*ep)->chain;
+                entry.chain = *ep;
+                *ep = &entry;
+            }
+        }
+
+        static size_t offsetOfHashTable() {
+            return offsetof(Range, ht);
+        }
+        static size_t offsetOfI() {
+            return offsetof(Range, i);
+        }
+        static size_t offsetOfCount() {
+            return offsetof(Range, count);
+        }
+        static size_t offsetOfPrevP() {
+            return offsetof(Range, prevp);
+        }
+        static size_t offsetOfNext() {
+            return offsetof(Range, next);
+        }
+    };
+
+    Range all() { return Range(this); }
+
+    /*
+     * Change the key of an entry from |current| to |newKey|, replacing the
+     * entry's element with |element|. Does nothing if |current| is not found.
+     * This calls Ops::hash on both the current key and the new key.
+     * Ops::hash on the current key must return the same hash code as
+     * when the entry was added to the table.
+     */
+    void rekeyOneEntry(const Key& current, const Key& newKey, const T& element) {
+        if (current == newKey)
+            return;
+
+        Data* entry = lookup(current, prepareHash(current));
+        if (!entry)
+            return;
+
+        HashNumber oldHash = prepareHash(current) >> hashShift;
+        HashNumber newHash = prepareHash(newKey) >> hashShift;
+
+        entry->element = element;
+
+        // Remove this entry from its old hash chain. (If this crashes
+        // reading nullptr, it would mean we did not find this entry on
+        // the hash chain where we expected it. That probably means the
+        // key's hash code changed since it was inserted, breaking the
+        // hash code invariant.)
+        Data** ep = &hashTable[oldHash];
+        while (*ep != entry)
+            ep = &(*ep)->chain;
+        *ep = entry->chain;
+
+        // Add it to the new hash chain. We could just insert it at the
+        // beginning of the chain. Instead, we do a bit of work to
+        // preserve the invariant that hash chains always go in reverse
+        // insertion order (descending memory order). No code currently
+        // depends on this invariant, so it's fine to kill it if
+        // needed.
+        ep = &hashTable[newHash];
+        while (*ep && *ep > entry)
+            ep = &(*ep)->chain;
+        entry->chain = *ep;
+        *ep = entry;
+    }
+
+    static size_t offsetOfDataLength() {
+        return offsetof(OrderedHashTable, dataLength);
+    }
+    static size_t offsetOfData() {
+        return offsetof(OrderedHashTable, data);
+    }
+    static constexpr size_t offsetOfDataElement() {
+        return offsetof(Data, element);
+    }
+    static constexpr size_t sizeofData() {
+        return sizeof(Data);
+    }
+
+  private:
+    /* Logarithm base 2 of the number of buckets in the hash table initially. */
+    static uint32_t initialBucketsLog2() { return 1; }
+    static uint32_t initialBuckets() { return 1 << initialBucketsLog2(); }
+
+    /*
+     * The maximum load factor (mean number of entries per bucket).
+     * It is an invariant that
+     *     dataCapacity == floor(hashBuckets() * fillFactor()).
+     *
+     * The fill factor should be between 2 and 4, and it should be chosen so that
+     * the fill factor times sizeof(Data) is close to but <= a power of 2.
+     * This fixed fill factor was chosen to make the size of the data
+     * array, in bytes, close to a power of two when sizeof(T) is 16.
+     */
+    static double fillFactor() { return 8.0 / 3.0; }
+
+    /*
+     * The minimum permitted value of (liveCount / dataLength).
+     * If that ratio drops below this value, we shrink the table.
+     */
+    static double minDataFill() { return 0.25; }
+
+  public:
+    HashNumber prepareHash(const Lookup& l) const {
+        return ScrambleHashCode(Ops::hash(l, hcs));
+    }
+
+  private:
+    /* The size of hashTable, in elements. Always a power of two. */
+    uint32_t hashBuckets() const {
+        return 1 << (HashNumberSizeBits - hashShift);
+    }
+
+    static void destroyData(Data* data, uint32_t length) {
+        for (Data* p = data + length; p != data; )  // destroy in reverse construction order
+            (--p)->~Data();
+    }
+
+    void freeData(Data* data, uint32_t length) {
+        destroyData(data, length);
+        alloc.free_(data);
+    }
+
+    Data* lookup(const Lookup& l, HashNumber h) {  // |h| must be prepareHash(l)
+        for (Data* e = hashTable[h >> hashShift]; e; e = e->chain) {
+            if (Ops::match(Ops::getKey(e->element), l))
+                return e;
+        }
+        return nullptr;
+    }
+
+    const Data* lookup(const Lookup& l) const {
+        return const_cast<OrderedHashTable*>(this)->lookup(l, prepareHash(l));
+    }
+
+    /* This is called after rehashing the table. */
+    void compacted() {
+        // If we had any empty entries, compacting may have moved live entries
+        // to the left within |data|. Notify all live Ranges of the change.
+        for (Range* r = ranges; r; r = r->next)
+            r->onCompact();
+    }
+
+    /* Compact the entries in |data| and rehash them. */
+    void rehashInPlace() {
+        for (uint32_t i = 0, N = hashBuckets(); i < N; i++)
+            hashTable[i] = nullptr;
+        Data* wp = data;
+        Data* end = data + dataLength;
+        for (Data* rp = data; rp != end; rp++) {
+            if (!Ops::isEmpty(Ops::getKey(rp->element))) {
+                HashNumber h = prepareHash(Ops::getKey(rp->element)) >> hashShift;
+                if (rp != wp)
+                    wp->element = Move(rp->element);
+                wp->chain = hashTable[h];
+                hashTable[h] = wp;
+                wp++;
+            }
+        }
+        MOZ_ASSERT(wp == data + liveCount);
+
+        while (wp != end)
+            (--end)->~Data();
+        dataLength = liveCount;
+        compacted();
+    }
+
+    /*
+     * Grow, shrink, or compact both |hashTable| and |data|.
+     *
+     * On success, this returns true, dataLength == liveCount, and there are no
+     * empty elements in data[0:dataLength]. On allocation failure, this
+     * leaves everything as it was and returns false.
+     */
+    MOZ_MUST_USE bool rehash(uint32_t newHashShift) {
+        // If the size of the table is not changing, rehash in place to avoid
+        // allocating memory.
+        if (newHashShift == hashShift) {
+            rehashInPlace();
+            return true;
+        }
+
+        size_t newHashBuckets =
+            size_t(1) << (HashNumberSizeBits - newHashShift);
+        Data** newHashTable = alloc.template pod_malloc<Data*>(newHashBuckets);
+        if (!newHashTable)
+            return false;
+        for (uint32_t i = 0; i < newHashBuckets; i++)
+            newHashTable[i] = nullptr;
+
+        uint32_t newCapacity = uint32_t(newHashBuckets * fillFactor());
+        Data* newData = alloc.template pod_malloc<Data>(newCapacity);
+        if (!newData) {
+            alloc.free_(newHashTable);
+            return false;
+        }
+
+        Data* wp = newData;
+        Data* end = data + dataLength;
+        for (Data* p = data; p != end; p++) {
+            if (!Ops::isEmpty(Ops::getKey(p->element))) {
+                HashNumber h = prepareHash(Ops::getKey(p->element)) >> newHashShift;
+                new (wp) Data(Move(p->element), newHashTable[h]);
+                newHashTable[h] = wp;
+                wp++;
+            }
+        }
+        MOZ_ASSERT(wp == newData + liveCount);
+
+        alloc.free_(hashTable);
+        freeData(data, dataLength);
+
+        hashTable = newHashTable;
+        data = newData;
+        dataLength = liveCount;
+        dataCapacity = newCapacity;
+        hashShift = newHashShift;
+        MOZ_ASSERT(hashBuckets() == newHashBuckets);
+
+        compacted();
+        return true;
+    }
+
+    // Not copyable.
+    OrderedHashTable& operator=(const OrderedHashTable&) = delete;
+    OrderedHashTable(const OrderedHashTable&) = delete;
+};
+
+} // namespace detail
+
+/*
+ * A hash map whose entries can be iterated in insertion order, implemented
+ * as a thin wrapper over detail::OrderedHashTable. Field offsets are exposed
+ * so JIT-generated code can access the table directly.
+ */
+template <class Key, class Value, class OrderedHashPolicy, class AllocPolicy>
+class OrderedHashMap
+{
+  public:
+    class Entry
+    {
+        template <class, class, class> friend class detail::OrderedHashTable;
+        // Assignment is private: only the table may relocate entries. |key|
+        // is const to users of the map, so the table casts constness away.
+        void operator=(const Entry& rhs) {
+            const_cast<Key&>(key) = rhs.key;
+            value = rhs.value;
+        }
+
+        void operator=(Entry&& rhs) {
+            MOZ_ASSERT(this != &rhs, "self-move assignment is prohibited");
+            const_cast<Key&>(key) = Move(rhs.key);
+            value = Move(rhs.value);
+        }
+
+      public:
+        Entry() : key(), value() {}
+        template <typename V>
+        Entry(const Key& k, V&& v) : key(k), value(Forward<V>(v)) {}
+        Entry(Entry&& rhs) : key(Move(rhs.key)), value(Move(rhs.value)) {}
+
+        const Key key;
+        Value value;
+
+        static size_t offsetOfKey() {
+            return offsetof(Entry, key);
+        }
+        static size_t offsetOfValue() {
+            return offsetof(Entry, value);
+        }
+    };
+
+  private:
+    // Adapts OrderedHashPolicy (which speaks in Keys) to the Entry-based
+    // interface that detail::OrderedHashTable expects.
+    struct MapOps : OrderedHashPolicy
+    {
+        typedef Key KeyType;
+        static void makeEmpty(Entry* e) {
+            OrderedHashPolicy::makeEmpty(const_cast<Key*>(&e->key));
+
+            // Clear the value. Destroying it is another possibility, but that
+            // would complicate class Entry considerably.
+            e->value = Value();
+        }
+        static const Key& getKey(const Entry& e) { return e.key; }
+        static void setKey(Entry& e, const Key& k) { const_cast<Key&>(e.key) = k; }
+    };
+
+    typedef detail::OrderedHashTable<Entry, MapOps, AllocPolicy> Impl;
+    Impl impl;
+
+  public:
+    typedef typename Impl::Range Range;
+
+    OrderedHashMap(AllocPolicy ap, mozilla::HashCodeScrambler hcs) : impl(ap, hcs) {}
+    MOZ_MUST_USE bool init() { return impl.init(); }
+    uint32_t count() const { return impl.count(); }
+    bool has(const Key& key) const { return impl.has(key); }
+    Range all() { return impl.all(); }
+    const Entry* get(const Key& key) const { return impl.get(key); }
+    Entry* get(const Key& key) { return impl.get(key); }
+    bool remove(const Key& key, bool* foundp) { return impl.remove(key, foundp); }
+    MOZ_MUST_USE bool clear() { return impl.clear(); }
+
+    template <typename V>
+    MOZ_MUST_USE bool put(const Key& key, V&& value) {
+        return impl.put(Entry(key, Forward<V>(value)));
+    }
+
+    HashNumber hash(const Key& key) const { return impl.prepareHash(key); }
+
+    // Change the key of the entry for |current| to |newKey|, keeping the
+    // entry's value and its position in the iteration order. No-op if
+    // |current| is not present.
+    void rekeyOneEntry(const Key& current, const Key& newKey) {
+        const Entry* e = get(current);
+        if (!e)
+            return;
+        return impl.rekeyOneEntry(current, newKey, Entry(newKey, e->value));
+    }
+
+    // Offsets into the implementation, for JIT-generated accessors.
+    static size_t offsetOfEntryKey() {
+        return Entry::offsetOfKey();
+    }
+    static size_t offsetOfImplDataLength() {
+        return Impl::offsetOfDataLength();
+    }
+    static size_t offsetOfImplData() {
+        return Impl::offsetOfData();
+    }
+    static constexpr size_t offsetOfImplDataElement() {
+        return Impl::offsetOfDataElement();
+    }
+    static constexpr size_t sizeofImplData() {
+        return Impl::sizeofData();
+    }
+};
+
+/*
+ * A hash set whose elements can be iterated in insertion order, implemented
+ * as a thin wrapper over detail::OrderedHashTable. Field offsets are exposed
+ * so JIT-generated code can access the table directly.
+ */
+template <class T, class OrderedHashPolicy, class AllocPolicy>
+class OrderedHashSet
+{
+  private:
+    // For a set, the element is its own key.
+    struct SetOps : OrderedHashPolicy
+    {
+        typedef const T KeyType;
+        static const T& getKey(const T& v) { return v; }
+        static void setKey(const T& e, const T& v) { const_cast<T&>(e) = v; }
+    };
+
+    typedef detail::OrderedHashTable<T, SetOps, AllocPolicy> Impl;
+    Impl impl;
+
+  public:
+    typedef typename Impl::Range Range;
+
+    explicit OrderedHashSet(AllocPolicy ap, mozilla::HashCodeScrambler hcs) : impl(ap, hcs) {}
+    MOZ_MUST_USE bool init() { return impl.init(); }
+    uint32_t count() const { return impl.count(); }
+    bool has(const T& value) const { return impl.has(value); }
+    Range all() { return impl.all(); }
+    MOZ_MUST_USE bool put(const T& value) { return impl.put(value); }
+    bool remove(const T& value, bool* foundp) { return impl.remove(value, foundp); }
+    MOZ_MUST_USE bool clear() { return impl.clear(); }
+
+    HashNumber hash(const T& value) const { return impl.prepareHash(value); }
+
+    // Replace element |current| with |newKey|, keeping its position in the
+    // iteration order.
+    void rekeyOneEntry(const T& current, const T& newKey) {
+        return impl.rekeyOneEntry(current, newKey, newKey);
+    }
+
+    // Offsets into the implementation, for JIT-generated accessors.
+    static size_t offsetOfEntryKey() {
+        return 0;
+    }
+    static size_t offsetOfImplDataLength() {
+        return Impl::offsetOfDataLength();
+    }
+    static size_t offsetOfImplData() {
+        return Impl::offsetOfData();
+    }
+    static constexpr size_t offsetOfImplDataElement() {
+        return Impl::offsetOfDataElement();
+    }
+    static constexpr size_t sizeofImplData() {
+        return Impl::sizeofData();
+    }
+};
+
+} // namespace js
+
+#endif /* ds_OrderedHashTable_h */
diff --git a/js/src/ds/PageProtectingVector.h b/js/src/ds/PageProtectingVector.h
new file mode 100644
index 000000000..86d52fc0c
--- /dev/null
+++ b/js/src/ds/PageProtectingVector.h
@@ -0,0 +1,257 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef ds_PageProtectingVector_h
+#define ds_PageProtectingVector_h
+
+#include "mozilla/Vector.h"
+
+#include "ds/MemoryProtectionExceptionHandler.h"
+#include "gc/Memory.h"
+
+namespace js {
+
+/*
+ * PageProtectingVector is a vector that can only grow or be cleared, and marks
+ * all of its fully used memory pages as read-only. It can be used to detect
+ * heap corruption in important buffers, since anything that tries to write
+ * into its protected pages will crash.
+ *
+ * PageProtectingVector's protection is limited to full pages. If the front
+ * of its buffer is not aligned on a page boundary, bytes preceding the first
+ * page boundary will not be protected. Similarly, the end of the buffer will
+ * not be fully protected unless it is aligned on a page boundary. Altogether,
+ * up to two pages of memory may not be protected.
+ */
+template<typename T,
+         size_t MinInlineCapacity = 0,
+         class AllocPolicy = mozilla::MallocAllocPolicy>
+class PageProtectingVector final
+{
+    mozilla::Vector<T, MinInlineCapacity, AllocPolicy> vector;
+
+    size_t pageSize;
+    size_t pageMask;
+
+    /*
+     * The number of bytes between the start of the buffer being used by
+     * |vector| and the first page we can protect. With jemalloc, this number
+     * should always be 0 for vectors with a buffer larger than |pageSize / 2|
+     * bytes, but with other allocators large buffers may not be page-aligned.
+     */
+    size_t offsetToPage;
+
+    /* The number of currently protected bytes (a multiple of pageSize). */
+    size_t protectedBytes;
+
+    /*
+     * The number of bytes that are currently unprotected, but could be.
+     * This number starts at |-offsetToPage|, since any bytes before
+     * |vector.begin() + offsetToPage| can never be protected (as we do not own
+     * the whole page). As a result, if |unprotectedBytes >= pageSize|, we know
+     * we can protect at least one more page, and |unprotectedBytes & ~pageMask|
+     * is always the number of additional bytes we can protect. Put another way,
+     * |offsetToPage + protectedBytes + unprotectedBytes == [size in bytes]|
+     * always holds, and if |protectedBytes != 0| then |unprotectedBytes >= 0|.
+     */
+    intptr_t unprotectedBytes;
+
+    /*
+     * The size in bytes that a buffer needs to be before its pages will be
+     * protected. This is intended to reduce churn for small vectors while
+     * still offering protection when they grow large enough.
+     */
+    size_t protectionLowerBound;
+
+    bool protectionEnabled;
+    bool regionUnprotected;
+
+    // Recompute |offsetToPage| after the underlying buffer may have moved,
+    // keeping the invariant documented on |unprotectedBytes| intact.
+    // NOTE(review): the protectionEnabled update below is compiled out with
+    // #if 0, so protection appears deliberately disabled — confirm intent.
+    void updateOffsetToPage() {
+        unprotectedBytes += offsetToPage;
+        offsetToPage = (pageSize - (uintptr_t(vector.begin()) & pageMask)) & pageMask;
+        unprotectedBytes -= offsetToPage;
+#if 0
+        protectionEnabled = vector.capacity() * sizeof(T) >= protectionLowerBound &&
+                            vector.capacity() * sizeof(T) >= pageSize + offsetToPage;
+#endif
+    }
+
+    /* Mark all fully-used, not-yet-protected pages read-only. */
+    void protect() {
+        if (!regionUnprotected && protectionEnabled && unprotectedBytes >= intptr_t(pageSize)) {
+            size_t toProtect = size_t(unprotectedBytes) & ~pageMask;
+            uintptr_t addr = uintptr_t(vector.begin()) + offsetToPage + protectedBytes;
+            gc::MakePagesReadOnly(reinterpret_cast<void*>(addr), toProtect);
+            unprotectedBytes -= toProtect;
+            protectedBytes += toProtect;
+        }
+    }
+
+    /* Make every currently protected page writable again. */
+    void unprotect() {
+        MOZ_ASSERT_IF(!protectionEnabled, !protectedBytes);
+        if (!regionUnprotected && protectedBytes) {
+            uintptr_t addr = uintptr_t(vector.begin()) + offsetToPage;
+            gc::UnprotectPages(reinterpret_cast<void*>(addr), protectedBytes);
+            unprotectedBytes += protectedBytes;
+            protectedBytes = 0;
+        }
+    }
+
+    // Register the (possibly new) buffer with the crash-annotation handler
+    // and protect what can be protected.
+    void protectNewBuffer() {
+        updateOffsetToPage();
+        if (protectionEnabled)
+            MemoryProtectionExceptionHandler::addRegion(vector.begin(), vector.capacity() * sizeof(T));
+        protect();
+    }
+
+    void unprotectOldBuffer() {
+        if (protectionEnabled)
+            MemoryProtectionExceptionHandler::removeRegion(vector.begin());
+        unprotect();
+    }
+
+    // Does the byte range [first, last] overlap any protected byte?
+    bool anyProtected(size_t first, size_t last) {
+        return last >= offsetToPage && first < offsetToPage + protectedBytes;
+    }
+
+    // Compute the smallest page-aligned region covering the protected part
+    // of [first, last]; offsets are clamped to the protected byte range.
+    void setContainingRegion(size_t first, size_t last, uintptr_t* addr, size_t* size) {
+        if (first < offsetToPage)
+            first = offsetToPage;
+        if (last > offsetToPage + protectedBytes - 1)
+            last = offsetToPage + protectedBytes - 1;
+        uintptr_t firstAddr = uintptr_t(vector.begin());
+        uintptr_t firstPage = (firstAddr + first) & ~pageMask;
+        uintptr_t lastPage = (firstAddr + last) & ~pageMask;
+        *size = pageSize + (lastPage - firstPage);
+        *addr = firstPage;
+    }
+
+    // Account for |used| newly appended elements and protect any pages that
+    // are now fully used.
+    void increaseElemsUsed(size_t used) {
+        unprotectedBytes += used * sizeof(T);
+        protect();
+    }
+
+    /* A helper class to simplify unprotecting and reprotecting when needed. */
+    class AutoUnprotect
+    {
+        PageProtectingVector* vector;
+
+      public:
+        AutoUnprotect() : vector(nullptr) {};
+
+        void emplace(PageProtectingVector* holder) {
+            vector = holder;
+            vector->unprotectOldBuffer();
+        }
+
+        explicit AutoUnprotect(PageProtectingVector* holder) {
+            emplace(holder);
+        }
+
+        // Reprotect on scope exit, but only if emplace() actually ran.
+        ~AutoUnprotect() {
+            if (vector)
+                vector->protectNewBuffer();
+        }
+    };
+
+  public:
+    explicit PageProtectingVector(AllocPolicy policy = AllocPolicy())
+      : vector(policy),
+        pageSize(gc::SystemPageSize()),
+        pageMask(pageSize - 1),
+        offsetToPage(0),
+        protectedBytes(0),
+        unprotectedBytes(0),
+        protectionLowerBound(0),
+        protectionEnabled(false),
+        regionUnprotected(false) { protectNewBuffer(); }
+
+    ~PageProtectingVector() { unprotectOldBuffer(); }
+
+    /*
+     * Sets the lower bound on the size, in bytes, that this vector's underlying
+     * capacity has to be before its used pages will be protected.
+     */
+    void setLowerBoundForProtection(size_t bytes) {
+        if (protectionLowerBound != bytes) {
+            unprotectOldBuffer();
+            protectionLowerBound = bytes;
+            protectNewBuffer();
+        }
+    }
+
+    /*
+     * Disable protection on the smallest number of pages containing
+     * both |firstByteOffset| and |lastByteOffset|.
+     */
+    void unprotectRegion(size_t firstByteOffset, size_t lastByteOffset) {
+        MOZ_ASSERT(!regionUnprotected);
+        regionUnprotected = true;
+        if (!protectedBytes || !anyProtected(firstByteOffset, lastByteOffset))
+            return;
+        size_t size;
+        uintptr_t addr;
+        setContainingRegion(firstByteOffset, lastByteOffset, &addr, &size);
+        gc::UnprotectPages(reinterpret_cast<void*>(addr), size);
+    }
+
+    /*
+     * Re-enable protection on the region containing
+     * |firstByteOffset| and |lastByteOffset|.
+     */
+    void reprotectRegion(size_t firstByteOffset, size_t lastByteOffset) {
+        MOZ_ASSERT(regionUnprotected);
+        regionUnprotected = false;
+        if (!protectedBytes || !anyProtected(firstByteOffset, lastByteOffset))
+            return;
+        size_t size;
+        uintptr_t addr;
+        setContainingRegion(firstByteOffset, lastByteOffset, &addr, &size);
+        gc::MakePagesReadOnly(reinterpret_cast<void*>(addr), size);
+    }
+
+    size_t length() const { return vector.length(); }
+
+    T* begin() { return vector.begin(); }
+    const T* begin() const { return vector.begin(); }
+
+    void clear() {
+        AutoUnprotect guard(this);
+        vector.clear();
+        offsetToPage = 0;
+        unprotectedBytes = 0;
+    }
+
+    MOZ_MUST_USE bool reserve(size_t size) {
+        AutoUnprotect guard;
+        // Only unprotect when the reserve may reallocate (move) the buffer.
+        if (size > vector.capacity())
+            guard.emplace(this);
+        return vector.reserve(size);
+    }
+
+    template<typename U>
+    MOZ_ALWAYS_INLINE void infallibleAppend(const U* values, size_t size) {
+        vector.infallibleAppend(values, size);
+        increaseElemsUsed(size);
+    }
+
+    template<typename U>
+    MOZ_MUST_USE bool append(const U* values, size_t size) {
+        bool ret;
+        {
+            AutoUnprotect guard;
+            // Only unprotect when appending will grow (and move) the buffer.
+            if (MOZ_UNLIKELY(vector.length() + size > vector.capacity()))
+                guard.emplace(this);
+            ret = vector.append(values, size);
+        }
+        if (ret)
+            increaseElemsUsed(size);
+        return ret;
+    }
+};
+
+} /* namespace js */
+
+#endif /* ds_PageProtectingVector_h */
diff --git a/js/src/ds/PriorityQueue.h b/js/src/ds/PriorityQueue.h
new file mode 100644
index 000000000..549e44e92
--- /dev/null
+++ b/js/src/ds/PriorityQueue.h
@@ -0,0 +1,135 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef ds_PriorityQueue_h
+#define ds_PriorityQueue_h
+
+#include "js/Vector.h"
+
+namespace js {
+
+/*
+ * Class which represents a heap based priority queue using a vector.
+ * Inserting elements and removing the highest priority one are both O(log n).
+ *
+ * Template parameters are the same as for Vector, with the addition that P
+ * must have a static priority(const T&) method which returns higher numbers
+ * for higher priority elements.
+ */
+template <class T, class P,
+          size_t MinInlineCapacity = 0,
+          class AllocPolicy = TempAllocPolicy>
+class PriorityQueue
+{
+    // Binary max-heap stored flat in a vector; see the layout comment below.
+    Vector<T, MinInlineCapacity, AllocPolicy> heap;
+
+    PriorityQueue(const PriorityQueue&) = delete;
+    PriorityQueue& operator=(const PriorityQueue&) = delete;
+
+  public:
+
+    explicit PriorityQueue(AllocPolicy ap = AllocPolicy())
+      : heap(ap)
+    {}
+
+    MOZ_MUST_USE bool reserve(size_t capacity) {
+        return heap.reserve(capacity);
+    }
+
+    size_t length() const {
+        return heap.length();
+    }
+
+    bool empty() const {
+        return heap.empty();
+    }
+
+    // Remove and return the highest-priority element. The queue must be
+    // non-empty.
+    T removeHighest() {
+        T highest = heap[0];
+        T last = heap.popCopy();
+        if (!heap.empty()) {
+            // Move the former last element to the root and restore the heap
+            // property by sifting it down.
+            heap[0] = last;
+            siftDown(0);
+        }
+        return highest;
+    }
+
+    MOZ_MUST_USE bool insert(const T& v) {
+        if (!heap.append(v))
+            return false;
+        siftUp(heap.length() - 1);
+        return true;
+    }
+
+    // Like insert(), for use when capacity is known to suffice (e.g. after a
+    // successful reserve()).
+    void infallibleInsert(const T& v) {
+        heap.infallibleAppend(v);
+        siftUp(heap.length() - 1);
+    }
+
+  private:
+
+    /*
+     * Elements of the vector encode a binary tree:
+     *
+     *      0
+     *    1   2
+     *   3 4 5 6
+     *
+     * The children of element N are (2N + 1) and (2N + 2).
+     * The parent of element N is (N - 1) / 2.
+     *
+     * Each element has higher priority than its children.
+     */
+
+    void siftDown(size_t n) {
+        while (true) {
+            size_t left = n * 2 + 1;
+            size_t right = n * 2 + 2;
+
+            if (left < heap.length()) {
+                if (right < heap.length()) {
+                    // Descend into the right child when it outranks both
+                    // this element and the left child.
+                    if (P::priority(heap[n]) < P::priority(heap[right]) &&
+                        P::priority(heap[left]) < P::priority(heap[right]))
+                    {
+                        swap(n, right);
+                        n = right;
+                        continue;
+                    }
+                }
+
+                if (P::priority(heap[n]) < P::priority(heap[left])) {
+                    swap(n, left);
+                    n = left;
+                    continue;
+                }
+            }
+
+            // Neither child outranks this element: heap property restored.
+            break;
+        }
+    }
+
+    void siftUp(size_t n) {
+        while (n > 0) {
+            size_t parent = (n - 1) / 2;
+
+            if (P::priority(heap[parent]) > P::priority(heap[n]))
+                break;
+
+            swap(n, parent);
+            n = parent;
+        }
+    }
+
+    void swap(size_t a, size_t b) {
+        T tmp = heap[a];
+        heap[a] = heap[b];
+        heap[b] = tmp;
+    }
+};
+
+} /* namespace js */
+
+#endif /* ds_PriorityQueue_h */
diff --git a/js/src/ds/Sort.h b/js/src/ds/Sort.h
new file mode 100644
index 000000000..e5870a350
--- /dev/null
+++ b/js/src/ds/Sort.h
@@ -0,0 +1,137 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef ds_Sort_h
+#define ds_Sort_h
+
+#include "jstypes.h"
+
+namespace js {
+
+namespace detail {
+
+template<typename T>
+MOZ_ALWAYS_INLINE void
+CopyNonEmptyArray(T* dst, const T* src, size_t nelems)
+{
+    // Copy |nelems| elements from |src| to |dst|. The caller guarantees a
+    // nonzero count, which permits a do/while with no initial test.
+    MOZ_ASSERT(nelems != 0);
+    const T* end = src + nelems;
+    do {
+        *dst++ = *src++;
+    } while (src != end);
+}
+
+/*
+ * Helper function for MergeSort: merge the sorted runs src[0..run1) and
+ * src[run1..run1+run2) into dst. Returns false only if the comparator
+ * reports failure.
+ */
+template<typename T, typename Comparator>
+MOZ_ALWAYS_INLINE bool
+MergeArrayRuns(T* dst, const T* src, size_t run1, size_t run2, Comparator c)
+{
+    MOZ_ASSERT(run1 >= 1);
+    MOZ_ASSERT(run2 >= 1);
+
+    /* Copy runs already in sorted order. */
+    const T* b = src + run1;
+    bool lessOrEqual;
+    if (!c(b[-1], b[0], &lessOrEqual))
+        return false;
+
+    if (!lessOrEqual) {
+        /* Runs are not already sorted, merge them. */
+        for (const T* a = src;;) {
+            if (!c(*a, *b, &lessOrEqual))
+                return false;
+            if (lessOrEqual) {
+                *dst++ = *a++;
+                if (!--run1) {
+                    src = b;
+                    break;
+                }
+            } else {
+                *dst++ = *b++;
+                if (!--run2) {
+                    src = a;
+                    break;
+                }
+            }
+        }
+    }
+    // Copy whatever remains of the run that was not exhausted (or both runs,
+    // if they were already in order).
+    CopyNonEmptyArray(dst, src, run1 + run2);
+    return true;
+}
+
+} /* namespace detail */
+
+/*
+ * Sort the array using the merge sort algorithm. The scratch should point to
+ * a temporary storage that can hold nelems elements.
+ *
+ * The comparator must provide the () operator with the following signature:
+ *
+ * bool operator()(const T& a, const T& a, bool* lessOrEqualp);
+ *
+ * It should return true on success and set *lessOrEqualp to the result of
+ * a <= b operation. If it returns false, the sort terminates immediately with
+ * the false result. In this case the content of the array and scratch is
+ * arbitrary.
+ */
+template<typename T, typename Comparator>
+MOZ_MUST_USE bool
+MergeSort(T* array, size_t nelems, T* scratch, Comparator c)
+{
+    // Chunks up to this size are pre-sorted with insertion sort.
+    const size_t INS_SORT_LIMIT = 3;
+
+    if (nelems <= 1)
+        return true;
+
+    /*
+     * Apply insertion sort to small chunks to reduce the number of merge
+     * passes needed.
+     */
+    for (size_t lo = 0; lo < nelems; lo += INS_SORT_LIMIT) {
+        size_t hi = lo + INS_SORT_LIMIT;
+        if (hi >= nelems)
+            hi = nelems;
+        for (size_t i = lo + 1; i != hi; i++) {
+            for (size_t j = i; ;) {
+                bool lessOrEqual;
+                if (!c(array[j - 1], array[j], &lessOrEqual))
+                    return false;
+                if (lessOrEqual)
+                    break;
+                // Swap out-of-order neighbors and keep bubbling down.
+                T tmp = array[j - 1];
+                array[j - 1] = array[j];
+                array[j] = tmp;
+                if (--j == lo)
+                    break;
+            }
+        }
+    }
+
+    // Bottom-up merge passes, ping-ponging between |array| and |scratch|.
+    T* vec1 = array;
+    T* vec2 = scratch;
+    for (size_t run = INS_SORT_LIMIT; run < nelems; run *= 2) {
+        for (size_t lo = 0; lo < nelems; lo += 2 * run) {
+            size_t hi = lo + run;
+            if (hi >= nelems) {
+                // Only one (possibly partial) run remains; copy it through.
+                detail::CopyNonEmptyArray(vec2 + lo, vec1 + lo, nelems - lo);
+                break;
+            }
+            size_t run2 = (run <= nelems - hi) ? run : nelems - hi;
+            if (!detail::MergeArrayRuns(vec2 + lo, vec1 + lo, run, run2, c))
+                return false;
+        }
+        T* swap = vec1;
+        vec1 = vec2;
+        vec2 = swap;
+    }
+    // Ensure the sorted result ends up in |array|, not the scratch buffer.
+    if (vec1 == scratch)
+        detail::CopyNonEmptyArray(array, scratch, nelems);
+    return true;
+}
+
+} /* namespace js */
+
+#endif /* ds_Sort_h */
diff --git a/js/src/ds/SplayTree.h b/js/src/ds/SplayTree.h
new file mode 100644
index 000000000..f130706e5
--- /dev/null
+++ b/js/src/ds/SplayTree.h
@@ -0,0 +1,308 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef ds_SplayTree_h
+#define ds_SplayTree_h
+
+#include "ds/LifoAlloc.h"
+
+namespace js {
+
+/*
+ * Class which represents a splay tree with nodes allocated from a LifoAlloc.
+ * Splay trees are balanced binary search trees for which search, insert and
+ * remove are all amortized O(log n).
+ *
+ * T indicates the type of tree elements, C must have a static
+ * compare(const T&, const T&) method ordering the elements. As for LifoAlloc
+ * objects, T objects stored in the tree will not be explicitly destroyed.
+ */
+template <class T, class C>
+class SplayTree
+{
+    struct Node {
+        T item;
+        Node* left;
+        Node* right;
+        Node* parent;
+
+        explicit Node(const T& item)
+          : item(item), left(nullptr), right(nullptr), parent(nullptr)
+        {}
+    };
+
+    LifoAlloc* alloc;
+    Node* root;
+    // Removed nodes are chained through their |left| pointers for reuse,
+    // since LifoAlloc never frees individual allocations.
+    Node* freeList;
+
+#ifdef DEBUG
+    bool enableCheckCoherency;
+#endif
+
+    SplayTree(const SplayTree&) = delete;
+    SplayTree& operator=(const SplayTree&) = delete;
+
+  public:
+
+    explicit SplayTree(LifoAlloc* alloc = nullptr)
+      : alloc(alloc), root(nullptr), freeList(nullptr)
+#ifdef DEBUG
+      , enableCheckCoherency(true)
+#endif
+    {}
+
+    void setAllocator(LifoAlloc* alloc) {
+        this->alloc = alloc;
+    }
+
+    void disableCheckCoherency() {
+#ifdef DEBUG
+        enableCheckCoherency = false;
+#endif
+    }
+
+    bool empty() const {
+        return !root;
+    }
+
+    // Return a pointer to the matching item, or nullptr. Does not splay.
+    T* maybeLookup(const T& v)
+    {
+        if (!root)
+            return nullptr;
+        Node* last = lookup(v);
+        return (C::compare(v, last->item) == 0) ? &(last->item) : nullptr;
+    }
+
+    // If an item equal to |v| exists, store it in |*res| and return true.
+    // Splays the last node visited, as splay-tree lookups must.
+    bool contains(const T& v, T* res)
+    {
+        if (!root)
+            return false;
+        Node* last = lookup(v);
+        splay(last);
+        checkCoherency(root, nullptr);
+        if (C::compare(v, last->item) == 0) {
+            *res = last->item;
+            return true;
+        }
+        return false;
+    }
+
+    MOZ_MUST_USE bool insert(const T& v)
+    {
+        Node* element = allocateNode(v);
+        if (!element)
+            return false;
+
+        if (!root) {
+            root = element;
+            return true;
+        }
+        Node* last = lookup(v);
+        int cmp = C::compare(v, last->item);
+
+        // Don't tolerate duplicate elements.
+        MOZ_ASSERT(cmp);
+
+        Node*& parentPointer = (cmp < 0) ? last->left : last->right;
+        MOZ_ASSERT(!parentPointer);
+        parentPointer = element;
+        element->parent = last;
+
+        splay(element);
+        checkCoherency(root, nullptr);
+        return true;
+    }
+
+    // Remove the item equal to |v|, which must be present.
+    void remove(const T& v)
+    {
+        Node* last = lookup(v);
+        MOZ_ASSERT(last && C::compare(v, last->item) == 0);
+
+        splay(last);
+        MOZ_ASSERT(last == root);
+
+        // Find another node which can be swapped in for the root: either the
+        // rightmost child of the root's left, or the leftmost child of the
+        // root's right.
+        Node* swap;
+        Node* swapChild;
+        if (root->left) {
+            swap = root->left;
+            while (swap->right)
+                swap = swap->right;
+            swapChild = swap->left;
+        } else if (root->right) {
+            swap = root->right;
+            while (swap->left)
+                swap = swap->left;
+            swapChild = swap->right;
+        } else {
+            freeNode(root);
+            root = nullptr;
+            return;
+        }
+
+        // The selected node has at most one child, in swapChild. Detach it
+        // from the subtree by replacing it with that child.
+        if (swap == swap->parent->left)
+            swap->parent->left = swapChild;
+        else
+            swap->parent->right = swapChild;
+        if (swapChild)
+            swapChild->parent = swap->parent;
+
+        root->item = swap->item;
+        freeNode(swap);
+
+        checkCoherency(root, nullptr);
+    }
+
+    // Apply |op| to every item, in ascending order.
+    template <class Op>
+    void forEach(Op op)
+    {
+        forEachInner<Op>(op, root);
+    }
+
+  private:
+
+    // Find the node matching |v| or, if absent, the leaf where the search
+    // bottomed out. Requires a non-empty tree.
+    Node* lookup(const T& v)
+    {
+        MOZ_ASSERT(root);
+        Node* node = root;
+        Node* parent;
+        do {
+            parent = node;
+            int c = C::compare(v, node->item);
+            if (c == 0)
+                return node;
+            else if (c < 0)
+                node = node->left;
+            else
+                node = node->right;
+        } while (node);
+        return parent;
+    }
+
+    Node* allocateNode(const T& v)
+    {
+        // Prefer recycling a node from the free list.
+        Node* node = freeList;
+        if (node) {
+            freeList = node->left;
+            new(node) Node(v);
+            return node;
+        }
+        return alloc->new_<Node>(v);
+    }
+
+    void freeNode(Node* node)
+    {
+        node->left = freeList;
+        freeList = node;
+    }
+
+    void splay(Node* node)
+    {
+        // Rotate the element until it is at the root of the tree. Performing
+        // the rotations in this fashion preserves the amortized balancing of
+        // the tree.
+        MOZ_ASSERT(node);
+        while (node != root) {
+            Node* parent = node->parent;
+            if (parent == root) {
+                // Zig rotation.
+                rotate(node);
+                MOZ_ASSERT(node == root);
+                return;
+            }
+            Node* grandparent = parent->parent;
+            if ((parent->left == node) == (grandparent->left == parent)) {
+                // Zig-zig rotation.
+                rotate(parent);
+                rotate(node);
+            } else {
+                // Zig-zag rotation.
+                rotate(node);
+                rotate(node);
+            }
+        }
+    }
+
+    void rotate(Node* node)
+    {
+        // Rearrange nodes so that node becomes the parent of its current
+        // parent, while preserving the sortedness of the tree.
+        Node* parent = node->parent;
+        if (parent->left == node) {
+            //      x          y
+            //    y   c  ==> a   x
+            //   a b            b c
+            parent->left = node->right;
+            if (node->right)
+                node->right->parent = parent;
+            node->right = parent;
+        } else {
+            MOZ_ASSERT(parent->right == node);
+            //    x              y
+            //  a   y   ==>    x   c
+            //     b c        a b
+            parent->right = node->left;
+            if (node->left)
+                node->left->parent = parent;
+            node->left = parent;
+        }
+        node->parent = parent->parent;
+        parent->parent = node;
+        if (Node* grandparent = node->parent) {
+            if (grandparent->left == parent)
+                grandparent->left = node;
+            else
+                grandparent->right = node;
+        } else {
+            root = node;
+        }
+    }
+
+    template <class Op>
+    void forEachInner(Op op, Node* node)
+    {
+        if (!node)
+            return;
+
+        forEachInner<Op>(op, node->left);
+        op(node->item);
+        forEachInner<Op>(op, node->right);
+    }
+
+    // Debug-only: verify parent links and BST ordering of the subtree rooted
+    // at |node|; returns that subtree's maximum node (nullptr in release).
+    Node* checkCoherency(Node* node, Node* minimum)
+    {
+#ifdef DEBUG
+        if (!enableCheckCoherency)
+            return nullptr;
+        if (!node) {
+            MOZ_ASSERT(!root);
+            return nullptr;
+        }
+        MOZ_ASSERT_IF(!node->parent, node == root);
+        MOZ_ASSERT_IF(minimum, C::compare(minimum->item, node->item) < 0);
+        if (node->left) {
+            MOZ_ASSERT(node->left->parent == node);
+            Node* leftMaximum = checkCoherency(node->left, minimum);
+            MOZ_ASSERT(C::compare(leftMaximum->item, node->item) < 0);
+        }
+        if (node->right) {
+            MOZ_ASSERT(node->right->parent == node);
+            return checkCoherency(node->right, node);
+        }
+        return node;
+#else
+        return nullptr;
+#endif
+    }
+};
+
+} /* namespace js */
+
+#endif /* ds_SplayTree_h */
diff --git a/js/src/ds/TraceableFifo.h b/js/src/ds/TraceableFifo.h
new file mode 100644
index 000000000..5899c6799
--- /dev/null
+++ b/js/src/ds/TraceableFifo.h
@@ -0,0 +1,128 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef js_TraceableFifo_h
+#define js_TraceableFifo_h
+
+#include "ds/Fifo.h"
+#include "js/RootingAPI.h"
+#include "js/TracingAPI.h"
+
+namespace js {
+
+// A TraceableFifo is a Fifo with an additional trace method that knows how to
+// visit all of the items stored in the Fifo. For Fifos that contain GC things,
+// this is usually more convenient than manually iterating and marking the
+// contents.
+//
+// Most types of GC pointers as keys and values can be traced with no extra
+// infrastructure. For structs and non-gc-pointer members, ensure that there is
+// a specialization of GCPolicy<T> with an appropriate trace method available
+// to handle the custom type. Generic helpers can be found in
+// js/public/TracingAPI.h.
+//
+// Note that although this Fifo's trace will deal correctly with moved items, it
+// does not itself know when to barrier or trace items. To function properly it
+// must either be used with Rooted, or barriered and traced manually.
+template <typename T,
+          size_t MinInlineCapacity = 0,
+          typename AllocPolicy = TempAllocPolicy>
+class TraceableFifo : public js::Fifo<T, MinInlineCapacity, AllocPolicy>
+{
+    using Base = js::Fifo<T, MinInlineCapacity, AllocPolicy>;
+
+  public:
+    explicit TraceableFifo(AllocPolicy alloc = AllocPolicy()) : Base(alloc) {}
+
+    TraceableFifo(TraceableFifo&& rhs) : Base(mozilla::Move(rhs)) { }
+
+    // Move assignment. Base::operator= yields a Base&, which has no implicit
+    // conversion back to TraceableFifo&, so perform the base assignment and
+    // return *this explicitly (the previous code returned the Base& directly,
+    // which is ill-formed when this operator is instantiated).
+    TraceableFifo& operator=(TraceableFifo&& rhs) {
+        Base::operator=(mozilla::Move(rhs));
+        return *this;
+    }
+
+    TraceableFifo(const TraceableFifo&) = delete;
+    TraceableFifo& operator=(const TraceableFifo&) = delete;
+
+    // Trace every element in both of the underlying Fifo's queues so the GC
+    // sees (and may move) each stored item.
+    void trace(JSTracer* trc) {
+        for (size_t i = 0; i < this->front_.length(); ++i)
+            JS::GCPolicy<T>::trace(trc, &this->front_[i], "fifo element");
+        for (size_t i = 0; i < this->rear_.length(); ++i)
+            JS::GCPolicy<T>::trace(trc, &this->rear_[i], "fifo element");
+    }
+};
+
+// Read-only operations mixed into Rooted/Handle wrappers of a TraceableFifo;
+// the |Outer| wrapper supplies extract() to reach the fifo itself.
+template <typename Outer, typename T, size_t Capacity, typename AllocPolicy>
+class TraceableFifoOperations
+{
+    using TF = TraceableFifo<T, Capacity, AllocPolicy>;
+    const TF& fifo() const { return static_cast<const Outer*>(this)->extract(); }
+
+  public:
+    size_t length() const { return fifo().length(); }
+    bool empty() const { return fifo().empty(); }
+    const T& front() const { return fifo().front(); }
+};
+
+// Mutating operations mixed into Rooted/MutableHandle wrappers of a
+// TraceableFifo, layered on top of the read-only operations.
+template <typename Outer, typename T, size_t Capacity, typename AllocPolicy>
+class MutableTraceableFifoOperations
+  : public TraceableFifoOperations<Outer, T, Capacity, AllocPolicy>
+{
+    using TF = TraceableFifo<T, Capacity, AllocPolicy>;
+    TF& fifo() { return static_cast<Outer*>(this)->extract(); }
+
+  public:
+    T& front() { return fifo().front(); }
+
+    template<typename U> bool pushBack(U&& u) { return fifo().pushBack(mozilla::Forward<U>(u)); }
+    // Fixed: forward each argument of the pack individually. The previous
+    // mozilla::Forward<Args...>(args...) expanded the whole pack into a
+    // single Forward call, which fails to compile for more than one argument.
+    template<typename... Args> bool emplaceBack(Args&&... args) {
+        return fifo().emplaceBack(mozilla::Forward<Args>(args)...);
+    }
+
+    bool popFront() { return fifo().popFront(); }
+    void clear() { fifo().clear(); }
+};
+
+// Wire the fifo operations into JS::Rooted<TraceableFifo>: the operations
+// mixins call extract() to reach the fifo stored in the Rooted wrapper.
+template <typename A, size_t B, typename C>
+class RootedBase<TraceableFifo<A,B,C>>
+  : public MutableTraceableFifoOperations<JS::Rooted<TraceableFifo<A,B,C>>, A,B,C>
+{
+    using TF = TraceableFifo<A,B,C>;
+
+    friend class TraceableFifoOperations<JS::Rooted<TF>, A,B,C>;
+    const TF& extract() const { return *static_cast<const JS::Rooted<TF>*>(this)->address(); }
+
+    friend class MutableTraceableFifoOperations<JS::Rooted<TF>, A,B,C>;
+    TF& extract() { return *static_cast<JS::Rooted<TF>*>(this)->address(); }
+};
+
+// Wire the fifo operations into JS::MutableHandle<TraceableFifo>.
+template <typename A, size_t B, typename C>
+class MutableHandleBase<TraceableFifo<A,B,C>>
+  : public MutableTraceableFifoOperations<JS::MutableHandle<TraceableFifo<A,B,C>>, A,B,C>
+{
+    using TF = TraceableFifo<A,B,C>;
+
+    friend class TraceableFifoOperations<JS::MutableHandle<TF>, A,B,C>;
+    const TF& extract() const {
+        return *static_cast<const JS::MutableHandle<TF>*>(this)->address();
+    }
+
+    friend class MutableTraceableFifoOperations<JS::MutableHandle<TF>, A,B,C>;
+    TF& extract() { return *static_cast<JS::MutableHandle<TF>*>(this)->address(); }
+};
+
+// Wire the read-only fifo operations into JS::Handle<TraceableFifo>.
+template <typename A, size_t B, typename C>
+class HandleBase<TraceableFifo<A,B,C>>
+  : public TraceableFifoOperations<JS::Handle<TraceableFifo<A,B,C>>, A,B,C>
+{
+    using TF = TraceableFifo<A,B,C>;
+
+    friend class TraceableFifoOperations<JS::Handle<TF>, A,B,C>;
+    const TF& extract() const {
+        return *static_cast<const JS::Handle<TF>*>(this)->address();
+    }
+};
+
+} // namespace js
+
+#endif // js_TraceableFifo_h