path: root/js/src/gc/Zone.h
author     Matt A. Tobin <mattatobin@localhost.localdomain>   2018-02-02 04:16:08 -0500
committer  Matt A. Tobin <mattatobin@localhost.localdomain>   2018-02-02 04:16:08 -0500
commit     5f8de423f190bbb79a62f804151bc24824fa32d8 (patch)
tree       10027f336435511475e392454359edea8e25895d /js/src/gc/Zone.h
parent     49ee0794b5d912db1f95dce6eb52d781dc210db5 (diff)
Add m-esr52 at 52.6.0
Diffstat (limited to 'js/src/gc/Zone.h')
-rw-r--r--   js/src/gc/Zone.h   743
1 file changed, 743 insertions, 0 deletions
diff --git a/js/src/gc/Zone.h b/js/src/gc/Zone.h
new file mode 100644
index 000000000..a3a6dc07f
--- /dev/null
+++ b/js/src/gc/Zone.h
@@ -0,0 +1,743 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef gc_Zone_h
+#define gc_Zone_h
+
+#include "mozilla/Atomics.h"
+#include "mozilla/MemoryReporting.h"
+
+#include "jscntxt.h"
+
+#include "ds/SplayTree.h"
+#include "gc/FindSCCs.h"
+#include "gc/GCRuntime.h"
+#include "js/GCHashTable.h"
+#include "js/TracingAPI.h"
+#include "vm/MallocProvider.h"
+#include "vm/TypeInference.h"
+
+namespace js {
+
+namespace jit {
+class JitZone;
+} // namespace jit
+
+namespace gc {
+
+// This class encapsulates the data that determines when we need to do a zone GC.
+class ZoneHeapThreshold
+{
+ // The "growth factor" for computing our next thresholds after a GC.
+ double gcHeapGrowthFactor_;
+
+ // GC trigger threshold for allocations on the GC heap.
+ mozilla::Atomic<size_t, mozilla::Relaxed> gcTriggerBytes_;
+
+ public:
+ ZoneHeapThreshold()
+ : gcHeapGrowthFactor_(3.0),
+ gcTriggerBytes_(0)
+ {}
+
+ double gcHeapGrowthFactor() const { return gcHeapGrowthFactor_; }
+ size_t gcTriggerBytes() const { return gcTriggerBytes_; }
+ double allocTrigger(bool highFrequencyGC) const;
+
+ void updateAfterGC(size_t lastBytes, JSGCInvocationKind gckind,
+ const GCSchedulingTunables& tunables, const GCSchedulingState& state,
+ const AutoLockGC& lock);
+ void updateForRemovedArena(const GCSchedulingTunables& tunables);
+
+ private:
+ static double computeZoneHeapGrowthFactorForHeapSize(size_t lastBytes,
+ const GCSchedulingTunables& tunables,
+ const GCSchedulingState& state);
+ static size_t computeZoneTriggerBytes(double growthFactor, size_t lastBytes,
+ JSGCInvocationKind gckind,
+ const GCSchedulingTunables& tunables,
+ const AutoLockGC& lock);
+};
+
+struct ZoneComponentFinder : public ComponentFinder<JS::Zone, ZoneComponentFinder>
+{
+ ZoneComponentFinder(uintptr_t sl, AutoLockForExclusiveAccess& lock)
+ : ComponentFinder<JS::Zone, ZoneComponentFinder>(sl), lock(lock)
+ {}
+
+ AutoLockForExclusiveAccess& lock;
+};
+
+struct UniqueIdGCPolicy {
+ static bool needsSweep(Cell** cell, uint64_t* value);
+};
+
+// Maps a Cell* to a unique, 64-bit id.
+using UniqueIdMap = GCHashMap<Cell*,
+ uint64_t,
+ PointerHasher<Cell*, 3>,
+ SystemAllocPolicy,
+ UniqueIdGCPolicy>;
+
+extern uint64_t NextCellUniqueId(JSRuntime* rt);
+
+template <typename T>
+class ZoneCellIter;
+
+} // namespace gc
+} // namespace js
+
+namespace JS {
+
+// A zone is a collection of compartments. Every compartment belongs to exactly
+// one zone. In Firefox, there is roughly one zone per tab along with a system
+// zone for everything else. Zones mainly serve as boundaries for garbage
+// collection. Unlike compartments, they have no special security properties.
+//
+// Every GC thing belongs to exactly one zone. GC things from the same zone but
+// different compartments can share an arena (4k page). GC things from different
+// zones cannot be stored in the same arena. The garbage collector is capable of
+// collecting one zone at a time; it cannot collect at the granularity of
+// compartments.
+//
+// GC things are tied to zones and compartments as follows:
+//
+// - JSObjects belong to a compartment and cannot be shared between
+// compartments. If an object needs to point to a JSObject in a different
+// compartment, regardless of zone, it must go through a cross-compartment
+// wrapper. Each compartment keeps track of its outgoing wrappers in a table.
+// JSObjects find their compartment via their ObjectGroup.
+//
+// - JSStrings do not belong to any particular compartment, but they do belong
+// to a zone. Thus, two different compartments in the same zone can point to a
+// JSString. When a string needs to be wrapped, we copy it if it's in a
+// different zone and do nothing if it's in the same zone. Thus, transferring
+// strings within a zone is very efficient.
+//
+// - Shapes and base shapes belong to a zone and are shared between compartments
+// in that zone where possible. Accessor shapes store getter and setter
+// JSObjects which belong to a single compartment, so these shapes and all
+// their descendants can't be shared with other compartments.
+//
+// - Scripts are also compartment-local and cannot be shared. A script points to
+// its compartment.
+//
+// - ObjectGroup and JitCode objects belong to a compartment and cannot be
+// shared. There is no mechanism to obtain the compartment from a JitCode
+// object.
+//
+// A zone remains alive as long as any GC things in the zone are alive. A
+// compartment remains alive as long as any JSObjects, scripts, shapes, or base
+// shapes within it are alive.
+//
+// We always guarantee that a zone has at least one live compartment by refusing
+// to delete the last compartment in a live zone.
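+//
+// As a concrete illustration of the wrapping rules above (a hypothetical
+// sketch, not code from this file; |cx|, |otherCompartmentObject|, and
+// |valueFromElsewhere| are assumed to exist):
+//
+//     JSAutoCompartment ac(cx, otherCompartmentObject); // enter a compartment
+//     JS::RootedValue v(cx, valueFromElsewhere);
+//     if (!JS_WrapValue(cx, &v))                        // objects get a cross-
+//         return false;                                 // compartment wrapper;
+//                                                       // same-zone strings are
+//                                                       // reused without copying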
+struct Zone : public JS::shadow::Zone,
+ public js::gc::GraphNodeBase<JS::Zone>,
+ public js::MallocProvider<JS::Zone>
+{
+ explicit Zone(JSRuntime* rt);
+ ~Zone();
+ MOZ_MUST_USE bool init(bool isSystem);
+
+ void findOutgoingEdges(js::gc::ZoneComponentFinder& finder);
+
+ void discardJitCode(js::FreeOp* fop, bool discardBaselineCode = true);
+
+ void addSizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf,
+ size_t* typePool,
+ size_t* baselineStubsOptimized,
+ size_t* uniqueIdMap,
+ size_t* shapeTables);
+
+ void resetGCMallocBytes();
+ void setGCMaxMallocBytes(size_t value);
+ void updateMallocCounter(size_t nbytes) {
+ // Note: this code may be run from worker threads. We tolerate any
+ // thread races when updating gcMallocBytes.
+ gcMallocBytes -= ptrdiff_t(nbytes);
+ if (MOZ_UNLIKELY(isTooMuchMalloc()))
+ onTooMuchMalloc();
+ }
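+
+    // For example (a hypothetical sketch; |zone| and |nbytes| are assumed to
+    // be in scope):
+    //
+    //     zone->updateMallocCounter(nbytes);  // deducts nbytes from gcMallocBytes;
+    //                                         // when the counter falls to or below
+    //                                         // zero, onTooMuchMalloc() is called,
+    //                                         // which is expected to request a GC.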
+
+ // Iterate over all cells in the zone. See the definition of ZoneCellIter
+ // in jsgcinlines.h for the possible arguments and documentation.
+ template <typename T, typename... Args>
+ js::gc::ZoneCellIter<T> cellIter(Args&&... args) {
+ return js::gc::ZoneCellIter<T>(const_cast<Zone*>(this), mozilla::Forward<Args>(args)...);
+ }
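+
+    // For example (a hypothetical sketch; assumes JSScript is a cell kind
+    // supported by ZoneCellIter and |zone| is a Zone*):
+    //
+    //     for (auto script = zone->cellIter<JSScript>(); !script.done(); script.next())
+    //         MOZ_ASSERT(script->zone() == zone);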
+
+ bool isTooMuchMalloc() const { return gcMallocBytes <= 0; }
+ void onTooMuchMalloc();
+
+ MOZ_MUST_USE void* onOutOfMemory(js::AllocFunction allocFunc, size_t nbytes,
+ void* reallocPtr = nullptr) {
+ if (!js::CurrentThreadCanAccessRuntime(runtime_))
+ return nullptr;
+ return runtimeFromMainThread()->onOutOfMemory(allocFunc, nbytes, reallocPtr);
+ }
+ void reportAllocationOverflow() { js::ReportAllocationOverflow(nullptr); }
+
+ void beginSweepTypes(js::FreeOp* fop, bool releaseTypes);
+
+ bool hasMarkedCompartments();
+
+ void scheduleGC() { MOZ_ASSERT(!runtimeFromMainThread()->isHeapBusy()); gcScheduled_ = true; }
+ void unscheduleGC() { gcScheduled_ = false; }
+ bool isGCScheduled() { return gcScheduled_ && canCollect(); }
+
+ void setPreservingCode(bool preserving) { gcPreserveCode_ = preserving; }
+ bool isPreservingCode() const { return gcPreserveCode_; }
+
+ bool canCollect();
+
+ void notifyObservingDebuggers();
+
+ enum GCState {
+ NoGC,
+ Mark,
+ MarkGray,
+ Sweep,
+ Finished,
+ Compact
+ };
+ void setGCState(GCState state) {
+ MOZ_ASSERT(runtimeFromMainThread()->isHeapBusy());
+ MOZ_ASSERT_IF(state != NoGC, canCollect());
+ gcState_ = state;
+ if (state == Finished)
+ notifyObservingDebuggers();
+ }
+
+ bool isCollecting() const {
+ if (runtimeFromMainThread()->isHeapCollecting())
+ return gcState_ != NoGC;
+ else
+ return needsIncrementalBarrier();
+ }
+
+ bool isCollectingFromAnyThread() const {
+ if (runtimeFromAnyThread()->isHeapCollecting())
+ return gcState_ != NoGC;
+ else
+ return needsIncrementalBarrier();
+ }
+
+ // If this returns true, all object tracing must be done with a GC marking
+ // tracer.
+ bool requireGCTracer() const {
+ JSRuntime* rt = runtimeFromAnyThread();
+ return rt->isHeapMajorCollecting() && !rt->gc.isHeapCompacting() && gcState_ != NoGC;
+ }
+
+ bool isGCMarking() {
+ if (runtimeFromMainThread()->isHeapCollecting())
+ return gcState_ == Mark || gcState_ == MarkGray;
+ else
+ return needsIncrementalBarrier();
+ }
+
+ GCState gcState() const { return gcState_; }
+ bool wasGCStarted() const { return gcState_ != NoGC; }
+ bool isGCMarkingBlack() { return gcState_ == Mark; }
+ bool isGCMarkingGray() { return gcState_ == MarkGray; }
+ bool isGCSweeping() { return gcState_ == Sweep; }
+ bool isGCFinished() { return gcState_ == Finished; }
+ bool isGCCompacting() { return gcState_ == Compact; }
+ bool isGCSweepingOrCompacting() { return gcState_ == Sweep || gcState_ == Compact; }
+
+ // Get a number that is incremented whenever this zone is collected, and
+ // possibly at other times too.
+ uint64_t gcNumber();
+
+ bool compileBarriers() const { return compileBarriers(needsIncrementalBarrier()); }
+ bool compileBarriers(bool needsIncrementalBarrier) const {
+ return needsIncrementalBarrier ||
+ runtimeFromMainThread()->hasZealMode(js::gc::ZealMode::VerifierPre);
+ }
+
+ enum ShouldUpdateJit { DontUpdateJit, UpdateJit };
+ void setNeedsIncrementalBarrier(bool needs, ShouldUpdateJit updateJit);
+ const bool* addressOfNeedsIncrementalBarrier() const { return &needsIncrementalBarrier_; }
+
+ js::jit::JitZone* getJitZone(JSContext* cx) { return jitZone_ ? jitZone_ : createJitZone(cx); }
+ js::jit::JitZone* jitZone() { return jitZone_; }
+
+ bool isAtomsZone() const { return runtimeFromAnyThread()->isAtomsZone(this); }
+ bool isSelfHostingZone() const { return runtimeFromAnyThread()->isSelfHostingZone(this); }
+
+ void prepareForCompacting();
+
+#ifdef DEBUG
+    // For testing purposes, return the index of the zone group in which this
+    // zone was swept during the last GC.
+    unsigned lastZoneGroupIndex() { return gcLastZoneGroupIndex; }
+#endif
+
+ using DebuggerVector = js::Vector<js::Debugger*, 0, js::SystemAllocPolicy>;
+
+ private:
+ DebuggerVector* debuggers;
+
+ void sweepBreakpoints(js::FreeOp* fop);
+ void sweepUniqueIds(js::FreeOp* fop);
+ void sweepWeakMaps();
+ void sweepCompartments(js::FreeOp* fop, bool keepAtleastOne, bool lastGC);
+
+ js::jit::JitZone* createJitZone(JSContext* cx);
+
+ bool isQueuedForBackgroundSweep() {
+ return isOnList();
+ }
+
+    // Side map for storing unique ids for cells, independent of address.
+ js::gc::UniqueIdMap uniqueIds_;
+
+ public:
+ bool hasDebuggers() const { return debuggers && debuggers->length(); }
+ DebuggerVector* getDebuggers() const { return debuggers; }
+ DebuggerVector* getOrCreateDebuggers(JSContext* cx);
+
+ void clearTables();
+
+ /*
+ * When true, skip calling the metadata callback. We use this:
+ * - to avoid invoking the callback recursively;
+ * - to avoid observing lazy prototype setup (which confuses callbacks that
+ * want to use the types being set up!);
+     * - to avoid attaching allocation stacks to allocation stack nodes, which
+     *   is silly;
+     * and so on.
+ */
+ bool suppressAllocationMetadataBuilder;
+
+ js::gc::ArenaLists arenas;
+
+ js::TypeZone types;
+
+ /* Live weakmaps in this zone. */
+ mozilla::LinkedList<js::WeakMapBase> gcWeakMapList;
+
+ // The set of compartments in this zone.
+ typedef js::Vector<JSCompartment*, 1, js::SystemAllocPolicy> CompartmentVector;
+ CompartmentVector compartments;
+
+ // This zone's gray roots.
+ typedef js::Vector<js::gc::Cell*, 0, js::SystemAllocPolicy> GrayRootVector;
+ GrayRootVector gcGrayRoots;
+
+ // This zone's weak edges found via graph traversal during marking,
+ // preserved for re-scanning during sweeping.
+ using WeakEdges = js::Vector<js::gc::TenuredCell**, 0, js::SystemAllocPolicy>;
+ WeakEdges gcWeakRefs;
+
+ // List of non-ephemeron weak containers to sweep during beginSweepingZoneGroup.
+ mozilla::LinkedList<WeakCache<void*>> weakCaches_;
+ void registerWeakCache(WeakCache<void*>* cachep) {
+ weakCaches_.insertBack(cachep);
+ }
+
+ /*
+ * Mapping from not yet marked keys to a vector of all values that the key
+ * maps to in any live weak map.
+ */
+ js::gc::WeakKeyTable gcWeakKeys;
+
+ // A set of edges from this zone to other zones.
+ //
+ // This is used during GC while calculating zone groups to record edges that
+ // can't be determined by examining this zone by itself.
+ ZoneSet gcZoneGroupEdges;
+
+    // Keep track of all TypeDescr and related objects in this zone.
+ // This is used by the GC to trace them all first when compacting, since the
+ // TypedObject trace hook may access these objects.
+ using TypeDescrObjectSet = js::GCHashSet<js::HeapPtr<JSObject*>,
+ js::MovableCellHasher<js::HeapPtr<JSObject*>>,
+ js::SystemAllocPolicy>;
+ JS::WeakCache<TypeDescrObjectSet> typeDescrObjects;
+
+ // Malloc counter to measure memory pressure for GC scheduling. It runs from
+ // gcMaxMallocBytes down to zero. This counter should be used only when it's
+ // not possible to know the size of a free.
+ mozilla::Atomic<ptrdiff_t, mozilla::ReleaseAcquire> gcMallocBytes;
+
+ // GC trigger threshold for allocations on the C heap.
+ size_t gcMaxMallocBytes;
+
+ // Whether a GC has been triggered as a result of gcMallocBytes falling
+ // below zero.
+ //
+ // This should be a bool, but Atomic only supports 32-bit and pointer-sized
+ // types.
+ mozilla::Atomic<uint32_t, mozilla::ReleaseAcquire> gcMallocGCTriggered;
+
+ // Track heap usage under this Zone.
+ js::gc::HeapUsage usage;
+
+ // Thresholds used to trigger GC.
+ js::gc::ZoneHeapThreshold threshold;
+
+ // Amount of data to allocate before triggering a new incremental slice for
+ // the current GC.
+ size_t gcDelayBytes;
+
+ // Shared Shape property tree.
+ js::PropertyTree propertyTree;
+
+ // Set of all unowned base shapes in the Zone.
+ JS::WeakCache<js::BaseShapeSet> baseShapes;
+
+ // Set of initial shapes in the Zone. For certain prototypes -- namely,
+ // those of various builtin classes -- there are two entries: one for a
+ // lookup via TaggedProto, and one for a lookup via JSProtoKey. See
+ // InitialShapeProto.
+ JS::WeakCache<js::InitialShapeSet> initialShapes;
+
+#ifdef JSGC_HASH_TABLE_CHECKS
+ void checkInitialShapesTableAfterMovingGC();
+ void checkBaseShapeTableAfterMovingGC();
+#endif
+ void fixupInitialShapeTable();
+ void fixupAfterMovingGC();
+
+ // Per-zone data for use by an embedder.
+ void* data;
+
+ bool isSystem;
+
+ mozilla::Atomic<bool> usedByExclusiveThread;
+
+ // True when there are active frames.
+ bool active;
+
+#ifdef DEBUG
+ unsigned gcLastZoneGroupIndex;
+#endif
+
+ static js::HashNumber UniqueIdToHash(uint64_t uid) {
+ return js::HashNumber(uid >> 32) ^ js::HashNumber(uid & 0xFFFFFFFF);
+ }
+
+ // Creates a HashNumber based on getUniqueId. Returns false on OOM.
+ MOZ_MUST_USE bool getHashCode(js::gc::Cell* cell, js::HashNumber* hashp) {
+ uint64_t uid;
+ if (!getUniqueId(cell, &uid))
+ return false;
+ *hashp = UniqueIdToHash(uid);
+ return true;
+ }
+
+ // Puts an existing UID in |uidp|, or creates a new UID for this Cell and
+ // puts that into |uidp|. Returns false on OOM.
+ MOZ_MUST_USE bool getUniqueId(js::gc::Cell* cell, uint64_t* uidp) {
+ MOZ_ASSERT(uidp);
+ MOZ_ASSERT(js::CurrentThreadCanAccessZone(this));
+
+ // Get an existing uid, if one has been set.
+ auto p = uniqueIds_.lookupForAdd(cell);
+ if (p) {
+ *uidp = p->value();
+ return true;
+ }
+
+ // Set a new uid on the cell.
+ *uidp = js::gc::NextCellUniqueId(runtimeFromAnyThread());
+ if (!uniqueIds_.add(p, cell, *uidp))
+ return false;
+
+        // If the cell is in the nursery (hopefully unlikely), we must tell the
+        // nursery about it so that it can sweep the uid if the cell does not
+        // get tenured.
+ if (!runtimeFromAnyThread()->gc.nursery.addedUniqueIdToCell(cell)) {
+ uniqueIds_.remove(cell);
+ return false;
+ }
+
+ return true;
+ }
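+
+    // For example (a hypothetical sketch; |cell| is assumed to be a live
+    // js::gc::Cell* in this zone):
+    //
+    //     uint64_t uid;
+    //     if (!zone->getUniqueId(cell, &uid))
+    //         return false;  // OOM; no id was recorded for |cell|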
+
+ js::HashNumber getHashCodeInfallible(js::gc::Cell* cell) {
+ return UniqueIdToHash(getUniqueIdInfallible(cell));
+ }
+
+ uint64_t getUniqueIdInfallible(js::gc::Cell* cell) {
+ uint64_t uid;
+ js::AutoEnterOOMUnsafeRegion oomUnsafe;
+ if (!getUniqueId(cell, &uid))
+ oomUnsafe.crash("failed to allocate uid");
+ return uid;
+ }
+
+ // Return true if this cell has a UID associated with it.
+ MOZ_MUST_USE bool hasUniqueId(js::gc::Cell* cell) {
+ MOZ_ASSERT(js::CurrentThreadCanAccessZone(this));
+ return uniqueIds_.has(cell);
+ }
+
+ // Transfer an id from another cell. This must only be called on behalf of a
+ // moving GC. This method is infallible.
+ void transferUniqueId(js::gc::Cell* tgt, js::gc::Cell* src) {
+ MOZ_ASSERT(src != tgt);
+ MOZ_ASSERT(!IsInsideNursery(tgt));
+ MOZ_ASSERT(CurrentThreadCanAccessRuntime(runtimeFromMainThread()));
+ MOZ_ASSERT(js::CurrentThreadCanAccessZone(this));
+ uniqueIds_.rekeyIfMoved(src, tgt);
+ }
+
+ // Remove any unique id associated with this Cell.
+ void removeUniqueId(js::gc::Cell* cell) {
+ MOZ_ASSERT(js::CurrentThreadCanAccessZone(this));
+ uniqueIds_.remove(cell);
+ }
+
+ // When finished parsing off-thread, transfer any UIDs we created in the
+ // off-thread zone into the target zone.
+ void adoptUniqueIds(JS::Zone* source) {
+ js::AutoEnterOOMUnsafeRegion oomUnsafe;
+ for (js::gc::UniqueIdMap::Enum e(source->uniqueIds_); !e.empty(); e.popFront()) {
+ MOZ_ASSERT(!uniqueIds_.has(e.front().key()));
+ if (!uniqueIds_.put(e.front().key(), e.front().value()))
+ oomUnsafe.crash("failed to transfer unique ids from off-main-thread");
+ }
+ source->uniqueIds_.clear();
+ }
+
+ JSContext* contextFromMainThread() {
+ return runtime_->contextFromMainThread();
+ }
+
+#ifdef JSGC_HASH_TABLE_CHECKS
+ // Assert that the UniqueId table has been redirected successfully.
+ void checkUniqueIdTableAfterMovingGC();
+#endif
+
+ bool keepShapeTables() const {
+ return keepShapeTables_;
+ }
+ void setKeepShapeTables(bool b) {
+ keepShapeTables_ = b;
+ }
+
+ private:
+ js::jit::JitZone* jitZone_;
+
+ GCState gcState_;
+ bool gcScheduled_;
+ bool gcPreserveCode_;
+ bool jitUsingBarriers_;
+ bool keepShapeTables_;
+
+ // Allow zones to be linked into a list
+ friend class js::gc::ZoneList;
+ static Zone * const NotOnList;
+ Zone* listNext_;
+ bool isOnList() const;
+ Zone* nextZone() const;
+
+ friend bool js::CurrentThreadCanAccessZone(Zone* zone);
+ friend class js::gc::GCRuntime;
+};
+
+} // namespace JS
+
+namespace js {
+
+// Using the atoms zone without holding the exclusive access lock is dangerous
+// because worker threads may be using it simultaneously. Therefore, it's
+// better to skip the atoms zone when iterating over zones. If you need to
+// iterate over the atoms zone, consider taking the exclusive access lock first.
+enum ZoneSelector {
+ WithAtoms,
+ SkipAtoms
+};
+
+class ZonesIter
+{
+ gc::AutoEnterIteration iterMarker;
+ JS::Zone** it;
+ JS::Zone** end;
+
+ public:
+ ZonesIter(JSRuntime* rt, ZoneSelector selector) : iterMarker(&rt->gc) {
+ it = rt->gc.zones.begin();
+ end = rt->gc.zones.end();
+
+ if (selector == SkipAtoms) {
+ MOZ_ASSERT(atAtomsZone(rt));
+ it++;
+ }
+ }
+
+ bool atAtomsZone(JSRuntime* rt);
+
+ bool done() const { return it == end; }
+
+ void next() {
+ MOZ_ASSERT(!done());
+ do {
+ it++;
+ } while (!done() && (*it)->usedByExclusiveThread);
+ }
+
+ JS::Zone* get() const {
+ MOZ_ASSERT(!done());
+ return *it;
+ }
+
+ operator JS::Zone*() const { return get(); }
+ JS::Zone* operator->() const { return get(); }
+};
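+
+// For example (a hypothetical usage sketch; |rt| is assumed to be a JSRuntime*):
+//
+//     for (ZonesIter zone(rt, SkipAtoms); !zone.done(); zone.next()) {
+//         // |zone| converts to a JS::Zone* here.
+//     }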
+
+struct CompartmentsInZoneIter
+{
+ explicit CompartmentsInZoneIter(JS::Zone* zone) : zone(zone) {
+ it = zone->compartments.begin();
+ }
+
+ bool done() const {
+ MOZ_ASSERT(it);
+ return it < zone->compartments.begin() ||
+ it >= zone->compartments.end();
+ }
+ void next() {
+ MOZ_ASSERT(!done());
+ it++;
+ }
+
+ JSCompartment* get() const {
+ MOZ_ASSERT(it);
+ return *it;
+ }
+
+ operator JSCompartment*() const { return get(); }
+ JSCompartment* operator->() const { return get(); }
+
+ private:
+ JS::Zone* zone;
+ JSCompartment** it;
+
+ CompartmentsInZoneIter()
+ : zone(nullptr), it(nullptr)
+ {}
+
+ // This is for the benefit of CompartmentsIterT::comp.
+ friend class mozilla::Maybe<CompartmentsInZoneIter>;
+};
+
+// This iterator iterates over all the compartments in a given set of zones. The
+// set of zones is determined by iterating ZonesIterT.
+template<class ZonesIterT>
+class CompartmentsIterT
+{
+ gc::AutoEnterIteration iterMarker;
+ ZonesIterT zone;
+ mozilla::Maybe<CompartmentsInZoneIter> comp;
+
+ public:
+ explicit CompartmentsIterT(JSRuntime* rt)
+ : iterMarker(&rt->gc), zone(rt)
+ {
+ if (zone.done())
+ comp.emplace();
+ else
+ comp.emplace(zone);
+ }
+
+ CompartmentsIterT(JSRuntime* rt, ZoneSelector selector)
+ : iterMarker(&rt->gc), zone(rt, selector)
+ {
+ if (zone.done())
+ comp.emplace();
+ else
+ comp.emplace(zone);
+ }
+
+ bool done() const { return zone.done(); }
+
+ void next() {
+ MOZ_ASSERT(!done());
+ MOZ_ASSERT(!comp.ref().done());
+ comp->next();
+ if (comp->done()) {
+ comp.reset();
+ zone.next();
+ if (!zone.done())
+ comp.emplace(zone);
+ }
+ }
+
+ JSCompartment* get() const {
+ MOZ_ASSERT(!done());
+ return *comp;
+ }
+
+ operator JSCompartment*() const { return get(); }
+ JSCompartment* operator->() const { return get(); }
+};
+
+typedef CompartmentsIterT<ZonesIter> CompartmentsIter;
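+
+// For example (a hypothetical usage sketch; |rt| is assumed to be a JSRuntime*):
+//
+//     for (CompartmentsIter comp(rt, SkipAtoms); !comp.done(); comp.next()) {
+//         // |comp| converts to a JSCompartment* here.
+//     }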
+
+/*
+ * Allocation policy that uses Zone::pod_malloc and friends, so that memory
+ * pressure is accounted for on the zone. This is suitable for memory associated
+ * with GC things allocated in the zone.
+ *
+ * Since it doesn't hold a JSContext (those may not live long enough), it can't
+ * report out-of-memory conditions itself; the caller must check for OOM and
+ * take the appropriate action.
+ *
+ * FIXME bug 647103 - replace these *AllocPolicy names.
+ */
+class ZoneAllocPolicy
+{
+ Zone* const zone;
+
+ public:
+ MOZ_IMPLICIT ZoneAllocPolicy(Zone* zone) : zone(zone) {}
+
+ template <typename T>
+ T* maybe_pod_malloc(size_t numElems) {
+ return zone->maybe_pod_malloc<T>(numElems);
+ }
+
+ template <typename T>
+ T* maybe_pod_calloc(size_t numElems) {
+ return zone->maybe_pod_calloc<T>(numElems);
+ }
+
+ template <typename T>
+ T* maybe_pod_realloc(T* p, size_t oldSize, size_t newSize) {
+ return zone->maybe_pod_realloc<T>(p, oldSize, newSize);
+ }
+
+ template <typename T>
+ T* pod_malloc(size_t numElems) {
+ return zone->pod_malloc<T>(numElems);
+ }
+
+ template <typename T>
+ T* pod_calloc(size_t numElems) {
+ return zone->pod_calloc<T>(numElems);
+ }
+
+ template <typename T>
+ T* pod_realloc(T* p, size_t oldSize, size_t newSize) {
+ return zone->pod_realloc<T>(p, oldSize, newSize);
+ }
+
+ void free_(void* p) { js_free(p); }
+ void reportAllocOverflow() const {}
+
+ MOZ_MUST_USE bool checkSimulatedOOM() const {
+ return !js::oom::ShouldFailWithOOM();
+ }
+};
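+
+// For example (a hypothetical sketch; |zone| is assumed to be a Zone*). A
+// container parameterized on ZoneAllocPolicy charges its memory to that zone:
+//
+//     ZoneAllocPolicy policy(zone);
+//     Vector<uint32_t, 0, ZoneAllocPolicy> vec(policy);
+//     if (!vec.append(1))
+//         return false;  // caller handles OOM; ZoneAllocPolicy does not report it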
+
+} // namespace js
+
+#endif // gc_Zone_h