author    | Matt A. Tobin <mattatobin@localhost.localdomain> | 2018-02-02 04:16:08 -0500
committer | Matt A. Tobin <mattatobin@localhost.localdomain> | 2018-02-02 04:16:08 -0500
commit    | 5f8de423f190bbb79a62f804151bc24824fa32d8 (patch)
tree      | 10027f336435511475e392454359edea8e25895d /tools/memory-profiler/GCHeapProfilerImpl.cpp
parent    | 49ee0794b5d912db1f95dce6eb52d781dc210db5 (diff)
Add m-esr52 at 52.6.0
Diffstat (limited to 'tools/memory-profiler/GCHeapProfilerImpl.cpp')
-rw-r--r-- | tools/memory-profiler/GCHeapProfilerImpl.cpp | 168
1 file changed, 168 insertions, 0 deletions
diff --git a/tools/memory-profiler/GCHeapProfilerImpl.cpp b/tools/memory-profiler/GCHeapProfilerImpl.cpp
new file mode 100644
index 000000000..528a05501
--- /dev/null
+++ b/tools/memory-profiler/GCHeapProfilerImpl.cpp
@@ -0,0 +1,168 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "GCHeapProfilerImpl.h"
+
+#include "UncensoredAllocator.h"
+
+namespace mozilla {
+
+GCHeapProfilerImpl::GCHeapProfilerImpl()
+{
+  mLock = PR_NewLock();
+  mMarking = false;
+}
+
+GCHeapProfilerImpl::~GCHeapProfilerImpl()
+{
+  if (mLock) {
+    PR_DestroyLock(mLock);
+  }
+}
+
+nsTArray<nsCString>
+GCHeapProfilerImpl::GetNames() const
+{
+  return mTraceTable.GetNames();
+}
+
+nsTArray<TrieNode>
+GCHeapProfilerImpl::GetTraces() const
+{
+  return mTraceTable.GetTraces();
+}
+
+const nsTArray<AllocEvent>&
+GCHeapProfilerImpl::GetEvents() const
+{
+  return mAllocEvents;
+}
+
+void
+GCHeapProfilerImpl::reset()
+{
+  mTraceTable.Reset();
+  mAllocEvents.Clear();
+  mNurseryEntries.Clear();
+  mTenuredEntriesFG.Clear();
+  mTenuredEntriesBG.Clear();
+}
+
+void
+GCHeapProfilerImpl::sampleTenured(void* addr, uint32_t size)
+{
+  SampleInternal(addr, size, mTenuredEntriesFG);
+}
+
+void
+GCHeapProfilerImpl::sampleNursery(void* addr, uint32_t size)
+{
+  SampleInternal(addr, size, mNurseryEntries);
+}
+
+void
+GCHeapProfilerImpl::markTenuredStart()
+{
+  AutoUseUncensoredAllocator ua;
+  AutoMPLock lock(mLock);
+  if (!mMarking) {
+    mMarking = true;
+    mTenuredEntriesFG.SwapElements(mTenuredEntriesBG);
+    MOZ_ASSERT(mTenuredEntriesFG.Count() == 0);
+  }
+}
+
+void
+GCHeapProfilerImpl::markTenured(void* addr)
+{
+  AutoUseUncensoredAllocator ua;
+  AutoMPLock lock(mLock);
+  if (mMarking) {
+    AllocEntry entry;
+    if (mTenuredEntriesBG.Get(addr, &entry)) {
+      entry.mMarked = true;
+      mTenuredEntriesBG.Put(addr, entry);
+    }
+  }
+}
+
+void
+GCHeapProfilerImpl::sweepTenured()
+{
+  AutoUseUncensoredAllocator ua;
+  AutoMPLock lock(mLock);
+  if (mMarking) {
+    mMarking = false;
+    for (auto iter = mTenuredEntriesBG.Iter(); !iter.Done(); iter.Next()) {
+      if (iter.Data().mMarked) {
+        iter.Data().mMarked = false;
+        mTenuredEntriesFG.Put(iter.Key(), iter.Data());
+      } else {
+        AllocEvent& oldEvent = mAllocEvents[iter.Data().mEventIdx];
+        AllocEvent newEvent(oldEvent.mTraceIdx, -oldEvent.mSize, TimeStamp::Now());
+        mAllocEvents.AppendElement(newEvent);
+      }
+    }
+    mTenuredEntriesBG.Clear();
+  }
+}
+
+void
+GCHeapProfilerImpl::sweepNursery()
+{
+  AutoUseUncensoredAllocator ua;
+  AutoMPLock lock(mLock);
+  for (auto iter = mNurseryEntries.Iter(); !iter.Done(); iter.Next()) {
+    AllocEvent& oldEvent = mAllocEvents[iter.Data().mEventIdx];
+    AllocEvent newEvent(oldEvent.mTraceIdx, -oldEvent.mSize, TimeStamp::Now());
+    mAllocEvents.AppendElement(newEvent);
+  }
+  mNurseryEntries.Clear();
+}
+
+void
+GCHeapProfilerImpl::moveNurseryToTenured(void* addrOld, void* addrNew)
+{
+  AutoUseUncensoredAllocator ua;
+  AutoMPLock lock(mLock);
+  AllocEntry entryOld;
+  if (!mNurseryEntries.Get(addrOld, &entryOld)) {
+    return;
+  }
+
+  // Because the tenured heap is sampled, the address might already be there.
+  // If not, the address is inserted with the old event.
+  AllocEntry tenuredEntryOld;
+  if (!mTenuredEntriesFG.Get(addrNew, &tenuredEntryOld)) {
+    mTenuredEntriesFG.Put(addrNew, AllocEntry(entryOld.mEventIdx));
+  } else {
+    // The lookup above found an existing entry, which means a tenured
+    // sample was already recorded at this address. We choose to ignore the
+    // new event by setting its size to zero and pointing the newly
+    // allocated address to the old event.
+    // An event of size zero will be skipped when reporting.
+    mAllocEvents[entryOld.mEventIdx].mSize = 0;
+    tenuredEntryOld.mEventIdx = entryOld.mEventIdx;
+    mTenuredEntriesFG.Put(addrNew, tenuredEntryOld);
+  }
+  mNurseryEntries.Remove(addrOld);
+}
+
+void
+GCHeapProfilerImpl::SampleInternal(void* aAddr, uint32_t aSize, AllocMap& aTable)
+{
+  AutoUseUncensoredAllocator ua;
+  AutoMPLock lock(mLock);
+  size_t nSamples = AddBytesSampled(aSize);
+  if (nSamples > 0) {
+    nsTArray<nsCString> trace = GetStacktrace();
+    AllocEvent ai(mTraceTable.Insert(trace), nSamples * mSampleSize, TimeStamp::Now());
+    aTable.Put(aAddr, AllocEntry(mAllocEvents.Length()));
+    mAllocEvents.AppendElement(ai);
+  }
+}
+
+} // namespace mozilla
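The patch implements a sampled mark-and-sweep ledger: each sampled allocation appends a positive-size AllocEvent, GC marking flags the addresses that survive, and sweeping appends a matching negative-size event for everything left unmarked, so the running sum of event sizes tracks live sampled bytes over time. The standalone sketch below models only that bookkeeping with standard containers; it is a minimal illustration, not UXP code: the HeapModel name and its methods are hypothetical, Gecko's AllocMap, NSPR locking, and stack-trace capture are omitted, and the byte-sampling step (AddBytesSampled) is reduced to recording every allocation.

// Minimal model of the mark-and-sweep event ledger used by
// GCHeapProfilerImpl. All names here are stand-ins for illustration.
#include <chrono>
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <unordered_map>
#include <vector>

struct AllocEvent {
  size_t traceIdx;   // index into a stack-trace table (not modeled here)
  int64_t size;      // positive on allocation, negative on free
  std::chrono::steady_clock::time_point when;
};

struct AllocEntry {
  size_t eventIdx;   // the allocation event this address belongs to
  bool marked = false;
};

class HeapModel {
 public:
  // Like sampleTenured(): tie the address to a fresh positive-size event.
  void recordSample(void* addr, int64_t size, size_t traceIdx) {
    entriesFG_[addr] = AllocEntry{events_.size()};
    events_.push_back({traceIdx, size, std::chrono::steady_clock::now()});
  }

  // Like markTenuredStart(): snapshot the live table so marking and new
  // samples do not interfere with each other.
  void markStart() {
    entriesBG_ = std::move(entriesFG_);
    entriesFG_.clear();
  }

  // Like markTenured(): flag an address the GC reported as alive.
  void mark(void* addr) {
    auto it = entriesBG_.find(addr);
    if (it != entriesBG_.end()) {
      it->second.marked = true;
    }
  }

  // Like sweepTenured(): survivors return to the foreground table;
  // everything unmarked gets a negative-size "free" event appended.
  void sweep() {
    for (auto& [addr, entry] : entriesBG_) {
      if (entry.marked) {
        entry.marked = false;
        entriesFG_[addr] = entry;
      } else {
        AllocEvent old = events_[entry.eventIdx];  // copy before push_back
        events_.push_back({old.traceIdx, -old.size,
                           std::chrono::steady_clock::now()});
      }
    }
    entriesBG_.clear();
  }

  // Summing the append-only log yields the currently live sampled bytes.
  int64_t liveBytes() const {
    int64_t sum = 0;
    for (const AllocEvent& e : events_) sum += e.size;
    return sum;
  }

 private:
  std::unordered_map<void*, AllocEntry> entriesFG_;  // current cycle
  std::unordered_map<void*, AllocEntry> entriesBG_;  // snapshot being swept
  std::vector<AllocEvent> events_;
};

int main() {
  int a = 0, b = 0;  // stack variables stand in for heap addresses
  HeapModel heap;
  heap.recordSample(&a, 32, 0);
  heap.recordSample(&b, 64, 1);
  heap.markStart();
  heap.mark(&a);  // only &a survives this GC cycle
  heap.sweep();   // &b gets a -64 event appended
  std::printf("live sampled bytes: %lld\n",
              static_cast<long long>(heap.liveBytes()));  // prints 32
}

The append-only log is the key design choice the sketch mirrors: frees never erase history, so a reporter can replay events in timestamp order to reconstruct heap growth, and the zero-size events produced by moveNurseryToTenured drop out naturally because, as the patch's comments note, zero-size events are skipped when reporting.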