diff options
Diffstat (limited to 'toolkit/components/crashes')
-rw-r--r-- | toolkit/components/crashes/CrashManager.jsm | 1351 | ||||
-rw-r--r-- | toolkit/components/crashes/CrashManagerTest.jsm | 186 | ||||
-rw-r--r-- | toolkit/components/crashes/CrashService.js | 71 | ||||
-rw-r--r-- | toolkit/components/crashes/CrashService.manifest | 3 | ||||
-rw-r--r-- | toolkit/components/crashes/docs/crash-events.rst | 176 | ||||
-rw-r--r-- | toolkit/components/crashes/docs/index.rst | 24 | ||||
-rw-r--r-- | toolkit/components/crashes/moz.build | 31 | ||||
-rw-r--r-- | toolkit/components/crashes/nsICrashService.idl | 30 | ||||
-rw-r--r-- | toolkit/components/crashes/tests/xpcshell/.eslintrc.js | 7 | ||||
-rw-r--r-- | toolkit/components/crashes/tests/xpcshell/test_crash_manager.js | 494 | ||||
-rw-r--r-- | toolkit/components/crashes/tests/xpcshell/test_crash_service.js | 31 | ||||
-rw-r--r-- | toolkit/components/crashes/tests/xpcshell/test_crash_store.js | 587 | ||||
-rw-r--r-- | toolkit/components/crashes/tests/xpcshell/xpcshell.ini | 8 |
13 files changed, 2999 insertions, 0 deletions
diff --git a/toolkit/components/crashes/CrashManager.jsm b/toolkit/components/crashes/CrashManager.jsm new file mode 100644 index 000000000..3aac33254 --- /dev/null +++ b/toolkit/components/crashes/CrashManager.jsm @@ -0,0 +1,1351 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +"use strict"; + +const {classes: Cc, interfaces: Ci, utils: Cu} = Components; +const myScope = this; + +Cu.import("resource://gre/modules/Log.jsm", this); +Cu.import("resource://gre/modules/osfile.jsm", this); +Cu.import("resource://gre/modules/Promise.jsm", this); +Cu.import("resource://gre/modules/Services.jsm", this); +Cu.import("resource://gre/modules/Task.jsm", this); +Cu.import("resource://gre/modules/Timer.jsm", this); +Cu.import("resource://gre/modules/XPCOMUtils.jsm", this); +Cu.import("resource://gre/modules/TelemetryController.jsm"); +Cu.import("resource://gre/modules/KeyValueParser.jsm"); + +this.EXPORTED_SYMBOLS = [ + "CrashManager", +]; + +/** + * How long to wait after application startup before crash event files are + * automatically aggregated. + * + * We defer aggregation for performance reasons, as we don't want too many + * services competing for I/O immediately after startup. + */ +const AGGREGATE_STARTUP_DELAY_MS = 57000; + +const MILLISECONDS_IN_DAY = 24 * 60 * 60 * 1000; + +// Converts Date to days since UNIX epoch. +// This was copied from /services/metrics.storage.jsm. The implementation +// does not account for leap seconds. +function dateToDays(date) { + return Math.floor(date.getTime() / MILLISECONDS_IN_DAY); +} + + +/** + * A gateway to crash-related data. + * + * This type is generic and can be instantiated any number of times. + * However, most applications will typically only have one instance + * instantiated and that instance will point to profile and user appdata + * directories. 
+ *
+ * Instances are created by passing an object with properties.
+ * Recognized properties are:
+ *
+ * pendingDumpsDir (string) (required)
+ * Where dump files that haven't been uploaded are located.
+ *
+ * submittedDumpsDir (string) (required)
+ * Where records of uploaded dumps are located.
+ *
+ * eventsDirs (array)
+ * Directories (defined as strings) where events files are written. This
+ * instance will collect events from files in the directories specified.
+ *
+ * storeDir (string)
+ * Directory we will use for our data store. This instance will write
+ * data files into the directory specified.
+ *
+ * telemetryStoreSizeKey (string)
+ * Telemetry histogram to report store size under.
+ */
+this.CrashManager = function (options) {
+ for (let k of ["pendingDumpsDir", "submittedDumpsDir", "eventsDirs",
+ "storeDir"]) {
+ if (!(k in options)) {
+ throw new Error("Required key not present in options: " + k);
+ }
+ }
+
+ this._log = Log.repository.getLogger("Crashes.CrashManager");
+
+ for (let k in options) {
+ let v = options[k];
+
+ switch (k) {
+ case "pendingDumpsDir":
+ this._pendingDumpsDir = v;
+ break;
+
+ case "submittedDumpsDir":
+ this._submittedDumpsDir = v;
+ break;
+
+ case "eventsDirs":
+ this._eventsDirs = v;
+ break;
+
+ case "storeDir":
+ this._storeDir = v;
+ break;
+
+ case "telemetryStoreSizeKey":
+ this._telemetryStoreSizeKey = v;
+ break;
+
+ default:
+ throw new Error("Unknown property in options: " + k);
+ }
+ }
+
+ // Promise for in-progress aggregation operation. We store it on the
+ // object so it can be returned for in-progress operations.
+ this._aggregatePromise = null;
+
+ // The CrashStore currently attached to this object.
+ this._store = null;
+
+ // A Task to retrieve the store. This is needed to avoid races when
+ // _getStore() is called multiple times in a short interval.
+ this._getStoreTask = null;
+
+ // The timer controlling the expiration of the CrashStore instance. 
+ this._storeTimer = null; + + // This is a semaphore that prevents the store from being freed by our + // timer-based resource freeing mechanism. + this._storeProtectedCount = 0; +}; + +this.CrashManager.prototype = Object.freeze({ + // A crash in the main process. + PROCESS_TYPE_MAIN: "main", + + // A crash in a content process. + PROCESS_TYPE_CONTENT: "content", + + // A crash in a plugin process. + PROCESS_TYPE_PLUGIN: "plugin", + + // A crash in a Gecko media plugin process. + PROCESS_TYPE_GMPLUGIN: "gmplugin", + + // A crash in the GPU process. + PROCESS_TYPE_GPU: "gpu", + + // A real crash. + CRASH_TYPE_CRASH: "crash", + + // A hang. + CRASH_TYPE_HANG: "hang", + + // Submission result values. + SUBMISSION_RESULT_OK: "ok", + SUBMISSION_RESULT_FAILED: "failed", + + DUMP_REGEX: /^([0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12})\.dmp$/i, + SUBMITTED_REGEX: /^bp-(?:hr-)?([0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12})\.txt$/i, + ALL_REGEX: /^(.*)$/, + + // How long the store object should persist in memory before being + // automatically garbage collected. + STORE_EXPIRATION_MS: 60 * 1000, + + // Number of days after which a crash with no activity will get purged. + PURGE_OLDER_THAN_DAYS: 180, + + // The following are return codes for individual event file processing. + // File processed OK. + EVENT_FILE_SUCCESS: "ok", + // The event appears to be malformed. + EVENT_FILE_ERROR_MALFORMED: "malformed", + // The type of event is unknown. + EVENT_FILE_ERROR_UNKNOWN_EVENT: "unknown-event", + + /** + * Obtain a list of all dumps pending upload. + * + * The returned value is a promise that resolves to an array of objects + * on success. Each element in the array has the following properties: + * + * id (string) + * The ID of the crash (a UUID). 
+ *
+ * path (string)
+ * The filename of the crash (<UUID.dmp>)
+ *
+ * date (Date)
+ * When this dump was created
+ *
+ * The returned array is sorted by the modified time of the file backing
+ * the entry, oldest to newest.
+ *
+ * @return Promise<Array>
+ */
+ pendingDumps: function () {
+ return this._getDirectoryEntries(this._pendingDumpsDir, this.DUMP_REGEX);
+ },
+
+ /**
+ * Obtain a list of all dump files corresponding to submitted crashes.
+ *
+ * The returned value is a promise that resolves to an Array of
+ * objects. Each object has the following properties:
+ *
+ * path (string)
+ * The path of the file this entry comes from.
+ *
+ * id (string)
+ * The crash UUID.
+ *
+ * date (Date)
+ * The (estimated) date this crash was submitted.
+ *
+ * The returned array is sorted by the modified time of the file backing
+ * the entry, oldest to newest.
+ *
+ * @return Promise<Array>
+ */
+ submittedDumps: function () {
+ return this._getDirectoryEntries(this._submittedDumpsDir,
+ this.SUBMITTED_REGEX);
+ },
+
+ /**
+ * Aggregates "loose" events files into the unified "database."
+ *
+ * This function should be called periodically to collect metadata from
+ * all events files into the central data store maintained by this manager.
+ *
+ * Once events have been stored in the backing store the corresponding
+ * source files are deleted.
+ *
+ * Only one aggregation operation is allowed to occur at a time. If this
+ * is called when an existing aggregation is in progress, the promise for
+ * the original call will be returned.
+ *
+ * @return promise<int> The number of event files that were examined. 
+ */ + aggregateEventsFiles: function () { + if (this._aggregatePromise) { + return this._aggregatePromise; + } + + return this._aggregatePromise = Task.spawn(function* () { + if (this._aggregatePromise) { + return this._aggregatePromise; + } + + try { + let unprocessedFiles = yield this._getUnprocessedEventsFiles(); + + let deletePaths = []; + let needsSave = false; + + this._storeProtectedCount++; + for (let entry of unprocessedFiles) { + try { + let result = yield this._processEventFile(entry); + + switch (result) { + case this.EVENT_FILE_SUCCESS: + needsSave = true; + // Fall through. + + case this.EVENT_FILE_ERROR_MALFORMED: + deletePaths.push(entry.path); + break; + + case this.EVENT_FILE_ERROR_UNKNOWN_EVENT: + break; + + default: + Cu.reportError("Unhandled crash event file return code. Please " + + "file a bug: " + result); + } + } catch (ex) { + if (ex instanceof OS.File.Error) { + this._log.warn("I/O error reading " + entry.path, ex); + } else { + // We should never encounter an exception. This likely represents + // a coding error because all errors should be detected and + // converted to return codes. + // + // If we get here, report the error and delete the source file + // so we don't see it again. + Cu.reportError("Exception when processing crash event file: " + + Log.exceptionStr(ex)); + deletePaths.push(entry.path); + } + } + } + + if (needsSave) { + let store = yield this._getStore(); + yield store.save(); + } + + for (let path of deletePaths) { + try { + yield OS.File.remove(path); + } catch (ex) { + this._log.warn("Error removing event file (" + path + ")", ex); + } + } + + return unprocessedFiles.length; + + } finally { + this._aggregatePromise = false; + this._storeProtectedCount--; + } + }.bind(this)); + }, + + /** + * Prune old crash data. + * + * @param date + * (Date) The cutoff point for pruning. Crashes without data newer + * than this will be pruned. 
+ */
+ pruneOldCrashes: function (date) {
+ return Task.spawn(function* () {
+ let store = yield this._getStore();
+ store.pruneOldCrashes(date);
+ yield store.save();
+ }.bind(this));
+ },
+
+ /**
+ * Run tasks that should be periodically performed.
+ */
+ runMaintenanceTasks: function () {
+ return Task.spawn(function* () {
+ yield this.aggregateEventsFiles();
+
+ let offset = this.PURGE_OLDER_THAN_DAYS * MILLISECONDS_IN_DAY;
+ yield this.pruneOldCrashes(new Date(Date.now() - offset));
+ }.bind(this));
+ },
+
+ /**
+ * Schedule maintenance tasks for some point in the future.
+ *
+ * @param delay
+ * (integer) Delay in milliseconds when maintenance should occur.
+ */
+ scheduleMaintenance: function (delay) {
+ let deferred = Promise.defer();
+
+ setTimeout(() => {
+ this.runMaintenanceTasks().then(deferred.resolve, deferred.reject);
+ }, delay);
+
+ return deferred.promise;
+ },
+
+ /**
+ * Record the occurrence of a crash.
+ *
+ * This method skips event files altogether and writes directly and
+ * immediately to the manager's data store.
+ *
+ * @param processType (string) One of the PROCESS_TYPE constants.
+ * @param crashType (string) One of the CRASH_TYPE constants.
+ * @param id (string) Crash ID. Likely a UUID.
+ * @param date (Date) When the crash occurred.
+ * @param metadata (dictionary) Crash metadata, may be empty.
+ *
+ * @return promise<null> Resolved when the store has been saved.
+ */
+ addCrash: function (processType, crashType, id, date, metadata) {
+ return Task.spawn(function* () {
+ let store = yield this._getStore();
+ if (store.addCrash(processType, crashType, id, date, metadata)) {
+ yield store.save();
+ }
+ }.bind(this));
+ },
+
+ /**
+ * Record the remote ID for a crash.
+ *
+ * @param crashID (string) Crash ID. Likely a UUID.
+ * @param remoteID (string) Server/Breakpad ID.
+ *
+ * @return boolean True if the remote ID was recorded. 
+ */ + setRemoteCrashID: Task.async(function* (crashID, remoteID) { + let store = yield this._getStore(); + if (store.setRemoteCrashID(crashID, remoteID)) { + yield store.save(); + } + }), + + /** + * Generate a submission ID for use with addSubmission{Attempt,Result}. + */ + generateSubmissionID() { + return "sub-" + Cc["@mozilla.org/uuid-generator;1"] + .getService(Ci.nsIUUIDGenerator) + .generateUUID().toString().slice(1, -1); + }, + + /** + * Record the occurrence of a submission attempt for a crash. + * + * @param crashID (string) Crash ID. Likely a UUID. + * @param submissionID (string) Submission ID. Likely a UUID. + * @param date (Date) When the attempt occurred. + * + * @return boolean True if the attempt was recorded and false if not. + */ + addSubmissionAttempt: Task.async(function* (crashID, submissionID, date) { + let store = yield this._getStore(); + if (store.addSubmissionAttempt(crashID, submissionID, date)) { + yield store.save(); + } + }), + + /** + * Record the occurrence of a submission result for a crash. + * + * @param crashID (string) Crash ID. Likely a UUID. + * @param submissionID (string) Submission ID. Likely a UUID. + * @param date (Date) When the submission result was obtained. + * @param result (string) One of the SUBMISSION_RESULT constants. + * + * @return boolean True if the result was recorded and false if not. + */ + addSubmissionResult: Task.async(function* (crashID, submissionID, date, result) { + let store = yield this._getStore(); + if (store.addSubmissionResult(crashID, submissionID, date, result)) { + yield store.save(); + } + }), + + /** + * Set the classification of a crash. + * + * @param crashID (string) Crash ID. Likely a UUID. + * @param classifications (array) Crash classifications. + * + * @return boolean True if the data was recorded and false if not. 
+ */ + setCrashClassifications: Task.async(function* (crashID, classifications) { + let store = yield this._getStore(); + if (store.setCrashClassifications(crashID, classifications)) { + yield store.save(); + } + }), + + /** + * Obtain the paths of all unprocessed events files. + * + * The promise-resolved array is sorted by file mtime, oldest to newest. + */ + _getUnprocessedEventsFiles: function () { + return Task.spawn(function* () { + let entries = []; + + for (let dir of this._eventsDirs) { + for (let e of yield this._getDirectoryEntries(dir, this.ALL_REGEX)) { + entries.push(e); + } + } + + entries.sort((a, b) => { return a.date - b.date; }); + + return entries; + }.bind(this)); + }, + + // See docs/crash-events.rst for the file format specification. + _processEventFile: function (entry) { + return Task.spawn(function* () { + let data = yield OS.File.read(entry.path); + let store = yield this._getStore(); + + let decoder = new TextDecoder(); + data = decoder.decode(data); + + let type, time; + let start = 0; + for (let i = 0; i < 2; i++) { + let index = data.indexOf("\n", start); + if (index == -1) { + return this.EVENT_FILE_ERROR_MALFORMED; + } + + let sub = data.substring(start, index); + switch (i) { + case 0: + type = sub; + break; + case 1: + time = sub; + try { + time = parseInt(time, 10); + } catch (ex) { + return this.EVENT_FILE_ERROR_MALFORMED; + } + } + + start = index + 1; + } + let date = new Date(time * 1000); + let payload = data.substring(start); + + return this._handleEventFilePayload(store, entry, type, date, payload); + }.bind(this)); + }, + + _handleEventFilePayload: function (store, entry, type, date, payload) { + // The payload types and formats are documented in docs/crash-events.rst. + // Do not change the format of an existing type. Instead, invent a new + // type. + // DO NOT ADD NEW TYPES WITHOUT DOCUMENTING! 
+ let lines = payload.split("\n"); + + switch (type) { + case "crash.main.1": + if (lines.length > 1) { + this._log.warn("Multiple lines unexpected in payload for " + + entry.path); + return this.EVENT_FILE_ERROR_MALFORMED; + } + // fall-through + case "crash.main.2": + let crashID = lines[0]; + let metadata = parseKeyValuePairsFromLines(lines.slice(1)); + store.addCrash(this.PROCESS_TYPE_MAIN, this.CRASH_TYPE_CRASH, + crashID, date, metadata); + + // If we have a saved environment, use it. Otherwise report + // the current environment. + let crashEnvironment = null; + let sessionId = null; + let stackTraces = null; + let reportMeta = Cu.cloneInto(metadata, myScope); + if ('TelemetryEnvironment' in reportMeta) { + try { + crashEnvironment = JSON.parse(reportMeta.TelemetryEnvironment); + } catch (e) { + Cu.reportError(e); + } + delete reportMeta.TelemetryEnvironment; + } + if ('TelemetrySessionId' in reportMeta) { + sessionId = reportMeta.TelemetrySessionId; + delete reportMeta.TelemetrySessionId; + } + if ('StackTraces' in reportMeta) { + try { + stackTraces = JSON.parse(reportMeta.StackTraces); + } catch (e) { + Cu.reportError(e); + } + delete reportMeta.StackTraces; + } + TelemetryController.submitExternalPing("crash", + { + version: 1, + crashDate: date.toISOString().slice(0, 10), // YYYY-MM-DD + sessionId: sessionId, + crashId: entry.id, + stackTraces: stackTraces, + metadata: reportMeta, + hasCrashEnvironment: (crashEnvironment !== null), + }, + { + retentionDays: 180, + addClientId: true, + addEnvironment: true, + overrideEnvironment: crashEnvironment, + }); + break; + + case "crash.submission.1": + if (lines.length == 3) { + let [crashID, result, remoteID] = lines; + store.addCrash(this.PROCESS_TYPE_MAIN, this.CRASH_TYPE_CRASH, + crashID, date); + + let submissionID = this.generateSubmissionID(); + let succeeded = result === "true"; + store.addSubmissionAttempt(crashID, submissionID, date); + store.addSubmissionResult(crashID, submissionID, date, + succeeded 
? this.SUBMISSION_RESULT_OK : + this.SUBMISSION_RESULT_FAILED); + if (succeeded) { + store.setRemoteCrashID(crashID, remoteID); + } + } else { + return this.EVENT_FILE_ERROR_MALFORMED; + } + break; + + default: + return this.EVENT_FILE_ERROR_UNKNOWN_EVENT; + } + + return this.EVENT_FILE_SUCCESS; + }, + + /** + * The resolved promise is an array of objects with the properties: + * + * path -- String filename + * id -- regexp.match()[1] (likely the crash ID) + * date -- Date mtime of the file + */ + _getDirectoryEntries: function (path, re) { + return Task.spawn(function* () { + try { + yield OS.File.stat(path); + } catch (ex) { + if (!(ex instanceof OS.File.Error) || !ex.becauseNoSuchFile) { + throw ex; + } + return []; + } + + let it = new OS.File.DirectoryIterator(path); + let entries = []; + + try { + yield it.forEach((entry, index, it) => { + if (entry.isDir) { + return undefined; + } + + let match = re.exec(entry.name); + if (!match) { + return undefined; + } + + return OS.File.stat(entry.path).then((info) => { + entries.push({ + path: entry.path, + id: match[1], + date: info.lastModificationDate, + }); + }); + }); + } finally { + it.close(); + } + + entries.sort((a, b) => { return a.date - b.date; }); + + return entries; + }.bind(this)); + }, + + _getStore: function () { + if (this._getStoreTask) { + return this._getStoreTask; + } + + return this._getStoreTask = Task.spawn(function* () { + try { + if (!this._store) { + yield OS.File.makeDir(this._storeDir, { + ignoreExisting: true, + unixMode: OS.Constants.libc.S_IRWXU, + }); + + let store = new CrashStore(this._storeDir, + this._telemetryStoreSizeKey); + yield store.load(); + + this._store = store; + this._storeTimer = Cc["@mozilla.org/timer;1"] + .createInstance(Ci.nsITimer); + } + + // The application can go long periods without interacting with the + // store. Since the store takes up resources, we automatically "free" + // the store after inactivity so resources can be returned to the + // system. 
 We do this via a timer and a mechanism that tracks when the
+ // store is being accessed.
+ this._storeTimer.cancel();
+
+ // This callback frees resources from the store unless the store
+ // is protected from freeing by some other process.
+ let timerCB = function () {
+ if (this._storeProtectedCount) {
+ this._storeTimer.initWithCallback(timerCB, this.STORE_EXPIRATION_MS,
+ this._storeTimer.TYPE_ONE_SHOT);
+ return;
+ }
+
+ // We kill the reference that we hold. GC will kill it later. If
+ // someone else holds a reference, that will prevent GC until that
+ // reference is gone.
+ this._store = null;
+ this._storeTimer = null;
+ }.bind(this);
+
+ this._storeTimer.initWithCallback(timerCB, this.STORE_EXPIRATION_MS,
+ this._storeTimer.TYPE_ONE_SHOT);
+
+ return this._store;
+ } finally {
+ this._getStoreTask = null;
+ }
+ }.bind(this));
+ },
+
+ /**
+ * Obtain information about all known crashes.
+ *
+ * Returns an array of CrashRecord instances. Instances are read-only.
+ */
+ getCrashes: function () {
+ return Task.spawn(function* () {
+ let store = yield this._getStore();
+
+ return store.crashes;
+ }.bind(this));
+ },
+
+ getCrashCountsByDay: function () {
+ return Task.spawn(function* () {
+ let store = yield this._getStore();
+
+ return store._countsByDay;
+ }.bind(this));
+ },
+});
+
+var gCrashManager;
+
+/**
+ * Interface to storage of crash data.
+ *
+ * This type handles storage of crash metadata. It exists as a separate type
+ * from the crash manager for performance reasons: since all crash metadata
+ * needs to be loaded into memory for access, we wish to easily dispose of all
+ * associated memory when this data is no longer needed. Having an isolated
+ * object whose references can easily be lost facilitates that simple disposal.
+ *
+ * When metadata is updated, the caller must explicitly persist the changes
+ * to disk. This prevents excessive I/O during updates.
+ *
+ * The store has a mechanism for ensuring it doesn't grow too large. 
A ceiling + * is placed on the number of daily events that can occur for events that can + * occur with relatively high frequency, notably plugin crashes and hangs + * (plugins can enter cycles where they repeatedly crash). If we've reached + * the high water mark and new data arrives, it's silently dropped. + * However, the count of actual events is always preserved. This allows + * us to report on the severity of problems beyond the storage threshold. + * + * Main process crashes are excluded from limits because they are both + * important and should be rare. + * + * @param storeDir (string) + * Directory the store should be located in. + * @param telemetrySizeKey (string) + * The telemetry histogram that should be used to store the size + * of the data file. + */ +function CrashStore(storeDir, telemetrySizeKey) { + this._storeDir = storeDir; + this._telemetrySizeKey = telemetrySizeKey; + + this._storePath = OS.Path.join(storeDir, "store.json.mozlz4"); + + // Holds the read data from disk. + this._data = null; + + // Maps days since UNIX epoch to a Map of event types to counts. + // This data structure is populated when the JSON file is loaded + // and is also updated when new events are added. + this._countsByDay = new Map(); +} + +CrashStore.prototype = Object.freeze({ + // Maximum number of events to store per day. This establishes a + // ceiling on the per-type/per-day records that will be stored. + HIGH_WATER_DAILY_THRESHOLD: 100, + + /** + * Reset all data. + */ + reset() { + this._data = { + v: 1, + crashes: new Map(), + corruptDate: null, + }; + this._countsByDay = new Map(); + }, + + /** + * Load data from disk. + * + * @return Promise + */ + load: function () { + return Task.spawn(function* () { + // Loading replaces data. 
+ this.reset(); + + try { + let decoder = new TextDecoder(); + let data = yield OS.File.read(this._storePath, {compression: "lz4"}); + data = JSON.parse(decoder.decode(data)); + + if (data.corruptDate) { + this._data.corruptDate = new Date(data.corruptDate); + } + + // actualCounts is used to validate that the derived counts by + // days stored in the payload matches up to actual data. + let actualCounts = new Map(); + + // In the past, submissions were stored as separate crash records + // with an id of e.g. "someID-submission". If we find IDs ending + // with "-submission", we will need to convert the data to be stored + // as actual submissions. + // + // The old way of storing submissions was used from FF33 - FF34. We + // drop this old data on the floor. + for (let id in data.crashes) { + if (id.endsWith("-submission")) { + continue; + } + + let crash = data.crashes[id]; + let denormalized = this._denormalize(crash); + + denormalized.submissions = new Map(); + if (crash.submissions) { + for (let submissionID in crash.submissions) { + let submission = crash.submissions[submissionID]; + denormalized.submissions.set(submissionID, + this._denormalize(submission)); + } + } + + this._data.crashes.set(id, denormalized); + + let key = dateToDays(denormalized.crashDate) + "-" + denormalized.type; + actualCounts.set(key, (actualCounts.get(key) || 0) + 1); + + // If we have an OOM size, count the crash as an OOM in addition to + // being a main process crash. + if (denormalized.metadata && + denormalized.metadata.OOMAllocationSize) { + let oomKey = key + "-oom"; + actualCounts.set(oomKey, (actualCounts.get(oomKey) || 0) + 1); + } + + } + + // The validation in this loop is arguably not necessary. We perform + // it as a defense against unknown bugs. 
+ for (let dayKey in data.countsByDay) { + let day = parseInt(dayKey, 10); + for (let type in data.countsByDay[day]) { + this._ensureCountsForDay(day); + + let count = data.countsByDay[day][type]; + let key = day + "-" + type; + + // If the payload says we have data for a given day but we + // don't, the payload is wrong. Ignore it. + if (!actualCounts.has(key)) { + continue; + } + + // If we encountered more data in the payload than what the + // data structure says, use the proper value. + count = Math.max(count, actualCounts.get(key)); + + this._countsByDay.get(day).set(type, count); + } + } + } catch (ex) { + // Missing files (first use) are allowed. + if (!(ex instanceof OS.File.Error) || !ex.becauseNoSuchFile) { + // If we can't load for any reason, mark a corrupt date in the instance + // and swallow the error. + // + // The marking of a corrupted file is intentionally not persisted to + // disk yet. Instead, we wait until the next save(). This is to give + // non-permanent failures the opportunity to recover on their own. + this._data.corruptDate = new Date(); + } + } + }.bind(this)); + }, + + /** + * Save data to disk. + * + * @return Promise<null> + */ + save: function () { + return Task.spawn(function* () { + if (!this._data) { + return; + } + + let normalized = { + // The version should be incremented whenever the format + // changes. + v: 1, + // Maps crash IDs to objects defining the crash. + crashes: {}, + // Maps days since UNIX epoch to objects mapping event types to + // counts. This is a mirror of this._countsByDay. e.g. + // { + // 15000: { + // "main-crash": 2, + // "plugin-crash": 1 + // } + // } + countsByDay: {}, + + // When the store was last corrupted. 
+ corruptDate: null, + }; + + if (this._data.corruptDate) { + normalized.corruptDate = this._data.corruptDate.getTime(); + } + + for (let [id, crash] of this._data.crashes) { + let c = this._normalize(crash); + + c.submissions = {}; + for (let [submissionID, submission] of crash.submissions) { + c.submissions[submissionID] = this._normalize(submission); + } + + normalized.crashes[id] = c; + } + + for (let [day, m] of this._countsByDay) { + normalized.countsByDay[day] = {}; + for (let [type, count] of m) { + normalized.countsByDay[day][type] = count; + } + } + + let encoder = new TextEncoder(); + let data = encoder.encode(JSON.stringify(normalized)); + let size = yield OS.File.writeAtomic(this._storePath, data, { + tmpPath: this._storePath + ".tmp", + compression: "lz4"}); + if (this._telemetrySizeKey) { + Services.telemetry.getHistogramById(this._telemetrySizeKey).add(size); + } + }.bind(this)); + }, + + /** + * Normalize an object into one fit for serialization. + * + * This function along with _denormalize() serve to hack around the + * default handling of Date JSON serialization because Date serialization + * is undefined by JSON. + * + * Fields ending with "Date" are assumed to contain Date instances. + * We convert these to milliseconds since epoch on output and back to + * Date on input. + */ + _normalize: function (o) { + let normalized = {}; + + for (let k in o) { + let v = o[k]; + if (v && k.endsWith("Date")) { + normalized[k] = v.getTime(); + } else { + normalized[k] = v; + } + } + + return normalized; + }, + + /** + * Convert a serialized object back to its native form. + */ + _denormalize: function (o) { + let n = {}; + + for (let k in o) { + let v = o[k]; + if (v && k.endsWith("Date")) { + n[k] = new Date(parseInt(v, 10)); + } else { + n[k] = v; + } + } + + return n; + }, + + /** + * Prune old crash data. + * + * Crashes without recent activity are pruned from the store so the + * size of the store is not unbounded. 
If there is activity on a crash, + * that activity will keep the crash and all its data around for longer. + * + * @param date + * (Date) The cutoff at which data will be pruned. If an entry + * doesn't have data newer than this, it will be pruned. + */ + pruneOldCrashes: function (date) { + for (let crash of this.crashes) { + let newest = crash.newestDate; + if (!newest || newest.getTime() < date.getTime()) { + this._data.crashes.delete(crash.id); + } + } + }, + + /** + * Date the store was last corrupted and required a reset. + * + * May be null (no corruption has ever occurred) or a Date instance. + */ + get corruptDate() { + return this._data.corruptDate; + }, + + /** + * The number of distinct crashes tracked. + */ + get crashesCount() { + return this._data.crashes.size; + }, + + /** + * All crashes tracked. + * + * This is an array of CrashRecord. + */ + get crashes() { + let crashes = []; + for (let [, crash] of this._data.crashes) { + crashes.push(new CrashRecord(crash)); + } + + return crashes; + }, + + /** + * Obtain a particular crash from its ID. + * + * A CrashRecord will be returned if the crash exists. null will be returned + * if the crash is unknown. + */ + getCrash: function (id) { + for (let crash of this.crashes) { + if (crash.id == id) { + return crash; + } + } + + return null; + }, + + _ensureCountsForDay: function (day) { + if (!this._countsByDay.has(day)) { + this._countsByDay.set(day, new Map()); + } + }, + + /** + * Ensure the crash record is present in storage. + * + * Returns the crash record if we're allowed to store it or null + * if we've hit the high water mark. + * + * @param processType + * (string) One of the PROCESS_TYPE constants. + * @param crashType + * (string) One of the CRASH_TYPE constants. + * @param id + * (string) The crash ID. + * @param date + * (Date) When this crash occurred. + * @param metadata + * (dictionary) Crash metadata, may be empty. 
+ * + * @return null | object crash record + */ + _ensureCrashRecord: function (processType, crashType, id, date, metadata) { + if (!id) { + // Crashes are keyed on ID, so it's not really helpful to store crashes + // without IDs. + return null; + } + + let type = processType + "-" + crashType; + + if (!this._data.crashes.has(id)) { + let day = dateToDays(date); + this._ensureCountsForDay(day); + + let count = (this._countsByDay.get(day).get(type) || 0) + 1; + this._countsByDay.get(day).set(type, count); + + if (count > this.HIGH_WATER_DAILY_THRESHOLD && + processType != CrashManager.prototype.PROCESS_TYPE_MAIN) { + return null; + } + + // If we have an OOM size, count the crash as an OOM in addition to + // being a main process crash. + if (metadata && metadata.OOMAllocationSize) { + let oomType = type + "-oom"; + let oomCount = (this._countsByDay.get(day).get(oomType) || 0) + 1; + this._countsByDay.get(day).set(oomType, oomCount); + } + + this._data.crashes.set(id, { + id: id, + remoteID: null, + type: type, + crashDate: date, + submissions: new Map(), + classifications: [], + metadata: metadata, + }); + } + + let crash = this._data.crashes.get(id); + crash.type = type; + crash.crashDate = date; + + return crash; + }, + + /** + * Record the occurrence of a crash. + * + * @param processType (string) One of the PROCESS_TYPE constants. + * @param crashType (string) One of the CRASH_TYPE constants. + * @param id (string) Crash ID. Likely a UUID. + * @param date (Date) When the crash occurred. + * @param metadata (dictionary) Crash metadata, may be empty. + * + * @return boolean True if the crash was recorded and false if not. + */ + addCrash: function (processType, crashType, id, date, metadata) { + return !!this._ensureCrashRecord(processType, crashType, id, date, metadata); + }, + + /** + * @return boolean True if the remote ID was recorded and false if not. 
+ */ + setRemoteCrashID: function (crashID, remoteID) { + let crash = this._data.crashes.get(crashID); + if (!crash || !remoteID) { + return false; + } + + crash.remoteID = remoteID; + return true; + }, + + getCrashesOfType: function (processType, crashType) { + let crashes = []; + for (let crash of this.crashes) { + if (crash.isOfType(processType, crashType)) { + crashes.push(crash); + } + } + + return crashes; + }, + + /** + * Ensure the submission record is present in storage. + * @returns [submission, crash] + */ + _ensureSubmissionRecord: function (crashID, submissionID) { + let crash = this._data.crashes.get(crashID); + if (!crash || !submissionID) { + return null; + } + + if (!crash.submissions.has(submissionID)) { + crash.submissions.set(submissionID, { + requestDate: null, + responseDate: null, + result: null, + }); + } + + return [crash.submissions.get(submissionID), crash]; + }, + + /** + * @return boolean True if the attempt was recorded. + */ + addSubmissionAttempt: function (crashID, submissionID, date) { + let [submission, crash] = + this._ensureSubmissionRecord(crashID, submissionID); + if (!submission) { + return false; + } + + submission.requestDate = date; + Services.telemetry.getKeyedHistogramById("PROCESS_CRASH_SUBMIT_ATTEMPT") + .add(crash.type, 1); + return true; + }, + + /** + * @return boolean True if the response was recorded. + */ + addSubmissionResult: function (crashID, submissionID, date, result) { + let crash = this._data.crashes.get(crashID); + if (!crash || !submissionID) { + return false; + } + let submission = crash.submissions.get(submissionID); + if (!submission) { + return false; + } + + submission.responseDate = date; + submission.result = result; + Services.telemetry.getKeyedHistogramById("PROCESS_CRASH_SUBMIT_SUCCESS") + .add(crash.type, result == "ok"); + return true; + }, + + /** + * @return boolean True if the classifications were set. 
+ */ + setCrashClassifications: function (crashID, classifications) { + let crash = this._data.crashes.get(crashID); + if (!crash) { + return false; + } + + crash.classifications = classifications; + return true; + }, +}); + +/** + * Represents an individual crash with metadata. + * + * This is a wrapper around the low-level anonymous JS objects that define + * crashes. It exposes a consistent and helpful API. + * + * Instances of this type should only be constructured inside this module, + * not externally. The constructor is not considered a public API. + * + * @param o (object) + * The crash's entry from the CrashStore. + */ +function CrashRecord(o) { + this._o = o; +} + +CrashRecord.prototype = Object.freeze({ + get id() { + return this._o.id; + }, + + get remoteID() { + return this._o.remoteID; + }, + + get crashDate() { + return this._o.crashDate; + }, + + /** + * Obtain the newest date in this record. + * + * This is a convenience getter. The returned value is used to determine when + * to expire a record. + */ + get newestDate() { + // We currently only have 1 date, so this is easy. + return this._o.crashDate; + }, + + get oldestDate() { + return this._o.crashDate; + }, + + get type() { + return this._o.type; + }, + + isOfType: function (processType, crashType) { + return processType + "-" + crashType == this.type; + }, + + get submissions() { + return this._o.submissions; + }, + + get classifications() { + return this._o.classifications; + }, + + get metadata() { + return this._o.metadata; + }, +}); + +/** + * Obtain the global CrashManager instance used by the running application. + * + * CrashManager is likely only ever instantiated once per application lifetime. + * The main reason it's implemented as a reusable type is to facilitate testing. 
+ */ +XPCOMUtils.defineLazyGetter(this.CrashManager, "Singleton", function () { + if (gCrashManager) { + return gCrashManager; + } + + let crPath = OS.Path.join(OS.Constants.Path.userApplicationDataDir, + "Crash Reports"); + let storePath = OS.Path.join(OS.Constants.Path.profileDir, "crashes"); + + gCrashManager = new CrashManager({ + pendingDumpsDir: OS.Path.join(crPath, "pending"), + submittedDumpsDir: OS.Path.join(crPath, "submitted"), + eventsDirs: [OS.Path.join(crPath, "events"), OS.Path.join(storePath, "events")], + storeDir: storePath, + telemetryStoreSizeKey: "CRASH_STORE_COMPRESSED_BYTES", + }); + + // Automatically aggregate event files shortly after startup. This + // ensures it happens with some frequency. + // + // There are performance considerations here. While this is doing + // work and could negatively impact performance, the amount of work + // is kept small per run by periodically aggregating event files. + // Furthermore, well-behaving installs should not have much work + // here to do. If there is a lot of work, that install has bigger + // issues beyond reduced performance near startup. + gCrashManager.scheduleMaintenance(AGGREGATE_STARTUP_DELAY_MS); + + return gCrashManager; +}); diff --git a/toolkit/components/crashes/CrashManagerTest.jsm b/toolkit/components/crashes/CrashManagerTest.jsm new file mode 100644 index 000000000..2c6c4b1a0 --- /dev/null +++ b/toolkit/components/crashes/CrashManagerTest.jsm @@ -0,0 +1,186 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +/* + * This file provides common and shared functionality to facilitate + * testing of the Crashes component (CrashManager.jsm). 
+ */ + +"use strict"; + +const {classes: Cc, interfaces: Ci, utils: Cu} = Components; + +this.EXPORTED_SYMBOLS = [ + "configureLogging", + "getManager", + "sleep", + "TestingCrashManager", +]; + +Cu.import("resource://gre/modules/CrashManager.jsm", this); +Cu.import("resource://gre/modules/Log.jsm", this); +Cu.import("resource://gre/modules/osfile.jsm", this); +Cu.import("resource://gre/modules/Promise.jsm", this); +Cu.import("resource://gre/modules/Task.jsm", this); +Cu.import("resource://gre/modules/Timer.jsm", this); + +var loggingConfigured = false; + +this.configureLogging = function () { + if (loggingConfigured) { + return; + } + + let log = Log.repository.getLogger("Crashes.CrashManager"); + log.level = Log.Level.All; + let appender = new Log.DumpAppender(); + appender.level = Log.Level.All; + log.addAppender(appender); + loggingConfigured = true; +}; + +this.sleep = function (wait) { + let deferred = Promise.defer(); + + setTimeout(() => { + deferred.resolve(); + }, wait); + + return deferred.promise; +}; + +this.TestingCrashManager = function (options) { + CrashManager.call(this, options); +} + +this.TestingCrashManager.prototype = { + __proto__: CrashManager.prototype, + + createDummyDump: function (submitted=false, date=new Date(), hr=false) { + let uuid = Cc["@mozilla.org/uuid-generator;1"] + .getService(Ci.nsIUUIDGenerator) + .generateUUID() + .toString(); + uuid = uuid.substring(1, uuid.length - 1); + + let path; + let mode; + if (submitted) { + if (hr) { + path = OS.Path.join(this._submittedDumpsDir, "bp-hr-" + uuid + ".txt"); + } else { + path = OS.Path.join(this._submittedDumpsDir, "bp-" + uuid + ".txt"); + } + mode = OS.Constants.libc.S_IRUSR | OS.Constants.libc.S_IWUSR | + OS.Constants.libc.S_IRGRP | OS.Constants.libc.S_IROTH; + } else { + path = OS.Path.join(this._pendingDumpsDir, uuid + ".dmp"); + mode = OS.Constants.libc.S_IRUSR | OS.Constants.libc.S_IWUSR; + } + + return Task.spawn(function* () { + let f = yield OS.File.open(path, {create: 
true}, {unixMode: mode}); + yield f.setDates(date, date); + yield f.close(); + dump("Created fake crash: " + path + "\n"); + + return uuid; + }); + }, + + createIgnoredDumpFile: function (filename, submitted=false) { + let path; + if (submitted) { + path = OS.Path.join(this._submittedDumpsDir, filename); + } else { + path = OS.Path.join(this._pendingDumpsDir, filename); + } + + return Task.spawn(function* () { + let mode = OS.Constants.libc.S_IRUSR | OS.Constants.libc.S_IWUSR; + yield OS.File.open(path, {create: true}, {unixMode: mode}); + dump ("Create ignored dump file: " + path + "\n"); + }); + }, + + createEventsFile: function (filename, type, date, content, index=0) { + let path = OS.Path.join(this._eventsDirs[index], filename); + + let data = type + "\n" + + Math.floor(date.getTime() / 1000) + "\n" + + content; + let encoder = new TextEncoder(); + let array = encoder.encode(data); + + return Task.spawn(function* () { + yield OS.File.writeAtomic(path, array); + yield OS.File.setDates(path, date, date); + }); + }, + + /** + * Overwrite event file handling to process our test file type. + * + * We can probably delete this once we have actual events defined. 
+ */ + _handleEventFilePayload: function (store, entry, type, date, payload) { + if (type == "test.1") { + if (payload == "malformed") { + return this.EVENT_FILE_ERROR_MALFORMED; + } else if (payload == "success") { + return this.EVENT_FILE_SUCCESS; + } + return this.EVENT_FILE_ERROR_UNKNOWN_EVENT; + } + + return CrashManager.prototype._handleEventFilePayload.call(this, + store, + entry, + type, + date, + payload); + }, +}; + +var DUMMY_DIR_COUNT = 0; + +this.getManager = function () { + return Task.spawn(function* () { + const dirMode = OS.Constants.libc.S_IRWXU; + let baseFile = OS.Constants.Path.profileDir; + + function makeDir(create=true) { + return Task.spawn(function* () { + let path = OS.Path.join(baseFile, "dummy-dir-" + DUMMY_DIR_COUNT++); + + if (!create) { + return path; + } + + dump("Creating directory: " + path + "\n"); + yield OS.File.makeDir(path, {unixMode: dirMode}); + + return path; + }); + } + + let pendingD = yield makeDir(); + let submittedD = yield makeDir(); + let eventsD1 = yield makeDir(); + let eventsD2 = yield makeDir(); + + // Store directory is created at run-time if needed. Ensure those code + // paths are triggered. + let storeD = yield makeDir(false); + + let m = new TestingCrashManager({ + pendingDumpsDir: pendingD, + submittedDumpsDir: submittedD, + eventsDirs: [eventsD1, eventsD2], + storeDir: storeD, + }); + + return m; + }); +}; diff --git a/toolkit/components/crashes/CrashService.js b/toolkit/components/crashes/CrashService.js new file mode 100644 index 000000000..56f8b69e7 --- /dev/null +++ b/toolkit/components/crashes/CrashService.js @@ -0,0 +1,71 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +"use strict"; + +const {classes: Cc, interfaces: Ci, utils: Cu} = Components; + +Cu.import("resource://gre/modules/Services.jsm", this); +Cu.import("resource://gre/modules/XPCOMUtils.jsm", this); + +/** + * This component makes crash data available throughout the application. + * + * It is a service because some background activity will eventually occur. + */ +this.CrashService = function () {}; + +CrashService.prototype = Object.freeze({ + classID: Components.ID("{92668367-1b17-4190-86b2-1061b2179744}"), + QueryInterface: XPCOMUtils.generateQI([ + Ci.nsICrashService, + Ci.nsIObserver, + ]), + + addCrash: function (processType, crashType, id) { + switch (processType) { + case Ci.nsICrashService.PROCESS_TYPE_MAIN: + processType = Services.crashmanager.PROCESS_TYPE_MAIN; + break; + case Ci.nsICrashService.PROCESS_TYPE_CONTENT: + processType = Services.crashmanager.PROCESS_TYPE_CONTENT; + break; + case Ci.nsICrashService.PROCESS_TYPE_PLUGIN: + processType = Services.crashmanager.PROCESS_TYPE_PLUGIN; + break; + case Ci.nsICrashService.PROCESS_TYPE_GMPLUGIN: + processType = Services.crashmanager.PROCESS_TYPE_GMPLUGIN; + break; + case Ci.nsICrashService.PROCESS_TYPE_GPU: + processType = Services.crashmanager.PROCESS_TYPE_GPU; + break; + default: + throw new Error("Unrecognized PROCESS_TYPE: " + processType); + } + + switch (crashType) { + case Ci.nsICrashService.CRASH_TYPE_CRASH: + crashType = Services.crashmanager.CRASH_TYPE_CRASH; + break; + case Ci.nsICrashService.CRASH_TYPE_HANG: + crashType = Services.crashmanager.CRASH_TYPE_HANG; + break; + default: + throw new Error("Unrecognized CRASH_TYPE: " + crashType); + } + + Services.crashmanager.addCrash(processType, crashType, id, new Date()); + }, + + observe: function (subject, topic, data) { + switch (topic) { + case "profile-after-change": + // Side-effect is the singleton is instantiated. 
+ Services.crashmanager; + break; + } + }, +}); + +this.NSGetFactory = XPCOMUtils.generateNSGetFactory([CrashService]); diff --git a/toolkit/components/crashes/CrashService.manifest b/toolkit/components/crashes/CrashService.manifest new file mode 100644 index 000000000..ed45109fe --- /dev/null +++ b/toolkit/components/crashes/CrashService.manifest @@ -0,0 +1,3 @@ +component {92668367-1b17-4190-86b2-1061b2179744} CrashService.js +contract @mozilla.org/crashservice;1 {92668367-1b17-4190-86b2-1061b2179744} +category profile-after-change CrashService @mozilla.org/crashservice;1 diff --git a/toolkit/components/crashes/docs/crash-events.rst b/toolkit/components/crashes/docs/crash-events.rst new file mode 100644 index 000000000..b29b27989 --- /dev/null +++ b/toolkit/components/crashes/docs/crash-events.rst @@ -0,0 +1,176 @@ +============ +Crash Events +============ + +**Crash Events** refers to a special subsystem of Gecko that aims to capture +events of interest related to process crashing and hanging. + +When an event worthy of recording occurs, a file containing that event's +information is written to a well-defined location on the filesystem. The Gecko +process periodically scans for produced files and consolidates information +into a more unified and efficient backend store. + +Crash Event Files +================= + +When a crash-related event occurs, a file describing that event is written +to a well-defined directory. That directory is likely in the directory of +the currently-active profile. However, if a profile is not yet active in +the Gecko process, that directory likely resides in the user's *app data* +directory (*UAppData* from the directory service). + +The filename of the event file is not relevant. However, producers need +to choose a filename intelligently to avoid name collisions and race +conditions. Since file locking is potentially dangerous at crash time, +the convention of generating a UUID and using it as a filename has been +adopted. 
+ 
+File Format
+-----------
+
+All crash event files share the same high-level file format. The format
+consists of the following fields delimited by a UNIX newline (*\n*)
+character:
+
+* String event name (valid UTF-8, but likely ASCII)
+* String representation of integer seconds since UNIX epoch
+* Payload
+
+The payload is event specific and may contain UNIX newline characters.
+The recommended method for parsing is to split at most 3 times on UNIX
+newline and then dispatch to an event-specific parser based on the
+event name.
+
+If an unknown event type is encountered, the event can safely be ignored
+until later. This helps ensure that application downgrades (potentially
+due to elevated crash rate) don't result in data loss.
+
+The format and semantics of each event type are meant to be constant once
+that event type is committed to the main Firefox repository. If new metadata
+needs to be captured or the meaning of data captured in an event changes,
+that change should be expressed through the invention of a new event type.
+For this reason, event names are highly recommended to contain a version.
+e.g. instead of a *Gecko process crashed* event, we prefer a *Gecko process
+crashed v1* event.
+
+Event Types
+-----------
+
+Each subsection documents the different types of crash events that may be
+produced. Each section name corresponds to the first line of the crash
+event file.
+
+Currently only main process crashes produce event files. Because crashes and
+hangs in child processes can be easily recorded by the main process, we do not
+foresee the need for writing event files for child processes, design
+considerations below notwithstanding.
+
+crash.main.2
+^^^^^^^^^^^^
+
+This event is produced when the main process crashes. 
+ 
+The payload of this event is delimited by UNIX newlines (*\n*) and contains the
+following fields:
+
+* The crash ID string, very likely a UUID
+* 0 or more lines of metadata, each containing one key=value pair of text
+
+crash.main.1
+^^^^^^^^^^^^
+
+This event is produced when the main process crashes.
+
+The payload of this event is the string crash ID, very likely a UUID.
+There should be ``UUID.dmp`` and ``UUID.extra`` files on disk, saved by
+Breakpad.
+
+crash.submission.1
+^^^^^^^^^^^^^^^^^^
+
+This event is produced when a crash is submitted.
+
+The payload of this event is delimited by UNIX newlines (*\n*) and contains the
+following fields:
+
+* The crash ID string
+* "true" if the submission succeeded or "false" otherwise
+* The remote crash ID string if the submission succeeded
+
+Aggregated Event Log
+====================
+
+Crash events are aggregated together into a unified event *log*. Currently,
+this *log* is really a JSON file. However, this is an implementation detail
+and it could change at any time. The interface to crash data provided by
+the JavaScript API is the only supported interface.
+
+Design Considerations
+=====================
+
+There are many considerations influencing the design of this subsystem.
+We attempt to document them in this section.
+
+Decoupling of Event Files from Final Data Structure
+---------------------------------------------------
+
+While it is certainly possible for the Gecko process to write directly to
+the final data structure on disk, there is an intentional decoupling between
+the production of events and their transition into final storage. Along the
+same vein, the choice to have events written to multiple files by producers
+is deliberate.
+
+Some recorded events are written immediately after a process crash. This is
+a very uncertain time for the host system. There is a high likelihood the
+system is in an exceptional state, such as memory exhaustion. 
Therefore, any +action taken after crashing needs to be very deliberate about what it does. +Excessive memory allocation and certain system calls may cause the system +to crash again or the machine's condition to worsen. This means that the act +of recording a crash event must be very light weight. Writing a new file from +nothing is very light weight. This is one reason we write separate files. + +Another reason we write separate files is because if the main Gecko process +itself crashes (as opposed to say a plugin process), the crash reporter (not +Gecko) is running and the crash reporter needs to handle the writing of the +event info. If this writing is involved (say loading, parsing, updating, and +reserializing back to disk), this logic would need to be implemented in both +Gecko and the crash reporter or would need to be implemented in such a way +that both could use. Neither of these is very practical from a software +lifecycle management perspective. It's much easier to have separate processes +write a simple file and to let a single implementation do all the complex +work. + +Idempotent Event Processing +=========================== + +Processing of event files has been designed such that the result is +idempotent regardless of what order those files are processed in. This is +not only a good design decision, but it is arguably necessary. While event +files are processed in order by file mtime, filesystem times may not have +the resolution required for proper sorting. Therefore, processing order is +merely an optimistic assumption. + +Aggregated Storage Format +========================= + +Crash events are aggregated into a unified data structure on disk. That data +structure is currently LZ4-compressed JSON and is represented by a single file. + +The choice of a single JSON file was initially driven by time and complexity +concerns. Before changing the format or adding significant amounts of new +data, some considerations must be taken into account. 
+ +First, in well-behaving installs, crash data should be minimal. Crashes and +hangs will be rare and thus the size of the crash data should remain small +over time. + +The choice of a single JSON file has larger implications as the amount of +crash data grows. As new data is accumulated, we need to read and write +an entire file to make small updates. LZ4 compression helps reduce I/O. +But, there is a potential for unbounded file growth. We establish a +limit for the max age of records. Anything older than that limit is +pruned. We also establish a daily limit on the number of crashes we will +store. All crashes beyond the first N in a day have no payload and are +only recorded by the presence of a count. This count ensures we can +distinguish between ``N`` and ``100 * N``, which are very different +values! diff --git a/toolkit/components/crashes/docs/index.rst b/toolkit/components/crashes/docs/index.rst new file mode 100644 index 000000000..e2ab50ea4 --- /dev/null +++ b/toolkit/components/crashes/docs/index.rst @@ -0,0 +1,24 @@ +.. _crashes_crashmanager: + +============= +Crash Manager +============= + +The **Crash Manager** is a service and interface for managing crash +data within the Gecko application. + +From JavaScript, the service can be accessed via:: + + Cu.import("resource://gre/modules/Services.jsm"); + let crashManager = Services.crashmanager; + +That will give you an instance of ``CrashManager`` from ``CrashManager.jsm``. +From there, you can access and manipulate crash data. + +Other Documents +=============== + +.. toctree:: + :maxdepth: 1 + + crash-events diff --git a/toolkit/components/crashes/moz.build b/toolkit/components/crashes/moz.build new file mode 100644 index 000000000..5a36a3cd3 --- /dev/null +++ b/toolkit/components/crashes/moz.build @@ -0,0 +1,31 @@ +# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*- +# vim: set filetype=python: +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. 
If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. + +SPHINX_TREES['crash-manager'] = 'docs' + +EXTRA_COMPONENTS += [ + 'CrashService.js', + 'CrashService.manifest', +] + +EXTRA_JS_MODULES += [ + 'CrashManager.jsm', +] + +TESTING_JS_MODULES += [ + 'CrashManagerTest.jsm', +] + +XPCSHELL_TESTS_MANIFESTS += ['tests/xpcshell/xpcshell.ini'] + +XPIDL_MODULE = 'toolkit_crashservice' + +XPIDL_SOURCES += [ + 'nsICrashService.idl', +] + +with Files('**'): + BUG_COMPONENT = ('Toolkit', 'Breakpad Integration') diff --git a/toolkit/components/crashes/nsICrashService.idl b/toolkit/components/crashes/nsICrashService.idl new file mode 100644 index 000000000..57a412804 --- /dev/null +++ b/toolkit/components/crashes/nsICrashService.idl @@ -0,0 +1,30 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#include "nsISupports.idl" + +[scriptable, uuid(f60d76e5-62c3-4f58-89f6-b726c2b7bc20)] +interface nsICrashService : nsISupports +{ + /** + * Records the occurrence of a crash. + * + * @param processType + * One of the PROCESS_TYPE constants defined below. + * @param crashType + * One of the CRASH_TYPE constants defined below. + * @param id + * Crash ID. Likely a UUID. 
+ */ + void addCrash(in long processType, in long crashType, in AString id); + + const long PROCESS_TYPE_MAIN = 0; + const long PROCESS_TYPE_CONTENT = 1; + const long PROCESS_TYPE_PLUGIN = 2; + const long PROCESS_TYPE_GMPLUGIN = 3; + const long PROCESS_TYPE_GPU = 4; + + const long CRASH_TYPE_CRASH = 0; + const long CRASH_TYPE_HANG = 1; +}; diff --git a/toolkit/components/crashes/tests/xpcshell/.eslintrc.js b/toolkit/components/crashes/tests/xpcshell/.eslintrc.js new file mode 100644 index 000000000..d35787cd2 --- /dev/null +++ b/toolkit/components/crashes/tests/xpcshell/.eslintrc.js @@ -0,0 +1,7 @@ +"use strict"; + +module.exports = { + "extends": [ + "../../../../../testing/xpcshell/xpcshell.eslintrc.js" + ] +}; diff --git a/toolkit/components/crashes/tests/xpcshell/test_crash_manager.js b/toolkit/components/crashes/tests/xpcshell/test_crash_manager.js new file mode 100644 index 000000000..9844e78c4 --- /dev/null +++ b/toolkit/components/crashes/tests/xpcshell/test_crash_manager.js @@ -0,0 +1,494 @@ +/* Any copyright is dedicated to the Public Domain. 
+ * http://creativecommons.org/publicdomain/zero/1.0/ */ + +"use strict"; + +var {classes: Cc, interfaces: Ci, utils: Cu} = Components; + +var bsp = Cu.import("resource://gre/modules/CrashManager.jsm", this); +Cu.import("resource://gre/modules/Promise.jsm", this); +Cu.import("resource://gre/modules/Task.jsm", this); +Cu.import("resource://gre/modules/osfile.jsm", this); +Cu.import("resource://gre/modules/TelemetryEnvironment.jsm", this); + +Cu.import("resource://testing-common/CrashManagerTest.jsm", this); +Cu.import("resource://testing-common/TelemetryArchiveTesting.jsm", this); + +const DUMMY_DATE = new Date(Date.now() - 10 * 24 * 60 * 60 * 1000); +DUMMY_DATE.setMilliseconds(0); + +const DUMMY_DATE_2 = new Date(Date.now() - 20 * 24 * 60 * 60 * 1000); +DUMMY_DATE_2.setMilliseconds(0); + +function run_test() { + do_get_profile(); + configureLogging(); + TelemetryArchiveTesting.setup(); + run_next_test(); +} + +add_task(function* test_constructor_ok() { + let m = new CrashManager({ + pendingDumpsDir: "/foo", + submittedDumpsDir: "/bar", + eventsDirs: [], + storeDir: "/baz", + }); + Assert.ok(m, "CrashManager can be created."); +}); + +add_task(function* test_constructor_invalid() { + Assert.throws(() => { + new CrashManager({foo: true}); + }); +}); + +add_task(function* test_get_manager() { + let m = yield getManager(); + Assert.ok(m, "CrashManager obtained."); + + yield m.createDummyDump(true); + yield m.createDummyDump(false); +}); + +// Unsubmitted dump files on disk are detected properly. 
+add_task(function* test_pending_dumps() { + let m = yield getManager(); + let now = Date.now(); + let ids = []; + const COUNT = 5; + + for (let i = 0; i < COUNT; i++) { + ids.push(yield m.createDummyDump(false, new Date(now - i * 86400000))); + } + yield m.createIgnoredDumpFile("ignored", false); + + let entries = yield m.pendingDumps(); + Assert.equal(entries.length, COUNT, "proper number detected."); + + for (let entry of entries) { + Assert.equal(typeof(entry), "object", "entry is an object"); + Assert.ok("id" in entry, "id in entry"); + Assert.ok("path" in entry, "path in entry"); + Assert.ok("date" in entry, "date in entry"); + Assert.notEqual(ids.indexOf(entry.id), -1, "ID is known"); + } + + for (let i = 0; i < COUNT; i++) { + Assert.equal(entries[i].id, ids[COUNT-i-1], "Entries sorted by mtime"); + } +}); + +// Submitted dump files on disk are detected properly. +add_task(function* test_submitted_dumps() { + let m = yield getManager(); + let COUNT = 5; + + for (let i = 0; i < COUNT; i++) { + yield m.createDummyDump(true); + } + yield m.createIgnoredDumpFile("ignored", true); + + let entries = yield m.submittedDumps(); + Assert.equal(entries.length, COUNT, "proper number detected."); + + let hrID = yield m.createDummyDump(true, new Date(), true); + entries = yield m.submittedDumps(); + Assert.equal(entries.length, COUNT + 1, "hr- in filename detected."); + + let gotIDs = new Set(entries.map(e => e.id)); + Assert.ok(gotIDs.has(hrID)); +}); + +// The store should expire after inactivity. +add_task(function* test_store_expires() { + let m = yield getManager(); + + Object.defineProperty(m, "STORE_EXPIRATION_MS", { + value: 250, + }); + + let store = yield m._getStore(); + Assert.ok(store); + Assert.equal(store, m._store); + + yield sleep(300); + Assert.ok(!m._store, "Store has gone away."); +}); + +// Ensure discovery of unprocessed events files works. 
+add_task(function* test_unprocessed_events_files() { + let m = yield getManager(); + yield m.createEventsFile("1", "test.1", new Date(), "foo", 0); + yield m.createEventsFile("2", "test.1", new Date(), "bar", 0); + yield m.createEventsFile("1", "test.1", new Date(), "baz", 1); + + let paths = yield m._getUnprocessedEventsFiles(); + Assert.equal(paths.length, 3); +}); + +// Ensure only 1 aggregateEventsFiles() is allowed at a time. +add_task(function* test_aggregate_events_locking() { + let m = yield getManager(); + + let p1 = m.aggregateEventsFiles(); + let p2 = m.aggregateEventsFiles(); + + Assert.strictEqual(p1, p2, "Same promise should be returned."); +}); + +// Malformed events files should be deleted. +add_task(function* test_malformed_files_deleted() { + let m = yield getManager(); + + yield m.createEventsFile("1", "crash.main.1", new Date(), "foo\nbar"); + + let count = yield m.aggregateEventsFiles(); + Assert.equal(count, 1); + let crashes = yield m.getCrashes(); + Assert.equal(crashes.length, 0); + + count = yield m.aggregateEventsFiles(); + Assert.equal(count, 0); +}); + +// Unknown event types should be ignored. 
add_task(function* test_aggregate_ignore_unknown_events() {
  let manager = yield getManager();

  yield manager.createEventsFile("1", "crash.main.2", DUMMY_DATE, "id1");
  yield manager.createEventsFile("2", "foobar.1", new Date(), "dummy");

  // The first pass touches both files: the known event plus the unknown one.
  let count = yield manager.aggregateEventsFiles();
  Assert.equal(count, 2);

  // The unrecognized event file is left behind, so every subsequent pass
  // keeps encountering (and skipping) it.
  count = yield manager.aggregateEventsFiles();
  Assert.equal(count, 1);

  count = yield manager.aggregateEventsFiles();
  Assert.equal(count, 1);
});

add_task(function* test_prune_old() {
  let manager = yield getManager();
  let oldDate = new Date(Date.now() - 86400000);
  let newDate = new Date(Date.now() - 10000);
  yield manager.createEventsFile("1", "crash.main.2", oldDate, "id1");
  yield manager.addCrash(manager.PROCESS_TYPE_PLUGIN,
                         manager.CRASH_TYPE_CRASH, "id2", newDate);

  yield manager.aggregateEventsFiles();

  let crashes = yield manager.getCrashes();
  Assert.equal(crashes.length, 2);

  // A cutoff between the two crash dates should drop only the older crash.
  yield manager.pruneOldCrashes(new Date(oldDate.getTime() + 10000));

  crashes = yield manager.getCrashes();
  Assert.equal(crashes.length, 1, "Old crash has been pruned.");
  Assert.equal(crashes[0].id, "id2", "Proper crash was pruned.");

  // We can't test exact boundary conditions because dates from filesystem
  // don't have same guarantees as JS dates.
  yield manager.pruneOldCrashes(new Date(newDate.getTime() + 5000));
  crashes = yield manager.getCrashes();
  Assert.equal(crashes.length, 0);
});

add_task(function* test_schedule_maintenance() {
  let manager = yield getManager();
  yield manager.createEventsFile("1", "crash.main.2", DUMMY_DATE, "id1");

  // "id2" is far older than the purge horizon and should be removed by
  // maintenance; "id1" (DUMMY_DATE) should survive.
  let oldDate = new Date(
    Date.now() - manager.PURGE_OLDER_THAN_DAYS * 2 * 24 * 60 * 60 * 1000);
  yield manager.createEventsFile("2", "crash.main.2", oldDate, "id2");

  yield manager.scheduleMaintenance(25);
  let crashes = yield manager.getCrashes();
  Assert.equal(crashes.length, 1);
  Assert.equal(crashes[0].id, "id1");
});

add_task(function* test_main_crash_event_file() {
  let ac = new TelemetryArchiveTesting.Checker();
  yield ac.promiseInit();
  let theEnvironment = TelemetryEnvironment.currentEnvironment;
  let sessionId = "be66af2f-2ee5-4330-ae95-44462dfbdf0c";
  let stackTraces = { status: "OK" };

  // To test proper escaping, add data to the environment with an embedded
  // double-quote.
  theEnvironment.testValue = "MyValue\"";

  let manager = yield getManager();
  // Build the crash.main.2 payload: crash ID followed by key=value lines,
  // terminated by a trailing newline.
  const fileContent = [
    "id1",
    "k1=v1",
    "k2=v2",
    "TelemetryEnvironment=" + JSON.stringify(theEnvironment),
    "TelemetrySessionId=" + sessionId,
    "StackTraces=" + JSON.stringify(stackTraces),
    "",
  ].join("\n");

  yield manager.createEventsFile("1", "crash.main.2", DUMMY_DATE, fileContent);
  let count = yield manager.aggregateEventsFiles();
  Assert.equal(count, 1);

  let crashes = yield manager.getCrashes();
  Assert.equal(crashes.length, 1);
  let crash = crashes[0];
  Assert.equal(crash.id, "id1");
  Assert.equal(crash.type, "main-crash");
  Assert.equal(crash.metadata.k1, "v1");
  Assert.equal(crash.metadata.k2, "v2");
  Assert.ok(crash.metadata.TelemetryEnvironment);
  Assert.equal(Object.getOwnPropertyNames(crash.metadata).length, 5);
  Assert.equal(crash.metadata.TelemetrySessionId, sessionId);
  Assert.ok(crash.metadata.StackTraces);
  Assert.deepEqual(crash.crashDate, DUMMY_DATE);

  // Aggregation should also have archived a "crash" ping carrying the
  // metadata, stack traces, session ID and saved environment.
  let found = yield ac.promiseFindPing("crash", [
    [["payload", "hasCrashEnvironment"], true],
    [["payload", "metadata", "k1"], "v1"],
    [["payload", "crashId"], "1"],
    [["payload", "stackTraces", "status"], "OK"],
    [["payload", "sessionId"], sessionId],
  ]);
  Assert.ok(found, "Telemetry ping submitted for found crash");
  Assert.deepEqual(found.environment, theEnvironment,
                   "The saved environment should be present");

  // Everything has been processed; nothing left to aggregate.
  count = yield manager.aggregateEventsFiles();
  Assert.equal(count, 0);
});

add_task(function* test_main_crash_event_file_noenv() {
  let ac = new TelemetryArchiveTesting.Checker();
  yield ac.promiseInit();

  let manager = yield getManager();
  // No TelemetryEnvironment line in the payload: the ping must still be
  // archived, with hasCrashEnvironment=false.
  yield manager.createEventsFile("1", "crash.main.2", DUMMY_DATE,
                                 "id1\nk1=v3\nk2=v2");
  let count = yield manager.aggregateEventsFiles();
  Assert.equal(count, 1);

  let crashes = yield manager.getCrashes();
  Assert.equal(crashes.length, 1);
  Assert.equal(crashes[0].id, "id1");
  Assert.equal(crashes[0].type, "main-crash");
  Assert.deepEqual(crashes[0].metadata, { k1: "v3", k2: "v2"});
  Assert.deepEqual(crashes[0].crashDate, DUMMY_DATE);

  let found = yield ac.promiseFindPing("crash", [
    [["payload", "hasCrashEnvironment"], false],
    [["payload", "metadata", "k1"], "v3"],
  ]);
  Assert.ok(found, "Telemetry ping submitted for found crash");
  Assert.ok(found.environment, "There is an environment");

  count = yield manager.aggregateEventsFiles();
  Assert.equal(count, 0);
});

add_task(function* test_crash_submission_event_file() {
  let manager = yield getManager();
  yield manager.createEventsFile("1", "crash.main.2", DUMMY_DATE, "crash1");
  yield manager.createEventsFile("1-submission", "crash.submission.1",
                                 DUMMY_DATE_2, "crash1\nfalse\n");

  // The line below has been intentionally commented out to make sure that
  // the crash record is created when one does not exist.
  // yield manager.createEventsFile("2", "crash.main.1", DUMMY_DATE, "crash2");
  yield manager.createEventsFile("2-submission", "crash.submission.1",
                                 DUMMY_DATE_2, "crash2\ntrue\nbp-2");
  let count = yield manager.aggregateEventsFiles();
  Assert.equal(count, 3);

  let crashes = yield manager.getCrashes();
  Assert.equal(crashes.length, 2);

  let map = new Map(crashes.map(crash => [crash.id, crash]));

  let crash1 = map.get("crash1");
  Assert.ok(!!crash1);
  Assert.equal(crash1.remoteID, null);
  let crash2 = map.get("crash2");
  Assert.ok(!!crash2);
  Assert.equal(crash2.remoteID, "bp-2");

  // "crash1" recorded a failed submission and therefore has no remote ID.
  Assert.equal(crash1.submissions.size, 1);
  let submission = crash1.submissions.values().next().value;
  Assert.equal(submission.result, manager.SUBMISSION_RESULT_FAILED);
  Assert.equal(submission.requestDate.getTime(), DUMMY_DATE_2.getTime());
  Assert.equal(submission.responseDate.getTime(), DUMMY_DATE_2.getTime());

  // "crash2" recorded a successful submission with remote ID "bp-2".
  Assert.equal(crash2.submissions.size, 1);
  submission = crash2.submissions.values().next().value;
  Assert.equal(submission.result, manager.SUBMISSION_RESULT_OK);
  Assert.equal(submission.requestDate.getTime(), DUMMY_DATE_2.getTime());
  Assert.equal(submission.responseDate.getTime(), DUMMY_DATE_2.getTime());

  count = yield manager.aggregateEventsFiles();
  Assert.equal(count, 0);
});

add_task(function* test_multiline_crash_id_rejected() {
  let manager = yield getManager();
  // A payload whose crash ID spans multiple lines is malformed and must
  // not produce a crash record.
  yield manager.createEventsFile("1", "crash.main.1", DUMMY_DATE, "id1\nid2");
  yield manager.aggregateEventsFiles();
  let crashes = yield manager.getCrashes();
  Assert.equal(crashes.length, 0);
});

// Main process crashes should be remembered beyond the high water mark.
+add_task(function* test_high_water_mark() { + let m = yield getManager(); + + let store = yield m._getStore(); + + for (let i = 0; i < store.HIGH_WATER_DAILY_THRESHOLD + 1; i++) { + yield m.createEventsFile("m" + i, "crash.main.2", DUMMY_DATE, "m" + i); + } + + let count = yield m.aggregateEventsFiles(); + Assert.equal(count, bsp.CrashStore.prototype.HIGH_WATER_DAILY_THRESHOLD + 1); + + // Need to fetch again in case the first one was garbage collected. + store = yield m._getStore(); + + Assert.equal(store.crashesCount, store.HIGH_WATER_DAILY_THRESHOLD + 1); +}); + +add_task(function* test_addCrash() { + let m = yield getManager(); + + let crashes = yield m.getCrashes(); + Assert.equal(crashes.length, 0); + + yield m.addCrash(m.PROCESS_TYPE_MAIN, m.CRASH_TYPE_CRASH, + "main-crash", DUMMY_DATE); + yield m.addCrash(m.PROCESS_TYPE_MAIN, m.CRASH_TYPE_HANG, + "main-hang", DUMMY_DATE); + yield m.addCrash(m.PROCESS_TYPE_CONTENT, m.CRASH_TYPE_CRASH, + "content-crash", DUMMY_DATE); + yield m.addCrash(m.PROCESS_TYPE_CONTENT, m.CRASH_TYPE_HANG, + "content-hang", DUMMY_DATE); + yield m.addCrash(m.PROCESS_TYPE_PLUGIN, m.CRASH_TYPE_CRASH, + "plugin-crash", DUMMY_DATE); + yield m.addCrash(m.PROCESS_TYPE_PLUGIN, m.CRASH_TYPE_HANG, + "plugin-hang", DUMMY_DATE); + yield m.addCrash(m.PROCESS_TYPE_GMPLUGIN, m.CRASH_TYPE_CRASH, + "gmplugin-crash", DUMMY_DATE); + yield m.addCrash(m.PROCESS_TYPE_GPU, m.CRASH_TYPE_CRASH, + "gpu-crash", DUMMY_DATE); + + yield m.addCrash(m.PROCESS_TYPE_MAIN, m.CRASH_TYPE_CRASH, + "changing-item", DUMMY_DATE); + yield m.addCrash(m.PROCESS_TYPE_CONTENT, m.CRASH_TYPE_HANG, + "changing-item", DUMMY_DATE_2); + + crashes = yield m.getCrashes(); + Assert.equal(crashes.length, 9); + + let map = new Map(crashes.map(crash => [crash.id, crash])); + + let crash = map.get("main-crash"); + Assert.ok(!!crash); + Assert.equal(crash.crashDate, DUMMY_DATE); + Assert.equal(crash.type, m.PROCESS_TYPE_MAIN + "-" + m.CRASH_TYPE_CRASH); + 
Assert.ok(crash.isOfType(m.PROCESS_TYPE_MAIN, m.CRASH_TYPE_CRASH)); + + crash = map.get("main-hang"); + Assert.ok(!!crash); + Assert.equal(crash.crashDate, DUMMY_DATE); + Assert.equal(crash.type, m.PROCESS_TYPE_MAIN + "-" + m.CRASH_TYPE_HANG); + Assert.ok(crash.isOfType(m.PROCESS_TYPE_MAIN, m.CRASH_TYPE_HANG)); + + crash = map.get("content-crash"); + Assert.ok(!!crash); + Assert.equal(crash.crashDate, DUMMY_DATE); + Assert.equal(crash.type, m.PROCESS_TYPE_CONTENT + "-" + m.CRASH_TYPE_CRASH); + Assert.ok(crash.isOfType(m.PROCESS_TYPE_CONTENT, m.CRASH_TYPE_CRASH)); + + crash = map.get("content-hang"); + Assert.ok(!!crash); + Assert.equal(crash.crashDate, DUMMY_DATE); + Assert.equal(crash.type, m.PROCESS_TYPE_CONTENT + "-" + m.CRASH_TYPE_HANG); + Assert.ok(crash.isOfType(m.PROCESS_TYPE_CONTENT, m.CRASH_TYPE_HANG)); + + crash = map.get("plugin-crash"); + Assert.ok(!!crash); + Assert.equal(crash.crashDate, DUMMY_DATE); + Assert.equal(crash.type, m.PROCESS_TYPE_PLUGIN + "-" + m.CRASH_TYPE_CRASH); + Assert.ok(crash.isOfType(m.PROCESS_TYPE_PLUGIN, m.CRASH_TYPE_CRASH)); + + crash = map.get("plugin-hang"); + Assert.ok(!!crash); + Assert.equal(crash.crashDate, DUMMY_DATE); + Assert.equal(crash.type, m.PROCESS_TYPE_PLUGIN + "-" + m.CRASH_TYPE_HANG); + Assert.ok(crash.isOfType(m.PROCESS_TYPE_PLUGIN, m.CRASH_TYPE_HANG)); + + crash = map.get("gmplugin-crash"); + Assert.ok(!!crash); + Assert.equal(crash.crashDate, DUMMY_DATE); + Assert.equal(crash.type, m.PROCESS_TYPE_GMPLUGIN + "-" + m.CRASH_TYPE_CRASH); + Assert.ok(crash.isOfType(m.PROCESS_TYPE_GMPLUGIN, m.CRASH_TYPE_CRASH)); + + crash = map.get("gpu-crash"); + Assert.ok(!!crash); + Assert.equal(crash.crashDate, DUMMY_DATE); + Assert.equal(crash.type, m.PROCESS_TYPE_GPU+ "-" + m.CRASH_TYPE_CRASH); + Assert.ok(crash.isOfType(m.PROCESS_TYPE_GPU, m.CRASH_TYPE_CRASH)); + + crash = map.get("changing-item"); + Assert.ok(!!crash); + Assert.equal(crash.crashDate, DUMMY_DATE_2); + Assert.equal(crash.type, m.PROCESS_TYPE_CONTENT + "-" + 
m.CRASH_TYPE_HANG); + Assert.ok(crash.isOfType(m.PROCESS_TYPE_CONTENT, m.CRASH_TYPE_HANG)); +}); + +add_task(function* test_generateSubmissionID() { + let m = yield getManager(); + + const SUBMISSION_ID_REGEX = + /^(sub-[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12})$/i; + let id = m.generateSubmissionID(); + Assert.ok(SUBMISSION_ID_REGEX.test(id)); +}); + +add_task(function* test_addSubmissionAttemptAndResult() { + let m = yield getManager(); + + let crashes = yield m.getCrashes(); + Assert.equal(crashes.length, 0); + + yield m.addCrash(m.PROCESS_TYPE_MAIN, m.CRASH_TYPE_CRASH, + "main-crash", DUMMY_DATE); + yield m.addSubmissionAttempt("main-crash", "submission", DUMMY_DATE); + yield m.addSubmissionResult("main-crash", "submission", DUMMY_DATE_2, + m.SUBMISSION_RESULT_OK); + + crashes = yield m.getCrashes(); + Assert.equal(crashes.length, 1); + + let submissions = crashes[0].submissions; + Assert.ok(!!submissions); + + let submission = submissions.get("submission"); + Assert.ok(!!submission); + Assert.equal(submission.requestDate.getTime(), DUMMY_DATE.getTime()); + Assert.equal(submission.responseDate.getTime(), DUMMY_DATE_2.getTime()); + Assert.equal(submission.result, m.SUBMISSION_RESULT_OK); +}); + +add_task(function* test_setCrashClassifications() { + let m = yield getManager(); + + yield m.addCrash(m.PROCESS_TYPE_MAIN, m.CRASH_TYPE_CRASH, + "main-crash", DUMMY_DATE); + yield m.setCrashClassifications("main-crash", ["a"]); + let classifications = (yield m.getCrashes())[0].classifications; + Assert.ok(classifications.indexOf("a") != -1); +}); + +add_task(function* test_setRemoteCrashID() { + let m = yield getManager(); + + yield m.addCrash(m.PROCESS_TYPE_MAIN, m.CRASH_TYPE_CRASH, + "main-crash", DUMMY_DATE); + yield m.setRemoteCrashID("main-crash", "bp-1"); + Assert.equal((yield m.getCrashes())[0].remoteID, "bp-1"); +}); diff --git a/toolkit/components/crashes/tests/xpcshell/test_crash_service.js 
b/toolkit/components/crashes/tests/xpcshell/test_crash_service.js new file mode 100644 index 000000000..c207057e0 --- /dev/null +++ b/toolkit/components/crashes/tests/xpcshell/test_crash_service.js @@ -0,0 +1,31 @@ +/* Any copyright is dedicated to the Public Domain. + * http://creativecommons.org/publicdomain/zero/1.0/ */ + +"use strict"; + +var {classes: Cc, interfaces: Ci, utils: Cu} = Components; + +Cu.import("resource://gre/modules/Services.jsm", this); +Cu.import("resource://testing-common/AppData.jsm", this); +var bsp = Cu.import("resource://gre/modules/CrashManager.jsm", this); + +function run_test() { + run_next_test(); +} + +add_task(function* test_instantiation() { + Assert.ok(!bsp.gCrashManager, "CrashManager global instance not initially defined."); + + do_get_profile(); + yield makeFakeAppDir(); + + // Fake profile creation. + Cc["@mozilla.org/crashservice;1"] + .getService(Ci.nsIObserver) + .observe(null, "profile-after-change", null); + + Assert.ok(bsp.gCrashManager, "Profile creation makes it available."); + Assert.ok(Services.crashmanager, "CrashManager available via Services."); + Assert.strictEqual(bsp.gCrashManager, Services.crashmanager, + "The objects are the same."); +}); diff --git a/toolkit/components/crashes/tests/xpcshell/test_crash_store.js b/toolkit/components/crashes/tests/xpcshell/test_crash_store.js new file mode 100644 index 000000000..12b180e91 --- /dev/null +++ b/toolkit/components/crashes/tests/xpcshell/test_crash_store.js @@ -0,0 +1,587 @@ +/* Any copyright is dedicated to the Public Domain. + * http://creativecommons.org/publicdomain/zero/1.0/ */ + +/* + * This file tests the CrashStore type in CrashManager.jsm. 
+ */ + +"use strict"; + +var {classes: Cc, interfaces: Ci, utils: Cu} = Components; + +var bsp = Cu.import("resource://gre/modules/CrashManager.jsm", this); +Cu.import("resource://gre/modules/osfile.jsm", this); +Cu.import("resource://gre/modules/Task.jsm", this); + +const DUMMY_DATE = new Date(Date.now() - 10 * 24 * 60 * 60 * 1000); +DUMMY_DATE.setMilliseconds(0); + +const DUMMY_DATE_2 = new Date(Date.now() - 5 * 24 * 60 * 60 * 1000); +DUMMY_DATE_2.setMilliseconds(0); + +const { + PROCESS_TYPE_MAIN, + PROCESS_TYPE_CONTENT, + PROCESS_TYPE_PLUGIN, + PROCESS_TYPE_GMPLUGIN, + PROCESS_TYPE_GPU, + CRASH_TYPE_CRASH, + CRASH_TYPE_HANG, + SUBMISSION_RESULT_OK, + SUBMISSION_RESULT_FAILED, +} = CrashManager.prototype; + +const CrashStore = bsp.CrashStore; + +var STORE_DIR_COUNT = 0; + +function getStore() { + return Task.spawn(function* () { + let storeDir = do_get_tempdir().path; + storeDir = OS.Path.join(storeDir, "store-" + STORE_DIR_COUNT++); + + yield OS.File.makeDir(storeDir, {unixMode: OS.Constants.libc.S_IRWXU}); + + let s = new CrashStore(storeDir); + yield s.load(); + + return s; + }); +} + +function run_test() { + run_next_test(); +} + +add_task(function* test_constructor() { + let s = new CrashStore("/some/path"); + Assert.ok(s instanceof CrashStore); +}); + +add_task(function* test_add_crash() { + let s = yield getStore(); + + Assert.equal(s.crashesCount, 0); + let d = new Date(Date.now() - 5000); + Assert.ok(s.addCrash(PROCESS_TYPE_MAIN, CRASH_TYPE_CRASH, "id1", d)); + + Assert.equal(s.crashesCount, 1); + + let crashes = s.crashes; + Assert.equal(crashes.length, 1); + let c = crashes[0]; + + Assert.equal(c.id, "id1", "ID set properly."); + Assert.equal(c.crashDate.getTime(), d.getTime(), "Date set."); + + Assert.ok( + s.addCrash(PROCESS_TYPE_MAIN, CRASH_TYPE_CRASH, "id2", new Date()) + ); + Assert.equal(s.crashesCount, 2); +}); + +add_task(function* test_reset() { + let s = yield getStore(); + + Assert.ok(s.addCrash(PROCESS_TYPE_MAIN, CRASH_TYPE_CRASH, "id1", 
DUMMY_DATE)); + Assert.equal(s.crashes.length, 1); + s.reset(); + Assert.equal(s.crashes.length, 0); +}); + +add_task(function* test_save_load() { + let s = yield getStore(); + + yield s.save(); + + let d1 = new Date(); + let d2 = new Date(d1.getTime() - 10000); + Assert.ok(s.addCrash(PROCESS_TYPE_MAIN, CRASH_TYPE_CRASH, "id1", d1)); + Assert.ok(s.addCrash(PROCESS_TYPE_MAIN, CRASH_TYPE_CRASH, "id2", d2)); + Assert.ok(s.addSubmissionAttempt("id1", "sub1", d1)); + Assert.ok(s.addSubmissionResult("id1", "sub1", d2, SUBMISSION_RESULT_OK)); + Assert.ok(s.setRemoteCrashID("id1", "bp-1")); + + yield s.save(); + + yield s.load(); + Assert.ok(!s.corruptDate); + let crashes = s.crashes; + + Assert.equal(crashes.length, 2); + let c = s.getCrash("id1"); + Assert.equal(c.crashDate.getTime(), d1.getTime()); + Assert.equal(c.remoteID, "bp-1"); + + Assert.ok(!!c.submissions); + let submission = c.submissions.get("sub1"); + Assert.ok(!!submission); + Assert.equal(submission.requestDate.getTime(), d1.getTime()); + Assert.equal(submission.responseDate.getTime(), d2.getTime()); + Assert.equal(submission.result, SUBMISSION_RESULT_OK); +}); + +add_task(function* test_corrupt_json() { + let s = yield getStore(); + + let buffer = new TextEncoder().encode("{bad: json-file"); + yield OS.File.writeAtomic(s._storePath, buffer, {compression: "lz4"}); + + yield s.load(); + Assert.ok(s.corruptDate, "Corrupt date is defined."); + + let date = s.corruptDate; + yield s.save(); + s._data = null; + yield s.load(); + Assert.ok(s.corruptDate); + Assert.equal(date.getTime(), s.corruptDate.getTime()); +}); + +add_task(function* test_add_main_crash() { + let s = yield getStore(); + + Assert.ok( + s.addCrash(PROCESS_TYPE_MAIN, CRASH_TYPE_CRASH, "id1", new Date()) + ); + Assert.equal(s.crashesCount, 1); + + let c = s.crashes[0]; + Assert.ok(c.crashDate); + Assert.equal(c.type, PROCESS_TYPE_MAIN + "-" + CRASH_TYPE_CRASH); + Assert.ok(c.isOfType(PROCESS_TYPE_MAIN, CRASH_TYPE_CRASH)); + + Assert.ok( + 
s.addCrash(PROCESS_TYPE_MAIN, CRASH_TYPE_CRASH, "id2", new Date()) + ); + Assert.equal(s.crashesCount, 2); + + // Duplicate. + Assert.ok( + s.addCrash(PROCESS_TYPE_MAIN, CRASH_TYPE_CRASH, "id1", new Date()) + ); + Assert.equal(s.crashesCount, 2); + + Assert.ok( + s.addCrash(PROCESS_TYPE_MAIN, CRASH_TYPE_CRASH, "id3", new Date(), + { OOMAllocationSize: 1048576 }) + ); + Assert.equal(s.crashesCount, 3); + Assert.deepEqual(s.crashes[2].metadata, { OOMAllocationSize: 1048576 }); + + let crashes = s.getCrashesOfType(PROCESS_TYPE_MAIN, CRASH_TYPE_CRASH); + Assert.equal(crashes.length, 3); +}); + +add_task(function* test_add_main_hang() { + let s = yield getStore(); + + Assert.ok( + s.addCrash(PROCESS_TYPE_MAIN, CRASH_TYPE_HANG, "id1", new Date()) + ); + Assert.equal(s.crashesCount, 1); + + let c = s.crashes[0]; + Assert.ok(c.crashDate); + Assert.equal(c.type, PROCESS_TYPE_MAIN + "-" + CRASH_TYPE_HANG); + Assert.ok(c.isOfType(PROCESS_TYPE_MAIN, CRASH_TYPE_HANG)); + + Assert.ok( + s.addCrash(PROCESS_TYPE_MAIN, CRASH_TYPE_HANG, "id2", new Date()) + ); + Assert.equal(s.crashesCount, 2); + + Assert.ok( + s.addCrash(PROCESS_TYPE_MAIN, CRASH_TYPE_HANG, "id1", new Date()) + ); + Assert.equal(s.crashesCount, 2); + + let crashes = s.getCrashesOfType(PROCESS_TYPE_MAIN, CRASH_TYPE_HANG); + Assert.equal(crashes.length, 2); +}); + +add_task(function* test_add_content_crash() { + let s = yield getStore(); + + Assert.ok( + s.addCrash(PROCESS_TYPE_CONTENT, CRASH_TYPE_CRASH, "id1", new Date()) + ); + Assert.equal(s.crashesCount, 1); + + let c = s.crashes[0]; + Assert.ok(c.crashDate); + Assert.equal(c.type, PROCESS_TYPE_CONTENT + "-" + CRASH_TYPE_CRASH); + Assert.ok(c.isOfType(PROCESS_TYPE_CONTENT, CRASH_TYPE_CRASH)); + + Assert.ok( + s.addCrash(PROCESS_TYPE_CONTENT, CRASH_TYPE_CRASH, "id2", new Date()) + ); + Assert.equal(s.crashesCount, 2); + + Assert.ok( + s.addCrash(PROCESS_TYPE_CONTENT, CRASH_TYPE_CRASH, "id1", new Date()) + ); + Assert.equal(s.crashesCount, 2); + + let crashes = 
s.getCrashesOfType(PROCESS_TYPE_CONTENT, CRASH_TYPE_CRASH); + Assert.equal(crashes.length, 2); +}); + +add_task(function* test_add_content_hang() { + let s = yield getStore(); + + Assert.ok( + s.addCrash(PROCESS_TYPE_CONTENT, CRASH_TYPE_HANG, "id1", new Date()) + ); + Assert.equal(s.crashesCount, 1); + + let c = s.crashes[0]; + Assert.ok(c.crashDate); + Assert.equal(c.type, PROCESS_TYPE_CONTENT + "-" + CRASH_TYPE_HANG); + Assert.ok(c.isOfType(PROCESS_TYPE_CONTENT, CRASH_TYPE_HANG)); + + Assert.ok( + s.addCrash(PROCESS_TYPE_CONTENT, CRASH_TYPE_HANG, "id2", new Date()) + ); + Assert.equal(s.crashesCount, 2); + + Assert.ok( + s.addCrash(PROCESS_TYPE_CONTENT, CRASH_TYPE_HANG, "id1", new Date()) + ); + Assert.equal(s.crashesCount, 2); + + let crashes = s.getCrashesOfType(PROCESS_TYPE_CONTENT, CRASH_TYPE_HANG); + Assert.equal(crashes.length, 2); +}); + +add_task(function* test_add_plugin_crash() { + let s = yield getStore(); + + Assert.ok( + s.addCrash(PROCESS_TYPE_PLUGIN, CRASH_TYPE_CRASH, "id1", new Date()) + ); + Assert.equal(s.crashesCount, 1); + + let c = s.crashes[0]; + Assert.ok(c.crashDate); + Assert.equal(c.type, PROCESS_TYPE_PLUGIN + "-" + CRASH_TYPE_CRASH); + Assert.ok(c.isOfType(PROCESS_TYPE_PLUGIN, CRASH_TYPE_CRASH)); + + Assert.ok( + s.addCrash(PROCESS_TYPE_PLUGIN, CRASH_TYPE_CRASH, "id2", new Date()) + ); + Assert.equal(s.crashesCount, 2); + + Assert.ok( + s.addCrash(PROCESS_TYPE_PLUGIN, CRASH_TYPE_CRASH, "id1", new Date()) + ); + Assert.equal(s.crashesCount, 2); + + let crashes = s.getCrashesOfType(PROCESS_TYPE_PLUGIN, CRASH_TYPE_CRASH); + Assert.equal(crashes.length, 2); +}); + +add_task(function* test_add_plugin_hang() { + let s = yield getStore(); + + Assert.ok( + s.addCrash(PROCESS_TYPE_PLUGIN, CRASH_TYPE_HANG, "id1", new Date()) + ); + Assert.equal(s.crashesCount, 1); + + let c = s.crashes[0]; + Assert.ok(c.crashDate); + Assert.equal(c.type, PROCESS_TYPE_PLUGIN + "-" + CRASH_TYPE_HANG); + Assert.ok(c.isOfType(PROCESS_TYPE_PLUGIN, CRASH_TYPE_HANG)); + 
+ Assert.ok( + s.addCrash(PROCESS_TYPE_PLUGIN, CRASH_TYPE_HANG, "id2", new Date()) + ); + Assert.equal(s.crashesCount, 2); + + Assert.ok( + s.addCrash(PROCESS_TYPE_PLUGIN, CRASH_TYPE_HANG, "id1", new Date()) + ); + Assert.equal(s.crashesCount, 2); + + let crashes = s.getCrashesOfType(PROCESS_TYPE_PLUGIN, CRASH_TYPE_HANG); + Assert.equal(crashes.length, 2); +}); + +add_task(function* test_add_gmplugin_crash() { + let s = yield getStore(); + + Assert.ok( + s.addCrash(PROCESS_TYPE_GMPLUGIN, CRASH_TYPE_CRASH, "id1", new Date()) + ); + Assert.equal(s.crashesCount, 1); + + let c = s.crashes[0]; + Assert.ok(c.crashDate); + Assert.equal(c.type, PROCESS_TYPE_GMPLUGIN + "-" + CRASH_TYPE_CRASH); + Assert.ok(c.isOfType(PROCESS_TYPE_GMPLUGIN, CRASH_TYPE_CRASH)); + + Assert.ok( + s.addCrash(PROCESS_TYPE_GMPLUGIN, CRASH_TYPE_CRASH, "id2", new Date()) + ); + Assert.equal(s.crashesCount, 2); + + Assert.ok( + s.addCrash(PROCESS_TYPE_GMPLUGIN, CRASH_TYPE_CRASH, "id1", new Date()) + ); + Assert.equal(s.crashesCount, 2); + + let crashes = s.getCrashesOfType(PROCESS_TYPE_GMPLUGIN, CRASH_TYPE_CRASH); + Assert.equal(crashes.length, 2); +}); + +add_task(function* test_add_gpu_crash() { + let s = yield getStore(); + + Assert.ok( + s.addCrash(PROCESS_TYPE_GPU, CRASH_TYPE_CRASH, "id1", new Date()) + ); + Assert.equal(s.crashesCount, 1); + + let c = s.crashes[0]; + Assert.ok(c.crashDate); + Assert.equal(c.type, PROCESS_TYPE_GPU + "-" + CRASH_TYPE_CRASH); + Assert.ok(c.isOfType(PROCESS_TYPE_GPU, CRASH_TYPE_CRASH)); + + Assert.ok( + s.addCrash(PROCESS_TYPE_GPU, CRASH_TYPE_CRASH, "id2", new Date()) + ); + Assert.equal(s.crashesCount, 2); + + Assert.ok( + s.addCrash(PROCESS_TYPE_GPU, CRASH_TYPE_CRASH, "id1", new Date()) + ); + Assert.equal(s.crashesCount, 2); + + let crashes = s.getCrashesOfType(PROCESS_TYPE_GPU, CRASH_TYPE_CRASH); + Assert.equal(crashes.length, 2); +}); + +add_task(function* test_add_mixed_types() { + let s = yield getStore(); + + Assert.ok( + s.addCrash(PROCESS_TYPE_MAIN, 
CRASH_TYPE_CRASH, "mcrash", new Date()) && + s.addCrash(PROCESS_TYPE_MAIN, CRASH_TYPE_HANG, "mhang", new Date()) && + s.addCrash(PROCESS_TYPE_CONTENT, CRASH_TYPE_CRASH, "ccrash", new Date()) && + s.addCrash(PROCESS_TYPE_CONTENT, CRASH_TYPE_HANG, "chang", new Date()) && + s.addCrash(PROCESS_TYPE_PLUGIN, CRASH_TYPE_CRASH, "pcrash", new Date()) && + s.addCrash(PROCESS_TYPE_PLUGIN, CRASH_TYPE_HANG, "phang", new Date()) && + s.addCrash(PROCESS_TYPE_GMPLUGIN, CRASH_TYPE_CRASH, "gmpcrash", new Date()) && + s.addCrash(PROCESS_TYPE_GPU, CRASH_TYPE_CRASH, "gpucrash", new Date()) + ); + + Assert.equal(s.crashesCount, 8); + + yield s.save(); + + s._data.crashes.clear(); + Assert.equal(s.crashesCount, 0); + + yield s.load(); + + Assert.equal(s.crashesCount, 8); + + let crashes = s.getCrashesOfType(PROCESS_TYPE_MAIN, CRASH_TYPE_CRASH); + Assert.equal(crashes.length, 1); + crashes = s.getCrashesOfType(PROCESS_TYPE_MAIN, CRASH_TYPE_HANG); + Assert.equal(crashes.length, 1); + crashes = s.getCrashesOfType(PROCESS_TYPE_CONTENT, CRASH_TYPE_CRASH); + Assert.equal(crashes.length, 1); + crashes = s.getCrashesOfType(PROCESS_TYPE_CONTENT, CRASH_TYPE_HANG); + Assert.equal(crashes.length, 1); + crashes = s.getCrashesOfType(PROCESS_TYPE_PLUGIN, CRASH_TYPE_CRASH); + Assert.equal(crashes.length, 1); + crashes = s.getCrashesOfType(PROCESS_TYPE_PLUGIN, CRASH_TYPE_HANG); + Assert.equal(crashes.length, 1); + crashes = s.getCrashesOfType(PROCESS_TYPE_GMPLUGIN, CRASH_TYPE_CRASH); + Assert.equal(crashes.length, 1); + crashes = s.getCrashesOfType(PROCESS_TYPE_GPU, CRASH_TYPE_CRASH); + Assert.equal(crashes.length, 1); +}); + +// Crashes added beyond the high water mark behave properly. 
+add_task(function* test_high_water() { + let s = yield getStore(); + + let d1 = new Date(2014, 0, 1, 0, 0, 0); + let d2 = new Date(2014, 0, 2, 0, 0, 0); + + let i = 0; + for (; i < s.HIGH_WATER_DAILY_THRESHOLD; i++) { + Assert.ok( + s.addCrash(PROCESS_TYPE_MAIN, CRASH_TYPE_CRASH, "mc1" + i, d1) && + s.addCrash(PROCESS_TYPE_MAIN, CRASH_TYPE_CRASH, "mc2" + i, d2) && + s.addCrash(PROCESS_TYPE_MAIN, CRASH_TYPE_HANG, "mh1" + i, d1) && + s.addCrash(PROCESS_TYPE_MAIN, CRASH_TYPE_HANG, "mh2" + i, d2) && + + s.addCrash(PROCESS_TYPE_CONTENT, CRASH_TYPE_CRASH, "cc1" + i, d1) && + s.addCrash(PROCESS_TYPE_CONTENT, CRASH_TYPE_CRASH, "cc2" + i, d2) && + s.addCrash(PROCESS_TYPE_CONTENT, CRASH_TYPE_HANG, "ch1" + i, d1) && + s.addCrash(PROCESS_TYPE_CONTENT, CRASH_TYPE_HANG, "ch2" + i, d2) && + + s.addCrash(PROCESS_TYPE_PLUGIN, CRASH_TYPE_CRASH, "pc1" + i, d1) && + s.addCrash(PROCESS_TYPE_PLUGIN, CRASH_TYPE_CRASH, "pc2" + i, d2) && + s.addCrash(PROCESS_TYPE_PLUGIN, CRASH_TYPE_HANG, "ph1" + i, d1) && + s.addCrash(PROCESS_TYPE_PLUGIN, CRASH_TYPE_HANG, "ph2" + i, d2) + ); + } + + Assert.ok( + s.addCrash(PROCESS_TYPE_MAIN, CRASH_TYPE_CRASH, "mc1" + i, d1) && + s.addCrash(PROCESS_TYPE_MAIN, CRASH_TYPE_CRASH, "mc2" + i, d2) && + s.addCrash(PROCESS_TYPE_MAIN, CRASH_TYPE_HANG, "mh1" + i, d1) && + s.addCrash(PROCESS_TYPE_MAIN, CRASH_TYPE_HANG, "mh2" + i, d2) + ); + + Assert.ok(!s.addCrash(PROCESS_TYPE_CONTENT, CRASH_TYPE_CRASH, "cc1" + i, d1)); + Assert.ok(!s.addCrash(PROCESS_TYPE_CONTENT, CRASH_TYPE_CRASH, "cc2" + i, d2)); + Assert.ok(!s.addCrash(PROCESS_TYPE_CONTENT, CRASH_TYPE_HANG, "ch1" + i, d1)); + Assert.ok(!s.addCrash(PROCESS_TYPE_CONTENT, CRASH_TYPE_HANG, "ch2" + i, d2)); + + Assert.ok(!s.addCrash(PROCESS_TYPE_PLUGIN, CRASH_TYPE_CRASH, "pc1" + i, d1)); + Assert.ok(!s.addCrash(PROCESS_TYPE_PLUGIN, CRASH_TYPE_CRASH, "pc2" + i, d2)); + Assert.ok(!s.addCrash(PROCESS_TYPE_PLUGIN, CRASH_TYPE_HANG, "ph1" + i, d1)); + Assert.ok(!s.addCrash(PROCESS_TYPE_PLUGIN, CRASH_TYPE_HANG, "ph2" + i, 
d2)); + + // We preserve main process crashes and hangs. Content and plugin crashes and + // hangs beyond should be discarded. + Assert.equal(s.crashesCount, 12 * s.HIGH_WATER_DAILY_THRESHOLD + 4); + + let crashes = s.getCrashesOfType(PROCESS_TYPE_MAIN, CRASH_TYPE_CRASH); + Assert.equal(crashes.length, 2 * s.HIGH_WATER_DAILY_THRESHOLD + 2); + crashes = s.getCrashesOfType(PROCESS_TYPE_MAIN, CRASH_TYPE_HANG); + Assert.equal(crashes.length, 2 * s.HIGH_WATER_DAILY_THRESHOLD + 2); + + crashes = s.getCrashesOfType(PROCESS_TYPE_CONTENT, CRASH_TYPE_CRASH); + Assert.equal(crashes.length, 2 * s.HIGH_WATER_DAILY_THRESHOLD); + crashes = s.getCrashesOfType(PROCESS_TYPE_CONTENT, CRASH_TYPE_HANG); + Assert.equal(crashes.length, 2 * s.HIGH_WATER_DAILY_THRESHOLD); + + crashes = s.getCrashesOfType(PROCESS_TYPE_PLUGIN, CRASH_TYPE_CRASH); + Assert.equal(crashes.length, 2 * s.HIGH_WATER_DAILY_THRESHOLD); + crashes = s.getCrashesOfType(PROCESS_TYPE_PLUGIN, CRASH_TYPE_HANG); + Assert.equal(crashes.length, 2 * s.HIGH_WATER_DAILY_THRESHOLD); + + // But raw counts should be preserved. + let day1 = bsp.dateToDays(d1); + let day2 = bsp.dateToDays(d2); + Assert.ok(s._countsByDay.has(day1)); + Assert.ok(s._countsByDay.has(day2)); + + Assert.equal(s._countsByDay.get(day1). + get(PROCESS_TYPE_MAIN + "-" + CRASH_TYPE_CRASH), + s.HIGH_WATER_DAILY_THRESHOLD + 1); + Assert.equal(s._countsByDay.get(day1). + get(PROCESS_TYPE_MAIN + "-" + CRASH_TYPE_HANG), + s.HIGH_WATER_DAILY_THRESHOLD + 1); + + Assert.equal(s._countsByDay.get(day1). + get(PROCESS_TYPE_CONTENT + "-" + CRASH_TYPE_CRASH), + s.HIGH_WATER_DAILY_THRESHOLD + 1); + Assert.equal(s._countsByDay.get(day1). + get(PROCESS_TYPE_CONTENT + "-" + CRASH_TYPE_HANG), + s.HIGH_WATER_DAILY_THRESHOLD + 1); + + Assert.equal(s._countsByDay.get(day1). + get(PROCESS_TYPE_PLUGIN + "-" + CRASH_TYPE_CRASH), + s.HIGH_WATER_DAILY_THRESHOLD + 1); + Assert.equal(s._countsByDay.get(day1). 
+ get(PROCESS_TYPE_PLUGIN + "-" + CRASH_TYPE_HANG), + s.HIGH_WATER_DAILY_THRESHOLD + 1); + + yield s.save(); + yield s.load(); + + Assert.ok(s._countsByDay.has(day1)); + Assert.ok(s._countsByDay.has(day2)); + + Assert.equal(s._countsByDay.get(day1). + get(PROCESS_TYPE_MAIN + "-" + CRASH_TYPE_CRASH), + s.HIGH_WATER_DAILY_THRESHOLD + 1); + Assert.equal(s._countsByDay.get(day1). + get(PROCESS_TYPE_MAIN + "-" + CRASH_TYPE_HANG), + s.HIGH_WATER_DAILY_THRESHOLD + 1); + + Assert.equal(s._countsByDay.get(day1). + get(PROCESS_TYPE_CONTENT + "-" + CRASH_TYPE_CRASH), + s.HIGH_WATER_DAILY_THRESHOLD + 1); + Assert.equal(s._countsByDay.get(day1). + get(PROCESS_TYPE_CONTENT + "-" + CRASH_TYPE_HANG), + s.HIGH_WATER_DAILY_THRESHOLD + 1); + + Assert.equal(s._countsByDay.get(day1). + get(PROCESS_TYPE_PLUGIN + "-" + CRASH_TYPE_CRASH), + s.HIGH_WATER_DAILY_THRESHOLD + 1); + Assert.equal(s._countsByDay.get(day1). + get(PROCESS_TYPE_PLUGIN + "-" + CRASH_TYPE_HANG), + s.HIGH_WATER_DAILY_THRESHOLD + 1); +}); + +add_task(function* test_addSubmission() { + let s = yield getStore(); + + Assert.ok(s.addCrash(PROCESS_TYPE_MAIN, CRASH_TYPE_CRASH, "crash1", + DUMMY_DATE)); + + Assert.ok(s.addSubmissionAttempt("crash1", "sub1", DUMMY_DATE)); + + let crash = s.getCrash("crash1"); + let submission = crash.submissions.get("sub1"); + Assert.ok(!!submission); + Assert.equal(submission.requestDate.getTime(), DUMMY_DATE.getTime()); + Assert.equal(submission.responseDate, null); + Assert.equal(submission.result, null); + + Assert.ok(s.addSubmissionResult("crash1", "sub1", DUMMY_DATE_2, + SUBMISSION_RESULT_FAILED)); + + crash = s.getCrash("crash1"); + Assert.equal(crash.submissions.size, 1); + submission = crash.submissions.get("sub1"); + Assert.ok(!!submission); + Assert.equal(submission.requestDate.getTime(), DUMMY_DATE.getTime()); + Assert.equal(submission.responseDate.getTime(), DUMMY_DATE_2.getTime()); + Assert.equal(submission.result, SUBMISSION_RESULT_FAILED); + + 
Assert.ok(s.addSubmissionAttempt("crash1", "sub2", DUMMY_DATE)); + Assert.ok(s.addSubmissionResult("crash1", "sub2", DUMMY_DATE_2, + SUBMISSION_RESULT_OK)); + + Assert.equal(crash.submissions.size, 2); + submission = crash.submissions.get("sub2"); + Assert.ok(!!submission); + Assert.equal(submission.result, SUBMISSION_RESULT_OK); +}); + +add_task(function* test_setCrashClassification() { + let s = yield getStore(); + + Assert.ok(s.addCrash(PROCESS_TYPE_MAIN, CRASH_TYPE_CRASH, "crash1", + new Date())); + let classifications = s.crashes[0].classifications; + Assert.ok(!!classifications); + Assert.equal(classifications.length, 0); + + Assert.ok(s.setCrashClassifications("crash1", ["foo", "bar"])); + classifications = s.crashes[0].classifications; + Assert.equal(classifications.length, 2); + Assert.ok(classifications.indexOf("foo") != -1); + Assert.ok(classifications.indexOf("bar") != -1); +}); + +add_task(function* test_setRemoteCrashID() { + let s = yield getStore(); + + Assert.ok(s.addCrash(PROCESS_TYPE_MAIN, CRASH_TYPE_CRASH, "crash1", + new Date())); + Assert.equal(s.crashes[0].remoteID, null); + Assert.ok(s.setRemoteCrashID("crash1", "bp-1")); + Assert.equal(s.crashes[0].remoteID, "bp-1"); +}); + diff --git a/toolkit/components/crashes/tests/xpcshell/xpcshell.ini b/toolkit/components/crashes/tests/xpcshell/xpcshell.ini new file mode 100644 index 000000000..5cb8a69d5 --- /dev/null +++ b/toolkit/components/crashes/tests/xpcshell/xpcshell.ini @@ -0,0 +1,8 @@ +[DEFAULT] +head = +tail = +skip-if = toolkit == 'android' + +[test_crash_manager.js] +[test_crash_service.js] +[test_crash_store.js] |