summaryrefslogtreecommitdiffstats
path: root/toolkit/components/telemetry
diff options
context:
space:
mode:
authorMatt A. Tobin <mattatobin@localhost.localdomain>2018-02-02 04:16:08 -0500
committerMatt A. Tobin <mattatobin@localhost.localdomain>2018-02-02 04:16:08 -0500
commit5f8de423f190bbb79a62f804151bc24824fa32d8 (patch)
tree10027f336435511475e392454359edea8e25895d /toolkit/components/telemetry
parent49ee0794b5d912db1f95dce6eb52d781dc210db5 (diff)
downloadUXP-5f8de423f190bbb79a62f804151bc24824fa32d8.tar
UXP-5f8de423f190bbb79a62f804151bc24824fa32d8.tar.gz
UXP-5f8de423f190bbb79a62f804151bc24824fa32d8.tar.lz
UXP-5f8de423f190bbb79a62f804151bc24824fa32d8.tar.xz
UXP-5f8de423f190bbb79a62f804151bc24824fa32d8.zip
Add m-esr52 at 52.6.0
Diffstat (limited to 'toolkit/components/telemetry')
-rw-r--r--toolkit/components/telemetry/EventInfo.h52
-rw-r--r--toolkit/components/telemetry/Events.yaml68
-rw-r--r--toolkit/components/telemetry/GCTelemetry.jsm216
-rw-r--r--toolkit/components/telemetry/Histograms.json11002
-rw-r--r--toolkit/components/telemetry/Makefile.in17
-rw-r--r--toolkit/components/telemetry/ProcessedStack.h63
-rw-r--r--toolkit/components/telemetry/ScalarInfo.h27
-rw-r--r--toolkit/components/telemetry/Scalars.yaml298
-rw-r--r--toolkit/components/telemetry/Telemetry.cpp3076
-rw-r--r--toolkit/components/telemetry/Telemetry.h436
-rw-r--r--toolkit/components/telemetry/TelemetryArchive.jsm125
-rw-r--r--toolkit/components/telemetry/TelemetryCommon.cpp105
-rw-r--r--toolkit/components/telemetry/TelemetryCommon.h75
-rw-r--r--toolkit/components/telemetry/TelemetryComms.h84
-rw-r--r--toolkit/components/telemetry/TelemetryController.jsm954
-rw-r--r--toolkit/components/telemetry/TelemetryEnvironment.jsm1459
-rw-r--r--toolkit/components/telemetry/TelemetryEvent.cpp687
-rw-r--r--toolkit/components/telemetry/TelemetryEvent.h39
-rw-r--r--toolkit/components/telemetry/TelemetryHistogram.cpp2725
-rw-r--r--toolkit/components/telemetry/TelemetryHistogram.h104
-rw-r--r--toolkit/components/telemetry/TelemetryLog.jsm35
-rw-r--r--toolkit/components/telemetry/TelemetryReportingPolicy.jsm496
-rw-r--r--toolkit/components/telemetry/TelemetryScalar.cpp1896
-rw-r--r--toolkit/components/telemetry/TelemetryScalar.h64
-rw-r--r--toolkit/components/telemetry/TelemetrySend.jsm1114
-rw-r--r--toolkit/components/telemetry/TelemetrySession.jsm2124
-rw-r--r--toolkit/components/telemetry/TelemetryStartup.js49
-rw-r--r--toolkit/components/telemetry/TelemetryStartup.manifest4
-rw-r--r--toolkit/components/telemetry/TelemetryStopwatch.jsm335
-rw-r--r--toolkit/components/telemetry/TelemetryStorage.jsm1882
-rw-r--r--toolkit/components/telemetry/TelemetryTimestamps.jsm54
-rw-r--r--toolkit/components/telemetry/TelemetryUtils.jsm152
-rw-r--r--toolkit/components/telemetry/ThirdPartyCookieProbe.jsm181
-rw-r--r--toolkit/components/telemetry/ThreadHangStats.h230
-rw-r--r--toolkit/components/telemetry/UITelemetry.jsm235
-rw-r--r--toolkit/components/telemetry/WebrtcTelemetry.cpp112
-rw-r--r--toolkit/components/telemetry/WebrtcTelemetry.h43
-rw-r--r--toolkit/components/telemetry/datareporting-prefs.js12
-rw-r--r--toolkit/components/telemetry/docs/collection/custom-pings.rst74
-rw-r--r--toolkit/components/telemetry/docs/collection/histograms.rst5
-rw-r--r--toolkit/components/telemetry/docs/collection/index.rst35
-rw-r--r--toolkit/components/telemetry/docs/collection/measuring-time.rst74
-rw-r--r--toolkit/components/telemetry/docs/collection/scalars.rst140
-rw-r--r--toolkit/components/telemetry/docs/concepts/archiving.rst12
-rw-r--r--toolkit/components/telemetry/docs/concepts/crashes.rst23
-rw-r--r--toolkit/components/telemetry/docs/concepts/index.rst23
-rw-r--r--toolkit/components/telemetry/docs/concepts/pings.rst32
-rw-r--r--toolkit/components/telemetry/docs/concepts/sessions.rst40
-rw-r--r--toolkit/components/telemetry/docs/concepts/submission.rst34
-rw-r--r--toolkit/components/telemetry/docs/concepts/subsession_triggers.pngbin0 -> 1219295 bytes
-rw-r--r--toolkit/components/telemetry/docs/data/addons-malware-ping.rst42
-rw-r--r--toolkit/components/telemetry/docs/data/common-ping.rst42
-rw-r--r--toolkit/components/telemetry/docs/data/core-ping.rst191
-rw-r--r--toolkit/components/telemetry/docs/data/crash-ping.rst144
-rw-r--r--toolkit/components/telemetry/docs/data/deletion-ping.rst19
-rw-r--r--toolkit/components/telemetry/docs/data/environment.rst373
-rw-r--r--toolkit/components/telemetry/docs/data/heartbeat-ping.rst63
-rw-r--r--toolkit/components/telemetry/docs/data/index.rst18
-rw-r--r--toolkit/components/telemetry/docs/data/main-ping.rst609
-rw-r--r--toolkit/components/telemetry/docs/data/sync-ping.rst182
-rw-r--r--toolkit/components/telemetry/docs/data/uitour-ping.rst26
-rw-r--r--toolkit/components/telemetry/docs/fhr/architecture.rst226
-rw-r--r--toolkit/components/telemetry/docs/fhr/dataformat.rst1997
-rw-r--r--toolkit/components/telemetry/docs/fhr/identifiers.rst83
-rw-r--r--toolkit/components/telemetry/docs/fhr/index.rst34
-rw-r--r--toolkit/components/telemetry/docs/index.rst25
-rw-r--r--toolkit/components/telemetry/docs/internals/index.rst9
-rw-r--r--toolkit/components/telemetry/docs/internals/preferences.rst119
-rw-r--r--toolkit/components/telemetry/gen-event-data.py142
-rw-r--r--toolkit/components/telemetry/gen-event-enum.py73
-rw-r--r--toolkit/components/telemetry/gen-histogram-bucket-ranges.py52
-rw-r--r--toolkit/components/telemetry/gen-histogram-data.py178
-rw-r--r--toolkit/components/telemetry/gen-histogram-enum.py107
-rw-r--r--toolkit/components/telemetry/gen-scalar-data.py90
-rw-r--r--toolkit/components/telemetry/gen-scalar-enum.py56
-rw-r--r--toolkit/components/telemetry/healthreport-prefs.js10
-rw-r--r--toolkit/components/telemetry/histogram-whitelists.json1990
-rw-r--r--toolkit/components/telemetry/histogram_tools.py513
-rw-r--r--toolkit/components/telemetry/moz.build130
-rw-r--r--toolkit/components/telemetry/nsITelemetry.idl469
-rw-r--r--toolkit/components/telemetry/parse_events.py271
-rw-r--r--toolkit/components/telemetry/parse_scalars.py262
-rw-r--r--toolkit/components/telemetry/schemas/core.schema.json41
-rw-r--r--toolkit/components/telemetry/shared_telemetry_utils.py103
-rw-r--r--toolkit/components/telemetry/tests/addons/dictionary/install.rdf25
-rw-r--r--toolkit/components/telemetry/tests/addons/experiment/install.rdf16
-rw-r--r--toolkit/components/telemetry/tests/addons/extension-2/install.rdf16
-rw-r--r--toolkit/components/telemetry/tests/addons/extension/install.rdf16
-rw-r--r--toolkit/components/telemetry/tests/addons/long-fields/install.rdf24
-rw-r--r--toolkit/components/telemetry/tests/addons/restartless/install.rdf24
-rw-r--r--toolkit/components/telemetry/tests/addons/signed/META-INF/manifest.mf7
-rw-r--r--toolkit/components/telemetry/tests/addons/signed/META-INF/mozilla.rsabin0 -> 4190 bytes
-rw-r--r--toolkit/components/telemetry/tests/addons/signed/META-INF/mozilla.sf4
-rw-r--r--toolkit/components/telemetry/tests/addons/signed/install.rdf24
-rw-r--r--toolkit/components/telemetry/tests/addons/system/install.rdf24
-rw-r--r--toolkit/components/telemetry/tests/addons/theme/install.rdf16
-rw-r--r--toolkit/components/telemetry/tests/browser/browser.ini5
-rw-r--r--toolkit/components/telemetry/tests/browser/browser_TelemetryGC.js193
-rw-r--r--toolkit/components/telemetry/tests/search/chrome.manifest3
-rw-r--r--toolkit/components/telemetry/tests/search/searchTest.jarbin0 -> 867 bytes
-rw-r--r--toolkit/components/telemetry/tests/unit/.eslintrc.js7
-rw-r--r--toolkit/components/telemetry/tests/unit/TelemetryArchiveTesting.jsm86
-rw-r--r--toolkit/components/telemetry/tests/unit/engine.xml7
-rw-r--r--toolkit/components/telemetry/tests/unit/head.js319
-rw-r--r--toolkit/components/telemetry/tests/unit/test_ChildHistograms.js107
-rw-r--r--toolkit/components/telemetry/tests/unit/test_PingAPI.js502
-rw-r--r--toolkit/components/telemetry/tests/unit/test_SubsessionChaining.js236
-rw-r--r--toolkit/components/telemetry/tests/unit/test_TelemetryController.js507
-rw-r--r--toolkit/components/telemetry/tests/unit/test_TelemetryControllerBuildID.js70
-rw-r--r--toolkit/components/telemetry/tests/unit/test_TelemetryControllerShutdown.js70
-rw-r--r--toolkit/components/telemetry/tests/unit/test_TelemetryController_idle.js73
-rw-r--r--toolkit/components/telemetry/tests/unit/test_TelemetryEnvironment.js1528
-rw-r--r--toolkit/components/telemetry/tests/unit/test_TelemetryEvents.js249
-rw-r--r--toolkit/components/telemetry/tests/unit/test_TelemetryFlagClear.js14
-rw-r--r--toolkit/components/telemetry/tests/unit/test_TelemetryLateWrites.js127
-rw-r--r--toolkit/components/telemetry/tests/unit/test_TelemetryLockCount.js53
-rw-r--r--toolkit/components/telemetry/tests/unit/test_TelemetryLog.js51
-rw-r--r--toolkit/components/telemetry/tests/unit/test_TelemetryReportingPolicy.js268
-rw-r--r--toolkit/components/telemetry/tests/unit/test_TelemetryScalars.js574
-rw-r--r--toolkit/components/telemetry/tests/unit/test_TelemetrySend.js427
-rw-r--r--toolkit/components/telemetry/tests/unit/test_TelemetrySendOldPings.js547
-rw-r--r--toolkit/components/telemetry/tests/unit/test_TelemetrySession.js2029
-rw-r--r--toolkit/components/telemetry/tests/unit/test_TelemetryStopwatch.js156
-rw-r--r--toolkit/components/telemetry/tests/unit/test_TelemetryTimestamps.js77
-rw-r--r--toolkit/components/telemetry/tests/unit/test_ThreadHangStats.js102
-rw-r--r--toolkit/components/telemetry/tests/unit/test_nsITelemetry.js883
-rw-r--r--toolkit/components/telemetry/tests/unit/xpcshell.ini63
127 files changed, 49340 insertions, 0 deletions
diff --git a/toolkit/components/telemetry/EventInfo.h b/toolkit/components/telemetry/EventInfo.h
new file mode 100644
index 000000000..b8934e2c4
--- /dev/null
+++ b/toolkit/components/telemetry/EventInfo.h
@@ -0,0 +1,52 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2; -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef TelemetryEventInfo_h__
+#define TelemetryEventInfo_h__
+
+// This module is internal to Telemetry. The structures here hold data that
+// describe events.
+// It should only be used by TelemetryEventData.h and TelemetryEvent.cpp.
+//
+// For the public interface to Telemetry functionality, see Telemetry.h.
+
+namespace {
+
+struct CommonEventInfo {
+ // Indices for the category and expiration strings.
+ uint32_t category_offset;
+ uint32_t expiration_version_offset;
+
+ // The index and count for the extra key offsets in the extra table.
+ uint32_t extra_index;
+ uint32_t extra_count;
+
+ // The day since UNIX epoch that this probe expires on.
+ uint32_t expiration_day;
+
+ // The dataset this event is recorded in.
+ uint32_t dataset;
+
+ // Convenience functions for accessing event strings.
+ const char* expiration_version() const;
+ const char* category() const;
+ const char* extra_key(uint32_t index) const;
+};
+
+struct EventInfo {
+ // The corresponding CommonEventInfo.
+ const CommonEventInfo& common_info;
+
+ // Indices for the method & object strings.
+ uint32_t method_offset;
+ uint32_t object_offset;
+
+ const char* method() const;
+ const char* object() const;
+};
+
+} // namespace
+
+#endif // TelemetryEventInfo_h__
diff --git a/toolkit/components/telemetry/Events.yaml b/toolkit/components/telemetry/Events.yaml
new file mode 100644
index 000000000..750a13914
--- /dev/null
+++ b/toolkit/components/telemetry/Events.yaml
@@ -0,0 +1,68 @@
+navigation:
+- methods: ["search"]
+ objects: ["about_home", "about_newtab", "contextmenu", "oneoff",
+ "suggestion", "alias", "enter", "searchbar", "urlbar"]
+ release_channel_collection: opt-in
+ description: >
+ This is recorded on each search navigation.
+ The value field records the action used to trigger the search:
+ "enter", "oneoff", "suggestion", "alias", null (for contextmenu)
+ bug_numbers: [1316281]
+ notification_emails: ["past@mozilla.com"]
+ expiry_version: "58.0"
+ extra_keys:
+ engine: The id of the search engine used.
+
+# This category contains event entries used for Telemetry tests.
+# They will not be sent out with any pings.
+telemetry.test:
+- methods: ["test1", "test2"]
+ objects: ["object1", "object2"]
+ bug_numbers: [1286606]
+ notification_emails: ["telemetry-client-dev@mozilla.com"]
+ description: This is a test entry for Telemetry.
+ expiry_date: never
+ extra_keys:
+ key1: This is just a test description.
+ key2: This is another test description.
+- methods: ["optout"]
+ objects: ["object1", "object2"]
+ bug_numbers: [1286606]
+ notification_emails: ["telemetry-client-dev@mozilla.com"]
+ description: This is an opt-out test entry.
+ expiry_date: never
+ release_channel_collection: opt-out
+ extra_keys:
+ key1: This is just a test description.
+- methods: ["expired_version"]
+ objects: ["object1", "object2"]
+ bug_numbers: [1286606]
+ notification_emails: ["telemetry-client-dev@mozilla.com"]
+ description: This is a test entry with an expired version.
+ expiry_version: "3.6"
+- methods: ["expired_date"]
+ objects: ["object1", "object2"]
+ bug_numbers: [1286606]
+ notification_emails: ["telemetry-client-dev@mozilla.com"]
+ description: This is a test entry with an expired date.
+ expiry_date: 2014-01-28
+- methods: ["not_expired_optout"]
+ objects: ["object1"]
+ bug_numbers: [1286606]
+ notification_emails: ["telemetry-client-dev@mozilla.com"]
+ description: This is an opt-out test entry with unexpired date and version.
+ release_channel_collection: opt-out
+ expiry_date: 2099-01-01
+ expiry_version: "999.0"
+
+# This is a secondary category used for Telemetry tests.
+# The events here will not be sent out with any pings.
+telemetry.test.second:
+- methods: ["test"]
+ objects: ["object1", "object2", "object3"]
+ bug_numbers: [1286606]
+ notification_emails: ["telemetry-client-dev@mozilla.com"]
+ description: This is a test entry for Telemetry.
+ expiry_date: never
+ extra_keys:
+ key1: This is just a test description.
diff --git a/toolkit/components/telemetry/GCTelemetry.jsm b/toolkit/components/telemetry/GCTelemetry.jsm
new file mode 100644
index 000000000..43a4ea9ca
--- /dev/null
+++ b/toolkit/components/telemetry/GCTelemetry.jsm
@@ -0,0 +1,216 @@
+/* -*- js-indent-level: 2; indent-tabs-mode: nil -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+"use strict";
+
+/**
+ * This module records detailed timing information about selected
+ * GCs. The data is sent back in the telemetry session ping. To avoid
+ * bloating the ping, only a few GCs are included. There are two
+ * selection strategies. We always save the five GCs with the worst
+ * max_pause time. Additionally, five collections are selected at
+ * random. If a GC runs for C milliseconds and the total time for all
+ * GCs since the session began is T milliseconds, then the GC has a
+ * 5*C/T probablility of being selected (the factor of 5 is because we
+ * save 5 of them).
+ *
+ * GCs from both the main process and all content processes are
+ * recorded. The data is cleared for each new subsession.
+ */
+
+const Cu = Components.utils;
+
+Cu.import("resource://gre/modules/Services.jsm", this);
+
+this.EXPORTED_SYMBOLS = ["GCTelemetry"];
+
+// Names of processes where we record GCs.
+const PROCESS_NAMES = ["main", "content"];
+
+// Should be the time we started up in milliseconds since the epoch.
+const BASE_TIME = Date.now() - Services.telemetry.msSinceProcessStart();
+
+// Records selected GCs. There is one instance per process type.
+class GCData {
+ constructor(kind) {
+ let numRandom = {main: 0, content: 2};
+ let numWorst = {main: 2, content: 2};
+
+ this.totalGCTime = 0;
+ this.randomlySelected = Array(numRandom[kind]).fill(null);
+ this.worst = Array(numWorst[kind]).fill(null);
+ }
+
+ // Turn absolute timestamps (in microseconds since the epoch) into
+ // milliseconds since startup.
+ rebaseTimes(data) {
+ function fixup(t) {
+ return t / 1000.0 - BASE_TIME;
+ }
+
+ data.timestamp = fixup(data.timestamp);
+
+ for (let i = 0; i < data.slices.length; i++) {
+ let slice = data.slices[i];
+ slice.start_timestamp = fixup(slice.start_timestamp);
+ slice.end_timestamp = fixup(slice.end_timestamp);
+ }
+ }
+
+ // Records a GC (represented by |data|) in the randomlySelected or
+ // worst batches depending on the criteria above.
+ record(data) {
+ this.rebaseTimes(data);
+
+ let time = data.total_time;
+ this.totalGCTime += time;
+
+ // Probability that we will replace any one of our
+ // current randomlySelected GCs with |data|.
+ let prob = time / this.totalGCTime;
+
+ // Note that we may replace multiple GCs in
+ // randomlySelected. It's easier to reason about the
+ // probabilities this way, and it's unlikely to have any effect in
+ // practice.
+ for (let i = 0; i < this.randomlySelected.length; i++) {
+ let r = Math.random();
+ if (r <= prob) {
+ this.randomlySelected[i] = data;
+ }
+ }
+
+ // Save the 5 worst GCs based on max_pause. A GC may appear in
+ // both worst and randomlySelected.
+ for (let i = 0; i < this.worst.length; i++) {
+ if (!this.worst[i]) {
+ this.worst[i] = data;
+ break;
+ }
+
+ if (this.worst[i].max_pause < data.max_pause) {
+ this.worst.splice(i, 0, data);
+ this.worst.length--;
+ break;
+ }
+ }
+ }
+
+ entries() {
+ return {
+ random: this.randomlySelected.filter(e => e !== null),
+ worst: this.worst.filter(e => e !== null),
+ };
+ }
+}
+
+// If you adjust any of the constants here (slice limit, number of keys, etc.)
+// make sure to update the JSON schema at:
+// https://github.com/mozilla-services/mozilla-pipeline-schemas/blob/master/telemetry/main.schema.json
+// You should also adjust browser_TelemetryGC.js.
+const MAX_GC_KEYS = 25;
+const MAX_SLICES = 4;
+const MAX_SLICE_KEYS = 15;
+const MAX_PHASES = 65;
+
+function limitProperties(obj, count) {
+ // If there are too many properties, just delete them all. We don't
+ // expect this ever to happen.
+ if (Object.keys(obj).length > count) {
+ for (let key of Object.keys(obj)) {
+ delete obj[key];
+ }
+ }
+}
+
+function limitSize(data) {
+ // Store the number of slices so we know if we lost any at the end.
+ data.num_slices = data.slices.length;
+
+ data.slices.sort((a, b) => b.pause - a.pause);
+
+ if (data.slices.length > MAX_SLICES) {
+ // Make sure we always keep the first slice since it has the
+ // reason the GC was started.
+ let firstSliceIndex = data.slices.findIndex(s => s.slice == 0);
+ if (firstSliceIndex >= MAX_SLICES) {
+ data.slices[MAX_SLICES - 1] = data.slices[firstSliceIndex];
+ }
+
+ data.slices.length = MAX_SLICES;
+ }
+
+ data.slices.sort((a, b) => a.slice - b.slice);
+
+ limitProperties(data, MAX_GC_KEYS);
+
+ for (let slice of data.slices) {
+ limitProperties(slice, MAX_SLICE_KEYS);
+ limitProperties(slice.times, MAX_PHASES);
+ }
+
+ limitProperties(data.totals, MAX_PHASES);
+}
+
+let processData = new Map();
+for (let name of PROCESS_NAMES) {
+ processData.set(name, new GCData(name));
+}
+
+var GCTelemetry = {
+ initialized: false,
+
+ init() {
+ if (this.initialized) {
+ return false;
+ }
+
+ this.initialized = true;
+ Services.obs.addObserver(this, "garbage-collection-statistics", false);
+
+ if (Services.appinfo.processType == Services.appinfo.PROCESS_TYPE_DEFAULT) {
+ Services.ppmm.addMessageListener("Telemetry:GCStatistics", this);
+ }
+
+ return true;
+ },
+
+ shutdown() {
+ if (!this.initialized) {
+ return;
+ }
+
+ Services.obs.removeObserver(this, "garbage-collection-statistics");
+
+ if (Services.appinfo.processType == Services.appinfo.PROCESS_TYPE_DEFAULT) {
+ Services.ppmm.removeMessageListener("Telemetry:GCStatistics", this);
+ }
+ this.initialized = false;
+ },
+
+ observe(subject, topic, arg) {
+ let data = JSON.parse(arg);
+
+ limitSize(data);
+
+ if (Services.appinfo.processType == Services.appinfo.PROCESS_TYPE_DEFAULT) {
+ processData.get("main").record(data);
+ } else {
+ Services.cpmm.sendAsyncMessage("Telemetry:GCStatistics", data);
+ }
+ },
+
+ receiveMessage(msg) {
+ processData.get("content").record(msg.data);
+ },
+
+ entries(kind, clear) {
+ let result = processData.get(kind).entries();
+ if (clear) {
+ processData.set(kind, new GCData(kind));
+ }
+ return result;
+ },
+};
diff --git a/toolkit/components/telemetry/Histograms.json b/toolkit/components/telemetry/Histograms.json
new file mode 100644
index 000000000..aa66fbe14
--- /dev/null
+++ b/toolkit/components/telemetry/Histograms.json
@@ -0,0 +1,11002 @@
+
+{
+ "A11Y_INSTANTIATED_FLAG": {
+ "expires_in_version": "never",
+ "kind": "flag",
+ "description": "has accessibility support been instantiated"
+ },
+ "A11Y_CONSUMERS": {
+ "expires_in_version": "default",
+ "kind": "enumerated",
+ "n_values": 11,
+ "description": "Accessibility client by enum id"
+ },
+ "A11Y_ISIMPLEDOM_USAGE_FLAG": {
+ "expires_in_version": "default",
+ "kind": "flag",
+ "description": "have the ISimpleDOM* accessibility interfaces been used"
+ },
+ "A11Y_IATABLE_USAGE_FLAG": {
+ "expires_in_version": "default",
+ "kind": "flag",
+ "description": "has the IAccessibleTable accessibility interface been used"
+ },
+ "A11Y_UPDATE_TIME": {
+ "expires_in_version": "default",
+ "kind": "exponential",
+ "high": 10000,
+ "n_buckets": 50,
+ "description": "time spent updating accessibility (ms)"
+ },
+ "ADDON_MANAGER_UPGRADE_UI_SHOWN": {
+ "expires_in_version": "53",
+ "kind": "count",
+ "description": "Recorded when the addon manager shows the modal upgrade UI. Should only be recorded once per upgrade.",
+ "releaseChannelCollection": "opt-out",
+ "bug_numbers": [1268548],
+ "alert_emails": ["kev@mozilla.com"]
+ },
+ "ADDON_SHIM_USAGE": {
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 15,
+ "keyed": true,
+ "description": "Reasons why add-on shims were used, keyed by add-on ID."
+ },
+ "ADDON_FORBIDDEN_CPOW_USAGE": {
+ "expires_in_version": "never",
+ "kind": "count",
+ "keyed": true,
+ "description": "Counts the number of times a given add-on used CPOWs when it was marked as e10s compatible.",
+ "bug_numbers": [1214824],
+ "alert_emails": ["wmccloskey@mozilla.com"]
+ },
+ "BROWSER_SHIM_USAGE_BLOCKED": {
+ "expires_in_version": "never",
+ "kind": "count",
+ "description": "Counts the number of times a CPOW shim was blocked from being created by browser code.",
+ "releaseChannelCollection": "opt-out",
+ "bug_numbers": [1245901],
+ "alert_emails": ["benjamin@smedbergs.us"]
+ },
+ "APPLICATION_REPUTATION_SHOULD_BLOCK": {
+ "alert_emails": ["safebrowsing-telemetry@mozilla.org"],
+ "expires_in_version": "never",
+ "kind": "boolean",
+ "description": "Overall (local or remote) application reputation verdict (shouldBlock=false is OK)."
+ },
+ "APPLICATION_REPUTATION_LOCAL": {
+ "alert_emails": ["safebrowsing-telemetry@mozilla.org"],
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 3,
+ "description": "Application reputation local results (0=ALLOW, 1=BLOCK, 2=NONE)"
+ },
+ "APPLICATION_REPUTATION_SERVER": {
+ "alert_emails": ["safebrowsing-telemetry@mozilla.org"],
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 3,
+ "description": "Status of the application reputation remote lookup (0=OK, 1=failed, 2=invalid protobuf response)"
+ },
+ "APPLICATION_REPUTATION_SERVER_VERDICT": {
+ "alert_emails": ["safebrowsing-telemetry@mozilla.org"],
+ "expires_in_version": "56",
+ "releaseChannelCollection": "opt-out",
+ "bug_numbers": [1272788],
+ "kind": "enumerated",
+ "n_values": 8,
+ "description": "Application reputation remote verdict (0=SAFE, 1=DANGEROUS, 2=UNCOMMON, 3=POTENTIALLY_UNWANTED, 4=DANGEROUS_HOST, 5=UNKNOWN)"
+ },
+ "APPLICATION_REPUTATION_COUNT": {
+ "alert_emails": ["safebrowsing-telemetry@mozilla.org"],
+ "expires_in_version": "never",
+ "kind": "boolean",
+ "description": "Application reputation query count (both local and remote)"
+ },
+ "APPLICATION_REPUTATION_REMOTE_LOOKUP_TIMEOUT": {
+ "alert_emails": ["safebrowsing-telemetry@mozilla.org"],
+ "expires_in_version": "56",
+ "kind": "boolean",
+ "bug_numbers": [1172689],
+ "description": "Recorded when application reputation remote lookup is performed, `true` is recorded if the lookup times out."
+ },
+ "AUDIOSTREAM_FIRST_OPEN_MS": {
+ "expires_in_version": "50",
+ "kind": "exponential",
+ "high": 10000,
+ "n_buckets": 50,
+ "description": "The length of time (in milliseconds) for the first open of AudioStream."
+ },
+ "AUDIOSTREAM_LATER_OPEN_MS": {
+ "expires_in_version": "50",
+ "kind": "exponential",
+ "high": 10000,
+ "n_buckets": 50,
+ "description": "The length of time (in milliseconds) for the subsequent opens of AudioStream."
+ },
+ "AUDIOSTREAM_BACKEND_USED": {
+ "alert_emails": ["padenot@mozilla.com", "kinetik@flim.org"],
+ "bug_numbers": [1280630],
+ "expires_in_version": "55",
+ "kind": "enumerated",
+ "n_values": 16,
+ "description": "The operating system audio back-end used when successfully opening an audio stream, or whether the failure occurred on the first try or not <https://dxr.mozilla.org/mozilla-central/search?q=AUDIOSTREAM_BACKEND_ID_STR>",
+ "releaseChannelCollection": "opt-out"
+ },
+ "AUSHELPER_CPU_ERROR_CODE": {
+ "alert_emails": ["application-update-telemetry-alerts@mozilla.com"],
+ "bug_numbers": [1296630],
+ "expires_in_version": "60",
+ "kind": "enumerated",
+ "n_values": 16,
+ "releaseChannelCollection": "opt-out",
+ "description": "The error code from the aushelper system add-on when querying the registry for CPU information for bug 1296630 (see browser/extensions/aushelper/bootstrap.js)."
+ },
+ "AUSHELPER_CPU_RESULT_CODE": {
+ "alert_emails": ["application-update-telemetry-alerts@mozilla.com"],
+ "bug_numbers": [1296630],
+ "expires_in_version": "60",
+ "kind": "enumerated",
+ "n_values": 5,
+ "releaseChannelCollection": "opt-out",
+ "description": "Whether the system is affected by bug 1296630 (1=No, 2=Yes, 3=Error, and 4=Unknown)."
+ },
+ "AUSHELPER_WEBSENSE_ERROR_CODE": {
+ "alert_emails": ["application-update-telemetry-alerts@mozilla.com"],
+ "bug_numbers": [1305847],
+ "expires_in_version": "60",
+ "kind": "enumerated",
+ "n_values": 8,
+ "releaseChannelCollection": "opt-out",
+ "description": "The error code from the aushelper system add-on when gathering information on Websense (see browser/extensions/aushelper/bootstrap.js)."
+ },
+ "AUSHELPER_WEBSENSE_REG_EXISTS": {
+ "alert_emails": ["application-update-telemetry-alerts@mozilla.com"],
+ "bug_numbers": [1305847],
+ "expires_in_version": "60",
+ "kind": "boolean",
+ "releaseChannelCollection": "opt-out",
+ "description": "Whether the system has a Websense InstallVersion registry value (see browser/extensions/aushelper/bootstrap.js)."
+ },
+ "BACKGROUNDFILESAVER_THREAD_COUNT": {
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 21,
+ "description": "Maximum number of concurrent threads reached during a given download session"
+ },
+ "BLOCKLIST_SYNC_FILE_LOAD": {
+ "alert_emails": ["rvitillo@mozilla.com"],
+ "expires_in_version": "35",
+ "kind": "boolean",
+ "description": "blocklist.xml has been loaded synchronously *** No longer needed (bug 1156565). Delete histogram and accumulation code! ***"
+ },
+ "CHECKERBOARD_DURATION": {
+ "alert_emails": ["kgupta@mozilla.com"],
+ "bug_numbers": [1238040],
+ "expires_in_version": "55",
+ "kind": "exponential",
+ "high": 100000,
+ "n_buckets": 50,
+ "description": "Duration of a checkerboard event in milliseconds"
+ },
+ "CHECKERBOARD_PEAK": {
+ "alert_emails": ["kgupta@mozilla.com"],
+ "bug_numbers": [1238040],
+ "expires_in_version": "55",
+ "kind": "exponential",
+ "high": 66355200,
+ "n_buckets": 50,
+ "description": "Peak number of CSS pixels checkerboarded during a checkerboard event (the high value is the size of a 4k display with max APZ zooming)"
+ },
+ "CHECKERBOARD_POTENTIAL_DURATION": {
+ "alert_emails": ["kgupta@mozilla.com"],
+ "bug_numbers": [1238040],
+ "expires_in_version": "55",
+ "kind": "exponential",
+ "high": 1000000,
+ "n_buckets": 50,
+ "description": "Duration of a chunk of time (in ms) that could reasonably have had checkerboarding"
+ },
+ "CHECKERBOARD_SEVERITY": {
+ "alert_emails": ["kgupta@mozilla.com"],
+ "bug_numbers": [1238040],
+ "expires_in_version": "55",
+ "kind": "exponential",
+ "high": 1073741824,
+ "n_buckets": 50,
+ "description": "Opaque measure of the severity of a checkerboard event"
+ },
+ "COMPOSITE_TIME" : {
+ "expires_in_version": "never",
+ "description": "Composite times in milliseconds",
+ "kind": "exponential",
+ "high": 1000,
+ "n_buckets": 50
+ },
+ "COMPOSITE_FRAME_ROUNDTRIP_TIME" : {
+ "expires_in_version": "never",
+ "description": "Time from vsync to finishing a composite in milliseconds.",
+ "kind": "exponential",
+ "high": 1000,
+ "n_buckets": 50
+ },
+ "CONTENT_RESPONSE_DURATION" : {
+ "alert_emails": ["kgupta@mozilla.com"],
+ "bug_numbers": [1261373],
+ "expires_in_version": "55",
+ "description": "Main thread response times for APZ notifications about input events (ms)",
+ "kind" : "exponential",
+ "high": 60000,
+ "n_buckets": 50
+ },
+ "CREATE_EVENT_BEFOREUNLOADEVENT" : {
+ "alert_emails": ["ayg@aryeh.name"],
+ "description": "Was document.createEvent(\"beforeunloadevent\") ever called",
+ "expires_in_version": "56",
+ "kind": "count",
+ "bug_numbers": [1295588, 1251198]
+ },
+ "CREATE_EVENT_COMMANDEVENT" : {
+ "alert_emails": ["ayg@aryeh.name"],
+ "description": "Was document.createEvent(\"commandevent\") ever called",
+ "expires_in_version": "56",
+ "kind": "count",
+ "bug_numbers": [1295588, 1251198]
+ },
+ "CREATE_EVENT_COMMANDEVENTS" : {
+ "alert_emails": ["ayg@aryeh.name"],
+ "description": "Was document.createEvent(\"commandevents\") ever called",
+ "expires_in_version": "56",
+ "kind": "count",
+ "bug_numbers": [1295588, 1251198]
+ },
+ "CREATE_EVENT_COMPOSITIONEVENT" : {
+ "alert_emails": ["ayg@aryeh.name"],
+ "description": "Was document.createEvent(\"compositionevent\") ever called",
+ "expires_in_version": "56",
+ "kind": "count",
+ "bug_numbers": [1295588, 1251198]
+ },
+ "CREATE_EVENT_CUSTOMEVENT" : {
+ "alert_emails": ["ayg@aryeh.name"],
+ "description": "Was document.createEvent(\"customevent\") ever called",
+ "expires_in_version": "56",
+ "kind": "count",
+ "bug_numbers": [1295588, 1251198]
+ },
+ "CREATE_EVENT_DATACONTAINEREVENT" : {
+ "alert_emails": ["ayg@aryeh.name"],
+ "description": "Was document.createEvent(\"datacontainerevent\") ever called",
+ "expires_in_version": "56",
+ "kind": "count",
+ "bug_numbers": [1295588, 1251198]
+ },
+ "CREATE_EVENT_DATACONTAINEREVENTS" : {
+ "alert_emails": ["ayg@aryeh.name"],
+ "description": "Was document.createEvent(\"datacontainerevents\") ever called",
+ "expires_in_version": "56",
+ "kind": "count",
+ "bug_numbers": [1295588, 1251198]
+ },
+ "CREATE_EVENT_DEVICEMOTIONEVENT" : {
+ "alert_emails": ["ayg@aryeh.name"],
+ "description": "Was document.createEvent(\"devicemotionevent\") ever called",
+ "expires_in_version": "56",
+ "kind": "count",
+ "bug_numbers": [1295588, 1251198]
+ },
+ "CREATE_EVENT_DEVICEORIENTATIONEVENT" : {
+ "alert_emails": ["ayg@aryeh.name"],
+ "description": "Was document.createEvent(\"deviceorientationevent\") ever called",
+ "expires_in_version": "56",
+ "kind": "count",
+ "bug_numbers": [1295588, 1251198]
+ },
+ "CREATE_EVENT_DRAGEVENT" : {
+ "alert_emails": ["ayg@aryeh.name"],
+ "description": "Was document.createEvent(\"dragevent\") ever called",
+ "expires_in_version": "56",
+ "kind": "count",
+ "bug_numbers": [1295588, 1251198]
+ },
+ "CREATE_EVENT_DRAGEVENTS" : {
+ "alert_emails": ["ayg@aryeh.name"],
+ "description": "Was document.createEvent(\"dragevents\") ever called",
+ "expires_in_version": "56",
+ "kind": "count",
+ "bug_numbers": [1295588, 1251198]
+ },
+ "CREATE_EVENT_EVENT" : {
+ "alert_emails": ["ayg@aryeh.name"],
+ "description": "Was document.createEvent(\"event\") ever called",
+ "expires_in_version": "56",
+ "kind": "count",
+ "bug_numbers": [1295588, 1251198]
+ },
+ "CREATE_EVENT_EVENTS" : {
+ "alert_emails": ["ayg@aryeh.name"],
+ "description": "Was document.createEvent(\"events\") ever called",
+ "expires_in_version": "56",
+ "kind": "count",
+ "bug_numbers": [1295588, 1251198]
+ },
+ "CREATE_EVENT_HASHCHANGEEVENT" : {
+ "alert_emails": ["ayg@aryeh.name"],
+ "description": "Was document.createEvent(\"hashchangeevent\") ever called",
+ "expires_in_version": "56",
+ "kind": "count",
+ "bug_numbers": [1295588, 1251198]
+ },
+ "CREATE_EVENT_HTMLEVENTS" : {
+ "alert_emails": ["ayg@aryeh.name"],
+ "description": "Was document.createEvent(\"htmlevents\") ever called",
+ "expires_in_version": "56",
+ "kind": "count",
+ "bug_numbers": [1295588, 1251198]
+ },
+ "CREATE_EVENT_KEYBOARDEVENT" : {
+ "alert_emails": ["ayg@aryeh.name"],
+ "description": "Was document.createEvent(\"keyboardevent\") ever called",
+ "expires_in_version": "56",
+ "kind": "count",
+ "bug_numbers": [1295588, 1251198]
+ },
+ "CREATE_EVENT_KEYEVENTS" : {
+ "alert_emails": ["ayg@aryeh.name"],
+ "description": "Was document.createEvent(\"keyevents\") ever called",
+ "expires_in_version": "56",
+ "kind": "count",
+ "bug_numbers": [1295588, 1251198]
+ },
+ "CREATE_EVENT_MESSAGEEVENT" : {
+ "alert_emails": ["ayg@aryeh.name"],
+ "description": "Was document.createEvent(\"messageevent\") ever called",
+ "expires_in_version": "56",
+ "kind": "count",
+ "bug_numbers": [1295588, 1251198]
+ },
+ "CREATE_EVENT_MOUSEEVENT" : {
+ "alert_emails": ["ayg@aryeh.name"],
+ "description": "Was document.createEvent(\"mouseevent\") ever called",
+ "expires_in_version": "56",
+ "kind": "count",
+ "bug_numbers": [1295588, 1251198]
+ },
+ "CREATE_EVENT_MOUSEEVENTS" : {
+ "alert_emails": ["ayg@aryeh.name"],
+ "description": "Was document.createEvent(\"mouseevents\") ever called",
+ "expires_in_version": "56",
+ "kind": "count",
+ "bug_numbers": [1295588, 1251198]
+ },
+ "CREATE_EVENT_MOUSESCROLLEVENTS" : {
+ "alert_emails": ["ayg@aryeh.name"],
+ "description": "Was document.createEvent(\"mousescrollevents\") ever called",
+ "expires_in_version": "56",
+ "kind": "count",
+ "bug_numbers": [1295588, 1251198]
+ },
+ "CREATE_EVENT_MUTATIONEVENT" : {
+ "alert_emails": ["ayg@aryeh.name"],
+ "description": "Was document.createEvent(\"mutationevent\") ever called",
+ "expires_in_version": "56",
+ "kind": "count",
+ "bug_numbers": [1295588, 1251198]
+ },
+ "CREATE_EVENT_MUTATIONEVENTS" : {
+ "alert_emails": ["ayg@aryeh.name"],
+ "description": "Was document.createEvent(\"mutationevents\") ever called",
+ "expires_in_version": "56",
+ "kind": "count",
+ "bug_numbers": [1295588, 1251198]
+ },
+ "CREATE_EVENT_NOTIFYPAINTEVENT" : {
+ "alert_emails": ["ayg@aryeh.name"],
+ "description": "Was document.createEvent(\"notifypaintevent\") ever called",
+ "expires_in_version": "56",
+ "kind": "count",
+ "bug_numbers": [1295588, 1251198]
+ },
+ "CREATE_EVENT_PAGETRANSITION" : {
+ "alert_emails": ["ayg@aryeh.name"],
+ "description": "Was document.createEvent(\"pagetransition\") ever called",
+ "expires_in_version": "56",
+ "kind": "count",
+ "bug_numbers": [1295588, 1251198]
+ },
+ "CREATE_EVENT_POPSTATEEVENT" : {
+ "alert_emails": ["ayg@aryeh.name"],
+ "description": "Was document.createEvent(\"popstateevent\") ever called",
+ "expires_in_version": "56",
+ "kind": "count",
+ "bug_numbers": [1295588, 1251198]
+ },
+ "CREATE_EVENT_POPUPEVENTS" : {
+ "alert_emails": ["ayg@aryeh.name"],
+ "description": "Was document.createEvent(\"popupevents\") ever called",
+ "expires_in_version": "56",
+ "kind": "count",
+ "bug_numbers": [1295588, 1251198]
+ },
+ "CREATE_EVENT_SCROLLAREAEVENT" : {
+ "alert_emails": ["ayg@aryeh.name"],
+ "description": "Was document.createEvent(\"scrollareaevent\") ever called",
+ "expires_in_version": "56",
+ "kind": "count",
+ "bug_numbers": [1295588, 1251198]
+ },
+ "CREATE_EVENT_SIMPLEGESTUREEVENT" : {
+ "alert_emails": ["ayg@aryeh.name"],
+ "description": "Was document.createEvent(\"simplegestureevent\") ever called",
+ "expires_in_version": "56",
+ "kind": "count",
+ "bug_numbers": [1295588, 1251198]
+ },
+ "CREATE_EVENT_STORAGEEVENT" : {
+ "alert_emails": ["ayg@aryeh.name"],
+ "description": "Was document.createEvent(\"storageevent\") ever called",
+ "expires_in_version": "56",
+ "kind": "count",
+ "bug_numbers": [1295588, 1251198]
+ },
+ "CREATE_EVENT_SVGEVENT" : {
+ "alert_emails": ["ayg@aryeh.name"],
+ "description": "Was document.createEvent(\"svgevent\") ever called",
+ "expires_in_version": "56",
+ "kind": "count",
+ "bug_numbers": [1295588, 1251198]
+ },
+ "CREATE_EVENT_SVGEVENTS" : {
+ "alert_emails": ["ayg@aryeh.name"],
+ "description": "Was document.createEvent(\"svgevents\") ever called",
+ "expires_in_version": "56",
+ "kind": "count",
+ "bug_numbers": [1295588, 1251198]
+ },
+ "CREATE_EVENT_SVGZOOMEVENT" : {
+ "alert_emails": ["ayg@aryeh.name"],
+ "description": "Was document.createEvent(\"svgzoomevent\") ever called",
+ "expires_in_version": "56",
+ "kind": "count",
+ "bug_numbers": [1295588, 1251198]
+ },
+ "CREATE_EVENT_SVGZOOMEVENTS" : {
+ "alert_emails": ["ayg@aryeh.name"],
+ "description": "Was document.createEvent(\"svgzoomevents\") ever called",
+ "expires_in_version": "56",
+ "kind": "count",
+ "bug_numbers": [1295588, 1251198]
+ },
+ "CREATE_EVENT_TEXTEVENT" : {
+ "alert_emails": ["ayg@aryeh.name"],
+ "description": "Was document.createEvent(\"textevent\") ever called",
+ "expires_in_version": "56",
+ "kind": "count",
+ "bug_numbers": [1295588, 1251198]
+ },
+ "CREATE_EVENT_TEXTEVENTS" : {
+ "alert_emails": ["ayg@aryeh.name"],
+ "description": "Was document.createEvent(\"textevents\") ever called",
+ "expires_in_version": "56",
+ "kind": "count",
+ "bug_numbers": [1295588, 1251198]
+ },
+ "CREATE_EVENT_TIMEEVENT" : {
+ "alert_emails": ["ayg@aryeh.name"],
+ "description": "Was document.createEvent(\"timeevent\") ever called",
+ "expires_in_version": "56",
+ "kind": "count",
+ "bug_numbers": [1295588, 1251198]
+ },
+ "CREATE_EVENT_TIMEEVENTS" : {
+ "alert_emails": ["ayg@aryeh.name"],
+ "description": "Was document.createEvent(\"timeevents\") ever called",
+ "expires_in_version": "56",
+ "kind": "count",
+ "bug_numbers": [1295588, 1251198]
+ },
+ "CREATE_EVENT_TOUCHEVENT" : {
+ "alert_emails": ["ayg@aryeh.name"],
+ "description": "Was document.createEvent(\"touchevent\") ever called",
+ "expires_in_version": "56",
+ "kind": "count",
+ "bug_numbers": [1295588, 1251198]
+ },
+ "CREATE_EVENT_UIEVENT" : {
+ "alert_emails": ["ayg@aryeh.name"],
+ "description": "Was document.createEvent(\"uievent\") ever called",
+ "expires_in_version": "56",
+ "kind": "count",
+ "bug_numbers": [1295588, 1251198]
+ },
+ "CREATE_EVENT_UIEVENTS" : {
+ "alert_emails": ["ayg@aryeh.name"],
+ "description": "Was document.createEvent(\"uievents\") ever called",
+ "expires_in_version": "56",
+ "kind": "count",
+ "bug_numbers": [1295588, 1251198]
+ },
+ "CREATE_EVENT_XULCOMMANDEVENT" : {
+ "alert_emails": ["ayg@aryeh.name"],
+ "description": "Was document.createEvent(\"xulcommandevent\") ever called",
+ "expires_in_version": "56",
+ "kind": "count",
+ "bug_numbers": [1295588, 1251198]
+ },
+ "CREATE_EVENT_XULCOMMANDEVENTS" : {
+ "alert_emails": ["ayg@aryeh.name"],
+ "description": "Was document.createEvent(\"xulcommandevents\") ever called",
+ "expires_in_version": "56",
+ "kind": "count",
+ "bug_numbers": [1295588, 1251198]
+ },
+ "CYCLE_COLLECTOR": {
+ "alert_emails": ["dev-telemetry-gc-alerts@mozilla.org"],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000,
+ "n_buckets": 50,
+ "description": "Time spent on one cycle collection (ms)"
+ },
+ "CYCLE_COLLECTOR_WORKER": {
+ "alert_emails": ["dev-telemetry-gc-alerts@mozilla.org"],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000,
+ "n_buckets": 50,
+ "description": "Time spent on one cycle collection in a worker (ms)"
+ },
+ "CYCLE_COLLECTOR_FULL": {
+ "alert_emails": ["dev-telemetry-gc-alerts@mozilla.org"],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000,
+ "n_buckets": 50,
+ "description": "Full pause time for one cycle collection, including preparation (ms)"
+ },
+ "CYCLE_COLLECTOR_MAX_PAUSE": {
+ "alert_emails": ["dev-telemetry-gc-alerts@mozilla.org"],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000,
+ "n_buckets": 50,
+ "description": "Longest pause for an individual slice of one cycle collection, including preparation (ms)"
+ },
+ "CYCLE_COLLECTOR_FINISH_IGC": {
+ "alert_emails": ["dev-telemetry-gc-alerts@mozilla.org"],
+ "expires_in_version": "never",
+ "kind": "boolean",
+ "description": "Cycle collection finished an incremental GC"
+ },
+ "CYCLE_COLLECTOR_SYNC_SKIPPABLE": {
+ "alert_emails": ["dev-telemetry-gc-alerts@mozilla.org"],
+ "expires_in_version": "never",
+ "kind": "boolean",
+ "description": "Cycle collection synchronously ran forget skippable"
+ },
+ "CYCLE_COLLECTOR_VISITED_REF_COUNTED": {
+ "alert_emails": ["dev-telemetry-gc-alerts@mozilla.org"],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 300000,
+ "n_buckets": 50,
+ "description": "Number of ref counted objects visited by the cycle collector"
+ },
+ "CYCLE_COLLECTOR_WORKER_VISITED_REF_COUNTED": {
+ "alert_emails": ["dev-telemetry-gc-alerts@mozilla.org"],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 300000,
+ "n_buckets": 50,
+ "description": "Number of ref counted objects visited by the cycle collector in a worker"
+ },
+ "CYCLE_COLLECTOR_VISITED_GCED": {
+ "alert_emails": ["dev-telemetry-gc-alerts@mozilla.org"],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 300000,
+ "n_buckets": 50,
+ "description": "Number of JS objects visited by the cycle collector"
+ },
+ "CYCLE_COLLECTOR_WORKER_VISITED_GCED": {
+ "alert_emails": ["dev-telemetry-gc-alerts@mozilla.org"],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 300000,
+ "n_buckets": 50,
+ "description": "Number of JS objects visited by the cycle collector in a worker"
+ },
+ "CYCLE_COLLECTOR_COLLECTED": {
+ "alert_emails": ["dev-telemetry-gc-alerts@mozilla.org"],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 100000,
+ "n_buckets": 50,
+ "description": "Number of objects collected by the cycle collector"
+ },
+ "CYCLE_COLLECTOR_WORKER_COLLECTED": {
+ "alert_emails": ["dev-telemetry-gc-alerts@mozilla.org"],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 100000,
+ "n_buckets": 50,
+ "description": "Number of objects collected by the cycle collector in a worker"
+ },
+ "CYCLE_COLLECTOR_NEED_GC": {
+ "alert_emails": ["dev-telemetry-gc-alerts@mozilla.org"],
+ "expires_in_version": "never",
+ "kind": "boolean",
+ "description": "Needed garbage collection before cycle collection."
+ },
+ "CYCLE_COLLECTOR_WORKER_NEED_GC": {
+ "alert_emails": ["dev-telemetry-gc-alerts@mozilla.org"],
+ "expires_in_version": "never",
+ "kind": "boolean",
+ "description": "Needed garbage collection before cycle collection in a worker."
+ },
+ "CYCLE_COLLECTOR_TIME_BETWEEN": {
+ "alert_emails": ["dev-telemetry-gc-alerts@mozilla.org"],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 120,
+ "n_buckets": 50,
+ "description": "Time spent in between cycle collections (seconds)"
+ },
+ "CYCLE_COLLECTOR_OOM": {
+ "alert_emails": ["dev-telemetry-gc-alerts@mozilla.org"],
+ "expires_in_version": "never",
+ "kind": "flag",
+ "description": "Set if the cycle collector ran out of memory at some point"
+ },
+ "CYCLE_COLLECTOR_WORKER_OOM": {
+ "alert_emails": ["dev-telemetry-gc-alerts@mozilla.org"],
+ "expires_in_version": "never",
+ "kind": "flag",
+ "description": "Set if the cycle collector in a worker ran out of memory at some point"
+ },
+ "CYCLE_COLLECTOR_ASYNC_SNOW_WHITE_FREEING": {
+ "alert_emails": ["dev-telemetry-gc-alerts@mozilla.org"],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000,
+ "n_buckets": 50,
+ "description": "Time spent on one asynchronous SnowWhite freeing (ms)"
+ },
+ "DEFERRED_FINALIZE_ASYNC": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000,
+ "n_buckets": 50,
+ "description": "Pause time for asynchronous deferred finalization (ms)"
+ },
+ "DEVICE_RESET_REASON": {
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 10,
+ "description": "GPU Device Reset Reason (ok, hung, removed, reset, internal error, invalid call, out of memory)"
+ },
+ "FAMILY_SAFETY": {
+ "alert_emails": ["seceng@mozilla.org"],
+ "expires_in_version": "55",
+ "kind": "enumerated",
+ "n_values": 16,
+ "bug_numbers": [1239166],
+ "description": "Status of Family Safety detection and remediation. See nsNSSComponent.cpp."
+ },
+ "FETCH_IS_MAINTHREAD": {
+ "expires_in_version": "50",
+ "kind": "boolean",
+ "description": "Was Fetch request initiated from the main thread?"
+ },
+ "FORCED_DEVICE_RESET_REASON": {
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 50,
+ "releaseChannelCollection": "opt-out",
+ "description": "GPU Forced Device Reset Reason (OpenSharedHandle)"
+ },
+ "FORGET_SKIPPABLE_MAX": {
+ "alert_emails": ["dev-telemetry-gc-alerts@mozilla.org"],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000,
+ "n_buckets": 50,
+ "description": "Max time spent on one forget skippable (ms)"
+ },
+ "FULLSCREEN_TRANSITION_BLACK_MS": {
+ "alert_emails": ["xquan@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "low": 100,
+ "high": 5000,
+ "n_buckets": 50,
+ "bug_numbers": [1271160],
+ "description": "The time spent in the fully-black screen in fullscreen transition"
+ },
+ "FULLSCREEN_CHANGE_MS": {
+ "alert_emails": ["xquan@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "low": 100,
+ "high": 5000,
+ "n_buckets": 50,
+ "bug_numbers": [1271160],
+ "description": "The time content uses to enter/exit fullscreen regardless of fullscreen transition timeout"
+ },
+ "GC_REASON_2": {
+ "alert_emails": ["dev-telemetry-gc-alerts@mozilla.org"],
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 100,
+ "description": "Reason (enum value) for initiating a GC"
+ },
+ "GC_IS_COMPARTMENTAL": {
+ "alert_emails": ["dev-telemetry-gc-alerts@mozilla.org"],
+ "expires_in_version": "never",
+ "kind": "boolean",
+ "description": "Is it a zone GC?"
+ },
+ "GC_MS": {
+ "alert_emails": ["dev-telemetry-gc-alerts@mozilla.org"],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000,
+ "n_buckets": 50,
+ "description": "Time spent running JS GC (ms)"
+ },
+ "GC_BUDGET_MS": {
+ "alert_emails": ["dev-telemetry-gc-alerts@mozilla.org"],
+ "expires_in_version": "never",
+ "kind": "linear",
+ "high": 100,
+ "n_buckets": 10,
+ "description": "Requested GC slice budget (ms)"
+ },
+ "GC_ANIMATION_MS": {
+ "alert_emails": ["dev-telemetry-gc-alerts@mozilla.org"],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000,
+ "n_buckets": 50,
+ "description": "Time spent running JS GC when animating (ms)"
+ },
+ "GC_MAX_PAUSE_MS": {
+ "alert_emails": ["dev-telemetry-gc-alerts@mozilla.org"],
+ "expires_in_version": "never",
+ "kind": "linear",
+ "high": 1000,
+ "n_buckets": 50,
+ "description": "Longest GC slice in a GC (ms)"
+ },
+ "GC_MARK_MS": {
+ "alert_emails": ["dev-telemetry-gc-alerts@mozilla.org"],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000,
+ "n_buckets": 50,
+ "description": "Time spent running JS GC mark phase (ms)"
+ },
+ "GC_SWEEP_MS": {
+ "alert_emails": ["dev-telemetry-gc-alerts@mozilla.org"],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000,
+ "n_buckets": 50,
+ "description": "Time spent running JS GC sweep phase (ms)"
+ },
+ "GC_COMPACT_MS": {
+ "alert_emails": ["dev-telemetry-gc-alerts@mozilla.org"],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000,
+ "n_buckets": 50,
+ "description": "Time spent running JS GC compact phase (ms)"
+ },
+ "GC_MARK_ROOTS_MS": {
+ "alert_emails": ["dev-telemetry-gc-alerts@mozilla.org"],
+ "expires_in_version": "never",
+ "kind": "linear",
+ "high": 200,
+ "n_buckets": 50,
+ "description": "Time spent marking GC roots (ms)"
+ },
+ "GC_MARK_GRAY_MS": {
+ "alert_emails": ["dev-telemetry-gc-alerts@mozilla.org"],
+ "expires_in_version": "never",
+ "kind": "linear",
+ "high": 200,
+ "n_buckets": 50,
+ "description": "Time spent marking gray GC objects (ms)"
+ },
+ "GC_SLICE_MS": {
+ "alert_emails": ["dev-telemetry-gc-alerts@mozilla.org"],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000,
+ "n_buckets": 50,
+ "description": "Time spent running a JS GC slice (ms)"
+ },
+ "GC_SLOW_PHASE": {
+ "alert_emails": ["dev-telemetry-gc-alerts@mozilla.org"],
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 75,
+ "description": "The longest phase in any slice that goes over 2x the budget"
+ },
+ "GC_MMU_50": {
+ "alert_emails": ["dev-telemetry-gc-alerts@mozilla.org"],
+ "expires_in_version": "never",
+ "kind": "linear",
+ "high": 100,
+ "n_buckets": 20,
+ "description": "Minimum percentage of time spent outside GC over any 50ms window"
+ },
+ "GC_RESET": {
+ "alert_emails": ["dev-telemetry-gc-alerts@mozilla.org"],
+ "expires_in_version": "never",
+ "kind": "boolean",
+ "description": "Was an incremental GC canceled?"
+ },
+ "GC_RESET_REASON": {
+ "alert_emails": ["dev-telemetry-gc-alerts@mozilla.org"],
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 20,
+ "description": "Reason for cancelling an ongoing GC (see js::gc::AbortReason)",
+ "bug_numbers": [1308116]
+ },
+ "GC_INCREMENTAL_DISABLED": {
+ "alert_emails": ["dev-telemetry-gc-alerts@mozilla.org"],
+ "expires_in_version": "never",
+ "kind": "boolean",
+ "description": "Is incremental GC permanently disabled?"
+ },
+ "GC_NON_INCREMENTAL": {
+ "alert_emails": ["dev-telemetry-gc-alerts@mozilla.org"],
+ "expires_in_version": "never",
+ "kind": "boolean",
+ "description": "Was the GC non-incremental?"
+ },
+ "GC_NON_INCREMENTAL_REASON": {
+ "alert_emails": ["dev-telemetry-gc-alerts@mozilla.org"],
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 20,
+ "description": "Reason for performing a non-incremental GC (see js::gc::AbortReason)",
+ "bug_numbers": [1308116]
+ },
+ "GC_SCC_SWEEP_TOTAL_MS": {
+ "alert_emails": ["dev-telemetry-gc-alerts@mozilla.org"],
+ "expires_in_version": "never",
+ "kind": "linear",
+ "high": 500,
+ "n_buckets": 50,
+ "description": "Time spent sweeping compartment SCCs (ms)"
+ },
+ "GC_SCC_SWEEP_MAX_PAUSE_MS": {
+ "alert_emails": ["dev-telemetry-gc-alerts@mozilla.org"],
+ "expires_in_version": "never",
+ "kind": "linear",
+ "high": 500,
+ "n_buckets": 50,
+ "description": "Time spent sweeping slowest compartment SCC (ms)"
+ },
+ "GC_MINOR_REASON": {
+ "alert_emails": ["dev-telemetry-gc-alerts@mozilla.org"],
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 100,
+ "description": "Reason (enum value) for initiating a minor GC"
+ },
+ "GC_MINOR_REASON_LONG": {
+ "alert_emails": ["dev-telemetry-gc-alerts@mozilla.org"],
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 100,
+ "description": "Reason (enum value) that caused a long (>1ms) minor GC"
+ },
+ "GC_MINOR_US": {
+ "alert_emails": ["dev-telemetry-gc-alerts@mozilla.org"],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 1000000,
+ "n_buckets": 100,
+ "description": "Time spent running JS minor GC (us)"
+ },
+ "GC_NURSERY_BYTES": {
+ "alert_emails": ["dev-telemetry-gc-alerts@mozilla.org"],
+ "expires_in_version": "never",
+ "kind": "linear",
+ "high": 16777216,
+ "n_buckets": 16,
+ "bug_numbers": [1259347],
+ "description": "Size of the GC nursery (bytes)"
+ },
+ "GC_PRETENURE_COUNT": {
+ "alert_emails": ["dev-telemetry-gc-alerts@mozilla.org"],
+ "expires_in_version": "never",
+    "kind": "enumerated",
+ "n_values": 32,
+ "bug_numbers": [1293262],
+    "description": "How many object groups were selected for pretenuring by a minor GC"
+ },
+ "GEOLOCATION_ACCURACY_EXPONENTIAL": {
+ "expires_in_version": "default",
+ "kind": "exponential",
+ "high": 100000,
+ "n_buckets": 50,
+ "description": "Location accuracy"
+ },
+ "GEOLOCATION_ERROR": {
+ "expires_in_version": "never",
+ "kind": "flag",
+ "description": "Has seen location error"
+ },
+ "GEOLOCATION_GETCURRENTPOSITION_SECURE_ORIGIN" : {
+ "expires_in_version" : "55",
+ "kind": "enumerated",
+ "n_values": 10,
+ "bug_numbers": [1230209],
+ "description" : "Number of navigator.geolocation.getCurrentPosition() calls (0=other, 1=http, 2=https)"
+ },
+ "GEOLOCATION_REQUEST_GRANTED": {
+ "expires_in_version": "55",
+ "kind": "enumerated",
+ "n_values": 20,
+ "bug_numbers": [1230209],
+ "description": "Geolocation requests either granted or denied (0=denied/other, 1=denied/http, 2=denied/https, ..., 10=granted/other, 11=granted/http, 12=granted/https)"
+ },
+ "GEOLOCATION_WATCHPOSITION_SECURE_ORIGIN" : {
+ "expires_in_version" : "55",
+ "kind": "enumerated",
+ "n_values": 10,
+ "bug_numbers": [1230209],
+ "description" : "Number of navigator.geolocation.watchPosition() calls (0=other, 1=http, 2=https)"
+ },
+ "GEOLOCATION_WIN8_SOURCE_IS_MLS": {
+ "expires_in_version": "default",
+ "kind": "boolean",
+ "description": "Geolocation on Win8 is either MLS or native"
+ },
+ "GEOLOCATION_OSX_SOURCE_IS_MLS": {
+ "expires_in_version": "default",
+ "kind": "boolean",
+ "description": "Geolocation on OS X is either MLS or CoreLocation"
+ },
+ "GEOLOCATION_GETCURRENTPOSITION_VISIBLE": {
+ "alert_emails": ["michelangelo@mozilla.com"],
+ "expires_in_version": "55",
+ "kind": "boolean",
+ "bug_numbers": [1255198],
+ "description": "This metric is recorded every time a navigator.geolocation.getCurrentPosition() request gets allowed/fulfilled. A false value is recorded if the owner is not visible according to document.isVisible."
+ },
+ "GEOLOCATION_WATCHPOSITION_VISIBLE": {
+ "alert_emails": ["michelangelo@mozilla.com"],
+ "expires_in_version": "55",
+ "kind": "boolean",
+ "bug_numbers": [1255198],
+ "description": "This metric is recorded every time a navigator.geolocation.watchPosition() request gets allowed/fulfilled. A false value is recorded if the owner is not visible according to document.isVisible."
+ },
+ "GPU_PROCESS_LAUNCH_TIME_MS" : {
+ "alert_emails": ["george@mozilla.com", "danderson@mozilla.com"],
+ "expires_in_version": "never",
+ "bug_numbers": [1297790],
+ "kind": "exponential",
+ "high": 1000,
+ "n_buckets": 50,
+ "releaseChannelCollection": "opt-out",
+ "description": "GPU process launch time in milliseconds"
+ },
+ "JS_DEPRECATED_LANGUAGE_EXTENSIONS_IN_CONTENT": {
+ "alert_emails": ["jdemooij@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 10,
+ "description": "Use of SpiderMonkey's deprecated language extensions in web content: ForEach=0, DestructuringForIn=1 (obsolete), LegacyGenerator=2, ExpressionClosure=3, LetBlock=4 (obsolete), LetExpression=5 (obsolete), NoSuchMethod=6 (obsolete), FlagsArgument=7 (obsolete), RegExpSourceProp=8 (obsolete), RestoredRegExpStatics=9 (obsolete), BlockScopeFunRedecl=10"
+ },
+ "JS_DEPRECATED_LANGUAGE_EXTENSIONS_IN_ADDONS": {
+ "alert_emails": ["jdemooij@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 10,
+ "description": "Use of SpiderMonkey's deprecated language extensions in add-ons: ForEach=0, DestructuringForIn=1 (obsolete), LegacyGenerator=2, ExpressionClosure=3, LetBlock=4 (obsolete), LetExpression=5 (obsolete), NoSuchMethod=6 (obsolete), FlagsArgument=7 (obsolete), RegExpSourceProp=8 (obsolete), RestoredRegExpStatics=9 (obsolete), BlockScopeFunRedecl=10"
+ },
+ "XUL_CACHE_DISABLED": {
+ "expires_in_version": "default",
+ "kind": "flag",
+ "description": "XUL cache was disabled"
+ },
+ "MEMORY_RESIDENT_FAST": {
+ "alert_emails": ["memshrink-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "low": 32768,
+ "high": 16777216,
+ "n_buckets": 100,
+ "bug_numbers": [1226196],
+ "description": "Resident memory size (KB)"
+ },
+ "MEMORY_TOTAL": {
+ "alert_emails": ["memshrink-telemetry-alerts@mozilla.com"],
+ "bug_numbers": [1198209],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "low": 32768,
+ "high": 16777216,
+ "n_buckets": 100,
+ "description": "Total Memory Across All Processes (KB)"
+ },
+ "MEMORY_UNIQUE": {
+ "alert_emails": ["memshrink-telemetry-alerts@mozilla.com"],
+ "bug_numbers": [1198209],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "low": 32768,
+ "high": 16777216,
+ "n_buckets": 100,
+ "description": "Unique Set Size (KB)"
+ },
+ "MEMORY_VSIZE": {
+ "alert_emails": ["memshrink-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "low": 32768,
+ "high": 16777216,
+ "n_buckets": 100,
+ "description": "Virtual memory size (KB)"
+ },
+ "MEMORY_VSIZE_MAX_CONTIGUOUS": {
+ "alert_emails": ["memshrink-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "low": 32768,
+ "high": 16777216,
+ "n_buckets": 100,
+ "description": "Maximum-sized block of contiguous virtual memory (KB)"
+ },
+ "MEMORY_JS_COMPARTMENTS_SYSTEM": {
+ "alert_emails": ["memshrink-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 1000,
+ "n_buckets": 50,
+ "description": "Total JavaScript compartments used for add-ons and internals."
+ },
+ "MEMORY_JS_COMPARTMENTS_USER": {
+ "alert_emails": ["memshrink-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 1000,
+ "n_buckets": 50,
+ "description": "Total JavaScript compartments used for web pages"
+ },
+ "MEMORY_JS_GC_HEAP": {
+ "alert_emails": ["memshrink-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "low": 1024,
+ "high": 16777216,
+ "n_buckets": 200,
+ "description": "Memory used by the garbage-collected JavaScript heap (KB)"
+ },
+ "MEMORY_STORAGE_SQLITE": {
+ "alert_emails": ["memshrink-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "low": 1024,
+ "high": 524288,
+ "n_buckets": 50,
+ "description": "Memory used by SQLite (KB)"
+ },
+ "MEMORY_IMAGES_CONTENT_USED_UNCOMPRESSED": {
+ "alert_emails": ["memshrink-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "low": 1024,
+ "high": 1048576,
+ "n_buckets": 50,
+ "description": "Memory used for uncompressed, in-use content images (KB)"
+ },
+ "MEMORY_HEAP_ALLOCATED": {
+ "alert_emails": ["memshrink-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "low": 1024,
+ "high": 16777216,
+ "n_buckets": 200,
+ "description": "Heap memory allocated (KB)"
+ },
+ "MEMORY_HEAP_COMMITTED_UNUSED": {
+ "alert_emails": ["memshrink-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "low": 1024,
+ "high": 524288,
+ "n_buckets": 50,
+ "description": "Committed, unused heap memory (KB)"
+ },
+ "MEMORY_HEAP_OVERHEAD_FRACTION": {
+ "alert_emails": ["memshrink-telemetry-alerts@mozilla.com"],
+ "bug_numbers": [1252375],
+ "expires_in_version": "never",
+ "kind": "linear",
+ "high": 100,
+ "n_buckets": 25,
+ "description": "Fraction of committed heap memory that is overhead (percentage)."
+ },
+ "GHOST_WINDOWS": {
+ "alert_emails": ["memshrink-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 128,
+ "n_buckets": 32,
+ "description": "Number of ghost windows"
+ },
+ "MEMORY_FREE_PURGED_PAGES_MS": {
+ "alert_emails": ["memshrink-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 1024,
+ "n_buckets": 10,
+ "description": "Time(ms) to purge dirty heap pages."
+ },
+ "LOW_MEMORY_EVENTS_VIRTUAL": {
+ "alert_emails": ["memshrink-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 1024,
+ "n_buckets": 21,
+ "description": "Number of low-virtual-memory events fired since last ping",
+ "cpp_guard": "XP_WIN"
+ },
+ "LOW_MEMORY_EVENTS_PHYSICAL": {
+ "alert_emails": ["memshrink-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 1024,
+ "n_buckets": 21,
+ "description": "Number of low-physical-memory events fired since last ping",
+ "cpp_guard": "XP_WIN"
+ },
+ "LOW_MEMORY_EVENTS_COMMIT_SPACE": {
+ "alert_emails": ["memshrink-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 1024,
+ "n_buckets": 21,
+ "description": "Number of low-commit-space events fired since last ping",
+ "cpp_guard": "XP_WIN"
+ },
+ "PAGE_FAULTS_HARD": {
+ "expires_in_version": "default",
+ "kind": "exponential",
+ "low": 8,
+ "high": 65536,
+ "n_buckets": 13,
+ "description": "Hard page faults (since last telemetry ping)",
+ "cpp_guard": "XP_UNIX"
+ },
+ "FONTLIST_INITOTHERFAMILYNAMES": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 30000,
+ "n_buckets": 50,
+ "description": "Time(ms) spent on reading other family names from all fonts"
+ },
+ "FONTLIST_INITFACENAMELISTS": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 30000,
+ "n_buckets": 50,
+ "description": "Time(ms) spent on reading family names from all fonts"
+ },
+ "DWRITEFONT_DELAYEDINITFONTLIST_TOTAL": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 30000,
+ "n_buckets": 10,
+ "description": "gfxDWriteFontList::DelayedInitFontList Total (ms)",
+ "cpp_guard": "XP_WIN"
+ },
+ "DWRITEFONT_DELAYEDINITFONTLIST_COUNT": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000,
+ "n_buckets": 10,
+ "description": "gfxDWriteFontList::DelayedInitFontList Font Family Count",
+ "cpp_guard": "XP_WIN"
+ },
+ "DWRITEFONT_DELAYEDINITFONTLIST_COLLECT": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 30000,
+ "n_buckets": 10,
+ "description": "gfxDWriteFontList::DelayedInitFontList GetSystemFontCollection (ms)",
+ "cpp_guard": "XP_WIN"
+ },
+ "DWRITEFONT_INIT_PROBLEM": {
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 8,
+ "description": "DirectWrite system fontlist initialization problem (1=GDI interop, 2=system font collection, 3=no fonts)",
+ "cpp_guard": "XP_WIN"
+ },
+ "GDI_INITFONTLIST_TOTAL": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 30000,
+ "n_buckets": 10,
+ "description": "gfxGDIFontList::InitFontList Total (ms)",
+ "cpp_guard": "XP_WIN"
+ },
+ "MAC_INITFONTLIST_TOTAL": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 30000,
+ "n_buckets": 10,
+ "description": "gfxMacPlatformFontList::InitFontList Total (ms)",
+ "cpp_guard": "XP_DARWIN"
+ },
+ "SYSTEM_FONT_FALLBACK": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 100000,
+ "n_buckets": 50,
+ "description": "System font fallback (us)"
+ },
+ "SYSTEM_FONT_FALLBACK_FIRST": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 40000,
+ "n_buckets": 20,
+ "description": "System font fallback, first call (ms)"
+ },
+ "SYSTEM_FONT_FALLBACK_SCRIPT": {
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 110,
+ "description": "System font fallback script"
+ },
+ "GRADIENT_DURATION": {
+ "expires_in_version": "default",
+ "kind": "exponential",
+ "high": 50000000,
+ "n_buckets": 20,
+ "description": "Gradient generation time (us)"
+ },
+ "GRADIENT_RETENTION_TIME": {
+ "expires_in_version": "never",
+ "kind": "linear",
+ "high": 10000,
+ "n_buckets": 20,
+ "description": "Maximum retention time for the gradient cache. (ms)"
+ },
+ "STARTUP_CACHE_AGE_HOURS": {
+ "expires_in_version": "default",
+ "kind": "exponential",
+ "high": 3000,
+ "n_buckets": 20,
+ "description": "Startup cache age (hours)"
+ },
+ "STARTUP_CACHE_INVALID": {
+ "alert_emails": ["perf-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "flag",
+ "description": "Was the disk startup cache file detected as invalid"
+ },
+ "WORD_CACHE_HITS_CONTENT": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 256,
+ "n_buckets": 30,
+ "description": "Word cache hits, content text (chars)"
+ },
+ "WORD_CACHE_HITS_CHROME": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 256,
+ "n_buckets": 30,
+ "description": "Word cache hits, chrome text (chars)"
+ },
+ "WORD_CACHE_MISSES_CONTENT": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 256,
+ "n_buckets": 30,
+ "description": "Word cache misses, content text (chars)"
+ },
+ "WORD_CACHE_MISSES_CHROME": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 256,
+ "n_buckets": 30,
+ "description": "Word cache misses, chrome text (chars)"
+ },
+ "FONT_CACHE_HIT": {
+ "expires_in_version": "never",
+ "kind": "boolean",
+ "description": "font cache hit"
+ },
+ "BAD_FALLBACK_FONT": {
+ "expires_in_version": "never",
+ "kind": "boolean",
+ "description": "system fallback font can't be used"
+ },
+ "SHUTDOWN_OK": {
+ "expires_in_version": "default",
+ "kind": "boolean",
+ "description": "Did the browser start after a successful shutdown"
+ },
+ "IMAGE_DECODE_LATENCY_US": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "low": 50,
+ "high": 5000000,
+ "n_buckets": 100,
+ "description": "Time spent decoding an image chunk (us)"
+ },
+ "IMAGE_DECODE_TIME": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "low": 50,
+ "high": 50000000,
+ "n_buckets": 100,
+ "description": "Time spent decoding an image (us)"
+ },
+ "IMAGE_DECODE_ON_DRAW_LATENCY": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "low": 50,
+ "high": 50000000,
+ "n_buckets": 100,
+ "description": "Time from starting a decode to it showing up on the screen (us)"
+ },
+ "IMAGE_DECODE_CHUNKS": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 500,
+ "n_buckets": 50,
+ "description": "Number of chunks per decode attempt"
+ },
+ "IMAGE_DECODE_COUNT": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 500,
+ "n_buckets": 50,
+ "description": "Decode count"
+ },
+ "IMAGE_DECODE_OPAQUE_BGRA": {
+ "alert_emails": ["aosmond@mozilla.com"],
+ "expires_in_version": "53",
+ "kind": "boolean",
+ "description": "Opaque images are BGRA",
+ "bug_numbers": [1311779]
+ },
+ "IMAGE_DECODE_SPEED_JPEG": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "low": 500,
+ "high": 50000000,
+ "n_buckets": 50,
+ "description": "JPEG image decode speed (Kbytes/sec)"
+ },
+ "IMAGE_DECODE_SPEED_GIF": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "low": 500,
+ "high": 50000000,
+ "n_buckets": 50,
+ "description": "GIF image decode speed (Kbytes/sec)"
+ },
+ "IMAGE_DECODE_SPEED_PNG": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "low": 500,
+ "high": 50000000,
+ "n_buckets": 50,
+ "description": "PNG image decode speed (Kbytes/sec)"
+ },
+ "CANVAS_2D_USED": {
+ "expires_in_version": "never",
+ "kind": "boolean",
+ "description": "2D canvas used"
+ },
+ "CANVAS_WEBGL_ACCL_FAILURE_ID": {
+ "alert_emails": ["gfx-telemetry-alerts@mozilla.com","bgirard@mozilla.com","msreckovic@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "count",
+ "keyed": true,
+ "description": "Track the failure IDs that lead us to reject attempting to create an accelerated context. CANVAS_WEBGL_FAILURE_ID reports the overall WebGL status with the attempt to fallback.",
+ "bug_numbers": [1272808]
+ },
+ "CANVAS_WEBGL_FAILURE_ID": {
+ "alert_emails": ["gfx-telemetry-alerts@mozilla.com","bgirard@mozilla.com","msreckovic@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "count",
+ "keyed": true,
+ "description": "WebGL runtime and dynamic failure IDs. This will record a count for each context creation success or failure. Each failure id is a unique identifier that can be traced back to a particular failure branch or blocklist rule.",
+ "bug_numbers": [1272808]
+ },
+ "CANVAS_WEBGL_SUCCESS": {
+ "alert_emails": ["jmuizelaar@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "boolean",
+ "description": "WebGL1 creation success",
+ "bug_numbers": [1247327]
+ },
+ "CANVAS_WEBGL_USED": {
+ "expires_in_version": "never",
+ "kind": "boolean",
+ "description": "WebGL canvas used"
+ },
+ "CANVAS_WEBGL2_SUCCESS": {
+ "alert_emails": ["jmuizelaar@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "boolean",
+ "description": "WebGL2 creation success",
+ "bug_numbers": [1247327]
+ },
+ "TOTAL_CONTENT_PAGE_LOAD_TIME": {
+ "alert_emails": ["perf-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "low": 100,
+ "high": 30000,
+ "n_buckets": 100,
+ "description": "HTTP: Total page load time (ms)"
+ },
+ "HTTP_SUBITEM_OPEN_LATENCY_TIME": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 30000,
+ "n_buckets": 50,
+ "description": "HTTP subitem: Page start -> subitem open() (ms)"
+ },
+ "HTTP_SUBITEM_FIRST_BYTE_LATENCY_TIME": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 30000,
+ "n_buckets": 50,
+ "description": "HTTP subitem: Page start -> first byte received for subitem reply (ms)"
+ },
+ "HTTP_REQUEST_PER_PAGE": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 1000,
+ "n_buckets": 50,
+ "description": "HTTP: Requests per page (count)"
+ },
+ "HTTP_REQUEST_PER_PAGE_FROM_CACHE": {
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 101,
+ "description": "HTTP: Requests serviced from cache (%)"
+ },
+ "HTTP_REQUEST_PER_CONN": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 1000,
+ "n_buckets": 50,
+ "description": "HTTP: requests per connection"
+ },
+ "HTTP_KBREAD_PER_CONN": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 3000,
+ "n_buckets": 50,
+ "description": "HTTP: KB read per connection"
+ },
+ "HTTP_PAGE_DNS_ISSUE_TIME": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 30000,
+ "n_buckets": 50,
+ "description": "HTTP page: open() -> DNS request issued (ms)"
+ },
+ "HTTP_PAGE_DNS_LOOKUP_TIME": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 30000,
+ "n_buckets": 50,
+ "description": "HTTP page: DNS lookup time (ms)"
+ },
+ "HTTP_PAGE_TCP_CONNECTION": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 30000,
+ "n_buckets": 50,
+ "description": "HTTP page: TCP connection setup (ms)"
+ },
+ "HTTP_PAGE_OPEN_TO_FIRST_SENT": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 30000,
+ "n_buckets": 50,
+ "description": "HTTP page: Open -> first byte of request sent (ms)"
+ },
+ "HTTP_PAGE_FIRST_SENT_TO_LAST_RECEIVED": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 30000,
+ "n_buckets": 50,
+ "description": "HTTP page: First byte of request sent -> last byte of response received (ms)"
+ },
+ "HTTP_PAGE_OPEN_TO_FIRST_RECEIVED": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 30000,
+ "n_buckets": 50,
+ "description": "HTTP page: Open -> first byte of reply received (ms)"
+ },
+ "HTTP_PAGE_OPEN_TO_FIRST_FROM_CACHE": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 30000,
+ "n_buckets": 50,
+ "description": "HTTP page: Open -> cache read start (ms)"
+ },
+ "HTTP_PAGE_OPEN_TO_FIRST_FROM_CACHE_V2": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 30000,
+ "n_buckets": 50,
+ "description": "HTTP page: Open -> cache read start (ms), [cache2]"
+ },
+ "HTTP_PAGE_CACHE_READ_TIME": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 30000,
+ "n_buckets": 50,
+ "description": "HTTP page: Cache read time (ms)"
+ },
+ "HTTP_PAGE_CACHE_READ_TIME_V2": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 30000,
+ "n_buckets": 50,
+ "description": "HTTP page: Cache read time (ms) [cache2]"
+ },
+ "HTTP_PAGE_REVALIDATION": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 30000,
+ "n_buckets": 50,
+ "description": "HTTP page: Positive cache validation time (ms)"
+ },
+ "HTTP_PAGE_COMPLETE_LOAD": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 30000,
+ "n_buckets": 50,
+ "description": "HTTP page: Overall load time - all (ms)"
+ },
+ "HTTP_PAGE_COMPLETE_LOAD_V2": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 30000,
+ "n_buckets": 50,
+ "description": "HTTP page: Overall load time - all (ms) [cache2]"
+ },
+ "HTTP_PAGE_COMPLETE_LOAD_CACHED": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 30000,
+ "n_buckets": 50,
+ "description": "HTTP page: Overall load time - cache hits (ms)"
+ },
+ "HTTP_PAGE_COMPLETE_LOAD_CACHED_V2": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 30000,
+ "n_buckets": 50,
+ "description": "HTTP page: Overall load time - cache hits (ms) [cache2]"
+ },
+ "HTTP_PAGE_COMPLETE_LOAD_NET": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 30000,
+ "n_buckets": 50,
+ "description": "HTTP page: Overall load time - network (ms)"
+ },
+ "HTTP_PAGE_COMPLETE_LOAD_NET_V2": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 30000,
+ "n_buckets": 50,
+ "description": "HTTP page: Overall load time - network (ms) [cache2]"
+ },
+ "HTTP_SUB_DNS_ISSUE_TIME": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 30000,
+ "n_buckets": 50,
+ "description": "HTTP subitem: open() -> DNS request issued (ms)"
+ },
+ "HTTP_SUB_DNS_LOOKUP_TIME": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 30000,
+ "n_buckets": 50,
+ "description": "HTTP subitem: DNS lookup time (ms)"
+ },
+ "HTTP_SUB_TCP_CONNECTION": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 30000,
+ "n_buckets": 50,
+ "description": "HTTP subitem: TCP connection setup (ms)"
+ },
+ "HTTP_SUB_OPEN_TO_FIRST_SENT": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 30000,
+ "n_buckets": 50,
+ "description": "HTTP subitem: Open -> first byte of request sent (ms)"
+ },
+ "HTTP_SUB_FIRST_SENT_TO_LAST_RECEIVED": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 30000,
+ "n_buckets": 50,
+ "description": "HTTP subitem: First byte of request sent -> last byte of response received (ms)"
+ },
+ "HTTP_SUB_OPEN_TO_FIRST_RECEIVED": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 30000,
+ "n_buckets": 50,
+ "description": "HTTP subitem: Open -> first byte of reply received (ms)"
+ },
+ "HTTP_SUB_OPEN_TO_FIRST_FROM_CACHE": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 30000,
+ "n_buckets": 50,
+ "description": "HTTP subitem: Open -> cache read start (ms)"
+ },
+ "HTTP_SUB_OPEN_TO_FIRST_FROM_CACHE_V2": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 30000,
+ "n_buckets": 50,
+ "description": "HTTP subitem: Open -> cache read start (ms) [cache2]"
+ },
+ "HTTP_SUB_CACHE_READ_TIME": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 30000,
+ "n_buckets": 50,
+ "description": "HTTP subitem: Cache read time (ms)"
+ },
+ "HTTP_SUB_CACHE_READ_TIME_V2": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 30000,
+ "n_buckets": 50,
+ "description": "HTTP subitem: Cache read time (ms) [cache2]"
+ },
+ "HTTP_SUB_REVALIDATION": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 30000,
+ "n_buckets": 50,
+ "description": "HTTP subitem: Positive cache validation time (ms)"
+ },
+ "HTTP_SUB_COMPLETE_LOAD": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 30000,
+ "n_buckets": 50,
+ "description": "HTTP subitem: Overall load time - all (ms)"
+ },
+ "HTTP_SUB_COMPLETE_LOAD_V2": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 30000,
+ "n_buckets": 50,
+ "description": "HTTP subitem: Overall load time - all (ms) [cache2]"
+ },
+ "HTTP_SUB_COMPLETE_LOAD_CACHED": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 30000,
+ "n_buckets": 50,
+ "description": "HTTP subitem: Overall load time - cache hits (ms)"
+ },
+ "HTTP_SUB_COMPLETE_LOAD_CACHED_V2": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 30000,
+ "n_buckets": 50,
+ "description": "HTTP subitem: Overall load time - cache hits (ms) [cache2]"
+ },
+ "HTTP_SUB_COMPLETE_LOAD_NET": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 30000,
+ "n_buckets": 50,
+ "description": "HTTP subitem: Overall load time - network (ms)"
+ },
+ "HTTP_SUB_COMPLETE_LOAD_NET_V2": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 30000,
+ "n_buckets": 50,
+ "description": "HTTP subitem: Overall load time - network (ms) [cache2]"
+ },
+ "HTTP_PROXY_TYPE": {
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 8,
+ "description": "HTTP Proxy Type (none, http, socks)"
+ },
+ "HTTP_TRANSACTION_IS_SSL": {
+ "expires_in_version": "never",
+ "kind": "boolean",
+ "description": "Whether a HTTP transaction was over SSL or not."
+ },
+ "HTTP_PAGELOAD_IS_SSL": {
+ "expires_in_version": "never",
+ "kind": "boolean",
+ "description": "Whether a HTTP base page load was over SSL or not."
+ },
+ "HTTP_TRANSACTION_USE_ALTSVC": {
+ "expires_in_version": "never",
+ "kind": "boolean",
+ "description": "Whether a HTTP transaction was routed via Alt-Svc or not."
+ },
+ "HTTP_TRANSACTION_USE_ALTSVC_OE": {
+ "expires_in_version": "never",
+ "kind": "boolean",
+ "description": "Whether a HTTP transaction routed via Alt-Svc was scheme=http"
+ },
+ "HTTP_SCHEME_UPGRADE": {
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 10,
+ "description": "Was the URL upgraded to HTTPS? (0=already HTTPS, 1=no reason to upgrade, 2=STS upgrade blocked by pref, 3=upgraded with STS, 4=upgraded with CSP)"
+ },
+ "HTTP_RESPONSE_STATUS_CODE": {
+ "alert_emails": ["ckerschbaumer@mozilla.com"],
+ "bug_numbers": [1272345, 1296287],
+ "expires_in_version": "56",
+ "kind": "enumerated",
+ "n_values": 12,
+ "description": "Whether the URL gets redirected? (0=200, 1=301, 2=302, 3=304, 4=307, 5=308, 6=400, 7=401, 8=403, 9=404, 10=500, 11=other)"
+ },
+ "HTTP_NET_VS_CACHE_ONSTART_ISIMG": {
+ "expires_in_version": "never",
+ "alert_emails": ["necko@mozilla.com"],
+ "bug_numbers": [1313095],
+ "kind": "linear",
+ "high": 1000,
+ "n_buckets": 100,
+ "description": "Network vs cache time load (OnStartRequest) difference (ms) for images. Offset by 500 ms."
+ },
+ "HTTP_NET_VS_CACHE_ONSTART_NOTIMG": {
+ "expires_in_version": "never",
+ "alert_emails": ["necko@mozilla.com"],
+ "bug_numbers": [1313095],
+ "kind": "linear",
+ "high": 1000,
+ "n_buckets": 100,
+ "description": "Network vs cache time load (OnStartRequest) difference (ms) for non-images. Offset by 500 ms."
+ },
+ "HTTP_NET_VS_CACHE_ONSTOP_ISIMG": {
+ "expires_in_version": "never",
+ "alert_emails": ["necko@mozilla.com"],
+ "bug_numbers": [1313095],
+ "kind": "linear",
+ "high": 1000,
+ "n_buckets": 100,
+ "description": "Network vs cache time load (OnStopRequest) difference (ms) for images. Offset by 500 ms."
+ },
+ "HTTP_NET_VS_CACHE_ONSTOP_NOTIMG": {
+ "expires_in_version": "never",
+ "alert_emails": ["necko@mozilla.com"],
+ "bug_numbers": [1313095],
+ "kind": "linear",
+ "high": 1000,
+ "n_buckets": 100,
+ "description": "Network vs cache time load (OnStopRequest) difference (ms) for non-images. Offset by 500 ms."
+ },
+ "HTTP_NET_VS_CACHE_ONSTART_QSMALL_NORMALPRI": {
+ "expires_in_version": "never",
+ "alert_emails": ["necko@mozilla.com"],
+ "bug_numbers": [1313095],
+ "kind": "linear",
+ "high": 1000,
+ "n_buckets": 100,
+ "description": "Network vs cache time load (OnStartRequest) difference (ms) for requests with a normal priority and small queue. Offset by 500 ms."
+ },
+ "HTTP_NET_VS_CACHE_ONSTART_QMED_NORMALPRI": {
+ "expires_in_version": "never",
+ "alert_emails": ["necko@mozilla.com"],
+ "bug_numbers": [1313095],
+ "kind": "linear",
+ "high": 1000,
+ "n_buckets": 100,
+ "description": "Network vs cache time load (OnStartRequest) difference (ms) for requests with a normal priority and medium queue. Offset by 500 ms."
+ },
+ "HTTP_NET_VS_CACHE_ONSTART_QBIG_NORMALPRI": {
+ "expires_in_version": "never",
+ "alert_emails": ["necko@mozilla.com"],
+ "bug_numbers": [1313095],
+ "kind": "linear",
+ "high": 1000,
+ "n_buckets": 100,
+ "description": "Network vs cache time load (OnStartRequest) difference (ms) for requests with a normal priority and large queue. Offset by 500 ms."
+ },
+ "HTTP_NET_VS_CACHE_ONSTART_QSMALL_HIGHPRI": {
+ "expires_in_version": "never",
+ "alert_emails": ["necko@mozilla.com"],
+ "bug_numbers": [1313095],
+ "kind": "linear",
+ "high": 1000,
+ "n_buckets": 100,
+ "description": "Network vs cache time load (OnStartRequest) difference (ms) for requests with a high priority and small queue. Offset by 500 ms."
+ },
+ "HTTP_NET_VS_CACHE_ONSTART_QMED_HIGHPRI": {
+ "expires_in_version": "never",
+ "alert_emails": ["necko@mozilla.com"],
+ "bug_numbers": [1313095],
+ "kind": "linear",
+ "high": 1000,
+ "n_buckets": 100,
+ "description": "Network vs cache time load (OnStartRequest) difference (ms) for requests with a high priority and medium queue. Offset by 500 ms."
+ },
+ "HTTP_NET_VS_CACHE_ONSTART_QBIG_HIGHPRI": {
+ "expires_in_version": "never",
+ "alert_emails": ["necko@mozilla.com"],
+ "bug_numbers": [1313095],
+ "kind": "linear",
+ "high": 1000,
+ "n_buckets": 100,
+ "description": "Network vs cache time load (OnStartRequest) difference (ms) for requests with a high priority and large queue. Offset by 500 ms."
+ },
+ "HTTP_NET_VS_CACHE_ONSTOP_QSMALL_NORMALPRI": {
+ "expires_in_version": "never",
+ "alert_emails": ["necko@mozilla.com"],
+ "bug_numbers": [1313095],
+ "kind": "linear",
+ "high": 1000,
+ "n_buckets": 100,
+ "description": "Network vs cache time load (OnStopRequest) difference (ms) for requests with a normal priority and small queue. Offset by 500 ms."
+ },
+ "HTTP_NET_VS_CACHE_ONSTOP_QMED_NORMALPRI": {
+ "expires_in_version": "never",
+ "alert_emails": ["necko@mozilla.com"],
+ "bug_numbers": [1313095],
+ "kind": "linear",
+ "high": 1000,
+ "n_buckets": 100,
+ "description": "Network vs cache time load (OnStopRequest) difference (ms) for requests with a normal priority and medium queue. Offset by 500 ms."
+ },
+ "HTTP_NET_VS_CACHE_ONSTOP_QBIG_NORMALPRI": {
+ "expires_in_version": "never",
+ "alert_emails": ["necko@mozilla.com"],
+ "bug_numbers": [1313095],
+ "kind": "linear",
+ "high": 1000,
+ "n_buckets": 100,
+ "description": "Network vs cache time load (OnStopRequest) difference (ms) for requests with a normal priority and large queue. Offset by 500 ms."
+ },
+ "HTTP_NET_VS_CACHE_ONSTOP_QSMALL_HIGHPRI": {
+ "expires_in_version": "never",
+ "alert_emails": ["necko@mozilla.com"],
+ "bug_numbers": [1313095],
+ "kind": "linear",
+ "high": 1000,
+ "n_buckets": 100,
+ "description": "Network vs cache time load (OnStopRequest) difference (ms) for requests with a high priority and small queue. Offset by 500 ms."
+ },
+ "HTTP_NET_VS_CACHE_ONSTOP_QMED_HIGHPRI": {
+ "expires_in_version": "never",
+ "alert_emails": ["necko@mozilla.com"],
+ "bug_numbers": [1313095],
+ "kind": "linear",
+ "high": 1000,
+ "n_buckets": 100,
+ "description": "Network vs cache time load (OnStopRequest) difference (ms) for requests with a high priority and medium queue. Offset by 500 ms."
+ },
+ "HTTP_NET_VS_CACHE_ONSTOP_QBIG_HIGHPRI": {
+ "expires_in_version": "never",
+ "alert_emails": ["necko@mozilla.com"],
+ "bug_numbers": [1313095],
+ "kind": "linear",
+ "high": 1000,
+ "n_buckets": 100,
+ "description": "Network vs cache time load (OnStopRequest) difference (ms) for requests with a high priority and large queue. Offset by 500 ms."
+ },
+ "HTTP_NET_VS_CACHE_ONSTART_SMALL_NORMALPRI": {
+ "expires_in_version": "never",
+ "alert_emails": ["necko@mozilla.com"],
+ "bug_numbers": [1313095],
+ "kind": "linear",
+ "high": 1000,
+ "n_buckets": 100,
+ "description": "Network vs cache time load (OnStartRequest) difference (ms) for cache files with a small size (<32K) and normal priority. Offset by 500 ms."
+ },
+ "HTTP_NET_VS_CACHE_ONSTART_MED_NORMALPRI": {
+ "expires_in_version": "never",
+ "alert_emails": ["necko@mozilla.com"],
+ "bug_numbers": [1313095],
+ "kind": "linear",
+ "high": 1000,
+ "n_buckets": 100,
+ "description": "Network vs cache time load (OnStartRequest) difference (ms) for cache files with a medium size (<256K) and normal priority. Offset by 500 ms."
+ },
+ "HTTP_NET_VS_CACHE_ONSTART_LARGE_NORMALPRI": {
+ "expires_in_version": "never",
+ "alert_emails": ["necko@mozilla.com"],
+ "bug_numbers": [1313095],
+ "kind": "linear",
+ "high": 1000,
+ "n_buckets": 100,
+ "description": "Network vs cache time load (OnStartRequest) difference (ms) for cache files with a large size (>256K) and normal priority. Offset by 500 ms."
+ },
+ "HTTP_NET_VS_CACHE_ONSTART_SMALL_HIGHPRI": {
+ "expires_in_version": "never",
+ "alert_emails": ["necko@mozilla.com"],
+ "bug_numbers": [1313095],
+ "kind": "linear",
+ "high": 1000,
+ "n_buckets": 100,
+ "description": "Network vs cache time load (OnStartRequest) difference (ms) for cache files with a small size (<32K) and high priority. Offset by 500 ms."
+ },
+ "HTTP_NET_VS_CACHE_ONSTART_MED_HIGHPRI": {
+ "expires_in_version": "never",
+ "alert_emails": ["necko@mozilla.com"],
+ "bug_numbers": [1313095],
+ "kind": "linear",
+ "high": 1000,
+ "n_buckets": 100,
+ "description": "Network vs cache time load (OnStartRequest) difference (ms) for cache files with a medium size (<256K) and high priority. Offset by 500 ms."
+ },
+ "HTTP_NET_VS_CACHE_ONSTART_LARGE_HIGHPRI": {
+ "expires_in_version": "never",
+ "alert_emails": ["necko@mozilla.com"],
+ "bug_numbers": [1313095],
+ "kind": "linear",
+ "high": 1000,
+ "n_buckets": 100,
+ "description": "Network vs cache time load (OnStartRequest) difference (ms) for cache files with a large size (>256K) and high priority. Offset by 500 ms."
+ },
+ "HTTP_NET_VS_CACHE_ONSTOP_SMALL_NORMALPRI": {
+ "expires_in_version": "never",
+ "alert_emails": ["necko@mozilla.com"],
+ "bug_numbers": [1313095],
+ "kind": "linear",
+ "high": 1000,
+ "n_buckets": 100,
+ "description": "Network vs cache time load (OnStopRequest) difference (ms) for cache files with a small size (<32K) and normal priority. Offset by 500 ms."
+ },
+ "HTTP_NET_VS_CACHE_ONSTOP_MED_NORMALPRI": {
+ "expires_in_version": "never",
+ "alert_emails": ["necko@mozilla.com"],
+ "bug_numbers": [1313095],
+ "kind": "linear",
+ "high": 1000,
+ "n_buckets": 100,
+ "description": "Network vs cache time load (OnStopRequest) difference (ms) for cache files with a medium size (<256K) and normal priority. Offset by 500 ms."
+ },
+ "HTTP_NET_VS_CACHE_ONSTOP_LARGE_NORMALPRI": {
+ "expires_in_version": "never",
+ "alert_emails": ["necko@mozilla.com"],
+ "bug_numbers": [1313095],
+ "kind": "linear",
+ "high": 1000,
+ "n_buckets": 100,
+ "description": "Network vs cache time load (OnStopRequest) difference (ms) for cache files with a large size (>256K) and normal priority. Offset by 500 ms."
+ },
+ "HTTP_NET_VS_CACHE_ONSTOP_SMALL_HIGHPRI": {
+ "expires_in_version": "never",
+ "alert_emails": ["necko@mozilla.com"],
+ "bug_numbers": [1313095],
+ "kind": "linear",
+ "high": 1000,
+ "n_buckets": 100,
+ "description": "Network vs cache time load (OnStopRequest) difference (ms) for cache files with a small size (<32K) and high priority. Offset by 500 ms."
+ },
+ "HTTP_NET_VS_CACHE_ONSTOP_MED_HIGHPRI": {
+ "expires_in_version": "never",
+ "alert_emails": ["necko@mozilla.com"],
+ "bug_numbers": [1313095],
+ "kind": "linear",
+ "high": 1000,
+ "n_buckets": 100,
+ "description": "Network vs cache time load (OnStopRequest) difference (ms) for cache files with a medium size (<256K) and high priority. Offset by 500 ms."
+ },
+ "HTTP_NET_VS_CACHE_ONSTOP_LARGE_HIGHPRI": {
+ "expires_in_version": "never",
+ "alert_emails": ["necko@mozilla.com"],
+ "bug_numbers": [1313095],
+ "kind": "linear",
+ "high": 1000,
+ "n_buckets": 100,
+ "description": "Network vs cache time load (OnStopRequest) difference (ms) for cache files with a large size (>256K) and high priority. Offset by 500 ms."
+ },
+ "HTTP_NET_VS_CACHE_ONSTART_REVALIDATED": {
+ "expires_in_version": "never",
+ "alert_emails": ["necko@mozilla.com"],
+ "bug_numbers": [1313095],
+ "kind": "linear",
+ "high": 1000,
+ "n_buckets": 100,
+ "description": "Network vs cache time load (OnStartRequest) difference (ms) for revalidated cache entries. Offset by 500 ms."
+ },
+ "HTTP_NET_VS_CACHE_ONSTART_NOTREVALIDATED": {
+ "expires_in_version": "never",
+ "alert_emails": ["necko@mozilla.com"],
+ "bug_numbers": [1313095],
+ "kind": "linear",
+ "high": 1000,
+ "n_buckets": 100,
+ "description": "Network vs cache time load (OnStartRequest) difference (ms) for not revalidated cache entries. Offset by 500 ms."
+ },
+ "HTTP_NET_VS_CACHE_ONSTOP_REVALIDATED": {
+ "expires_in_version": "never",
+ "alert_emails": ["necko@mozilla.com"],
+ "bug_numbers": [1313095],
+ "kind": "linear",
+ "high": 1000,
+ "n_buckets": 100,
+ "description": "Network vs cache time load (OnStopRequest) difference (ms) for revalidated cache entries. Offset by 500 ms."
+ },
+ "HTTP_NET_VS_CACHE_ONSTOP_NOTREVALIDATED": {
+ "expires_in_version": "never",
+ "alert_emails": ["necko@mozilla.com"],
+ "bug_numbers": [1313095],
+ "kind": "linear",
+ "high": 1000,
+ "n_buckets": 100,
+ "description": "Network vs cache time load (OnStopRequest) difference (ms) for not revalidated cache entries. Offset by 500 ms."
+ },
+ "HTTP_AUTH_DIALOG_STATS": {
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 4,
+ "description": "Stats about what kind of resource requested http authentication. (0=top-level doc, 1=same origin subresources, 2=cross-origin subresources, 3=xhr)"
+ },
+ "HTTP_AUTH_TYPE_STATS": {
+ "alert_emails": ["rbarnes@mozilla.com"],
+ "bug_numbers": [1266571],
+ "expires_in_version": "52",
+ "kind": "enumerated",
+ "n_values": 8,
+ "releaseChannelCollection": "opt-out",
+ "description": "Recorded once for each HTTP 401 response. The value records the type of authentication and the TLS-enabled status. (0=basic/clear, 1=basic/tls, 2=digest/clear, 3=digest/tls, 4=ntlm/clear, 5=ntlm/tls, 6=negotiate/clear, 7=negotiate/tls)"
+ },
+ "TLS_EARLY_DATA_NEGOTIATED": {
+ "expires_in_version": "58",
+ "kind": "enumerated",
+ "n_values": 3,
+ "description": "Sending TLS early data was possible: 0 - not possible, 1 - possible but not used, 2 - possible and used.",
+ "alert_emails": ["necko@mozilla.com"],
+ "bug_numbers": [1296288]
+ },
+ "TLS_EARLY_DATA_ACCEPTED": {
+ "expires_in_version": "58",
+ "kind": "boolean",
+ "description": "TLS early data was used and it was accepted (true) or rejected (false) by the remote host.",
+ "alert_emails": ["necko@mozilla.com"],
+ "bug_numbers": [1296288]
+ },
+ "TLS_EARLY_DATA_BYTES_WRITTEN": {
+ "expires_in_version": "58",
+ "kind": "exponential",
+ "high": 60000,
+ "n_buckets": 100,
+ "description": "Amount of bytes sent using TLS early data at the start of a TLS connection for a given channel.",
+ "alert_emails": ["necko@mozilla.com"],
+ "bug_numbers": [1296288]
+ },
+ "SSL_HANDSHAKE_VERSION": {
+ "alert_emails": ["seceng-telemetry@mozilla.com"],
+ "bug_numbers": [1250568],
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 16,
+ "description": "SSL Version (1=tls1, 2=tls1.1, 3=tls1.2, 4=tls1.3)"
+ },
+ "SSL_HANDSHAKE_RESULT": {
+ "alert_emails": ["seceng-telemetry@mozilla.com"],
+ "bug_numbers": [1331280],
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 672,
+ "releaseChannelCollection": "opt-out",
+ "description": "SSL handshake result, 0=success, 1-255=NSS error offset, 256-511=SEC error offset + 256, 512-639=NSPR error offset + 512, 640-670=PKIX error, 671=unknown err"
+ },
+ "SSL_TIME_UNTIL_READY": {
+ "alert_emails": ["seceng-telemetry@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 60000,
+ "n_buckets": 200,
+ "description": "ms of SSL wait time including TCP and proxy tunneling"
+ },
+ "SSL_TIME_UNTIL_HANDSHAKE_FINISHED": {
+ "alert_emails": ["seceng-telemetry@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 60000,
+ "n_buckets": 200,
+ "description": "ms of SSL wait time for full handshake including TCP and proxy tunneling"
+ },
+ "SSL_BYTES_BEFORE_CERT_CALLBACK": {
+ "alert_emails": ["seceng-telemetry@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 32000,
+ "n_buckets": 64,
+ "description": "plaintext bytes read before a server certificate authenticated"
+ },
+ "SSL_NPN_TYPE": {
+ "alert_emails": ["seceng-telemetry@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 16,
+ "description": "NPN Results (0=none, 1=negotiated, 2=no-overlap, 3=selected(alpn))"
+ },
+ "SSL_RESUMED_SESSION": {
+ "alert_emails": ["seceng-telemetry@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "boolean",
+ "description": "complete TLS connect that used TLS Session Resumption"
+ },
+ "CERT_VALIDATION_HTTP_REQUEST_RESULT": {
+ "alert_emails": ["seceng-telemetry@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 16,
+ "description": "HTTP result of OCSP, etc.. (0=canceled, 1=OK, 2=FAILED, 3=internal-error)"
+ },
+ "CERT_VALIDATION_HTTP_REQUEST_CANCELED_TIME": {
+ "alert_emails": ["seceng-telemetry@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 60000,
+ "n_buckets": 200,
+ "description": "ms elapsed time of OCSP etc.. that was canceled"
+ },
+ "CERT_VALIDATION_HTTP_REQUEST_SUCCEEDED_TIME": {
+ "alert_emails": ["seceng-telemetry@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 60000,
+ "n_buckets": 200,
+ "description": "ms elapsed time of OCSP etc.. that succeeded"
+ },
+ "CERT_VALIDATION_HTTP_REQUEST_FAILED_TIME": {
+ "alert_emails": ["seceng-telemetry@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 60000,
+ "n_buckets": 200,
+ "description": "ms elapsed time of OCSP etc.. that failed"
+ },
+ "SSL_KEY_EXCHANGE_ALGORITHM_FULL": {
+ "alert_emails": ["seceng-telemetry@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 16,
+ "description": "SSL Handshake Key Exchange Algorithm for full handshake (null=0, rsa=1, dh=2, fortezza=3, ecdh=4)"
+ },
+ "SSL_KEY_EXCHANGE_ALGORITHM_RESUMED": {
+ "alert_emails": ["seceng-telemetry@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 16,
+ "description": "SSL Handshake Key Exchange Algorithm for resumed handshake (null=0, rsa=1, dh=2, fortezza=3, ecdh=4)"
+ },
+ "SSL_OBSERVED_END_ENTITY_CERTIFICATE_LIFETIME": {
+ "expires_in_version": "55",
+ "alert_emails": ["seceng-telemetry@mozilla.com"],
+ "kind": "enumerated",
+ "n_values": 125,
+ "releaseChannelCollection": "opt-out",
+ "description": "The lifetime of accepted HTTPS server certificates, in weeks, up to 2 years. Bucket 105 is all end-entity HTTPS server certificates with a lifetime > 2 years."
+ },
+ "KEYGEN_GENERATED_KEY_TYPE": {
+ "expires_in_version": "55",
+ "alert_emails": ["seceng-telemetry@mozilla.com"],
+ "kind": "count",
+ "keyed": true,
+ "releaseChannelCollection": "opt-out",
+ "bug_numbers": [1191414,1284945],
+ "description": "The number of times we generate a key via keygen, keyed on algorithm and keysize. Keys include RSA with key size (512, 1024, 2048, possibly others), secp384r1, secp256r1, and 'other_ec'."
+ },
+ "WEBSOCKETS_HANDSHAKE_TYPE": {
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 16,
+ "description": "Websockets Handshake Results (ws-ok-plain, ws-ok-proxy, ws-failed-plain, ws-failed-proxy, wss-ok-plain, wss-ok-proxy, wss-failed-plain, wss-failed-proxy)"
+ },
+ "SPDY_VERSION2": {
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 48,
+ "description": "SPDY: Protocol Version Used"
+ },
+ "HTTP_RESPONSE_VERSION": {
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 48,
+ "description": "HTTP: Protocol Version Used on Response from nsHttp.h"
+ },
+ "HTTP_09_INFO": {
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 4,
+ "description": "HTTP 09 Response Breakdown: lowbit subresource, high bit nonstd port",
+ "bug_numbers": [1262572],
+ "alert_emails": ["necko@mozilla.com"]
+ },
+ "SPDY_PARALLEL_STREAMS": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 1000,
+ "n_buckets": 50,
+ "description": "SPDY: Streams concurrent active per connection"
+ },
+ "SPDY_REQUEST_PER_CONN": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 1000,
+ "n_buckets": 50,
+ "description": "SPDY: Streams created per connection"
+ },
+ "SPDY_SERVER_INITIATED_STREAMS": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 100000,
+ "n_buckets": 250,
+      "description": "SPDY: Streams received per connection"
+ },
+ "SPDY_CHUNK_RECVD": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 1000,
+ "n_buckets": 100,
+ "description": "SPDY: Recvd Chunk Size (rounded to KB)"
+ },
+ "SPDY_SYN_SIZE": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "low": 20,
+ "high": 20000,
+ "n_buckets": 50,
+ "description": "SPDY: SYN Frame Header Size"
+ },
+ "SPDY_SYN_RATIO": {
+ "expires_in_version": "never",
+ "kind": "linear",
+ "high": 99,
+ "n_buckets": 20,
+ "description": "SPDY: SYN Frame Header Ratio (lower better)"
+ },
+ "SPDY_SYN_REPLY_SIZE": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "low": 16,
+ "high": 20000,
+ "n_buckets": 50,
+ "description": "SPDY: SYN Reply Header Size"
+ },
+ "SPDY_SYN_REPLY_RATIO": {
+ "expires_in_version": "never",
+ "kind": "linear",
+ "high": 99,
+ "n_buckets": 20,
+ "description": "SPDY: SYN Reply Header Ratio (lower better)"
+ },
+ "SPDY_NPN_CONNECT": {
+ "expires_in_version": "never",
+ "kind": "boolean",
+ "description": "SPDY: NPN Negotiated"
+ },
+ "SPDY_NPN_JOIN": {
+ "expires_in_version": "never",
+ "kind": "boolean",
+ "description": "SPDY: Coalesce Succeeded"
+ },
+ "SPDY_KBREAD_PER_CONN": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 3000,
+ "n_buckets": 50,
+ "description": "SPDY: KB read per connection"
+ },
+ "SPDY_SETTINGS_UL_BW": {
+ "expires_in_version": "42",
+ "kind": "exponential",
+ "high": 10000,
+ "n_buckets": 100,
+ "description": "SPDY: Settings Upload Bandwidth"
+ },
+ "SPDY_SETTINGS_DL_BW": {
+ "expires_in_version": "42",
+ "kind": "exponential",
+ "high": 10000,
+ "n_buckets": 100,
+ "description": "SPDY: Settings Download Bandwidth"
+ },
+ "SPDY_SETTINGS_RTT": {
+ "expires_in_version": "42",
+ "kind": "exponential",
+ "high": 1000,
+ "n_buckets": 100,
+ "description": "SPDY: Settings RTT"
+ },
+ "SPDY_SETTINGS_MAX_STREAMS": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 5000,
+ "n_buckets": 100,
+ "description": "H2: Settings Max Streams parameter"
+ },
+ "SPDY_SETTINGS_CWND": {
+ "expires_in_version": "42",
+ "kind": "exponential",
+ "high": 500,
+ "n_buckets": 50,
+ "description": "SPDY: Settings CWND (packets)"
+ },
+ "SPDY_SETTINGS_RETRANS": {
+ "expires_in_version": "42",
+ "kind": "exponential",
+ "high": 100,
+ "n_buckets": 50,
+ "description": "SPDY: Retransmission Rate"
+ },
+ "SPDY_SETTINGS_IW": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 1000,
+ "n_buckets": 50,
+ "description": "H2: Settings Initial Window (rounded to KB)"
+ },
+ "SPDY_GOAWAY_LOCAL": {
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 32,
+ "description": "H2: goaway reason client sent from rfc 7540. 31 is none sent."
+ },
+ "SPDY_GOAWAY_PEER": {
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 32,
+ "description": "H2: goaway reason from peer from rfc 7540. 31 is none received."
+ },
+ "HPACK_ELEMENTS_EVICTED_DECOMPRESSOR": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 256,
+ "n_buckets": 50,
+ "description": "HPACK: Number of items removed from dynamic table to make room for 1 new item",
+ "alert_emails": ["necko@mozilla.com", "hurley@mozilla.com"],
+ "bug_numbers": [1296280]
+ },
+ "HPACK_BYTES_EVICTED_DECOMPRESSOR": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 8192,
+ "n_buckets": 50,
+ "description": "HPACK: Number of bytes removed from dynamic table to make room for 1 new item",
+ "alert_emails": ["necko@mozilla.com", "hurley@mozilla.com"],
+ "bug_numbers": [1296280]
+ },
+ "HPACK_BYTES_EVICTED_RATIO_DECOMPRESSOR": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 256,
+ "n_buckets": 50,
+ "description": "HPACK: Ratio of bytes evicted to bytes added (* 100)",
+ "alert_emails": ["necko@mozilla.com", "hurley@mozilla.com"],
+ "bug_numbers": [1296280]
+ },
+ "HPACK_PEAK_COUNT_DECOMPRESSOR": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 1024,
+ "n_buckets": 50,
+ "description": "HPACK: peak number of items in the dynamic table",
+ "alert_emails": ["necko@mozilla.com", "hurley@mozilla.com"],
+ "bug_numbers": [1296280]
+ },
+ "HPACK_PEAK_SIZE_DECOMPRESSOR": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 16384,
+ "n_buckets": 100,
+ "description": "HPACK: peak size in bytes of the table",
+ "alert_emails": ["necko@mozilla.com", "hurley@mozilla.com"],
+ "bug_numbers": [1296280]
+ },
+ "HPACK_ELEMENTS_EVICTED_COMPRESSOR": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 256,
+ "n_buckets": 50,
+ "description": "HPACK: Number of items removed from dynamic table to make room for 1 new item",
+ "alert_emails": ["necko@mozilla.com", "hurley@mozilla.com"],
+ "bug_numbers": [1296280]
+ },
+ "HPACK_BYTES_EVICTED_COMPRESSOR": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 8192,
+ "n_buckets": 50,
+ "description": "HPACK: Number of bytes removed from dynamic table to make room for 1 new item",
+ "alert_emails": ["necko@mozilla.com", "hurley@mozilla.com"],
+ "bug_numbers": [1296280]
+ },
+ "HPACK_BYTES_EVICTED_RATIO_COMPRESSOR": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 256,
+ "n_buckets": 50,
+ "description": "HPACK: Ratio of bytes evicted to bytes added (* 100)",
+ "alert_emails": ["necko@mozilla.com", "hurley@mozilla.com"],
+ "bug_numbers": [1296280]
+ },
+ "HPACK_PEAK_COUNT_COMPRESSOR": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 1024,
+ "n_buckets": 50,
+ "description": "HPACK: peak number of items in the dynamic table",
+ "alert_emails": ["necko@mozilla.com", "hurley@mozilla.com"],
+ "bug_numbers": [1296280]
+ },
+ "HPACK_PEAK_SIZE_COMPRESSOR": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 16384,
+ "n_buckets": 100,
+ "description": "HPACK: peak size in bytes of the table",
+ "alert_emails": ["necko@mozilla.com", "hurley@mozilla.com"],
+ "bug_numbers": [1296280]
+ },
+ "HTTP_CHANNEL_DISPOSITION" : {
+ "alert_emails": ["necko@mozilla.com"],
+ "bug_numbers": [1341128],
+ "expires_in_version": "60",
+ "kind": "enumerated",
+ "n_values": 16,
+ "releaseChannelCollection": "opt-out",
+      "description": "Channel Disposition: 0=Cancel, 1=Disk, 2=NetOK, 3=NetEarlyFail, 4=NetLateFail, +8 for HTTPS"
+ },
+ "HTTP_CONNECTION_ENTRY_CACHE_HIT_1" : {
+ "expires_in_version": "never",
+ "kind": "boolean",
+ "description": "Fraction of sockets that used a nsConnectionEntry with history - size 300."
+ },
+ "HTTP_CACHE_DISPOSITION_2": {
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 5,
+ "description": "HTTP Cache Hit, Reval, Failed-Reval, Miss"
+ },
+ "HTTP_CACHE_DISPOSITION_2_V2": {
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 5,
+ "description": "HTTP Cache v2 Hit, Reval, Failed-Reval, Miss"
+ },
+ "HTTP_DISK_CACHE_DISPOSITION_2": {
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 5,
+ "description": "HTTP Disk Cache Hit, Reval, Failed-Reval, Miss"
+ },
+ "HTTP_CACHE_MISS_HALFLIFE_EXPERIMENT_2": {
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 4,
+ "description": "HTTP Cache v2 Miss by half-life value (5 min, 15 min, 1 hour, 6 hours)"
+ },
+ "HTTP_CACHE_ENTRY_RELOAD_TIME": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 900000,
+ "n_buckets": 50,
+ "description": "Time before we reload an HTTP cache entry again to memory"
+ },
+ "HTTP_CACHE_ENTRY_ALIVE_TIME": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 7200000,
+ "n_buckets": 50,
+ "description": "Time for which an HTTP cache entry is kept warmed in memory"
+ },
+ "HTTP_CACHE_ENTRY_REUSE_COUNT": {
+ "expires_in_version": "never",
+ "kind": "linear",
+ "high": 20,
+ "n_buckets": 19,
+ "description": "Reuse count of an HTTP cache entry warmed in memory"
+ },
+ "HTTP_MEMORY_CACHE_DISPOSITION_2": {
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 5,
+ "description": "HTTP Memory Cache Hit, Reval, Failed-Reval, Miss"
+ },
+ "HTTP_OFFLINE_CACHE_DISPOSITION_2": {
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 5,
+ "description": "HTTP Offline Cache Hit, Reval, Failed-Reval, Miss"
+ },
+ "HTTP_OFFLINE_CACHE_DOCUMENT_LOAD": {
+ "expires_in_version": "never",
+ "kind": "boolean",
+ "description": "Rate of page load from offline cache"
+ },
+ "HTTP_CACHE_IO_QUEUE_2_OPEN_PRIORITY": {
+ "alert_emails": ["hbambas@mozilla.com"],
+ "bug_numbers": [1294183],
+ "expires_in_version": "55",
+ "kind": "enumerated",
+ "n_values": 10,
+ "description": "HTTP Cache IO queue length"
+ },
+ "HTTP_CACHE_IO_QUEUE_2_READ_PRIORITY": {
+ "alert_emails": ["hbambas@mozilla.com"],
+ "bug_numbers": [1294183],
+ "expires_in_version": "55",
+ "kind": "enumerated",
+ "n_values": 10,
+ "description": "HTTP Cache IO queue length"
+ },
+ "HTTP_CACHE_IO_QUEUE_2_MANAGEMENT": {
+ "alert_emails": ["hbambas@mozilla.com"],
+ "bug_numbers": [1294183],
+ "expires_in_version": "55",
+ "kind": "enumerated",
+ "n_values": 10,
+ "description": "HTTP Cache IO queue length"
+ },
+ "HTTP_CACHE_IO_QUEUE_2_OPEN": {
+ "alert_emails": ["hbambas@mozilla.com"],
+ "bug_numbers": [1294183],
+ "expires_in_version": "55",
+ "kind": "enumerated",
+ "n_values": 10,
+ "description": "HTTP Cache IO queue length"
+ },
+ "HTTP_CACHE_IO_QUEUE_2_READ": {
+ "alert_emails": ["hbambas@mozilla.com"],
+ "bug_numbers": [1294183],
+ "expires_in_version": "55",
+ "kind": "enumerated",
+ "n_values": 10,
+ "description": "HTTP Cache IO queue length"
+ },
+ "HTTP_CACHE_IO_QUEUE_2_WRITE": {
+ "alert_emails": ["hbambas@mozilla.com"],
+ "bug_numbers": [1294183],
+ "expires_in_version": "55",
+ "kind": "enumerated",
+ "n_values": 10,
+ "description": "HTTP Cache IO queue length"
+ },
+ "HTTP_CACHE_IO_QUEUE_2_WRITE_PRIORITY": {
+ "alert_emails": ["hbambas@mozilla.com"],
+ "bug_numbers": [1294183],
+ "expires_in_version": "55",
+ "kind": "enumerated",
+ "n_values": 10,
+ "description": "HTTP Cache IO queue length"
+ },
+ "HTTP_CACHE_IO_QUEUE_2_INDEX": {
+ "alert_emails": ["hbambas@mozilla.com"],
+ "bug_numbers": [1294183],
+ "expires_in_version": "55",
+ "kind": "enumerated",
+ "n_values": 10,
+ "description": "HTTP Cache IO queue length"
+ },
+ "HTTP_CACHE_IO_QUEUE_2_EVICT": {
+ "alert_emails": ["hbambas@mozilla.com"],
+ "bug_numbers": [1294183],
+ "expires_in_version": "55",
+ "kind": "enumerated",
+ "n_values": 10,
+ "description": "HTTP Cache IO queue length"
+ },
+ "CACHE_DEVICE_SEARCH_2": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000,
+ "n_buckets": 50,
+ "description": "Time to search cache (ms)"
+ },
+ "CACHE_MEMORY_SEARCH_2": {
+ "expires_in_version": "default",
+ "kind": "exponential",
+ "high": 10000,
+ "n_buckets": 50,
+ "description": "Time to search memory cache (ms)"
+ },
+ "CACHE_DISK_SEARCH_2": {
+ "expires_in_version": "default",
+ "kind": "exponential",
+ "high": 10000,
+ "n_buckets": 50,
+ "description": "Time to search disk cache (ms)"
+ },
+ "CACHE_OFFLINE_SEARCH_2": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000,
+ "n_buckets": 50,
+ "description": "Time to search offline cache (ms)"
+ },
+ "TRANSACTION_WAIT_TIME_HTTP": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 5000,
+ "n_buckets": 100,
+ "description": "Time from submission to dispatch of HTTP transaction (ms)"
+ },
+ "TRANSACTION_WAIT_TIME_HTTP_PIPELINES": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 5000,
+ "n_buckets": 100,
+ "description": "Time from submission to dispatch of HTTP with pipelines transaction (ms)"
+ },
+ "TRANSACTION_WAIT_TIME_SPDY": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 5000,
+ "n_buckets": 100,
+ "description": "Time from submission to dispatch of SPDY transaction (ms)"
+ },
+ "HTTP_SAW_QUIC_ALT_PROTOCOL": {
+ "expires_in_version": "never",
+ "kind": "boolean",
+ "description": "Fraction of responses with a quic alt-protocol advertisement."
+ },
+ "HTTP_CONTENT_ENCODING": {
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 6,
+ "description": "encoding removed: 0=unknown, 1=gzip, 2=deflate, 3=brotli"
+ },
+ "HTTP_DISK_CACHE_OVERHEAD": {
+ "expires_in_version": "default",
+ "kind": "exponential",
+ "high": 32000000,
+ "n_buckets": 100,
+ "description": "HTTP Disk cache memory overhead (bytes)"
+ },
+ "CACHE_LM_INCONSISTENT": {
+ "expires_in_version": "never",
+ "kind": "boolean",
+ "description": "Cache discovered inconsistent last-modified entry"
+ },
+ "CACHE_SERVICE_LOCK_WAIT_2": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000,
+ "n_buckets": 50,
+ "description": "Time spent waiting on the cache service lock (ms)"
+ },
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_2": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000,
+ "n_buckets": 50,
+ "description": "Time spent waiting on the cache service lock on the main thread (ms)"
+ },
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSSETDISKSMARTSIZECALLBACK_NOTIFY": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000,
+ "n_buckets": 50,
+ "description": "Time spent waiting on the cache service lock (ms) on the main thread in NSSETDISKSMARTSIZECALLBACK_NOTIFY"
+ },
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSPROCESSREQUESTEVENT_RUN": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000,
+ "n_buckets": 50,
+ "description": "Time spent waiting on the cache service lock (ms) on the main thread in NSPROCESSREQUESTEVENT_RUN"
+ },
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSOUTPUTSTREAMWRAPPER_LAZYINIT": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000,
+ "n_buckets": 50,
+ "description": "Time spent waiting on the cache service lock (ms) on the main thread in NSOUTPUTSTREAMWRAPPER_LAZYINIT"
+ },
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSOUTPUTSTREAMWRAPPER_CLOSEINTERNAL": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000,
+ "n_buckets": 50,
+ "description": "Time spent waiting on the cache service lock (ms) on the main thread in NSOUTPUTSTREAMWRAPPER_CLOSEINTERNAL"
+ },
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSOUTPUTSTREAMWRAPPER_RELEASE": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000,
+ "n_buckets": 50,
+ "description": "Time spent waiting on the cache service lock (ms) on the main thread in NSOUTPUTSTREAMWRAPPER_RELEASE"
+ },
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCOMPRESSOUTPUTSTREAMWRAPPER_RELEASE": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000,
+ "n_buckets": 50,
+ "description": "Time spent waiting on the cache service lock (ms) on the main thread in NSCOMPRESSOUTPUTSTREAMWRAPPER_RELEASE"
+ },
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSINPUTSTREAMWRAPPER_LAZYINIT": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000,
+ "n_buckets": 50,
+ "description": "Time spent waiting on the cache service lock (ms) on the main thread in NSINPUTSTREAMWRAPPER_LAZYINIT"
+ },
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSINPUTSTREAMWRAPPER_CLOSEINTERNAL": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000,
+ "n_buckets": 50,
+ "description": "Time spent waiting on the cache service lock (ms) on the main thread in NSINPUTSTREAMWRAPPER_CLOSEINTERNAL"
+ },
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSINPUTSTREAMWRAPPER_RELEASE": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000,
+ "n_buckets": 50,
+ "description": "Time spent waiting on the cache service lock (ms) on the main thread in NSINPUTSTREAMWRAPPER_RELEASE"
+ },
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSDECOMPRESSINPUTSTREAMWRAPPER_RELEASE": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000,
+ "n_buckets": 50,
+ "description": "Time spent waiting on the cache service lock (ms) on the main thread in NSDECOMPRESSINPUTSTREAMWRAPPER_RELEASE"
+ },
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHESERVICE_SHUTDOWN": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000,
+ "n_buckets": 50,
+ "description": "Time spent waiting on the cache service lock (ms) on the main thread in NSCACHESERVICE_SHUTDOWN"
+ },
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHESERVICE_SETOFFLINECACHEENABLED": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000,
+ "n_buckets": 50,
+ "description": "Time spent waiting on the cache service lock (ms) on the main thread in NSCACHESERVICE_SETOFFLINECACHEENABLED"
+ },
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHESERVICE_SETOFFLINECACHECAPACITY": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000,
+ "n_buckets": 50,
+ "description": "Time spent waiting on the cache service lock (ms) on the main thread in NSCACHESERVICE_SETOFFLINECACHECAPACITY"
+ },
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHESERVICE_SETMEMORYCACHE": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000,
+ "n_buckets": 50,
+ "description": "Time spent waiting on the cache service lock (ms) on the main thread in NSCACHESERVICE_SETMEMORYCACHE"
+ },
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHESERVICE_SETDISKSMARTSIZE": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000,
+ "n_buckets": 50,
+ "description": "Time spent waiting on the cache service lock (ms) on the main thread in NSCACHESERVICE_SETDISKSMARTSIZE"
+ },
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHESERVICE_SETDISKCACHEMAXENTRYSIZE": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000,
+ "n_buckets": 50,
+ "description": "Time spent waiting on the cache service lock (ms) on the main thread in NSCACHESERVICE_SETDISKCACHEMAXENTRYSIZE"
+ },
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHESERVICE_SETMEMORYCACHEMAXENTRYSIZE": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000,
+ "n_buckets": 50,
+ "description": "Time spent waiting on the cache service lock (ms) on the main thread in NSCACHESERVICE_SETMEMORYCACHEMAXENTRYSIZE"
+ },
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHESERVICE_SETDISKCACHEENABLED": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000,
+ "n_buckets": 50,
+ "description": "Time spent waiting on the cache service lock (ms) on the main thread in NSCACHESERVICE_SETDISKCACHEENABLED"
+ },
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHESERVICE_SETDISKCACHECAPACITY": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000,
+ "n_buckets": 50,
+ "description": "Time spent waiting on the cache service lock (ms) on the main thread in NSCACHESERVICE_SETDISKCACHECAPACITY"
+ },
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHESERVICE_OPENCACHEENTRY": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000,
+ "n_buckets": 50,
+ "description": "Time spent waiting on the cache service lock (ms) on the main thread in NSCACHESERVICE_OPENCACHEENTRY"
+ },
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHESERVICE_ONPROFILESHUTDOWN": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000,
+ "n_buckets": 50,
+ "description": "Time spent waiting on the cache service lock (ms) on the main thread in NSCACHESERVICE_ONPROFILESHUTDOWN"
+ },
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHESERVICE_ONPROFILECHANGED": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000,
+ "n_buckets": 50,
+ "description": "Time spent waiting on the cache service lock (ms) on the main thread in NSCACHESERVICE_ONPROFILECHANGED"
+ },
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHESERVICE_ISSTORAGEENABLEDFORPOLICY": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000,
+ "n_buckets": 50,
+ "description": "Time spent waiting on the cache service lock (ms) on the main thread in NSCACHESERVICE_ISSTORAGEENABLEDFORPOLICY"
+ },
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHESERVICE_GETCACHEIOTARGET": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000,
+ "n_buckets": 50,
+ "description": "Time spent waiting on the cache service lock (ms) on the main thread in NSCACHESERVICE_GETCACHEIOTARGET"
+ },
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHESERVICE_EVICTENTRIESFORCLIENT": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000,
+ "n_buckets": 50,
+ "description": "Time spent waiting on the cache service lock (ms) on the main thread in NSCACHESERVICE_EVICTENTRIESFORCLIENT"
+ },
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHESERVICE_DISKDEVICEHEAPSIZE": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000,
+ "n_buckets": 50,
+ "description": "Time spent waiting on the cache service lock (ms) on the main thread in NSCACHESERVICE_DISKDEVICEHEAPSIZE"
+ },
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHESERVICE_CLOSEALLSTREAMS": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000,
+ "n_buckets": 50,
+ "description": "Time spent waiting on the cache service lock (ms) on the main thread in NSCACHESERVICE_CLOSEALLSTREAMS"
+ },
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHEENTRYDESCRIPTOR_DOOM": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000,
+ "n_buckets": 50,
+ "description": "Time spent waiting on the cache service lock (ms) on the main thread in NSCACHEENTRYDESCRIPTOR_DOOM"
+ },
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHEENTRYDESCRIPTOR_SETPREDICTEDDATASIZE": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000,
+ "n_buckets": 50,
+ "description": "Time spent waiting on the cache service lock (ms) on the main thread in NSCACHEENTRYDESCRIPTOR_SETPREDICTEDDATASIZE"
+ },
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHEENTRYDESCRIPTOR_GETDATASIZE": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000,
+ "n_buckets": 50,
+ "description": "Time spent waiting on the cache service lock (ms) on the main thread in NSCACHEENTRYDESCRIPTOR_GETDATASIZE"
+ },
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHEENTRYDESCRIPTOR_GETSTORAGEDATASIZE": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000,
+ "n_buckets": 50,
+ "description": "Time spent waiting on the cache service lock (ms) on the main thread in NSCACHEENTRYDESCRIPTOR_GETSTORAGEDATASIZE"
+ },
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHEENTRYDESCRIPTOR_REQUESTDATASIZECHANGE": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000,
+ "n_buckets": 50,
+ "description": "Time spent waiting on the cache service lock (ms) on the main thread in NSCACHEENTRYDESCRIPTOR_REQUESTDATASIZECHANGE"
+ },
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHEENTRYDESCRIPTOR_SETDATASIZE": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000,
+ "n_buckets": 50,
+ "description": "Time spent waiting on the cache service lock (ms) on the main thread in NSCACHEENTRYDESCRIPTOR_SETDATASIZE"
+ },
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHEENTRYDESCRIPTOR_OPENINPUTSTREAM": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000,
+ "n_buckets": 50,
+ "description": "Time spent waiting on the cache service lock (ms) on the main thread in NSCACHEENTRYDESCRIPTOR_OPENINPUTSTREAM"
+ },
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHEENTRYDESCRIPTOR_OPENOUTPUTSTREAM": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000,
+ "n_buckets": 50,
+ "description": "Time spent waiting on the cache service lock (ms) on the main thread in NSCACHEENTRYDESCRIPTOR_OPENOUTPUTSTREAM"
+ },
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHEENTRYDESCRIPTOR_GETCACHEELEMENT": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000,
+ "n_buckets": 50,
+ "description": "Time spent waiting on the cache service lock (ms) on the main thread in NSCACHEENTRYDESCRIPTOR_GETCACHEELEMENT"
+ },
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHEENTRYDESCRIPTOR_SETCACHEELEMENT": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000,
+ "n_buckets": 50,
+ "description": "Time spent waiting on the cache service lock (ms) on the main thread in NSCACHEENTRYDESCRIPTOR_SETCACHEELEMENT"
+ },
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHEENTRYDESCRIPTOR_GETSTORAGEPOLICY": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000,
+ "n_buckets": 50,
+ "description": "Time spent waiting on the cache service lock (ms) on the main thread in NSCACHEENTRYDESCRIPTOR_GETSTORAGEPOLICY"
+ },
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHEENTRYDESCRIPTOR_SETSTORAGEPOLICY": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000,
+ "n_buckets": 50,
+ "description": "Time spent waiting on the cache service lock (ms) on the main thread in NSCACHEENTRYDESCRIPTOR_SETSTORAGEPOLICY"
+ },
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHEENTRYDESCRIPTOR_GETFILE": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000,
+ "n_buckets": 50,
+ "description": "Time spent waiting on the cache service lock (ms) on the main thread in NSCACHEENTRYDESCRIPTOR_GETFILE"
+ },
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHEENTRYDESCRIPTOR_GETSECURITYINFO": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000,
+ "n_buckets": 50,
+ "description": "Time spent waiting on the cache service lock (ms) on the main thread in NSCACHEENTRYDESCRIPTOR_GETSECURITYINFO"
+ },
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHEENTRYDESCRIPTOR_SETSECURITYINFO": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000,
+ "n_buckets": 50,
+ "description": "Time spent waiting on the cache service lock (ms) on the main thread in NSCACHEENTRYDESCRIPTOR_SETSECURITYINFO"
+ },
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHEENTRYDESCRIPTOR_DOOMANDFAILPENDINGREQUESTS": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000,
+ "n_buckets": 50,
+ "description": "Time spent waiting on the cache service lock (ms) on the main thread in NSCACHEENTRYDESCRIPTOR_DOOMANDFAILPENDINGREQUESTS"
+ },
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHEENTRYDESCRIPTOR_MARKVALID": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000,
+ "n_buckets": 50,
+ "description": "Time spent waiting on the cache service lock (ms) on the main thread in NSCACHEENTRYDESCRIPTOR_MARKVALID"
+ },
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHEENTRYDESCRIPTOR_CLOSE": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000,
+ "n_buckets": 50,
+ "description": "Time spent waiting on the cache service lock (ms) on the main thread in NSCACHEENTRYDESCRIPTOR_CLOSE"
+ },
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHEENTRYDESCRIPTOR_GETMETADATAELEMENT": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000,
+ "n_buckets": 50,
+ "description": "Time spent waiting on the cache service lock (ms) on the main thread in NSCACHEENTRYDESCRIPTOR_GETMETADATAELEMENT"
+ },
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHEENTRYDESCRIPTOR_SETMETADATAELEMENT": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000,
+ "n_buckets": 50,
+ "description": "Time spent waiting on the cache service lock (ms) on the main thread in NSCACHEENTRYDESCRIPTOR_SETMETADATAELEMENT"
+ },
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHEENTRYDESCRIPTOR_VISITMETADATA": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000,
+ "n_buckets": 50,
+ "description": "Time spent waiting on the cache service lock (ms) on the main thread in NSCACHEENTRYDESCRIPTOR_VISITMETADATA"
+ },
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHEENTRYDESCRIPTOR_SETEXPIRATIONTIME": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000,
+ "n_buckets": 50,
+ "description": "Time spent waiting on the cache service lock (ms) on the main thread in NSCACHEENTRYDESCRIPTOR_SETEXPIRATIONTIME"
+ },
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHEENTRYDESCRIPTOR_ISSTREAMBASED": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000,
+ "n_buckets": 50,
+ "description": "Time spent waiting on the cache service lock (ms) on the main thread in NSCACHEENTRYDESCRIPTOR_ISSTREAMBASED"
+ },
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHEENTRYDESCRIPTOR_GETLASTMODIFIED": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000,
+ "n_buckets": 50,
+ "description": "Time spent waiting on the cache service lock (ms) on the main thread in NSCACHEENTRYDESCRIPTOR_GETLASTMODIFIED"
+ },
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHEENTRYDESCRIPTOR_GETEXPIRATIONTIME": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000,
+ "n_buckets": 50,
+ "description": "Time spent waiting on the cache service lock (ms) on the main thread in NSCACHEENTRYDESCRIPTOR_GETEXPIRATIONTIME"
+ },
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHEENTRYDESCRIPTOR_GETKEY": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000,
+ "n_buckets": 50,
+ "description": "Time spent waiting on the cache service lock (ms) on the main thread in NSCACHEENTRYDESCRIPTOR_GETKEY"
+ },
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHEENTRYDESCRIPTOR_GETFETCHCOUNT": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000,
+ "n_buckets": 50,
+ "description": "Time spent waiting on the cache service lock (ms) on the main thread in NSCACHEENTRYDESCRIPTOR_GETFETCHCOUNT"
+ },
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHEENTRYDESCRIPTOR_GETDEVICEID": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000,
+ "n_buckets": 50,
+ "description": "Time spent waiting on the cache service lock (ms) on the main thread in NSCACHEENTRYDESCRIPTOR_GETDEVICEID"
+ },
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHESERVICE_PROCESSREQUEST": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000,
+ "n_buckets": 50,
+ "description": "Time spent waiting on the cache service lock (ms) on the main thread in NSCACHESERVICE_PROCESSREQUEST"
+ },
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHESERVICE_VISITENTRIES": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000,
+ "n_buckets": 50,
+ "description": "Time spent waiting on the cache service lock (ms) on the main thread in NSCACHESERVICE_VISITENTRIES"
+ },
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHEENTRYDESCRIPTOR_GETPREDICTEDDATASIZE": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000,
+ "n_buckets": 50,
+ "description": "Time spent waiting on the cache service lock (ms) on the main thread in NSCACHEENTRYDESCRIPTOR_GETPREDICTEDDATASIZE"
+ },
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHEENTRYDESCRIPTOR_GETLASTFETCHED": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000,
+ "n_buckets": 50,
+ "description": "Time spent waiting on the cache service lock (ms) on the main thread in NSCACHEENTRYDESCRIPTOR_GETLASTFETCHED"
+ },
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHEENTRYDESCRIPTOR_GETCLIENTID": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000,
+ "n_buckets": 50,
+ "description": "Time spent waiting on the cache service lock (ms) on the main thread in NSCACHEENTRYDESCRIPTOR_GETCLIENTID"
+ },
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSBLOCKONCACHETHREADEVENT_RUN": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000,
+ "n_buckets": 50,
+ "description": "Time spent waiting on the cache service lock (ms) on the main thread in NSBLOCKONCACHETHREADEVENT_RUN"
+ },
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSASYNCDOOMEVENT_RUN": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000,
+ "n_buckets": 50,
+ "description": "Time spent waiting on the cache service lock (ms) on the main thread in NSASYNCDOOMEVENT_RUN"
+ },
+ "DNT_USAGE": {
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 3,
+ "description": "I want to be tracked, I do NOT want to be tracked, DNT unset"
+ },
+ "DNS_LOOKUP_METHOD2": {
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 16,
+ "description": "DNS Lookup Type (hit, renewal, negative-hit, literal, overflow, network-first, network-shared)"
+ },
+ "DNS_CLEANUP_AGE": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 1440,
+ "n_buckets": 50,
+ "description": "DNS Cache Entry Age at Removal Time (minutes)"
+ },
+ "DNS_LOOKUP_TIME": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 60000,
+ "n_buckets": 50,
+ "description": "Time for a successful DNS OS resolution (msec)"
+ },
+ "DNS_RENEWAL_TIME": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 60000,
+ "n_buckets": 50,
+ "description": "Time for a renewed DNS OS resolution (msec)"
+ },
+ "DNS_RENEWAL_TIME_FOR_TTL": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 60000,
+ "n_buckets": 50,
+ "description": "Time for a DNS OS resolution (msec) used to get TTL"
+ },
+ "DNS_FAILED_LOOKUP_TIME": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 60000,
+ "n_buckets": 50,
+ "description": "Time for an unsuccessful DNS OS resolution (msec)"
+ },
+ "DNS_BLACKLIST_COUNT": {
+ "expires_in_version": "never",
+ "kind": "linear",
+ "high": 21,
+ "n_buckets": 20,
+ "description": "The number of unusable addresses reported for each record"
+ },
+ "REFRESH_DRIVER_TICK" : {
+ "expires_in_version": "never",
+ "description": "Total time spent ticking the refresh driver in milliseconds",
+ "kind": "exponential",
+ "high": 1000,
+ "n_buckets": 50
+ },
+ "PAINT_BUILD_DISPLAYLIST_TIME" : {
+ "expires_in_version": "never",
+ "description": "Time spent in building displaylists in milliseconds",
+ "kind": "exponential",
+ "high": 1000,
+ "n_buckets": 50
+ },
+ "PAINT_RASTERIZE_TIME" : {
+ "expires_in_version": "never",
+ "description": "Time spent rasterizing each frame in milliseconds",
+ "kind": "exponential",
+ "high": 1000,
+ "n_buckets": 50
+ },
+ "PREDICTOR_PREDICT_ATTEMPTS": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 1000000,
+ "n_buckets": 50,
+ "description": "Number of times nsINetworkPredictor::Predict is called and attempts to predict"
+ },
+ "PREDICTOR_LEARN_ATTEMPTS": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 1000000,
+ "n_buckets": 50,
+ "description": "Number of times nsINetworkPredictor::Learn is called and attempts to learn"
+ },
+ "PREDICTOR_PREDICT_FULL_QUEUE": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 60000,
+ "n_buckets": 50,
+ "description": "Number of times nsINetworkPredictor::Predict doesn't continue because the queue is full"
+ },
+ "PREDICTOR_LEARN_FULL_QUEUE": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 60000,
+ "n_buckets": 50,
+ "description": "Number of times nsINetworkPredictor::Learn doesn't continue because the queue is full"
+ },
+ "PREDICTOR_WAIT_TIME": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 3000,
+ "n_buckets": 10,
+ "description": "Amount of time a predictor event waits in the queue (ms)"
+ },
+ "PREDICTOR_PREDICT_WORK_TIME": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 3000,
+ "n_buckets": 10,
+ "description": "Amount of time spent doing the work for predict (ms)"
+ },
+ "PREDICTOR_LEARN_WORK_TIME": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 3000,
+ "n_buckets": 10,
+ "description": "Amount of time spent doing the work for learn (ms)"
+ },
+ "PREDICTOR_TOTAL_PREDICTIONS": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 1000000,
+ "n_buckets": 50,
+ "description": "How many actual predictions (preresolves, preconnects, ...) happen"
+ },
+ "PREDICTOR_TOTAL_PREFETCHES": {
+ "expires_in_version": "never",
+ "alert_emails": [],
+ "bug_numbers": [1016628],
+ "kind": "exponential",
+ "high": 1000000,
+ "n_buckets": 50,
+ "description": "How many actual prefetches happen"
+ },
+ "PREDICTOR_TOTAL_PREFETCHES_USED": {
+ "expires_in_version": "never",
+ "alert_emails": [],
+ "bug_numbers": [1016628],
+ "kind": "exponential",
+ "high": 1000000,
+ "n_buckets": 50,
+ "description": "How many prefetches are actually used by a channel"
+ },
+ "PREDICTOR_PREFETCH_TIME": {
+ "expires_in_version": "never",
+ "alert_emails": [],
+ "bug_numbers": [1016628],
+ "kind": "exponential",
+ "high": 3000,
+ "n_buckets": 10,
+ "description": "How long it takes from OnStartRequest to OnStopRequest for a prefetch"
+ },
+ "PREDICTOR_TOTAL_PRECONNECTS": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 1000000,
+ "n_buckets": 50,
+ "description": "How many actual preconnects happen"
+ },
+ "PREDICTOR_TOTAL_PRECONNECTS_CREATED": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 1000000,
+ "n_buckets": 50,
+ "description": "How many preconnects actually created a speculative socket"
+ },
+ "PREDICTOR_TOTAL_PRECONNECTS_USED": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 1000000,
+ "n_buckets": 50,
+ "description": "How many preconnects actually created a used speculative socket"
+ },
+ "PREDICTOR_TOTAL_PRECONNECTS_UNUSED": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 1000000,
+ "n_buckets": 50,
+ "description": "How many preconnects needlessly created a speculative socket"
+ },
+ "PREDICTOR_TOTAL_PRERESOLVES": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 1000000,
+ "n_buckets": 50,
+ "description": "How many actual preresolves happen"
+ },
+ "PREDICTOR_PREDICTIONS_CALCULATED": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 1000000,
+ "n_buckets": 50,
+ "description": "How many prediction calculations are performed"
+ },
+ "PREDICTOR_GLOBAL_DEGRADATION": {
+ "expires_in_version": "never",
+ "kind": "linear",
+ "high": 100,
+ "n_buckets": 50,
+ "description": "The global degradation calculated"
+ },
+ "PREDICTOR_SUBRESOURCE_DEGRADATION": {
+ "expires_in_version": "never",
+ "kind": "linear",
+ "high": 100,
+ "n_buckets": 50,
+ "description": "The degradation calculated for a subresource"
+ },
+ "PREDICTOR_BASE_CONFIDENCE": {
+ "expires_in_version": "never",
+ "kind": "linear",
+ "high": 100,
+ "n_buckets": 50,
+ "description": "The base confidence calculated for a subresource"
+ },
+ "PREDICTOR_CONFIDENCE": {
+ "expires_in_version": "never",
+ "kind": "linear",
+ "high": 100,
+ "n_buckets": 50,
+ "description": "The final confidence calculated for a subresource"
+ },
+ "PREDICTOR_PREDICT_TIME_TO_ACTION": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 3000,
+ "n_buckets": 10,
+ "description": "How long it takes from the time Predict() is called to the time we take action"
+ },
+ "PREDICTOR_PREDICT_TIME_TO_INACTION": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 3000,
+ "n_buckets": 10,
+ "description": "How long it takes from the time Predict() is called to the time we figure out there's nothing to do"
+ },
+ "HTTPCONNMGR_TOTAL_SPECULATIVE_CONN": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 1000000,
+ "n_buckets": 50,
+ "description": "How many speculative http connections are created"
+ },
+ "HTTPCONNMGR_USED_SPECULATIVE_CONN": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 1000000,
+ "n_buckets": 50,
+ "description": "How many speculative http connections are actually used"
+ },
+ "HTTPCONNMGR_UNUSED_SPECULATIVE_CONN": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 1000000,
+ "n_buckets": 50,
+ "description": "How many speculative connections are made needlessly"
+ },
+ "TAP_TO_LOAD_IMAGE_SIZE": {
+ "expires_in_version": "50",
+ "kind": "exponential",
+ "high": 32768,
+ "n_buckets": 50,
+ "description": "The size of the image being shown, when using tap-to-load images. (kilobytes)",
+ "bug_numbers": [1208167]
+ },
+ "STS_POLL_AND_EVENTS_CYCLE": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 60000,
+ "n_buckets": 1000,
+    "description": "The duration of a socketThread cycle, including polls and pending events. (ms)"
+ },
+ "STS_NUMBER_OF_PENDING_EVENTS": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 2000,
+ "n_buckets": 100,
+ "description": "Number of pending events per SocketThread cycle."
+ },
+ "STS_POLL_CYCLE": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 60000,
+ "n_buckets": 1000,
+ "description": "The duration of poll. (ms)"
+ },
+ "STS_POLL_AND_EVENT_THE_LAST_CYCLE": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 60000,
+ "n_buckets": 1000,
+    "description": "The duration of the socketThread cycle during shutdown, including polls and pending events. (ms)"
+ },
+ "STS_NUMBER_OF_PENDING_EVENTS_IN_THE_LAST_CYCLE": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 2000,
+ "n_buckets": 100,
+ "description": "Number of pending events per SocketThread cycle during shutdown."
+ },
+ "STS_NUMBER_OF_ONSOCKETREADY_CALLS": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 2000,
+ "n_buckets": 100,
+ "description": "Number of OnSocketReady calls during a single poll."
+ },
+ "STS_POLL_BLOCK_TIME": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 60000,
+ "n_buckets": 1000,
+ "description": "Time spent blocked on poll (ms)."
+ },
+ "PRCONNECT_BLOCKING_TIME_NORMAL": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 60000,
+ "n_buckets": 1000,
+    "description": "Time spent blocked in PR_Connect when we are not shutting down and there has been neither a network nor an offline state change in the last 60s (ms)."
+ },
+ "PRCONNECT_BLOCKING_TIME_SHUTDOWN": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 60000,
+ "n_buckets": 1000,
+ "description": "Time spent blocked in PR_Connect during a shutdown (ms)."
+ },
+ "PRCONNECT_BLOCKING_TIME_CONNECTIVITY_CHANGE": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 60000,
+ "n_buckets": 1000,
+    "description": "Time spent blocked in PR_Connect when there has been a connectivity change in the last 60s (ms)."
+ },
+ "PRCONNECT_BLOCKING_TIME_LINK_CHANGE": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 60000,
+ "n_buckets": 1000,
+ "description": "Time spent blocked in PR_Connect when there has been a link change in the last 60s (ms)."
+ },
+ "PRCONNECT_BLOCKING_TIME_OFFLINE": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 60000,
+ "n_buckets": 1000,
+ "description": "Time spent blocked in PR_Connect when the offline state has changed in the last 60s (ms)."
+ },
+ "PRCONNECT_FAIL_BLOCKING_TIME_NORMAL": {
+ "bug_numbers": [1257809],
+ "alert_emails": ["ddamjanovic@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 60000,
+ "n_buckets": 100,
+    "description": "Time spent blocked in a failed PR_Connect when we are not shutting down and there has been neither a network nor an offline state change in the last 60s (ms)."
+ },
+ "PRCONNECT_FAIL_BLOCKING_TIME_SHUTDOWN": {
+ "bug_numbers": [1257809],
+ "alert_emails": ["ddamjanovic@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 60000,
+ "n_buckets": 100,
+ "description": "Time spent blocked in a failed PR_Connect during a shutdown (ms)."
+ },
+ "PRCONNECT_FAIL_BLOCKING_TIME_CONNECTIVITY_CHANGE": {
+ "bug_numbers": [1257809],
+ "alert_emails": ["ddamjanovic@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 60000,
+ "n_buckets": 100,
+    "description": "Time spent blocked in a failed PR_Connect when there has been a connectivity change in the last 60s (ms)."
+ },
+ "PRCONNECT_FAIL_BLOCKING_TIME_LINK_CHANGE": {
+ "bug_numbers": [1257809],
+ "alert_emails": ["ddamjanovic@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 60000,
+ "n_buckets": 100,
+ "description": "Time spent blocked in a failed PR_Connect when there has been a link change in the last 60s (ms)."
+ },
+ "PRCONNECT_FAIL_BLOCKING_TIME_OFFLINE": {
+ "bug_numbers": [1257809],
+ "alert_emails": ["ddamjanovic@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 60000,
+ "n_buckets": 100,
+ "description": "Time spent blocked in a failed PR_Connect when the offline state has changed in the last 60s (ms)."
+ },
+ "PRCONNECTCONTINUE_BLOCKING_TIME_NORMAL": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 60000,
+ "n_buckets": 1000,
+    "description": "Time spent blocked in PR_ConnectContinue when we are not shutting down and there has been neither a network nor an offline state change in the last 60s (ms)."
+ },
+ "PRCONNECTCONTINUE_BLOCKING_TIME_SHUTDOWN": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 60000,
+ "n_buckets": 1000,
+ "description": "Time spent blocked in PR_ConnectContinue during a shutdown (ms)."
+ },
+ "PRCONNECTCONTINUE_BLOCKING_TIME_CONNECTIVITY_CHANGE": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 60000,
+ "n_buckets": 1000,
+ "description": "Time spent blocked in PR_ConnectContinue when there has been the connectivity change in the last 60s (ms)."
+ },
+ "PRCONNECTCONTINUE_BLOCKING_TIME_LINK_CHANGE": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 60000,
+ "n_buckets": 1000,
+ "description": "Time spent blocked in PR_ConnectContinue when there has been a link change in the last 60s (ms)."
+ },
+ "PRCONNECTCONTINUE_BLOCKING_TIME_OFFLINE": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 60000,
+ "n_buckets": 1000,
+ "description": "Time spent blocked in PR_ConnectContinue when the offline state has changed in the last 60s (ms)."
+ },
+ "PRCLOSE_TCP_BLOCKING_TIME_NORMAL": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 60000,
+ "n_buckets": 1000,
+    "description": "Time spent blocked in PR_Close when we are not shutting down and there has been neither a network nor an offline state change in the last 60s (ms)."
+ },
+ "PRCLOSE_TCP_BLOCKING_TIME_SHUTDOWN": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 60000,
+ "n_buckets": 1000,
+ "description": "Time spent blocked in PR_Close during a shutdown (ms)."
+ },
+ "PRCLOSE_TCP_BLOCKING_TIME_CONNECTIVITY_CHANGE": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 60000,
+ "n_buckets": 1000,
+ "description": "Time spent blocked in PR_Close when there has been the connectivity change in the last 60s (ms)."
+ },
+ "PRCLOSE_TCP_BLOCKING_TIME_LINK_CHANGE": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 60000,
+ "n_buckets": 1000,
+ "description": "Time spent blocked in PR_Close when there has been a link change in the last 60s (ms)."
+ },
+ "PRCLOSE_TCP_BLOCKING_TIME_OFFLINE": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 60000,
+ "n_buckets": 1000,
+ "description": "Time spent blocked in PR_Close when the offline state has changed in the last 60s (ms)."
+ },
+ "PRCLOSE_UDP_BLOCKING_TIME_NORMAL": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 60000,
+ "n_buckets": 1000,
+    "description": "Time spent blocked in PR_Close when we are not shutting down and there has been neither a network nor an offline state change in the last 60s (ms)."
+ },
+ "PRCLOSE_UDP_BLOCKING_TIME_SHUTDOWN": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 60000,
+ "n_buckets": 1000,
+ "description": "Time spent blocked in PR_Close during a shutdown (ms)."
+ },
+ "PRCLOSE_UDP_BLOCKING_TIME_CONNECTIVITY_CHANGE": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 60000,
+ "n_buckets": 1000,
+ "description": "Time spent blocked in PR_Close when there has been the connectivity change in the last 60s (ms)."
+ },
+ "PRCLOSE_UDP_BLOCKING_TIME_LINK_CHANGE": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 60000,
+ "n_buckets": 1000,
+ "description": "Time spent blocked in PR_Close when there has been a link change in the last 60s (ms)."
+ },
+ "PRCLOSE_UDP_BLOCKING_TIME_OFFLINE": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 60000,
+ "n_buckets": 1000,
+ "description": "Time spent blocked in PR_Close when the offline state has changed in the last 60s (ms)."
+ },
+ "IPV4_AND_IPV6_ADDRESS_CONNECTIVITY": {
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 4,
+    "description": "Count the number of 0) successful connections to an ipv4 address, 1) failed connections to an ipv4 address, 2) successful connections to an ipv6 address and 3) failed connections to an ipv6 address."
+ },
+ "NETWORK_SESSION_AT_900FD": {
+ "expires_in_version": "never",
+ "kind": "boolean",
+ "description": "session reached 900 fd limit sockets",
+ "bug_numbers": [1260218],
+ "alert_emails": ["necko@mozilla.com"]
+ },
+ "NETWORK_PROBE_MAXCOUNT": {
+ "expires_in_version": "never",
+ "kind": "linear",
+ "low": 50,
+ "high": 1000,
+ "n_buckets": 10,
+ "description": "Result of nsSocketTransportService::ProbeMaxCount()",
+ "bug_numbers": [1260218],
+ "alert_emails": ["necko@mozilla.com"]
+ },
+ "FIND_PLUGINS": {
+ "alert_emails": ["perf-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 3000,
+ "n_buckets": 10,
+ "description": "Time spent scanning filesystem for plugins (ms)"
+ },
+ "CHECK_JAVA_ENABLED": {
+ "expires_in_version": "default",
+ "kind": "exponential",
+ "high": 3000,
+ "n_buckets": 10,
+ "description": "Time spent checking if Java is enabled (ms)"
+ },
+ "PLUGIN_HANG_UI_USER_RESPONSE": {
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 3,
+ "description": "User response to Plugin Hang UI"
+ },
+ "PLUGIN_HANG_UI_DONT_ASK": {
+ "expires_in_version": "never",
+ "kind": "boolean",
+ "description": "Whether the user has requested not to see the Plugin Hang UI again"
+ },
+ "PLUGIN_HANG_UI_RESPONSE_TIME": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 60000,
+ "n_buckets": 20,
+ "description": "Time spent in Plugin Hang UI (ms)"
+ },
+ "PLUGIN_HANG_TIME": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 60000,
+ "n_buckets": 20,
+ "description": "Value of dom.ipc.plugins.hangUITimeoutSecs plus time spent in Plugin Hang UI (ms)"
+ },
+ "PLUGIN_LOAD_METADATA": {
+ "alert_emails": ["perf-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 5000,
+ "n_buckets": 20,
+ "description": "Time spent loading plugin DLL and obtaining metadata (ms)"
+ },
+ "PLUGIN_SHUTDOWN_MS": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 5000,
+ "n_buckets": 20,
+ "description": "Time spent shutting down plugins (ms)"
+ },
+ "PLUGIN_CALLED_DIRECTLY": {
+ "expires_in_version": "never",
+ "kind": "flag",
+ "description": "A plugin object was successfully invoked as a function"
+ },
+ "FLASH_PLUGIN_STATES": {
+ "expires_in_version": "50",
+ "kind": "enumerated",
+ "n_values": 20,
+ "description": "A flash object's initialization state"
+ },
+ "FLASH_PLUGIN_AREA": {
+ "expires_in_version": "50",
+ "kind": "exponential",
+ "low": 256,
+ "high": 16777216,
+ "n_buckets": 50,
+ "description": "Flash object area (width * height)"
+ },
+ "FLASH_PLUGIN_WIDTH": {
+ "expires_in_version": "50",
+ "kind": "linear",
+ "low": 1,
+ "high": 2000,
+ "n_buckets": 50,
+ "description": "Flash object width"
+ },
+ "FLASH_PLUGIN_HEIGHT": {
+ "expires_in_version": "50",
+ "kind": "linear",
+ "low": 1,
+ "high": 2000,
+ "n_buckets": 50,
+ "description": "Flash object height"
+ },
+ "FLASH_PLUGIN_INSTANCES_ON_PAGE": {
+ "expires_in_version": "50",
+ "kind": "enumerated",
+ "n_values": 30,
+ "description": "Flash object instances count on page"
+ },
+ "MOZ_SQLITE_OPEN_MS": {
+ "expires_in_version": "default",
+ "kind": "exponential",
+ "high": 3000,
+ "n_buckets": 10,
+ "description": "Time spent on SQLite open() (ms)"
+ },
+ "MOZ_SQLITE_OPEN_MAIN_THREAD_MS": {
+ "expires_in_version": "40",
+ "kind": "exponential",
+ "high": 3000,
+ "n_buckets": 10,
+ "description": "Time spent on SQLite open() (ms) *** No longer needed (bug 1156565). Delete histogram and accumulation code! ***"
+ },
+ "MOZ_SQLITE_TRUNCATE_MS": {
+ "expires_in_version": "40",
+ "kind": "exponential",
+ "high": 3000,
+ "n_buckets": 10,
+ "description": "Time spent on SQLite truncate() (ms) *** No longer needed (bug 1156565). Delete histogram and accumulation code! ***"
+ },
+ "MOZ_SQLITE_TRUNCATE_MAIN_THREAD_MS": {
+ "expires_in_version": "40",
+ "kind": "exponential",
+ "high": 3000,
+ "n_buckets": 10,
+ "description": "Time spent on SQLite truncate() (ms) *** No longer needed (bug 1156565). Delete histogram and accumulation code! ***"
+ },
+ "MOZ_SQLITE_OTHER_READ_MS": {
+ "expires_in_version": "40",
+ "kind": "exponential",
+ "high": 3000,
+ "n_buckets": 10,
+ "description": "Time spent on SQLite read() (ms) *** No longer needed (bug 1156565). Delete histogram and accumulation code! ***"
+ },
+ "MOZ_SQLITE_OTHER_READ_MAIN_THREAD_MS": {
+ "expires_in_version": "40",
+ "kind": "exponential",
+ "high": 3000,
+ "n_buckets": 10,
+ "description": "Time spent on SQLite read() (ms) *** No longer needed (bug 1156565). Delete histogram and accumulation code! ***"
+ },
+ "MOZ_SQLITE_PLACES_READ_MS": {
+ "expires_in_version": "40",
+ "kind": "exponential",
+ "high": 3000,
+ "n_buckets": 10,
+ "description": "Time spent on SQLite read() (ms) *** No longer needed (bug 1156565). Delete histogram and accumulation code! ***"
+ },
+ "MOZ_SQLITE_PLACES_READ_MAIN_THREAD_MS": {
+ "expires_in_version": "40",
+ "kind": "exponential",
+ "high": 3000,
+ "n_buckets": 10,
+ "description": "Time spent on SQLite read() (ms) *** No longer needed (bug 1156565). Delete histogram and accumulation code! ***"
+ },
+ "MOZ_SQLITE_COOKIES_OPEN_READAHEAD_MS": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 3000,
+ "n_buckets": 10,
+ "description": "Time spent on cookie DB open with readahead (ms)"
+ },
+ "MOZ_SQLITE_COOKIES_READ_MS": {
+ "expires_in_version": "40",
+ "kind": "exponential",
+ "high": 3000,
+ "n_buckets": 10,
+ "description": "Time spent on SQLite read() (ms) *** No longer needed (bug 1156565). Delete histogram and accumulation code! ***"
+ },
+ "MOZ_SQLITE_COOKIES_READ_MAIN_THREAD_MS": {
+ "expires_in_version": "40",
+ "kind": "exponential",
+ "high": 3000,
+ "n_buckets": 10,
+ "description": "Time spent on SQLite read() (ms) *** No longer needed (bug 1156565). Delete histogram and accumulation code! ***"
+ },
+ "MOZ_SQLITE_WEBAPPS_READ_MS": {
+ "expires_in_version": "40",
+ "kind": "exponential",
+ "high": 3000,
+ "n_buckets": 10,
+ "description": "Time spent on SQLite read() (ms) *** No longer needed (bug 1156565). Delete histogram and accumulation code! ***"
+ },
+ "MOZ_SQLITE_WEBAPPS_READ_MAIN_THREAD_MS": {
+ "expires_in_version": "40",
+ "kind": "exponential",
+ "high": 3000,
+ "n_buckets": 10,
+ "description": "Time spent on SQLite read() (ms) *** No longer needed (bug 1156565). Delete histogram and accumulation code! ***"
+ },
+ "MOZ_SQLITE_OTHER_WRITE_MS": {
+ "expires_in_version": "40",
+ "kind": "exponential",
+ "high": 3000,
+ "n_buckets": 10,
+ "description": "Time spent on SQLite write() (ms) *** No longer needed (bug 1156565). Delete histogram and accumulation code! ***"
+ },
+ "MOZ_SQLITE_OTHER_WRITE_MAIN_THREAD_MS": {
+ "expires_in_version": "40",
+ "kind": "exponential",
+ "high": 3000,
+ "n_buckets": 10,
+ "description": "Time spent on SQLite write() (ms) *** No longer needed (bug 1156565). Delete histogram and accumulation code! ***"
+ },
+ "MOZ_SQLITE_PLACES_WRITE_MS": {
+ "expires_in_version": "default",
+ "kind": "exponential",
+ "high": 3000,
+ "n_buckets": 10,
+ "description": "Time spent on SQLite write() (ms)"
+ },
+ "MOZ_SQLITE_PLACES_WRITE_MAIN_THREAD_MS": {
+ "expires_in_version": "40",
+ "kind": "exponential",
+ "high": 3000,
+ "n_buckets": 10,
+ "description": "Time spent on SQLite write() (ms) *** No longer needed (bug 1156565). Delete histogram and accumulation code! ***"
+ },
+ "MOZ_SQLITE_COOKIES_WRITE_MS": {
+ "expires_in_version": "40",
+ "kind": "exponential",
+ "high": 3000,
+ "n_buckets": 10,
+ "description": "Time spent on SQLite write() (ms) *** No longer needed (bug 1156565). Delete histogram and accumulation code! ***"
+ },
+ "MOZ_SQLITE_COOKIES_WRITE_MAIN_THREAD_MS": {
+ "expires_in_version": "40",
+ "kind": "exponential",
+ "high": 3000,
+ "n_buckets": 10,
+ "description": "Time spent on SQLite write() (ms) *** No longer needed (bug 1156565). Delete histogram and accumulation code! ***"
+ },
+ "MOZ_SQLITE_WEBAPPS_WRITE_MS": {
+ "expires_in_version": "40",
+ "kind": "exponential",
+ "high": 3000,
+ "n_buckets": 10,
+ "description": "Time spent on SQLite write() (ms) *** No longer needed (bug 1156565). Delete histogram and accumulation code! ***"
+ },
+ "MOZ_SQLITE_WEBAPPS_WRITE_MAIN_THREAD_MS": {
+ "expires_in_version": "40",
+ "kind": "exponential",
+ "high": 3000,
+ "n_buckets": 10,
+ "description": "Time spent on SQLite write() (ms) *** No longer needed (bug 1156565). Delete histogram and accumulation code! ***"
+ },
+ "MOZ_SQLITE_OTHER_SYNC_MS": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 3000,
+ "n_buckets": 10,
+ "description": "Time spent on SQLite fsync() (ms)"
+ },
+ "MOZ_SQLITE_OTHER_SYNC_MAIN_THREAD_MS": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 3000,
+ "n_buckets": 10,
+ "description": "Time spent on SQLite fsync() (ms)"
+ },
+ "MOZ_SQLITE_PLACES_SYNC_MS": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 3000,
+ "n_buckets": 10,
+ "description": "Time spent on SQLite fsync() (ms)"
+ },
+ "MOZ_SQLITE_PLACES_SYNC_MAIN_THREAD_MS": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 3000,
+ "n_buckets": 10,
+ "description": "Time spent on SQLite fsync() (ms)"
+ },
+ "MOZ_SQLITE_COOKIES_SYNC_MS": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 3000,
+ "n_buckets": 10,
+ "description": "Time spent on SQLite fsync() (ms)"
+ },
+ "MOZ_SQLITE_COOKIES_SYNC_MAIN_THREAD_MS": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 3000,
+ "n_buckets": 10,
+ "description": "Time spent on SQLite fsync() (ms)"
+ },
+ "MOZ_SQLITE_WEBAPPS_SYNC_MS": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 3000,
+ "n_buckets": 10,
+ "description": "Time spent on SQLite fsync() (ms)"
+ },
+ "MOZ_SQLITE_WEBAPPS_SYNC_MAIN_THREAD_MS": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 3000,
+ "n_buckets": 10,
+ "description": "Time spent on SQLite fsync() (ms)"
+ },
+ "MOZ_SQLITE_OTHER_READ_B": {
+ "expires_in_version": "default",
+ "kind": "linear",
+ "high": 32768,
+ "n_buckets": 3,
+ "description": "SQLite read() (bytes)"
+ },
+ "MOZ_SQLITE_PLACES_READ_B": {
+ "expires_in_version": "40",
+ "kind": "linear",
+ "high": 32768,
+ "n_buckets": 3,
+ "description": "SQLite read() (bytes) *** No longer needed (bug 1156565). Delete histogram and accumulation code! ***"
+ },
+ "MOZ_SQLITE_COOKIES_READ_B": {
+ "expires_in_version": "40",
+ "kind": "linear",
+ "high": 32768,
+ "n_buckets": 3,
+ "description": "SQLite read() (bytes) *** No longer needed (bug 1156565). Delete histogram and accumulation code! ***"
+ },
+ "MOZ_SQLITE_WEBAPPS_READ_B": {
+ "expires_in_version": "40",
+ "kind": "linear",
+ "high": 32768,
+ "n_buckets": 3,
+ "description": "SQLite read() (bytes) *** No longer needed (bug 1156565). Delete histogram and accumulation code! ***"
+ },
+ "MOZ_SQLITE_PLACES_WRITE_B": {
+ "expires_in_version": "40",
+ "kind": "linear",
+ "high": 32768,
+ "n_buckets": 3,
+ "description": "SQLite write (bytes) *** No longer needed (bug 1156565). Delete histogram and accumulation code! ***"
+ },
+ "MOZ_SQLITE_COOKIES_WRITE_B": {
+ "expires_in_version": "40",
+ "kind": "linear",
+ "high": 32768,
+ "n_buckets": 3,
+ "description": "SQLite write (bytes) *** No longer needed (bug 1156565). Delete histogram and accumulation code! ***"
+ },
+ "MOZ_SQLITE_WEBAPPS_WRITE_B": {
+ "expires_in_version": "40",
+ "kind": "linear",
+ "high": 32768,
+ "n_buckets": 3,
+ "description": "SQLite write (bytes) *** No longer needed (bug 1156565). Delete histogram and accumulation code! ***"
+ },
+ "MOZ_SQLITE_OTHER_WRITE_B": {
+ "expires_in_version": "default",
+ "kind": "linear",
+ "high": 32768,
+ "n_buckets": 3,
+ "description": "SQLite write (bytes)"
+ },
+ "MOZ_STORAGE_ASYNC_REQUESTS_MS": {
+ "alert_emails": ["perf-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "40",
+ "kind": "exponential",
+ "high": 32768,
+ "n_buckets": 20,
+ "description": "mozStorage async requests completion (ms) *** No longer needed (bug 1156565). Delete histogram and accumulation code! ***"
+ },
+ "MOZ_STORAGE_ASYNC_REQUESTS_SUCCESS": {
+ "alert_emails": ["perf-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "40",
+ "kind": "boolean",
+ "description": "mozStorage async requests success *** No longer needed (bug 1156565). Delete histogram and accumulation code! ***"
+ },
+ "STARTUP_MEASUREMENT_ERRORS": {
+ "expires_in_version": "default",
+ "kind": "enumerated",
+ "n_values": 16,
+ "description": "Flags errors in startup calculation()"
+ },
+ "NETWORK_DISK_CACHE_OPEN": {
+ "expires_in_version": "default",
+ "kind": "exponential",
+ "high": 10000,
+ "n_buckets": 10,
+ "description": "Time spent opening disk cache (ms)"
+ },
+ "NETWORK_DISK_CACHE_TRASHRENAME": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000,
+ "n_buckets": 10,
+ "description": "Time spent renaming bad Cache to Cache.Trash (ms)"
+ },
+ "NETWORK_DISK_CACHE_DELETEDIR": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000,
+ "n_buckets": 10,
+ "description": "Time spent deleting disk cache (ms)"
+ },
+ "NETWORK_DISK_CACHE_DELETEDIR_SHUTDOWN": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000,
+ "n_buckets": 10,
+    "description": "Time spent during shutdown stopping thread deleting old disk cache (ms)"
+ },
+ "NETWORK_DISK_CACHE_SHUTDOWN": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000,
+ "n_buckets": 10,
+    "description": "Total Time spent (ms) during disk cache shutdown"
+ },
+ "NETWORK_DISK_CACHE_SHUTDOWN_V2": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000,
+ "n_buckets": 10,
+    "description": "Total Time spent (ms) during disk cache shutdown [cache2]"
+ },
+ "NETWORK_DISK_CACHE_SHUTDOWN_CLEAR_PRIVATE": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000,
+ "n_buckets": 10,
+    "description": "Time spent (ms) during shutdown deleting disk cache for 'clear private data' option"
+ },
+ "NETWORK_DISK_CACHE2_SHUTDOWN_CLEAR_PRIVATE": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000,
+ "n_buckets": 10,
+    "description": "Time spent (ms) during shutdown deleting disk cache v2 for 'clear private data' option"
+ },
+ "NETWORK_ID": {
+ "alert_emails": ["necko@mozilla.com"],
+ "bug_numbers": [1240932],
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 6,
+ "description": "Network identification (0=None, 1=New, 2=Same)"
+ },
+ "IDLE_NOTIFY_IDLE_MS": {
+ "alert_emails": ["froydnj@mozilla.com"],
+ "bug_numbers": [731004],
+ "expires_in_version": "default",
+ "kind": "exponential",
+ "high": 5000,
+ "n_buckets": 10,
+ "description": "Time spent checking for and notifying listeners that the user is idle (ms)"
+ },
+ "URLCLASSIFIER_LOOKUP_TIME": {
+ "alert_emails": ["safebrowsing-telemetry@mozilla.org"],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 500,
+ "n_buckets": 10,
+ "description": "Time spent per dbservice lookup (ms)"
+ },
+ "URLCLASSIFIER_SHUTDOWN_TIME": {
+ "alert_emails": ["safebrowsing-telemetry@mozilla.org"],
+ "expires_in_version": "58",
+ "kind": "exponential",
+ "high": 60000,
+ "n_buckets": 50,
+ "bug_numbers": [1315140],
+ "description": "Time spent per dbservice shutdown (ms)"
+ },
+ "URLCLASSIFIER_CL_CHECK_TIME": {
+ "alert_emails": ["safebrowsing-telemetry@mozilla.org"],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 500,
+ "n_buckets": 10,
+ "description": "Time spent per classifier lookup (ms)"
+ },
+ "URLCLASSIFIER_CL_UPDATE_TIME": {
+ "alert_emails": ["safebrowsing-telemetry@mozilla.org"],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "low": 20,
+ "high": 15000,
+ "n_buckets": 15,
+ "description": "Time spent per classifier update (ms)"
+ },
+ "URLCLASSIFIER_PS_FILELOAD_TIME": {
+ "alert_emails": ["safebrowsing-telemetry@mozilla.org"],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 1000,
+ "n_buckets": 10,
+ "description": "Time spent loading PrefixSet from file (ms)"
+ },
+ "URLCLASSIFIER_PS_FALLOCATE_TIME": {
+ "alert_emails": ["safebrowsing-telemetry@mozilla.org"],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 1000,
+ "n_buckets": 10,
+ "description": "Time spent fallocating PrefixSet (ms)"
+ },
+ "URLCLASSIFIER_PS_CONSTRUCT_TIME": {
+ "alert_emails": ["safebrowsing-telemetry@mozilla.org"],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 5000,
+ "n_buckets": 15,
+ "description": "Time spent constructing PrefixSet from DB (ms)"
+ },
+ "URLCLASSIFIER_VLPS_FILELOAD_TIME": {
+ "alert_emails": ["safebrowsing-telemetry@mozilla.org"],
+ "expires_in_version": "58",
+ "kind": "exponential",
+ "high": 1000,
+ "n_buckets": 10,
+ "bug_numbers": [1283007],
+ "description": "Time spent loading Variable-Length PrefixSet from file (ms)"
+ },
+ "URLCLASSIFIER_VLPS_FALLOCATE_TIME": {
+ "alert_emails": ["safebrowsing-telemetry@mozilla.org"],
+ "expires_in_version": "58",
+ "kind": "exponential",
+ "high": 1000,
+ "n_buckets": 10,
+ "bug_numbers": [1283007],
+ "description": "Time spent fallocating Variable-Length PrefixSet (ms)"
+ },
+ "URLCLASSIFIER_VLPS_LOAD_CORRUPT": {
+ "alert_emails": ["safebrowsing-telemetry@mozilla.org"],
+ "expires_in_version": "58",
+ "kind": "boolean",
+ "bug_numbers": [1305581],
+ "description": "Whether or not a variable-length prefix set loaded from disk is corrupted (true = file corrupted)."
+ },
+ "URLCLASSIFIER_LC_PREFIXES": {
+ "alert_emails": ["safebrowsing-telemetry@mozilla.org"],
+ "expires_in_version": "never",
+ "kind": "linear",
+ "high": 1500000,
+ "n_buckets": 15,
+ "description": "Size of the prefix cache in entries"
+ },
+ "URLCLASSIFIER_LC_COMPLETIONS": {
+ "alert_emails": ["safebrowsing-telemetry@mozilla.org"],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 200,
+ "n_buckets": 10,
+ "description": "Size of the completion cache in entries"
+ },
+ "URLCLASSIFIER_UPDATE_REMOTE_STATUS": {
+ "alert_emails": ["safebrowsing-telemetry@mozilla.org"],
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 16,
+ "bug_numbers": [1150921],
+ "description": "Server HTTP status code from SafeBrowsing database updates. (0=1xx, 1=200, 2=2xx, 3=204, 4=3xx, 5=400, 6=4xx, 7=403, 8=404, 9=408, 10=413, 11=5xx, 12=502|504|511, 13=503, 14=505, 15=Other)"
+ },
+ "URLCLASSIFIER_COMPLETE_REMOTE_STATUS": {
+ "alert_emails": ["safebrowsing-telemetry@mozilla.org"],
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 16,
+ "bug_numbers": [1150921],
+ "description": "Server HTTP status code from remote SafeBrowsing gethash lookups. (0=1xx, 1=200, 2=2xx, 3=204, 4=3xx, 5=400, 6=4xx, 7=403, 8=404, 9=408, 10=413, 11=5xx, 12=502|504|511, 13=503, 14=505, 15=Other)"
+ },
+ "URLCLASSIFIER_COMPLETE_TIMEOUT": {
+ "alert_emails": ["safebrowsing-telemetry@mozilla.org"],
+ "expires_in_version": "56",
+ "kind": "boolean",
+ "bug_numbers": [1172688],
+ "description": "This metric is recorded every time a gethash lookup is performed, `true` is recorded if the lookup times out."
+ },
+ "URLCLASSIFIER_UPDATE_ERROR_TYPE": {
+ "alert_emails": ["safebrowsing-telemetry@mozilla.org"],
+ "expires_in_version": "58",
+ "kind": "enumerated",
+ "n_values": 10,
+ "bug_numbers": [1305801],
+ "description": "An error was encountered while parsing a partial update returned by a Safe Browsing V4 server (0 = addition of an already existing prefix, 1 = parser got into an infinite loop, 2 = removal index out of bounds, 3 = checksum mismatch, 4 = missing checksum)"
+ },
+ "URLCLASSIFIER_PREFIX_MATCH": {
+ "alert_emails": ["safebrowsing-telemetry@mozilla.org"],
+ "expires_in_version": "58",
+ "kind": "enumerated",
+ "n_values": 4,
+ "bug_numbers": [1298257],
+ "description": "Classifier prefix matching result (0 = no match, 1 = match only V2, 2 = match only V4, 3 = match both V2 and V4)"
+ },
+ "CSP_DOCUMENTS_COUNT": {
+ "alert_emails": ["seceng@mozilla.com"],
+ "bug_numbers": [1252829],
+ "expires_in_version": "55",
+ "kind": "count",
+ "description": "Number of unique pages that contain a CSP"
+ },
+ "CSP_UNSAFE_INLINE_DOCUMENTS_COUNT": {
+ "alert_emails": ["seceng@mozilla.com"],
+ "bug_numbers": [1252829],
+ "expires_in_version": "55",
+ "kind": "count",
+ "description": "Number of unique pages that contain an unsafe-inline CSP directive"
+ },
+ "CSP_UNSAFE_EVAL_DOCUMENTS_COUNT": {
+ "alert_emails": ["seceng@mozilla.com"],
+ "bug_numbers": [1252829],
+ "expires_in_version": "55",
+ "kind": "count",
+ "description": "Number of unique pages that contain an unsafe-eval CSP directive"
+ },
+ "PLACES_DATABASE_CORRUPTION_HANDLING_STAGE": {
+ "alert_emails": ["firefox-dev@mozilla.org"],
+ "bug_numbers": [1356812],
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 6,
+ "releaseChannelCollection": "opt-out",
+    "description": "PLACES: stage reached when trying to fix a database corruption, see Places::Database::eCorruptDBReplaceStatus"
+ },
+ "PLACES_PAGES_COUNT": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "low": 1000,
+ "high": 150000,
+ "n_buckets": 20,
+ "releaseChannelCollection": "opt-out",
+ "description": "PLACES: Number of unique pages"
+ },
+ "PLACES_MOST_RECENT_EXPIRED_VISIT_DAYS": {
+ "alert_emails": ["firefox-dev@mozilla.org"],
+ "expires_in_version": "never",
+ "kind": "linear",
+ "low": 30,
+ "high": 730,
+ "n_buckets": 12,
+ "description": "PLACES: the most recent expired visit in days"
+ },
+ "PLACES_BOOKMARKS_COUNT": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "low": 100,
+ "high": 8000,
+ "n_buckets": 15,
+ "releaseChannelCollection": "opt-out",
+ "description": "PLACES: Number of bookmarks"
+ },
+ "PLACES_TAGS_COUNT": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 200,
+ "n_buckets": 10,
+ "description": "PLACES: Number of tags"
+ },
+ "PLACES_KEYWORDS_COUNT": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 200,
+ "n_buckets": 10,
+ "description": "PLACES: Number of keywords"
+ },
+ "PLACES_BACKUPS_DAYSFROMLAST": {
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 15,
+ "description": "PLACES: Days from last backup"
+ },
+ "PLACES_BACKUPS_BOOKMARKSTREE_MS": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "low": 50,
+ "high": 2000,
+ "n_buckets": 10,
+ "description": "PLACES: Time to build the bookmarks tree"
+ },
+ "PLACES_BACKUPS_TOJSON_MS": {
+ "expires_in_version": "default",
+ "kind": "exponential",
+ "low": 50,
+ "high": 2000,
+ "n_buckets": 10,
+ "description": "PLACES: Time to convert and write the backup"
+ },
+ "PLACES_EXPORT_TOHTML_MS": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "low": 50,
+ "high": 2000,
+ "n_buckets": 10,
+ "description": "PLACES: Time to convert and write bookmarks.html"
+ },
+ "PLACES_FAVICON_ICO_SIZES": {
+ "expires_in_version" : "never",
+ "kind": "exponential",
+ "high": 524288,
+ "n_buckets" : 100,
+ "description": "PLACES: Size of the ICO favicon files loaded from the web (Bytes)"
+ },
+ "PLACES_FAVICON_PNG_SIZES": {
+ "expires_in_version" : "never",
+ "kind": "exponential",
+ "high": 524288,
+ "n_buckets" : 100,
+ "description": "PLACES: Size of the PNG favicon files loaded from the web (Bytes)"
+ },
+ "PLACES_FAVICON_GIF_SIZES": {
+ "expires_in_version" : "never",
+ "kind": "exponential",
+ "high": 524288,
+ "n_buckets" : 100,
+ "description": "PLACES: Size of the GIF favicon files loaded from the web (Bytes)"
+ },
+ "PLACES_FAVICON_JPEG_SIZES": {
+ "expires_in_version" : "never",
+ "kind": "exponential",
+ "high": 524288,
+ "n_buckets" : 100,
+ "description": "PLACES: Size of the JPEG favicon files loaded from the web (Bytes)"
+ },
+ "PLACES_FAVICON_BMP_SIZES": {
+ "expires_in_version" : "never",
+ "kind": "exponential",
+ "high": 524288,
+ "n_buckets" : 100,
+ "description": "PLACES: Size of the BMP favicon files loaded from the web (Bytes)"
+ },
+ "PLACES_FAVICON_SVG_SIZES": {
+ "expires_in_version" : "never",
+ "kind": "exponential",
+ "high": 524288,
+ "n_buckets" : 100,
+ "description": "PLACES: Size of the SVG favicon files loaded from the web (Bytes)"
+ },
+ "PLACES_FAVICON_OTHER_SIZES": {
+ "expires_in_version" : "never",
+ "kind": "exponential",
+ "high": 524288,
+ "n_buckets" : 100,
+ "description": "PLACES: Size of favicon files without a specific file type probe, loaded from the web (Bytes)"
+ },
+ "LINK_ICON_SIZES_ATTR_USAGE": {
+ "expires_in_version" : "never",
+ "kind": "enumerated",
+ "n_values": 4,
+ "description": "The possible types of the 'sizes' attribute for <link rel=icon>. 0: Attribute not specified, 1: 'any', 2: Integer dimensions, 3: Invalid value."
+ },
+ "LINK_ICON_SIZES_ATTR_DIMENSION": {
+ "expires_in_version" : "never",
+ "kind": "linear",
+ "high": 513,
+ "n_buckets" : 64,
+ "description": "The width dimension of the 'sizes' attribute for <link rel=icon>."
+ },
+ "FENNEC_DISTRIBUTION_REFERRER_INVALID": {
+ "expires_in_version": "never",
+ "kind": "flag",
+ "description": "Whether the referrer intent specified an invalid distribution name",
+ "cpp_guard": "ANDROID"
+ },
+ "FENNEC_DISTRIBUTION_CODE_CATEGORY": {
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 20,
+ "description": "First digit of HTTP result code, or error category, during distribution download",
+ "cpp_guard": "ANDROID"
+ },
+ "FENNEC_DISTRIBUTION_DOWNLOAD_TIME_MS": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "low": 100,
+ "high": 40000,
+ "n_buckets": 30,
+ "description": "Time taken to download a specified distribution file (msec)",
+ "cpp_guard": "ANDROID"
+ },
+ "FENNEC_BOOKMARKS_COUNT": {
+ "expires_in_version": "60",
+ "kind": "exponential",
+ "high": 8000,
+ "n_buckets": 20,
+ "description": "Number of bookmarks stored in the browser DB",
+ "alert_emails": ["mobile-frontend@mozilla.com"],
+ "bug_numbers": [1244704]
+ },
+ "FENNEC_ORBOT_INSTALLED": {
+ "expires_in_version": "60",
+ "kind": "flag",
+ "cpp_guard": "ANDROID",
+ "description": "Whether or not users have Orbot installed",
+ "alert_emails": ["seceng@mozilla.org"],
+ "bug_numbers": [1314784]
+ },
+ "FENNEC_READING_LIST_COUNT": {
+ "expires_in_version": "50",
+ "kind": "exponential",
+ "high": 1000,
+ "n_buckets": 10,
+ "cpp_guard": "ANDROID",
+ "description": "Number of reading list items stored in the browser DB *** No longer needed (bug 1156565). Delete histogram and accumulation code! ***"
+ },
+ "FENNEC_READER_VIEW_CACHE_SIZE": {
+ "expires_in_version": "60",
+ "alert_emails": ["mobile-frontend@mozilla.com"],
+ "kind": "exponential",
+ "low": 32,
+ "high": 51200,
+ "n_buckets": 20,
+ "description": "Total disk space used by items in the reader view cache (KB)",
+ "bug_numbers": [1246159]
+ },
+ "PLACES_SORTED_BOOKMARKS_PERC": {
+ "expires_in_version": "never",
+ "kind": "linear",
+ "high": 100,
+ "n_buckets": 10,
+ "description": "PLACES: Percentage of bookmarks organized in folders"
+ },
+ "PLACES_TAGGED_BOOKMARKS_PERC": {
+ "expires_in_version": "never",
+ "kind": "linear",
+ "high": 100,
+ "n_buckets": 10,
+ "description": "PLACES: Percentage of tagged bookmarks"
+ },
+ "PLACES_DATABASE_FILESIZE_MB": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "low": 5,
+ "high": 200,
+ "n_buckets": 10,
+ "description": "PLACES: Database filesize (MB)"
+ },
+ "PLACES_DATABASE_PAGESIZE_B": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "low": 1024,
+ "high": 32768,
+ "n_buckets": 10,
+ "description": "PLACES: Database page size (bytes)"
+ },
+ "PLACES_DATABASE_SIZE_PER_PAGE_B": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "low": 500,
+ "high": 10240,
+ "n_buckets": 20,
+ "description": "PLACES: Average size of a place in the database (bytes)"
+ },
+ "PLACES_EXPIRATION_STEPS_TO_CLEAN2": {
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 10,
+ "description": "PLACES: Expiration steps to cleanup the database"
+ },
+ "PLACES_AUTOCOMPLETE_1ST_RESULT_TIME_MS": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "low": 50,
+ "high": 500,
+ "n_buckets": 10,
+ "description": "PLACES: Time for first autocomplete result if > 50ms (ms)"
+ },
+ "PLACES_AUTOCOMPLETE_6_FIRST_RESULTS_TIME_MS": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "low": 50,
+ "high": 1000,
+ "n_buckets": 30,
+ "description": "PLACES: Time for the 6 first autocomplete results (ms)"
+ },
+ "HISTORY_LASTVISITED_TREE_QUERY_TIME_MS": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "low": 50,
+ "high": 2000,
+ "n_buckets": 30,
+ "description": "PLACES: Time to load the sidebar history tree sorted by last visit (ms)"
+ },
+ "PLACES_HISTORY_LIBRARY_SEARCH_TIME_MS": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "low": 50,
+ "high": 1000,
+ "n_buckets": 30,
+ "description": "PLACES: Time to search the history library (ms)"
+ },
+ "PLACES_AUTOCOMPLETE_URLINLINE_DOMAIN_QUERY_TIME_MS": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "low": 50,
+ "high": 2000,
+ "n_buckets": 10,
+ "description": "PLACES: Duration of the domain query for the url inline autocompletion (ms)"
+ },
+ "PLACES_IDLE_FRECENCY_DECAY_TIME_MS": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "low": 50,
+ "high": 10000,
+ "n_buckets": 10,
+ "description": "PLACES: Time to decay all frecencies values on idle (ms)"
+ },
+ "PLACES_IDLE_MAINTENANCE_TIME_MS": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "low": 1000,
+ "high": 30000,
+ "n_buckets": 10,
+ "description": "PLACES: Time to execute maintenance tasks on idle (ms)"
+ },
+ "PLACES_ANNOS_BOOKMARKS_COUNT": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "low": 50,
+ "high": 5000,
+ "n_buckets": 10,
+ "description": "PLACES: Number of bookmarks annotations"
+ },
+ "PLACES_ANNOS_PAGES_COUNT": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "low": 50,
+ "high": 5000,
+ "n_buckets": 10,
+ "description": "PLACES: Number of pages annotations"
+ },
+ "PLACES_MAINTENANCE_DAYSFROMLAST": {
+ "expires_in_version" : "never",
+ "kind": "exponential",
+ "low": 7,
+ "high": 60,
+ "n_buckets" : 10,
+ "description": "PLACES: Days from last maintenance"
+ },
+ "UPDATE_CHECK_NO_UPDATE_EXTERNAL" : {
+ "alert_emails": ["application-update-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "count",
+ "releaseChannelCollection": "opt-out",
+ "description": "Update: count of no updates were found for a background update check (externally initiated)"
+ },
+ "UPDATE_CHECK_NO_UPDATE_NOTIFY" : {
+ "alert_emails": ["application-update-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "count",
+ "releaseChannelCollection": "opt-out",
+ "description": "Update: count of no updates were found for a background update check (timer initiated)"
+ },
+ "UPDATE_CHECK_CODE_EXTERNAL": {
+ "alert_emails": ["application-update-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 50,
+ "releaseChannelCollection": "opt-out",
+ "description": "Update: background update check result code except for no updates found (externally initiated)"
+ },
+ "UPDATE_CHECK_CODE_NOTIFY": {
+ "alert_emails": ["application-update-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 50,
+ "releaseChannelCollection": "opt-out",
+ "description": "Update: background update check result code except for no updates found (timer initiated)"
+ },
+ "UPDATE_CHECK_EXTENDED_ERROR_EXTERNAL": {
+ "alert_emails": ["application-update-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "count",
+ "keyed": true,
+ "releaseChannelCollection": "opt-out",
+ "description": "Update: keyed count (key names are prefixed with AUS_CHECK_EX_ERR_) of background update check extended error code (externally initiated)"
+ },
+ "UPDATE_CHECK_EXTENDED_ERROR_NOTIFY": {
+ "alert_emails": ["application-update-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "count",
+ "keyed": true,
+ "releaseChannelCollection": "opt-out",
+ "description": "Update: keyed count (key names are prefixed with AUS_CHECK_EX_ERR_) of background update check extended error code (timer initiated)"
+ },
+ "UPDATE_INVALID_LASTUPDATETIME_EXTERNAL": {
+ "alert_emails": ["application-update-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "count",
+ "releaseChannelCollection": "opt-out",
+ "description": "Update: count of systems that have a last update time greater than the current time (externally initiated)"
+ },
+ "UPDATE_INVALID_LASTUPDATETIME_NOTIFY": {
+ "alert_emails": ["application-update-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "count",
+ "releaseChannelCollection": "opt-out",
+ "description": "Update: count of systems that have a last update time greater than the current time (timer initiated)"
+ },
+ "UPDATE_LAST_NOTIFY_INTERVAL_DAYS_EXTERNAL": {
+ "alert_emails": ["application-update-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "n_buckets": 60,
+ "high": 365,
+ "releaseChannelCollection": "opt-out",
+ "description": "Update: interval in days since the last background update check (externally initiated)"
+ },
+ "UPDATE_LAST_NOTIFY_INTERVAL_DAYS_NOTIFY": {
+ "alert_emails": ["application-update-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "n_buckets": 30,
+ "high": 180,
+ "releaseChannelCollection": "opt-out",
+ "description": "Update: interval in days since the last background update check (timer initiated)"
+ },
+ "UPDATE_PING_COUNT_EXTERNAL": {
+ "alert_emails": ["application-update-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "count",
+ "releaseChannelCollection": "opt-out",
+ "description": "Update: count of systems for this ping for comparison with other pings (externally initiated)"
+ },
+ "UPDATE_PING_COUNT_NOTIFY": {
+ "alert_emails": ["application-update-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "count",
+ "releaseChannelCollection": "opt-out",
+ "description": "Update: count of systems for this ping for comparison with other pings (timer initiated)"
+ },
+ "UPDATE_SERVICE_INSTALLED_EXTERNAL": {
+ "alert_emails": ["application-update-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "boolean",
+ "releaseChannelCollection": "opt-out",
+ "description": "Update: whether the service is installed (externally initiated)"
+ },
+ "UPDATE_SERVICE_INSTALLED_NOTIFY": {
+ "alert_emails": ["application-update-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "boolean",
+ "releaseChannelCollection": "opt-out",
+ "description": "Update: whether the service is installed (timer initiated)"
+ },
+ "UPDATE_SERVICE_MANUALLY_UNINSTALLED_EXTERNAL": {
+ "alert_emails": ["application-update-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "count",
+ "releaseChannelCollection": "opt-out",
+ "description": "Update: count of systems that manually uninstalled the service (externally initiated)"
+ },
+ "UPDATE_SERVICE_MANUALLY_UNINSTALLED_NOTIFY": {
+ "alert_emails": ["application-update-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "count",
+ "releaseChannelCollection": "opt-out",
+ "description": "Update: count of systems that manually uninstalled the service (timer initiated)"
+ },
+ "UPDATE_UNABLE_TO_APPLY_EXTERNAL": {
+ "alert_emails": ["application-update-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "count",
+ "releaseChannelCollection": "opt-out",
+ "description": "Update: count of systems that cannot apply updates (externally initiated)"
+ },
+ "UPDATE_UNABLE_TO_APPLY_NOTIFY": {
+ "alert_emails": ["application-update-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "count",
+ "releaseChannelCollection": "opt-out",
+ "description": "Update: count of systems that cannot apply updates (timer initiated)"
+ },
+ "UPDATE_CANNOT_STAGE_EXTERNAL": {
+ "alert_emails": ["application-update-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "count",
+ "releaseChannelCollection": "opt-out",
+ "description": "Update: count of systems that cannot stage updates (externally initiated)"
+ },
+ "UPDATE_CANNOT_STAGE_NOTIFY": {
+ "alert_emails": ["application-update-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "count",
+ "releaseChannelCollection": "opt-out",
+ "description": "Update: count of systems that cannot stage updates (timer initiated)"
+ },
+ "UPDATE_PREF_UPDATE_CANCELATIONS_EXTERNAL": {
+ "alert_emails": ["application-update-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 100,
+ "releaseChannelCollection": "opt-out",
+ "description": "Update: number of sequential update elevation request cancelations greater than 0 (externally initiated)"
+ },
+ "UPDATE_PREF_UPDATE_CANCELATIONS_NOTIFY": {
+ "alert_emails": ["application-update-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 100,
+ "releaseChannelCollection": "opt-out",
+ "description": "Update: number of sequential update elevation request cancelations greater than 0 (timer initiated)"
+ },
+ "UPDATE_PREF_SERVICE_ERRORS_EXTERNAL": {
+ "alert_emails": ["application-update-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 30,
+ "releaseChannelCollection": "opt-out",
+ "description": "Update: number of sequential update service errors greater than 0 (externally initiated)"
+ },
+ "UPDATE_PREF_SERVICE_ERRORS_NOTIFY": {
+ "alert_emails": ["application-update-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 30,
+ "releaseChannelCollection": "opt-out",
+ "description": "Update: number of sequential update service errors greater than 0 (timer initiated)"
+ },
+ "UPDATE_NOT_PREF_UPDATE_AUTO_EXTERNAL": {
+ "alert_emails": ["application-update-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "count",
+ "releaseChannelCollection": "opt-out",
+ "description": "Update: count of when the app.update.auto boolean preference is not the default value of true (true values are not submitted)"
+ },
+ "UPDATE_NOT_PREF_UPDATE_AUTO_NOTIFY": {
+ "alert_emails": ["application-update-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "count",
+ "releaseChannelCollection": "opt-out",
+ "description": "Update: count of when the app.update.auto boolean preference is not the default value of true (true values are not submitted)"
+ },
+ "UPDATE_NOT_PREF_UPDATE_ENABLED_EXTERNAL": {
+ "alert_emails": ["application-update-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "count",
+ "releaseChannelCollection": "opt-out",
+ "description": "Update: count of when the app.update.enabled boolean preference is not the default value of true (true values are not submitted)"
+ },
+ "UPDATE_NOT_PREF_UPDATE_ENABLED_NOTIFY": {
+ "alert_emails": ["application-update-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "count",
+ "releaseChannelCollection": "opt-out",
+ "description": "Update: count of when the app.update.enabled boolean preference is not the default value of true (true values are not submitted)"
+ },
+ "UPDATE_NOT_PREF_UPDATE_STAGING_ENABLED_EXTERNAL": {
+ "alert_emails": ["application-update-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "count",
+ "releaseChannelCollection": "opt-out",
+ "description": "Update: count of when the app.update.staging.enabled boolean preference is not the default value of true (true values are not submitted)"
+ },
+ "UPDATE_NOT_PREF_UPDATE_STAGING_ENABLED_NOTIFY": {
+ "alert_emails": ["application-update-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "count",
+ "releaseChannelCollection": "opt-out",
+ "description": "Update: count of when the app.update.staging.enabled boolean preference is not the default value of true (true values are not submitted)"
+ },
+ "UPDATE_NOT_PREF_UPDATE_SERVICE_ENABLED_EXTERNAL": {
+ "alert_emails": ["application-update-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "count",
+ "releaseChannelCollection": "opt-out",
+ "description": "Update: count of when the app.update.service.enabled boolean preference is not the default value of true (true values are not submitted)"
+ },
+ "UPDATE_NOT_PREF_UPDATE_SERVICE_ENABLED_NOTIFY": {
+ "alert_emails": ["application-update-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "count",
+ "releaseChannelCollection": "opt-out",
+ "description": "Update: count of when the app.update.service.enabled boolean preference is not the default value of true (true values are not submitted)"
+ },
+ "UPDATE_DOWNLOAD_CODE_COMPLETE": {
+ "alert_emails": ["application-update-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 50,
+ "releaseChannelCollection": "opt-out",
+ "description": "Update: complete patch download result code"
+ },
+ "UPDATE_DOWNLOAD_CODE_PARTIAL": {
+ "alert_emails": ["application-update-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 50,
+ "releaseChannelCollection": "opt-out",
+    "description": "Update: partial patch download result code"
+ },
+ "UPDATE_STATE_CODE_COMPLETE_STARTUP": {
+ "alert_emails": ["application-update-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 20,
+ "releaseChannelCollection": "opt-out",
+ "description": "Update: the state of a complete update from update.status on startup"
+ },
+ "UPDATE_STATE_CODE_PARTIAL_STARTUP": {
+ "alert_emails": ["application-update-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 20,
+ "releaseChannelCollection": "opt-out",
+ "description": "Update: the state of a partial patch update from update.status on startup"
+ },
+ "UPDATE_STATE_CODE_UNKNOWN_STARTUP": {
+ "alert_emails": ["application-update-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 20,
+ "releaseChannelCollection": "opt-out",
+ "description": "Update: the state of an unknown patch update from update.status on startup"
+ },
+ "UPDATE_STATE_CODE_COMPLETE_STAGE": {
+ "alert_emails": ["application-update-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 20,
+ "releaseChannelCollection": "opt-out",
+ "description": "Update: the state of a complete patch update from update.status after staging"
+ },
+ "UPDATE_STATE_CODE_PARTIAL_STAGE": {
+ "alert_emails": ["application-update-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 20,
+ "releaseChannelCollection": "opt-out",
+ "description": "Update: the state of a partial patch update from update.status after staging"
+ },
+ "UPDATE_STATE_CODE_UNKNOWN_STAGE": {
+ "alert_emails": ["application-update-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 20,
+ "releaseChannelCollection": "opt-out",
+ "description": "Update: the state of an unknown patch update from update.status after staging"
+ },
+ "UPDATE_STATUS_ERROR_CODE_COMPLETE_STARTUP": {
+ "alert_emails": ["application-update-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 100,
+ "releaseChannelCollection": "opt-out",
+ "description": "Update: the status error code for a failed complete patch update from update.status on startup"
+ },
+ "UPDATE_STATUS_ERROR_CODE_PARTIAL_STARTUP": {
+ "alert_emails": ["application-update-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 100,
+ "releaseChannelCollection": "opt-out",
+ "description": "Update: the status error code for a failed partial patch update from update.status on startup"
+ },
+ "UPDATE_STATUS_ERROR_CODE_UNKNOWN_STARTUP": {
+ "alert_emails": ["application-update-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 100,
+ "releaseChannelCollection": "opt-out",
+ "description": "Update: the status error code for a failed unknown patch update from update.status on startup"
+ },
+ "UPDATE_STATUS_ERROR_CODE_COMPLETE_STAGE": {
+ "alert_emails": ["application-update-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 100,
+ "releaseChannelCollection": "opt-out",
+ "description": "Update: the status error code for a failed complete patch update from update.status after staging"
+ },
+ "UPDATE_STATUS_ERROR_CODE_PARTIAL_STAGE": {
+ "alert_emails": ["application-update-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 100,
+ "releaseChannelCollection": "opt-out",
+ "description": "Update: the status error code for a failed partial patch update from update.status after staging"
+ },
+ "UPDATE_STATUS_ERROR_CODE_UNKNOWN_STAGE": {
+ "alert_emails": ["application-update-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 100,
+ "releaseChannelCollection": "opt-out",
+ "description": "Update: the status error code for a failed unknown patch update from update.status after staging"
+ },
+ "UPDATE_WIZ_LAST_PAGE_CODE": {
+ "alert_emails": ["application-update-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 30,
+ "releaseChannelCollection": "opt-out",
+ "description": "Update: the update wizard page displayed when the UI was closed (mapped in toolkit/mozapps/update/UpdateTelemetry.jsm)"
+ },
+ "THUNDERBIRD_GLODA_SIZE_MB": {
+ "expires_in_version": "never",
+ "kind": "linear",
+ "high": 1000,
+ "n_buckets": 40,
+ "description": "Gloda: size of global-messages-db.sqlite (MB)"
+ },
+ "THUNDERBIRD_CONVERSATIONS_TIME_TO_2ND_GLODA_QUERY_MS": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000,
+ "n_buckets": 30,
+ "description": "Conversations: time between the moment we click and the second gloda query returns (ms)"
+ },
+ "THUNDERBIRD_INDEXING_RATE_MSG_PER_S": {
+ "expires_in_version": "never",
+ "kind": "linear",
+ "high": 100,
+ "n_buckets": 20,
+ "description": "Gloda: indexing rate (message/s)"
+ },
+ "FX_GESTURE_INSTALL_SNAPSHOT_OF_PAGE": {
+ "expires_in_version": "50",
+ "kind": "exponential",
+ "high": 1000,
+ "n_buckets": 30,
+ "description": "Firefox: Time taken to store the image capture of the page to a canvas, for reuse while swiping through history (ms)."
+ },
+ "FX_GESTURE_COMPRESS_SNAPSHOT_OF_PAGE": {
+ "expires_in_version": "50",
+ "kind": "exponential",
+ "high": 1000,
+ "n_buckets": 30,
+ "description": "Firefox: Time taken to kick off image compression of the canvas that will be used during swiping through history (ms)."
+ },
+ "FX_TAB_ANIM_OPEN_PREVIEW_FRAME_INTERVAL_MS": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "low": 7,
+ "high": 500,
+ "n_buckets": 50,
+ "description": "Average frame interval during tab open animation of about:newtab (preview=on), when other tabs are unaffected"
+ },
+ "FX_TAB_ANIM_OPEN_FRAME_INTERVAL_MS": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "low": 7,
+ "high": 500,
+ "n_buckets": 50,
+ "description": "Average frame interval during tab open animation of about:newtab (preview=off), when other tabs are unaffected"
+ },
+ "FX_TAB_ANIM_ANY_FRAME_INTERVAL_MS": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "low": 7,
+ "high": 500,
+ "n_buckets": 50,
+ "description": "Average frame interval during any tab open/close animation (excluding tabstrip scroll)"
+ },
+ "FX_REFRESH_DRIVER_CHROME_FRAME_DELAY_MS": {
+ "alert_emails": ["perf-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000,
+ "n_buckets": 50,
+ "bug_numbers": [1220699],
+ "description": "Delay in ms between the target and the actual handling time of the frame at refresh driver in the chrome process."
+ },
+ "FX_REFRESH_DRIVER_CONTENT_FRAME_DELAY_MS": {
+ "alert_emails": ["perf-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000,
+ "n_buckets": 50,
+ "bug_numbers": [1221674],
+ "description": "Delay in ms between the target and the actual handling time of the frame at refresh driver in the content process."
+ },
+ "FX_REFRESH_DRIVER_SYNC_SCROLL_FRAME_DELAY_MS": {
+ "alert_emails": ["perf-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000,
+ "n_buckets": 50,
+ "bug_numbers": [1228147],
+ "description": "Delay in ms between the target and the actual handling time of the frame at refresh driver while scrolling synchronously."
+ },
+ "FX_TAB_SWITCH_UPDATE_MS": {
+ "alert_emails": ["perf-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 1000,
+ "n_buckets": 20,
+ "description": "Firefox: Time in ms spent updating UI in response to a tab switch"
+ },
+ "FX_TAB_SWITCH_TOTAL_MS": {
+ "expires_in_version": "56",
+ "kind": "exponential",
+ "high": 1000,
+ "n_buckets": 20,
+ "releaseChannelCollection": "opt-out",
+ "description": "Firefox: Time in ms till a tab switch is complete including the first paint"
+ },
+ "FX_TAB_SWITCH_TOTAL_E10S_MS": {
+ "expires_in_version": "56",
+ "kind": "exponential",
+ "high": 1000,
+ "n_buckets": 20,
+ "releaseChannelCollection": "opt-out",
+ "description": "Firefox: Time in ms between tab selection and tab content paint."
+ },
+ "FX_TAB_SWITCH_SPINNER_VISIBLE_MS": {
+ "expires_in_version": "56",
+ "kind": "exponential",
+ "high": 1000,
+ "n_buckets": 20,
+ "releaseChannelCollection": "opt-out",
+ "description": "Firefox: If the spinner interstitial displays during tab switching, records the time in ms the graphic is visible"
+ },
+ "FX_TAB_SWITCH_SPINNER_VISIBLE_LONG_MS": {
+ "expires_in_version": "56",
+ "kind": "exponential",
+ "low": 1000,
+ "high": 64000,
+ "n_buckets": 7,
+ "bug_numbers": [1301104],
+ "alert_emails": ["mconley@mozilla.com"],
+ "releaseChannelCollection": "opt-out",
+ "description": "Firefox: If the spinner interstitial displays during tab switching, records the time in ms the graphic is visible. This probe is similar to FX_TAB_SWITCH_SPINNER_VISIBLE_MS, but is for truly degenerate cases."
+ },
+ "FX_TAB_CLICK_MS": {
+ "expires_in_version": "default",
+ "kind": "exponential",
+ "high": 1000,
+ "n_buckets": 20,
+ "description": "Firefox: Time in ms spent on switching tabs in response to a tab click"
+ },
+ "FX_BOOKMARKS_TOOLBAR_INIT_MS": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "low": 50,
+ "high": 5000,
+ "n_buckets": 10,
+ "description": "Firefox: Time to initialize the bookmarks toolbar view (ms)"
+ },
+ "FX_BROWSER_FULLSCREEN_USED": {
+ "expires_in_version": "46",
+ "kind": "count",
+ "description": "The number of times that a session enters browser fullscreen (f11-fullscreen)"
+ },
+ "FX_NEW_WINDOW_MS": {
+ "expires_in_version": "default",
+ "kind": "exponential",
+ "high": 10000,
+ "n_buckets": 20,
+ "description": "Firefox: Time taken to open a new browser window (ms)"
+ },
+ "FX_PAGE_LOAD_MS": {
+ "expires_in_version": "default",
+ "kind": "exponential",
+ "high": 10000,
+ "n_buckets": 20,
+ "description": "Firefox: Time taken to load a page (ms). This includes all static content, not dynamic content. Loading of about: pages is not counted."
+ },
+ "FX_TOTAL_TOP_VISITS": {
+ "expires_in_version": "default",
+ "kind": "boolean",
+ "description": "Count the number of times a new top page was starting to load"
+ },
+ "FX_THUMBNAILS_CAPTURE_TIME_MS": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 500,
+ "n_buckets": 15,
+ "description": "THUMBNAILS: Time (ms) it takes to capture a thumbnail"
+ },
+ "FX_THUMBNAILS_STORE_TIME_MS": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 500,
+ "n_buckets": 15,
+ "description": "THUMBNAILS: Time (ms) it takes to store a thumbnail in the cache"
+ },
+ "FX_THUMBNAILS_HIT_OR_MISS": {
+ "expires_in_version": "never",
+ "kind": "boolean",
+ "description": "THUMBNAILS: Thumbnail found"
+ },
+ "FX_MIGRATION_ENTRY_POINT": {
+ "bug_numbers": [731025],
+ "alert_emails": ["gijs@mozilla.com"],
+ "expires_in_version": "53",
+ "kind": "enumerated",
+ "n_values": 10,
+ "releaseChannelCollection": "opt-out",
+ "description": "Where the migration wizard was entered from. 0=Other/catch-all, 1=first-run, 2=refresh-firefox, 3=Places window, 4=Password manager"
+ },
+ "FX_MIGRATION_SOURCE_BROWSER": {
+ "bug_numbers": [731025],
+ "alert_emails": ["gijs@mozilla.com"],
+ "expires_in_version": "53",
+ "kind": "enumerated",
+ "n_values": 15,
+ "releaseChannelCollection": "opt-out",
+ "description": "The browser that data is pulled from. The values correspond to the internal browser ID (see MigrationUtils.jsm)"
+ },
+ "FX_MIGRATION_ERRORS": {
+ "bug_numbers": [731025],
+ "alert_emails": ["gijs@mozilla.com"],
+ "expires_in_version": "53",
+ "kind": "enumerated",
+ "keyed": true,
+ "n_values": 12,
+ "releaseChannelCollection": "opt-out",
+ "description": "Errors encountered during migration in buckets defined by the datatype, keyed by the string description of the browser."
+ },
+ "FX_MIGRATION_USAGE": {
+ "bug_numbers": [731025],
+ "alert_emails": ["gijs@mozilla.com"],
+ "expires_in_version": "53",
+ "kind": "enumerated",
+ "keyed": true,
+ "n_values": 12,
+ "releaseChannelCollection": "opt-out",
+ "description": "Usage of migration for each datatype when migration is run through the post-firstrun flow which allows individual datatypes, keyed by the string description of the browser."
+ },
+ "FX_MIGRATION_IMPORTED_HOMEPAGE": {
+ "bug_numbers": [731025, 1298208],
+ "alert_emails": ["gijs@mozilla.com"],
+ "expires_in_version": "53",
+ "kind": "boolean",
+ "keyed": true,
+ "releaseChannelCollection": "opt-out",
+ "description": "Whether the homepage was imported during browser migration. Only available on release builds during firstrun."
+ },
+ "FX_MIGRATION_BOOKMARKS_IMPORT_MS": {
+ "bug_numbers": [1289436],
+ "alert_emails": ["gijs@mozilla.com"],
+ "expires_in_version": "54",
+ "kind": "exponential",
+ "n_buckets": 70,
+ "high": 100000,
+ "releaseChannelCollection": "opt-out",
+ "keyed": true,
+ "description": "How long it took to import bookmarks from another browser, keyed by the name of the browser."
+ },
+ "FX_MIGRATION_HISTORY_IMPORT_MS": {
+ "bug_numbers": [1289436],
+ "alert_emails": ["gijs@mozilla.com"],
+ "expires_in_version": "54",
+ "kind": "exponential",
+ "n_buckets": 70,
+ "high": 100000,
+ "releaseChannelCollection": "opt-out",
+ "keyed": true,
+ "description": "How long it took to import history from another browser, keyed by the name of the browser."
+ },
+ "FX_MIGRATION_LOGINS_IMPORT_MS": {
+ "bug_numbers": [1289436],
+ "alert_emails": ["gijs@mozilla.com"],
+ "expires_in_version": "54",
+ "kind": "exponential",
+ "n_buckets": 70,
+ "high": 100000,
+ "releaseChannelCollection": "opt-out",
+ "keyed": true,
+ "description": "How long it took to import logins (passwords) from another browser, keyed by the name of the browser."
+ },
+ "FX_MIGRATION_BOOKMARKS_JANK_MS": {
+ "bug_numbers": [1338522],
+ "alert_emails": ["dao@mozilla.com"],
+ "expires_in_version": "58",
+ "kind": "exponential",
+ "n_buckets": 20,
+ "high": 60000,
+ "releaseChannelCollection": "opt-out",
+ "keyed": true,
+ "description": "Accumulated timer delay (variance between when the timer was expected to fire and when it actually fired) in milliseconds as an indicator for decreased main-thread responsiveness while importing bookmarks from another browser, keyed by the name of the browser (see gAvailableMigratorKeys in MigrationUtils.jsm). The import is happening on a background thread and should ideally not affect the UI noticeably."
+ },
+ "FX_MIGRATION_HISTORY_JANK_MS": {
+ "bug_numbers": [1338522],
+ "alert_emails": ["dao@mozilla.com"],
+ "expires_in_version": "58",
+ "kind": "exponential",
+ "n_buckets": 20,
+ "high": 60000,
+ "releaseChannelCollection": "opt-out",
+ "keyed": true,
+ "description": "Accumulated timer delay (variance between when the timer was expected to fire and when it actually fired) in milliseconds as an indicator for decreased main-thread responsiveness while importing history from another browser, keyed by the name of the browser (see gAvailableMigratorKeys in MigrationUtils.jsm). The import is happening on a background thread and should ideally not affect the UI noticeably."
+ },
+ "FX_MIGRATION_LOGINS_JANK_MS": {
+ "bug_numbers": [1338522],
+ "alert_emails": ["dao@mozilla.com"],
+ "expires_in_version": "58",
+ "kind": "exponential",
+ "n_buckets": 20,
+ "high": 60000,
+ "releaseChannelCollection": "opt-out",
+ "keyed": true,
+ "description": "Accumulated timer delay (variance between when the timer was expected to fire and when it actually fired) in milliseconds as an indicator for decreased main-thread responsiveness while importing logins / passwords from another browser, keyed by the name of the browser (see gAvailableMigratorKeys in MigrationUtils.jsm). The import is happening on a background thread and should ideally not affect the UI noticeably."
+ },
+ "FX_MIGRATION_BOOKMARKS_QUANTITY": {
+ "bug_numbers": [1279501],
+ "alert_emails": ["gijs@mozilla.com"],
+ "expires_in_version": "56",
+ "kind": "exponential",
+ "n_buckets": 20,
+ "high": 1000,
+ "releaseChannelCollection": "opt-out",
+ "keyed": true,
+ "description": "How many bookmarks we imported from another browser, keyed by the name of the browser."
+ },
+ "FX_MIGRATION_HISTORY_QUANTITY": {
+ "bug_numbers": [1279501],
+ "alert_emails": ["gijs@mozilla.com"],
+ "expires_in_version": "56",
+ "kind": "exponential",
+ "n_buckets": 40,
+ "high": 10000,
+ "releaseChannelCollection": "opt-out",
+ "keyed": true,
+ "description": "How many history visits we imported from another browser, keyed by the name of the browser."
+ },
+ "FX_MIGRATION_LOGINS_QUANTITY": {
+ "bug_numbers": [1279501],
+ "alert_emails": ["gijs@mozilla.com"],
+ "expires_in_version": "56",
+ "kind": "exponential",
+ "n_buckets": 20,
+ "high": 1000,
+ "releaseChannelCollection": "opt-out",
+ "keyed": true,
+ "description": "How many logins (passwords) we imported from another browser, keyed by the name of the browser."
+ },
+ "FX_STARTUP_MIGRATION_BROWSER_COUNT": {
+ "bug_numbers": [1275114],
+ "alert_emails": ["gijs@mozilla.com"],
+ "expires_in_version": "53",
+ "kind": "enumerated",
+ "n_values": 15,
+ "releaseChannelCollection": "opt-out",
+ "description": "Number of browsers from which the user could migrate on initial profile migration. Only available on release builds during firstrun."
+ },
+ "FX_STARTUP_MIGRATION_EXISTING_DEFAULT_BROWSER": {
+ "bug_numbers": [1275114],
+ "alert_emails": ["gijs@mozilla.com"],
+ "expires_in_version": "53",
+ "kind": "enumerated",
+ "n_values": 15,
+ "releaseChannelCollection": "opt-out",
+ "description": "The browser that was the default on the initial profile migration. The values correspond to the internal browser ID (see MigrationUtils.jsm)"
+ },
+ "FX_STARTUP_MIGRATION_AUTOMATED_IMPORT_PROCESS_SUCCESS": {
+ "bug_numbers": [1271775],
+ "alert_emails": ["gijs@mozilla.com"],
+ "expires_in_version": "53",
+ "kind": "enumerated",
+ "n_values": 27,
+ "releaseChannelCollection": "opt-out",
+ "description": "Where automatic migration was attempted, indicates to what degree we succeeded. Values 0-25 indicate progress through the automatic migration sequence, with 25 indicating the migration finished. 26 is only used when the migration produced errors before it finished."
+ },
+ "FX_STARTUP_MIGRATION_AUTOMATED_IMPORT_UNDO": {
+ "bug_numbers": [1283565],
+ "alert_emails": ["gijs@mozilla.com"],
+ "expires_in_version": "53",
+ "kind": "enumerated",
+ "n_values": 31,
+ "releaseChannelCollection": "opt-out",
+ "description": "Where undo of the automatic migration was attempted, indicates to what degree we succeeded to undo. 0 means we started to undo, 5 means we bailed out from the undo because it was not possible to complete it (there was nothing to undo or the user was signed in to sync). All higher values indicate progression through the undo sequence, with 30 indicating we finished the undo without exceptions in the middle."
+ },
+ "FX_STARTUP_MIGRATION_UNDO_REASON": {
+ "bug_numbers": [1289906],
+ "alert_emails": ["gijs@mozilla.com"],
+ "expires_in_version": "54",
+ "keyed": true,
+ "kind": "enumerated",
+ "n_values": 10,
+ "releaseChannelCollection": "opt-out",
+ "description": "Why the undo functionality of an automatic migration was disabled: 0 means we used undo, 1 means the user signed in to sync, 2 means the user created/modified a password, 3 means the user created/modified a bookmark (item or folder), 4 means we showed an undo option repeatedly and the user did not use it, 5 means we showed an undo option and the user actively elected to keep the data. The whole thing is keyed to the identifiers of different browsers (so 'chrome', 'ie', 'edge', 'safari', etc.)."
+ },
+ "FX_STARTUP_MIGRATION_UNDO_OFFERED": {
+ "bug_numbers": [1309617],
+ "alert_emails": ["gijs@mozilla.com"],
+ "expires_in_version": "56",
+ "kind": "enumerated",
+ "n_values": 5,
+ "releaseChannelCollection": "opt-out",
+ "description": "Indicates we showed a 'would you like to undo this automatic migration?' notification bar. The bucket indicates which nth day we're on (1st/2nd/3rd, by default - 0 would be indicative the pref didn't get set which shouldn't happen). After 3 days on which the notification gets shown, it will get disabled and never shown again."
+ },
+ "FX_STARTUP_MIGRATION_UNDO_BOOKMARKS_ERRORCOUNT": {
+ "bug_numbers": [1333233],
+ "alert_emails": ["gijs@mozilla.com"],
+ "expires_in_version": "58",
+ "keyed": true,
+ "kind": "exponential",
+ "n_buckets": 20,
+ "high": 100,
+ "releaseChannelCollection": "opt-out",
+ "description": "Indicates how many errors we find when trying to 'undo' bookmarks import. Keys are internal ids of browsers we import from, e.g. 'chrome' or 'ie', etc."
+ },
+ "FX_STARTUP_MIGRATION_UNDO_LOGINS_ERRORCOUNT": {
+ "bug_numbers": [1333233],
+ "alert_emails": ["gijs@mozilla.com"],
+ "expires_in_version": "58",
+ "keyed": true,
+ "kind": "exponential",
+ "n_buckets": 20,
+ "high": 100,
+ "releaseChannelCollection": "opt-out",
+ "description": "Indicates how many errors we find when trying to 'undo' login (password) import. Keys are internal ids of browsers we import from, e.g. 'chrome' or 'ie', etc."
+ },
+ "FX_STARTUP_MIGRATION_UNDO_VISITS_ERRORCOUNT": {
+ "bug_numbers": [1333233],
+ "alert_emails": ["gijs@mozilla.com"],
+ "expires_in_version": "58",
+ "keyed": true,
+ "kind": "exponential",
+ "n_buckets": 20,
+ "high": 100,
+ "releaseChannelCollection": "opt-out",
+ "description": "Indicates how many errors we find when trying to 'undo' history import. Keys are internal ids of browsers we import from, e.g. 'chrome' or 'ie', etc."
+ },
+ "FX_STARTUP_MIGRATION_UNDO_BOOKMARKS_MS": {
+ "bug_numbers": [1333233],
+ "alert_emails": ["gijs@mozilla.com"],
+ "expires_in_version": "58",
+ "keyed": true,
+ "kind": "exponential",
+ "n_buckets": 20,
+ "high": 60000,
+ "releaseChannelCollection": "opt-out",
+ "description": "Indicates how long it took to undo the startup import of bookmarks, in ms. Keys are internal ids of browsers we import from, e.g. 'chrome' or 'ie', etc."
+ },
+ "FX_STARTUP_MIGRATION_UNDO_LOGINS_MS": {
+ "bug_numbers": [1333233],
+ "alert_emails": ["gijs@mozilla.com"],
+ "expires_in_version": "58",
+ "keyed": true,
+ "kind": "exponential",
+ "n_buckets": 20,
+ "high": 60000,
+ "releaseChannelCollection": "opt-out",
+ "description": "Indicates how long it took to undo the startup import of logins, in ms. Keys are internal ids of browsers we import from, e.g. 'chrome' or 'ie', etc."
+ },
+ "FX_STARTUP_MIGRATION_UNDO_VISITS_MS": {
+ "bug_numbers": [1333233],
+ "alert_emails": ["gijs@mozilla.com"],
+ "expires_in_version": "58",
+ "keyed": true,
+ "kind": "exponential",
+ "n_buckets": 20,
+ "high": 60000,
+ "releaseChannelCollection": "opt-out",
+ "description": "Indicates how long it took to undo the startup import of visits (history), in ms. Keys are internal ids of browsers we import from, e.g. 'chrome' or 'ie', etc."
+ },
+ "FX_STARTUP_MIGRATION_UNDO_TOTAL_MS": {
+ "bug_numbers": [1333233],
+ "alert_emails": ["gijs@mozilla.com"],
+ "expires_in_version": "58",
+ "keyed": true,
+ "kind": "exponential",
+ "n_buckets": 20,
+ "high": 60000,
+ "releaseChannelCollection": "opt-out",
+ "description": "Indicates how long it took to undo the entirety of the startup undo, in ms. Keys are internal ids of browsers we import from, e.g. 'chrome' or 'ie', etc."
+ },
+ "FX_STARTUP_MIGRATION_DATA_RECENCY": {
+ "bug_numbers": [1276694],
+ "alert_emails": ["gijs@mozilla.com"],
+ "expires_in_version": "53",
+ "keyed": true,
+ "kind": "exponential",
+ "n_buckets": 50,
+ "high": 8760,
+ "releaseChannelCollection": "opt-out",
+ "description": "The 'last modified' time of the data we imported on the initial profile migration (time delta with 'now' at the time of migration, in hours). Collected for all browsers for which migration data is available, and stored keyed by browser identifier (e.g. 'ie', 'edge', 'safari', etc.)."
+ },
+ "FX_STARTUP_MIGRATION_USED_RECENT_BROWSER": {
+ "bug_numbers": [1276694],
+ "alert_emails": ["gijs@mozilla.com"],
+ "expires_in_version": "53",
+ "keyed": true,
+ "kind": "boolean",
+ "releaseChannelCollection": "opt-out",
+ "description": "Whether the browser we migrated from was the browser with the most recent data. Keyed by that browser's identifier (e.g. 'ie', 'edge', 'safari', etc.)."
+ },
+ "FX_STARTUP_EXTERNAL_CONTENT_HANDLER": {
+ "bug_numbers": [1276027],
+ "alert_emails": ["jaws@mozilla.com"],
+ "expires_in_version": "53",
+ "kind": "count",
+ "description": "Count how often the browser is opened as an external app handler. This is generally used when the browser is set as the default browser."
+ },
+ "FX_PREFERENCES_CATEGORY_OPENED": {
+ "bug_numbers": [1324167],
+ "alert_emails": ["jaws@mozilla.com"],
+ "expires_in_version": "56",
+ "kind": "categorical",
+ "labels": ["unknown", "general", "search", "content", "applications", "privacy", "security", "sync", "advancedGeneral", "advancedDataChoices", "advancedNetwork", "advancedUpdates", "advancedCerts"],
+ "releaseChannelCollection": "opt-out",
+ "description": "Count how often each preference category is opened."
+ },
+ "INPUT_EVENT_RESPONSE_MS": {
+ "alert_emails": ["perf-telemetry-alerts@mozilla.com"],
+ "bug_numbers": [1235908],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000,
+ "n_buckets": 50,
+ "description": "Time (ms) from the Input event being created to the end of it being handled"
+ },
+ "LOAD_INPUT_EVENT_RESPONSE_MS": {
+ "alert_emails": ["perf-telemetry-alerts@mozilla.com"],
+ "bug_numbers": [1298101],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000,
+ "n_buckets": 50,
+ "description": "Time (ms) from the Input event being created to the end of it being handled for events handling during page load only"
+ },
+ "EVENTLOOP_UI_ACTIVITY_EXP_MS": {
+ "alert_emails": ["perf-telemetry-alerts@mozilla.com"],
+ "bug_numbers": [1198196],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 60000,
+ "n_buckets": 50,
+ "description": "Widget: Time taken to process the event-loop message immediately preceding a UI message (ms)"
+ },
+ "FX_SESSION_RESTORE_STARTUP_INIT_SESSION_MS": {
+ "alert_emails": ["session-restore-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "default",
+ "kind": "exponential",
+ "high": 30000,
+ "n_buckets": 20,
+ "description": "Session restore: Time it takes to prepare the data structures for restoring a session (ms)"
+ },
+ "FX_SESSION_RESTORE_STARTUP_ONLOAD_INITIAL_WINDOW_MS": {
+ "alert_emails": ["session-restore-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 30000,
+ "n_buckets": 20,
+ "description": "Session restore: Time it takes to finish restoration once we have first opened a window (ms)"
+ },
+ "FX_SESSION_RESTORE_COLLECT_ALL_WINDOWS_DATA_MS": {
+ "alert_emails": ["session-restore-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 30000,
+ "n_buckets": 10,
+ "description": "Session restore: Time to collect all window data (ms)"
+ },
+ "FX_SESSION_RESTORE_COLLECT_COOKIES_MS": {
+ "alert_emails": ["session-restore-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 30000,
+ "n_buckets": 10,
+ "description": "Session restore: Time to collect cookies (ms)"
+ },
+ "FX_SESSION_RESTORE_COLLECT_DATA_MS": {
+ "alert_emails": ["session-restore-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "default",
+ "kind": "exponential",
+ "high": 30000,
+ "n_buckets": 10,
+ "description": "Session restore: Time to collect all window and tab data (ms)"
+ },
+ "FX_SESSION_RESTORE_COLLECT_DATA_LONGEST_OP_MS": {
+ "alert_emails": ["session-restore-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 30000,
+ "n_buckets": 10,
+ "description": "Session restore: Duration of the longest uninterruptible operation while collecting all window and tab data (ms)"
+ },
+ "FX_SESSION_RESTORE_CONTENT_COLLECT_DATA_LONGEST_OP_MS": {
+ "alert_emails": ["session-restore-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "default",
+ "kind": "exponential",
+ "high": 30000,
+ "n_buckets": 10,
+ "description": "Session restore: Duration of the longest uninterruptible operation while collecting data in the content process (ms)"
+ },
+ "FX_SESSION_RESTORE_SERIALIZE_DATA_MS": {
+ "alert_emails": ["session-restore-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 1000,
+ "n_buckets": 10,
+ "description": "Session restore: Time to JSON serialize session data (ms)"
+ },
+ "FX_SESSION_RESTORE_READ_FILE_MS": {
+ "alert_emails": ["session-restore-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "default",
+ "kind": "exponential",
+ "high": 3000,
+ "n_buckets": 10,
+ "description": "Session restore: Time to read the session data from the file on disk (ms)"
+ },
+ "FX_SESSION_RESTORE_WRITE_FILE_MS": {
+ "alert_emails": ["session-restore-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "default",
+ "kind": "exponential",
+ "high": 3000,
+ "n_buckets": 10,
+ "description": "Session restore: Time to write the session data to the file on disk (ms)"
+ },
+ "FX_SESSION_RESTORE_FILE_SIZE_BYTES": {
+ "alert_emails": ["session-restore-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "default",
+ "kind": "exponential",
+ "high": 50000000,
+ "n_buckets": 30,
+ "description": "Session restore: The size of file sessionstore.js (bytes)"
+ },
+ "FX_SESSION_RESTORE_CORRUPT_FILE": {
+ "alert_emails": ["session-restore-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "default",
+ "kind": "boolean",
+ "description": "Session restore: Whether the file read on startup contained parse-able JSON"
+ },
+ "FX_SESSION_RESTORE_ALL_FILES_CORRUPT": {
+ "alert_emails": ["session-restore-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "default",
+ "kind": "boolean",
+ "description": "Session restore: Whether none of the backup files contained parse-able JSON"
+ },
+ "FX_SESSION_RESTORE_RESTORE_WINDOW_MS": {
+ "alert_emails": ["session-restore-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "default",
+ "kind": "exponential",
+ "high": 3000,
+ "n_buckets": 10,
+ "description": "Session restore: Time spent blocking the main thread while restoring a window state (ms)"
+ },
+ "FX_SESSION_RESTORE_SEND_UPDATE_CAUSED_OOM": {
+ "alert_emails": ["session-restore-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "default",
+ "kind": "count",
+ "description": "Count of messages sent by SessionRestore from child frames to the parent and that cannot be transmitted as they eat up too much memory."
+ },
+ "FX_SESSION_RESTORE_DOM_STORAGE_SIZE_ESTIMATE_CHARS": {
+ "expires_in_version": "default",
+ "kind": "exponential",
+ "high": 30000000,
+ "n_buckets": 20,
+ "description": "Session restore: Number of characters in DOM Storage for a tab. Pages without DOM Storage or with an empty DOM Storage are ignored."
+ },
+ "FX_SESSION_RESTORE_AUTO_RESTORE_DURATION_UNTIL_EAGER_TABS_RESTORED_MS": {
+ "alert_emails": ["session-restore-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "default",
+ "kind": "exponential",
+ "low": 100,
+ "high": 100000,
+ "n_buckets": 20,
+ "description": "Session restore: If the browser is setup to auto-restore tabs, this probe measures the time elapsed between the instant we start Session Restore and the instant we have finished restoring tabs eagerly. At this stage, the tabs that are restored on demand are not restored yet."
+ },
+ "FX_SESSION_RESTORE_MANUAL_RESTORE_DURATION_UNTIL_EAGER_TABS_RESTORED_MS": {
+ "alert_emails": ["session-restore-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "default",
+ "kind": "exponential",
+ "low": 100,
+ "high": 100000,
+ "n_buckets": 20,
+ "description": "Session restore: If a session is restored by the user clicking on 'Restore Session', this probe measures the time elapsed between the instant the user has clicked and the instant we have finished restoring tabs eagerly. At this stage, the tabs that are restored on demand are not restored yet."
+ },
+ "FX_SESSION_RESTORE_NUMBER_OF_TABS_RESTORED": {
+ "expires_in_version": "default",
+ "kind": "exponential",
+ "high": 500,
+ "n_buckets": 20,
+ "description": "Session restore: Number of tabs in the session that has just been restored."
+ },
+ "FX_SESSION_RESTORE_NUMBER_OF_WINDOWS_RESTORED": {
+ "expires_in_version": "default",
+ "kind": "enumerated",
+ "n_values": 50,
+ "description": "Session restore: Number of windows in the session that has just been restored."
+ },
+ "FX_SESSION_RESTORE_NUMBER_OF_EAGER_TABS_RESTORED": {
+ "expires_in_version": "default",
+ "kind": "enumerated",
+ "n_values": 50,
+ "description": "Session restore: Number of tabs restored eagerly in the session that has just been restored."
+ },
+ "FX_TABLETMODE_PAGE_LOAD": {
+ "expires_in_version": "47",
+ "kind": "exponential",
+ "high": 100000,
+ "n_buckets": 30,
+ "keyed": true,
+ "description": "Number of toplevel location changes in tablet and desktop mode (only used on win10 where tablet mode is available)"
+ },
+ "FX_TOUCH_USED": {
+ "expires_in_version": "46",
+ "kind": "count",
+ "description": "Windows only. Counts occurrences of touch events"
+ },
+ "FX_URLBAR_SELECTED_RESULT_INDEX": {
+ "alert_emails": ["firefox-dev@mozilla.org"],
+ "expires_in_version": "55",
+ "kind": "enumerated",
+ "n_values": 17,
+ "bug_numbers": [775825],
+ "description": "Firefox: The index of the selected result in the URL bar popup"
+ },
+ "FX_URLBAR_SELECTED_RESULT_TYPE": {
+ "alert_emails": ["firefox-dev@mozilla.org"],
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 14,
+ "bug_numbers": [775825],
+ "description": "Firefox: The type of the selected result in the URL bar popup. See nsBrowserGlue.js::_handleURLBarTelemetry for the result types."
+ },
+ "INNERWINDOWS_WITH_MUTATION_LISTENERS": {
+ "expires_in_version": "never",
+ "kind": "boolean",
+ "description": "Deleted or to-be-reused innerwindow which has had mutation event listeners."
+ },
+ "CHARSET_OVERRIDE_SITUATION": {
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 8,
+ "description": "Labeling status of top-level page when overriding charset (0: unlabeled file URL without detection, 1: unlabeled non-TLD-guessed non-file URL without detection, 2: unlabeled file URL with detection, 3: unlabeled non-file URL with detection, 4: labeled, 5: already overridden, 6: bug, 7: unlabeled with TLD guessing)"
+ },
+ "CHARSET_OVERRIDE_USED": {
+ "expires_in_version": "never",
+ "kind": "flag",
+ "description": "Whether the character encoding menu was used to override an encoding in this session."
+ },
+ "DECODER_INSTANTIATED_ISO2022JP": {
+ "expires_in_version": "never",
+ "kind": "flag",
+ "description": "Whether the decoder for ISO-2022-JP has been instantiated in this session."
+ },
+ "DECODER_INSTANTIATED_IBM866": {
+ "expires_in_version": "never",
+ "kind": "flag",
+ "description": "Whether the decoder for IBM866 has been instantiated in this session."
+ },
+ "DECODER_INSTANTIATED_MACGREEK": {
+ "expires_in_version": "never",
+ "kind": "flag",
+ "description": "Whether the decoder for MACGREEK has been instantiated in this session."
+ },
+ "DECODER_INSTANTIATED_MACICELANDIC": {
+ "expires_in_version": "never",
+ "kind": "flag",
+ "description": "Whether the decoder for MACICELANDIC has been instantiated in this session."
+ },
+ "DECODER_INSTANTIATED_MACCE": {
+ "expires_in_version": "never",
+ "kind": "flag",
+ "description": "Whether the decoder for MACCE has been instantiated in this session."
+ },
+ "DECODER_INSTANTIATED_MACHEBREW": {
+ "expires_in_version": "never",
+ "kind": "flag",
+ "description": "Whether the decoder for MACHEBREW has been instantiated in this session."
+ },
+ "DECODER_INSTANTIATED_MACARABIC": {
+ "expires_in_version": "never",
+ "kind": "flag",
+ "description": "Whether the decoder for MACARABIC has been instantiated in this session."
+ },
+ "DECODER_INSTANTIATED_MACFARSI": {
+ "expires_in_version": "never",
+ "kind": "flag",
+ "description": "Whether the decoder for MACFARSI has been instantiated in this session."
+ },
+ "DECODER_INSTANTIATED_MACCROATIAN": {
+ "expires_in_version": "never",
+ "kind": "flag",
+ "description": "Whether the decoder for MACCROATIAN has been instantiated in this session."
+ },
+ "DECODER_INSTANTIATED_MACCYRILLIC": {
+ "expires_in_version": "never",
+ "kind": "flag",
+ "description": "Whether the decoder for MACCYRILLIC has been instantiated in this session."
+ },
+ "DECODER_INSTANTIATED_MACROMANIAN": {
+ "expires_in_version": "never",
+ "kind": "flag",
+ "description": "Whether the decoder for MACROMANIAN has been instantiated in this session."
+ },
+ "DECODER_INSTANTIATED_MACTURKISH": {
+ "expires_in_version": "never",
+ "kind": "flag",
+ "description": "Whether the decoder for MACTURKISH has been instantiated in this session."
+ },
+ "DECODER_INSTANTIATED_MACDEVANAGARI": {
+ "expires_in_version": "never",
+ "kind": "flag",
+ "description": "Whether the decoder for MACDEVANAGARI has been instantiated in this session."
+ },
+ "DECODER_INSTANTIATED_MACGUJARATI": {
+ "expires_in_version": "never",
+ "kind": "flag",
+ "description": "Whether the decoder for MACGUJARATI has been instantiated in this session."
+ },
+ "DECODER_INSTANTIATED_MACGURMUKHI": {
+ "expires_in_version": "never",
+ "kind": "flag",
+ "description": "Whether the decoder for MACGURMUKHI has been instantiated in this session."
+ },
+ "DECODER_INSTANTIATED_KOI8R": {
+ "expires_in_version": "never",
+ "kind": "flag",
+ "description": "Whether the decoder for KOI8R has been instantiated in this session."
+ },
+ "DECODER_INSTANTIATED_KOI8U": {
+ "expires_in_version": "never",
+ "kind": "flag",
+ "description": "Whether the decoder for KOI8U has been instantiated in this session."
+ },
+ "DECODER_INSTANTIATED_ISO_8859_5": {
+ "expires_in_version": "never",
+ "kind": "flag",
+ "description": "Whether the decoder for ISO-8859-5 has been instantiated in this session."
+ },
+ "LONG_REFLOW_INTERRUPTIBLE": {
+ "expires_in_version": "never",
+ "kind": "boolean",
+ "description": "Long running reflow, interruptible or not"
+ },
+ "XMLHTTPREQUEST_ASYNC_OR_SYNC": {
+ "expires_in_version": "never",
+ "kind": "boolean",
+ "description": "Type of XMLHttpRequest, async or sync"
+ },
+ "LOCALDOMSTORAGE_SHUTDOWN_DATABASE_MS": {
+ "expires_in_version": "default",
+ "kind": "exponential",
+ "high": 3000,
+ "n_buckets": 10,
+ "description": "Time to flush and close the localStorage database (ms)"
+ },
+ "LOCALDOMSTORAGE_PRELOAD_PENDING_ON_FIRST_ACCESS": {
+ "expires_in_version": "default",
+ "kind": "boolean",
+ "description": "True when we had to wait for a pending preload on first access to localStorage data, false otherwise"
+ },
+ "LOCALDOMSTORAGE_GETALLKEYS_BLOCKING_MS": {
+ "expires_in_version": "40",
+ "kind": "exponential",
+ "high": 3000,
+ "n_buckets": 10,
+ "description": "Time to block before we return a list of all keys in domain's LocalStorage (ms)"
+ },
+ "LOCALDOMSTORAGE_GETKEY_BLOCKING_MS": {
+ "expires_in_version": "40",
+ "kind": "exponential",
+ "high": 3000,
+ "n_buckets": 10,
+ "description": "Time to block before we return a key name in domain's LocalStorage (ms)"
+ },
+ "LOCALDOMSTORAGE_GETLENGTH_BLOCKING_MS": {
+ "expires_in_version": "40",
+ "kind": "exponential",
+ "high": 3000,
+ "n_buckets": 10,
+ "description": "Time to block before we return number of keys in domain's LocalStorage (ms)"
+ },
+ "LOCALDOMSTORAGE_GETVALUE_BLOCKING_MS": {
+ "expires_in_version": "default",
+ "kind": "exponential",
+ "high": 3000,
+ "n_buckets": 10,
+ "description": "Time to block before we return a value for a key in LocalStorage (ms)"
+ },
+ "LOCALDOMSTORAGE_SETVALUE_BLOCKING_MS": {
+ "expires_in_version": "40",
+ "kind": "exponential",
+ "high": 3000,
+ "n_buckets": 10,
+ "description": "Time to block before we set a single key's value in LocalStorage (ms)"
+ },
+ "LOCALDOMSTORAGE_REMOVEKEY_BLOCKING_MS": {
+ "expires_in_version": "40",
+ "kind": "exponential",
+ "high": 3000,
+ "n_buckets": 10,
+ "description": "Time to block before we remove a single key from LocalStorage (ms)"
+ },
+ "LOCALDOMSTORAGE_CLEAR_BLOCKING_MS": {
+ "expires_in_version": "40",
+ "kind": "exponential",
+ "high": 3000,
+ "n_buckets": 10,
+ "description": "Time to block before we clear LocalStorage for all domains (ms)"
+ },
+ "LOCALDOMSTORAGE_UNLOAD_BLOCKING_MS": {
+ "expires_in_version": "40",
+ "kind": "exponential",
+ "high": 3000,
+ "n_buckets": 10,
+ "description": "Time to fetch LocalStorage data before we can clean the cache (ms)"
+ },
+ "LOCALDOMSTORAGE_SESSIONONLY_PRELOAD_BLOCKING_MS": {
+ "expires_in_version": "40",
+ "kind": "exponential",
+ "high": 3000,
+ "n_buckets": 10,
+ "description": "Time to fetch LocalStorage data before we can expose them as session only data (ms)"
+ },
+ "RANGE_CHECKSUM_ERRORS": {
+ "alert_emails": ["perf-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 3000,
+ "n_buckets": 10,
+ "description": "Number of histograms with range checksum errors"
+ },
+ "BUCKET_ORDER_ERRORS": {
+ "alert_emails": ["perf-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 3000,
+ "n_buckets": 10,
+ "description": "Number of histograms with bucket order errors"
+ },
+ "TOTAL_COUNT_HIGH_ERRORS": {
+ "alert_emails": ["perf-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 3000,
+ "n_buckets": 10,
+ "description": "Number of histograms with total count high errors"
+ },
+ "TOTAL_COUNT_LOW_ERRORS": {
+ "alert_emails": ["perf-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 3000,
+ "n_buckets": 10,
+ "description": "Number of histograms with total count low errors"
+ },
+ "TELEMETRY_ARCHIVE_DIRECTORIES_COUNT": {
+ "alert_emails": ["telemetry-client-dev@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "linear",
+ "high": 13,
+ "n_buckets": 12,
+ "bug_numbers": [1162538],
+ "description": "Number of directories in the archive at scan"
+ },
+ "TELEMETRY_ARCHIVE_OLDEST_DIRECTORY_AGE": {
+ "alert_emails": ["telemetry-client-dev@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "linear",
+ "high": 13,
+ "n_buckets": 12,
+ "bug_numbers": [1162538],
+ "description": "The age of the oldest Telemetry archive directory in months"
+ },
+ "TELEMETRY_ARCHIVE_SCAN_PING_COUNT": {
+ "alert_emails": ["telemetry-client-dev@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 100000,
+ "n_buckets": 100,
+ "bug_numbers": [1162538],
+ "description": "Number of Telemetry pings in the archive at scan"
+ },
+ "TELEMETRY_ARCHIVE_SESSION_PING_COUNT": {
+ "alert_emails": ["telemetry-client-dev@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "count",
+ "bug_numbers": [1162538],
+ "description": "Number of Telemetry pings added to the archive during the session"
+ },
+ "TELEMETRY_ARCHIVE_SIZE_MB": {
+ "alert_emails": ["telemetry-client-dev@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "linear",
+ "high": 300,
+ "n_buckets": 60,
+ "bug_numbers": [1162538],
+ "description": "The size of the Telemetry archive (MB)"
+ },
+ "TELEMETRY_ARCHIVE_EVICTED_OVER_QUOTA": {
+ "alert_emails": ["telemetry-client-dev@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 100000,
+ "n_buckets": 100,
+ "bug_numbers": [1162538],
+ "description": "Number of Telemetry pings evicted from the archive during cleanup, because they were over the quota"
+ },
+ "TELEMETRY_ARCHIVE_EVICTED_OLD_DIRS": {
+ "alert_emails": ["telemetry-client-dev@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "linear",
+ "high": 13,
+ "n_buckets": 12,
+ "bug_numbers": [1162538],
+ "description": "Number of Telemetry directories evicted from the archive during cleanup, because they were too old"
+ },
+ "TELEMETRY_ARCHIVE_EVICTING_DIRS_MS": {
+ "alert_emails": ["telemetry-client-dev@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 300000,
+ "n_buckets": 20,
+ "bug_numbers": [1162538],
+ "description": "Time (ms) it takes for evicting old directories"
+ },
+ "TELEMETRY_ARCHIVE_CHECKING_OVER_QUOTA_MS": {
+ "alert_emails": ["telemetry-client-dev@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 300000,
+ "n_buckets": 20,
+ "bug_numbers": [1162538],
+ "description": "Time (ms) it takes for checking if the archive is over-quota"
+ },
+ "TELEMETRY_ARCHIVE_EVICTING_OVER_QUOTA_MS": {
+ "alert_emails": ["telemetry-client-dev@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 300000,
+ "n_buckets": 20,
+ "bug_numbers": [1162538],
+ "description": "Time (ms) it takes for evicting over-quota pings"
+ },
+ "TELEMETRY_PENDING_LOAD_FAILURE_READ": {
+ "alert_emails": ["telemetry-client-dev@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "count",
+ "description": "Number of pending Telemetry pings that failed to load from the disk"
+ },
+ "TELEMETRY_PENDING_LOAD_FAILURE_PARSE": {
+ "alert_emails": ["telemetry-client-dev@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "count",
+ "description": "Number of pending Telemetry pings that failed to parse once loaded from the disk"
+ },
+ "TELEMETRY_PENDING_PINGS_SIZE_MB": {
+ "alert_emails": ["telemetry-client-dev@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "linear",
+ "high": 17,
+ "n_buckets": 16,
+ "description": "The size of the Telemetry pending pings directory (MB). The special value 17 is used to indicate over quota pings."
+ },
+ "TELEMETRY_PENDING_PINGS_AGE": {
+ "alert_emails": ["telemetry-client-dev@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 365,
+ "n_buckets": 30,
+ "description": "The age, in days, of the pending pings."
+ },
+ "TELEMETRY_PENDING_PINGS_EVICTED_OVER_QUOTA": {
+ "alert_emails": ["telemetry-client-dev@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 100000,
+ "n_buckets": 100,
+ "description": "Number of Telemetry pings evicted from the pending pings directory during cleanup, because they were over the quota"
+ },
+ "TELEMETRY_PENDING_EVICTING_OVER_QUOTA_MS": {
+ "alert_emails": ["telemetry-client-dev@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 300000,
+ "n_buckets": 20,
+ "description": "Time (ms) it takes for evicting over-quota pending pings"
+ },
+ "TELEMETRY_PENDING_CHECKING_OVER_QUOTA_MS": {
+ "alert_emails": ["telemetry-client-dev@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 300000,
+ "n_buckets": 20,
+ "description": "Time (ms) it takes for checking if the pending pings are over-quota"
+ },
+ "TELEMETRY_PING_SIZE_EXCEEDED_SEND": {
+ "alert_emails": ["telemetry-client-dev@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "count",
+ "description": "Number of Telemetry pings discarded before sending because they exceeded the maximum size"
+ },
+ "TELEMETRY_PING_SIZE_EXCEEDED_PENDING": {
+ "alert_emails": ["telemetry-client-dev@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "count",
+ "description": "Number of Telemetry pending pings discarded because they exceeded the maximum size"
+ },
+ "TELEMETRY_PING_SIZE_EXCEEDED_ARCHIVED": {
+ "alert_emails": ["telemetry-client-dev@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "count",
+ "description": "Number of archived Telemetry pings discarded because they exceeded the maximum size"
+ },
+ "TELEMETRY_PING_SUBMISSION_WAITING_CLIENTID": {
+ "alert_emails": ["telemetry-client-dev@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "count",
+ "releaseChannelCollection": "opt-out",
+ "bug_numbers": [1233986],
+ "description": "The number of pings that were submitted and had to wait for a client id (i.e. before it was cached or loaded from disk)"
+ },
+ "TELEMETRY_DISCARDED_PENDING_PINGS_SIZE_MB": {
+ "alert_emails": ["telemetry-client-dev@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "linear",
+ "high": 30,
+ "n_buckets": 29,
+ "description": "The size (MB) of the Telemetry pending pings exceeding the maximum file size"
+ },
+ "TELEMETRY_DISCARDED_ARCHIVED_PINGS_SIZE_MB": {
+ "alert_emails": ["telemetry-client-dev@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "linear",
+ "high": 30,
+ "n_buckets": 29,
+ "description": "The size (MB) of the Telemetry archived, compressed, pings exceeding the maximum file size"
+ },
+ "TELEMETRY_DISCARDED_SEND_PINGS_SIZE_MB": {
+ "alert_emails": ["telemetry-client-dev@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "linear",
+ "high": 30,
+ "n_buckets": 29,
+ "description": "The size (MB) of the ping data submitted to Telemetry exceeding the maximum size"
+ },
+ "TELEMETRY_DISCARDED_CONTENT_PINGS_COUNT": {
+ "alert_emails": ["perf-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "count",
+ "description": "Count of discarded content payloads."
+ },
+ "TELEMETRY_COMPRESS": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 3000,
+ "n_buckets": 10,
+ "description": "Time taken to compress telemetry object (ms)"
+ },
+ "TELEMETRY_SEND_SUCCESS" : {
+ "alert_emails": ["telemetry-client-dev@mozilla.com"],
+ "bug_numbers": [1318284],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 120000,
+ "n_buckets": 20,
+ "description": "Time needed (in ms) for a successful send of a Telemetry ping to the servers and getting a reply back."
+ },
+ "TELEMETRY_SEND_FAILURE" : {
+ "alert_emails": ["telemetry-client-dev@mozilla.com"],
+ "bug_numbers": [1318284],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 120000,
+ "n_buckets": 20,
+ "description": "Time needed (in ms) for a failed send of a Telemetry ping to the servers and getting a reply back."
+ },
+ "TELEMETRY_STRINGIFY" : {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 3000,
+ "n_buckets": 10,
+ "description": "Time to stringify telemetry object (ms)"
+ },
+ "TELEMETRY_SUCCESS": {
+ "expires_in_version": "never",
+ "kind": "boolean",
+ "description": "Successful telemetry submission"
+ },
+ "TELEMETRY_INVALID_PING_TYPE_SUBMITTED": {
+ "alert_emails": ["telemetry-client-dev@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "count",
+ "keyed": true,
+ "description": "Count of individual invalid ping types that were submitted to Telemetry."
+ },
+ "TELEMETRY_INVALID_PAYLOAD_SUBMITTED": {
+ "alert_emails": ["telemetry-client-dev@mozilla.com"],
+ "expires_in_version": "never",
+ "bug_numbers": [1292226],
+ "kind": "count",
+ "description": "Count of individual invalid payloads that were submitted to Telemetry."
+ },
+ "TELEMETRY_PING_EVICTED_FOR_SERVER_ERRORS": {
+ "alert_emails": ["telemetry-client-dev@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "count",
+ "description": "Number of Telemetry ping files evicted due to server errors (4XX HTTP code received)"
+ },
+ "TELEMETRY_SESSIONDATA_FAILED_LOAD": {
+ "alert_emails": ["telemetry-client-dev@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "flag",
+ "description": "Set if Telemetry failed to load the session data from disk."
+ },
+ "TELEMETRY_SESSIONDATA_FAILED_PARSE": {
+ "alert_emails": ["telemetry-client-dev@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "flag",
+ "description": "Set if Telemetry failed to parse the session data loaded from disk."
+ },
+ "TELEMETRY_SESSIONDATA_FAILED_VALIDATION": {
+ "alert_emails": ["telemetry-client-dev@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "flag",
+ "description": "Set if Telemetry failed to validate the session data loaded from disk."
+ },
+ "TELEMETRY_SESSIONDATA_FAILED_SAVE": {
+ "alert_emails": ["telemetry-client-dev@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "flag",
+ "description": "Set if Telemetry failed to save the session data to disk."
+ },
+ "TELEMETRY_ASSEMBLE_PAYLOAD_EXCEPTION": {
+ "alert_emails": ["telemetry-client-dev@mozilla.com"],
+ "bug_numbers": [1250640],
+ "expires_in_version": "53",
+ "kind": "count",
+ "description": "Count of exceptions in TelemetrySession.getSessionPayload()."
+ },
+ "TELEMETRY_SCHEDULER_TICK_EXCEPTION": {
+ "alert_emails": ["telemetry-client-dev@mozilla.com"],
+ "bug_numbers": [1250640],
+ "expires_in_version": "53",
+ "kind": "count",
+ "description": "Count of exceptions during executing the TelemetrySession scheduler tick logic."
+ },
+ "TELEMETRY_SCHEDULER_WAKEUP": {
+ "alert_emails": ["telemetry-client-dev@mozilla.com"],
+ "bug_numbers": [1250640],
+ "expires_in_version": "53",
+ "kind": "count",
+ "description": "Count of TelemetrySession scheduler ticks that were delayed long enough to suspect sleep."
+ },
+ "TELEMETRY_SCHEDULER_SEND_DAILY": {
+ "alert_emails": ["telemetry-client-dev@mozilla.com"],
+ "bug_numbers": [1250640],
+ "expires_in_version": "53",
+ "kind": "count",
+ "description": "Count of TelemetrySession triggering a daily ping."
+ },
+ "TELEMETRY_TEST_FLAG": {
+ "expires_in_version": "never",
+ "kind": "flag",
+ "description": "a testing histogram; not meant to be touched"
+ },
+ "TELEMETRY_TEST_COUNT": {
+ "expires_in_version": "never",
+ "kind": "count",
+ "description": "a testing histogram; not meant to be touched"
+ },
+ "TELEMETRY_TEST_COUNT2": {
+ "alert_emails": ["telemetry-client-dev@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "count",
+ "bug_numbers": [1288745],
+ "description": "a testing histogram; not meant to be touched"
+ },
+ "TELEMETRY_TEST_COUNT_INIT_NO_RECORD": {
+ "expires_in_version": "never",
+ "kind": "count",
+ "description": "a testing histogram; not meant to be touched - initially not recording"
+ },
+ "TELEMETRY_TEST_CATEGORICAL": {
+ "alert_emails": ["telemetry-client-dev@mozilla.com"],
+ "bug_numbers": [1188888],
+ "expires_in_version": "never",
+ "kind": "categorical",
+ "labels": [
+ "CommonLabel",
+ "Label2",
+ "Label3"
+ ],
+ "description": "a testing histogram; not meant to be touched"
+ },
+ "TELEMETRY_TEST_CATEGORICAL_OPTOUT": {
+ "alert_emails": ["telemetry-client-dev@mozilla.com"],
+ "bug_numbers": [1188888],
+ "expires_in_version": "never",
+ "releaseChannelCollection": "opt-out",
+ "kind": "categorical",
+ "labels": [
+ "CommonLabel",
+ "Label4",
+ "Label5",
+ "Label6"
+ ],
+ "description": "a testing histogram; not meant to be touched"
+ },
+ "TELEMETRY_TEST_KEYED_COUNT_INIT_NO_RECORD": {
+ "expires_in_version": "never",
+ "kind": "count",
+ "keyed": true,
+ "description": "a testing histogram; not meant to be touched - initially not recording"
+ },
+ "TELEMETRY_TEST_KEYED_FLAG": {
+ "expires_in_version": "never",
+ "kind": "flag",
+ "keyed": true,
+ "description": "a testing histogram; not meant to be touched"
+ },
+ "TELEMETRY_TEST_KEYED_COUNT": {
+ "expires_in_version": "never",
+ "kind": "count",
+ "keyed": true,
+ "description": "a testing histogram; not meant to be touched"
+ },
+ "TELEMETRY_TEST_KEYED_BOOLEAN": {
+ "alert_emails": ["telemetry-client-dev@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "boolean",
+ "keyed": true,
+ "bug_numbers": [1299144],
+ "description": "a testing histogram; not meant to be touched"
+ },
+ "TELEMETRY_TEST_RELEASE_OPTOUT": {
+ "expires_in_version": "never",
+ "kind": "flag",
+ "releaseChannelCollection": "opt-out",
+ "description": "a testing histogram; not meant to be touched"
+ },
+ "TELEMETRY_TEST_RELEASE_OPTIN": {
+ "expires_in_version": "never",
+ "kind": "flag",
+ "releaseChannelCollection": "opt-in",
+ "description": "a testing histogram; not meant to be touched"
+ },
+ "TELEMETRY_TEST_KEYED_RELEASE_OPTIN": {
+ "expires_in_version": "never",
+ "kind": "flag",
+ "keyed": true,
+ "releaseChannelCollection": "opt-in",
+ "description": "a testing histogram; not meant to be touched"
+ },
+ "TELEMETRY_TEST_KEYED_RELEASE_OPTOUT": {
+ "expires_in_version": "never",
+ "kind": "flag",
+ "keyed": true,
+ "releaseChannelCollection": "opt-out",
+ "description": "a testing histogram; not meant to be touched"
+ },
+ "TELEMETRY_TEST_EXPONENTIAL": {
+ "alert_emails": ["telemetry-client-dev@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "low": 1,
+ "high": 2147483646,
+ "n_buckets": 10,
+ "bug_numbers": [1288745],
+ "description": "a testing histogram; not meant to be touched"
+ },
+ "TELEMETRY_TEST_LINEAR": {
+ "alert_emails": ["telemetry-client-dev@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "linear",
+ "low": 1,
+ "high": 2147483646,
+ "n_buckets": 10,
+ "bug_numbers": [1288745],
+ "description": "a testing histogram; not meant to be touched"
+ },
+ "TELEMETRY_TEST_BOOLEAN": {
+ "alert_emails": ["telemetry-client-dev@mozilla.com"],
+ "expires_in_version" : "never",
+ "kind": "boolean",
+ "bug_numbers": [1288745],
+ "description": "a testing histogram; not meant to be touched"
+ },
+ "STARTUP_CRASH_DETECTED": {
+ "expires_in_version": "never",
+ "kind": "flag",
+ "description": "Whether there was a crash during the last startup"
+ },
+ "SAFE_MODE_USAGE": {
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 3,
+ "description": "Whether the user is in safe mode (No, Yes, Forced)"
+ },
+ "SCRIPT_BLOCK_INCORRECT_MIME": {
+ "alert_emails": ["ckerschbaumer@mozilla.com"],
+ "bug_numbers": [1288361, 1299267],
+ "expires_in_version": "56",
+ "kind": "enumerated",
+ "n_values": 15,
+ "description": "Whether the script load has a MIME type of ...? (0=unknown, 1=js, 2=image, 3=audio, 4=video, 5=text/plain, 6=text/csv, 7=text/xml, 8=application/octet-stream, 9=application/xml, 10=text/html, 11=empty)"
+ },
+ "XCTO_NOSNIFF_BLOCK_IMAGE": {
+ "alert_emails": ["ckerschbaumer@mozilla.com"],
+ "bug_numbers": [1302539],
+ "expires_in_version": "56",
+ "kind": "enumerated",
+ "n_values": 3,
+ "description": "Whether XCTO: nosniff would allow/block an image load? (0=allow, 1=block)"
+ },
+ "NEWTAB_PAGE_ENABLED": {
+ "expires_in_version": "default",
+ "kind": "boolean",
+ "description": "New tab page is enabled."
+ },
+ "NEWTAB_PAGE_ENHANCED": {
+ "expires_in_version": "default",
+ "kind": "boolean",
+ "description": "New tab page is enhanced (showing suggestions)."
+ },
+ "NEWTAB_PAGE_LIFE_SPAN": {
+ "expires_in_version": "default",
+ "kind": "exponential",
+ "high": 1200,
+ "n_buckets": 100,
+ "description": "Life-span of a new tab without suggested tile: time delta between first-visible and unload events (half-seconds)."
+ },
+ "NEWTAB_PAGE_LIFE_SPAN_SUGGESTED": {
+ "expires_in_version": "default",
+ "kind": "exponential",
+ "high": 1200,
+ "n_buckets": 100,
+ "description": "Life-span of a new tab with suggested tile: time delta between first-visible and unload events (half-seconds)."
+ },
+ "NEWTAB_PAGE_PINNED_SITES_COUNT": {
+ "expires_in_version": "default",
+ "kind": "enumerated",
+ "n_values": 9,
+ "description": "Number of pinned sites on the new tab page."
+ },
+ "NEWTAB_PAGE_BLOCKED_SITES_COUNT": {
+ "expires_in_version": "default",
+ "kind": "exponential",
+ "high": 100,
+ "n_buckets": 10,
+ "description": "Number of sites blocked from the new tab page."
+ },
+ "NEWTAB_PAGE_SHOWN": {
+ "expires_in_version": "35",
+ "kind": "boolean",
+ "description": "Number of times about:newtab was shown from opening a new tab or window. *** No longer needed (bug 1156565). Delete histogram and accumulation code! ***"
+ },
+ "NEWTAB_PAGE_SITE_CLICKED": {
+ "expires_in_version": "35",
+ "kind": "enumerated",
+ "n_values": 10,
+ "description": "Track click count on about:newtab tiles per index (0-8). For non-default row or column configurations all clicks go into the '9' bucket. *** No longer needed (bug 1156565). Delete histogram and accumulation code! ***"
+ },
+ "BROWSERPROVIDER_XUL_IMPORT_BOOKMARKS": {
+ "expires_in_version": "default",
+ "kind": "exponential",
+ "high": 50000,
+ "n_buckets": 20,
+ "description": "Number of bookmarks in the original XUL places database",
+ "cpp_guard": "ANDROID"
+ },
+ "FENNEC_GLOBALHISTORY_ADD_MS": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "low": 10,
+ "high": 20000,
+ "n_buckets": 20,
+ "description": "Time for a record to be added to history (ms)",
+ "cpp_guard": "ANDROID"
+ },
+ "FENNEC_GLOBALHISTORY_UPDATE_MS": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "low": 10,
+ "high": 20000,
+ "n_buckets": 20,
+ "description": "Time for a record to be updated in history (ms)",
+ "cpp_guard": "ANDROID"
+ },
+ "FENNEC_GLOBALHISTORY_VISITED_BUILD_MS": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "low": 10,
+ "high": 20000,
+ "n_buckets": 20,
+ "description": "Time to update the visited link set (ms)",
+ "cpp_guard": "ANDROID"
+ },
+ "FENNEC_RESTORING_ACTIVITY": {
+ "expires_in_version": "never",
+ "kind": "flag",
+ "description": "Fennec is starting up but the Gecko thread was still running",
+ "cpp_guard": "ANDROID"
+ },
+ "FENNEC_SEARCH_LOADER_TIME_MS": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "low": 10,
+ "high": 20000,
+ "n_buckets": 20,
+ "description": "Time for a URL bar DB search to return (ms)",
+ "cpp_guard": "ANDROID"
+ },
+ "FENNEC_STARTUP_TIME_GECKOREADY": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "low": 500,
+ "high": 20000,
+ "n_buckets": 20,
+ "description": "Time for the Gecko:Ready message to arrive (ms)",
+ "cpp_guard": "ANDROID"
+ },
+ "FENNEC_STARTUP_TIME_JAVAUI": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "low": 100,
+ "high": 5000,
+ "n_buckets": 20,
+ "description": "Time for the Java UI to load (ms)",
+ "cpp_guard": "ANDROID"
+ },
+ "FENNEC_TOPSITES_LOADER_TIME_MS": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "low": 10,
+ "high": 20000,
+ "n_buckets": 20,
+ "description": "Time for the home screen Top Sites query to return with no filter set (ms)",
+ "cpp_guard": "ANDROID"
+ },
+ "FENNEC_ACTIVITY_STREAM_TOPSITES_LOADER_TIME_MS": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "low": 10,
+ "high": 20000,
+ "n_buckets": 20,
+ "description": "Time for the Activity Stream home screen Top Sites query to return (ms)",
+ "alert_emails": ["mobile-frontend@mozilla.com"],
+ "bug_numbers": [1293790],
+ "cpp_guard": "ANDROID"
+ },
+ "FENNEC_HOMEPANELS_CUSTOM": {
+ "expires_in_version": "54",
+ "kind": "boolean",
+ "bug_numbers": [1245368],
+ "description": "Whether the user has customized their homepanels",
+ "cpp_guard": "ANDROID"
+ },
+ "FENNEC_WAS_KILLED": {
+ "expires_in_version": "never",
+ "kind": "flag",
+ "description": "Killed, likely due to an OOM condition",
+ "cpp_guard": "ANDROID"
+ },
+ "FIPS_ENABLED": {
+ "alert_emails": ["seceng@mozilla.org"],
+ "expires_in_version": "54",
+ "kind": "flag",
+ "bug_numbers": [1241317],
+ "releaseChannelCollection": "opt-out",
+ "description": "Has FIPS mode been enabled?"
+ },
+ "SECURITY_UI": {
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 100,
+ "description": "Security UI Telemetry"
+ },
+ "JS_TELEMETRY_ADDON_EXCEPTIONS" : {
+ "expires_in_version" : "never",
+ "kind": "count",
+ "keyed" : true,
+ "description" : "Exceptions thrown by add-ons"
+ },
+ "IPC_TRANSACTION_CANCEL": {
+ "alert_emails": ["billm@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "boolean",
+ "description": "True when an IPC transaction is canceled"
+ },
+ "IPC_SAME_PROCESS_MESSAGE_COPY_OOM_KB": {
+ "expires_in_version": "50",
+ "kind": "exponential",
+ "low": 100,
+ "high": 10000000,
+ "n_buckets": 10,
+ "description": "Whenever the same-process MessageManager cannot be sent through sendAsyncMessage as it would cause an OOM, the size of the message content, in kb."
+ },
+ "SLOW_ADDON_WARNING_STATES": {
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 20,
+ "description": "The states the Slow Add-on Warning goes through. 0: Displayed the warning. 1: User clicked on 'Disable add-on'. 2: User clicked 'Ignore add-on for now'. 3: User clicked 'Ignore add-on permanently'. 4: User closed notification. Other values are reserved for future uses."
+ },
+ "SLOW_ADDON_WARNING_RESPONSE_TIME": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 86400000,
+ "n_buckets": 30,
+ "description": "Time elapsed before responding to the Slow Add-on Warning UI (ms). Not updated if the user doesn't respond at all."
+ },
+ "SEARCH_COUNTS": {
+ "expires_in_version": "never",
+ "kind": "count",
+ "keyed": true,
+ "releaseChannelCollection": "opt-out",
+ "description": "Record the search counts for search engines"
+ },
+ "SEARCH_RESET_RESULT": {
+ "alert_emails": ["fqueze@mozilla.com"],
+ "bug_numbers": [1203168],
+ "expires_in_version": "53",
+ "kind": "enumerated",
+ "n_values": 5,
+ "releaseChannelCollection": "opt-out",
+ "description": "Result of showing the search reset prompt to the user. 0=restored original default, 1=kept current engine, 2=changed engine, 3=closed the page, 4=opened search settings"
+ },
+ "SEARCH_SERVICE_INIT_MS": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 1000,
+ "n_buckets": 15,
+ "description": "Time (ms) it takes to initialize the search service"
+ },
+ "SEARCH_SERVICE_INIT_SYNC": {
+ "alert_emails": ["rvitillo@mozilla.com", "gavin@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "boolean",
+ "description": "search service has been initialized synchronously"
+ },
+ "SEARCH_SERVICE_ENGINE_COUNT": {
+ "releaseChannelCollection": "opt-out",
+ "alert_emails": ["florian@mozilla.com"],
+ "expires_in_version": "55",
+ "bug_numbers": [1268424],
+ "kind": "linear",
+ "high": 200,
+ "n_buckets": 50,
+ "description": "Recorded once per session near startup: records the search plugin count, including both built-in plugins (including the ones the user has hidden) and user-installed plugins."
+ },
+ "SEARCH_SERVICE_HAS_UPDATES": {
+ "alert_emails": ["florian@mozilla.com"],
+ "expires_in_version": "55",
+ "kind": "boolean",
+ "bug_numbers": [1259510],
+ "description": "Recorded once per session near startup: records true/false whether the search service has engines with update URLs.",
+ "releaseChannelCollection": "opt-out"
+ },
+ "SEARCH_SERVICE_HAS_ICON_UPDATES": {
+ "alert_emails": ["florian@mozilla.com"],
+ "expires_in_version": "55",
+ "kind": "boolean",
+ "bug_numbers": [1259510],
+ "description": "Recorded once per session near startup: records true/false whether the search service has engines with icon update URLs.",
+ "releaseChannelCollection": "opt-out"
+ },
+ "SEARCH_SERVICE_COUNTRY_FETCH_TIME_MS": {
+ "alert_emails": ["mhammond@mozilla.com", "gavin@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "n_buckets": 30,
+ "high": 100000,
+ "description": "Time (ms) it takes to fetch the country code"
+ },
+ "SEARCH_SERVICE_COUNTRY_FETCH_RESULT": {
+ "alert_emails": ["mhammond@mozilla.com", "gavin@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 8,
+ "description": "Result of XHR request fetching the country-code. 0=SUCCESS, 1=SUCCESS_WITHOUT_DATA, 2=XHRTIMEOUT, 3=ERROR (rest reserved for finer-grained error codes later)"
+ },
+ "SEARCH_SERVICE_COUNTRY_TIMEOUT": {
+ "alert_emails": ["mhammond@mozilla.com", "gavin@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "boolean",
+ "description": "True if we stopped waiting for the XHR response before it completed"
+ },
+ "SEARCH_SERVICE_COUNTRY_FETCH_CAUSED_SYNC_INIT": {
+ "alert_emails": ["mhammond@mozilla.com", "gavin@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "boolean",
+ "description": "True if the search service was synchronously initialized while we were waiting for the XHR response"
+ },
+ "SEARCH_SERVICE_US_COUNTRY_MISMATCHED_TIMEZONE": {
+ "alert_emails": ["mhammond@mozilla.com", "gavin@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "flag",
+ "description": "Set if the fetched country-code indicates US but the time-zone heuristic doesn't"
+ },
+ "SEARCH_SERVICE_US_TIMEZONE_MISMATCHED_COUNTRY": {
+ "alert_emails": ["mhammond@mozilla.com", "gavin@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "flag",
+ "description": "Set if the time-zone heuristic indicates US but the fetched country code doesn't"
+ },
+ "SEARCH_SERVICE_US_COUNTRY_MISMATCHED_PLATFORM_OSX": {
+ "alert_emails": ["mhammond@mozilla.com", "gavin@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "boolean",
+ "description": "If we are on OSX and either the OSX countryCode or the geoip countryCode indicates we are in the US, set to false if they both do or true otherwise"
+ },
+ "SEARCH_SERVICE_NONUS_COUNTRY_MISMATCHED_PLATFORM_OSX": {
+ "alert_emails": ["mhammond@mozilla.com", "gavin@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "boolean",
+ "description": "If we are on OSX and neither the OSX countryCode nor the geoip countryCode indicates we are in the US, set to false if they both agree on the value or true otherwise"
+ },
+ "SEARCH_SERVICE_US_COUNTRY_MISMATCHED_PLATFORM_WIN": {
+ "alert_emails": ["mhammond@mozilla.com", "gavin@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "boolean",
+ "description": "If we are on Windows and either the Windows countryCode or the geoip countryCode indicates we are in the US, set to false if they both do or true otherwise"
+ },
+ "SEARCH_SERVICE_NONUS_COUNTRY_MISMATCHED_PLATFORM_WIN": {
+ "alert_emails": ["mhammond@mozilla.com", "gavin@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "boolean",
+ "description": "If we are on Windows and neither the Windows countryCode nor the geoip countryCode indicates we are in the US, set to false if they both agree on the value or true otherwise"
+ },
+ "SOCIAL_ENABLED_ON_SESSION": {
+ "expires_in_version": "never",
+ "kind": "flag",
+ "description": "Social has been enabled at least once on the current session"
+ },
+ "ENABLE_PRIVILEGE_EVER_CALLED": {
+ "expires_in_version": "never",
+ "kind": "flag",
+ "description": "Whether enablePrivilege has ever been called during the current session"
+ },
+ "SUBJECT_PRINCIPAL_ACCESSED_WITHOUT_SCRIPT_ON_STACK": {
+ "expires_in_version": "46",
+ "alert_emails": ["bholley@mozilla.com"],
+ "kind": "flag",
+ "description": "Whether the subject principal was accessed without script on the stack during the current session"
+ },
+ "TOUCH_ENABLED_DEVICE": {
+ "expires_in_version": "never",
+ "kind": "boolean",
+ "description": "The device supports touch input",
+ "cpp_guard": "XP_WIN"
+ },
+ "COMPONENTS_SHIM_ACCESSED_BY_CONTENT": {
+ "expires_in_version": "never",
+ "kind": "flag",
+    "description": "Whether content ever accessed the Components shim in this session"
+ },
+ "CHECK_ADDONS_MODIFIED_MS": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 5000,
+ "n_buckets": 15,
+ "description": "Time (ms) it takes to figure out extension last modified time"
+ },
+ "TELEMETRY_MEMORY_REPORTER_MS": {
+ "alert_emails": ["memshrink-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 5000,
+ "n_buckets": 10,
+ "description": "Time (ms) it takes to run memory reporters when sending a telemetry ping"
+ },
+ "SSL_SUCCESFUL_CERT_VALIDATION_TIME_MOZILLAPKIX" : {
+ "alert_emails": ["seceng-telemetry@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 60000,
+ "n_buckets": 50,
+ "description": "Time spent on a successful cert verification in mozilla::pkix mode (ms)"
+ },
+ "SSL_INITIAL_FAILED_CERT_VALIDATION_TIME_MOZILLAPKIX" : {
+ "alert_emails": ["seceng-telemetry@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 60000,
+ "n_buckets": 50,
+ "description": "Time spent on an initially failed cert verification in mozilla::pkix mode (ms)"
+ },
+ "CRASH_STORE_COMPRESSED_BYTES": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 1000000,
+ "n_buckets": 202,
+ "description": "Size (in bytes) of the compressed crash store JSON file."
+ },
+ "PDF_VIEWER_USED": {
+ "expires_in_version": "default",
+ "kind": "boolean",
+ "description": "How many times PDF Viewer was used"
+ },
+ "PDF_VIEWER_FALLBACK_SHOWN": {
+ "expires_in_version": "default",
+ "kind": "boolean",
+ "description": "How many times PDF Viewer fallback bar was shown"
+ },
+ "PDF_VIEWER_PRINT": {
+ "expires_in_version": "never",
+ "kind": "boolean",
+ "description": "How many times PDF Viewer print functionality was used"
+ },
+ "PDF_VIEWER_DOCUMENT_VERSION": {
+ "expires_in_version": "default",
+ "kind": "enumerated",
+ "n_values": 20,
+ "description": "The PDF document version (1.1, 1.2, etc.)"
+ },
+ "PDF_VIEWER_DOCUMENT_GENERATOR": {
+ "expires_in_version": "default",
+ "kind": "enumerated",
+ "n_values": 30,
+ "description": "The PDF document generator"
+ },
+ "PDF_VIEWER_DOCUMENT_SIZE_KB": {
+ "expires_in_version": "default",
+ "kind": "exponential",
+ "low": 2,
+ "high": 65536,
+ "n_buckets": 20,
+ "description": "The PDF document size (KB)"
+ },
+ "PDF_VIEWER_FONT_TYPES": {
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 19,
+ "description": "The PDF document font types used"
+ },
+ "PDF_VIEWER_EMBED": {
+ "expires_in_version": "never",
+ "kind": "boolean",
+ "description": "A PDF document was embedded: true using OBJECT/EMBED and false using IFRAME"
+ },
+ "PDF_VIEWER_FORM": {
+ "expires_in_version": "default",
+ "kind": "boolean",
+ "description": "A PDF form expected: true for AcroForm and false for XFA"
+ },
+ "PDF_VIEWER_STREAM_TYPES": {
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 19,
+ "description": "The PDF document compression stream types used"
+ },
+ "PDF_VIEWER_TIME_TO_VIEW_MS": {
+ "expires_in_version": "default",
+ "kind": "exponential",
+ "high": 10000,
+ "n_buckets": 50,
+ "description": "Time spent to display first page in PDF Viewer (ms)"
+ },
+ "PLUGINS_NOTIFICATION_SHOWN": {
+ "expires_in_version": "never",
+ "kind": "boolean",
+ "description": "The number of times the click-to-activate notification was shown: false: shown by in-content activation true: shown by location bar activation"
+ },
+ "PLUGINS_NOTIFICATION_PLUGIN_COUNT": {
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 5,
+ "description": "The number of plugins present in the click-to-activate notification, minus one (1, 2, 3, 4, more than 4)"
+ },
+ "PLUGINS_NOTIFICATION_USER_ACTION": {
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 3,
+ "description": "User actions taken in the plugin notification: 0: allownow 1: allowalways 2: block"
+ },
+ "PLUGINS_INFOBAR_SHOWN": {
+ "expires_in_version": "never",
+ "kind": "boolean",
+ "description": "Count of when the hidden-plugin infobar was displayed."
+ },
+ "PLUGINS_INFOBAR_BLOCK": {
+ "expires_in_version": "never",
+ "kind": "boolean",
+ "description": "Count the number of times the user clicked 'block' on the hidden-plugin infobar."
+ },
+ "PLUGINS_INFOBAR_ALLOW": {
+ "expires_in_version": "never",
+ "kind": "boolean",
+ "description": "Count the number of times the user clicked 'allow' on the hidden-plugin infobar."
+ },
+ "POPUP_NOTIFICATION_STATS": {
+ "releaseChannelCollection": "opt-out",
+ "alert_emails": ["firefox-dev@mozilla.org"],
+ "bug_numbers": [1207089],
+ "expires_in_version": "55",
+ "kind": "enumerated",
+ "keyed": true,
+ "n_values": 40,
+ "description": "(Bug 1207089) Usage of popup notifications, keyed by ID (0 = Offered, 1..4 = Action, 5 = Click outside, 6 = Leave page, 7 = Use 'X', 8 = Not now, 10 = Open submenu, 11 = Learn more. Add 20 if happened after reopen.)"
+ },
+ "POPUP_NOTIFICATION_MAIN_ACTION_MS": {
+ "alert_emails": ["firefox-dev@mozilla.org"],
+ "bug_numbers": [1207089],
+ "expires_in_version": "55",
+ "kind": "exponential",
+ "keyed": true,
+ "low": 100,
+ "high": 600000,
+ "n_buckets": 40,
+ "description": "(Bug 1207089) Time in ms between initially requesting a popup notification and triggering the main action, keyed by ID"
+ },
+ "POPUP_NOTIFICATION_DISMISSAL_MS": {
+ "alert_emails": ["firefox-dev@mozilla.org"],
+ "bug_numbers": [1207089],
+ "expires_in_version": "55",
+ "kind": "exponential",
+ "keyed": true,
+ "low": 200,
+ "high": 20000,
+ "n_buckets": 50,
+ "description": "(Bug 1207089) Time in ms between displaying a popup notification and dismissing it without an action the first time, keyed by ID"
+ },
+ "PRINT_PREVIEW_OPENED_COUNT": {
+ "alert_emails": ["carnold@mozilla.org"],
+ "bug_numbers": [1275570],
+ "expires_in_version": "56",
+ "kind": "count",
+ "releaseChannelCollection": "opt-out",
+ "description": "A counter incremented every time the browser enters print preview."
+ },
+ "PRINT_PREVIEW_SIMPLIFY_PAGE_OPENED_COUNT": {
+ "alert_emails": ["carnold@mozilla.org"],
+ "bug_numbers": [1275570],
+ "expires_in_version": "56",
+ "kind": "count",
+ "releaseChannelCollection": "opt-out",
+ "description": "A counter incremented every time the browser enters simplified mode on print preview."
+ },
+ "PRINT_PREVIEW_SIMPLIFY_PAGE_UNAVAILABLE_COUNT": {
+ "alert_emails": ["carnold@mozilla.org"],
+ "bug_numbers": [1287587],
+ "expires_in_version": "56",
+ "kind": "count",
+ "releaseChannelCollection": "opt-out",
+ "description": "A counter incremented every time the simplified mode is unavailable on print preview."
+ },
+ "PRINT_DIALOG_OPENED_COUNT": {
+ "alert_emails": ["carnold@mozilla.org"],
+ "bug_numbers": [1306624],
+ "expires_in_version": "56",
+ "kind": "count",
+ "keyed": true,
+ "releaseChannelCollection": "opt-out",
+ "description": "A counter incremented every time the user opens print dialog."
+ },
+ "PRINT_COUNT": {
+ "alert_emails": ["carnold@mozilla.org"],
+ "bug_numbers": [1287587],
+ "expires_in_version": "56",
+ "kind": "count",
+ "keyed": true,
+ "releaseChannelCollection": "opt-out",
+ "description": "A counter incremented every time the user prints a document."
+ },
+ "DEVTOOLS_DEBUGGER_DISPLAY_SOURCE_LOCAL_MS": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000,
+ "n_buckets": 1000,
+ "description": "The time (in milliseconds) that it took to display a selected source to the user."
+ },
+ "DEVTOOLS_DEBUGGER_DISPLAY_SOURCE_REMOTE_MS": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000,
+ "n_buckets": 1000,
+ "description": "The time (in milliseconds) that it took to display a selected source to the user."
+ },
+ "MEDIA_RUST_MP4PARSE_SUCCESS": {
+ "alert_emails": ["giles@mozilla.com", "kinetik@flim.org"],
+ "expires_in_version": "55",
+ "kind": "boolean",
+ "bug_numbers": [1220885],
+ "description": "(Bug 1220885) Whether the rust mp4 demuxer successfully parsed a stream segment.",
+ "cpp_guard": "MOZ_RUST_MP4PARSE"
+ },
+ "MEDIA_RUST_MP4PARSE_ERROR_CODE": {
+ "alert_emails": ["giles@mozilla.com", "kinetik@flim.org"],
+ "expires_in_version": "55",
+ "kind": "enumerated",
+ "n_values": 32,
+ "bug_numbers": [1238420],
+    "description": "The error code reported when an MP4 parse attempt has failed. 0 = OK, 1 = bad argument, 2 = invalid data, 3 = unsupported, 4 = unexpected end of file, 5 = read error.",
+ "cpp_guard": "MOZ_RUST_MP4PARSE"
+ },
+ "MEDIA_RUST_MP4PARSE_TRACK_MATCH_AUDIO": {
+ "alert_emails": ["giles@mozilla.com", "kinetik@flim.org"],
+ "expires_in_version": "55",
+ "kind": "boolean",
+ "bug_numbers": [1231169],
+    "description": "Whether rust and stagefright mp4 parser audio track results match.",
+ "cpp_guard": "MOZ_RUST_MP4PARSE"
+ },
+ "MEDIA_RUST_MP4PARSE_TRACK_MATCH_VIDEO": {
+ "alert_emails": ["giles@mozilla.com", "kinetik@flim.org"],
+ "expires_in_version": "55",
+ "kind": "boolean",
+ "bug_numbers": [1231169],
+    "description": "Whether rust and stagefright mp4 parser video track results match.",
+ "cpp_guard": "MOZ_RUST_MP4PARSE"
+ },
+ "MEDIA_WMF_DECODE_ERROR": {
+ "expires_in_version": "55",
+ "kind": "enumerated",
+ "n_values": 256,
+ "description": "WMF media decoder error or success (0) codes."
+ },
+ "MEDIA_OGG_LOADED_IS_CHAINED": {
+ "alert_emails": ["cpearce@mozilla.com"],
+ "expires_in_version": "53",
+ "kind": "boolean",
+ "description": "Whether Ogg audio/video encountered are chained or not.",
+ "bug_numbers": [1230295]
+ },
+ "MEDIA_HLS_CANPLAY_REQUESTED": {
+ "alert_emails": ["ajones@mozilla.com", "giles@mozilla.com"],
+ "expires_in_version": "55",
+ "kind": "boolean",
+ "description": "Reports a true value when a page requests canPlayType for an HTTP Live Streaming media type (or generic m3u playlist).",
+ "bug_numbers": [1262659]
+ },
+ "MEDIA_HLS_DECODER_SUCCESS": {
+ "alert_emails": ["ajones@mozilla.com", "giles@mozilla.com"],
+ "expires_in_version": "55",
+ "kind": "boolean",
+ "description": "Reports whether a decoder for an HTTP Live Streaming media type was created when requested.",
+ "bug_numbers": [1262659]
+ },
+ "MEDIA_DECODING_PROCESS_CRASH": {
+ "alert_emails": ["bwu@mozilla.com", "jolin@mozilla.com", "jacheng@mozilla.com"],
+ "expires_in_version": "57",
+ "kind": "count",
+ "bug_numbers": [1297556, 1257777],
+    "description": "Records a value each time the Fennec remote decoding process crashes unexpectedly while decoding media content.",
+ "releaseChannelCollection": "opt-out"
+ },
+ "VIDEO_MFT_OUTPUT_NULL_SAMPLES": {
+ "alert_emails": ["cpearce@mozilla.com"],
+ "expires_in_version": "53",
+ "kind": "enumerated",
+ "n_values": 10,
+ "description": "Does the WMF video decoder return success but null output? 0 = playback successful, 1 = excessive null output but able to decode some frames, 2 = excessive null output and gave up, 3 = null output but recovered, 4 = non-excessive null output without being able to decode frames.",
+ "bug_numbers": [1176071]
+ },
+ "AUDIO_MFT_OUTPUT_NULL_SAMPLES": {
+ "alert_emails": ["cpearce@mozilla.com"],
+ "expires_in_version": "53",
+ "kind": "count",
+ "description": "How many times the audio MFT decoder returns success but output nothing.",
+ "bug_numbers": [1176071]
+ },
+ "VIDEO_CAN_CREATE_AAC_DECODER": {
+ "alert_emails": ["cpearce@mozilla.com"],
+ "expires_in_version": "58",
+ "kind": "boolean",
+    "description": "Whether at startup we report we can playback MP4 (AAC) audio. This single value is recorded at every startup.",
+ "releaseChannelCollection": "opt-out"
+ },
+ "VIDEO_CAN_CREATE_H264_DECODER": {
+ "alert_emails": ["cpearce@mozilla.com"],
+ "expires_in_version": "58",
+ "kind": "boolean",
+    "description": "Whether at startup we report we can playback MP4 (H.264) video. This single value is recorded at every startup.",
+ "releaseChannelCollection": "opt-out"
+ },
+ "VIDEO_CANPLAYTYPE_H264_CONSTRAINT_SET_FLAG": {
+ "expires_in_version": "50",
+ "kind": "enumerated",
+ "n_values": 128,
+ "description": "The H.264 constraint set flag as extracted from the codecs parameter passed to HTMLMediaElement.canPlayType, with the addition of 0 for unknown values."
+ },
+ "VIDEO_CANPLAYTYPE_H264_LEVEL": {
+ "expires_in_version": "50",
+ "kind": "enumerated",
+ "n_values": 51,
+ "description": "The H.264 level (level_idc) as extracted from the codecs parameter passed to HTMLMediaElement.canPlayType, from levels 1 (10) to 5.2 (51), with the addition of 0 for unknown values."
+ },
+ "VIDEO_CANPLAYTYPE_H264_PROFILE": {
+ "expires_in_version": "50",
+ "kind": "enumerated",
+ "n_values": 244,
+ "description": "The H.264 profile number (profile_idc) as extracted from the codecs parameter passed to HTMLMediaElement.canPlayType."
+ },
+ "DECODER_DOCTOR_INFOBAR_STATS": {
+ "alert_emails": ["gsquelart@mozilla.com"],
+ "bug_numbers": [1271483],
+ "expires_in_version": "53",
+ "kind": "enumerated",
+ "keyed": true,
+ "n_values": 8,
+ "description": "Counts of various Decoder Doctor notification events. Used to track efficacy of Decoder Doctor at helping users fix problems with their audio/video codecs. Keys are localized string names that identify problem with audio/video codecs that Decoder Doctor attempts to solve; see string values in dom.properties for verbose description of problems being solved. 0=recorded every time the Decoder Doctor notification is shown, 1=recorded the first time in a profile when notification is shown, 2=recorded when 'Learn how' button clicked, 3=recorded when 'Learn how' button first clicked in a profile, 4=recorded when issue solved after infobar has been shown at least once in a profile."
+ },
+ "VIDEO_DECODED_H264_SPS_CONSTRAINT_SET_FLAG": {
+ "expires_in_version": "55",
+ "kind": "enumerated",
+ "n_values": 128,
+ "description": "A bit pattern to collect H.264 constraint set flag from the decoded SPS. Bits 0 through 5 represent constraint_set0_flag through constraint_set5_flag, respectively."
+ },
+ "VIDEO_DECODED_H264_SPS_LEVEL": {
+ "expires_in_version": "55",
+ "kind": "enumerated",
+ "n_values": 51,
+ "description": "The H.264 level (level_idc) as extracted from the decoded SPS, from levels 1 (10) to 5.2 (51), with the addition of 0 for unknown values."
+ },
+ "VIDEO_DECODED_H264_SPS_PROFILE": {
+ "expires_in_version": "55",
+ "kind": "enumerated",
+ "n_values": 244,
+ "description": "The H.264 profile number (profile_idc) as extracted from the decoded SPS."
+ },
+ "VIDEO_H264_SPS_MAX_NUM_REF_FRAMES": {
+ "expires_in_version": "55",
+ "kind": "enumerated",
+ "n_values": 17,
+ "description": "SPS.max_num_ref_frames indicates how deep the H.264 queue is going to be, and as such the minimum memory usage by the decoder, from 0 to 16. 17 indicates an invalid value."
+ },
+ "WEBRTC_ICE_FINAL_CONNECTION_STATE": {
+ "alert_emails": ["webrtc-ice-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "53",
+ "kind": "enumerated",
+ "n_values": 7,
+ "description": "The ICE connection state when the PC was closed"
+ },
+ "WEBRTC_ICE_ON_TIME_TRICKLE_ARRIVAL_TIME": {
+ "alert_emails": ["webrtc-ice-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "53",
+ "kind": "exponential",
+ "high": 10000,
+ "n_buckets": 20,
+ "description": "The length of time (in milliseconds) that a trickle candidate took to arrive after the start of ICE, given that it arrived when ICE was not in a failure state (ie; a candidate that we could do something with, hence 'on time')"
+ },
+ "WEBRTC_ICE_LATE_TRICKLE_ARRIVAL_TIME": {
+ "alert_emails": ["webrtc-ice-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "53",
+ "kind": "exponential",
+ "high": 10000,
+ "n_buckets": 20,
+ "description": "The length of time (in milliseconds) that a trickle candidate took to arrive after the start of ICE, given that it arrived after ICE failed."
+ },
+ "WEBRTC_ICE_SUCCESS_TIME": {
+ "alert_emails": ["webrtc-ice-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "53",
+ "kind": "exponential",
+ "high": 10000,
+ "n_buckets": 20,
+ "description": "The length of time (in milliseconds) it took for ICE to complete, given that ICE succeeded."
+ },
+ "WEBRTC_ICE_FAILURE_TIME": {
+ "alert_emails": ["webrtc-ice-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "53",
+ "kind": "exponential",
+ "high": 10000,
+ "n_buckets": 20,
+ "description": "The length of time (in milliseconds) it took for ICE to complete, given that it failed."
+ },
+ "WEBRTC_ICE_SUCCESS_RATE": {
+ "alert_emails": ["webrtc-ice-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "53",
+ "kind": "boolean",
+ "description": "The number of failed ICE Connections (0) vs. number of successful ICE connections (1)."
+ },
+ "WEBRTC_STUN_RATE_LIMIT_EXCEEDED_BY_TYPE_GIVEN_SUCCESS": {
+ "alert_emails": ["webrtc-ice-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "53",
+ "kind": "enumerated",
+ "n_values": 4,
+ "description": "For each successful PeerConnection, bit 0 indicates the short-duration rate limit was reached, bit 1 indicates the long-duration rate limit was reached"
+ },
+ "WEBRTC_STUN_RATE_LIMIT_EXCEEDED_BY_TYPE_GIVEN_FAILURE": {
+ "alert_emails": ["webrtc-ice-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "53",
+ "kind": "enumerated",
+ "n_values": 4,
+ "description": "For each failed PeerConnection, bit 0 indicates the short-duration rate limit was reached, bit 1 indicates the long-duration rate limit was reached"
+ },
+ "WEBRTC_AVSYNC_WHEN_AUDIO_LAGS_VIDEO_MS": {
+ "alert_emails": ["webrtc-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 60000,
+ "n_buckets": 1000,
+ "description": "The delay (in milliseconds) when audio is behind video. Zero delay is counted. Measured every second of a call."
+ },
+ "WEBRTC_AVSYNC_WHEN_VIDEO_LAGS_AUDIO_MS": {
+ "alert_emails": ["webrtc-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 60000,
+ "n_buckets": 1000,
+ "description": "The delay (in milliseconds) when video is behind audio. Zero delay is not counted. Measured every second of a call."
+ },
+ "WEBRTC_VIDEO_QUALITY_INBOUND_BANDWIDTH_KBITS": {
+ "alert_emails": ["webrtc-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 1000000,
+ "n_buckets": 1000,
+ "description": "Locally measured data rate of inbound video (kbit/s). Computed every second of a call."
+ },
+ "WEBRTC_AUDIO_QUALITY_INBOUND_BANDWIDTH_KBITS": {
+ "alert_emails": ["webrtc-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 1000000,
+ "n_buckets": 1000,
+ "description": "Locally measured data rate on inbound audio (kbit/s). Computed every second of a call."
+ },
+ "WEBRTC_VIDEO_QUALITY_OUTBOUND_BANDWIDTH_KBITS": {
+ "alert_emails": ["webrtc-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 1000000,
+ "n_buckets": 1000,
+ "description": "Data rate deduced from RTCP from remote recipient of outbound video (kbit/s). Computed every second of a call (for easy comparison)."
+ },
+ "WEBRTC_AUDIO_QUALITY_OUTBOUND_BANDWIDTH_KBITS": {
+ "alert_emails": ["webrtc-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 1000000,
+ "n_buckets": 1000,
+ "description": "Data rate deduced from RTCP from remote recipient of outbound audio (kbit/s). Computed every second of a call (for easy comparison)."
+ },
+ "WEBRTC_VIDEO_QUALITY_INBOUND_PACKETLOSS_RATE": {
+ "alert_emails": ["webrtc-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 1000,
+ "n_buckets": 100,
+ "description": "Locally measured packet loss on inbound video (permille). Sampled every second of a call."
+ },
+ "WEBRTC_AUDIO_QUALITY_INBOUND_PACKETLOSS_RATE": {
+ "alert_emails": ["webrtc-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 1000,
+ "n_buckets": 100,
+ "description": "Locally measured packet loss on inbound audio (permille). Sampled every second of a call."
+ },
+ "WEBRTC_VIDEO_QUALITY_OUTBOUND_PACKETLOSS_RATE": {
+ "alert_emails": ["webrtc-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 1000,
+ "n_buckets": 100,
+ "description": "RTCP-reported packet loss by remote recipient of outbound video (permille). Sampled every second of a call (for easy comparison)."
+ },
+ "WEBRTC_AUDIO_QUALITY_OUTBOUND_PACKETLOSS_RATE": {
+ "alert_emails": ["webrtc-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 1000,
+ "n_buckets": 100,
+ "description": "RTCP-reported packet loss by remote recipient of outbound audio (permille). Sampled every second of a call (for easy comparison)."
+ },
+ "WEBRTC_VIDEO_QUALITY_INBOUND_JITTER": {
+ "alert_emails": ["webrtc-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000,
+ "n_buckets": 100,
+ "description": "Locally measured jitter on inbound video (ms). Sampled every second of a call."
+ },
+ "WEBRTC_AUDIO_QUALITY_INBOUND_JITTER": {
+ "alert_emails": ["webrtc-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000,
+ "n_buckets": 1000,
+ "description": "Locally measured jitter on inbound audio (ms). Sampled every second of a call."
+ },
+ "WEBRTC_VIDEO_QUALITY_OUTBOUND_JITTER": {
+ "alert_emails": ["webrtc-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000,
+ "n_buckets": 1000,
+ "description": "RTCP-reported jitter by remote recipient of outbound video (ms). Sampled every second of a call (for easy comparison)."
+ },
+ "WEBRTC_AUDIO_QUALITY_OUTBOUND_JITTER": {
+ "alert_emails": ["webrtc-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000,
+ "n_buckets": 1000,
+ "description": "RTCP-reported jitter by remote recipient of outbound audio (ms). Sampled every second of a call (for easy comparison)."
+ },
+ "WEBRTC_VIDEO_ERROR_RECOVERY_MS": {
+ "alert_emails": ["webrtc-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000,
+ "n_buckets": 500,
+ "description": "Time to recover from a video error in ms"
+ },
+ "WEBRTC_VIDEO_RECOVERY_BEFORE_ERROR_PER_MIN": {
+ "alert_emails": ["webrtc-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 1000,
+ "n_buckets": 200,
+ "description": "Number of losses recovered before error per min"
+ },
+ "WEBRTC_VIDEO_RECOVERY_AFTER_ERROR_PER_MIN": {
+ "alert_emails": ["webrtc-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 1000,
+ "n_buckets": 200,
+ "description": "Number of losses recovered after error per min"
+ },
+ "WEBRTC_VIDEO_DECODE_ERROR_TIME_PERMILLE": {
+ "alert_emails": ["webrtc-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 1000,
+ "n_buckets": 100,
+ "description": "Percentage*10 (permille) of call decoding with errors or frozen due to errors"
+ },
+ "WEBRTC_VIDEO_QUALITY_OUTBOUND_RTT": {
+ "alert_emails": ["webrtc-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000,
+ "n_buckets": 1000,
+ "description": "Roundtrip time of outbound video (ms). Sampled every second of a call."
+ },
+ "WEBRTC_AUDIO_QUALITY_OUTBOUND_RTT": {
+ "alert_emails": ["webrtc-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000,
+ "n_buckets": 1000,
+ "description": "Roundtrip time of outbound audio (ms). Sampled every second of a call."
+ },
+ "WEBRTC_VIDEO_ENCODER_BITRATE_AVG_PER_CALL_KBPS": {
+ "alert_emails": ["webrtc-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000,
+ "n_buckets": 100,
+ "description": "Video encoder's average bitrate (in kbits/s) over an entire call"
+ },
+ "WEBRTC_VIDEO_ENCODER_BITRATE_STD_DEV_PER_CALL_KBPS": {
+ "alert_emails": ["webrtc-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 5000,
+ "n_buckets": 100,
+ "description": "Standard deviation from video encoder's average bitrate (in kbits/s) over an entire call"
+ },
+ "WEBRTC_VIDEO_ENCODER_FRAMERATE_AVG_PER_CALL": {
+ "alert_emails": ["webrtc-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 200,
+ "n_buckets": 50,
+ "description": "Video encoder's average framerate (in fps) over an entire call"
+ },
+ "WEBRTC_VIDEO_ENCODER_FRAMERATE_10X_STD_DEV_PER_CALL": {
+ "alert_emails": ["webrtc-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 200,
+ "n_buckets": 50,
+ "description": "Standard deviation from video encoder's average framerate (in 1/10 fps) over an entire call"
+ },
+ "WEBRTC_VIDEO_ENCODER_DROPPED_FRAMES_PER_CALL_FPM": {
+ "alert_emails": ["webrtc-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 50000,
+ "n_buckets": 100,
+ "description": "Video encoder's number of frames dropped (in frames/min) over an entire call"
+ },
+ "WEBRTC_VIDEO_DECODER_BITRATE_AVG_PER_CALL_KBPS": {
+ "alert_emails": ["webrtc-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000,
+ "n_buckets": 100,
+ "description": "Video decoder's average bitrate (in kbits/s) over an entire call"
+ },
+ "WEBRTC_VIDEO_DECODER_BITRATE_STD_DEV_PER_CALL_KBPS": {
+ "alert_emails": ["webrtc-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 5000,
+ "n_buckets": 100,
+ "description": "Standard deviation from video decoder's average bitrate (in kbits/s) over an entire call"
+ },
+ "WEBRTC_VIDEO_DECODER_FRAMERATE_AVG_PER_CALL": {
+ "alert_emails": ["webrtc-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 200,
+ "n_buckets": 50,
+ "description": "Video decoder's average framerate (in fps) over an entire call"
+ },
+ "WEBRTC_VIDEO_DECODER_FRAMERATE_10X_STD_DEV_PER_CALL": {
+ "alert_emails": ["webrtc-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 200,
+ "n_buckets": 50,
+ "description": "Standard deviation from video decoder's average framerate (in 1/10 fps) over an entire call"
+ },
+ "WEBRTC_VIDEO_DECODER_DISCARDED_PACKETS_PER_CALL_PPM": {
+ "alert_emails": ["webrtc-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 50000,
+ "n_buckets": 100,
+ "description": "Video decoder's number of discarded packets (in packets/min) over an entire call"
+ },
+ "WEBRTC_CALL_DURATION": {
+ "alert_emails": ["webrtc-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000,
+ "n_buckets": 1000,
+ "description": "The length of time (in seconds) that a call lasted."
+ },
+ "WEBRTC_CALL_COUNT": {
+ "alert_emails": ["webrtc-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "48",
+ "kind": "exponential",
+ "high": 500,
+ "n_buckets": 50,
+ "description": "The number of calls made during a session."
+ },
+ "WEBRTC_CALL_COUNT_2": {
+ "alert_emails": ["webrtc-telemetry-alerts@mozilla.com"],
+ "bug_numbers": [1261063],
+ "expires_in_version": "never",
+ "kind": "count",
+ "description": "The number of calls made during a session."
+ },
+ "WEBRTC_ICE_ADD_CANDIDATE_ERRORS_GIVEN_SUCCESS": {
+ "alert_emails": ["webrtc-ice-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "53",
+ "kind": "linear",
+ "high": 30,
+ "n_buckets": 29,
+ "description": "The number of times AddIceCandidate failed on a given PeerConnection, given that ICE succeeded."
+ },
+ "WEBRTC_ICE_ADD_CANDIDATE_ERRORS_GIVEN_FAILURE": {
+ "alert_emails": ["webrtc-ice-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "53",
+ "kind": "linear",
+ "high": 30,
+ "n_buckets": 29,
+ "description": "The number of times AddIceCandidate failed on a given PeerConnection, given that ICE failed."
+ },
+ "WEBRTC_GET_USER_MEDIA_SECURE_ORIGIN": {
+ "alert_emails": ["seceng@mozilla.org"],
+ "expires_in_version": "55",
+ "kind": "enumerated",
+ "n_values": 15,
+ "description": "Origins for getUserMedia calls (0=other, 1=HTTPS, 2=file, 3=app, 4=localhost, 5=loop, 6=privileged)",
+ "releaseChannelCollection": "opt-out"
+ },
+ "WEBRTC_GET_USER_MEDIA_TYPE": {
+ "alert_emails": ["webrtc-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 8,
+ "description": "Type for media in getUserMedia calls (0=Camera, 1=Screen, 2=Application, 3=Window, 4=Browser, 5=Microphone, 6=AudioCapture, 7=Other)"
+ },
+ "WEBRTC_LOAD_STATE_RELAXED": {
+ "alert_emails": ["webrtc-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "linear",
+ "high": 100,
+ "n_buckets": 25,
+ "description": "Percentage of time spent in the Relaxed load state in calls over 30 seconds."
+ },
+ "WEBRTC_LOAD_STATE_RELAXED_SHORT": {
+ "alert_emails": ["webrtc-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "linear",
+ "high": 100,
+ "n_buckets": 25,
+ "description": "Percentage of time spent in the Relaxed load state in calls 5-30 seconds."
+ },
+ "WEBRTC_LOAD_STATE_NORMAL": {
+ "alert_emails": ["webrtc-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "linear",
+ "high": 100,
+ "n_buckets": 25,
+ "description": "Percentage of time spent in the Normal load state in calls over 30 seconds."
+ },
+ "WEBRTC_LOAD_STATE_NORMAL_SHORT": {
+ "alert_emails": ["webrtc-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "linear",
+ "high": 100,
+ "n_buckets": 25,
+    "description": "Percentage of time spent in the Normal load state in calls 5-30 seconds."
+ },
+ "WEBRTC_LOAD_STATE_STRESSED": {
+ "alert_emails": ["webrtc-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "linear",
+ "high": 100,
+ "n_buckets": 25,
+ "description": "Percentage of time spent in the Stressed load state in calls over 30 seconds."
+ },
+ "WEBRTC_LOAD_STATE_STRESSED_SHORT": {
+ "alert_emails": ["webrtc-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "linear",
+ "high": 100,
+ "n_buckets": 25,
+ "description": "Percentage of time spent in the Stressed load state in calls 5-30 seconds."
+ },
+ "WEBRTC_RENEGOTIATIONS": {
+ "alert_emails": ["webrtc-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "linear",
+ "high": 21,
+ "n_buckets": 20,
+ "description": "Number of Renegotiations during each call"
+ },
+ "WEBRTC_MAX_VIDEO_SEND_TRACK": {
+ "alert_emails": ["webrtc-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "linear",
+ "high": 10,
+ "n_buckets": 9,
+ "description": "Number of Video tracks sent simultaneously"
+ },
+ "WEBRTC_MAX_VIDEO_RECEIVE_TRACK": {
+ "alert_emails": ["webrtc-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "linear",
+ "high": 20,
+ "n_buckets": 19,
+ "description": "Number of Video tracks received simultaneously"
+ },
+ "WEBRTC_MAX_AUDIO_SEND_TRACK": {
+ "alert_emails": ["webrtc-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "linear",
+ "high": 20,
+ "n_buckets": 19,
+ "description": "Number of Audio tracks sent simultaneously"
+ },
+ "WEBRTC_MAX_AUDIO_RECEIVE_TRACK": {
+ "alert_emails": ["webrtc-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "linear",
+ "high": 30,
+ "n_buckets": 29,
+ "description": "Number of Audio tracks received simultaneously"
+ },
+ "WEBRTC_DATACHANNEL_NEGOTIATED": {
+ "alert_emails": ["webrtc-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "boolean",
+ "description": "Was DataChannels negotiated"
+ },
+ "WEBRTC_CALL_TYPE": {
+ "alert_emails": ["webrtc-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 8,
+ "description": "Type of call: (Bitmask) Audio = 1, Video = 2, DataChannels = 4"
+ },
+ "DEVTOOLS_TOOLBOX_OPENED_COUNT": {
+ "alert_emails": ["dev-developer-tools@lists.mozilla.org"],
+ "expires_in_version": "never",
+ "kind": "count",
+ "bug_numbers": [1247985],
+ "description": "Number of times the DevTools toolbox has been opened.",
+ "releaseChannelCollection": "opt-out"
+ },
+ "DEVTOOLS_OPTIONS_OPENED_COUNT": {
+ "alert_emails": ["dev-developer-tools@lists.mozilla.org"],
+ "expires_in_version": "never",
+ "kind": "count",
+ "bug_numbers": [1247985],
+ "description": "Number of times the DevTools options panel has been opened.",
+ "releaseChannelCollection": "opt-out"
+ },
+ "DEVTOOLS_WEBCONSOLE_OPENED_COUNT": {
+ "alert_emails": ["dev-developer-tools@lists.mozilla.org"],
+ "expires_in_version": "never",
+ "kind": "count",
+ "bug_numbers": [1247985],
+ "description": "Number of times the DevTools Web Console has been opened.",
+ "releaseChannelCollection": "opt-out"
+ },
+ "DEVTOOLS_BROWSERCONSOLE_OPENED_COUNT": {
+ "alert_emails": ["dev-developer-tools@lists.mozilla.org"],
+ "expires_in_version": "never",
+ "kind": "count",
+ "bug_numbers": [1247985],
+ "description": "Number of times the DevTools Browser Console has been opened.",
+ "releaseChannelCollection": "opt-out"
+ },
+ "DEVTOOLS_INSPECTOR_OPENED_COUNT": {
+ "alert_emails": ["dev-developer-tools@lists.mozilla.org"],
+ "expires_in_version": "never",
+ "kind": "count",
+ "bug_numbers": [1247985],
+ "description": "Number of times the DevTools Inspector has been opened.",
+ "releaseChannelCollection": "opt-out"
+ },
+ "DEVTOOLS_RULEVIEW_OPENED_COUNT": {
+ "alert_emails": ["dev-developer-tools@lists.mozilla.org"],
+ "expires_in_version": "never",
+ "kind": "count",
+ "bug_numbers": [1247985],
+ "description": "Number of times the DevTools Rule View has been opened.",
+ "releaseChannelCollection": "opt-out"
+ },
+ "DEVTOOLS_COMPUTEDVIEW_OPENED_COUNT": {
+ "alert_emails": ["dev-developer-tools@lists.mozilla.org"],
+ "expires_in_version": "never",
+ "kind": "count",
+ "bug_numbers": [1247985],
+ "description": "Number of times the DevTools Computed View has been opened.",
+ "releaseChannelCollection": "opt-out"
+ },
+ "DEVTOOLS_FONTINSPECTOR_OPENED_COUNT": {
+ "alert_emails": ["dev-developer-tools@lists.mozilla.org"],
+ "expires_in_version": "never",
+ "kind": "count",
+ "bug_numbers": [1247985],
+ "description": "Number of times the DevTools Font Inspector has been opened.",
+ "releaseChannelCollection": "opt-out"
+ },
+ "DEVTOOLS_ANIMATIONINSPECTOR_OPENED_COUNT": {
+ "alert_emails": ["dev-developer-tools@lists.mozilla.org"],
+ "expires_in_version": "never",
+ "kind": "count",
+ "bug_numbers": [1247985],
+ "description": "Number of times the DevTools Animation Inspector has been opened.",
+ "releaseChannelCollection": "opt-out"
+ },
+ "DEVTOOLS_JSDEBUGGER_OPENED_COUNT": {
+ "alert_emails": ["dev-developer-tools@lists.mozilla.org"],
+ "expires_in_version": "never",
+ "kind": "count",
+ "bug_numbers": [1247985],
+ "description": "Number of times the DevTools Debugger has been opened.",
+ "releaseChannelCollection": "opt-out"
+ },
+ "DEVTOOLS_JSBROWSERDEBUGGER_OPENED_COUNT": {
+ "alert_emails": ["dev-developer-tools@lists.mozilla.org"],
+ "expires_in_version": "never",
+ "kind": "count",
+ "bug_numbers": [1247985],
+ "description": "Number of times the DevTools Browser Debugger has been opened.",
+ "releaseChannelCollection": "opt-out"
+ },
+ "DEVTOOLS_STYLEEDITOR_OPENED_COUNT": {
+ "alert_emails": ["dev-developer-tools@lists.mozilla.org"],
+ "expires_in_version": "never",
+ "kind": "count",
+ "bug_numbers": [1247985],
+ "description": "Number of times the DevTools Style Editor has been opened.",
+ "releaseChannelCollection": "opt-out"
+ },
+ "DEVTOOLS_SHADEREDITOR_OPENED_COUNT": {
+ "alert_emails": ["dev-developer-tools@lists.mozilla.org"],
+ "expires_in_version": "never",
+ "kind": "count",
+ "bug_numbers": [1247985],
+ "description": "Number of times the DevTools Shader Editor has been opened.",
+ "releaseChannelCollection": "opt-out"
+ },
+ "DEVTOOLS_WEBAUDIOEDITOR_OPENED_COUNT": {
+ "alert_emails": ["dev-developer-tools@lists.mozilla.org"],
+ "expires_in_version": "never",
+ "kind": "count",
+ "bug_numbers": [1247985],
+ "description": "Number of times the DevTools Web Audio Editor has been opened.",
+ "releaseChannelCollection": "opt-out"
+ },
+ "DEVTOOLS_CANVASDEBUGGER_OPENED_COUNT": {
+ "alert_emails": ["dev-developer-tools@lists.mozilla.org"],
+ "expires_in_version": "never",
+ "kind": "count",
+ "bug_numbers": [1247985],
+ "description": "Number of times the DevTools Canvas Debugger has been opened.",
+ "releaseChannelCollection": "opt-out"
+ },
+ "DEVTOOLS_JSPROFILER_OPENED_COUNT": {
+ "alert_emails": ["dev-developer-tools@lists.mozilla.org"],
+ "expires_in_version": "never",
+ "kind": "count",
+ "bug_numbers": [1247985],
+ "description": "Number of times the DevTools JS Profiler has been opened.",
+ "releaseChannelCollection": "opt-out"
+ },
+ "DEVTOOLS_MEMORY_OPENED_COUNT": {
+ "alert_emails": ["dev-developer-tools@lists.mozilla.org"],
+ "expires_in_version": "never",
+ "kind": "count",
+ "bug_numbers": [1247985],
+ "description": "Number of times the DevTools Memory Tool has been opened.",
+ "releaseChannelCollection": "opt-out"
+ },
+ "DEVTOOLS_NETMONITOR_OPENED_COUNT": {
+ "alert_emails": ["dev-developer-tools@lists.mozilla.org"],
+ "expires_in_version": "never",
+ "kind": "count",
+ "bug_numbers": [1247985],
+ "description": "Number of times the DevTools Network Monitor has been opened.",
+ "releaseChannelCollection": "opt-out"
+ },
+ "DEVTOOLS_STORAGE_OPENED_COUNT": {
+ "alert_emails": ["dev-developer-tools@lists.mozilla.org"],
+ "expires_in_version": "never",
+ "kind": "count",
+ "bug_numbers": [1247985],
+ "description": "Number of times the DevTools Storage Inspector has been opened.",
+ "releaseChannelCollection": "opt-out"
+ },
+ "DEVTOOLS_PAINTFLASHING_OPENED_COUNT": {
+ "alert_emails": ["dev-developer-tools@lists.mozilla.org"],
+ "expires_in_version": "never",
+ "kind": "count",
+ "bug_numbers": [1247985],
+ "description": "Number of times the DevTools Paint Flashing has been opened via the toolbox button.",
+ "releaseChannelCollection": "opt-out"
+ },
+ "DEVTOOLS_TILT_OPENED_COUNT": {
+ "alert_emails": ["dev-developer-tools@lists.mozilla.org"],
+ "expires_in_version": "never",
+ "kind": "count",
+ "bug_numbers": [1247985],
+ "description": "Number of times the DevTools Tilt has been opened via the toolbox button.",
+ "releaseChannelCollection": "opt-out"
+ },
+ "DEVTOOLS_SCRATCHPAD_OPENED_COUNT": {
+ "alert_emails": ["dev-developer-tools@lists.mozilla.org"],
+ "expires_in_version": "never",
+ "kind": "count",
+ "bug_numbers": [1247985],
+ "description": "Number of times the DevTools Scratchpad toolbox panel has been opened.",
+ "releaseChannelCollection": "opt-out"
+ },
+ "DEVTOOLS_SCRATCHPAD_WINDOW_OPENED_COUNT": {
+ "alert_emails": ["dev-developer-tools@lists.mozilla.org"],
+ "expires_in_version": "never",
+ "kind": "count",
+ "bug_numbers": [1214352, 1247985],
+ "description": "Number of times the DevTools Scratchpad standalone window has been opened.",
+ "releaseChannelCollection": "opt-out"
+ },
+ "DEVTOOLS_RESPONSIVE_OPENED_COUNT": {
+ "alert_emails": ["dev-developer-tools@lists.mozilla.org"],
+ "expires_in_version": "never",
+ "kind": "count",
+ "bug_numbers": [1247985],
+ "description": "Number of times the DevTools Responsive Design Mode tool has been opened.",
+ "releaseChannelCollection": "opt-out"
+ },
+ "DEVTOOLS_EYEDROPPER_OPENED_COUNT": {
+ "alert_emails": ["dev-developer-tools@lists.mozilla.org"],
+ "expires_in_version": "never",
+ "kind": "count",
+ "bug_numbers": [1247985],
+ "description": "Number of times the DevTools Eyedropper tool has been opened.",
+ "releaseChannelCollection": "opt-out"
+ },
+ "DEVTOOLS_MENU_EYEDROPPER_OPENED_COUNT": {
+ "alert_emails": ["dev-developer-tools@lists.mozilla.org"],
+ "expires_in_version": "never",
+ "kind": "count",
+ "bug_numbers": [1247985],
+ "description": "Number of times the DevTools Eyedropper has been opened via the DevTools menu.",
+ "releaseChannelCollection": "opt-out"
+ },
+ "DEVTOOLS_PICKER_EYEDROPPER_OPENED_COUNT": {
+ "alert_emails": ["dev-developer-tools@lists.mozilla.org"],
+ "expires_in_version": "never",
+ "kind": "count",
+ "bug_numbers": [1247985],
+ "description": "Number of times the DevTools Eyedropper has been opened via the color picker.",
+ "releaseChannelCollection": "opt-out"
+ },
+ "DEVTOOLS_DEVELOPERTOOLBAR_OPENED_COUNT": {
+ "alert_emails": ["dev-developer-tools@lists.mozilla.org"],
+ "expires_in_version": "never",
+ "kind": "count",
+ "bug_numbers": [1247985],
+ "description": "Number of times the DevTools Developer Toolbar / GCLI has been opened.",
+ "releaseChannelCollection": "opt-out"
+ },
+ "DEVTOOLS_ABOUTDEBUGGING_OPENED_COUNT": {
+ "alert_emails": ["dev-developer-tools@lists.mozilla.org", "jan@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "count",
+ "bug_numbers": [1247985, 1204601],
+ "description": "Number of times about:debugging has been opened.",
+ "releaseChannelCollection": "opt-out"
+ },
+ "DEVTOOLS_WEBIDE_OPENED_COUNT": {
+ "alert_emails": ["dev-developer-tools@lists.mozilla.org"],
+ "expires_in_version": "never",
+ "kind": "count",
+ "bug_numbers": [1247985],
+ "description": "Number of times the DevTools WebIDE has been opened.",
+ "releaseChannelCollection": "opt-out"
+ },
+ "DEVTOOLS_WEBIDE_PROJECT_EDITOR_OPENED_COUNT": {
+ "alert_emails": ["dev-developer-tools@lists.mozilla.org"],
+ "expires_in_version": "never",
+ "kind": "count",
+ "bug_numbers": [1247985],
+ "description": "Number of times the DevTools WebIDE project editor has been opened.",
+ "releaseChannelCollection": "opt-out"
+ },
+ "DEVTOOLS_WEBIDE_PROJECT_EDITOR_SAVE_COUNT": {
+ "alert_emails": ["dev-developer-tools@lists.mozilla.org"],
+ "expires_in_version": "never",
+ "kind": "count",
+ "bug_numbers": [1247985],
+ "description": "Number of times a file has been saved in the DevTools WebIDE project editor.",
+ "releaseChannelCollection": "opt-out"
+ },
+ "DEVTOOLS_WEBIDE_NEW_PROJECT_COUNT": {
+ "alert_emails": ["dev-developer-tools@lists.mozilla.org"],
+ "expires_in_version": "never",
+ "kind": "count",
+ "bug_numbers": [1247985],
+ "description": "Number of times a new project has been created in the DevTools WebIDE.",
+ "releaseChannelCollection": "opt-out"
+ },
+ "DEVTOOLS_WEBIDE_IMPORT_PROJECT_COUNT": {
+ "alert_emails": ["dev-developer-tools@lists.mozilla.org"],
+ "expires_in_version": "never",
+ "kind": "count",
+ "bug_numbers": [1247985],
+ "description": "Number of times a project has been imported into the DevTools WebIDE.",
+ "releaseChannelCollection": "opt-out"
+ },
+ "DEVTOOLS_CUSTOM_OPENED_COUNT": {
+ "alert_emails": ["dev-developer-tools@lists.mozilla.org"],
+ "expires_in_version": "never",
+ "kind": "count",
+ "bug_numbers": [1247985],
+ "description": "Number of times a custom developer tool has been opened.",
+ "releaseChannelCollection": "opt-out"
+ },
+ "DEVTOOLS_RELOAD_ADDON_INSTALLED_COUNT": {
+ "alert_emails": ["dev-developer-tools@lists.mozilla.org"],
+ "expires_in_version": "55",
+ "kind": "count",
+ "description": "Number of times the reload addon has been installed.",
+ "bug_numbers": [1248435]
+ },
+ "DEVTOOLS_RELOAD_ADDON_RELOAD_COUNT": {
+ "alert_emails": ["dev-developer-tools@lists.mozilla.org"],
+ "expires_in_version": "55",
+ "kind": "count",
+ "description": "Number of times the tools have been reloaded by the reload addon.",
+ "bug_numbers": [1248435]
+ },
+ "DEVTOOLS_TOOLBOX_TIME_ACTIVE_SECONDS": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000000,
+ "n_buckets": 100,
+ "description": "How long has the toolbox been active (seconds)"
+ },
+ "DEVTOOLS_OPTIONS_TIME_ACTIVE_SECONDS": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000000,
+ "n_buckets": 100,
+ "description": "How long has the options panel been active (seconds)"
+ },
+ "DEVTOOLS_WEBCONSOLE_TIME_ACTIVE_SECONDS": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000000,
+ "n_buckets": 100,
+ "description": "How long has the web console been active (seconds)"
+ },
+ "DEVTOOLS_BROWSERCONSOLE_TIME_ACTIVE_SECONDS": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000000,
+ "n_buckets": 100,
+ "description": "How long has the browser console been active (seconds)"
+ },
+ "DEVTOOLS_INSPECTOR_TIME_ACTIVE_SECONDS": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000000,
+ "n_buckets": 100,
+ "description": "How long has the inspector been active (seconds)"
+ },
+ "DEVTOOLS_RULEVIEW_TIME_ACTIVE_SECONDS": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000000,
+ "n_buckets": 100,
+ "description": "How long has the rule view been active (seconds)"
+ },
+ "DEVTOOLS_COMPUTEDVIEW_TIME_ACTIVE_SECONDS": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000000,
+ "n_buckets": 100,
+ "description": "How long has the computed view been active (seconds)"
+ },
+ "DEVTOOLS_FONTINSPECTOR_TIME_ACTIVE_SECONDS": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000000,
+ "n_buckets": 100,
+ "description": "How long has the font inspector been active (seconds)"
+ },
+ "DEVTOOLS_ANIMATIONINSPECTOR_TIME_ACTIVE_SECONDS": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000000,
+ "n_buckets": 100,
+ "description": "How long has the animation inspector been active (seconds)"
+ },
+ "DEVTOOLS_JSDEBUGGER_TIME_ACTIVE_SECONDS": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000000,
+ "n_buckets": 100,
+ "description": "How long has the JS debugger been active (seconds)"
+ },
+ "DEVTOOLS_JSBROWSERDEBUGGER_TIME_ACTIVE_SECONDS": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000000,
+ "n_buckets": 100,
+ "description": "How long has the JS browser debugger been active (seconds)"
+ },
+ "DEVTOOLS_STYLEEDITOR_TIME_ACTIVE_SECONDS": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000000,
+ "n_buckets": 100,
+ "description": "How long has the style editor been active (seconds)"
+ },
+ "DEVTOOLS_SHADEREDITOR_TIME_ACTIVE_SECONDS": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000000,
+ "n_buckets": 100,
+ "description": "How long has the Shader Editor been active (seconds)"
+ },
+ "DEVTOOLS_WEBAUDIOEDITOR_TIME_ACTIVE_SECONDS": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000000,
+ "n_buckets": 100,
+ "description": "How long has the Web Audio Editor been active (seconds)"
+ },
+ "DEVTOOLS_CANVASDEBUGGER_TIME_ACTIVE_SECONDS": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000000,
+ "n_buckets": 100,
+ "description": "How long has the Canvas Debugger been active (seconds)"
+ },
+ "DEVTOOLS_JSPROFILER_TIME_ACTIVE_SECONDS": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000000,
+ "n_buckets": 100,
+ "description": "How long has the JS profiler been active (seconds)"
+ },
+ "DEVTOOLS_MEMORY_TIME_ACTIVE_SECONDS": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000000,
+ "n_buckets": 100,
+ "description": "How long has the Memory Tool been active (seconds)"
+ },
+ "DEVTOOLS_NETMONITOR_TIME_ACTIVE_SECONDS": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000000,
+ "n_buckets": 100,
+ "description": "How long has the network monitor been active (seconds)"
+ },
+ "DEVTOOLS_STORAGE_TIME_ACTIVE_SECONDS": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000000,
+ "n_buckets": 100,
+ "description": "How long has the storage inspector been active (seconds)"
+ },
+ "DEVTOOLS_PAINTFLASHING_TIME_ACTIVE_SECONDS": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000000,
+ "n_buckets": 100,
+ "description": "How long has paint flashing been active (seconds)"
+ },
+ "DEVTOOLS_TILT_TIME_ACTIVE_SECONDS": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000000,
+ "n_buckets": 100,
+ "description": "How long has Tilt been active (seconds)"
+ },
+ "DEVTOOLS_SCRATCHPAD_TIME_ACTIVE_SECONDS": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000000,
+ "n_buckets": 100,
+ "description": "How long has Scratchpad been active (seconds)"
+ },
+ "DEVTOOLS_SCRATCHPAD_WINDOW_TIME_ACTIVE_SECONDS": {
+ "alert_emails": ["dev-developer-tools@lists.mozilla.org"],
+ "expires_in_version": "50",
+ "kind": "exponential",
+ "high": 10000000,
+ "n_buckets": 100,
+ "description": "How long has Scratchpad standalone window been active (seconds)",
+ "bug_numbers": [1214352]
+ },
+ "DEVTOOLS_RESPONSIVE_TIME_ACTIVE_SECONDS": {
+ "expires_in_version": "55",
+ "kind": "exponential",
+ "high": 10000000,
+ "n_buckets": 100,
+ "bug_numbers": [1242057],
+ "description": "How long has the responsive view been active (seconds)",
+ "releaseChannelCollection": "opt-out"
+ },
+ "DEVTOOLS_DEVELOPERTOOLBAR_TIME_ACTIVE_SECONDS": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000000,
+ "n_buckets": 100,
+ "description": "How long has the developer toolbar been active (seconds)"
+ },
+ "DEVTOOLS_ABOUTDEBUGGING_TIME_ACTIVE_SECONDS": {
+ "alert_emails": ["dev-developer-tools@lists.mozilla.org", "jan@mozilla.com"],
+ "expires_in_version": "55",
+ "kind": "exponential",
+ "high": 10000000,
+ "n_buckets": 100,
+ "description": "How long has about:debugging been active? (seconds) (bug 1204601)"
+ },
+ "DEVTOOLS_WEBIDE_TIME_ACTIVE_SECONDS": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000000,
+ "n_buckets": 100,
+ "description": "How long has WebIDE been active (seconds)"
+ },
+ "DEVTOOLS_WEBIDE_PROJECT_EDITOR_TIME_ACTIVE_SECONDS": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000000,
+ "n_buckets": 100,
+ "description": "How long has WebIDE's project editor been active (seconds)"
+ },
+ "DEVTOOLS_CUSTOM_TIME_ACTIVE_SECONDS": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000000,
+ "n_buckets": 100,
+ "description": "How long has a custom developer tool been active (seconds)"
+ },
+ "DEVTOOLS_WEBIDE_CONNECTION_RESULT": {
+ "expires_in_version": "never",
+ "kind": "boolean",
+ "description": "Did WebIDE runtime connection succeed?"
+ },
+ "DEVTOOLS_WEBIDE_USB_CONNECTION_RESULT": {
+ "expires_in_version": "never",
+ "kind": "boolean",
+ "description": "Did WebIDE USB runtime connection succeed?"
+ },
+ "DEVTOOLS_WEBIDE_WIFI_CONNECTION_RESULT": {
+ "expires_in_version": "never",
+ "kind": "boolean",
+ "description": "Did WebIDE WiFi runtime connection succeed?"
+ },
+ "DEVTOOLS_WEBIDE_SIMULATOR_CONNECTION_RESULT": {
+ "expires_in_version": "never",
+ "kind": "boolean",
+ "description": "Did WebIDE simulator runtime connection succeed?"
+ },
+ "DEVTOOLS_WEBIDE_REMOTE_CONNECTION_RESULT": {
+ "expires_in_version": "never",
+ "kind": "boolean",
+ "description": "Did WebIDE remote runtime connection succeed?"
+ },
+ "DEVTOOLS_WEBIDE_LOCAL_CONNECTION_RESULT": {
+ "expires_in_version": "never",
+ "kind": "boolean",
+ "description": "Did WebIDE local runtime connection succeed?"
+ },
+ "DEVTOOLS_WEBIDE_OTHER_CONNECTION_RESULT": {
+ "expires_in_version": "never",
+ "kind": "boolean",
+ "description": "Did WebIDE other runtime connection succeed?"
+ },
+ "DEVTOOLS_WEBIDE_CONNECTION_TIME_SECONDS": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000000,
+ "n_buckets": 100,
+ "description": "How long was WebIDE connected to a runtime (seconds)?"
+ },
+ "DEVTOOLS_WEBIDE_CONNECTION_PLAY_USED": {
+ "expires_in_version": "never",
+ "kind": "boolean",
+ "description": "Was WebIDE's play button used during this runtime connection?"
+ },
+ "DEVTOOLS_WEBIDE_CONNECTION_DEBUG_USED": {
+ "expires_in_version": "never",
+ "kind": "boolean",
+ "description": "Was WebIDE's debug button used during this runtime connection?"
+ },
+ "DEVTOOLS_WEBIDE_CONNECTED_RUNTIME_TYPE": {
+ "expires_in_version": "never",
+ "kind": "boolean",
+ "keyed": true,
+ "description": "What runtime type did WebIDE connect to?"
+ },
+ "DEVTOOLS_WEBIDE_CONNECTED_RUNTIME_ID": {
+ "expires_in_version": "never",
+ "kind": "boolean",
+ "keyed": true,
+ "description": "What runtime ID did WebIDE connect to?"
+ },
+ "DEVTOOLS_WEBIDE_CONNECTED_RUNTIME_PROCESSOR": {
+ "expires_in_version": "never",
+ "kind": "boolean",
+ "keyed": true,
+ "description": "What runtime processor did WebIDE connect to?"
+ },
+ "DEVTOOLS_WEBIDE_CONNECTED_RUNTIME_OS": {
+ "expires_in_version": "never",
+ "kind": "boolean",
+ "keyed": true,
+ "description": "What runtime OS did WebIDE connect to?"
+ },
+ "DEVTOOLS_WEBIDE_CONNECTED_RUNTIME_PLATFORM_VERSION": {
+ "expires_in_version": "never",
+ "kind": "boolean",
+ "keyed": true,
+ "description": "What runtime platform version did WebIDE connect to?"
+ },
+ "DEVTOOLS_WEBIDE_CONNECTED_RUNTIME_APP_TYPE": {
+ "expires_in_version": "never",
+ "kind": "boolean",
+ "keyed": true,
+ "description": "What runtime app type did WebIDE connect to?"
+ },
+ "DEVTOOLS_WEBIDE_CONNECTED_RUNTIME_VERSION": {
+ "expires_in_version": "never",
+ "kind": "boolean",
+ "keyed": true,
+ "description": "What runtime version did WebIDE connect to?"
+ },
+ "DEVTOOLS_OS_ENUMERATED_PER_USER": {
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 13,
+    "description": "OS of DevTools user (0:Windows XP, 1:Windows Vista, 2:Windows 7, 3:Windows 8, 4:Windows 8.1, 5:OSX, 6:Linux, 7:Windows 10, 8:reserved, 9:reserved, 10:reserved, 11:reserved, 12:other)"
+ },
+ "DEVTOOLS_OS_IS_64_BITS_PER_USER": {
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 3,
+ "description": "OS bit size of DevTools user (0:32bit, 1:64bit, 2:128bit)"
+ },
+ "DEVTOOLS_SCREEN_RESOLUTION_ENUMERATED_PER_USER": {
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 13,
+    "description": "Screen resolution of DevTools user (0:lower, 1:800x600, 2:1024x768, 3:1280x800, 4:1280x1024, 5:1366x768, 6:1440x900, 7:1920x1080, 8:2560x1440, 9:2560x1600, 10:2880x1800, 11:other, 12:higher)"
+ },
+ "DEVTOOLS_TABS_OPEN_PEAK_LINEAR": {
+ "expires_in_version": "never",
+ "kind": "linear",
+ "high": 101,
+ "n_buckets": 100,
+ "description": "The peak number of open tabs in all windows for a session for devtools users."
+ },
+ "DEVTOOLS_TABS_OPEN_AVERAGE_LINEAR": {
+ "expires_in_version": "never",
+ "kind": "linear",
+ "high": 101,
+ "n_buckets": 100,
+ "description": "The mean number of open tabs in all windows for a session for devtools users."
+ },
+ "DEVTOOLS_TABS_PINNED_PEAK_LINEAR": {
+ "expires_in_version": "never",
+ "kind": "linear",
+ "high": 101,
+ "n_buckets": 100,
+ "description": "The peak number of pinned tabs (app tabs) in all windows for a session for devtools users."
+ },
+ "DEVTOOLS_TABS_PINNED_AVERAGE_LINEAR": {
+ "expires_in_version": "never",
+ "kind": "linear",
+ "high": 101,
+ "n_buckets": 100,
+ "description": "The mean number of pinned tabs (app tabs) in all windows for a session for devtools users."
+ },
+ "DEVTOOLS_SAVE_HEAP_SNAPSHOT_MS": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 100000,
+ "n_buckets": 1000,
+ "description": "The time (in milliseconds) that it took to save a heap snapshot in mozilla::devtools::ChromeUtils::SaveHeapSnapshot."
+ },
+ "DEVTOOLS_READ_HEAP_SNAPSHOT_MS": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 100000,
+ "n_buckets": 1000,
+ "description": "The time (in milliseconds) that it took to read a heap snapshot in mozilla::devtools::ChromeUtils::ReadHeapSnapshot."
+ },
+ "DEVTOOLS_HEAP_SNAPSHOT_NODE_COUNT": {
+ "expires_in_version": "never",
+ "kind": "linear",
+ "high": 10000000,
+ "n_buckets": 10000,
+ "description": "The number of nodes serialized into a heap snapshot."
+ },
+ "DEVTOOLS_HEAP_SNAPSHOT_EDGE_COUNT": {
+ "expires_in_version": "never",
+ "kind": "linear",
+ "high": 10000000,
+ "n_buckets": 10000,
+ "description": "The number of edges serialized into a heap snapshot."
+ },
+ "DEVTOOLS_PERFTOOLS_RECORDING_COUNT": {
+ "expires_in_version": "never",
+ "kind": "count",
+ "description": "Incremented whenever a performance tool recording is completed."
+ },
+ "DEVTOOLS_PERFTOOLS_CONSOLE_RECORDING_COUNT": {
+ "expires_in_version": "never",
+ "kind": "count",
+ "description": "Incremented whenever a performance tool recording is completed that was initiated via console.profile."
+ },
+ "DEVTOOLS_PERFTOOLS_RECORDING_IMPORT_FLAG": {
+ "expires_in_version": "never",
+ "kind": "flag",
+ "description": "When a user imports a recording in the performance tool."
+ },
+ "DEVTOOLS_PERFTOOLS_RECORDING_EXPORT_FLAG": {
+ "expires_in_version": "never",
+ "kind": "flag",
+    "description": "When a user exports a recording in the performance tool."
+ },
+ "DEVTOOLS_PERFTOOLS_RECORDING_FEATURES_USED": {
+ "expires_in_version": "never",
+ "kind": "boolean",
+ "keyed": true,
+ "description": "When a user starts a recording with specific recording options, keyed by feature name (withMarkers, withAllocations, etc.)."
+ },
+ "DEVTOOLS_PERFTOOLS_RECORDING_DURATION_MS": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 600000,
+ "n_buckets": 20,
+ "description": "The length of a duration in MS of a performance tool recording."
+ },
+ "DEVTOOLS_PERFTOOLS_SELECTED_VIEW_MS": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "keyed": true,
+ "high": 600000,
+ "n_buckets": 20,
+ "description": "The amount of time spent in a specific performance tool view, keyed by view name (waterfall, js-calltree, js-flamegraph, etc)."
+ },
+ "DEVTOOLS_JAVASCRIPT_ERROR_DISPLAYED": {
+ "alert_emails": ["mphillips@mozilla.com"],
+ "bug_numbers": [1255133],
+ "expires_in_version": "55",
+ "kind": "boolean",
+ "keyed": true,
+ "description": "Measures whether a particular JavaScript error has been displayed in the webconsole."
+ },
+ "DEVTOOLS_TOOLBOX_HOST": {
+ "alert_emails": ["dev-developer-tools@lists.mozilla.org"],
+ "expires_in_version": "58",
+ "kind": "enumerated",
+ "bug_numbers": [1205845],
+ "n_values": 9,
+ "releaseChannelCollection": "opt-out",
+ "description": "Records DevTools toolbox host each time the toolbox is opened and when the host is changed (0:Bottom, 1:Side, 2:Window, 3:Custom, 9:Unknown)."
+ },
+ "VIEW_SOURCE_IN_BROWSER_OPENED_BOOLEAN": {
+ "alert_emails": ["mozilla-dev-developer-tools@lists.mozilla.org", "jryans@mozilla.com"],
+ "expires_in_version": "53",
+ "kind": "boolean",
+ "description": "How many times has view source in browser / tab been opened?"
+ },
+ "VIEW_SOURCE_IN_WINDOW_OPENED_BOOLEAN": {
+ "alert_emails": ["mozilla-dev-developer-tools@lists.mozilla.org", "jryans@mozilla.com"],
+ "expires_in_version": "53",
+ "kind": "boolean",
+ "description": "How many times has view source in a new window been opened?"
+ },
+ "VIEW_SOURCE_EXTERNAL_RESULT_BOOLEAN": {
+ "alert_emails": ["mozilla-dev-developer-tools@lists.mozilla.org", "jryans@mozilla.com"],
+ "expires_in_version": "53",
+ "kind": "boolean",
+ "description": "How many times has view source in an external editor been opened, and did it succeed?"
+ },
+ "BROWSER_IS_USER_DEFAULT": {
+ "expires_in_version": "never",
+ "kind": "boolean",
+ "releaseChannelCollection": "opt-out",
+ "description": "The result of the startup default desktop browser check."
+ },
+ "BROWSER_IS_USER_DEFAULT_ERROR": {
+ "expires_in_version": "never",
+ "kind": "boolean",
+ "releaseChannelCollection": "opt-out",
+ "description": "True if the browser was unable to determine if the browser was set as default."
+ },
+ "BROWSER_SET_DEFAULT_DIALOG_PROMPT_RAWCOUNT": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 250,
+ "n_buckets": 15,
+ "releaseChannelCollection": "opt-out",
+ "description": "The number of times that a profile has seen the 'Set Default Browser' dialog."
+ },
+ "BROWSER_SET_DEFAULT_ALWAYS_CHECK": {
+ "expires_in_version": "never",
+ "kind": "boolean",
+ "releaseChannelCollection": "opt-out",
+ "description": "True if the profile has `browser.shell.checkDefaultBrowser` set to true."
+ },
+ "BROWSER_SET_DEFAULT_RESULT": {
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 4,
+ "releaseChannelCollection": "opt-out",
+ "description": "Result of the Set Default Browser dialog (0=Use Firefox + 'Always perform check' unchecked, 1=Use Firefox + 'Always perform check' checked, 2=Not Now + 'Always perform check' unchecked, 3=Not Now + 'Always perform check' checked)"
+ },
+ "BROWSER_SET_DEFAULT_ERROR": {
+ "expires_in_version": "never",
+ "kind": "boolean",
+ "releaseChannelCollection": "opt-out",
+ "description": "True if the browser was unable to set Firefox as the default browser"
+ },
+ "BROWSER_SET_DEFAULT_TIME_TO_COMPLETION_SECONDS": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 500,
+ "n_buckets": 15,
+ "releaseChannelCollection": "opt-out",
+ "description": "Time to successfully set Firefox as the default browser after clicking 'Set Firefox as Default'. Should be near-instant in some environments, others require user interaction. Measured in seconds."
+ },
+ "BROWSER_IS_ASSIST_DEFAULT": {
+ "expires_in_version": "never",
+ "kind": "boolean",
+ "description": "The result of the default browser check for assist intent."
+ },
+ "MIXED_CONTENT_PAGE_LOAD": {
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 4,
+ "description": "Accumulates type of content per page load (0=no mixed or non-secure page, 1=mixed passive, 2=mixed active, 3=mixed passive and mixed active)"
+ },
+ "MIXED_CONTENT_UNBLOCK_COUNTER": {
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 3,
+ "description": "A simple counter of daily mixed-content unblock operations and top documents loaded"
+ },
+ "MIXED_CONTENT_HSTS": {
+ "alert_emails": ["seceng@mozilla.org"],
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 10,
+ "description": "How often would blocked mixed content be allowed if HSTS upgrades were allowed? 0=display/no-HSTS, 1=display/HSTS, 2=active/no-HSTS, 3=active/HSTS"
+ },
+ "MIXED_CONTENT_HSTS_PRIMING": {
+ "alert_emails": ["seceng@mozilla.org"],
+ "bug_numbers": [1246540],
+ "expires_in_version": "60",
+ "kind": "enumerated",
+ "n_values": 16,
+ "description": "How often would blocked mixed content be allowed if HSTS upgrades were allowed, including how often would we send an HSTS priming request? 0=display/no-HSTS, 1=display/HSTS, 2=active/no-HSTS, 3=active/HSTS, 4=display/no-HSTS-priming, 5=display/do-HSTS-priming, 6=active/no-HSTS-priming, 7=active/do-HSTS-priming"
+ },
+ "MIXED_CONTENT_HSTS_PRIMING_RESULT": {
+ "alert_emails": ["seceng@mozilla.org"],
+ "bug_numbers": [1246540],
+ "expires_in_version": "60",
+ "kind": "enumerated",
+ "n_values": 16,
+ "description": "How often do we get back an HSTS priming result which upgrades the connection to HTTPS? 0=cached (no upgrade), 1=cached (do upgrade), 2=cached (blocked), 3=already upgraded, 4=priming succeeded, 5=priming succeeded (block due to pref), 6=priming succeeded (no upgrade due to pref), 7=priming failed (block), 8=priming failed (accept)"
+ },
+ "HSTS_PRIMING_REQUEST_DURATION": {
+ "alert_emails": ["seceng-telemetry@mozilla.org"],
+ "bug_numbers": [1311893],
+ "expires_in_version": "58",
+ "kind": "exponential",
+ "low": 100,
+ "high": 30000,
+ "n_buckets": 100,
+ "keyed": true,
+ "description": "The amount of time required for HSTS priming requests (ms), keyed by success or failure of the priming request. (success, failure)"
+ },
+ "MIXED_CONTENT_OBJECT_SUBREQUEST": {
+ "alert_emails": ["seceng@mozilla.org"],
+ "bug_numbers": [1244116],
+ "expires_in_version": "55",
+ "kind": "enumerated",
+ "n_values": 10,
+ "description": "How often objects load insecure content on secure pages (counting pages, not objects). 0=pages with no mixed object subrequests, 1=pages with mixed object subrequests"
+ },
+ "COOKIE_SCHEME_SECURITY": {
+ "alert_emails": ["seceng@mozilla.org"],
+ "expires_in_version": "55",
+ "kind": "enumerated",
+ "n_values": 10,
+ "releaseChannelCollection": "opt-out",
+ "description": "How often are secure cookies set from non-secure origins, and vice-versa? 0=nonsecure/http, 1=nonsecure/https, 2=secure/http, 3=secure/https"
+ },
+ "COOKIE_LEAVE_SECURE_ALONE": {
+ "alert_emails": ["seceng-telemetry@mozilla.com"],
+ "bug_numbers": [976073],
+ "expires_in_version": "57",
+ "kind": "enumerated",
+ "n_values": 10,
+ "releaseChannelCollection": "opt-out",
+ "description": "Measuring the effects of draft-ietf-httpbis-cookie-alone blocking. 0=blocked http setting secure cookie; 1=blocked http downgrading secure cookie; 2=blocked evicting secure cookie; 3=evicting newer insecure cookie; 4=evicting the oldest insecure cookie; 5=evicting the preferred cookie; 6=evicting the secure blocked"
+ },
+ "NTLM_MODULE_USED_2": {
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 8,
+    "description": "The module used for the NTLM protocol (Windows_API, Kerberos, Samba_auth or Generic) and whether or not the authentication was used to connect to a proxy server. This data is collected only once per session (at first NTLM authentication); fixed version."
+ },
+ "FX_THUMBNAILS_BG_QUEUE_SIZE_ON_CAPTURE": {
+ "expires_in_version": "default",
+ "kind": "exponential",
+ "high": 100,
+ "n_buckets": 15,
+ "description": "BACKGROUND THUMBNAILS: Size of capture queue when a capture request is received"
+ },
+ "FX_THUMBNAILS_BG_CAPTURE_QUEUE_TIME_MS": {
+ "expires_in_version": "default",
+ "kind": "exponential",
+ "high": 300000,
+ "n_buckets": 20,
+ "description": "BACKGROUND THUMBNAILS: Time the capture request spent in the queue before being serviced (ms)"
+ },
+ "FX_THUMBNAILS_BG_CAPTURE_SERVICE_TIME_MS": {
+ "expires_in_version": "default",
+ "kind": "exponential",
+ "high": 30000,
+ "n_buckets": 20,
+ "description": "BACKGROUND THUMBNAILS: Time the capture took once it started and successfully completed (ms)"
+ },
+ "FX_THUMBNAILS_BG_CAPTURE_DONE_REASON_2": {
+ "expires_in_version": "default",
+ "kind": "enumerated",
+ "n_values": 10,
+ "description": "BACKGROUND THUMBNAILS: Reason the capture completed (see TEL_CAPTURE_DONE_* constants in BackgroundPageThumbs.jsm)"
+ },
+ "FX_THUMBNAILS_BG_CAPTURE_PAGE_LOAD_TIME_MS": {
+ "expires_in_version": "default",
+ "kind": "exponential",
+ "high": 60000,
+ "n_buckets": 20,
+ "description": "BACKGROUND THUMBNAILS: Time the capture's page load took (ms)"
+ },
+ "FX_THUMBNAILS_BG_CAPTURE_CANVAS_DRAW_TIME_MS": {
+ "expires_in_version": "default",
+ "kind": "exponential",
+ "high": 500,
+ "n_buckets": 15,
+ "description": "BACKGROUND THUMBNAILS: Time it took to draw the capture's window to canvas (ms)"
+ },
+ "NETWORK_CACHE_V2_MISS_TIME_MS": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000,
+ "n_buckets": 50,
+ "description": "Time spent to find out a cache entry file is missing"
+ },
+ "NETWORK_CACHE_V2_HIT_TIME_MS": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000,
+ "n_buckets": 50,
+ "description": "Time spent to open an existing file"
+ },
+ "NETWORK_CACHE_V1_TRUNCATE_TIME_MS": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000,
+ "n_buckets": 50,
+ "description": "Time spent to reopen an entry with OPEN_TRUNCATE"
+ },
+ "NETWORK_CACHE_V1_MISS_TIME_MS": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000,
+ "n_buckets": 50,
+ "description": "Time spent to find out a cache entry is missing"
+ },
+ "NETWORK_CACHE_V1_HIT_TIME_MS": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000,
+ "n_buckets": 50,
+ "description": "Time spent to open an existing cache entry"
+ },
+ "NETWORK_CACHE_V2_OUTPUT_STREAM_STATUS": {
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 7,
+ "description": "Final status of the CacheFileOutputStream (0=ok, 1=other error, 2=out of memory, 3=disk full, 4=file corrupted, 5=file not found, 6=binding aborted)"
+ },
+ "NETWORK_CACHE_V2_INPUT_STREAM_STATUS": {
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 7,
+ "description": "Final status of the CacheFileInputStream (0=ok, 1=other error, 2=out of memory, 3=disk full, 4=file corrupted, 5=file not found, 6=binding aborted)"
+ },
+ "NETWORK_CACHE_FS_TYPE": {
+ "expires_in_version": "42",
+ "kind": "enumerated",
+ "n_values": 5,
+ "description": "Type of FS that the cache is stored on (0=NTFS (Win), 1=FAT32 (Win), 2=FAT (Win), 3=other FS (Win), 4=other OS)"
+ },
+ "NETWORK_CACHE_SIZE_FULL_FAT": {
+ "expires_in_version": "42",
+ "kind": "linear",
+ "high": 500,
+ "n_buckets": 50,
+ "description": "Size (in MB) of a cache that reached a file count limit"
+ },
+ "NETWORK_CACHE_HIT_MISS_STAT_PER_CACHE_SIZE": {
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 40,
+ "description": "Hit/Miss count split by cache size in file count (0=Hit 0-5000, 1=Miss 0-5000, 2=Hit 5001-10000, ...)"
+ },
+ "NETWORK_CACHE_HIT_RATE_PER_CACHE_SIZE": {
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 400,
+ "description": "Hit rate for a specific cache size in file count. The hit rate is split into 20 buckets, the lower limit of the range in percents is 5*n/20. The cache size is divided into 20 ranges of length 5000, the lower limit of the range is 5000*(n%20)"
+ },
+ "NETWORK_CACHE_METADATA_FIRST_READ_TIME_MS": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000,
+ "n_buckets": 50,
+ "description": "Time spent to read the first part of the metadata from the cache entry file."
+ },
+ "NETWORK_CACHE_METADATA_SECOND_READ_TIME_MS": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000,
+ "n_buckets": 50,
+ "description": "Time spent to read the missing part of the metadata from the cache entry file."
+ },
+ "NETWORK_CACHE_METADATA_FIRST_READ_SIZE": {
+ "expires_in_version": "never",
+ "kind": "linear",
+ "high": 5119,
+ "n_buckets": 256,
+ "description": "Guessed size of the metadata that we read from the cache file as the first part."
+ },
+ "NETWORK_CACHE_METADATA_SIZE": {
+ "expires_in_version": "never",
+ "kind": "linear",
+ "high": 5119,
+ "n_buckets": 256,
+ "description": "Actual size of the metadata parsed from the disk."
+ },
+ "NETWORK_CACHE_HASH_STATS": {
+ "expires_in_version": "46",
+ "kind": "enumerated",
+ "n_values": 160,
+ "description": "The longest hash match between a newly added entry and all the existing entries."
+ },
+ "DATABASE_LOCKED_EXCEPTION": {
+ "expires_in_version": "42",
+ "kind": "enumerated",
+ "description": "Record database locks when opening one of Fennec's databases. The index corresponds to how many attempts, beginning with 0.",
+ "n_values": 5
+ },
+ "DATABASE_SUCCESSFUL_UNLOCK": {
+ "expires_in_version": "42",
+ "kind": "enumerated",
+ "description": "Record on which attempt we successfully unlocked a database. See DATABASE_LOCKED_EXCEPTION.",
+ "n_values": 5
+ },
+ "SSL_TLS13_INTOLERANCE_REASON_PRE": {
+ "alert_emails": ["seceng-telemetry@mozilla.com"],
+ "bug_numbers": [1250568],
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 64,
+ "description": "Potential TLS 1.3 intolerance, before considering historical info (see tlsIntoleranceTelemetryBucket() in nsNSSIOLayer.cpp)."
+ },
+ "SSL_TLS13_INTOLERANCE_REASON_POST": {
+ "alert_emails": ["seceng-telemetry@mozilla.com"],
+ "bug_numbers": [1250568],
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 64,
+ "description": "Potential TLS 1.3 intolerance, after considering historical info (see tlsIntoleranceTelemetryBucket() in nsNSSIOLayer.cpp)."
+ },
+ "SSL_TLS12_INTOLERANCE_REASON_PRE": {
+ "alert_emails": ["seceng-telemetry@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 64,
+ "description": "Potential TLS 1.2 intolerance, before considering historical info (see tlsIntoleranceTelemetryBucket() in nsNSSIOLayer.cpp)."
+ },
+ "SSL_TLS12_INTOLERANCE_REASON_POST": {
+ "alert_emails": ["seceng-telemetry@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 64,
+ "description": "Potential TLS 1.2 intolerance, after considering historical info (see tlsIntoleranceTelemetryBucket() in nsNSSIOLayer.cpp)."
+ },
+ "SSL_TLS11_INTOLERANCE_REASON_PRE": {
+ "alert_emails": ["seceng-telemetry@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 64,
+ "description": "Potential TLS 1.1 intolerance, before considering historical info (see tlsIntoleranceTelemetryBucket() in nsNSSIOLayer.cpp)."
+ },
+ "SSL_TLS11_INTOLERANCE_REASON_POST": {
+ "alert_emails": ["seceng-telemetry@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 64,
+ "description": "Potential TLS 1.1 intolerance, after considering historical info (see tlsIntoleranceTelemetryBucket() in nsNSSIOLayer.cpp)."
+ },
+ "SSL_TLS10_INTOLERANCE_REASON_PRE": {
+ "alert_emails": ["seceng-telemetry@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 64,
+ "description": "Potential TLS 1.0 intolerance, before considering historical info (see tlsIntoleranceTelemetryBucket() in nsNSSIOLayer.cpp)."
+ },
+ "SSL_TLS10_INTOLERANCE_REASON_POST": {
+ "alert_emails": ["seceng-telemetry@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 64,
+ "description": "Potential TLS 1.0 intolerance, after considering historical info (see tlsIntoleranceTelemetryBucket() in nsNSSIOLayer.cpp)."
+ },
+ "SSL_VERSION_FALLBACK_INAPPROPRIATE": {
+ "alert_emails": ["seceng-telemetry@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 64,
+ "description": "TLS/SSL version intolerance was falsely detected, server rejected handshake (see tlsIntoleranceTelemetryBucket() in nsNSSIOLayer.cpp)."
+ },
+ "SSL_WEAK_CIPHERS_FALLBACK": {
+ "alert_emails": ["seceng-telemetry@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 64,
+ "description": "Fallback attempted when server did not support any strong cipher suites"
+ },
+ "SSL_CIPHER_SUITE_FULL": {
+ "alert_emails": ["seceng-telemetry@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 128,
+ "description": "Negotiated cipher suite in full handshake (see key in HandshakeCallback in nsNSSCallbacks.cpp)"
+ },
+ "SSL_CIPHER_SUITE_RESUMED": {
+ "alert_emails": ["seceng-telemetry@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 128,
+ "description": "Negotiated cipher suite in resumed handshake (see key in HandshakeCallback in nsNSSCallbacks.cpp)"
+ },
+ "SSL_KEA_RSA_KEY_SIZE_FULL": {
+ "alert_emails": ["seceng-telemetry@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 24,
+ "description": "RSA KEA (TLS_RSA_*) key size in full handshake"
+ },
+ "SSL_KEA_DHE_KEY_SIZE_FULL": {
+ "alert_emails": ["seceng-telemetry@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 24,
+ "description": "DHE KEA (TLS_DHE_*) key size in full handshake"
+ },
+ "SSL_KEA_ECDHE_CURVE_FULL": {
+ "alert_emails": ["seceng-telemetry@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 36,
+ "description": "ECDHE KEA (TLS_ECDHE_*) curve (23=P-256, 24=P-384, 25=P-521) in full handshake"
+ },
+ "SSL_AUTH_ALGORITHM_FULL": {
+ "alert_emails": ["seceng-telemetry@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 16,
+ "description": "SSL Authentication Algorithm (null=0, rsa=1, dsa=2, ecdsa=4) in full handshake"
+ },
+ "SSL_AUTH_RSA_KEY_SIZE_FULL": {
+ "alert_emails": ["seceng-telemetry@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 24,
+ "description": "RSA signature key size for TLS_*_RSA_* in full handshake"
+ },
+ "SSL_AUTH_ECDSA_CURVE_FULL": {
+ "alert_emails": ["seceng-telemetry@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 36,
+ "description": "ECDSA signature curve for TLS_*_ECDSA_* in full handshake (23=P-256, 24=P-384, 25=P-521)"
+ },
+ "SSL_SYMMETRIC_CIPHER_FULL": {
+ "alert_emails": ["seceng-telemetry@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 32,
+ "description": "Symmetric cipher used in full handshake (null=0, rc4=1, 3des=4, aes-cbc=7, camellia=8, seed=9, aes-gcm=10)"
+ },
+ "SSL_SYMMETRIC_CIPHER_RESUMED": {
+ "alert_emails": ["seceng-telemetry@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 32,
+ "description": "Symmetric cipher used in resumed handshake (null=0, rc4=1, 3des=4, aes-cbc=7, camellia=8, seed=9, aes-gcm=10)"
+ },
+ "SSL_REASONS_FOR_NOT_FALSE_STARTING": {
+ "alert_emails": ["seceng-telemetry@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 512,
+ "description": "Bitmask of reasons we did not false start when libssl would have let us (see key in nsNSSCallbacks.cpp)"
+ },
+ "SSL_HANDSHAKE_TYPE": {
+ "alert_emails": ["seceng-telemetry@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 8,
+ "description": "Type of handshake (1=resumption, 2=false started, 3=chose not to false start, 4=not allowed to false start)"
+ },
+ "SSL_OCSP_STAPLING": {
+ "alert_emails": ["seceng-telemetry@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 8,
+ "description": "Status of OCSP stapling on this handshake (1=present, good; 2=none; 3=present, expired; 4=present, other error)"
+ },
+ "SSL_OCSP_MAY_FETCH": {
+ "alert_emails": ["seceng-telemetry@mozilla.com"],
+ "expires_in_version": "default",
+ "kind": "enumerated",
+ "n_values": 8,
+ "description": "For non-stapling cases, is OCSP fetching a possibility? (0=yes, 1=no because missing/invalid OCSP URI, 2=no because fetching disabled, 3=no because both)"
+ },
+ "SSL_CERT_ERROR_OVERRIDES": {
+ "alert_emails": ["seceng-telemetry@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 24,
+ "description": "Was a certificate error overridden on this handshake? What was it? (0=unknown error (indicating bug), 1=no, >1=a specific error)"
+ },
+ "SSL_CERT_VERIFICATION_ERRORS": {
+ "alert_emails": ["seceng@mozilla.org"],
+ "expires_in_version": "default",
+ "kind": "enumerated",
+ "n_values": 100,
+ "description": "If certificate verification failed in a TLS handshake, what was the error? (see MapCertErrorToProbeValue in security/manager/ssl/SSLServerCertVerification.cpp and the values in security/pkix/include/pkix/Result.h)"
+ },
+ "SSL_PERMANENT_CERT_ERROR_OVERRIDES": {
+ "alert_emails": ["seceng@mozilla.org"],
+ "expires_in_version": "default",
+ "kind": "exponential",
+ "high": 1024,
+ "n_buckets": 10,
+ "description": "How many permanent certificate overrides a user has stored."
+ },
+ "SSL_SCTS_ORIGIN": {
+ "alert_emails": ["seceng-telemetry@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 10,
+ "bug_numbers": [1293231],
+ "releaseChannelCollection": "opt-out",
+ "description": "Origin of Signed Certificate Timestamps received (1=Embedded, 2=TLS handshake extension, 3=Stapled OCSP response)"
+ },
+ "SSL_SCTS_PER_CONNECTION": {
+ "alert_emails": ["seceng-telemetry@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 10,
+ "bug_numbers": [1293231],
+ "releaseChannelCollection": "opt-out",
+ "description": "Histogram of Signed Certificate Timestamps per SSL connection, from all sources (embedded / OCSP Stapling / TLS handshake). Bucket 0 counts the cases when no SCTs were received, or none were extracted due to parsing errors."
+ },
+ "SSL_SCTS_VERIFICATION_STATUS": {
+ "alert_emails": ["seceng-telemetry@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 10,
+ "bug_numbers": [1293231],
+ "releaseChannelCollection": "opt-out",
+ "description": "Verification status of Signed Certificate Timestamps received (0=Decoding error, 1=SCT verified, 2=SCT from unknown log, 3=Invalid SCT signature, 4=SCT timestamp is in the future)"
+ },
+ "SSL_SERVER_AUTH_EKU": {
+ "alert_emails": ["seceng-telemetry@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 10,
+    "description": "Presence of the Server Authentication EKU in accepted SSL server certificates (0=No EKU, 1=EKU present and has id_kp_serverAuth, 2=EKU present and has id_kp_serverAuth as well as some other EKU, 3=EKU present but does not contain id_kp_serverAuth)"
+ },
+ "TELEMETRY_TEST_EXPIRED": {
+ "expires_in_version": "4.0a1",
+ "kind": "flag",
+ "description": "a testing histogram; not meant to be touched"
+ },
+ "TLS_ERROR_REPORT_UI": {
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 15,
+ "description": "User interaction with the TLS Error Reporter in about:neterror (0=Error seen, 1='auto' checked, 2='auto' unchecked, 3=Sent manually, 4=Sent automatically, 5=Send success, 6=Send failure, 7=Report section expanded)"
+ },
+ "CERT_OCSP_ENABLED": {
+ "alert_emails": ["seceng-telemetry@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "boolean",
+ "description": "Is OCSP fetching enabled? (pref security.OCSP.enabled)"
+ },
+ "CERT_OCSP_REQUIRED": {
+ "alert_emails": ["seceng-telemetry@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "boolean",
+ "description": "Is OCSP required when the cert has an OCSP URI? (pref security.OCSP.require)"
+ },
+ "OSFILE_WORKER_LAUNCH_MS": {
+ "expires_in_version": "default",
+ "kind": "exponential",
+ "description": "The duration between the instant the first message is sent to OS.File and the moment the OS.File worker starts executing JavaScript, in milliseconds",
+ "high": 5000,
+ "n_buckets": 10
+ },
+ "OSFILE_WORKER_READY_MS": {
+ "expires_in_version": "default",
+ "kind": "exponential",
+ "description": "The duration between the instant the first message is sent to OS.File and the moment the OS.File worker has finished executing its startup JavaScript and is ready to receive requests, in milliseconds",
+ "high": 5000,
+ "n_buckets": 10
+ },
+ "OSFILE_WRITEATOMIC_JANK_MS": {
+ "expires_in_version": "default",
+ "kind": "exponential",
+ "description": "The duration during which the main thread is blocked during a call to OS.File.writeAtomic, in milliseconds",
+ "high": 5000,
+ "n_buckets": 10
+ },
+ "CERT_EV_STATUS": {
+ "expires_in_version": "never",
+ "alert_emails": ["seceng@mozilla.org"],
+ "bug_numbers": [1254653],
+ "kind": "enumerated",
+ "n_values": 10,
+ "description": "EV status of a certificate, recorded on each TLS connection. 0=invalid, 1=DV, 2=EV"
+ },
+ "CERT_VALIDATION_SUCCESS_BY_CA": {
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 256,
+ "description": "Successful SSL server cert validations by CA (see RootHashes.inc for names of CAs)"
+ },
+ "CERT_PINNING_FAILURES_BY_CA": {
+ "alert_emails": ["pinning@mozilla.org"],
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 256,
+ "description": "Pinning failures by CA (see RootHashes.inc for names of CAs)"
+ },
+ "CERT_PINNING_RESULTS": {
+ "alert_emails": ["pinning@mozilla.org"],
+ "expires_in_version": "never",
+ "kind": "boolean",
+ "description": "Certificate pinning results (0 = failure, 1 = success)"
+ },
+ "CERT_PINNING_TEST_RESULTS": {
+ "alert_emails": ["pinning@mozilla.org"],
+ "expires_in_version": "never",
+ "kind": "boolean",
+ "description": "Certificate pinning test results (0 = failure, 1 = success)"
+ },
+ "CERT_PINNING_MOZ_RESULTS": {
+ "alert_emails": ["pinning@mozilla.org"],
+ "expires_in_version": "never",
+ "kind": "boolean",
+ "description": "Certificate pinning results for Mozilla sites (0 = failure, 1 = success)"
+ },
+ "CERT_PINNING_MOZ_TEST_RESULTS": {
+ "alert_emails": ["pinning@mozilla.org"],
+ "expires_in_version": "never",
+ "kind": "boolean",
+ "description": "Certificate pinning test results for Mozilla sites (0 = failure, 1 = success)"
+ },
+ "CERT_PINNING_MOZ_RESULTS_BY_HOST": {
+ "alert_emails": ["pinning@mozilla.org"],
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 512,
+ "description": "Certificate pinning results by host for Mozilla operational sites"
+ },
+ "CERT_PINNING_MOZ_TEST_RESULTS_BY_HOST": {
+ "alert_emails": ["pinning@mozilla.org"],
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 512,
+ "description": "Certificate pinning test results by host for Mozilla operational sites"
+ },
+ "CERT_CHAIN_KEY_SIZE_STATUS": {
+ "expires_in_version": "default",
+ "kind": "enumerated",
+ "n_values": 4,
+ "description": "Does enforcing a larger minimum RSA key size cause verification failures? 1 = no, 2 = yes, 3 = another error prevented finding a verified chain"
+ },
+ "CERT_CHAIN_SHA1_POLICY_STATUS": {
+ "expires_in_version": "default",
+ "kind": "enumerated",
+ "n_values": 6,
+ "description": "1 = No SHA1 signatures, 2 = SHA1 certificates issued by an imported root, 3 = SHA1 certificates issued before 2016, 4 = SHA1 certificates issued after 2015, 5 = another error prevented successful verification"
+ },
+ "WEAVE_CONFIGURED": {
+ "expires_in_version": "default",
+ "kind": "boolean",
+ "description": "If any version of Firefox Sync is configured for this device",
+ "releaseChannelCollection": "opt-out"
+ },
+ "WEAVE_CONFIGURED_MASTER_PASSWORD": {
+ "expires_in_version": "never",
+ "kind": "boolean",
+ "description": "If both Firefox Sync and Master Password are configured for this device"
+ },
+ "WEAVE_START_COUNT": {
+ "expires_in_version": "default",
+ "kind": "exponential",
+ "high": 1000,
+ "n_buckets": 10,
+ "description": "The number of times a sync started in this session"
+ },
+ "WEAVE_COMPLETE_SUCCESS_COUNT": {
+ "expires_in_version": "default",
+ "kind": "exponential",
+ "high": 1000,
+ "n_buckets": 10,
+ "description": "The number of times a sync successfully completed in this session"
+ },
+ "WEAVE_WIPE_SERVER_SUCCEEDED": {
+ "expires_in_version": "55",
+ "alert_emails": ["fx-team@mozilla.com"],
+ "kind": "boolean",
+ "bug_numbers": [1241699],
+ "description": "Stores 1 if a wipeServer call succeeded, and 0 if it failed."
+ },
+ "WEBCRYPTO_EXTRACTABLE_IMPORT": {
+ "expires_in_version": "never",
+ "kind": "boolean",
+ "description": "Whether an imported key was marked as extractable"
+ },
+ "WEBCRYPTO_EXTRACTABLE_GENERATE": {
+ "expires_in_version": "never",
+ "kind": "boolean",
+ "description": "Whether a generated key was marked as extractable"
+ },
+ "WEBCRYPTO_EXTRACTABLE_ENC": {
+ "expires_in_version": "never",
+ "kind": "boolean",
+ "description": "Whether a key used in an encrypt/decrypt operation was marked as extractable"
+ },
+ "WEBCRYPTO_EXTRACTABLE_SIG": {
+ "expires_in_version": "never",
+ "kind": "boolean",
+ "description": "Whether a key used in a sign/verify operation was marked as extractable"
+ },
+ "WEBCRYPTO_RESOLVED": {
+ "expires_in_version": "never",
+ "kind": "boolean",
+ "description": "Whether a promise created by WebCrypto was resolved (vs rejected)"
+ },
+ "WEBCRYPTO_METHOD": {
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 20,
+ "description": "Methods invoked under window.crypto.subtle (0=encrypt, 1=decrypt, 2=sign, 3=verify, 4=digest, 5=generateKey, 6=deriveKey, 7=deriveBits, 8=importKey, 9=exportKey, 10=wrapKey, 11=unwrapKey)"
+ },
+ "WEBCRYPTO_ALG": {
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 30,
+ "description": "Algorithms used with WebCrypto (see table in WebCryptoTask.cpp)"
+ },
+ "MASTER_PASSWORD_ENABLED": {
+ "expires_in_version": "never",
+ "kind": "flag",
+ "description": "If a master-password is enabled for this profile"
+ },
+ "DISPLAY_SCALING_OSX" : {
+ "expires_in_version": "never",
+ "kind": "linear",
+ "high": 500,
+ "n_buckets": 100,
+ "description": "Scaling percentage for the display where the first window is opened (OS X only)",
+ "cpp_guard": "XP_MACOSX"
+ },
+ "DISPLAY_SCALING_MSWIN" : {
+ "expires_in_version": "never",
+ "kind": "linear",
+ "high": 500,
+ "n_buckets": 100,
+ "description": "Scaling percentage for the display where the first window is opened (MS Windows only)",
+ "cpp_guard": "XP_WIN"
+ },
+ "DISPLAY_SCALING_LINUX" : {
+ "expires_in_version": "never",
+ "kind": "linear",
+ "high": 500,
+ "n_buckets": 100,
+ "description": "Scaling percentage for the display where the first window is opened (Linux only)",
+ "cpp_guard": "XP_LINUX"
+ },
+ "SOCIAL_SIDEBAR_STATE": {
+ "expires_in_version": "never",
+ "kind": "boolean",
+ "description": "Social Sidebar state 0: closed, 1: opened. Toggling between providers will result in a higher opened rate."
+ },
+ "SOCIAL_TOOLBAR_BUTTONS": {
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 3,
+ "description": "Social toolbar button has been used (0:share, 1:status, 2:bookmark)"
+ },
+ "SOCIAL_PANEL_CLICKS": {
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 4,
+ "description": "Social content has been interacted with (0:share, 1:status, 2:bookmark, 3: sidebar)"
+ },
+ "SOCIAL_SIDEBAR_OPEN_DURATION": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000000,
+ "n_buckets": 10,
+ "description": "Sidebar showing: seconds that the sidebar has been opened"
+ },
+ "SHUTDOWN_PHASE_DURATION_TICKS_QUIT_APPLICATION": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 65,
+ "n_buckets": 10,
+ "description": "Duration of shutdown phase quit-application, as measured by the shutdown terminator, in seconds of activity"
+ },
+ "SHUTDOWN_PHASE_DURATION_TICKS_PROFILE_CHANGE_TEARDOWN": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 65,
+ "n_buckets": 10,
+ "description": "Duration of shutdown phase profile-change-teardown, as measured by the shutdown terminator, in seconds of activity"
+ },
+ "SHUTDOWN_PHASE_DURATION_TICKS_XPCOM_WILL_SHUTDOWN": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 65,
+ "n_buckets": 10,
+ "description": "Duration of shutdown phase xpcom-will-shutdown, as measured by the shutdown terminator, in seconds of activity"
+ },
+ "SHUTDOWN_PHASE_DURATION_TICKS_PROFILE_BEFORE_CHANGE": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 65,
+ "n_buckets": 10,
+ "description": "Duration of shutdown phase profile-before-change, as measured by the shutdown terminator, in seconds of activity"
+ },
+ "BR_9_2_1_SUBJECT_ALT_NAMES": {
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 8,
+ "description": "Baseline Requirements section 9.2.1: subject alternative names extension (0: ok, 1 or more: error)"
+ },
+ "BR_9_2_2_SUBJECT_COMMON_NAME": {
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 8,
+ "description": "Baseline Requirements section 9.2.2: subject common name field (0: present, in subject alt. names; 1: not present; 2: not present in subject alt. names)"
+ },
+ "TAP_TO_LOAD_ENABLED": {
+ "expires_in_version": "50",
+ "kind": "enumerated",
+ "n_values": 3,
+ "description": "Whether or not a user has tap-to-load enabled.",
+ "bug_numbers": [1208167]
+ },
+ "ZOOMED_VIEW_ENABLED": {
+ "expires_in_version": "60",
+ "kind": "boolean",
+ "description": "Whether or not a user has the zoomed view (a.k.a. \"Magnify small areas\") enabled.",
+ "alert_emails": ["mobile-frontend@mozilla.com"],
+ "bug_numbers": [1235061]
+ },
+ "TRACKING_PROTECTION_ENABLED": {
+ "expires_in_version": "never",
+ "kind": "boolean",
+ "description": "Whether or not a session has tracking protection enabled"
+ },
+ "TRACKING_PROTECTION_PBM_DISABLED": {
+ "expires_in_version": "60",
+ "kind": "boolean",
+ "description": "Is the tracking protection in private browsing mode disabled?"
+ },
+ "FENNEC_TRACKING_PROTECTION_STATE": {
+ "expires_in_version": "60",
+ "kind": "enumerated",
+ "n_values": 5,
+ "description": "The state of the user-visible tracking protection setting (0 = Disabled, 1 = Enabled in PB, 2 = Enabled)",
+ "alert_emails": ["mleibovic@mozilla.com"],
+ "bug_numbers": [1228090]
+ },
+ "TRACKING_PROTECTION_SHIELD": {
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 4,
+ "description": "Tracking protection shield (0 = not shown, 1 = loaded, 2 = blocked)"
+ },
+ "TRACKING_PROTECTION_EVENTS": {
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 3,
+ "description": "Doorhanger shown = 0, Disable = 1, Enable = 2"
+ },
+ "SERVICE_WORKER_REGISTRATION_LOADING": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 5000,
+ "n_buckets": 20,
+ "description": "Tracking how ServiceWorkerRegistrar loads data before the first content is shown. File bugs in Core::DOM in case of a Telemetry regression."
+ },
+ "SERVICE_WORKER_REQUEST_PASSTHROUGH": {
+ "expires_in_version": "50",
+ "kind": "boolean",
+ "description": "Intercepted fetch sending back same Request object. File bugs in Core::DOM in case of a Telemetry regression."
+ },
+ "E10S_STATUS": {
+ "alert_emails": ["firefox-dev@mozilla.org"],
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 12,
+ "releaseChannelCollection": "opt-out",
+ "bug_numbers": [1241294],
+ "description": "Why e10s is enabled or disabled (0=ENABLED_BY_USER, 1=ENABLED_BY_DEFAULT, 2=DISABLED_BY_USER, 3=DISABLED_IN_SAFE_MODE, 4=DISABLED_FOR_ACCESSIBILITY, 5=DISABLED_FOR_MAC_GFX, 6=DISABLED_FOR_BIDI, 7=DISABLED_FOR_ADDONS, 8=FORCE_DISABLED, 9=DISABLED_FOR_XPLAYERS, 10=DISABLED_FOR_OS_VERSION)"
+ },
+ "E10S_WINDOW": {
+ "expires_in_version": "never",
+ "kind": "boolean",
+ "description": "Whether a browser window is set as an e10s window"
+ },
+ "E10S_BLOCKED_FROM_RUNNING": {
+ "expires_in_version": "never",
+ "kind": "boolean",
+ "description": "Whether the e10s pref was set but it was blocked from running due to blacklisted conditions"
+ },
+ "BLOCKED_ON_PLUGIN_MODULE_INIT_MS": {
+ "alert_emails": ["perf-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000,
+ "n_buckets": 20,
+ "keyed": true,
+ "description": "Time (ms) that the main thread has been blocked on LoadModule and NP_Initialize in PluginModuleParent"
+ },
+ "BLOCKED_ON_PLUGIN_INSTANCE_INIT_MS": {
+ "alert_emails": ["perf-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000,
+ "n_buckets": 20,
+ "keyed": true,
+ "description": "Time (ms) that the main thread has been blocked on NPP_New in an IPC plugin"
+ },
+ "BLOCKED_ON_PLUGIN_STREAM_INIT_MS": {
+ "alert_emails": ["perf-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000,
+ "n_buckets": 20,
+ "keyed": true,
+ "description": "Time (ms) that the main thread has been blocked on NPP_NewStream in an IPC plugin"
+ },
+ "BLOCKED_ON_PLUGINASYNCSURROGATE_WAITFORINIT_MS": {
+ "alert_emails": ["perf-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "50",
+ "kind": "exponential",
+ "high": 10000,
+ "n_buckets": 20,
+ "keyed": true,
+ "description": "Time (ms) that the main thread has been blocked on PluginAsyncSurrogate::WaitForInit in an IPC plugin"
+ },
+ "BLOCKED_ON_PLUGIN_INSTANCE_DESTROY_MS": {
+ "alert_emails": ["perf-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 10000,
+ "n_buckets": 20,
+ "keyed": true,
+ "description": "Time (ms) that the main thread has been blocked on NPP_Destroy in an IPC plugin"
+ },
+ "ONBEFOREUNLOAD_PROMPT_ACTION" : {
+ "expires_in_version": "45",
+ "kind": "enumerated",
+ "n_values": 3,
+ "description": "What button a user clicked in an onbeforeunload prompt. (Stay on Page = 0, Leave Page = 1, prompt aborted = 2)"
+ },
+ "ONBEFOREUNLOAD_PROMPT_COUNT" : {
+ "expires_in_version": "45",
+ "kind": "count",
+ "description": "How many onbeforeunload prompts has the user encountered in their session?"
+ },
+ "SUBPROCESS_ABNORMAL_ABORT": {
+ "expires_in_version": "never",
+ "kind": "count",
+ "keyed": true,
+ "releaseChannelCollection": "opt-out",
+ "description": "Counts of plugin/content process abnormal shutdown, whether or not a crash report was available."
+ },
+ "SUBPROCESS_CRASHES_WITH_DUMP": {
+ "expires_in_version": "never",
+ "kind": "count",
+ "keyed": true,
+ "releaseChannelCollection": "opt-out",
+ "description": "Counts of plugin and content process crashes which are reported with a crash dump."
+ },
+ "SUBPROCESS_LAUNCH_FAILURE": {
+ "alert_emails": ["haftandilian@mozilla.com"],
+ "expires_in_version": "never",
+ "bug_numbers": [1275430],
+ "kind": "count",
+ "keyed": true,
+ "releaseChannelCollection": "opt-out",
+ "description": "Counts the number of times launching a subprocess fails. Counts are by subprocess-type using the GeckoProcessType enum."
+ },
+ "PROCESS_CRASH_SUBMIT_ATTEMPT": {
+ "expires_in_version": "never",
+ "kind": "count",
+ "keyed": true,
+ "releaseChannelCollection": "opt-out",
+ "description": "An attempt to submit a crash. Keyed on the CrashManager Crash.type."
+ },
+ "PROCESS_CRASH_SUBMIT_SUCCESS": {
+ "expires_in_version": "never",
+ "kind": "boolean",
+ "keyed": true,
+ "releaseChannelCollection": "opt-out",
+ "description": "The submission status when main/plugin/content crashes are submitted. 1 is success, 0 is failure. Keyed on the CrashManager Crash.type."
+ },
+ "STUMBLER_TIME_BETWEEN_UPLOADS_SEC": {
+ "expires_in_version": "45",
+ "kind": "exponential",
+ "n_buckets": 50,
+ "high": 259200,
+ "description": "Stumbler: The time in seconds between uploads."
+ },
+ "STUMBLER_VOLUME_BYTES_UPLOADED_PER_SEC": {
+ "expires_in_version": "45",
+ "kind": "exponential",
+ "n_buckets": 50,
+ "high": 1000000,
+ "description": "Stumbler: Volume measurement of bytes uploaded, normalized to per-second."
+ },
+ "STUMBLER_TIME_BETWEEN_START_SEC": {
+ "expires_in_version": "45",
+ "kind": "exponential",
+ "n_buckets": 50,
+ "high": 259200,
+ "description": "Stumbler: The time in seconds between service starts."
+ },
+ "STUMBLER_UPLOAD_BYTES": {
+ "expires_in_version": "45",
+ "kind": "exponential",
+ "n_buckets": 50,
+ "high": 1000000,
+ "description": "Stumbler: The bytes per upload."
+ },
+ "STUMBLER_UPLOAD_OBSERVATION_COUNT": {
+ "expires_in_version": "45",
+ "kind": "exponential",
+ "n_buckets": 50,
+ "high": 10000,
+ "description": "Stumbler: The observations per upload."
+ },
+ "STUMBLER_UPLOAD_CELL_COUNT": {
+ "expires_in_version": "45",
+ "kind": "exponential",
+ "n_buckets": 50,
+ "high": 10000,
+ "description": "Stumbler: The cells per upload."
+ },
+ "STUMBLER_UPLOAD_WIFI_AP_COUNT": {
+ "expires_in_version": "45",
+ "kind": "exponential",
+ "n_buckets": 50,
+ "high": 10000,
+ "description": "Stumbler: The Wi-Fi APs per upload."
+ },
+ "STUMBLER_OBSERVATIONS_PER_DAY": {
+ "expires_in_version": "45",
+ "kind": "exponential",
+ "n_buckets": 50,
+ "high": 10000,
+ "description": "Stumbler: The number of observations between upload events, normalized to per day."
+ },
+ "STUMBLER_TIME_BETWEEN_RECEIVED_LOCATIONS_SEC": {
+ "expires_in_version": "45",
+ "kind": "exponential",
+ "n_buckets": 50,
+ "high": 86400,
+ "description": "Stumbler: The time between receiving passive locations."
+ },
+ "DATA_STORAGE_ENTRIES": {
+ "expires_in_version": "default",
+ "kind": "linear",
+ "high": 1024,
+ "n_buckets": 16,
+ "description": "The number of entries in persistent DataStorage (HSTS and HPKP data, basically)"
+ },
+ "VIDEO_EME_PLAY_SUCCESS": {
+ "expires_in_version": "45",
+ "kind": "boolean",
+ "description": "EME video playback success or failure"
+ },
+ "VIDEO_PLAY_TIME_MS" : {
+ "alert_emails": ["ajones@mozilla.com"],
+ "expires_in_version": "55",
+ "description": "Total time spent playing video in milliseconds. This reports the total play time for an HTML Media Element whenever it is suspended or resumed, such as when the page is unloaded, or when the mute status changes when the AudioChannelAPI pref is set.",
+ "kind": "exponential",
+ "high": 7200000,
+ "n_buckets": 100,
+ "bug_numbers": [1261955, 1127646]
+ },
+ "VIDEO_HIDDEN_PLAY_TIME_MS" : {
+ "alert_emails": ["ajones@mozilla.com", "gsquelart@mozilla.com"],
+ "expires_in_version": "55",
+ "description": "Total time spent playing video while element is hidden, in milliseconds. This reports the total hidden play time for an HTML Media Element whenever it is suspended or resumed, such as when the page is unloaded, or when the mute status changes when the AudioChannelAPI pref is set.",
+ "kind": "exponential",
+ "high": 7200000,
+ "n_buckets": 100,
+ "bug_numbers": [1285419]
+ },
+ "VIDEO_HIDDEN_PLAY_TIME_PERCENTAGE" : {
+ "alert_emails": ["ajones@mozilla.com", "gsquelart@mozilla.com"],
+ "expires_in_version": "55",
+ "description": "Percentage of total time spent playing video while element is hidden. Keyed by audio presence and by height ranges (boundaries: 240, 480, 576, 720, 1080, 2160), e.g.: 'V,0<h<=240', 'AV,h>2160'; and 'All' will accumulate all percentages. This is reported whenever an HTML Media Element is suspended or resumed, such as when the page is unloaded.",
+ "keyed": true,
+ "kind": "linear",
+ "high": 100,
+ "n_buckets": 50,
+ "bug_numbers": [1287987]
+ },
+ "VIDEO_INFERRED_DECODE_SUSPEND_PERCENTAGE" : {
+ "alert_emails": ["ajones@mozilla.com", "gsquelart@mozilla.com"],
+ "expires_in_version": "55",
+ "description": "Percentage of total time spent *not* fully decoding video while element is hidden (simulated, even when feature is not enabled). Keyed by audio presence and by height ranges (boundaries: 240, 480, 576, 720, 1080, 2160), e.g.: 'V,0<h<=240', 'AV,h>2160'; and 'All' will accumulate all percentages. This is reported whenever an HTML Media Element is suspended or resumed, such as when the page is unloaded.",
+ "keyed": true,
+ "kind": "linear",
+ "high": 100,
+ "n_buckets": 50,
+ "bug_numbers": [1293145]
+ },
+ "VIDEO_INTER_KEYFRAME_AVERAGE_MS" : {
+ "alert_emails": ["ajones@mozilla.com", "gsquelart@mozilla.com"],
+ "expires_in_version": "55",
+ "description": "Average interval between video keyframes in played videos, in milliseconds. Keyed by audio presence and by height ranges (boundaries: 240, 480, 576, 720, 1080, 2160), e.g.: 'V,0<h<=240', 'AV,h>2160'; and 'All' will accumulate all intervals. This is reported whenever an HTML Media Element is suspended or resumed, such as when the page is unloaded.",
+ "keyed": true,
+ "kind": "exponential",
+ "high": 60000,
+ "n_buckets": 100,
+ "bug_numbers": [1289668]
+ },
+ "VIDEO_INTER_KEYFRAME_MAX_MS" : {
+ "alert_emails": ["ajones@mozilla.com", "gsquelart@mozilla.com"],
+ "expires_in_version": "55",
+ "description": "Maximum interval between video keyframes in played videos, in milliseconds; '0' means only 1 keyframe found. Keyed by audio presence and by height ranges (boundaries: 240, 480, 576, 720, 1080, 2160), e.g.: 'V,0<h<=240', 'AV,h>2160'; and 'All' will accumulate all intervals. This is reported whenever an HTML Media Element is suspended or resumed, such as when the page is unloaded.",
+ "keyed": true,
+ "kind": "exponential",
+ "high": 60000,
+ "n_buckets": 100,
+ "bug_numbers": [1289668]
+ },
+ "VIDEO_SUSPEND_RECOVERY_TIME_MS" : {
+ "alert_emails": ["ajones@mozilla.com", "gsquelart@mozilla.com"],
+ "expires_in_version": "55",
+ "description": "Time taken for a video to resume after decoding was suspended, in milliseconds. Keyed by audio presence, hw acceleration, and by height ranges (boundaries: 240, 480, 720, 1080, 2160), e.g.: 'V,0-240', 'AV(hw),2160+'; and 'All' will accumulate all values.",
+ "keyed": true,
+ "kind": "exponential",
+ "high": 10000,
+ "n_buckets": 100,
+ "bug_numbers": [1294349]
+ },
+ "VIDEO_AS_CONTENT_SOURCE" : {
+ "alert_emails": ["ajones@mozilla.com", "kaku@mozilla.com"],
+ "expires_in_version": "58",
+ "description": "Usage of a {visible / invisible} video element as the source of {drawImage(), createPattern(), createImageBitmap() and captureStream()} APIs. (0 = ALL_VISIBLE, 1 = ALL_INVISIBLE, 2 = drawImage_VISIBLE, 3 = drawImage_INVISIBLE, 4 = createPattern_VISIBLE, 5 = createPattern_INVISIBLE, 6 = createImageBitmap_VISIBLE, 7 = createImageBitmap_INVISIBLE, 8 = captureStream_VISIBLE, 9 = captureStream_INVISIBLE)",
+ "kind": "enumerated",
+ "n_values": 12,
+ "bug_numbers": [1299718]
+ },
+ "VIDEO_AS_CONTENT_SOURCE_IN_TREE_OR_NOT" : {
+ "alert_emails": ["ajones@mozilla.com", "kaku@mozilla.com"],
+ "expires_in_version": "58",
+ "description": "Usage of an invisible {in tree / not in tree} video element as the source of {drawImage(), createPattern(), createImageBitmap() and captureStream()} APIs. (0 = ALL_IN_TREE, 1 = ALL_NOT_IN_TREE, 2 = drawImage_IN_TREE, 3 = drawImage_NOT_IN_TREE, 4 = createPattern_IN_TREE, 5 = createPattern_NOT_IN_TREE, 6 = createImageBitmap_IN_TREE, 7 = createImageBitmap_NOT_IN_TREE, 8 = captureStream_IN_TREE, 9 = captureStream_NOT_IN_TREE)",
+ "kind": "enumerated",
+ "n_values": 12,
+ "bug_numbers": [1337301]
+ },
+ "VIDEO_UNLOAD_STATE": {
+ "alert_emails": ["ajones@mozilla.com"],
+ "expires_in_version": "55",
+ "kind": "enumerated",
+ "n_values": 5,
+ "description": "HTML Media Element state when unloading. ended = 0, paused = 1, stalled = 2, seeking = 3, other = 4",
+ "bug_numbers": [1261955]
+ },
+ "VIDEO_VP9_BENCHMARK_FPS": {
+ "alert_emails": ["ajones@mozilla.com"],
+ "expires_in_version": "55",
+ "bug_numbers": [1230265],
+ "kind": "linear",
+ "high": 1000,
+ "n_buckets": 100,
+ "description": "720p VP9 decode benchmark measurement in frames per second",
+ "releaseChannelCollection": "opt-out"
+ },
+ "VIDEO_CDM_CREATED": {
+ "alert_emails": ["cpearce@mozilla.com"],
+ "expires_in_version": "58",
+ "bug_numbers": [1304207],
+ "kind": "enumerated",
+ "n_values": 6,
+ "description": "Note the type of CDM (0=ClearKey, 1=Primetime, 2=Widevine, 3=unknown) every time we successfully instantiate an EME MediaKeys object.",
+ "releaseChannelCollection": "opt-out"
+ },
+ "VIDEO_CDM_GENERATE_REQUEST_CALLED": {
+ "alert_emails": ["cpearce@mozilla.com"],
+ "expires_in_version": "58",
+ "bug_numbers": [1305552],
+ "kind": "enumerated",
+ "n_values": 6,
+ "description": "Note the type of CDM (0=ClearKey, 1=Primetime, 2=Widevine, 3=unknown) every time we call MediaKeySession.generateRequest().",
+ "releaseChannelCollection": "opt-out"
+ },
+ "MEDIA_CODEC_USED": {
+ "alert_emails": ["cpearce@mozilla.com"],
+ "expires_in_version": "never",
+ "keyed": true,
+ "kind": "count",
+ "description": "Count of use of audio/video codecs in HTMLMediaElements and WebAudio. Those with 'resource' prefix are approximate; report based on HTTP ContentType or sniffing. Those with 'webaudio' prefix are for WebAudio."
+ },
+ "FX_SANITIZE_TOTAL": {
+ "alert_emails": ["firefox-dev@mozilla.org"],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 30000,
+ "n_buckets": 20,
+ "description": "Sanitize: Total time it takes to sanitize (ms)"
+ },
+ "FX_SANITIZE_CACHE": {
+ "alert_emails": ["firefox-dev@mozilla.org"],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 30000,
+ "n_buckets": 20,
+ "description": "Sanitize: Time it takes to sanitize the cache (ms)"
+ },
+ "FX_SANITIZE_COOKIES_2": {
+ "alert_emails": ["firefox-dev@mozilla.org"],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 30000,
+ "n_buckets": 20,
+ "description": "Sanitize: Time it takes to sanitize firefox cookies (ms). A subset of FX_SANITIZE_COOKIES."
+ },
+ "FX_SANITIZE_LOADED_FLASH": {
+ "alert_emails": ["firefox-dev@mozilla.org"],
+ "bug_numbers": [1251469],
+ "expires_in_version": "55",
+ "kind": "exponential",
+ "high": 30000,
+ "n_buckets": 20,
+ "description": "Sanitize: Time it takes to sanitize Flash when it's already loaded (ms). A subset of FX_SANITIZE_PLUGINS."
+ },
+ "FX_SANITIZE_UNLOADED_FLASH": {
+ "alert_emails": ["firefox-dev@mozilla.org"],
+ "bug_numbers": [1251469],
+ "expires_in_version": "55",
+ "kind": "exponential",
+ "high": 30000,
+ "n_buckets": 20,
+ "description": "Sanitize: Time it takes to sanitize Flash when it's not yet loaded (ms). A subset of FX_SANITIZE_PLUGINS."
+ },
+ "FX_SANITIZE_HISTORY": {
+ "alert_emails": ["firefox-dev@mozilla.org"],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 30000,
+ "n_buckets": 20,
+ "description": "Sanitize: Time it takes to sanitize history (ms)"
+ },
+ "FX_SANITIZE_FORMDATA": {
+ "alert_emails": ["firefox-dev@mozilla.org"],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 30000,
+ "n_buckets": 20,
+ "description": "Sanitize: Time it takes to sanitize stored form data (ms)"
+ },
+ "FX_SANITIZE_DOWNLOADS": {
+ "alert_emails": ["firefox-dev@mozilla.org"],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 30000,
+ "n_buckets": 20,
+ "description": "Sanitize: Time it takes to sanitize recent downloads (ms)"
+ },
+ "FX_SANITIZE_SESSIONS": {
+ "alert_emails": ["firefox-dev@mozilla.org"],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 30000,
+ "n_buckets": 20,
+ "description": "Sanitize: Time it takes to sanitize saved sessions (ms)"
+ },
+ "FX_SANITIZE_SITESETTINGS": {
+ "alert_emails": ["firefox-dev@mozilla.org"],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 30000,
+ "n_buckets": 20,
+ "description": "Sanitize: Time it takes to sanitize site-specific settings (ms)"
+ },
+ "FX_SANITIZE_OPENWINDOWS": {
+ "alert_emails": ["firefox-dev@mozilla.org"],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 30000,
+ "n_buckets": 20,
+ "description": "Sanitize: Time it takes to sanitize the open windows list (ms)"
+ },
+ "PWMGR_BLOCKLIST_NUM_SITES": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 100,
+ "n_buckets" : 10,
+ "description": "The number of sites for which the user has explicitly rejected saving logins"
+ },
+ "PWMGR_FORM_AUTOFILL_RESULT": {
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values" : 20,
+ "description": "The result of auto-filling a login form. See http://mzl.la/1Mbs6jL for bucket descriptions."
+ },
+ "PWMGR_LOGIN_LAST_USED_DAYS": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 750,
+ "n_buckets" : 40,
+ "description": "Time in days each saved login was last used"
+ },
+ "PWMGR_LOGIN_PAGE_SAFETY": {
+ "expires_in_version": "55",
+ "kind": "enumerated",
+ "n_values": 8,
+ "description": "The safety of a page where we see a password field. (0: safe page & safe submit; 1: safe page & unsafe submit; 2: safe page & unknown submit; 3: unsafe page & safe submit; 4: unsafe page & unsafe submit; 5: unsafe page & unknown submit)"
+ },
+ "PWMGR_MANAGE_COPIED_PASSWORD": {
+ "expires_in_version": "never",
+ "kind": "count",
+ "description": "Count of passwords copied from the password management interface"
+ },
+ "PWMGR_MANAGE_COPIED_USERNAME": {
+ "expires_in_version": "never",
+ "kind": "count",
+ "description": "Count of usernames copied from the password management interface"
+ },
+ "PWMGR_MANAGE_DELETED": {
+ "expires_in_version": "never",
+ "kind": "count",
+ "description": "Count of passwords deleted from the password management interface (including via Remove All)"
+ },
+ "PWMGR_MANAGE_DELETED_ALL": {
+ "expires_in_version": "never",
+ "kind": "count",
+ "description": "Count of times that Remove All was used from the password management interface"
+ },
+ "PWMGR_MANAGE_OPENED": {
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values" : 5,
+ "description": "Accumulates how the password management interface was opened. (0=Preferences, 1=Page Info)"
+ },
+ "PWMGR_MANAGE_SORTED": {
+ "expires_in_version": "never",
+ "keyed": true,
+ "kind": "count",
+ "description": "Reports the column that logins are sorted by"
+ },
+ "PWMGR_MANAGE_VISIBILITY_TOGGLED": {
+ "expires_in_version": "never",
+ "kind": "boolean",
+ "description": "Whether the visibility of passwords was toggled (0=Hide, 1=Show)"
+ },
+ "PWMGR_NUM_PASSWORDS_PER_HOSTNAME": {
+ "expires_in_version": "never",
+ "kind": "linear",
+ "high": 21,
+ "n_buckets" : 20,
+ "description": "The number of passwords per hostname"
+ },
+ "PWMGR_NUM_SAVED_PASSWORDS": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 750,
+ "n_buckets" : 50,
+ "description": "Total number of saved logins, including those that cannot be decrypted"
+ },
+ "PWMGR_NUM_HTTPAUTH_PASSWORDS": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 750,
+ "n_buckets" : 50,
+ "description": "Number of HTTP Auth logins"
+ },
+ "PWMGR_PASSWORD_INPUT_IN_FORM": {
+ "expires_in_version": "never",
+ "kind": "boolean",
+ "description": "Whether an <input type=password> is associated with a <form> when it is added to a document"
+ },
+ "PWMGR_PROMPT_REMEMBER_ACTION" : {
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 5,
+ "description": "Action taken by user through prompt for creating a login. (0=Prompt displayed [always recorded], 1=Add login, 2=Don't save now, 3=Never save)"
+ },
+ "PWMGR_PROMPT_UPDATE_ACTION" : {
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 5,
+ "description": "Action taken by user through prompt for modifying a login. (0=Prompt displayed [always recorded], 1=Update login)"
+ },
+ "PWMGR_SAVING_ENABLED": {
+ "expires_in_version": "never",
+ "kind": "boolean",
+ "description": "Number of users who have password saving on globally"
+ },
+ "PWMGR_USERNAME_PRESENT": {
+ "expires_in_version": "never",
+ "kind": "boolean",
+ "description": "Whether a saved login has a username"
+ },
+ "FENNEC_SYNC11_MIGRATION_SENTINELS_SEEN": {
+ "expires_in_version": "45",
+ "kind": "count",
+ "description": "The number of Sync 1.1 -> Sync 1.5 migration sentinels seen by Android Sync."
+ },
+ "FENNEC_SYNC11_MIGRATIONS_FAILED": {
+ "expires_in_version": "45",
+ "kind": "count",
+ "description": "The number of Sync 1.1 -> Sync 1.5 migrations that failed during Android Sync."
+ },
+ "FENNEC_SYNC11_MIGRATIONS_SUCCEEDED": {
+ "expires_in_version": "45",
+ "kind": "count",
+ "description": "The number of Sync 1.1 -> Sync 1.5 migrations that succeeded during Android Sync."
+ },
+ "FENNEC_SYNC11_MIGRATION_NOTIFICATIONS_OFFERED": {
+ "expires_in_version": "45",
+ "kind": "exponential",
+ "high": 500,
+ "n_buckets": 5,
+ "description": "The number of Sync 1.5 'complete upgrade/migration' notifications offered by Android Sync."
+ },
+ "FENNEC_SYNC11_MIGRATIONS_COMPLETED": {
+ "expires_in_version": "45",
+ "kind": "count",
+ "description": "The number of Sync 1.5 migrations completed by Android Sync."
+ },
+ "FENNEC_SYNC_NUMBER_OF_SYNCS_STARTED": {
+ "expires_in_version": "45",
+ "kind": "count",
+ "description": "Counts the number of times that a sync has started."
+ },
+ "FENNEC_SYNC_NUMBER_OF_SYNCS_COMPLETED": {
+ "expires_in_version": "45",
+ "kind": "count",
+ "description": "Counts the number of times that a sync has completed with no errors."
+ },
+ "FENNEC_SYNC_NUMBER_OF_SYNCS_FAILED": {
+ "expires_in_version": "45",
+ "kind": "count",
+ "description": "Counts the number of times that a sync has failed with errors."
+ },
+ "FENNEC_SYNC_NUMBER_OF_SYNCS_FAILED_BACKOFF": {
+ "expires_in_version": "45",
+ "kind": "count",
+ "description": "Counts the number of times that a sync has failed because of trying to sync before server backoff interval has passed."
+ },
+ "SLOW_SCRIPT_NOTICE_COUNT": {
+ "alert_emails": ["perf-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "count",
+ "description": "Count slow script notices"
+ },
+ "SLOW_SCRIPT_PAGE_COUNT": {
+ "alert_emails": ["perf-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "count",
+ "bug_numbers": [1251667],
+ "description": "The number of pages that trigger slow script notices"
+ },
+ "SLOW_SCRIPT_NOTIFY_DELAY": {
+ "alert_emails": ["perf-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 60000,
+ "n_buckets": 50,
+ "bug_numbers": [1271978],
+ "description": "The difference between the js slow script timeout for content set in prefs and the actual time we waited before displaying the notification (msec)."
+ },
+ "PLUGIN_HANG_NOTICE_COUNT": {
+ "alert_emails": ["perf-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "count",
+ "description": "Count plugin hang notices in e10s"
+ },
+ "SERVICE_WORKER_SPAWN_ATTEMPTS": {
+ "expires_in_version": "50",
+ "kind": "count",
+ "description": "Count attempts to spawn a ServiceWorker for a domain. File bugs in Core::DOM in case of a Telemetry regression."
+ },
+ "SERVICE_WORKER_WAS_SPAWNED": {
+ "expires_in_version": "50",
+ "kind": "count",
+ "description": "Count ServiceWorkers that really did get a thread created for them. File bugs in Core::DOM in case of a Telemetry regression."
+ },
+ "SERVICE_WORKER_SPAWN_GETS_QUEUED": {
+ "alert_emails": ["amarchesini@mozilla.com"],
+ "bug_numbers": [1286895],
+ "expires_in_version": "never",
+ "kind": "count",
+ "description": "Tracking whether a ServiceWorker spawn gets queued due to hitting max workers per domain limit. File bugs in Core::DOM in case of a Telemetry regression."
+ },
+ "SHARED_WORKER_SPAWN_GETS_QUEUED": {
+ "alert_emails": ["amarchesini@mozilla.com"],
+ "bug_numbers": [1286895],
+ "expires_in_version": "never",
+ "kind": "count",
+ "description": "Tracking whether a SharedWorker spawn gets queued due to hitting max workers per domain limit. File bugs in Core::DOM in case of a Telemetry regression."
+ },
+ "DEDICATED_WORKER_SPAWN_GETS_QUEUED": {
+ "alert_emails": ["amarchesini@mozilla.com"],
+ "bug_numbers": [1286895],
+ "expires_in_version": "never",
+ "kind": "count",
+ "description": "Tracking whether a DedicatedWorker spawn gets queued due to hitting max workers per domain limit. File bugs in Core::DOM in case of a Telemetry regression."
+ },
+ "SERVICE_WORKER_REGISTRATIONS": {
+ "expires_in_version": "50",
+ "kind": "count",
+ "description": "Count how many registrations occurs. File bugs in Core::DOM in case of a Telemetry regression."
+ },
+ "SERVICE_WORKER_CONTROLLED_DOCUMENTS": {
+ "expires_in_version": "50",
+ "kind": "count",
+ "description": "Count whenever a document is controlled. File bugs in Core::DOM in case of a Telemetry regression."
+ },
+ "SERVICE_WORKER_UPDATED": {
+ "expires_in_version": "50",
+ "kind": "count",
+ "description": "Count ServiceWorkers scripts that are updated. File bugs in Core::DOM in case of a Telemetry regression."
+ },
+ "SERVICE_WORKER_LIFE_TIME": {
+ "expires_in_version": "50",
+ "kind": "exponential",
+ "high": 120000,
+ "n_buckets": 20,
+ "description": "Tracking how long a ServiceWorker stays alive after it is spawned. File bugs in Core::DOM in case of a Telemetry regression."
+ },
+ "GRAPHICS_SANITY_TEST": {
+ "expires_in_version": "never",
+ "alert_emails": ["gfx-telemetry-alerts@mozilla.com","msreckovic@mozilla.com"],
+ "kind": "enumerated",
+ "n_values": 20,
+ "releaseChannelCollection": "opt-out",
+ "description": "Reports results from the graphics sanity test to track which drivers are having problems (0=TEST_PASSED, 1=TEST_FAILED_RENDER, 2=TEST_FAILED_VIDEO, 3=TEST_CRASHED)"
+ },
+ "READER_MODE_PARSE_RESULT" : {
+ "expires_in_version": "54",
+ "alert_emails": ["firefox-dev@mozilla.org", "gijs@mozilla.com"],
+ "kind": "enumerated",
+ "n_values": 5,
+ "description": "The result of trying to parse a document to show in reader view (0=Success, 1=Error too many elements, 2=Error in worker, 3=Error no article)"
+ },
+ "READER_MODE_DOWNLOAD_RESULT" : {
+ "expires_in_version": "54",
+ "alert_emails": ["firefox-dev@mozilla.org", "gijs@mozilla.com"],
+ "kind": "enumerated",
+ "n_values": 5,
+ "description": "The result of trying to download a document to show in reader view (0=Success, 1=Error XHR, 2=Error no document)"
+ },
+ "FENNEC_LOAD_SAVED_PAGE": {
+ "expires_in_version": "60",
+ "alert_emails": ["mobile-frontend@mozilla.com"],
+ "kind": "enumerated",
+ "n_values": 10,
+ "description": "How often users load saved items when online/offline (0=RL online, 1=RL offline, 2=BM online, 3=BM offline)",
+ "bug_numbers": [1243387]
+ },
+ "PERMISSIONS_SQL_CORRUPTED": {
+ "expires_in_version": "never",
+ "kind": "count",
+ "description": "Record the permissions.sqlite init failure"
+ },
+ "DEFECTIVE_PERMISSIONS_SQL_REMOVED": {
+ "expires_in_version": "never",
+ "kind": "count",
+ "description": "Record the removal of defective permissions.sqlite"
+ },
+ "FENNEC_TABQUEUE_QUEUESIZE" : {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 50,
+ "n_buckets": 10,
+ "description": "The number of tabs queued when opened."
+ },
+ "FENNEC_CUSTOM_HOMEPAGE": {
+ "expires_in_version": "60",
+ "alert_emails": ["mobile-frontend@mozilla.com"],
+ "bug_numbers": [1239102],
+ "kind": "boolean",
+ "description": "Whether the user has set a custom homepage."
+ },
+ "GRAPHICS_DRIVER_STARTUP_TEST": {
+ "alert_emails": ["gfx-telemetry-alerts@mozilla.com","danderson@mozilla.com","msreckovic@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 20,
+ "releaseChannelCollection": "opt-out",
+ "description": "Reports whether or not graphics drivers crashed during startup."
+ },
+ "GRAPHICS_SANITY_TEST_OS_SNAPSHOT": {
+ "alert_emails": ["gfx-telemetry-alerts@mozilla.com","danderson@mozilla.com","msreckovic@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 10,
+ "releaseChannelCollection": "opt-out",
+ "description": "Reports whether the graphics sanity test passed an OS snapshot test. 0=Pass, 1=Fail, 2=Error, 3=Timed out."
+ },
+ "DEVTOOLS_HUD_JANK": {
+ "alert_emails": ["rnicoletti@mozilla.com","thills@mozilla.com"],
+ "expires_in_version": "52",
+ "kind": "exponential",
+ "keyed": true,
+ "description": "The duration which a thread is blocked in ms, keyed by appName.",
+ "high": 5000,
+ "n_buckets": 10
+ },
+ "DEVTOOLS_HUD_REFLOW_DURATION": {
+ "alert_emails": ["rnicoletti@mozilla.com","thills@mozilla.com"],
+ "expires_in_version": "52",
+ "kind": "exponential",
+ "keyed": true,
+ "description": "The duration a reflow takes in ms, keyed by appName.",
+ "high": 1000,
+ "n_buckets": 10
+ },
+ "DEVTOOLS_HUD_REFLOWS": {
+ "alert_emails": ["rnicoletti@mozilla.com","thills@mozilla.com"],
+ "expires_in_version": "52",
+ "kind": "count",
+ "keyed": true,
+ "description": "A count of the number of reflows, keyed by appName."
+ },
+ "DEVTOOLS_HUD_SECURITY_CATEGORY": {
+ "alert_emails": ["rnicoletti@mozilla.com","thills@mozilla.com"],
+ "expires_in_version": "52",
+ "kind": "enumerated",
+ "keyed": true,
+ "description": "The security error enums, keyed by appName.",
+ "n_values": 8
+ },
+ "DEVTOOLS_HUD_ERRORS": {
+ "alert_emails": ["rnicoletti@mozilla.com","thills@mozilla.com"],
+ "expires_in_version": "52",
+ "kind": "count",
+ "keyed": true,
+ "description": "Number of errors, keyed by appName."
+ },
+ "DEVTOOLS_HUD_WARNINGS": {
+ "alert_emails": ["rnicoletti@mozilla.com","thills@mozilla.com"],
+ "expires_in_version": "52",
+ "kind": "count",
+ "keyed": true,
+ "description": "Number of warnings, keyed by appName."
+ },
+ "DEVTOOLS_HUD_USS": {
+ "alert_emails": ["rnicoletti@mozilla.com","thills@mozilla.com"],
+ "expires_in_version": "52",
+ "kind": "linear",
+ "keyed": true,
+ "low": 20000000,
+ "high": 100000000,
+ "n_buckets": 52,
+ "description": "The USS memory consumed by an application, keyed by appName."
+ },
+ "DEVTOOLS_HUD_APP_STARTUP_TIME_CONTENTINTERACTIVE": {
+ "alert_emails": ["rnicoletti@mozilla.com","thills@mozilla.com"],
+ "expires_in_version": "52",
+ "kind": "linear",
+ "keyed": true,
+ "description": "The duration in ms between application launch and the 'contentInteractive' performance mark, keyed by appName.",
+ "high": 2000,
+ "n_buckets": 10
+ },
+ "DEVTOOLS_HUD_APP_STARTUP_TIME_NAVIGATIONINTERACTIVE": {
+ "alert_emails": ["rnicoletti@mozilla.com","thills@mozilla.com"],
+ "expires_in_version": "52",
+ "kind": "linear",
+ "keyed": true,
+ "description": "The duration in ms between application launch and the 'navigationInteractive' performance mark, keyed by appName.",
+ "high": 3000,
+ "n_buckets": 10
+ },
+ "DEVTOOLS_HUD_APP_STARTUP_TIME_NAVIGATIONLOADED": {
+ "alert_emails": ["rnicoletti@mozilla.com","thills@mozilla.com"],
+ "expires_in_version": "52",
+ "kind": "linear",
+ "keyed": true,
+ "description": "The duration in ms between application launch and the 'navigationLoaded' performance mark, keyed by appName.",
+ "high": 4000,
+ "n_buckets": 10
+ },
+ "DEVTOOLS_HUD_APP_STARTUP_TIME_VISUALLYLOADED": {
+ "alert_emails": ["rnicoletti@mozilla.com","thills@mozilla.com"],
+ "expires_in_version": "52",
+ "kind": "linear",
+ "keyed": true,
+ "description": "The duration in ms between application launch and the 'visuallyLoaded' performance mark, keyed by appName.",
+ "high": 5000,
+ "n_buckets": 10
+ },
+ "DEVTOOLS_HUD_APP_STARTUP_TIME_MEDIAENUMERATED": {
+ "alert_emails": ["rnicoletti@mozilla.com","thills@mozilla.com"],
+ "expires_in_version": "52",
+ "kind": "linear",
+ "keyed": true,
+ "description": "The duration in ms between application launch and the 'mediaEnumerated' performance mark, keyed by appName.",
+ "high": 5000,
+ "n_buckets": 10
+ },
+ "DEVTOOLS_HUD_APP_STARTUP_TIME_FULLYLOADED": {
+ "alert_emails": ["rnicoletti@mozilla.com","thills@mozilla.com"],
+ "expires_in_version": "52",
+ "kind": "linear",
+ "keyed": true,
+ "description": "The duration in ms between application launch and the 'fullyLoaded' performance mark, keyed by appName.",
+ "high": 30000,
+ "n_buckets": 30
+ },
+ "DEVTOOLS_HUD_APP_STARTUP_TIME_SCANEND": {
+ "alert_emails": ["rnicoletti@mozilla.com","thills@mozilla.com"],
+ "expires_in_version": "52",
+ "kind": "linear",
+ "keyed": true,
+ "description": "The duration in ms between application launch and the 'scanEnd' performance mark, keyed by appName.",
+ "high": 30000,
+ "n_buckets": 30
+ },
+ "DEVTOOLS_HUD_APP_MEMORY_CONTENTINTERACTIVE_V2": {
+ "alert_emails": ["rnicoletti@mozilla.com","thills@mozilla.com"],
+ "expires_in_version": "52",
+ "kind": "linear",
+ "keyed": true,
+ "description": "The USS memory consumed by an application at the time of the 'contentInteractive' performance mark, keyed by appName.",
+ "low": 20000000,
+ "high": 30000000,
+ "n_buckets": 10
+ },
+ "DEVTOOLS_HUD_APP_MEMORY_NAVIGATIONINTERACTIVE_V2": {
+ "alert_emails": ["rnicoletti@mozilla.com","thills@mozilla.com"],
+ "expires_in_version": "52",
+ "kind": "linear",
+ "keyed": true,
+ "description": "The USS memory consumed by an application at the time of the 'navigationInteractive' performance mark, keyed by appName.",
+ "low": 20000000,
+ "high": 30000000,
+ "n_buckets": 10
+ },
+ "DEVTOOLS_HUD_APP_MEMORY_NAVIGATIONLOADED_V2": {
+ "alert_emails": ["rnicoletti@mozilla.com","thills@mozilla.com"],
+ "expires_in_version": "52",
+ "kind": "linear",
+ "keyed": true,
+ "description": "The USS memory consumed by an application at the time of the 'navigationLoaded' performance mark, keyed by appName.",
+ "low": 20000000,
+ "high": 30000000,
+ "n_buckets": 10
+ },
+ "DEVTOOLS_HUD_APP_MEMORY_VISUALLYLOADED_V2": {
+ "alert_emails": ["rnicoletti@mozilla.com","thills@mozilla.com"],
+ "expires_in_version": "52",
+ "kind": "linear",
+ "keyed": true,
+ "description": "The USS memory consumed by an application at the time of the 'visuallyLoaded' performance mark, keyed by appName.",
+ "low": 20000000,
+ "high": 30000000,
+ "n_buckets": 10
+ },
+ "DEVTOOLS_HUD_APP_MEMORY_MEDIAENUMERATED_V2": {
+ "alert_emails": ["rnicoletti@mozilla.com","thills@mozilla.com"],
+ "expires_in_version": "52",
+ "kind": "linear",
+ "keyed": true,
+ "description": "The USS memory consumed by an application at the time of the 'mediaEnumerated' performance mark, keyed by appName.",
+ "low": 20000000,
+ "high": 40000000,
+ "n_buckets": 10
+ },
+ "DEVTOOLS_HUD_APP_MEMORY_FULLYLOADED_V2": {
+ "alert_emails": ["rnicoletti@mozilla.com","thills@mozilla.com"],
+ "expires_in_version": "52",
+ "kind": "linear",
+ "keyed": true,
+ "description": "The USS memory consumed by an application at the time of the 'fullyLoaded' performance mark, keyed by appName.",
+ "low": 20000000,
+ "high": 40000000,
+ "n_buckets": 20
+ },
+ "DEVTOOLS_HUD_APP_MEMORY_SCANEND_V2": {
+ "alert_emails": ["rnicoletti@mozilla.com","thills@mozilla.com"],
+ "expires_in_version": "52",
+ "kind": "linear",
+ "keyed": true,
+ "description": "The USS memory consumed by an application at the time of the 'scanEnd' performance mark, keyed by appName.",
+ "low": 20000000,
+ "high": 40000000,
+ "n_buckets": 20
+ },
+ "DEVTOOLS_MEMORY_TAKE_SNAPSHOT_COUNT": {
+ "expires_in_version": "56",
+ "kind": "count",
+ "bug_numbers": [1221619],
+ "description": "The number of heap snapshots taken by a user"
+ },
+ "DEVTOOLS_MEMORY_IMPORT_SNAPSHOT_COUNT": {
+ "expires_in_version": "56",
+ "kind": "count",
+ "bug_numbers": [1221619],
+ "description": "The number of heap snapshots imported by a user"
+ },
+ "DEVTOOLS_MEMORY_EXPORT_SNAPSHOT_COUNT": {
+ "expires_in_version": "56",
+ "kind": "count",
+ "bug_numbers": [1221619],
+ "description": "The number of heap snapshots exported by a user"
+ },
+ "DEVTOOLS_MEMORY_FILTER_CENSUS": {
+ "expires_in_version": "56",
+ "kind": "boolean",
+ "bug_numbers": [1221619],
+ "description": "Whether a census tree was filtered or not"
+ },
+ "DEVTOOLS_MEMORY_DIFF_CENSUS": {
+ "expires_in_version": "56",
+ "kind": "boolean",
+ "bug_numbers": [1221619],
+ "description": "Whether a census was the result of diffing or not"
+ },
+ "DEVTOOLS_MEMORY_INVERTED_CENSUS": {
+ "expires_in_version": "56",
+ "kind": "boolean",
+ "bug_numbers": [1221619],
+ "description": "Whether a census tree was inverted or not"
+ },
+ "DEVTOOLS_MEMORY_BREAKDOWN_CENSUS_COUNT": {
+ "expires_in_version": "56",
+ "kind": "count",
+ "bug_numbers": [1221619],
+ "keyed": true,
+ "description": "The number of times a given type of breakdown was used for a census"
+ },
+ "DEVTOOLS_MEMORY_DOMINATOR_TREE_COUNT": {
+ "expires_in_version": "56",
+ "kind": "count",
+ "bug_numbers": [1221619],
+ "description": "The number of times a user requested a dominator tree be computed"
+ },
+ "DEVTOOLS_MEMORY_BREAKDOWN_DOMINATOR_TREE_COUNT": {
+ "expires_in_version": "56",
+ "kind": "count",
+ "bug_numbers": [1221619],
+ "keyed": true,
+ "description": "The number of times a given type of breakdown was used for a dominator tree"
+ },
+ "GRAPHICS_SANITY_TEST_REASON": {
+ "alert_emails": ["gfx-telemetry-alerts@mozilla.com","danderson@mozilla.com","msreckovic@mozilla.com"],
+ "expires_in_version": "43",
+ "kind": "enumerated",
+ "n_values": 20,
+ "releaseChannelCollection": "opt-out",
+ "description": "Reports why a graphics sanity test was run. 0=First Run, 1=App Updated, 2=Device Change, 3=Driver Change."
+ },
+ "TRANSLATION_OPPORTUNITIES": {
+ "expires_in_version": "default",
+ "kind": "boolean",
+ "description": "A number of successful and failed attempts to translate a document"
+ },
+ "TRANSLATION_OPPORTUNITIES_BY_LANGUAGE": {
+ "expires_in_version": "default",
+ "kind": "boolean",
+ "keyed": true,
+ "description": "A number of successful and failed attempts to translate a document grouped by language"
+ },
+ "TRANSLATED_PAGES": {
+ "expires_in_version": "default",
+ "kind": "count",
+    "description": "A number of successfully translated pages"
+ },
+ "TRANSLATED_PAGES_BY_LANGUAGE": {
+ "expires_in_version": "default",
+ "kind": "count",
+ "keyed": true,
+    "description": "A number of successfully translated pages by language"
+ },
+ "TRANSLATED_CHARACTERS": {
+ "expires_in_version": "default",
+ "kind": "exponential",
+ "high": 10240,
+ "n_buckets": 50,
+    "description": "A number of successfully translated characters"
+ },
+ "DENIED_TRANSLATION_OFFERS": {
+ "expires_in_version": "default",
+ "kind": "count",
+    "description": "A number of translation offers the user denied"
+ },
+ "AUTO_REJECTED_TRANSLATION_OFFERS": {
+ "expires_in_version": "default",
+ "kind": "count",
+    "description": "A number of auto-rejected translation offers"
+ },
+ "REQUESTS_OF_ORIGINAL_CONTENT": {
+ "expires_in_version": "default",
+ "kind": "count",
+ "description": "A number of times the user requested to see the original content of a translated page"
+ },
+ "CHANGES_OF_TARGET_LANGUAGE": {
+ "expires_in_version": "default",
+ "kind": "count",
+ "description": "A number of times when the target language was changed by the user"
+ },
+ "CHANGES_OF_DETECTED_LANGUAGE": {
+ "expires_in_version": "default",
+ "kind": "boolean",
+ "description": "A number of changes of detected language before (true) or after (false) translating a page for the first time."
+ },
+ "SHOULD_TRANSLATION_UI_APPEAR": {
+ "expires_in_version": "default",
+ "kind": "flag",
+ "description": "Tracks situations when the user opts for displaying translation UI"
+ },
+ "SHOULD_AUTO_DETECT_LANGUAGE": {
+ "expires_in_version": "default",
+ "kind": "flag",
+ "description": "Tracks situations when the user opts for auto-detecting the language of a page"
+ },
+ "PERMISSIONS_REMIGRATION_COMPARISON": {
+ "alert_emails": ["michael@thelayzells.com"],
+ "expires_in_version": "44",
+ "kind": "enumerated",
+ "n_values": 10,
+ "description": "Reports a comparison between row count of original and re-migration of the v7 permissions DB. 0=New == 0, 1=New < Old, 2=New == Old, 3=New > Old"
+ },
+ "PERMISSIONS_MIGRATION_7_ERROR": {
+ "alert_emails": ["michael@thelayzells.com"],
+ "expires_in_version": "44",
+ "kind": "boolean",
+ "description": "Was there an error while performing the v7 permissions DB migration?"
+ },
+ "PERF_MONITORING_TEST_CPU_RESCHEDULING_PROPORTION_MOVED": {
+ "alert_emails": ["dteller@mozilla.com"],
+ "expires_in_version": "48",
+ "kind": "linear",
+ "high": 100,
+ "n_buckets": 20,
+ "description": "Proportion (%) of reschedulings of the main process to another CPU during the execution of code inside a JS compartment. Updated while we are measuring jank."
+ },
+ "PERF_MONITORING_SLOW_ADDON_JANK_US": {
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "low": 1,
+ "high": 10000000,
+ "n_buckets": 20,
+ "keyed": true,
+ "description": "Contiguous time spent by an add-on blocking the main loop (microseconds, keyed by add-on ID)."
+ },
+ "PERF_MONITORING_SLOW_ADDON_CPOW_US": {
+ "expires_in_version": "70",
+ "kind": "exponential",
+ "low": 1,
+ "high": 10000000,
+ "n_buckets": 20,
+ "keyed": true,
+ "description": "Contiguous time spent by an add-on blocking the main loop by performing a blocking cross-process call (microseconds, keyed by add-on ID)."
+ },
+ "VIDEO_EME_REQUEST_SUCCESS_LATENCY_MS": {
+ "alert_emails": ["cpearce@mozilla.com"],
+ "expires_in_version": "55",
+ "kind": "exponential",
+ "high": 60000,
+ "n_buckets": 60,
+ "releaseChannelCollection": "opt-out",
+ "description": "Time spent waiting for a navigator.requestMediaKeySystemAccess call to succeed."
+ },
+ "VIDEO_EME_REQUEST_FAILURE_LATENCY_MS": {
+ "alert_emails": ["cpearce@mozilla.com"],
+ "expires_in_version": "55",
+ "kind": "exponential",
+ "high": 60000,
+ "n_buckets": 60,
+ "releaseChannelCollection": "opt-out",
+ "description": "Time spent waiting for a navigator.requestMediaKeySystemAccess call to fail."
+ },
+ "FXA_CONFIGURED": {
+ "alert_emails": ["fx-team@mozilla.com"],
+ "bug_numbers": [1236383],
+ "expires_in_version": "never",
+ "kind": "flag",
+ "releaseChannelCollection": "opt-out",
+ "description": "If the user is signed in to a Firefox Account on this device. Recorded once per session just after startup as Sync is initialized."
+ },
+ "WEAVE_DEVICE_COUNT_DESKTOP": {
+ "alert_emails": ["fx-team@mozilla.com"],
+ "bug_numbers": [1232050],
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 10,
+ "releaseChannelCollection": "opt-out",
+ "description": "Number of desktop devices (including this device) associated with this Sync account. Recorded each time Sync successfully completes the 'clients' engine."
+ },
+ "WEAVE_DEVICE_COUNT_MOBILE": {
+ "alert_emails": ["fx-team@mozilla.com"],
+ "bug_numbers": [1232050],
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 10,
+ "releaseChannelCollection": "opt-out",
+ "description": "Number of mobile devices associated with this Sync account. Recorded each time Sync successfully completes the 'clients' engine."
+ },
+ "WEAVE_ENGINE_SYNC_ERRORS": {
+ "alert_emails": ["fx-team@mozilla.com"],
+ "bug_numbers": [1236383],
+ "expires_in_version": "never",
+ "kind": "count",
+ "keyed": true,
+ "releaseChannelCollection": "opt-out",
+ "description": "Exceptions thrown by a Sync engine. Keyed on the engine name."
+ },
+ "CONTENT_DOCUMENTS_DESTROYED": {
+ "expires_in_version": "never",
+ "kind": "count",
+ "description": "Number of content documents destroyed; used in conjunction with use counter histograms"
+ },
+ "TOP_LEVEL_CONTENT_DOCUMENTS_DESTROYED": {
+ "expires_in_version": "never",
+ "kind": "count",
+ "description": "Number of top-level content documents destroyed; used in conjunction with use counter histograms"
+ },
+ "PUSH_API_USED": {
+ "alert_emails": ["push@mozilla.com"],
+ "expires_in_version": "55",
+ "kind": "flag",
+ "description": "A Push API subscribe() operation was performed at least once this session."
+ },
+ "PUSH_API_PERMISSION_REQUESTED": {
+ "alert_emails": ["push@mozilla.com"],
+ "expires_in_version": "55",
+ "kind": "count",
+ "description": "Count of number of times the PermissionManager explicitly prompted user for Push Notifications permission"
+ },
+ "PUSH_API_PERMISSION_DENIED": {
+ "alert_emails": ["push@mozilla.com"],
+ "expires_in_version": "55",
+ "kind": "count",
+ "description": "User explicitly denied Push Notifications permission"
+ },
+ "PUSH_API_PERMISSION_GRANTED": {
+ "alert_emails": ["push@mozilla.com"],
+ "expires_in_version": "55",
+ "kind": "count",
+ "description": "User explicitly granted Push Notifications permission"
+ },
+ "PUSH_API_SUBSCRIBE_ATTEMPT": {
+ "alert_emails": ["push@mozilla.com"],
+ "expires_in_version": "55",
+ "kind": "count",
+ "description": "Push Service attempts to subscribe with Push Server."
+ },
+ "PUSH_API_SUBSCRIBE_FAILED": {
+ "alert_emails": ["push@mozilla.com"],
+ "expires_in_version": "55",
+ "kind": "count",
+ "description": "Attempt to subscribe with Push Server failed."
+ },
+ "PUSH_API_SUBSCRIBE_SUCCEEDED": {
+ "alert_emails": ["push@mozilla.com"],
+ "expires_in_version": "55",
+ "kind": "count",
+ "description": "Attempt to subscribe with Push Server succeeded."
+ },
+ "PUSH_API_UNSUBSCRIBE_ATTEMPT": {
+ "alert_emails": ["push@mozilla.com"],
+ "expires_in_version": "55",
+ "kind": "count",
+ "description": "Push Service attempts to unsubscribe with Push Server."
+ },
+ "PUSH_API_UNSUBSCRIBE_FAILED": {
+ "alert_emails": ["push@mozilla.com"],
+ "expires_in_version": "55",
+ "kind": "count",
+ "description": "Attempt to unsubscribe with Push Server failed."
+ },
+ "PUSH_API_UNSUBSCRIBE_SUCCEEDED": {
+ "alert_emails": ["push@mozilla.com"],
+ "expires_in_version": "55",
+ "kind": "count",
+ "description": "Attempt to unsubscribe with Push Server succeeded."
+ },
+ "PUSH_API_SUBSCRIBE_WS_TIME": {
+ "alert_emails": ["push@mozilla.com"],
+ "expires_in_version": "55",
+ "kind": "exponential",
+ "high": 15000,
+ "n_buckets": 10,
+ "description": "Time taken to subscribe over WebSocket (ms)."
+ },
+ "PUSH_API_SUBSCRIBE_HTTP2_TIME": {
+ "alert_emails": ["push@mozilla.com"],
+ "expires_in_version": "55",
+ "kind": "exponential",
+ "high": 15000,
+ "n_buckets": 10,
+ "description": "Time taken to subscribe over HTTP2 (ms)."
+ },
+ "PUSH_API_QUOTA_EXPIRATION_TIME": {
+ "alert_emails": ["push@mozilla.com"],
+ "expires_in_version": "55",
+ "kind": "exponential",
+ "high": 31622400,
+ "n_buckets": 20,
+    "description": "Time taken for a push subscription to expire its quota (seconds). The maximum is just over a year."
+ },
+ "PUSH_API_QUOTA_RESET_TO": {
+ "alert_emails": ["push@mozilla.com"],
+ "expires_in_version": "55",
+ "kind": "exponential",
+ "high": 200,
+ "n_buckets": 10,
+ "description": "The value a push record quota (a count) is reset to based on the user's browsing history."
+ },
+ "PUSH_API_NOTIFICATION_RECEIVED": {
+ "alert_emails": ["push@mozilla.com"],
+ "expires_in_version": "55",
+ "kind": "count",
+ "description": "Push notification was received from server."
+ },
+ "PUSH_API_NOTIFICATION_RECEIVED_BUT_DID_NOT_NOTIFY": {
+ "alert_emails": ["push@mozilla.com"],
+ "expires_in_version": "55",
+ "kind": "enumerated",
+ "n_values": 16,
+ "description": "Push notification was received from server, but not delivered to ServiceWorker. Enumeration values are defined in dom/push/PushService.jsm as kDROP_NOTIFICATION_REASON_*."
+ },
+ "PUSH_API_NOTIFY": {
+ "releaseChannelCollection": "opt-out",
+ "alert_emails": ["push@mozilla.com"],
+ "expires_in_version": "55",
+ "kind": "count",
+ "description": "Number of push messages that were successfully decrypted and delivered to a ServiceWorker."
+ },
+ "PUSH_API_NOTIFY_REGISTRATION_LOST": {
+ "alert_emails": ["push@mozilla.com"],
+ "expires_in_version": "55",
+ "kind": "count",
+ "description": "Attempt to notify ServiceWorker of push notification resubscription."
+ },
+ "D3D11_SYNC_HANDLE_FAILURE": {
+ "alert_emails": ["gfx-telemetry-alerts@mozilla.com","bschouten@mozilla.com","danderson@mozilla.com","msreckovic@mozilla.com","ashughes@mozilla.com"],
+ "expires_in_version": "60",
+ "releaseChannelCollection": "opt-out",
+ "kind": "count",
+ "description": "Number of times the D3D11 compositor failed to get a texture sync handle."
+ },
+ "GFX_CONTENT_FAILED_TO_ACQUIRE_DEVICE": {
+ "alert_emails": ["gfx-telemetry-alerts@mozilla.com","msreckovic@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 6,
+ "description": "Failed to create a gfx content device. 0=content d3d11, 1=image d3d11, 2=d2d1."
+ },
+ "GFX_CRASH": {
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 100,
+ "releaseChannelCollection": "opt-out",
+ "description": "Graphics Crash Reason (...)"
+ },
+ "PLUGIN_ACTIVATION_COUNT": {
+ "alert_emails": ["cpeterson@mozilla.com"],
+ "expires_in_version": "53",
+ "kind": "count",
+ "keyed": true,
+ "releaseChannelCollection": "opt-out",
+ "bug_numbers": [722110,1260065],
+ "description": "Counts number of times a certain plugin has been activated."
+ },
+ "SCROLL_INPUT_METHODS": {
+ "alert_emails": ["botond@mozilla.com"],
+ "bug_numbers": [1238137],
+ "expires_in_version": "55",
+ "kind": "enumerated",
+ "n_values": 64,
+ "description": "Count of scroll actions triggered by different input methods. See gfx/layers/apz/util/ScrollInputMethods.h for a list of possible values and their meanings."
+ },
+ "WEB_NOTIFICATION_CLICKED": {
+ "releaseChannelCollection": "opt-out",
+ "alert_emails": ["firefox-dev@mozilla.org"],
+ "bug_numbers": [1225336],
+ "expires_in_version": "55",
+ "kind": "count",
+ "description": "Count of times a web notification was clicked"
+ },
+ "WEB_NOTIFICATION_MENU": {
+ "alert_emails": ["firefox-dev@mozilla.org"],
+ "bug_numbers": [1225336],
+ "expires_in_version": "50",
+ "kind": "enumerated",
+ "n_values": 5,
+ "description": "Count of times a contextual menu item was used from a Notification (0: DND, 1: Disable, 2: Settings)"
+ },
+ "WEB_NOTIFICATION_SHOWN": {
+ "releaseChannelCollection": "opt-out",
+ "alert_emails": ["firefox-dev@mozilla.org"],
+ "bug_numbers": [1225336],
+ "expires_in_version": "55",
+ "kind": "count",
+ "description": "Count of times a Notification was rendered (accounting for XUL DND). A system backend may put the notification directly into the tray if its own DND is on."
+ },
+ "WEBFONT_DOWNLOAD_TIME": {
+ "alert_emails": ["jdaggett@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 60000,
+ "n_buckets": 50,
+ "description": "Time to download a webfont (ms)"
+ },
+ "WEBFONT_DOWNLOAD_TIME_AFTER_START": {
+ "alert_emails": ["jdaggett@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 60000,
+ "n_buckets": 50,
+ "description": "Time after navigationStart webfont download completed (ms)"
+ },
+ "WEBFONT_FONTTYPE": {
+ "alert_emails": ["jdaggett@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 10,
+ "description": "Font format type (woff/woff2/ttf/...)"
+ },
+ "WEBFONT_SRCTYPE": {
+ "alert_emails": ["jdaggett@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 5,
+ "description": "Font src type loaded (1 = local, 2 = url, 3 = data)"
+ },
+ "WEBFONT_PER_PAGE": {
+ "alert_emails": ["jdaggett@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "count",
+ "description": "Number of fonts loaded at page load"
+ },
+ "WEBFONT_SIZE_PER_PAGE": {
+ "alert_emails": ["jdaggett@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 5000,
+ "n_buckets": 50,
+ "description": "Size of all fonts loaded at page load (kb)"
+ },
+ "WEBFONT_SIZE": {
+ "alert_emails": ["jdaggett@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 5000,
+ "n_buckets": 50,
+ "description": "Size of font loaded (kb)"
+ },
+ "WEBFONT_COMPRESSION_WOFF": {
+ "alert_emails": ["jdaggett@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 50,
+ "description": "Compression ratio of WOFF data (%)"
+ },
+ "WEBFONT_COMPRESSION_WOFF2": {
+ "alert_emails": ["jdaggett@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 50,
+ "description": "Compression ratio of WOFF2 data (%)"
+ },
+ "WEBRTC_ICE_CHECKING_RATE": {
+ "alert_emails": ["webrtc-ice-telemetry-alerts@mozilla.com"],
+ "expires_in_version": "53",
+ "kind": "boolean",
+ "bug_numbers": [1188391],
+ "description": "The number of ICE connections which immediately failed (0) vs. reached at least checking state (1)."
+ },
+ "ALERTS_SERVICE_DND_ENABLED": {
+ "alert_emails": ["firefox-dev@mozilla.org"],
+ "bug_numbers": [1219030],
+ "expires_in_version": "50",
+ "kind": "boolean",
+ "description": "XUL-only: whether the user has toggled do not disturb."
+ },
+ "ALERTS_SERVICE_DND_SUPPORTED_FLAG": {
+ "alert_emails": ["firefox-dev@mozilla.org"],
+ "bug_numbers": [1219030],
+ "expires_in_version": "50",
+ "kind": "flag",
+ "description": "Whether the do not disturb option is supported. True if the browser uses XUL alerts."
+ },
+ "WEB_NOTIFICATION_EXCEPTIONS_OPENED": {
+ "alert_emails": ["firefox-dev@mozilla.org"],
+ "bug_numbers": [1219030],
+ "expires_in_version": "50",
+ "kind": "count",
+ "description": "Number of times the Notification Permissions dialog has been opened."
+ },
+ "WEB_NOTIFICATION_PERMISSIONS": {
+ "releaseChannelCollection": "opt-out",
+ "alert_emails": ["firefox-dev@mozilla.org"],
+ "bug_numbers": [1219030],
+ "expires_in_version": "55",
+ "kind": "enumerated",
+ "n_values": 10,
+ "description": "Number of origins with the web notifications permission (0 = denied, 1 = allowed)."
+ },
+ "WEB_NOTIFICATION_PERMISSION_REMOVED": {
+ "alert_emails": ["firefox-dev@mozilla.org"],
+ "bug_numbers": [1219030],
+ "expires_in_version": "50",
+ "kind": "enumerated",
+ "n_values": 10,
+ "description": "Number of removed web notifications permissions (0 = remove deny, 1 = remove allow)."
+ },
+ "WEB_NOTIFICATION_SENDERS": {
+ "releaseChannelCollection": "opt-out",
+ "alert_emails": ["firefox-dev@mozilla.org"],
+ "bug_numbers": [1219030],
+ "expires_in_version": "50",
+ "kind": "count",
+ "description": "Number of origins that have shown a web notification. Excludes system alerts like update reminders and add-ons."
+ },
+ "YOUTUBE_REWRITABLE_EMBED_SEEN": {
+ "alert_emails": ["cpeterson@mozilla.com"],
+ "expires_in_version": "53",
+ "kind": "flag",
+ "bug_numbers": [1229971],
+ "releaseChannelCollection": "opt-out",
+ "description": "Flag activated whenever a rewritable youtube flash embed is seen during a session."
+ },
+ "YOUTUBE_NONREWRITABLE_EMBED_SEEN": {
+ "alert_emails": ["cpeterson@mozilla.com"],
+ "expires_in_version": "53",
+ "kind": "flag",
+ "bug_numbers": [1237401],
+ "releaseChannelCollection": "opt-out",
+ "description": "Flag activated whenever a non-rewritable (enablejsapi=1) youtube flash embed is seen during a session."
+ },
+ "PLUGIN_DRAWING_MODEL": {
+ "alert_emails": ["danderson@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "bug_numbers": [1229961],
+ "n_values": 12,
+ "description": "Plugin drawing model. 0 when windowed, otherwise NPDrawingModel + 1."
+ },
+ "WEB_NOTIFICATION_REQUEST_PERMISSION_CALLBACK": {
+ "alert_emails": ["push@mozilla.com"],
+ "expires_in_version": "55",
+ "bug_numbers": [1241278],
+ "kind": "boolean",
+ "description": "Usage of the deprecated Notification.requestPermission() callback argument"
+ },
+ "VIDEO_FASTSEEK_USED": {
+ "alert_emails": ["lchristie@mozilla.com", "cpearce@mozilla.com"],
+ "expires_in_version": "55",
+ "bug_numbers": [1245982],
+ "kind": "count",
+ "description": "Uses of HTMLMediaElement.fastSeek"
+ },
+ "VIDEO_DROPPED_FRAMES_PROPORTION" : {
+ "alert_emails": ["lchristie@mozilla.com", "cpearce@mozilla.com"],
+ "expires_in_version": "55",
+ "kind": "linear",
+ "high": 100,
+ "n_buckets": 50,
+ "bug_numbers": [1238433],
+    "description": "Percentage of decoded frames dropped in an HTMLVideoElement"
+ },
+ "TAB_SWITCH_CACHE_POSITION": {
+ "expires_in_version": "55",
+ "bug_numbers": [1242013],
+ "kind": "linear",
+ "high": 100,
+ "n_buckets": 50,
+ "description": "Position in (theoretical) tab cache of tab being switched to"
+ },
+ "REMOTE_JAR_PROTOCOL_USED": {
+ "alert_emails": ["dev-platform@lists.mozilla.org"],
+ "expires_in_version": "55",
+ "bug_numbers": [1255934],
+ "kind": "count",
+ "description": "Remote JAR protocol usage"
+ },
+ "MEDIA_DECODER_BACKEND_USED": {
+ "alert_emails": ["danderson@mozilla.com"],
+ "bug_numbers": [1259695],
+ "expires_in_version": "never",
+ "kind": "enumerated",
+ "n_values": 10,
+ "description": "Media decoder backend (0=WMF Software, 1=DXVA2D3D9, 2=DXVA2D3D11)"
+ },
+ "PLUGIN_BLOCKED_FOR_STABILITY": {
+ "alert_emails": ["cpeterson@mozilla.com"],
+ "expires_in_version": "52",
+ "kind": "count",
+ "bug_numbers": [1237198],
+ "description": "Count plugins blocked for stability"
+ },
+ "PLUGIN_TINY_CONTENT": {
+ "alert_emails": ["cpeterson@mozilla.com"],
+ "expires_in_version": "52",
+ "kind": "count",
+ "bug_numbers": [1237198],
+ "description": "Count tiny plugin content"
+ },
+ "IPC_MESSAGE_SIZE": {
+ "alert_emails": ["wmccloskey@mozilla.com"],
+ "bug_numbers": [1260908],
+ "expires_in_version": "55",
+ "kind": "exponential",
+ "high": 8000000,
+ "n_buckets": 50,
+ "keyed": true,
+ "description": "Measures the size of IPC messages by message name"
+ },
+ "IPC_REPLY_SIZE": {
+ "alert_emails": ["wmccloskey@mozilla.com"],
+ "bug_numbers": [1264820],
+ "expires_in_version": "55",
+ "kind": "exponential",
+ "high": 8000000,
+ "n_buckets": 50,
+ "keyed": true,
+ "description": "Measures the size of IPC messages by message name"
+ },
+ "MESSAGE_MANAGER_MESSAGE_SIZE2": {
+ "alert_emails": ["wmccloskey@mozilla.com","amccreight@mozilla.com"],
+ "bug_numbers": [1260908],
+ "expires_in_version": "55",
+ "kind": "exponential",
+ "low": 8192,
+ "high": 8000000,
+ "n_buckets": 50,
+ "keyed": true,
+ "description": "Each key is the message name, with digits removed, from an async message manager message that was larger than 8192 bytes, recorded in the sending process at the time of sending."
+ },
+ "REJECTED_MESSAGE_MANAGER_MESSAGE": {
+ "alert_emails": ["amccreight@mozilla.com"],
+ "bug_numbers": [1272423],
+ "expires_in_version": "55",
+ "kind": "count",
+ "keyed": true,
+ "description": "Each key is the message name, with digits removed, from an async message manager message that was rejected for being over approximately 128MB, recorded in the sending process at the time of sending."
+ },
+ "SANDBOX_BROKER_INITIALIZED": {
+ "alert_emails": ["bowen@mozilla.com"],
+ "bug_numbers": [1256992],
+ "expires_in_version": "55",
+ "kind": "boolean",
+ "description": "Result of call to SandboxBroker::Initialize"
+ },
+ "SANDBOX_HAS_SECCOMP_BPF": {
+ "alert_emails": ["gcp@mozilla.com"],
+ "bug_numbers": [1098428],
+ "expires_in_version": "55",
+ "kind": "boolean",
+ "cpp_guard": "XP_LINUX",
+ "description": "Whether the system has seccomp-bpf capability"
+ },
+ "SANDBOX_HAS_SECCOMP_TSYNC": {
+ "alert_emails": ["gcp@mozilla.com"],
+ "bug_numbers": [1098428],
+ "expires_in_version": "55",
+ "kind": "boolean",
+ "cpp_guard": "XP_LINUX",
+ "description": "Whether the system has seccomp-bpf thread-sync capability"
+ },
+ "SANDBOX_HAS_USER_NAMESPACES": {
+ "alert_emails": ["gcp@mozilla.com"],
+ "bug_numbers": [1098428],
+ "expires_in_version": "55",
+ "kind": "boolean",
+ "cpp_guard": "XP_LINUX",
+    "description": "Whether our process succeeded in creating a user namespace"
+ },
+ "SANDBOX_HAS_USER_NAMESPACES_PRIVILEGED": {
+ "alert_emails": ["gcp@mozilla.com"],
+ "bug_numbers": [1098428],
+ "expires_in_version": "55",
+ "kind": "boolean",
+ "cpp_guard": "XP_LINUX",
+ "description": "Whether the system has the capability to create privileged user namespaces"
+ },
+ "SANDBOX_MEDIA_ENABLED": {
+ "alert_emails": ["gcp@mozilla.com"],
+ "bug_numbers": [1098428],
+ "expires_in_version": "55",
+ "kind": "boolean",
+ "cpp_guard": "XP_LINUX",
+ "description": "Whether the sandbox is enabled for media/GMP plugins"
+ },
+ "SANDBOX_CONTENT_ENABLED": {
+ "alert_emails": ["gcp@mozilla.com"],
+ "bug_numbers": [1098428],
+ "expires_in_version": "55",
+ "kind": "boolean",
+ "cpp_guard": "XP_LINUX",
+ "description": "Whether the sandbox is enabled for the content process"
+ },
+ "SYNC_WORKER_OPERATION": {
+ "alert_emails": ["amarchesini@mozilla.com", "khuey@mozilla.com" ],
+ "bug_numbers": [1267904],
+ "expires_in_version": "never",
+ "kind": "exponential",
+ "high": 5000,
+ "n_buckets": 20,
+ "keyed": true,
+ "description": "Tracking how long a Worker thread is blocked when a sync operation is executed on the main-thread."
+ },
+ "SUBPROCESS_KILL_HARD": {
+ "alert_emails": ["wmccloskey@mozilla.com"],
+ "bug_numbers": [1269961],
+ "expires_in_version": "never",
+ "kind": "count",
+ "keyed": true,
+ "releaseChannelCollection": "opt-out",
+ "description": "Counts the number of times a subprocess was forcibly killed, and the reason."
+ },
+ "FX_CONTENT_CRASH_DUMP_UNAVAILABLE": {
+ "alert_emails": ["wmccloskey@mozilla.com"],
+ "bug_numbers": [1269961],
+ "expires_in_version": "never",
+ "kind": "count",
+ "releaseChannelCollection": "opt-out",
+ "description": "Counts the number of times that about:tabcrashed was unable to find a crash dump."
+ },
+ "FX_CONTENT_CRASH_PRESENTED": {
+ "alert_emails": ["wmccloskey@mozilla.com"],
+ "bug_numbers": [1269961],
+ "expires_in_version": "never",
+ "kind": "count",
+ "releaseChannelCollection": "opt-out",
+ "description": "Counts the number of times that about:tabcrashed appeared and found a crash dump."
+ },
+ "FX_CONTENT_CRASH_NOT_SUBMITTED": {
+ "alert_emails": ["wmccloskey@mozilla.com"],
+ "bug_numbers": [1269961],
+ "expires_in_version": "never",
+ "kind": "count",
+ "releaseChannelCollection": "opt-out",
+ "description": "Counts the number of times that about:tabcrashed was unloaded without submitting."
+ },
+ "ABOUTCRASHES_OPENED_COUNT": {
+ "alert_emails": ["gfx-telemetry-alerts@mozilla.com","bgirard@mozilla.com","msreckovic@mozilla.com"],
+ "expires_in_version": "55",
+ "kind": "count",
+ "bug_numbers": [1276714, 1276716],
+ "description": "Number of times about:crashes has been opened.",
+ "releaseChannelCollection": "opt-out"
+ },
+ "D3D9_COMPOSITING_FAILURE_ID": {
+ "alert_emails": ["bgirard@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "count",
+ "keyed": true,
+ "description": "D3D9 compositor runtime and dynamic failure IDs. This will record a count for each context creation success or failure. Each failure id is a unique identifier that can be traced back to a particular failure branch or blocklist rule.",
+ "bug_numbers": [1002846]
+ },
+ "D3D11_COMPOSITING_FAILURE_ID": {
+ "alert_emails": ["bgirard@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "count",
+ "keyed": true,
+ "description": "D3D11 compositor runtime and dynamic failure IDs. This will record a count for each context creation success or failure. Each failure id is a unique identifier that can be traced back to a particular failure branch or blocklist rule.",
+ "bug_numbers": [1002846]
+ },
+ "OPENGL_COMPOSITING_FAILURE_ID": {
+ "alert_emails": ["bgirard@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "count",
+ "keyed": true,
+ "description": "OpenGL compositor runtime and dynamic failure IDs. This will record a count for each context creation success or failure. Each failure id is a unique identifier that can be traced back to a particular failure branch or blocklist rule.",
+ "bug_numbers": [1002846]
+ },
+ "XHR_IN_WORKER": {
+ "alert_emails": ["amarchesini@mozilla.com"],
+ "expires_in_version": "55",
+ "kind": "count",
+ "bug_numbers": [1280229],
+ "description": "Number of the use of XHR in workers."
+ },
+ "WEBVTT_TRACK_KINDS": {
+ "alert_emails": ["alwu@mozilla.com"],
+ "expires_in_version": "55",
+ "kind": "enumerated",
+ "n_values": 10,
+ "bug_numbers": [1280644],
+ "description": "Number of the use of the subtitles kind track. 0=Subtitles, 1=Captions, 2=Descriptions, 3=Chapters, 4=Metadata, 5=Undefined Error",
+ "releaseChannelCollection": "opt-out"
+ },
+ "WEBVTT_USED_VTT_CUES": {
+ "alert_emails": ["alwu@mozilla.com"],
+ "expires_in_version": "55",
+ "kind": "count",
+ "bug_numbers": [1280644],
+ "description": "Number of the use of the vtt cues.",
+ "releaseChannelCollection": "opt-out"
+ },
+ "BLINK_FILESYSTEM_USED": {
+ "alert_emails": ["amarchesini@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "boolean",
+ "bug_numbers": [1272501],
+ "releaseChannelCollection": "opt-out",
+ "description": "Webkit/Blink filesystem used"
+ },
+ "WEBKIT_DIRECTORY_USED": {
+ "alert_emails": ["amarchesini@mozilla.com"],
+ "expires_in_version": "never",
+ "kind": "boolean",
+ "bug_numbers": [1272501],
+ "releaseChannelCollection": "opt-out",
+ "description": "HTMLInputElement.webkitdirectory attribute used"
+ },
+ "CONTAINER_USED": {
+ "alert_emails": ["amarchesini@mozilla.com"],
+ "expires_in_version": "55",
+ "kind": "enumerated",
+ "bug_numbers": [1276006],
+ "n_values": 5,
+ "description": "Records a value each time a builtin container is opened. 1=personal 2=work 3=banking 4=shopping. Does not record usage of user-created containers."
+ },
+ "UNIQUE_CONTAINERS_OPENED": {
+ "alert_emails": ["amarchesini@mozilla.com"],
+ "expires_in_version": "never",
+ "bug_numbers": [1276006],
+ "kind": "count",
+ "description": "Tracking the unique number of opened Containers."
+ },
+ "TOTAL_CONTAINERS_OPENED": {
+ "alert_emails": ["amarchesini@mozilla.com"],
+ "expires_in_version": "never",
+ "bug_numbers": [1276006],
+ "kind": "count",
+ "description": "Tracking the total number of opened Containers."
+ },
+ "FENNEC_SESSIONSTORE_DAMAGED_SESSION_FILE": {
+ "alert_emails": ["jh+bugzilla@buttercookie.de"],
+ "expires_in_version": "56",
+ "kind": "flag",
+ "bug_numbers": [1284017],
+ "description": "When restoring tabs on startup, reading from sessionstore.js failed, even though the file exists and does not contain an explicitly empty window.",
+ "cpp_guard": "ANDROID"
+ },
+ "SHARED_WORKER_COUNT": {
+ "alert_emails": ["amarchesini@mozilla.com"],
+ "expires_in_version": "58",
+ "kind": "count",
+ "bug_numbers": [1295980],
+ "description": "Number of uses of SharedWorkers."
+ },
+ "FENNEC_SESSIONSTORE_RESTORING_FROM_BACKUP": {
+ "alert_emails": ["jh+bugzilla@buttercookie.de"],
+ "expires_in_version": "56",
+ "kind": "flag",
+ "bug_numbers": [1190627],
+ "description": "When restoring tabs on startup, reading from sessionstore.js failed, but sessionstore.bak was read successfully.",
+ "cpp_guard": "ANDROID"
+ },
+ "NUMBER_OF_PROFILES": {
+ "alert_emails": ["amarchesini@mozilla.com"],
+ "expires_in_version": "58",
+ "bug_numbers": [1296606],
+ "kind": "count",
+ "description": "Number of named browser profiles for the current user, as reported by the profile service at startup."
+ },
+ "WEB_PERMISSION_CLEARED": {
+ "alert_emails": ["firefox-dev@mozilla.org"],
+ "bug_numbers": [1286118],
+ "expires_in_version": "55",
+ "kind": "enumerated",
+ "keyed": true,
+ "n_values": 6,
+ "description": "Number of revoke actions on permissions in the control center, keyed by permission id. Values represent the permission type that was revoked. (0=unknown, 1=permanently allowed, 2=permanently blocked, 3=temporarily allowed, 4=temporarily blocked)"
+ },
+ "JS_AOT_USAGE": {
+ "alert_emails": ["luke@mozilla.com", "bbouvier@mozilla.com"],
+ "bug_numbers": [1288778],
+ "expires_in_version": "56",
+ "kind": "enumerated",
+ "n_values": 4,
+ "description": "Counts the number of asm.js vs WebAssembly module instantiations, at the time modules are getting instantiated."
+ },
+ "DOCUMENT_WITH_EXPANDED_PRINCIPAL": {
+ "alert_emails": ["dev-platform@lists.mozilla.org"],
+ "bug_numbers": [1301123],
+ "expires_in_version": "58",
+ "kind": "count",
+ "description": "Number of documents encountered using an expanded principal."
+ },
+ "CONTENT_PAINT_TIME": {
+ "alert_emails": ["danderson@mozilla.com"],
+ "bug_numbers": [1309442],
+ "expires_in_version": "56",
+ "kind": "exponential",
+ "high": 1000,
+ "n_buckets": 50,
+ "description": "Time spent in the paint pipeline for content."
+ },
+ "CONTENT_LARGE_PAINT_PHASE_WEIGHT": {
+ "alert_emails": ["danderson@mozilla.com"],
+ "bug_numbers": [1309442],
+ "expires_in_version": "56",
+ "keyed": true,
+ "kind": "linear",
+ "high": 100,
+ "n_buckets": 10,
+ "description": "Percentage of time taken by phases in expensive content paints."
+ },
+ "NARRATE_CONTENT_BY_LANGUAGE_2": {
+ "alert_emails": ["eisaacson@mozilla.com"],
+ "bug_numbers": [1308030, 1324868],
+ "releaseChannelCollection": "opt-out",
+ "expires_in_version": "56",
+ "kind": "enumerated",
+ "keyed": true,
+ "n_values": 4,
+ "description": "Number of Narrate initialization attempts and successes broken up by content's language (ISO 639-1 code) (0 = initialization attempt, 1 = successfully initialized)"
+ },
+ "NARRATE_CONTENT_SPEAKTIME_MS": {
+ "alert_emails": ["eisaacson@mozilla.com"],
+ "bug_numbers": [1308030],
+ "releaseChannelCollection": "opt-out",
+ "expires_in_version": "56",
+ "kind": "linear",
+ "high": 300000,
+ "n_buckets": 30,
+ "description": "Time in MS that content is narrated in 10 second increments up to 5 minutes"
+ },
+ "HANDLE_UNLOAD_MS": {
+ "alert_emails": ["kchen@mozilla.com"],
+ "expires_in_version": "55",
+ "kind": "exponential",
+ "high": 10000,
+ "n_buckets": 50,
+ "bug_numbers": [1301346],
+ "description": "The time spent handling unload event in milliseconds. It measures all documents and subframes separately. If there are multiple handlers for the unload event in a document, this will record a single value across all handlers in the document."
+ },
+ "HANDLE_BEFOREUNLOAD_MS": {
+ "alert_emails": ["kchen@mozilla.com"],
+ "expires_in_version": "55",
+ "kind": "exponential",
+ "high": 10000,
+ "n_buckets": 50,
+ "bug_numbers": [1301346],
+ "description": "The time spent handling beforeunload event in milliseconds. It measures all documents and subframes separately. If there are multiple handlers for the unload event in a document, this will record a single value across all handlers in the document."
+ },
+ "TABCHILD_PAINT_TIME": {
+ "alert_emails": ["mconley@mozilla.com"],
+ "bug_numbers": [1313686],
+ "expires_in_version": "56",
+ "kind": "exponential",
+ "high": 1000,
+ "n_buckets": 50,
+ "description": "Time spent painting the contents of a remote browser (ms).",
+ "releaseChannelCollection": "opt-out"
+ },
+ "TIME_TO_NON_BLANK_PAINT_MS": {
+ "alert_emails": ["hkirschner@mozilla.com"],
+ "expires_in_version": "55",
+ "kind": "exponential",
+ "high": 100000,
+ "n_buckets": 100,
+ "bug_numbers": [1307242],
+ "description": "The time between navigation start and the first non-blank paint of a foreground root content document, in milliseconds. This only records documents that were in an active docshell throughout the whole time between navigation start and non-blank paint. The non-blank paint timestamp is taken during display list building and does not include rasterization or compositing of that paint."
+ },
+ "MOZ_BLOB_IN_XHR": {
+ "alert_emails": ["amarchesini@mozilla.com"],
+ "expires_in_version": "58",
+ "kind": "boolean",
+ "bug_numbers": [1335365],
+ "releaseChannelCollection": "opt-out",
+ "description": "XMLHttpRequest.responseType set to moz-blob"
+ },
+ "MOZ_CHUNKED_TEXT_IN_XHR": {
+ "alert_emails": ["amarchesini@mozilla.com"],
+ "expires_in_version": "58",
+ "kind": "boolean",
+ "bug_numbers": [1335365],
+ "releaseChannelCollection": "opt-out",
+ "description": "XMLHttpRequest.responseType set to moz-chunked-text"
+ },
+ "MOZ_CHUNKED_ARRAYBUFFER_IN_XHR": {
+ "alert_emails": ["amarchesini@mozilla.com"],
+ "expires_in_version": "58",
+ "kind": "boolean",
+ "bug_numbers": [1335365],
+ "releaseChannelCollection": "opt-out",
+ "description": "XMLHttpRequest.responseType set to moz-chunked-arraybuffer"
+ }
+}
diff --git a/toolkit/components/telemetry/Makefile.in b/toolkit/components/telemetry/Makefile.in
new file mode 100644
index 000000000..52016707c
--- /dev/null
+++ b/toolkit/components/telemetry/Makefile.in
@@ -0,0 +1,17 @@
+#
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+include $(topsrcdir)/config/rules.mk
+
+# This is so hacky. Waiting on bug 988938.
+addondir = $(srcdir)/tests/addons
+testdir = $(topobjdir)/_tests/xpcshell/toolkit/components/telemetry/tests/unit
+
+misc:: $(call mkdir_deps,$(testdir))
+ $(EXIT_ON_ERROR) \
+ for dir in $(addondir)/*; do \
+ base=`basename $$dir`; \
+ (cd $$dir && zip -qr $(testdir)/$$base.xpi *); \
+ done
diff --git a/toolkit/components/telemetry/ProcessedStack.h b/toolkit/components/telemetry/ProcessedStack.h
new file mode 100644
index 000000000..2bda55007
--- /dev/null
+++ b/toolkit/components/telemetry/ProcessedStack.h
@@ -0,0 +1,63 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2; -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef ProcessedStack_h__
+#define ProcessedStack_h__
+
+#include <string>
+#include <vector>
+
+namespace mozilla {
+namespace Telemetry {
+
+// This class represents a stack trace and the modules referenced in that trace.
+// It is designed to be easy to read and write to disk or network and doesn't
+// include any logic on how to collect or read the information it stores.
+class ProcessedStack
+{
+public:
+ ProcessedStack();
+ size_t GetStackSize() const;
+ size_t GetNumModules() const;
+
+ struct Frame
+ {
+ // The offset of this program counter in its module or an absolute pc.
+ uintptr_t mOffset;
+ // The index to pass to GetModule to get the module this program counter
+ // was in.
+ uint16_t mModIndex;
+ };
+ struct Module
+ {
+ // The file name, /foo/bar/libxul.so for example.
+ std::string mName;
+ std::string mBreakpadId;
+
+ bool operator==(const Module& other) const;
+ };
+
+ const Frame &GetFrame(unsigned aIndex) const;
+ void AddFrame(const Frame& aFrame);
+ const Module &GetModule(unsigned aIndex) const;
+ void AddModule(const Module& aFrame);
+
+ void Clear();
+
+private:
+ std::vector<Module> mModules;
+ std::vector<Frame> mStack;
+};
+
+// Get the current list of loaded modules, filter and pair it to the provided
+// stack. We let the caller collect the stack since different callers have
+// different needs (current thread X main thread, stopping the thread, etc).
+ProcessedStack
+GetStackAndModules(const std::vector<uintptr_t> &aPCs);
+
+} // namespace Telemetry
+} // namespace mozilla
+
+#endif // ProcessedStack_h__
diff --git a/toolkit/components/telemetry/ScalarInfo.h b/toolkit/components/telemetry/ScalarInfo.h
new file mode 100644
index 000000000..6c9d8aade
--- /dev/null
+++ b/toolkit/components/telemetry/ScalarInfo.h
@@ -0,0 +1,27 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2; -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef TelemetryScalarInfo_h__
+#define TelemetryScalarInfo_h__
+
+// This module is internal to Telemetry. It defines a structure that holds the
+// scalar info. It should only be used by TelemetryScalarData.h automatically
+// generated file and TelemetryScalar.cpp. This should not be used anywhere else.
+// For the public interface to Telemetry functionality, see Telemetry.h.
+
+namespace {
+struct ScalarInfo {
+ uint32_t kind;
+ uint32_t name_offset;
+ uint32_t expiration_offset;
+ uint32_t dataset;
+ bool keyed;
+
+ const char *name() const;
+ const char *expiration() const;
+};
+} // namespace
+
+#endif // TelemetryScalarInfo_h__
diff --git a/toolkit/components/telemetry/Scalars.yaml b/toolkit/components/telemetry/Scalars.yaml
new file mode 100644
index 000000000..e95819879
--- /dev/null
+++ b/toolkit/components/telemetry/Scalars.yaml
@@ -0,0 +1,298 @@
+# This file contains a definition of the scalar probes that are recorded in Telemetry.
+# They are submitted with the "main" pings and can be inspected in about:telemetry.
+
+# The following section contains the aushelper system add-on scalars.
+aushelper:
+ websense_reg_version:
+ bug_numbers:
+ - 1305847
+ description: The Websense version from the Windows registry.
+ expires: "60"
+ kind: string
+ notification_emails:
+ - application-update-telemetry-alerts@mozilla.com
+ release_channel_collection: opt-out
+
+# The following section contains the browser engagement scalars.
+browser.engagement:
+ max_concurrent_tab_count:
+ bug_numbers:
+ - 1271304
+ description: >
+ The count of maximum number of tabs open during a subsession,
+ across all windows, including tabs in private windows and restored
+ at startup.
+ expires: "55"
+ kind: uint
+ notification_emails:
+ - rweiss@mozilla.com
+ release_channel_collection: opt-out
+
+ tab_open_event_count:
+ bug_numbers:
+ - 1271304
+ description: >
+ The count of tab open events per subsession, across all windows, after the
+ session has been restored. This includes tab open events from private windows
+ and from manual session restorations (i.e. after crashes and from about:home).
+ expires: "55"
+ kind: uint
+ notification_emails:
+ - rweiss@mozilla.com
+ release_channel_collection: opt-out
+
+ max_concurrent_window_count:
+ bug_numbers:
+ - 1271304
+ description: >
+ The count of maximum number of browser windows open during a subsession. This
+ includes private windows and the ones opened when starting the browser.
+ expires: "55"
+ kind: uint
+ notification_emails:
+ - rweiss@mozilla.com
+ release_channel_collection: opt-out
+
+ window_open_event_count:
+ bug_numbers:
+ - 1271304
+ description: >
+ The count of browser window open events per subsession, after the session
+ has been restored. The count includes private windows and the ones from manual
+ session restorations (i.e. after crashes and from about:home).
+ expires: "55"
+ kind: uint
+ notification_emails:
+ - rweiss@mozilla.com
+ release_channel_collection: opt-out
+
+ total_uri_count:
+ bug_numbers:
+ - 1271313
+ description: >
+ The count of the total non-unique http(s) URIs visited in a subsession, including
+ page reloads, after the session has been restored. This does not include background
+ page requests and URIs from embedded pages or private browsing.
+ expires: "55"
+ kind: uint
+ notification_emails:
+ - rweiss@mozilla.com
+ release_channel_collection: opt-out
+
+ unfiltered_uri_count:
+ bug_numbers:
+ - 1304647
+ description: >
+ The count of the total non-unique URIs visited in a subsession, not restricted to
+ a specific protocol, including page reloads and about:* pages (other than initial
+ pages such as about:blank, ...), after the session has been restored. This does
+ not include background page requests and URIs from embedded pages or private browsing.
+ expires: "55"
+ kind: uint
+ notification_emails:
+ - bcolloran@mozilla.com
+ release_channel_collection: opt-out
+
+ unique_domains_count:
+ bug_numbers:
+ - 1271310
+ description: >
+ The count of the unique domains visited in a subsession, after the session
+ has been restored. Subdomains under eTLD are aggregated after the first level
+ (i.e. test.example.com and other.example.com are only counted once).
+ This does not include background page requests and domains from embedded pages
+ or private browsing. The count is limited to 100 unique domains.
+ expires: "55"
+ kind: uint
+ notification_emails:
+ - rweiss@mozilla.com
+ release_channel_collection: opt-out
+
+# The following section contains the browser engagement scalars.
+browser.engagement.navigation:
+ urlbar:
+ bug_numbers:
+ - 1271313
+ description: >
+ The count of URI loads triggered in a subsession from the urlbar (awesomebar),
+ broken down by the originating action.
+ expires: "55"
+ kind: uint
+ keyed: true
+ notification_emails:
+ - bcolloran@mozilla.com
+ release_channel_collection: opt-out
+
+ searchbar:
+ bug_numbers:
+ - 1271313
+ description: >
+ The count of URI loads triggered in a subsession from the searchbar,
+ broken down by the originating action.
+ expires: "55"
+ kind: uint
+ keyed: true
+ notification_emails:
+ - bcolloran@mozilla.com
+ release_channel_collection: opt-out
+
+ about_home:
+ bug_numbers:
+ - 1271313
+ description: >
+ The count of URI loads triggered in a subsession from about:home,
+ broken down by the originating action.
+ expires: "55"
+ kind: uint
+ keyed: true
+ notification_emails:
+ - bcolloran@mozilla.com
+ release_channel_collection: opt-out
+
+ about_newtab:
+ bug_numbers:
+ - 1271313
+ description: >
+ The count of URI loads triggered in a subsession from about:newtab,
+ broken down by the originating action.
+ expires: "55"
+ kind: uint
+ keyed: true
+ notification_emails:
+ - bcolloran@mozilla.com
+ release_channel_collection: opt-out
+
+ contextmenu:
+ bug_numbers:
+ - 1271313
+ description: >
+ The count of URI loads triggered in a subsession from the contextmenu,
+ broken down by the originating action.
+ expires: "55"
+ kind: uint
+ keyed: true
+ notification_emails:
+ - bcolloran@mozilla.com
+ release_channel_collection: opt-out
+
+# The following section is for probes testing the Telemetry system. They will not be
+# submitted in pings and are only used for testing.
+telemetry.test:
+ unsigned_int_kind:
+ bug_numbers:
+ - 1276190
+ description: >
+ This is a test uint type with a really long description, maybe spanning even multiple
+ lines, to just prove a point: everything works just fine.
+ expires: never
+ kind: uint
+ notification_emails:
+ - telemetry-client-dev@mozilla.com
+
+ string_kind:
+ bug_numbers:
+ - 1276190
+ description: A string test type with a one line comment that works just fine!
+ expires: never
+ kind: string
+ notification_emails:
+ - telemetry-client-dev@mozilla.com
+
+ boolean_kind:
+ bug_numbers:
+ - 1281214
+ description: A boolean test type with a one line comment that works just fine!
+ expires: never
+ kind: boolean
+ notification_emails:
+ - telemetry-client-dev@mozilla.com
+
+ expired:
+ bug_numbers:
+ - 1276190
+ description: This is an expired testing scalar; not meant to be touched.
+ expires: 4.0a1
+ kind: uint
+ notification_emails:
+ - telemetry-client-dev@mozilla.com
+
+ unexpired:
+ bug_numbers:
+ - 1276190
+ description: This is an unexpired testing scalar; not meant to be touched.
+ expires: "375.0"
+ kind: uint
+ notification_emails:
+ - telemetry-client-dev@mozilla.com
+
+ release_optin:
+ bug_numbers:
+ - 1276190
+ description: A testing scalar; not meant to be touched.
+ expires: never
+ kind: uint
+ notification_emails:
+ - telemetry-client-dev@mozilla.com
+ release_channel_collection: opt-in
+
+ release_optout:
+ bug_numbers:
+ - 1276190
+ description: A testing scalar; not meant to be touched.
+ expires: never
+ kind: uint
+ notification_emails:
+ - telemetry-client-dev@mozilla.com
+ release_channel_collection: opt-out
+
+ keyed_release_optin:
+ bug_numbers:
+ - 1277806
+ description: A testing scalar; not meant to be touched.
+ expires: never
+ kind: uint
+ keyed: true
+ notification_emails:
+ - telemetry-client-dev@mozilla.com
+ release_channel_collection: opt-in
+
+ keyed_release_optout:
+ bug_numbers:
+ - 1277806
+ description: A testing scalar; not meant to be touched.
+ expires: never
+ kind: uint
+ keyed: true
+ notification_emails:
+ - telemetry-client-dev@mozilla.com
+ release_channel_collection: opt-out
+
+ keyed_expired:
+ bug_numbers:
+ - 1277806
+ description: This is an expired testing scalar; not meant to be touched.
+ expires: 4.0a1
+ kind: uint
+ keyed: true
+ notification_emails:
+ - telemetry-client-dev@mozilla.com
+
+ keyed_unsigned_int:
+ bug_numbers:
+ - 1277806
+ description: A testing keyed uint scalar; not meant to be touched.
+ expires: never
+ kind: uint
+ keyed: true
+ notification_emails:
+ - telemetry-client-dev@mozilla.com
+
+ keyed_boolean_kind:
+ bug_numbers:
+ - 1277806
+ description: A testing keyed boolean scalar; not meant to be touched.
+ expires: never
+ kind: boolean
+ keyed: true
+ notification_emails:
+ - telemetry-client-dev@mozilla.com
diff --git a/toolkit/components/telemetry/Telemetry.cpp b/toolkit/components/telemetry/Telemetry.cpp
new file mode 100644
index 000000000..ad2263c9b
--- /dev/null
+++ b/toolkit/components/telemetry/Telemetry.cpp
@@ -0,0 +1,3076 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include <algorithm>
+
+#include <fstream>
+
+#include <prio.h>
+
+#include "mozilla/dom/ToJSValue.h"
+#include "mozilla/Atomics.h"
+#include "mozilla/Attributes.h"
+#include "mozilla/DebugOnly.h"
+#include "mozilla/Likely.h"
+#include "mozilla/MathAlgorithms.h"
+#include "mozilla/Unused.h"
+
+#include "base/pickle.h"
+#include "nsIComponentManager.h"
+#include "nsIServiceManager.h"
+#include "nsThreadManager.h"
+#include "nsCOMArray.h"
+#include "nsCOMPtr.h"
+#include "nsXPCOMPrivate.h"
+#include "nsIXULAppInfo.h"
+#include "nsVersionComparator.h"
+#include "mozilla/MemoryReporting.h"
+#include "mozilla/ModuleUtils.h"
+#include "nsIXPConnect.h"
+#include "mozilla/Services.h"
+#include "jsapi.h"
+#include "jsfriendapi.h"
+#include "js/GCAPI.h"
+#include "nsString.h"
+#include "nsITelemetry.h"
+#include "nsIFile.h"
+#include "nsIFileStreams.h"
+#include "nsIMemoryReporter.h"
+#include "nsISeekableStream.h"
+#include "Telemetry.h"
+#include "TelemetryCommon.h"
+#include "TelemetryHistogram.h"
+#include "TelemetryScalar.h"
+#include "TelemetryEvent.h"
+#include "WebrtcTelemetry.h"
+#include "nsTHashtable.h"
+#include "nsHashKeys.h"
+#include "nsBaseHashtable.h"
+#include "nsClassHashtable.h"
+#include "nsXULAppAPI.h"
+#include "nsReadableUtils.h"
+#include "nsThreadUtils.h"
+#if defined(XP_WIN)
+#include "nsUnicharUtils.h"
+#endif
+#include "nsNetCID.h"
+#include "nsNetUtil.h"
+#include "nsJSUtils.h"
+#include "nsReadableUtils.h"
+#include "plstr.h"
+#include "nsAppDirectoryServiceDefs.h"
+#include "mozilla/BackgroundHangMonitor.h"
+#include "mozilla/ThreadHangStats.h"
+#include "mozilla/ProcessedStack.h"
+#include "mozilla/Mutex.h"
+#include "mozilla/FileUtils.h"
+#include "mozilla/Preferences.h"
+#include "mozilla/StaticPtr.h"
+#include "mozilla/IOInterposer.h"
+#include "mozilla/PoisonIOInterposer.h"
+#include "mozilla/StartupTimeline.h"
+#include "mozilla/HangMonitor.h"
+#if defined(MOZ_ENABLE_PROFILER_SPS)
+#include "shared-libraries.h"
+#endif
+
+namespace {
+
+using namespace mozilla;
+using namespace mozilla::HangMonitor;
+using Telemetry::Common::AutoHashtable;
+
+// The maximum number of chrome hangs stacks that we're keeping.
+const size_t kMaxChromeStacksKept = 50;
+// The maximum depth of a single chrome hang stack.
+const size_t kMaxChromeStackDepth = 50;
+
+// This class is conceptually a list of ProcessedStack objects, but it represents them
+// more efficiently by keeping a single global list of modules.
+class CombinedStacks {
+public:
+ CombinedStacks() : mNextIndex(0) {}
+ typedef std::vector<Telemetry::ProcessedStack::Frame> Stack;
+ const Telemetry::ProcessedStack::Module& GetModule(unsigned aIndex) const;
+ size_t GetModuleCount() const;
+ const Stack& GetStack(unsigned aIndex) const;
+ size_t AddStack(const Telemetry::ProcessedStack& aStack);
+ size_t GetStackCount() const;
+ size_t SizeOfExcludingThis() const;
+private:
+ std::vector<Telemetry::ProcessedStack::Module> mModules;
+ // A circular buffer to hold the stacks.
+ std::vector<Stack> mStacks;
+ // The index of the next buffer element to write to in mStacks.
+ size_t mNextIndex;
+};
+
+static JSObject *
+CreateJSStackObject(JSContext *cx, const CombinedStacks &stacks);
+
+size_t
+CombinedStacks::GetModuleCount() const {
+ return mModules.size();
+}
+
+const Telemetry::ProcessedStack::Module&
+CombinedStacks::GetModule(unsigned aIndex) const {
+ return mModules[aIndex];
+}
+
+size_t
+CombinedStacks::AddStack(const Telemetry::ProcessedStack& aStack) {
+ // Advance the indices of the circular queue holding the stacks.
+ size_t index = mNextIndex++ % kMaxChromeStacksKept;
+ // Grow the vector up to the maximum size, if needed.
+ if (mStacks.size() < kMaxChromeStacksKept) {
+ mStacks.resize(mStacks.size() + 1);
+ }
+ // Get a reference to the location holding the new stack.
+ CombinedStacks::Stack& adjustedStack = mStacks[index];
+ // If we're using an old stack to hold aStack, clear it.
+ adjustedStack.clear();
+
+ size_t stackSize = aStack.GetStackSize();
+ for (size_t i = 0; i < stackSize; ++i) {
+ const Telemetry::ProcessedStack::Frame& frame = aStack.GetFrame(i);
+ uint16_t modIndex;
+ if (frame.mModIndex == std::numeric_limits<uint16_t>::max()) {
+ modIndex = frame.mModIndex;
+ } else {
+ const Telemetry::ProcessedStack::Module& module =
+ aStack.GetModule(frame.mModIndex);
+ std::vector<Telemetry::ProcessedStack::Module>::iterator modIterator =
+ std::find(mModules.begin(), mModules.end(), module);
+ if (modIterator == mModules.end()) {
+ mModules.push_back(module);
+ modIndex = mModules.size() - 1;
+ } else {
+ modIndex = modIterator - mModules.begin();
+ }
+ }
+ Telemetry::ProcessedStack::Frame adjustedFrame = { frame.mOffset, modIndex };
+ adjustedStack.push_back(adjustedFrame);
+ }
+ return index;
+}
+
+const CombinedStacks::Stack&
+CombinedStacks::GetStack(unsigned aIndex) const {
+ return mStacks[aIndex];
+}
+
+size_t
+CombinedStacks::GetStackCount() const {
+ return mStacks.size();
+}
+
+size_t
+CombinedStacks::SizeOfExcludingThis() const {
+ // This is a crude approximation. We would like to do something like
+ // aMallocSizeOf(&mModules[0]), but on linux aMallocSizeOf will call
+ // malloc_usable_size which is only safe on the pointers returned by malloc.
+ // While it works on current libstdc++, it is better to be safe and not assume
+ // that &vec[0] points to one. We could use a custom allocator, but
+ // it doesn't seem worth it.
+ size_t n = 0;
+ n += mModules.capacity() * sizeof(Telemetry::ProcessedStack::Module);
+ n += mStacks.capacity() * sizeof(Stack);
+ for (std::vector<Stack>::const_iterator i = mStacks.begin(),
+ e = mStacks.end(); i != e; ++i) {
+ const Stack& s = *i;
+ n += s.capacity() * sizeof(Telemetry::ProcessedStack::Frame);
+ }
+ return n;
+}
+
+// This utility function generates a string key that is used to index the annotations
+// in a hash map from |HangReports::AddHang|.
+nsresult
+ComputeAnnotationsKey(const HangAnnotationsPtr& aAnnotations, nsAString& aKeyOut)
+{
+ UniquePtr<HangAnnotations::Enumerator> annotationsEnum = aAnnotations->GetEnumerator();
+ if (!annotationsEnum) {
+ return NS_ERROR_FAILURE;
+ }
+
+ // Append all the attributes to the key, to uniquely identify this annotation.
+ nsAutoString key;
+ nsAutoString value;
+ while (annotationsEnum->Next(key, value)) {
+ aKeyOut.Append(key);
+ aKeyOut.Append(value);
+ }
+
+ return NS_OK;
+}
+
+class HangReports {
+public:
+ /**
+ * This struct encapsulates information for an individual ChromeHang annotation.
+ * mHangIndex is the index of the corresponding ChromeHang.
+ */
+ struct AnnotationInfo {
+ AnnotationInfo(uint32_t aHangIndex,
+ HangAnnotationsPtr aAnnotations)
+ : mAnnotations(Move(aAnnotations))
+ {
+ mHangIndices.AppendElement(aHangIndex);
+ }
+ AnnotationInfo(AnnotationInfo&& aOther)
+ : mHangIndices(aOther.mHangIndices)
+ , mAnnotations(Move(aOther.mAnnotations))
+ {}
+ ~AnnotationInfo() {}
+ AnnotationInfo& operator=(AnnotationInfo&& aOther)
+ {
+ mHangIndices = aOther.mHangIndices;
+ mAnnotations = Move(aOther.mAnnotations);
+ return *this;
+ }
+ // To save memory, a single AnnotationInfo can be associated to multiple chrome
+ // hangs. The following array holds the index of each related chrome hang.
+ nsTArray<uint32_t> mHangIndices;
+ HangAnnotationsPtr mAnnotations;
+
+ private:
+ // Force move constructor
+ AnnotationInfo(const AnnotationInfo& aOther) = delete;
+ void operator=(const AnnotationInfo& aOther) = delete;
+ };
+ size_t SizeOfExcludingThis(mozilla::MallocSizeOf aMallocSizeOf) const;
+ void AddHang(const Telemetry::ProcessedStack& aStack, uint32_t aDuration,
+ int32_t aSystemUptime, int32_t aFirefoxUptime,
+ HangAnnotationsPtr aAnnotations);
+ void PruneStackReferences(const size_t aRemovedStackIndex);
+ uint32_t GetDuration(unsigned aIndex) const;
+ int32_t GetSystemUptime(unsigned aIndex) const;
+ int32_t GetFirefoxUptime(unsigned aIndex) const;
+ const nsClassHashtable<nsStringHashKey, AnnotationInfo>& GetAnnotationInfo() const;
+ const CombinedStacks& GetStacks() const;
+private:
+ /**
+ * This struct encapsulates the data for an individual ChromeHang, excluding
+ * annotations.
+ */
+ struct HangInfo {
+ // Hang duration (in seconds)
+ uint32_t mDuration;
+ // System uptime (in minutes) at the time of the hang
+ int32_t mSystemUptime;
+ // Firefox uptime (in minutes) at the time of the hang
+ int32_t mFirefoxUptime;
+ };
+ std::vector<HangInfo> mHangInfo;
+ nsClassHashtable<nsStringHashKey, AnnotationInfo> mAnnotationInfo;
+ CombinedStacks mStacks;
+};
+
+void
+HangReports::AddHang(const Telemetry::ProcessedStack& aStack,
+ uint32_t aDuration,
+ int32_t aSystemUptime,
+ int32_t aFirefoxUptime,
+ HangAnnotationsPtr aAnnotations) {
+ // Append the new stack to the stack's circular queue.
+ size_t hangIndex = mStacks.AddStack(aStack);
+ // Append the hang info at the same index, in mHangInfo.
+ HangInfo info = { aDuration, aSystemUptime, aFirefoxUptime };
+ if (mHangInfo.size() < kMaxChromeStacksKept) {
+ mHangInfo.push_back(info);
+ } else {
+ mHangInfo[hangIndex] = info;
+ // Remove any reference to the stack overwritten in the circular queue
+ // from the annotations.
+ PruneStackReferences(hangIndex);
+ }
+
+ if (!aAnnotations) {
+ return;
+ }
+
+ nsAutoString annotationsKey;
+ // Generate a key to index aAnnotations in the hash map.
+ nsresult rv = ComputeAnnotationsKey(aAnnotations, annotationsKey);
+ if (NS_FAILED(rv)) {
+ return;
+ }
+
+ AnnotationInfo* annotationsEntry = mAnnotationInfo.Get(annotationsKey);
+ if (annotationsEntry) {
+ // If the key is already in the hash map, append the index of the chrome hang
+ // to its indices.
+ annotationsEntry->mHangIndices.AppendElement(hangIndex);
+ return;
+ }
+
+ // If the key was not found, add the annotations to the hash map.
+ mAnnotationInfo.Put(annotationsKey, new AnnotationInfo(hangIndex, Move(aAnnotations)));
+}
+
+/**
+ * This function removes links to discarded chrome hangs stacks and prunes unused
+ * annotations.
+ */
+void
+HangReports::PruneStackReferences(const size_t aRemovedStackIndex) {
+ // We need to adjust the indices that link annotations to chrome hangs. Since we
+ // removed a stack, we must remove all references to it and prune annotations
+ // linked to no stacks.
+ for (auto iter = mAnnotationInfo.Iter(); !iter.Done(); iter.Next()) {
+ nsTArray<uint32_t>& stackIndices = iter.Data()->mHangIndices;
+ size_t toRemove = stackIndices.NoIndex;
+ for (size_t k = 0; k < stackIndices.Length(); k++) {
+ // Is this index referencing the removed stack?
+ if (stackIndices[k] == aRemovedStackIndex) {
+ toRemove = k;
+ break;
+ }
+ }
+
+ // Remove the index referencing the old stack from the annotation.
+ if (toRemove != stackIndices.NoIndex) {
+ stackIndices.RemoveElementAt(toRemove);
+ }
+
+ // If this annotation no longer references any stack, drop it.
+ if (!stackIndices.Length()) {
+ iter.Remove();
+ }
+ }
+}
+
// Tallies the heap memory owned by this HangReports (stacks, hang metadata
// and annotations), excluding the size of the object itself.
size_t
HangReports::SizeOfExcludingThis(mozilla::MallocSizeOf aMallocSizeOf) const {
  size_t n = 0;
  n += mStacks.SizeOfExcludingThis();
  // This is a crude approximation. See comment on
  // CombinedStacks::SizeOfExcludingThis.
  n += mHangInfo.capacity() * sizeof(HangInfo);
  // ShallowSizeOf covers only the table storage, so add a per-entry estimate
  // for the AnnotationInfo values plus the key strings and the annotation
  // payloads they own.
  n += mAnnotationInfo.ShallowSizeOfExcludingThis(aMallocSizeOf);
  n += mAnnotationInfo.Count() * sizeof(AnnotationInfo);
  for (auto iter = mAnnotationInfo.ConstIter(); !iter.Done(); iter.Next()) {
    n += iter.Key().SizeOfExcludingThisIfUnshared(aMallocSizeOf);
    n += iter.Data()->mAnnotations->SizeOfIncludingThis(aMallocSizeOf);
  }
  return n;
}
+
// Accessor for the collected chrome hang stacks.
const CombinedStacks&
HangReports::GetStacks() const {
  return mStacks;
}
+
// Returns the duration recorded for the hang at |aIndex| (no bounds check).
uint32_t
HangReports::GetDuration(unsigned aIndex) const {
  return mHangInfo[aIndex].mDuration;
}
+
// Returns the system uptime recorded for the hang at |aIndex| (no bounds check).
int32_t
HangReports::GetSystemUptime(unsigned aIndex) const {
  return mHangInfo[aIndex].mSystemUptime;
}
+
// Returns the Firefox uptime recorded for the hang at |aIndex| (no bounds check).
int32_t
HangReports::GetFirefoxUptime(unsigned aIndex) const {
  return mHangInfo[aIndex].mFirefoxUptime;
}
+
// Accessor for the annotation table, keyed by the computed annotations key
// string; each entry links an annotation set to the hang stacks it covers.
const nsClassHashtable<nsStringHashKey, HangReports::AnnotationInfo>&
HangReports::GetAnnotationInfo() const {
  return mAnnotationInfo;
}
+
+/**
+ * IOInterposeObserver recording statistics of main-thread I/O during execution,
+ * aimed at consumption by TelemetryImpl
+ */
+class TelemetryIOInterposeObserver : public IOInterposeObserver
+{
+ /** File-level statistics structure */
+ struct FileStats {
+ FileStats()
+ : creates(0)
+ , reads(0)
+ , writes(0)
+ , fsyncs(0)
+ , stats(0)
+ , totalTime(0)
+ {}
+ uint32_t creates; /** Number of create/open operations */
+ uint32_t reads; /** Number of read operations */
+ uint32_t writes; /** Number of write operations */
+ uint32_t fsyncs; /** Number of fsync operations */
+ uint32_t stats; /** Number of stat operations */
+ double totalTime; /** Accumulated duration of all operations */
+ };
+
+ struct SafeDir {
+ SafeDir(const nsAString& aPath, const nsAString& aSubstName)
+ : mPath(aPath)
+ , mSubstName(aSubstName)
+ {}
+ size_t SizeOfExcludingThis(mozilla::MallocSizeOf aMallocSizeOf) const {
+ return mPath.SizeOfExcludingThisIfUnshared(aMallocSizeOf) +
+ mSubstName.SizeOfExcludingThisIfUnshared(aMallocSizeOf);
+ }
+ nsString mPath; /** Path to the directory */
+ nsString mSubstName; /** Name to substitute with */
+ };
+
+public:
+ explicit TelemetryIOInterposeObserver(nsIFile* aXreDir);
+
+ /**
+ * An implementation of Observe that records statistics of all
+ * file IO operations.
+ */
+ void Observe(Observation& aOb);
+
+ /**
+ * Reflect recorded file IO statistics into Javascript
+ */
+ bool ReflectIntoJS(JSContext *cx, JS::Handle<JSObject*> rootObj);
+
+ /**
+ * Adds a path for inclusion in main thread I/O report.
+ * @param aPath Directory path
+ * @param aSubstName Name to substitute for aPath for privacy reasons
+ */
+ void AddPath(const nsAString& aPath, const nsAString& aSubstName);
+
+ /**
+ * Get size of hash table with file stats
+ */
+ size_t SizeOfIncludingThis(mozilla::MallocSizeOf aMallocSizeOf) const {
+ return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf);
+ }
+
+ size_t SizeOfExcludingThis(mozilla::MallocSizeOf aMallocSizeOf) const {
+ size_t size = 0;
+ size += mFileStats.ShallowSizeOfExcludingThis(aMallocSizeOf);
+ for (auto iter = mFileStats.ConstIter(); !iter.Done(); iter.Next()) {
+ size += iter.Get()->GetKey().SizeOfExcludingThisIfUnshared(aMallocSizeOf);
+ }
+ size += mSafeDirs.ShallowSizeOfExcludingThis(aMallocSizeOf);
+ uint32_t safeDirsLen = mSafeDirs.Length();
+ for (uint32_t i = 0; i < safeDirsLen; ++i) {
+ size += mSafeDirs[i].SizeOfExcludingThis(aMallocSizeOf);
+ }
+ return size;
+ }
+
+private:
+ enum Stage
+ {
+ STAGE_STARTUP = 0,
+ STAGE_NORMAL,
+ STAGE_SHUTDOWN,
+ NUM_STAGES
+ };
+ static inline Stage NextStage(Stage aStage)
+ {
+ switch (aStage) {
+ case STAGE_STARTUP:
+ return STAGE_NORMAL;
+ case STAGE_NORMAL:
+ return STAGE_SHUTDOWN;
+ case STAGE_SHUTDOWN:
+ return STAGE_SHUTDOWN;
+ default:
+ return NUM_STAGES;
+ }
+ }
+
+ struct FileStatsByStage
+ {
+ FileStats mStats[NUM_STAGES];
+ };
+ typedef nsBaseHashtableET<nsStringHashKey, FileStatsByStage> FileIOEntryType;
+
+ // Statistics for each filename
+ AutoHashtable<FileIOEntryType> mFileStats;
+ // Container for whitelisted directories
+ nsTArray<SafeDir> mSafeDirs;
+ Stage mCurStage;
+
+ /**
+ * Reflect a FileIOEntryType object to a Javascript property on obj with
+ * filename as key containing array:
+ * [totalTime, creates, reads, writes, fsyncs, stats]
+ */
+ static bool ReflectFileStats(FileIOEntryType* entry, JSContext *cx,
+ JS::Handle<JSObject*> obj);
+};
+
// Starts in the startup stage and whitelists the XRE directory, reported
// under the anonymized name "{xre}".
TelemetryIOInterposeObserver::TelemetryIOInterposeObserver(nsIFile* aXreDir)
  : mCurStage(STAGE_STARTUP)
{
  nsAutoString xreDirPath;
  nsresult rv = aXreDir->GetPath(xreDirPath);
  if (NS_SUCCEEDED(rv)) {
    AddPath(xreDirPath, NS_LITERAL_STRING("{xre}"));
  }
}
+
// Registers a directory prefix (and its privacy-preserving substitute name)
// for inclusion in main-thread I/O reports.
void TelemetryIOInterposeObserver::AddPath(const nsAString& aPath,
                                           const nsAString& aSubstName)
{
  mSafeDirs.AppendElement(SafeDir(aPath, aSubstName));
}
+
// Threshold for reporting slow main-thread I/O (50 milliseconds).
const TimeDuration kTelemetryReportThreshold = TimeDuration::FromMilliseconds(50);

// Records one I/O observation: main-thread-only, stage bookkeeping for
// OpNextStage, then duration/filename/whitelist filtering before the stats
// for the (anonymized) file are updated.
void TelemetryIOInterposeObserver::Observe(Observation& aOb)
{
  // We only report main-thread I/O
  if (!IsMainThread()) {
    return;
  }

  // Stage-transition observations only advance mCurStage; nothing is recorded.
  if (aOb.ObservedOperation() == OpNextStage) {
    mCurStage = NextStage(mCurStage);
    MOZ_ASSERT(mCurStage < NUM_STAGES);
    return;
  }

  // Ignore fast operations; only I/O at or above the threshold is reported.
  if (aOb.Duration() < kTelemetryReportThreshold) {
    return;
  }

  // Get the filename
  const char16_t* filename = aOb.Filename();

  // Discard observations without filename
  if (!filename) {
    return;
  }

  // Windows paths are compared case-insensitively; elsewhere, exact match.
#if defined(XP_WIN)
  nsCaseInsensitiveStringComparator comparator;
#else
  nsDefaultStringComparator comparator;
#endif
  // Replace a whitelisted directory prefix with its substitute name; paths
  // outside every safe dir produce an empty processedName.
  nsAutoString processedName;
  nsDependentString filenameStr(filename);
  uint32_t safeDirsLen = mSafeDirs.Length();
  for (uint32_t i = 0; i < safeDirsLen; ++i) {
    if (StringBeginsWith(filenameStr, mSafeDirs[i].mPath, comparator)) {
      processedName = mSafeDirs[i].mSubstName;
      processedName += Substring(filenameStr, mSafeDirs[i].mPath.Length());
      break;
    }
  }

  // Non-whitelisted paths are dropped entirely (privacy).
  if (processedName.IsEmpty()) {
    return;
  }

  // Create a new entry or retrieve the existing one
  FileIOEntryType* entry = mFileStats.PutEntry(processedName);
  if (entry) {
    FileStats& stats = entry->mData.mStats[mCurStage];
    // Update the statistics
    stats.totalTime += (double) aOb.Duration().ToMilliseconds();
    switch (aOb.ObservedOperation()) {
      case OpCreateOrOpen:
        stats.creates++;
        break;
      case OpRead:
        stats.reads++;
        break;
      case OpWrite:
        stats.writes++;
        break;
      case OpFSync:
        stats.fsyncs++;
        break;
      case OpStat:
        stats.stats++;
        break;
      default:
        break;
    }
  }
}
+
// Reflects one file's per-stage statistics onto |obj| as a property keyed by
// filename, whose value is an array indexed by stage; each stage is either
// null (no data) or [totalTime, creates, reads, writes, fsyncs, stats].
bool TelemetryIOInterposeObserver::ReflectFileStats(FileIOEntryType* entry,
                                                    JSContext *cx,
                                                    JS::Handle<JSObject*> obj)
{
  JS::AutoValueArray<NUM_STAGES> stages(cx);

  FileStatsByStage& statsByStage = entry->mData;
  for (int s = STAGE_STARTUP; s < NUM_STAGES; ++s) {
    FileStats& fileStats = statsByStage.mStats[s];

    if (fileStats.totalTime == 0 && fileStats.creates == 0 &&
        fileStats.reads == 0 && fileStats.writes == 0 &&
        fileStats.fsyncs == 0 && fileStats.stats == 0) {
      // Don't add an array that contains no information
      stages[s].setNull();
      continue;
    }

    // Array we want to report
    JS::AutoValueArray<6> stats(cx);
    stats[0].setNumber(fileStats.totalTime);
    stats[1].setNumber(fileStats.creates);
    stats[2].setNumber(fileStats.reads);
    stats[3].setNumber(fileStats.writes);
    stats[4].setNumber(fileStats.fsyncs);
    stats[5].setNumber(fileStats.stats);

    // Create jsStats as array of elements above
    JS::RootedObject jsStats(cx, JS_NewArrayObject(cx, stats));
    if (!jsStats) {
      // On allocation failure this stage's slot is left unset; keep going so
      // the remaining stages can still be reported.
      continue;
    }

    stages[s].setObject(*jsStats);
  }

  JS::Rooted<JSObject*> jsEntry(cx, JS_NewArrayObject(cx, stages));
  if (!jsEntry) {
    return false;
  }

  // Add jsEntry to top-level dictionary
  const nsAString& key = entry->GetKey();
  return JS_DefineUCProperty(cx, obj, key.Data(), key.Length(),
                             jsEntry, JSPROP_ENUMERATE | JSPROP_READONLY);
}
+
// Reflects the whole file-stats table onto |rootObj|, one property per file,
// via ReflectFileStats.
bool TelemetryIOInterposeObserver::ReflectIntoJS(JSContext *cx,
                                                 JS::Handle<JSObject*> rootObj)
{
  return mFileStats.ReflectIntoJS(ReflectFileStats, cx, rootObj);
}
+
// This is not a member of TelemetryImpl because we want to record I/O during
// startup.
StaticAutoPtr<TelemetryIOInterposeObserver> sTelemetryIOObserver;

// Unregisters and destroys the global I/O observer; safe to call when
// reporting was never started (no-op in that case).
void
ClearIOReporting()
{
  if (!sTelemetryIOObserver) {
    return;
  }
  IOInterposer::Unregister(IOInterposeObserver::OpAllWithStaging,
                           sTelemetryIOObserver);
  sTelemetryIOObserver = nullptr;
}
+
// Singleton backing nsITelemetry; also reports its own memory use. Collects
// slow-SQL statements, chrome/thread hangs, late-writes stacks and WebRTC
// stats, each guarded by its own mutex.
class TelemetryImpl final
  : public nsITelemetry
  , public nsIMemoryReporter
{
  NS_DECL_THREADSAFE_ISUPPORTS
  NS_DECL_NSITELEMETRY
  NS_DECL_NSIMEMORYREPORTER

public:
  void InitMemoryReporter();

  static already_AddRefed<nsITelemetry> CreateTelemetryInstance();
  static void ShutdownTelemetry();
  static void RecordSlowStatement(const nsACString &sql, const nsACString &dbName,
                                  uint32_t delay);
#if defined(MOZ_ENABLE_PROFILER_SPS)
  static void RecordChromeHang(uint32_t aDuration,
                               Telemetry::ProcessedStack &aStack,
                               int32_t aSystemUptime,
                               int32_t aFirefoxUptime,
                               HangAnnotationsPtr aAnnotations);
#endif
  static void RecordThreadHangStats(Telemetry::ThreadHangStats& aStats);
  size_t SizeOfIncludingThis(mozilla::MallocSizeOf aMallocSizeOf);
  // Hit count and accumulated time for one SQL statement on one thread class.
  struct Stat {
    uint32_t hitCount;
    uint32_t totalTime;
  };
  // Per-statement stats, split between the main thread and all others.
  struct StmtStats {
    struct Stat mainThread;
    struct Stat otherThreads;
  };
  typedef nsBaseHashtableET<nsCStringHashKey, StmtStats> SlowSQLEntryType;

  static void RecordIceCandidates(const uint32_t iceCandidateBitmask,
                                  const bool success);
private:
  TelemetryImpl();
  ~TelemetryImpl();

  static nsCString SanitizeSQL(const nsACString& sql);

  enum SanitizedState { Sanitized, Unsanitized };

  static void StoreSlowSQL(const nsACString &offender, uint32_t delay,
                           SanitizedState state);

  static bool ReflectMainThreadSQL(SlowSQLEntryType *entry, JSContext *cx,
                                   JS::Handle<JSObject*> obj);
  static bool ReflectOtherThreadsSQL(SlowSQLEntryType *entry, JSContext *cx,
                                     JS::Handle<JSObject*> obj);
  static bool ReflectSQL(const SlowSQLEntryType *entry, const Stat *stat,
                         JSContext *cx, JS::Handle<JSObject*> obj);

  bool AddSQLInfo(JSContext *cx, JS::Handle<JSObject*> rootObj, bool mainThread,
                  bool privateSQL);
  bool GetSQLStats(JSContext *cx, JS::MutableHandle<JS::Value> ret,
                   bool includePrivateSql);

  void ReadLateWritesStacks(nsIFile* aProfileDir);

  // The singleton instance; managed by CreateTelemetryInstance/ShutdownTelemetry.
  static TelemetryImpl *sTelemetry;
  AutoHashtable<SlowSQLEntryType> mPrivateSQL;
  AutoHashtable<SlowSQLEntryType> mSanitizedSQL;
  Mutex mHashMutex;          // guards mPrivateSQL / mSanitizedSQL
  HangReports mHangReports;
  Mutex mHangReportsMutex;   // guards mHangReports
  // mThreadHangStats stores recorded, inactive thread hang stats
  Vector<Telemetry::ThreadHangStats> mThreadHangStats;
  Mutex mThreadHangStatsMutex;

  CombinedStacks mLateWritesStacks; // This is collected out of the main thread.
  // True once the async read of cached data (shutdown time, lock count,
  // late-writes stacks) has completed.
  bool mCachedTelemetryData;
  uint32_t mLastShutdownTime;
  uint32_t mFailedLockCount;
  // Callbacks waiting for AsyncFetchTelemetryData to finish.
  nsCOMArray<nsIFetchTelemetryDataCallback> mCallbacks;
  friend class nsFetchTelemetryData;

  WebrtcTelemetry mWebrtcTelemetry;
};

TelemetryImpl* TelemetryImpl::sTelemetry = nullptr;
+
MOZ_DEFINE_MALLOC_SIZE_OF(TelemetryMallocSizeOf)

// nsIMemoryReporter: reports the telemetry system's own heap footprint under
// "explicit/telemetry".
NS_IMETHODIMP
TelemetryImpl::CollectReports(nsIHandleReportCallback* aHandleReport,
                              nsISupports* aData, bool aAnonymize)
{
  MOZ_COLLECT_REPORT(
    "explicit/telemetry", KIND_HEAP, UNITS_BYTES,
    SizeOfIncludingThis(TelemetryMallocSizeOf),
    "Memory used by the telemetry system.");

  return NS_OK;
}
+
// Thin wrapper delegating to TelemetryHistogram.
void
InitHistogramRecordingEnabled()
{
  TelemetryHistogram::InitHistogramRecordingEnabled();
}
+
// Reads the shutdown duration recorded by the previous session from
// |filename|. Returns 0 if the file is missing, unreadable or malformed.
static uint32_t
ReadLastShutdownDuration(const char *filename) {
  FILE *f = fopen(filename, "r");
  if (!f) {
    return 0;
  }

  int shutdownTime;
  int r = fscanf(f, "%d\n", &shutdownTime);
  fclose(f);
  // Treat a parse failure or a (nonsensical) negative duration as "no data";
  // returning a negative int through uint32_t would wrap to a huge value.
  if (r != 1 || shutdownTime < 0) {
    return 0;
  }

  return shutdownTime;
}
+
// Upper bound on the lock-count file size we are willing to parse.
const int32_t kMaxFailedProfileLockFileSize = 10;

// Parses the failed-profile-lock count from |inStream| (reading |aCount|
// bytes) into |result|. Returns true only if the content parses as a
// strictly positive integer.
bool
GetFailedLockCount(nsIInputStream* inStream, uint32_t aCount,
                   unsigned int& result)
{
  nsAutoCString bufStr;
  nsresult rv;
  rv = NS_ReadInputStreamToString(inStream, bufStr, aCount);
  NS_ENSURE_SUCCESS(rv, false);
  result = bufStr.ToInteger(&rv);
  return NS_SUCCEEDED(rv) && result > 0;
}
+
+nsresult
+GetFailedProfileLockFile(nsIFile* *aFile, nsIFile* aProfileDir)
+{
+ NS_ENSURE_ARG_POINTER(aProfileDir);
+
+ nsresult rv = aProfileDir->Clone(aFile);
+ NS_ENSURE_SUCCESS(rv, rv);
+
+ (*aFile)->AppendNative(NS_LITERAL_CSTRING("Telemetry.FailedProfileLocks.txt"));
+ return NS_OK;
+}
+
// Background runnable that loads cached telemetry data (failed-lock count,
// last shutdown duration, late-writes stacks) off the main thread, then
// bounces back to the main thread to mark the cache ready and notify waiting
// callbacks.
class nsFetchTelemetryData : public Runnable
{
public:
  nsFetchTelemetryData(const char* aShutdownTimeFilename,
                       nsIFile* aFailedProfileLockFile,
                       nsIFile* aProfileDir)
    : mShutdownTimeFilename(aShutdownTimeFilename),
      mFailedProfileLockFile(aFailedProfileLockFile),
      mTelemetry(TelemetryImpl::sTelemetry),
      mProfileDir(aProfileDir)
  {
  }

private:
  const char* mShutdownTimeFilename;
  nsCOMPtr<nsIFile> mFailedProfileLockFile;
  RefPtr<TelemetryImpl> mTelemetry;
  nsCOMPtr<nsIFile> mProfileDir;

public:
  // Runs on the main thread: flags the cached data as available and fires
  // (then clears) every queued nsIFetchTelemetryDataCallback.
  void MainThread() {
    mTelemetry->mCachedTelemetryData = true;
    for (unsigned int i = 0, n = mTelemetry->mCallbacks.Count(); i < n; ++i) {
      mTelemetry->mCallbacks[i]->Complete();
    }
    mTelemetry->mCallbacks.Clear();
  }

  // Runs on the background thread: performs the file reads, then dispatches
  // MainThread() back to the main thread.
  NS_IMETHOD Run() override {
    LoadFailedLockCount(mTelemetry->mFailedLockCount);
    mTelemetry->mLastShutdownTime =
      ReadLastShutdownDuration(mShutdownTimeFilename);
    mTelemetry->ReadLateWritesStacks(mProfileDir);
    nsCOMPtr<nsIRunnable> e =
      NewRunnableMethod(this, &nsFetchTelemetryData::MainThread);
    NS_ENSURE_STATE(e);
    NS_DispatchToMainThread(e);
    return NS_OK;
  }

private:
  // Reads the failed-lock count from its file; rejects oversized files, and
  // deletes the file after a successful read so the count is reported once.
  nsresult
  LoadFailedLockCount(uint32_t& failedLockCount)
  {
    failedLockCount = 0;
    int64_t fileSize = 0;
    nsresult rv = mFailedProfileLockFile->GetFileSize(&fileSize);
    if (NS_FAILED(rv)) {
      return rv;
    }
    NS_ENSURE_TRUE(fileSize <= kMaxFailedProfileLockFileSize,
                   NS_ERROR_UNEXPECTED);
    nsCOMPtr<nsIInputStream> inStream;
    rv = NS_NewLocalFileInputStream(getter_AddRefs(inStream),
                                    mFailedProfileLockFile,
                                    PR_RDONLY);
    NS_ENSURE_SUCCESS(rv, rv);
    NS_ENSURE_TRUE(GetFailedLockCount(inStream, fileSize, failedLockCount),
                   NS_ERROR_UNEXPECTED);
    inStream->Close();

    mFailedProfileLockFile->Remove(false);
    return NS_OK;
  }
};
+
+static TimeStamp gRecordedShutdownStartTime;
+static bool gAlreadyFreedShutdownTimeFileName = false;
+static char *gRecordedShutdownTimeFileName = nullptr;
+
+static char *
+GetShutdownTimeFileName()
+{
+ if (gAlreadyFreedShutdownTimeFileName) {
+ return nullptr;
+ }
+
+ if (!gRecordedShutdownTimeFileName) {
+ nsCOMPtr<nsIFile> mozFile;
+ NS_GetSpecialDirectory(NS_APP_USER_PROFILE_50_DIR, getter_AddRefs(mozFile));
+ if (!mozFile)
+ return nullptr;
+
+ mozFile->AppendNative(NS_LITERAL_CSTRING("Telemetry.ShutdownTime.txt"));
+ nsAutoCString nativePath;
+ nsresult rv = mozFile->GetNativePath(nativePath);
+ if (!NS_SUCCEEDED(rv))
+ return nullptr;
+
+ gRecordedShutdownTimeFileName = PL_strdup(nativePath.get());
+ }
+
+ return gRecordedShutdownTimeFileName;
+}
+
// Returns the previous session's shutdown duration, or 0 until the async
// fetch has populated the cache.
NS_IMETHODIMP
TelemetryImpl::GetLastShutdownDuration(uint32_t *aResult)
{
  // The user must call AsyncFetchTelemetryData first. We return zero instead of
  // reporting a failure so that the rest of telemetry can uniformly handle
  // the read not being available yet.
  if (!mCachedTelemetryData) {
    *aResult = 0;
    return NS_OK;
  }

  *aResult = mLastShutdownTime;
  return NS_OK;
}
+
// Returns the failed-profile-lock count, or 0 until the async fetch has
// populated the cache.
NS_IMETHODIMP
TelemetryImpl::GetFailedProfileLockCount(uint32_t* aResult)
{
  // The user must call AsyncFetchTelemetryData first. We return zero instead of
  // reporting a failure so that the rest of telemetry can uniformly handle
  // the read not being available yet.
  if (!mCachedTelemetryData) {
    *aResult = 0;
    return NS_OK;
  }

  *aResult = mFailedLockCount;
  return NS_OK;
}
+
// Kicks off (or short-circuits) the background read of cached telemetry
// data. On any early-out path the cache is marked available and the callback
// fired immediately, so callers always get a completion signal.
NS_IMETHODIMP
TelemetryImpl::AsyncFetchTelemetryData(nsIFetchTelemetryDataCallback *aCallback)
{
  // We have finished reading the data already, just call the callback.
  if (mCachedTelemetryData) {
    aCallback->Complete();
    return NS_OK;
  }

  // We already have a read request running, just remember the callback.
  if (mCallbacks.Count() != 0) {
    mCallbacks.AppendObject(aCallback);
    return NS_OK;
  }

  // We make this check so that GetShutdownTimeFileName() doesn't get
  // called; calling that function without telemetry enabled violates
  // assumptions that the write-the-shutdown-timestamp machinery makes.
  if (!Telemetry::CanRecordExtended()) {
    mCachedTelemetryData = true;
    aCallback->Complete();
    return NS_OK;
  }

  // Send the read to a background thread provided by the stream transport
  // service to avoid a read in the main thread.
  nsCOMPtr<nsIEventTarget> targetThread =
    do_GetService(NS_STREAMTRANSPORTSERVICE_CONTRACTID);
  if (!targetThread) {
    mCachedTelemetryData = true;
    aCallback->Complete();
    return NS_OK;
  }

  // We have to get the filename from the main thread.
  const char *shutdownTimeFilename = GetShutdownTimeFileName();
  if (!shutdownTimeFilename) {
    mCachedTelemetryData = true;
    aCallback->Complete();
    return NS_OK;
  }

  nsCOMPtr<nsIFile> profileDir;
  nsresult rv = NS_GetSpecialDirectory(NS_APP_USER_PROFILE_50_DIR,
                                       getter_AddRefs(profileDir));
  if (NS_FAILED(rv)) {
    mCachedTelemetryData = true;
    aCallback->Complete();
    return NS_OK;
  }

  nsCOMPtr<nsIFile> failedProfileLockFile;
  rv = GetFailedProfileLockFile(getter_AddRefs(failedProfileLockFile),
                                profileDir);
  if (NS_FAILED(rv)) {
    mCachedTelemetryData = true;
    aCallback->Complete();
    return NS_OK;
  }

  // Queue the callback; nsFetchTelemetryData fires it from MainThread().
  mCallbacks.AppendObject(aCallback);

  nsCOMPtr<nsIRunnable> event = new nsFetchTelemetryData(shutdownTimeFilename,
                                                         failedProfileLockFile,
                                                         profileDir);

  targetThread->Dispatch(event, NS_DISPATCH_NORMAL);
  return NS_OK;
}
+
// Initializes the per-subsystem mutexes and the cached-data flags.
TelemetryImpl::TelemetryImpl()
  : mHashMutex("Telemetry::mHashMutex")
  , mHangReportsMutex("Telemetry::mHangReportsMutex")
  , mThreadHangStatsMutex("Telemetry::mThreadHangStatsMutex")
  , mCachedTelemetryData(false)
  , mLastShutdownTime(0)
  , mFailedLockCount(0)
{
  // We expect TelemetryHistogram::InitializeGlobalState() to have been
  // called before we get to this point.
  MOZ_ASSERT(TelemetryHistogram::GlobalStateHasBeenInitialized());
}
+
// Counterpart to InitMemoryReporter(): drop the weak memory reporter.
TelemetryImpl::~TelemetryImpl() {
  UnregisterWeakMemoryReporter(this);
}
+
// Registers this instance as a weak memory reporter (see CollectReports).
void
TelemetryImpl::InitMemoryReporter() {
  RegisterWeakMemoryReporter(this);
}
+
// Reflects one slow-SQL entry onto |obj| as a property keyed by the SQL
// string, with value [hitCount, totalTime]. Entries with no hits are
// skipped (and reported as success).
bool
TelemetryImpl::ReflectSQL(const SlowSQLEntryType *entry,
                          const Stat *stat,
                          JSContext *cx,
                          JS::Handle<JSObject*> obj)
{
  if (stat->hitCount == 0)
    return true;

  const nsACString &sql = entry->GetKey();

  JS::Rooted<JSObject*> arrayObj(cx, JS_NewArrayObject(cx, 0));
  if (!arrayObj) {
    return false;
  }
  return (JS_DefineElement(cx, arrayObj, 0, stat->hitCount, JSPROP_ENUMERATE)
          && JS_DefineElement(cx, arrayObj, 1, stat->totalTime, JSPROP_ENUMERATE)
          && JS_DefineProperty(cx, obj, sql.BeginReading(), arrayObj,
                               JSPROP_ENUMERATE));
}
+
// ReflectSQL adapter selecting the main-thread stats of the entry.
bool
TelemetryImpl::ReflectMainThreadSQL(SlowSQLEntryType *entry, JSContext *cx,
                                    JS::Handle<JSObject*> obj)
{
  return ReflectSQL(entry, &entry->mData.mainThread, cx, obj);
}
+
// ReflectSQL adapter selecting the off-main-thread stats of the entry.
bool
TelemetryImpl::ReflectOtherThreadsSQL(SlowSQLEntryType *entry, JSContext *cx,
                                      JS::Handle<JSObject*> obj)
{
  return ReflectSQL(entry, &entry->mData.otherThreads, cx, obj);
}
+
// Reflects one slow-SQL map (private or sanitized, chosen by |privateSQL|)
// onto |rootObj| under "mainThread" or "otherThreads" (chosen by |mainThread|).
bool
TelemetryImpl::AddSQLInfo(JSContext *cx, JS::Handle<JSObject*> rootObj, bool mainThread,
                          bool privateSQL)
{
  JS::Rooted<JSObject*> statsObj(cx, JS_NewPlainObject(cx));
  if (!statsObj)
    return false;

  // Pick the map and the per-entry reflection callback matching the flags.
  AutoHashtable<SlowSQLEntryType>& sqlMap = (privateSQL ? mPrivateSQL : mSanitizedSQL);
  AutoHashtable<SlowSQLEntryType>::ReflectEntryFunc reflectFunction =
    (mainThread ? ReflectMainThreadSQL : ReflectOtherThreadsSQL);
  if (!sqlMap.ReflectIntoJS(reflectFunction, cx, statsObj)) {
    return false;
  }

  return JS_DefineProperty(cx, rootObj,
                           mainThread ? "mainThread" : "otherThreads",
                           statsObj, JSPROP_ENUMERATE);
}
+
// Thin delegator to TelemetryHistogram::RegisterAddonHistogram.
NS_IMETHODIMP
TelemetryImpl::RegisterAddonHistogram(const nsACString &id,
                                      const nsACString &name,
                                      uint32_t histogramType,
                                      uint32_t min, uint32_t max,
                                      uint32_t bucketCount,
                                      uint8_t optArgCount)
{
  return TelemetryHistogram::RegisterAddonHistogram
            (id, name, histogramType, min, max, bucketCount, optArgCount);
}
+
// Thin delegator to TelemetryHistogram::GetAddonHistogram.
NS_IMETHODIMP
TelemetryImpl::GetAddonHistogram(const nsACString &id, const nsACString &name,
                                 JSContext *cx, JS::MutableHandle<JS::Value> ret)
{
  return TelemetryHistogram::GetAddonHistogram(id, name, cx, ret);
}
+
// Thin delegator to TelemetryHistogram::UnregisterAddonHistograms.
NS_IMETHODIMP
TelemetryImpl::UnregisterAddonHistograms(const nsACString &id)
{
  return TelemetryHistogram::UnregisterAddonHistograms(id);
}
+
// Thin delegator to TelemetryHistogram::SetHistogramRecordingEnabled.
NS_IMETHODIMP
TelemetryImpl::SetHistogramRecordingEnabled(const nsACString &id, bool aEnabled)
{
  return TelemetryHistogram::SetHistogramRecordingEnabled(id, aEnabled);
}
+
// Session histogram snapshots (subsession=false, clear=false).
NS_IMETHODIMP
TelemetryImpl::GetHistogramSnapshots(JSContext *cx, JS::MutableHandle<JS::Value> ret)
{
  return TelemetryHistogram::CreateHistogramSnapshots(cx, ret, false, false);
}
+
// Subsession histogram snapshots, optionally clearing the subsession data.
// No-op (NS_OK, ret untouched) on Gonk/Android builds.
NS_IMETHODIMP
TelemetryImpl::SnapshotSubsessionHistograms(bool clearSubsession,
                                            JSContext *cx,
                                            JS::MutableHandle<JS::Value> ret)
{
#if !defined(MOZ_WIDGET_GONK) && !defined(MOZ_WIDGET_ANDROID)
  return TelemetryHistogram::CreateHistogramSnapshots(cx, ret, true,
                                                      clearSubsession);
#else
  return NS_OK;
#endif
}
+
// Thin delegator to TelemetryHistogram::GetAddonHistogramSnapshots.
NS_IMETHODIMP
TelemetryImpl::GetAddonHistogramSnapshots(JSContext *cx, JS::MutableHandle<JS::Value> ret)
{
  return TelemetryHistogram::GetAddonHistogramSnapshots(cx, ret);
}
+
// Thin delegator to TelemetryHistogram::GetKeyedHistogramSnapshots.
NS_IMETHODIMP
TelemetryImpl::GetKeyedHistogramSnapshots(JSContext *cx, JS::MutableHandle<JS::Value> ret)
{
  return TelemetryHistogram::GetKeyedHistogramSnapshots(cx, ret);
}
+
// Builds a {mainThread, otherThreads} object of slow-SQL stats into |ret|.
// Takes mHashMutex while reading the SQL maps.
bool
TelemetryImpl::GetSQLStats(JSContext *cx, JS::MutableHandle<JS::Value> ret, bool includePrivateSql)
{
  JS::Rooted<JSObject*> root_obj(cx, JS_NewPlainObject(cx));
  if (!root_obj)
    return false;
  ret.setObject(*root_obj);

  MutexAutoLock hashMutex(mHashMutex);
  // Add info about slow SQL queries on the main thread
  if (!AddSQLInfo(cx, root_obj, true, includePrivateSql))
    return false;
  // Add info about slow SQL queries on other threads
  if (!AddSQLInfo(cx, root_obj, false, includePrivateSql))
    return false;

  return true;
}
+
// Public slow-SQL snapshot: sanitized statements only.
NS_IMETHODIMP
TelemetryImpl::GetSlowSQL(JSContext *cx, JS::MutableHandle<JS::Value> ret)
{
  if (GetSQLStats(cx, ret, false))
    return NS_OK;
  return NS_ERROR_FAILURE;
}
+
// Debug slow-SQL snapshot: includes private (unsanitized) statements only
// when the toolkit.telemetry.debugSlowSql pref is set.
NS_IMETHODIMP
TelemetryImpl::GetDebugSlowSQL(JSContext *cx, JS::MutableHandle<JS::Value> ret)
{
  bool revealPrivateSql =
    Preferences::GetBool("toolkit.telemetry.debugSlowSql", false);
  if (GetSQLStats(cx, ret, revealPrivateSql))
    return NS_OK;
  return NS_ERROR_FAILURE;
}
+
// Delegates WebRTC stats reflection to the WebrtcTelemetry member.
NS_IMETHODIMP
TelemetryImpl::GetWebrtcStats(JSContext *cx, JS::MutableHandle<JS::Value> ret)
{
  if (mWebrtcTelemetry.GetWebrtcStats(cx, ret))
    return NS_OK;
  return NS_ERROR_FAILURE;
}
+
// Reports the high-water mark of concurrent threads from the thread manager.
NS_IMETHODIMP
TelemetryImpl::GetMaximalNumberOfConcurrentThreads(uint32_t *ret)
{
  *ret = nsThreadManager::get().GetHighestNumberOfThreads();
  return NS_OK;
}
+
// Reflects the recorded chrome hangs into |ret| as a stack object (from
// CreateJSStackObject) extended with parallel "durations", "systemUptime",
// "firefoxUptime" arrays (one element per stack) and an "annotations" array
// of [[hangIndices], {key: value}] pairs. Holds mHangReportsMutex throughout.
NS_IMETHODIMP
TelemetryImpl::GetChromeHangs(JSContext *cx, JS::MutableHandle<JS::Value> ret)
{
  MutexAutoLock hangReportMutex(mHangReportsMutex);

  const CombinedStacks& stacks = mHangReports.GetStacks();
  JS::Rooted<JSObject*> fullReportObj(cx, CreateJSStackObject(cx, stacks));
  if (!fullReportObj) {
    return NS_ERROR_FAILURE;
  }

  ret.setObject(*fullReportObj);

  JS::Rooted<JSObject*> durationArray(cx, JS_NewArrayObject(cx, 0));
  JS::Rooted<JSObject*> systemUptimeArray(cx, JS_NewArrayObject(cx, 0));
  JS::Rooted<JSObject*> firefoxUptimeArray(cx, JS_NewArrayObject(cx, 0));
  JS::Rooted<JSObject*> annotationsArray(cx, JS_NewArrayObject(cx, 0));
  if (!durationArray || !systemUptimeArray || !firefoxUptimeArray ||
      !annotationsArray) {
    return NS_ERROR_FAILURE;
  }

  bool ok = JS_DefineProperty(cx, fullReportObj, "durations",
                              durationArray, JSPROP_ENUMERATE);
  if (!ok) {
    return NS_ERROR_FAILURE;
  }

  ok = JS_DefineProperty(cx, fullReportObj, "systemUptime",
                         systemUptimeArray, JSPROP_ENUMERATE);
  if (!ok) {
    return NS_ERROR_FAILURE;
  }

  ok = JS_DefineProperty(cx, fullReportObj, "firefoxUptime",
                         firefoxUptimeArray, JSPROP_ENUMERATE);
  if (!ok) {
    return NS_ERROR_FAILURE;
  }

  ok = JS_DefineProperty(cx, fullReportObj, "annotations", annotationsArray,
                         JSPROP_ENUMERATE);
  if (!ok) {
    return NS_ERROR_FAILURE;
  }


  const size_t length = stacks.GetStackCount();
  for (size_t i = 0; i < length; ++i) {
    if (!JS_DefineElement(cx, durationArray, i, mHangReports.GetDuration(i),
                          JSPROP_ENUMERATE)) {
      return NS_ERROR_FAILURE;
    }
    if (!JS_DefineElement(cx, systemUptimeArray, i, mHangReports.GetSystemUptime(i),
                          JSPROP_ENUMERATE)) {
      return NS_ERROR_FAILURE;
    }
    if (!JS_DefineElement(cx, firefoxUptimeArray, i, mHangReports.GetFirefoxUptime(i),
                          JSPROP_ENUMERATE)) {
      return NS_ERROR_FAILURE;
    }

    // NOTE(review): the annotation reflection below runs once per stack and
    // re-defines the same annotationsArray elements (annotationIndex restarts
    // at 0 each iteration); the resulting array is the same each time, but
    // the work is repeated |length| times — this looks hoistable out of the
    // loop. Confirm before changing, since with zero stacks no annotations
    // are emitted at all.
    size_t annotationIndex = 0;
    const nsClassHashtable<nsStringHashKey, HangReports::AnnotationInfo>& annotationInfo =
      mHangReports.GetAnnotationInfo();

    for (auto iter = annotationInfo.ConstIter(); !iter.Done(); iter.Next()) {
      const HangReports::AnnotationInfo* info = iter.Data();

      JS::Rooted<JSObject*> keyValueArray(cx, JS_NewArrayObject(cx, 0));
      if (!keyValueArray) {
        return NS_ERROR_FAILURE;
      }

      // Create an array containing all the indices of the chrome hangs relative to this
      // annotation.
      JS::Rooted<JS::Value> indicesArray(cx);
      if (!mozilla::dom::ToJSValue(cx, info->mHangIndices, &indicesArray)) {
        return NS_ERROR_OUT_OF_MEMORY;
      }

      // We're saving the annotation as [[indices], {annotation-data}], so add the indices
      // array as the first element of that structure.
      if (!JS_DefineElement(cx, keyValueArray, 0, indicesArray, JSPROP_ENUMERATE)) {
        return NS_ERROR_FAILURE;
      }

      // Create the annotations object...
      JS::Rooted<JSObject*> jsAnnotation(cx, JS_NewPlainObject(cx));
      if (!jsAnnotation) {
        return NS_ERROR_FAILURE;
      }
      UniquePtr<HangAnnotations::Enumerator> annotationsEnum =
        info->mAnnotations->GetEnumerator();
      if (!annotationsEnum) {
        return NS_ERROR_FAILURE;
      }

      // ... fill it with key:value pairs...
      nsAutoString key;
      nsAutoString value;
      while (annotationsEnum->Next(key, value)) {
        JS::RootedValue jsValue(cx);
        jsValue.setString(JS_NewUCStringCopyN(cx, value.get(), value.Length()));
        if (!JS_DefineUCProperty(cx, jsAnnotation, key.get(), key.Length(),
                                 jsValue, JSPROP_ENUMERATE)) {
          return NS_ERROR_FAILURE;
        }
      }

      // ... and append it after the indices array.
      if (!JS_DefineElement(cx, keyValueArray, 1, jsAnnotation, JSPROP_ENUMERATE)) {
        return NS_ERROR_FAILURE;
      }
      if (!JS_DefineElement(cx, annotationsArray, annotationIndex++,
                            keyValueArray, JSPROP_ENUMERATE)) {
        return NS_ERROR_FAILURE;
      }
    }
  }

  return NS_OK;
}
+
// Builds a JS object { memoryMap: [[name, breakpadId], ...],
// stacks: [[[moduleIndex, offset], ...], ...] } from |stacks|. A frame whose
// module index is uint16_t max is reported with module index -1 (no module).
// Returns nullptr on any JS allocation/definition failure.
static JSObject *
CreateJSStackObject(JSContext *cx, const CombinedStacks &stacks) {
  JS::Rooted<JSObject*> ret(cx, JS_NewPlainObject(cx));
  if (!ret) {
    return nullptr;
  }

  JS::Rooted<JSObject*> moduleArray(cx, JS_NewArrayObject(cx, 0));
  if (!moduleArray) {
    return nullptr;
  }
  bool ok = JS_DefineProperty(cx, ret, "memoryMap", moduleArray,
                              JSPROP_ENUMERATE);
  if (!ok) {
    return nullptr;
  }

  const size_t moduleCount = stacks.GetModuleCount();
  for (size_t moduleIndex = 0; moduleIndex < moduleCount; ++moduleIndex) {
    // Current module
    const Telemetry::ProcessedStack::Module& module =
      stacks.GetModule(moduleIndex);

    JS::Rooted<JSObject*> moduleInfoArray(cx, JS_NewArrayObject(cx, 0));
    if (!moduleInfoArray) {
      return nullptr;
    }
    if (!JS_DefineElement(cx, moduleArray, moduleIndex, moduleInfoArray,
                          JSPROP_ENUMERATE)) {
      return nullptr;
    }

    unsigned index = 0;

    // Module name
    JS::Rooted<JSString*> str(cx, JS_NewStringCopyZ(cx, module.mName.c_str()));
    if (!str) {
      return nullptr;
    }
    if (!JS_DefineElement(cx, moduleInfoArray, index++, str, JSPROP_ENUMERATE)) {
      return nullptr;
    }

    // Module breakpad identifier
    JS::Rooted<JSString*> id(cx, JS_NewStringCopyZ(cx, module.mBreakpadId.c_str()));
    if (!id) {
      return nullptr;
    }
    if (!JS_DefineElement(cx, moduleInfoArray, index++, id, JSPROP_ENUMERATE)) {
      return nullptr;
    }
  }

  JS::Rooted<JSObject*> reportArray(cx, JS_NewArrayObject(cx, 0));
  if (!reportArray) {
    return nullptr;
  }
  ok = JS_DefineProperty(cx, ret, "stacks", reportArray, JSPROP_ENUMERATE);
  if (!ok) {
    return nullptr;
  }

  const size_t length = stacks.GetStackCount();
  for (size_t i = 0; i < length; ++i) {
    // Represent call stack PCs as (module index, offset) pairs.
    JS::Rooted<JSObject*> pcArray(cx, JS_NewArrayObject(cx, 0));
    if (!pcArray) {
      return nullptr;
    }

    if (!JS_DefineElement(cx, reportArray, i, pcArray, JSPROP_ENUMERATE)) {
      return nullptr;
    }

    const CombinedStacks::Stack& stack = stacks.GetStack(i);
    const uint32_t pcCount = stack.size();
    for (size_t pcIndex = 0; pcIndex < pcCount; ++pcIndex) {
      const Telemetry::ProcessedStack::Frame& frame = stack[pcIndex];
      JS::Rooted<JSObject*> framePair(cx, JS_NewArrayObject(cx, 0));
      if (!framePair) {
        return nullptr;
      }
      // uint16_t max is the sentinel for "no module"; report it as -1.
      int modIndex = (std::numeric_limits<uint16_t>::max() == frame.mModIndex) ?
        -1 : frame.mModIndex;
      if (!JS_DefineElement(cx, framePair, 0, modIndex, JSPROP_ENUMERATE)) {
        return nullptr;
      }
      // Offsets can exceed the int32 range JS_DefineElement would take, so
      // pass them as doubles.
      if (!JS_DefineElement(cx, framePair, 1, static_cast<double>(frame.mOffset),
                            JSPROP_ENUMERATE)) {
        return nullptr;
      }
      if (!JS_DefineElement(cx, pcArray, pcIndex, framePair, JSPROP_ENUMERATE)) {
        return nullptr;
      }
    }
  }

  return ret;
}
+
// A breakpad ID is considered valid when it is at least 33 characters long
// and consists solely of uppercase hexadecimal digits (0-9, A-F).
static bool
IsValidBreakpadId(const std::string &breakpadId) {
  if (breakpadId.size() < 33) {
    return false;
  }
  for (const char c : breakpadId) {
    const bool isDigit = (c >= '0' && c <= '9');
    const bool isUpperHex = (c >= 'A' && c <= 'F');
    if (!isDigit && !isUpperHex) {
      return false;
    }
  }
  return true;
}
+
// Read a stack from the given file name. In case of any error, aStack is
// unchanged.
//
// Expected format (strict): a module count on its own line, then one
// "BREAKPADID MODULENAME" line per module, then a frame count on its own
// line, then one "INDEX HEXOFFSET" pair per frame. Any deviation aborts the
// parse and leaves aStack untouched.
static void
ReadStack(const char *aFileName, Telemetry::ProcessedStack &aStack)
{
  std::ifstream file(aFileName);

  size_t numModules;
  file >> numModules;
  if (file.fail()) {
    return;
  }

  // The count must be followed by exactly one newline.
  char newline = file.get();
  if (file.fail() || newline != '\n') {
    return;
  }

  // Parse into a local stack first; only assign to aStack on full success.
  Telemetry::ProcessedStack stack;
  for (size_t i = 0; i < numModules; ++i) {
    std::string breakpadId;
    file >> breakpadId;
    if (file.fail() || !IsValidBreakpadId(breakpadId)) {
      return;
    }

    // Exactly one space separates the breakpad ID from the module name.
    char space = file.get();
    if (file.fail() || space != ' ') {
      return;
    }

    // The rest of the line is the module name; a leading space means the
    // separator was doubled, which is rejected.
    std::string moduleName;
    getline(file, moduleName);
    if (file.fail() || moduleName[0] == ' ') {
      return;
    }

    Telemetry::ProcessedStack::Module module = {
      moduleName,
      breakpadId
    };
    stack.AddModule(module);
  }

  size_t numFrames;
  file >> numFrames;
  if (file.fail()) {
    return;
  }

  newline = file.get();
  if (file.fail() || newline != '\n') {
    return;
  }

  for (size_t i = 0; i < numFrames; ++i) {
    uint16_t index;
    file >> index;
    // Offsets are stored in hex; restore decimal mode afterwards.
    uintptr_t offset;
    file >> std::hex >> offset >> std::dec;
    if (file.fail()) {
      return;
    }

    Telemetry::ProcessedStack::Frame frame = {
      offset,
      index
    };
    stack.AddFrame(frame);
  }

  aStack = stack;
}
+
// Converts a TimeHistogram into a Chromium-style histogram JS object with
// min/max/histogram_type/sum plus parallel "ranges" and "counts" arrays
// (one extra leading "under" bucket). Returns nullptr on failure.
static JSObject*
CreateJSTimeHistogram(JSContext* cx, const Telemetry::TimeHistogram& time)
{
  /* Create JS representation of TimeHistogram,
     in the format of Chromium-style histograms. */
  JS::RootedObject ret(cx, JS_NewPlainObject(cx));
  if (!ret) {
    return nullptr;
  }

  if (!JS_DefineProperty(cx, ret, "min", time.GetBucketMin(0),
                         JSPROP_ENUMERATE) ||
      !JS_DefineProperty(cx, ret, "max",
                         time.GetBucketMax(ArrayLength(time) - 1),
                         JSPROP_ENUMERATE) ||
      !JS_DefineProperty(cx, ret, "histogram_type",
                         nsITelemetry::HISTOGRAM_EXPONENTIAL,
                         JSPROP_ENUMERATE)) {
    return nullptr;
  }
  // TODO: calculate "sum"
  if (!JS_DefineProperty(cx, ret, "sum", 0, JSPROP_ENUMERATE)) {
    return nullptr;
  }

  JS::RootedObject ranges(
    cx, JS_NewArrayObject(cx, ArrayLength(time) + 1));
  JS::RootedObject counts(
    cx, JS_NewArrayObject(cx, ArrayLength(time) + 1));
  if (!ranges || !counts) {
    return nullptr;
  }
  /* In a Chromium-style histogram, the first bucket is an "under" bucket
     that represents all values below the histogram's range. */
  if (!JS_DefineElement(cx, ranges, 0, time.GetBucketMin(0), JSPROP_ENUMERATE) ||
      !JS_DefineElement(cx, counts, 0, 0, JSPROP_ENUMERATE)) {
    return nullptr;
  }
  // Real buckets start at element 1, shifted past the "under" bucket.
  for (size_t i = 0; i < ArrayLength(time); i++) {
    if (!JS_DefineElement(cx, ranges, i + 1, time.GetBucketMax(i),
                          JSPROP_ENUMERATE) ||
        !JS_DefineElement(cx, counts, i + 1, time[i], JSPROP_ENUMERATE)) {
      return nullptr;
    }
  }
  if (!JS_DefineProperty(cx, ret, "ranges", ranges, JSPROP_ENUMERATE) ||
      !JS_DefineProperty(cx, ret, "counts", counts, JSPROP_ENUMERATE)) {
    return nullptr;
  }
  return ret;
}
+
+// Convert a HangStack (a vector of C-string frame labels) into a JS array
+// of strings. Returns nullptr on allocation failure.
+static JSObject*
+CreateJSHangStack(JSContext* cx, const Telemetry::HangStack& stack)
+{
+  JS::RootedObject ret(cx, JS_NewArrayObject(cx, stack.length()));
+  if (!ret) {
+    return nullptr;
+  }
+  for (size_t i = 0; i < stack.length(); i++) {
+    JS::RootedString string(cx, JS_NewStringCopyZ(cx, stack[i]));
+    // Fix: JS_NewStringCopyZ returns nullptr on OOM; previously the null
+    // handle was passed straight into JS_DefineElement. Bail out instead.
+    if (!string ||
+        !JS_DefineElement(cx, ret, i, string, JSPROP_ENUMERATE)) {
+      return nullptr;
+    }
+  }
+  return ret;
+}
+
+// Convert a vector of hang annotation sets into a JS array of plain
+// objects (one object per unique annotation set, keyed by annotation
+// name). Duplicate sets are collapsed via a computed key. On fatal JS
+// errors |returnedObject| is set to nullptr; per-entry failures are
+// skipped so one bad set does not lose the rest.
+static void
+CreateJSHangAnnotations(JSContext* cx, const HangAnnotationsVector& annotations,
+                        JS::MutableHandleObject returnedObject)
+{
+  JS::RootedObject annotationsArray(cx, JS_NewArrayObject(cx, 0));
+  if (!annotationsArray) {
+    returnedObject.set(nullptr);
+    return;
+  }
+  // We keep track of the annotations we reported in this hash set, so we can
+  // discard duplicated ones.
+  nsTHashtable<nsStringHashKey> reportedAnnotations;
+  size_t annotationIndex = 0;
+  for (const HangAnnotationsPtr *i = annotations.begin(), *e = annotations.end();
+       i != e; ++i) {
+    JS::RootedObject jsAnnotation(cx, JS_NewPlainObject(cx));
+    if (!jsAnnotation) {
+      continue;
+    }
+    const HangAnnotationsPtr& curAnnotations = *i;
+    // Build a key to index the current annotations in our hash set.
+    nsAutoString annotationsKey;
+    nsresult rv = ComputeAnnotationsKey(curAnnotations, annotationsKey);
+    if (NS_FAILED(rv)) {
+      continue;
+    }
+    // Check if the annotations are in the set. If that's the case, don't double report.
+    if (reportedAnnotations.GetEntry(annotationsKey)) {
+      continue;
+    }
+    // If not, report them.
+    reportedAnnotations.PutEntry(annotationsKey);
+    UniquePtr<HangAnnotations::Enumerator> annotationsEnum =
+      curAnnotations->GetEnumerator();
+    if (!annotationsEnum) {
+      continue;
+    }
+    // Copy each key/value pair onto the JS object as an enumerable string
+    // property.
+    nsAutoString key;
+    nsAutoString value;
+    while (annotationsEnum->Next(key, value)) {
+      JS::RootedValue jsValue(cx);
+      jsValue.setString(JS_NewUCStringCopyN(cx, value.get(), value.Length()));
+      if (!JS_DefineUCProperty(cx, jsAnnotation, key.get(), key.Length(),
+                               jsValue, JSPROP_ENUMERATE)) {
+        returnedObject.set(nullptr);
+        return;
+      }
+    }
+    if (!JS_SetElement(cx, annotationsArray, annotationIndex, jsAnnotation)) {
+      continue;
+    }
+    ++annotationIndex;
+  }
+  // Return the array using a |MutableHandleObject| to avoid triggering a false
+  // positive rooting issue in the hazard analysis build.
+  returnedObject.set(annotationsArray);
+}
+
+// Convert a single HangHistogram into a JS object with "stack",
+// "histogram", and (when present) "annotations" and "nativeStack"
+// properties. Returns nullptr on any allocation/definition failure.
+static JSObject*
+CreateJSHangHistogram(JSContext* cx, const Telemetry::HangHistogram& hang)
+{
+  JS::RootedObject ret(cx, JS_NewPlainObject(cx));
+  if (!ret) {
+    return nullptr;
+  }
+
+  JS::RootedObject stack(cx, CreateJSHangStack(cx, hang.GetStack()));
+  // HangHistogram derives from TimeHistogram, so |hang| itself is the
+  // time histogram here.
+  JS::RootedObject time(cx, CreateJSTimeHistogram(cx, hang));
+  auto& hangAnnotations = hang.GetAnnotations();
+  JS::RootedObject annotations(cx);
+  CreateJSHangAnnotations(cx, hangAnnotations, &annotations);
+
+  if (!stack ||
+      !time ||
+      !annotations ||
+      !JS_DefineProperty(cx, ret, "stack", stack, JSPROP_ENUMERATE) ||
+      !JS_DefineProperty(cx, ret, "histogram", time, JSPROP_ENUMERATE) ||
+      (!hangAnnotations.empty() && // <-- Only define annotations when nonempty
+       !JS_DefineProperty(cx, ret, "annotations", annotations, JSPROP_ENUMERATE))) {
+    return nullptr;
+  }
+
+  // The native stack is optional; only reflect it when one was captured.
+  if (!hang.GetNativeStack().empty()) {
+    JS::RootedObject native(cx, CreateJSHangStack(cx, hang.GetNativeStack()));
+    if (!native ||
+        !JS_DefineProperty(cx, ret, "nativeStack", native, JSPROP_ENUMERATE)) {
+      return nullptr;
+    }
+  }
+  return ret;
+}
+
+// Convert one thread's hang statistics into a JS object with "name",
+// "activity" (a time histogram) and "hangs" (an array of hang
+// histograms). Returns nullptr on failure.
+static JSObject*
+CreateJSThreadHangStats(JSContext* cx, const Telemetry::ThreadHangStats& thread)
+{
+  JS::RootedObject ret(cx, JS_NewPlainObject(cx));
+  if (!ret) {
+    return nullptr;
+  }
+  JS::RootedString name(cx, JS_NewStringCopyZ(cx, thread.GetName()));
+  if (!name ||
+      !JS_DefineProperty(cx, ret, "name", name, JSPROP_ENUMERATE)) {
+    return nullptr;
+  }
+
+  JS::RootedObject activity(cx, CreateJSTimeHistogram(cx, thread.mActivity));
+  if (!activity ||
+      !JS_DefineProperty(cx, ret, "activity", activity, JSPROP_ENUMERATE)) {
+    return nullptr;
+  }
+
+  JS::RootedObject hangs(cx, JS_NewArrayObject(cx, 0));
+  if (!hangs) {
+    return nullptr;
+  }
+  for (size_t i = 0; i < thread.mHangs.length(); i++) {
+    JS::RootedObject obj(cx, CreateJSHangHistogram(cx, thread.mHangs[i]));
+    // Fix: CreateJSHangHistogram returns nullptr on OOM; previously the
+    // null handle was passed straight into JS_DefineElement. Propagate
+    // the failure instead.
+    if (!obj ||
+        !JS_DefineElement(cx, hangs, i, obj, JSPROP_ENUMERATE)) {
+      return nullptr;
+    }
+  }
+  if (!JS_DefineProperty(cx, ret, "hangs", hangs, JSPROP_ENUMERATE)) {
+    return nullptr;
+  }
+
+  return ret;
+}
+
+// nsITelemetry.threadHangStats: return an array of per-thread hang
+// statistics, combining currently-live threads (from the hang monitor)
+// with stats saved from threads that have already exited.
+NS_IMETHODIMP
+TelemetryImpl::GetThreadHangStats(JSContext* cx, JS::MutableHandle<JS::Value> ret)
+{
+  JS::RootedObject retObj(cx, JS_NewArrayObject(cx, 0));
+  if (!retObj) {
+    return NS_ERROR_FAILURE;
+  }
+  size_t threadIndex = 0;
+
+  if (!BackgroundHangMonitor::IsDisabled()) {
+    /* First add active threads; we need to hold |iter| (and its lock)
+       throughout this method to avoid a race condition where a thread can
+       be recorded twice if the thread is destroyed while this method is
+       running */
+    BackgroundHangMonitor::ThreadHangStatsIterator iter;
+    for (Telemetry::ThreadHangStats* histogram = iter.GetNext();
+         histogram; histogram = iter.GetNext()) {
+      // NOTE(review): CreateJSThreadHangStats can return nullptr on OOM,
+      // in which case JS_DefineElement receives a null handle; consider
+      // checking |obj| before defining the element.
+      JS::RootedObject obj(cx, CreateJSThreadHangStats(cx, *histogram));
+      if (!JS_DefineElement(cx, retObj, threadIndex++, obj, JSPROP_ENUMERATE)) {
+        return NS_ERROR_FAILURE;
+      }
+    }
+  }
+
+  // Add saved threads next
+  MutexAutoLock autoLock(mThreadHangStatsMutex);
+  for (size_t i = 0; i < mThreadHangStats.length(); i++) {
+    JS::RootedObject obj(cx,
+      CreateJSThreadHangStats(cx, mThreadHangStats[i]));
+    if (!JS_DefineElement(cx, retObj, threadIndex++, obj, JSPROP_ENUMERATE)) {
+      return NS_ERROR_FAILURE;
+    }
+  }
+  ret.setObject(*retObj);
+  return NS_OK;
+}
+
+// Scan the profile directory for "Telemetry.LateWriteFinal-*" files
+// (written by the late-write checker on a previous run), parse each one
+// into a ProcessedStack, accumulate the non-empty ones into
+// mLateWritesStacks, and delete the files afterwards. Failures are
+// silent: this is best-effort diagnostic collection.
+void
+TelemetryImpl::ReadLateWritesStacks(nsIFile* aProfileDir)
+{
+  nsAutoCString nativePath;
+  nsresult rv = aProfileDir->GetNativePath(nativePath);
+  if (NS_FAILED(rv)) {
+    return;
+  }
+
+  const char *name = nativePath.get();
+  PRDir *dir = PR_OpenDir(name);
+  if (!dir) {
+    return;
+  }
+
+  PRDirEntry *ent;
+  const char *prefix = "Telemetry.LateWriteFinal-";
+  unsigned int prefixLen = strlen(prefix);
+  while ((ent = PR_ReadDir(dir, PR_SKIP_NONE))) {
+    // Only late-write stack files; skip everything else in the profile.
+    if (strncmp(prefix, ent->name, prefixLen) != 0) {
+      continue;
+    }
+
+    nsAutoCString stackNativePath = nativePath;
+    stackNativePath += XPCOM_FILE_PATH_SEPARATOR;
+    stackNativePath += nsDependentCString(ent->name);
+
+    Telemetry::ProcessedStack stack;
+    ReadStack(stackNativePath.get(), stack);
+    // ReadStack leaves |stack| empty on any parse error; only keep
+    // successfully parsed, non-empty stacks.
+    if (stack.GetStackSize() != 0) {
+      mLateWritesStacks.AddStack(stack);
+    }
+    // Delete the file so that we don't report it again on the next run.
+    PR_Delete(stackNativePath.get());
+  }
+  PR_CloseDir(dir);
+}
+
+// nsITelemetry.lateWrites: reflect the accumulated late-write stacks
+// into a JS object. Returns an empty stack object until the async
+// telemetry data read has completed.
+NS_IMETHODIMP
+TelemetryImpl::GetLateWrites(JSContext *cx, JS::MutableHandle<JS::Value> ret)
+{
+  // The user must call AsyncReadTelemetryData first. We return an empty list
+  // instead of reporting a failure so that the rest of telemetry can uniformly
+  // handle the read not being available yet.
+
+  // FIXME: we allocate the js object again and again in the getter. We should
+  // figure out a way to cache it. In order to do that we have to call
+  // JS_AddNamedObjectRoot. A natural place to do so is in the TelemetryImpl
+  // constructor, but it is not clear how to get a JSContext in there.
+  // Another option would be to call it in here when we first call
+  // CreateJSStackObject, but we would still need to figure out where to call
+  // JS_RemoveObjectRoot. Would it be ok to never call JS_RemoveObjectRoot
+  // and just set the pointer to nullptr is the telemetry destructor?
+
+  JSObject *report;
+  if (!mCachedTelemetryData) {
+    CombinedStacks empty;
+    report = CreateJSStackObject(cx, empty);
+  } else {
+    report = CreateJSStackObject(cx, mLateWritesStacks);
+  }
+
+  if (report == nullptr) {
+    return NS_ERROR_FAILURE;
+  }
+
+  ret.setObject(*report);
+  return NS_OK;
+}
+
+// Thin nsITelemetry forwarders into the TelemetryHistogram module:
+// histogram-name enumeration and by-name wrapper lookup.
+
+NS_IMETHODIMP
+TelemetryImpl::RegisteredHistograms(uint32_t aDataset, uint32_t *aCount,
+                                    char*** aHistograms)
+{
+  // Enumerate plain histogram names belonging to |aDataset|.
+  return TelemetryHistogram::RegisteredHistograms(aDataset, aCount,
+                                                  aHistograms);
+}
+
+NS_IMETHODIMP
+TelemetryImpl::RegisteredKeyedHistograms(uint32_t aDataset, uint32_t *aCount,
+                                         char*** aHistograms)
+{
+  // Enumerate keyed histogram names belonging to |aDataset|.
+  return TelemetryHistogram::RegisteredKeyedHistograms(aDataset, aCount,
+                                                       aHistograms);
+}
+
+NS_IMETHODIMP
+TelemetryImpl::GetHistogramById(const nsACString &name, JSContext *cx,
+                                JS::MutableHandle<JS::Value> ret)
+{
+  // Reflect the named plain histogram as a JS wrapper object.
+  return TelemetryHistogram::GetHistogramById(name, cx, ret);
+}
+
+NS_IMETHODIMP
+TelemetryImpl::GetKeyedHistogramById(const nsACString &name, JSContext *cx,
+                                     JS::MutableHandle<JS::Value> ret)
+{
+  // Reflect the named keyed histogram as a JS wrapper object.
+  return TelemetryHistogram::GetKeyedHistogramById(name, cx, ret);
+}
+
+/**
+ * Indicates if Telemetry can record base data (FHR data). This is true if the
+ * FHR data reporting service or self-support are enabled.
+ *
+ * In the unlikely event that adding a new base probe is needed, please check the data
+ * collection wiki at https://wiki.mozilla.org/Firefox/Data_Collection and talk to the
+ * Telemetry team.
+ */
+NS_IMETHODIMP
+TelemetryImpl::GetCanRecordBase(bool *ret) {
+  *ret = TelemetryHistogram::CanRecordBase();
+  return NS_OK;
+}
+
+// Propagate the "can record base data" flag to all three telemetry
+// subsystems (histograms, scalars, events) so they stay in sync.
+NS_IMETHODIMP
+TelemetryImpl::SetCanRecordBase(bool canRecord) {
+  TelemetryHistogram::SetCanRecordBase(canRecord);
+  TelemetryScalar::SetCanRecordBase(canRecord);
+  TelemetryEvent::SetCanRecordBase(canRecord);
+  return NS_OK;
+}
+
+/**
+ * Indicates if Telemetry is allowed to record extended data. Returns false if the user
+ * hasn't opted into "extended Telemetry" on the Release channel, when the user has
+ * explicitly opted out of Telemetry on Nightly/Aurora/Beta or if manually set to false
+ * during tests.
+ * If the returned value is false, gathering of extended telemetry statistics is disabled.
+ */
+NS_IMETHODIMP
+TelemetryImpl::GetCanRecordExtended(bool *ret) {
+  *ret = TelemetryHistogram::CanRecordExtended();
+  return NS_OK;
+}
+
+// Propagate the "can record extended data" flag to all three telemetry
+// subsystems (histograms, scalars, events) so they stay in sync.
+NS_IMETHODIMP
+TelemetryImpl::SetCanRecordExtended(bool canRecord) {
+  TelemetryHistogram::SetCanRecordExtended(canRecord);
+  TelemetryScalar::SetCanRecordExtended(canRecord);
+  TelemetryEvent::SetCanRecordExtended(canRecord);
+  return NS_OK;
+}
+
+
+// True only for official, telemetry-enabled, non-debug builds; decided
+// entirely at compile time.
+NS_IMETHODIMP
+TelemetryImpl::GetIsOfficialTelemetry(bool *ret) {
+#if defined(MOZILLA_OFFICIAL) && defined(MOZ_TELEMETRY_REPORTING) && !defined(DEBUG)
+  *ret = true;
+#else
+  *ret = false;
+#endif
+  return NS_OK;
+}
+
+// Singleton factory for the nsITelemetry service. Initializes the
+// histogram/scalar/event global state before constructing the instance,
+// and must only be called once (via GetService()).
+already_AddRefed<nsITelemetry>
+TelemetryImpl::CreateTelemetryInstance()
+{
+  MOZ_ASSERT(sTelemetry == nullptr, "CreateTelemetryInstance may only be called once, via GetService()");
+
+  // Histograms are recorded in parent, content and GPU processes.
+  bool useTelemetry = false;
+  if (XRE_IsParentProcess() ||
+      XRE_IsContentProcess() ||
+      XRE_IsGPUProcess())
+  {
+    useTelemetry = true;
+  }
+
+  // First, initialize the TelemetryHistogram and TelemetryScalar global states.
+  TelemetryHistogram::InitializeGlobalState(useTelemetry, useTelemetry);
+
+  // Only record scalars from the parent process.
+  TelemetryScalar::InitializeGlobalState(XRE_IsParentProcess(), XRE_IsParentProcess());
+
+  // Only record events from the parent process.
+  TelemetryEvent::InitializeGlobalState(XRE_IsParentProcess(), XRE_IsParentProcess());
+
+  // Now, create and initialize the Telemetry global state.
+  sTelemetry = new TelemetryImpl();
+
+  // AddRef for the local reference
+  NS_ADDREF(sTelemetry);
+  // AddRef for the caller
+  nsCOMPtr<nsITelemetry> ret = sTelemetry;
+
+  sTelemetry->InitMemoryReporter();
+  InitHistogramRecordingEnabled(); // requires sTelemetry to exist
+
+  return ret.forget();
+}
+
+// Module destructor hook: release the singleton and tear down the
+// histogram/scalar/event global state.
+void
+TelemetryImpl::ShutdownTelemetry()
+{
+  // No point in collecting IO beyond this point
+  ClearIOReporting();
+  NS_IF_RELEASE(sTelemetry);
+
+  // Lastly, de-initialise the TelemetryHistogram and TelemetryScalar global states,
+  // so as to release any heap storage that would otherwise be kept alive by it.
+  TelemetryHistogram::DeInitializeGlobalState();
+  TelemetryScalar::DeInitializeGlobalState();
+  TelemetryEvent::DeInitializeGlobalState();
+}
+
+// Record one slow-SQL occurrence (hit count + total delay in ms) into
+// either the sanitized or the private map, bucketed by SQL string and by
+// main-thread vs. other-thread. Callers are expected to have checked
+// that sTelemetry is non-null (RecordSlowStatement does).
+void
+TelemetryImpl::StoreSlowSQL(const nsACString &sql, uint32_t delay,
+                            SanitizedState state)
+{
+  AutoHashtable<SlowSQLEntryType>* slowSQLMap = nullptr;
+  if (state == Sanitized)
+    slowSQLMap = &(sTelemetry->mSanitizedSQL);
+  else
+    slowSQLMap = &(sTelemetry->mPrivateSQL);
+
+  MutexAutoLock hashMutex(sTelemetry->mHashMutex);
+
+  SlowSQLEntryType *entry = slowSQLMap->GetEntry(sql);
+  if (!entry) {
+    // First sighting of this statement: create and zero a new entry.
+    entry = slowSQLMap->PutEntry(sql);
+    if (MOZ_UNLIKELY(!entry))
+      return;
+    entry->mData.mainThread.hitCount = 0;
+    entry->mData.mainThread.totalTime = 0;
+    entry->mData.otherThreads.hitCount = 0;
+    entry->mData.otherThreads.totalTime = 0;
+  }
+
+  if (NS_IsMainThread()) {
+    entry->mData.mainThread.hitCount++;
+    entry->mData.mainThread.totalTime += delay;
+  } else {
+    entry->mData.otherThreads.hitCount++;
+    entry->mData.otherThreads.totalTime += delay;
+  }
+}
+
+/**
+ * This method replaces string literals in SQL strings with the word :private
+ *
+ * States used in this state machine:
+ *
+ * NORMAL:
+ * - This is the active state when not iterating over a string literal or
+ * comment
+ *
+ * SINGLE_QUOTE:
+ * - Defined here: http://www.sqlite.org/lang_expr.html
+ * - This state represents iterating over a string literal opened with
+ * a single quote.
+ * - A single quote within the string can be encoded by putting 2 single quotes
+ * in a row, e.g. 'This literal contains an escaped quote '''
+ * - Any double quotes found within a single-quoted literal are ignored
+ * - This state covers BLOB literals, e.g. X'ABC123'
+ * - The string literal and the enclosing quotes will be replaced with
+ * the text :private
+ *
+ * DOUBLE_QUOTE:
+ * - Same rules as the SINGLE_QUOTE state.
+ * - According to http://www.sqlite.org/lang_keywords.html,
+ * SQLite interprets text in double quotes as an identifier unless it's used in
+ * a context where it cannot be resolved to an identifier and a string literal
+ * is allowed. This method removes text in double-quotes for safety.
+ *
+ * DASH_COMMENT:
+ * - http://www.sqlite.org/lang_comment.html
+ * - A dash comment starts with two dashes in a row,
+ * e.g. DROP TABLE foo -- a comment
+ * - Any text following two dashes in a row is interpreted as a comment until
+ * end of input or a newline character
+ * - Any quotes found within the comment are ignored and no replacements made
+ *
+ * C_STYLE_COMMENT:
+ * - http://www.sqlite.org/lang_comment.html
+ * - A C-style comment starts with a forward slash and an asterisk, and ends
+ * with an asterisk and a forward slash
+ * - Any text following comment start is interpreted as a comment up to end of
+ * input or comment end
+ * - Any quotes found within the comment are ignored and no replacements made
+ */
+// State machine described in the comment block above: replace every
+// quoted literal/identifier in |sql| with ":private" and leave comments
+// untouched. Unquoted text is copied through verbatim.
+nsCString
+TelemetryImpl::SanitizeSQL(const nsACString &sql) {
+  nsCString output;
+  int length = sql.Length();
+
+  typedef enum {
+    NORMAL,
+    SINGLE_QUOTE,
+    DOUBLE_QUOTE,
+    DASH_COMMENT,
+    C_STYLE_COMMENT,
+  } State;
+
+  State state = NORMAL;
+  // Start of the current run of text to copy verbatim; -1 while inside a
+  // quoted region (nothing pending to copy).
+  int fragmentStart = 0;
+  for (int i = 0; i < length; i++) {
+    char character = sql[i];
+    char nextCharacter = (i + 1 < length) ? sql[i + 1] : '\0';
+
+    switch (character) {
+      case '\'':
+      case '"':
+        if (state == NORMAL) {
+          // Opening quote: flush the preceding fragment and emit the
+          // placeholder instead of the literal.
+          state = (character == '\'') ? SINGLE_QUOTE : DOUBLE_QUOTE;
+          output += nsDependentCSubstring(sql, fragmentStart, i - fragmentStart);
+          output += ":private";
+          fragmentStart = -1;
+        } else if ((state == SINGLE_QUOTE && character == '\'') ||
+                   (state == DOUBLE_QUOTE && character == '"')) {
+          if (nextCharacter == character) {
+            // Two consecutive quotes within a string literal are a single escaped quote
+            i++;
+          } else {
+            // Closing quote: resume verbatim copying after it.
+            state = NORMAL;
+            fragmentStart = i + 1;
+          }
+        }
+        break;
+      case '-':
+        if (state == NORMAL) {
+          if (nextCharacter == '-') {
+            state = DASH_COMMENT;
+            i++;
+          }
+        }
+        break;
+      case '\n':
+        // A newline terminates a "--" comment.
+        if (state == DASH_COMMENT) {
+          state = NORMAL;
+        }
+        break;
+      case '/':
+        if (state == NORMAL) {
+          if (nextCharacter == '*') {
+            state = C_STYLE_COMMENT;
+            i++;
+          }
+        }
+        break;
+      case '*':
+        if (state == C_STYLE_COMMENT) {
+          if (nextCharacter == '/') {
+            state = NORMAL;
+          }
+        }
+        break;
+      default:
+        continue;
+    }
+  }
+
+  // Flush any trailing verbatim fragment (skipped if input ended inside
+  // a quoted region, i.e. fragmentStart == -1).
+  if ((fragmentStart >= 0) && fragmentStart < length)
+    output += nsDependentCSubstring(sql, fragmentStart, length - fragmentStart);
+
+  return output;
+}
+
+// A whitelist mechanism to prevent Telemetry reporting on Addon & Thunderbird
+// DBs.
+struct TrackedDBEntry
+{
+  const char* mName;       // database file name (or prefix)
+  const uint32_t mNameLength; // length of mName, excluding the terminator
+
+  // This struct isn't meant to be used beyond the static arrays below.
+  // constexpr so the arrays can be initialized at compile time.
+  constexpr
+  TrackedDBEntry(const char* aName, uint32_t aNameLength)
+    : mName(aName)
+    , mNameLength(aNameLength)
+  { }
+
+  TrackedDBEntry() = delete;
+  TrackedDBEntry(TrackedDBEntry&) = delete;
+};
+
+// Computes the string length at compile time from the literal.
+#define TRACKEDDB_ENTRY(_name) { _name, (sizeof(_name) - 1) }
+
+// A whitelist of database names. If the database name exactly matches one of
+// these then its SQL statements will always be recorded.
+static constexpr TrackedDBEntry kTrackedDBs[] = {
+  // IndexedDB for about:home, see aboutHome.js
+  TRACKEDDB_ENTRY("818200132aebmoouht.sqlite"),
+  TRACKEDDB_ENTRY("addons.sqlite"),
+  TRACKEDDB_ENTRY("content-prefs.sqlite"),
+  TRACKEDDB_ENTRY("cookies.sqlite"),
+  TRACKEDDB_ENTRY("downloads.sqlite"),
+  TRACKEDDB_ENTRY("extensions.sqlite"),
+  TRACKEDDB_ENTRY("formhistory.sqlite"),
+  TRACKEDDB_ENTRY("index.sqlite"),
+  TRACKEDDB_ENTRY("netpredictions.sqlite"),
+  TRACKEDDB_ENTRY("permissions.sqlite"),
+  TRACKEDDB_ENTRY("places.sqlite"),
+  TRACKEDDB_ENTRY("reading-list.sqlite"),
+  TRACKEDDB_ENTRY("search.sqlite"),
+  TRACKEDDB_ENTRY("signons.sqlite"),
+  TRACKEDDB_ENTRY("urlclassifier3.sqlite"),
+  TRACKEDDB_ENTRY("webappsstore.sqlite")
+};
+
+// A whitelist of database name prefixes. If the database name begins with
+// one of these prefixes then its SQL statements will always be recorded.
+static const TrackedDBEntry kTrackedDBPrefixes[] = {
+  TRACKEDDB_ENTRY("indexedDB-")
+};
+
+#undef TRACKEDDB_ENTRY
+
+// Slow SQL statements will be automatically
+// trimmed to kMaxSlowStatementLength characters.
+// This limit doesn't include the ellipsis and DB name,
+// that are appended at the end of the stored statement.
+const uint32_t kMaxSlowStatementLength = 1000;
+
+// Record a slow SQL statement. Whitelisted DBs get their (sanitized)
+// statement text stored; everything else is aggregated into a single
+// "Untracked SQL for <db>" entry. The full unsanitized statement is
+// always stored in the private (Unsanitized) map as well.
+void
+TelemetryImpl::RecordSlowStatement(const nsACString &sql,
+                                   const nsACString &dbName,
+                                   uint32_t delay)
+{
+  MOZ_ASSERT(!sql.IsEmpty());
+  MOZ_ASSERT(!dbName.IsEmpty());
+
+  if (!sTelemetry || !TelemetryHistogram::CanRecordExtended())
+    return;
+
+  bool recordStatement = false;
+
+  // Exact-name match against the whitelist.
+  for (const TrackedDBEntry& nameEntry : kTrackedDBs) {
+    MOZ_ASSERT(nameEntry.mNameLength);
+    const nsDependentCString name(nameEntry.mName, nameEntry.mNameLength);
+    if (dbName == name) {
+      recordStatement = true;
+      break;
+    }
+  }
+
+  // Fall back to prefix match (e.g. "indexedDB-*").
+  if (!recordStatement) {
+    for (const TrackedDBEntry& prefixEntry : kTrackedDBPrefixes) {
+      MOZ_ASSERT(prefixEntry.mNameLength);
+      const nsDependentCString prefix(prefixEntry.mName,
+                                      prefixEntry.mNameLength);
+      if (StringBeginsWith(dbName, prefix)) {
+        recordStatement = true;
+        break;
+      }
+    }
+  }
+
+  if (recordStatement) {
+    // Strip string literals, truncate, and tag with the DB name.
+    nsAutoCString sanitizedSQL(SanitizeSQL(sql));
+    if (sanitizedSQL.Length() > kMaxSlowStatementLength) {
+      sanitizedSQL.SetLength(kMaxSlowStatementLength);
+      sanitizedSQL += "...";
+    }
+    sanitizedSQL.AppendPrintf(" /* %s */", nsPromiseFlatCString(dbName).get());
+    StoreSlowSQL(sanitizedSQL, delay, Sanitized);
+  } else {
+    // Report aggregate DB-level statistics for addon DBs
+    nsAutoCString aggregate;
+    aggregate.AppendPrintf("Untracked SQL for %s",
+                           nsPromiseFlatCString(dbName).get());
+    StoreSlowSQL(aggregate, delay, Sanitized);
+  }
+
+  nsAutoCString fullSQL;
+  fullSQL.AppendPrintf("%s /* %s */",
+                       nsPromiseFlatCString(sql).get(),
+                       nsPromiseFlatCString(dbName).get());
+  StoreSlowSQL(fullSQL, delay, Unsanitized);
+}
+
+// Forward a WebRTC ICE candidate bitmask to the WebRTC telemetry
+// aggregator. No-op when telemetry is unavailable or extended recording
+// is off.
+void
+TelemetryImpl::RecordIceCandidates(const uint32_t iceCandidateBitmask,
+                                   const bool success)
+{
+  if (!sTelemetry || !TelemetryHistogram::CanRecordExtended())
+    return;
+
+  sTelemetry->mWebrtcTelemetry.RecordIceCandidateMask(iceCandidateBitmask, success);
+}
+
+#if defined(MOZ_ENABLE_PROFILER_SPS)
+// Record a chrome (main-thread) hang report: the processed stack, the
+// hang duration and uptimes, plus optional annotations. Only compiled
+// when the SPS profiler is available.
+void
+TelemetryImpl::RecordChromeHang(uint32_t aDuration,
+                                Telemetry::ProcessedStack &aStack,
+                                int32_t aSystemUptime,
+                                int32_t aFirefoxUptime,
+                                HangAnnotationsPtr aAnnotations)
+{
+  if (!sTelemetry || !TelemetryHistogram::CanRecordExtended())
+    return;
+
+  HangAnnotationsPtr annotations;
+  // We only pass aAnnotations if it is not empty.
+  if (aAnnotations && !aAnnotations->IsEmpty()) {
+    annotations = Move(aAnnotations);
+  }
+
+  MutexAutoLock hangReportMutex(sTelemetry->mHangReportsMutex);
+
+  sTelemetry->mHangReports.AddHang(aStack, aDuration,
+                                   aSystemUptime, aFirefoxUptime,
+                                   Move(annotations));
+}
+#endif
+
+// Save the hang stats of an exiting thread so GetThreadHangStats can
+// still report them later. Takes ownership of |aStats| via move.
+void
+TelemetryImpl::RecordThreadHangStats(Telemetry::ThreadHangStats& aStats)
+{
+  if (!sTelemetry || !TelemetryHistogram::CanRecordExtended())
+    return;
+
+  MutexAutoLock autoLock(sTelemetry->mThreadHangStatsMutex);
+
+  // Ignore OOM.
+  mozilla::Unused << sTelemetry->mThreadHangStats.append(Move(aStats));
+}
+
+// XPCOM boilerplate: interface table, singleton factory, CID/contract
+// registration, and the module definition that ties them together.
+NS_IMPL_ISUPPORTS(TelemetryImpl, nsITelemetry, nsIMemoryReporter)
+NS_GENERIC_FACTORY_SINGLETON_CONSTRUCTOR(nsITelemetry, TelemetryImpl::CreateTelemetryInstance)
+
+#define NS_TELEMETRY_CID \
+  {0xaea477f2, 0xb3a2, 0x469c, {0xaa, 0x29, 0x0a, 0x82, 0xd1, 0x32, 0xb8, 0x29}}
+NS_DEFINE_NAMED_CID(NS_TELEMETRY_CID);
+
+const Module::CIDEntry kTelemetryCIDs[] = {
+  { &kNS_TELEMETRY_CID, false, nullptr, nsITelemetryConstructor, Module::ALLOW_IN_GPU_PROCESS },
+  { nullptr }
+};
+
+const Module::ContractIDEntry kTelemetryContracts[] = {
+  { "@mozilla.org/base/telemetry;1", &kNS_TELEMETRY_CID, Module::ALLOW_IN_GPU_PROCESS },
+  { nullptr }
+};
+
+// ShutdownTelemetry is registered as the module's unload hook.
+const Module kTelemetryModule = {
+  Module::kVersion,
+  kTelemetryCIDs,
+  kTelemetryContracts,
+  nullptr,
+  nullptr,
+  nullptr,
+  TelemetryImpl::ShutdownTelemetry,
+  Module::ALLOW_IN_GPU_PROCESS
+};
+
+// nsITelemetry.fileIOReports: reflect the I/O observer's data into a JS
+// object, or return null when no observer is installed.
+NS_IMETHODIMP
+TelemetryImpl::GetFileIOReports(JSContext *cx, JS::MutableHandleValue ret)
+{
+  // Without an observer there is nothing to report.
+  if (!sTelemetryIOObserver) {
+    ret.setNull();
+    return NS_OK;
+  }
+
+  JS::Rooted<JSObject*> obj(cx, JS_NewPlainObject(cx));
+  if (!obj || !sTelemetryIOObserver->ReflectIntoJS(cx, obj)) {
+    return NS_ERROR_FAILURE;
+  }
+  ret.setObject(*obj);
+  return NS_OK;
+}
+
+// Milliseconds elapsed since process start; forwarded to the shared
+// Telemetry::Common helper.
+NS_IMETHODIMP
+TelemetryImpl::MsSinceProcessStart(double* aResult)
+{
+  return Telemetry::Common::MsSinceProcessStart(aResult);
+}
+
+// Telemetry Scalars IDL Implementation
+// All methods below are thin forwarders into the TelemetryScalar module,
+// which owns validation, storage and snapshotting of scalar probes.
+
+NS_IMETHODIMP
+TelemetryImpl::ScalarAdd(const nsACString& aName, JS::HandleValue aVal, JSContext* aCx)
+{
+  return TelemetryScalar::Add(aName, aVal, aCx);
+}
+
+NS_IMETHODIMP
+TelemetryImpl::ScalarSet(const nsACString& aName, JS::HandleValue aVal, JSContext* aCx)
+{
+  return TelemetryScalar::Set(aName, aVal, aCx);
+}
+
+NS_IMETHODIMP
+TelemetryImpl::ScalarSetMaximum(const nsACString& aName, JS::HandleValue aVal, JSContext* aCx)
+{
+  return TelemetryScalar::SetMaximum(aName, aVal, aCx);
+}
+
+NS_IMETHODIMP
+TelemetryImpl::SnapshotScalars(unsigned int aDataset, bool aClearScalars, JSContext* aCx,
+                               uint8_t optional_argc, JS::MutableHandleValue aResult)
+{
+  return TelemetryScalar::CreateSnapshots(aDataset, aClearScalars, aCx, optional_argc, aResult);
+}
+
+NS_IMETHODIMP
+TelemetryImpl::KeyedScalarAdd(const nsACString& aName, const nsAString& aKey,
+                              JS::HandleValue aVal, JSContext* aCx)
+{
+  return TelemetryScalar::Add(aName, aKey, aVal, aCx);
+}
+
+NS_IMETHODIMP
+TelemetryImpl::KeyedScalarSet(const nsACString& aName, const nsAString& aKey,
+                              JS::HandleValue aVal, JSContext* aCx)
+{
+  return TelemetryScalar::Set(aName, aKey, aVal, aCx);
+}
+
+NS_IMETHODIMP
+TelemetryImpl::KeyedScalarSetMaximum(const nsACString& aName, const nsAString& aKey,
+                                     JS::HandleValue aVal, JSContext* aCx)
+{
+  return TelemetryScalar::SetMaximum(aName, aKey, aVal, aCx);
+}
+
+NS_IMETHODIMP
+TelemetryImpl::SnapshotKeyedScalars(unsigned int aDataset, bool aClearScalars, JSContext* aCx,
+                                    uint8_t optional_argc, JS::MutableHandleValue aResult)
+{
+  return TelemetryScalar::CreateKeyedSnapshots(aDataset, aClearScalars, aCx, optional_argc,
+                                               aResult);
+}
+
+NS_IMETHODIMP
+TelemetryImpl::ClearScalars()
+{
+  TelemetryScalar::ClearScalars();
+  return NS_OK;
+}
+
+// Telemetry Event IDL implementation.
+// Thin forwarders into the TelemetryEvent module.
+
+NS_IMETHODIMP
+TelemetryImpl::RecordEvent(const nsACString & aCategory, const nsACString & aMethod,
+                           const nsACString & aObject, JS::HandleValue aValue,
+                           JS::HandleValue aExtra, JSContext* aCx, uint8_t optional_argc)
+{
+  return TelemetryEvent::RecordEvent(aCategory, aMethod, aObject, aValue, aExtra, aCx, optional_argc);
+}
+
+NS_IMETHODIMP
+TelemetryImpl::SnapshotBuiltinEvents(uint32_t aDataset, bool aClear, JSContext* aCx,
+                                     uint8_t optional_argc, JS::MutableHandleValue aResult)
+{
+  return TelemetryEvent::CreateSnapshots(aDataset, aClear, aCx, optional_argc, aResult);
+}
+
+NS_IMETHODIMP
+TelemetryImpl::ClearEvents()
+{
+  TelemetryEvent::ClearEvents();
+  return NS_OK;
+}
+
+
+// Force any telemetry batched in a child process to be sent to the
+// parent immediately, by firing the IPC timer by hand.
+NS_IMETHODIMP
+TelemetryImpl::FlushBatchedChildTelemetry()
+{
+  TelemetryHistogram::IPCTimerFired(nullptr, nullptr);
+  return NS_OK;
+}
+
+// Memory reporter callback: sum this object plus the heap owned by the
+// telemetry subsystems. Each mutex-guarded member is measured under its
+// own lock.
+size_t
+TelemetryImpl::SizeOfIncludingThis(mozilla::MallocSizeOf aMallocSizeOf)
+{
+  size_t n = aMallocSizeOf(this);
+
+  // Ignore the hashtables in mAddonMap; they are not significant.
+  n += TelemetryHistogram::GetMapShallowSizesOfExcludingThis(aMallocSizeOf);
+  n += TelemetryScalar::GetMapShallowSizesOfExcludingThis(aMallocSizeOf);
+  n += mWebrtcTelemetry.SizeOfExcludingThis(aMallocSizeOf);
+  { // Scope for mHashMutex lock
+    MutexAutoLock lock(mHashMutex);
+    n += mPrivateSQL.SizeOfExcludingThis(aMallocSizeOf);
+    n += mSanitizedSQL.SizeOfExcludingThis(aMallocSizeOf);
+  }
+  { // Scope for mHangReportsMutex lock
+    MutexAutoLock lock(mHangReportsMutex);
+    n += mHangReports.SizeOfExcludingThis(aMallocSizeOf);
+  }
+  { // Scope for mThreadHangStatsMutex lock
+    MutexAutoLock lock(mThreadHangStatsMutex);
+    n += mThreadHangStats.sizeOfExcludingThis(aMallocSizeOf);
+  }
+
+  // It's a bit gross that we measure this other stuff that lives outside of
+  // TelemetryImpl... oh well.
+  if (sTelemetryIOObserver) {
+    n += sTelemetryIOObserver->SizeOfIncludingThis(aMallocSizeOf);
+  }
+
+  n += TelemetryHistogram::GetHistogramSizesofIncludingThis(aMallocSizeOf);
+  n += TelemetryScalar::GetScalarSizesOfIncludingThis(aMallocSizeOf);
+  n += TelemetryEvent::SizeOfIncludingThis(aMallocSizeOf);
+
+  return n;
+}
+
+// One raw frame captured from a call stack, before module resolution.
+struct StackFrame
+{
+  uintptr_t mPC;      // The program counter at this position in the call stack.
+  uint16_t mIndex;    // The number of this frame in the call stack.
+  uint16_t mModIndex; // The index of module that has this program counter.
+};
+
+#ifdef MOZ_ENABLE_PROFILER_SPS
+// Sort predicate: order frames by raw PC so they can be walked in
+// lockstep with the address-sorted module list.
+static bool CompareByPC(const StackFrame &a, const StackFrame &b)
+{
+  return a.mPC < b.mPC;
+}
+
+// Sort predicate: restore the original top-to-bottom stack order after
+// module resolution.
+static bool CompareByIndex(const StackFrame &a, const StackFrame &b)
+{
+  return a.mIndex < b.mIndex;
+}
+#endif
+
+} // namespace
+
+
+////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////
+//
+// EXTERNALLY VISIBLE FUNCTIONS in no name space
+// These are NOT listed in Telemetry.h
+
+NSMODULE_DEFN(nsTelemetryModule) = &kTelemetryModule;
+
+/**
+ * The XRE_TelemetryAccumulate function is to be used by embedding applications
+ * that can't use mozilla::Telemetry::Accumulate() directly.
+ */
+void
+XRE_TelemetryAccumulate(int aID, uint32_t aSample)
+{
+  mozilla::Telemetry::Accumulate((mozilla::Telemetry::ID) aID, aSample);
+}
+
+
+////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////
+//
+// EXTERNALLY VISIBLE FUNCTIONS in mozilla::
+// These are NOT listed in Telemetry.h
+
+namespace mozilla {
+
+// Mark the start of shutdown. If called more than once, the last
+// timestamp wins (see the FIXME below). Also resolves the shutdown-time
+// file name so it is available when the end timestamp is recorded.
+void
+RecordShutdownStartTimeStamp() {
+#ifdef DEBUG
+  // FIXME: this function should only be called once, since it should be called
+  // at the earliest point we *know* we are shutting down. Unfortunately
+  // this assert has been firing. Given that if we are called multiple times
+  // we just keep the last timestamp, the assert is commented for now.
+  static bool recorded = false;
+  // MOZ_ASSERT(!recorded);
+  (void)recorded; // Silence unused-var warnings (remove when assert re-enabled)
+  recorded = true;
+#endif
+
+  if (!Telemetry::CanRecordExtended())
+    return;
+
+  gRecordedShutdownStartTime = TimeStamp::Now();
+
+  GetShutdownTimeFileName();
+}
+
+// Write the measured shutdown duration (ms) to the shutdown-time file.
+// The write goes to a ".tmp" file first and is renamed into place only
+// on success, so a partially-written file is never observed.
+void
+RecordShutdownEndTimeStamp() {
+  if (!gRecordedShutdownTimeFileName || gAlreadyFreedShutdownTimeFileName)
+    return;
+
+  // Take ownership of the file name and mark it consumed so this
+  // function is effectively one-shot.
+  nsCString name(gRecordedShutdownTimeFileName);
+  PL_strfree(gRecordedShutdownTimeFileName);
+  gRecordedShutdownTimeFileName = nullptr;
+  gAlreadyFreedShutdownTimeFileName = true;
+
+  if (gRecordedShutdownStartTime.IsNull()) {
+    // If |CanRecordExtended()| is true before |AsyncFetchTelemetryData| is called and
+    // then disabled before shutdown, |RecordShutdownStartTimeStamp| will bail out and
+    // we will end up with a null |gRecordedShutdownStartTime| here. This can happen
+    // during tests.
+    return;
+  }
+
+  nsCString tmpName = name;
+  tmpName += ".tmp";
+  FILE *f = fopen(tmpName.get(), "w");
+  if (!f)
+    return;
+  // On a normal release build this should be called just before
+  // calling _exit, but on a debug build or when the user forces a full
+  // shutdown this is called as late as possible, so we have to
+  // white list this write as write poisoning will be enabled.
+  MozillaRegisterDebugFILE(f);
+
+  TimeStamp now = TimeStamp::Now();
+  MOZ_ASSERT(now >= gRecordedShutdownStartTime);
+  TimeDuration diff = now - gRecordedShutdownStartTime;
+  uint32_t diff2 = diff.ToMilliseconds();
+  int written = fprintf(f, "%d\n", diff2);
+  MozillaUnRegisterDebugFILE(f);
+  int rv = fclose(f);
+  if (written < 0 || rv != 0) {
+    // Write failed: discard the temp file and keep whatever was there
+    // before.
+    PR_Delete(tmpName.get());
+    return;
+  }
+  PR_Delete(name.get());
+  PR_Rename(tmpName.get(), name.get());
+}
+
+} // namespace mozilla
+
+
+////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////
+//
+// EXTERNALLY VISIBLE FUNCTIONS in mozilla::Telemetry::
+// These are NOT listed in Telemetry.h
+
+namespace mozilla {
+namespace Telemetry {
+
+// ProcessedStack member definitions: a simple container pairing a list
+// of frames with the list of modules those frames resolve into.
+ProcessedStack::ProcessedStack()
+{
+}
+
+// Number of frames currently stored.
+size_t ProcessedStack::GetStackSize() const
+{
+  return mStack.size();
+}
+
+// Number of modules currently stored.
+size_t ProcessedStack::GetNumModules() const
+{
+  return mModules.size();
+}
+
+// Modules are equal when both the name and the breakpad ID match.
+bool ProcessedStack::Module::operator==(const Module& aOther) const {
+  return mName == aOther.mName &&
+         mBreakpadId == aOther.mBreakpadId;
+}
+
+const ProcessedStack::Frame &ProcessedStack::GetFrame(unsigned aIndex) const
+{
+  MOZ_ASSERT(aIndex < mStack.size());
+  return mStack[aIndex];
+}
+
+void ProcessedStack::AddFrame(const Frame &aFrame)
+{
+  mStack.push_back(aFrame);
+}
+
+const ProcessedStack::Module &ProcessedStack::GetModule(unsigned aIndex) const
+{
+  MOZ_ASSERT(aIndex < mModules.size());
+  return mModules[aIndex];
+}
+
+void ProcessedStack::AddModule(const Module &aModule)
+{
+  mModules.push_back(aModule);
+}
+
+// Reset to the empty state, dropping both frames and modules.
+void ProcessedStack::Clear() {
+  mModules.clear();
+  mStack.clear();
+}
+
+// Converts a raw list of program counters into a ProcessedStack: each
+// PC that falls inside a loaded module is rewritten as a module index
+// plus module-relative offset, and only modules actually referenced by
+// the stack are kept.  PCs outside any module (e.g. JIT code) are
+// normalized to uintptr_t max so that repeated runs produce identical
+// stacks.  The stack is capped at kMaxChromeStackDepth frames.
+ProcessedStack
+GetStackAndModules(const std::vector<uintptr_t>& aPCs)
+{
+  // Tag each PC with its insertion index so the original order can be
+  // restored after sorting by address below.
+  std::vector<StackFrame> rawStack;
+  auto stackEnd = aPCs.begin() + std::min(aPCs.size(), kMaxChromeStackDepth);
+  for (auto i = aPCs.begin(); i != stackEnd; ++i) {
+    uintptr_t aPC = *i;
+    StackFrame Frame = {aPC, static_cast<uint16_t>(rawStack.size()),
+                        std::numeric_limits<uint16_t>::max()};
+    rawStack.push_back(Frame);
+  }
+
+#ifdef MOZ_ENABLE_PROFILER_SPS
+  // Remove all modules not referenced by a PC on the stack
+  std::sort(rawStack.begin(), rawStack.end(), CompareByPC);
+
+  size_t moduleIndex = 0;
+  size_t stackIndex = 0;
+  size_t stackSize = rawStack.size();
+
+  SharedLibraryInfo rawModules = SharedLibraryInfo::GetInfoForSelf();
+  rawModules.SortByAddress();
+
+  // Frames are sorted by PC and modules by start address, so a single
+  // merge-style pass assigns each PC to its containing module.
+  while (moduleIndex < rawModules.GetSize()) {
+    const SharedLibrary& module = rawModules.GetEntry(moduleIndex);
+    uintptr_t moduleStart = module.GetStart();
+    uintptr_t moduleEnd = module.GetEnd() - 1;
+    // the interval is [moduleStart, moduleEnd)
+
+    bool moduleReferenced = false;
+    for (;stackIndex < stackSize; ++stackIndex) {
+      uintptr_t pc = rawStack[stackIndex].mPC;
+      if (pc >= moduleEnd)
+        break;
+
+      if (pc >= moduleStart) {
+        // If the current PC is within the current module, mark
+        // module as used
+        moduleReferenced = true;
+        rawStack[stackIndex].mPC -= moduleStart;
+        rawStack[stackIndex].mModIndex = moduleIndex;
+      } else {
+        // PC does not belong to any module. It is probably from
+        // the JIT. Use a fixed mPC so that we don't get different
+        // stacks on different runs.
+        rawStack[stackIndex].mPC =
+          std::numeric_limits<uintptr_t>::max();
+      }
+    }
+
+    if (moduleReferenced) {
+      ++moduleIndex;
+    } else {
+      // Remove module if no PCs within its address range
+      rawModules.RemoveEntries(moduleIndex, moduleIndex + 1);
+    }
+  }
+
+  for (;stackIndex < stackSize; ++stackIndex) {
+    // These PCs are past the last module.
+    rawStack[stackIndex].mPC = std::numeric_limits<uintptr_t>::max();
+  }
+
+  // Restore the original frame order using the insertion index tags.
+  std::sort(rawStack.begin(), rawStack.end(), CompareByIndex);
+#endif
+
+  // Copy the information to the return value.
+  ProcessedStack Ret;
+  for (std::vector<StackFrame>::iterator i = rawStack.begin(),
+         e = rawStack.end(); i != e; ++i) {
+    const StackFrame &rawFrame = *i;
+    mozilla::Telemetry::ProcessedStack::Frame frame = { rawFrame.mPC, rawFrame.mModIndex };
+    Ret.AddFrame(frame);
+  }
+
+#ifdef MOZ_ENABLE_PROFILER_SPS
+  for (unsigned i = 0, n = rawModules.GetSize(); i != n; ++i) {
+    const SharedLibrary &info = rawModules.GetEntry(i);
+    const std::string &name = info.GetName();
+    std::string basename = name;
+#ifdef XP_MACOSX
+    // FIXME: We want to use just the basename as the libname, but the
+    // current profiler addon needs the full path name, so we compute the
+    // basename in here.
+    size_t pos = name.rfind('/');
+    if (pos != std::string::npos) {
+      basename = name.substr(pos + 1);
+    }
+#endif
+    mozilla::Telemetry::ProcessedStack::Module module = {
+      basename,
+      info.GetBreakpadId()
+    };
+    Ret.AddModule(module);
+  }
+#endif
+
+  return Ret;
+}
+
+// Accumulates one interval sample into the log2-bucketed time
+// histogram: the bucket index is FloorLog2(time in milliseconds).
+// NOTE(review): sub-millisecond samples (timeMs == 0) presumably land
+// in bucket 0 — confirm against mozilla::FloorLog2's contract for 0.
+void
+TimeHistogram::Add(PRIntervalTime aTime)
+{
+  uint32_t timeMs = PR_IntervalToMilliseconds(aTime);
+  size_t index = mozilla::FloorLog2(timeMs);
+  operator[](index)++;
+}
+
+// Copies |aText| (aLength chars plus a '\0') into the shared character
+// buffer and pushes a pointer to the copy onto this stack.  Infallible:
+// the caller must already have reserved space in both the stack and the
+// buffer (see AppendViaBuffer); the assertions below check that.
+// Returns the pointer to the copied, null-terminated string.
+const char*
+HangStack::InfallibleAppendViaBuffer(const char* aText, size_t aLength)
+{
+  MOZ_ASSERT(this->canAppendWithoutRealloc(1));
+  // Include null-terminator in length count.
+  MOZ_ASSERT(mBuffer.canAppendWithoutRealloc(aLength + 1));
+
+  const char* const entry = mBuffer.end();
+  mBuffer.infallibleAppend(aText, aLength);
+  mBuffer.infallibleAppend('\0'); // Explicitly append null-terminator
+  this->infallibleAppend(entry);
+  return entry;
+}
+
+// Fallible counterpart of InfallibleAppendViaBuffer: grows the stack
+// and the character buffer first, and — if growing relocated the
+// buffer — rebases every stored entry pointer that pointed into the
+// old buffer.  Returns nullptr on allocation failure, otherwise the
+// pointer to the stored copy of |aText|.
+const char*
+HangStack::AppendViaBuffer(const char* aText, size_t aLength)
+{
+  if (!this->reserve(this->length() + 1)) {
+    return nullptr;
+  }
+
+  // Keep track of the previous buffer in case we need to adjust pointers later.
+  const char* const prevStart = mBuffer.begin();
+  const char* const prevEnd = mBuffer.end();
+
+  // Include null-terminator in length count.
+  if (!mBuffer.reserve(mBuffer.length() + aLength + 1)) {
+    return nullptr;
+  }
+
+  if (prevStart != mBuffer.begin()) {
+    // The buffer has moved; we have to adjust pointers in the stack.
+    // Only pointers inside [prevStart, prevEnd) lived in our buffer;
+    // anything else is a static string and must not be touched.
+    for (const char** entry = this->begin(); entry != this->end(); entry++) {
+      if (*entry >= prevStart && *entry < prevEnd) {
+        // Move from old buffer to new buffer.
+        *entry += mBuffer.begin() - prevStart;
+      }
+    }
+  }
+
+  return InfallibleAppendViaBuffer(aText, aLength);
+}
+
+// Computes a hash over a hang stack's labels.  Labels stored inside the
+// stack's own buffer are hashed by content; any other label is hashed
+// by its pointer value (the comment below notes those are statically
+// allocated, so the address is a stable, cheaper identity).
+uint32_t
+HangHistogram::GetHash(const HangStack& aStack)
+{
+  uint32_t hash = 0;
+  for (const char* const* label = aStack.begin();
+       label != aStack.end(); label++) {
+    /* If the string is within our buffer, we need to hash its content.
+       Otherwise, the string is statically allocated, and we only need
+       to hash the pointer instead of the content. */
+    if (aStack.IsInBuffer(*label)) {
+      hash = AddToHash(hash, HashString(*label));
+    } else {
+      hash = AddToHash(hash, *label);
+    }
+  }
+  return hash;
+}
+
+// Equality for hang histograms: cheapest comparisons first (cached
+// hash, then stack length) before the element-wise stack comparison.
+bool
+HangHistogram::operator==(const HangHistogram& aOther) const
+{
+  return mHash == aOther.mHash &&
+         mStack.length() == aOther.mStack.length() &&
+         mStack == aOther.mStack;
+}
+
+} // namespace Telemetry
+} // namespace mozilla
+
+
+////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////
+//
+// EXTERNALLY VISIBLE FUNCTIONS in mozilla::Telemetry::
+// These are listed in Telemetry.h
+
+namespace mozilla {
+namespace Telemetry {
+
+// The external API for controlling recording state.
+// All functions below are thin forwarders into TelemetryHistogram.
+
+// Enables/disables recording for a single histogram at runtime.
+void
+SetHistogramRecordingEnabled(ID aID, bool aEnabled)
+{
+  TelemetryHistogram::SetHistogramRecordingEnabled(aID, aEnabled);
+}
+
+// Records one sample into the histogram identified by enum id.
+void
+Accumulate(ID aHistogram, uint32_t aSample)
+{
+  TelemetryHistogram::Accumulate(aHistogram, aSample);
+}
+
+// Records one sample under |aKey| in a keyed histogram.
+void
+Accumulate(ID aID, const nsCString& aKey, uint32_t aSample)
+{
+  TelemetryHistogram::Accumulate(aID, aKey, aSample);
+}
+
+// Name-based variant for callers that only have the histogram name
+// (see Telemetry.h).
+void
+Accumulate(const char* name, uint32_t sample)
+{
+  TelemetryHistogram::Accumulate(name, sample);
+}
+
+// Name-based keyed variant.
+void
+Accumulate(const char *name, const nsCString& key, uint32_t sample)
+{
+  TelemetryHistogram::Accumulate(name, key, sample);
+}
+
+// Records a sample in a categorical histogram by label string.
+void
+AccumulateCategorical(ID id, const nsCString& label)
+{
+  TelemetryHistogram::AccumulateCategorical(id, label);
+}
+
+// Records (end - start) in milliseconds.
+// NOTE(review): if end < start the cast wraps to a huge uint32_t value;
+// callers must guarantee end >= start.
+void
+AccumulateTimeDelta(ID aHistogram, TimeStamp start, TimeStamp end)
+{
+  Accumulate(aHistogram,
+             static_cast<uint32_t>((end - start).ToMilliseconds()));
+}
+
+// Folds histogram accumulations reported by a child process into
+// parent-side storage.
+void
+AccumulateChild(GeckoProcessType aProcessType,
+                const nsTArray<Accumulation>& aAccumulations)
+{
+  TelemetryHistogram::AccumulateChild(aProcessType, aAccumulations);
+}
+
+// Keyed-histogram counterpart of AccumulateChild.
+void
+AccumulateChildKeyed(GeckoProcessType aProcessType,
+                     const nsTArray<KeyedAccumulation>& aAccumulations)
+{
+  TelemetryHistogram::AccumulateChildKeyed(aProcessType, aAccumulations);
+}
+
+// Returns the string name for a histogram enum id.
+const char*
+GetHistogramName(ID id)
+{
+  return TelemetryHistogram::GetHistogramName(id);
+}
+
+// Whether base telemetry data recording is enabled.
+bool
+CanRecordBase()
+{
+  return TelemetryHistogram::CanRecordBase();
+}
+
+// Whether extended telemetry data recording is enabled.
+bool
+CanRecordExtended()
+{
+  return TelemetryHistogram::CanRecordExtended();
+}
+
+// Forwards a slow SQL statement record (statement text, database file
+// name, execution delay in ms) to TelemetryImpl.
+void
+RecordSlowSQLStatement(const nsACString &statement,
+                       const nsACString &dbName,
+                       uint32_t delay)
+{
+  TelemetryImpl::RecordSlowStatement(statement, dbName, delay);
+}
+
+// Forwards a WebRTC ICE candidate-type bitmask and whether the peer
+// connection succeeded to TelemetryImpl.
+void
+RecordWebrtcIceCandidates(const uint32_t iceCandidateBitmask,
+                          const bool success)
+{
+  TelemetryImpl::RecordIceCandidates(iceCandidateBitmask, success);
+}
+
+// Instantiates the telemetry service so the service manager keeps it
+// alive for the rest of the session.
+void Init()
+{
+  // Make the service manager hold a long-lived reference to the service
+  nsCOMPtr<nsITelemetry> telemetryService =
+    do_GetService("@mozilla.org/base/telemetry;1");
+  MOZ_ASSERT(telemetryService);
+}
+
+#if defined(MOZ_ENABLE_PROFILER_SPS)
+// Records a chrome (main-thread) hang; only built when the SPS profiler
+// is available.  Ownership of |aAnnotations| is transferred (Move).
+void RecordChromeHang(uint32_t duration,
+                      ProcessedStack &aStack,
+                      int32_t aSystemUptime,
+                      int32_t aFirefoxUptime,
+                      HangAnnotationsPtr aAnnotations)
+{
+  TelemetryImpl::RecordChromeHang(duration, aStack,
+                                  aSystemUptime, aFirefoxUptime,
+                                  Move(aAnnotations));
+}
+#endif
+
+// Moves a departing thread's hang stats into Telemetry storage; aStats
+// must be treated as invalid after this call (see Telemetry.h).
+void RecordThreadHangStats(ThreadHangStats& aStats)
+{
+  TelemetryImpl::RecordThreadHangStats(aStats);
+}
+
+
+// Increments the failed-profile-lock counter persisted in the profile
+// directory: reads the current count (if the file exists and parses),
+// adds one, and rewrites the file in place.
+void
+WriteFailedProfileLock(nsIFile* aProfileDir)
+{
+  nsCOMPtr<nsIFile> file;
+  nsresult rv = GetFailedProfileLockFile(getter_AddRefs(file), aProfileDir);
+  NS_ENSURE_SUCCESS_VOID(rv);
+  int64_t fileSize = 0;
+  rv = file->GetFileSize(&fileSize);
+  // It's expected that the file might not exist yet
+  if (NS_FAILED(rv) && rv != NS_ERROR_FILE_NOT_FOUND) {
+    return;
+  }
+  nsCOMPtr<nsIFileStream> fileStream;
+  rv = NS_NewLocalFileStream(getter_AddRefs(fileStream), file,
+                             PR_RDWR | PR_CREATE_FILE, 0640);
+  NS_ENSURE_SUCCESS_VOID(rv);
+  // Refuse to parse unexpectedly large files.
+  NS_ENSURE_TRUE_VOID(fileSize <= kMaxFailedProfileLockFileSize);
+  unsigned int failedLockCount = 0;
+  if (fileSize > 0) {
+    nsCOMPtr<nsIInputStream> inStream = do_QueryInterface(fileStream);
+    NS_ENSURE_TRUE_VOID(inStream);
+    // On parse failure, fall back to zero and start counting afresh.
+    if (!GetFailedLockCount(inStream, fileSize, failedLockCount)) {
+      failedLockCount = 0;
+    }
+  }
+  ++failedLockCount;
+  nsAutoCString bufStr;
+  bufStr.AppendInt(static_cast<int>(failedLockCount));
+  nsCOMPtr<nsISeekableStream> seekStream = do_QueryInterface(fileStream);
+  NS_ENSURE_TRUE_VOID(seekStream);
+  // If we read in an existing failed lock count, we need to reset the file ptr
+  if (fileSize > 0) {
+    rv = seekStream->Seek(nsISeekableStream::NS_SEEK_SET, 0);
+    NS_ENSURE_SUCCESS_VOID(rv);
+  }
+  nsCOMPtr<nsIOutputStream> outStream = do_QueryInterface(fileStream);
+  // NOTE(review): outStream is not null-checked before use, unlike the
+  // other do_QueryInterface results above — confirm the QI cannot fail.
+  uint32_t bytesLeft = bufStr.Length();
+  const char* bytes = bufStr.get();
+  do {
+    uint32_t written = 0;
+    rv = outStream->Write(bytes, bytesLeft, &written);
+    if (NS_FAILED(rv)) {
+      break;
+    }
+    bytes += written;
+    bytesLeft -= written;
+  } while (bytesLeft > 0);
+  // Truncate any leftover bytes from a previous, longer count.
+  seekStream->SetEOF();
+}
+
+// Starts I/O interposition reporting for files under the XRE directory.
+// Safe to call multiple times; only the first call has an effect.
+void
+InitIOReporting(nsIFile* aXreDir)
+{
+  // Never initialize twice
+  if (sTelemetryIOObserver) {
+    return;
+  }
+
+  sTelemetryIOObserver = new TelemetryIOInterposeObserver(aXreDir);
+  IOInterposer::Register(IOInterposeObserver::OpAllWithStaging,
+                         sTelemetryIOObserver);
+}
+
+// Registers the profile directory with the I/O observer so paths under
+// it are reported as "{profile}".  No-op unless InitIOReporting ran
+// first, or if aProfD is null or its path is unavailable.
+void
+SetProfileDir(nsIFile* aProfD)
+{
+  if (!sTelemetryIOObserver || !aProfD) {
+    return;
+  }
+  nsAutoString profDirPath;
+  nsresult rv = aProfD->GetPath(profDirPath);
+  if (NS_FAILED(rv)) {
+    return;
+  }
+  sTelemetryIOObserver->AddPath(profDirPath, NS_LITERAL_STRING("{profile}"));
+}
+
+// Creates the underlying base::StatisticsRecorder singleton; must run
+// very early in startup (see Telemetry.h).
+void CreateStatisticsRecorder()
+{
+  TelemetryHistogram::CreateStatisticsRecorder();
+}
+
+// Tears down the StatisticsRecorder singleton.
+void DestroyStatisticsRecorder()
+{
+  TelemetryHistogram::DestroyStatisticsRecorder();
+}
+
+// Scalar API C++ Endpoints
+// All of these are thin forwarders into TelemetryScalar; the overloads
+// taking an nsAString key operate on keyed scalars.
+
+void
+ScalarAdd(mozilla::Telemetry::ScalarID aId, uint32_t aVal)
+{
+  TelemetryScalar::Add(aId, aVal);
+}
+
+void
+ScalarSet(mozilla::Telemetry::ScalarID aId, uint32_t aVal)
+{
+  TelemetryScalar::Set(aId, aVal);
+}
+
+void
+ScalarSet(mozilla::Telemetry::ScalarID aId, bool aVal)
+{
+  TelemetryScalar::Set(aId, aVal);
+}
+
+void
+ScalarSet(mozilla::Telemetry::ScalarID aId, const nsAString& aVal)
+{
+  TelemetryScalar::Set(aId, aVal);
+}
+
+void
+ScalarSetMaximum(mozilla::Telemetry::ScalarID aId, uint32_t aVal)
+{
+  TelemetryScalar::SetMaximum(aId, aVal);
+}
+
+void
+ScalarAdd(mozilla::Telemetry::ScalarID aId, const nsAString& aKey, uint32_t aVal)
+{
+  TelemetryScalar::Add(aId, aKey, aVal);
+}
+
+void
+ScalarSet(mozilla::Telemetry::ScalarID aId, const nsAString& aKey, uint32_t aVal)
+{
+  TelemetryScalar::Set(aId, aKey, aVal);
+}
+
+void
+ScalarSet(mozilla::Telemetry::ScalarID aId, const nsAString& aKey, bool aVal)
+{
+  TelemetryScalar::Set(aId, aKey, aVal);
+}
+
+void
+ScalarSetMaximum(mozilla::Telemetry::ScalarID aId, const nsAString& aKey, uint32_t aVal)
+{
+  TelemetryScalar::SetMaximum(aId, aKey, aVal);
+}
+
+} // namespace Telemetry
+} // namespace mozilla
diff --git a/toolkit/components/telemetry/Telemetry.h b/toolkit/components/telemetry/Telemetry.h
new file mode 100644
index 000000000..64f50013a
--- /dev/null
+++ b/toolkit/components/telemetry/Telemetry.h
@@ -0,0 +1,436 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2; -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef Telemetry_h__
+#define Telemetry_h__
+
+#include "mozilla/GuardObjects.h"
+#include "mozilla/TimeStamp.h"
+#include "mozilla/StartupTimeline.h"
+#include "nsTArray.h"
+#include "nsStringGlue.h"
+#include "nsXULAppAPI.h"
+
+#include "mozilla/TelemetryHistogramEnums.h"
+#include "mozilla/TelemetryScalarEnums.h"
+
+/******************************************************************************
+ * This implements the Telemetry system.
+ * It allows recording into histograms as well some more specialized data
+ * points and gives access to the data.
+ *
+ * For documentation on how to add and use new Telemetry probes, see:
+ * https://developer.mozilla.org/en-US/docs/Mozilla/Performance/Adding_a_new_Telemetry_probe
+ *
+ * For more general information on Telemetry see:
+ * https://wiki.mozilla.org/Telemetry
+ *****************************************************************************/
+
+namespace mozilla {
+namespace HangMonitor {
+ class HangAnnotations;
+} // namespace HangMonitor
+namespace Telemetry {
+
+struct Accumulation;
+struct KeyedAccumulation;
+
+// Time unit used when converting a TimeStamp difference into a
+// histogram sample (see AccumulateDelta_impl and AutoTimer).
+enum TimerResolution {
+  Millisecond,
+  Microsecond
+};
+
+/**
+ * Create and destroy the underlying base::StatisticsRecorder singleton.
+ * Creation has to be done very early in the startup sequence.
+ */
+void CreateStatisticsRecorder();
+void DestroyStatisticsRecorder();
+
+/**
+ * Initialize the Telemetry service on the main thread at startup.
+ */
+void Init();
+
+/**
+ * Adds sample to a histogram defined in TelemetryHistogramEnums.h
+ *
+ * @param id - histogram id
+ * @param sample - value to record.
+ */
+void Accumulate(ID id, uint32_t sample);
+
+/**
+ * Adds sample to a keyed histogram defined in TelemetryHistogramEnums.h
+ *
+ * @param id - keyed histogram id
+ * @param key - the string key
+ * @param sample - (optional) value to record, defaults to 1.
+ */
+void Accumulate(ID id, const nsCString& key, uint32_t sample = 1);
+
+/**
+ * Adds a sample to a histogram defined in TelemetryHistogramEnums.h.
+ * This function is here to support telemetry measurements from Java,
+ * where we have only names and not numeric IDs. You should almost
+ * certainly be using the by-enum-id version instead of this one.
+ *
+ * @param name - histogram name
+ * @param sample - value to record
+ */
+void Accumulate(const char* name, uint32_t sample);
+
+/**
+ * Adds a sample to a histogram defined in TelemetryHistogramEnums.h.
+ * This function is here to support telemetry measurements from Java,
+ * where we have only names and not numeric IDs. You should almost
+ * certainly be using the by-enum-id version instead of this one.
+ *
+ * @param name - histogram name
+ * @param key - the string key
+ * @param sample - sample - (optional) value to record, defaults to 1.
+ */
+void Accumulate(const char *name, const nsCString& key, uint32_t sample = 1);
+
+/**
+ * Adds sample to a categorical histogram defined in TelemetryHistogramEnums.h
+ * This is the typesafe - and preferred - way to use the categorical histograms
+ * by passing values from the corresponding Telemetry::LABELS_* enum.
+ *
+ * @param enumValue - Label value from one of the Telemetry::LABELS_* enums.
+ */
+// Records |enumValue| into the categorical histogram tied to its
+// LABELS_* enum type; the static_assert rejects any other enum.
+// (The stray ';' after the function body was removed: it formed an
+// empty declaration and trips -Wextra-semi.)
+template<class E>
+void AccumulateCategorical(E enumValue) {
+  static_assert(IsCategoricalLabelEnum<E>::value,
+                "Only categorical label enum types are supported.");
+  Accumulate(static_cast<ID>(CategoricalLabelId<E>::value),
+             static_cast<uint32_t>(enumValue));
+}
+
+/**
+ * Adds sample to a categorical histogram defined in TelemetryHistogramEnums.h
+ * This string will be matched against the labels defined in Histograms.json.
+ * If the string does not match a label defined for the histogram, nothing will
+ * be recorded.
+ *
+ * @param id - The histogram id.
+ * @param label - A string label value that is defined in Histograms.json for this histogram.
+ */
+void AccumulateCategorical(ID id, const nsCString& label);
+
+/**
+ * Adds time delta in milliseconds to a histogram defined in TelemetryHistogramEnums.h
+ *
+ * @param id - histogram id
+ * @param start - start time
+ * @param end - end time
+ */
+void AccumulateTimeDelta(ID id, TimeStamp start, TimeStamp end = TimeStamp::Now());
+
+/**
+ * Accumulate child process data into histograms for the given process type.
+ *
+ * @param aAccumulations - accumulation actions to perform
+ */
+void AccumulateChild(GeckoProcessType aProcessType, const nsTArray<Accumulation>& aAccumulations);
+
+/**
+ * Accumulate child process data into keyed histograms for the given process type.
+ *
+ * @param aAccumulations - accumulation actions to perform
+ */
+void AccumulateChildKeyed(GeckoProcessType aProcessType, const nsTArray<KeyedAccumulation>& aAccumulations);
+
+/**
+ * Enable/disable recording for this histogram at runtime.
+ * Recording is enabled by default, unless listed at kRecordingInitiallyDisabledIDs[].
+ * id must be a valid telemetry enum, otherwise an assertion is triggered.
+ *
+ * @param id - histogram id
+ * @param enabled - whether or not to enable recording from now on.
+ */
+void SetHistogramRecordingEnabled(ID id, bool enabled);
+
+const char* GetHistogramName(ID id);
+
+/**
+ * Those wrappers are needed because the VS versions we use do not support free
+ * functions with default template arguments.
+ *
+ * AccumulateDelta_impl converts (end - start) into the unit selected by
+ * the TimerResolution template parameter and records it, optionally
+ * under a key for keyed histograms.
+ */
+template<TimerResolution res>
+struct AccumulateDelta_impl
+{
+  static void compute(ID id, TimeStamp start, TimeStamp end = TimeStamp::Now());
+  static void compute(ID id, const nsCString& key, TimeStamp start, TimeStamp end = TimeStamp::Now());
+};
+
+// Millisecond specialization.
+template<>
+struct AccumulateDelta_impl<Millisecond>
+{
+  static void compute(ID id, TimeStamp start, TimeStamp end = TimeStamp::Now()) {
+    Accumulate(id, static_cast<uint32_t>((end - start).ToMilliseconds()));
+  }
+  static void compute(ID id, const nsCString& key, TimeStamp start, TimeStamp end = TimeStamp::Now()) {
+    Accumulate(id, key, static_cast<uint32_t>((end - start).ToMilliseconds()));
+  }
+};
+
+// Microsecond specialization.
+template<>
+struct AccumulateDelta_impl<Microsecond>
+{
+  static void compute(ID id, TimeStamp start, TimeStamp end = TimeStamp::Now()) {
+    Accumulate(id, static_cast<uint32_t>((end - start).ToMicroseconds()));
+  }
+  static void compute(ID id, const nsCString& key, TimeStamp start, TimeStamp end = TimeStamp::Now()) {
+    Accumulate(id, key, static_cast<uint32_t>((end - start).ToMicroseconds()));
+  }
+};
+
+
+/**
+ * RAII timer: measures from construction (or the supplied start time)
+ * to destruction and records the elapsed time into histogram |id| at
+ * the resolution given by |res|.
+ */
+template<ID id, TimerResolution res = Millisecond>
+class MOZ_RAII AutoTimer {
+public:
+  explicit AutoTimer(TimeStamp aStart = TimeStamp::Now() MOZ_GUARD_OBJECT_NOTIFIER_PARAM)
+    : start(aStart)
+  {
+    MOZ_GUARD_OBJECT_NOTIFIER_INIT;
+  }
+
+  // Keyed variant: the sample is recorded under |aKey| at destruction.
+  explicit AutoTimer(const nsCString& aKey, TimeStamp aStart = TimeStamp::Now() MOZ_GUARD_OBJECT_NOTIFIER_PARAM)
+    : start(aStart)
+    , key(aKey)
+  {
+    MOZ_GUARD_OBJECT_NOTIFIER_INIT;
+  }
+
+  ~AutoTimer() {
+    // An empty key selects the plain (unkeyed) histogram.
+    if (key.IsEmpty()) {
+      AccumulateDelta_impl<res>::compute(id, start);
+    } else {
+      AccumulateDelta_impl<res>::compute(id, key, start);
+    }
+  }
+
+private:
+  const TimeStamp start;
+  const nsCString key;
+  MOZ_DECL_USE_GUARD_OBJECT_NOTIFIER
+};
+
+/**
+ * RAII counter: accumulates increments during its lifetime and records
+ * the final count into histogram |id| at destruction.
+ */
+template<ID id>
+class MOZ_RAII AutoCounter {
+public:
+  explicit AutoCounter(uint32_t counterStart = 0 MOZ_GUARD_OBJECT_NOTIFIER_PARAM)
+    : counter(counterStart)
+  {
+    MOZ_GUARD_OBJECT_NOTIFIER_INIT;
+  }
+
+  ~AutoCounter() {
+    Accumulate(id, counter);
+  }
+
+  // Prefix increment only, to encourage good habits.
+  void operator++() {
+    ++counter;
+  }
+
+  // Chaining doesn't make any sense, don't return anything.
+  void operator+=(int increment) {
+    counter += increment;
+  }
+
+private:
+  uint32_t counter;
+  MOZ_DECL_USE_GUARD_OBJECT_NOTIFIER
+};
+
+/**
+ * Indicates whether Telemetry base data recording is turned on. Added for future uses.
+ */
+bool CanRecordBase();
+
+/**
+ * Indicates whether Telemetry extended data recording is turned on. This is intended
+ * to guard calls to Accumulate when the statistic being recorded is expensive to compute.
+ */
+bool CanRecordExtended();
+
+/**
+ * Records slow SQL statements for Telemetry reporting.
+ *
+ * @param statement - offending SQL statement to record
+ * @param dbName - DB filename
+ * @param delay - execution time in milliseconds
+ */
+void RecordSlowSQLStatement(const nsACString &statement,
+ const nsACString &dbName,
+ uint32_t delay);
+
+/**
+ * Record Webrtc ICE candidate type combinations in a 17bit bitmask
+ *
+ * @param iceCandidateBitmask - the bitmask representing local and remote ICE
+ * candidate types present for the connection
+ * @param success - did the peer connection connected
+ */
+void
+RecordWebrtcIceCandidates(const uint32_t iceCandidateBitmask,
+ const bool success);
+/**
+ * Initialize I/O Reporting
+ * Initially this only records I/O for files in the binary directory.
+ *
+ * @param aXreDir - XRE directory
+ */
+void InitIOReporting(nsIFile* aXreDir);
+
+/**
+ * Set the profile directory. Once called, files in the profile directory will
+ * be included in I/O reporting. We can't use the directory
+ * service to obtain this information because it isn't running yet.
+ */
+void SetProfileDir(nsIFile* aProfD);
+
+/**
+ * Called to inform Telemetry that startup has completed.
+ */
+void LeavingStartupStage();
+
+/**
+ * Called to inform Telemetry that shutdown is commencing.
+ */
+void EnteringShutdownStage();
+
+/**
+ * Thresholds for a statement to be considered slow, in milliseconds
+ */
+const uint32_t kSlowSQLThresholdForMainThread = 50;
+const uint32_t kSlowSQLThresholdForHelperThreads = 100;
+
+class ProcessedStack;
+
+/**
+ * Record the main thread's call stack after it hangs.
+ *
+ * @param aDuration - Approximate duration of main thread hang, in seconds
+ * @param aStack - Array of PCs from the hung call stack
+ * @param aSystemUptime - System uptime at the time of the hang, in minutes
+ * @param aFirefoxUptime - Firefox uptime at the time of the hang, in minutes
+ * @param aAnnotations - Any annotations to be added to the report
+ */
+#if defined(MOZ_ENABLE_PROFILER_SPS)
+void RecordChromeHang(uint32_t aDuration,
+ ProcessedStack &aStack,
+ int32_t aSystemUptime,
+ int32_t aFirefoxUptime,
+ mozilla::UniquePtr<mozilla::HangMonitor::HangAnnotations>
+ aAnnotations);
+#endif
+
+class ThreadHangStats;
+
+/**
+ * Move a ThreadHangStats to Telemetry storage. Normally Telemetry queries
+ * for active ThreadHangStats through BackgroundHangMonitor, but once a
+ * thread exits, the thread's copy of ThreadHangStats needs to be moved to
+ * inside Telemetry using this function.
+ *
+ * @param aStats ThreadHangStats to save; the data inside aStats
+ * will be moved and aStats should be treated as
+ * invalid after this function returns
+ */
+void RecordThreadHangStats(ThreadHangStats& aStats);
+
+/**
+ * Record a failed attempt at locking the user's profile.
+ *
+ * @param aProfileDir The profile directory whose lock attempt failed
+ */
+void WriteFailedProfileLock(nsIFile* aProfileDir);
+
+/**
+ * Adds the value to the given scalar.
+ *
+ * @param aId The scalar enum id.
+ * @param aValue The value to add to the scalar.
+ */
+void ScalarAdd(mozilla::Telemetry::ScalarID aId, uint32_t aValue);
+
+/**
+ * Sets the scalar to the given value.
+ *
+ * @param aId The scalar enum id.
+ * @param aValue The value to set the scalar to.
+ */
+void ScalarSet(mozilla::Telemetry::ScalarID aId, uint32_t aValue);
+
+/**
+ * Sets the scalar to the given value.
+ *
+ * @param aId The scalar enum id.
+ * @param aValue The value to set the scalar to.
+ */
+void ScalarSet(mozilla::Telemetry::ScalarID aId, bool aValue);
+
+/**
+ * Sets the scalar to the given value.
+ *
+ * @param aId The scalar enum id.
+ * @param aValue The value to set the scalar to, truncated to
+ * 50 characters if exceeding that length.
+ */
+void ScalarSet(mozilla::Telemetry::ScalarID aId, const nsAString& aValue);
+
+/**
+ * Sets the scalar to the maximum of the current and the passed value.
+ *
+ * @param aId The scalar enum id.
+ * @param aValue The value the scalar is set to if it's greater
+ * than the current value.
+ */
+void ScalarSetMaximum(mozilla::Telemetry::ScalarID aId, uint32_t aValue);
+
+/**
+ * Adds the value to the given scalar.
+ *
+ * @param aId The scalar enum id.
+ * @param aKey The scalar key.
+ * @param aValue The value to add to the scalar.
+ */
+void ScalarAdd(mozilla::Telemetry::ScalarID aId, const nsAString& aKey, uint32_t aValue);
+
+/**
+ * Sets the scalar to the given value.
+ *
+ * @param aId The scalar enum id.
+ * @param aKey The scalar key.
+ * @param aValue The value to set the scalar to.
+ */
+void ScalarSet(mozilla::Telemetry::ScalarID aId, const nsAString& aKey, uint32_t aValue);
+
+/**
+ * Sets the scalar to the given value.
+ *
+ * @param aId The scalar enum id.
+ * @param aKey The scalar key.
+ * @param aValue The value to set the scalar to.
+ */
+void ScalarSet(mozilla::Telemetry::ScalarID aId, const nsAString& aKey, bool aValue);
+
+/**
+ * Sets the scalar to the maximum of the current and the passed value.
+ *
+ * @param aId The scalar enum id.
+ * @param aKey The scalar key.
+ * @param aValue The value the scalar is set to if it's greater
+ * than the current value.
+ */
+void ScalarSetMaximum(mozilla::Telemetry::ScalarID aId, const nsAString& aKey, uint32_t aValue);
+
+} // namespace Telemetry
+} // namespace mozilla
+
+#endif // Telemetry_h__
diff --git a/toolkit/components/telemetry/TelemetryArchive.jsm b/toolkit/components/telemetry/TelemetryArchive.jsm
new file mode 100644
index 000000000..c5d251ab7
--- /dev/null
+++ b/toolkit/components/telemetry/TelemetryArchive.jsm
@@ -0,0 +1,125 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+"use strict";
+
+this.EXPORTED_SYMBOLS = [
+ "TelemetryArchive"
+];
+
+const {classes: Cc, interfaces: Ci, results: Cr, utils: Cu} = Components;
+
+Cu.import("resource://gre/modules/Log.jsm", this);
+Cu.import("resource://gre/modules/XPCOMUtils.jsm", this);
+Cu.import("resource://gre/modules/Preferences.jsm", this);
+Cu.import("resource://gre/modules/Task.jsm", this);
+Cu.import("resource://gre/modules/osfile.jsm", this);
+
+const LOGGER_NAME = "Toolkit.Telemetry";
+const LOGGER_PREFIX = "TelemetryArchive::";
+
+const PREF_BRANCH = "toolkit.telemetry.";
+const PREF_ARCHIVE_ENABLED = PREF_BRANCH + "archive.enabled";
+
+XPCOMUtils.defineLazyModuleGetter(this, "TelemetryStorage",
+ "resource://gre/modules/TelemetryStorage.jsm");
+
+this.TelemetryArchive = {
+  /**
+   * Get a list of the archived pings, sorted by the creation date.
+   * Note that scanning the archived pings on disk is delayed on startup,
+   * use promiseInitialized() to access this after scanning.
+   *
+   * @return {Promise<sequence<Object>>}
+   *         A list of the archived ping info in the form:
+   *         { id: <string>,
+   *           timestampCreated: <number>,
+   *           type: <string> }
+   */
+  promiseArchivedPingList: function() {
+    return TelemetryArchiveImpl.promiseArchivedPingList();
+  },
+
+  /**
+   * Load an archived ping from disk by id, asynchronously.
+   *
+   * @param id {String} The ping's UUID.
+   * @return {Promise<PingData>} A promise resolved with the ping's data on success.
+   */
+  promiseArchivedPingById: function(id) {
+    return TelemetryArchiveImpl.promiseArchivedPingById(id);
+  },
+
+  /**
+   * Archive a ping and persist it to disk.
+   *
+   * @param {object} ping The ping data to archive.
+   * @return {promise} Promise that is resolved when the ping is successfully archived.
+   */
+  promiseArchivePing: function(ping) {
+    return TelemetryArchiveImpl.promiseArchivePing(ping);
+  },
+};
+
+/**
+ * Checks if pings can be archived. Some products (e.g. Thunderbird) might not want
+ * to do that.
+ *
+ * @return {Boolean} True if pings should be archived, false otherwise.
+ */
+function shouldArchivePings() {
+  return Preferences.get(PREF_ARCHIVE_ENABLED, false);
+}
+
+var TelemetryArchiveImpl = {
+ _logger: null,
+
+ get _log() {
+ if (!this._logger) {
+ this._logger = Log.repository.getLoggerWithMessagePrefix(LOGGER_NAME, LOGGER_PREFIX);
+ }
+
+ return this._logger;
+ },
+
+ promiseArchivePing: function(ping) {
+ if (!shouldArchivePings()) {
+ this._log.trace("promiseArchivePing - archiving is disabled");
+ return Promise.resolve();
+ }
+
+ for (let field of ["creationDate", "id", "type"]) {
+ if (!(field in ping)) {
+ this._log.warn("promiseArchivePing - missing field " + field)
+ return Promise.reject(new Error("missing field " + field));
+ }
+ }
+
+ return TelemetryStorage.saveArchivedPing(ping);
+ },
+
+ _buildArchivedPingList: function(archivedPingsMap) {
+ let list = Array.from(archivedPingsMap, p => ({
+ id: p[0],
+ timestampCreated: p[1].timestampCreated,
+ type: p[1].type,
+ }));
+
+ list.sort((a, b) => a.timestampCreated - b.timestampCreated);
+
+ return list;
+ },
+
+ promiseArchivedPingList: function() {
+ this._log.trace("promiseArchivedPingList");
+
+ return TelemetryStorage.loadArchivedPingList().then(loadedInfo => {
+ return this._buildArchivedPingList(loadedInfo);
+ });
+ },
+
+ promiseArchivedPingById: function(id) {
+ this._log.trace("promiseArchivedPingById - id: " + id);
+ return TelemetryStorage.loadArchivedPing(id);
+ },
+};
diff --git a/toolkit/components/telemetry/TelemetryCommon.cpp b/toolkit/components/telemetry/TelemetryCommon.cpp
new file mode 100644
index 000000000..db9341ab5
--- /dev/null
+++ b/toolkit/components/telemetry/TelemetryCommon.cpp
@@ -0,0 +1,105 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "nsITelemetry.h"
+#include "nsVersionComparator.h"
+#include "mozilla/TimeStamp.h"
+#include "nsIConsoleService.h"
+#include "nsThreadUtils.h"
+
+#include "TelemetryCommon.h"
+
+#include <cstring>
+
+namespace mozilla {
+namespace Telemetry {
+namespace Common {
+
+// Returns true when |aExpiration| names a version this build has already
+// reached or passed.  The sentinel strings "never" and "default" are
+// treated as non-expiring.
+bool
+IsExpiredVersion(const char* aExpiration)
+{
+  MOZ_ASSERT(aExpiration);
+  // Note: We intentionally don't construct a static Version object here as we
+  // saw odd crashes around this (see bug 1334105).
+  return strcmp(aExpiration, "never") && strcmp(aExpiration, "default") &&
+    (mozilla::Version(aExpiration) <= MOZ_APP_VERSION);
+}
+
+// A dataset matches a containing dataset when they are identical, or
+// when the container is the "optin on release channel" dataset, which
+// is a superset of the "optout on release channel" one.
+bool
+IsInDataset(uint32_t aDataset, uint32_t aContainingDataset)
+{
+  return aDataset == aContainingDataset ||
+         (aContainingDataset == nsITelemetry::DATASET_RELEASE_CHANNEL_OPTIN &&
+          aDataset == nsITelemetry::DATASET_RELEASE_CHANNEL_OPTOUT);
+}
+
+bool
+CanRecordDataset(uint32_t aDataset, bool aCanRecordBase, bool aCanRecordExtended)
+{
+  // If extended telemetry is enabled, we are allowed to record
+  // regardless of the dataset.
+  if (aCanRecordExtended) {
+    return true;
+  }
+
+  // If base telemetry data is enabled and we're trying to record base
+  // telemetry, allow it.
+  if (aCanRecordBase &&
+      IsInDataset(aDataset, nsITelemetry::DATASET_RELEASE_CHANNEL_OPTOUT)) {
+    return true;
+  }
+
+  // We're not recording extended telemetry or this is not the base
+  // dataset. Bail out.
+  return false;
+}
+
+// Writes the milliseconds elapsed since process creation to |aResult|,
+// using low-resolution monotonic timestamps.  Returns
+// NS_ERROR_NOT_AVAILABLE when ProcessCreation() flags an error
+// (creation time unavailable); *aResult is still written in that case,
+// matching the original behavior.
+nsresult
+MsSinceProcessStart(double* aResult)
+{
+  bool error;
+  const TimeStamp creation = TimeStamp::ProcessCreation(error);
+  *aResult = (TimeStamp::NowLoRes() - creation).ToMilliseconds();
+  return error ? NS_ERROR_NOT_AVAILABLE : NS_OK;
+}
+
+// Logs |aMsg| to the Browser Console at |aLogLevel|.  May be called
+// from any thread; off-main-thread calls are re-dispatched to the main
+// thread because the console service is main-thread only.
+void
+LogToBrowserConsole(uint32_t aLogLevel, const nsAString& aMsg)
+{
+  if (!NS_IsMainThread()) {
+    nsString msg(aMsg);
+    nsCOMPtr<nsIRunnable> task =
+      NS_NewRunnableFunction([aLogLevel, msg]() { LogToBrowserConsole(aLogLevel, msg); });
+    NS_DispatchToMainThread(task.forget(), NS_DISPATCH_NORMAL);
+    return;
+  }
+
+  nsCOMPtr<nsIConsoleService> console(do_GetService("@mozilla.org/consoleservice;1"));
+  if (!console) {
+    NS_WARNING("Failed to log message to console.");
+    return;
+  }
+
+  nsCOMPtr<nsIScriptError> error(do_CreateInstance(NS_SCRIPTERROR_CONTRACTID));
+  // Fixed: |error| was previously dereferenced without a null check,
+  // unlike |console| above; bail out if instantiation failed.
+  if (!error) {
+    NS_WARNING("Failed to log message to console.");
+    return;
+  }
+  error->Init(aMsg, EmptyString(), EmptyString(), 0, 0, aLogLevel, "chrome javascript");
+  console->LogMessage(error);
+}
+
+} // namespace Common
+} // namespace Telemetry
+} // namespace mozilla
diff --git a/toolkit/components/telemetry/TelemetryCommon.h b/toolkit/components/telemetry/TelemetryCommon.h
new file mode 100644
index 000000000..3beefd673
--- /dev/null
+++ b/toolkit/components/telemetry/TelemetryCommon.h
@@ -0,0 +1,75 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2; -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef TelemetryCommon_h__
+#define TelemetryCommon_h__
+
+#include "nsTHashtable.h"
+#include "jsapi.h"
+#include "nsIScriptError.h"
+
+namespace mozilla {
+namespace Telemetry {
+namespace Common {
+
+template<class EntryType>
+class AutoHashtable : public nsTHashtable<EntryType>
+{
+public:
+ explicit AutoHashtable(uint32_t initLength =
+ PLDHashTable::kDefaultInitialLength);
+ typedef bool (*ReflectEntryFunc)(EntryType *entry, JSContext *cx, JS::Handle<JSObject*> obj);
+ bool ReflectIntoJS(ReflectEntryFunc entryFunc, JSContext *cx, JS::Handle<JSObject*> obj);
+};
+
+template<class EntryType>
+AutoHashtable<EntryType>::AutoHashtable(uint32_t initLength)
+ : nsTHashtable<EntryType>(initLength)
+{
+}
+
+/**
+ * Reflect the individual entries of table into JS, usually by defining
+ * some property and value of obj. entryFunc is called for each entry.
+ */
+template<typename EntryType>
+bool
+AutoHashtable<EntryType>::ReflectIntoJS(ReflectEntryFunc entryFunc,
+ JSContext *cx, JS::Handle<JSObject*> obj)
+{
+ for (auto iter = this->Iter(); !iter.Done(); iter.Next()) {
+ if (!entryFunc(iter.Get(), cx, obj)) {
+ return false;
+ }
+ }
+ return true;
+}
+
+bool IsExpiredVersion(const char* aExpiration);
+bool IsInDataset(uint32_t aDataset, uint32_t aContainingDataset);
+bool CanRecordDataset(uint32_t aDataset, bool aCanRecordBase, bool aCanRecordExtended);
+
+/**
+ * Return the number of milliseconds since process start using monotonic
+ * timestamps (unaffected by system clock changes).
+ *
+ * @return NS_OK on success, NS_ERROR_NOT_AVAILABLE if TimeStamp doesn't have the data.
+ */
+nsresult MsSinceProcessStart(double* aResult);
+
+/**
+ * Dumps a log message to the Browser Console using the provided level.
+ *
+ * @param aLogLevel The level to use when displaying the message in the browser console
+ * (e.g. nsIScriptError::warningFlag, ...).
+ * @param aMsg The text message to print to the console.
+ */
+void LogToBrowserConsole(uint32_t aLogLevel, const nsAString& aMsg);
+
+} // namespace Common
+} // namespace Telemetry
+} // namespace mozilla
+
+#endif // TelemetryCommon_h__
diff --git a/toolkit/components/telemetry/TelemetryComms.h b/toolkit/components/telemetry/TelemetryComms.h
new file mode 100644
index 000000000..0f2d888e3
--- /dev/null
+++ b/toolkit/components/telemetry/TelemetryComms.h
@@ -0,0 +1,84 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2; -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef Telemetry_Comms_h__
+#define Telemetry_Comms_h__
+
+#include "ipc/IPCMessageUtils.h"
+
+namespace mozilla {
+namespace Telemetry {
+
+enum ID : uint32_t;
+
+struct Accumulation
+{
+ mozilla::Telemetry::ID mId;
+ uint32_t mSample;
+};
+
+struct KeyedAccumulation
+{
+ mozilla::Telemetry::ID mId;
+ uint32_t mSample;
+ nsCString mKey;
+};
+
+} // namespace Telemetry
+} // namespace mozilla
+
+namespace IPC {
+
+template<>
+struct
+ParamTraits<mozilla::Telemetry::Accumulation>
+{
+ typedef mozilla::Telemetry::Accumulation paramType;
+
+ static void Write(Message* aMsg, const paramType& aParam)
+ {
+ aMsg->WriteUInt32(aParam.mId);
+ WriteParam(aMsg, aParam.mSample);
+ }
+
+ static bool Read(const Message* aMsg, PickleIterator* aIter, paramType* aResult)
+ {
+ if (!aMsg->ReadUInt32(aIter, reinterpret_cast<uint32_t*>(&(aResult->mId))) ||
+ !ReadParam(aMsg, aIter, &(aResult->mSample))) {
+ return false;
+ }
+
+ return true;
+ }
+};
+
+template<>
+struct
+ParamTraits<mozilla::Telemetry::KeyedAccumulation>
+{
+ typedef mozilla::Telemetry::KeyedAccumulation paramType;
+
+ static void Write(Message* aMsg, const paramType& aParam)
+ {
+ aMsg->WriteUInt32(aParam.mId);
+ WriteParam(aMsg, aParam.mSample);
+ WriteParam(aMsg, aParam.mKey);
+ }
+
+ static bool Read(const Message* aMsg, PickleIterator* aIter, paramType* aResult)
+ {
+ if (!aMsg->ReadUInt32(aIter, reinterpret_cast<uint32_t*>(&(aResult->mId))) ||
+ !ReadParam(aMsg, aIter, &(aResult->mSample)) ||
+ !ReadParam(aMsg, aIter, &(aResult->mKey))) {
+ return false;
+ }
+
+ return true;
+ }
+};
+
+} // namespace IPC
+
+#endif // Telemetry_Comms_h__
diff --git a/toolkit/components/telemetry/TelemetryController.jsm b/toolkit/components/telemetry/TelemetryController.jsm
new file mode 100644
index 000000000..b8de776da
--- /dev/null
+++ b/toolkit/components/telemetry/TelemetryController.jsm
@@ -0,0 +1,954 @@
+/* -*- js-indent-level: 2; indent-tabs-mode: nil -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+"use strict";
+
+const Cc = Components.classes;
+const Ci = Components.interfaces;
+const Cr = Components.results;
+const Cu = Components.utils;
+const myScope = this;
+
+Cu.import("resource://gre/modules/Log.jsm");
+Cu.import("resource://gre/modules/debug.js", this);
+Cu.import("resource://gre/modules/Services.jsm", this);
+Cu.import("resource://gre/modules/XPCOMUtils.jsm", this);
+Cu.import("resource://gre/modules/osfile.jsm", this);
+Cu.import("resource://gre/modules/Promise.jsm", this);
+Cu.import("resource://gre/modules/PromiseUtils.jsm", this);
+Cu.import("resource://gre/modules/Task.jsm", this);
+Cu.import("resource://gre/modules/DeferredTask.jsm", this);
+Cu.import("resource://gre/modules/Preferences.jsm");
+Cu.import("resource://gre/modules/Timer.jsm");
+Cu.import("resource://gre/modules/TelemetryUtils.jsm", this);
+Cu.import("resource://gre/modules/AppConstants.jsm");
+
+const Utils = TelemetryUtils;
+
+const LOGGER_NAME = "Toolkit.Telemetry";
+const LOGGER_PREFIX = "TelemetryController::";
+
+const PREF_BRANCH = "toolkit.telemetry.";
+const PREF_BRANCH_LOG = PREF_BRANCH + "log.";
+const PREF_SERVER = PREF_BRANCH + "server";
+const PREF_LOG_LEVEL = PREF_BRANCH_LOG + "level";
+const PREF_LOG_DUMP = PREF_BRANCH_LOG + "dump";
+const PREF_CACHED_CLIENTID = PREF_BRANCH + "cachedClientID";
+const PREF_FHR_UPLOAD_ENABLED = "datareporting.healthreport.uploadEnabled";
+const PREF_SESSIONS_BRANCH = "datareporting.sessions.";
+const PREF_UNIFIED = PREF_BRANCH + "unified";
+
+// Whether the FHR/Telemetry unification features are enabled.
+// Changing this pref requires a restart.
+const IS_UNIFIED_TELEMETRY = Preferences.get(PREF_UNIFIED, false);
+
+const PING_FORMAT_VERSION = 4;
+
+// Delay before initializing telemetry (ms)
+const TELEMETRY_DELAY = Preferences.get("toolkit.telemetry.initDelay", 60) * 1000;
+// Delay before initializing telemetry if we're testing (ms)
+const TELEMETRY_TEST_DELAY = 1;
+
+// Ping types.
+const PING_TYPE_MAIN = "main";
+const PING_TYPE_DELETION = "deletion";
+
+// Session ping reasons.
+const REASON_GATHER_PAYLOAD = "gather-payload";
+const REASON_GATHER_SUBSESSION_PAYLOAD = "gather-subsession-payload";
+
+XPCOMUtils.defineLazyModuleGetter(this, "ClientID",
+ "resource://gre/modules/ClientID.jsm");
+XPCOMUtils.defineLazyServiceGetter(this, "Telemetry",
+ "@mozilla.org/base/telemetry;1",
+ "nsITelemetry");
+XPCOMUtils.defineLazyModuleGetter(this, "AsyncShutdown",
+ "resource://gre/modules/AsyncShutdown.jsm");
+XPCOMUtils.defineLazyModuleGetter(this, "TelemetryStorage",
+ "resource://gre/modules/TelemetryStorage.jsm");
+XPCOMUtils.defineLazyModuleGetter(this, "ThirdPartyCookieProbe",
+ "resource://gre/modules/ThirdPartyCookieProbe.jsm");
+XPCOMUtils.defineLazyModuleGetter(this, "TelemetryEnvironment",
+ "resource://gre/modules/TelemetryEnvironment.jsm");
+XPCOMUtils.defineLazyModuleGetter(this, "SessionRecorder",
+ "resource://gre/modules/SessionRecorder.jsm");
+XPCOMUtils.defineLazyModuleGetter(this, "UpdateUtils",
+ "resource://gre/modules/UpdateUtils.jsm");
+XPCOMUtils.defineLazyModuleGetter(this, "TelemetryArchive",
+ "resource://gre/modules/TelemetryArchive.jsm");
+XPCOMUtils.defineLazyModuleGetter(this, "TelemetrySession",
+ "resource://gre/modules/TelemetrySession.jsm");
+XPCOMUtils.defineLazyModuleGetter(this, "TelemetrySend",
+ "resource://gre/modules/TelemetrySend.jsm");
+XPCOMUtils.defineLazyModuleGetter(this, "TelemetryReportingPolicy",
+ "resource://gre/modules/TelemetryReportingPolicy.jsm");
+
+/**
+ * Setup Telemetry logging. This function also gets called when logging-related
+ * preferences change.
+ */
+var gLogger = null;
+var gLogAppenderDump = null;
+function configureLogging() {
+ if (!gLogger) {
+ gLogger = Log.repository.getLogger(LOGGER_NAME);
+
+ // Log messages need to go to the browser console.
+ let consoleAppender = new Log.ConsoleAppender(new Log.BasicFormatter());
+ gLogger.addAppender(consoleAppender);
+
+ Preferences.observe(PREF_BRANCH_LOG, configureLogging);
+ }
+
+ // Make sure the logger keeps up with the logging level preference.
+ gLogger.level = Log.Level[Preferences.get(PREF_LOG_LEVEL, "Warn")];
+
+ // If enabled in the preferences, add a dump appender.
+ let logDumping = Preferences.get(PREF_LOG_DUMP, false);
+ if (logDumping != !!gLogAppenderDump) {
+ if (logDumping) {
+ gLogAppenderDump = new Log.DumpAppender(new Log.BasicFormatter());
+ gLogger.addAppender(gLogAppenderDump);
+ } else {
+ gLogger.removeAppender(gLogAppenderDump);
+ gLogAppenderDump = null;
+ }
+ }
+}
+
+/**
+ * This is a policy object used to override behavior for testing.
+ */
+var Policy = {
+ now: () => new Date(),
+ generatePingId: () => Utils.generateUUID(),
+ getCachedClientID: () => ClientID.getCachedClientID(),
+}
+
+this.EXPORTED_SYMBOLS = ["TelemetryController"];
+
+this.TelemetryController = Object.freeze({
+ Constants: Object.freeze({
+ PREF_LOG_LEVEL: PREF_LOG_LEVEL,
+ PREF_LOG_DUMP: PREF_LOG_DUMP,
+ PREF_SERVER: PREF_SERVER,
+ }),
+
+ /**
+ * Used only for testing purposes.
+ */
+ testInitLogging: function() {
+ configureLogging();
+ },
+
+ /**
+ * Used only for testing purposes.
+ */
+ testReset: function() {
+ return Impl.reset();
+ },
+
+ /**
+ * Used only for testing purposes.
+ */
+ testSetup: function() {
+ return Impl.setupTelemetry(true);
+ },
+
+ /**
+ * Used only for testing purposes.
+ */
+ testShutdown: function() {
+ return Impl.shutdown();
+ },
+
+ /**
+ * Used only for testing purposes.
+ */
+ testSetupContent: function() {
+ return Impl.setupContentTelemetry(true);
+ },
+
+ /**
+ * Send a notification.
+ */
+ observe: function (aSubject, aTopic, aData) {
+ return Impl.observe(aSubject, aTopic, aData);
+ },
+
+ /**
+ * Submit ping payloads to Telemetry. This will assemble a complete ping, adding
+ * environment data, client id and some general info.
+ * Depending on configuration, the ping will be sent to the server (immediately or later)
+ * and archived locally.
+ *
+ * To identify the different pings and to be able to query them pings have a type.
+   * A type is a string identifier that should be unique to the type of ping that is being submitted,
+ * it should only contain alphanumeric characters and '-' for separation, i.e. satisfy:
+ * /^[a-z0-9][a-z0-9-]+[a-z0-9]$/i
+ *
+ * @param {String} aType The type of the ping.
+ * @param {Object} aPayload The actual data payload for the ping.
+ * @param {Object} [aOptions] Options object.
+ * @param {Boolean} [aOptions.addClientId=false] true if the ping should contain the client
+ * id, false otherwise.
+ * @param {Boolean} [aOptions.addEnvironment=false] true if the ping should contain the
+ * environment data.
+ * @param {Object} [aOptions.overrideEnvironment=null] set to override the environment data.
+ * @returns {Promise} Test-only - a promise that resolves with the ping id once the ping is stored or sent.
+ */
+ submitExternalPing: function(aType, aPayload, aOptions = {}) {
+ aOptions.addClientId = aOptions.addClientId || false;
+ aOptions.addEnvironment = aOptions.addEnvironment || false;
+
+ return Impl.submitExternalPing(aType, aPayload, aOptions);
+ },
+
+ /**
+ * Get the current session ping data as it would be sent out or stored.
+ *
+ * @param {bool} aSubsession Whether to get subsession data. Optional, defaults to false.
+ * @return {object} The current ping data if Telemetry is enabled, null otherwise.
+ */
+ getCurrentPingData: function(aSubsession = false) {
+ return Impl.getCurrentPingData(aSubsession);
+ },
+
+ /**
+ * Save a ping to disk.
+ *
+ * @param {String} aType The type of the ping.
+ * @param {Object} aPayload The actual data payload for the ping.
+ * @param {Object} [aOptions] Options object.
+ * @param {Boolean} [aOptions.addClientId=false] true if the ping should contain the client
+ * id, false otherwise.
+ * @param {Boolean} [aOptions.addEnvironment=false] true if the ping should contain the
+ * environment data.
+ * @param {Boolean} [aOptions.overwrite=false] true overwrites a ping with the same name,
+ * if found.
+ * @param {Object} [aOptions.overrideEnvironment=null] set to override the environment data.
+ *
+ * @returns {Promise} A promise that resolves with the ping id when the ping is saved to
+ * disk.
+ */
+ addPendingPing: function(aType, aPayload, aOptions = {}) {
+ let options = aOptions;
+ options.addClientId = aOptions.addClientId || false;
+ options.addEnvironment = aOptions.addEnvironment || false;
+ options.overwrite = aOptions.overwrite || false;
+
+ return Impl.addPendingPing(aType, aPayload, options);
+ },
+
+ /**
+ * Check if we have an aborted-session ping from a previous session.
+ * If so, submit and then remove it.
+ *
+ * @return {Promise} Promise that is resolved when the ping is saved.
+ */
+ checkAbortedSessionPing: function() {
+ return Impl.checkAbortedSessionPing();
+ },
+
+ /**
+ * Save an aborted-session ping to disk without adding it to the pending pings.
+ *
+ * @param {Object} aPayload The ping payload data.
+ * @return {Promise} Promise that is resolved when the ping is saved.
+ */
+ saveAbortedSessionPing: function(aPayload) {
+ return Impl.saveAbortedSessionPing(aPayload);
+ },
+
+ /**
+ * Remove the aborted-session ping if any exists.
+ *
+ * @return {Promise} Promise that is resolved when the ping was removed.
+ */
+ removeAbortedSessionPing: function() {
+ return Impl.removeAbortedSessionPing();
+ },
+
+ /**
+ * Write a ping to a specified location on the disk. Does not add the ping to the
+ * pending pings.
+ *
+ * @param {String} aType The type of the ping.
+ * @param {Object} aPayload The actual data payload for the ping.
+ * @param {String} aFilePath The path to save the ping to.
+ * @param {Object} [aOptions] Options object.
+ * @param {Boolean} [aOptions.addClientId=false] true if the ping should contain the client
+ * id, false otherwise.
+ * @param {Boolean} [aOptions.addEnvironment=false] true if the ping should contain the
+ * environment data.
+ * @param {Boolean} [aOptions.overwrite=false] true overwrites a ping with the same name,
+ * if found.
+ * @param {Object} [aOptions.overrideEnvironment=null] set to override the environment data.
+ *
+ * @returns {Promise} A promise that resolves with the ping id when the ping is saved to
+ * disk.
+ */
+ savePing: function(aType, aPayload, aFilePath, aOptions = {}) {
+ let options = aOptions;
+ options.addClientId = aOptions.addClientId || false;
+ options.addEnvironment = aOptions.addEnvironment || false;
+ options.overwrite = aOptions.overwrite || false;
+
+ return Impl.savePing(aType, aPayload, aFilePath, options);
+ },
+
+ /**
+ * The session recorder instance managed by Telemetry.
+ * @return {Object} The active SessionRecorder instance or null if not available.
+ */
+ getSessionRecorder: function() {
+ return Impl._sessionRecorder;
+ },
+
+ /**
+   * Allows waiting for TelemetryController's delayed initialization to complete.
+ * The returned promise is guaranteed to resolve before TelemetryController is shutting down.
+ * @return {Promise} Resolved when delayed TelemetryController initialization completed.
+ */
+ promiseInitialized: function() {
+ return Impl.promiseInitialized();
+ },
+});
+
+var Impl = {
+ _initialized: false,
+ _initStarted: false, // Whether we started setting up TelemetryController.
+ _logger: null,
+ _prevValues: {},
+ // The previous build ID, if this is the first run with a new build.
+ // Undefined if this is not the first run, or the previous build ID is unknown.
+ _previousBuildID: undefined,
+ _clientID: null,
+ // A task performing delayed initialization
+ _delayedInitTask: null,
+ // The deferred promise resolved when the initialization task completes.
+ _delayedInitTaskDeferred: null,
+
+ // The session recorder, shared with FHR and the Data Reporting Service.
+ _sessionRecorder: null,
+ // This is a public barrier Telemetry clients can use to add blockers to the shutdown
+ // of TelemetryController.
+ // After this barrier, clients can not submit Telemetry pings anymore.
+ _shutdownBarrier: new AsyncShutdown.Barrier("TelemetryController: Waiting for clients."),
+ // This is a private barrier blocked by pending async ping activity (sending & saving).
+ _connectionsBarrier: new AsyncShutdown.Barrier("TelemetryController: Waiting for pending ping activity"),
+ // This is true when running in the test infrastructure.
+ _testMode: false,
+
+ get _log() {
+ if (!this._logger) {
+ this._logger = Log.repository.getLoggerWithMessagePrefix(LOGGER_NAME, LOGGER_PREFIX);
+ }
+
+ return this._logger;
+ },
+
+ /**
+ * Get the data for the "application" section of the ping.
+ */
+ _getApplicationSection: function() {
+ // Querying architecture and update channel can throw. Make sure to recover and null
+ // those fields.
+ let arch = null;
+ try {
+ arch = Services.sysinfo.get("arch");
+ } catch (e) {
+ this._log.trace("_getApplicationSection - Unable to get system architecture.", e);
+ }
+
+ let updateChannel = null;
+ try {
+ updateChannel = UpdateUtils.getUpdateChannel(false);
+ } catch (e) {
+ this._log.trace("_getApplicationSection - Unable to get update channel.", e);
+ }
+
+ return {
+ architecture: arch,
+ buildId: Services.appinfo.appBuildID,
+ name: Services.appinfo.name,
+ version: Services.appinfo.version,
+ displayVersion: AppConstants.MOZ_APP_VERSION_DISPLAY,
+ vendor: Services.appinfo.vendor,
+ platformVersion: Services.appinfo.platformVersion,
+ xpcomAbi: Services.appinfo.XPCOMABI,
+ channel: updateChannel,
+ };
+ },
+
+ /**
+ * Assemble a complete ping following the common ping format specification.
+ *
+ * @param {String} aType The type of the ping.
+ * @param {Object} aPayload The actual data payload for the ping.
+ * @param {Object} aOptions Options object.
+ * @param {Boolean} aOptions.addClientId true if the ping should contain the client
+ * id, false otherwise.
+ * @param {Boolean} aOptions.addEnvironment true if the ping should contain the
+ * environment data.
+ * @param {Object} [aOptions.overrideEnvironment=null] set to override the environment data.
+ *
+ * @returns {Object} An object that contains the assembled ping data.
+ */
+ assemblePing: function assemblePing(aType, aPayload, aOptions = {}) {
+ this._log.trace("assemblePing - Type " + aType + ", aOptions " + JSON.stringify(aOptions));
+
+ // Clone the payload data so we don't race against unexpected changes in subobjects that are
+ // still referenced by other code.
+ // We can't trust all callers to do this properly on their own.
+ let payload = Cu.cloneInto(aPayload, myScope);
+
+ // Fill the common ping fields.
+ let pingData = {
+ type: aType,
+ id: Policy.generatePingId(),
+ creationDate: (Policy.now()).toISOString(),
+ version: PING_FORMAT_VERSION,
+ application: this._getApplicationSection(),
+ payload: payload,
+ };
+
+ if (aOptions.addClientId) {
+ pingData.clientId = this._clientID;
+ }
+
+ if (aOptions.addEnvironment) {
+ pingData.environment = aOptions.overrideEnvironment || TelemetryEnvironment.currentEnvironment;
+ }
+
+ return pingData;
+ },
+
+ /**
+ * Track any pending ping send and save tasks through the promise passed here.
+ * This is needed to block shutdown on any outstanding ping activity.
+ */
+ _trackPendingPingTask: function (aPromise) {
+ this._connectionsBarrier.client.addBlocker("Waiting for ping task", aPromise);
+ },
+
+ /**
+ * Internal function to assemble a complete ping, adding environment data, client id
+ * and some general info. This waits on the client id to be loaded/generated if it's
+ * not yet available. Note that this function is synchronous unless we need to load
+ * the client id.
+ * Depending on configuration, the ping will be sent to the server (immediately or later)
+ * and archived locally.
+ *
+ * @param {String} aType The type of the ping.
+ * @param {Object} aPayload The actual data payload for the ping.
+ * @param {Object} [aOptions] Options object.
+ * @param {Boolean} [aOptions.addClientId=false] true if the ping should contain the client
+ * id, false otherwise.
+ * @param {Boolean} [aOptions.addEnvironment=false] true if the ping should contain the
+ * environment data.
+ * @param {Object} [aOptions.overrideEnvironment=null] set to override the environment data.
+ * @returns {Promise} Test-only - a promise that is resolved with the ping id once the ping is stored or sent.
+ */
+ _submitPingLogic: Task.async(function* (aType, aPayload, aOptions) {
+ // Make sure to have a clientId if we need one. This cover the case of submitting
+ // a ping early during startup, before Telemetry is initialized, if no client id was
+ // cached.
+ if (!this._clientID && aOptions.addClientId) {
+ Telemetry.getHistogramById("TELEMETRY_PING_SUBMISSION_WAITING_CLIENTID").add();
+ // We can safely call |getClientID| here and during initialization: we would still
+ // spawn and return one single loading task.
+ this._clientID = yield ClientID.getClientID();
+ }
+
+ const pingData = this.assemblePing(aType, aPayload, aOptions);
+ this._log.trace("submitExternalPing - ping assembled, id: " + pingData.id);
+
+ // Always persist the pings if we are allowed to. We should not yield on any of the
+ // following operations to keep this function synchronous for the majority of the calls.
+ let archivePromise = TelemetryArchive.promiseArchivePing(pingData)
+ .catch(e => this._log.error("submitExternalPing - Failed to archive ping " + pingData.id, e));
+ let p = [ archivePromise ];
+
+ p.push(TelemetrySend.submitPing(pingData));
+
+ return Promise.all(p).then(() => pingData.id);
+ }),
+
+ /**
+ * Submit ping payloads to Telemetry.
+ *
+ * @param {String} aType The type of the ping.
+ * @param {Object} aPayload The actual data payload for the ping.
+ * @param {Object} [aOptions] Options object.
+ * @param {Boolean} [aOptions.addClientId=false] true if the ping should contain the client
+ * id, false otherwise.
+ * @param {Boolean} [aOptions.addEnvironment=false] true if the ping should contain the
+ * environment data.
+ * @param {Object} [aOptions.overrideEnvironment=null] set to override the environment data.
+ * @returns {Promise} Test-only - a promise that is resolved with the ping id once the ping is stored or sent.
+ */
+ submitExternalPing: function send(aType, aPayload, aOptions) {
+ this._log.trace("submitExternalPing - type: " + aType + ", aOptions: " + JSON.stringify(aOptions));
+
+ // Enforce the type string to only contain sane characters.
+ const typeUuid = /^[a-z0-9][a-z0-9-]+[a-z0-9]$/i;
+ if (!typeUuid.test(aType)) {
+ this._log.error("submitExternalPing - invalid ping type: " + aType);
+ let histogram = Telemetry.getKeyedHistogramById("TELEMETRY_INVALID_PING_TYPE_SUBMITTED");
+ histogram.add(aType, 1);
+ return Promise.reject(new Error("Invalid type string submitted."));
+ }
+ // Enforce that the payload is an object.
+ if (aPayload === null || typeof aPayload !== 'object' || Array.isArray(aPayload)) {
+ this._log.error("submitExternalPing - invalid payload type: " + typeof aPayload);
+ let histogram = Telemetry.getHistogramById("TELEMETRY_INVALID_PAYLOAD_SUBMITTED");
+ histogram.add(1);
+ return Promise.reject(new Error("Invalid payload type submitted."));
+ }
+
+ let promise = this._submitPingLogic(aType, aPayload, aOptions);
+ this._trackPendingPingTask(promise);
+ return promise;
+ },
+
+ /**
+ * Save a ping to disk.
+ *
+ * @param {String} aType The type of the ping.
+ * @param {Object} aPayload The actual data payload for the ping.
+ * @param {Object} aOptions Options object.
+ * @param {Boolean} aOptions.addClientId true if the ping should contain the client id,
+ * false otherwise.
+ * @param {Boolean} aOptions.addEnvironment true if the ping should contain the
+ * environment data.
+ * @param {Boolean} aOptions.overwrite true overwrites a ping with the same name, if found.
+ * @param {Object} [aOptions.overrideEnvironment=null] set to override the environment data.
+ *
+ * @returns {Promise} A promise that resolves with the ping id when the ping is saved to
+ * disk.
+ */
+ addPendingPing: function addPendingPing(aType, aPayload, aOptions) {
+ this._log.trace("addPendingPing - Type " + aType + ", aOptions " + JSON.stringify(aOptions));
+
+ let pingData = this.assemblePing(aType, aPayload, aOptions);
+
+ let savePromise = TelemetryStorage.savePendingPing(pingData);
+ let archivePromise = TelemetryArchive.promiseArchivePing(pingData).catch(e => {
+ this._log.error("addPendingPing - Failed to archive ping " + pingData.id, e);
+ });
+
+ // Wait for both the archiving and ping persistence to complete.
+ let promises = [
+ savePromise,
+ archivePromise,
+ ];
+ return Promise.all(promises).then(() => pingData.id);
+ },
+
+ /**
+ * Write a ping to a specified location on the disk. Does not add the ping to the
+ * pending pings.
+ *
+ * @param {String} aType The type of the ping.
+ * @param {Object} aPayload The actual data payload for the ping.
+ * @param {String} aFilePath The path to save the ping to.
+ * @param {Object} aOptions Options object.
+ * @param {Boolean} aOptions.addClientId true if the ping should contain the client id,
+ * false otherwise.
+ * @param {Boolean} aOptions.addEnvironment true if the ping should contain the
+ * environment data.
+ * @param {Boolean} aOptions.overwrite true overwrites a ping with the same name, if found.
+ * @param {Object} [aOptions.overrideEnvironment=null] set to override the environment data.
+ *
+ * @returns {Promise} A promise that resolves with the ping id when the ping is saved to
+ * disk.
+ */
+ savePing: function savePing(aType, aPayload, aFilePath, aOptions) {
+ this._log.trace("savePing - Type " + aType + ", File Path " + aFilePath +
+ ", aOptions " + JSON.stringify(aOptions));
+ let pingData = this.assemblePing(aType, aPayload, aOptions);
+ return TelemetryStorage.savePingToFile(pingData, aFilePath, aOptions.overwrite)
+ .then(() => pingData.id);
+ },
+
+ /**
+ * Check whether we have an aborted-session ping. If so add it to the pending pings and archive it.
+ *
+ * @return {Promise} Promise that is resolved when the ping is submitted and archived.
+ */
+ checkAbortedSessionPing: Task.async(function*() {
+ let ping = yield TelemetryStorage.loadAbortedSessionPing();
+ this._log.trace("checkAbortedSessionPing - found aborted-session ping: " + !!ping);
+ if (!ping) {
+ return;
+ }
+
+ try {
+ yield TelemetryStorage.addPendingPing(ping);
+ yield TelemetryArchive.promiseArchivePing(ping);
+ } catch (e) {
+ this._log.error("checkAbortedSessionPing - Unable to add the pending ping", e);
+ } finally {
+ yield TelemetryStorage.removeAbortedSessionPing();
+ }
+ }),
+
+ /**
+ * Save an aborted-session ping to disk without adding it to the pending pings.
+ *
+ * @param {Object} aPayload The ping payload data.
+ * @return {Promise} Promise that is resolved when the ping is saved.
+ */
+ saveAbortedSessionPing: function(aPayload) {
+ this._log.trace("saveAbortedSessionPing");
+ const options = {addClientId: true, addEnvironment: true};
+ const pingData = this.assemblePing(PING_TYPE_MAIN, aPayload, options);
+ return TelemetryStorage.saveAbortedSessionPing(pingData);
+ },
+
+ removeAbortedSessionPing: function() {
+ return TelemetryStorage.removeAbortedSessionPing();
+ },
+
+ /**
+ * Perform telemetry initialization for either chrome or content process.
+ * @return {Boolean} True if Telemetry is allowed to record at least base (FHR) data,
+ * false otherwise.
+ */
+ enableTelemetryRecording: function enableTelemetryRecording() {
+ // The thumbnail service also runs in a content process, even with e10s off.
+ // We need to check if e10s is on so we don't submit child payloads for it.
+ // We still need xpcshell child tests to work, so we skip this if test mode is enabled.
+ if (Utils.isContentProcess && !this._testMode && !Services.appinfo.browserTabsRemoteAutostart) {
+ this._log.config("enableTelemetryRecording - not enabling Telemetry for non-e10s child process");
+ Telemetry.canRecordBase = false;
+ Telemetry.canRecordExtended = false;
+ return false;
+ }
+
+ // Configure base Telemetry recording.
+ // Unified Telemetry makes it opt-out. If extended Telemetry is enabled, base recording
+ // is always on as well.
+ const enabled = Utils.isTelemetryEnabled;
+ Telemetry.canRecordBase = enabled || IS_UNIFIED_TELEMETRY;
+ Telemetry.canRecordExtended = enabled;
+
+ this._log.config("enableTelemetryRecording - canRecordBase:" + Telemetry.canRecordBase +
+ ", canRecordExtended: " + Telemetry.canRecordExtended);
+
+ return Telemetry.canRecordBase;
+ },
+
+ /**
+   * This triggers basic telemetry initialization and schedules full initialization for later
+ * for performance reasons.
+ *
+ * This delayed initialization means TelemetryController init can be in the following states:
+ * 1) setupTelemetry was never called
+ * or it was called and
+ * 2) _delayedInitTask was scheduled, but didn't run yet.
+ * 3) _delayedInitTask is currently running.
+ * 4) _delayedInitTask finished running and is nulled out.
+ *
+ * @return {Promise} Resolved when TelemetryController and TelemetrySession are fully
+ * initialized. This is only used in tests.
+ */
+  setupTelemetry: function setupTelemetry(testing) {
+    this._initStarted = true;
+    this._testMode = testing;
+
+    this._log.trace("setupTelemetry");
+
+    // Re-entrant call while the delayed init task is still pending: hand back
+    // the same promise instead of scheduling initialization twice.
+    if (this._delayedInitTask) {
+      this._log.error("setupTelemetry - init task already running");
+      return this._delayedInitTaskDeferred.promise;
+    }
+
+    // Outside of test mode, a second call after full initialization is a no-op.
+    if (this._initialized && !this._testMode) {
+      this._log.error("setupTelemetry - already initialized");
+      return Promise.resolve();
+    }
+
+    // This will trigger displaying the datachoices infobar.
+    TelemetryReportingPolicy.setup();
+
+    if (!this.enableTelemetryRecording()) {
+      // NOTE(review): message says "setupChromeProcess" but this method is
+      // setupTelemetry - looks like a stale copy/paste; confirm before renaming.
+      this._log.config("setupChromeProcess - Telemetry recording is disabled, skipping Chrome process setup.");
+      return Promise.resolve();
+    }
+
+    // Initialize the session recorder.
+    if (!this._sessionRecorder) {
+      this._sessionRecorder = new SessionRecorder(PREF_SESSIONS_BRANCH);
+      this._sessionRecorder.onStartup();
+    }
+
+    this._attachObservers();
+
+    // Perform a lightweight, early initialization for the component, just registering
+    // a few observers and initializing the session.
+    TelemetrySession.earlyInit(this._testMode);
+
+    // For very short session durations, we may never load the client
+    // id from disk.
+    // We try to cache it in prefs to avoid this, even though this may
+    // lead to some stale client ids.
+    this._clientID = ClientID.getCachedClientID();
+
+    // Delay full telemetry initialization to give the browser time to
+    // run various late initializers. Otherwise our gathered memory
+    // footprint and other numbers would be too optimistic.
+    this._delayedInitTaskDeferred = Promise.defer();
+    this._delayedInitTask = new DeferredTask(function* () {
+      try {
+        // TODO: This should probably happen after all the delayed init here.
+        this._initialized = true;
+        TelemetryEnvironment.delayedInit();
+
+        yield TelemetrySend.setup(this._testMode);
+
+        // Load the ClientID.
+        this._clientID = yield ClientID.getClientID();
+
+        // Perform TelemetrySession delayed init.
+        yield TelemetrySession.delayedInit();
+        // Purge the pings archive by removing outdated pings. We don't wait for
+        // this task to complete, but TelemetryStorage blocks on it during
+        // shutdown.
+        TelemetryStorage.runCleanPingArchiveTask();
+
+        // Now that FHR/healthreporter is gone, make sure to remove FHR's DB from
+        // the profile directory. This is a temporary measure that we should drop
+        // in the future.
+        TelemetryStorage.removeFHRDatabase();
+
+        this._delayedInitTaskDeferred.resolve();
+      } catch (e) {
+        this._delayedInitTaskDeferred.reject(e);
+      } finally {
+        // Clearing the task marks delayed init as finished (see shutdown()).
+        this._delayedInitTask = null;
+      }
+    }.bind(this), this._testMode ? TELEMETRY_TEST_DELAY : TELEMETRY_DELAY);
+
+    // Block browser shutdown until this module has torn down cleanly.
+    AsyncShutdown.sendTelemetry.addBlocker("TelemetryController: shutting down",
+                                           () => this.shutdown(),
+                                           () => this._getState());
+
+    this._delayedInitTask.arm();
+    return this._delayedInitTaskDeferred.promise;
+  },
+
+ /**
+ * This triggers basic telemetry initialization for content processes.
+ * @param {Boolean} [testing=false] True if we are in test mode, false otherwise.
+ */
+ setupContentTelemetry: function (testing = false) {
+ this._testMode = testing;
+
+ // We call |enableTelemetryRecording| here to make sure that Telemetry.canRecord* flags
+ // are in sync between chrome and content processes.
+ if (!this.enableTelemetryRecording()) {
+ this._log.trace("setupContentTelemetry - Content process recording disabled.");
+ return;
+ }
+ TelemetrySession.setupContent(testing);
+ },
+
+  // Do proper shutdown waiting and cleanup.
+  _cleanupOnShutdown: Task.async(function*() {
+    if (!this._initialized) {
+      return;
+    }
+
+    // Stop reacting to logging pref changes and to the FHR upload pref.
+    Preferences.ignore(PREF_BRANCH_LOG, configureLogging);
+    this._detachObservers();
+
+    // Now do an orderly shutdown.
+    try {
+      // Stop the datachoices infobar display.
+      TelemetryReportingPolicy.shutdown();
+      TelemetryEnvironment.shutdown();
+
+      // Stop any ping sending.
+      yield TelemetrySend.shutdown();
+
+      yield TelemetrySession.shutdown();
+
+      // First wait for clients processing shutdown.
+      yield this._shutdownBarrier.wait();
+
+      // ... and wait for any outstanding async ping activity.
+      yield this._connectionsBarrier.wait();
+
+      // Perform final shutdown operations.
+      yield TelemetryStorage.shutdown();
+    } finally {
+      // Reset state even if one of the steps above threw.
+      this._initialized = false;
+      this._initStarted = false;
+    }
+  }),
+
+ shutdown: function() {
+ this._log.trace("shutdown");
+
+ // We can be in one the following states here:
+ // 1) setupTelemetry was never called
+ // or it was called and
+ // 2) _delayedInitTask was scheduled, but didn't run yet.
+ // 3) _delayedInitTask is running now.
+ // 4) _delayedInitTask finished running already.
+
+ // This handles 1).
+ if (!this._initStarted) {
+ return Promise.resolve();
+ }
+
+ // This handles 4).
+ if (!this._delayedInitTask) {
+ // We already ran the delayed initialization.
+ return this._cleanupOnShutdown();
+ }
+
+ // This handles 2) and 3).
+ return this._delayedInitTask.finalize().then(() => this._cleanupOnShutdown());
+ },
+
+ /**
+ * This observer drives telemetry.
+ */
+ observe: function (aSubject, aTopic, aData) {
+ // The logger might still be not available at this point.
+ if (aTopic == "profile-after-change" || aTopic == "app-startup") {
+ // If we don't have a logger, we need to make sure |Log.repository.getLogger()| is
+ // called before |getLoggerWithMessagePrefix|. Otherwise logging won't work.
+ configureLogging();
+ }
+
+ this._log.trace("observe - " + aTopic + " notified.");
+
+ switch (aTopic) {
+ case "profile-after-change":
+ // profile-after-change is only registered for chrome processes.
+ return this.setupTelemetry();
+ case "app-startup":
+ // app-startup is only registered for content processes.
+ return this.setupContentTelemetry();
+ }
+ return undefined;
+ },
+
+ /**
+ * Get an object describing the current state of this module for AsyncShutdown diagnostics.
+ */
+ _getState: function() {
+ return {
+ initialized: this._initialized,
+ initStarted: this._initStarted,
+ haveDelayedInitTask: !!this._delayedInitTask,
+ shutdownBarrier: this._shutdownBarrier.state,
+ connectionsBarrier: this._connectionsBarrier.state,
+ sendModule: TelemetrySend.getShutdownState(),
+ };
+ },
+
+  /**
+   * Called whenever the FHR Upload preference changes (e.g. when user disables FHR from
+   * the preferences panel), this triggers sending the deletion ping.
+   */
+  _onUploadPrefChange: function() {
+    const uploadEnabled = Preferences.get(PREF_FHR_UPLOAD_ENABLED, false);
+    if (uploadEnabled) {
+      // There's nothing we should do if we are enabling upload.
+      return;
+    }
+
+    let p = Task.spawn(function*() {
+      try {
+        // Clear the current pings.
+        yield TelemetrySend.clearCurrentPings();
+
+        // Remove all the pending pings, but not the deletion ping.
+        yield TelemetryStorage.runRemovePendingPingsTask();
+      } catch (e) {
+        this._log.error("_onUploadPrefChange - error clearing pending pings", e);
+      } finally {
+        // Always send the deletion ping.
+        this._log.trace("_onUploadPrefChange - Sending deletion ping.");
+        this.submitExternalPing(PING_TYPE_DELETION, {}, { addClientId: true });
+      }
+    }.bind(this));
+
+    // Block shutdown on the cleanup task so pings aren't left half-removed at exit.
+    this._shutdownBarrier.client.addBlocker(
+      "TelemetryController: removing pending pings after data upload was disabled", p);
+  },
+
+ _attachObservers: function() {
+ if (IS_UNIFIED_TELEMETRY) {
+ // Watch the FHR upload setting to trigger deletion pings.
+ Preferences.observe(PREF_FHR_UPLOAD_ENABLED, this._onUploadPrefChange, this);
+ }
+ },
+
+ /**
+ * Remove the preference observer to avoid leaks.
+ */
+ _detachObservers: function() {
+ if (IS_UNIFIED_TELEMETRY) {
+ Preferences.ignore(PREF_FHR_UPLOAD_ENABLED, this._onUploadPrefChange, this);
+ }
+ },
+
+ /**
+ * Allows waiting for TelemetryControllers delayed initialization to complete.
+ * This will complete before TelemetryController is shutting down.
+ * @return {Promise} Resolved when delayed TelemetryController initialization completed.
+ */
+ promiseInitialized: function() {
+ return this._delayedInitTaskDeferred.promise;
+ },
+
+ getCurrentPingData: function(aSubsession) {
+ this._log.trace("getCurrentPingData - subsession: " + aSubsession)
+
+ // Telemetry is disabled, don't gather any data.
+ if (!Telemetry.canRecordBase) {
+ return null;
+ }
+
+ const reason = aSubsession ? REASON_GATHER_SUBSESSION_PAYLOAD : REASON_GATHER_PAYLOAD;
+ const type = PING_TYPE_MAIN;
+ const payload = TelemetrySession.getPayload(reason);
+ const options = { addClientId: true, addEnvironment: true };
+ const ping = this.assemblePing(type, payload, options);
+
+ return ping;
+ },
+
+  reset: Task.async(function*() {
+    this._clientID = null;
+    this._detachObservers();
+
+    yield TelemetrySession.testReset();
+
+    // Recreate the barriers so a re-initialized controller starts clean.
+    this._connectionsBarrier = new AsyncShutdown.Barrier(
+      "TelemetryController: Waiting for pending ping activity"
+    );
+    this._shutdownBarrier = new AsyncShutdown.Barrier(
+      "TelemetryController: Waiting for clients."
+    );
+
+    // We need to kick off the controller setup first for tests that check the
+    // cached client id.
+    let controllerSetup = this.setupTelemetry(true);
+
+    yield TelemetrySend.reset();
+    yield TelemetryStorage.reset();
+    yield TelemetryEnvironment.testReset();
+
+    yield controllerSetup;
+  }),
+};
diff --git a/toolkit/components/telemetry/TelemetryEnvironment.jsm b/toolkit/components/telemetry/TelemetryEnvironment.jsm
new file mode 100644
index 000000000..e2453649c
--- /dev/null
+++ b/toolkit/components/telemetry/TelemetryEnvironment.jsm
@@ -0,0 +1,1459 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+"use strict";
+
+this.EXPORTED_SYMBOLS = [
+ "TelemetryEnvironment",
+];
+
+const {classes: Cc, interfaces: Ci, utils: Cu} = Components;
+const myScope = this;
+
+Cu.import("resource://gre/modules/XPCOMUtils.jsm", this);
+Cu.import("resource://gre/modules/Task.jsm");
+Cu.import("resource://gre/modules/Log.jsm");
+Cu.import("resource://gre/modules/Preferences.jsm");
+Cu.import("resource://gre/modules/PromiseUtils.jsm");
+Cu.import("resource://gre/modules/Services.jsm");
+Cu.import("resource://gre/modules/TelemetryUtils.jsm", this);
+Cu.import("resource://gre/modules/ObjectUtils.jsm");
+Cu.import("resource://gre/modules/TelemetryController.jsm", this);
+Cu.import("resource://gre/modules/AppConstants.jsm");
+
+const Utils = TelemetryUtils;
+
+XPCOMUtils.defineLazyModuleGetter(this, "AttributionCode",
+ "resource:///modules/AttributionCode.jsm");
+XPCOMUtils.defineLazyModuleGetter(this, "ctypes",
+ "resource://gre/modules/ctypes.jsm");
+if (AppConstants.platform !== "gonk") {
+ Cu.import("resource://gre/modules/AddonManager.jsm");
+ XPCOMUtils.defineLazyModuleGetter(this, "LightweightThemeManager",
+ "resource://gre/modules/LightweightThemeManager.jsm");
+}
+XPCOMUtils.defineLazyModuleGetter(this, "ProfileAge",
+ "resource://gre/modules/ProfileAge.jsm");
+XPCOMUtils.defineLazyModuleGetter(this, "UpdateUtils",
+ "resource://gre/modules/UpdateUtils.jsm");
+XPCOMUtils.defineLazyModuleGetter(this, "WindowsRegistry",
+ "resource://gre/modules/WindowsRegistry.jsm");
+
+// The maximum length of a string (e.g. description) in the addons section.
+const MAX_ADDON_STRING_LENGTH = 100;
+// The maximum length of a string value in the settings.attribution object.
+const MAX_ATTRIBUTION_STRING_LENGTH = 100;
+
/**
 * This is a policy object used to override behavior for testing.
 */
var Policy = {
  now: function() {
    return new Date();
  },
};
+
+var gGlobalEnvironment;
function getGlobal() {
  // Lazily create the singleton environment cache on first access.
  gGlobalEnvironment = gGlobalEnvironment || new EnvironmentCache();
  return gGlobalEnvironment;
}
+
// Public facade; every call is forwarded to the lazily created singleton
// EnvironmentCache (see getGlobal()).
this.TelemetryEnvironment = {
  get currentEnvironment() {
    return getGlobal().currentEnvironment;
  },

  onInitialized() {
    return getGlobal().onInitialized();
  },

  delayedInit() {
    return getGlobal().delayedInit();
  },

  registerChangeListener(name, listener) {
    return getGlobal().registerChangeListener(name, listener);
  },

  unregisterChangeListener(name) {
    return getGlobal().unregisterChangeListener(name);
  },

  shutdown() {
    return getGlobal().shutdown();
  },

  // Policies to use when watching preferences. Exported for use in tests.
  RECORD_PREF_STATE: 1, // Don't record the preference value.
  RECORD_PREF_VALUE: 2, // Record the value of user-set prefs only.

  // Testing method
  testWatchPreferences(prefMap) {
    return getGlobal()._watchPreferences(prefMap);
  },

  /**
   * Intended for use in tests only.
   *
   * In multiple tests we need a way to shut and re-start telemetry together
   * with TelemetryEnvironment. This is problematic due to the fact that
   * TelemetryEnvironment is a singleton. We, therefore, need this helper
   * method to be able to re-set TelemetryEnvironment.
   */
  testReset() {
    return getGlobal().reset();
  },

  /**
   * Intended for use in tests only: shuts down and recreates the singleton.
   */
  testCleanRestart() {
    getGlobal().shutdown();
    gGlobalEnvironment = null;
    return getGlobal();
  },
};
+
+const RECORD_PREF_STATE = TelemetryEnvironment.RECORD_PREF_STATE;
+const RECORD_PREF_VALUE = TelemetryEnvironment.RECORD_PREF_VALUE;
+const DEFAULT_ENVIRONMENT_PREFS = new Map([
+ ["app.feedback.baseURL", {what: RECORD_PREF_VALUE}],
+ ["app.support.baseURL", {what: RECORD_PREF_VALUE}],
+ ["accessibility.browsewithcaret", {what: RECORD_PREF_VALUE}],
+ ["accessibility.force_disabled", {what: RECORD_PREF_VALUE}],
+ ["app.update.auto", {what: RECORD_PREF_VALUE}],
+ ["app.update.enabled", {what: RECORD_PREF_VALUE}],
+ ["app.update.interval", {what: RECORD_PREF_VALUE}],
+ ["app.update.service.enabled", {what: RECORD_PREF_VALUE}],
+ ["app.update.silent", {what: RECORD_PREF_VALUE}],
+ ["app.update.url", {what: RECORD_PREF_VALUE}],
+ ["browser.cache.disk.enable", {what: RECORD_PREF_VALUE}],
+ ["browser.cache.disk.capacity", {what: RECORD_PREF_VALUE}],
+ ["browser.cache.memory.enable", {what: RECORD_PREF_VALUE}],
+ ["browser.cache.offline.enable", {what: RECORD_PREF_VALUE}],
+ ["browser.formfill.enable", {what: RECORD_PREF_VALUE}],
+ ["browser.newtab.url", {what: RECORD_PREF_STATE}],
+ ["browser.newtabpage.enabled", {what: RECORD_PREF_VALUE}],
+ ["browser.newtabpage.enhanced", {what: RECORD_PREF_VALUE}],
+ ["browser.shell.checkDefaultBrowser", {what: RECORD_PREF_VALUE}],
+ ["browser.search.suggest.enabled", {what: RECORD_PREF_VALUE}],
+ ["browser.startup.homepage", {what: RECORD_PREF_STATE}],
+ ["browser.startup.page", {what: RECORD_PREF_VALUE}],
+ ["browser.tabs.animate", {what: RECORD_PREF_VALUE}],
+ ["browser.urlbar.suggest.searches", {what: RECORD_PREF_VALUE}],
+ ["browser.urlbar.userMadeSearchSuggestionsChoice", {what: RECORD_PREF_VALUE}],
+ // Record "Zoom Text Only" pref in Firefox 50 to 52 (Bug 979323).
+ ["browser.zoom.full", {what: RECORD_PREF_VALUE}],
+ ["devtools.chrome.enabled", {what: RECORD_PREF_VALUE}],
+ ["devtools.debugger.enabled", {what: RECORD_PREF_VALUE}],
+ ["devtools.debugger.remote-enabled", {what: RECORD_PREF_VALUE}],
+ ["dom.ipc.plugins.asyncInit.enabled", {what: RECORD_PREF_VALUE}],
+ ["dom.ipc.plugins.enabled", {what: RECORD_PREF_VALUE}],
+ ["dom.ipc.processCount", {what: RECORD_PREF_VALUE, requiresRestart: true}],
+ ["dom.max_script_run_time", {what: RECORD_PREF_VALUE}],
+ ["experiments.manifest.uri", {what: RECORD_PREF_VALUE}],
+ ["extensions.autoDisableScopes", {what: RECORD_PREF_VALUE}],
+ ["extensions.enabledScopes", {what: RECORD_PREF_VALUE}],
+ ["extensions.blocklist.enabled", {what: RECORD_PREF_VALUE}],
+ ["extensions.blocklist.url", {what: RECORD_PREF_VALUE}],
+ ["extensions.strictCompatibility", {what: RECORD_PREF_VALUE}],
+ ["extensions.update.enabled", {what: RECORD_PREF_VALUE}],
+ ["extensions.update.url", {what: RECORD_PREF_VALUE}],
+ ["extensions.update.background.url", {what: RECORD_PREF_VALUE}],
+ ["general.smoothScroll", {what: RECORD_PREF_VALUE}],
+ ["gfx.direct2d.disabled", {what: RECORD_PREF_VALUE}],
+ ["gfx.direct2d.force-enabled", {what: RECORD_PREF_VALUE}],
+ ["gfx.direct2d.use1_1", {what: RECORD_PREF_VALUE}],
+ ["layers.acceleration.disabled", {what: RECORD_PREF_VALUE}],
+ ["layers.acceleration.force-enabled", {what: RECORD_PREF_VALUE}],
+ ["layers.async-pan-zoom.enabled", {what: RECORD_PREF_VALUE}],
+ ["layers.async-video-oop.enabled", {what: RECORD_PREF_VALUE}],
+ ["layers.async-video.enabled", {what: RECORD_PREF_VALUE}],
+ ["layers.componentalpha.enabled", {what: RECORD_PREF_VALUE}],
+ ["layers.d3d11.disable-warp", {what: RECORD_PREF_VALUE}],
+ ["layers.d3d11.force-warp", {what: RECORD_PREF_VALUE}],
+ ["layers.offmainthreadcomposition.force-disabled", {what: RECORD_PREF_VALUE}],
+ ["layers.prefer-d3d9", {what: RECORD_PREF_VALUE}],
+ ["layers.prefer-opengl", {what: RECORD_PREF_VALUE}],
+ ["layout.css.devPixelsPerPx", {what: RECORD_PREF_VALUE}],
+ ["network.proxy.autoconfig_url", {what: RECORD_PREF_STATE}],
+ ["network.proxy.http", {what: RECORD_PREF_STATE}],
+ ["network.proxy.ssl", {what: RECORD_PREF_STATE}],
+ ["pdfjs.disabled", {what: RECORD_PREF_VALUE}],
+ ["places.history.enabled", {what: RECORD_PREF_VALUE}],
+ ["privacy.trackingprotection.enabled", {what: RECORD_PREF_VALUE}],
+ ["privacy.donottrackheader.enabled", {what: RECORD_PREF_VALUE}],
+ ["services.sync.serverURL", {what: RECORD_PREF_STATE}],
+ ["security.mixed_content.block_active_content", {what: RECORD_PREF_VALUE}],
+ ["security.mixed_content.block_display_content", {what: RECORD_PREF_VALUE}],
+ ["security.sandbox.content.level", {what: RECORD_PREF_VALUE}],
+ ["xpinstall.signatures.required", {what: RECORD_PREF_VALUE}],
+]);
+
+const LOGGER_NAME = "Toolkit.Telemetry";
+
+const PREF_BLOCKLIST_ENABLED = "extensions.blocklist.enabled";
+const PREF_DISTRIBUTION_ID = "distribution.id";
+const PREF_DISTRIBUTION_VERSION = "distribution.version";
+const PREF_DISTRIBUTOR = "app.distributor";
+const PREF_DISTRIBUTOR_CHANNEL = "app.distributor.channel";
+const PREF_HOTFIX_LASTVERSION = "extensions.hotfix.lastVersion";
+const PREF_APP_PARTNER_BRANCH = "app.partner.";
+const PREF_PARTNER_ID = "mozilla.partner.id";
+const PREF_UPDATE_ENABLED = "app.update.enabled";
+const PREF_UPDATE_AUTODOWNLOAD = "app.update.auto";
+const PREF_SEARCH_COHORT = "browser.search.cohort";
+const PREF_E10S_COHORT = "e10s.rollout.cohort";
+
+const COMPOSITOR_CREATED_TOPIC = "compositor:created";
+const DISTRIBUTION_CUSTOMIZATION_COMPLETE_TOPIC = "distribution-customization-complete";
+const EXPERIMENTS_CHANGED_TOPIC = "experiments-changed";
+const GFX_FEATURES_READY_TOPIC = "gfx-features-ready";
+const SEARCH_ENGINE_MODIFIED_TOPIC = "browser-search-engine-modified";
+const SEARCH_SERVICE_TOPIC = "browser-search-service";
+
/**
 * Enforces the parameter to a boolean value.
 * @param aValue The input value.
 * @return {Boolean|Object} If aValue is a boolean or a number, returns its
 *         boolean value. Otherwise, returns null.
 */
function enforceBoolean(aValue) {
  if (typeof(aValue) !== "number" && typeof(aValue) !== "boolean") {
    return null;
  }
  // Use the Boolean() conversion function instead of the boxed
  // |new Boolean(...).valueOf()| anti-pattern; the result is identical.
  return Boolean(aValue);
}
+
/**
 * Get the current browser locale.
 * @return a string with the locale or null on failure.
 */
function getBrowserLocale() {
  try {
    let registry = Cc["@mozilla.org/chrome/chrome-registry;1"]
                     .getService(Ci.nsIXULChromeRegistry);
    return registry.getSelectedLocale('global');
  } catch (e) {
    return null;
  }
}
+
/**
 * Get the current OS locale.
 * @return a string with the OS locale or null on failure.
 */
function getSystemLocale() {
  let osLocale = null;
  try {
    osLocale = Services.locale.getLocaleComponentForUserAgent();
  } catch (e) {
    // Leave the result as null on failure.
  }
  return osLocale;
}
+
/**
 * Asynchronously get a list of addons of the specified type from the AddonManager.
 * @param aTypes An array containing the types of addons to request.
 * @return Promise<Array> resolved when AddonManager has finished, returning an
 *         array of addons.
 */
function promiseGetAddonsByTypes(aTypes) {
  return new Promise(function(resolve) {
    // Adapt the callback-style AddonManager API to a promise.
    AddonManager.getAddonsByTypes(aTypes, function(addons) {
      resolve(addons);
    });
  });
}
+
/**
 * Safely get a sysinfo property and return its value. If the property is not
 * available, return aDefault.
 *
 * @param aPropertyName the property name to get.
 * @param aDefault the value to return if aPropertyName is not available.
 * @return The property value, if available, or aDefault.
 */
function getSysinfoProperty(aPropertyName, aDefault) {
  let value = aDefault;
  try {
    // |getProperty| may throw if |aPropertyName| does not exist.
    value = Services.sysinfo.getProperty(aPropertyName);
  } catch (e) {
    // Fall back to the provided default.
  }
  return value;
}
+
/**
 * Safely get a gfxInfo field and return its value. If the field is not available, return
 * aDefault.
 *
 * @param aPropertyName the property name to get.
 * @param aDefault the value to return if aPropertyName is not available.
 * @return The property value, if available, or aDefault.
 */
function getGfxField(aPropertyName, aDefault) {
  const gfxInfo = Cc["@mozilla.org/gfx/info;1"].getService(Ci.nsIGfxInfo);

  try {
    // Accessing the field may throw if |aPropertyName| does not exist.
    const gfxProp = gfxInfo[aPropertyName];
    const isUsable = (gfxProp !== undefined) && (gfxProp !== "");
    if (isUsable) {
      return gfxProp;
    }
  } catch (e) {
    // Fall through to the default below.
  }

  return aDefault;
}
+
/**
 * Returns a substring of the input string.
 *
 * @param {String} aString The input string.
 * @param {Integer} aMaxLength The maximum length of the returned substring. If this is
 *        greater than the length of the input string, we return the whole input string.
 * @return {String} The substring or null if the input string is null.
 */
function limitStringToLength(aString, aMaxLength) {
  const isString = (typeof aString === "string");
  if (!isString) {
    return null;
  }
  // substring clamps out-of-range arguments, so short strings pass through whole.
  return aString.substring(0, aMaxLength);
}
+
/**
 * Force a value to be a string.
 * Only if the value is null, null is returned instead.
 */
function forceToStringOrNull(aValue) {
  return (aValue === null) ? null : String(aValue);
}
+
/**
 * Get the information about a graphic adapter.
 *
 * @param aSuffix A suffix to add to the properties names.
 * @return An object containing the adapter properties.
 */
function getGfxAdapter(aSuffix = "") {
  // gfxInfo (and thus getGfxField) may report "Unknown" rather than null for
  // the RAM amount on failure; parseInt turns that into NaN.
  let ram = parseInt(getGfxField("adapterRAM" + aSuffix, null), 10);
  if (Number.isNaN(ram)) {
    ram = null;
  }

  const field = (name) => getGfxField(name + aSuffix, null);
  return {
    description: field("adapterDescription"),
    vendorID: field("adapterVendorID"),
    deviceID: field("adapterDeviceID"),
    subsysID: field("adapterSubsysID"),
    RAM: ram,
    driver: field("adapterDriver"),
    driverVersion: field("adapterDriverVersion"),
    driverDate: field("adapterDriverDate"),
  };
}
+
+/**
+ * Gets the service pack and build information on Windows platforms. The initial version
+ * was copied from nsUpdateService.js.
+ *
+ * @return An object containing the service pack major and minor versions, along with the
+ *         build number.
+ */
+function getWindowsVersionInfo() {
+  const UNKNOWN_VERSION_INFO = {servicePackMajor: null, servicePackMinor: null, buildNumber: null};
+
+  if (AppConstants.platform !== "win") {
+    return UNKNOWN_VERSION_INFO;
+  }
+
+  // ctypes aliases for the Win32 primitive types used in the struct below.
+  const BYTE = ctypes.uint8_t;
+  const WORD = ctypes.uint16_t;
+  const DWORD = ctypes.uint32_t;
+  const WCHAR = ctypes.char16_t;
+  const BOOL = ctypes.int;
+
+  // This structure is described at:
+  // http://msdn.microsoft.com/en-us/library/ms724833%28v=vs.85%29.aspx
+  const SZCSDVERSIONLENGTH = 128;
+  const OSVERSIONINFOEXW = new ctypes.StructType('OSVERSIONINFOEXW',
+      [
+      {dwOSVersionInfoSize: DWORD},
+      {dwMajorVersion: DWORD},
+      {dwMinorVersion: DWORD},
+      {dwBuildNumber: DWORD},
+      {dwPlatformId: DWORD},
+      {szCSDVersion: ctypes.ArrayType(WCHAR, SZCSDVERSIONLENGTH)},
+      {wServicePackMajor: WORD},
+      {wServicePackMinor: WORD},
+      {wSuiteMask: WORD},
+      {wProductType: BYTE},
+      {wReserved: BYTE}
+      ]);
+
+  let kernel32 = ctypes.open("kernel32");
+  try {
+    let GetVersionEx = kernel32.declare("GetVersionExW",
+                                        ctypes.default_abi,
+                                        BOOL,
+                                        OSVERSIONINFOEXW.ptr);
+    let winVer = OSVERSIONINFOEXW();
+    // The API requires the struct size to be filled in before the call.
+    winVer.dwOSVersionInfoSize = OSVERSIONINFOEXW.size;
+
+    // NOTE(review): GetVersionEx is subject to Windows compatibility shims on
+    // Win 8.1+ - confirm the reported values are acceptable for telemetry.
+    if (0 === GetVersionEx(winVer.address())) {
+      throw ("Failure in GetVersionEx (returned 0)");
+    }
+
+    return {
+      servicePackMajor: winVer.wServicePackMajor,
+      servicePackMinor: winVer.wServicePackMinor,
+      buildNumber: winVer.dwBuildNumber,
+    };
+  } catch (e) {
+    return UNKNOWN_VERSION_INFO;
+  } finally {
+    // Always release the library handle.
+    kernel32.close();
+  }
+}
+
+/**
+ * Encapsulates the asynchronous magic interfacing with the addon manager. The builder
+ * is owned by a parent environment object and is an addon listener.
+ *
+ * @param environment The owning environment object; used for logging and for
+ *        publishing collected addon data into its _currentEnvironment.
+ */
+function EnvironmentAddonBuilder(environment) {
+  this._environment = environment;
+
+  // The pending task blocks addon manager shutdown. It can either be the initial load
+  // or a change load.
+  this._pendingTask = null;
+
+  // Set to true once initial load is complete and we're watching for changes.
+  this._loaded = false;
+}
+EnvironmentAddonBuilder.prototype = {
+ /**
+ * Get the initial set of addons.
+ * @returns Promise<void> when the initial load is complete.
+ */
+ init: function() {
+ // Some tests don't initialize the addon manager. This accounts for the
+ // unfortunate reality of life.
+ try {
+ AddonManager.shutdown.addBlocker("EnvironmentAddonBuilder",
+ () => this._shutdownBlocker());
+ } catch (err) {
+ return Promise.reject(err);
+ }
+
+ this._pendingTask = this._updateAddons().then(
+ () => { this._pendingTask = null; },
+ (err) => {
+ this._environment._log.error("init - Exception in _updateAddons", err);
+ this._pendingTask = null;
+ }
+ );
+
+ return this._pendingTask;
+ },
+
+  /**
+   * Register an addon listener and watch for changes.
+   */
+  watchForChanges: function() {
+    this._loaded = true;
+    AddonManager.addAddonListener(this);
+    Services.obs.addObserver(this, EXPERIMENTS_CHANGED_TOPIC, false);
+  },
+
+  // AddonListener callbacks: any addon state change triggers an environment update.
+  onEnabled: function() {
+    this._onAddonChange();
+  },
+  onDisabled: function() {
+    this._onAddonChange();
+  },
+  onInstalled: function() {
+    this._onAddonChange();
+  },
+  onUninstalling: function() {
+    this._onAddonChange();
+  },
+
+  _onAddonChange: function() {
+    this._environment._log.trace("_onAddonChange");
+    this._checkForChanges("addons-changed");
+  },
+
+  // nsIObserver; only registered for EXPERIMENTS_CHANGED_TOPIC (see
+  // watchForChanges), so every notification is treated as an experiment change.
+  observe: function (aSubject, aTopic, aData) {
+    this._environment._log.trace("observe - Topic " + aTopic);
+    this._checkForChanges("experiment-changed");
+  },
+
+ _checkForChanges: function(changeReason) {
+ if (this._pendingTask) {
+ this._environment._log.trace("_checkForChanges - task already pending, dropping change with reason " + changeReason);
+ return;
+ }
+
+ this._pendingTask = this._updateAddons().then(
+ (result) => {
+ this._pendingTask = null;
+ if (result.changed) {
+ this._environment._onEnvironmentChange(changeReason, result.oldEnvironment);
+ }
+ },
+ (err) => {
+ this._pendingTask = null;
+ this._environment._log.error("_checkForChanges: Error collecting addons", err);
+ });
+ },
+
+ _shutdownBlocker: function() {
+ if (this._loaded) {
+ AddonManager.removeAddonListener(this);
+ Services.obs.removeObserver(this, EXPERIMENTS_CHANGED_TOPIC);
+ }
+ return this._pendingTask;
+ },
+
+  /**
+   * Collect the addon data for the environment.
+   *
+   * This should only be called from _pendingTask; otherwise we risk
+   * running this during addon manager shutdown.
+   *
+   * @returns Promise<Object> This returns a Promise resolved with a status object with the following members:
+   *   changed - Whether the environment changed.
+   *   oldEnvironment - Only set if a change occurred, contains the environment data before the change.
+   */
+  _updateAddons: Task.async(function* () {
+    this._environment._log.trace("_updateAddons");
+    let personaId = null;
+    if (AppConstants.platform !== "gonk") {
+      // Record the id of the active lightweight theme, if any.
+      let theme = LightweightThemeManager.currentTheme;
+      if (theme) {
+        personaId = theme.id;
+      }
+    }
+
+    let addons = {
+      activeAddons: yield this._getActiveAddons(),
+      theme: yield this._getActiveTheme(),
+      activePlugins: this._getActivePlugins(),
+      activeGMPlugins: yield this._getActiveGMPlugins(),
+      activeExperiment: this._getActiveExperiment(),
+      persona: personaId,
+    };
+
+    // A change is reported the first time addons are collected, or when the
+    // freshly collected data differs (deep comparison) from the cached one.
+    let result = {
+      changed: !this._environment._currentEnvironment.addons ||
+               !ObjectUtils.deepEqual(addons, this._environment._currentEnvironment.addons),
+    };
+
+    if (result.changed) {
+      this._environment._log.trace("_updateAddons: addons differ");
+      // Snapshot the previous environment before overwriting the addon data.
+      result.oldEnvironment = Cu.cloneInto(this._environment._currentEnvironment, myScope);
+      this._environment._currentEnvironment.addons = addons;
+    }
+
+    return result;
+  }),
+
+  /**
+   * Get the addon data in object form.
+   * @return Promise<object> containing the addon data.
+   */
+  _getActiveAddons: Task.async(function* () {
+    // Request addons, asynchronously.
+    let allAddons = yield promiseGetAddonsByTypes(["extension", "service"]);
+
+    let activeAddons = {};
+    for (let addon of allAddons) {
+      // Skip addons which are not active.
+      if (!addon.isActive) {
+        continue;
+      }
+
+      // Weird addon data in the wild can lead to exceptions while collecting
+      // the data.
+      try {
+        // Make sure to have valid dates (clamp negative timestamps to epoch).
+        let installDate = new Date(Math.max(0, addon.installDate));
+        let updateDate = new Date(Math.max(0, addon.updateDate));
+
+        activeAddons[addon.id] = {
+          blocklisted: (addon.blocklistState !== Ci.nsIBlocklistService.STATE_NOT_BLOCKED),
+          description: limitStringToLength(addon.description, MAX_ADDON_STRING_LENGTH),
+          name: limitStringToLength(addon.name, MAX_ADDON_STRING_LENGTH),
+          userDisabled: enforceBoolean(addon.userDisabled),
+          appDisabled: addon.appDisabled,
+          version: limitStringToLength(addon.version, MAX_ADDON_STRING_LENGTH),
+          scope: addon.scope,
+          type: addon.type,
+          foreignInstall: enforceBoolean(addon.foreignInstall),
+          hasBinaryComponents: addon.hasBinaryComponents,
+          installDay: Utils.millisecondsToDays(installDate.getTime()),
+          updateDay: Utils.millisecondsToDays(updateDate.getTime()),
+          signedState: addon.signedState,
+          isSystem: addon.isSystem,
+        };
+
+        // NOTE(review): |signedState| is already assigned unconditionally in the
+        // literal above, making this conditional re-assignment redundant; one of
+        // the two should probably be removed (kept as-is to preserve behavior).
+        if (addon.signedState !== undefined)
+          activeAddons[addon.id].signedState = addon.signedState;
+
+      } catch (ex) {
+        this._environment._log.error("_getActiveAddons - An addon was discarded due to an error", ex);
+        continue;
+      }
+    }
+
+    return activeAddons;
+  }),
+
+ /**
+ * Get the currently active theme data in object form.
+ * @return Promise<object> containing the active theme data.
+ */
+ _getActiveTheme: Task.async(function* () {
+ // Request themes, asynchronously.
+ let themes = yield promiseGetAddonsByTypes(["theme"]);
+
+ let activeTheme = {};
+ // We only store information about the active theme.
+ let theme = themes.find(theme => theme.isActive);
+ if (theme) {
+ // Make sure to have valid dates.
+ let installDate = new Date(Math.max(0, theme.installDate));
+ let updateDate = new Date(Math.max(0, theme.updateDate));
+
+ activeTheme = {
+ id: theme.id,
+ blocklisted: (theme.blocklistState !== Ci.nsIBlocklistService.STATE_NOT_BLOCKED),
+ description: limitStringToLength(theme.description, MAX_ADDON_STRING_LENGTH),
+ name: limitStringToLength(theme.name, MAX_ADDON_STRING_LENGTH),
+ userDisabled: enforceBoolean(theme.userDisabled),
+ appDisabled: theme.appDisabled,
+ version: limitStringToLength(theme.version, MAX_ADDON_STRING_LENGTH),
+ scope: theme.scope,
+ foreignInstall: enforceBoolean(theme.foreignInstall),
+ hasBinaryComponents: theme.hasBinaryComponents,
+ installDay: Utils.millisecondsToDays(installDate.getTime()),
+ updateDay: Utils.millisecondsToDays(updateDate.getTime()),
+ };
+ }
+
+ return activeTheme;
+ }),
+
+ /**
+ * Get the plugins data in object form.
+ * @return Object containing the plugins data.
+ */
+ _getActivePlugins: function () {
+ let pluginTags =
+ Cc["@mozilla.org/plugin/host;1"].getService(Ci.nsIPluginHost).getPluginTags({});
+
+ let activePlugins = [];
+ for (let tag of pluginTags) {
+ // Skip plugins which are not active.
+ if (tag.disabled) {
+ continue;
+ }
+
+ try {
+ // Make sure to have a valid date.
+ let updateDate = new Date(Math.max(0, tag.lastModifiedTime));
+
+ activePlugins.push({
+ name: limitStringToLength(tag.name, MAX_ADDON_STRING_LENGTH),
+ version: limitStringToLength(tag.version, MAX_ADDON_STRING_LENGTH),
+ description: limitStringToLength(tag.description, MAX_ADDON_STRING_LENGTH),
+ blocklisted: tag.blocklisted,
+ disabled: tag.disabled,
+ clicktoplay: tag.clicktoplay,
+ mimeTypes: tag.getMimeTypes({}),
+ updateDay: Utils.millisecondsToDays(updateDate.getTime()),
+ });
+ } catch (ex) {
+ this._environment._log.error("_getActivePlugins - A plugin was discarded due to an error", ex);
+ continue;
+ }
+ }
+
+ return activePlugins;
+ },
+
+ /**
+ * Get the GMPlugins data in object form.
+ * @return Object containing the GMPlugins data.
+ *
+ * This should only be called from _pendingTask; otherwise we risk
+ * running this during addon manager shutdown.
+ */
+ _getActiveGMPlugins: Task.async(function* () {
+ // Request plugins, asynchronously.
+ let allPlugins = yield promiseGetAddonsByTypes(["plugin"]);
+
+ let activeGMPlugins = {};
+ for (let plugin of allPlugins) {
+ // Only get info for active GMplugins.
+ if (!plugin.isGMPlugin || !plugin.isActive) {
+ continue;
+ }
+
+ try {
+ activeGMPlugins[plugin.id] = {
+ version: plugin.version,
+ userDisabled: enforceBoolean(plugin.userDisabled),
+ applyBackgroundUpdates: plugin.applyBackgroundUpdates,
+ };
+ } catch (ex) {
+ this._environment._log.error("_getActiveGMPlugins - A GMPlugin was discarded due to an error", ex);
+ continue;
+ }
+ }
+
+ return activeGMPlugins;
+ }),
+
+ /**
+ * Get the active experiment data in object form.
+ * @return Object containing the active experiment data.
+ */
+ _getActiveExperiment: function () {
+ let experimentInfo = {};
+ try {
+ let scope = {};
+ Cu.import("resource:///modules/experiments/Experiments.jsm", scope);
+ let experiments = scope.Experiments.instance();
+ let activeExperiment = experiments.getActiveExperimentID();
+ if (activeExperiment) {
+ experimentInfo.id = activeExperiment;
+ experimentInfo.branch = experiments.getActiveExperimentBranch();
+ }
+ } catch (e) {
+ // If this is not Firefox, the import will fail.
+ }
+
+ return experimentInfo;
+ },
+};
+
+function EnvironmentCache() {
+ this._log = Log.repository.getLoggerWithMessagePrefix(
+ LOGGER_NAME, "TelemetryEnvironment::");
+ this._log.trace("constructor");
+
+ this._shutdown = false;
+ this._delayedInitFinished = false;
+
+ // A map of listeners that will be called on environment changes.
+ this._changeListeners = new Map();
+
+ // A map of watched preferences which trigger an Environment change when
+ // modified. Every entry contains a recording policy (RECORD_PREF_*).
+ this._watchedPrefs = DEFAULT_ENVIRONMENT_PREFS;
+
+ this._currentEnvironment = {
+ build: this._getBuild(),
+ partner: this._getPartner(),
+ system: this._getSystem(),
+ };
+
+ this._updateSettings();
+ // Fill in the default search engine, if the search provider is already initialized.
+ this._updateSearchEngine();
+ this._addObservers();
+
+ // Build the remaining asynchronous parts of the environment. Don't register change listeners
+ // until the initial environment has been built.
+
+ let p = [];
+ if (AppConstants.platform === "gonk") {
+ this._addonBuilder = {
+ watchForChanges: function() {}
+ };
+ } else {
+ this._addonBuilder = new EnvironmentAddonBuilder(this);
+ p = [ this._addonBuilder.init() ];
+ }
+
+ this._currentEnvironment.profile = {};
+ p.push(this._updateProfile());
+ if (AppConstants.MOZ_BUILD_APP == "browser") {
+ p.push(this._updateAttribution());
+ }
+
+ let setup = () => {
+ this._initTask = null;
+ this._startWatchingPrefs();
+ this._addonBuilder.watchForChanges();
+ this._updateGraphicsFeatures();
+ return this.currentEnvironment;
+ };
+
+ this._initTask = Promise.all(p)
+ .then(
+ () => setup(),
+ (err) => {
+ // log errors but eat them for consumers
+ this._log.error("EnvironmentCache - error while initializing", err);
+ return setup();
+ });
+}
+EnvironmentCache.prototype = {
+ /**
+ * The current environment data. The returned data is cloned to avoid
+ * unexpected sharing or mutation.
+ * @returns object
+ */
+ get currentEnvironment() {
+ return Cu.cloneInto(this._currentEnvironment, myScope);
+ },
+
+ /**
+   * Wait for the current environment to be fully initialized.
+ * @returns Promise<object>
+ */
+ onInitialized: function() {
+ if (this._initTask) {
+ return this._initTask;
+ }
+ return Promise.resolve(this.currentEnvironment);
+ },
+
+ /**
+ * This gets called when the delayed init completes.
+ */
+ delayedInit: function() {
+ this._delayedInitFinished = true;
+ },
+
+ /**
+ * Register a listener for environment changes.
+ * @param name The name of the listener. If a new listener is registered
+ * with the same name, the old listener will be replaced.
+ * @param listener function(reason, oldEnvironment) - Will receive a reason for
+ the change and the environment data before the change.
+ */
+ registerChangeListener: function (name, listener) {
+ this._log.trace("registerChangeListener for " + name);
+ if (this._shutdown) {
+ this._log.warn("registerChangeListener - already shutdown");
+ return;
+ }
+ this._changeListeners.set(name, listener);
+ },
+
+ /**
+ * Unregister from listening to environment changes.
+   * It's fine to call this on an uninitialized TelemetryEnvironment.
+ * @param name The name of the listener to remove.
+ */
+ unregisterChangeListener: function (name) {
+ this._log.trace("unregisterChangeListener for " + name);
+ if (this._shutdown) {
+ this._log.warn("registerChangeListener - already shutdown");
+ return;
+ }
+ this._changeListeners.delete(name);
+ },
+
+ shutdown: function() {
+ this._log.trace("shutdown");
+ this._shutdown = true;
+ },
+
+ /**
+ * Only used in tests, set the preferences to watch.
+ * @param aPreferences A map of preferences names and their recording policy.
+ */
+ _watchPreferences: function (aPreferences) {
+ this._stopWatchingPrefs();
+ this._watchedPrefs = aPreferences;
+ this._updateSettings();
+ this._startWatchingPrefs();
+ },
+
+ /**
+ * Get an object containing the values for the watched preferences. Depending on the
+ * policy, the value for a preference or whether it was changed by user is reported.
+ *
+ * @return An object containing the preferences values.
+ */
+ _getPrefData: function () {
+ let prefData = {};
+ for (let [pref, policy] of this._watchedPrefs.entries()) {
+ // Only record preferences if they are non-default
+ if (!Preferences.isSet(pref)) {
+ continue;
+ }
+
+ // Check the policy for the preference and decide if we need to store its value
+ // or whether it changed from the default value.
+ let prefValue = undefined;
+ if (policy.what == TelemetryEnvironment.RECORD_PREF_STATE) {
+ prefValue = "<user-set>";
+ } else {
+ prefValue = Preferences.get(pref, null);
+ }
+ prefData[pref] = prefValue;
+ }
+ return prefData;
+ },
+
+ /**
+ * Start watching the preferences.
+ */
+ _startWatchingPrefs: function () {
+ this._log.trace("_startWatchingPrefs - " + this._watchedPrefs);
+
+ for (let [pref, options] of this._watchedPrefs) {
+ if (!("requiresRestart" in options) || !options.requiresRestart) {
+ Preferences.observe(pref, this._onPrefChanged, this);
+ }
+ }
+ },
+
+ _onPrefChanged: function() {
+ this._log.trace("_onPrefChanged");
+ let oldEnvironment = Cu.cloneInto(this._currentEnvironment, myScope);
+ this._updateSettings();
+ this._onEnvironmentChange("pref-changed", oldEnvironment);
+ },
+
+ /**
+ * Do not receive any more change notifications for the preferences.
+ */
+ _stopWatchingPrefs: function () {
+ this._log.trace("_stopWatchingPrefs");
+
+ for (let [pref, options] of this._watchedPrefs) {
+ if (!("requiresRestart" in options) || !options.requiresRestart) {
+ Preferences.ignore(pref, this._onPrefChanged, this);
+ }
+ }
+ },
+
+ _addObservers: function () {
+ // Watch the search engine change and service topics.
+ Services.obs.addObserver(this, COMPOSITOR_CREATED_TOPIC, false);
+ Services.obs.addObserver(this, DISTRIBUTION_CUSTOMIZATION_COMPLETE_TOPIC, false);
+ Services.obs.addObserver(this, GFX_FEATURES_READY_TOPIC, false);
+ Services.obs.addObserver(this, SEARCH_ENGINE_MODIFIED_TOPIC, false);
+ Services.obs.addObserver(this, SEARCH_SERVICE_TOPIC, false);
+ },
+
+ _removeObservers: function () {
+ Services.obs.removeObserver(this, COMPOSITOR_CREATED_TOPIC);
+ try {
+ Services.obs.removeObserver(this, DISTRIBUTION_CUSTOMIZATION_COMPLETE_TOPIC);
+ } catch (ex) {}
+ Services.obs.removeObserver(this, GFX_FEATURES_READY_TOPIC);
+ Services.obs.removeObserver(this, SEARCH_ENGINE_MODIFIED_TOPIC);
+ Services.obs.removeObserver(this, SEARCH_SERVICE_TOPIC);
+ },
+
+ observe: function (aSubject, aTopic, aData) {
+ this._log.trace("observe - aTopic: " + aTopic + ", aData: " + aData);
+ switch (aTopic) {
+ case SEARCH_ENGINE_MODIFIED_TOPIC:
+ if (aData != "engine-current") {
+ return;
+ }
+ // Record the new default search choice and send the change notification.
+ this._onSearchEngineChange();
+ break;
+ case SEARCH_SERVICE_TOPIC:
+ if (aData != "init-complete") {
+ return;
+ }
+ // Now that the search engine init is complete, record the default search choice.
+ this._updateSearchEngine();
+ break;
+ case GFX_FEATURES_READY_TOPIC:
+ case COMPOSITOR_CREATED_TOPIC:
+ // Full graphics information is not available until we have created at
+ // least one off-main-thread-composited window. Thus we wait for the
+ // first compositor to be created and then query nsIGfxInfo again.
+ this._updateGraphicsFeatures();
+ break;
+ case DISTRIBUTION_CUSTOMIZATION_COMPLETE_TOPIC:
+        // Distribution customizations are applied after final-ui-startup. Query
+ // partner prefs again when they are ready.
+ this._updatePartner();
+ Services.obs.removeObserver(this, aTopic);
+ break;
+ }
+ },
+
+ /**
+ * Get the default search engine.
+ * @return {String} Returns the search engine identifier, "NONE" if no default search
+ * engine is defined or "UNDEFINED" if no engine identifier or name can be found.
+ */
+ _getDefaultSearchEngine: function () {
+ let engine;
+ try {
+ engine = Services.search.defaultEngine;
+ } catch (e) {}
+
+ let name;
+ if (!engine) {
+ name = "NONE";
+ } else if (engine.identifier) {
+ name = engine.identifier;
+ } else if (engine.name) {
+ name = "other-" + engine.name;
+ } else {
+ name = "UNDEFINED";
+ }
+
+ return name;
+ },
+
+ /**
+ * Update the default search engine value.
+ */
+ _updateSearchEngine: function () {
+ if (!Services.search) {
+ // Just ignore cases where the search service is not implemented.
+ return;
+ }
+
+ this._log.trace("_updateSearchEngine - isInitialized: " + Services.search.isInitialized);
+ if (!Services.search.isInitialized) {
+ return;
+ }
+
+ // Make sure we have a settings section.
+ this._currentEnvironment.settings = this._currentEnvironment.settings || {};
+ // Update the search engine entry in the current environment.
+ this._currentEnvironment.settings.defaultSearchEngine = this._getDefaultSearchEngine();
+ this._currentEnvironment.settings.defaultSearchEngineData =
+ Services.search.getDefaultEngineInfo();
+
+ // Record the cohort identifier used for search defaults A/B testing.
+ if (Services.prefs.prefHasUserValue(PREF_SEARCH_COHORT))
+ this._currentEnvironment.settings.searchCohort = Services.prefs.getCharPref(PREF_SEARCH_COHORT);
+ },
+
+ /**
+ * Update the default search engine value and trigger the environment change.
+ */
+ _onSearchEngineChange: function () {
+ this._log.trace("_onSearchEngineChange");
+
+ // Finally trigger the environment change notification.
+ let oldEnvironment = Cu.cloneInto(this._currentEnvironment, myScope);
+ this._updateSearchEngine();
+ this._onEnvironmentChange("search-engine-changed", oldEnvironment);
+ },
+
+ /**
+ * Update the graphics features object.
+ */
+ _updateGraphicsFeatures: function () {
+ let gfxData = this._currentEnvironment.system.gfx;
+ try {
+ let gfxInfo = Cc["@mozilla.org/gfx/info;1"].getService(Ci.nsIGfxInfo);
+ gfxData.features = gfxInfo.getFeatures();
+ } catch (e) {
+ this._log.error("nsIGfxInfo.getFeatures() caught error", e);
+ }
+ },
+
+ /**
+ * Update the partner prefs.
+ */
+ _updatePartner: function() {
+ this._currentEnvironment.partner = this._getPartner();
+ },
+
+ /**
+ * Get the build data in object form.
+ * @return Object containing the build data.
+ */
+ _getBuild: function () {
+ let buildData = {
+ applicationId: Services.appinfo.ID || null,
+ applicationName: Services.appinfo.name || null,
+ architecture: Services.sysinfo.get("arch"),
+ buildId: Services.appinfo.appBuildID || null,
+ version: Services.appinfo.version || null,
+ vendor: Services.appinfo.vendor || null,
+ platformVersion: Services.appinfo.platformVersion || null,
+ xpcomAbi: Services.appinfo.XPCOMABI,
+ hotfixVersion: Preferences.get(PREF_HOTFIX_LASTVERSION, null),
+ };
+
+ // Add |architecturesInBinary| only for Mac Universal builds.
+ if ("@mozilla.org/xpcom/mac-utils;1" in Cc) {
+ let macUtils = Cc["@mozilla.org/xpcom/mac-utils;1"].getService(Ci.nsIMacUtils);
+ if (macUtils && macUtils.isUniversalBinary) {
+ buildData.architecturesInBinary = macUtils.architecturesInBinary;
+ }
+ }
+
+ return buildData;
+ },
+
+ /**
+ * Determine if we're the default browser.
+ * @returns null on error, true if we are the default browser, or false otherwise.
+ */
+ _isDefaultBrowser: function () {
+ if (AppConstants.platform === "gonk") {
+ return true;
+ }
+
+ if (!("@mozilla.org/browser/shell-service;1" in Cc)) {
+ this._log.info("_isDefaultBrowser - Could not obtain browser shell service");
+ return null;
+ }
+
+ let shellService;
+ try {
+ let scope = {};
+ Cu.import("resource:///modules/ShellService.jsm", scope);
+ shellService = scope.ShellService;
+ } catch (ex) {
+ this._log.error("_isDefaultBrowser - Could not obtain shell service JSM");
+ }
+
+ if (!shellService) {
+ try {
+ shellService = Cc["@mozilla.org/browser/shell-service;1"]
+ .getService(Ci.nsIShellService);
+ } catch (ex) {
+ this._log.error("_isDefaultBrowser - Could not obtain shell service", ex);
+ return null;
+ }
+ }
+
+ try {
+ // This uses the same set of flags used by the pref pane.
+ return shellService.isDefaultBrowser(false, true) ? true : false;
+ } catch (ex) {
+ this._log.error("_isDefaultBrowser - Could not determine if default browser", ex);
+ return null;
+ }
+ },
+
+ /**
+ * Update the cached settings data.
+ */
+ _updateSettings: function () {
+ let updateChannel = null;
+ try {
+ updateChannel = UpdateUtils.getUpdateChannel(false);
+ } catch (e) {}
+
+ this._currentEnvironment.settings = {
+ blocklistEnabled: Preferences.get(PREF_BLOCKLIST_ENABLED, true),
+ e10sEnabled: Services.appinfo.browserTabsRemoteAutostart,
+ e10sCohort: Preferences.get(PREF_E10S_COHORT, "unknown"),
+ telemetryEnabled: Utils.isTelemetryEnabled,
+ locale: getBrowserLocale(),
+ update: {
+ channel: updateChannel,
+ enabled: Preferences.get(PREF_UPDATE_ENABLED, true),
+ autoDownload: Preferences.get(PREF_UPDATE_AUTODOWNLOAD, true),
+ },
+ userPrefs: this._getPrefData(),
+ };
+
+ if (AppConstants.platform !== "gonk") {
+ this._currentEnvironment.settings.addonCompatibilityCheckEnabled =
+ AddonManager.checkCompatibility;
+ }
+
+ if (AppConstants.platform !== "android") {
+ this._currentEnvironment.settings.isDefaultBrowser =
+ this._isDefaultBrowser();
+ }
+
+ this._updateSearchEngine();
+ },
+
+ /**
+ * Update the cached profile data.
+ * @returns Promise<> resolved when the I/O is complete.
+ */
+ _updateProfile: Task.async(function* () {
+ const logger = Log.repository.getLoggerWithMessagePrefix(LOGGER_NAME, "ProfileAge - ");
+ let profileAccessor = new ProfileAge(null, logger);
+
+ let creationDate = yield profileAccessor.created;
+ let resetDate = yield profileAccessor.reset;
+
+ this._currentEnvironment.profile.creationDate =
+ Utils.millisecondsToDays(creationDate);
+ if (resetDate) {
+ this._currentEnvironment.profile.resetDate =
+ Utils.millisecondsToDays(resetDate);
+ }
+ }),
+
+ /**
+ * Update the cached attribution data object.
+ * @returns Promise<> resolved when the I/O is complete.
+ */
+ _updateAttribution: Task.async(function* () {
+ let data = yield AttributionCode.getAttrDataAsync();
+ if (Object.keys(data).length > 0) {
+ this._currentEnvironment.settings.attribution = {};
+ for (let key in data) {
+ this._currentEnvironment.settings.attribution[key] =
+ limitStringToLength(data[key], MAX_ATTRIBUTION_STRING_LENGTH);
+ }
+ }
+ }),
+
+ /**
+ * Get the partner data in object form.
+ * @return Object containing the partner data.
+ */
+ _getPartner: function () {
+ let partnerData = {
+ distributionId: Preferences.get(PREF_DISTRIBUTION_ID, null),
+ distributionVersion: Preferences.get(PREF_DISTRIBUTION_VERSION, null),
+ partnerId: Preferences.get(PREF_PARTNER_ID, null),
+ distributor: Preferences.get(PREF_DISTRIBUTOR, null),
+ distributorChannel: Preferences.get(PREF_DISTRIBUTOR_CHANNEL, null),
+ };
+
+ // Get the PREF_APP_PARTNER_BRANCH branch and append its children to partner data.
+ let partnerBranch = Services.prefs.getBranch(PREF_APP_PARTNER_BRANCH);
+ partnerData.partnerNames = partnerBranch.getChildList("");
+
+ return partnerData;
+ },
+
+ /**
+ * Get the CPU information.
+ * @return Object containing the CPU information data.
+ */
+ _getCpuData: function () {
+ let cpuData = {
+ count: getSysinfoProperty("cpucount", null),
+ cores: getSysinfoProperty("cpucores", null),
+ vendor: getSysinfoProperty("cpuvendor", null),
+ family: getSysinfoProperty("cpufamily", null),
+ model: getSysinfoProperty("cpumodel", null),
+ stepping: getSysinfoProperty("cpustepping", null),
+ l2cacheKB: getSysinfoProperty("cpucachel2", null),
+ l3cacheKB: getSysinfoProperty("cpucachel3", null),
+ speedMHz: getSysinfoProperty("cpuspeed", null),
+ };
+
+ const CPU_EXTENSIONS = ["hasMMX", "hasSSE", "hasSSE2", "hasSSE3", "hasSSSE3",
+ "hasSSE4A", "hasSSE4_1", "hasSSE4_2", "hasAVX", "hasAVX2",
+ "hasEDSP", "hasARMv6", "hasARMv7", "hasNEON"];
+
+ // Enumerate the available CPU extensions.
+ let availableExts = [];
+ for (let ext of CPU_EXTENSIONS) {
+ if (getSysinfoProperty(ext, false)) {
+ availableExts.push(ext);
+ }
+ }
+
+ cpuData.extensions = availableExts;
+
+ return cpuData;
+ },
+
+ /**
+ * Get the device information, if we are on a portable device.
+ * @return Object containing the device information data, or null if
+ * not a portable device.
+ */
+ _getDeviceData: function () {
+ if (!["gonk", "android"].includes(AppConstants.platform)) {
+ return null;
+ }
+
+ return {
+ model: getSysinfoProperty("device", null),
+ manufacturer: getSysinfoProperty("manufacturer", null),
+ hardware: getSysinfoProperty("hardware", null),
+ isTablet: getSysinfoProperty("tablet", null),
+ };
+ },
+
+ /**
+ * Get the OS information.
+ * @return Object containing the OS data.
+ */
+ _getOSData: function () {
+ let data = {
+ name: forceToStringOrNull(getSysinfoProperty("name", null)),
+ version: forceToStringOrNull(getSysinfoProperty("version", null)),
+ locale: forceToStringOrNull(getSystemLocale()),
+ };
+
+ if (["gonk", "android"].includes(AppConstants.platform)) {
+ data.kernelVersion = forceToStringOrNull(getSysinfoProperty("kernel_version", null));
+ } else if (AppConstants.platform === "win") {
+ // The path to the "UBR" key, queried to get additional version details on Windows.
+ const WINDOWS_UBR_KEY_PATH = "SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion";
+
+ let versionInfo = getWindowsVersionInfo();
+ data.servicePackMajor = versionInfo.servicePackMajor;
+ data.servicePackMinor = versionInfo.servicePackMinor;
+ // We only need the build number and UBR if we're at or above Windows 10.
+ if (typeof(data.version) === 'string' &&
+ Services.vc.compare(data.version, "10") >= 0) {
+ data.windowsBuildNumber = versionInfo.buildNumber;
+ // Query the UBR key and only add it to the environment if it's available.
+ // |readRegKey| doesn't throw, but rather returns 'undefined' on error.
+ let ubr = WindowsRegistry.readRegKey(Ci.nsIWindowsRegKey.ROOT_KEY_LOCAL_MACHINE,
+ WINDOWS_UBR_KEY_PATH, "UBR",
+ Ci.nsIWindowsRegKey.WOW64_64);
+ data.windowsUBR = (ubr !== undefined) ? ubr : null;
+ }
+ data.installYear = getSysinfoProperty("installYear", null);
+ }
+
+ return data;
+ },
+
+ /**
+ * Get the HDD information.
+ * @return Object containing the HDD data.
+ */
+ _getHDDData: function () {
+ return {
+ profile: { // hdd where the profile folder is located
+ model: getSysinfoProperty("profileHDDModel", null),
+ revision: getSysinfoProperty("profileHDDRevision", null),
+ },
+ binary: { // hdd where the application binary is located
+ model: getSysinfoProperty("binHDDModel", null),
+ revision: getSysinfoProperty("binHDDRevision", null),
+ },
+ system: { // hdd where the system files are located
+ model: getSysinfoProperty("winHDDModel", null),
+ revision: getSysinfoProperty("winHDDRevision", null),
+ },
+ };
+ },
+
+ /**
+ * Get the GFX information.
+ * @return Object containing the GFX data.
+ */
+ _getGFXData: function () {
+ let gfxData = {
+ D2DEnabled: getGfxField("D2DEnabled", null),
+ DWriteEnabled: getGfxField("DWriteEnabled", null),
+ ContentBackend: getGfxField("ContentBackend", null),
+ // The following line is disabled due to main thread jank and will be enabled
+ // again as part of bug 1154500.
+ // DWriteVersion: getGfxField("DWriteVersion", null),
+ adapters: [],
+ monitors: [],
+ features: {},
+ };
+
+ if (!["gonk", "android", "linux"].includes(AppConstants.platform)) {
+ let gfxInfo = Cc["@mozilla.org/gfx/info;1"].getService(Ci.nsIGfxInfo);
+ try {
+ gfxData.monitors = gfxInfo.getMonitors();
+ } catch (e) {
+ this._log.error("nsIGfxInfo.getMonitors() caught error", e);
+ }
+ }
+
+ try {
+ let gfxInfo = Cc["@mozilla.org/gfx/info;1"].getService(Ci.nsIGfxInfo);
+ gfxData.features = gfxInfo.getFeatures();
+ } catch (e) {
+ this._log.error("nsIGfxInfo.getFeatures() caught error", e);
+ }
+
+ // GfxInfo does not yet expose a way to iterate through all the adapters.
+ gfxData.adapters.push(getGfxAdapter(""));
+ gfxData.adapters[0].GPUActive = true;
+
+ // If we have a second adapter add it to the gfxData.adapters section.
+ let hasGPU2 = getGfxField("adapterDeviceID2", null) !== null;
+ if (!hasGPU2) {
+ this._log.trace("_getGFXData - Only one display adapter detected.");
+ return gfxData;
+ }
+
+ this._log.trace("_getGFXData - Two display adapters detected.");
+
+ gfxData.adapters.push(getGfxAdapter("2"));
+ gfxData.adapters[1].GPUActive = getGfxField("isGPU2Active", null);
+
+ return gfxData;
+ },
+
+ /**
+ * Get the system data in object form.
+ * @return Object containing the system data.
+ */
+ _getSystem: function () {
+ let memoryMB = getSysinfoProperty("memsize", null);
+ if (memoryMB) {
+ // Send RAM size in megabytes. Rounding because sysinfo doesn't
+ // always provide RAM in multiples of 1024.
+ memoryMB = Math.round(memoryMB / 1024 / 1024);
+ }
+
+ let virtualMB = getSysinfoProperty("virtualmemsize", null);
+ if (virtualMB) {
+ // Send the total virtual memory size in megabytes. Rounding because
+ // sysinfo doesn't always provide RAM in multiples of 1024.
+ virtualMB = Math.round(virtualMB / 1024 / 1024);
+ }
+
+ let data = {
+ memoryMB: memoryMB,
+ virtualMaxMB: virtualMB,
+ cpu: this._getCpuData(),
+ os: this._getOSData(),
+ hdd: this._getHDDData(),
+ gfx: this._getGFXData(),
+ };
+
+ if (AppConstants.platform === "win") {
+ data.isWow64 = getSysinfoProperty("isWow64", null);
+ } else if (["gonk", "android"].includes(AppConstants.platform)) {
+ data.device = this._getDeviceData();
+ }
+
+ return data;
+ },
+
+ _onEnvironmentChange: function (what, oldEnvironment) {
+ this._log.trace("_onEnvironmentChange for " + what);
+
+ // We are already skipping change events in _checkChanges if there is a pending change task running.
+ if (this._shutdown) {
+ this._log.trace("_onEnvironmentChange - Already shut down.");
+ return;
+ }
+
+ for (let [name, listener] of this._changeListeners) {
+ try {
+ this._log.debug("_onEnvironmentChange - calling " + name);
+ listener(what, oldEnvironment);
+ } catch (e) {
+ this._log.error("_onEnvironmentChange - listener " + name + " caught error", e);
+ }
+ }
+ },
+
+ reset: function () {
+ this._shutdown = false;
+ this._delayedInitFinished = false;
+ }
+};
diff --git a/toolkit/components/telemetry/TelemetryEvent.cpp b/toolkit/components/telemetry/TelemetryEvent.cpp
new file mode 100644
index 000000000..1e8126f66
--- /dev/null
+++ b/toolkit/components/telemetry/TelemetryEvent.cpp
@@ -0,0 +1,687 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include <prtime.h>
+#include "nsITelemetry.h"
+#include "nsHashKeys.h"
+#include "nsDataHashtable.h"
+#include "nsClassHashtable.h"
+#include "nsTArray.h"
+#include "mozilla/StaticMutex.h"
+#include "mozilla/Unused.h"
+#include "mozilla/Maybe.h"
+#include "mozilla/StaticPtr.h"
+#include "jsapi.h"
+#include "nsJSUtils.h"
+#include "nsXULAppAPI.h"
+#include "nsUTF8Utils.h"
+
+#include "TelemetryCommon.h"
+#include "TelemetryEvent.h"
+#include "TelemetryEventData.h"
+
+using mozilla::StaticMutex;
+using mozilla::StaticMutexAutoLock;
+using mozilla::ArrayLength;
+using mozilla::Maybe;
+using mozilla::Nothing;
+using mozilla::Pair;
+using mozilla::StaticAutoPtr;
+using mozilla::Telemetry::Common::AutoHashtable;
+using mozilla::Telemetry::Common::IsExpiredVersion;
+using mozilla::Telemetry::Common::CanRecordDataset;
+using mozilla::Telemetry::Common::IsInDataset;
+using mozilla::Telemetry::Common::MsSinceProcessStart;
+using mozilla::Telemetry::Common::LogToBrowserConsole;
+
+////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////
+//
+// Naming: there are two kinds of functions in this file:
+//
+// * Functions taking a StaticMutexAutoLock: these can only be reached via
+// an interface function (TelemetryEvent::*). They expect the interface
+// function to have acquired |gTelemetryEventsMutex|, so they do not
+// have to be thread-safe.
+//
+// * Functions named TelemetryEvent::*. This is the external interface.
+// Entries and exits to these functions are serialised using
+// |gTelemetryEventsMutex|.
+//
+// Avoiding races and deadlocks:
+//
+// All functions in the external interface (TelemetryEvent::*) are
+// serialised using the mutex |gTelemetryEventsMutex|. This means
+// that the external interface is thread-safe, and the internal
+// functions can ignore thread safety. But it also brings a danger
+// of deadlock if any function in the external interface can get back
+// to that interface. That is, we will deadlock on any call chain like
+// this:
+//
+// TelemetryEvent::* -> .. any functions .. -> TelemetryEvent::*
+//
+// To reduce the danger of that happening, observe the following rules:
+//
+// * No function in TelemetryEvent::* may directly call, nor take the
+// address of, any other function in TelemetryEvent::*.
+//
+// * No internal function may call, nor take the address
+// of, any function in TelemetryEvent::*.
+
+////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////
+//
+// PRIVATE TYPES
+
+namespace {
+
+const uint32_t kEventCount = mozilla::Telemetry::EventID::EventCount;
+// This is a special event id used to mark expired events, to make expiry checks
+// faster at runtime.
+const uint32_t kExpiredEventId = kEventCount + 1;
+static_assert(kEventCount < kExpiredEventId, "Should not overflow.");
+
+// This is the hard upper limit on the number of event records we keep in storage.
+// If we cross this limit, we will drop any further event recording until elements
+// are removed from storage.
+const uint32_t kMaxEventRecords = 1000;
+// Maximum length of any passed value string, in UTF8 byte sequence length.
+const uint32_t kMaxValueByteLength = 80;
+// Maximum length of any string value in the extra dictionary, in UTF8 byte sequence length.
+const uint32_t kMaxExtraValueByteLength = 80;
+
+typedef nsDataHashtable<nsCStringHashKey, uint32_t> EventMapType;
+typedef nsClassHashtable<nsCStringHashKey, nsCString> StringMap;
+
+enum class RecordEventResult {
+ Ok,
+ UnknownEvent,
+ InvalidExtraKey,
+ StorageLimitReached,
+};
+
+struct ExtraEntry {
+ const nsCString key;
+ const nsCString value;
+};
+
+typedef nsTArray<ExtraEntry> ExtraArray;
+
+class EventRecord {
+public:
+ EventRecord(double timestamp, uint32_t eventId, const Maybe<nsCString>& value,
+ const ExtraArray& extra)
+ : mTimestamp(timestamp)
+ , mEventId(eventId)
+ , mValue(value)
+ , mExtra(extra)
+ {}
+
+ EventRecord(const EventRecord& other)
+ : mTimestamp(other.mTimestamp)
+ , mEventId(other.mEventId)
+ , mValue(other.mValue)
+ , mExtra(other.mExtra)
+ {}
+
+ EventRecord& operator=(const EventRecord& other) = delete;
+
+ double Timestamp() const { return mTimestamp; }
+ uint32_t EventId() const { return mEventId; }
+ const Maybe<nsCString>& Value() const { return mValue; }
+ const ExtraArray& Extra() const { return mExtra; }
+
+ size_t SizeOfExcludingThis(mozilla::MallocSizeOf aMallocSizeOf) const;
+
+private:
+ const double mTimestamp;
+ const uint32_t mEventId;
+ const Maybe<nsCString> mValue;
+ const ExtraArray mExtra;
+};
+
+// Implements the methods for EventInfo.
+const char*
+EventInfo::method() const
+{
+ return &gEventsStringTable[this->method_offset];
+}
+
+const char*
+EventInfo::object() const
+{
+ return &gEventsStringTable[this->object_offset];
+}
+
+// Implements the methods for CommonEventInfo.
+const char*
+CommonEventInfo::category() const
+{
+ return &gEventsStringTable[this->category_offset];
+}
+
+const char*
+CommonEventInfo::expiration_version() const
+{
+ return &gEventsStringTable[this->expiration_version_offset];
+}
+
+const char*
+CommonEventInfo::extra_key(uint32_t index) const
+{
+ MOZ_ASSERT(index < this->extra_count);
+ uint32_t key_index = gExtraKeysTable[this->extra_index + index];
+ return &gEventsStringTable[key_index];
+}
+
+// Implementation for the EventRecord class.
+size_t
+EventRecord::SizeOfExcludingThis(mozilla::MallocSizeOf aMallocSizeOf) const
+{
+ size_t n = 0;
+
+ if (mValue) {
+ n += mValue.value().SizeOfExcludingThisIfUnshared(aMallocSizeOf);
+ }
+
+ n += mExtra.ShallowSizeOfExcludingThis(aMallocSizeOf);
+ for (uint32_t i = 0; i < mExtra.Length(); ++i) {
+ n += mExtra[i].key.SizeOfExcludingThisIfUnshared(aMallocSizeOf);
+ n += mExtra[i].value.SizeOfExcludingThisIfUnshared(aMallocSizeOf);
+ }
+
+ return n;
+}
+
+nsCString
+UniqueEventName(const nsACString& category, const nsACString& method, const nsACString& object)
+{
+ nsCString name;
+ name.Append(category);
+ name.AppendLiteral("#");
+ name.Append(method);
+ name.AppendLiteral("#");
+ name.Append(object);
+ return name;
+}
+
+nsCString
+UniqueEventName(const EventInfo& info)
+{
+ return UniqueEventName(nsDependentCString(info.common_info.category()),
+ nsDependentCString(info.method()),
+ nsDependentCString(info.object()));
+}
+
+bool
+IsExpiredDate(uint32_t expires_days_since_epoch) {
+ if (expires_days_since_epoch == 0) {
+ return false;
+ }
+
+ const uint32_t days_since_epoch = PR_Now() / (PRTime(PR_USEC_PER_SEC) * 24 * 60 * 60);
+ return expires_days_since_epoch <= days_since_epoch;
+}
+
+void
+TruncateToByteLength(nsCString& str, uint32_t length)
+{
+ // last will be the index of the first byte of the current multi-byte sequence.
+ uint32_t last = RewindToPriorUTF8Codepoint(str.get(), length);
+ str.Truncate(last);
+}
+
+} // anonymous namespace
+
+////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////
+//
+// PRIVATE STATE, SHARED BY ALL THREADS
+
+namespace {
+
+// Set to true once this global state has been initialized.
+bool gInitDone = false;
+
+bool gCanRecordBase;
+bool gCanRecordExtended;
+
+// The Name -> ID cache map.
+EventMapType gEventNameIDMap(kEventCount);
+
+// The main event storage. Events are inserted here in recording order.
+StaticAutoPtr<nsTArray<EventRecord>> gEventRecords;
+
+} // namespace
+
+////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////
+//
+// PRIVATE: thread-unsafe helpers for event recording.
+
+namespace {
+
+bool
+CanRecordEvent(const StaticMutexAutoLock& lock, const CommonEventInfo& info)
+{
+ if (!gCanRecordBase) {
+ return false;
+ }
+
+ return CanRecordDataset(info.dataset, gCanRecordBase, gCanRecordExtended);
+}
+
+// Validates and stores one event into gEventRecords.
+// Must be called with gTelemetryEventsMutex held (witnessed by |lock|).
+// Returns a RecordEventResult explaining any rejection; expired events
+// are deliberately dropped with Ok (see comment below).
+RecordEventResult
+RecordEvent(const StaticMutexAutoLock& lock, double timestamp,
+            const nsACString& category, const nsACString& method,
+            const nsACString& object, const Maybe<nsCString>& value,
+            const ExtraArray& extra)
+{
+  // Apply hard limit on event count in storage.
+  if (gEventRecords->Length() >= kMaxEventRecords) {
+    return RecordEventResult::StorageLimitReached;
+  }
+
+  // Look up the event id.
+  const nsCString& name = UniqueEventName(category, method, object);
+  uint32_t eventId;
+  if (!gEventNameIDMap.Get(name, &eventId)) {
+    return RecordEventResult::UnknownEvent;
+  }
+
+  // If the event is expired, silently drop this call.
+  // We don't want recording for expired probes to be an error so code doesn't
+  // have to be removed at a specific time or version.
+  // Even logging warnings would become very noisy.
+  if (eventId == kExpiredEventId) {
+    return RecordEventResult::Ok;
+  }
+
+  // Check whether we can record this event.
+  const CommonEventInfo& common = gEventInfo[eventId].common_info;
+  if (!CanRecordEvent(lock, common)) {
+    return RecordEventResult::Ok;
+  }
+
+  // Check whether the extra keys passed are valid.
+  // Build the set of registered keys once, then verify each passed key.
+  nsTHashtable<nsCStringHashKey> validExtraKeys;
+  for (uint32_t i = 0; i < common.extra_count; ++i) {
+    validExtraKeys.PutEntry(nsDependentCString(common.extra_key(i)));
+  }
+
+  for (uint32_t i = 0; i < extra.Length(); ++i) {
+    if (!validExtraKeys.GetEntry(extra[i].key)) {
+      return RecordEventResult::InvalidExtraKey;
+    }
+  }
+
+  // Add event record.
+  gEventRecords->AppendElement(EventRecord(timestamp, eventId, value, extra));
+  return RecordEventResult::Ok;
+}
+
+} // anonymous namespace
+
+////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////
+//
+// EXTERNALLY VISIBLE FUNCTIONS in namespace TelemetryEvents::
+
+// This is a StaticMutex rather than a plain Mutex (1) so that
+// it gets initialised in a thread-safe manner the first time
+// it is used, and (2) because it is never de-initialised, and
+// a normal Mutex would show up as a leak in BloatView. StaticMutex
+// also has the "OffTheBooks" property, so it won't show as a leak
+// in BloatView.
+// Another reason to use a StaticMutex instead of a plain Mutex is
+// that, due to the nature of Telemetry, we cannot rely on having a
+// mutex initialized in InitializeGlobalState. Unfortunately, we
+// cannot make sure that no other function is called before this point.
+// This mutex guards all of the g* state declared above.
+static StaticMutex gTelemetryEventsMutex;
+
+// Sets up all global event-recording state: the recording flags, the event
+// storage array, and the static name -> id cache. Must be called exactly
+// once; other entry points bail out (or fail) until this has run.
+void
+TelemetryEvent::InitializeGlobalState(bool aCanRecordBase, bool aCanRecordExtended)
+{
+  StaticMutexAutoLock locker(gTelemetryEventsMutex);
+  MOZ_ASSERT(!gInitDone, "TelemetryEvent::InitializeGlobalState "
+             "may only be called once");
+
+  gCanRecordBase = aCanRecordBase;
+  gCanRecordExtended = aCanRecordExtended;
+
+  gEventRecords = new nsTArray<EventRecord>();
+
+  // Populate the static event name->id cache. Note that the event names are
+  // statically allocated and come from the automatically generated TelemetryEventData.h.
+  const uint32_t eventCount = static_cast<uint32_t>(mozilla::Telemetry::EventID::EventCount);
+  for (uint32_t i = 0; i < eventCount; ++i) {
+    const EventInfo& info = gEventInfo[i];
+    uint32_t eventId = i;
+
+    // If this event is expired, mark it with a special event id.
+    // This avoids doing expensive expiry checks at runtime.
+    if (IsExpiredVersion(info.common_info.expiration_version()) ||
+        IsExpiredDate(info.common_info.expiration_day)) {
+      eventId = kExpiredEventId;
+    }
+
+    gEventNameIDMap.Put(UniqueEventName(info), eventId);
+  }
+
+#ifdef DEBUG
+  gEventNameIDMap.MarkImmutable();
+#endif
+  gInitDone = true;
+}
+
+// Tears down all global event-recording state. After this runs, every other
+// entry point sees gInitDone == false and bails out early.
+void
+TelemetryEvent::DeInitializeGlobalState()
+{
+  StaticMutexAutoLock locker(gTelemetryEventsMutex);
+  MOZ_ASSERT(gInitDone);
+
+  // Mark the module uninitialized, disable recording, and release the
+  // name -> id cache and the stored events.
+  gInitDone = false;
+  gCanRecordBase = false;
+  gCanRecordExtended = false;
+  gEventNameIDMap.Clear();
+  gEventRecords->Clear();
+  gEventRecords = nullptr;
+}
+
+// Thread-safe toggle for base-telemetry recording.
+void
+TelemetryEvent::SetCanRecordBase(bool b)
+{
+  StaticMutexAutoLock locker(gTelemetryEventsMutex);
+  gCanRecordBase = b;
+}
+
+// Thread-safe toggle for extended-telemetry recording.
+// (Brace placement fixed to match SetCanRecordBase above.)
+void
+TelemetryEvent::SetCanRecordExtended(bool b)
+{
+  StaticMutexAutoLock locker(gTelemetryEventsMutex);
+  gCanRecordExtended = b;
+}
+
+/**
+ * JS API endpoint: records one event identified by (category, method, object)
+ * with an optional string value and an optional extra dictionary of
+ * string -> string entries.
+ * Malformed input is reported as a warning to the browser console and the
+ * call returns NS_OK; an unknown event triggers a JS error and returns
+ * NS_ERROR_INVALID_ARG so typos are caught during development.
+ */
+nsresult
+TelemetryEvent::RecordEvent(const nsACString& aCategory, const nsACString& aMethod,
+                            const nsACString& aObject, JS::HandleValue aValue,
+                            JS::HandleValue aExtra, JSContext* cx,
+                            uint8_t optional_argc)
+{
+  // Currently only recording in the parent process is supported.
+  if (!XRE_IsParentProcess()) {
+    return NS_OK;
+  }
+
+  // Get the current time.
+  double timestamp = -1;
+  nsresult rv = MsSinceProcessStart(&timestamp);
+  if (NS_FAILED(rv)) {
+    LogToBrowserConsole(nsIScriptError::warningFlag,
+                        NS_LITERAL_STRING("Failed to get time since process start."));
+    return NS_OK;
+  }
+
+  // Check value argument. optional_argc > 0 means the caller passed it.
+  if ((optional_argc > 0) && !aValue.isNull() && !aValue.isString()) {
+    LogToBrowserConsole(nsIScriptError::warningFlag,
+                        NS_LITERAL_STRING("Invalid type for value parameter."));
+    return NS_OK;
+  }
+
+  // Extract value parameter.
+  Maybe<nsCString> value;
+  if (aValue.isString()) {
+    nsAutoJSString jsStr;
+    if (!jsStr.init(cx, aValue)) {
+      LogToBrowserConsole(nsIScriptError::warningFlag,
+                          NS_LITERAL_STRING("Invalid string value for value parameter."));
+      return NS_OK;
+    }
+
+    // Over-long values are truncated (with a console warning), not rejected.
+    nsCString str = NS_ConvertUTF16toUTF8(jsStr);
+    if (str.Length() > kMaxValueByteLength) {
+      LogToBrowserConsole(nsIScriptError::warningFlag,
+                          NS_LITERAL_STRING("Value parameter exceeds maximum string length, truncating."));
+      TruncateToByteLength(str, kMaxValueByteLength);
+    }
+    value = mozilla::Some(str);
+  }
+
+  // Check extra argument. optional_argc > 1 means the caller passed it.
+  if ((optional_argc > 1) && !aExtra.isNull() && !aExtra.isObject()) {
+    LogToBrowserConsole(nsIScriptError::warningFlag,
+                        NS_LITERAL_STRING("Invalid type for extra parameter."));
+    return NS_OK;
+  }
+
+  // Extract extra dictionary.
+  ExtraArray extra;
+  if (aExtra.isObject()) {
+    JS::RootedObject obj(cx, &aExtra.toObject());
+    JS::Rooted<JS::IdVector> ids(cx, JS::IdVector(cx));
+    if (!JS_Enumerate(cx, obj, &ids)) {
+      LogToBrowserConsole(nsIScriptError::warningFlag,
+                          NS_LITERAL_STRING("Failed to enumerate object."));
+      return NS_OK;
+    }
+
+    for (size_t i = 0, n = ids.length(); i < n; i++) {
+      nsAutoJSString key;
+      if (!key.init(cx, ids[i])) {
+        LogToBrowserConsole(nsIScriptError::warningFlag,
+                            NS_LITERAL_STRING("Extra dictionary should only contain string keys."));
+        return NS_OK;
+      }
+
+      JS::Rooted<JS::Value> value(cx);
+      if (!JS_GetPropertyById(cx, obj, ids[i], &value)) {
+        LogToBrowserConsole(nsIScriptError::warningFlag,
+                            NS_LITERAL_STRING("Failed to get extra property."));
+        return NS_OK;
+      }
+
+      nsAutoJSString jsStr;
+      if (!value.isString() || !jsStr.init(cx, value)) {
+        LogToBrowserConsole(nsIScriptError::warningFlag,
+                            NS_LITERAL_STRING("Extra properties should have string values."));
+        return NS_OK;
+      }
+
+      // As with the value parameter, over-long extra values are truncated.
+      nsCString str = NS_ConvertUTF16toUTF8(jsStr);
+      if (str.Length() > kMaxExtraValueByteLength) {
+        LogToBrowserConsole(nsIScriptError::warningFlag,
+                            NS_LITERAL_STRING("Extra value exceeds maximum string length, truncating."));
+        TruncateToByteLength(str, kMaxExtraValueByteLength);
+      }
+
+      extra.AppendElement(ExtraEntry{NS_ConvertUTF16toUTF8(key), str});
+    }
+  }
+
+  // Lock for accessing internal data.
+  // While the lock is being held, no complex calls like JS calls can be made,
+  // as all of these could record Telemetry, which would result in deadlock.
+  RecordEventResult res;
+  {
+    StaticMutexAutoLock lock(gTelemetryEventsMutex);
+
+    if (!gInitDone) {
+      return NS_ERROR_FAILURE;
+    }
+
+    res = ::RecordEvent(lock, timestamp, aCategory, aMethod, aObject, value, extra);
+  }
+
+  // Trigger warnings or errors where needed.
+  switch (res) {
+    case RecordEventResult::UnknownEvent: {
+      JS_ReportErrorASCII(cx, R"(Unknown event: ["%s", "%s", "%s"])",
+                          PromiseFlatCString(aCategory).get(),
+                          PromiseFlatCString(aMethod).get(),
+                          PromiseFlatCString(aObject).get());
+      return NS_ERROR_INVALID_ARG;
+    }
+    case RecordEventResult::InvalidExtraKey:
+      LogToBrowserConsole(nsIScriptError::warningFlag,
+                          NS_LITERAL_STRING("Invalid extra key for event."));
+      return NS_OK;
+    case RecordEventResult::StorageLimitReached:
+      LogToBrowserConsole(nsIScriptError::warningFlag,
+                          NS_LITERAL_STRING("Event storage limit reached."));
+      return NS_OK;
+    default:
+      return NS_OK;
+  }
+}
+
+/**
+ * Serializes the stored events matching |aDataset| into a JS array of
+ * per-event arrays, optionally clearing the storage.
+ * The lock is only held while copying records out of storage; all JSAPI
+ * work happens without it, as JS calls can re-enter Telemetry.
+ *
+ * Fixes over the original: JS_NewUCStringCopyN and JS_NewArrayObject can
+ * return null under memory pressure; those results are now checked before
+ * use instead of being wrapped into values/handles unchecked.
+ */
+nsresult
+TelemetryEvent::CreateSnapshots(uint32_t aDataset, bool aClear, JSContext* cx,
+                                uint8_t optional_argc, JS::MutableHandleValue aResult)
+{
+  // Extract the events from storage.
+  nsTArray<EventRecord> events;
+  {
+    StaticMutexAutoLock locker(gTelemetryEventsMutex);
+
+    if (!gInitDone) {
+      return NS_ERROR_FAILURE;
+    }
+
+    uint32_t len = gEventRecords->Length();
+    for (uint32_t i = 0; i < len; ++i) {
+      const EventRecord& record = (*gEventRecords)[i];
+      const EventInfo& info = gEventInfo[record.EventId()];
+
+      if (IsInDataset(info.common_info.dataset, aDataset)) {
+        events.AppendElement(record);
+      }
+    }
+
+    if (aClear) {
+      gEventRecords->Clear();
+    }
+  }
+
+  // We serialize the events to a JS array.
+  JS::RootedObject eventsArray(cx, JS_NewArrayObject(cx, events.Length()));
+  if (!eventsArray) {
+    return NS_ERROR_FAILURE;
+  }
+
+  for (uint32_t i = 0; i < events.Length(); ++i) {
+    const EventRecord& record = events[i];
+    const EventInfo& info = gEventInfo[record.EventId()];
+
+    // Each entry is an array of one of the forms:
+    // [timestamp, category, method, object, value]
+    // [timestamp, category, method, object, null, extra]
+    // [timestamp, category, method, object, value, extra]
+    JS::AutoValueVector items(cx);
+
+    // Add timestamp.
+    JS::Rooted<JS::Value> val(cx);
+    if (!items.append(JS::NumberValue(floor(record.Timestamp())))) {
+      return NS_ERROR_FAILURE;
+    }
+
+    // Add category, method, object.
+    const char* strings[] = {
+      info.common_info.category(),
+      info.method(),
+      info.object(),
+    };
+    for (const char* s : strings) {
+      const NS_ConvertUTF8toUTF16 wide(s);
+      JSString* jsStr = JS_NewUCStringCopyN(cx, wide.Data(), wide.Length());
+      // Null on OOM; JS::StringValue must not wrap a null string.
+      if (!jsStr || !items.append(JS::StringValue(jsStr))) {
+        return NS_ERROR_FAILURE;
+      }
+    }
+
+    // Add the optional string value only when needed.
+    // When extra is empty and this has no value, we can save a little space.
+    if (record.Value()) {
+      const NS_ConvertUTF8toUTF16 wide(record.Value().value());
+      JSString* jsStr = JS_NewUCStringCopyN(cx, wide.Data(), wide.Length());
+      if (!jsStr || !items.append(JS::StringValue(jsStr))) {
+        return NS_ERROR_FAILURE;
+      }
+    } else if (!record.Extra().IsEmpty()) {
+      // Keep the positional layout: extra always sits at index 5.
+      if (!items.append(JS::NullValue())) {
+        return NS_ERROR_FAILURE;
+      }
+    }
+
+    // Add the optional extra dictionary.
+    // To save a little space, only add it when it is not empty.
+    if (!record.Extra().IsEmpty()) {
+      JS::RootedObject obj(cx, JS_NewPlainObject(cx));
+      if (!obj) {
+        return NS_ERROR_FAILURE;
+      }
+
+      // Add extra key & value entries.
+      const ExtraArray& extra = record.Extra();
+      for (uint32_t j = 0; j < extra.Length(); ++j) {
+        const NS_ConvertUTF8toUTF16 wide(extra[j].value);
+        JSString* jsStr = JS_NewUCStringCopyN(cx, wide.Data(), wide.Length());
+        if (!jsStr) {
+          return NS_ERROR_FAILURE;
+        }
+        JS::Rooted<JS::Value> value(cx, JS::StringValue(jsStr));
+
+        if (!JS_DefineProperty(cx, obj, extra[j].key.get(), value, JSPROP_ENUMERATE)) {
+          return NS_ERROR_FAILURE;
+        }
+      }
+      val.setObject(*obj);
+
+      if (!items.append(val)) {
+        return NS_ERROR_FAILURE;
+      }
+    }
+
+    // Add the record to the events array.
+    JS::RootedObject itemsArray(cx, JS_NewArrayObject(cx, items));
+    // The original passed a potentially-null handle to JS_DefineElement.
+    if (!itemsArray) {
+      return NS_ERROR_FAILURE;
+    }
+    if (!JS_DefineElement(cx, eventsArray, i, itemsArray, JSPROP_ENUMERATE)) {
+      return NS_ERROR_FAILURE;
+    }
+  }
+
+  aResult.setObject(*eventsArray);
+  return NS_OK;
+}
+
+/**
+ * Resets all the stored events. This is intended to be only used in tests.
+ * The name -> id cache is left untouched.
+ */
+void
+TelemetryEvent::ClearEvents()
+{
+  StaticMutexAutoLock locker(gTelemetryEventsMutex);
+
+  // Nothing to clear before initialization (or after de-initialization).
+  if (gInitDone) {
+    gEventRecords->Clear();
+  }
+}
+
+// Reports the heap memory used by the event storage and the name -> id map.
+size_t
+TelemetryEvent::SizeOfIncludingThis(mozilla::MallocSizeOf aMallocSizeOf)
+{
+  StaticMutexAutoLock locker(gTelemetryEventsMutex);
+
+  // Guard against calls before InitializeGlobalState() or after
+  // DeInitializeGlobalState(): gEventRecords is null then, and the
+  // original code dereferenced it unconditionally. ClearEvents() already
+  // has this guard; keep the two consistent.
+  if (!gInitDone || !gEventRecords) {
+    return 0;
+  }
+
+  size_t n = 0;
+
+  n += gEventRecords->ShallowSizeOfIncludingThis(aMallocSizeOf);
+  for (uint32_t i = 0; i < gEventRecords->Length(); ++i) {
+    n += (*gEventRecords)[i].SizeOfExcludingThis(aMallocSizeOf);
+  }
+
+  n += gEventNameIDMap.ShallowSizeOfExcludingThis(aMallocSizeOf);
+  for (auto iter = gEventNameIDMap.ConstIter(); !iter.Done(); iter.Next()) {
+    n += iter.Key().SizeOfExcludingThisIfUnshared(aMallocSizeOf);
+  }
+
+  return n;
+}
diff --git a/toolkit/components/telemetry/TelemetryEvent.h b/toolkit/components/telemetry/TelemetryEvent.h
new file mode 100644
index 000000000..34a0720b7
--- /dev/null
+++ b/toolkit/components/telemetry/TelemetryEvent.h
@@ -0,0 +1,39 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2; -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef TelemetryEvent_h__
+#define TelemetryEvent_h__
+
+#include "mozilla/TelemetryEventEnums.h"
+
+// This module is internal to Telemetry. It encapsulates Telemetry's
+// event recording and storage logic. It should only be used by
+// Telemetry.cpp. These functions should not be used anywhere else.
+// For the public interface to Telemetry functionality, see Telemetry.h.
+
+namespace TelemetryEvent {
+
+// Initializes the global event-recording state; must be called once before
+// any of the functions below are used.
+void InitializeGlobalState(bool canRecordBase, bool canRecordExtended);
+// Releases all global event-recording state.
+void DeInitializeGlobalState();
+
+// Toggle recording of base/extended telemetry events at runtime.
+void SetCanRecordBase(bool b);
+void SetCanRecordExtended(bool b);
+
+// JS API Endpoints.
+nsresult RecordEvent(const nsACString& aCategory, const nsACString& aMethod,
+                     const nsACString& aObject, JS::HandleValue aValue,
+                     JS::HandleValue aExtra, JSContext* aCx,
+                     uint8_t optional_argc);
+// Serializes stored events matching aDataset into a JS array, optionally
+// clearing the storage.
+nsresult CreateSnapshots(uint32_t aDataset, bool aClear, JSContext* aCx,
+                         uint8_t optional_argc, JS::MutableHandleValue aResult);
+
+// Only to be used for testing.
+void ClearEvents();
+
+size_t SizeOfIncludingThis(mozilla::MallocSizeOf aMallocSizeOf);
+
+} // namespace TelemetryEvent
+
+#endif // TelemetryEvent_h__
diff --git a/toolkit/components/telemetry/TelemetryHistogram.cpp b/toolkit/components/telemetry/TelemetryHistogram.cpp
new file mode 100644
index 000000000..abae9c613
--- /dev/null
+++ b/toolkit/components/telemetry/TelemetryHistogram.cpp
@@ -0,0 +1,2725 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jsapi.h"
+#include "jsfriendapi.h"
+#include "js/GCAPI.h"
+#include "nsString.h"
+#include "nsTHashtable.h"
+#include "nsHashKeys.h"
+#include "nsBaseHashtable.h"
+#include "nsClassHashtable.h"
+#include "nsITelemetry.h"
+
+#include "mozilla/dom/ContentChild.h"
+#include "mozilla/dom/ToJSValue.h"
+#include "mozilla/gfx/GPUParent.h"
+#include "mozilla/gfx/GPUProcessManager.h"
+#include "mozilla/Atomics.h"
+#include "mozilla/StartupTimeline.h"
+#include "mozilla/StaticMutex.h"
+#include "mozilla/StaticPtr.h"
+#include "mozilla/Unused.h"
+
+#include "TelemetryCommon.h"
+#include "TelemetryHistogram.h"
+
+#include "base/histogram.h"
+
+using base::Histogram;
+using base::StatisticsRecorder;
+using base::BooleanHistogram;
+using base::CountHistogram;
+using base::FlagHistogram;
+using base::LinearHistogram;
+using mozilla::StaticMutex;
+using mozilla::StaticMutexAutoLock;
+using mozilla::StaticAutoPtr;
+using mozilla::Telemetry::Accumulation;
+using mozilla::Telemetry::KeyedAccumulation;
+
+
+////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////
+//
+// Naming: there are two kinds of functions in this file:
+//
+// * Functions named internal_*: these can only be reached via an
+// interface function (TelemetryHistogram::*). They mostly expect
+// the interface function to have acquired
+// |gTelemetryHistogramMutex|, so they do not have to be
+// thread-safe. However, those internal_* functions that are
+// reachable from internal_WrapAndReturnHistogram and
+// internal_WrapAndReturnKeyedHistogram can sometimes be called
+// without |gTelemetryHistogramMutex|, and so might be racey.
+//
+// * Functions named TelemetryHistogram::*. This is the external interface.
+// Entries and exits to these functions are serialised using
+// |gTelemetryHistogramMutex|, except for GetAddonHistogramSnapshots,
+// GetKeyedHistogramSnapshots and CreateHistogramSnapshots.
+//
+// Avoiding races and deadlocks:
+//
+// All functions in the external interface (TelemetryHistogram::*) are
+// serialised using the mutex |gTelemetryHistogramMutex|. This means
+// that the external interface is thread-safe, and many of the
+// internal_* functions can ignore thread safety. But it also brings
+// a danger of deadlock if any function in the external interface can
+// get back to that interface. That is, we will deadlock on any call
+// chain like this
+//
+// TelemetryHistogram::* -> .. any functions .. -> TelemetryHistogram::*
+//
+// To reduce the danger of that happening, observe the following rules:
+//
+// * No function in TelemetryHistogram::* may directly call, nor take the
+// address of, any other function in TelemetryHistogram::*.
+//
+// * No internal function internal_* may call, nor take the address
+// of, any function in TelemetryHistogram::*.
+//
+// internal_WrapAndReturnHistogram and
+// internal_WrapAndReturnKeyedHistogram are not protected by
+// |gTelemetryHistogramMutex| because they make calls to the JS
+// engine, but that can in turn call back to Telemetry and hence back
+// to a TelemetryHistogram:: function, in order to report GC and other
+// statistics. This would lead to deadlock due to attempted double
+// acquisition of |gTelemetryHistogramMutex|, if the internal_* functions
+// were required to be protected by |gTelemetryHistogramMutex|. To
+// break that cycle, we relax that requirement. Unfortunately this
+// means that this file is not guaranteed race-free.
+
+
+////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////
+//
+// PRIVATE TYPES
+
+#define EXPIRED_ID "__expired__"
+#define SUBSESSION_HISTOGRAM_PREFIX "sub#"
+#define KEYED_HISTOGRAM_NAME_SEPARATOR "#"
+#define CONTENT_HISTOGRAM_SUFFIX "#content"
+#define GPU_HISTOGRAM_SUFFIX "#gpu"
+
+namespace {
+
+using mozilla::Telemetry::Common::AutoHashtable;
+using mozilla::Telemetry::Common::IsExpiredVersion;
+using mozilla::Telemetry::Common::CanRecordDataset;
+using mozilla::Telemetry::Common::IsInDataset;
+
+class KeyedHistogram;
+
+// Maps a histogram name (static C string) to its numeric enum id.
+typedef nsBaseHashtableET<nsDepCharHashKey, mozilla::Telemetry::ID>
+  CharPtrEntryType;
+
+typedef AutoHashtable<CharPtrEntryType> HistogramMapType;
+
+// Maps a keyed-histogram name to its KeyedHistogram instance.
+typedef nsClassHashtable<nsCStringHashKey, KeyedHistogram>
+  KeyedHistogramMapType;
+
+// Hardcoded probes
+struct HistogramInfo {
+  uint32_t min;
+  uint32_t max;
+  uint32_t bucketCount;
+  uint32_t histogramType;
+  uint32_t id_offset;          // offset of the name into gHistogramStringTable
+  uint32_t expiration_offset;  // offset of the expiration version string
+  uint32_t dataset;
+  uint32_t label_index;        // first entry in gHistogramLabelTable (categorical)
+  uint32_t label_count;        // number of labels (categorical histograms only)
+  bool keyed;
+
+  const char *id() const;
+  const char *expiration() const;
+  nsresult label_id(const char* label, uint32_t* labelId) const;
+};
+
+// Parameters of a histogram registered dynamically by an add-on.
+struct AddonHistogramInfo {
+  uint32_t min;
+  uint32_t max;
+  uint32_t bucketCount;
+  uint32_t histogramType;
+  Histogram *h;
+};
+
+// Result of reflecting a histogram snapshot into JS.
+enum reflectStatus {
+  REFLECT_OK,
+  REFLECT_CORRUPT,
+  REFLECT_FAILURE
+};
+
+typedef StatisticsRecorder::Histograms::iterator HistogramIterator;
+
+typedef nsBaseHashtableET<nsCStringHashKey, AddonHistogramInfo>
+  AddonHistogramEntryType;
+
+typedef AutoHashtable<AddonHistogramEntryType>
+  AddonHistogramMapType;
+
+// Maps an add-on id to the map of histograms it registered.
+typedef nsBaseHashtableET<nsCStringHashKey, AddonHistogramMapType *>
+  AddonEntryType;
+
+typedef AutoHashtable<AddonEntryType> AddonMapType;
+
+} // namespace
+
+
+////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////
+//
+// PRIVATE STATE, SHARED BY ALL THREADS
+
+namespace {
+
+// Set to true once this global state has been initialized
+bool gInitDone = false;
+
+// Recording policy flags for base/extended telemetry.
+bool gCanRecordBase = false;
+bool gCanRecordExtended = false;
+
+// Name -> enum-id lookup for all registered histograms.
+HistogramMapType gHistogramMap(mozilla::Telemetry::HistogramCount);
+
+KeyedHistogramMapType gKeyedHistograms;
+
+// Per-id flag marking histograms whose snapshots failed consistency checks.
+bool gCorruptHistograms[mozilla::Telemetry::HistogramCount];
+
+// This is for gHistograms, gHistogramStringTable
+#include "TelemetryHistogramData.inc"
+
+AddonMapType gAddonMap;
+
+// The singleton StatisticsRecorder object for this process.
+base::StatisticsRecorder* gStatisticsRecorder = nullptr;
+
+// For batching and sending child process accumulations to the parent
+nsITimer* gIPCTimer = nullptr;
+mozilla::Atomic<bool, mozilla::Relaxed> gIPCTimerArmed(false);
+mozilla::Atomic<bool, mozilla::Relaxed> gIPCTimerArming(false);
+StaticAutoPtr<nsTArray<Accumulation>> gAccumulations;
+StaticAutoPtr<nsTArray<KeyedAccumulation>> gKeyedAccumulations;
+
+} // namespace
+
+
+////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////
+//
+// PRIVATE CONSTANTS
+
+namespace {
+
+// List of histogram IDs which should have recording disabled initially.
+const mozilla::Telemetry::ID kRecordingInitiallyDisabledIDs[] = {
+  mozilla::Telemetry::FX_REFRESH_DRIVER_SYNC_SCROLL_FRAME_DELAY_MS,
+
+  // The array must not be empty. Leave these items here.
+  mozilla::Telemetry::TELEMETRY_TEST_COUNT_INIT_NO_RECORD,
+  mozilla::Telemetry::TELEMETRY_TEST_KEYED_COUNT_INIT_NO_RECORD
+};
+
+// Sending each remote accumulation immediately places undue strain on the
+// IPC subsystem. Batch the remote accumulations for a period of time before
+// sending them all at once. This value was chosen as a balance between data
+// timeliness and performance (see bug 1218576)
+const uint32_t kBatchTimeoutMs = 2000;
+
+// To stop growing unbounded in memory while waiting for kBatchTimeoutMs to
+// drain the g*Accumulations arrays, request an immediate flush if the arrays
+// manage to reach this high water mark of elements.
+const size_t kAccumulationsArrayHighWaterMark = 5 * 1024;
+
+} // namespace
+
+
+////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////
+//
+// PRIVATE: Misc small helpers
+
+namespace {
+
+// Returns true if base telemetry recording is currently enabled.
+bool
+internal_CanRecordBase() {
+  return gCanRecordBase;
+}
+
+// Returns true if extended telemetry recording is currently enabled.
+bool
+internal_CanRecordExtended() {
+  return gCanRecordExtended;
+}
+
+// Checks that |aID| is a valid histogram enum value (in range).
+bool
+internal_IsHistogramEnumId(mozilla::Telemetry::ID aID)
+{
+  static_assert(((mozilla::Telemetry::ID)-1 > 0), "ID should be unsigned.");
+  return aID < mozilla::Telemetry::HistogramCount;
+}
+
+// Note: this is completely unrelated to mozilla::IsEmpty.
+// A histogram is considered empty when its snapshot has a zero sum and an
+// untouched first bucket.
+bool
+internal_IsEmpty(const Histogram *h)
+{
+  Histogram::SampleSet snapshot;
+  h->SnapshotSample(&snapshot);
+  return snapshot.sum() == 0 && snapshot.counts(0) == 0;
+}
+
+// Expired histograms are all created under the shared EXPIRED_ID name,
+// so expiry can be detected by a simple name comparison.
+bool
+internal_IsExpired(const Histogram *histogram)
+{
+  const std::string& name = histogram->histogram_name();
+  return name == EXPIRED_ID;
+}
+
+// Collects the ids of all registered, non-expired histograms matching the
+// requested keyed-ness and dataset. The ids are returned as heap-allocated
+// C strings owned by the caller.
+nsresult
+internal_GetRegisteredHistogramIds(bool keyed, uint32_t dataset,
+                                   uint32_t *aCount, char*** aHistograms)
+{
+  nsTArray<char*> matches;
+
+  for (size_t i = 0; i < mozilla::ArrayLength(gHistograms); ++i) {
+    const HistogramInfo& info = gHistograms[i];
+    const bool wanted = !IsExpiredVersion(info.expiration()) &&
+                        info.keyed == keyed &&
+                        IsInDataset(info.dataset, dataset);
+    if (!wanted) {
+      continue;
+    }
+
+    const char* id = info.id();
+    matches.AppendElement(
+      static_cast<char*>(nsMemory::Clone(id, strlen(id) + 1)));
+  }
+
+  // Hand the collected pointers back in a single malloc'ed array.
+  const size_t bytes = matches.Length() * sizeof(char*);
+  char** histograms = static_cast<char**>(moz_xmalloc(bytes));
+  memcpy(histograms, matches.Elements(), bytes);
+  *aHistograms = histograms;
+  *aCount = matches.Length();
+
+  return NS_OK;
+}
+
+// The histogram's name, resolved from the shared string table.
+const char *
+HistogramInfo::id() const
+{
+  return gHistogramStringTable + id_offset;
+}
+
+// The histogram's expiration version string, from the same string table.
+const char *
+HistogramInfo::expiration() const
+{
+  return gHistogramStringTable + expiration_offset;
+}
+
+// Maps a label string of a categorical histogram to its numeric index.
+// Fails for non-categorical histograms or unknown labels.
+nsresult
+HistogramInfo::label_id(const char* label, uint32_t* labelId) const
+{
+  MOZ_ASSERT(label);
+  MOZ_ASSERT(this->histogramType == nsITelemetry::HISTOGRAM_CATEGORICAL);
+  if (this->histogramType != nsITelemetry::HISTOGRAM_CATEGORICAL) {
+    return NS_ERROR_FAILURE;
+  }
+
+  // The labels of this histogram occupy gHistogramLabelTable entries
+  // [label_index, label_index + label_count); each entry is an offset
+  // into gHistogramStringTable.
+  for (uint32_t idx = 0; idx < this->label_count; ++idx) {
+    const uint32_t offset = gHistogramLabelTable[this->label_index + idx];
+    const char* const candidate = &gHistogramStringTable[offset];
+    if (::strcmp(label, candidate) == 0) {
+      *labelId = idx;
+      return NS_OK;
+    }
+  }
+
+  return NS_ERROR_FAILURE;
+}
+
+// Dispatches |aEvent| to the main thread, warning (and dropping it) if the
+// main thread can no longer be obtained.
+void internal_DispatchToMainThread(already_AddRefed<nsIRunnable>&& aEvent)
+{
+  // Take ownership immediately so the runnable is released even on failure.
+  nsCOMPtr<nsIRunnable> runnable(aEvent);
+  nsCOMPtr<nsIThread> mainThread;
+  if (NS_FAILED(NS_GetMainThread(getter_AddRefs(mainThread)))) {
+    NS_WARNING("NS_FAILED DispatchToMainThread. Maybe we're shutting down?");
+    return;
+  }
+  mainThread->Dispatch(runnable, 0);
+}
+
+} // namespace
+
+
+////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////
+//
+// PRIVATE: Histogram Get, Add, Clone, Clear functions
+
+namespace {
+
+// Validates histogram creation parameters.
+// Boolean, flag and count histograms have fixed layouts, so nothing is
+// checked for them; all other types require explicit, sane min/max/buckets.
+nsresult
+internal_CheckHistogramArguments(uint32_t histogramType,
+                                 uint32_t min, uint32_t max,
+                                 uint32_t bucketCount, bool haveOptArgs)
+{
+  const bool fixedLayout = histogramType == nsITelemetry::HISTOGRAM_BOOLEAN ||
+                           histogramType == nsITelemetry::HISTOGRAM_FLAG ||
+                           histogramType == nsITelemetry::HISTOGRAM_COUNT;
+  if (fixedLayout) {
+    return NS_OK;
+  }
+
+  // The min, max & bucketCount arguments are not optional for this type.
+  if (!haveOptArgs) {
+    return NS_ERROR_ILLEGAL_VALUE;
+  }
+
+  // Sanity checks for histogram parameters.
+  if (min < 1 || min >= max || bucketCount <= 2) {
+    return NS_ERROR_ILLEGAL_VALUE;
+  }
+
+  return NS_OK;
+}
+
+/*
+ * min, max & bucketCount are optional for boolean, flag & count histograms.
+ * haveOptArgs has to be set if the caller provides them.
+ * On success, *result points to the (possibly pre-existing) histogram
+ * produced by the matching Factory.
+ */
+nsresult
+internal_HistogramGet(const char *name, const char *expiration,
+                      uint32_t histogramType, uint32_t min, uint32_t max,
+                      uint32_t bucketCount, bool haveOptArgs,
+                      Histogram **result)
+{
+  nsresult rv = internal_CheckHistogramArguments(histogramType, min, max,
+                                                 bucketCount, haveOptArgs);
+  if (NS_FAILED(rv)) {
+    return rv;
+  }
+
+  // All expired histograms are redirected to a single shared linear
+  // histogram registered under EXPIRED_ID, so they cost almost nothing.
+  if (IsExpiredVersion(expiration)) {
+    name = EXPIRED_ID;
+    min = 1;
+    max = 2;
+    bucketCount = 3;
+    histogramType = nsITelemetry::HISTOGRAM_LINEAR;
+  }
+
+  switch (histogramType) {
+  case nsITelemetry::HISTOGRAM_EXPONENTIAL:
+    *result = Histogram::FactoryGet(name, min, max, bucketCount, Histogram::kUmaTargetedHistogramFlag);
+    break;
+  case nsITelemetry::HISTOGRAM_LINEAR:
+  case nsITelemetry::HISTOGRAM_CATEGORICAL:
+    *result = LinearHistogram::FactoryGet(name, min, max, bucketCount, Histogram::kUmaTargetedHistogramFlag);
+    break;
+  case nsITelemetry::HISTOGRAM_BOOLEAN:
+    *result = BooleanHistogram::FactoryGet(name, Histogram::kUmaTargetedHistogramFlag);
+    break;
+  case nsITelemetry::HISTOGRAM_FLAG:
+    *result = FlagHistogram::FactoryGet(name, Histogram::kUmaTargetedHistogramFlag);
+    break;
+  case nsITelemetry::HISTOGRAM_COUNT:
+    *result = CountHistogram::FactoryGet(name, Histogram::kUmaTargetedHistogramFlag);
+    break;
+  default:
+    NS_ASSERTION(false, "Invalid histogram type");
+    return NS_ERROR_INVALID_ARG;
+  }
+  return NS_OK;
+}
+
+// Read the process type from the given histogram name. The process type, if
+// one exists, is embedded in a suffix ("#content" or "#gpu"); names without
+// a suffix belong to the parent (default) process.
+GeckoProcessType
+GetProcessFromName(const nsACString& aString)
+{
+  if (StringEndsWith(aString, NS_LITERAL_CSTRING(GPU_HISTOGRAM_SUFFIX))) {
+    return GeckoProcessType_GPU;
+  }
+  if (StringEndsWith(aString, NS_LITERAL_CSTRING(CONTENT_HISTOGRAM_SUFFIX))) {
+    return GeckoProcessType_Content;
+  }
+  return GeckoProcessType_Default;
+}
+
+// Returns the histogram-name suffix for a process type, or nullptr for the
+// parent process (which records under the plain name).
+const char*
+SuffixForProcessType(GeckoProcessType aProcessType)
+{
+  if (aProcessType == GeckoProcessType_Default) {
+    return nullptr;
+  }
+  if (aProcessType == GeckoProcessType_Content) {
+    return CONTENT_HISTOGRAM_SUFFIX;
+  }
+  if (aProcessType == GeckoProcessType_GPU) {
+    return GPU_HISTOGRAM_SUFFIX;
+  }
+  MOZ_ASSERT_UNREACHABLE("unknown process type");
+  return nullptr;
+}
+
+// Looks up the name -> id map entry for |aName|. The map is keyed on the
+// bare histogram name, so any process suffix is stripped first.
+CharPtrEntryType*
+internal_GetHistogramMapEntry(const char* aName)
+{
+  nsDependentCString fullName(aName);
+  const char* suffix = SuffixForProcessType(GetProcessFromName(fullName));
+  if (!suffix) {
+    return gHistogramMap.GetEntry(aName);
+  }
+
+  auto bareName = Substring(fullName, 0, fullName.Length() - strlen(suffix));
+  return gHistogramMap.GetEntry(PromiseFlatCString(bareName).get());
+}
+
+// Resolves a histogram name (possibly process-suffixed) to its enum id.
+// Fails before global state is initialized or for unknown names.
+nsresult
+internal_GetHistogramEnumId(const char *name, mozilla::Telemetry::ID *id)
+{
+  if (!gInitDone) {
+    return NS_ERROR_FAILURE;
+  }
+
+  CharPtrEntryType *entry = internal_GetHistogramMapEntry(name);
+  if (entry) {
+    *id = entry->mData;
+    return NS_OK;
+  }
+  return NS_ERROR_INVALID_ARG;
+}
+
+// O(1) histogram lookup by numeric id
+// Lazily creates the histogram on first access and caches it in a static
+// per-process-type array. NOTE(review): the static caches rely on the
+// callers serializing through gTelemetryHistogramMutex (see the file-level
+// comment) — confirm before calling from unlocked paths.
+nsresult
+internal_GetHistogramByEnumId(mozilla::Telemetry::ID id, Histogram **ret, GeckoProcessType aProcessType)
+{
+  static Histogram* knownHistograms[mozilla::Telemetry::HistogramCount] = {0};
+  static Histogram* knownContentHistograms[mozilla::Telemetry::HistogramCount] = {0};
+  static Histogram* knownGPUHistograms[mozilla::Telemetry::HistogramCount] = {0};
+
+  Histogram** knownList = nullptr;
+
+  switch (aProcessType) {
+  case GeckoProcessType_Default:
+    knownList = knownHistograms;
+    break;
+  case GeckoProcessType_Content:
+    knownList = knownContentHistograms;
+    break;
+  case GeckoProcessType_GPU:
+    knownList = knownGPUHistograms;
+    break;
+  default:
+    MOZ_ASSERT_UNREACHABLE("unknown process type");
+    return NS_ERROR_FAILURE;
+  }
+
+  // Fast path: already created and cached.
+  Histogram* h = knownList[id];
+  if (h) {
+    *ret = h;
+    return NS_OK;
+  }
+
+  // Keyed histograms are managed separately (see KeyedHistogram).
+  const HistogramInfo &p = gHistograms[id];
+  if (p.keyed) {
+    return NS_ERROR_FAILURE;
+  }
+
+  // Non-parent processes record under a suffixed name.
+  nsCString histogramName;
+  histogramName.Append(p.id());
+  if (const char* suffix = SuffixForProcessType(aProcessType)) {
+    histogramName.AppendASCII(suffix);
+  }
+
+  nsresult rv = internal_HistogramGet(histogramName.get(), p.expiration(),
+                                      p.histogramType, p.min, p.max,
+                                      p.bucketCount, true, &h);
+  if (NS_FAILED(rv))
+    return rv;
+
+#ifdef DEBUG
+  // Check that the C++ Histogram code computes the same ranges as the
+  // Python histogram code.
+  if (!IsExpiredVersion(p.expiration())) {
+    const struct bounds &b = gBucketLowerBoundIndex[id];
+    if (b.length != 0) {
+      MOZ_ASSERT(size_t(b.length) == h->bucket_count(),
+                 "C++/Python bucket # mismatch");
+      for (int i = 0; i < b.length; ++i) {
+        MOZ_ASSERT(gBucketLowerBounds[b.offset + i] == h->ranges(i),
+                   "C++/Python bucket mismatch");
+      }
+    }
+  }
+#endif
+
+  knownList[id] = h;
+  *ret = h;
+  return NS_OK;
+}
+
+// Resolves a (possibly process-suffixed) histogram name to its Histogram
+// instance: first map the name to an enum id, then fetch the histogram for
+// the process type encoded in the suffix.
+nsresult
+internal_GetHistogramByName(const nsACString &name, Histogram **ret)
+{
+  mozilla::Telemetry::ID id;
+  nsresult rv = internal_GetHistogramEnumId(PromiseFlatCString(name).get(), &id);
+  if (NS_FAILED(rv)) {
+    return rv;
+  }
+
+  return internal_GetHistogramByEnumId(id, ret, GetProcessFromName(name));
+}
+
+
+#if !defined(MOZ_WIDGET_GONK) && !defined(MOZ_WIDGET_ANDROID)
+
+/**
+ * This clones a histogram |existing| with the id |existingId| to a
+ * new histogram with the name |newName|.
+ * For simplicity this is limited to registered histograms.
+ * Returns nullptr if the clone could not be created.
+ */
+Histogram*
+internal_CloneHistogram(const nsACString& newName,
+                        mozilla::Telemetry::ID existingId,
+                        Histogram& existing)
+{
+  // Create an empty histogram with the same layout under the new name.
+  const HistogramInfo& info = gHistograms[existingId];
+  Histogram* clone = nullptr;
+  nsresult rv = internal_HistogramGet(PromiseFlatCString(newName).get(),
+                                      info.expiration(),
+                                      info.histogramType,
+                                      existing.declared_min(),
+                                      existing.declared_max(),
+                                      existing.bucket_count(),
+                                      true, &clone);
+  if (NS_FAILED(rv)) {
+    return nullptr;
+  }
+
+  // Copy the accumulated samples over.
+  Histogram::SampleSet samples;
+  existing.SnapshotSample(&samples);
+  clone->AddSampleSet(samples);
+
+  return clone;
+}
+
+// std::string adapter for the nsACString overload above.
+GeckoProcessType
+GetProcessFromName(const std::string& aString)
+{
+  return GetProcessFromName(
+    nsDependentCString(aString.c_str(), aString.length()));
+}
+
+// Returns the subsession mirror ("sub#<name>") of |existing|, creating and
+// caching it on first use, or nullptr when no mirror applies (keyed
+// histograms, unknown names, unknown process types, or when |existing|
+// already is a subsession histogram).
+Histogram*
+internal_GetSubsessionHistogram(Histogram& existing)
+{
+  mozilla::Telemetry::ID id;
+  nsresult rv
+    = internal_GetHistogramEnumId(existing.histogram_name().c_str(), &id);
+  if (NS_FAILED(rv) || gHistograms[id].keyed) {
+    return nullptr;
+  }
+
+  // Per-process-type caches of previously created subsession clones.
+  static Histogram* subsession[mozilla::Telemetry::HistogramCount] = {};
+  static Histogram* subsessionContent[mozilla::Telemetry::HistogramCount] = {};
+  static Histogram* subsessionGPU[mozilla::Telemetry::HistogramCount] = {};
+
+  Histogram** cache = nullptr;
+
+  GeckoProcessType process = GetProcessFromName(existing.histogram_name());
+  switch (process) {
+  case GeckoProcessType_Default:
+    cache = subsession;
+    break;
+  case GeckoProcessType_Content:
+    cache = subsessionContent;
+    break;
+  case GeckoProcessType_GPU:
+    cache = subsessionGPU;
+    break;
+  default:
+    MOZ_ASSERT_UNREACHABLE("unknown process type");
+    return nullptr;
+  }
+
+  if (Histogram* cached = cache[id]) {
+    return cached;
+  }
+
+  // Never mirror a histogram that is itself a subsession histogram.
+  NS_NAMED_LITERAL_CSTRING(prefix, SUBSESSION_HISTOGRAM_PREFIX);
+  nsDependentCString existingName(gHistograms[id].id());
+  if (StringBeginsWith(existingName, prefix)) {
+    return nullptr;
+  }
+
+  nsCString subsessionName(prefix);
+  subsessionName.Append(existing.histogram_name().c_str());
+
+  Histogram* clone = internal_CloneHistogram(subsessionName, id, existing);
+  cache[id] = clone;
+  return clone;
+}
+#endif
+
+// Records |value| into |histogram| (and its subsession companion, where
+// subsession histograms are supported), subject to the recording policy
+// for |dataset|. Silently succeeds without recording when recording is
+// disallowed or disabled.
+nsresult
+internal_HistogramAdd(Histogram& histogram, int32_t value, uint32_t dataset)
+{
+  // Check if we are allowed to record the data.
+  bool canRecordDataset = CanRecordDataset(dataset,
+                                           internal_CanRecordBase(),
+                                           internal_CanRecordExtended());
+  if (!canRecordDataset || !histogram.IsRecordingEnabled()) {
+    return NS_OK;
+  }
+
+#if !defined(MOZ_WIDGET_GONK) && !defined(MOZ_WIDGET_ANDROID)
+  if (Histogram* subsession = internal_GetSubsessionHistogram(histogram)) {
+    subsession->Add(value);
+  }
+#endif
+
+  // It is safe to add to the histogram now: the subsession histogram was already
+  // cloned from this so we won't add the sample twice.
+  histogram.Add(value);
+
+  return NS_OK;
+}
+
+// Convenience overload: looks up the histogram's dataset from the static
+// registry when needed, then delegates to the dataset-aware overload.
+// Runtime-created histograms (no registry entry) are dropped unless
+// extended telemetry is being recorded.
+nsresult
+internal_HistogramAdd(Histogram& histogram, int32_t value)
+{
+  uint32_t dataset = nsITelemetry::DATASET_RELEASE_CHANNEL_OPTIN;
+  // We only really care about the dataset of the histogram if we are not recording
+  // extended telemetry. Otherwise, we always record histogram data.
+  if (!internal_CanRecordExtended()) {
+    mozilla::Telemetry::ID id;
+    nsresult rv
+      = internal_GetHistogramEnumId(histogram.histogram_name().c_str(), &id);
+    if (NS_FAILED(rv)) {
+      // If we can't look up the dataset, it might be because the histogram was added
+      // at runtime. Since we're not recording extended telemetry, bail out.
+      return NS_OK;
+    }
+    dataset = gHistograms[id].dataset;
+  }
+
+  return internal_HistogramAdd(histogram, value, dataset);
+}
+
+// Clears the accumulated samples of |aHistogram|. When |onlySubsession|
+// is true, only the subsession companion histogram is cleared. Only legal
+// in the parent process; a no-op (with a debug assertion) elsewhere.
+void
+internal_HistogramClear(Histogram& aHistogram, bool onlySubsession)
+{
+  MOZ_ASSERT(XRE_IsParentProcess());
+  if (!XRE_IsParentProcess()) {
+    return;
+  }
+  if (!onlySubsession) {
+    aHistogram.Clear();
+  }
+
+#if !defined(MOZ_WIDGET_GONK) && !defined(MOZ_WIDGET_ANDROID)
+  if (Histogram* subsession = internal_GetSubsessionHistogram(aHistogram)) {
+    subsession->Clear();
+  }
+#endif
+}
+
+} // namespace
+
+
+////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////
+//
+// PRIVATE: Histogram corruption helpers
+
+namespace {
+
+void internal_Accumulate(mozilla::Telemetry::ID aHistogram, uint32_t aSample);
+
+// Scans every registered static histogram in |hs| for internal
+// inconsistencies, marks corrupt ones in gCorruptHistograms (so they are
+// excluded from reflection), and accumulates one sample into the matching
+// corruption-tracking telemetry histogram per newly detected corruption.
+void
+internal_IdentifyCorruptHistograms(StatisticsRecorder::Histograms &hs)
+{
+  for (HistogramIterator it = hs.begin(); it != hs.end(); ++it) {
+    Histogram *h = *it;
+
+    mozilla::Telemetry::ID id;
+    nsresult rv = internal_GetHistogramEnumId(h->histogram_name().c_str(), &id);
+    // This histogram isn't a static histogram, just ignore it.
+    if (NS_FAILED(rv)) {
+      continue;
+    }
+
+    // Already flagged in an earlier scan; don't double-count corruption.
+    if (gCorruptHistograms[id]) {
+      continue;
+    }
+
+    Histogram::SampleSet ss;
+    h->SnapshotSample(&ss);
+
+    Histogram::Inconsistencies check = h->FindCorruption(ss);
+    bool corrupt = (check != Histogram::NO_INCONSISTENCIES);
+
+    if (corrupt) {
+      // Map the first matching inconsistency flag to our own telemetry id.
+      mozilla::Telemetry::ID corruptID = mozilla::Telemetry::HistogramCount;
+      if (check & Histogram::RANGE_CHECKSUM_ERROR) {
+        corruptID = mozilla::Telemetry::RANGE_CHECKSUM_ERRORS;
+      } else if (check & Histogram::BUCKET_ORDER_ERROR) {
+        corruptID = mozilla::Telemetry::BUCKET_ORDER_ERRORS;
+      } else if (check & Histogram::COUNT_HIGH_ERROR) {
+        corruptID = mozilla::Telemetry::TOTAL_COUNT_HIGH_ERRORS;
+      } else if (check & Histogram::COUNT_LOW_ERROR) {
+        corruptID = mozilla::Telemetry::TOTAL_COUNT_LOW_ERRORS;
+      }
+      internal_Accumulate(corruptID, 1);
+    }
+
+    gCorruptHistograms[id] = corrupt;
+  }
+}
+
+} // namespace
+
+
+////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////
+//
+// PRIVATE: Histogram reflection helpers
+
+namespace {
+
+// Copies each bucket lower bound of |h| into the JS |array|, one element
+// per bucket. Returns false if any JS element definition fails.
+bool
+internal_FillRanges(JSContext *cx, JS::Handle<JSObject*> array, Histogram *h)
+{
+  JS::Rooted<JS::Value> bound(cx);
+  const size_t bucketCount = h->bucket_count();
+  for (size_t idx = 0; idx < bucketCount; ++idx) {
+    bound.setInt32(h->ranges(idx));
+    if (!JS_DefineElement(cx, array, idx, bound, JSPROP_ENUMERATE)) {
+      return false;
+    }
+  }
+  return true;
+}
+
+// Reflects histogram metadata (min/max/type/sum) plus its bucket ranges
+// and per-bucket counts from |ss| onto the JS object |obj|. Returns
+// REFLECT_CORRUPT for inconsistent histograms, REFLECT_FAILURE on any JS
+// allocation/definition failure, REFLECT_OK otherwise.
+enum reflectStatus
+internal_ReflectHistogramAndSamples(JSContext *cx,
+                                    JS::Handle<JSObject*> obj, Histogram *h,
+                                    const Histogram::SampleSet &ss)
+{
+  // We don't want to reflect corrupt histograms.
+  if (h->FindCorruption(ss) != Histogram::NO_INCONSISTENCIES) {
+    return REFLECT_CORRUPT;
+  }
+
+  if (!(JS_DefineProperty(cx, obj, "min",
+                          h->declared_min(), JSPROP_ENUMERATE)
+        && JS_DefineProperty(cx, obj, "max",
+                             h->declared_max(), JSPROP_ENUMERATE)
+        && JS_DefineProperty(cx, obj, "histogram_type",
+                             h->histogram_type(), JSPROP_ENUMERATE)
+        && JS_DefineProperty(cx, obj, "sum",
+                             double(ss.sum()), JSPROP_ENUMERATE))) {
+    return REFLECT_FAILURE;
+  }
+
+  // "ranges": array of bucket lower bounds.
+  const size_t count = h->bucket_count();
+  JS::Rooted<JSObject*> rarray(cx, JS_NewArrayObject(cx, count));
+  if (!rarray) {
+    return REFLECT_FAILURE;
+  }
+  if (!(internal_FillRanges(cx, rarray, h)
+        && JS_DefineProperty(cx, obj, "ranges", rarray, JSPROP_ENUMERATE))) {
+    return REFLECT_FAILURE;
+  }
+
+  // "counts": array of per-bucket sample counts, parallel to "ranges".
+  JS::Rooted<JSObject*> counts_array(cx, JS_NewArrayObject(cx, count));
+  if (!counts_array) {
+    return REFLECT_FAILURE;
+  }
+  if (!JS_DefineProperty(cx, obj, "counts", counts_array, JSPROP_ENUMERATE)) {
+    return REFLECT_FAILURE;
+  }
+  for (size_t i = 0; i < count; i++) {
+    if (!JS_DefineElement(cx, counts_array, i,
+                          ss.counts(i), JSPROP_ENUMERATE)) {
+      return REFLECT_FAILURE;
+    }
+  }
+
+  return REFLECT_OK;
+}
+
+// Snapshots the current samples of |h| and reflects them onto |obj|.
+enum reflectStatus
+internal_ReflectHistogramSnapshot(JSContext *cx,
+                                  JS::Handle<JSObject*> obj, Histogram *h)
+{
+  // Take a point-in-time copy of the samples, then reflect that copy.
+  Histogram::SampleSet samples;
+  h->SnapshotSample(&samples);
+  return internal_ReflectHistogramAndSamples(cx, obj, h, samples);
+}
+
+// Decides whether |h| should be reflected into JS. Static histograms are
+// reflected unless flagged corrupt; runtime histograms are reflected too,
+// except Histogram's own internal corruption trackers.
+bool
+internal_ShouldReflectHistogram(Histogram *h)
+{
+  const char *name = h->histogram_name().c_str();
+  mozilla::Telemetry::ID id;
+  if (NS_SUCCEEDED(internal_GetHistogramEnumId(name, &id))) {
+    // Registered histogram: reflect it unless it was marked corrupt.
+    return !gCorruptHistograms[id];
+  }
+
+  // GetHistogramEnumId generally should not fail. But a lookup
+  // failure shouldn't prevent us from reflecting histograms into JS.
+  //
+  // However, these two histograms are created by Histogram itself for
+  // tracking corruption. We have our own histograms for that, so
+  // ignore these two.
+  const bool isInternalTracker =
+    strcmp(name, "Histogram.InconsistentCountHigh") == 0
+    || strcmp(name, "Histogram.InconsistentCountLow") == 0;
+  return !isInternalTracker;
+}
+
+} // namespace
+
+
+////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////
+//
+// PRIVATE: class KeyedHistogram
+
+namespace {
+
+// A keyed histogram: a named family of histograms sharing one layout
+// (type/min/max/bucket count/dataset), with one lazily created Histogram
+// per string key. Also maintains a parallel per-subsession map on
+// platforms that support subsession telemetry.
+class KeyedHistogram {
+public:
+  KeyedHistogram(const nsACString &name, const nsACString &expiration,
+                 uint32_t histogramType, uint32_t min, uint32_t max,
+                 uint32_t bucketCount, uint32_t dataset);
+  // Fetches (creating on demand) the histogram for |name| in either the
+  // session or subsession map.
+  nsresult GetHistogram(const nsCString& name, Histogram** histogram, bool subsession);
+  Histogram* GetHistogram(const nsCString& name, bool subsession);
+  uint32_t GetHistogramType() const { return mHistogramType; }
+  nsresult GetDataset(uint32_t* dataset) const;
+  nsresult GetJSKeys(JSContext* cx, JS::CallArgs& args);
+  nsresult GetJSSnapshot(JSContext* cx, JS::Handle<JSObject*> obj,
+                         bool subsession, bool clearSubsession);
+
+  void SetRecordingEnabled(bool aEnabled) { mRecordingEnabled = aEnabled; };
+  bool IsRecordingEnabled() const { return mRecordingEnabled; };
+
+  nsresult Add(const nsCString& key, uint32_t aSample);
+  void Clear(bool subsession);
+
+  nsresult GetEnumId(mozilla::Telemetry::ID& id);
+
+private:
+  typedef nsBaseHashtableET<nsCStringHashKey, Histogram*> KeyedHistogramEntry;
+  typedef AutoHashtable<KeyedHistogramEntry> KeyedHistogramMapType;
+  // key -> Histogram* for the whole session.
+  KeyedHistogramMapType mHistogramMap;
+#if !defined(MOZ_WIDGET_GONK) && !defined(MOZ_WIDGET_ANDROID)
+  // key -> Histogram* for the current subsession only.
+  KeyedHistogramMapType mSubsessionMap;
+#endif
+
+  static bool ReflectKeyedHistogram(KeyedHistogramEntry* entry,
+                                    JSContext* cx,
+                                    JS::Handle<JSObject*> obj);
+
+  // Shared layout/metadata applied to every per-key histogram.
+  const nsCString mName;
+  const nsCString mExpiration;
+  const uint32_t mHistogramType;
+  const uint32_t mMin;
+  const uint32_t mMax;
+  const uint32_t mBucketCount;
+  const uint32_t mDataset;
+  // Atomic: recording can be toggled from a different thread than Add().
+  mozilla::Atomic<bool, mozilla::Relaxed> mRecordingEnabled;
+};
+
+// Stores the shared layout/metadata; individual per-key histograms are
+// created lazily by GetHistogram(). Recording starts enabled.
+KeyedHistogram::KeyedHistogram(const nsACString &name,
+                               const nsACString &expiration,
+                               uint32_t histogramType,
+                               uint32_t min, uint32_t max,
+                               uint32_t bucketCount, uint32_t dataset)
+  : mHistogramMap()
+#if !defined(MOZ_WIDGET_GONK) && !defined(MOZ_WIDGET_ANDROID)
+  , mSubsessionMap()
+#endif
+  , mName(name)
+  , mExpiration(expiration)
+  , mHistogramType(histogramType)
+  , mMin(min)
+  , mMax(max)
+  , mBucketCount(bucketCount)
+  , mDataset(dataset)
+  , mRecordingEnabled(true)
+{
+}
+
+// Returns (via |histogram|) the per-key histogram for |key|, creating and
+// caching it on first access. The underlying histogram is registered under
+// "<name>#<key>" (prefixed for subsession histograms) using this keyed
+// histogram's shared layout.
+nsresult
+KeyedHistogram::GetHistogram(const nsCString& key, Histogram** histogram,
+                             bool subsession)
+{
+#if !defined(MOZ_WIDGET_GONK) && !defined(MOZ_WIDGET_ANDROID)
+  KeyedHistogramMapType& map = subsession ? mSubsessionMap : mHistogramMap;
+#else
+  KeyedHistogramMapType& map = mHistogramMap;
+#endif
+  // Fast path: already created for this key.
+  KeyedHistogramEntry* entry = map.GetEntry(key);
+  if (entry) {
+    *histogram = entry->mData;
+    return NS_OK;
+  }
+
+  // Build the flat histogram name: [subsession prefix]name#key.
+  nsCString histogramName;
+#if !defined(MOZ_WIDGET_GONK) && !defined(MOZ_WIDGET_ANDROID)
+  if (subsession) {
+    histogramName.AppendLiteral(SUBSESSION_HISTOGRAM_PREFIX);
+  }
+#endif
+  histogramName.Append(mName);
+  histogramName.AppendLiteral(KEYED_HISTOGRAM_NAME_SEPARATOR);
+  histogramName.Append(key);
+
+  Histogram* h;
+  nsresult rv = internal_HistogramGet(histogramName.get(), mExpiration.get(),
+                                      mHistogramType, mMin, mMax, mBucketCount,
+                                      true, &h);
+  if (NS_FAILED(rv)) {
+    return rv;
+  }
+
+  // Keep per-key histograms out of the generic UMA reporting path; they
+  // are reflected through the keyed-histogram snapshot instead.
+  h->ClearFlags(Histogram::kUmaTargetedHistogramFlag);
+  *histogram = h;
+
+  entry = map.PutEntry(key);
+  if (MOZ_UNLIKELY(!entry)) {
+    return NS_ERROR_OUT_OF_MEMORY;
+  }
+
+  entry->mData = h;
+  return NS_OK;
+}
+
+// Pointer-returning convenience wrapper: nullptr on lookup/creation failure.
+Histogram*
+KeyedHistogram::GetHistogram(const nsCString& key, bool subsession)
+{
+  Histogram* histogram = nullptr;
+  nsresult rv = GetHistogram(key, &histogram, subsession);
+  return NS_SUCCEEDED(rv) ? histogram : nullptr;
+}
+
+// Reports which telemetry dataset this keyed histogram belongs to.
+nsresult
+KeyedHistogram::GetDataset(uint32_t* dataset) const
+{
+  MOZ_ASSERT(dataset);
+  *dataset = mDataset;
+  return NS_OK;
+}
+
+// Records |sample| under |key|, into both the session histogram and (where
+// supported) the subsession histogram. Silently succeeds without recording
+// when the dataset policy or the recording flag disallows it.
+nsresult
+KeyedHistogram::Add(const nsCString& key, uint32_t sample)
+{
+  bool canRecordDataset = CanRecordDataset(mDataset,
+                                           internal_CanRecordBase(),
+                                           internal_CanRecordExtended());
+  if (!canRecordDataset) {
+    return NS_OK;
+  }
+
+  Histogram* histogram = GetHistogram(key, false);
+  MOZ_ASSERT(histogram);
+  if (!histogram) {
+    return NS_ERROR_FAILURE;
+  }
+#if !defined(MOZ_WIDGET_GONK) && !defined(MOZ_WIDGET_ANDROID)
+  Histogram* subsession = GetHistogram(key, true);
+  MOZ_ASSERT(subsession);
+  if (!subsession) {
+    return NS_ERROR_FAILURE;
+  }
+#endif
+
+  // Checked after the lookups so the per-key histograms still get created
+  // even while recording is paused.
+  if (!IsRecordingEnabled()) {
+    return NS_OK;
+  }
+
+  histogram->Add(sample);
+#if !defined(MOZ_WIDGET_GONK) && !defined(MOZ_WIDGET_ANDROID)
+  subsession->Add(sample);
+#endif
+  return NS_OK;
+}
+
+// Clears subsession data always; also clears session data unless
+// |onlySubsession| is true. Parent-process only (no-op elsewhere, with a
+// debug assertion).
+void
+KeyedHistogram::Clear(bool onlySubsession)
+{
+  MOZ_ASSERT(XRE_IsParentProcess());
+  if (!XRE_IsParentProcess()) {
+    return;
+  }
+#if !defined(MOZ_WIDGET_GONK) && !defined(MOZ_WIDGET_ANDROID)
+  for (auto iter = mSubsessionMap.Iter(); !iter.Done(); iter.Next()) {
+    iter.Get()->mData->Clear();
+  }
+  mSubsessionMap.Clear();
+  if (onlySubsession) {
+    return;
+  }
+#endif
+
+  for (auto iter = mHistogramMap.Iter(); !iter.Done(); iter.Next()) {
+    iter.Get()->mData->Clear();
+  }
+  mHistogramMap.Clear();
+}
+
+// Reflects the list of recorded keys as a JS array of strings into
+// |args.rval()|. Returns NS_ERROR_OUT_OF_MEMORY / NS_ERROR_FAILURE on JS
+// allocation failures.
+nsresult
+KeyedHistogram::GetJSKeys(JSContext* cx, JS::CallArgs& args)
+{
+  JS::AutoValueVector keys(cx);
+  if (!keys.reserve(mHistogramMap.Count())) {
+    return NS_ERROR_OUT_OF_MEMORY;
+  }
+
+  for (auto iter = mHistogramMap.Iter(); !iter.Done(); iter.Next()) {
+    JS::RootedValue jsKey(cx);
+    const NS_ConvertUTF8toUTF16 key(iter.Get()->GetKey());
+    // JS_NewUCStringCopyN returns nullptr on OOM; don't hand a null
+    // pointer to setString().
+    JSString* str = JS_NewUCStringCopyN(cx, key.Data(), key.Length());
+    if (!str) {
+      return NS_ERROR_OUT_OF_MEMORY;
+    }
+    jsKey.setString(str);
+    if (!keys.append(jsKey)) {
+      return NS_ERROR_OUT_OF_MEMORY;
+    }
+  }
+
+  JS::RootedObject jsKeys(cx, JS_NewArrayObject(cx, keys));
+  if (!jsKeys) {
+    return NS_ERROR_FAILURE;
+  }
+
+  args.rval().setObject(*jsKeys);
+  return NS_OK;
+}
+
+// Per-entry callback for ReflectIntoJS: snapshots one per-key histogram
+// and defines it on |obj| under its (UTF-16 converted) key.
+bool
+KeyedHistogram::ReflectKeyedHistogram(KeyedHistogramEntry* entry,
+                                      JSContext* cx, JS::Handle<JSObject*> obj)
+{
+  JS::RootedObject histogramSnapshot(cx, JS_NewPlainObject(cx));
+  if (!histogramSnapshot) {
+    return false;
+  }
+
+  // Both REFLECT_FAILURE and REFLECT_CORRUPT abort the whole reflection.
+  if (internal_ReflectHistogramSnapshot(cx, histogramSnapshot,
+                                        entry->mData) != REFLECT_OK) {
+    return false;
+  }
+
+  const NS_ConvertUTF8toUTF16 key(entry->GetKey());
+  if (!JS_DefineUCProperty(cx, obj, key.Data(), key.Length(),
+                           histogramSnapshot, JSPROP_ENUMERATE)) {
+    return false;
+  }
+
+  return true;
+}
+
+// Reflects all per-key histograms (session or subsession map, per
+// |subsession|) onto |obj|, one property per key. Optionally clears the
+// subsession data afterwards.
+nsresult
+KeyedHistogram::GetJSSnapshot(JSContext* cx, JS::Handle<JSObject*> obj,
+                              bool subsession, bool clearSubsession)
+{
+#if !defined(MOZ_WIDGET_GONK) && !defined(MOZ_WIDGET_ANDROID)
+  KeyedHistogramMapType& map = subsession ? mSubsessionMap : mHistogramMap;
+#else
+  KeyedHistogramMapType& map = mHistogramMap;
+#endif
+  if (!map.ReflectIntoJS(&KeyedHistogram::ReflectKeyedHistogram, cx, obj)) {
+    return NS_ERROR_FAILURE;
+  }
+
+#if !defined(MOZ_WIDGET_GONK) && !defined(MOZ_WIDGET_ANDROID)
+  // Clear only after a successful reflection, so data is never lost.
+  if (subsession && clearSubsession) {
+    Clear(true);
+  }
+#endif
+
+  return NS_OK;
+}
+
+// Resolves this keyed histogram's name to its static histogram enum id.
+nsresult
+KeyedHistogram::GetEnumId(mozilla::Telemetry::ID& id)
+{
+  return internal_GetHistogramEnumId(mName.get(), &id);
+}
+
+} // namespace
+
+
+////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////
+//
+// PRIVATE: KeyedHistogram helpers
+
+namespace {
+
+// Looks up a registered KeyedHistogram by name; nullptr if telemetry has
+// not been initialized yet or the name is unknown.
+KeyedHistogram*
+internal_GetKeyedHistogramById(const nsACString &name)
+{
+  if (!gInitDone) {
+    return nullptr;
+  }
+
+  KeyedHistogram* found = nullptr;
+  gKeyedHistograms.Get(name, &found);
+  return found;
+}
+
+} // namespace
+
+
+////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////
+//
+// PRIVATE: functions related to addon histograms
+
+namespace {
+
+// Compute the name to pass into Histogram for the addon histogram
+// 'name' from the addon 'id'. We can't use 'name' directly because it
+// might conflict with other histograms in other addons or even with our
+// own.
+// Compute the name to pass into Histogram for the addon histogram
+// 'name' from the addon 'id'. We can't use 'name' directly because it
+// might conflict with other histograms in other addons or even with our
+// own. The result is "<id>:<name>", appended to |ret|.
+void
+internal_AddonHistogramName(const nsACString &id, const nsACString &name,
+                            nsACString &ret)
+{
+  ret.Append(id);
+  ret.Append(':');
+  ret.Append(name);
+}
+
+// Creates the backing Histogram for an addon histogram described by
+// |info|, storing it in info.h. Addon histograms never expire ("never").
+// Returns false if the histogram could not be created.
+bool
+internal_CreateHistogramForAddon(const nsACString &name,
+                                 AddonHistogramInfo &info)
+{
+  Histogram *h;
+  nsresult rv = internal_HistogramGet(PromiseFlatCString(name).get(), "never",
+                                      info.histogramType, info.min, info.max,
+                                      info.bucketCount, true, &h);
+  if (NS_FAILED(rv)) {
+    return false;
+  }
+  // Don't let this histogram be reported via the normal means
+  // (e.g. Telemetry.registeredHistograms); we'll make it available in
+  // other ways.
+  h->ClearFlags(Histogram::kUmaTargetedHistogramFlag);
+  info.h = h;
+  return true;
+}
+
+// Per-histogram callback for addon reflection: lazily creates flag
+// histograms, skips empty ones, and defines a snapshot property on |obj|
+// keyed by the histogram name.
+bool
+internal_AddonHistogramReflector(AddonHistogramEntryType *entry,
+                                 JSContext *cx, JS::Handle<JSObject*> obj)
+{
+  AddonHistogramInfo &info = entry->mData;
+
+  // Never even accessed the histogram.
+  if (!info.h) {
+    // Have to force creation of HISTOGRAM_FLAG histograms.
+    if (info.histogramType != nsITelemetry::HISTOGRAM_FLAG)
+      return true;
+
+    if (!internal_CreateHistogramForAddon(entry->GetKey(), info)) {
+      return false;
+    }
+  }
+
+  // Empty histograms are silently omitted from the snapshot.
+  if (internal_IsEmpty(info.h)) {
+    return true;
+  }
+
+  JS::Rooted<JSObject*> snapshot(cx, JS_NewPlainObject(cx));
+  if (!snapshot) {
+    // Just consider this to be skippable.
+    return true;
+  }
+  switch (internal_ReflectHistogramSnapshot(cx, snapshot, info.h)) {
+  case REFLECT_FAILURE:
+  case REFLECT_CORRUPT:
+    return false;
+  case REFLECT_OK:
+    const nsACString &histogramName = entry->GetKey();
+    if (!JS_DefineProperty(cx, obj, PromiseFlatCString(histogramName).get(),
+                           snapshot, JSPROP_ENUMERATE)) {
+      return false;
+    }
+    break;
+  }
+  return true;
+}
+
+// Per-addon callback: reflects all of one addon's histograms into a fresh
+// sub-object defined on |obj| under the addon id.
+bool
+internal_AddonReflector(AddonEntryType *entry, JSContext *cx,
+                        JS::Handle<JSObject*> obj)
+{
+  const nsACString &addonId = entry->GetKey();
+  JS::Rooted<JSObject*> subobj(cx, JS_NewPlainObject(cx));
+  if (!subobj) {
+    return false;
+  }
+
+  AddonHistogramMapType *map = entry->mData;
+  if (!(map->ReflectIntoJS(internal_AddonHistogramReflector, cx, subobj)
+        && JS_DefineProperty(cx, obj, PromiseFlatCString(addonId).get(),
+                             subobj, JSPROP_ENUMERATE))) {
+    return false;
+  }
+  return true;
+}
+
+} // namespace
+
+
+////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////
+//
+// PRIVATE: thread-unsafe helpers for the external interface
+
+// This is a StaticMutex rather than a plain Mutex (1) so that
+// it gets initialised in a thread-safe manner the first time
+// it is used, and (2) because it is never de-initialised, and
+// a normal Mutex would show up as a leak in BloatView. StaticMutex
+// also has the "OffTheBooks" property, so it won't show as a leak
+// in BloatView.
+static StaticMutex gTelemetryHistogramMutex;
+
+namespace {
+
+// Toggles recording for one registered histogram, dispatching to either
+// its KeyedHistogram or its plain Histogram instance. Note: if the keyed
+// lookup returns null (registry not initialized), control falls through
+// to the assertion below.
+void
+internal_SetHistogramRecordingEnabled(mozilla::Telemetry::ID aID, bool aEnabled)
+{
+  if (gHistograms[aID].keyed) {
+    const nsDependentCString id(gHistograms[aID].id());
+    KeyedHistogram* keyed = internal_GetKeyedHistogramById(id);
+    if (keyed) {
+      keyed->SetRecordingEnabled(aEnabled);
+      return;
+    }
+  } else {
+    Histogram *h;
+    nsresult rv = internal_GetHistogramByEnumId(aID, &h, GeckoProcessType_Default);
+    if (NS_SUCCEEDED(rv)) {
+      h->SetRecordingEnabled(aEnabled);
+      return;
+    }
+  }
+
+  MOZ_ASSERT(false, "Telemetry::SetHistogramRecordingEnabled(...) id not found");
+}
+
+// Main-thread half of the IPC batch timer arming: lazily creates the timer
+// and starts a one-shot countdown (kBatchTimeoutMs) that flushes queued
+// child-process accumulations via IPCTimerFired. Must run on the main
+// thread; clears the in-flight "arming" flag set by internal_armIPCTimer.
+void internal_armIPCTimerMainThread()
+{
+  MOZ_ASSERT(NS_IsMainThread());
+  gIPCTimerArming = false;
+  if (gIPCTimerArmed) {
+    return;
+  }
+  if (!gIPCTimer) {
+    CallCreateInstance(NS_TIMER_CONTRACTID, &gIPCTimer);
+  }
+  // Timer creation can fail; in that case stay unarmed and retry later.
+  if (gIPCTimer) {
+    gIPCTimer->InitWithFuncCallback(TelemetryHistogram::IPCTimerFired,
+                                    nullptr, kBatchTimeoutMs,
+                                    nsITimer::TYPE_ONE_SHOT);
+    gIPCTimerArmed = true;
+  }
+}
+
+// Arms the IPC batch timer from any thread. The gIPCTimerArming flag
+// prevents piling up redundant main-thread dispatches while one is
+// already in flight.
+void internal_armIPCTimer()
+{
+  if (gIPCTimerArmed || gIPCTimerArming) {
+    return;
+  }
+  gIPCTimerArming = true;
+  if (NS_IsMainThread()) {
+    internal_armIPCTimerMainThread();
+  } else {
+    // Re-acquire the telemetry mutex on the main thread before arming.
+    internal_DispatchToMainThread(NS_NewRunnableFunction([]() -> void {
+      StaticMutexAutoLock locker(gTelemetryHistogramMutex);
+      internal_armIPCTimerMainThread();
+    }));
+  }
+}
+
+// In a child process, queues the accumulation for batched IPC to the
+// parent and returns true ("handled"). Returns false in the parent, where
+// the caller should record directly. Samples for recording-disabled
+// histograms are dropped (still returns true).
+bool
+internal_RemoteAccumulate(mozilla::Telemetry::ID aId, uint32_t aSample)
+{
+  if (XRE_IsParentProcess()) {
+    return false;
+  }
+  Histogram *h;
+  nsresult rv = internal_GetHistogramByEnumId(aId, &h, GeckoProcessType_Default);
+  if (NS_SUCCEEDED(rv) && !h->IsRecordingEnabled()) {
+    return true;
+  }
+  if (!gAccumulations) {
+    gAccumulations = new nsTArray<Accumulation>();
+  }
+  // Flush eagerly when the batch hits the high-water mark rather than
+  // waiting for the timer.
+  if (gAccumulations->Length() == kAccumulationsArrayHighWaterMark) {
+    internal_DispatchToMainThread(NS_NewRunnableFunction([]() -> void {
+      TelemetryHistogram::IPCTimerFired(nullptr, nullptr);
+    }));
+  }
+  gAccumulations->AppendElement(Accumulation{aId, aSample});
+  internal_armIPCTimer();
+  return true;
+}
+
+// Keyed variant: in a child process, queues the keyed accumulation for
+// batched IPC to the parent and returns true ("handled"). Returns false
+// in the parent, or when recording is disabled for this histogram.
+bool
+internal_RemoteAccumulate(mozilla::Telemetry::ID aId,
+                          const nsCString& aKey, uint32_t aSample)
+{
+  if (XRE_IsParentProcess()) {
+    return false;
+  }
+  const HistogramInfo& th = gHistograms[aId];
+  KeyedHistogram* keyed
+    = internal_GetKeyedHistogramById(nsDependentCString(th.id()));
+  MOZ_ASSERT(keyed);
+  if (!keyed) {
+    // MOZ_ASSERT compiles out in release builds; guard the dereference and
+    // drop the sample rather than crash.
+    return true;
+  }
+  if (!keyed->IsRecordingEnabled()) {
+    return false;
+  }
+  if (!gKeyedAccumulations) {
+    gKeyedAccumulations = new nsTArray<KeyedAccumulation>();
+  }
+  // Flush eagerly when the batch hits the high-water mark rather than
+  // waiting for the timer.
+  if (gKeyedAccumulations->Length() == kAccumulationsArrayHighWaterMark) {
+    internal_DispatchToMainThread(NS_NewRunnableFunction([]() -> void {
+      TelemetryHistogram::IPCTimerFired(nullptr, nullptr);
+    }));
+  }
+  gKeyedAccumulations->AppendElement(KeyedAccumulation{aId, aSample, aKey});
+  internal_armIPCTimer();
+  return true;
+}
+
+// Records one sample for a static histogram: forwarded over IPC in child
+// processes, recorded directly in the parent.
+void internal_Accumulate(mozilla::Telemetry::ID aHistogram, uint32_t aSample)
+{
+  if (!internal_CanRecordBase() ||
+      internal_RemoteAccumulate(aHistogram, aSample)) {
+    return;
+  }
+  Histogram *h;
+  nsresult rv = internal_GetHistogramByEnumId(aHistogram, &h, GeckoProcessType_Default);
+  if (NS_SUCCEEDED(rv)) {
+    internal_HistogramAdd(*h, aSample, gHistograms[aHistogram].dataset);
+  }
+}
+
+// Records one keyed sample for a static keyed histogram: forwarded over
+// IPC in child processes, recorded directly in the parent.
+void
+internal_Accumulate(mozilla::Telemetry::ID aID,
+                    const nsCString& aKey, uint32_t aSample)
+{
+  if (!gInitDone || !internal_CanRecordBase() ||
+      internal_RemoteAccumulate(aID, aKey, aSample)) {
+    return;
+  }
+  const HistogramInfo& th = gHistograms[aID];
+  KeyedHistogram* keyed
+    = internal_GetKeyedHistogramById(nsDependentCString(th.id()));
+  MOZ_ASSERT(keyed);
+  if (!keyed) {
+    // MOZ_ASSERT compiles out in release builds; guard the dereference and
+    // drop the sample rather than crash.
+    return;
+  }
+  keyed->Add(aKey, aSample);
+}
+
+// Records one sample on a Histogram instance. In child processes the
+// sample is rerouted over IPC (by resolving the instance back to its enum
+// id); runtime-only histograms with no enum id are dropped in children.
+void
+internal_Accumulate(Histogram& aHistogram, uint32_t aSample)
+{
+  if (XRE_IsParentProcess()) {
+    internal_HistogramAdd(aHistogram, aSample);
+    return;
+  }
+
+  mozilla::Telemetry::ID id;
+  nsresult rv = internal_GetHistogramEnumId(aHistogram.histogram_name().c_str(), &id);
+  if (NS_SUCCEEDED(rv)) {
+    internal_RemoteAccumulate(id, aSample);
+  }
+}
+
+// Records one keyed sample on a KeyedHistogram instance. In child
+// processes the sample is rerouted over IPC via the histogram's enum id;
+// unknown ids are dropped in children.
+void
+internal_Accumulate(KeyedHistogram& aKeyed,
+                    const nsCString& aKey, uint32_t aSample)
+{
+  if (XRE_IsParentProcess()) {
+    aKeyed.Add(aKey, aSample);
+    return;
+  }
+
+  mozilla::Telemetry::ID id;
+  if (NS_SUCCEEDED(aKeyed.GetEnumId(id))) {
+    internal_RemoteAccumulate(id, aKey, aSample);
+  }
+}
+
+// Parent-side handler for a sample received from a child process: records
+// it into the histogram variant dedicated to |aProcessType|.
+void
+internal_AccumulateChild(GeckoProcessType aProcessType, mozilla::Telemetry::ID aId, uint32_t aSample)
+{
+  if (!internal_CanRecordBase()) {
+    return;
+  }
+  Histogram* h;
+  nsresult rv = internal_GetHistogramByEnumId(aId, &h, aProcessType);
+  if (NS_SUCCEEDED(rv)) {
+    internal_HistogramAdd(*h, aSample, gHistograms[aId].dataset);
+  } else {
+    NS_WARNING("NS_FAILED GetHistogramByEnumId for CHILD");
+  }
+}
+
+// Parent-side handler for a keyed sample received from a child process:
+// records it into the keyed histogram whose name carries the
+// process-specific suffix for |aProcessType|.
+void
+internal_AccumulateChildKeyed(GeckoProcessType aProcessType, mozilla::Telemetry::ID aId,
+                              const nsCString& aKey, uint32_t aSample)
+{
+  if (!gInitDone || !internal_CanRecordBase()) {
+    return;
+  }
+
+  const char* suffix = SuffixForProcessType(aProcessType);
+  if (!suffix) {
+    MOZ_ASSERT_UNREACHABLE("suffix should not be null");
+    return;
+  }
+
+  const HistogramInfo& th = gHistograms[aId];
+
+  // Build "<histogram-id><process-suffix>" to address the per-process
+  // keyed histogram.
+  nsCString id;
+  id.Append(th.id());
+  id.AppendASCII(suffix);
+
+  KeyedHistogram* keyed = internal_GetKeyedHistogramById(id);
+  MOZ_ASSERT(keyed);
+  if (!keyed) {
+    // MOZ_ASSERT compiles out in release builds; guard the dereference and
+    // drop the sample rather than crash.
+    return;
+  }
+  keyed->Add(aKey, aSample);
+}
+
+} // namespace
+
+
+////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////
+//
+// PRIVATE: JSHistogram_* functions
+
+// NOTE: the functions in this section:
+//
+// internal_JSHistogram_Add
+// internal_JSHistogram_Snapshot
+// internal_JSHistogram_Clear
+// internal_JSHistogram_Dataset
+// internal_WrapAndReturnHistogram
+//
+// all run without protection from |gTelemetryHistogramMutex|. If they
+// held |gTelemetryHistogramMutex|, there would be the possibility of
+// deadlock because the JS_ calls that they make may call back into the
+// TelemetryHistogram interface, hence trying to re-acquire the mutex.
+//
+// This means that these functions potentially race against threads, but
+// that seems preferable to risking deadlock.
+
+namespace {
+
+// JS-exposed |add()| for plain histograms. Accepts: no argument (count
+// histograms, implicit +1), a string label (categorical histograms), or a
+// number/boolean (everything else). Runs unlocked; only the final
+// accumulate takes the telemetry mutex (see section comment above).
+bool
+internal_JSHistogram_Add(JSContext *cx, unsigned argc, JS::Value *vp)
+{
+  JSObject *obj = JS_THIS_OBJECT(cx, vp);
+  MOZ_ASSERT(obj);
+  if (!obj) {
+    return false;
+  }
+
+  Histogram *h = static_cast<Histogram*>(JS_GetPrivate(obj));
+  MOZ_ASSERT(h);
+  Histogram::ClassType type = h->histogram_type();
+
+  JS::CallArgs args = CallArgsFromVp(argc, vp);
+
+  // Recording disabled: succeed silently without touching the arguments.
+  if (!internal_CanRecordBase()) {
+    return true;
+  }
+
+  uint32_t value = 0;
+  mozilla::Telemetry::ID id;
+  if ((type == base::CountHistogram::COUNT_HISTOGRAM) && (args.length() == 0)) {
+    // If we don't have an argument for the count histogram, assume an increment of 1.
+    // Otherwise, make sure to run some sanity checks on the argument.
+    value = 1;
+  } else if (type == base::LinearHistogram::LINEAR_HISTOGRAM &&
+             (args.length() > 0) && args[0].isString() &&
+             NS_SUCCEEDED(internal_GetHistogramEnumId(h->histogram_name().c_str(), &id)) &&
+             gHistograms[id].histogramType == nsITelemetry::HISTOGRAM_CATEGORICAL) {
+    // For categorical histograms we allow passing a string argument that specifies the label.
+    nsAutoJSString label;
+    if (!label.init(cx, args[0])) {
+      JS_ReportErrorASCII(cx, "Invalid string parameter");
+      return false;
+    }
+
+    // Translate the label string into its bucket index.
+    nsresult rv = gHistograms[id].label_id(NS_ConvertUTF16toUTF8(label).get(), &value);
+    if (NS_FAILED(rv)) {
+      JS_ReportErrorASCII(cx, "Unknown label for categorical histogram");
+      return false;
+    }
+  } else {
+    // All other accumulations expect one numerical argument.
+    if (!args.length()) {
+      JS_ReportErrorASCII(cx, "Expected one argument");
+      return false;
+    }
+
+    if (!(args[0].isNumber() || args[0].isBoolean())) {
+      JS_ReportErrorASCII(cx, "Not a number");
+      return false;
+    }
+
+    if (!JS::ToUint32(cx, args[0], &value)) {
+      JS_ReportErrorASCII(cx, "Failed to convert argument");
+      return false;
+    }
+  }
+
+  {
+    // Take the lock only for the accumulation itself; holding it across
+    // the JS_ calls above could deadlock (see section comment).
+    StaticMutexAutoLock locker(gTelemetryHistogramMutex);
+    internal_Accumulate(*h, value);
+  }
+  return true;
+}
+
+// JS-exposed |snapshot()|: reflects the histogram's current state into a
+// fresh JS object. Reports a JS error for corrupt histograms.
+bool
+internal_JSHistogram_Snapshot(JSContext *cx, unsigned argc, JS::Value *vp)
+{
+  JS::CallArgs args = JS::CallArgsFromVp(argc, vp);
+  JSObject *obj = JS_THIS_OBJECT(cx, vp);
+  if (!obj) {
+    return false;
+  }
+
+  Histogram *h = static_cast<Histogram*>(JS_GetPrivate(obj));
+  MOZ_ASSERT(h);
+  if (!h) {
+    // Guard against a null private slot (as internal_JSHistogram_Clear
+    // does) instead of dereferencing it below.
+    return false;
+  }
+  JS::Rooted<JSObject*> snapshot(cx, JS_NewPlainObject(cx));
+  if (!snapshot)
+    return false;
+
+  switch (internal_ReflectHistogramSnapshot(cx, snapshot, h)) {
+  case REFLECT_FAILURE:
+    return false;
+  case REFLECT_CORRUPT:
+    JS_ReportErrorASCII(cx, "Histogram is corrupt");
+    return false;
+  case REFLECT_OK:
+    args.rval().setObject(*snapshot);
+    return true;
+  default:
+    MOZ_CRASH("unhandled reflection status");
+  }
+}
+
+// JS-exposed |clear([onlySubsession])|: clears the histogram's samples.
+// The optional boolean restricts clearing to the subsession histogram on
+// platforms that support subsessions.
+bool
+internal_JSHistogram_Clear(JSContext *cx, unsigned argc, JS::Value *vp)
+{
+  JSObject *obj = JS_THIS_OBJECT(cx, vp);
+  if (!obj) {
+    return false;
+  }
+
+  bool onlySubsession = false;
+#if !defined(MOZ_WIDGET_GONK) && !defined(MOZ_WIDGET_ANDROID)
+  JS::CallArgs args = JS::CallArgsFromVp(argc, vp);
+
+  if (args.length() >= 1) {
+    if (!args[0].isBoolean()) {
+      JS_ReportErrorASCII(cx, "Not a boolean");
+      return false;
+    }
+
+    onlySubsession = JS::ToBoolean(args[0]);
+  }
+#endif
+
+  Histogram *h = static_cast<Histogram*>(JS_GetPrivate(obj));
+  MOZ_ASSERT(h);
+  if (h) {
+    internal_HistogramClear(*h, onlySubsession);
+  }
+
+  return true;
+}
+
+// JS-exposed |dataset()|: returns the registry dataset id for this
+// histogram, or fails for histograms not in the static registry.
+bool
+internal_JSHistogram_Dataset(JSContext *cx, unsigned argc, JS::Value *vp)
+{
+  JS::CallArgs args = JS::CallArgsFromVp(argc, vp);
+  JSObject *obj = JS_THIS_OBJECT(cx, vp);
+  if (!obj) {
+    return false;
+  }
+
+  Histogram *h = static_cast<Histogram*>(JS_GetPrivate(obj));
+  MOZ_ASSERT(h);
+  if (!h) {
+    // Guard against a null private slot (as internal_JSHistogram_Clear
+    // does) instead of dereferencing it below.
+    return false;
+  }
+  mozilla::Telemetry::ID id;
+  nsresult rv = internal_GetHistogramEnumId(h->histogram_name().c_str(), &id);
+  if (NS_SUCCEEDED(rv)) {
+    args.rval().setNumber(gHistograms[id].dataset);
+    return true;
+  }
+
+  return false;
+}
+
+// NOTE: Runs without protection from |gTelemetryHistogramMutex|.
+// See comment at the top of this section.
+// NOTE: Runs without protection from |gTelemetryHistogramMutex|.
+// See comment at the top of this section.
+//
+// Wraps |h| in a JS object exposing add/snapshot/clear/dataset methods,
+// storing the Histogram pointer in the object's private slot.
+nsresult
+internal_WrapAndReturnHistogram(Histogram *h, JSContext *cx,
+                                JS::MutableHandle<JS::Value> ret)
+{
+  static const JSClass JSHistogram_class = {
+    "JSHistogram", /* name */
+    JSCLASS_HAS_PRIVATE /* flags */
+  };
+
+  JS::Rooted<JSObject*> obj(cx, JS_NewObject(cx, &JSHistogram_class));
+  if (!obj)
+    return NS_ERROR_FAILURE;
+  // The 4 functions that are wrapped up here are eventually called
+  // by the same thread that runs this function.
+  if (!(JS_DefineFunction(cx, obj, "add", internal_JSHistogram_Add, 1, 0)
+        && JS_DefineFunction(cx, obj, "snapshot",
+                             internal_JSHistogram_Snapshot, 0, 0)
+        && JS_DefineFunction(cx, obj, "clear", internal_JSHistogram_Clear, 0, 0)
+        && JS_DefineFunction(cx, obj, "dataset",
+                             internal_JSHistogram_Dataset, 0, 0))) {
+    return NS_ERROR_FAILURE;
+  }
+  JS_SetPrivate(obj, h);
+  ret.setObject(*obj);
+  return NS_OK;
+}
+
+} // namespace
+
+
+////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////
+//
+// PRIVATE: JSKeyedHistogram_* functions
+
+// NOTE: the functions in this section:
+//
+// internal_KeyedHistogram_SnapshotImpl
+// internal_JSKeyedHistogram_Add
+// internal_JSKeyedHistogram_Keys
+// internal_JSKeyedHistogram_Snapshot
+// internal_JSKeyedHistogram_SubsessionSnapshot
+// internal_JSKeyedHistogram_SnapshotSubsessionAndClear
+// internal_JSKeyedHistogram_Clear
+// internal_JSKeyedHistogram_Dataset
+// internal_WrapAndReturnKeyedHistogram
+//
+// Same comments as above, at the JSHistogram_* section, regarding
+// deadlock avoidance, apply.
+
+namespace {
+
+// Shared implementation behind the JS snapshot entry points. With no
+// arguments it reflects all keys of the keyed histogram; with a string
+// argument it reflects just that key's histogram. |subsession| selects the
+// subsession map; |clearSubsession| clears it after a full snapshot.
+bool
+internal_KeyedHistogram_SnapshotImpl(JSContext *cx, unsigned argc,
+                                     JS::Value *vp,
+                                     bool subsession, bool clearSubsession)
+{
+  JSObject *obj = JS_THIS_OBJECT(cx, vp);
+  if (!obj) {
+    return false;
+  }
+
+  KeyedHistogram* keyed = static_cast<KeyedHistogram*>(JS_GetPrivate(obj));
+  if (!keyed) {
+    return false;
+  }
+
+  JS::CallArgs args = JS::CallArgsFromVp(argc, vp);
+
+  // No argument: snapshot every key.
+  if (args.length() == 0) {
+    JS::RootedObject snapshot(cx, JS_NewPlainObject(cx));
+    if (!snapshot) {
+      JS_ReportErrorASCII(cx, "Failed to create object");
+      return false;
+    }
+
+    if (!NS_SUCCEEDED(keyed->GetJSSnapshot(cx, snapshot, subsession, clearSubsession))) {
+      JS_ReportErrorASCII(cx, "Failed to reflect keyed histograms");
+      return false;
+    }
+
+    args.rval().setObject(*snapshot);
+    return true;
+  }
+
+  // One string argument: snapshot a single key's histogram.
+  nsAutoJSString key;
+  if (!args[0].isString() || !key.init(cx, args[0])) {
+    JS_ReportErrorASCII(cx, "Not a string");
+    return false;
+  }
+
+  Histogram* h = nullptr;
+  nsresult rv = keyed->GetHistogram(NS_ConvertUTF16toUTF8(key), &h, subsession);
+  if (NS_FAILED(rv)) {
+    JS_ReportErrorASCII(cx, "Failed to get histogram");
+    return false;
+  }
+
+  JS::RootedObject snapshot(cx, JS_NewPlainObject(cx));
+  if (!snapshot) {
+    return false;
+  }
+
+  switch (internal_ReflectHistogramSnapshot(cx, snapshot, h)) {
+  case REFLECT_FAILURE:
+    return false;
+  case REFLECT_CORRUPT:
+    JS_ReportErrorASCII(cx, "Histogram is corrupt");
+    return false;
+  case REFLECT_OK:
+    args.rval().setObject(*snapshot);
+    return true;
+  default:
+    MOZ_CRASH("unhandled reflection status");
+  }
+}
+
+// JS-exposed |add(key[, value])| for keyed histograms. The key is
+// mandatory; the value defaults to 1 for count histograms and is required
+// (number or boolean) for all other types.
+bool
+internal_JSKeyedHistogram_Add(JSContext *cx, unsigned argc, JS::Value *vp)
+{
+  JSObject *obj = JS_THIS_OBJECT(cx, vp);
+  if (!obj) {
+    return false;
+  }
+
+  KeyedHistogram* keyed = static_cast<KeyedHistogram*>(JS_GetPrivate(obj));
+  if (!keyed) {
+    return false;
+  }
+
+  JS::CallArgs args = CallArgsFromVp(argc, vp);
+  if (args.length() < 1) {
+    JS_ReportErrorASCII(cx, "Expected one argument");
+    return false;
+  }
+
+  nsAutoJSString key;
+  if (!args[0].isString() || !key.init(cx, args[0])) {
+    JS_ReportErrorASCII(cx, "Not a string");
+    return false;
+  }
+
+  const uint32_t type = keyed->GetHistogramType();
+
+  // If we don't have an argument for the count histogram, assume an increment of 1.
+  // Otherwise, make sure to run some sanity checks on the argument.
+  int32_t value = 1;
+  if ((type != base::CountHistogram::COUNT_HISTOGRAM) || (args.length() == 2)) {
+    if (args.length() < 2) {
+      JS_ReportErrorASCII(cx, "Expected two arguments for this histogram type");
+      return false;
+    }
+
+    if (!(args[1].isNumber() || args[1].isBoolean())) {
+      JS_ReportErrorASCII(cx, "Not a number");
+      return false;
+    }
+
+    if (!JS::ToInt32(cx, args[1], &value)) {
+      return false;
+    }
+  }
+
+  {
+    // Take the lock only for the accumulation itself; holding it across
+    // the JS_ calls above could deadlock (see section comment).
+    StaticMutexAutoLock locker(gTelemetryHistogramMutex);
+    internal_Accumulate(*keyed, NS_ConvertUTF16toUTF8(key), value);
+  }
+  return true;
+}
+
+// JS-exposed "keys()": reflects the list of key names this keyed histogram
+// has accumulated data for into the JS return value.
+bool
+internal_JSKeyedHistogram_Keys(JSContext *cx, unsigned argc, JS::Value *vp)
+{
+  JSObject *obj = JS_THIS_OBJECT(cx, vp);
+  if (!obj) {
+    return false;
+  }
+
+  KeyedHistogram* keyed = static_cast<KeyedHistogram*>(JS_GetPrivate(obj));
+  if (!keyed) {
+    return false;
+  }
+
+  JS::CallArgs args = JS::CallArgsFromVp(argc, vp);
+  return NS_SUCCEEDED(keyed->GetJSKeys(cx, args));
+}
+
+// JS-exposed "snapshot([key])": session snapshot (subsession=false,
+// clearSubsession=false).
+bool
+internal_JSKeyedHistogram_Snapshot(JSContext *cx, unsigned argc, JS::Value *vp)
+{
+  return internal_KeyedHistogram_SnapshotImpl(cx, argc, vp, false, false);
+}
+
+#if !defined(MOZ_WIDGET_GONK) && !defined(MOZ_WIDGET_ANDROID)
+// JS-exposed "subsessionSnapshot([key])": subsession snapshot without
+// clearing the subsession data. Not built on GONK/Android.
+bool
+internal_JSKeyedHistogram_SubsessionSnapshot(JSContext *cx,
+                                             unsigned argc, JS::Value *vp)
+{
+  return internal_KeyedHistogram_SnapshotImpl(cx, argc, vp, true, false);
+}
+#endif
+
+#if !defined(MOZ_WIDGET_GONK) && !defined(MOZ_WIDGET_ANDROID)
+// JS-exposed "snapshotSubsessionAndClear()": snapshots the subsession data
+// for all keys and then clears the subsession state. Per-key snapshots are
+// not supported, so any argument is an error.
+bool
+internal_JSKeyedHistogram_SnapshotSubsessionAndClear(JSContext *cx,
+                                                     unsigned argc,
+                                                     JS::Value *vp)
+{
+  JS::CallArgs args = JS::CallArgsFromVp(argc, vp);
+  if (args.length() != 0) {
+    JS_ReportErrorASCII(cx, "No key arguments supported for snapshotSubsessionAndClear");
+    // JS_ReportErrorASCII sets a pending exception; we must return false
+    // here rather than falling through and snapshotting anyway.
+    return false;
+  }
+
+  return internal_KeyedHistogram_SnapshotImpl(cx, argc, vp, true, true);
+}
+#endif
+
+// JS-exposed "clear([onlySubsession])": clears the accumulated data.
+// On platforms with subsession support, a truthy argument clears only the
+// subsession data; otherwise both session and subsession data are cleared.
+bool
+internal_JSKeyedHistogram_Clear(JSContext *cx, unsigned argc, JS::Value *vp)
+{
+  JSObject *obj = JS_THIS_OBJECT(cx, vp);
+  if (!obj) {
+    return false;
+  }
+
+  KeyedHistogram* keyed = static_cast<KeyedHistogram*>(JS_GetPrivate(obj));
+  if (!keyed) {
+    return false;
+  }
+
+#if !defined(MOZ_WIDGET_GONK) && !defined(MOZ_WIDGET_ANDROID)
+  bool onlySubsession = false;
+  JS::CallArgs args = JS::CallArgsFromVp(argc, vp);
+
+  if (args.length() >= 1) {
+    // Accept numbers too; JS::ToBoolean coerces the value.
+    if (!(args[0].isNumber() || args[0].isBoolean())) {
+      JS_ReportErrorASCII(cx, "Not a boolean");
+      return false;
+    }
+
+    onlySubsession = JS::ToBoolean(args[0]);
+  }
+
+  keyed->Clear(onlySubsession);
+#else
+  // No subsession support on GONK/Android: always clear everything.
+  keyed->Clear(false);
+#endif
+  return true;
+}
+
+// JS-exposed "dataset()": returns the dataset (opt-in/opt-out channel)
+// this keyed histogram records into.
+bool
+internal_JSKeyedHistogram_Dataset(JSContext *cx, unsigned argc, JS::Value *vp)
+{
+  JS::CallArgs args = JS::CallArgsFromVp(argc, vp);
+  JSObject *obj = JS_THIS_OBJECT(cx, vp);
+  if (!obj) {
+    return false;
+  }
+
+  KeyedHistogram* keyed = static_cast<KeyedHistogram*>(JS_GetPrivate(obj));
+  if (!keyed) {
+    return false;
+  }
+
+  uint32_t dataset = nsITelemetry::DATASET_RELEASE_CHANNEL_OPTIN;
+  nsresult rv = keyed->GetDataset(&dataset);
+  if (NS_FAILED(rv)) {
+    return false;
+  }
+
+  args.rval().setNumber(dataset);
+  return true;
+}
+
+// NOTE: Runs without protection from |gTelemetryHistogramMutex|.
+// See comment at the top of this section.
+// NOTE: Runs without protection from |gTelemetryHistogramMutex|.
+// See comment at the top of this section.
+//
+// Wraps |h| in a fresh JS object carrying the KeyedHistogram pointer as
+// its private slot, and defines the JS-callable methods on it.
+nsresult
+internal_WrapAndReturnKeyedHistogram(KeyedHistogram *h, JSContext *cx,
+                                     JS::MutableHandle<JS::Value> ret)
+{
+  static const JSClass JSHistogram_class = {
+    "JSKeyedHistogram",  /* name */
+    JSCLASS_HAS_PRIVATE  /* flags */
+  };
+
+  JS::Rooted<JSObject*> obj(cx, JS_NewObject(cx, &JSHistogram_class));
+  if (!obj)
+    return NS_ERROR_FAILURE;
+  // The 7 functions that are wrapped up here are eventually called
+  // by the same thread that runs this function.
+  if (!(JS_DefineFunction(cx, obj, "add", internal_JSKeyedHistogram_Add, 2, 0)
+        && JS_DefineFunction(cx, obj, "snapshot",
+                             internal_JSKeyedHistogram_Snapshot, 1, 0)
+#if !defined(MOZ_WIDGET_GONK) && !defined(MOZ_WIDGET_ANDROID)
+        && JS_DefineFunction(cx, obj, "subsessionSnapshot",
+                             internal_JSKeyedHistogram_SubsessionSnapshot, 1, 0)
+        && JS_DefineFunction(cx, obj, "snapshotSubsessionAndClear",
+                             internal_JSKeyedHistogram_SnapshotSubsessionAndClear, 0, 0)
+#endif
+        && JS_DefineFunction(cx, obj, "keys",
+                             internal_JSKeyedHistogram_Keys, 0, 0)
+        && JS_DefineFunction(cx, obj, "clear",
+                             internal_JSKeyedHistogram_Clear, 0, 0)
+        && JS_DefineFunction(cx, obj, "dataset",
+                             internal_JSKeyedHistogram_Dataset, 0, 0))) {
+    return NS_ERROR_FAILURE;
+  }
+
+  // The methods above retrieve the KeyedHistogram from this private slot.
+  JS_SetPrivate(obj, h);
+  ret.setObject(*obj);
+  return NS_OK;
+}
+
+} // namespace
+
+
+////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////
+//
+// EXTERNALLY VISIBLE FUNCTIONS in namespace TelemetryHistogram::
+
+// All of these functions are actually in namespace TelemetryHistogram::,
+// but the ::TelemetryHistogram prefix is given explicitly. This is
+// because it is critical to see which calls from these functions are
+// to another function in this interface. Mis-identifying "inwards
+// calls" from "calls to another function in this interface" will lead
+// to deadlocking and/or races. See comments at the top of the file
+// for further (important!) details.
+
+// Create and destroy the singleton StatisticsRecorder object.
+void TelemetryHistogram::CreateStatisticsRecorder()
+{
+  StaticMutexAutoLock locker(gTelemetryHistogramMutex);
+  MOZ_ASSERT(!gStatisticsRecorder);
+  gStatisticsRecorder = new base::StatisticsRecorder();
+}
+
+// Tears down the singleton StatisticsRecorder. The null check after the
+// assert keeps release builds safe if this is ever called twice.
+void TelemetryHistogram::DestroyStatisticsRecorder()
+{
+  StaticMutexAutoLock locker(gTelemetryHistogramMutex);
+  MOZ_ASSERT(gStatisticsRecorder);
+  if (gStatisticsRecorder) {
+    delete gStatisticsRecorder;
+    gStatisticsRecorder = nullptr;
+  }
+}
+
+// One-time global initialization: records the recording flags, builds the
+// histogram name->id lookup cache, and pre-creates all registered keyed
+// histograms (plus their content/GPU variants in the parent process).
+void TelemetryHistogram::InitializeGlobalState(bool canRecordBase,
+                                               bool canRecordExtended)
+{
+  StaticMutexAutoLock locker(gTelemetryHistogramMutex);
+  MOZ_ASSERT(!gInitDone, "TelemetryHistogram::InitializeGlobalState "
+             "may only be called once");
+
+  gCanRecordBase = canRecordBase;
+  gCanRecordExtended = canRecordExtended;
+
+  // gHistogramMap should have been pre-sized correctly at the
+  // declaration point further up in this file.
+
+  // Populate the static histogram name->id cache.
+  // Note that the histogram names are statically allocated.
+  for (uint32_t i = 0; i < mozilla::Telemetry::HistogramCount; i++) {
+    CharPtrEntryType *entry = gHistogramMap.PutEntry(gHistograms[i].id());
+    entry->mData = (mozilla::Telemetry::ID) i;
+  }
+
+#ifdef DEBUG
+  // The cache is read-only from here on; catch accidental writes in debug.
+  gHistogramMap.MarkImmutable();
+#endif
+
+  mozilla::PodArrayZero(gCorruptHistograms);
+
+  // Create registered keyed histograms
+  for (size_t i = 0; i < mozilla::ArrayLength(gHistograms); ++i) {
+    const HistogramInfo& h = gHistograms[i];
+    if (!h.keyed) {
+      continue;
+    }
+
+    const nsDependentCString id(h.id());
+    const nsDependentCString expiration(h.expiration());
+    gKeyedHistograms.Put(id, new KeyedHistogram(id, expiration, h.histogramType,
+                                                h.min, h.max, h.bucketCount, h.dataset));
+    if (XRE_IsParentProcess()) {
+      // We must create registered child keyed histograms as well or else the
+      // same code in TelemetrySession.jsm that fails without parent keyed
+      // histograms will fail without child keyed histograms.
+      nsCString contentId(id);
+      contentId.AppendLiteral(CONTENT_HISTOGRAM_SUFFIX);
+      gKeyedHistograms.Put(contentId,
+                           new KeyedHistogram(id, expiration, h.histogramType,
+                                              h.min, h.max, h.bucketCount, h.dataset));
+
+
+      nsCString gpuId(id);
+      gpuId.AppendLiteral(GPU_HISTOGRAM_SUFFIX);
+      gKeyedHistograms.Put(gpuId,
+                           new KeyedHistogram(id, expiration, h.histogramType,
+                                              h.min, h.max, h.bucketCount, h.dataset));
+    }
+  }
+
+  // Some Telemetry histograms depend on the value of C++ constants and hardcode
+  // their values in Histograms.json.
+  // We add static asserts here for those values to match so that future changes
+  // don't go unnoticed.
+  // TODO: Compare explicitly with gHistograms[<histogram id>].bucketCount here
+  // once we can make gHistograms constexpr (requires VS2015).
+  static_assert((JS::gcreason::NUM_TELEMETRY_REASONS == 100),
+      "NUM_TELEMETRY_REASONS is assumed to be a fixed value in Histograms.json."
+      " If this was an intentional change, update this assert with its value "
+      "and update the n_values for the following in Histograms.json: "
+      "GC_MINOR_REASON, GC_MINOR_REASON_LONG, GC_REASON_2");
+  static_assert((mozilla::StartupTimeline::MAX_EVENT_ID == 16),
+      "MAX_EVENT_ID is assumed to be a fixed value in Histograms.json. If this"
+      " was an intentional change, update this assert with its value and update"
+      " the n_values for the following in Histograms.json:"
+      " STARTUP_MEASUREMENT_ERRORS");
+
+  gInitDone = true;
+}
+
+// Resets all global Telemetry histogram state, including the batched IPC
+// accumulation buffers and the IPC flush timer.
+void TelemetryHistogram::DeInitializeGlobalState()
+{
+  StaticMutexAutoLock locker(gTelemetryHistogramMutex);
+  gCanRecordBase = false;
+  gCanRecordExtended = false;
+  gHistogramMap.Clear();
+  gKeyedHistograms.Clear();
+  gAddonMap.Clear();
+  gAccumulations = nullptr;
+  gKeyedAccumulations = nullptr;
+  if (gIPCTimer) {
+    NS_RELEASE(gIPCTimer);
+  }
+  gInitDone = false;
+}
+
+#ifdef DEBUG
+// Debug-only query used to assert initialization ordering elsewhere.
+bool TelemetryHistogram::GlobalStateHasBeenInitialized() {
+  StaticMutexAutoLock locker(gTelemetryHistogramMutex);
+  return gInitDone;
+}
+#endif
+
+// Whether base (opt-out) telemetry recording is currently enabled.
+bool
+TelemetryHistogram::CanRecordBase() {
+  StaticMutexAutoLock locker(gTelemetryHistogramMutex);
+  return internal_CanRecordBase();
+}
+
+// Enables/disables base (opt-out) telemetry recording.
+void
+TelemetryHistogram::SetCanRecordBase(bool b) {
+  StaticMutexAutoLock locker(gTelemetryHistogramMutex);
+  gCanRecordBase = b;
+}
+
+// Whether extended (opt-in) telemetry recording is currently enabled.
+bool
+TelemetryHistogram::CanRecordExtended() {
+  StaticMutexAutoLock locker(gTelemetryHistogramMutex);
+  return internal_CanRecordExtended();
+}
+
+// Enables/disables extended (opt-in) telemetry recording.
+void
+TelemetryHistogram::SetCanRecordExtended(bool b) {
+  StaticMutexAutoLock locker(gTelemetryHistogramMutex);
+  gCanRecordExtended = b;
+}
+
+
+// Disables recording for all histograms listed in
+// kRecordingInitiallyDisabledIDs; they can be re-enabled individually later.
+void
+TelemetryHistogram::InitHistogramRecordingEnabled()
+{
+  StaticMutexAutoLock locker(gTelemetryHistogramMutex);
+  const size_t length = mozilla::ArrayLength(kRecordingInitiallyDisabledIDs);
+  for (size_t i = 0; i < length; i++) {
+    internal_SetHistogramRecordingEnabled(kRecordingInitiallyDisabledIDs[i],
+                                          false);
+  }
+}
+
+// Enables/disables recording for the histogram identified by enum id.
+// Invalid ids are rejected (assert in debug, no-op in release).
+void
+TelemetryHistogram::SetHistogramRecordingEnabled(mozilla::Telemetry::ID aID,
+                                                 bool aEnabled)
+{
+  if (NS_WARN_IF(!internal_IsHistogramEnumId(aID))) {
+    MOZ_ASSERT_UNREACHABLE("Histogram usage requires valid ids.");
+    return;
+  }
+
+  StaticMutexAutoLock locker(gTelemetryHistogramMutex);
+  internal_SetHistogramRecordingEnabled(aID, aEnabled);
+}
+
+
+// Enables/disables recording for the histogram identified by name.
+// Tries plain histograms first, then keyed histograms; fails if the name
+// matches neither.
+nsresult
+TelemetryHistogram::SetHistogramRecordingEnabled(const nsACString &id,
+                                                 bool aEnabled)
+{
+  StaticMutexAutoLock locker(gTelemetryHistogramMutex);
+  Histogram *h;
+  nsresult rv = internal_GetHistogramByName(id, &h);
+  if (NS_SUCCEEDED(rv)) {
+    h->SetRecordingEnabled(aEnabled);
+    return NS_OK;
+  }
+
+  KeyedHistogram* keyed = internal_GetKeyedHistogramById(id);
+  if (keyed) {
+    keyed->SetRecordingEnabled(aEnabled);
+    return NS_OK;
+  }
+
+  return NS_ERROR_FAILURE;
+}
+
+
+// Accumulates a sample into the histogram identified by enum id.
+// Invalid ids are rejected (assert in debug, no-op in release).
+void
+TelemetryHistogram::Accumulate(mozilla::Telemetry::ID aID,
+                               uint32_t aSample)
+{
+  if (NS_WARN_IF(!internal_IsHistogramEnumId(aID))) {
+    MOZ_ASSERT_UNREACHABLE("Histogram usage requires valid ids.");
+    return;
+  }
+
+  StaticMutexAutoLock locker(gTelemetryHistogramMutex);
+  internal_Accumulate(aID, aSample);
+}
+
+// Accumulates a sample under |aKey| into the keyed histogram identified by
+// enum id. Invalid ids are rejected (assert in debug, no-op in release).
+void
+TelemetryHistogram::Accumulate(mozilla::Telemetry::ID aID,
+                               const nsCString& aKey, uint32_t aSample)
+{
+  if (NS_WARN_IF(!internal_IsHistogramEnumId(aID))) {
+    MOZ_ASSERT_UNREACHABLE("Histogram usage requires valid ids.");
+    return;
+  }
+
+  StaticMutexAutoLock locker(gTelemetryHistogramMutex);
+  internal_Accumulate(aID, aKey, aSample);
+}
+
+// Accumulates a sample into the histogram identified by name.
+// Unknown names and disabled recording are silently ignored, matching the
+// keyed-name overload below (same NS_SUCCEEDED guard pattern).
+void
+TelemetryHistogram::Accumulate(const char* name, uint32_t sample)
+{
+  StaticMutexAutoLock locker(gTelemetryHistogramMutex);
+  if (!internal_CanRecordBase()) {
+    return;
+  }
+  mozilla::Telemetry::ID id;
+  nsresult rv = internal_GetHistogramEnumId(name, &id);
+  if (NS_SUCCEEDED(rv)) {
+    internal_Accumulate(id, sample);
+  }
+}
+
+// Accumulates a sample under |key| into the keyed histogram identified by
+// name. Unknown names and disabled recording are silently ignored.
+void
+TelemetryHistogram::Accumulate(const char* name,
+                               const nsCString& key, uint32_t sample)
+{
+  StaticMutexAutoLock locker(gTelemetryHistogramMutex);
+  if (!internal_CanRecordBase()) {
+    return;
+  }
+  mozilla::Telemetry::ID id;
+  nsresult rv = internal_GetHistogramEnumId(name, &id);
+  if (NS_SUCCEEDED(rv)) {
+    internal_Accumulate(id, key, sample);
+  }
+}
+
+// Accumulates into a categorical histogram by label name: the label is
+// translated to its numeric index via the histogram's label table, and
+// unknown labels are silently dropped.
+void
+TelemetryHistogram::AccumulateCategorical(mozilla::Telemetry::ID aId,
+                                          const nsCString& label)
+{
+  if (NS_WARN_IF(!internal_IsHistogramEnumId(aId))) {
+    MOZ_ASSERT_UNREACHABLE("Histogram usage requires valid ids.");
+    return;
+  }
+
+  StaticMutexAutoLock locker(gTelemetryHistogramMutex);
+  if (!internal_CanRecordBase()) {
+    return;
+  }
+  uint32_t labelId = 0;
+  if (NS_FAILED(gHistograms[aId].label_id(label.get(), &labelId))) {
+    return;
+  }
+  internal_Accumulate(aId, labelId);
+}
+
+// Parent-process entry point: folds a batch of plain-histogram
+// accumulations received over IPC from a child process into the
+// corresponding per-process histograms. Entries with invalid ids are
+// skipped individually.
+void
+TelemetryHistogram::AccumulateChild(GeckoProcessType aProcessType,
+                                    const nsTArray<Accumulation>& aAccumulations)
+{
+  MOZ_ASSERT(XRE_IsParentProcess());
+
+  StaticMutexAutoLock locker(gTelemetryHistogramMutex);
+  if (!internal_CanRecordBase()) {
+    return;
+  }
+  for (uint32_t i = 0; i < aAccumulations.Length(); ++i) {
+    if (NS_WARN_IF(!internal_IsHistogramEnumId(aAccumulations[i].mId))) {
+      MOZ_ASSERT_UNREACHABLE("Histogram usage requires valid ids.");
+      continue;
+    }
+    internal_AccumulateChild(aProcessType, aAccumulations[i].mId, aAccumulations[i].mSample);
+  }
+}
+
+// Parent-process entry point: folds a batch of keyed-histogram
+// accumulations received over IPC from a child process into the
+// corresponding per-process keyed histograms. Entries with invalid ids
+// are skipped individually.
+void
+TelemetryHistogram::AccumulateChildKeyed(GeckoProcessType aProcessType,
+                                         const nsTArray<KeyedAccumulation>& aAccumulations)
+{
+  MOZ_ASSERT(XRE_IsParentProcess());
+  StaticMutexAutoLock locker(gTelemetryHistogramMutex);
+  if (!internal_CanRecordBase()) {
+    return;
+  }
+  for (uint32_t i = 0; i < aAccumulations.Length(); ++i) {
+    if (NS_WARN_IF(!internal_IsHistogramEnumId(aAccumulations[i].mId))) {
+      MOZ_ASSERT_UNREACHABLE("Histogram usage requires valid ids.");
+      continue;
+    }
+    internal_AccumulateChildKeyed(aProcessType,
+                                  aAccumulations[i].mId,
+                                  aAccumulations[i].mKey,
+                                  aAccumulations[i].mSample);
+  }
+}
+
+// Looks up a histogram by name and returns a JS wrapper object for it.
+// The lookup is done under the mutex; the JS wrapping deliberately happens
+// after the lock is released (see the section comment above).
+nsresult
+TelemetryHistogram::GetHistogramById(const nsACString &name, JSContext *cx,
+                                     JS::MutableHandle<JS::Value> ret)
+{
+  Histogram *h = nullptr;
+  {
+    StaticMutexAutoLock locker(gTelemetryHistogramMutex);
+    nsresult rv = internal_GetHistogramByName(name, &h);
+    if (NS_FAILED(rv))
+      return rv;
+  }
+  // Runs without protection from |gTelemetryHistogramMutex|
+  return internal_WrapAndReturnHistogram(h, cx, ret);
+}
+
+// Looks up a keyed histogram by name and returns a JS wrapper object for
+// it. The lookup is done under the mutex; the JS wrapping deliberately
+// happens after the lock is released (see the section comment above).
+nsresult
+TelemetryHistogram::GetKeyedHistogramById(const nsACString &name,
+                                          JSContext *cx,
+                                          JS::MutableHandle<JS::Value> ret)
+{
+  KeyedHistogram* keyed = nullptr;
+  {
+    StaticMutexAutoLock locker(gTelemetryHistogramMutex);
+    if (!gKeyedHistograms.Get(name, &keyed)) {
+      return NS_ERROR_FAILURE;
+    }
+  }
+  // Runs without protection from |gTelemetryHistogramMutex|
+  return internal_WrapAndReturnKeyedHistogram(keyed, cx, ret);
+}
+
+// Returns the statically-allocated name for a histogram enum id, or
+// nullptr for invalid ids.
+const char*
+TelemetryHistogram::GetHistogramName(mozilla::Telemetry::ID id)
+{
+  if (NS_WARN_IF(!internal_IsHistogramEnumId(id))) {
+    MOZ_ASSERT_UNREACHABLE("Histogram usage requires valid ids.");
+    return nullptr;
+  }
+
+  StaticMutexAutoLock locker(gTelemetryHistogramMutex);
+  const HistogramInfo& h = gHistograms[id];
+  return h.id();
+}
+
+// Reflects all non-keyed histograms into a JS object keyed by histogram
+// name. If |subsession| is set, subsession histograms are reflected
+// instead, and |clearSubsession| optionally clears them afterwards.
+nsresult
+TelemetryHistogram::CreateHistogramSnapshots(JSContext *cx,
+                                             JS::MutableHandle<JS::Value> ret,
+                                             bool subsession,
+                                             bool clearSubsession)
+{
+  // Runs without protection from |gTelemetryHistogramMutex|
+  JS::Rooted<JSObject*> root_obj(cx, JS_NewPlainObject(cx));
+  if (!root_obj)
+    return NS_ERROR_FAILURE;
+  ret.setObject(*root_obj);
+
+  // Include the GPU process in histogram snapshots only if we actually tried
+  // to launch a process for it.
+  bool includeGPUProcess = false;
+  if (auto gpm = mozilla::gfx::GPUProcessManager::Get()) {
+    includeGPUProcess = gpm->AttemptedGPUProcess();
+  }
+
+  // Ensure that all the HISTOGRAM_FLAG & HISTOGRAM_COUNT histograms have
+  // been created, so that their values are snapshotted.
+  for (size_t i = 0; i < mozilla::Telemetry::HistogramCount; ++i) {
+    if (gHistograms[i].keyed) {
+      continue;
+    }
+    const uint32_t type = gHistograms[i].histogramType;
+    if (type == nsITelemetry::HISTOGRAM_FLAG ||
+        type == nsITelemetry::HISTOGRAM_COUNT) {
+      Histogram *h;
+      mozilla::DebugOnly<nsresult> rv;
+      mozilla::Telemetry::ID id = mozilla::Telemetry::ID(i);
+
+      rv = internal_GetHistogramByEnumId(id, &h, GeckoProcessType_Default);
+      MOZ_ASSERT(NS_SUCCEEDED(rv));
+
+      rv = internal_GetHistogramByEnumId(id, &h, GeckoProcessType_Content);
+      MOZ_ASSERT(NS_SUCCEEDED(rv));
+
+      if (includeGPUProcess) {
+        rv = internal_GetHistogramByEnumId(id, &h, GeckoProcessType_GPU);
+        MOZ_ASSERT(NS_SUCCEEDED(rv));
+      }
+    }
+  }
+
+  StatisticsRecorder::Histograms hs;
+  StatisticsRecorder::GetHistograms(&hs);
+
+  // We identify corrupt histograms first, rather than interspersing it
+  // in the loop below, to ensure that our corruption statistics don't
+  // depend on histogram enumeration order.
+  //
+  // Of course, we hope that all of these corruption-statistics
+  // histograms are not themselves corrupt...
+  internal_IdentifyCorruptHistograms(hs);
+
+  // OK, now we can actually reflect things.
+  JS::Rooted<JSObject*> hobj(cx);
+  for (HistogramIterator it = hs.begin(); it != hs.end(); ++it) {
+    Histogram *h = *it;
+    if (!internal_ShouldReflectHistogram(h) || internal_IsEmpty(h) ||
+        internal_IsExpired(h)) {
+      continue;
+    }
+
+    // Keep the session histogram for naming the property; |h| may be
+    // swapped for its subsession twin below.
+    Histogram* original = h;
+#if !defined(MOZ_WIDGET_GONK) && !defined(MOZ_WIDGET_ANDROID)
+    if (subsession) {
+      h = internal_GetSubsessionHistogram(*h);
+      if (!h) {
+        continue;
+      }
+    }
+#endif
+
+    hobj = JS_NewPlainObject(cx);
+    if (!hobj) {
+      return NS_ERROR_FAILURE;
+    }
+    switch (internal_ReflectHistogramSnapshot(cx, hobj, h)) {
+    case REFLECT_CORRUPT:
+      // We can still hit this case even if ShouldReflectHistograms
+      // returns true.  The histogram lies outside of our control
+      // somehow; just skip it.
+      continue;
+    case REFLECT_FAILURE:
+      return NS_ERROR_FAILURE;
+    case REFLECT_OK:
+      if (!JS_DefineProperty(cx, root_obj, original->histogram_name().c_str(),
+                             hobj, JSPROP_ENUMERATE)) {
+        return NS_ERROR_FAILURE;
+      }
+    }
+
+#if !defined(MOZ_WIDGET_GONK) && !defined(MOZ_WIDGET_ANDROID)
+    if (subsession && clearSubsession) {
+      h->Clear();
+    }
+#endif
+  }
+  return NS_OK;
+}
+
+// Returns the ids of all registered non-keyed histograms for |aDataset|.
+nsresult
+TelemetryHistogram::RegisteredHistograms(uint32_t aDataset, uint32_t *aCount,
+                                         char*** aHistograms)
+{
+  StaticMutexAutoLock locker(gTelemetryHistogramMutex);
+  return internal_GetRegisteredHistogramIds(false,
+                                            aDataset, aCount, aHistograms);
+}
+
+// Returns the ids of all registered keyed histograms for |aDataset|.
+nsresult
+TelemetryHistogram::RegisteredKeyedHistograms(uint32_t aDataset,
+                                              uint32_t *aCount,
+                                              char*** aHistograms)
+{
+  StaticMutexAutoLock locker(gTelemetryHistogramMutex);
+  return internal_GetRegisteredHistogramIds(true,
+                                            aDataset, aCount, aHistograms);
+}
+
+// Reflects a session snapshot of every keyed histogram into a JS object
+// keyed by histogram name.
+nsresult
+TelemetryHistogram::GetKeyedHistogramSnapshots(JSContext *cx,
+                                               JS::MutableHandle<JS::Value> ret)
+{
+  // Runs without protection from |gTelemetryHistogramMutex|
+  JS::Rooted<JSObject*> obj(cx, JS_NewPlainObject(cx));
+  if (!obj) {
+    return NS_ERROR_FAILURE;
+  }
+
+  for (auto iter = gKeyedHistograms.Iter(); !iter.Done(); iter.Next()) {
+    JS::RootedObject snapshot(cx, JS_NewPlainObject(cx));
+    if (!snapshot) {
+      return NS_ERROR_FAILURE;
+    }
+
+    // Session data only: subsession=false, clearSubsession=false.
+    if (!NS_SUCCEEDED(iter.Data()->GetJSSnapshot(cx, snapshot, false, false))) {
+      return NS_ERROR_FAILURE;
+    }
+
+    if (!JS_DefineProperty(cx, obj, PromiseFlatCString(iter.Key()).get(),
+                           snapshot, JSPROP_ENUMERATE)) {
+      return NS_ERROR_FAILURE;
+    }
+  }
+
+  ret.setObject(*obj);
+  return NS_OK;
+}
+
+// Registers an addon-defined histogram under (addon id, histogram name).
+// Exponential/linear types require all three numeric parameters and pass
+// sanity checks; other types get fixed dummy bounds. The underlying
+// histogram itself is created lazily in GetAddonHistogram.
+nsresult
+TelemetryHistogram::RegisterAddonHistogram(const nsACString &id,
+                                           const nsACString &name,
+                                           uint32_t histogramType,
+                                           uint32_t min, uint32_t max,
+                                           uint32_t bucketCount,
+                                           uint8_t optArgCount)
+{
+  StaticMutexAutoLock locker(gTelemetryHistogramMutex);
+  if (histogramType == nsITelemetry::HISTOGRAM_EXPONENTIAL ||
+      histogramType == nsITelemetry::HISTOGRAM_LINEAR) {
+    if (optArgCount != 3) {
+      return NS_ERROR_INVALID_ARG;
+    }
+
+    // Sanity checks for histogram parameters.
+    if (min >= max)
+      return NS_ERROR_ILLEGAL_VALUE;
+
+    if (bucketCount <= 2)
+      return NS_ERROR_ILLEGAL_VALUE;
+
+    if (min < 1)
+      return NS_ERROR_ILLEGAL_VALUE;
+  } else {
+    min = 1;
+    max = 2;
+    bucketCount = 3;
+  }
+
+  // Create the per-addon map on first registration for this addon id.
+  AddonEntryType *addonEntry = gAddonMap.GetEntry(id);
+  if (!addonEntry) {
+    addonEntry = gAddonMap.PutEntry(id);
+    if (MOZ_UNLIKELY(!addonEntry)) {
+      return NS_ERROR_OUT_OF_MEMORY;
+    }
+    addonEntry->mData = new AddonHistogramMapType();
+  }
+
+  AddonHistogramMapType *histogramMap = addonEntry->mData;
+  AddonHistogramEntryType *histogramEntry = histogramMap->GetEntry(name);
+  // Can't re-register the same histogram.
+  if (histogramEntry) {
+    return NS_ERROR_FAILURE;
+  }
+
+  histogramEntry = histogramMap->PutEntry(name);
+  if (MOZ_UNLIKELY(!histogramEntry)) {
+    return NS_ERROR_OUT_OF_MEMORY;
+  }
+
+  // Only the parameters are stored here; info.h stays null until the
+  // histogram is first requested.
+  AddonHistogramInfo &info = histogramEntry->mData;
+  info.min = min;
+  info.max = max;
+  info.bucketCount = bucketCount;
+  info.histogramType = histogramType;
+
+  return NS_OK;
+}
+
+// Returns a JS wrapper for an addon histogram, lazily creating the
+// underlying histogram on first access. The lookup/creation happens under
+// the mutex; JS wrapping happens after the lock is released.
+nsresult
+TelemetryHistogram::GetAddonHistogram(const nsACString &id,
+                                      const nsACString &name,
+                                      JSContext *cx,
+                                      JS::MutableHandle<JS::Value> ret)
+{
+  AddonHistogramInfo* info = nullptr;
+  {
+    StaticMutexAutoLock locker(gTelemetryHistogramMutex);
+    AddonEntryType *addonEntry = gAddonMap.GetEntry(id);
+    // The given id has not been registered.
+    if (!addonEntry) {
+      return NS_ERROR_INVALID_ARG;
+    }
+
+    AddonHistogramMapType *histogramMap = addonEntry->mData;
+    AddonHistogramEntryType *histogramEntry = histogramMap->GetEntry(name);
+    // The given histogram name has not been registered.
+    if (!histogramEntry) {
+      return NS_ERROR_INVALID_ARG;
+    }
+
+    info = &histogramEntry->mData;
+    if (!info->h) {
+      // First access: create the histogram from the registered parameters.
+      nsAutoCString actualName;
+      internal_AddonHistogramName(id, name, actualName);
+      if (!internal_CreateHistogramForAddon(actualName, *info)) {
+        return NS_ERROR_FAILURE;
+      }
+    }
+  }
+
+  // Runs without protection from |gTelemetryHistogramMutex|
+  return internal_WrapAndReturnHistogram(info->h, cx, ret);
+}
+
+// Removes all histogram registrations for the given addon id.
+nsresult
+TelemetryHistogram::UnregisterAddonHistograms(const nsACString &id)
+{
+  StaticMutexAutoLock locker(gTelemetryHistogramMutex);
+  AddonEntryType *addonEntry = gAddonMap.GetEntry(id);
+  if (addonEntry) {
+    // Histogram's destructor is private, so this is the best we can do.
+    // The histograms the addon created *will* stick around, but they
+    // will be deleted if and when the addon registers histograms with
+    // the same names.
+    delete addonEntry->mData;
+    gAddonMap.RemoveEntry(addonEntry);
+  }
+
+  return NS_OK;
+}
+
+// Reflects snapshots of every registered addon histogram into a JS object
+// keyed by addon id.
+nsresult
+TelemetryHistogram::GetAddonHistogramSnapshots(JSContext *cx,
+                                               JS::MutableHandle<JS::Value> ret)
+{
+  // Runs without protection from |gTelemetryHistogramMutex|
+  JS::Rooted<JSObject*> obj(cx, JS_NewPlainObject(cx));
+  if (!obj) {
+    return NS_ERROR_FAILURE;
+  }
+
+  if (!gAddonMap.ReflectIntoJS(internal_AddonReflector, cx, obj)) {
+    return NS_ERROR_FAILURE;
+  }
+  ret.setObject(*obj);
+  return NS_OK;
+}
+
+// Memory reporter helper: shallow size of the addon and name-cache maps.
+size_t
+TelemetryHistogram::GetMapShallowSizesOfExcludingThis(mozilla::MallocSizeOf
+                                                      aMallocSizeOf)
+{
+  StaticMutexAutoLock locker(gTelemetryHistogramMutex);
+  return gAddonMap.ShallowSizeOfExcludingThis(aMallocSizeOf) +
+         gHistogramMap.ShallowSizeOfExcludingThis(aMallocSizeOf);
+}
+
+// Memory reporter helper: total size of all histograms known to the
+// StatisticsRecorder.
+size_t
+TelemetryHistogram::GetHistogramSizesofIncludingThis(mozilla::MallocSizeOf
+                                                     aMallocSizeOf)
+{
+  StaticMutexAutoLock locker(gTelemetryHistogramMutex);
+  StatisticsRecorder::Histograms hs;
+  StatisticsRecorder::GetHistograms(&hs);
+  size_t n = 0;
+  for (HistogramIterator it = hs.begin(); it != hs.end(); ++it) {
+    Histogram *h = *it;
+    n += h->SizeOfIncludingThis(aMallocSizeOf);
+  }
+  return n;
+}
+
+// This method takes the lock only to double-buffer the batched telemetry.
+// It releases the lock before calling out to IPC code which can (and does)
+// Accumulate (which would deadlock)
+//
+// To ensure we don't loop IPCTimerFired->AccumulateChild->arm timer, we don't
+// unset gIPCTimerArmed until the IPC completes
+//
+// This function must be called on the main thread, otherwise IPC will fail.
+void
+TelemetryHistogram::IPCTimerFired(nsITimer* aTimer, void* aClosure)
+{
+  MOZ_ASSERT(NS_IsMainThread());
+  nsTArray<Accumulation> accumulationsToSend;
+  nsTArray<KeyedAccumulation> keyedAccumulationsToSend;
+  {
+    // Swap the pending batches out under the lock, then send them after
+    // releasing it (see the deadlock note above).
+    StaticMutexAutoLock locker(gTelemetryHistogramMutex);
+    if (gAccumulations) {
+      accumulationsToSend.SwapElements(*gAccumulations);
+    }
+    if (gKeyedAccumulations) {
+      keyedAccumulationsToSend.SwapElements(*gKeyedAccumulations);
+    }
+  }
+
+  switch (XRE_GetProcessType()) {
+    case GeckoProcessType_Content: {
+      mozilla::dom::ContentChild* contentChild = mozilla::dom::ContentChild::GetSingleton();
+      mozilla::Unused << NS_WARN_IF(!contentChild);
+      if (contentChild) {
+        if (accumulationsToSend.Length()) {
+          mozilla::Unused <<
+            NS_WARN_IF(!contentChild->SendAccumulateChildHistogram(accumulationsToSend));
+        }
+        if (keyedAccumulationsToSend.Length()) {
+          mozilla::Unused <<
+            NS_WARN_IF(!contentChild->SendAccumulateChildKeyedHistogram(keyedAccumulationsToSend));
+        }
+      }
+      break;
+    }
+    case GeckoProcessType_GPU: {
+      if (mozilla::gfx::GPUParent* gpu = mozilla::gfx::GPUParent::GetSingleton()) {
+        if (accumulationsToSend.Length()) {
+          mozilla::Unused << gpu->SendAccumulateChildHistogram(accumulationsToSend);
+        }
+        if (keyedAccumulationsToSend.Length()) {
+          mozilla::Unused << gpu->SendAccumulateChildKeyedHistogram(keyedAccumulationsToSend);
+        }
+      }
+      break;
+    }
+    default:
+      MOZ_ASSERT_UNREACHABLE("Unsupported process type");
+      break;
+  }
+
+  // Only unarm after the IPC send so a re-entrant Accumulate can't re-arm
+  // the timer mid-flush.
+  gIPCTimerArmed = false;
+}
diff --git a/toolkit/components/telemetry/TelemetryHistogram.h b/toolkit/components/telemetry/TelemetryHistogram.h
new file mode 100644
index 000000000..4aa13e259
--- /dev/null
+++ b/toolkit/components/telemetry/TelemetryHistogram.h
@@ -0,0 +1,104 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2; -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef TelemetryHistogram_h__
+#define TelemetryHistogram_h__
+
+#include "mozilla/TelemetryHistogramEnums.h"
+
+#include "mozilla/TelemetryComms.h"
+#include "nsXULAppAPI.h"
+
+// This module is internal to Telemetry. It encapsulates Telemetry's
+// histogram accumulation and storage logic. It should only be used by
+// Telemetry.cpp. These functions should not be used anywhere else.
+// For the public interface to Telemetry functionality, see Telemetry.h.
+
+namespace TelemetryHistogram {
+
+// Lifecycle of the singleton StatisticsRecorder.
+void CreateStatisticsRecorder();
+void DestroyStatisticsRecorder();
+
+// Global state setup/teardown.
+void InitializeGlobalState(bool canRecordBase, bool canRecordExtended);
+void DeInitializeGlobalState();
+#ifdef DEBUG
+bool GlobalStateHasBeenInitialized();
+#endif
+
+// Recording-enabled flags for the base (opt-out) and extended (opt-in)
+// datasets.
+bool CanRecordBase();
+void SetCanRecordBase(bool b);
+bool CanRecordExtended();
+void SetCanRecordExtended(bool b);
+
+void InitHistogramRecordingEnabled();
+void SetHistogramRecordingEnabled(mozilla::Telemetry::ID aID, bool aEnabled);
+
+nsresult SetHistogramRecordingEnabled(const nsACString &id, bool aEnabled);
+
+// Sample accumulation, by enum id or by name, plain or keyed.
+void Accumulate(mozilla::Telemetry::ID aHistogram, uint32_t aSample);
+void Accumulate(mozilla::Telemetry::ID aID, const nsCString& aKey,
+                                            uint32_t aSample);
+void Accumulate(const char* name, uint32_t sample);
+void Accumulate(const char* name, const nsCString& key, uint32_t sample);
+
+void AccumulateCategorical(mozilla::Telemetry::ID aId, const nsCString& aLabel);
+
+// Parent-process entry points for batched accumulations from child
+// processes (see IPCTimerFired in TelemetryHistogram.cpp).
+void AccumulateChild(GeckoProcessType aProcessType,
+                     const nsTArray<mozilla::Telemetry::Accumulation>& aAccumulations);
+void AccumulateChildKeyed(GeckoProcessType aProcessType,
+                          const nsTArray<mozilla::Telemetry::KeyedAccumulation>& aAccumulations);
+
+// JS reflection of histograms and their snapshots.
+nsresult
+GetHistogramById(const nsACString &name, JSContext *cx,
+                 JS::MutableHandle<JS::Value> ret);
+
+nsresult
+GetKeyedHistogramById(const nsACString &name, JSContext *cx,
+                      JS::MutableHandle<JS::Value> ret);
+
+const char*
+GetHistogramName(mozilla::Telemetry::ID id);
+
+nsresult
+CreateHistogramSnapshots(JSContext *cx, JS::MutableHandle<JS::Value> ret,
+                         bool subsession, bool clearSubsession);
+
+nsresult
+RegisteredHistograms(uint32_t aDataset, uint32_t *aCount,
+                     char*** aHistograms);
+
+nsresult
+RegisteredKeyedHistograms(uint32_t aDataset, uint32_t *aCount,
+                          char*** aHistograms);
+
+nsresult
+GetKeyedHistogramSnapshots(JSContext *cx, JS::MutableHandle<JS::Value> ret);
+
+// Addon-defined histograms.
+nsresult
+RegisterAddonHistogram(const nsACString &id, const nsACString &name,
+                       uint32_t histogramType, uint32_t min, uint32_t max,
+                       uint32_t bucketCount, uint8_t optArgCount);
+
+nsresult
+GetAddonHistogram(const nsACString &id, const nsACString &name,
+                  JSContext *cx, JS::MutableHandle<JS::Value> ret);
+
+nsresult
+UnregisterAddonHistograms(const nsACString &id);
+
+nsresult
+GetAddonHistogramSnapshots(JSContext *cx, JS::MutableHandle<JS::Value> ret);
+
+// Memory reporting helpers.
+size_t
+GetMapShallowSizesOfExcludingThis(mozilla::MallocSizeOf aMallocSizeOf);
+
+size_t
+GetHistogramSizesofIncludingThis(mozilla::MallocSizeOf aMallocSizeOf);
+
+// Flushes batched child-process accumulations over IPC.
+void
+IPCTimerFired(nsITimer* aTimer, void* aClosure);
+} // namespace TelemetryHistogram
+
+#endif // TelemetryHistogram_h__
diff --git a/toolkit/components/telemetry/TelemetryLog.jsm b/toolkit/components/telemetry/TelemetryLog.jsm
new file mode 100644
index 000000000..ab62f195b
--- /dev/null
+++ b/toolkit/components/telemetry/TelemetryLog.jsm
@@ -0,0 +1,35 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+this.EXPORTED_SYMBOLS = ["TelemetryLog"];
+
+const Cc = Components.classes;
+const Ci = Components.interfaces;
+
+const Telemetry = Cc["@mozilla.org/base/telemetry;1"].getService(Ci.nsITelemetry);
+var gLogEntries = [];
+
+this.TelemetryLog = Object.freeze({
+ log: function(id, data) {
+ id = String(id);
+ var ts;
+ try {
+ ts = Math.floor(Telemetry.msSinceProcessStart());
+ } catch (e) {
+ // If timestamp is screwed up, we just give up instead of making up
+ // data.
+ return;
+ }
+
+ var entry = [id, ts];
+ if (data !== undefined) {
+ entry = entry.concat(Array.prototype.map.call(data, String));
+ }
+ gLogEntries.push(entry);
+ },
+
+ entries: function() {
+ return gLogEntries;
+ }
+});
diff --git a/toolkit/components/telemetry/TelemetryReportingPolicy.jsm b/toolkit/components/telemetry/TelemetryReportingPolicy.jsm
new file mode 100644
index 000000000..d9c99df49
--- /dev/null
+++ b/toolkit/components/telemetry/TelemetryReportingPolicy.jsm
@@ -0,0 +1,496 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+"use strict";
+
+this.EXPORTED_SYMBOLS = [
+ "TelemetryReportingPolicy"
+];
+
+const {classes: Cc, interfaces: Ci, results: Cr, utils: Cu} = Components;
+
+Cu.import("resource://gre/modules/Log.jsm", this);
+Cu.import("resource://gre/modules/Preferences.jsm", this);
+Cu.import("resource://gre/modules/Services.jsm", this);
+Cu.import("resource://gre/modules/Timer.jsm", this);
+Cu.import("resource://gre/modules/XPCOMUtils.jsm", this);
+Cu.import("resource://services-common/observers.js", this);
+
+XPCOMUtils.defineLazyModuleGetter(this, "TelemetrySend",
+ "resource://gre/modules/TelemetrySend.jsm");
+XPCOMUtils.defineLazyModuleGetter(this, "UpdateUtils",
+ "resource://gre/modules/UpdateUtils.jsm");
+
+const LOGGER_NAME = "Toolkit.Telemetry";
+const LOGGER_PREFIX = "TelemetryReportingPolicy::";
+
+// Oldest year to allow in date preferences. The FHR infobar was implemented in
+// 2012 and no dates older than that should be encountered.
+const OLDEST_ALLOWED_ACCEPTANCE_YEAR = 2012;
+
+const PREF_BRANCH = "datareporting.policy.";
+// Indicates whether this is the first run or not. This is used to decide when to display
+// the policy.
+const PREF_FIRST_RUN = "toolkit.telemetry.reportingpolicy.firstRun";
+// Allows to skip the datachoices infobar. This should only be used in tests.
+const PREF_BYPASS_NOTIFICATION = PREF_BRANCH + "dataSubmissionPolicyBypassNotification";
+// The submission kill switch: if this preference is disable, no submission will ever take place.
+const PREF_DATA_SUBMISSION_ENABLED = PREF_BRANCH + "dataSubmissionEnabled";
+// This preference holds the current policy version, which overrides
+// DEFAULT_DATAREPORTING_POLICY_VERSION
+const PREF_CURRENT_POLICY_VERSION = PREF_BRANCH + "currentPolicyVersion";
+// This indicates the minimum required policy version. If the accepted policy version
+// is lower than this, the notification bar must be showed again.
+const PREF_MINIMUM_POLICY_VERSION = PREF_BRANCH + "minimumPolicyVersion";
+// The version of the accepted policy.
+const PREF_ACCEPTED_POLICY_VERSION = PREF_BRANCH + "dataSubmissionPolicyAcceptedVersion";
+// The date user accepted the policy.
+const PREF_ACCEPTED_POLICY_DATE = PREF_BRANCH + "dataSubmissionPolicyNotifiedTime";
+// URL of privacy policy to be opened in a background tab on first run instead of showing the
+// data choices infobar.
+const PREF_FIRST_RUN_URL = PREF_BRANCH + "firstRunURL";
+// The following preferences are deprecated and will be purged during the preferences
+// migration process.
+const DEPRECATED_FHR_PREFS = [
+ PREF_BRANCH + "dataSubmissionPolicyAccepted",
+ PREF_BRANCH + "dataSubmissionPolicyBypassAcceptance",
+ PREF_BRANCH + "dataSubmissionPolicyResponseType",
+ PREF_BRANCH + "dataSubmissionPolicyResponseTime"
+];
+
+// How much time until we display the data choices notification bar, on the first run.
+const NOTIFICATION_DELAY_FIRST_RUN_MSEC = 60 * 1000; // 60s
+// Same as above, for the next runs.
+const NOTIFICATION_DELAY_NEXT_RUNS_MSEC = 10 * 1000; // 10s
+
+/**
+ * This is a policy object used to override behavior within this module.
+ * Tests override properties on this object to allow for control of behavior
+ * that would otherwise be very hard to cover.
+ */
+var Policy = {
+ now: () => new Date(),
+ setShowInfobarTimeout: (callback, delayMs) => setTimeout(callback, delayMs),
+ clearShowInfobarTimeout: (id) => clearTimeout(id),
+};
+
+/**
+ * Represents a request to display data policy.
+ *
+ * Receivers of these instances are expected to call one or more of the on*
+ * functions when events occur.
+ *
+ * When one of these requests is received, the first thing a callee should do
+ * is present notification to the user of the data policy. When the notice
+ * is displayed to the user, the callee should call `onUserNotifyComplete`.
+ *
+ * If for whatever reason the callee could not display a notice,
+ * it should call `onUserNotifyFailed`.
+ *
+ * @param {Object} aLog The log object used to log the error in case of failures.
+ */
+function NotifyPolicyRequest(aLog) {
+ this._log = aLog;
+}
+
+NotifyPolicyRequest.prototype = Object.freeze({
+ /**
+ * Called when the user is notified of the policy.
+ */
+ onUserNotifyComplete: function() {
+ return TelemetryReportingPolicyImpl._userNotified();
+ },
+
+ /**
+ * Called when there was an error notifying the user about the policy.
+ *
+ * @param error
+ * (Error) Explains what went wrong.
+ */
+ onUserNotifyFailed: function (error) {
+ this._log.error("onUserNotifyFailed - " + error);
+ },
+});
+
+this.TelemetryReportingPolicy = {
+ // The current policy version number. If the version number stored in the prefs
+ // is smaller than this, data upload will be disabled until the user is re-notified
+ // about the policy changes.
+ DEFAULT_DATAREPORTING_POLICY_VERSION: 1,
+
+ /**
+ * Setup the policy.
+ */
+ setup: function() {
+ return TelemetryReportingPolicyImpl.setup();
+ },
+
+ /**
+ * Shutdown and clear the policy.
+ */
+ shutdown: function() {
+ return TelemetryReportingPolicyImpl.shutdown();
+ },
+
+ /**
+ * Check if we are allowed to upload data. In order to submit data both these conditions
+ * should be true:
+ * - The data submission preference should be true.
+ * - The datachoices infobar should have been displayed.
+ *
+ * @return {Boolean} True if we are allowed to upload data, false otherwise.
+ */
+ canUpload: function() {
+ return TelemetryReportingPolicyImpl.canUpload();
+ },
+
+ /**
+ * Test only method, restarts the policy.
+ */
+ reset: function() {
+ return TelemetryReportingPolicyImpl.reset();
+ },
+
+ /**
+ * Test only method, used to check if user is notified of the policy in tests.
+ */
+ testIsUserNotified: function() {
+ return TelemetryReportingPolicyImpl.isUserNotifiedOfCurrentPolicy;
+ },
+
+ /**
+ * Test only method, used to simulate the infobar being shown in xpcshell tests.
+ */
+ testInfobarShown: function() {
+ return TelemetryReportingPolicyImpl._userNotified();
+ },
+};
+
+var TelemetryReportingPolicyImpl = {
+ _logger: null,
+ // Keep track of the notification status if user wasn't notified already.
+ _notificationInProgress: false,
+ // The timer used to show the datachoices notification at startup.
+ _startupNotificationTimerId: null,
+
+ get _log() {
+ if (!this._logger) {
+ this._logger = Log.repository.getLoggerWithMessagePrefix(LOGGER_NAME, LOGGER_PREFIX);
+ }
+
+ return this._logger;
+ },
+
+ /**
+ * Get the date the policy was notified.
+ * @return {Object} A date object or null on errors.
+ */
+ get dataSubmissionPolicyNotifiedDate() {
+ let prefString = Preferences.get(PREF_ACCEPTED_POLICY_DATE, "0");
+ let valueInteger = parseInt(prefString, 10);
+
+ // Bail out if we didn't store any value yet.
+ if (valueInteger == 0) {
+ this._log.info("get dataSubmissionPolicyNotifiedDate - No date stored yet.");
+ return null;
+ }
+
+ // If an invalid value is saved in the prefs, bail out too.
+ if (Number.isNaN(valueInteger)) {
+ this._log.error("get dataSubmissionPolicyNotifiedDate - Invalid date stored.");
+ return null;
+ }
+
+ // Make sure the notification date is newer then the oldest allowed date.
+ let date = new Date(valueInteger);
+ if (date.getFullYear() < OLDEST_ALLOWED_ACCEPTANCE_YEAR) {
+ this._log.error("get dataSubmissionPolicyNotifiedDate - The stored date is too old.");
+ return null;
+ }
+
+ return date;
+ },
+
+ /**
+ * Set the date the policy was notified.
+ * @param {Object} aDate A valid date object.
+ */
+ set dataSubmissionPolicyNotifiedDate(aDate) {
+ this._log.trace("set dataSubmissionPolicyNotifiedDate - aDate: " + aDate);
+
+ if (!aDate || aDate.getFullYear() < OLDEST_ALLOWED_ACCEPTANCE_YEAR) {
+ this._log.error("set dataSubmissionPolicyNotifiedDate - Invalid notification date.");
+ return;
+ }
+
+ Preferences.set(PREF_ACCEPTED_POLICY_DATE, aDate.getTime().toString());
+ },
+
+ /**
+ * Whether submission of data is allowed.
+ *
+ * This is the master switch for remote server communication. If it is
+ * false, we never request upload or deletion.
+ */
+ get dataSubmissionEnabled() {
+ // Default is true because we are opt-out.
+ return Preferences.get(PREF_DATA_SUBMISSION_ENABLED, true);
+ },
+
+ get currentPolicyVersion() {
+ return Preferences.get(PREF_CURRENT_POLICY_VERSION,
+ TelemetryReportingPolicy.DEFAULT_DATAREPORTING_POLICY_VERSION);
+ },
+
+ /**
+ * The minimum policy version which for dataSubmissionPolicyAccepted to
+ * to be valid.
+ */
+ get minimumPolicyVersion() {
+ const minPolicyVersion = Preferences.get(PREF_MINIMUM_POLICY_VERSION, 1);
+
+ // First check if the current channel has a specific minimum policy version. If not,
+ // use the general minimum policy version.
+ let channel = "";
+ try {
+ channel = UpdateUtils.getUpdateChannel(false);
+ } catch (e) {
+ this._log.error("minimumPolicyVersion - Unable to retrieve the current channel.");
+ return minPolicyVersion;
+ }
+ const channelPref = PREF_MINIMUM_POLICY_VERSION + ".channel-" + channel;
+ return Preferences.get(channelPref, minPolicyVersion);
+ },
+
+ get dataSubmissionPolicyAcceptedVersion() {
+ return Preferences.get(PREF_ACCEPTED_POLICY_VERSION, 0);
+ },
+
+ set dataSubmissionPolicyAcceptedVersion(value) {
+ Preferences.set(PREF_ACCEPTED_POLICY_VERSION, value);
+ },
+
+ /**
+ * Checks to see if the user has been notified about data submission
+ * @return {Bool} True if user has been notified and the notification is still valid,
+ * false otherwise.
+ */
+ get isUserNotifiedOfCurrentPolicy() {
+ // If we don't have a sane notification date, the user was not notified yet.
+ if (!this.dataSubmissionPolicyNotifiedDate ||
+ this.dataSubmissionPolicyNotifiedDate.getTime() <= 0) {
+ return false;
+ }
+
+ // The accepted policy version should not be less than the minimum policy version.
+ if (this.dataSubmissionPolicyAcceptedVersion < this.minimumPolicyVersion) {
+ return false;
+ }
+
+ // Otherwise the user was already notified.
+ return true;
+ },
+
+ /**
+ * Test only method, restarts the policy.
+ */
+ reset: function() {
+ this.shutdown();
+ return this.setup();
+ },
+
+ /**
+ * Setup the policy.
+ */
+ setup: function() {
+ this._log.trace("setup");
+
+ // Migrate the data choices infobar, if needed.
+ this._migratePreferences();
+
+ // Add the event observers.
+ Services.obs.addObserver(this, "sessionstore-windows-restored", false);
+ },
+
+ /**
+ * Clean up the reporting policy.
+ */
+ shutdown: function() {
+ this._log.trace("shutdown");
+
+ this._detachObservers();
+
+ Policy.clearShowInfobarTimeout(this._startupNotificationTimerId);
+ },
+
+ /**
+ * Detach the observers that were attached during setup.
+ */
+ _detachObservers: function() {
+ Services.obs.removeObserver(this, "sessionstore-windows-restored");
+ },
+
+ /**
+ * Check if we are allowed to upload data. In order to submit data both these conditions
+ * should be true:
+ * - The data submission preference should be true.
+ * - The datachoices infobar should have been displayed.
+ *
+ * @return {Boolean} True if we are allowed to upload data, false otherwise.
+ */
+ canUpload: function() {
+ // If data submission is disabled, there's no point in showing the infobar. Just
+ // forbid to upload.
+ if (!this.dataSubmissionEnabled) {
+ return false;
+ }
+
+ // Submission is enabled. We enable upload if user is notified or we need to bypass
+ // the policy.
+ const bypassNotification = Preferences.get(PREF_BYPASS_NOTIFICATION, false);
+ return this.isUserNotifiedOfCurrentPolicy || bypassNotification;
+ },
+
+ /**
+ * Migrate the data policy preferences, if needed.
+ */
+ _migratePreferences: function() {
+ // Current prefs are mostly the same than the old ones, except for some deprecated ones.
+ for (let pref of DEPRECATED_FHR_PREFS) {
+ Preferences.reset(pref);
+ }
+ },
+
+ /**
+ * Show the data choices infobar if the user wasn't already notified and data submission
+ * is enabled.
+ */
+ _showInfobar: function() {
+ if (!this.dataSubmissionEnabled) {
+ this._log.trace("_showInfobar - Data submission disabled by the policy.");
+ return;
+ }
+
+ const bypassNotification = Preferences.get(PREF_BYPASS_NOTIFICATION, false);
+ if (this.isUserNotifiedOfCurrentPolicy || bypassNotification) {
+ this._log.trace("_showInfobar - User already notified or bypassing the policy.");
+ return;
+ }
+
+ if (this._notificationInProgress) {
+ this._log.trace("_showInfobar - User not notified, notification already in progress.");
+ return;
+ }
+
+ this._log.trace("_showInfobar - User not notified, notifying now.");
+ this._notificationInProgress = true;
+ let request = new NotifyPolicyRequest(this._log);
+ Observers.notify("datareporting:notify-data-policy:request", request);
+ },
+
+ /**
+ * Called when the user is notified with the infobar or otherwise.
+ */
+ _userNotified() {
+ this._log.trace("_userNotified");
+ this._recordNotificationData();
+ TelemetrySend.notifyCanUpload();
+ },
+
+ /**
+ * Record date and the version of the accepted policy.
+ */
+ _recordNotificationData: function() {
+ this._log.trace("_recordNotificationData");
+ this.dataSubmissionPolicyNotifiedDate = Policy.now();
+ this.dataSubmissionPolicyAcceptedVersion = this.currentPolicyVersion;
+ // The user was notified and the notification data saved: the notification
+ // is no longer in progress.
+ this._notificationInProgress = false;
+ },
+
+ /**
+ * Try to open the privacy policy in a background tab instead of showing the infobar.
+ */
+ _openFirstRunPage() {
+ let firstRunPolicyURL = Preferences.get(PREF_FIRST_RUN_URL, "");
+ if (!firstRunPolicyURL) {
+ return false;
+ }
+ firstRunPolicyURL = Services.urlFormatter.formatURL(firstRunPolicyURL);
+
+ let win;
+ try {
+ const { RecentWindow } = Cu.import("resource:///modules/RecentWindow.jsm", {});
+ win = RecentWindow.getMostRecentBrowserWindow();
+ } catch (e) {}
+
+ if (!win) {
+ this._log.info("Couldn't find browser window to open first-run page. Falling back to infobar.");
+ return false;
+ }
+
+ // We'll consider the user notified once the privacy policy has been loaded
+ // in a background tab even if that tab hasn't been selected.
+ let tab;
+ let progressListener = {};
+ progressListener.onStateChange =
+ (aBrowser, aWebProgress, aRequest, aStateFlags, aStatus) => {
+ if (aWebProgress.isTopLevel &&
+ tab &&
+ tab.linkedBrowser == aBrowser &&
+ aStateFlags & Ci.nsIWebProgressListener.STATE_STOP &&
+ aStateFlags & Ci.nsIWebProgressListener.STATE_IS_NETWORK) {
+ let uri = aBrowser.documentURI;
+ if (uri && !/^about:(blank|neterror|certerror|blocked)/.test(uri.spec)) {
+ this._userNotified();
+ } else {
+ this._log.info("Failed to load first-run page. Falling back to infobar.");
+ this._showInfobar();
+ }
+ removeListeners();
+ }
+ };
+
+ let removeListeners = () => {
+ win.removeEventListener("unload", removeListeners);
+ win.gBrowser.removeTabsProgressListener(progressListener);
+ };
+
+ win.addEventListener("unload", removeListeners);
+ win.gBrowser.addTabsProgressListener(progressListener);
+
+ tab = win.gBrowser.loadOneTab(firstRunPolicyURL, { inBackground: true });
+
+ return true;
+ },
+
+ observe: function(aSubject, aTopic, aData) {
+ if (aTopic != "sessionstore-windows-restored") {
+ return;
+ }
+
+ const isFirstRun = Preferences.get(PREF_FIRST_RUN, true);
+ if (isFirstRun) {
+ // We're performing the first run, flip firstRun preference for subsequent runs.
+ Preferences.set(PREF_FIRST_RUN, false);
+
+ try {
+ if (this._openFirstRunPage()) {
+ return;
+ }
+ } catch (e) {
+ this._log.error("Failed to open privacy policy tab: " + e);
+ }
+ }
+
+ // Show the info bar.
+ const delay =
+ isFirstRun ? NOTIFICATION_DELAY_FIRST_RUN_MSEC: NOTIFICATION_DELAY_NEXT_RUNS_MSEC;
+
+ this._startupNotificationTimerId = Policy.setShowInfobarTimeout(
+ // Calling |canUpload| eventually shows the infobar, if needed.
+ () => this._showInfobar(), delay);
+ },
+};
diff --git a/toolkit/components/telemetry/TelemetryScalar.cpp b/toolkit/components/telemetry/TelemetryScalar.cpp
new file mode 100644
index 000000000..6e9558070
--- /dev/null
+++ b/toolkit/components/telemetry/TelemetryScalar.cpp
@@ -0,0 +1,1896 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "nsITelemetry.h"
+#include "nsIVariant.h"
+#include "nsVariant.h"
+#include "nsHashKeys.h"
+#include "nsBaseHashtable.h"
+#include "nsClassHashtable.h"
+#include "nsIXPConnect.h"
+#include "nsContentUtils.h"
+#include "nsThreadUtils.h"
+#include "mozilla/StaticMutex.h"
+#include "mozilla/Unused.h"
+
+#include "TelemetryCommon.h"
+#include "TelemetryScalar.h"
+#include "TelemetryScalarData.h"
+
+using mozilla::StaticMutex;
+using mozilla::StaticMutexAutoLock;
+using mozilla::Telemetry::Common::AutoHashtable;
+using mozilla::Telemetry::Common::IsExpiredVersion;
+using mozilla::Telemetry::Common::CanRecordDataset;
+using mozilla::Telemetry::Common::IsInDataset;
+using mozilla::Telemetry::Common::LogToBrowserConsole;
+
+////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////
+//
+// Naming: there are two kinds of functions in this file:
+//
+// * Functions named internal_*: these can only be reached via an
+// interface function (TelemetryScalar::*). They expect the interface
+// function to have acquired |gTelemetryScalarsMutex|, so they do not
+// have to be thread-safe.
+//
+// * Functions named TelemetryScalar::*. This is the external interface.
+// Entries and exits to these functions are serialised using
+// |gTelemetryScalarsMutex|.
+//
+// Avoiding races and deadlocks:
+//
+// All functions in the external interface (TelemetryScalar::*) are
+// serialised using the mutex |gTelemetryScalarsMutex|. This means
+// that the external interface is thread-safe, and many of the
+// internal_* functions can ignore thread safety. But it also brings
+// a danger of deadlock if any function in the external interface can
+// get back to that interface. That is, we will deadlock on any call
+// chain like this
+//
+// TelemetryScalar::* -> .. any functions .. -> TelemetryScalar::*
+//
+// To reduce the danger of that happening, observe the following rules:
+//
+// * No function in TelemetryScalar::* may directly call, nor take the
+// address of, any other function in TelemetryScalar::*.
+//
+// * No internal function internal_* may call, nor take the address
+// of, any function in TelemetryScalar::*.
+
+////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////
+//
+// PRIVATE TYPES
+
+namespace {
+
+const uint32_t kMaximumNumberOfKeys = 100;
+const uint32_t kMaximumKeyStringLength = 70;
+const uint32_t kMaximumStringValueLength = 50;
+const uint32_t kScalarCount =
+ static_cast<uint32_t>(mozilla::Telemetry::ScalarID::ScalarCount);
+
+enum class ScalarResult : uint8_t {
+ // Nothing went wrong.
+ Ok,
+ // General Scalar Errors
+ OperationNotSupported,
+ InvalidType,
+ InvalidValue,
+ // Keyed Scalar Errors
+ KeyTooLong,
+ TooManyKeys,
+ // String Scalar Errors
+ StringTooLong,
+ // Unsigned Scalar Errors
+ UnsignedNegativeValue,
+ UnsignedTruncatedValue
+};
+
+typedef nsBaseHashtableET<nsDepCharHashKey, mozilla::Telemetry::ScalarID>
+ CharPtrEntryType;
+
+typedef AutoHashtable<CharPtrEntryType> ScalarMapType;
+
+/**
+ * Map the error codes used internally to NS_* error codes.
+ * @param aSr The error code used internally in this module.
+ * @return {nsresult} A NS_* error code.
+ */
+nsresult
+MapToNsResult(ScalarResult aSr)
+{
+ switch (aSr) {
+ case ScalarResult::Ok:
+ return NS_OK;
+ case ScalarResult::OperationNotSupported:
+ return NS_ERROR_NOT_AVAILABLE;
+ case ScalarResult::StringTooLong:
+ // We don't want to throw if we're setting a string that is too long.
+ return NS_OK;
+ case ScalarResult::InvalidType:
+ case ScalarResult::InvalidValue:
+ case ScalarResult::KeyTooLong:
+ return NS_ERROR_ILLEGAL_VALUE;
+ case ScalarResult::TooManyKeys:
+ return NS_ERROR_FAILURE;
+ case ScalarResult::UnsignedNegativeValue:
+ case ScalarResult::UnsignedTruncatedValue:
+ // We shouldn't throw if trying to set a negative number or are truncated,
+ // only warn the user.
+ return NS_OK;
+ }
+ return NS_ERROR_FAILURE;
+}
+
+bool
+IsValidEnumId(mozilla::Telemetry::ScalarID aID)
+{
+ return aID < mozilla::Telemetry::ScalarID::ScalarCount;
+}
+
+// Implements the methods for ScalarInfo.
+const char *
+ScalarInfo::name() const
+{
+ return &gScalarsStringTable[this->name_offset];
+}
+
+const char *
+ScalarInfo::expiration() const
+{
+ return &gScalarsStringTable[this->expiration_offset];
+}
+
+/**
+ * The base scalar object, that servers as a common ancestor for storage
+ * purposes.
+ */
+class ScalarBase
+{
+public:
+ virtual ~ScalarBase() {};
+
+ // Set, Add and SetMaximum functions as described in the Telemetry IDL.
+ virtual ScalarResult SetValue(nsIVariant* aValue) = 0;
+ virtual ScalarResult AddValue(nsIVariant* aValue) { return ScalarResult::OperationNotSupported; }
+ virtual ScalarResult SetMaximum(nsIVariant* aValue) { return ScalarResult::OperationNotSupported; }
+
+ // Convenience methods used by the C++ API.
+ virtual void SetValue(uint32_t aValue) { mozilla::Unused << HandleUnsupported(); }
+ virtual ScalarResult SetValue(const nsAString& aValue) { return HandleUnsupported(); }
+ virtual void SetValue(bool aValue) { mozilla::Unused << HandleUnsupported(); }
+ virtual void AddValue(uint32_t aValue) { mozilla::Unused << HandleUnsupported(); }
+ virtual void SetMaximum(uint32_t aValue) { mozilla::Unused << HandleUnsupported(); }
+
+ // GetValue is used to get the value of the scalar when persisting it to JS.
+ virtual nsresult GetValue(nsCOMPtr<nsIVariant>& aResult) const = 0;
+
+ // To measure the memory stats.
+ virtual size_t SizeOfIncludingThis(mozilla::MallocSizeOf aMallocSizeOf) const = 0;
+
+private:
+ ScalarResult HandleUnsupported() const;
+};
+
+ScalarResult
+ScalarBase::HandleUnsupported() const
+{
+ MOZ_ASSERT(false, "This operation is not support for this scalar type.");
+ return ScalarResult::OperationNotSupported;
+}
+
+/**
+ * The implementation for the unsigned int scalar type.
+ */
+class ScalarUnsigned : public ScalarBase
+{
+public:
+ using ScalarBase::SetValue;
+
+ ScalarUnsigned() : mStorage(0) {};
+ ~ScalarUnsigned() {};
+
+ ScalarResult SetValue(nsIVariant* aValue) final;
+ void SetValue(uint32_t aValue) final;
+ ScalarResult AddValue(nsIVariant* aValue) final;
+ void AddValue(uint32_t aValue) final;
+ ScalarResult SetMaximum(nsIVariant* aValue) final;
+ void SetMaximum(uint32_t aValue) final;
+ nsresult GetValue(nsCOMPtr<nsIVariant>& aResult) const final;
+ size_t SizeOfIncludingThis(mozilla::MallocSizeOf aMallocSizeOf) const final;
+
+private:
+ uint32_t mStorage;
+
+ ScalarResult CheckInput(nsIVariant* aValue);
+
+ // Prevent copying.
+ ScalarUnsigned(const ScalarUnsigned& aOther) = delete;
+ void operator=(const ScalarUnsigned& aOther) = delete;
+};
+
+ScalarResult
+ScalarUnsigned::SetValue(nsIVariant* aValue)
+{
+ ScalarResult sr = CheckInput(aValue);
+ if (sr == ScalarResult::UnsignedNegativeValue) {
+ return sr;
+ }
+
+ if (NS_FAILED(aValue->GetAsUint32(&mStorage))) {
+ return ScalarResult::InvalidValue;
+ }
+ return sr;
+}
+
+void
+ScalarUnsigned::SetValue(uint32_t aValue)
+{
+ mStorage = aValue;
+}
+
+ScalarResult
+ScalarUnsigned::AddValue(nsIVariant* aValue)
+{
+ ScalarResult sr = CheckInput(aValue);
+ if (sr == ScalarResult::UnsignedNegativeValue) {
+ return sr;
+ }
+
+ uint32_t newAddend = 0;
+ nsresult rv = aValue->GetAsUint32(&newAddend);
+ if (NS_FAILED(rv)) {
+ return ScalarResult::InvalidValue;
+ }
+ mStorage += newAddend;
+ return sr;
+}
+
+void
+ScalarUnsigned::AddValue(uint32_t aValue)
+{
+ mStorage += aValue;
+}
+
+ScalarResult
+ScalarUnsigned::SetMaximum(nsIVariant* aValue)
+{
+ ScalarResult sr = CheckInput(aValue);
+ if (sr == ScalarResult::UnsignedNegativeValue) {
+ return sr;
+ }
+
+ uint32_t newValue = 0;
+ nsresult rv = aValue->GetAsUint32(&newValue);
+ if (NS_FAILED(rv)) {
+ return ScalarResult::InvalidValue;
+ }
+ if (newValue > mStorage) {
+ mStorage = newValue;
+ }
+ return sr;
+}
+
+void
+ScalarUnsigned::SetMaximum(uint32_t aValue)
+{
+ if (aValue > mStorage) {
+ mStorage = aValue;
+ }
+}
+
+nsresult
+ScalarUnsigned::GetValue(nsCOMPtr<nsIVariant>& aResult) const
+{
+ nsCOMPtr<nsIWritableVariant> outVar(new nsVariant());
+ nsresult rv = outVar->SetAsUint32(mStorage);
+ if (NS_FAILED(rv)) {
+ return rv;
+ }
+ aResult = outVar.forget();
+ return NS_OK;
+}
+
+size_t
+ScalarUnsigned::SizeOfIncludingThis(mozilla::MallocSizeOf aMallocSizeOf) const
+{
+ return aMallocSizeOf(this);
+}
+
+ScalarResult
+ScalarUnsigned::CheckInput(nsIVariant* aValue)
+{
+ // If this is a floating point value/double, we will probably get truncated.
+ uint16_t type;
+ aValue->GetDataType(&type);
+ if (type == nsIDataType::VTYPE_FLOAT ||
+ type == nsIDataType::VTYPE_DOUBLE) {
+ return ScalarResult::UnsignedTruncatedValue;
+ }
+
+ int32_t signedTest;
+ // If we're able to cast the number to an int, check its sign.
+ // Warn the user if he's trying to set the unsigned scalar to a negative
+ // number.
+ if (NS_SUCCEEDED(aValue->GetAsInt32(&signedTest)) &&
+ signedTest < 0) {
+ return ScalarResult::UnsignedNegativeValue;
+ }
+ return ScalarResult::Ok;
+}
+
+/**
+ * The implementation for the string scalar type.
+ */
+class ScalarString : public ScalarBase
+{
+public:
+ using ScalarBase::SetValue;
+
+ ScalarString() : mStorage(EmptyString()) {};
+ ~ScalarString() {};
+
+ ScalarResult SetValue(nsIVariant* aValue) final;
+ ScalarResult SetValue(const nsAString& aValue) final;
+ nsresult GetValue(nsCOMPtr<nsIVariant>& aResult) const final;
+ size_t SizeOfIncludingThis(mozilla::MallocSizeOf aMallocSizeOf) const final;
+
+private:
+ nsString mStorage;
+
+ // Prevent copying.
+ ScalarString(const ScalarString& aOther) = delete;
+ void operator=(const ScalarString& aOther) = delete;
+};
+
+ScalarResult
+ScalarString::SetValue(nsIVariant* aValue)
+{
+ // Check that we got the correct data type.
+ uint16_t type;
+ aValue->GetDataType(&type);
+ if (type != nsIDataType::VTYPE_CHAR &&
+ type != nsIDataType::VTYPE_WCHAR &&
+ type != nsIDataType::VTYPE_DOMSTRING &&
+ type != nsIDataType::VTYPE_CHAR_STR &&
+ type != nsIDataType::VTYPE_WCHAR_STR &&
+ type != nsIDataType::VTYPE_STRING_SIZE_IS &&
+ type != nsIDataType::VTYPE_WSTRING_SIZE_IS &&
+ type != nsIDataType::VTYPE_UTF8STRING &&
+ type != nsIDataType::VTYPE_CSTRING &&
+ type != nsIDataType::VTYPE_ASTRING) {
+ return ScalarResult::InvalidType;
+ }
+
+ nsAutoString convertedString;
+ nsresult rv = aValue->GetAsAString(convertedString);
+ if (NS_FAILED(rv)) {
+ return ScalarResult::InvalidValue;
+ }
+ return SetValue(convertedString);
+};
+
+ScalarResult
+ScalarString::SetValue(const nsAString& aValue)
+{
+ mStorage = Substring(aValue, 0, kMaximumStringValueLength);
+ if (aValue.Length() > kMaximumStringValueLength) {
+ return ScalarResult::StringTooLong;
+ }
+ return ScalarResult::Ok;
+}
+
+nsresult
+ScalarString::GetValue(nsCOMPtr<nsIVariant>& aResult) const
+{
+ nsCOMPtr<nsIWritableVariant> outVar(new nsVariant());
+ nsresult rv = outVar->SetAsAString(mStorage);
+ if (NS_FAILED(rv)) {
+ return rv;
+ }
+ aResult = outVar.forget();
+ return NS_OK;
+}
+
+size_t
+ScalarString::SizeOfIncludingThis(mozilla::MallocSizeOf aMallocSizeOf) const
+{
+ size_t n = aMallocSizeOf(this);
+ n+= mStorage.SizeOfExcludingThisIfUnshared(aMallocSizeOf);
+ return n;
+}
+
+/**
+ * The implementation for the boolean scalar type.
+ */
+class ScalarBoolean : public ScalarBase
+{
+public:
+ using ScalarBase::SetValue;
+
+ ScalarBoolean() : mStorage(false) {};
+ ~ScalarBoolean() {};
+
+ ScalarResult SetValue(nsIVariant* aValue) final;
+ void SetValue(bool aValue) final;
+ nsresult GetValue(nsCOMPtr<nsIVariant>& aResult) const final;
+ size_t SizeOfIncludingThis(mozilla::MallocSizeOf aMallocSizeOf) const final;
+
+private:
+ bool mStorage;
+
+ // Prevent copying.
+ ScalarBoolean(const ScalarBoolean& aOther) = delete;
+ void operator=(const ScalarBoolean& aOther) = delete;
+};
+
+ScalarResult
+ScalarBoolean::SetValue(nsIVariant* aValue)
+{
+ // Check that we got the correct data type.
+ uint16_t type;
+ aValue->GetDataType(&type);
+ if (type != nsIDataType::VTYPE_BOOL &&
+ type != nsIDataType::VTYPE_INT8 &&
+ type != nsIDataType::VTYPE_INT16 &&
+ type != nsIDataType::VTYPE_INT32 &&
+ type != nsIDataType::VTYPE_INT64 &&
+ type != nsIDataType::VTYPE_UINT8 &&
+ type != nsIDataType::VTYPE_UINT16 &&
+ type != nsIDataType::VTYPE_UINT32 &&
+ type != nsIDataType::VTYPE_UINT64) {
+ return ScalarResult::InvalidType;
+ }
+
+ if (NS_FAILED(aValue->GetAsBool(&mStorage))) {
+ return ScalarResult::InvalidValue;
+ }
+ return ScalarResult::Ok;
+};
+
+void
+ScalarBoolean::SetValue(bool aValue)
+{
+ mStorage = aValue;
+}
+
+nsresult
+ScalarBoolean::GetValue(nsCOMPtr<nsIVariant>& aResult) const
+{
+ nsCOMPtr<nsIWritableVariant> outVar(new nsVariant());
+ nsresult rv = outVar->SetAsBool(mStorage);
+ if (NS_FAILED(rv)) {
+ return rv;
+ }
+ aResult = outVar.forget();
+ return NS_OK;
+}
+
+size_t
+ScalarBoolean::SizeOfIncludingThis(mozilla::MallocSizeOf aMallocSizeOf) const
+{
+ return aMallocSizeOf(this);
+}
+
+/**
+ * Allocate a scalar class given the scalar info.
+ *
+ * @param aInfo The informations for the scalar coming from the definition file.
+ * @return nullptr if the scalar type is unknown, otherwise a valid pointer to the
+ * scalar type.
+ */
+ScalarBase*
+internal_ScalarAllocate(uint32_t aScalarKind)
+{
+ ScalarBase* scalar = nullptr;
+ switch (aScalarKind) {
+ case nsITelemetry::SCALAR_COUNT:
+ scalar = new ScalarUnsigned();
+ break;
+ case nsITelemetry::SCALAR_STRING:
+ scalar = new ScalarString();
+ break;
+ case nsITelemetry::SCALAR_BOOLEAN:
+ scalar = new ScalarBoolean();
+ break;
+ default:
+ MOZ_ASSERT(false, "Invalid scalar type");
+ }
+ return scalar;
+}
+
+/**
+ * The implementation for the keyed scalar type.
+ */
+class KeyedScalar
+{
+public:
+ typedef mozilla::Pair<nsCString, nsCOMPtr<nsIVariant>> KeyValuePair;
+
+ explicit KeyedScalar(uint32_t aScalarKind) : mScalarKind(aScalarKind) {};
+ ~KeyedScalar() {};
+
+ // Set, Add and SetMaximum functions as described in the Telemetry IDL.
+ // These methods implicitly instantiate a Scalar[*] for each key.
+ ScalarResult SetValue(const nsAString& aKey, nsIVariant* aValue);
+ ScalarResult AddValue(const nsAString& aKey, nsIVariant* aValue);
+ ScalarResult SetMaximum(const nsAString& aKey, nsIVariant* aValue);
+
+ // Convenience methods used by the C++ API.
+ void SetValue(const nsAString& aKey, uint32_t aValue);
+ void SetValue(const nsAString& aKey, bool aValue);
+ void AddValue(const nsAString& aKey, uint32_t aValue);
+ void SetMaximum(const nsAString& aKey, uint32_t aValue);
+
+ // GetValue is used to get the key-value pairs stored in the keyed scalar
+ // when persisting it to JS.
+ nsresult GetValue(nsTArray<KeyValuePair>& aValues) const;
+
+ // To measure the memory stats.
+ size_t SizeOfIncludingThis(mozilla::MallocSizeOf aMallocSizeOf);
+
+private:
+ typedef nsClassHashtable<nsCStringHashKey, ScalarBase> ScalarKeysMapType;
+
+ ScalarKeysMapType mScalarKeys;
+ const uint32_t mScalarKind;
+
+ ScalarResult GetScalarForKey(const nsAString& aKey, ScalarBase** aRet);
+};
+
+// The next three methods implement the nsIVariant-based (IDL) interface.
+// Each forwards to the per-key scalar, creating it on first use; failures
+// from GetScalarForKey (key too long, too many keys, bad kind) are
+// propagated to the caller instead of asserting.
+ScalarResult
+KeyedScalar::SetValue(const nsAString& aKey, nsIVariant* aValue)
+{
+  ScalarBase* scalar = nullptr;
+  ScalarResult sr = GetScalarForKey(aKey, &scalar);
+  if (sr != ScalarResult::Ok) {
+    return sr;
+  }
+
+  return scalar->SetValue(aValue);
+}
+
+ScalarResult
+KeyedScalar::AddValue(const nsAString& aKey, nsIVariant* aValue)
+{
+  ScalarBase* scalar = nullptr;
+  ScalarResult sr = GetScalarForKey(aKey, &scalar);
+  if (sr != ScalarResult::Ok) {
+    return sr;
+  }
+
+  return scalar->AddValue(aValue);
+}
+
+ScalarResult
+KeyedScalar::SetMaximum(const nsAString& aKey, nsIVariant* aValue)
+{
+  ScalarBase* scalar = nullptr;
+  ScalarResult sr = GetScalarForKey(aKey, &scalar);
+  if (sr != ScalarResult::Ok) {
+    return sr;
+  }
+
+  return scalar->SetMaximum(aValue);
+}
+
+// The following convenience setters back the C++ API. Unlike the
+// nsIVariant overloads they cannot report failures to the caller, so a bad
+// key (too long, or too many keys already recorded) asserts in debug
+// builds and is silently ignored in release builds.
+void
+KeyedScalar::SetValue(const nsAString& aKey, uint32_t aValue)
+{
+  ScalarBase* scalar = nullptr;
+  ScalarResult sr = GetScalarForKey(aKey, &scalar);
+  if (sr != ScalarResult::Ok) {
+    MOZ_ASSERT(false, "Key too long or too many keys are recorded in the scalar.");
+    return;
+  }
+
+  scalar->SetValue(aValue);
+}
+
+void
+KeyedScalar::SetValue(const nsAString& aKey, bool aValue)
+{
+  ScalarBase* scalar = nullptr;
+  ScalarResult sr = GetScalarForKey(aKey, &scalar);
+  if (sr != ScalarResult::Ok) {
+    MOZ_ASSERT(false, "Key too long or too many keys are recorded in the scalar.");
+    return;
+  }
+
+  scalar->SetValue(aValue);
+}
+
+void
+KeyedScalar::AddValue(const nsAString& aKey, uint32_t aValue)
+{
+  ScalarBase* scalar = nullptr;
+  ScalarResult sr = GetScalarForKey(aKey, &scalar);
+  if (sr != ScalarResult::Ok) {
+    MOZ_ASSERT(false, "Key too long or too many keys are recorded in the scalar.");
+    return;
+  }
+
+  scalar->AddValue(aValue);
+}
+
+void
+KeyedScalar::SetMaximum(const nsAString& aKey, uint32_t aValue)
+{
+  ScalarBase* scalar = nullptr;
+  ScalarResult sr = GetScalarForKey(aKey, &scalar);
+  if (sr != ScalarResult::Ok) {
+    MOZ_ASSERT(false, "Key too long or too many keys are recorded in the scalar.");
+    return;
+  }
+
+  scalar->SetMaximum(aValue);
+}
+
+/**
+ * Get a key-value array with the values for the Keyed Scalar.
+ * @param aValues The array that will hold the key-value pairs.
+ * @return {nsresult} NS_OK or an error value as reported by the
+ *         the specific scalar objects implementations (e.g.
+ *         ScalarUnsigned).
+ */
+nsresult
+KeyedScalar::GetValue(nsTArray<KeyValuePair>& aValues) const
+{
+  // Hashtable iteration order is unspecified; consumers must not rely on
+  // any particular key ordering in |aValues|.
+  for (auto iter = mScalarKeys.ConstIter(); !iter.Done(); iter.Next()) {
+    ScalarBase* scalar = static_cast<ScalarBase*>(iter.Data());
+
+    // Get the scalar value.
+    nsCOMPtr<nsIVariant> scalarValue;
+    nsresult rv = scalar->GetValue(scalarValue);
+    if (NS_FAILED(rv)) {
+      // Bail on the first failing key; previously appended pairs remain
+      // in |aValues|.
+      return rv;
+    }
+
+    // Append it to value list.
+    aValues.AppendElement(mozilla::MakePair(nsCString(iter.Key()), scalarValue));
+  }
+
+  return NS_OK;
+}
+
+/**
+ * Get the scalar for the referenced key.
+ * If there's no such key, instantiate a new Scalar object with the
+ * same type of the Keyed scalar and create the key.
+ *
+ * @param aKey The key; must be shorter than kMaximumKeyStringLength.
+ * @param aRet Outparam for the (map-owned) scalar; only valid on Ok.
+ * @return Ok, KeyTooLong, TooManyKeys or InvalidType.
+ */
+ScalarResult
+KeyedScalar::GetScalarForKey(const nsAString& aKey, ScalarBase** aRet)
+{
+  if (aKey.Length() >= kMaximumKeyStringLength) {
+    return ScalarResult::KeyTooLong;
+  }
+
+  NS_ConvertUTF16toUTF8 utf8Key(aKey);
+
+  ScalarBase* scalar = nullptr;
+  if (mScalarKeys.Get(utf8Key, &scalar)) {
+    *aRet = scalar;
+    return ScalarResult::Ok;
+  }
+
+  // Only enforce the key cap when a new key would be created: updating a
+  // key that is already recorded must keep working once the limit is hit.
+  if (mScalarKeys.Count() >= kMaximumNumberOfKeys) {
+    return ScalarResult::TooManyKeys;
+  }
+
+  scalar = internal_ScalarAllocate(mScalarKind);
+  if (!scalar) {
+    return ScalarResult::InvalidType;
+  }
+
+  mScalarKeys.Put(utf8Key, scalar);
+
+  *aRet = scalar;
+  return ScalarResult::Ok;
+}
+
+// Reports the memory used by this keyed scalar: the KeyedScalar object
+// itself plus every per-key scalar instance.
+// NOTE(review): the hashtable's own entry/key storage does not appear to be
+// counted here — confirm whether that is intentional.
+size_t
+KeyedScalar::SizeOfIncludingThis(mozilla::MallocSizeOf aMallocSizeOf)
+{
+  size_t n = aMallocSizeOf(this);
+  for (auto iter = mScalarKeys.Iter(); !iter.Done(); iter.Next()) {
+    ScalarBase* scalar = static_cast<ScalarBase*>(iter.Data());
+    n += scalar->SizeOfIncludingThis(aMallocSizeOf);
+  }
+  return n;
+}
+
+// Hash key type for ScalarID-indexed storage maps.
+typedef nsUint32HashKey ScalarIDHashKey;
+// ID -> plain scalar storage; the nsClassHashtable owns the instances.
+typedef nsClassHashtable<ScalarIDHashKey, ScalarBase> ScalarStorageMapType;
+// ID -> keyed scalar storage; ownership as above.
+typedef nsClassHashtable<ScalarIDHashKey, KeyedScalar> KeyedScalarStorageMapType;
+
+} // namespace
+
+////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////
+//
+// PRIVATE STATE, SHARED BY ALL THREADS
+
+namespace {
+
+// All of the state below is shared by all threads and is expected to be
+// accessed while holding |gTelemetryScalarsMutex| (defined further down).
+
+// Set to true once this global state has been initialized.
+bool gInitDone = false;
+
+bool gCanRecordBase;
+bool gCanRecordExtended;
+
+// The Name -> ID cache map.
+ScalarMapType gScalarNameIDMap(kScalarCount);
+// The ID -> Scalar Object map. This is a nsClassHashtable, it owns
+// the scalar instance and takes care of deallocating them when they
+// get removed from the map.
+ScalarStorageMapType gScalarStorageMap;
+// The ID -> Keyed Scalar Object map. As for plain scalars, this is
+// nsClassHashtable. See above.
+KeyedScalarStorageMapType gKeyedScalarStorageMap;
+
+} // namespace
+
+////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////
+//
+// PRIVATE: Function that may call JS code.
+
+// NOTE: the functions in this section all run without protection from
+// |gTelemetryScalarsMutex|. If they held the mutex, there would be the
+// possibility of deadlock because the JS_ calls that they make may call
+// back into the TelemetryScalar interface, hence trying to re-acquire the mutex.
+//
+// This means that these functions potentially race against threads, but
+// that seems preferable to risking deadlock.
+
+namespace {
+
+/**
+ * Checks if the error should be logged to the browser console.
+ * Only user-actionable data problems (truncation, key limits, negative
+ * unsigned values) are worth logging; everything else is silent.
+ *
+ * @param aSr The error code.
+ * @return true if the error should be logged, false otherwise.
+ */
+bool
+internal_ShouldLogError(ScalarResult aSr)
+{
+  switch (aSr) {
+    case ScalarResult::StringTooLong: MOZ_FALLTHROUGH;
+    case ScalarResult::KeyTooLong: MOZ_FALLTHROUGH;
+    case ScalarResult::TooManyKeys: MOZ_FALLTHROUGH;
+    case ScalarResult::UnsignedNegativeValue: MOZ_FALLTHROUGH;
+    case ScalarResult::UnsignedTruncatedValue:
+      // Intentional fall-through.
+      return true;
+
+    default:
+      return false;
+  }
+  // Every switch path returns, so the function ends here; the previous
+  // trailing |return false;| was unreachable and has been removed.
+}
+
+/**
+ * Converts the error code to a human readable error message and prints it to the
+ * browser console.
+ *
+ * @param aScalarName The name of the scalar that raised the error.
+ * @param aSr The error code.
+ */
+void
+internal_LogScalarError(const nsACString& aScalarName, ScalarResult aSr)
+{
+  nsAutoString errorMessage;
+  AppendUTF8toUTF16(aScalarName, errorMessage);
+
+  // NOTE(review): the 50/70/100 limits quoted below duplicate the
+  // kMaximum* constants defined elsewhere in this file — keep in sync.
+  switch (aSr) {
+    case ScalarResult::StringTooLong:
+      errorMessage.Append(NS_LITERAL_STRING(" - Truncating scalar value to 50 characters."));
+      break;
+    case ScalarResult::KeyTooLong:
+      errorMessage.Append(NS_LITERAL_STRING(" - The key length must be limited to 70 characters."));
+      break;
+    case ScalarResult::TooManyKeys:
+      errorMessage.Append(NS_LITERAL_STRING(" - Keyed scalars cannot have more than 100 keys."));
+      break;
+    case ScalarResult::UnsignedNegativeValue:
+      errorMessage.Append(NS_LITERAL_STRING(" - Trying to set an unsigned scalar to a negative number."));
+      break;
+    case ScalarResult::UnsignedTruncatedValue:
+      errorMessage.Append(NS_LITERAL_STRING(" - Truncating float/double number."));
+      break;
+    default:
+      // Nothing.
+      return;
+  }
+
+  LogToBrowserConsole(nsIScriptError::warningFlag, errorMessage);
+}
+
+} // namespace
+
+////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////
+//
+// PRIVATE: thread-unsafe helpers for the external interface
+
+namespace {
+
+// Returns whether base telemetry recording is enabled. Callers are expected
+// to hold |gTelemetryScalarsMutex| (see the section comment above).
+bool
+internal_CanRecordBase()
+{
+  return gCanRecordBase;
+}
+
+// Returns whether extended telemetry recording is enabled. Same locking
+// expectation as internal_CanRecordBase.
+bool
+internal_CanRecordExtended()
+{
+  return gCanRecordExtended;
+}
+
+// Maps a scalar enum id to its static ScalarInfo entry in gScalars.
+// The id must already be validated (see IsValidEnumId); no bounds check
+// is performed here.
+const ScalarInfo&
+internal_InfoForScalarID(mozilla::Telemetry::ScalarID aId)
+{
+  return gScalars[static_cast<uint32_t>(aId)];
+}
+
+/**
+ * Check if the given scalar is a keyed scalar.
+ *
+ * @param aId The scalar enum.
+ * @return true if aId refers to a keyed scalar, false otherwise.
+ */
+bool
+internal_IsKeyedScalar(mozilla::Telemetry::ScalarID aId)
+{
+  return internal_InfoForScalarID(aId).keyed;
+}
+
+/**
+ * Check whether we are allowed to record the given scalar, combining the
+ * global recording flags with the scalar's dataset.
+ *
+ * @param aId The scalar enum id (must be valid).
+ * @return true if recording is currently allowed for this scalar.
+ */
+bool
+internal_CanRecordForScalarID(mozilla::Telemetry::ScalarID aId)
+{
+  // Get the scalar info from the id.
+  const ScalarInfo &info = internal_InfoForScalarID(aId);
+
+  // Can we record at all?
+  bool canRecordBase = internal_CanRecordBase();
+  if (!canRecordBase) {
+    return false;
+  }
+
+  bool canRecordDataset = CanRecordDataset(info.dataset,
+                                           canRecordBase,
+                                           internal_CanRecordExtended());
+  if (!canRecordDataset) {
+    return false;
+  }
+
+  return true;
+}
+
+/**
+ * Get the scalar enum id from the scalar name.
+ *
+ * @param aName The scalar name.
+ * @param aId The output variable to contain the enum.
+ * @return
+ *   NS_ERROR_FAILURE if this was called before init is completed.
+ *   NS_ERROR_INVALID_ARG if the name can't be found in the scalar definitions.
+ *   NS_OK if the scalar was found and aId contains a valid enum id.
+ */
+nsresult
+internal_GetEnumByScalarName(const nsACString& aName, mozilla::Telemetry::ScalarID* aId)
+{
+  if (!gInitDone) {
+    return NS_ERROR_FAILURE;
+  }
+
+  // PromiseFlatCString guarantees a null-terminated buffer for the char*
+  // keyed lookup in the name cache.
+  CharPtrEntryType *entry = gScalarNameIDMap.GetEntry(PromiseFlatCString(aName).get());
+  if (!entry) {
+    return NS_ERROR_INVALID_ARG;
+  }
+  *aId = entry->mData;
+  return NS_OK;
+}
+
+/**
+ * Get a scalar object by its enum id. This implicitly allocates the scalar
+ * object in the storage if it wasn't previously allocated.
+ *
+ * @param aId The scalar id.
+ * @param aRet The output variable that stores the scalar object.
+ * @return
+ *   NS_ERROR_INVALID_ARG if the scalar id is unknown.
+ *   NS_ERROR_NOT_AVAILABLE if the scalar is expired.
+ *   NS_OK if the scalar was found. If that's the case, aRet contains a
+ *   valid pointer to a scalar type.
+ */
+nsresult
+internal_GetScalarByEnum(mozilla::Telemetry::ScalarID aId, ScalarBase** aRet)
+{
+  if (!IsValidEnumId(aId)) {
+    MOZ_ASSERT(false, "Requested a scalar with an invalid id.");
+    return NS_ERROR_INVALID_ARG;
+  }
+
+  const uint32_t id = static_cast<uint32_t>(aId);
+
+  ScalarBase* scalar = nullptr;
+  if (gScalarStorageMap.Get(id, &scalar)) {
+    *aRet = scalar;
+    return NS_OK;
+  }
+
+  const ScalarInfo &info = gScalars[id];
+
+  if (IsExpiredVersion(info.expiration())) {
+    return NS_ERROR_NOT_AVAILABLE;
+  }
+
+  // NOTE(review): there is no keyed-ness check here, so a keyed id will
+  // allocate a plain scalar in storage; callers are expected to filter
+  // keyed ids before (or after) calling this.
+  // internal_ScalarAllocate returns nullptr for an unsupported kind.
+  scalar = internal_ScalarAllocate(info.kind);
+  if (!scalar) {
+    return NS_ERROR_INVALID_ARG;
+  }
+
+  // The map takes ownership of |scalar|.
+  gScalarStorageMap.Put(id, scalar);
+
+  *aRet = scalar;
+  return NS_OK;
+}
+
+/**
+ * Get a scalar object by its enum id, if we're allowed to record it.
+ *
+ * @param aId The scalar id.
+ * @return The ScalarBase instance or nullptr if we're not allowed to record
+ *         the scalar.
+ */
+ScalarBase*
+internal_GetRecordableScalar(mozilla::Telemetry::ScalarID aId)
+{
+  // Get the scalar by the enum (it also internally checks for aId validity).
+  // NOTE(review): this allocates the scalar in storage before the keyed and
+  // permission checks below, so storage may gain an entry even when nullptr
+  // is returned — confirm this is intentional.
+  ScalarBase* scalar = nullptr;
+  nsresult rv = internal_GetScalarByEnum(aId, &scalar);
+  if (NS_FAILED(rv)) {
+    return nullptr;
+  }
+
+  // Keyed scalars must go through internal_GetRecordableKeyedScalar instead.
+  if (internal_IsKeyedScalar(aId)) {
+    return nullptr;
+  }
+
+  // Are we allowed to record this scalar?
+  if (!internal_CanRecordForScalarID(aId)) {
+    return nullptr;
+  }
+
+  return scalar;
+}
+
+} // namespace
+
+
+
+////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////
+//
+// PRIVATE: thread-unsafe helpers for the keyed scalars
+
+namespace {
+
+/**
+ * Get a keyed scalar object by its enum id. This implicitly allocates the keyed
+ * scalar object in the storage if it wasn't previously allocated.
+ *
+ * @param aId The scalar id.
+ * @param aRet The output variable that stores the keyed scalar object.
+ * @return
+ *   NS_ERROR_INVALID_ARG if the scalar id is unknown or this is a keyed string
+ *   scalar.
+ *   NS_ERROR_NOT_AVAILABLE if the scalar is expired.
+ *   NS_OK if the scalar was found. If that's the case, aRet contains a
+ *   valid pointer to a keyed scalar.
+ */
+nsresult
+internal_GetKeyedScalarByEnum(mozilla::Telemetry::ScalarID aId, KeyedScalar** aRet)
+{
+  if (!IsValidEnumId(aId)) {
+    MOZ_ASSERT(false, "Requested a keyed scalar with an invalid id.");
+    return NS_ERROR_INVALID_ARG;
+  }
+
+  const uint32_t id = static_cast<uint32_t>(aId);
+
+  KeyedScalar* scalar = nullptr;
+  if (gKeyedScalarStorageMap.Get(id, &scalar)) {
+    *aRet = scalar;
+    return NS_OK;
+  }
+
+  const ScalarInfo &info = gScalars[id];
+
+  if (IsExpiredVersion(info.expiration())) {
+    return NS_ERROR_NOT_AVAILABLE;
+  }
+
+  // We don't currently support keyed string scalars. Disable them.
+  if (info.kind == nsITelemetry::SCALAR_STRING) {
+    MOZ_ASSERT(false, "Keyed string scalars are not currently supported.");
+    return NS_ERROR_INVALID_ARG;
+  }
+
+  // Gecko's plain operator new is infallible (it aborts on OOM), so the
+  // previous null-check on the result was dead code and has been removed.
+  scalar = new KeyedScalar(info.kind);
+
+  // The map takes ownership of |scalar|.
+  gKeyedScalarStorageMap.Put(id, scalar);
+
+  *aRet = scalar;
+  return NS_OK;
+}
+
+/**
+ * Get a keyed scalar object by its enum id, if we're allowed to record it.
+ *
+ * @param aId The scalar id.
+ * @return The KeyedScalar instance or nullptr if we're not allowed to record
+ *         the scalar.
+ */
+KeyedScalar*
+internal_GetRecordableKeyedScalar(mozilla::Telemetry::ScalarID aId)
+{
+  // Get the scalar by the enum (it also internally checks for aId validity).
+  KeyedScalar* scalar = nullptr;
+  nsresult rv = internal_GetKeyedScalarByEnum(aId, &scalar);
+  if (NS_FAILED(rv)) {
+    return nullptr;
+  }
+
+  // Plain scalars must go through internal_GetRecordableScalar instead.
+  if (!internal_IsKeyedScalar(aId)) {
+    return nullptr;
+  }
+
+  // Are we allowed to record this scalar?
+  if (!internal_CanRecordForScalarID(aId)) {
+    return nullptr;
+  }
+
+  return scalar;
+}
+
+} // namespace
+
+////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////
+//
+// EXTERNALLY VISIBLE FUNCTIONS in namespace TelemetryScalars::
+
+// This is a StaticMutex rather than a plain Mutex (1) so that
+// it gets initialised in a thread-safe manner the first time
+// it is used, and (2) because it is never de-initialised, and
+// a normal Mutex would show up as a leak in BloatView. StaticMutex
+// also has the "OffTheBooks" property, so it won't show as a leak
+// in BloatView.
+// Another reason to use a StaticMutex instead of a plain Mutex is
+// that, due to the nature of Telemetry, we cannot rely on having a
+// mutex initialized in InitializeGlobalState. Unfortunately, we
+// cannot make sure that no other function is called before this point.
+static StaticMutex gTelemetryScalarsMutex;
+
+// Initializes the global recording flags and the scalar name -> id cache.
+// Must be called exactly once before any recording entry point is used.
+void
+TelemetryScalar::InitializeGlobalState(bool aCanRecordBase, bool aCanRecordExtended)
+{
+  StaticMutexAutoLock locker(gTelemetryScalarsMutex);
+  MOZ_ASSERT(!gInitDone, "TelemetryScalar::InitializeGlobalState "
+             "may only be called once");
+
+  gCanRecordBase = aCanRecordBase;
+  gCanRecordExtended = aCanRecordExtended;
+
+  // Populate the static scalar name->id cache. Note that the scalar names are
+  // statically allocated and come from the automatically generated TelemetryScalarData.h.
+  uint32_t scalarCount = static_cast<uint32_t>(mozilla::Telemetry::ScalarID::ScalarCount);
+  for (uint32_t i = 0; i < scalarCount; i++) {
+    CharPtrEntryType *entry = gScalarNameIDMap.PutEntry(gScalars[i].name());
+    entry->mData = static_cast<mozilla::Telemetry::ScalarID>(i);
+  }
+
+#ifdef DEBUG
+  gScalarNameIDMap.MarkImmutable();
+#endif
+  gInitDone = true;
+}
+
+// Tears down all scalar state: disables recording, drops the name cache and
+// both storage maps (the nsClassHashtables free the owned scalar objects).
+// After this call InitializeGlobalState may be invoked again.
+void
+TelemetryScalar::DeInitializeGlobalState()
+{
+  StaticMutexAutoLock locker(gTelemetryScalarsMutex);
+  gCanRecordBase = false;
+  gCanRecordExtended = false;
+  gScalarNameIDMap.Clear();
+  gScalarStorageMap.Clear();
+  gKeyedScalarStorageMap.Clear();
+  gInitDone = false;
+}
+
+// Toggles base telemetry recording at runtime.
+void
+TelemetryScalar::SetCanRecordBase(bool b)
+{
+  StaticMutexAutoLock locker(gTelemetryScalarsMutex);
+  gCanRecordBase = b;
+}
+
+// Toggles extended telemetry recording at runtime.
+void
+TelemetryScalar::SetCanRecordExtended(bool b) {
+  StaticMutexAutoLock locker(gTelemetryScalarsMutex);
+  gCanRecordExtended = b;
+}
+
+/**
+ * Adds the value to the given scalar.
+ *
+ * @param aName The scalar name.
+ * @param aVal The numeric value to add to the scalar.
+ * @param aCx The JS context.
+ * @return NS_OK if the value was added or if we're not allowed to record to this
+ *         dataset. Otherwise, return an error.
+ */
+nsresult
+TelemetryScalar::Add(const nsACString& aName, JS::HandleValue aVal, JSContext* aCx)
+{
+  // Unpack the aVal to nsIVariant. This uses the JS context.
+  // Done before taking the mutex: JS calls must not run under
+  // gTelemetryScalarsMutex (see the locking comment earlier in this file).
+  nsCOMPtr<nsIVariant> unpackedVal;
+  nsresult rv =
+    nsContentUtils::XPConnect()->JSToVariant(aCx, aVal,  getter_AddRefs(unpackedVal));
+  if (NS_FAILED(rv)) {
+    return rv;
+  }
+
+  // |sr| is only read below if the locked block completes; every other path
+  // returns early, so it is never used uninitialized.
+  ScalarResult sr;
+  {
+    StaticMutexAutoLock locker(gTelemetryScalarsMutex);
+
+    mozilla::Telemetry::ScalarID id;
+    rv = internal_GetEnumByScalarName(aName, &id);
+    if (NS_FAILED(rv)) {
+      return rv;
+    }
+
+    // We're trying to set a plain scalar, so make sure this is one.
+    if (internal_IsKeyedScalar(id)) {
+      return NS_ERROR_ILLEGAL_VALUE;
+    }
+
+    // Are we allowed to record this scalar?
+    if (!internal_CanRecordForScalarID(id)) {
+      return NS_OK;
+    }
+
+    // Finally get the scalar.
+    ScalarBase* scalar = nullptr;
+    rv = internal_GetScalarByEnum(id, &scalar);
+    if (NS_FAILED(rv)) {
+      // Don't throw on expired scalars.
+      if (rv == NS_ERROR_NOT_AVAILABLE) {
+        return NS_OK;
+      }
+      return rv;
+    }
+
+    sr = scalar->AddValue(unpackedVal);
+  }
+
+  // Warn the user about the error if we need to. Logging happens outside
+  // the lock since it may re-enter JS.
+  if (internal_ShouldLogError(sr)) {
+    internal_LogScalarError(aName, sr);
+  }
+
+  return MapToNsResult(sr);
+}
+
+/**
+ * Adds the value to the given keyed scalar.
+ *
+ * @param aName The scalar name.
+ * @param aKey The key name.
+ * @param aVal The numeric value to add to the scalar.
+ * @param aCx The JS context.
+ * @return NS_OK if the value was added or if we're not allowed to record to this
+ *         dataset. Otherwise, return an error.
+ */
+nsresult
+TelemetryScalar::Add(const nsACString& aName, const nsAString& aKey, JS::HandleValue aVal,
+                     JSContext* aCx)
+{
+  // Unpack the aVal to nsIVariant. This uses the JS context and therefore
+  // must happen before the mutex is taken.
+  nsCOMPtr<nsIVariant> unpackedVal;
+  nsresult rv =
+    nsContentUtils::XPConnect()->JSToVariant(aCx, aVal,  getter_AddRefs(unpackedVal));
+  if (NS_FAILED(rv)) {
+    return rv;
+  }
+
+  ScalarResult sr;
+  {
+    StaticMutexAutoLock locker(gTelemetryScalarsMutex);
+
+    mozilla::Telemetry::ScalarID id;
+    rv = internal_GetEnumByScalarName(aName, &id);
+    if (NS_FAILED(rv)) {
+      return rv;
+    }
+
+    // Make sure this is a keyed scalar.
+    if (!internal_IsKeyedScalar(id)) {
+      return NS_ERROR_ILLEGAL_VALUE;
+    }
+
+    // Are we allowed to record this scalar?
+    if (!internal_CanRecordForScalarID(id)) {
+      return NS_OK;
+    }
+
+    // Finally get the scalar.
+    KeyedScalar* scalar = nullptr;
+    rv = internal_GetKeyedScalarByEnum(id, &scalar);
+    if (NS_FAILED(rv)) {
+      // Don't throw on expired scalars.
+      if (rv == NS_ERROR_NOT_AVAILABLE) {
+        return NS_OK;
+      }
+      return rv;
+    }
+
+    sr = scalar->AddValue(aKey, unpackedVal);
+  }
+
+  // Warn the user about the error if we need to (outside the lock).
+  if (internal_ShouldLogError(sr)) {
+    internal_LogScalarError(aName, sr);
+  }
+
+  return MapToNsResult(sr);
+}
+
+/**
+ * Adds the value to the given scalar (C++ API). Errors (invalid id,
+ * expired or keyed scalar, recording disabled) are silently ignored.
+ *
+ * @param aId The scalar enum id.
+ * @param aValue The numeric value to add to the scalar.
+ */
+void
+TelemetryScalar::Add(mozilla::Telemetry::ScalarID aId, uint32_t aValue)
+{
+  StaticMutexAutoLock locker(gTelemetryScalarsMutex);
+
+  ScalarBase* scalar = internal_GetRecordableScalar(aId);
+  if (!scalar) {
+    return;
+  }
+
+  scalar->AddValue(aValue);
+}
+
+/**
+ * Adds the value to the given keyed scalar (C++ API). Errors are silently
+ * ignored, as above.
+ *
+ * @param aId The scalar enum id.
+ * @param aKey The key name.
+ * @param aValue The numeric value to add to the scalar.
+ */
+void
+TelemetryScalar::Add(mozilla::Telemetry::ScalarID aId, const nsAString& aKey,
+                     uint32_t aValue)
+{
+  StaticMutexAutoLock locker(gTelemetryScalarsMutex);
+
+  KeyedScalar* scalar = internal_GetRecordableKeyedScalar(aId);
+  if (!scalar) {
+    return;
+  }
+
+  scalar->AddValue(aKey, aValue);
+}
+
+/**
+ * Sets the scalar to the given value.
+ *
+ * @param aName The scalar name.
+ * @param aVal The value to set the scalar to.
+ * @param aCx The JS context.
+ * @return NS_OK if the value was set or if we're not allowed to record to this
+ *         dataset. Otherwise, return an error.
+ */
+nsresult
+TelemetryScalar::Set(const nsACString& aName, JS::HandleValue aVal, JSContext* aCx)
+{
+  // Unpack the aVal to nsIVariant. This uses the JS context and therefore
+  // must happen before the mutex is taken.
+  nsCOMPtr<nsIVariant> unpackedVal;
+  nsresult rv =
+    nsContentUtils::XPConnect()->JSToVariant(aCx, aVal,  getter_AddRefs(unpackedVal));
+  if (NS_FAILED(rv)) {
+    return rv;
+  }
+
+  ScalarResult sr;
+  {
+    StaticMutexAutoLock locker(gTelemetryScalarsMutex);
+
+    mozilla::Telemetry::ScalarID id;
+    rv = internal_GetEnumByScalarName(aName, &id);
+    if (NS_FAILED(rv)) {
+      return rv;
+    }
+
+    // We're trying to set a plain scalar, so make sure this is one.
+    if (internal_IsKeyedScalar(id)) {
+      return NS_ERROR_ILLEGAL_VALUE;
+    }
+
+    // Are we allowed to record this scalar?
+    if (!internal_CanRecordForScalarID(id)) {
+      return NS_OK;
+    }
+
+    // Finally get the scalar.
+    ScalarBase* scalar = nullptr;
+    rv = internal_GetScalarByEnum(id, &scalar);
+    if (NS_FAILED(rv)) {
+      // Don't throw on expired scalars.
+      if (rv == NS_ERROR_NOT_AVAILABLE) {
+        return NS_OK;
+      }
+      return rv;
+    }
+
+    sr = scalar->SetValue(unpackedVal);
+  }
+
+  // Warn the user about the error if we need to (outside the lock).
+  if (internal_ShouldLogError(sr)) {
+    internal_LogScalarError(aName, sr);
+  }
+
+  return MapToNsResult(sr);
+}
+
+/**
+ * Sets the keyed scalar to the given value.
+ *
+ * @param aName The scalar name.
+ * @param aKey The key name.
+ * @param aVal The value to set the scalar to.
+ * @param aCx The JS context.
+ * @return NS_OK if the value was set or if we're not allowed to record to this
+ *         dataset. Otherwise, return an error.
+ */
+nsresult
+TelemetryScalar::Set(const nsACString& aName, const nsAString& aKey, JS::HandleValue aVal,
+                     JSContext* aCx)
+{
+  // Unpack the aVal to nsIVariant. This uses the JS context and therefore
+  // must happen before the mutex is taken.
+  nsCOMPtr<nsIVariant> unpackedVal;
+  nsresult rv =
+    nsContentUtils::XPConnect()->JSToVariant(aCx, aVal,  getter_AddRefs(unpackedVal));
+  if (NS_FAILED(rv)) {
+    return rv;
+  }
+
+  ScalarResult sr;
+  {
+    StaticMutexAutoLock locker(gTelemetryScalarsMutex);
+
+    mozilla::Telemetry::ScalarID id;
+    rv = internal_GetEnumByScalarName(aName, &id);
+    if (NS_FAILED(rv)) {
+      return rv;
+    }
+
+    // We're trying to set a keyed scalar. Report an error if this isn't one.
+    if (!internal_IsKeyedScalar(id)) {
+      return NS_ERROR_ILLEGAL_VALUE;
+    }
+
+    // Are we allowed to record this scalar?
+    if (!internal_CanRecordForScalarID(id)) {
+      return NS_OK;
+    }
+
+    // Finally get the scalar.
+    KeyedScalar* scalar = nullptr;
+    rv = internal_GetKeyedScalarByEnum(id, &scalar);
+    if (NS_FAILED(rv)) {
+      // Don't throw on expired scalars.
+      if (rv == NS_ERROR_NOT_AVAILABLE) {
+        return NS_OK;
+      }
+      return rv;
+    }
+
+    sr = scalar->SetValue(aKey, unpackedVal);
+  }
+
+  // Warn the user about the error if we need to (outside the lock).
+  if (internal_ShouldLogError(sr)) {
+    internal_LogScalarError(aName, sr);
+  }
+
+  return MapToNsResult(sr);
+}
+
+/**
+ * Sets the scalar to the given numeric value (C++ API). Errors are
+ * silently ignored.
+ *
+ * @param aId The scalar enum id.
+ * @param aValue The numeric, unsigned value to set the scalar to.
+ */
+void
+TelemetryScalar::Set(mozilla::Telemetry::ScalarID aId, uint32_t aValue)
+{
+  StaticMutexAutoLock locker(gTelemetryScalarsMutex);
+
+  ScalarBase* scalar = internal_GetRecordableScalar(aId);
+  if (!scalar) {
+    return;
+  }
+
+  scalar->SetValue(aValue);
+}
+
+/**
+ * Sets the scalar to the given string value (C++ API).
+ *
+ * @param aId The scalar enum id.
+ * @param aValue The string value to set the scalar to.
+ */
+void
+TelemetryScalar::Set(mozilla::Telemetry::ScalarID aId, const nsAString& aValue)
+{
+  StaticMutexAutoLock locker(gTelemetryScalarsMutex);
+
+  ScalarBase* scalar = internal_GetRecordableScalar(aId);
+  if (!scalar) {
+    return;
+  }
+
+  scalar->SetValue(aValue);
+}
+
+/**
+ * Sets the scalar to the given boolean value (C++ API).
+ *
+ * @param aId The scalar enum id.
+ * @param aValue The boolean value to set the scalar to.
+ */
+void
+TelemetryScalar::Set(mozilla::Telemetry::ScalarID aId, bool aValue)
+{
+  StaticMutexAutoLock locker(gTelemetryScalarsMutex);
+
+  ScalarBase* scalar = internal_GetRecordableScalar(aId);
+  if (!scalar) {
+    return;
+  }
+
+  scalar->SetValue(aValue);
+}
+
+/**
+ * Sets the keyed scalar to the given numeric value (C++ API).
+ *
+ * @param aId The scalar enum id.
+ * @param aKey The scalar key.
+ * @param aValue The numeric, unsigned value to set the scalar to.
+ */
+void
+TelemetryScalar::Set(mozilla::Telemetry::ScalarID aId, const nsAString& aKey,
+                     uint32_t aValue)
+{
+  StaticMutexAutoLock locker(gTelemetryScalarsMutex);
+
+  KeyedScalar* scalar = internal_GetRecordableKeyedScalar(aId);
+  if (!scalar) {
+    return;
+  }
+
+  scalar->SetValue(aKey, aValue);
+}
+
+/**
+ * Sets the keyed scalar to the given boolean value (C++ API).
+ *
+ * @param aId The scalar enum id.
+ * @param aKey The scalar key.
+ * @param aValue The boolean value to set the scalar to.
+ */
+void
+TelemetryScalar::Set(mozilla::Telemetry::ScalarID aId, const nsAString& aKey,
+                     bool aValue)
+{
+  StaticMutexAutoLock locker(gTelemetryScalarsMutex);
+
+  KeyedScalar* scalar = internal_GetRecordableKeyedScalar(aId);
+  if (!scalar) {
+    return;
+  }
+
+  scalar->SetValue(aKey, aValue);
+}
+
+/**
+ * Sets the scalar to the maximum of the current and the passed value.
+ *
+ * @param aName The scalar name.
+ * @param aVal The numeric value to set the scalar to.
+ * @param aCx The JS context.
+ * @return NS_OK if the value was set or if we're not allowed to record to this
+ *         dataset. Otherwise, return an error.
+ */
+nsresult
+TelemetryScalar::SetMaximum(const nsACString& aName, JS::HandleValue aVal, JSContext* aCx)
+{
+  // Unpack the aVal to nsIVariant. This uses the JS context and therefore
+  // must happen before the mutex is taken.
+  nsCOMPtr<nsIVariant> unpackedVal;
+  nsresult rv =
+    nsContentUtils::XPConnect()->JSToVariant(aCx, aVal,  getter_AddRefs(unpackedVal));
+  if (NS_FAILED(rv)) {
+    return rv;
+  }
+
+  ScalarResult sr;
+  {
+    StaticMutexAutoLock locker(gTelemetryScalarsMutex);
+
+    mozilla::Telemetry::ScalarID id;
+    rv = internal_GetEnumByScalarName(aName, &id);
+    if (NS_FAILED(rv)) {
+      return rv;
+    }
+
+    // Make sure this is not a keyed scalar.
+    if (internal_IsKeyedScalar(id)) {
+      return NS_ERROR_ILLEGAL_VALUE;
+    }
+
+    // Are we allowed to record this scalar?
+    if (!internal_CanRecordForScalarID(id)) {
+      return NS_OK;
+    }
+
+    // Finally get the scalar.
+    ScalarBase* scalar = nullptr;
+    rv = internal_GetScalarByEnum(id, &scalar);
+    if (NS_FAILED(rv)) {
+      // Don't throw on expired scalars.
+      if (rv == NS_ERROR_NOT_AVAILABLE) {
+        return NS_OK;
+      }
+      return rv;
+    }
+
+    sr = scalar->SetMaximum(unpackedVal);
+  }
+
+  // Warn the user about the error if we need to (outside the lock).
+  if (internal_ShouldLogError(sr)) {
+    internal_LogScalarError(aName, sr);
+  }
+
+  return MapToNsResult(sr);
+}
+
+/**
+ * Sets the keyed scalar to the maximum of the current and the passed value.
+ *
+ * @param aName The scalar name.
+ * @param aKey The key name.
+ * @param aVal The numeric value to set the scalar to.
+ * @param aCx The JS context.
+ * @return NS_OK if the value was set or if we're not allowed to record to this
+ *         dataset. Otherwise, return an error.
+ */
+nsresult
+TelemetryScalar::SetMaximum(const nsACString& aName, const nsAString& aKey, JS::HandleValue aVal,
+                            JSContext* aCx)
+{
+  // Unpack the aVal to nsIVariant. This uses the JS context and therefore
+  // must happen before the mutex is taken.
+  nsCOMPtr<nsIVariant> unpackedVal;
+  nsresult rv =
+    nsContentUtils::XPConnect()->JSToVariant(aCx, aVal,  getter_AddRefs(unpackedVal));
+  if (NS_FAILED(rv)) {
+    return rv;
+  }
+
+  ScalarResult sr;
+  {
+    StaticMutexAutoLock locker(gTelemetryScalarsMutex);
+
+    mozilla::Telemetry::ScalarID id;
+    rv = internal_GetEnumByScalarName(aName, &id);
+    if (NS_FAILED(rv)) {
+      return rv;
+    }
+
+    // Make sure this is a keyed scalar.
+    if (!internal_IsKeyedScalar(id)) {
+      return NS_ERROR_ILLEGAL_VALUE;
+    }
+
+    // Are we allowed to record this scalar?
+    if (!internal_CanRecordForScalarID(id)) {
+      return NS_OK;
+    }
+
+    // Finally get the scalar.
+    KeyedScalar* scalar = nullptr;
+    rv = internal_GetKeyedScalarByEnum(id, &scalar);
+    if (NS_FAILED(rv)) {
+      // Don't throw on expired scalars.
+      if (rv == NS_ERROR_NOT_AVAILABLE) {
+        return NS_OK;
+      }
+      return rv;
+    }
+
+    sr = scalar->SetMaximum(aKey, unpackedVal);
+  }
+
+  // Warn the user about the error if we need to (outside the lock).
+  if (internal_ShouldLogError(sr)) {
+    internal_LogScalarError(aName, sr);
+  }
+
+  return MapToNsResult(sr);
+}
+
+/**
+ * Sets the scalar to the maximum of the current and the passed value
+ * (C++ API). Errors are silently ignored.
+ *
+ * @param aId The scalar enum id.
+ * @param aValue The numeric value to set the scalar to.
+ */
+void
+TelemetryScalar::SetMaximum(mozilla::Telemetry::ScalarID aId, uint32_t aValue)
+{
+  StaticMutexAutoLock locker(gTelemetryScalarsMutex);
+
+  ScalarBase* scalar = internal_GetRecordableScalar(aId);
+  if (!scalar) {
+    return;
+  }
+
+  scalar->SetMaximum(aValue);
+}
+
+/**
+ * Sets the keyed scalar to the maximum of the current and the passed value
+ * (C++ API). Errors are silently ignored.
+ *
+ * @param aId The scalar enum id.
+ * @param aKey The key name.
+ * @param aValue The numeric value to set the scalar to.
+ */
+void
+TelemetryScalar::SetMaximum(mozilla::Telemetry::ScalarID aId, const nsAString& aKey,
+                            uint32_t aValue)
+{
+  StaticMutexAutoLock locker(gTelemetryScalarsMutex);
+
+  KeyedScalar* scalar = internal_GetRecordableKeyedScalar(aId);
+  if (!scalar) {
+    return;
+  }
+
+  scalar->SetMaximum(aKey, aValue);
+}
+
+/**
+ * Serializes the scalars from the given dataset to a json-style object and resets them.
+ * The returned structure looks like {"group1.probe":1,"group1.other_probe":false,...}.
+ *
+ * @param aDataset DATASET_RELEASE_CHANNEL_OPTOUT or DATASET_RELEASE_CHANNEL_OPTIN.
+ * @param aClearScalars Whether to clear out the scalars after snapshotting.
+ * @param aCx The JS context.
+ * @param optional_argc Number of optional arguments actually passed from JS.
+ * @param aResult Receives the snapshot object.
+ */
+nsresult
+TelemetryScalar::CreateSnapshots(unsigned int aDataset, bool aClearScalars, JSContext* aCx,
+                                 uint8_t optional_argc, JS::MutableHandle<JS::Value> aResult)
+{
+  // If no arguments were passed in, apply the default value.
+  if (!optional_argc) {
+    aClearScalars = false;
+  }
+
+  JS::Rooted<JSObject*> root_obj(aCx, JS_NewPlainObject(aCx));
+  if (!root_obj) {
+    return NS_ERROR_FAILURE;
+  }
+  aResult.setObject(*root_obj);
+
+  // Only lock the mutex while accessing our data, without locking any JS related code.
+  // The values are copied out under the lock, then reflected to JS after it
+  // is released (JS calls must not run under gTelemetryScalarsMutex).
+  typedef mozilla::Pair<const char*, nsCOMPtr<nsIVariant>> DataPair;
+  nsTArray<DataPair> scalarsToReflect;
+  {
+    StaticMutexAutoLock locker(gTelemetryScalarsMutex);
+    // Iterate the scalars in gScalarStorageMap. The storage may contain empty or yet to be
+    // initialized scalars.
+    for (auto iter = gScalarStorageMap.Iter(); !iter.Done(); iter.Next()) {
+      ScalarBase* scalar = static_cast<ScalarBase*>(iter.Data());
+
+      // Get the informations for this scalar.
+      const ScalarInfo& info = gScalars[iter.Key()];
+
+      // Serialize the scalar if it's in the desired dataset.
+      if (IsInDataset(info.dataset, aDataset)) {
+        // Get the scalar value.
+        nsCOMPtr<nsIVariant> scalarValue;
+        nsresult rv = scalar->GetValue(scalarValue);
+        if (NS_FAILED(rv)) {
+          return rv;
+        }
+        // Append it to our list. The name pointer is statically allocated
+        // (TelemetryScalarData.h), so holding it past the lock is safe.
+        scalarsToReflect.AppendElement(mozilla::MakePair(info.name(), scalarValue));
+      }
+    }
+
+    if (aClearScalars) {
+      // The map already takes care of freeing the allocated memory.
+      gScalarStorageMap.Clear();
+    }
+  }
+
+  // Reflect it to JS.
+  for (nsTArray<DataPair>::size_type i = 0; i < scalarsToReflect.Length(); i++) {
+    const DataPair& scalar = scalarsToReflect[i];
+
+    // Convert it to a JS Val.
+    JS::Rooted<JS::Value> scalarJsValue(aCx);
+    nsresult rv =
+      nsContentUtils::XPConnect()->VariantToJS(aCx, root_obj, scalar.second(), &scalarJsValue);
+    if (NS_FAILED(rv)) {
+      return rv;
+    }
+
+    // Add it to the scalar object.
+    if (!JS_DefineProperty(aCx, root_obj, scalar.first(), scalarJsValue, JSPROP_ENUMERATE)) {
+      return NS_ERROR_FAILURE;
+    }
+  }
+
+  return NS_OK;
+}
+
+/**
+ * Serializes the scalars from the given dataset to a json-style object and resets them.
+ * The returned structure looks like:
+ * { "group1.probe": { "key_1": 2, "key_2": 1, ... }, ... }
+ *
+ * @param aDataset DATASET_RELEASE_CHANNEL_OPTOUT or DATASET_RELEASE_CHANNEL_OPTIN.
+ * @param aClearScalars Whether to clear out the keyed scalars after snapshotting.
+ */
+nsresult
+TelemetryScalar::CreateKeyedSnapshots(unsigned int aDataset, bool aClearScalars, JSContext* aCx,
+ uint8_t optional_argc, JS::MutableHandle<JS::Value> aResult)
+{
+ // If no arguments were passed in, apply the default value.
+ if (!optional_argc) {
+ aClearScalars = false;
+ }
+
+ JS::Rooted<JSObject*> root_obj(aCx, JS_NewPlainObject(aCx));
+ if (!root_obj) {
+ return NS_ERROR_FAILURE;
+ }
+ aResult.setObject(*root_obj);
+
+ // Only lock the mutex while accessing our data, without locking any JS related code.
+ typedef mozilla::Pair<const char*, nsTArray<KeyedScalar::KeyValuePair>> DataPair;
+ nsTArray<DataPair> scalarsToReflect;
+ {
+ StaticMutexAutoLock locker(gTelemetryScalarsMutex);
+ // Iterate the scalars in gKeyedScalarStorageMap. The storage may contain empty or yet
+ // to be initialized scalars.
+ for (auto iter = gKeyedScalarStorageMap.Iter(); !iter.Done(); iter.Next()) {
+ KeyedScalar* scalar = static_cast<KeyedScalar*>(iter.Data());
+
+      // Get the information for this scalar.
+ const ScalarInfo& info = gScalars[iter.Key()];
+
+ // Serialize the scalar if it's in the desired dataset.
+ if (IsInDataset(info.dataset, aDataset)) {
+ // Get the keys for this scalar.
+ nsTArray<KeyedScalar::KeyValuePair> scalarKeyedData;
+ nsresult rv = scalar->GetValue(scalarKeyedData);
+ if (NS_FAILED(rv)) {
+ return rv;
+ }
+ // Append it to our list.
+ scalarsToReflect.AppendElement(mozilla::MakePair(info.name(), scalarKeyedData));
+ }
+ }
+
+ if (aClearScalars) {
+ // The map already takes care of freeing the allocated memory.
+ gKeyedScalarStorageMap.Clear();
+ }
+ }
+
+ // Reflect it to JS.
+ for (nsTArray<DataPair>::size_type i = 0; i < scalarsToReflect.Length(); i++) {
+ const DataPair& keyedScalarData = scalarsToReflect[i];
+
+ // Go through each keyed scalar and create a keyed scalar object.
+ // This object will hold the values for all the keyed scalar keys.
+ JS::RootedObject keyedScalarObj(aCx, JS_NewPlainObject(aCx));
+
+ // Define a property for each scalar key, then add it to the keyed scalar
+ // object.
+ const nsTArray<KeyedScalar::KeyValuePair>& keyProps = keyedScalarData.second();
+ for (uint32_t i = 0; i < keyProps.Length(); i++) {
+ const KeyedScalar::KeyValuePair& keyData = keyProps[i];
+
+ // Convert the value for the key to a JSValue.
+ JS::Rooted<JS::Value> keyJsValue(aCx);
+ nsresult rv =
+ nsContentUtils::XPConnect()->VariantToJS(aCx, keyedScalarObj, keyData.second(), &keyJsValue);
+ if (NS_FAILED(rv)) {
+ return rv;
+ }
+
+ // Add the key to the scalar representation.
+ const NS_ConvertUTF8toUTF16 key(keyData.first());
+ if (!JS_DefineUCProperty(aCx, keyedScalarObj, key.Data(), key.Length(), keyJsValue, JSPROP_ENUMERATE)) {
+ return NS_ERROR_FAILURE;
+ }
+ }
+
+ // Add the scalar to the root object.
+ if (!JS_DefineProperty(aCx, root_obj, keyedScalarData.first(), keyedScalarObj, JSPROP_ENUMERATE)) {
+ return NS_ERROR_FAILURE;
+ }
+ }
+
+ return NS_OK;
+}
+
+/**
+ * Resets all the stored scalars. This is intended to be only used in tests.
+ */
+void
+TelemetryScalar::ClearScalars()
+{
+ StaticMutexAutoLock locker(gTelemetryScalarsMutex);
+ gScalarStorageMap.Clear();
+ gKeyedScalarStorageMap.Clear();
+}
+
+size_t
+TelemetryScalar::GetMapShallowSizesOfExcludingThis(mozilla::MallocSizeOf aMallocSizeOf)
+{
+ StaticMutexAutoLock locker(gTelemetryScalarsMutex);
+ return gScalarNameIDMap.ShallowSizeOfExcludingThis(aMallocSizeOf);
+}
+
+size_t
+TelemetryScalar::GetScalarSizesOfIncludingThis(mozilla::MallocSizeOf aMallocSizeOf)
+{
+ StaticMutexAutoLock locker(gTelemetryScalarsMutex);
+ size_t n = 0;
+ // For the plain scalars...
+ for (auto iter = gScalarStorageMap.Iter(); !iter.Done(); iter.Next()) {
+ ScalarBase* scalar = static_cast<ScalarBase*>(iter.Data());
+ n += scalar->SizeOfIncludingThis(aMallocSizeOf);
+ }
+ // ...and for the keyed scalars.
+ for (auto iter = gKeyedScalarStorageMap.Iter(); !iter.Done(); iter.Next()) {
+ KeyedScalar* scalar = static_cast<KeyedScalar*>(iter.Data());
+ n += scalar->SizeOfIncludingThis(aMallocSizeOf);
+ }
+ return n;
+}
diff --git a/toolkit/components/telemetry/TelemetryScalar.h b/toolkit/components/telemetry/TelemetryScalar.h
new file mode 100644
index 000000000..b20a8dace
--- /dev/null
+++ b/toolkit/components/telemetry/TelemetryScalar.h
@@ -0,0 +1,64 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2; -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef TelemetryScalar_h__
+#define TelemetryScalar_h__
+
+#include "mozilla/TelemetryScalarEnums.h"
+
+// This module is internal to Telemetry. It encapsulates Telemetry's
+// scalar accumulation and storage logic. It should only be used by
+// Telemetry.cpp. These functions should not be used anywhere else.
+// For the public interface to Telemetry functionality, see Telemetry.h.
+
+namespace TelemetryScalar {
+
+void InitializeGlobalState(bool canRecordBase, bool canRecordExtended);
+void DeInitializeGlobalState();
+
+void SetCanRecordBase(bool b);
+void SetCanRecordExtended(bool b);
+
+// JS API Endpoints.
+nsresult Add(const nsACString& aName, JS::HandleValue aVal, JSContext* aCx);
+nsresult Set(const nsACString& aName, JS::HandleValue aVal, JSContext* aCx);
+nsresult SetMaximum(const nsACString& aName, JS::HandleValue aVal, JSContext* aCx);
+nsresult CreateSnapshots(unsigned int aDataset, bool aClearScalars,
+ JSContext* aCx, uint8_t optional_argc,
+ JS::MutableHandle<JS::Value> aResult);
+
+// Keyed JS API Endpoints.
+nsresult Add(const nsACString& aName, const nsAString& aKey, JS::HandleValue aVal,
+ JSContext* aCx);
+nsresult Set(const nsACString& aName, const nsAString& aKey, JS::HandleValue aVal,
+ JSContext* aCx);
+nsresult SetMaximum(const nsACString& aName, const nsAString& aKey, JS::HandleValue aVal,
+ JSContext* aCx);
+nsresult CreateKeyedSnapshots(unsigned int aDataset, bool aClearScalars,
+ JSContext* aCx, uint8_t optional_argc,
+ JS::MutableHandle<JS::Value> aResult);
+
+// C++ API Endpoints.
+void Add(mozilla::Telemetry::ScalarID aId, uint32_t aValue);
+void Set(mozilla::Telemetry::ScalarID aId, uint32_t aValue);
+void Set(mozilla::Telemetry::ScalarID aId, const nsAString& aValue);
+void Set(mozilla::Telemetry::ScalarID aId, bool aValue);
+void SetMaximum(mozilla::Telemetry::ScalarID aId, uint32_t aValue);
+
+// Keyed C++ API Endpoints.
+void Add(mozilla::Telemetry::ScalarID aId, const nsAString& aKey, uint32_t aValue);
+void Set(mozilla::Telemetry::ScalarID aId, const nsAString& aKey, uint32_t aValue);
+void Set(mozilla::Telemetry::ScalarID aId, const nsAString& aKey, bool aValue);
+void SetMaximum(mozilla::Telemetry::ScalarID aId, const nsAString& aKey, uint32_t aValue);
+
+// Only to be used for testing.
+void ClearScalars();
+
+size_t GetMapShallowSizesOfExcludingThis(mozilla::MallocSizeOf aMallocSizeOf);
+size_t GetScalarSizesOfIncludingThis(mozilla::MallocSizeOf aMallocSizeOf);
+
+} // namespace TelemetryScalar
+
+#endif // TelemetryScalar_h__ \ No newline at end of file
diff --git a/toolkit/components/telemetry/TelemetrySend.jsm b/toolkit/components/telemetry/TelemetrySend.jsm
new file mode 100644
index 000000000..4694ac6a9
--- /dev/null
+++ b/toolkit/components/telemetry/TelemetrySend.jsm
@@ -0,0 +1,1114 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * This module is responsible for uploading pings to the server and persisting
+ * pings that can't be sent now.
+ * Those pending pings are persisted on disk and sent at the next opportunity,
+ * newest first.
+ */
+
+"use strict";
+
+this.EXPORTED_SYMBOLS = [
+ "TelemetrySend",
+];
+
+const {classes: Cc, interfaces: Ci, utils: Cu} = Components;
+
+Cu.import("resource://gre/modules/XPCOMUtils.jsm", this);
+Cu.import("resource://gre/modules/Task.jsm", this);
+Cu.import("resource://gre/modules/Log.jsm", this);
+Cu.import("resource://gre/modules/Preferences.jsm");
+Cu.import("resource://gre/modules/PromiseUtils.jsm");
+Cu.import("resource://gre/modules/ServiceRequest.jsm", this);
+Cu.import("resource://gre/modules/Services.jsm", this);
+Cu.import("resource://gre/modules/TelemetryUtils.jsm", this);
+Cu.import("resource://gre/modules/Timer.jsm", this);
+
+XPCOMUtils.defineLazyModuleGetter(this, "AsyncShutdown",
+ "resource://gre/modules/AsyncShutdown.jsm");
+XPCOMUtils.defineLazyModuleGetter(this, "TelemetryStorage",
+ "resource://gre/modules/TelemetryStorage.jsm");
+XPCOMUtils.defineLazyModuleGetter(this, "TelemetryReportingPolicy",
+ "resource://gre/modules/TelemetryReportingPolicy.jsm");
+XPCOMUtils.defineLazyServiceGetter(this, "Telemetry",
+ "@mozilla.org/base/telemetry;1",
+ "nsITelemetry");
+
+const Utils = TelemetryUtils;
+
+const LOGGER_NAME = "Toolkit.Telemetry";
+const LOGGER_PREFIX = "TelemetrySend::";
+
+const PREF_BRANCH = "toolkit.telemetry.";
+const PREF_SERVER = PREF_BRANCH + "server";
+const PREF_UNIFIED = PREF_BRANCH + "unified";
+const PREF_FHR_UPLOAD_ENABLED = "datareporting.healthreport.uploadEnabled";
+
+const TOPIC_IDLE_DAILY = "idle-daily";
+const TOPIC_QUIT_APPLICATION = "quit-application";
+
+// Whether the FHR/Telemetry unification features are enabled.
+// Changing this pref requires a restart.
+const IS_UNIFIED_TELEMETRY = Preferences.get(PREF_UNIFIED, false);
+
+const PING_FORMAT_VERSION = 4;
+
+const MS_IN_A_MINUTE = 60 * 1000;
+
+const PING_TYPE_DELETION = "deletion";
+
+// We try to spread "midnight" pings out over this interval.
+const MIDNIGHT_FUZZING_INTERVAL_MS = 60 * MS_IN_A_MINUTE;
+// We delay sending "midnight" pings on this client by this interval.
+const MIDNIGHT_FUZZING_DELAY_MS = Math.random() * MIDNIGHT_FUZZING_INTERVAL_MS;
+
+// Timeout after which we consider a ping submission failed.
+const PING_SUBMIT_TIMEOUT_MS = 1.5 * MS_IN_A_MINUTE;
+
+// To keep resource usage in check, we limit ping sending to a maximum number
+// of pings per minute.
+const MAX_PING_SENDS_PER_MINUTE = 10;
+
+// If we have more pending pings than we can send right now, we schedule the next
+// send for after SEND_TICK_DELAY.
+const SEND_TICK_DELAY = 1 * MS_IN_A_MINUTE;
+// If we had any ping send failures since the last ping, we use a backoff timeout
+// for the next ping sends. We increase the delay exponentially up to a limit of
+// SEND_MAXIMUM_BACKOFF_DELAY_MS.
+// This exponential backoff will be reset by external ping submissions & idle-daily.
+const SEND_MAXIMUM_BACKOFF_DELAY_MS = 120 * MS_IN_A_MINUTE;
+
+// The age of a pending ping to be considered overdue (in milliseconds).
+const OVERDUE_PING_FILE_AGE = 7 * 24 * 60 * MS_IN_A_MINUTE; // 1 week
+
+function monotonicNow() {
+ try {
+ return Telemetry.msSinceProcessStart();
+ } catch (ex) {
+ // If this fails fall back to the (non-monotonic) Date value.
+ return Date.now();
+ }
+}
+
+/**
+ * This is a policy object used to override behavior within this module.
+ * Tests override properties on this object to allow for control of behavior
+ * that would otherwise be very hard to cover.
+ */
+var Policy = {
+ now: () => new Date(),
+ midnightPingFuzzingDelay: () => MIDNIGHT_FUZZING_DELAY_MS,
+ setSchedulerTickTimeout: (callback, delayMs) => setTimeout(callback, delayMs),
+ clearSchedulerTickTimeout: (id) => clearTimeout(id),
+};
+
+/**
+ * Determine if the ping has the new v4 ping format or the legacy v2 one or earlier.
+ */
+function isV4PingFormat(aPing) {
+ return ("id" in aPing) && ("application" in aPing) &&
+ ("version" in aPing) && (aPing.version >= 2);
+}
+
+/**
+ * Check if the provided ping is a deletion ping.
+ * @param {Object} aPing The ping to check.
+ * @return {Boolean} True if the ping is a deletion ping, false otherwise.
+ */
+function isDeletionPing(aPing) {
+ return isV4PingFormat(aPing) && (aPing.type == PING_TYPE_DELETION);
+}
+
+/**
+ * Save the provided ping as a pending ping. If it's a deletion ping, save it
+ * to a special location.
+ * @param {Object} aPing The ping to save.
+ * @return {Promise} A promise resolved when the ping is saved.
+ */
+function savePing(aPing) {
+ if (isDeletionPing(aPing)) {
+ return TelemetryStorage.saveDeletionPing(aPing);
+ }
+ return TelemetryStorage.savePendingPing(aPing);
+}
+
+/**
+ * @return {String} This returns a string with the gzip compressed data.
+ */
+function gzipCompressString(string) {
+ let observer = {
+ buffer: "",
+ onStreamComplete: function(loader, context, status, length, result) {
+ this.buffer = String.fromCharCode.apply(this, result);
+ }
+ };
+
+ let scs = Cc["@mozilla.org/streamConverters;1"]
+ .getService(Ci.nsIStreamConverterService);
+ let listener = Cc["@mozilla.org/network/stream-loader;1"]
+ .createInstance(Ci.nsIStreamLoader);
+ listener.init(observer);
+ let converter = scs.asyncConvertData("uncompressed", "gzip",
+ listener, null);
+ let stringStream = Cc["@mozilla.org/io/string-input-stream;1"]
+ .createInstance(Ci.nsIStringInputStream);
+ stringStream.data = string;
+ converter.onStartRequest(null, null);
+ converter.onDataAvailable(null, null, stringStream, 0, string.length);
+ converter.onStopRequest(null, null, null);
+ return observer.buffer;
+}
+
+this.TelemetrySend = {
+
+ /**
+ * Age in ms of a pending ping to be considered overdue.
+ */
+ get OVERDUE_PING_FILE_AGE() {
+ return OVERDUE_PING_FILE_AGE;
+ },
+
+ get pendingPingCount() {
+ return TelemetrySendImpl.pendingPingCount;
+ },
+
+ /**
+ * Initializes this module.
+ *
+ * @param {Boolean} testing Whether this is run in a test. This changes some behavior
+ * to enable proper testing.
+ * @return {Promise} Resolved when setup is finished.
+ */
+ setup: function(testing = false) {
+ return TelemetrySendImpl.setup(testing);
+ },
+
+ /**
+ * Shutdown this module - this will cancel any pending ping tasks and wait for
+ * outstanding async activity like network and disk I/O.
+ *
+ * @return {Promise} Promise that is resolved when shutdown is finished.
+ */
+ shutdown: function() {
+ return TelemetrySendImpl.shutdown();
+ },
+
+ /**
+ * Submit a ping for sending. This will:
+ * - send the ping right away if possible or
+ * - save the ping to disk and send it at the next opportunity
+ *
+ * @param {Object} ping The ping data to send, must be serializable to JSON.
+ * @return {Promise} Test-only - a promise that is resolved when the ping is sent or saved.
+ */
+ submitPing: function(ping) {
+ return TelemetrySendImpl.submitPing(ping);
+ },
+
+ /**
+ * Count of pending pings that were found to be overdue at startup.
+ */
+ get overduePingsCount() {
+ return TelemetrySendImpl.overduePingsCount;
+ },
+
+ /**
+ * Notify that we can start submitting data to the servers.
+ */
+ notifyCanUpload: function() {
+ return TelemetrySendImpl.notifyCanUpload();
+ },
+
+ /**
+ * Only used in tests. Used to reset the module data to emulate a restart.
+ */
+ reset: function() {
+ return TelemetrySendImpl.reset();
+ },
+
+ /**
+ * Only used in tests.
+ */
+ setServer: function(server) {
+ return TelemetrySendImpl.setServer(server);
+ },
+
+ /**
+ * Clear out unpersisted, yet to be sent, pings and cancel outgoing ping requests.
+ */
+ clearCurrentPings: function() {
+ return TelemetrySendImpl.clearCurrentPings();
+ },
+
+ /**
+ * Only used in tests to wait on outgoing pending pings.
+ */
+ testWaitOnOutgoingPings: function() {
+ return TelemetrySendImpl.promisePendingPingActivity();
+ },
+
+ /**
+ * Test-only - this allows overriding behavior to enable ping sending in debug builds.
+ */
+ setTestModeEnabled: function(testing) {
+ TelemetrySendImpl.setTestModeEnabled(testing);
+ },
+
+ /**
+ * This returns state info for this module for AsyncShutdown timeout diagnostics.
+ */
+ getShutdownState: function() {
+ return TelemetrySendImpl.getShutdownState();
+ },
+};
+
+var CancellableTimeout = {
+ _deferred: null,
+ _timer: null,
+
+ /**
+ * This waits until either the given timeout passed or the timeout was cancelled.
+ *
+ * @param {Number} timeoutMs The timeout in ms.
+   * @return {Promise<bool>} Promise that is resolved with true if the timeout was cancelled,
+ * false otherwise.
+ */
+ promiseWaitOnTimeout: function(timeoutMs) {
+ if (!this._deferred) {
+ this._deferred = PromiseUtils.defer();
+ this._timer = Policy.setSchedulerTickTimeout(() => this._onTimeout(), timeoutMs);
+ }
+
+ return this._deferred.promise;
+ },
+
+ _onTimeout: function() {
+ if (this._deferred) {
+ this._deferred.resolve(false);
+ this._timer = null;
+ this._deferred = null;
+ }
+ },
+
+ cancelTimeout: function() {
+ if (this._deferred) {
+ Policy.clearSchedulerTickTimeout(this._timer);
+ this._deferred.resolve(true);
+ this._timer = null;
+ this._deferred = null;
+ }
+ },
+};
+
+/**
+ * SendScheduler implements the timer & scheduling behavior for ping sends.
+ */
+var SendScheduler = {
+ // Whether any ping sends failed since the last tick. If yes, we start with our exponential
+ // backoff timeout.
+ _sendsFailed: false,
+ // The current retry delay after ping send failures. We use this for the exponential backoff,
+  // increasing this value every time we had send failures since the last tick.
+ _backoffDelay: SEND_TICK_DELAY,
+ _shutdown: false,
+ _sendTask: null,
+ // A string that tracks the last seen send task state, null if it never ran.
+ _sendTaskState: null,
+
+ _logger: null,
+
+ get _log() {
+ if (!this._logger) {
+ this._logger = Log.repository.getLoggerWithMessagePrefix(LOGGER_NAME, LOGGER_PREFIX + "Scheduler::");
+ }
+
+ return this._logger;
+ },
+
+ shutdown: function() {
+ this._log.trace("shutdown");
+ this._shutdown = true;
+ CancellableTimeout.cancelTimeout();
+ return Promise.resolve(this._sendTask);
+ },
+
+ start: function() {
+ this._log.trace("start");
+ this._sendsFailed = false;
+ this._backoffDelay = SEND_TICK_DELAY;
+ this._shutdown = false;
+ },
+
+ /**
+ * Only used for testing, resets the state to emulate a restart.
+ */
+ reset: function() {
+ this._log.trace("reset");
+ return this.shutdown().then(() => this.start());
+ },
+
+ /**
+ * Notify the scheduler of a failure in sending out pings that warrants retrying.
+ * This will trigger the exponential backoff timer behavior on the next tick.
+ */
+ notifySendsFailed: function() {
+ this._log.trace("notifySendsFailed");
+ if (this._sendsFailed) {
+ return;
+ }
+
+ this._sendsFailed = true;
+ this._log.trace("notifySendsFailed - had send failures");
+ },
+
+ /**
+ * Returns whether ping submissions are currently throttled.
+ */
+ isThrottled: function() {
+ const now = Policy.now();
+ const nextPingSendTime = this._getNextPingSendTime(now);
+ return (nextPingSendTime > now.getTime());
+ },
+
+ waitOnSendTask: function() {
+ return Promise.resolve(this._sendTask);
+ },
+
+ triggerSendingPings: function(immediately) {
+ this._log.trace("triggerSendingPings - active send task: " + !!this._sendTask + ", immediately: " + immediately);
+
+ if (!this._sendTask) {
+ this._sendTask = this._doSendTask();
+ let clear = () => this._sendTask = null;
+ this._sendTask.then(clear, clear);
+ } else if (immediately) {
+ CancellableTimeout.cancelTimeout();
+ }
+
+ return this._sendTask;
+ },
+
+ _doSendTask: Task.async(function*() {
+ this._sendTaskState = "send task started";
+ this._backoffDelay = SEND_TICK_DELAY;
+ this._sendsFailed = false;
+
+ const resetBackoffTimer = () => {
+ this._backoffDelay = SEND_TICK_DELAY;
+ };
+
+ for (;;) {
+ this._log.trace("_doSendTask iteration");
+ this._sendTaskState = "start iteration";
+
+ if (this._shutdown) {
+ this._log.trace("_doSendTask - shutting down, bailing out");
+ this._sendTaskState = "bail out - shutdown check";
+ return;
+ }
+
+ // Get a list of pending pings, sorted by last modified, descending.
+ // Filter out all the pings we can't send now. This addresses scenarios like "deletion" pings
+    // which can be sent even when upload is disabled.
+ let pending = TelemetryStorage.getPendingPingList();
+ let current = TelemetrySendImpl.getUnpersistedPings();
+ this._log.trace("_doSendTask - pending: " + pending.length + ", current: " + current.length);
+ // Note that the two lists contain different kind of data. |pending| only holds ping
+ // info, while |current| holds actual ping data.
+ if (!TelemetrySendImpl.sendingEnabled()) {
+ pending = pending.filter(pingInfo => TelemetryStorage.isDeletionPing(pingInfo.id));
+ current = current.filter(p => isDeletionPing(p));
+ }
+ this._log.trace("_doSendTask - can send - pending: " + pending.length + ", current: " + current.length);
+
+ // Bail out if there is nothing to send.
+ if ((pending.length == 0) && (current.length == 0)) {
+ this._log.trace("_doSendTask - no pending pings, bailing out");
+ this._sendTaskState = "bail out - no pings to send";
+ return;
+ }
+
+ // If we are currently throttled (e.g. fuzzing to avoid midnight spikes), wait for the next send window.
+ const now = Policy.now();
+ if (this.isThrottled()) {
+ const nextPingSendTime = this._getNextPingSendTime(now);
+ this._log.trace("_doSendTask - throttled, delaying ping send to " + new Date(nextPingSendTime));
+ this._sendTaskState = "wait for throttling to pass";
+
+ const delay = nextPingSendTime - now.getTime();
+ const cancelled = yield CancellableTimeout.promiseWaitOnTimeout(delay);
+ if (cancelled) {
+ this._log.trace("_doSendTask - throttling wait was cancelled, resetting backoff timer");
+ resetBackoffTimer();
+ }
+
+ continue;
+ }
+
+ let sending = pending.slice(0, MAX_PING_SENDS_PER_MINUTE);
+ pending = pending.slice(MAX_PING_SENDS_PER_MINUTE);
+ this._log.trace("_doSendTask - triggering sending of " + sending.length + " pings now" +
+ ", " + pending.length + " pings waiting");
+
+ this._sendsFailed = false;
+ const sendStartTime = Policy.now();
+ this._sendTaskState = "wait on ping sends";
+ yield TelemetrySendImpl.sendPings(current, sending.map(p => p.id));
+ if (this._shutdown || (TelemetrySend.pendingPingCount == 0)) {
+ this._log.trace("_doSendTask - bailing out after sending, shutdown: " + this._shutdown +
+ ", pendingPingCount: " + TelemetrySend.pendingPingCount);
+ this._sendTaskState = "bail out - shutdown & pending check after send";
+ return;
+ }
+
+ // Calculate the delay before sending the next batch of pings.
+ // We start with a delay that makes us send max. 1 batch per minute.
+ // If we had send failures in the last batch, we will override this with
+ // a backoff delay.
+ const timeSinceLastSend = Policy.now() - sendStartTime;
+ let nextSendDelay = Math.max(0, SEND_TICK_DELAY - timeSinceLastSend);
+
+ if (!this._sendsFailed) {
+ this._log.trace("_doSendTask - had no send failures, resetting backoff timer");
+ resetBackoffTimer();
+ } else {
+ const newDelay = Math.min(SEND_MAXIMUM_BACKOFF_DELAY_MS,
+ this._backoffDelay * 2);
+ this._log.trace("_doSendTask - had send failures, backing off -" +
+ " old timeout: " + this._backoffDelay +
+ ", new timeout: " + newDelay);
+ this._backoffDelay = newDelay;
+ nextSendDelay = this._backoffDelay;
+ }
+
+ this._log.trace("_doSendTask - waiting for next send opportunity, timeout is " + nextSendDelay)
+ this._sendTaskState = "wait on next send opportunity";
+ const cancelled = yield CancellableTimeout.promiseWaitOnTimeout(nextSendDelay);
+ if (cancelled) {
+ this._log.trace("_doSendTask - batch send wait was cancelled, resetting backoff timer");
+ resetBackoffTimer();
+ }
+ }
+ }),
+
+ /**
+ * This helper calculates the next time that we can send pings at.
+ * Currently this mostly redistributes ping sends from midnight until one hour after
+ * to avoid submission spikes around local midnight for daily pings.
+ *
+ * @param now Date The current time.
+ * @return Number The next time (ms from UNIX epoch) when we can send pings.
+ */
+ _getNextPingSendTime: function(now) {
+ // 1. First we check if the time is between 0am and 1am. If it's not, we send
+ // immediately.
+ // 2. If we confirmed the time is indeed between 0am and 1am in step 1, we disallow
+ // sending before (midnight + fuzzing delay), which is a random time between 0am-1am
+ // (decided at startup).
+
+ const midnight = Utils.truncateToDays(now);
+ // Don't delay pings if we are not within the fuzzing interval.
+ if ((now.getTime() - midnight.getTime()) > MIDNIGHT_FUZZING_INTERVAL_MS) {
+ return now.getTime();
+ }
+
+ // Delay ping send if we are within the midnight fuzzing range.
+ // We spread those ping sends out between |midnight| and |midnight + midnightPingFuzzingDelay|.
+ return midnight.getTime() + Policy.midnightPingFuzzingDelay();
+ },
+
+ getShutdownState: function() {
+ return {
+ shutdown: this._shutdown,
+ hasSendTask: !!this._sendTask,
+ sendsFailed: this._sendsFailed,
+ sendTaskState: this._sendTaskState,
+ backoffDelay: this._backoffDelay,
+ };
+ },
+ };
+
+var TelemetrySendImpl = {
+ _sendingEnabled: false,
+ // Tracks the shutdown state.
+ _shutdown: false,
+ _logger: null,
+ // This tracks all pending ping requests to the server.
+ _pendingPingRequests: new Map(),
+ // This tracks all the pending async ping activity.
+ _pendingPingActivity: new Set(),
+ // This is true when running in the test infrastructure.
+ _testMode: false,
+ // This holds pings that we currently try and haven't persisted yet.
+ _currentPings: new Map(),
+
+ // Count of pending pings that were overdue.
+ _overduePingCount: 0,
+
+ OBSERVER_TOPICS: [
+ TOPIC_IDLE_DAILY,
+ ],
+
+ get _log() {
+ if (!this._logger) {
+ this._logger = Log.repository.getLoggerWithMessagePrefix(LOGGER_NAME, LOGGER_PREFIX);
+ }
+
+ return this._logger;
+ },
+
+ get overduePingsCount() {
+ return this._overduePingCount;
+ },
+
+ get pendingPingRequests() {
+ return this._pendingPingRequests;
+ },
+
+ get pendingPingCount() {
+ return TelemetryStorage.getPendingPingList().length + this._currentPings.size;
+ },
+
+ setTestModeEnabled: function(testing) {
+ this._testMode = testing;
+ },
+
+ setup: Task.async(function*(testing) {
+ this._log.trace("setup");
+
+ this._testMode = testing;
+ this._sendingEnabled = true;
+
+ Services.obs.addObserver(this, TOPIC_IDLE_DAILY, false);
+
+ this._server = Preferences.get(PREF_SERVER, undefined);
+
+ // Check the pending pings on disk now.
+ try {
+ yield this._checkPendingPings();
+ } catch (ex) {
+ this._log.error("setup - _checkPendingPings rejected", ex);
+ }
+
+ // Enforce the pending pings storage quota. It could take a while so don't
+ // block on it.
+ TelemetryStorage.runEnforcePendingPingsQuotaTask();
+
+ // Start sending pings, but don't block on this.
+ SendScheduler.triggerSendingPings(true);
+ }),
+
+ /**
+ * Discard old pings from the pending pings and detect overdue ones.
+ * @return {Boolean} True if we have overdue pings, false otherwise.
+ */
+ _checkPendingPings: Task.async(function*() {
+ // Scan the pending pings - that gives us a list sorted by last modified, descending.
+ let infos = yield TelemetryStorage.loadPendingPingList();
+ this._log.info("_checkPendingPings - pending ping count: " + infos.length);
+ if (infos.length == 0) {
+ this._log.trace("_checkPendingPings - no pending pings");
+ return;
+ }
+
+ const now = Policy.now();
+
+ // Check for overdue pings.
+ const overduePings = infos.filter((info) =>
+ (now.getTime() - info.lastModificationDate) > OVERDUE_PING_FILE_AGE);
+ this._overduePingCount = overduePings.length;
+
+ // Submit the age of the pending pings.
+ for (let pingInfo of infos) {
+ const ageInDays =
+ Utils.millisecondsToDays(Math.abs(now.getTime() - pingInfo.lastModificationDate));
+ Telemetry.getHistogramById("TELEMETRY_PENDING_PINGS_AGE").add(ageInDays);
+ }
+ }),
+
+ shutdown: Task.async(function*() {
+ this._shutdown = true;
+
+ for (let topic of this.OBSERVER_TOPICS) {
+ try {
+ Services.obs.removeObserver(this, topic);
+ } catch (ex) {
+ this._log.error("shutdown - failed to remove observer for " + topic, ex);
+ }
+ }
+
+ // We can't send anymore now.
+ this._sendingEnabled = false;
+
+ // Cancel any outgoing requests.
+ yield this._cancelOutgoingRequests();
+
+ // Stop any active send tasks.
+ yield SendScheduler.shutdown();
+
+ // Wait for any outstanding async ping activity.
+ yield this.promisePendingPingActivity();
+
+ // Save any outstanding pending pings to disk.
+ yield this._persistCurrentPings();
+ }),
+
+ reset: function() {
+ this._log.trace("reset");
+
+ this._shutdown = false;
+ this._currentPings = new Map();
+ this._overduePingCount = 0;
+
+ const histograms = [
+ "TELEMETRY_SUCCESS",
+ "TELEMETRY_SEND_SUCCESS",
+ "TELEMETRY_SEND_FAILURE",
+ ];
+
+ histograms.forEach(h => Telemetry.getHistogramById(h).clear());
+
+ return SendScheduler.reset();
+ },
+
+ /**
+ * Notify that we can start submitting data to the servers.
+ */
+ notifyCanUpload: function() {
+ // Let the scheduler trigger sending pings if possible.
+ SendScheduler.triggerSendingPings(true);
+ return this.promisePendingPingActivity();
+ },
+
+ observe: function(subject, topic, data) {
+ switch (topic) {
+ case TOPIC_IDLE_DAILY:
+ SendScheduler.triggerSendingPings(true);
+ break;
+ }
+ },
+
+ submitPing: function(ping) {
+ this._log.trace("submitPing - ping id: " + ping.id);
+
+ if (!this.sendingEnabled(ping)) {
+ this._log.trace("submitPing - Telemetry is not allowed to send pings.");
+ return Promise.resolve();
+ }
+
+ if (!this.canSendNow) {
+ // Sending is disabled or throttled, add this to the persisted pending pings.
+ this._log.trace("submitPing - can't send ping now, persisting to disk - " +
+ "canSendNow: " + this.canSendNow);
+ return savePing(ping);
+ }
+
+ // Let the scheduler trigger sending pings if possible.
+ // As a safety mechanism, this resets any currently active throttling.
+ this._log.trace("submitPing - can send pings, trying to send now");
+ this._currentPings.set(ping.id, ping);
+ SendScheduler.triggerSendingPings(true);
+ return Promise.resolve();
+ },
+
+ /**
+ * Only used in tests.
+ */
+ setServer: function (server) {
+ this._log.trace("setServer", server);
+ this._server = server;
+ },
+
+ /**
+ * Clear out unpersisted, yet to be sent, pings and cancel outgoing ping requests.
+ */
+ clearCurrentPings: Task.async(function*() {
+ if (this._shutdown) {
+ this._log.trace("clearCurrentPings - in shutdown, bailing out");
+ return;
+ }
+
+ // Temporarily disable the scheduler. It must not try to reschedule ping sending
+ // while we're deleting them.
+ yield SendScheduler.shutdown();
+
+ // Now that the ping activity has settled, abort outstanding ping requests.
+ this._cancelOutgoingRequests();
+
+ // Also, purge current pings.
+ this._currentPings.clear();
+
+ // We might have been interrupted and shutdown could have been started.
+ // We need to bail out in that case to avoid triggering send activity etc.
+ // at unexpected times.
+ if (this._shutdown) {
+ this._log.trace("clearCurrentPings - in shutdown, not spinning SendScheduler up again");
+ return;
+ }
+
+ // Enable the scheduler again and spin the send task.
+ SendScheduler.start();
+ SendScheduler.triggerSendingPings(true);
+ }),
+
+ _cancelOutgoingRequests: function() {
+ // Abort any pending ping XHRs.
+ for (let [id, request] of this._pendingPingRequests) {
+ this._log.trace("_cancelOutgoingRequests - aborting ping request for id " + id);
+ try {
+ request.abort();
+ } catch (e) {
+ this._log.error("_cancelOutgoingRequests - failed to abort request for id " + id, e);
+ }
+ }
+ this._pendingPingRequests.clear();
+ },
+
+ sendPings: function(currentPings, persistedPingIds) {
+ let pingSends = [];
+
+ for (let current of currentPings) {
+ let ping = current;
+ let p = Task.spawn(function*() {
+ try {
+ yield this._doPing(ping, ping.id, false);
+ } catch (ex) {
+ this._log.info("sendPings - ping " + ping.id + " not sent, saving to disk", ex);
+ // Deletion pings must be saved to a special location.
+ yield savePing(ping);
+ } finally {
+ this._currentPings.delete(ping.id);
+ }
+ }.bind(this));
+
+ this._trackPendingPingTask(p);
+ pingSends.push(p);
+ }
+
+ if (persistedPingIds.length > 0) {
+ pingSends.push(this._sendPersistedPings(persistedPingIds).catch((ex) => {
+ this._log.info("sendPings - persisted pings not sent", ex);
+ }));
+ }
+
+ return Promise.all(pingSends);
+ },
+
+ /**
+ * Send the persisted pings to the server.
+ *
+ * @param {Array<string>} List of ping ids that should be sent.
+ *
+ * @return Promise A promise that is resolved when all pings finished sending or failed.
+ */
+ _sendPersistedPings: Task.async(function*(pingIds) {
+ this._log.trace("sendPersistedPings");
+
+ if (TelemetryStorage.pendingPingCount < 1) {
+ this._log.trace("_sendPersistedPings - no pings to send");
+ return;
+ }
+
+ if (pingIds.length < 1) {
+ this._log.trace("sendPersistedPings - no pings to send");
+ return;
+ }
+
+ // We can send now.
+ // If there are any send failures, _doPing() sets up handlers that e.g. trigger backoff timer behavior.
+ this._log.trace("sendPersistedPings - sending " + pingIds.length + " pings");
+ let pingSendPromises = [];
+ for (let pingId of pingIds) {
+ const id = pingId;
+ pingSendPromises.push(
+ TelemetryStorage.loadPendingPing(id)
+ .then((data) => this._doPing(data, id, true))
+ .catch(e => this._log.error("sendPersistedPings - failed to send ping " + id, e)));
+ }
+
+ let promise = Promise.all(pingSendPromises);
+ this._trackPendingPingTask(promise);
+ yield promise;
+ }),
+
+ _onPingRequestFinished: function(success, startTime, id, isPersisted) {
+ this._log.trace("_onPingRequestFinished - success: " + success + ", persisted: " + isPersisted);
+
+ let sendId = success ? "TELEMETRY_SEND_SUCCESS" : "TELEMETRY_SEND_FAILURE";
+ let hsend = Telemetry.getHistogramById(sendId);
+ let hsuccess = Telemetry.getHistogramById("TELEMETRY_SUCCESS");
+
+ hsend.add(monotonicNow() - startTime);
+ hsuccess.add(success);
+
+ if (!success) {
+ // Let the scheduler know about send failures for triggering backoff timeouts.
+ SendScheduler.notifySendsFailed();
+ }
+
+ if (success && isPersisted) {
+ if (TelemetryStorage.isDeletionPing(id)) {
+ return TelemetryStorage.removeDeletionPing();
+ }
+ return TelemetryStorage.removePendingPing(id);
+ }
+ return Promise.resolve();
+ },
+
+ _getSubmissionPath: function(ping) {
+ // The new ping format contains an "application" section, the old one doesn't.
+ let pathComponents;
+ if (isV4PingFormat(ping)) {
+ // We insert the Ping id in the URL to simplify server handling of duplicated
+ // pings.
+ let app = ping.application;
+ pathComponents = [
+ ping.id, ping.type, app.name, app.version, app.channel, app.buildId
+ ];
+ } else {
+ // This is a ping in the old format.
+ if (!("slug" in ping)) {
+ // That's odd, we don't have a slug. Generate one so that TelemetryStorage.jsm works.
+ ping.slug = Utils.generateUUID();
+ }
+
+ // Do we have enough info to build a submission URL?
+ let payload = ("payload" in ping) ? ping.payload : null;
+ if (payload && ("info" in payload)) {
+ let info = ping.payload.info;
+ pathComponents = [ ping.slug, info.reason, info.appName, info.appVersion,
+ info.appUpdateChannel, info.appBuildID ];
+ } else {
+ // Only use the UUID as the slug.
+ pathComponents = [ ping.slug ];
+ }
+ }
+
+ let slug = pathComponents.join("/");
+ return "/submit/telemetry/" + slug;
+ },
+
+ _doPing: function(ping, id, isPersisted) {
+ if (!this.sendingEnabled(ping)) {
+ // We can't send the pings to the server, so don't try to.
+ this._log.trace("_doPing - Can't send ping " + ping.id);
+ return Promise.resolve();
+ }
+
+ this._log.trace("_doPing - server: " + this._server + ", persisted: " + isPersisted +
+ ", id: " + id);
+
+ const isNewPing = isV4PingFormat(ping);
+ const version = isNewPing ? PING_FORMAT_VERSION : 1;
+ const url = this._server + this._getSubmissionPath(ping) + "?v=" + version;
+
+ let request = new ServiceRequest();
+ request.mozBackgroundRequest = true;
+ request.timeout = PING_SUBMIT_TIMEOUT_MS;
+
+ request.open("POST", url, true);
+ request.overrideMimeType("text/plain");
+ request.setRequestHeader("Content-Type", "application/json; charset=UTF-8");
+ request.setRequestHeader("Date", Policy.now().toUTCString());
+
+ this._pendingPingRequests.set(id, request);
+
+ // Prevent the request channel from running though URLClassifier (bug 1296802)
+ request.channel.loadFlags &= ~Ci.nsIChannel.LOAD_CLASSIFY_URI;
+
+ const monotonicStartTime = monotonicNow();
+ let deferred = PromiseUtils.defer();
+
+ let onRequestFinished = (success, event) => {
+ let onCompletion = () => {
+ if (success) {
+ deferred.resolve();
+ } else {
+ deferred.reject(event);
+ }
+ };
+
+ this._pendingPingRequests.delete(id);
+ this._onPingRequestFinished(success, monotonicStartTime, id, isPersisted)
+ .then(() => onCompletion(),
+ (error) => {
+ this._log.error("_doPing - request success: " + success + ", error: " + error);
+ onCompletion();
+ });
+ };
+
+ let errorhandler = (event) => {
+ this._log.error("_doPing - error making request to " + url + ": " + event.type);
+ onRequestFinished(false, event);
+ };
+ request.onerror = errorhandler;
+ request.ontimeout = errorhandler;
+ request.onabort = errorhandler;
+
+ request.onload = (event) => {
+ let status = request.status;
+ let statusClass = status - (status % 100);
+ let success = false;
+
+ if (statusClass === 200) {
+ // We can treat all 2XX as success.
+ this._log.info("_doPing - successfully loaded, status: " + status);
+ success = true;
+ } else if (statusClass === 400) {
+ // 4XX means that something with the request was broken.
+ this._log.error("_doPing - error submitting to " + url + ", status: " + status
+ + " - ping request broken?");
+ Telemetry.getHistogramById("TELEMETRY_PING_EVICTED_FOR_SERVER_ERRORS").add();
+ // TODO: we should handle this better, but for now we should avoid resubmitting
+ // broken requests by pretending success.
+ success = true;
+ } else if (statusClass === 500) {
+ // 5XX means there was a server-side error and we should try again later.
+ this._log.error("_doPing - error submitting to " + url + ", status: " + status
+ + " - server error, should retry later");
+ } else {
+ // We received an unexpected status code.
+ this._log.error("_doPing - error submitting to " + url + ", status: " + status
+ + ", type: " + event.type);
+ }
+
+ onRequestFinished(success, event);
+ };
+
+ // If that's a legacy ping format, just send its payload.
+ let networkPayload = isNewPing ? ping : ping.payload;
+ request.setRequestHeader("Content-Encoding", "gzip");
+ let converter = Cc["@mozilla.org/intl/scriptableunicodeconverter"]
+ .createInstance(Ci.nsIScriptableUnicodeConverter);
+ converter.charset = "UTF-8";
+ let startTime = new Date();
+ let utf8Payload = converter.ConvertFromUnicode(JSON.stringify(networkPayload));
+ utf8Payload += converter.Finish();
+ Telemetry.getHistogramById("TELEMETRY_STRINGIFY").add(new Date() - startTime);
+
+ // Check the size and drop pings which are too big.
+ const pingSizeBytes = utf8Payload.length;
+ if (pingSizeBytes > TelemetryStorage.MAXIMUM_PING_SIZE) {
+ this._log.error("_doPing - submitted ping exceeds the size limit, size: " + pingSizeBytes);
+ Telemetry.getHistogramById("TELEMETRY_PING_SIZE_EXCEEDED_SEND").add();
+ Telemetry.getHistogramById("TELEMETRY_DISCARDED_SEND_PINGS_SIZE_MB")
+ .add(Math.floor(pingSizeBytes / 1024 / 1024));
+ // We don't need to call |request.abort()| as it was not sent yet.
+ this._pendingPingRequests.delete(id);
+ return TelemetryStorage.removePendingPing(id);
+ }
+
+ let payloadStream = Cc["@mozilla.org/io/string-input-stream;1"]
+ .createInstance(Ci.nsIStringInputStream);
+ startTime = new Date();
+ payloadStream.data = gzipCompressString(utf8Payload);
+ Telemetry.getHistogramById("TELEMETRY_COMPRESS").add(new Date() - startTime);
+ startTime = new Date();
+ request.send(payloadStream);
+
+ return deferred.promise;
+ },
+
+ /**
+ * Check if sending is temporarily disabled.
+ * @return {Boolean} True if we can send pings to the server right now, false if
+ * sending is temporarily disabled.
+ */
+ get canSendNow() {
+ // If the reporting policy was not accepted yet, don't send pings.
+ if (!TelemetryReportingPolicy.canUpload()) {
+ return false;
+ }
+
+ return this._sendingEnabled;
+ },
+
+ /**
+ * Check if sending is disabled. If FHR is not allowed to upload,
+ * pings are not sent to the server (Telemetry is a sub-feature of FHR). If trying
+ * to send a deletion ping, don't block it.
+ * If unified telemetry is off, don't send pings if Telemetry is disabled.
+ *
+ * @param {Object} [ping=null] A ping to be checked.
+ * @return {Boolean} True if pings can be send to the servers, false otherwise.
+ */
+ sendingEnabled: function(ping = null) {
+ // We only send pings from official builds, but allow overriding this for tests.
+ if (!Telemetry.isOfficialTelemetry && !this._testMode) {
+ return false;
+ }
+
+ // With unified Telemetry, the FHR upload setting controls whether we can send pings.
+ // The Telemetry pref enables sending extended data sets instead.
+ if (IS_UNIFIED_TELEMETRY) {
+ // Deletion pings are sent even if the upload is disabled.
+ if (ping && isDeletionPing(ping)) {
+ return true;
+ }
+ return Preferences.get(PREF_FHR_UPLOAD_ENABLED, false);
+ }
+
+ // Without unified Telemetry, the Telemetry enabled pref controls ping sending.
+ return Utils.isTelemetryEnabled;
+ },
+
+ /**
+ * Track any pending ping send and save tasks through the promise passed here.
+ * This is needed to block shutdown on any outstanding ping activity.
+ */
+ _trackPendingPingTask: function (promise) {
+ let clear = () => this._pendingPingActivity.delete(promise);
+ promise.then(clear, clear);
+ this._pendingPingActivity.add(promise);
+ },
+
+ /**
+ * Return a promise that allows to wait on pending pings.
+ * @return {Object<Promise>} A promise resolved when all the pending pings promises
+ * are resolved.
+ */
+ promisePendingPingActivity: function () {
+ this._log.trace("promisePendingPingActivity - Waiting for ping task");
+ let p = Array.from(this._pendingPingActivity, p => p.catch(ex => {
+ this._log.error("promisePendingPingActivity - ping activity had an error", ex);
+ }));
+ p.push(SendScheduler.waitOnSendTask());
+ return Promise.all(p);
+ },
+
+ _persistCurrentPings: Task.async(function*() {
+ for (let [id, ping] of this._currentPings) {
+ try {
+ yield savePing(ping);
+ this._log.trace("_persistCurrentPings - saved ping " + id);
+ } catch (ex) {
+ this._log.error("_persistCurrentPings - failed to save ping " + id, ex);
+ } finally {
+ this._currentPings.delete(id);
+ }
+ }
+ }),
+
+ /**
+ * Returns the current pending, not yet persisted, pings, newest first.
+ */
+ getUnpersistedPings: function() {
+ let current = [...this._currentPings.values()];
+ current.reverse();
+ return current;
+ },
+
  /**
   * Returns a snapshot of this module's send state - counters of in-flight,
   * tracked, unpersisted and persisted pings plus the scheduler state.
   * NOTE(review): presumably consumed by shutdown diagnostics - confirm with caller.
   */
  getShutdownState: function() {
    return {
      sendingEnabled: this._sendingEnabled,
      pendingPingRequestCount: this._pendingPingRequests.size,
      pendingPingActivityCount: this._pendingPingActivity.size,
      unpersistedPingCount: this._currentPings.size,
      persistedPingCount: TelemetryStorage.getPendingPingList().length,
      schedulerState: SendScheduler.getShutdownState(),
    };
  },
+};
diff --git a/toolkit/components/telemetry/TelemetrySession.jsm b/toolkit/components/telemetry/TelemetrySession.jsm
new file mode 100644
index 000000000..3d97dc155
--- /dev/null
+++ b/toolkit/components/telemetry/TelemetrySession.jsm
@@ -0,0 +1,2124 @@
+/* -*- js-indent-level: 2; indent-tabs-mode: nil -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+"use strict";
+
+const Cc = Components.classes;
+const Ci = Components.interfaces;
+const Cr = Components.results;
+const Cu = Components.utils;
+
+Cu.import("resource://gre/modules/debug.js", this);
+Cu.import("resource://gre/modules/Log.jsm");
+Cu.import("resource://gre/modules/Services.jsm", this);
+Cu.import("resource://gre/modules/XPCOMUtils.jsm", this);
+Cu.import("resource://gre/modules/Promise.jsm", this);
+Cu.import("resource://gre/modules/DeferredTask.jsm", this);
+Cu.import("resource://gre/modules/Preferences.jsm");
+Cu.import("resource://gre/modules/Task.jsm");
+Cu.import("resource://gre/modules/Timer.jsm");
+Cu.import("resource://gre/modules/TelemetrySend.jsm", this);
+Cu.import("resource://gre/modules/TelemetryUtils.jsm", this);
+Cu.import("resource://gre/modules/AppConstants.jsm");
+
+const Utils = TelemetryUtils;
+
+const myScope = this;
+
+// When modifying the payload in incompatible ways, please bump this version number
+const PAYLOAD_VERSION = 4;
+const PING_TYPE_MAIN = "main";
+const PING_TYPE_SAVED_SESSION = "saved-session";
+
+const REASON_ABORTED_SESSION = "aborted-session";
+const REASON_DAILY = "daily";
+const REASON_SAVED_SESSION = "saved-session";
+const REASON_GATHER_PAYLOAD = "gather-payload";
+const REASON_GATHER_SUBSESSION_PAYLOAD = "gather-subsession-payload";
+const REASON_TEST_PING = "test-ping";
+const REASON_ENVIRONMENT_CHANGE = "environment-change";
+const REASON_SHUTDOWN = "shutdown";
+
+const HISTOGRAM_SUFFIXES = {
+ PARENT: "",
+ CONTENT: "#content",
+ GPU: "#gpu",
+}
+
+const ENVIRONMENT_CHANGE_LISTENER = "TelemetrySession::onEnvironmentChange";
+
+const MS_IN_ONE_HOUR = 60 * 60 * 1000;
+const MIN_SUBSESSION_LENGTH_MS = Preferences.get("toolkit.telemetry.minSubsessionLength", 10 * 60) * 1000;
+
+const LOGGER_NAME = "Toolkit.Telemetry";
+const LOGGER_PREFIX = "TelemetrySession" + (Utils.isContentProcess ? "#content::" : "::");
+
+const PREF_BRANCH = "toolkit.telemetry.";
+const PREF_PREVIOUS_BUILDID = PREF_BRANCH + "previousBuildID";
+const PREF_FHR_UPLOAD_ENABLED = "datareporting.healthreport.uploadEnabled";
+const PREF_ASYNC_PLUGIN_INIT = "dom.ipc.plugins.asyncInit.enabled";
+const PREF_UNIFIED = PREF_BRANCH + "unified";
+
+
+const MESSAGE_TELEMETRY_PAYLOAD = "Telemetry:Payload";
+const MESSAGE_TELEMETRY_THREAD_HANGS = "Telemetry:ChildThreadHangs";
+const MESSAGE_TELEMETRY_GET_CHILD_THREAD_HANGS = "Telemetry:GetChildThreadHangs";
+const MESSAGE_TELEMETRY_USS = "Telemetry:USS";
+const MESSAGE_TELEMETRY_GET_CHILD_USS = "Telemetry:GetChildUSS";
+
+const DATAREPORTING_DIRECTORY = "datareporting";
+const ABORTED_SESSION_FILE_NAME = "aborted-session-ping";
+
+// Whether the FHR/Telemetry unification features are enabled.
+// Changing this pref requires a restart.
+const IS_UNIFIED_TELEMETRY = Preferences.get(PREF_UNIFIED, false);
+
+// Maximum number of content payloads that we are willing to store.
+const MAX_NUM_CONTENT_PAYLOADS = 10;
+
+// Do not gather data more than once a minute (ms)
+const TELEMETRY_INTERVAL = 60 * 1000;
+// Delay before initializing telemetry (ms)
+const TELEMETRY_DELAY = Preferences.get("toolkit.telemetry.initDelay", 60) * 1000;
+// Delay before initializing telemetry if we're testing (ms)
+const TELEMETRY_TEST_DELAY = 1;
+// Execute a scheduler tick every 5 minutes.
+const SCHEDULER_TICK_INTERVAL_MS = Preferences.get("toolkit.telemetry.scheduler.tickInterval", 5 * 60) * 1000;
+// When user is idle, execute a scheduler tick every 60 minutes.
+const SCHEDULER_TICK_IDLE_INTERVAL_MS = Preferences.get("toolkit.telemetry.scheduler.idleTickInterval", 60 * 60) * 1000;
+
+// The tolerance we have when checking if it's midnight (15 minutes).
+const SCHEDULER_MIDNIGHT_TOLERANCE_MS = 15 * 60 * 1000;
+
+// Seconds of idle time before pinging.
+// On idle-daily a gather-telemetry notification is fired, during it probes can
+// start asynchronous tasks to gather data.
+const IDLE_TIMEOUT_SECONDS = Preferences.get("toolkit.telemetry.idleTimeout", 5 * 60);
+
+// To avoid generating too many main pings, we ignore environment changes that
+// happen within this interval since the last main ping.
+const CHANGE_THROTTLE_INTERVAL_MS = 5 * 60 * 1000;
+
+// The frequency at which we persist session data to the disk to prevent data loss
+// in case of aborted sessions (currently 5 minutes).
+const ABORTED_SESSION_UPDATE_INTERVAL_MS = 5 * 60 * 1000;
+
+const TOPIC_CYCLE_COLLECTOR_BEGIN = "cycle-collector-begin";
+
+// How long to wait in millis for all the child memory reports to come in
+const TOTAL_MEMORY_COLLECTOR_TIMEOUT = 200;
+
+var gLastMemoryPoll = null;
+
+var gWasDebuggerAttached = false;
+
+XPCOMUtils.defineLazyServiceGetter(this, "Telemetry",
+ "@mozilla.org/base/telemetry;1",
+ "nsITelemetry");
+XPCOMUtils.defineLazyServiceGetter(this, "idleService",
+ "@mozilla.org/widget/idleservice;1",
+ "nsIIdleService");
+XPCOMUtils.defineLazyServiceGetter(this, "cpmm",
+ "@mozilla.org/childprocessmessagemanager;1",
+ "nsIMessageSender");
+XPCOMUtils.defineLazyServiceGetter(this, "cpml",
+ "@mozilla.org/childprocessmessagemanager;1",
+ "nsIMessageListenerManager");
+XPCOMUtils.defineLazyServiceGetter(this, "ppmm",
+ "@mozilla.org/parentprocessmessagemanager;1",
+ "nsIMessageBroadcaster");
+XPCOMUtils.defineLazyServiceGetter(this, "ppml",
+ "@mozilla.org/parentprocessmessagemanager;1",
+ "nsIMessageListenerManager");
+
+XPCOMUtils.defineLazyModuleGetter(this, "AddonManagerPrivate",
+ "resource://gre/modules/AddonManager.jsm");
+XPCOMUtils.defineLazyModuleGetter(this, "AsyncShutdown",
+ "resource://gre/modules/AsyncShutdown.jsm");
+XPCOMUtils.defineLazyModuleGetter(this, "TelemetryController",
+ "resource://gre/modules/TelemetryController.jsm");
+XPCOMUtils.defineLazyModuleGetter(this, "TelemetryStorage",
+ "resource://gre/modules/TelemetryStorage.jsm");
+XPCOMUtils.defineLazyModuleGetter(this, "TelemetryLog",
+ "resource://gre/modules/TelemetryLog.jsm");
+XPCOMUtils.defineLazyModuleGetter(this, "ThirdPartyCookieProbe",
+ "resource://gre/modules/ThirdPartyCookieProbe.jsm");
+XPCOMUtils.defineLazyModuleGetter(this, "UITelemetry",
+ "resource://gre/modules/UITelemetry.jsm");
+XPCOMUtils.defineLazyModuleGetter(this, "GCTelemetry",
+ "resource://gre/modules/GCTelemetry.jsm");
+XPCOMUtils.defineLazyModuleGetter(this, "TelemetryEnvironment",
+ "resource://gre/modules/TelemetryEnvironment.jsm");
+
/**
 * Generate a random UUID string via nsIUUIDGenerator.
 * @return {String} The UUID without the surrounding "{" and "}".
 */
function generateUUID() {
  let uuid = Cc["@mozilla.org/uuid-generator;1"]
               .getService(Ci.nsIUUIDGenerator)
               .generateUUID()
               .toString();
  // The generator returns "{...}" - strip the braces.
  return uuid.slice(1, -1);
}
+
/**
 * Milliseconds elapsed since process start, as reported by the Telemetry service.
 * @return {Number} The elapsed time in ms, or -1 if it could not be obtained.
 */
function getMsSinceProcessStart() {
  try {
    return Telemetry.msSinceProcessStart();
  } catch (ex) {
    // If this fails return a special value.
    return -1;
  }
}
+
/**
 * This is a policy object used to override behavior for testing.
 * Tests replace individual members to control time, ids and timers.
 */
var Policy = {
  // Wall-clock "now"; overridden in tests to simulate specific dates.
  now: () => new Date(),
  // Monotonic clock in ms since process start (-1 if unavailable).
  monotonicNow: getMsSinceProcessStart,
  // Id generators for new sessions/subsessions.
  generateSessionUUID: () => generateUUID(),
  generateSubsessionUUID: () => generateUUID(),
  // Timer hooks used by the scheduler; tests swap these to drive ticks manually.
  setSchedulerTickTimeout: (callback, delayMs) => setTimeout(callback, delayMs),
  clearSchedulerTickTimeout: id => clearTimeout(id),
};
+
/**
 * Get the ping type based on the payload.
 * @param {Object} aPayload The ping payload.
 * @return {String} A string representing the ping type.
 */
function getPingType(aPayload) {
  // To remain consistent with server-side ping handling, set "saved-session" as the ping
  // type for "saved-session" payload reasons.
  return (aPayload.info.reason == REASON_SAVED_SESSION) ? PING_TYPE_SAVED_SESSION
                                                        : PING_TYPE_MAIN;
}
+
/**
 * Annotate the current session ID with the crash reporter to map potential
 * crash pings with the related main ping.
 * @param {String} sessionId The telemetry session id to record.
 */
function annotateCrashReport(sessionId) {
  try {
    // The contract may be absent in builds without the crash reporter.
    const cr = Cc["@mozilla.org/toolkit/crash-reporter;1"];
    if (cr) {
      cr.getService(Ci.nsICrashReporter).setTelemetrySessionId(sessionId);
    }
  } catch (e) {
    // Ignore errors when crash reporting is disabled
  }
}
+
/**
 * Read current process I/O counters.
 * Windows-only: uses ctypes to call GetProcessIoCounters from Kernel32.
 */
var processInfo = {
  // Lazily set to true once the ctypes declarations below succeeded.
  _initialized: false,
  // ctypes StructType mirroring the Win32 IO_COUNTERS struct.
  _IO_COUNTERS: null,
  // Open library handle for Kernel32.dll.
  _kernel32: null,
  _GetProcessIoCounters: null,
  _GetCurrentProcess: null,
  /**
   * @return {Array|null} [readBytes, writeBytes] on Windows, null elsewhere
   *         or on failure.
   */
  getCounters: function() {
    // Presence of the registry-key contract is used as a Windows check.
    let isWindows = ("@mozilla.org/windows-registry-key;1" in Components.classes);
    if (isWindows)
      return this.getCounters_Windows();
    return null;
  },
  getCounters_Windows: function() {
    if (!this._initialized) {
      Cu.import("resource://gre/modules/ctypes.jsm");
      // Field order must match the native IO_COUNTERS layout.
      this._IO_COUNTERS = new ctypes.StructType("IO_COUNTERS", [
        {'readOps': ctypes.unsigned_long_long},
        {'writeOps': ctypes.unsigned_long_long},
        {'otherOps': ctypes.unsigned_long_long},
        {'readBytes': ctypes.unsigned_long_long},
        {'writeBytes': ctypes.unsigned_long_long},
        {'otherBytes': ctypes.unsigned_long_long} ]);
      try {
        this._kernel32 = ctypes.open("Kernel32.dll");
        this._GetProcessIoCounters = this._kernel32.declare("GetProcessIoCounters",
                                                            ctypes.winapi_abi,
                                                            ctypes.bool, // return
                                                            ctypes.voidptr_t, // hProcess
                                                            this._IO_COUNTERS.ptr); // lpIoCounters
        this._GetCurrentProcess = this._kernel32.declare("GetCurrentProcess",
                                                         ctypes.winapi_abi,
                                                         ctypes.voidptr_t); // return
        this._initialized = true;
      } catch (err) {
        // Declaring the functions failed; stay uninitialized and report nothing.
        return null;
      }
    }
    let io = new this._IO_COUNTERS();
    if (!this._GetProcessIoCounters(this._GetCurrentProcess(), io.address()))
      return null;
    // parseInt converts the 64-bit ctypes values to JS numbers.
    return [parseInt(io.readBytes), parseInt(io.writeBytes)];
  }
};
+
/**
 * TelemetryScheduler contains a single timer driving all regularly-scheduled
 * Telemetry related jobs. Having a single place with this logic simplifies
 * reasoning about scheduling actions in a single place, making it easier to
 * coordinate jobs and coalesce them.
 */
var TelemetryScheduler = {
  // Time (ms epoch) the last daily ping was sent.
  _lastDailyPingTime: 0,
  // Time (ms epoch) the aborted-session ping was last saved.
  _lastSessionCheckpointTime: 0,

  // For sanity checking.
  _lastAdhocPingTime: 0,
  _lastTickTime: 0,

  _log: null,

  // The timer which drives the scheduler.
  _schedulerTimer: null,
  // The interval used by the scheduler timer.
  _schedulerInterval: 0,
  // Starts out true so that shutdown() before init() is flagged as an error.
  _shuttingDown: true,
  _isUserIdle: false,

  /**
   * Initialises the scheduler and schedules the first daily/aborted session pings.
   */
  init: function() {
    this._log = Log.repository.getLoggerWithMessagePrefix(LOGGER_NAME, "TelemetryScheduler::");
    this._log.trace("init");
    this._shuttingDown = false;
    this._isUserIdle = false;

    // Initialize the last daily ping and aborted session last due times to the current time.
    // Otherwise, we might end up sending daily pings even if the subsession is not long enough.
    let now = Policy.now();
    this._lastDailyPingTime = now.getTime();
    this._lastSessionCheckpointTime = now.getTime();
    this._rescheduleTimeout();

    // Observe user idleness (to tick less often) and wake-ups from sleep.
    idleService.addIdleObserver(this, IDLE_TIMEOUT_SECONDS);
    Services.obs.addObserver(this, "wake_notification", false);
  },

  /**
   * Stops the scheduler.
   */
  shutdown: function() {
    if (this._shuttingDown) {
      // Logger may not exist if init() was never called.
      if (this._log) {
        this._log.error("shutdown - Already shut down");
      } else {
        Cu.reportError("TelemetryScheduler.shutdown - Already shut down");
      }
      return;
    }

    this._log.trace("shutdown");
    if (this._schedulerTimer) {
      Policy.clearSchedulerTickTimeout(this._schedulerTimer);
      this._schedulerTimer = null;
    }

    idleService.removeIdleObserver(this, IDLE_TIMEOUT_SECONDS);
    Services.obs.removeObserver(this, "wake_notification");

    this._shuttingDown = true;
  },

  // Cancel a pending tick timeout, if any.
  // NOTE(review): the timer id is intentionally not nulled here; callers
  // immediately reschedule or shut down afterwards.
  _clearTimeout: function() {
    if (this._schedulerTimer) {
      Policy.clearSchedulerTickTimeout(this._schedulerTimer);
    }
  },

  /**
   * Reschedules the tick timer.
   */
  _rescheduleTimeout: function() {
    this._log.trace("_rescheduleTimeout - isUserIdle: " + this._isUserIdle);
    if (this._shuttingDown) {
      this._log.warn("_rescheduleTimeout - already shutdown");
      return;
    }

    this._clearTimeout();

    const now = Policy.now();
    let timeout = SCHEDULER_TICK_INTERVAL_MS;

    // When the user is idle we want to fire the timer less often.
    if (this._isUserIdle) {
      timeout = SCHEDULER_TICK_IDLE_INTERVAL_MS;
      // We need to make sure though that we don't miss sending pings around
      // midnight when we use the longer idle intervals.
      const nextMidnight = Utils.getNextMidnight(now);
      timeout = Math.min(timeout, nextMidnight.getTime() - now.getTime());
    }

    this._log.trace("_rescheduleTimeout - scheduling next tick for " + new Date(now.getTime() + timeout));
    this._schedulerTimer =
      Policy.setSchedulerTickTimeout(() => this._onSchedulerTick(), timeout);
  },

  // Whether a daily ping was already sent since the last local midnight.
  _sentDailyPingToday: function(nowDate) {
    // This is today's date and also the previous midnight (0:00).
    const todayDate = Utils.truncateToDays(nowDate);
    // We consider a ping sent for today if it occurred after or at 00:00 today.
    return (this._lastDailyPingTime >= todayDate.getTime());
  },

  /**
   * Checks if we can send a daily ping or not.
   * @param {Object} nowDate A date object.
   * @return {Boolean} True if we can send the daily ping, false otherwise.
   */
  _isDailyPingDue: function(nowDate) {
    // The daily ping is not due if we already sent one today.
    if (this._sentDailyPingToday(nowDate)) {
      this._log.trace("_isDailyPingDue - already sent one today");
      return false;
    }

    // Avoid overly short sessions.
    const timeSinceLastDaily = nowDate.getTime() - this._lastDailyPingTime;
    if (timeSinceLastDaily < MIN_SUBSESSION_LENGTH_MS) {
      this._log.trace("_isDailyPingDue - delaying daily to keep minimum session length");
      return false;
    }

    this._log.trace("_isDailyPingDue - is due");
    return true;
  },

  /**
   * A helper function to save an aborted-session ping.
   * @param {Number} now The current time, in milliseconds.
   * @param {Object} [competingPayload=null] If we are coalescing the daily and the
   *                 aborted-session pings, this is the payload for the former. Note
   *                 that the reason field of this payload will be changed.
   * @return {Promise} A promise resolved when the ping is saved.
   */
  _saveAbortedPing: function(now, competingPayload=null) {
    this._lastSessionCheckpointTime = now;
    return Impl._saveAbortedSessionPing(competingPayload)
                .catch(e => this._log.error("_saveAbortedPing - Failed", e));
  },

  /**
   * The notifications handler.
   */
  observe: function(aSubject, aTopic, aData) {
    this._log.trace("observe - aTopic: " + aTopic);
    switch (aTopic) {
      case "idle":
        // If the user is idle, increase the tick interval.
        this._isUserIdle = true;
        return this._onSchedulerTick();
      case "active":
        // User is back to work, restore the original tick interval.
        this._isUserIdle = false;
        return this._onSchedulerTick();
      case "wake_notification":
        // The machine woke up from sleep, trigger a tick to avoid sessions
        // spanning more than a day.
        // This is needed because sleep time does not count towards timeouts
        // on Mac & Linux - see bug 1262386, bug 1204823 et al.
        return this._onSchedulerTick();
    }
    return undefined;
  },

  /**
   * Performs a scheduler tick. This function manages Telemetry recurring operations.
   * @return {Promise} A promise, only used when testing, resolved when the scheduled
   *                   operation completes.
   */
  _onSchedulerTick: function() {
    // This call might not be triggered from a timeout. In that case we don't want to
    // leave any previously scheduled timeouts pending.
    this._clearTimeout();

    if (this._shuttingDown) {
      this._log.warn("_onSchedulerTick - already shutdown.");
      return Promise.reject(new Error("Already shutdown."));
    }

    let promise = Promise.resolve();
    try {
      promise = this._schedulerTickLogic();
    } catch (e) {
      Telemetry.getHistogramById("TELEMETRY_SCHEDULER_TICK_EXCEPTION").add(1);
      this._log.error("_onSchedulerTick - There was an exception", e);
    } finally {
      // Always keep the timer running, even after a failed tick.
      this._rescheduleTimeout();
    }

    // This promise is returned to make testing easier.
    return promise;
  },

  /**
   * Implements the scheduler logic.
   * @return {Promise} Resolved when the scheduled task completes. Only used in tests.
   */
  _schedulerTickLogic: function() {
    this._log.trace("_schedulerTickLogic");

    let nowDate = Policy.now();
    let now = nowDate.getTime();

    // A tick arriving much later than the regular interval suggests the machine
    // was asleep; record that for diagnostics.
    if ((now - this._lastTickTime) > (1.1 * SCHEDULER_TICK_INTERVAL_MS) &&
        (this._lastTickTime != 0)) {
      Telemetry.getHistogramById("TELEMETRY_SCHEDULER_WAKEUP").add(1);
      this._log.trace("_schedulerTickLogic - First scheduler tick after sleep.");
    }
    this._lastTickTime = now;

    // Check if the daily ping is due.
    const shouldSendDaily = this._isDailyPingDue(nowDate);

    if (shouldSendDaily) {
      Telemetry.getHistogramById("TELEMETRY_SCHEDULER_SEND_DAILY").add(1);
      this._log.trace("_schedulerTickLogic - Daily ping due.");
      this._lastDailyPingTime = now;
      return Impl._sendDailyPing();
    }

    // Check if the aborted-session ping is due. If a daily ping was saved above, it was
    // already duplicated as an aborted-session ping.
    const isAbortedPingDue =
      (now - this._lastSessionCheckpointTime) >= ABORTED_SESSION_UPDATE_INTERVAL_MS;
    if (isAbortedPingDue) {
      this._log.trace("_schedulerTickLogic - Aborted session ping due.");
      return this._saveAbortedPing(now);
    }

    // No ping is due.
    this._log.trace("_schedulerTickLogic - No ping due.");
    return Promise.resolve();
  },

  /**
   * Update the scheduled pings if some other ping was sent.
   * @param {String} reason The reason of the ping that was sent.
   * @param {Object} [competingPayload=null] The payload of the ping that was sent. The
   *                 reason of this payload will be changed.
   */
  reschedulePings: function(reason, competingPayload = null) {
    if (this._shuttingDown) {
      this._log.error("reschedulePings - already shutdown");
      return;
    }

    this._log.trace("reschedulePings - reason: " + reason);
    let now = Policy.now();
    this._lastAdhocPingTime = now.getTime();
    if (reason == REASON_ENVIRONMENT_CHANGE) {
      // We just generated an environment-changed ping, save it as an aborted session and
      // update the schedules.
      // NOTE(review): the returned promise is deliberately not awaited here - TODO confirm.
      this._saveAbortedPing(now.getTime(), competingPayload);
      // If we're close to midnight, skip today's daily ping and reschedule it for tomorrow.
      let nearestMidnight = Utils.getNearestMidnight(now, SCHEDULER_MIDNIGHT_TOLERANCE_MS);
      if (nearestMidnight) {
        this._lastDailyPingTime = now.getTime();
      }
    }

    this._rescheduleTimeout();
  },
};
+
this.EXPORTED_SYMBOLS = ["TelemetrySession"];

// Public, frozen facade for this module. Every method delegates to the Impl
// singleton defined below; freezing prevents consumers from patching it.
this.TelemetrySession = Object.freeze({
  Constants: Object.freeze({
    PREF_PREVIOUS_BUILDID: PREF_PREVIOUS_BUILDID,
  }),
  /**
   * Send a ping to a test server. Used only for testing.
   */
  testPing: function() {
    return Impl.testPing();
  },
  /**
   * Returns the current telemetry payload.
   * @param reason Optional, the reason to trigger the payload.
   * @param clearSubsession Optional, whether to clear subsession specific data.
   * @returns Object
   */
  getPayload: function(reason, clearSubsession = false) {
    return Impl.getPayload(reason, clearSubsession);
  },
  /**
   * Returns a promise that resolves to an array of thread hang stats from content processes, one entry per process.
   * The structure of each entry is identical to that of "threadHangStats" in nsITelemetry.
   * While thread hang stats are also part of the child payloads, this function is useful for cheaply getting this information,
   * which is useful for realtime hang monitoring.
   * Child processes that do not respond, or spawn/die during execution of this function are excluded from the result.
   * @returns Promise
   */
  getChildThreadHangs: function() {
    return Impl.getChildThreadHangs();
  },
  /**
   * Save the session state to a pending file.
   * Used only for testing purposes.
   */
  testSavePendingPing: function() {
    return Impl.testSavePendingPing();
  },
  /**
   * Collect and store information about startup.
   */
  gatherStartup: function() {
    return Impl.gatherStartup();
  },
  /**
   * Inform the ping which AddOns are installed.
   *
   * @param aAddOns - The AddOns.
   */
  setAddOns: function(aAddOns) {
    return Impl.setAddOns(aAddOns);
  },
  /**
   * Descriptive metadata
   *
   * @param reason
   *        The reason for the telemetry ping, this will be included in the
   *        returned metadata,
   * @return The metadata as a JS object
   */
  getMetadata: function(reason) {
    return Impl.getMetadata(reason);
  },
  /**
   * Used only for testing purposes.
   * Resets Impl's session/subsession identifiers and counters to a pristine
   * state, then uninstalls observers.
   */
  testReset: function() {
    Impl._sessionId = null;
    Impl._subsessionId = null;
    Impl._previousSessionId = null;
    Impl._previousSubsessionId = null;
    Impl._subsessionCounter = 0;
    Impl._profileSubsessionCounter = 0;
    Impl._subsessionStartActiveTicks = 0;
    Impl._subsessionStartTimeMonotonic = 0;
    Impl._lastEnvironmentChangeDate = Policy.monotonicNow();
    this.testUninstall();
  },
  /**
   * Triggers shutdown of the module.
   */
  shutdown: function() {
    return Impl.shutdownChromeProcess();
  },
  /**
   * Sets up components used in the content process.
   */
  setupContent: function(testing = false) {
    return Impl.setupContentProcess(testing);
  },
  /**
   * Used only for testing purposes.
   */
  testUninstall: function() {
    try {
      Impl.uninstall();
    } catch (ex) {
      // Ignore errors
    }
  },
  /**
   * Lightweight init function, called as soon as Firefox starts.
   */
  earlyInit: function(aTesting = false) {
    return Impl.earlyInit(aTesting);
  },
  /**
   * Does the "heavy" Telemetry initialization later on, so we
   * don't impact startup performance.
   * @return {Promise} Resolved when the initialization completes.
   */
  delayedInit: function() {
    return Impl.delayedInit();
  },
  /**
   * Send a notification.
   */
  observe: function (aSubject, aTopic, aData) {
    return Impl.observe(aSubject, aTopic, aData);
  },
});
+
+// Private implementation singleton backing the frozen TelemetrySession facade.
+var Impl = {
+ // Cache of histogram objects previously looked up by id
+ // (populated by handleMemoryReport).
+ _histograms: {},
+ // Whether the delayed initialization has completed.
+ _initialized: false,
+ // Backing field for the lazy _log getter.
+ _logger: null,
+ // Last observed values of cumulative-count memory reporters, used to
+ // report deltas between pings (see handleMemoryReport).
+ _prevValues: {},
+ // Slow SQL statements gathered at startup; included in the payload
+ // when non-empty.
+ _slowSQLStartup: {},
+ _hasWindowRestoredObserver: false,
+ _hasXulWindowVisibleObserver: false,
+ // Startup I/O measurements merged into the simple measurements.
+ _startupIO : {},
+ // The previous build ID, if this is the first run with a new build.
+ // Null if this is the first run, or the previous build ID is unknown.
+ _previousBuildId: null,
+ // Telemetry payloads sent by child processes.
+ // Each element is in the format {source: <weak-ref>, payload: <object>},
+ // where source is a weak reference to the child process,
+ // and payload is the telemetry payload from that child process.
+ _childTelemetry: [],
+ // Thread hangs from child processes.
+ // Used for TelemetrySession.getChildThreadHangs(); not sent with Telemetry pings.
+ // TelemetrySession.getChildThreadHangs() is used by extensions such as Statuser (https://github.com/chutten/statuser).
+ // Each element is in the format {source: <weak-ref>, payload: <object>},
+ // where source is a weak reference to the child process,
+ // and payload contains the thread hang stats from that child process.
+ _childThreadHangs: [],
+ // Array of the resolve functions of all the promises that are waiting for the child thread hang stats to arrive, used to resolve all those promises at once.
+ _childThreadHangsResolveFunctions: [],
+ // Timeout function for child thread hang stats retrieval.
+ _childThreadHangsTimeout: null,
+ // Unique id that identifies this session so the server can cope with duplicate
+ // submissions, orphaning and other oddities. The id is shared across subsessions.
+ _sessionId: null,
+ // Random subsession id.
+ _subsessionId: null,
+ // Session id of the previous session, null on first run.
+ _previousSessionId: null,
+ // Subsession id of the previous subsession (even if it was in a different session),
+ // null on first run.
+ _previousSubsessionId: null,
+ // The running no. of subsessions since the start of the browser session
+ _subsessionCounter: 0,
+ // The running no. of all subsessions for the whole profile life time
+ _profileSubsessionCounter: 0,
+ // Date of the last session split
+ _subsessionStartDate: null,
+ // Start time of the current subsession using a monotonic clock for the subsession
+ // length measurements.
+ _subsessionStartTimeMonotonic: 0,
+ // The active ticks counted when the subsession starts
+ _subsessionStartActiveTicks: 0,
+ // A task performing delayed initialization of the chrome process
+ _delayedInitTask: null,
+ // Need a timeout in case children are tardy in giving back their memory reports.
+ _totalMemoryTimeout: undefined,
+ // True when running under tests (set by earlyInit/setupContentProcess).
+ _testing: false,
+ // An accumulator of total memory across all processes. Only valid once the final child reports.
+ _totalMemory: null,
+ // A Set of outstanding USS report ids
+ _childrenToHearFrom: null,
+ // monotonically-increasing id for USS reports
+ _nextTotalMemoryId: 1,
+ // Monotonic time of the most recent environment change, used to throttle
+ // environment-change handling (see delayedInit).
+ _lastEnvironmentChangeDate: 0,
+
+ get _log() {
+ if (!this._logger) {
+ this._logger = Log.repository.getLoggerWithMessagePrefix(LOGGER_NAME, LOGGER_PREFIX);
+ }
+ return this._logger;
+ },
+
+ /**
+ * Gets a series of simple measurements (counters). At the moment, this
+ * only returns startup data from nsIAppStartup.getStartupInfo().
+ * @param {Boolean} forSavedSession True when building a saved-session
+ * payload. NOTE(review): not referenced in this implementation.
+ * @param {Boolean} isSubsession True if this is a subsession, false otherwise.
+ * @param {Boolean} clearSubsession True if a new subsession is being started, false otherwise.
+ *
+ * @return simple measurements as a dictionary.
+ */
+ getSimpleMeasurements: function getSimpleMeasurements(forSavedSession, isSubsession, clearSubsession) {
+ this._log.trace("getSimpleMeasurements");
+
+ let si = Services.startup.getStartupInfo();
+
+ // Measurements common to chrome and content processes.
+ let elapsedTime = Date.now() - si.process;
+ var ret = {
+ totalTime: Math.round(elapsedTime / 1000), // totalTime, in seconds
+ uptime: Math.round(elapsedTime / 60000) // uptime in minutes
+ }
+
+ // Look for app-specific timestamps
+ var appTimestamps = {};
+ try {
+ let o = {};
+ Cu.import("resource://gre/modules/TelemetryTimestamps.jsm", o);
+ appTimestamps = o.TelemetryTimestamps.get();
+ } catch (ex) {}
+
+ // Only submit this if the extended set is enabled.
+ if (!Utils.isContentProcess && Telemetry.canRecordExtended) {
+ try {
+ ret.addonManager = AddonManagerPrivate.getSimpleMeasures();
+ ret.UITelemetry = UITelemetry.getSimpleMeasures();
+ } catch (ex) {}
+ }
+
+ if (si.process) {
+ // Convert absolute startup timestamps into offsets from process start.
+ for (let field of Object.keys(si)) {
+ if (field == "process")
+ continue;
+ ret[field] = si[field] - si.process
+ }
+
+ for (let p in appTimestamps) {
+ if (!(p in ret) && appTimestamps[p])
+ ret[p] = appTimestamps[p] - si.process;
+ }
+ }
+
+ ret.startupInterrupted = Number(Services.startup.interrupted);
+
+ ret.js = Cu.getJSEngineTelemetryValue();
+
+ let maximalNumberOfConcurrentThreads = Telemetry.maximalNumberOfConcurrentThreads;
+ if (maximalNumberOfConcurrentThreads) {
+ ret.maximalNumberOfConcurrentThreads = maximalNumberOfConcurrentThreads;
+ }
+
+ if (Utils.isContentProcess) {
+ return ret;
+ }
+
+ // Measurements specific to chrome process
+
+ // Update debuggerAttached flag
+ let debugService = Cc["@mozilla.org/xpcom/debug;1"].getService(Ci.nsIDebug2);
+ let isDebuggerAttached = debugService.isDebuggerAttached;
+ gWasDebuggerAttached = gWasDebuggerAttached || isDebuggerAttached;
+ ret.debuggerAttached = Number(gWasDebuggerAttached);
+
+ let shutdownDuration = Telemetry.lastShutdownDuration;
+ if (shutdownDuration)
+ ret.shutdownDuration = shutdownDuration;
+
+ let failedProfileLockCount = Telemetry.failedProfileLockCount;
+ if (failedProfileLockCount)
+ ret.failedProfileLockCount = failedProfileLockCount;
+
+ for (let ioCounter in this._startupIO)
+ ret[ioCounter] = this._startupIO[ioCounter];
+
+ ret.savedPings = TelemetryStorage.pendingPingCount;
+
+ // -1 signals that the session recorder was unavailable.
+ ret.activeTicks = -1;
+ let sr = TelemetryController.getSessionRecorder();
+ if (sr) {
+ let activeTicks = sr.activeTicks;
+ if (isSubsession) {
+ // Report only the ticks accumulated during this subsession.
+ activeTicks = sr.activeTicks - this._subsessionStartActiveTicks;
+ }
+
+ if (clearSubsession) {
+ this._subsessionStartActiveTicks = activeTicks;
+ }
+
+ ret.activeTicks = activeTicks;
+ }
+
+ ret.pingsOverdue = TelemetrySend.overduePingsCount;
+
+ return ret;
+ },
+
+ /**
+ * When reflecting a histogram into JS, Telemetry hands us an object
+ * with the following properties:
+ *
+ * - min, max, histogram_type, sum, sum_squares_{lo,hi}: simple integers;
+ * - counts: array of counts for histogram buckets;
+ * - ranges: array of calculated bucket sizes.
+ *
+ * This format is not straightforward to read and potentially bulky
+ * with lots of zeros in the counts array. Packing histograms makes
+ * raw histograms easier to read and compresses the data a little bit.
+ *
+ * Returns an object:
+ * { range: [min, max], bucket_count: <number of buckets>,
+ * histogram_type: <histogram_type>, sum: <sum>,
+ * values: { bucket1: count1, bucket2: count2, ... } }
+ */
+ packHistogram: function packHistogram(hgram) {
+ let r = hgram.ranges;
+ let c = hgram.counts;
+ let retgram = {
+ range: [r[1], r[r.length - 1]],
+ bucket_count: r.length,
+ histogram_type: hgram.histogram_type,
+ values: {},
+ sum: hgram.sum
+ };
+
+ // "first" tracks whether we still need to emit a leading zero bucket;
+ // "last" is the index just past the last non-zero bucket we emitted.
+ let first = true;
+ let last = 0;
+
+ for (let i = 0; i < c.length; i++) {
+ let value = c[i];
+ if (!value)
+ continue;
+
+ // add a lower bound
+ if (i && first) {
+ retgram.values[r[i - 1]] = 0;
+ }
+ first = false;
+ last = i + 1;
+ retgram.values[r[i]] = value;
+ }
+
+ // add an upper bound
+ if (last && last < c.length)
+ retgram.values[r[last]] = 0;
+ return retgram;
+ },
+
+ /**
+ * Get the type of the dataset that needs to be collected, based on the preferences.
+ * @return {Integer} A value from nsITelemetry.DATASET_*.
+ */
+ getDatasetType: function() {
+ return Telemetry.canRecordExtended ? Ci.nsITelemetry.DATASET_RELEASE_CHANNEL_OPTIN
+ : Ci.nsITelemetry.DATASET_RELEASE_CHANNEL_OPTOUT;
+ },
+
+ /**
+ * Collect the registered histograms, packed, grouped by process suffix.
+ * @param subsession Whether to snapshot the subsession histograms.
+ * @param clearSubsession Whether to clear the subsession snapshot.
+ * @return {Object} Mapping of process suffix -> {histogram name -> packed histogram}.
+ */
+ getHistograms: function getHistograms(subsession, clearSubsession) {
+ this._log.trace("getHistograms - subsession: " + subsession +
+ ", clearSubsession: " + clearSubsession);
+
+ let registered =
+ Telemetry.registeredHistograms(this.getDatasetType(), []);
+ if (this._testing == false) {
+ // Omit telemetry test histograms outside of tests.
+ registered = registered.filter(n => !n.startsWith("TELEMETRY_TEST_"));
+ }
+ // Also consider the STARTUP_-prefixed variant of every registered name.
+ registered = registered.concat(registered.map(n => "STARTUP_" + n));
+
+ let hls = subsession ? Telemetry.snapshotSubsessionHistograms(clearSubsession)
+ : Telemetry.histogramSnapshots;
+ let ret = {};
+
+ for (let name of registered) {
+ // Look up each per-process suffix variant of the histogram.
+ for (let suffix of Object.values(HISTOGRAM_SUFFIXES)) {
+ if (name + suffix in hls) {
+ if (!(suffix in ret)) {
+ ret[suffix] = {};
+ }
+ ret[suffix][name] = this.packHistogram(hls[name + suffix]);
+ }
+ }
+ }
+
+ return ret;
+ },
+
+ getAddonHistograms: function getAddonHistograms() {
+ this._log.trace("getAddonHistograms");
+
+ let ahs = Telemetry.addonHistogramSnapshots;
+ let ret = {};
+
+ for (let addonName in ahs) {
+ let addonHistograms = ahs[addonName];
+ let packedHistograms = {};
+ for (let name in addonHistograms) {
+ packedHistograms[name] = this.packHistogram(addonHistograms[name]);
+ }
+ if (Object.keys(packedHistograms).length != 0)
+ ret[addonName] = packedHistograms;
+ }
+
+ return ret;
+ },
+
+ /**
+ * Collect the registered keyed histograms, packed per key, grouped by
+ * process suffix.
+ * @param subsession Whether to snapshot the subsession data.
+ * @param clearSubsession Whether to clear the subsession snapshot.
+ * @return {Object} Mapping of process suffix -> {histogram id -> {key -> packed histogram}}.
+ */
+ getKeyedHistograms: function(subsession, clearSubsession) {
+ this._log.trace("getKeyedHistograms - subsession: " + subsession +
+ ", clearSubsession: " + clearSubsession);
+
+ let registered =
+ Telemetry.registeredKeyedHistograms(this.getDatasetType(), []);
+ if (this._testing == false) {
+ // Omit telemetry test histograms outside of tests.
+ registered = registered.filter(id => !id.startsWith("TELEMETRY_TEST_"));
+ }
+ let ret = {};
+
+ for (let id of registered) {
+ // Look up each per-process suffix variant of the keyed histogram.
+ for (let suffix of Object.values(HISTOGRAM_SUFFIXES)) {
+ let keyed = Telemetry.getKeyedHistogramById(id + suffix);
+ let snapshot = null;
+ if (subsession) {
+ snapshot = clearSubsession ? keyed.snapshotSubsessionAndClear()
+ : keyed.subsessionSnapshot();
+ } else {
+ snapshot = keyed.snapshot();
+ }
+
+ let keys = Object.keys(snapshot);
+ if (keys.length == 0) {
+ // Skip empty keyed histogram.
+ continue;
+ }
+
+ if (!(suffix in ret)) {
+ ret[suffix] = {};
+ }
+ ret[suffix][id] = {};
+ for (let key of keys) {
+ ret[suffix][id][key] = this.packHistogram(snapshot[key]);
+ }
+ }
+ }
+
+ return ret;
+ },
+
+ /**
+ * Get a snapshot of the scalars and clear them.
+ * @param {Boolean} subsession If true, then we collect the data for a subsession.
+ * @param {Boolean} clearSubsession If true, we need to clear the subsession.
+ * @param {Boolean} keyed Take a snapshot of keyed or non keyed scalars.
+ * @return {Object} The scalar data as a Javascript object.
+ */
+ getScalars: function (subsession, clearSubsession, keyed) {
+ this._log.trace("getScalars - subsession: " + subsession + ", clearSubsession: " +
+ clearSubsession + ", keyed: " + keyed);
+
+ if (!subsession) {
+ // We only support scalars for subsessions.
+ this._log.trace("getScalars - We only support scalars in subsessions.");
+ return {};
+ }
+
+ let scalarsSnapshot = keyed ?
+ Telemetry.snapshotKeyedScalars(this.getDatasetType(), clearSubsession) :
+ Telemetry.snapshotScalars(this.getDatasetType(), clearSubsession);
+
+ // Don't return the test scalars.
+ let ret = {};
+ for (let name in scalarsSnapshot) {
+ if (name.startsWith('telemetry.test') && this._testing == false) {
+ this._log.trace("getScalars - Skipping test scalar: " + name);
+ } else {
+ ret[name] = scalarsSnapshot[name];
+ }
+ }
+
+ return ret;
+ },
+
+ /**
+ * Get a snapshot of the built-in events.
+ * @param {Boolean} isSubsession If true, collect the subsession events.
+ * @param {Boolean} clearSubsession If true, clear the subsession events.
+ * @return {Array} The event snapshot; [] outside of subsessions.
+ */
+ getEvents: function(isSubsession, clearSubsession) {
+ if (!isSubsession) {
+ // We only support events for subsessions.
+ this._log.trace("getEvents - We only support events in subsessions.");
+ return [];
+ }
+
+ let events = Telemetry.snapshotBuiltinEvents(this.getDatasetType(),
+ clearSubsession);
+
+ // Don't return the test events outside of test environments.
+ if (!this._testing) {
+ events = events.filter(e => !e[1].startsWith("telemetry.test"));
+ }
+
+ return events;
+ },
+
+ getThreadHangStats: function getThreadHangStats(stats) {
+ this._log.trace("getThreadHangStats");
+
+ stats.forEach((thread) => {
+ thread.activity = this.packHistogram(thread.activity);
+ thread.hangs.forEach((hang) => {
+ hang.histogram = this.packHistogram(hang.histogram);
+ });
+ });
+ return stats;
+ },
+
+ /**
+ * Descriptive metadata
+ *
+ * @param reason
+ * The reason for the telemetry ping, this will be included in the
+ * returned metadata,
+ * @return The metadata as a JS object
+ */
+ getMetadata: function getMetadata(reason) {
+ this._log.trace("getMetadata - Reason " + reason);
+
+ // Start dates are passed through Utils.truncateToDays before formatting.
+ const sessionStartDate = Utils.toLocalTimeISOString(Utils.truncateToDays(this._sessionStartDate));
+ const subsessionStartDate = Utils.toLocalTimeISOString(Utils.truncateToDays(this._subsessionStartDate));
+ const monotonicNow = Policy.monotonicNow();
+
+ let ret = {
+ reason: reason,
+ revision: AppConstants.SOURCE_REVISION_URL,
+ asyncPluginInit: Preferences.get(PREF_ASYNC_PLUGIN_INIT, false),
+
+ // Date.getTimezoneOffset() unintuitively returns negative values if we are ahead of
+ // UTC and vice versa (e.g. -60 for UTC+1). We invert the sign here.
+ timezoneOffset: -this._subsessionStartDate.getTimezoneOffset(),
+ previousBuildId: this._previousBuildId,
+
+ sessionId: this._sessionId,
+ subsessionId: this._subsessionId,
+ previousSessionId: this._previousSessionId,
+ previousSubsessionId: this._previousSubsessionId,
+
+ subsessionCounter: this._subsessionCounter,
+ profileSubsessionCounter: this._profileSubsessionCounter,
+
+ sessionStartDate: sessionStartDate,
+ subsessionStartDate: subsessionStartDate,
+
+ // Compute the session and subsession length in seconds.
+ // We use monotonic clocks as Date() is affected by jumping clocks (leading
+ // to negative lengths and other issues).
+ sessionLength: Math.floor(monotonicNow / 1000),
+ subsessionLength:
+ Math.floor((monotonicNow - this._subsessionStartTimeMonotonic) / 1000),
+ };
+
+ // TODO: Remove this when bug 1201837 lands.
+ if (this._addons)
+ ret.addons = this._addons;
+
+ // TODO: Remove this when bug 1201837 lands.
+ let flashVersion = this.getFlashVersion();
+ if (flashVersion)
+ ret.flashVersion = flashVersion;
+
+ return ret;
+ },
+
+ /**
+ * Pull values from about:memory into corresponding histograms
+ */
+ gatherMemory: function gatherMemory() {
+ if (!Telemetry.canRecordExtended) {
+ this._log.trace("gatherMemory - Extended data recording disabled, skipping.");
+ return;
+ }
+
+ this._log.trace("gatherMemory");
+
+ let mgr;
+ try {
+ mgr = Cc["@mozilla.org/memory-reporter-manager;1"].
+ getService(Ci.nsIMemoryReporterManager);
+ } catch (e) {
+ // OK to skip memory reporters in xpcshell
+ return;
+ }
+
+ // Time the whole collection; the duration is recorded at the end.
+ let histogram = Telemetry.getHistogramById("TELEMETRY_MEMORY_REPORTER_MS");
+ let startTime = new Date();
+
+ // Get memory measurements from distinguished amount attributes. We used
+ // to measure "explicit" too, but it could cause hangs, and the data was
+ // always really noisy anyway. See bug 859657.
+ //
+ // test_TelemetryController.js relies on some of these histograms being
+ // here. If you remove any of the following histograms from here, you'll
+ // have to modify test_TelemetryController.js:
+ //
+ // * MEMORY_JS_GC_HEAP, and
+ // * MEMORY_JS_COMPARTMENTS_SYSTEM.
+ //
+ // The distinguished amount attribute names don't match the telemetry id
+ // names in some cases due to a combination of (a) historical reasons, and
+ // (b) the fact that we can't change telemetry id names without breaking
+ // data continuity.
+ //
+ let boundHandleMemoryReport = this.handleMemoryReport.bind(this);
+ // Read one distinguished amount and feed it into its histogram.
+ function h(id, units, amountName) {
+ try {
+ // If mgr[amountName] throws an exception, just move on -- some amounts
+ // aren't available on all platforms. But if the attribute simply
+ // isn't present, that indicates the distinguished amounts have changed
+ // and this file hasn't been updated appropriately.
+ let amount = mgr[amountName];
+ NS_ASSERT(amount !== undefined,
+ "telemetry accessed an unknown distinguished amount");
+ boundHandleMemoryReport(id, units, amount);
+ } catch (e) {
+ }
+ }
+ // Shorthands for the different reporter unit kinds.
+ let b = (id, n) => h(id, Ci.nsIMemoryReporter.UNITS_BYTES, n);
+ let c = (id, n) => h(id, Ci.nsIMemoryReporter.UNITS_COUNT, n);
+ let cc= (id, n) => h(id, Ci.nsIMemoryReporter.UNITS_COUNT_CUMULATIVE, n);
+ let p = (id, n) => h(id, Ci.nsIMemoryReporter.UNITS_PERCENTAGE, n);
+
+ b("MEMORY_VSIZE", "vsize");
+ b("MEMORY_VSIZE_MAX_CONTIGUOUS", "vsizeMaxContiguous");
+ b("MEMORY_RESIDENT_FAST", "residentFast");
+ b("MEMORY_UNIQUE", "residentUnique");
+ b("MEMORY_HEAP_ALLOCATED", "heapAllocated");
+ p("MEMORY_HEAP_OVERHEAD_FRACTION", "heapOverheadFraction");
+ b("MEMORY_JS_GC_HEAP", "JSMainRuntimeGCHeap");
+ c("MEMORY_JS_COMPARTMENTS_SYSTEM", "JSMainRuntimeCompartmentsSystem");
+ c("MEMORY_JS_COMPARTMENTS_USER", "JSMainRuntimeCompartmentsUser");
+ b("MEMORY_IMAGES_CONTENT_USED_UNCOMPRESSED", "imagesContentUsedUncompressed");
+ b("MEMORY_STORAGE_SQLITE", "storageSQLite");
+ cc("LOW_MEMORY_EVENTS_VIRTUAL", "lowMemoryEventsVirtual");
+ cc("LOW_MEMORY_EVENTS_PHYSICAL", "lowMemoryEventsPhysical");
+ c("GHOST_WINDOWS", "ghostWindows");
+ cc("PAGE_FAULTS_HARD", "pageFaultsHard");
+
+ if (!Utils.isContentProcess && !this._totalMemoryTimeout) {
+ // Only the chrome process should gather total memory
+ // total = parent RSS + sum(child USS)
+ this._totalMemory = mgr.residentFast;
+ if (ppmm.childCount > 1) {
+ // Do not report If we time out waiting for the children to call
+ this._totalMemoryTimeout = setTimeout(
+ () => {
+ this._totalMemoryTimeout = undefined;
+ this._childrenToHearFrom.clear();
+ },
+ TOTAL_MEMORY_COLLECTOR_TIMEOUT);
+ // Ask each child for its USS; responses are matched by report id.
+ this._childrenToHearFrom = new Set();
+ for (let i = 1; i < ppmm.childCount; i++) {
+ let child = ppmm.getChildAt(i);
+ try {
+ child.sendAsyncMessage(MESSAGE_TELEMETRY_GET_CHILD_USS, {id: this._nextTotalMemoryId});
+ this._childrenToHearFrom.add(this._nextTotalMemoryId);
+ this._nextTotalMemoryId++;
+ } catch (ex) {
+ // If a content process has just crashed, then attempting to send it
+ // an async message will throw an exception.
+ Cu.reportError(ex);
+ }
+ }
+ } else {
+ // No children: the parent's resident size is the total.
+ boundHandleMemoryReport(
+ "MEMORY_TOTAL",
+ Ci.nsIMemoryReporter.UNITS_BYTES,
+ this._totalMemory);
+ }
+ }
+
+ histogram.add(new Date() - startTime);
+ },
+
+ /**
+ * Accumulate a memory reporter amount into the histogram with the given id.
+ * Byte amounts are recorded in KiB, percentage amounts divided by 100
+ * (reporters provide them 100x their raw value), plain counts as-is, and
+ * cumulative counts as the delta since the previous ping.
+ * @param id The histogram id to accumulate into.
+ * @param units One of the Ci.nsIMemoryReporter.UNITS_* constants.
+ * @param amount The raw reporter amount.
+ */
+ handleMemoryReport: function(id, units, amount) {
+ let val;
+ if (units == Ci.nsIMemoryReporter.UNITS_BYTES) {
+ val = Math.floor(amount / 1024);
+ }
+ else if (units == Ci.nsIMemoryReporter.UNITS_PERCENTAGE) {
+ // UNITS_PERCENTAGE amounts are 100x greater than their raw value.
+ val = Math.floor(amount / 100);
+ }
+ else if (units == Ci.nsIMemoryReporter.UNITS_COUNT) {
+ val = amount;
+ }
+ else if (units == Ci.nsIMemoryReporter.UNITS_COUNT_CUMULATIVE) {
+ // If the reporter gives us a cumulative count, we'll report the
+ // difference in its value between now and our previous ping.
+
+ if (!(id in this._prevValues)) {
+ // If this is the first time we're reading this reporter, store its
+ // current value but don't report it in the telemetry ping, so we
+ // ignore the effect startup had on the reporter.
+ this._prevValues[id] = amount;
+ return;
+ }
+
+ val = amount - this._prevValues[id];
+ this._prevValues[id] = amount;
+ }
+ else {
+ NS_ASSERT(false, "Can't handle memory reporter with units " + units);
+ return;
+ }
+
+ // Cache histogram lookups by id.
+ let h = this._histograms[id];
+ if (!h) {
+ h = Telemetry.getHistogramById(id);
+ this._histograms[id] = h;
+ }
+ h.add(val);
+ },
+
+ getChildPayloads: function getChildPayloads() {
+ return this._childTelemetry.map(child => child.payload);
+ },
+
+ /**
+ * Get the current session's payload using the provided
+ * simpleMeasurements and info, which are typically obtained by a call
+ * to |this.getSimpleMeasurements| and |this.getMetadata|,
+ * respectively.
+ */
+ assemblePayloadWithMeasurements: function(simpleMeasurements, info, reason, clearSubsession) {
+ const isSubsession = IS_UNIFIED_TELEMETRY && !this._isClassicReason(reason);
+ clearSubsession = IS_UNIFIED_TELEMETRY && clearSubsession;
+ this._log.trace("assemblePayloadWithMeasurements - reason: " + reason +
+ ", submitting subsession data: " + isSubsession);
+
+ // This allows wrapping data retrieval calls in a try-catch block so that
+ // failures don't break the rest of the ping assembly.
+ const protect = (fn, defaultReturn = null) => {
+ try {
+ return fn();
+ } catch (ex) {
+ this._log.error("assemblePayloadWithMeasurements - caught exception", ex);
+ return defaultReturn;
+ }
+ };
+
+ // Payload common to chrome and content processes.
+ let payloadObj = {
+ ver: PAYLOAD_VERSION,
+ simpleMeasurements: simpleMeasurements,
+ };
+
+ // Add extended set measurements common to chrome & content processes
+ if (Telemetry.canRecordExtended) {
+ payloadObj.chromeHangs = protect(() => Telemetry.chromeHangs);
+ payloadObj.threadHangStats = protect(() => this.getThreadHangStats(Telemetry.threadHangStats));
+ payloadObj.log = protect(() => TelemetryLog.entries());
+ payloadObj.webrtc = protect(() => Telemetry.webrtcStats);
+ }
+
+ if (Utils.isContentProcess) {
+ return payloadObj;
+ }
+
+ // Additional payload for chrome process.
+ // Histograms come back grouped by process suffix; split them out per process.
+ let histograms = protect(() => this.getHistograms(isSubsession, clearSubsession), {});
+ let keyedHistograms = protect(() => this.getKeyedHistograms(isSubsession, clearSubsession), {});
+ payloadObj.histograms = histograms[HISTOGRAM_SUFFIXES.PARENT] || {};
+ payloadObj.keyedHistograms = keyedHistograms[HISTOGRAM_SUFFIXES.PARENT] || {};
+ payloadObj.processes = {
+ parent: {
+ scalars: protect(() => this.getScalars(isSubsession, clearSubsession)),
+ keyedScalars: protect(() => this.getScalars(isSubsession, clearSubsession, true)),
+ events: protect(() => this.getEvents(isSubsession, clearSubsession)),
+ },
+ content: {
+ histograms: histograms[HISTOGRAM_SUFFIXES.CONTENT],
+ keyedHistograms: keyedHistograms[HISTOGRAM_SUFFIXES.CONTENT],
+ },
+ };
+
+ // Only include the GPU process if we've accumulated data for it.
+ if (HISTOGRAM_SUFFIXES.GPU in histograms ||
+ HISTOGRAM_SUFFIXES.GPU in keyedHistograms)
+ {
+ payloadObj.processes.gpu = {
+ histograms: histograms[HISTOGRAM_SUFFIXES.GPU],
+ keyedHistograms: keyedHistograms[HISTOGRAM_SUFFIXES.GPU],
+ };
+ }
+
+ payloadObj.info = info;
+
+ // Add extended set measurements for chrome process.
+ if (Telemetry.canRecordExtended) {
+ payloadObj.slowSQL = protect(() => Telemetry.slowSQL);
+ payloadObj.fileIOReports = protect(() => Telemetry.fileIOReports);
+ payloadObj.lateWrites = protect(() => Telemetry.lateWrites);
+
+ // Add the addon histograms if they are present
+ let addonHistograms = protect(() => this.getAddonHistograms());
+ if (addonHistograms && Object.keys(addonHistograms).length > 0) {
+ payloadObj.addonHistograms = addonHistograms;
+ }
+
+ payloadObj.addonDetails = protect(() => AddonManagerPrivate.getTelemetryDetails());
+
+ // Keep the UI session alive when we are only peeking at the payload.
+ let clearUIsession = !(reason == REASON_GATHER_PAYLOAD || reason == REASON_GATHER_SUBSESSION_PAYLOAD);
+ payloadObj.UIMeasurements = protect(() => UITelemetry.getUIMeasurements(clearUIsession));
+
+ if (this._slowSQLStartup &&
+ Object.keys(this._slowSQLStartup).length != 0 &&
+ (Object.keys(this._slowSQLStartup.mainThread).length ||
+ Object.keys(this._slowSQLStartup.otherThreads).length)) {
+ payloadObj.slowSQLStartup = this._slowSQLStartup;
+ }
+
+ if (!this._isClassicReason(reason)) {
+ payloadObj.processes.parent.gc = protect(() => GCTelemetry.entries("main", clearSubsession));
+ payloadObj.processes.content.gc = protect(() => GCTelemetry.entries("content", clearSubsession));
+ }
+ }
+
+ if (this._childTelemetry.length) {
+ payloadObj.childPayloads = protect(() => this.getChildPayloads());
+ }
+
+ return payloadObj;
+ },
+
+ /**
+ * Start a new subsession.
+ */
+ startNewSubsession: function () {
+ this._subsessionStartDate = Policy.now();
+ this._subsessionStartTimeMonotonic = Policy.monotonicNow();
+ this._previousSubsessionId = this._subsessionId;
+ this._subsessionId = Policy.generateSubsessionUUID();
+ this._subsessionCounter++;
+ this._profileSubsessionCounter++;
+ },
+
+ /**
+ * Assemble the full session payload for the given reason.
+ * @param reason The ping reason (a REASON_* constant).
+ * @param clearSubsession Whether to start a new subsession afterwards.
+ * @return The assembled payload object.
+ * @throws Re-throws any assembly exception after counting it in
+ * TELEMETRY_ASSEMBLE_PAYLOAD_EXCEPTION.
+ */
+ getSessionPayload: function getSessionPayload(reason, clearSubsession) {
+ this._log.trace("getSessionPayload - reason: " + reason + ", clearSubsession: " + clearSubsession);
+
+ let payload;
+ try {
+ // Mobile platforms never split subsessions.
+ const isMobile = ["gonk", "android"].includes(AppConstants.platform);
+ const isSubsession = isMobile ? false : !this._isClassicReason(reason);
+
+ if (isMobile) {
+ clearSubsession = false;
+ }
+
+ let measurements =
+ this.getSimpleMeasurements(reason == REASON_SAVED_SESSION, isSubsession, clearSubsession);
+ let info = !Utils.isContentProcess ? this.getMetadata(reason) : null;
+ payload = this.assemblePayloadWithMeasurements(measurements, info, reason, clearSubsession);
+ } catch (ex) {
+ Telemetry.getHistogramById("TELEMETRY_ASSEMBLE_PAYLOAD_EXCEPTION").add(1);
+ throw ex;
+ } finally {
+ // Split the subsession even if assembly threw, so state stays consistent.
+ if (!Utils.isContentProcess && clearSubsession) {
+ this.startNewSubsession();
+ // Persist session data to disk (don't wait until it completes).
+ let sessionData = this._getSessionDataObject();
+ TelemetryStorage.saveSessionData(sessionData);
+
+ // Notify that there was a subsession split in the parent process. This is an
+ // internal topic and is only meant for internal Telemetry usage.
+ Services.obs.notifyObservers(null, "internal-telemetry-after-subsession-split", null);
+ }
+ }
+
+ return payload;
+ },
+
+ /**
+ * Send data to the server. Record success/send-time in histograms
+ * @param reason The reason for the ping.
+ * @return The promise returned by TelemetryController.submitExternalPing.
+ */
+ send: function send(reason) {
+ this._log.trace("send - Reason " + reason);
+ // populate histograms one last time
+ this.gatherMemory();
+
+ const isSubsession = !this._isClassicReason(reason);
+ let payload = this.getSessionPayload(reason, isSubsession);
+ let options = {
+ addClientId: true,
+ addEnvironment: true,
+ };
+ return TelemetryController.submitExternalPing(getPingType(payload), payload, options);
+ },
+
+ attachObservers: function attachObservers() {
+ if (!this._initialized)
+ return;
+ Services.obs.addObserver(this, "idle-daily", false);
+ if (Telemetry.canRecordExtended) {
+ Services.obs.addObserver(this, TOPIC_CYCLE_COLLECTOR_BEGIN, false);
+ }
+ },
+
+ detachObservers: function detachObservers() {
+ if (!this._initialized)
+ return;
+ Services.obs.removeObserver(this, "idle-daily");
+ try {
+ // Tests may flip Telemetry.canRecordExtended on and off. Just try to remove this
+ // observer and catch if it fails because the observer was not added.
+ Services.obs.removeObserver(this, TOPIC_CYCLE_COLLECTOR_BEGIN);
+ } catch (e) {
+ this._log.warn("detachObservers - Failed to remove " + TOPIC_CYCLE_COLLECTOR_BEGIN, e);
+ }
+ },
+
+ /**
+ * Lightweight init function, called as soon as Firefox starts.
+ * @param testing True when running under tests; skips the
+ * already-initialized and can-record guards.
+ */
+ earlyInit: function(testing) {
+ this._log.trace("earlyInit");
+
+ this._initStarted = true;
+ this._testing = testing;
+
+ if (this._initialized && !testing) {
+ this._log.error("earlyInit - already initialized");
+ return;
+ }
+
+ if (!Telemetry.canRecordBase && !testing) {
+ this._log.config("earlyInit - Telemetry recording is disabled, skipping Chrome process setup.");
+ return;
+ }
+
+ // Generate a unique id once per session so the server can cope with duplicate
+ // submissions, orphaning and other oddities. The id is shared across subsessions.
+ this._sessionId = Policy.generateSessionUUID();
+ this.startNewSubsession();
+ // startNewSubsession sets |_subsessionStartDate| to the current date/time. Use
+ // the very same value for |_sessionStartDate|.
+ this._sessionStartDate = this._subsessionStartDate;
+
+ // Annotate crash reports with this session id.
+ annotateCrashReport(this._sessionId);
+
+ // Initialize some probes that are kept in their own modules
+ this._thirdPartyCookies = new ThirdPartyCookieProbe();
+ this._thirdPartyCookies.init();
+
+ // Record old value and update build ID preference if this is the first
+ // run with a new build ID.
+ let previousBuildId = Preferences.get(PREF_PREVIOUS_BUILDID, null);
+ let thisBuildID = Services.appinfo.appBuildID;
+ // If there is no previousBuildId preference, we send null to the server.
+ if (previousBuildId != thisBuildID) {
+ this._previousBuildId = previousBuildId;
+ Preferences.set(PREF_PREVIOUS_BUILDID, thisBuildID);
+ }
+
+ Services.obs.addObserver(this, "sessionstore-windows-restored", false);
+ if (AppConstants.platform === "android") {
+ Services.obs.addObserver(this, "application-background", false);
+ }
+ Services.obs.addObserver(this, "xul-window-visible", false);
+ this._hasWindowRestoredObserver = true;
+ this._hasXulWindowVisibleObserver = true;
+
+ // Listen for payloads, thread hangs and USS reports from child processes.
+ ppml.addMessageListener(MESSAGE_TELEMETRY_PAYLOAD, this);
+ ppml.addMessageListener(MESSAGE_TELEMETRY_THREAD_HANGS, this);
+ ppml.addMessageListener(MESSAGE_TELEMETRY_USS, this);
+},
+
+/**
+ * Does the "heavy" Telemetry initialization later on, so we
+ * don't impact startup performance.
+ * @return {Promise} Resolved when the initialization completes.
+ */
+ delayedInit:function() {
+ this._log.trace("delayedInit");
+
+ this._delayedInitTask = Task.spawn(function* () {
+ try {
+ this._initialized = true;
+
+ yield this._loadSessionData();
+ // Update the session data to keep track of new subsessions created before
+ // the initialization.
+ yield TelemetryStorage.saveSessionData(this._getSessionDataObject());
+
+ this.attachObservers();
+ this.gatherMemory();
+
+ if (Telemetry.canRecordExtended) {
+ GCTelemetry.init();
+ }
+
+ Telemetry.asyncFetchTelemetryData(function () {});
+
+ if (IS_UNIFIED_TELEMETRY) {
+ // Check for a previously written aborted session ping.
+ yield TelemetryController.checkAbortedSessionPing();
+
+ // Write the first aborted-session ping as early as possible. Just do that
+ // if we are not testing, since calling Telemetry.reset() will make a previous
+ // aborted ping a pending ping.
+ if (!this._testing) {
+ yield this._saveAbortedSessionPing();
+ }
+
+ // The last change date for the environment, used to throttle environment changes.
+ this._lastEnvironmentChangeDate = Policy.monotonicNow();
+ TelemetryEnvironment.registerChangeListener(ENVIRONMENT_CHANGE_LISTENER,
+ (reason, data) => this._onEnvironmentChange(reason, data));
+
+ // Start the scheduler.
+ // We skip this if unified telemetry is off, so we don't
+ // trigger the new unified ping types.
+ TelemetryScheduler.init();
+ }
+
+ this._delayedInitTask = null;
+ } catch (e) {
+ // Clear the task handle even on failure so a retry is possible.
+ this._delayedInitTask = null;
+ throw e;
+ }
+ }.bind(this));
+
+ return this._delayedInitTask;
+ },
+
+ /**
+ * Initializes telemetry for a content process.
+ * @param testing True when running under tests; shortens the deferred
+ * initialization delay.
+ */
+ setupContentProcess: function setupContentProcess(testing) {
+ this._log.trace("setupContentProcess");
+ this._testing = testing;
+
+ if (!Telemetry.canRecordBase) {
+ this._log.trace("setupContentProcess - base recording is disabled, not initializing");
+ return;
+ }
+
+ Services.obs.addObserver(this, "content-child-shutdown", false);
+ cpml.addMessageListener(MESSAGE_TELEMETRY_GET_CHILD_THREAD_HANGS, this);
+ cpml.addMessageListener(MESSAGE_TELEMETRY_GET_CHILD_USS, this);
+
+ // Defer the heavy part of initialization to avoid impacting startup.
+ let delayedTask = new DeferredTask(function* () {
+ this._initialized = true;
+
+ this.attachObservers();
+ this.gatherMemory();
+
+ if (Telemetry.canRecordExtended) {
+ GCTelemetry.init();
+ }
+ }.bind(this), testing ? TELEMETRY_TEST_DELAY : TELEMETRY_DELAY);
+
+ delayedTask.arm();
+ },
+
+ getFlashVersion: function getFlashVersion() {
+ this._log.trace("getFlashVersion");
+ let host = Cc["@mozilla.org/plugin/host;1"].getService(Ci.nsIPluginHost);
+ let tags = host.getPluginTags();
+
+ for (let i = 0; i < tags.length; i++) {
+ if (tags[i].name == "Shockwave Flash")
+ return tags[i].version;
+ }
+
+ return null;
+ },
+
  /**
   * Routes telemetry IPC messages. Payloads, thread-hang stats and USS
   * reports are handled in the parent process; the GET_CHILD_* requests are
   * handled in the content process.
   * @param {Object} message - The IPC message, with |name| and |data|.
   */
  receiveMessage: function receiveMessage(message) {
    this._log.trace("receiveMessage - Message name " + message.name);
    switch (message.name) {
    case MESSAGE_TELEMETRY_PAYLOAD:
    {
      // In parent process, receive Telemetry payload from child
      let source = message.data.childUUID;
      delete message.data.childUUID;

      this._childTelemetry.push({
        source: source,
        payload: message.data,
      });

      // Bound memory usage: once over the cap, drop the oldest payload and
      // count the discard in a histogram.
      if (this._childTelemetry.length == MAX_NUM_CONTENT_PAYLOADS + 1) {
        this._childTelemetry.shift();
        Telemetry.getHistogramById("TELEMETRY_DISCARDED_CONTENT_PINGS_COUNT").add();
      }

      break;
    }
    case MESSAGE_TELEMETRY_THREAD_HANGS:
    {
      // Accumulate child thread hang stats from this child
      this._childThreadHangs.push(message.data);

      // Check if we've got data from all the children, accounting for child processes dying
      // if it happens before the last response is received and no new child processes are spawned at the exact same time
      // If that happens, we can resolve the promise earlier rather than having to wait for the timeout to expire
      // Basically, the number of replies is at most the number of messages sent out, this._childCount,
      // and also at most the number of child processes that currently exist
      if (this._childThreadHangs.length === Math.min(this._childCount, ppmm.childCount)) {
        clearTimeout(this._childThreadHangsTimeout);

        // Resolve all the promises that are waiting on these thread hang stats
        // We resolve here instead of rejecting because a partial set of stats
        // is still useful; children that died simply aren't included.
        for (let resolve of this._childThreadHangsResolveFunctions) {
          resolve(this._childThreadHangs);
        }
        this._childThreadHangsResolveFunctions = [];
      }

      break;
    }
    case MESSAGE_TELEMETRY_GET_CHILD_THREAD_HANGS:
    {
      // In child process, send the requested child thread hangs
      this.sendContentProcessThreadHangs();
      break;
    }
    case MESSAGE_TELEMETRY_USS:
    {
      // In parent process, receive the USS report from the child.
      // delete() is truthy only if the child was still expected to report.
      if (this._totalMemoryTimeout && this._childrenToHearFrom.delete(message.data.id)) {
        this._totalMemory += message.data.bytes;
        if (this._childrenToHearFrom.size == 0) {
          clearTimeout(this._totalMemoryTimeout);
          this._totalMemoryTimeout = undefined;
          this.handleMemoryReport(
            "MEMORY_TOTAL",
            Ci.nsIMemoryReporter.UNITS_BYTES,
            this._totalMemory);
        }
      } else {
        this._log.trace("Child USS report was missed");
      }
      break;
    }
    case MESSAGE_TELEMETRY_GET_CHILD_USS:
    {
      // In child process, send the requested USS report
      this.sendContentProcessUSS(message.data.id);
      break
    }
    default:
      throw new Error("Telemetry.receiveMessage: bad message name");
    }
  },
+
+ _processUUID: generateUUID(),
+
+ sendContentProcessUSS: function sendContentProcessUSS(aMessageId) {
+ this._log.trace("sendContentProcessUSS");
+
+ let mgr;
+ try {
+ mgr = Cc["@mozilla.org/memory-reporter-manager;1"].
+ getService(Ci.nsIMemoryReporterManager);
+ } catch (e) {
+ // OK to skip memory reporters in xpcshell
+ return;
+ }
+
+ cpmm.sendAsyncMessage(
+ MESSAGE_TELEMETRY_USS,
+ {bytes: mgr.residentUnique, id: aMessageId}
+ );
+ },
+
+ sendContentProcessPing: function sendContentProcessPing(reason) {
+ this._log.trace("sendContentProcessPing - Reason " + reason);
+ const isSubsession = !this._isClassicReason(reason);
+ let payload = this.getSessionPayload(reason, isSubsession);
+ payload.childUUID = this._processUUID;
+ cpmm.sendAsyncMessage(MESSAGE_TELEMETRY_PAYLOAD, payload);
+ },
+
+ sendContentProcessThreadHangs: function sendContentProcessThreadHangs() {
+ this._log.trace("sendContentProcessThreadHangs");
+ let payload = {
+ childUUID: this._processUUID,
+ hangs: Telemetry.threadHangStats,
+ };
+ cpmm.sendAsyncMessage(MESSAGE_TELEMETRY_THREAD_HANGS, payload);
+ },
+
  /**
   * Save both the "saved-session" and the "shutdown" pings to disk.
   * This needs to be called after TelemetrySend shuts down otherwise pings
   * would be sent instead of getting persisted to disk.
   * @return {Promise} Resolved when both pings (when applicable) have been
   *                   submitted; individual failures are logged, not thrown.
   */
  saveShutdownPings: function() {
    this._log.trace("saveShutdownPings");

    // We don't wait for "shutdown" pings to be written to disk before gathering the
    // "saved-session" payload. Instead we append the promises to this list and wait
    // on both to be saved after kicking off their collection.
    let p = [];

    if (IS_UNIFIED_TELEMETRY) {
      let shutdownPayload = this.getSessionPayload(REASON_SHUTDOWN, false);

      let options = {
        addClientId: true,
        addEnvironment: true,
      };
      p.push(TelemetryController.submitExternalPing(getPingType(shutdownPayload), shutdownPayload, options)
                                .catch(e => this._log.error("saveShutdownPings - failed to submit shutdown ping", e)));
    }

    // As a temporary measure, we want to submit saved-session too if extended Telemetry is enabled
    // to keep existing performance analysis working.
    if (Telemetry.canRecordExtended) {
      let payload = this.getSessionPayload(REASON_SAVED_SESSION, false);

      let options = {
        addClientId: true,
        addEnvironment: true,
      };
      p.push(TelemetryController.submitExternalPing(getPingType(payload), payload, options)
                                .catch (e => this._log.error("saveShutdownPings - failed to submit saved-session ping", e)));
    }

    // Wait on pings to be saved.
    return Promise.all(p);
  },
+
+
+ testSavePendingPing: function testSaveHistograms() {
+ this._log.trace("testSaveHistograms");
+ let payload = this.getSessionPayload(REASON_SAVED_SESSION, false);
+ let options = {
+ addClientId: true,
+ addEnvironment: true,
+ overwrite: true,
+ };
+ return TelemetryController.addPendingPing(getPingType(payload), payload, options);
+ },
+
+ /**
+ * Do some shutdown work that is common to all process types.
+ */
+ uninstall: function uninstall() {
+ this.detachObservers();
+ if (this._hasWindowRestoredObserver) {
+ Services.obs.removeObserver(this, "sessionstore-windows-restored");
+ this._hasWindowRestoredObserver = false;
+ }
+ if (this._hasXulWindowVisibleObserver) {
+ Services.obs.removeObserver(this, "xul-window-visible");
+ this._hasXulWindowVisibleObserver = false;
+ }
+ if (AppConstants.platform === "android") {
+ Services.obs.removeObserver(this, "application-background", false);
+ }
+ GCTelemetry.shutdown();
+ },
+
+ getPayload: function getPayload(reason, clearSubsession) {
+ this._log.trace("getPayload - clearSubsession: " + clearSubsession);
+ reason = reason || REASON_GATHER_PAYLOAD;
+ // This function returns the current Telemetry payload to the caller.
+ // We only gather startup info once.
+ if (Object.keys(this._slowSQLStartup).length == 0) {
+ this._slowSQLStartup = Telemetry.slowSQL;
+ }
+ this.gatherMemory();
+ return this.getSessionPayload(reason, clearSubsession);
+ },
+
  /**
   * Collects thread hang stats from all content processes.
   * @return {Promise<Array>} Resolved with one hang-stats entry per child
   *         that replied; children that failed to reply before the timeout
   *         are simply omitted.
   */
  getChildThreadHangs: function getChildThreadHangs() {
    return new Promise((resolve) => {
      // Return immediately if there are no child processes to get stats from
      if (ppmm.childCount === 0) {
        resolve([]);
        return;
      }

      // Register our promise so it will be resolved when we receive the child thread hang stats on the parent process
      // The resolve functions will all be called from "receiveMessage" when a MESSAGE_TELEMETRY_THREAD_HANGS message comes in
      this._childThreadHangsResolveFunctions.push((threadHangStats) => {
        let hangs = threadHangStats.map(child => child.hangs);
        return resolve(hangs);
      });

      // If we (the parent) are not currently in the process of requesting child thread hangs, request them
      // If we are, then the resolve function we registered above will receive the results without needing to request them again
      if (this._childThreadHangsResolveFunctions.length === 1) {
        // We have to cache the number of children we send messages to, in case the child count changes while waiting for messages to arrive
        // This handles the case where the child count increases later on, in which case the new processes won't respond since we never sent messages to them
        this._childCount = ppmm.childCount;

        this._childThreadHangs = []; // Clear the child hangs
        for (let i = 0; i < this._childCount; i++) {
          // If a child dies at exactly while we're running this loop, the message sending will fail but we won't get an exception
          // In this case, since we won't know this has happened, we will simply rely on the timeout to handle it
          ppmm.getChildAt(i).sendAsyncMessage(MESSAGE_TELEMETRY_GET_CHILD_THREAD_HANGS);
        }

        // Set up a timeout in case one or more of the content processes never responds
        this._childThreadHangsTimeout = setTimeout(() => {
          // Resolve all the promises that are waiting on these thread hang stats
          // We resolve here instead of rejecting because the purpose of this function is
          // to retrieve the BHR stats from all processes that will give us stats
          // As a result, one process failing simply means it doesn't get included in the result.
          for (let resolve of this._childThreadHangsResolveFunctions) {
            resolve(this._childThreadHangs);
          }
          this._childThreadHangsResolveFunctions = [];
        }, 200);
      }
    });
  },
+
+ gatherStartup: function gatherStartup() {
+ this._log.trace("gatherStartup");
+ let counters = processInfo.getCounters();
+ if (counters) {
+ [this._startupIO.startupSessionRestoreReadBytes,
+ this._startupIO.startupSessionRestoreWriteBytes] = counters;
+ }
+ this._slowSQLStartup = Telemetry.slowSQL;
+ },
+
+ setAddOns: function setAddOns(aAddOns) {
+ this._addons = aAddOns;
+ },
+
+ testPing: function testPing() {
+ return this.send(REASON_TEST_PING);
+ },
+
  /**
   * This observer drives telemetry: it reacts to process shutdown, cycle
   * collection, window visibility/restoration, the daily idle notification
   * and (on Android) backgrounding.
   * @param {nsISupports} aSubject - Notification subject (unused here).
   * @param {String} aTopic - The notification topic being dispatched on.
   * @param {String} aData - Topic-specific data (unused here).
   */
  observe: function (aSubject, aTopic, aData) {
    // Prevent the cycle collector begin topic from cluttering the log.
    if (aTopic != TOPIC_CYCLE_COLLECTOR_BEGIN) {
      this._log.trace("observe - " + aTopic + " notified.");
    }

    switch (aTopic) {
    case "content-child-shutdown":
      // content-child-shutdown is only registered for content processes.
      Services.obs.removeObserver(this, "content-child-shutdown");
      this.uninstall();
      Telemetry.flushBatchedChildTelemetry();
      this.sendContentProcessPing(REASON_SAVED_SESSION);
      break;
    case TOPIC_CYCLE_COLLECTOR_BEGIN:
      // Gather memory stats, throttled to at most once per TELEMETRY_INTERVAL.
      let now = new Date();
      if (!gLastMemoryPoll
          || (TELEMETRY_INTERVAL <= now - gLastMemoryPoll)) {
        gLastMemoryPoll = now;
        this.gatherMemory();
      }
      break;
    case "xul-window-visible":
      // One-shot: record I/O counters at the moment the first window shows.
      Services.obs.removeObserver(this, "xul-window-visible");
      this._hasXulWindowVisibleObserver = false;
      var counters = processInfo.getCounters();
      if (counters) {
        [this._startupIO.startupWindowVisibleReadBytes,
          this._startupIO.startupWindowVisibleWriteBytes] = counters;
      }
      break;
    case "sessionstore-windows-restored":
      // One-shot: startup is effectively complete at this point.
      Services.obs.removeObserver(this, "sessionstore-windows-restored");
      this._hasWindowRestoredObserver = false;
      // Check whether debugger was attached during startup
      let debugService = Cc["@mozilla.org/xpcom/debug;1"].getService(Ci.nsIDebug2);
      gWasDebuggerAttached = debugService.isDebuggerAttached;
      this.gatherStartup();
      break;
    case "idle-daily":
      // Enqueue to main-thread, otherwise components may be inited by the
      // idle-daily category and miss the gather-telemetry notification.
      Services.tm.mainThread.dispatch((function() {
        // Notify that data should be gathered now.
        // TODO: We are keeping this behaviour for now but it will be removed as soon as
        // bug 1127907 lands.
        Services.obs.notifyObservers(null, "gather-telemetry", null);
      }).bind(this), Ci.nsIThread.DISPATCH_NORMAL);
      break;

    case "application-background":
      if (AppConstants.platform !== "android") {
        break;
      }
      // On Android, we can get killed without warning once we are in the background,
      // but we may also submit data and/or come back into the foreground without getting
      // killed. To deal with this, we save the current session data to file when we are
      // put into the background. This handles the following post-backgrounding scenarios:
      // 1) We are killed immediately. In this case the current session data (which we
      //    save to a file) will be loaded and submitted on a future run.
      // 2) We submit the data while in the background, and then are killed. In this case
      //    the file that we saved will be deleted by the usual process in
      //    finishPingRequest after it is submitted.
      // 3) We submit the data, and then come back into the foreground. Same as case (2).
      // 4) We do not submit the data, but come back into the foreground. In this case
      //    we have the option of either deleting the file that we saved (since we will either
      //    send the live data while in the foreground, or create the file again on the next
      //    backgrounding), or not (in which case we will delete it on submit, or overwrite
      //    it on the next backgrounding). Not deleting it is faster, so that's what we do.
      let payload = this.getSessionPayload(REASON_SAVED_SESSION, false);
      let options = {
        addClientId: true,
        addEnvironment: true,
        overwrite: true,
      };
      TelemetryController.addPendingPing(getPingType(payload), payload, options);
      break;
    }
    return undefined;
  },
+
  /**
   * This tells TelemetrySession to uninitialize and save any pending pings.
   * @return {Promise} Resolved when cleanup (and ping saving) has finished,
   *                   or immediately if initialization was never started.
   */
  shutdownChromeProcess: function() {
    this._log.trace("shutdownChromeProcess");

    let cleanup = () => {
      if (IS_UNIFIED_TELEMETRY) {
        TelemetryEnvironment.unregisterChangeListener(ENVIRONMENT_CHANGE_LISTENER);
        TelemetryScheduler.shutdown();
      }
      this.uninstall();

      let reset = () => {
        this._initStarted = false;
        this._initialized = false;
      };

      return Task.spawn(function*() {
        yield this.saveShutdownPings();

        // A clean shutdown means the previously saved aborted-session ping
        // is obsolete.
        if (IS_UNIFIED_TELEMETRY) {
          yield TelemetryController.removeAbortedSessionPing();
        }

        reset();
      }.bind(this));
    };

    // We can be in one the following states here:
    // 1) delayedInit was never called
    // or it was called and
    // 2) _delayedInitTask is running now.
    // 3) _delayedInitTask finished running already.

    // This handles 1).
    if (!this._initStarted) {
      return Promise.resolve();
    }

    // This handles 3).
    if (!this._delayedInitTask) {
      // We already ran the delayed initialization.
      return cleanup();
    }

    // This handles 2): wait for delayed init to finish, then clean up.
    return this._delayedInitTask.then(cleanup);
  },
+
+ /**
+ * Gather and send a daily ping.
+ * @return {Promise} Resolved when the ping is sent.
+ */
+ _sendDailyPing: function() {
+ this._log.trace("_sendDailyPing");
+ let payload = this.getSessionPayload(REASON_DAILY, true);
+
+ let options = {
+ addClientId: true,
+ addEnvironment: true,
+ };
+
+ let promise = TelemetryController.submitExternalPing(getPingType(payload), payload, options);
+
+ // Also save the payload as an aborted session. If we delay this, aborted-session can
+ // lag behind for the profileSubsessionCounter and other state, complicating analysis.
+ if (IS_UNIFIED_TELEMETRY) {
+ this._saveAbortedSessionPing(payload)
+ .catch(e => this._log.error("_sendDailyPing - Failed to save the aborted session ping", e));
+ }
+
+ return promise;
+ },
+
  /** Loads session data from the session data file.
   * @return {Promise<object>} A promise which is resolved with an object when
   *                           loading has completed, with null otherwise.
   */
  _loadSessionData: Task.async(function* () {
    let data = yield TelemetryStorage.loadSessionData();

    if (!data) {
      return null;
    }

    // Validate the loaded state: all three fields are required for the
    // subsession bookkeeping to stay consistent across restarts.
    if (!("profileSubsessionCounter" in data) ||
        !(typeof(data.profileSubsessionCounter) == "number") ||
        !("subsessionId" in data) || !("sessionId" in data)) {
      this._log.error("_loadSessionData - session data is invalid");
      Telemetry.getHistogramById("TELEMETRY_SESSIONDATA_FAILED_VALIDATION").add(1);
      return null;
    }

    this._previousSessionId = data.sessionId;
    this._previousSubsessionId = data.subsessionId;
    // Add |_subsessionCounter| to the |_profileSubsessionCounter| to account for
    // new subsessions created while loading still takes place. This will always be
    // exactly 1 - the current subsession.
    this._profileSubsessionCounter = data.profileSubsessionCounter +
                                     this._subsessionCounter;
    return data;
  }),
+
+ /**
+ * Get the session data object to serialise to disk.
+ */
+ _getSessionDataObject: function() {
+ return {
+ sessionId: this._sessionId,
+ subsessionId: this._subsessionId,
+ profileSubsessionCounter: this._profileSubsessionCounter,
+ };
+ },
+
+ _onEnvironmentChange: function(reason, oldEnvironment) {
+ this._log.trace("_onEnvironmentChange", reason);
+
+ let now = Policy.monotonicNow();
+ let timeDelta = now - this._lastEnvironmentChangeDate;
+ if (timeDelta <= CHANGE_THROTTLE_INTERVAL_MS) {
+ this._log.trace(`_onEnvironmentChange - throttling; last change was ${Math.round(timeDelta / 1000)}s ago.`);
+ return;
+ }
+
+ this._lastEnvironmentChangeDate = now;
+ let payload = this.getSessionPayload(REASON_ENVIRONMENT_CHANGE, true);
+ TelemetryScheduler.reschedulePings(REASON_ENVIRONMENT_CHANGE, payload);
+
+ let options = {
+ addClientId: true,
+ addEnvironment: true,
+ overrideEnvironment: oldEnvironment,
+ };
+ TelemetryController.submitExternalPing(getPingType(payload), payload, options);
+ },
+
+ _isClassicReason: function(reason) {
+ const classicReasons = [
+ REASON_SAVED_SESSION,
+ REASON_GATHER_PAYLOAD,
+ REASON_TEST_PING,
+ ];
+ return classicReasons.includes(reason);
+ },
+
+ /**
+ * Get an object describing the current state of this module for AsyncShutdown diagnostics.
+ */
+ _getState: function() {
+ return {
+ initialized: this._initialized,
+ initStarted: this._initStarted,
+ haveDelayedInitTask: !!this._delayedInitTask,
+ };
+ },
+
+ /**
+ * Saves the aborted session ping to disk.
+ * @param {Object} [aProvidedPayload=null] A payload object to be used as an aborted
+ * session ping. The reason of this payload is changed to aborted-session.
+ * If not provided, a new payload is gathered.
+ */
+ _saveAbortedSessionPing: function(aProvidedPayload = null) {
+ this._log.trace("_saveAbortedSessionPing");
+
+ let payload = null;
+ if (aProvidedPayload) {
+ payload = Cu.cloneInto(aProvidedPayload, myScope);
+ // Overwrite the original reason.
+ payload.info.reason = REASON_ABORTED_SESSION;
+ } else {
+ payload = this.getSessionPayload(REASON_ABORTED_SESSION, false);
+ }
+
+ return TelemetryController.saveAbortedSessionPing(payload);
+ },
+};
diff --git a/toolkit/components/telemetry/TelemetryStartup.js b/toolkit/components/telemetry/TelemetryStartup.js
new file mode 100644
index 000000000..28041b36b
--- /dev/null
+++ b/toolkit/components/telemetry/TelemetryStartup.js
@@ -0,0 +1,49 @@
+/* -*- js-indent-level: 2; indent-tabs-mode: nil -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+"use strict";
+
+const {classes: Cc, interfaces: Ci, utils: Cu} = Components;
+
+Cu.import("resource://gre/modules/XPCOMUtils.jsm", this);
+
+XPCOMUtils.defineLazyModuleGetter(this, "TelemetryController",
+ "resource://gre/modules/TelemetryController.jsm");
+XPCOMUtils.defineLazyModuleGetter(this, "TelemetryEnvironment",
+ "resource://gre/modules/TelemetryEnvironment.jsm");
+
/**
 * TelemetryStartup is needed to forward the "profile-after-change" notification
 * to TelemetryController.jsm.
 */
function TelemetryStartup() {
}

TelemetryStartup.prototype.classID = Components.ID("{117b219f-92fe-4bd2-a21b-95a342a9d474}");
TelemetryStartup.prototype.QueryInterface = XPCOMUtils.generateQI([Components.interfaces.nsIObserver]);
TelemetryStartup.prototype.observe = function(aSubject, aTopic, aData) {
  // "profile-after-change" is received in the main process, "app-startup"
  // in content processes (see TelemetryStartup.manifest).
  if (aTopic == "profile-after-change" || aTopic == "app-startup") {
    TelemetryController.observe(null, aTopic, null);
  }
  if (aTopic == "profile-after-change") {
    // Keep crash-report annotations in sync with the telemetry environment.
    annotateEnvironment();
    TelemetryEnvironment.registerChangeListener("CrashAnnotator", annotateEnvironment);
    TelemetryEnvironment.onInitialized().then(() => annotateEnvironment());
  }
}; // Added the missing statement-terminating semicolon on this assignment.
+
/**
 * Annotates crash reports with a JSON serialization of the current telemetry
 * environment, so crashes can be correlated with it. Failures (e.g. crash
 * reporting not built in or disabled) are silently ignored.
 */
function annotateEnvironment() {
  try {
    let reporterClass = Cc["@mozilla.org/toolkit/crash-reporter;1"];
    if (reporterClass) {
      let serialized = JSON.stringify(TelemetryEnvironment.currentEnvironment);
      reporterClass.getService(Ci.nsICrashReporter)
                   .annotateCrashReport("TelemetryEnvironment", serialized);
    }
  } catch (e) {
    // Crash reporting is not built in or is disabled; ignore.
  }
}
+
+this.NSGetFactory = XPCOMUtils.generateNSGetFactory([TelemetryStartup]);
diff --git a/toolkit/components/telemetry/TelemetryStartup.manifest b/toolkit/components/telemetry/TelemetryStartup.manifest
new file mode 100644
index 000000000..f1638530b
--- /dev/null
+++ b/toolkit/components/telemetry/TelemetryStartup.manifest
@@ -0,0 +1,4 @@
+component {117b219f-92fe-4bd2-a21b-95a342a9d474} TelemetryStartup.js
+contract @mozilla.org/base/telemetry-startup;1 {117b219f-92fe-4bd2-a21b-95a342a9d474}
+category profile-after-change TelemetryStartup @mozilla.org/base/telemetry-startup;1 process=main
+category app-startup TelemetryStartup @mozilla.org/base/telemetry-startup;1 process=content
diff --git a/toolkit/components/telemetry/TelemetryStopwatch.jsm b/toolkit/components/telemetry/TelemetryStopwatch.jsm
new file mode 100644
index 000000000..ab6c6eafb
--- /dev/null
+++ b/toolkit/components/telemetry/TelemetryStopwatch.jsm
@@ -0,0 +1,335 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+const Cc = Components.classes;
+const Ci = Components.interfaces;
+const Cu = Components.utils;
+
+this.EXPORTED_SYMBOLS = ["TelemetryStopwatch"];
+
+Cu.import("resource://gre/modules/Log.jsm", this);
+var Telemetry = Cc["@mozilla.org/base/telemetry;1"]
+ .getService(Ci.nsITelemetry);
+
// Weak map does not allow using null objects as keys. These objects are used
// as 'null' placeholders.
const NULL_OBJECT = {};
const NULL_KEY = {};

/**
 * Timers is a variation of a Map used for storing information about running
 * Stopwatches. Timers has the following data structure:
 *
 * {
 *    "HISTOGRAM_NAME": WeakMap {
 *      Object || NULL_OBJECT: Map {
 *        "KEY" || NULL_KEY: startTime
 *        ...
 *      }
 *      ...
 *    }
 *    ...
 * }
 *
 *
 * @example
 * // Stores current time for a keyed histogram "PLAYING_WITH_CUTE_ANIMALS".
 * Timers.put("PLAYING_WITH_CUTE_ANIMALS", null, "CATS", Date.now());
 *
 * @example
 * // Returns information about a simple Stopwatch.
 * let startTime = Timers.get("PLAYING_WITH_CUTE_ANIMALS", null, "CATS");
 */
let Timers = {
  _timers: new Map(),

  /**
   * Checks that a histogram/object/key triple is usable as a timer identity.
   * @return {Boolean} True if the types are valid.
   */
  _validTypes: function(histogram, obj, key) {
    // "" has length 0, so a single length check suffices for non-emptiness.
    let nonEmptyString = value => {
      return typeof value === "string" && value.length > 0;
    };
    return nonEmptyString(histogram) &&
           typeof obj === "object" &&
           (key === NULL_KEY || nonEmptyString(key));
  },

  /**
   * Returns the stored start time, or null if no such timer exists.
   */
  get: function(histogram, obj, key) {
    key = key === null ? NULL_KEY : key;
    obj = obj || NULL_OBJECT;

    if (!this.has(histogram, obj, key)) {
      return null;
    }

    return this._timers.get(histogram).get(obj).get(key);
  },

  /**
   * Stores a start time for the given histogram/object/key.
   * @return {Boolean} False if the identifying types are invalid.
   */
  put: function(histogram, obj, key, startTime) {
    key = key === null ? NULL_KEY : key;
    obj = obj || NULL_OBJECT;

    if (!this._validTypes(histogram, obj, key)) {
      return false;
    }

    let objectMap = this._timers.get(histogram) || new WeakMap();
    let keyedInfo = objectMap.get(obj) || new Map();
    keyedInfo.set(key, startTime);
    objectMap.set(obj, keyedInfo);
    this._timers.set(histogram, objectMap);
    return true;
  },

  /**
   * Checks whether a timer exists for the given histogram/object/key.
   */
  has: function(histogram, obj, key) {
    key = key === null ? NULL_KEY : key;
    obj = obj || NULL_OBJECT;

    return this._timers.has(histogram) &&
           this._timers.get(histogram).has(obj) &&
           this._timers.get(histogram).get(obj).has(key);
  },

  /**
   * Removes a stored timer.
   * @return {Boolean} True if a timer existed and was removed.
   */
  delete: function(histogram, obj, key) {
    key = key === null ? NULL_KEY : key;
    obj = obj || NULL_OBJECT;

    if (!this.has(histogram, obj, key)) {
      return false;
    }
    let objectMap = this._timers.get(histogram);
    let keyedInfo = objectMap.get(obj);
    if (keyedInfo.size > 1) {
      keyedInfo.delete(key);
      return true;
    }
    objectMap.delete(obj);
    // NOTE:
    // We never delete empty objectMaps from this._timers because there is no
    // nice solution for tracking the number of objects in a WeakMap.
    // WeakMap is not enumerable, so we can't deterministically say when it's
    // empty. We accept that trade-off here, given that entries for short-lived
    // objects will go away when they are no longer referenced
    return true;
  }
};
+
this.TelemetryStopwatch = {
  /**
   * Starts a timer associated with a telemetry histogram. The timer can be
   * directly associated with a histogram, or with a pair of a histogram and
   * an object.
   *
   * @param {String} aHistogram - a string which must be a valid histogram name.
   *
   * @param {Object} aObj - Optional parameter. If specified, the timer is
   *                        associated with this object, meaning that multiple
   *                        timers for the same histogram may be run
   *                        concurrently, as long as they are associated with
   *                        different objects.
   *
   * @returns {Boolean} True if the timer was successfully started, false
   *                    otherwise. If a timer already exists, it can't be
   *                    started again, and the existing one will be cleared in
   *                    order to avoid measurement errors.
   */
  start: function(aHistogram, aObj) {
    return TelemetryStopwatchImpl.start(aHistogram, aObj, null);
  },

  /**
   * Deletes the timer associated with a telemetry histogram. The timer can be
   * directly associated with a histogram, or with a pair of a histogram and
   * an object. Important: Only use this method when a legitimate cancellation
   * should be done.
   *
   * @param {String} aHistogram - a string which must be a valid histogram name.
   *
   * @param {Object} aObj - Optional parameter. If specified, the timer is
   *                        associated with this object, meaning that multiple
   *                        timers for the same histogram may be run
   *                        concurrently, as long as they are associated with
   *                        different objects.
   *
   * @returns {Boolean} True if the timer exists and it was cleared, False
   *                    otherwise.
   */
  cancel: function(aHistogram, aObj) {
    return TelemetryStopwatchImpl.cancel(aHistogram, aObj, null);
  },

  /**
   * Returns the elapsed time for a particular stopwatch. Primarily for
   * debugging purposes. Must be called prior to finish.
   *
   * @param {String} aHistogram - a string which must be a valid histogram name.
   *                              If an invalid name is given, the function will
   *                              throw.
   *
   * @param (Object) aObj - Optional parameter which associates the histogram
   *                        timer with the given object.
   *
   * @returns {Integer} time in milliseconds or -1 if the stopwatch was not
   *                    found.
   */
  timeElapsed: function(aHistogram, aObj) {
    return TelemetryStopwatchImpl.timeElapsed(aHistogram, aObj, null);
  },

  /**
   * Stops the timer associated with the given histogram (and object),
   * calculates the time delta between start and finish, and adds the value
   * to the histogram.
   *
   * @param {String} aHistogram - a string which must be a valid histogram name.
   *
   * @param {Object} aObj - Optional parameter which associates the histogram
   *                        timer with the given object.
   *
   * @returns {Boolean} True if the timer was successfully stopped and the data
   *                    was added to the histogram, False otherwise.
   */
  finish: function(aHistogram, aObj) {
    return TelemetryStopwatchImpl.finish(aHistogram, aObj, null);
  },

  /**
   * Starts a timer associated with a keyed telemetry histogram. The timer can
   * be directly associated with a histogram and its key. Similarly to
   * @see{TelemetryStopwatch.start} the histogram and its key can be associated
   * with an object. Each key may have multiple associated objects and each
   * object can be associated with multiple keys.
   *
   * @param {String} aHistogram - a string which must be a valid histogram name.
   *
   * @param {String} aKey - a string which must be a valid histogram key.
   *
   * @param {Object} aObj - Optional parameter. If specified, the timer is
   *                        associated with this object, meaning that multiple
   *                        timers for the same histogram may be run
   *                        concurrently, as long as they are associated with
   *                        different objects.
   *
   * @returns {Boolean} True if the timer was successfully started, false
   *                    otherwise. If a timer already exists, it can't be
   *                    started again, and the existing one will be cleared in
   *                    order to avoid measurement errors.
   */
  startKeyed: function(aHistogram, aKey, aObj) {
    return TelemetryStopwatchImpl.start(aHistogram, aObj, aKey);
  },

  /**
   * Deletes the timer associated with a keyed histogram. Important: Only use
   * this method when a legitimate cancellation should be done.
   *
   * @param {String} aHistogram - a string which must be a valid histogram name.
   *
   * @param {String} aKey - a string which must be a valid histogram key.
   *
   * @param {Object} aObj - Optional parameter. If specified, the timer
   *                        associated with this object is deleted.
   *
   * @return {Boolean} True if the timer exists and it was cleared, False
   *                   otherwise.
   */
  cancelKeyed: function(aHistogram, aKey, aObj) {
    return TelemetryStopwatchImpl.cancel(aHistogram, aObj, aKey);
  },

  /**
   * Returns the elapsed time for a particular stopwatch. Primarily for
   * debugging purposes. Must be called prior to finish.
   *
   * @param {String} aHistogram - a string which must be a valid histogram name.
   *
   * @param {String} aKey - a string which must be a valid histogram key.
   *
   * @param {Object} aObj - Optional parameter. If specified, the timer
   *                        associated with this object is used to calculate
   *                        the elapsed time.
   *
   * @return {Integer} time in milliseconds or -1 if the stopwatch was not
   *                   found.
   */
  timeElapsedKeyed: function(aHistogram, aKey, aObj) {
    return TelemetryStopwatchImpl.timeElapsed(aHistogram, aObj, aKey);
  },

  /**
   * Stops the timer associated with the given keyed histogram (and object),
   * calculates the time delta between start and finish, and adds the value
   * to the keyed histogram.
   *
   * @param {String} aHistogram - a string which must be a valid histogram name.
   *
   * @param {String} aKey - a string which must be a valid histogram key.
   *
   * @param {Object} aObj - optional parameter which associates the histogram
   *                        timer with the given object.
   *
   * @returns {Boolean} True if the timer was successfully stopped and the data
   *                    was added to the histogram, False otherwise.
   */
  finishKeyed: function(aHistogram, aKey, aObj) {
    return TelemetryStopwatchImpl.finish(aHistogram, aObj, aKey);
  }
};
+
this.TelemetryStopwatchImpl = {
  /**
   * Starts a stopwatch for the given histogram/object/key identity.
   * If a timer with the same identity is already running it is cleared
   * (to avoid skewed measurements) and false is returned.
   * @return {Boolean} True if a new timer was started.
   */
  start: function(histogram, object, key) {
    if (Timers.has(histogram, object, key)) {
      Timers.delete(histogram, object, key);
      Cu.reportError(`TelemetryStopwatch: key "${histogram}" was already ` +
                     "initialized");
      return false;
    }

    // Use the file's Cu alias consistently (was Components.utils.now()).
    return Timers.put(histogram, object, key, Cu.now());
  },

  /**
   * Cancels a running stopwatch.
   * @return {Boolean} True if a timer existed and was removed.
   */
  cancel: function (histogram, object, key) {
    return Timers.delete(histogram, object, key);
  },

  /**
   * Returns the milliseconds elapsed since the matching start() call,
   * or -1 if no such stopwatch exists.
   */
  timeElapsed: function(histogram, object, key) {
    let startTime = Timers.get(histogram, object, key);
    if (startTime === null) {
      Cu.reportError("TelemetryStopwatch: requesting elapsed time for " +
                     `nonexisting stopwatch. Histogram: "${histogram}", ` +
                     `key: "${key}"`);
      return -1;
    }

    try {
      // Added the missing statement-terminating semicolon on this line.
      let delta = Cu.now() - startTime;
      return Math.round(delta);
    } catch (e) {
      Cu.reportError("TelemetryStopwatch: failed to calculate elapsed time " +
                     `for Histogram: "${histogram}", key: "${key}", ` +
                     `exception: ${Log.exceptionStr(e)}`);
      return -1;
    }
  },

  /**
   * Stops a stopwatch and records the elapsed time into the (keyed)
   * histogram. The timer is always removed, even if recording fails.
   * @return {Boolean} True if the value was recorded.
   */
  finish: function(histogram, object, key) {
    let delta = this.timeElapsed(histogram, object, key);
    if (delta == -1) {
      return false;
    }

    try {
      if (key) {
        Telemetry.getKeyedHistogramById(histogram).add(key, delta);
      } else {
        Telemetry.getHistogramById(histogram).add(delta);
      }
    } catch (e) {
      Cu.reportError("TelemetryStopwatch: failed to update the Histogram " +
                     `"${histogram}", using key: "${key}", ` +
                     `exception: ${Log.exceptionStr(e)}`);
      return false;
    }

    return Timers.delete(histogram, object, key);
  }
}; // Added the missing semicolon terminating this assignment statement.
diff --git a/toolkit/components/telemetry/TelemetryStorage.jsm b/toolkit/components/telemetry/TelemetryStorage.jsm
new file mode 100644
index 000000000..91cfc993d
--- /dev/null
+++ b/toolkit/components/telemetry/TelemetryStorage.jsm
@@ -0,0 +1,1882 @@
+/* -*- js-indent-level: 2; indent-tabs-mode: nil -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+"use strict";
+
+this.EXPORTED_SYMBOLS = ["TelemetryStorage"];
+
+const Cc = Components.classes;
+const Ci = Components.interfaces;
+const Cr = Components.results;
+const Cu = Components.utils;
+
+Cu.import("resource://gre/modules/AppConstants.jsm", this);
+Cu.import("resource://gre/modules/Log.jsm");
+Cu.import("resource://gre/modules/Services.jsm", this);
+Cu.import("resource://gre/modules/XPCOMUtils.jsm", this);
+Cu.import("resource://gre/modules/osfile.jsm", this);
+Cu.import("resource://gre/modules/Task.jsm", this);
+Cu.import("resource://gre/modules/TelemetryUtils.jsm", this);
+Cu.import("resource://gre/modules/Promise.jsm", this);
+Cu.import("resource://gre/modules/Preferences.jsm", this);
+
+const LOGGER_NAME = "Toolkit.Telemetry";
+const LOGGER_PREFIX = "TelemetryStorage::";
+
+const Telemetry = Services.telemetry;
+const Utils = TelemetryUtils;
+
+// Compute the path of the pings archive on the first use.
+const DATAREPORTING_DIR = "datareporting";
+const PINGS_ARCHIVE_DIR = "archived";
+const ABORTED_SESSION_FILE_NAME = "aborted-session-ping";
+const DELETION_PING_FILE_NAME = "pending-deletion-ping";
+const SESSION_STATE_FILE_NAME = "session-state.json";
+
+XPCOMUtils.defineLazyGetter(this, "gDataReportingDir", function() {
+ return OS.Path.join(OS.Constants.Path.profileDir, DATAREPORTING_DIR);
+});
+XPCOMUtils.defineLazyGetter(this, "gPingsArchivePath", function() {
+ return OS.Path.join(gDataReportingDir, PINGS_ARCHIVE_DIR);
+});
+XPCOMUtils.defineLazyGetter(this, "gAbortedSessionFilePath", function() {
+ return OS.Path.join(gDataReportingDir, ABORTED_SESSION_FILE_NAME);
+});
+XPCOMUtils.defineLazyGetter(this, "gDeletionPingFilePath", function() {
+ return OS.Path.join(gDataReportingDir, DELETION_PING_FILE_NAME);
+});
+XPCOMUtils.defineLazyModuleGetter(this, "CommonUtils",
+ "resource://services-common/utils.js");
+// Maximum time, in milliseconds, archived pings should be retained.
+const MAX_ARCHIVED_PINGS_RETENTION_MS = 60 * 24 * 60 * 60 * 1000; // 60 days
+
+// Maximum space the archive can take on disk (in Bytes).
+const ARCHIVE_QUOTA_BYTES = 120 * 1024 * 1024; // 120 MB
+// Maximum space the outgoing pings can take on disk, for Desktop (in Bytes).
+const PENDING_PINGS_QUOTA_BYTES_DESKTOP = 15 * 1024 * 1024; // 15 MB
+// Maximum space the outgoing pings can take on disk, for Mobile (in Bytes).
+const PENDING_PINGS_QUOTA_BYTES_MOBILE = 1024 * 1024; // 1 MB
+
+// The maximum size a pending/archived ping can take on disk.
+const PING_FILE_MAXIMUM_SIZE_BYTES = 1024 * 1024; // 1 MB
+
+// This special value is submitted when the archive is outside of the quota.
+const ARCHIVE_SIZE_PROBE_SPECIAL_VALUE = 300;
+
+// This special value is submitted when the pending pings is outside of the quota, as
+// we don't know the size of the pings above the quota.
+const PENDING_PINGS_SIZE_PROBE_SPECIAL_VALUE = 17;
+
+const UUID_REGEX = /^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$/i;
+
/**
 * Error subtype thrown by |TelemetryStorage.loadPingFile| when reading the
 * ping file from disk fails.
 *
 * @param {String} [message] Human-readable description of the failure.
 * @param {Boolean} [becauseNoSuchFile] True when the failure was a missing file.
 */
function PingReadError(message="Error reading the ping file", becauseNoSuchFile = false) {
  Error.call(this, message);
  this.name = "PingReadError";
  this.message = message;
  // Capture a stack trace from a throwaway Error object.
  this.stack = new Error().stack;
  this.becauseNoSuchFile = becauseNoSuchFile;
}
PingReadError.prototype = Object.create(Error.prototype);
PingReadError.prototype.constructor = PingReadError;
+
/**
 * Error subtype thrown by |TelemetryStorage.loadPingFile| when the ping file
 * contents cannot be parsed as JSON.
 *
 * @param {String} [message] Human-readable description of the failure.
 */
function PingParseError(message="Error parsing ping content") {
  Error.call(this, message);
  this.name = "PingParseError";
  this.message = message;
  // Capture a stack trace from a throwaway Error object.
  this.stack = new Error().stack;
}
PingParseError.prototype = Object.create(Error.prototype);
PingParseError.prototype.constructor = PingParseError;
+
/**
 * This is a policy object used to override behavior for testing.
 */
var Policy = {
  now: () => new Date(),
  getArchiveQuota: () => ARCHIVE_QUOTA_BYTES,
  // BUG FIX: the previous code used `AppConstants.platform in [...]`, but the
  // `in` operator tests object keys (the array indices "0" and "1"), not
  // membership, so the mobile quota was never selected. Use Array.includes.
  getPendingPingsQuota: () => ["android", "gonk"].includes(AppConstants.platform)
                                ? PENDING_PINGS_QUOTA_BYTES_MOBILE
                                : PENDING_PINGS_QUOTA_BYTES_DESKTOP,
};
+
/**
 * Wait until every promise in |it| has settled. Rejections are swallowed, so
 * the returned promise never rejects; it fulfills with the input promises'
 * values, with undefined in place of any rejection.
 */
function waitForAll(it) {
  const swallow = () => undefined;
  const guarded = Array.from(it, p => p.catch(swallow));
  return Promise.all(guarded);
}
+
/**
 * Permanently intern the given string via the global symbol registry. Used to
 * deduplicate the ping.type strings stored in the _archivedPings map. Do not
 * pass large or temporary strings to this function - registry entries are
 * never released.
 */
function internString(str) {
  const registered = Symbol.for(str);
  return Symbol.keyFor(registered);
}
+
/**
 * Public storage API: a thin facade that delegates to TelemetryStorageImpl.
 */
this.TelemetryStorage = {
  get pingDirectoryPath() {
    return OS.Path.join(OS.Constants.Path.profileDir, "saved-telemetry-pings");
  },

  /**
   * The maximum size a ping can have, in bytes.
   */
  get MAXIMUM_PING_SIZE() {
    return PING_FILE_MAXIMUM_SIZE_BYTES;
  },
  /**
   * Shutdown & block on any outstanding async activity in this module.
   *
   * @return {Promise} Promise that is resolved when shutdown is complete.
   */
  shutdown: function() {
    return TelemetryStorageImpl.shutdown();
  },

  /**
   * Save an archived ping to disk.
   *
   * @param {object} ping The ping data to archive.
   * @return {promise} Promise that is resolved when the ping is successfully archived.
   */
  saveArchivedPing: function(ping) {
    return TelemetryStorageImpl.saveArchivedPing(ping);
  },

  /**
   * Load an archived ping from disk.
   *
   * @param {string} id The pings id.
   * @return {promise<object>} Promise that is resolved with the ping data.
   */
  loadArchivedPing: function(id) {
    return TelemetryStorageImpl.loadArchivedPing(id);
  },

  /**
   * Get a list of info on the archived pings.
   * This will scan the archive directory and grab basic data about the existing
   * pings out of their filename.
   *
   * @return {promise<sequence<object>>}
   */
  loadArchivedPingList: function() {
    return TelemetryStorageImpl.loadArchivedPingList();
  },

  /**
   * Clean the pings archive by removing old pings.
   * This will scan the archive directory.
   *
   * @return {Promise} Resolved when the cleanup task completes.
   */
  runCleanPingArchiveTask: function() {
    return TelemetryStorageImpl.runCleanPingArchiveTask();
  },

  /**
   * Run the task to enforce the pending pings quota.
   *
   * @return {Promise} Resolved when the cleanup task completes.
   */
  runEnforcePendingPingsQuotaTask: function() {
    return TelemetryStorageImpl.runEnforcePendingPingsQuotaTask();
  },

  /**
   * Run the task to remove all the pending pings (except the deletion ping).
   *
   * @return {Promise} Resolved when the pings are removed.
   */
  runRemovePendingPingsTask: function() {
    return TelemetryStorageImpl.runRemovePendingPingsTask();
  },

  /**
   * Reset the storage state in tests.
   */
  reset: function() {
    return TelemetryStorageImpl.reset();
  },

  /**
   * Test method that allows waiting on the archive clean task to finish.
   */
  testCleanupTaskPromise: function() {
    return (TelemetryStorageImpl._cleanArchiveTask || Promise.resolve());
  },

  /**
   * Test method that allows waiting on the pending pings quota task to finish.
   */
  testPendingQuotaTaskPromise: function() {
    return (TelemetryStorageImpl._enforcePendingPingsQuotaTask || Promise.resolve());
  },

  /**
   * Save a pending - outgoing - ping to disk and track it.
   *
   * @param {Object} ping The ping data.
   * @return {Promise} Resolved when the ping was saved.
   */
  savePendingPing: function(ping) {
    return TelemetryStorageImpl.savePendingPing(ping);
  },

  /**
   * Saves session data to disk.
   * @param {Object} sessionData The session data.
   * @return {Promise} Resolved when the data was saved.
   */
  saveSessionData: function(sessionData) {
    return TelemetryStorageImpl.saveSessionData(sessionData);
  },

  /**
   * Loads session data from a session data file.
   * @return {Promise<object>} Resolved with the session data in object form.
   */
  loadSessionData: function() {
    return TelemetryStorageImpl.loadSessionData();
  },

  /**
   * Load a pending ping from disk by id.
   *
   * @param {String} id The pings id.
   * @return {Promise} Resolved with the loaded ping data.
   */
  loadPendingPing: function(id) {
    return TelemetryStorageImpl.loadPendingPing(id);
  },

  /**
   * Remove a pending ping from disk by id.
   *
   * @param {String} id The pings id.
   * @return {Promise} Resolved when the ping was removed.
   */
  removePendingPing: function(id) {
    return TelemetryStorageImpl.removePendingPing(id);
  },

  /**
   * Returns a list of the currently pending pings in the format:
   * {
   * id: <string>, // The pings UUID.
   * lastModificationDate: <number>, // Timestamp of the pings last modification.
   * }
   * This populates the list by scanning the disk.
   *
   * @return {Promise<sequence>} Resolved with the ping list.
   */
  loadPendingPingList: function() {
    return TelemetryStorageImpl.loadPendingPingList();
  },

  /**
   * Returns a list of the currently pending pings in the format:
   * {
   * id: <string>, // The pings UUID.
   * lastModificationDate: <number>, // Timestamp of the pings last modification.
   * }
   * This does not scan pending pings on disk.
   *
   * @return {sequence} The current pending ping list.
   */
  getPendingPingList: function() {
    return TelemetryStorageImpl.getPendingPingList();
  },

  /**
   * Save an aborted-session ping to disk. This goes to a special location so
   * it is not picked up as a pending ping.
   *
   * @param {object} ping The ping data to save.
   * @return {promise} Promise that is resolved when the ping is successfully saved.
   */
  saveAbortedSessionPing: function(ping) {
    return TelemetryStorageImpl.saveAbortedSessionPing(ping);
  },

  /**
   * Load the aborted-session ping from disk if present.
   *
   * @return {promise<object>} Promise that is resolved with the ping data if found.
   * Otherwise returns null.
   */
  loadAbortedSessionPing: function() {
    return TelemetryStorageImpl.loadAbortedSessionPing();
  },

  /**
   * Save the deletion ping.
   * @param ping The deletion ping.
   * @return {Promise} A promise resolved when the ping is saved.
   */
  saveDeletionPing: function(ping) {
    return TelemetryStorageImpl.saveDeletionPing(ping);
  },

  /**
   * Remove the deletion ping.
   * @return {Promise} Resolved when the ping is deleted from the disk.
   */
  removeDeletionPing: function() {
    return TelemetryStorageImpl.removeDeletionPing();
  },

  /**
   * Check if the ping id identifies a deletion ping.
   */
  isDeletionPing: function(aPingId) {
    return TelemetryStorageImpl.isDeletionPing(aPingId);
  },

  /**
   * Remove the aborted-session ping if present.
   *
   * @return {promise} Promise that is resolved once the ping is removed.
   */
  removeAbortedSessionPing: function() {
    return TelemetryStorageImpl.removeAbortedSessionPing();
  },

  /**
   * Save a single ping to a file.
   *
   * @param {object} ping The content of the ping to save.
   * @param {string} file The destination file.
   * @param {bool} overwrite If |true|, the file will be overwritten if it exists,
   * if |false| the file will not be overwritten and no error will be reported if
   * the file exists.
   * @returns {promise}
   */
  savePingToFile: function(ping, file, overwrite) {
    return TelemetryStorageImpl.savePingToFile(ping, file, overwrite);
  },

  /**
   * Save a ping to its file.
   *
   * @param {object} ping The content of the ping to save.
   * @param {bool} overwrite If |true|, the file will be overwritten
   * if it exists.
   * @returns {promise}
   */
  savePing: function(ping, overwrite) {
    return TelemetryStorageImpl.savePing(ping, overwrite);
  },

  /**
   * Add a ping to the saved pings directory so that it gets saved
   * and sent along with other pings.
   *
   * @param {Object} pingData The ping object.
   * @return {Promise} A promise resolved when the ping is saved to the pings directory.
   */
  addPendingPing: function(pingData) {
    return TelemetryStorageImpl.addPendingPing(pingData);
  },

  /**
   * Remove the file for a ping
   *
   * @param {object} ping The ping.
   * @returns {promise}
   */
  cleanupPingFile: function(ping) {
    return TelemetryStorageImpl.cleanupPingFile(ping);
  },

  /**
   * The number of pending pings on disk.
   */
  get pendingPingCount() {
    return TelemetryStorageImpl.pendingPingCount;
  },

  /**
   * Loads a ping file.
   * @param {String} aFilePath The path of the ping file.
   * @param {Boolean} [aCompressed=false] If |true|, expects the file to be
   *                  lz4-compressed. Previously this flag was silently dropped,
   *                  making compressed pings unreadable through the facade.
   * @return {Promise<Object>} A promise resolved with the ping content or rejected if the
   *         ping contains invalid data.
   */
  loadPingFile: Task.async(function* (aFilePath, aCompressed = false) {
    return TelemetryStorageImpl.loadPingFile(aFilePath, aCompressed);
  }),

  /**
   * Remove FHR database files. This is temporary and will be dropped in
   * the future.
   * @return {Promise} Resolved when the database files are deleted.
   */
  removeFHRDatabase: function() {
    return TelemetryStorageImpl.removeFHRDatabase();
  },

  /**
   * Only used in tests, builds an archived ping path from the ping metadata.
   * @param {String} aPingId The ping id.
   * @param {Object} aDate The ping creation date.
   * @param {String} aType The ping type.
   * @return {String} The full path to the archived ping.
   */
  _testGetArchivedPingPath: function(aPingId, aDate, aType) {
    return getArchivedPingPath(aPingId, aDate, aType);
  },

  /**
   * Only used in tests, this helper extracts ping metadata from a given filename.
   *
   * @param fileName {String} The filename.
   * @return {Object} Null if the filename didn't match the expected form.
   * Otherwise an object with the extracted data in the form:
   * { timestamp: <number>,
   *   id: <string>,
   *   type: <string> }
   */
  _testGetArchivedPingDataFromFileName: function(aFileName) {
    return TelemetryStorageImpl._getArchivedPingDataFromFileName(aFileName);
  },

  /**
   * Only used in tests, this helper allows cleaning up the pending ping storage.
   */
  testClearPendingPings: function() {
    return TelemetryStorageImpl.runRemovePendingPingsTask();
  }
};
+
/**
 * Serialises asynchronous tasks so they never overlap. This is particularly
 * useful to serialise write access to the disk and prevent race conditions
 * from corrupting the data being written. It is used to synchronise saving
 * to the file that TelemetrySession persists its state in.
 */
function SaveSerializer() {
  this._log = Log.repository.getLoggerWithMessagePrefix(LOGGER_NAME, LOGGER_PREFIX);
  // Queue of [taskFunction, resolve, reject] triples, oldest first.
  this._queuedOperations = [];
  // True while a queued task is currently executing.
  this._queuedInProgress = false;
}
+
SaveSerializer.prototype = {
  /**
   * Enqueues an operation to a list to serialise their execution in order to prevent race
   * conditions. Useful to serialise access to disk.
   *
   * @param {Function} aFunction The task function to enqueue. It must return a promise.
   * @return {Promise} A promise resolved when the enqueued task completes.
   */
  enqueueTask: function (aFunction) {
    // Deferred pattern: capture resolve/reject so the queue entry can be
    // settled later, when _popAndPerformQueuedOperation reaches it.
    let promise = new Promise((resolve, reject) =>
      this._queuedOperations.push([aFunction, resolve, reject]));

    if (this._queuedOperations.length == 1) {
      // The queue was empty, so no drain loop is running - start one.
      this._popAndPerformQueuedOperation();
    }
    return promise;
  },

  /**
   * Make sure to flush all the pending operations.
   * @return {Promise} A promise resolved when all the pending operations have completed.
   */
  flushTasks: function () {
    // Enqueue a no-op; when it completes, everything before it has completed.
    let dummyTask = () => new Promise(resolve => resolve());
    return this.enqueueTask(dummyTask);
  },

  /**
   * Pop a task from the queue, executes it and continue to the next one.
   * This function recursively pops all the tasks.
   */
  _popAndPerformQueuedOperation: function () {
    // Bail out if there is nothing to do or a task is already running; the
    // running task's completion handlers will call back into this function.
    if (!this._queuedOperations.length || this._queuedInProgress) {
      return;
    }

    this._log.trace("_popAndPerformQueuedOperation - Performing queued operation.");
    let [func, resolve, reject] = this._queuedOperations.shift();
    let promise;

    try {
      this._queuedInProgress = true;
      promise = func();
    } catch (ex) {
      // The task threw synchronously: reject its promise and keep draining.
      this._log.warn("_popAndPerformQueuedOperation - Queued operation threw during execution. ",
                     ex);
      this._queuedInProgress = false;
      reject(ex);
      this._popAndPerformQueuedOperation();
      return;
    }

    // Tasks must return a thenable; anything else is a caller bug.
    if (!promise || typeof(promise.then) != "function") {
      let msg = "Queued operation did not return a promise: " + func;
      this._log.warn("_popAndPerformQueuedOperation - " + msg);

      this._queuedInProgress = false;
      reject(new Error(msg));
      this._popAndPerformQueuedOperation();
      return;
    }

    promise.then(result => {
        this._queuedInProgress = false;
        resolve(result);
        this._popAndPerformQueuedOperation();
      },
      error => {
        this._log.warn("_popAndPerformQueuedOperation - Failure when performing queued operation.",
                       error);
        this._queuedInProgress = false;
        reject(error);
        this._popAndPerformQueuedOperation();
      });
  },
};
+
var TelemetryStorageImpl = {
  // Lazily-created logger; see the _log getter below.
  _logger: null,
  // Used to serialize aborted session ping writes to disk.
  _abortedSessionSerializer: new SaveSerializer(),
  // Used to serialize deletion ping writes to disk.
  _deletionPingSerializer: new SaveSerializer(),
  // Used to serialize session state writes to disk.
  _stateSaveSerializer: new SaveSerializer(),

  // Tracks the archived pings in a Map of (id -> {timestampCreated, type}).
  // We use this to cache info on archived pings to avoid scanning the disk more than once.
  _archivedPings: new Map(),
  // A set of promises for pings currently being archived
  _activelyArchiving: new Set(),
  // Track the archive loading task to prevent multiple tasks from being executed.
  _scanArchiveTask: null,
  // Track the archive cleanup task.
  _cleanArchiveTask: null,
  // Whether we already scanned the archived pings on disk.
  _scannedArchiveDirectory: false,

  // Track the pending ping removal task.
  _removePendingPingsTask: null,

  // This tracks all the pending async ping save activity.
  _activePendingPingSaves: new Set(),

  // Tracks the pending pings in a Map of (id -> {timestampCreated, type}).
  // We use this to cache info on pending pings to avoid scanning the disk more than once.
  _pendingPings: new Map(),

  // Track the pending pings enforce quota task.
  _enforcePendingPingsQuotaTask: null,

  // Track the shutdown process to bail out of the clean up task quickly.
  // Checked by the long-running cleanup loops (e.g. _purgeOldPings,
  // _enforceArchiveQuota) so they can terminate early.
  _shutdown: false,
+
+ get _log() {
+ if (!this._logger) {
+ this._logger = Log.repository.getLoggerWithMessagePrefix(LOGGER_NAME, LOGGER_PREFIX);
+ }
+
+ return this._logger;
+ },
+
  /**
   * Shutdown & block on any outstanding async activity in this module.
   *
   * @return {Promise} Promise that is resolved when shutdown is complete.
   */
  shutdown: Task.async(function*() {
    // Set the flag first so in-flight cleanup loops bail out early.
    this._shutdown = true;

    // If the following tasks are still running, block on them. They will bail out as soon
    // as possible.
    yield this._abortedSessionSerializer.flushTasks().catch(ex => {
      this._log.error("shutdown - failed to flush aborted-session writes", ex);
    });

    yield this._deletionPingSerializer.flushTasks().catch(ex => {
      this._log.error("shutdown - failed to flush deletion ping writes", ex);
    });

    if (this._cleanArchiveTask) {
      yield this._cleanArchiveTask.catch(ex => {
        this._log.error("shutdown - the archive cleaning task failed", ex);
      });
    }

    if (this._enforcePendingPingsQuotaTask) {
      yield this._enforcePendingPingsQuotaTask.catch(ex => {
        this._log.error("shutdown - the pending pings quota task failed", ex);
      });
    }

    if (this._removePendingPingsTask) {
      yield this._removePendingPingsTask.catch(ex => {
        this._log.error("shutdown - the pending pings removal task failed", ex);
      });
    }

    // Wait on pending pings still being saved. While OS.File should have shutdown
    // blockers in place, we a) have seen weird errors being reported that might
    // indicate a bad shutdown path and b) might have completion handlers hanging
    // off the save operations that don't expect to be late in shutdown.
    yield this.promisePendingPingSaves();
  }),
+
+ /**
+ * Save an archived ping to disk.
+ *
+ * @param {object} ping The ping data to archive.
+ * @return {promise} Promise that is resolved when the ping is successfully archived.
+ */
+ saveArchivedPing: function(ping) {
+ let promise = this._saveArchivedPingTask(ping);
+ this._activelyArchiving.add(promise);
+ promise.then((r) => { this._activelyArchiving.delete(promise); },
+ (e) => { this._activelyArchiving.delete(promise); });
+ return promise;
+ },
+
  /**
   * The archiving implementation; called through |saveArchivedPing| so that
   * the in-flight operation is tracked.
   *
   * @param {object} ping The ping data to archive.
   * @return {promise} Rejected if a newer ping with the same id already exists.
   */
  _saveArchivedPingTask: Task.async(function*(ping) {
    const creationDate = new Date(ping.creationDate);
    if (this._archivedPings.has(ping.id)) {
      const data = this._archivedPings.get(ping.id);
      // Never let an older ping clobber a newer one with the same id.
      if (data.timestampCreated > creationDate.getTime()) {
        this._log.error("saveArchivedPing - trying to overwrite newer ping with the same id");
        return Promise.reject(new Error("trying to overwrite newer ping with the same id"));
      }
      this._log.warn("saveArchivedPing - overwriting older ping with the same id");
    }

    // Get the archived ping path and append the lz4 suffix to it (so we have 'jsonlz4').
    const filePath = getArchivedPingPath(ping.id, creationDate, ping.type) + "lz4";
    yield OS.File.makeDir(OS.Path.dirname(filePath), { ignoreExisting: true,
                                                       from: OS.Constants.Path.profileDir });
    yield this.savePingToFile(ping, filePath, /* overwrite*/ true, /* compressed*/ true);

    // Cache the metadata; the type is interned since it repeats across pings.
    this._archivedPings.set(ping.id, {
      timestampCreated: creationDate.getTime(),
      type: internString(ping.type),
    });

    Telemetry.getHistogramById("TELEMETRY_ARCHIVE_SESSION_PING_COUNT").add();
    return undefined;
  }),
+
  /**
   * Load an archived ping from disk.
   *
   * @param {string} id The pings id.
   * @return {promise<object>} Promise that is resolved with the ping data.
   */
  loadArchivedPing: Task.async(function*(id) {
    this._log.trace("loadArchivedPing - id: " + id);

    const data = this._archivedPings.get(id);
    if (!data) {
      this._log.trace("loadArchivedPing - no ping with id: " + id);
      return Promise.reject(new Error("TelemetryStorage.loadArchivedPing - no ping with id " + id));
    }

    const path = getArchivedPingPath(id, new Date(data.timestampCreated), data.type);
    const pathCompressed = path + "lz4";

    // Purge pings which are too big. Throws (after deleting the file) when the
    // ping exceeds PING_FILE_MAXIMUM_SIZE_BYTES.
    let checkSize = function*(path) {
      const fileSize = (yield OS.File.stat(path)).size;
      if (fileSize > PING_FILE_MAXIMUM_SIZE_BYTES) {
        Telemetry.getHistogramById("TELEMETRY_DISCARDED_ARCHIVED_PINGS_SIZE_MB")
                 .add(Math.floor(fileSize / 1024 / 1024));
        Telemetry.getHistogramById("TELEMETRY_PING_SIZE_EXCEEDED_ARCHIVED").add();
        yield OS.File.remove(path, {ignoreAbsent: true});
        throw new Error("loadArchivedPing - exceeded the maximum ping size: " + fileSize);
      }
    };

    try {
      // Try to load a compressed version of the archived ping first.
      this._log.trace("loadArchivedPing - loading ping from: " + pathCompressed);
      yield* checkSize(pathCompressed);
      return yield this.loadPingFile(pathCompressed, /* compressed*/ true);
    } catch (ex) {
      // Only a missing compressed file triggers the fallback; any other
      // failure (oversized ping, parse error) propagates to the caller.
      if (!ex.becauseNoSuchFile) {
        throw ex;
      }
      // If that fails, look for the uncompressed version.
      this._log.trace("loadArchivedPing - compressed ping not found, loading: " + path);
      yield* checkSize(path);
      return yield this.loadPingFile(path, /* compressed*/ false);
    }
  }),
+
+ /**
+ * Saves session data to disk.
+ */
+ saveSessionData: function(sessionData) {
+ return this._stateSaveSerializer.enqueueTask(() => this._saveSessionData(sessionData));
+ },
+
+ _saveSessionData: Task.async(function* (sessionData) {
+ let dataDir = OS.Path.join(OS.Constants.Path.profileDir, DATAREPORTING_DIR);
+ yield OS.File.makeDir(dataDir);
+
+ let filePath = OS.Path.join(gDataReportingDir, SESSION_STATE_FILE_NAME);
+ try {
+ yield CommonUtils.writeJSON(sessionData, filePath);
+ } catch (e) {
+ this._log.error("_saveSessionData - Failed to write session data to " + filePath, e);
+ Telemetry.getHistogramById("TELEMETRY_SESSIONDATA_FAILED_SAVE").add(1);
+ }
+ }),
+
+ /**
+ * Loads session data from the session data file.
+ * @return {Promise<Object>} A promise resolved with an object on success,
+ * with null otherwise.
+ */
+ loadSessionData: function() {
+ return this._stateSaveSerializer.enqueueTask(() => this._loadSessionData());
+ },
+
+ _loadSessionData: Task.async(function* () {
+ const dataFile = OS.Path.join(OS.Constants.Path.profileDir, DATAREPORTING_DIR,
+ SESSION_STATE_FILE_NAME);
+ let content;
+ try {
+ content = yield OS.File.read(dataFile, { encoding: "utf-8" });
+ } catch (ex) {
+ this._log.info("_loadSessionData - can not load session data file", ex);
+ Telemetry.getHistogramById("TELEMETRY_SESSIONDATA_FAILED_LOAD").add(1);
+ return null;
+ }
+
+ let data;
+ try {
+ data = JSON.parse(content);
+ } catch (ex) {
+ this._log.error("_loadSessionData - failed to parse session data", ex);
+ Telemetry.getHistogramById("TELEMETRY_SESSIONDATA_FAILED_PARSE").add(1);
+ return null;
+ }
+
+ return data;
+ }),
+
+ /**
+ * Remove an archived ping from disk.
+ *
+ * @param {string} id The pings id.
+ * @param {number} timestampCreated The pings creation timestamp.
+ * @param {string} type The pings type.
+ * @return {promise<object>} Promise that is resolved when the pings is removed.
+ */
+ _removeArchivedPing: Task.async(function*(id, timestampCreated, type) {
+ this._log.trace("_removeArchivedPing - id: " + id + ", timestampCreated: " + timestampCreated + ", type: " + type);
+ const path = getArchivedPingPath(id, new Date(timestampCreated), type);
+ const pathCompressed = path + "lz4";
+
+ this._log.trace("_removeArchivedPing - removing ping from: " + path);
+ yield OS.File.remove(path, {ignoreAbsent: true});
+ yield OS.File.remove(pathCompressed, {ignoreAbsent: true});
+ // Remove the ping from the cache.
+ this._archivedPings.delete(id);
+ }),
+
+ /**
+ * Clean the pings archive by removing old pings.
+ *
+ * @return {Promise} Resolved when the cleanup task completes.
+ */
+ runCleanPingArchiveTask: function() {
+ // If there's an archive cleaning task already running, return it.
+ if (this._cleanArchiveTask) {
+ return this._cleanArchiveTask;
+ }
+
+ // Make sure to clear |_cleanArchiveTask| once done.
+ let clear = () => this._cleanArchiveTask = null;
+ // Since there's no archive cleaning task running, start it.
+ this._cleanArchiveTask = this._cleanArchive().then(clear, clear);
+ return this._cleanArchiveTask;
+ },
+
  /**
   * Removes pings which are too old from the pings archive.
   * @return {Promise} Resolved when the ping age check is complete.
   */
  _purgeOldPings: Task.async(function*() {
    this._log.trace("_purgeOldPings");

    const nowDate = Policy.now();
    const startTimeStamp = nowDate.getTime();
    let dirIterator = new OS.File.DirectoryIterator(gPingsArchivePath);
    let subdirs = (yield dirIterator.nextBatch()).filter(e => e.isDir);
    dirIterator.close();

    // Keep track of the newest removed month to update the cache, if needed.
    let newestRemovedMonthTimestamp = null;
    let evictedDirsCount = 0;
    let maxDirAgeInMonths = 0;

    // Walk through the monthly subdirs of the form <YYYY-MM>/
    for (let dir of subdirs) {
      if (this._shutdown) {
        this._log.trace("_purgeOldPings - Terminating the clean up task due to shutdown");
        return;
      }

      if (!isValidArchiveDir(dir.name)) {
        this._log.warn("_purgeOldPings - skipping invalidly named subdirectory " + dir.path);
        continue;
      }

      const archiveDate = getDateFromArchiveDir(dir.name);
      if (!archiveDate) {
        this._log.warn("_purgeOldPings - skipping invalid subdirectory date " + dir.path);
        continue;
      }

      // If this archive directory is older than the retention period
      // (MAX_ARCHIVED_PINGS_RETENTION_MS, i.e. 60 days), remove it.
      if ((startTimeStamp - archiveDate.getTime()) > MAX_ARCHIVED_PINGS_RETENTION_MS) {
        try {
          yield OS.File.removeDir(dir.path);
          evictedDirsCount++;

          // Update the newest removed month.
          newestRemovedMonthTimestamp = Math.max(archiveDate, newestRemovedMonthTimestamp);
        } catch (ex) {
          this._log.error("_purgeOldPings - Unable to remove " + dir.path, ex);
        }
      } else {
        // We're not removing this directory, so record the age for the oldest directory.
        const dirAgeInMonths = Utils.getElapsedTimeInMonths(archiveDate, nowDate);
        maxDirAgeInMonths = Math.max(dirAgeInMonths, maxDirAgeInMonths);
      }
    }

    // Trigger scanning of the archived pings.
    yield this.loadArchivedPingList();

    // Refresh the cache: we could still skip this, but it's cheap enough to keep it
    // to avoid introducing task dependencies.
    if (newestRemovedMonthTimestamp) {
      // Scan the archive cache for pings older than the newest directory pruned above.
      for (let [id, info] of this._archivedPings) {
        const timestampCreated = new Date(info.timestampCreated);
        if (timestampCreated.getTime() > newestRemovedMonthTimestamp) {
          continue;
        }
        // Remove outdated pings from the cache.
        this._archivedPings.delete(id);
      }
    }

    const endTimeStamp = Policy.now().getTime();

    // Save the time it takes to evict old directories and the eviction count.
    Telemetry.getHistogramById("TELEMETRY_ARCHIVE_EVICTED_OLD_DIRS")
             .add(evictedDirsCount);
    Telemetry.getHistogramById("TELEMETRY_ARCHIVE_EVICTING_DIRS_MS")
             .add(Math.ceil(endTimeStamp - startTimeStamp));
    Telemetry.getHistogramById("TELEMETRY_ARCHIVE_OLDEST_DIRECTORY_AGE")
             .add(maxDirAgeInMonths);
  }),
+
  /**
   * Enforce a disk quota for the pings archive.
   * @return {Promise} Resolved when the quota check is complete.
   */
  _enforceArchiveQuota: Task.async(function*() {
    this._log.trace("_enforceArchiveQuota");
    let startTimeStamp = Policy.now().getTime();

    // Build an ordered list, from newer to older, of archived pings.
    let pingList = Array.from(this._archivedPings, p => ({
      id: p[0],
      timestampCreated: p[1].timestampCreated,
      type: p[1].type,
    }));

    pingList.sort((a, b) => b.timestampCreated - a.timestampCreated);

    // If our archive is too big, we should reduce it to reach 90% of the quota.
    const SAFE_QUOTA = Policy.getArchiveQuota() * 0.9;
    // The index of the last ping to keep. Pings older than this one will be deleted if
    // the archive exceeds the quota.
    let lastPingIndexToKeep = null;
    let archiveSizeInBytes = 0;

    // Find the disk size of the archive.
    for (let i = 0; i < pingList.length; i++) {
      // The shutdown flag is set by shutdown(); abort promptly so we don't
      // block teardown with disk I/O.
      if (this._shutdown) {
        this._log.trace("_enforceArchiveQuota - Terminating the clean up task due to shutdown");
        return;
      }

      let ping = pingList[i];

      // Get the size for this ping.
      const fileSize =
        yield getArchivedPingSize(ping.id, new Date(ping.timestampCreated), ping.type);
      if (!fileSize) {
        this._log.warn("_enforceArchiveQuota - Unable to find the size of ping " + ping.id);
        continue;
      }

      // Enforce a maximum file size limit on archived pings.
      if (fileSize > PING_FILE_MAXIMUM_SIZE_BYTES) {
        this._log.error("_enforceArchiveQuota - removing file exceeding size limit, size: " + fileSize);
        // We just remove the ping from the disk, we don't bother removing it from pingList
        // since it won't contribute to the quota.
        yield this._removeArchivedPing(ping.id, ping.timestampCreated, ping.type)
                  .catch(e => this._log.error("_enforceArchiveQuota - failed to remove archived ping" + ping.id));
        Telemetry.getHistogramById("TELEMETRY_DISCARDED_ARCHIVED_PINGS_SIZE_MB")
                 .add(Math.floor(fileSize / 1024 / 1024));
        Telemetry.getHistogramById("TELEMETRY_PING_SIZE_EXCEEDED_ARCHIVED").add();
        continue;
      }

      archiveSizeInBytes += fileSize;

      if (archiveSizeInBytes < SAFE_QUOTA) {
        // We save the index of the last ping which is ok to keep in order to speed up ping
        // pruning.
        lastPingIndexToKeep = i;
      } else if (archiveSizeInBytes > Policy.getArchiveQuota()) {
        // Ouch, our ping archive is too big. Bail out and start pruning!
        break;
      }
    }

    // Save the time it takes to check if the archive is over-quota.
    Telemetry.getHistogramById("TELEMETRY_ARCHIVE_CHECKING_OVER_QUOTA_MS")
             .add(Math.round(Policy.now().getTime() - startTimeStamp));

    // Helper recording the three eviction-related probes in one call.
    let submitProbes = (sizeInMB, evictedPings, elapsedMs) => {
      Telemetry.getHistogramById("TELEMETRY_ARCHIVE_SIZE_MB").add(sizeInMB);
      Telemetry.getHistogramById("TELEMETRY_ARCHIVE_EVICTED_OVER_QUOTA").add(evictedPings);
      Telemetry.getHistogramById("TELEMETRY_ARCHIVE_EVICTING_OVER_QUOTA_MS").add(elapsedMs);
    };

    // Check if we're using too much space. If not, submit the archive size and bail out.
    if (archiveSizeInBytes < Policy.getArchiveQuota()) {
      submitProbes(Math.round(archiveSizeInBytes / 1024 / 1024), 0, 0);
      return;
    }

    this._log.info("_enforceArchiveQuota - archive size: " + archiveSizeInBytes + "bytes"
                   + ", safety quota: " + SAFE_QUOTA + "bytes");

    // Restart the clock: from here on we are timing the eviction pass.
    startTimeStamp = Policy.now().getTime();
    let pingsToPurge = pingList.slice(lastPingIndexToKeep + 1);

    // Remove all the pings older than the last one which we are safe to keep.
    for (let ping of pingsToPurge) {
      if (this._shutdown) {
        this._log.trace("_enforceArchiveQuota - Terminating the clean up task due to shutdown");
        return;
      }

      // This list is guaranteed to be in order, so remove the pings at its
      // beginning (oldest).
      yield this._removeArchivedPing(ping.id, ping.timestampCreated, ping.type);
    }

    const endTimeStamp = Policy.now().getTime();
    submitProbes(ARCHIVE_SIZE_PROBE_SPECIAL_VALUE, pingsToPurge.length,
                 Math.ceil(endTimeStamp - startTimeStamp));
  }),
+
+ _cleanArchive: Task.async(function*() {
+ this._log.trace("cleanArchiveTask");
+
+ if (!(yield OS.File.exists(gPingsArchivePath))) {
+ return;
+ }
+
+ // Remove pings older than 180 days.
+ try {
+ yield this._purgeOldPings();
+ } catch (ex) {
+ this._log.error("_cleanArchive - There was an error removing old directories", ex);
+ }
+
+ // Make sure we respect the archive disk quota.
+ yield this._enforceArchiveQuota();
+ }),
+
+ /**
+ * Run the task to enforce the pending pings quota.
+ *
+ * @return {Promise} Resolved when the cleanup task completes.
+ */
+ runEnforcePendingPingsQuotaTask: Task.async(function*() {
+ // If there's a cleaning task already running, return it.
+ if (this._enforcePendingPingsQuotaTask) {
+ return this._enforcePendingPingsQuotaTask;
+ }
+
+ // Since there's no quota enforcing task running, start it.
+ try {
+ this._enforcePendingPingsQuotaTask = this._enforcePendingPingsQuota();
+ yield this._enforcePendingPingsQuotaTask;
+ } finally {
+ this._enforcePendingPingsQuotaTask = null;
+ }
+ return undefined;
+ }),
+
+ /**
+ * Enforce a disk quota for the pending pings.
+ * @return {Promise} Resolved when the quota check is complete.
+ */
+ _enforcePendingPingsQuota: Task.async(function*() {
+ this._log.trace("_enforcePendingPingsQuota");
+ let startTimeStamp = Policy.now().getTime();
+
+ // Build an ordered list, from newer to older, of pending pings.
+ let pingList = Array.from(this._pendingPings, p => ({
+ id: p[0],
+ lastModificationDate: p[1].lastModificationDate,
+ }));
+
+ pingList.sort((a, b) => b.lastModificationDate - a.lastModificationDate);
+
+ // If our pending pings directory is too big, we should reduce it to reach 90% of the quota.
+ const SAFE_QUOTA = Policy.getPendingPingsQuota() * 0.9;
+ // The index of the last ping to keep. Pings older than this one will be deleted if
+ // the pending pings directory size exceeds the quota.
+ let lastPingIndexToKeep = null;
+ let pendingPingsSizeInBytes = 0;
+
+ // Find the disk size of the pending pings directory.
+ for (let i = 0; i < pingList.length; i++) {
+ if (this._shutdown) {
+ this._log.trace("_enforcePendingPingsQuota - Terminating the clean up task due to shutdown");
+ return;
+ }
+
+ let ping = pingList[i];
+
+ // Get the size for this ping.
+ const fileSize = yield getPendingPingSize(ping.id);
+ if (!fileSize) {
+ this._log.warn("_enforcePendingPingsQuota - Unable to find the size of ping " + ping.id);
+ continue;
+ }
+
+ pendingPingsSizeInBytes += fileSize;
+ if (pendingPingsSizeInBytes < SAFE_QUOTA) {
+ // We save the index of the last ping which is ok to keep in order to speed up ping
+ // pruning.
+ lastPingIndexToKeep = i;
+ } else if (pendingPingsSizeInBytes > Policy.getPendingPingsQuota()) {
+ // Ouch, our pending pings directory size is too big. Bail out and start pruning!
+ break;
+ }
+ }
+
+ // Save the time it takes to check if the pending pings are over-quota.
+ Telemetry.getHistogramById("TELEMETRY_PENDING_CHECKING_OVER_QUOTA_MS")
+ .add(Math.round(Policy.now().getTime() - startTimeStamp));
+
+ let recordHistograms = (sizeInMB, evictedPings, elapsedMs) => {
+ Telemetry.getHistogramById("TELEMETRY_PENDING_PINGS_SIZE_MB").add(sizeInMB);
+ Telemetry.getHistogramById("TELEMETRY_PENDING_PINGS_EVICTED_OVER_QUOTA").add(evictedPings);
+ Telemetry.getHistogramById("TELEMETRY_PENDING_EVICTING_OVER_QUOTA_MS").add(elapsedMs);
+ };
+
+ // Check if we're using too much space. If not, bail out.
+ if (pendingPingsSizeInBytes < Policy.getPendingPingsQuota()) {
+ recordHistograms(Math.round(pendingPingsSizeInBytes / 1024 / 1024), 0, 0);
+ return;
+ }
+
+ this._log.info("_enforcePendingPingsQuota - size: " + pendingPingsSizeInBytes + "bytes"
+ + ", safety quota: " + SAFE_QUOTA + "bytes");
+
+ startTimeStamp = Policy.now().getTime();
+ let pingsToPurge = pingList.slice(lastPingIndexToKeep + 1);
+
+ // Remove all the pings older than the last one which we are safe to keep.
+ for (let ping of pingsToPurge) {
+ if (this._shutdown) {
+ this._log.trace("_enforcePendingPingsQuota - Terminating the clean up task due to shutdown");
+ return;
+ }
+
+ // This list is guaranteed to be in order, so remove the pings at its
+ // beginning (oldest).
+ yield this.removePendingPing(ping.id);
+ }
+
+ const endTimeStamp = Policy.now().getTime();
+ // We don't know the size of the pending pings directory if we are above the quota,
+ // since we stop scanning once we reach the quota. We use a special value to show
+ // this condition.
+ recordHistograms(PENDING_PINGS_SIZE_PROBE_SPECIAL_VALUE, pingsToPurge.length,
+ Math.ceil(endTimeStamp - startTimeStamp));
+ }),
+
+ /**
+ * Reset the storage state in tests.
+ */
+ reset: function() {
+ this._shutdown = false;
+ this._scannedArchiveDirectory = false;
+ this._archivedPings = new Map();
+ this._scannedPendingDirectory = false;
+ this._pendingPings = new Map();
+ },
+
+ /**
+ * Get a list of info on the archived pings.
+ * This will scan the archive directory and grab basic data about the existing
+ * pings out of their filename.
+ *
+ * @return {promise<sequence<object>>}
+ */
+ loadArchivedPingList: Task.async(function*() {
+ // If there's an archive loading task already running, return it.
+ if (this._scanArchiveTask) {
+ return this._scanArchiveTask;
+ }
+
+ yield waitForAll(this._activelyArchiving);
+
+ if (this._scannedArchiveDirectory) {
+ this._log.trace("loadArchivedPingList - Archive already scanned, hitting cache.");
+ return this._archivedPings;
+ }
+
+ // Since there's no archive loading task running, start it.
+ let result;
+ try {
+ this._scanArchiveTask = this._scanArchive();
+ result = yield this._scanArchiveTask;
+ } finally {
+ this._scanArchiveTask = null;
+ }
+ return result;
+ }),
+
+ _scanArchive: Task.async(function*() {
+ this._log.trace("_scanArchive");
+
+ let submitProbes = (pingCount, dirCount) => {
+ Telemetry.getHistogramById("TELEMETRY_ARCHIVE_SCAN_PING_COUNT")
+ .add(pingCount);
+ Telemetry.getHistogramById("TELEMETRY_ARCHIVE_DIRECTORIES_COUNT")
+ .add(dirCount);
+ };
+
+ if (!(yield OS.File.exists(gPingsArchivePath))) {
+ submitProbes(0, 0);
+ return new Map();
+ }
+
+ let dirIterator = new OS.File.DirectoryIterator(gPingsArchivePath);
+ let subdirs =
+ (yield dirIterator.nextBatch()).filter(e => e.isDir).filter(e => isValidArchiveDir(e.name));
+ dirIterator.close();
+
+ // Walk through the monthly subdirs of the form <YYYY-MM>/
+ for (let dir of subdirs) {
+ this._log.trace("_scanArchive - checking in subdir: " + dir.path);
+ let pingIterator = new OS.File.DirectoryIterator(dir.path);
+ let pings = (yield pingIterator.nextBatch()).filter(e => !e.isDir);
+ pingIterator.close();
+
+ // Now process any ping files of the form "<timestamp>.<uuid>.<type>.[json|jsonlz4]".
+ for (let p of pings) {
+ // data may be null if the filename doesn't match the above format.
+ let data = this._getArchivedPingDataFromFileName(p.name);
+ if (!data) {
+ continue;
+ }
+
+ // In case of conflicts, overwrite only with newer pings.
+ if (this._archivedPings.has(data.id)) {
+ const overwrite = data.timestamp > this._archivedPings.get(data.id).timestampCreated;
+ this._log.warn("_scanArchive - have seen this id before: " + data.id +
+ ", overwrite: " + overwrite);
+ if (!overwrite) {
+ continue;
+ }
+
+ yield this._removeArchivedPing(data.id, data.timestampCreated, data.type)
+ .catch((e) => this._log.warn("_scanArchive - failed to remove ping", e));
+ }
+
+ this._archivedPings.set(data.id, {
+ timestampCreated: data.timestamp,
+ type: internString(data.type),
+ });
+ }
+ }
+
+ // Mark the archive as scanned, so we no longer hit the disk.
+ this._scannedArchiveDirectory = true;
+ // Update the ping and directories count histograms.
+ submitProbes(this._archivedPings.size, subdirs.length);
+ return this._archivedPings;
+ }),
+
+ /**
+ * Save a single ping to a file.
+ *
+ * @param {object} ping The content of the ping to save.
+ * @param {string} file The destination file.
+ * @param {bool} overwrite If |true|, the file will be overwritten if it exists,
+ * if |false| the file will not be overwritten and no error will be reported if
+ * the file exists.
+ * @param {bool} [compress=false] If |true|, the file will use lz4 compression. Otherwise no
+ * compression will be used.
+ * @returns {promise}
+ */
+ savePingToFile: Task.async(function*(ping, filePath, overwrite, compress = false) {
+ try {
+ this._log.trace("savePingToFile - path: " + filePath);
+ let pingString = JSON.stringify(ping);
+ let options = { tmpPath: filePath + ".tmp", noOverwrite: !overwrite };
+ if (compress) {
+ options.compression = "lz4";
+ }
+ yield OS.File.writeAtomic(filePath, pingString, options);
+ } catch (e) {
+ if (!e.becauseExists) {
+ throw e;
+ }
+ }
+ }),
+
+ /**
+ * Save a ping to its file.
+ *
+ * @param {object} ping The content of the ping to save.
+ * @param {bool} overwrite If |true|, the file will be overwritten
+ * if it exists.
+ * @returns {promise}
+ */
+ savePing: Task.async(function*(ping, overwrite) {
+ yield getPingDirectory();
+ let file = pingFilePath(ping);
+ yield this.savePingToFile(ping, file, overwrite);
+ return file;
+ }),
+
+ /**
+ * Add a ping to the saved pings directory so that it gets saved
+ * and sent along with other pings.
+ * Note: that the original ping file will not be modified.
+ *
+ * @param {Object} ping The ping object.
+ * @return {Promise} A promise resolved when the ping is saved to the pings directory.
+ */
+ addPendingPing: function(ping) {
+ return this.savePendingPing(ping);
+ },
+
+ /**
+ * Remove the file for a ping
+ *
+ * @param {object} ping The ping.
+ * @returns {promise}
+ */
+ cleanupPingFile: function(ping) {
+ return OS.File.remove(pingFilePath(ping));
+ },
+
+ savePendingPing: function(ping) {
+ let p = this.savePing(ping, true).then((path) => {
+ this._pendingPings.set(ping.id, {
+ path: path,
+ lastModificationDate: Policy.now().getTime(),
+ });
+ this._log.trace("savePendingPing - saved ping with id " + ping.id);
+ });
+ this._trackPendingPingSaveTask(p);
+ return p;
+ },
+
+ loadPendingPing: Task.async(function*(id) {
+ this._log.trace("loadPendingPing - id: " + id);
+ let info = this._pendingPings.get(id);
+ if (!info) {
+ this._log.trace("loadPendingPing - unknown id " + id);
+ throw new Error("TelemetryStorage.loadPendingPing - no ping with id " + id);
+ }
+
+ // Try to get the dimension of the ping. If that fails, update the histograms.
+ let fileSize = 0;
+ try {
+ fileSize = (yield OS.File.stat(info.path)).size;
+ } catch (e) {
+ if (!(e instanceof OS.File.Error) || !e.becauseNoSuchFile) {
+ throw e;
+ }
+ // Fall through and let |loadPingFile| report the error.
+ }
+
+ // Purge pings which are too big.
+ if (fileSize > PING_FILE_MAXIMUM_SIZE_BYTES) {
+ yield this.removePendingPing(id);
+ Telemetry.getHistogramById("TELEMETRY_DISCARDED_PENDING_PINGS_SIZE_MB")
+ .add(Math.floor(fileSize / 1024 / 1024));
+ Telemetry.getHistogramById("TELEMETRY_PING_SIZE_EXCEEDED_PENDING").add();
+ throw new Error("loadPendingPing - exceeded the maximum ping size: " + fileSize);
+ }
+
+ // Try to load the ping file. Update the related histograms on failure.
+ let ping;
+ try {
+ ping = yield this.loadPingFile(info.path, false);
+ } catch (e) {
+ // If we failed to load the ping, check what happened and update the histogram.
+ if (e instanceof PingReadError) {
+ Telemetry.getHistogramById("TELEMETRY_PENDING_LOAD_FAILURE_READ").add();
+ } else if (e instanceof PingParseError) {
+ Telemetry.getHistogramById("TELEMETRY_PENDING_LOAD_FAILURE_PARSE").add();
+ }
+ // Remove the ping from the cache, so we don't try to load it again.
+ this._pendingPings.delete(id);
+ // Then propagate the rejection.
+ throw e;
+ }
+
+ return ping;
+ }),
+
+ removePendingPing: function(id) {
+ let info = this._pendingPings.get(id);
+ if (!info) {
+ this._log.trace("removePendingPing - unknown id " + id);
+ return Promise.resolve();
+ }
+
+ this._log.trace("removePendingPing - deleting ping with id: " + id +
+ ", path: " + info.path);
+ this._pendingPings.delete(id);
+ return OS.File.remove(info.path).catch((ex) =>
+ this._log.error("removePendingPing - failed to remove ping", ex));
+ },
+
+ /**
+ * Track any pending ping save tasks through the promise passed here.
+ * This is needed to block on any outstanding ping save activity.
+ *
+ * @param {Object<Promise>} The save promise to track.
+ */
+ _trackPendingPingSaveTask: function (promise) {
+ let clear = () => this._activePendingPingSaves.delete(promise);
+ promise.then(clear, clear);
+ this._activePendingPingSaves.add(promise);
+ },
+
+ /**
+ * Return a promise that allows to wait on pending pings being saved.
+ * @return {Object<Promise>} A promise resolved when all the pending pings save promises
+ * are resolved.
+ */
+ promisePendingPingSaves: function () {
+ // Make sure to wait for all the promises, even if they reject. We don't need to log
+ // the failures here, as they are already logged elsewhere.
+ return waitForAll(this._activePendingPingSaves);
+ },
+
+ /**
+ * Run the task to remove all the pending pings (except the deletion ping).
+ *
+ * @return {Promise} Resolved when the pings are removed.
+ */
+ runRemovePendingPingsTask: Task.async(function*() {
+ // If we already have a pending pings removal task active, return that.
+ if (this._removePendingPingsTask) {
+ return this._removePendingPingsTask;
+ }
+
+ // Start the task to remove all pending pings. Also make sure to clear the task once done.
+ try {
+ this._removePendingPingsTask = this.removePendingPings();
+ yield this._removePendingPingsTask;
+ } finally {
+ this._removePendingPingsTask = null;
+ }
+ return undefined;
+ }),
+
+ removePendingPings: Task.async(function*() {
+ this._log.trace("removePendingPings - removing all pending pings");
+
+    // Wait on pending pings still being saved, so we don't miss removing them.
+ yield this.promisePendingPingSaves();
+
+ // Individually remove existing pings, so we don't interfere with operations expecting
+ // the pending pings directory to exist.
+ const directory = TelemetryStorage.pingDirectoryPath;
+ let iter = new OS.File.DirectoryIterator(directory);
+
+ try {
+ if (!(yield iter.exists())) {
+ this._log.trace("removePendingPings - the pending pings directory doesn't exist");
+ return;
+ }
+
+ let files = (yield iter.nextBatch()).filter(e => !e.isDir);
+ for (let file of files) {
+ try {
+ yield OS.File.remove(file.path);
+ } catch (ex) {
+ this._log.error("removePendingPings - failed to remove file " + file.path, ex);
+ continue;
+ }
+ }
+ } finally {
+ yield iter.close();
+ }
+ }),
+
+ loadPendingPingList: function() {
+ // If we already have a pending scanning task active, return that.
+ if (this._scanPendingPingsTask) {
+ return this._scanPendingPingsTask;
+ }
+
+ if (this._scannedPendingDirectory) {
+ this._log.trace("loadPendingPingList - Pending already scanned, hitting cache.");
+ return Promise.resolve(this._buildPingList());
+ }
+
+ // Since there's no pending pings scan task running, start it.
+ // Also make sure to clear the task once done.
+ this._scanPendingPingsTask = this._scanPendingPings().then(pings => {
+ this._scanPendingPingsTask = null;
+ return pings;
+ }, ex => {
+ this._scanPendingPingsTask = null;
+ throw ex;
+ });
+ return this._scanPendingPingsTask;
+ },
+
+ getPendingPingList: function() {
+ return this._buildPingList();
+ },
+
+ _scanPendingPings: Task.async(function*() {
+ this._log.trace("_scanPendingPings");
+
+ let directory = TelemetryStorage.pingDirectoryPath;
+ let iter = new OS.File.DirectoryIterator(directory);
+ let exists = yield iter.exists();
+
+ try {
+ if (!exists) {
+ return [];
+ }
+
+ let files = (yield iter.nextBatch()).filter(e => !e.isDir);
+
+ for (let file of files) {
+ if (this._shutdown) {
+ return [];
+ }
+
+ let info;
+ try {
+ info = yield OS.File.stat(file.path);
+ } catch (ex) {
+ this._log.error("_scanPendingPings - failed to stat file " + file.path, ex);
+ continue;
+ }
+
+ // Enforce a maximum file size limit on pending pings.
+ if (info.size > PING_FILE_MAXIMUM_SIZE_BYTES) {
+ this._log.error("_scanPendingPings - removing file exceeding size limit " + file.path);
+ try {
+ yield OS.File.remove(file.path);
+ } catch (ex) {
+ this._log.error("_scanPendingPings - failed to remove file " + file.path, ex);
+ } finally {
+ Telemetry.getHistogramById("TELEMETRY_DISCARDED_PENDING_PINGS_SIZE_MB")
+ .add(Math.floor(info.size / 1024 / 1024));
+ Telemetry.getHistogramById("TELEMETRY_PING_SIZE_EXCEEDED_PENDING").add();
+ continue;
+ }
+ }
+
+ let id = OS.Path.basename(file.path);
+ if (!UUID_REGEX.test(id)) {
+ this._log.trace("_scanPendingPings - filename is not a UUID: " + id);
+ id = Utils.generateUUID();
+ }
+
+ this._pendingPings.set(id, {
+ path: file.path,
+ lastModificationDate: info.lastModificationDate.getTime(),
+ });
+ }
+ } finally {
+ yield iter.close();
+ }
+
+ // Explicitly load the deletion ping from its known path, if it's there.
+ if (yield OS.File.exists(gDeletionPingFilePath)) {
+ this._log.trace("_scanPendingPings - Adding pending deletion ping.");
+ // We can't get the ping id or the last modification date without hitting the disk.
+ // Since deletion has a special handling, we don't really need those.
+ this._pendingPings.set(Utils.generateUUID(), {
+ path: gDeletionPingFilePath,
+ lastModificationDate: Date.now(),
+ });
+ }
+
+ this._scannedPendingDirectory = true;
+ return this._buildPingList();
+ }),
+
+ _buildPingList: function() {
+ const list = Array.from(this._pendingPings, p => ({
+ id: p[0],
+ lastModificationDate: p[1].lastModificationDate,
+ }));
+
+ list.sort((a, b) => b.lastModificationDate - a.lastModificationDate);
+ return list;
+ },
+
+ get pendingPingCount() {
+ return this._pendingPings.size;
+ },
+
+ /**
+ * Loads a ping file.
+ * @param {String} aFilePath The path of the ping file.
+ * @param {Boolean} [aCompressed=false] If |true|, expects the file to be compressed using lz4.
+ * @return {Promise<Object>} A promise resolved with the ping content or rejected if the
+ * ping contains invalid data.
+ * @throws {PingReadError} There was an error while reading the ping file from the disk.
+ * @throws {PingParseError} There was an error while parsing the JSON content of the ping file.
+ */
+ loadPingFile: Task.async(function* (aFilePath, aCompressed = false) {
+ let options = {};
+ if (aCompressed) {
+ options.compression = "lz4";
+ }
+
+ let array;
+ try {
+ array = yield OS.File.read(aFilePath, options);
+ } catch (e) {
+ this._log.trace("loadPingfile - unreadable ping " + aFilePath, e);
+ throw new PingReadError(e.message, e.becauseNoSuchFile);
+ }
+
+ let decoder = new TextDecoder();
+ let string = decoder.decode(array);
+ let ping;
+ try {
+ ping = JSON.parse(string);
+ } catch (e) {
+ this._log.trace("loadPingfile - unparseable ping " + aFilePath, e);
+ yield OS.File.remove(aFilePath).catch((ex) => {
+ this._log.error("loadPingFile - failed removing unparseable ping file", ex);
+ });
+ throw new PingParseError(e.message);
+ }
+
+ return ping;
+ }),
+
+ /**
+ * Archived pings are saved with file names of the form:
+ * "<timestamp>.<uuid>.<type>.[json|jsonlz4]"
+ * This helper extracts that data from a given filename.
+ *
+ * @param fileName {String} The filename.
+ * @return {Object} Null if the filename didn't match the expected form.
+ * Otherwise an object with the extracted data in the form:
+ * { timestamp: <number>,
+ * id: <string>,
+ * type: <string> }
+ */
+ _getArchivedPingDataFromFileName: function(fileName) {
+ // Extract the parts.
+ let parts = fileName.split(".");
+ if (parts.length != 4) {
+ this._log.trace("_getArchivedPingDataFromFileName - should have 4 parts");
+ return null;
+ }
+
+ let [timestamp, uuid, type, extension] = parts;
+ if (extension != "json" && extension != "jsonlz4") {
+ this._log.trace("_getArchivedPingDataFromFileName - should have 'json' or 'jsonlz4' extension");
+ return null;
+ }
+
+ // Check for a valid timestamp.
+ timestamp = parseInt(timestamp);
+ if (Number.isNaN(timestamp)) {
+ this._log.trace("_getArchivedPingDataFromFileName - should have a valid timestamp");
+ return null;
+ }
+
+ // Check for a valid UUID.
+ if (!UUID_REGEX.test(uuid)) {
+ this._log.trace("_getArchivedPingDataFromFileName - should have a valid id");
+ return null;
+ }
+
+ // Check for a valid type string.
+ const typeRegex = /^[a-z0-9][a-z0-9-]+[a-z0-9]$/i;
+ if (!typeRegex.test(type)) {
+ this._log.trace("_getArchivedPingDataFromFileName - should have a valid type");
+ return null;
+ }
+
+ return {
+ timestamp: timestamp,
+ id: uuid,
+ type: type,
+ };
+ },
+
+ saveAbortedSessionPing: Task.async(function*(ping) {
+ this._log.trace("saveAbortedSessionPing - ping path: " + gAbortedSessionFilePath);
+ yield OS.File.makeDir(gDataReportingDir, { ignoreExisting: true });
+
+ return this._abortedSessionSerializer.enqueueTask(() =>
+ this.savePingToFile(ping, gAbortedSessionFilePath, true));
+ }),
+
+ loadAbortedSessionPing: Task.async(function*() {
+ let ping = null;
+ try {
+ ping = yield this.loadPingFile(gAbortedSessionFilePath);
+ } catch (ex) {
+ if (ex.becauseNoSuchFile) {
+ this._log.trace("loadAbortedSessionPing - no such file");
+ } else {
+ this._log.error("loadAbortedSessionPing - error loading ping", ex)
+ }
+ }
+ return ping;
+ }),
+
+ removeAbortedSessionPing: function() {
+ return this._abortedSessionSerializer.enqueueTask(Task.async(function*() {
+ try {
+ yield OS.File.remove(gAbortedSessionFilePath, { ignoreAbsent: false });
+ this._log.trace("removeAbortedSessionPing - success");
+ } catch (ex) {
+ if (ex.becauseNoSuchFile) {
+ this._log.trace("removeAbortedSessionPing - no such file");
+ } else {
+ this._log.error("removeAbortedSessionPing - error removing ping", ex)
+ }
+ }
+ }.bind(this)));
+ },
+
+ /**
+ * Save the deletion ping.
+ * @param ping The deletion ping.
+ * @return {Promise} Resolved when the ping is saved.
+ */
+ saveDeletionPing: Task.async(function*(ping) {
+ this._log.trace("saveDeletionPing - ping path: " + gDeletionPingFilePath);
+ yield OS.File.makeDir(gDataReportingDir, { ignoreExisting: true });
+
+ let p = this._deletionPingSerializer.enqueueTask(() =>
+ this.savePingToFile(ping, gDeletionPingFilePath, true));
+ this._trackPendingPingSaveTask(p);
+ return p;
+ }),
+
+ /**
+ * Remove the deletion ping.
+ * @return {Promise} Resolved when the ping is deleted from the disk.
+ */
+ removeDeletionPing: Task.async(function*() {
+ return this._deletionPingSerializer.enqueueTask(Task.async(function*() {
+ try {
+ yield OS.File.remove(gDeletionPingFilePath, { ignoreAbsent: false });
+ this._log.trace("removeDeletionPing - success");
+ } catch (ex) {
+ if (ex.becauseNoSuchFile) {
+ this._log.trace("removeDeletionPing - no such file");
+ } else {
+ this._log.error("removeDeletionPing - error removing ping", ex)
+ }
+ }
+ }.bind(this)));
+ }),
+
+ isDeletionPing: function(aPingId) {
+ this._log.trace("isDeletionPing - id: " + aPingId);
+ let pingInfo = this._pendingPings.get(aPingId);
+ if (!pingInfo) {
+ return false;
+ }
+
+ if (pingInfo.path != gDeletionPingFilePath) {
+ return false;
+ }
+
+ return true;
+ },
+
+ /**
+ * Remove FHR database files. This is temporary and will be dropped in
+ * the future.
+ * @return {Promise} Resolved when the database files are deleted.
+ */
+ removeFHRDatabase: Task.async(function*() {
+ this._log.trace("removeFHRDatabase");
+
+ // Let's try to remove the FHR DB with the default filename first.
+ const FHR_DB_DEFAULT_FILENAME = "healthreport.sqlite";
+
+ // Even if it's uncommon, there may be 2 additional files: - a "write ahead log"
+ // (-wal) file and a "shared memory file" (-shm). We need to remove them as well.
+ let FILES_TO_REMOVE = [
+ OS.Path.join(OS.Constants.Path.profileDir, FHR_DB_DEFAULT_FILENAME),
+ OS.Path.join(OS.Constants.Path.profileDir, FHR_DB_DEFAULT_FILENAME + "-wal"),
+ OS.Path.join(OS.Constants.Path.profileDir, FHR_DB_DEFAULT_FILENAME + "-shm"),
+ ];
+
+ // FHR could have used either the default DB file name or a custom one
+ // through this preference.
+ const FHR_DB_CUSTOM_FILENAME =
+ Preferences.get("datareporting.healthreport.dbName", undefined);
+ if (FHR_DB_CUSTOM_FILENAME) {
+ FILES_TO_REMOVE.push(
+ OS.Path.join(OS.Constants.Path.profileDir, FHR_DB_CUSTOM_FILENAME),
+ OS.Path.join(OS.Constants.Path.profileDir, FHR_DB_CUSTOM_FILENAME + "-wal"),
+ OS.Path.join(OS.Constants.Path.profileDir, FHR_DB_CUSTOM_FILENAME + "-shm"));
+ }
+
+ for (let f of FILES_TO_REMOVE) {
+ yield OS.File.remove(f, {ignoreAbsent: true})
+ .catch(e => this._log.error("removeFHRDatabase - failed to remove " + f, e));
+ }
+ }),
+};
+
+// Utility functions
+
+function pingFilePath(ping) {
+  // Support legacy ping formats, which don't have an "id" field, but a "slug" field.
+ let pingIdentifier = (ping.slug) ? ping.slug : ping.id;
+ return OS.Path.join(TelemetryStorage.pingDirectoryPath, pingIdentifier);
+}
+
+function getPingDirectory() {
+ return Task.spawn(function*() {
+ let directory = TelemetryStorage.pingDirectoryPath;
+
+ if (!(yield OS.File.exists(directory))) {
+ yield OS.File.makeDir(directory, { unixMode: OS.Constants.S_IRWXU });
+ }
+
+ return directory;
+ });
+}
+
+/**
+ * Build the path to the archived ping.
+ * @param {String} aPingId The ping id.
+ * @param {Object} aDate The ping creation date.
+ * @param {String} aType The ping type.
+ * @return {String} The full path to the archived ping.
+ */
+function getArchivedPingPath(aPingId, aDate, aType) {
+ // Helper to pad the month to 2 digits, if needed (e.g. "1" -> "01").
+ let addLeftPadding = value => (value < 10) ? ("0" + value) : value;
+ // Get the ping creation date and generate the archive directory to hold it. Note
+ // that getMonth returns a 0-based month, so we need to add an offset.
+ let archivedPingDir = OS.Path.join(gPingsArchivePath,
+ aDate.getFullYear() + '-' + addLeftPadding(aDate.getMonth() + 1));
+ // Generate the archived ping file path as YYYY-MM/<TIMESTAMP>.UUID.type.json
+ let fileName = [aDate.getTime(), aPingId, aType, "json"].join(".");
+ return OS.Path.join(archivedPingDir, fileName);
+}
+
+/**
+ * Get the size of the ping file on the disk.
+ * @return {Integer} The file size, in bytes, of the ping file or 0 on errors.
+ */
+var getArchivedPingSize = Task.async(function*(aPingId, aDate, aType) {
+ const path = getArchivedPingPath(aPingId, aDate, aType);
+ let filePaths = [ path + "lz4", path ];
+
+ for (let path of filePaths) {
+ try {
+ return (yield OS.File.stat(path)).size;
+ } catch (e) {}
+ }
+
+ // That's odd, this ping doesn't seem to exist.
+ return 0;
+});
+
+/**
+ * Get the size of the pending ping file on the disk.
+ * @return {Integer} The file size, in bytes, of the ping file or 0 on errors.
+ */
+var getPendingPingSize = Task.async(function*(aPingId) {
+ const path = OS.Path.join(TelemetryStorage.pingDirectoryPath, aPingId)
+ try {
+ return (yield OS.File.stat(path)).size;
+ } catch (e) {}
+
+ // That's odd, this ping doesn't seem to exist.
+ return 0;
+});
+
+/**
+ * Check if a directory name is in the "YYYY-MM" format.
+ * @param {String} aDirName The name of the pings archive directory.
+ * @return {Boolean} True if the directory name is in the right format, false otherwise.
+ */
+function isValidArchiveDir(aDirName) {
+ const dirRegEx = /^[0-9]{4}-[0-9]{2}$/;
+ return dirRegEx.test(aDirName);
+}
+
+/**
+ * Gets a date object from an archive directory name.
+ * @param {String} aDirName The name of the pings archive directory. Must be in the YYYY-MM
+ * format.
+ * @return {Object} A Date object or null if the dir name is not valid.
+ */
+function getDateFromArchiveDir(aDirName) {
+ let [year, month] = aDirName.split("-");
+ year = parseInt(year);
+ month = parseInt(month);
+ // Make sure to have sane numbers.
+ if (!Number.isFinite(month) || !Number.isFinite(year) || month < 1 || month > 12) {
+ return null;
+ }
+ return new Date(year, month - 1, 1, 0, 0, 0);
+}
diff --git a/toolkit/components/telemetry/TelemetryTimestamps.jsm b/toolkit/components/telemetry/TelemetryTimestamps.jsm
new file mode 100644
index 000000000..e49d7453c
--- /dev/null
+++ b/toolkit/components/telemetry/TelemetryTimestamps.jsm
@@ -0,0 +1,54 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+this.EXPORTED_SYMBOLS = ["TelemetryTimestamps"];
+
+const Cu = Components.utils;
+
+/**
+ * This module's purpose is to collect timestamps for important
+ * application-specific events.
+ *
+ * The TelemetryController component attaches the timestamps stored by this module to
+ * the telemetry submission, subtracting the process lifetime so that the times
+ * are relative to process startup. The overall goal is to produce a basic
+ * timeline of the startup process.
+ */
+var timeStamps = {};
+
+this.TelemetryTimestamps = {
+ /**
+ * Adds a timestamp to the list. The addition of TimeStamps that already have
+ * a value stored is ignored.
+ *
+ * @param name must be a unique, generally "camelCase" descriptor of what the
+ * timestamp represents. e.g.: "delayedStartupStarted"
+ * @param value is a timeStamp in milliseconds since the epoch. If omitted,
+ * defaults to Date.now().
+ */
+ add: function TT_add(name, value) {
+ // Default to "now" if not specified
+ if (value == null)
+ value = Date.now();
+
+ if (isNaN(value))
+ throw new Error("Value must be a timestamp");
+
+ // If there's an existing value, just ignore the new value.
+ if (timeStamps.hasOwnProperty(name))
+ return;
+
+ timeStamps[name] = value;
+ },
+
+ /**
+ * Returns a JS object containing all of the timeStamps as properties (can be
+ * easily serialized to JSON). Used by TelemetryController to retrieve the data
+ * to attach to the telemetry submission.
+ */
+ get: function TT_get() {
+ // Return a copy of the object.
+ return Cu.cloneInto(timeStamps, {});
+ }
+};
diff --git a/toolkit/components/telemetry/TelemetryUtils.jsm b/toolkit/components/telemetry/TelemetryUtils.jsm
new file mode 100644
index 000000000..4d934c9c1
--- /dev/null
+++ b/toolkit/components/telemetry/TelemetryUtils.jsm
@@ -0,0 +1,152 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+"use strict";
+
+this.EXPORTED_SYMBOLS = [
+ "TelemetryUtils"
+];
+
+const {classes: Cc, interfaces: Ci, results: Cr, utils: Cu} = Components;
+
+Cu.import("resource://gre/modules/Preferences.jsm", this);
+
+const MILLISECONDS_PER_DAY = 24 * 60 * 60 * 1000;
+
+const PREF_TELEMETRY_ENABLED = "toolkit.telemetry.enabled";
+
+const IS_CONTENT_PROCESS = (function() {
+ // We cannot use Services.appinfo here because in telemetry xpcshell tests,
+ // appinfo is initially unavailable, and becomes available only later on.
+ let runtime = Cc["@mozilla.org/xre/app-info;1"].getService(Ci.nsIXULRuntime);
+ return runtime.processType == Ci.nsIXULRuntime.PROCESS_TYPE_CONTENT;
+})();
+
+this.TelemetryUtils = {
+ /**
+ * True if this is a content process.
+ */
+ get isContentProcess() {
+ return IS_CONTENT_PROCESS;
+ },
+
+ /**
+ * Returns the state of the Telemetry enabled preference, making sure
+ * it correctly evaluates to a boolean type.
+ */
+ get isTelemetryEnabled() {
+ return Preferences.get(PREF_TELEMETRY_ENABLED, false) === true;
+ },
+
+ /**
+ * Turn a millisecond timestamp into a day timestamp.
+ *
+ * @param aMsec A number of milliseconds since Unix epoch.
+ * @return The number of whole days since Unix epoch.
+ */
+ millisecondsToDays: function(aMsec) {
+ return Math.floor(aMsec / MILLISECONDS_PER_DAY);
+ },
+
+ /**
+   * Takes a date and returns it truncated to a date with daily precision.
+ */
+ truncateToDays: function(date) {
+ return new Date(date.getFullYear(),
+ date.getMonth(),
+ date.getDate(),
+ 0, 0, 0, 0);
+ },
+
+ /**
+ * Check if the difference between the times is within the provided tolerance.
+ * @param {Number} t1 A time in milliseconds.
+ * @param {Number} t2 A time in milliseconds.
+ * @param {Number} tolerance The tolerance, in milliseconds.
+ * @return {Boolean} True if the absolute time difference is within the tolerance, false
+ * otherwise.
+ */
+ areTimesClose: function(t1, t2, tolerance) {
+ return Math.abs(t1 - t2) <= tolerance;
+ },
+
+ /**
+ * Get the next midnight for a date.
+ * @param {Object} date The date object to check.
+ * @return {Object} The Date object representing the next midnight.
+ */
+ getNextMidnight: function(date) {
+ let nextMidnight = new Date(this.truncateToDays(date));
+ nextMidnight.setDate(nextMidnight.getDate() + 1);
+ return nextMidnight;
+ },
+
+ /**
+ * Get the midnight which is closer to the provided date.
+ * @param {Object} date The date object to check.
+   * @param {Number} tolerance The tolerance within which we find the closest midnight.
+   * @return {Object} The Date object representing the closest midnight, or null if midnight
+   *                  is not within the midnight tolerance.
+ */
+ getNearestMidnight: function(date, tolerance) {
+ let lastMidnight = this.truncateToDays(date);
+ if (this.areTimesClose(date.getTime(), lastMidnight.getTime(), tolerance)) {
+ return lastMidnight;
+ }
+
+ const nextMidnightDate = this.getNextMidnight(date);
+ if (this.areTimesClose(date.getTime(), nextMidnightDate.getTime(), tolerance)) {
+ return nextMidnightDate;
+ }
+ return null;
+ },
+
+ generateUUID: function() {
+ let str = Cc["@mozilla.org/uuid-generator;1"].getService(Ci.nsIUUIDGenerator).generateUUID().toString();
+ // strip {}
+ return str.substring(1, str.length - 1);
+ },
+
+ /**
+ * Find how many months passed between two dates.
+ * @param {Object} aStartDate The starting date.
+ * @param {Object} aEndDate The ending date.
+ * @return {Integer} The number of months between the two dates.
+ */
+ getElapsedTimeInMonths: function(aStartDate, aEndDate) {
+ return (aEndDate.getMonth() - aStartDate.getMonth())
+ + 12 * (aEndDate.getFullYear() - aStartDate.getFullYear());
+ },
+
+ /**
+ * Date.toISOString() gives us UTC times, this gives us local times in
+ * the ISO date format. See http://www.w3.org/TR/NOTE-datetime
+ * @param {Object} date The input date.
+ * @return {String} The local time ISO string.
+ */
+ toLocalTimeISOString: function(date) {
+ function padNumber(number, places) {
+ number = number.toString();
+ while (number.length < places) {
+ number = "0" + number;
+ }
+ return number;
+ }
+
+ let sign = (n) => n >= 0 ? "+" : "-";
+ // getTimezoneOffset counter-intuitively returns -60 for UTC+1.
+ let tzOffset = - date.getTimezoneOffset();
+
+ // YYYY-MM-DDThh:mm:ss.sTZD (eg 1997-07-16T19:20:30.45+01:00)
+ return padNumber(date.getFullYear(), 4)
+ + "-" + padNumber(date.getMonth() + 1, 2)
+ + "-" + padNumber(date.getDate(), 2)
+ + "T" + padNumber(date.getHours(), 2)
+ + ":" + padNumber(date.getMinutes(), 2)
+ + ":" + padNumber(date.getSeconds(), 2)
+ + "." + date.getMilliseconds()
+ + sign(tzOffset) + padNumber(Math.floor(Math.abs(tzOffset / 60)), 2)
+ + ":" + padNumber(Math.abs(tzOffset % 60), 2);
+ },
+};
diff --git a/toolkit/components/telemetry/ThirdPartyCookieProbe.jsm b/toolkit/components/telemetry/ThirdPartyCookieProbe.jsm
new file mode 100644
index 000000000..fedac1710
--- /dev/null
+++ b/toolkit/components/telemetry/ThirdPartyCookieProbe.jsm
@@ -0,0 +1,181 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+"use strict";
+
+var Ci = Components.interfaces;
+var Cu = Components.utils;
+var Cr = Components.results;
+
+Cu.import("resource://gre/modules/XPCOMUtils.jsm", this);
+Cu.import("resource://gre/modules/Services.jsm", this);
+
+this.EXPORTED_SYMBOLS = ["ThirdPartyCookieProbe"];
+
+const MILLISECONDS_PER_DAY = 1000 * 60 * 60 * 24;
+
+/**
+ * A probe implementing the measurements detailed at
+ * https://wiki.mozilla.org/SecurityEngineering/ThirdPartyCookies/Telemetry
+ *
+ * This implementation uses only in-memory data.
+ */
+this.ThirdPartyCookieProbe = function() {
+ /**
+ * A set of third-party sites that have caused cookies to be
+ * rejected. These sites are trimmed down to ETLD + 1
+ * (i.e. "x.y.com" and "z.y.com" are both trimmed down to "y.com",
+ * "x.y.co.uk" is trimmed down to "y.co.uk").
+ *
+ * Used to answer the following question: "For each third-party
+ * site, how many other first parties embed them and result in
+ * cookie traffic?" (see
+ * https://wiki.mozilla.org/SecurityEngineering/ThirdPartyCookies/Telemetry#Breadth
+ * )
+ *
+ * @type Map<string, RejectStats> A mapping from third-party site
+ * to rejection statistics.
+ */
+ this._thirdPartyCookies = new Map();
+ /**
+ * Timestamp of the latest call to flush() in milliseconds since the Epoch.
+ */
+ this._latestFlush = Date.now();
+};
+
+this.ThirdPartyCookieProbe.prototype = {
+ QueryInterface: XPCOMUtils.generateQI([Ci.nsIObserver]),
+ init: function() {
+ Services.obs.addObserver(this, "profile-before-change", false);
+ Services.obs.addObserver(this, "third-party-cookie-accepted", false);
+ Services.obs.addObserver(this, "third-party-cookie-rejected", false);
+ },
+ dispose: function() {
+ Services.obs.removeObserver(this, "profile-before-change");
+ Services.obs.removeObserver(this, "third-party-cookie-accepted");
+ Services.obs.removeObserver(this, "third-party-cookie-rejected");
+ },
+ /**
+ * Observe either
+ * - "profile-before-change" (no meaningful subject or data) - time to flush statistics and unregister; or
+ * - "third-party-cookie-accepted"/"third-party-cookie-rejected" with
+ * subject: the nsIURI of the third-party that attempted to set the cookie;
+ * data: a string holding the uri of the page seen by the user.
+ */
+ observe: function(docURI, topic, referrer) {
+ try {
+ if (topic == "profile-before-change") {
+ // A final flush, then unregister
+ this.flush();
+ this.dispose();
+ }
+ if (topic != "third-party-cookie-accepted"
+ && topic != "third-party-cookie-rejected") {
+ // Not a third-party cookie
+ return;
+ }
+ // Add host to this._thirdPartyCookies
+ // Note: nsCookieService passes "?" if the issuer is unknown. Avoid
+      // normalizing in this case since it's not a valid URI.
+ let firstParty = (referrer === "?") ? referrer : normalizeHost(referrer);
+ let thirdParty = normalizeHost(docURI.QueryInterface(Ci.nsIURI).host);
+ let data = this._thirdPartyCookies.get(thirdParty);
+ if (!data) {
+ data = new RejectStats();
+ this._thirdPartyCookies.set(thirdParty, data);
+ }
+ if (topic == "third-party-cookie-accepted") {
+ data.addAccepted(firstParty);
+ } else {
+ data.addRejected(firstParty);
+ }
+ } catch (ex) {
+ if (ex instanceof Ci.nsIXPCException) {
+ if (ex.result == Cr.NS_ERROR_HOST_IS_IP_ADDRESS ||
+ ex.result == Cr.NS_ERROR_INSUFFICIENT_DOMAIN_LEVELS) {
+ return;
+ }
+ }
+ // Other errors should not remain silent.
+ Services.console.logStringMessage("ThirdPartyCookieProbe: Uncaught error " + ex + "\n" + ex.stack);
+ }
+ },
+
+ /**
+ * Clear internal data, fill up corresponding histograms.
+ *
+ * @param {number} aNow (optional, used for testing purposes only)
+ * The current instant. Used to make tests time-independent.
+ */
+ flush: function(aNow = Date.now()) {
+ let updays = (aNow - this._latestFlush) / MILLISECONDS_PER_DAY;
+ if (updays <= 0) {
+ // Unlikely, but regardless, don't risk division by zero
+ // or weird stuff.
+ return;
+ }
+ this._latestFlush = aNow;
+ this._thirdPartyCookies.clear();
+ }
+};
+
+/**
+ * Data gathered on cookies that a third party site has attempted to set.
+ *
+ * Privacy note: the only data actually sent to the server is the size of
+ * the sets.
+ *
+ * @constructor
+ */
+var RejectStats = function() {
+ /**
+ * The set of all sites for which we have accepted third-party cookies.
+ */
+ this._acceptedSites = new Set();
+ /**
+ * The set of all sites for which we have rejected third-party cookies.
+ */
+ this._rejectedSites = new Set();
+ /**
+ * Total number of attempts to set a third-party cookie that have
+ * been accepted. Two accepted attempts on the same site will both
+ * augment this count.
+ */
+ this._acceptedRequests = 0;
+ /**
+ * Total number of attempts to set a third-party cookie that have
+ * been rejected. Two rejected attempts on the same site will both
+ * augment this count.
+ */
+ this._rejectedRequests = 0;
+};
+RejectStats.prototype = {
+ addAccepted: function(firstParty) {
+ this._acceptedSites.add(firstParty);
+ this._acceptedRequests++;
+ },
+ addRejected: function(firstParty) {
+ this._rejectedSites.add(firstParty);
+ this._rejectedRequests++;
+ },
+ get countAcceptedSites() {
+ return this._acceptedSites.size;
+ },
+ get countRejectedSites() {
+ return this._rejectedSites.size;
+ },
+ get countAcceptedRequests() {
+ return this._acceptedRequests;
+ },
+ get countRejectedRequests() {
+ return this._rejectedRequests;
+ }
+};
+
+/**
+ * Normalize a host to its eTLD + 1.
+ */
+function normalizeHost(host) {
+ return Services.eTLD.getBaseDomainFromHost(host);
+}
diff --git a/toolkit/components/telemetry/ThreadHangStats.h b/toolkit/components/telemetry/ThreadHangStats.h
new file mode 100644
index 000000000..60aa680c8
--- /dev/null
+++ b/toolkit/components/telemetry/ThreadHangStats.h
@@ -0,0 +1,230 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2; -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef mozilla_BackgroundHangTelemetry_h
+#define mozilla_BackgroundHangTelemetry_h
+
+#include "mozilla/Array.h"
+#include "mozilla/Assertions.h"
+#include "mozilla/HangAnnotations.h"
+#include "mozilla/Move.h"
+#include "mozilla/Mutex.h"
+#include "mozilla/PodOperations.h"
+#include "mozilla/Vector.h"
+
+#include "nsString.h"
+#include "prinrval.h"
+
+namespace mozilla {
+namespace Telemetry {
+
+static const size_t kTimeHistogramBuckets = 8 * sizeof(PRIntervalTime);
+
+/* TimeHistogram is an efficient histogram that puts time durations into
+ exponential (base 2) buckets; times are accepted in PRIntervalTime and
+ stored in milliseconds. */
+class TimeHistogram : public mozilla::Array<uint32_t, kTimeHistogramBuckets>
+{
+public:
+ TimeHistogram()
+ {
+ mozilla::PodArrayZero(*this);
+ }
+ // Get minimum (inclusive) range of bucket in milliseconds
+ uint32_t GetBucketMin(size_t aBucket) const {
+ MOZ_ASSERT(aBucket < ArrayLength(*this));
+ return (1u << aBucket) & ~1u; // Bucket 0 starts at 0, not 1
+ }
+ // Get maximum (inclusive) range of bucket in milliseconds
+ uint32_t GetBucketMax(size_t aBucket) const {
+ MOZ_ASSERT(aBucket < ArrayLength(*this));
+ return (1u << (aBucket + 1u)) - 1u;
+ }
+ void Add(PRIntervalTime aTime);
+};
+
+/* HangStack stores an array of const char pointers,
+ with optional internal storage for strings. */
+class HangStack
+{
+public:
+ static const size_t sMaxInlineStorage = 8;
+
+private:
+ typedef mozilla::Vector<const char*, sMaxInlineStorage> Impl;
+ Impl mImpl;
+
+ // Stack entries can either be a static const char*
+ // or a pointer to within this buffer.
+ mozilla::Vector<char, 0> mBuffer;
+
+public:
+ HangStack() { }
+
+ HangStack(HangStack&& aOther)
+ : mImpl(mozilla::Move(aOther.mImpl))
+ , mBuffer(mozilla::Move(aOther.mBuffer))
+ {
+ }
+
+ bool operator==(const HangStack& aOther) const {
+ for (size_t i = 0; i < length(); i++) {
+ if (!IsSameAsEntry(operator[](i), aOther[i])) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ bool operator!=(const HangStack& aOther) const {
+ return !operator==(aOther);
+ }
+
+ const char*& operator[](size_t aIndex) {
+ return mImpl[aIndex];
+ }
+
+ const char* const& operator[](size_t aIndex) const {
+ return mImpl[aIndex];
+ }
+
+ size_t capacity() const { return mImpl.capacity(); }
+ size_t length() const { return mImpl.length(); }
+ bool empty() const { return mImpl.empty(); }
+ bool canAppendWithoutRealloc(size_t aNeeded) const {
+ return mImpl.canAppendWithoutRealloc(aNeeded);
+ }
+ void infallibleAppend(const char* aEntry) { mImpl.infallibleAppend(aEntry); }
+ bool reserve(size_t aRequest) { return mImpl.reserve(aRequest); }
+ const char** begin() { return mImpl.begin(); }
+ const char* const* begin() const { return mImpl.begin(); }
+ const char** end() { return mImpl.end(); }
+ const char* const* end() const { return mImpl.end(); }
+ const char*& back() { return mImpl.back(); }
+ void erase(const char** aEntry) { mImpl.erase(aEntry); }
+ void erase(const char** aBegin, const char** aEnd) {
+ mImpl.erase(aBegin, aEnd);
+ }
+
+ void clear() {
+ mImpl.clear();
+ mBuffer.clear();
+ }
+
+ bool IsInBuffer(const char* aEntry) const {
+ return aEntry >= mBuffer.begin() && aEntry < mBuffer.end();
+ }
+
+ bool IsSameAsEntry(const char* aEntry, const char* aOther) const {
+ // If the entry came from the buffer, we need to compare its content;
+ // otherwise we only need to compare its pointer.
+ return IsInBuffer(aEntry) ? !strcmp(aEntry, aOther) : (aEntry == aOther);
+ }
+
+ size_t AvailableBufferSize() const {
+ return mBuffer.capacity() - mBuffer.length();
+ }
+
+ bool EnsureBufferCapacity(size_t aCapacity) {
+ // aCapacity is the minimal capacity and Vector may make the actual
+ // capacity larger, in which case we want to use up all the space.
+ return mBuffer.reserve(aCapacity) &&
+ mBuffer.reserve(mBuffer.capacity());
+ }
+
+ const char* InfallibleAppendViaBuffer(const char* aText, size_t aLength);
+ const char* AppendViaBuffer(const char* aText, size_t aLength);
+};
+
+/* A hang histogram consists of a stack associated with the
+ hang, along with a time histogram of the hang times. */
+class HangHistogram : public TimeHistogram
+{
+private:
+ static uint32_t GetHash(const HangStack& aStack);
+
+ HangStack mStack;
+ // Native stack that corresponds to the pseudostack in mStack
+ HangStack mNativeStack;
+ // Use a hash to speed comparisons
+ const uint32_t mHash;
+ // Annotations attributed to this stack
+ HangMonitor::HangAnnotationsVector mAnnotations;
+
+public:
+ explicit HangHistogram(HangStack&& aStack)
+ : mStack(mozilla::Move(aStack))
+ , mHash(GetHash(mStack))
+ {
+ }
+ HangHistogram(HangHistogram&& aOther)
+ : TimeHistogram(mozilla::Move(aOther))
+ , mStack(mozilla::Move(aOther.mStack))
+ , mNativeStack(mozilla::Move(aOther.mNativeStack))
+ , mHash(mozilla::Move(aOther.mHash))
+ , mAnnotations(mozilla::Move(aOther.mAnnotations))
+ {
+ }
+ bool operator==(const HangHistogram& aOther) const;
+ bool operator!=(const HangHistogram& aOther) const
+ {
+ return !operator==(aOther);
+ }
+ const HangStack& GetStack() const {
+ return mStack;
+ }
+ HangStack& GetNativeStack() {
+ return mNativeStack;
+ }
+ const HangStack& GetNativeStack() const {
+ return mNativeStack;
+ }
+ const HangMonitor::HangAnnotationsVector& GetAnnotations() const {
+ return mAnnotations;
+ }
+ void Add(PRIntervalTime aTime, HangMonitor::HangAnnotationsPtr aAnnotations) {
+ TimeHistogram::Add(aTime);
+ if (aAnnotations) {
+ if (!mAnnotations.append(Move(aAnnotations))) {
+ MOZ_CRASH();
+ }
+ }
+ }
+};
+
+/* Thread hang stats consist of
+ - thread name
+ - time histogram of all task run times
+ - hang histograms of individual hangs
+ - annotations for each hang
+*/
+class ThreadHangStats
+{
+private:
+ nsCString mName;
+
+public:
+ TimeHistogram mActivity;
+ mozilla::Vector<HangHistogram, 4> mHangs;
+
+ explicit ThreadHangStats(const char* aName)
+ : mName(aName)
+ {
+ }
+ ThreadHangStats(ThreadHangStats&& aOther)
+ : mName(mozilla::Move(aOther.mName))
+ , mActivity(mozilla::Move(aOther.mActivity))
+ , mHangs(mozilla::Move(aOther.mHangs))
+ {
+ }
+ const char* GetName() const {
+ return mName.get();
+ }
+};
+
+} // namespace Telemetry
+} // namespace mozilla
+
+#endif // mozilla_BackgroundHangTelemetry_h
diff --git a/toolkit/components/telemetry/UITelemetry.jsm b/toolkit/components/telemetry/UITelemetry.jsm
new file mode 100644
index 000000000..bd7a34b72
--- /dev/null
+++ b/toolkit/components/telemetry/UITelemetry.jsm
@@ -0,0 +1,235 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+"use strict";
+
+const Cu = Components.utils;
+
+const PREF_BRANCH = "toolkit.telemetry.";
+const PREF_ENABLED = PREF_BRANCH + "enabled";
+
+this.EXPORTED_SYMBOLS = [
+ "UITelemetry",
+];
+
+Cu.import("resource://gre/modules/Services.jsm", this);
+
+/**
+ * UITelemetry is a helper JSM used to record UI specific telemetry events.
+ *
+ * It implements nsIUITelemetryObserver, defined in nsIAndroidBridge.idl.
+ */
+this.UITelemetry = {
+ _enabled: undefined,
+ _activeSessions: {},
+ _measurements: [],
+
+ // Lazily decide whether telemetry is enabled.
+ get enabled() {
+ if (this._enabled !== undefined) {
+ return this._enabled;
+ }
+
+ // Set an observer to watch for changes at runtime.
+ Services.prefs.addObserver(PREF_ENABLED, this, false);
+ Services.obs.addObserver(this, "profile-before-change", false);
+
+ // Pick up the current value.
+ try {
+ this._enabled = Services.prefs.getBoolPref(PREF_ENABLED);
+ } catch (e) {
+ this._enabled = false;
+ }
+
+ return this._enabled;
+ },
+
+ observe: function(aSubject, aTopic, aData) {
+ if (aTopic == "profile-before-change") {
+ Services.obs.removeObserver(this, "profile-before-change");
+ Services.prefs.removeObserver(PREF_ENABLED, this);
+ this._enabled = undefined;
+ return;
+ }
+
+ if (aTopic == "nsPref:changed") {
+ switch (aData) {
+ case PREF_ENABLED:
+ let on = Services.prefs.getBoolPref(PREF_ENABLED);
+ this._enabled = on;
+
+ // Wipe ourselves if we were just disabled.
+ if (!on) {
+ this._activeSessions = {};
+ this._measurements = [];
+ }
+ break;
+ }
+ }
+ },
+
+ /**
+ * This exists exclusively for testing -- our events are not intended to
+ * be retrieved via an XPCOM interface.
+ */
+ get wrappedJSObject() {
+ return this;
+ },
+
+ /**
+ * Holds the functions that provide UITelemetry's simple
+ * measurements. Those functions are mapped to unique names,
+ * and should be registered with addSimpleMeasureFunction.
+ */
+ _simpleMeasureFunctions: {},
+
+ /**
+ * A hack to generate the relative timestamp from start when we don't have
+ * access to the Java timer.
+ * XXX: Bug 1007647 - Support realtime and/or uptime in JavaScript.
+ */
+ uptimeMillis: function() {
+ return Date.now() - Services.startup.getStartupInfo().process;
+ },
+
+ /**
+ * Adds a single event described by a timestamp, an action, and the calling
+ * method.
+ *
+ * Optionally provide a string 'extras', which will be recorded as part of
+ * the event.
+ *
+ * All extant sessions will be recorded by name for each event.
+ */
+ addEvent: function(aAction, aMethod, aTimestamp, aExtras) {
+ if (!this.enabled) {
+ return;
+ }
+
+ let sessions = Object.keys(this._activeSessions);
+ let aEvent = {
+ type: "event",
+ action: aAction,
+ method: aMethod,
+ sessions: sessions,
+ timestamp: (aTimestamp == undefined) ? this.uptimeMillis() : aTimestamp,
+ };
+
+ if (aExtras) {
+ aEvent.extras = aExtras;
+ }
+
+ this._recordEvent(aEvent);
+ },
+
+ /**
+ * Begins tracking a session by storing a timestamp for session start.
+ */
+ startSession: function(aName, aTimestamp) {
+ if (!this.enabled) {
+ return;
+ }
+
+ if (this._activeSessions[aName]) {
+ // Do not overwrite a previous event start if it already exists.
+ return;
+ }
+ this._activeSessions[aName] = (aTimestamp == undefined) ? this.uptimeMillis() : aTimestamp;
+ },
+
+ /**
+ * Tracks the end of a session with a timestamp.
+ */
+ stopSession: function(aName, aReason, aTimestamp) {
+ if (!this.enabled) {
+ return;
+ }
+
+ let sessionStart = this._activeSessions[aName];
+ delete this._activeSessions[aName];
+
+ if (!sessionStart) {
+ return;
+ }
+
+ let aEvent = {
+ type: "session",
+ name: aName,
+ reason: aReason,
+ start: sessionStart,
+ end: (aTimestamp == undefined) ? this.uptimeMillis() : aTimestamp,
+ };
+
+ this._recordEvent(aEvent);
+ },
+
+ _recordEvent: function(aEvent) {
+ this._measurements.push(aEvent);
+ },
+
+ /**
+ * Called by TelemetrySession to populate the simple measurement
+ * blob. This function will iterate over all functions added
+ * via addSimpleMeasureFunction and return an object with the
+ * results of those functions.
+ */
+ getSimpleMeasures: function() {
+ if (!this.enabled) {
+ return {};
+ }
+
+ let result = {};
+ for (let name in this._simpleMeasureFunctions) {
+ result[name] = this._simpleMeasureFunctions[name]();
+ }
+ return result;
+ },
+
+ /**
+ * Allows the caller to register functions that will get called
+ * for simple measures during a Telemetry ping. aName is a unique
+ * identifier used as they key for the simple measurement in the
+ * object that getSimpleMeasures returns.
+ *
+ * This function throws an exception if aName already has a function
+ * registered for it.
+ */
+ addSimpleMeasureFunction: function(aName, aFunction) {
+ if (!this.enabled) {
+ return;
+ }
+
+ if (aName in this._simpleMeasureFunctions) {
+ throw new Error("A simple measurement function is already registered for " + aName);
+ }
+
+ if (!aFunction || typeof aFunction !== 'function') {
+ throw new Error("addSimpleMeasureFunction called with non-function argument.");
+ }
+
+ this._simpleMeasureFunctions[aName] = aFunction;
+ },
+
+ removeSimpleMeasureFunction: function(aName) {
+ delete this._simpleMeasureFunctions[aName];
+ },
+
+ /**
+ * Called by TelemetrySession to populate the UI measurement
+ * blob.
+ *
+ * Optionally clears the set of measurements based on aClear.
+ */
+ getUIMeasurements: function(aClear) {
+ if (!this.enabled) {
+ return [];
+ }
+
+ let measurements = this._measurements.slice();
+ if (aClear) {
+ this._measurements = [];
+ }
+ return measurements;
+ }
+};
diff --git a/toolkit/components/telemetry/WebrtcTelemetry.cpp b/toolkit/components/telemetry/WebrtcTelemetry.cpp
new file mode 100644
index 000000000..29c22be23
--- /dev/null
+++ b/toolkit/components/telemetry/WebrtcTelemetry.cpp
@@ -0,0 +1,112 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+
+#include "Telemetry.h"
+#include "TelemetryCommon.h"
+#include "WebrtcTelemetry.h"
+#include "jsapi.h"
+#include "nsPrintfCString.h"
+#include "nsTHashtable.h"
+
+using mozilla::Telemetry::Common::AutoHashtable;
+
+void
+WebrtcTelemetry::RecordIceCandidateMask(const uint32_t iceCandidateBitmask,
+ const bool success)
+{
+ WebrtcIceCandidateType *entry = mWebrtcIceCandidates.GetEntry(iceCandidateBitmask);
+ if (!entry) {
+ entry = mWebrtcIceCandidates.PutEntry(iceCandidateBitmask);
+ if (MOZ_UNLIKELY(!entry))
+ return;
+ }
+
+ if (success) {
+ entry->mData.webrtc.successCount++;
+ } else {
+ entry->mData.webrtc.failureCount++;
+ }
+}
+
+bool
+ReflectIceEntry(const WebrtcTelemetry::WebrtcIceCandidateType *entry,
+ const WebrtcTelemetry::WebrtcIceCandidateStats *stat, JSContext *cx,
+ JS::Handle<JSObject*> obj)
+{
+ if ((stat->successCount == 0) && (stat->failureCount == 0))
+ return true;
+
+ const uint32_t &bitmask = entry->GetKey();
+
+ JS::Rooted<JSObject*> statsObj(cx, JS_NewPlainObject(cx));
+ if (!statsObj)
+ return false;
+ if (!JS_DefineProperty(cx, obj,
+ nsPrintfCString("%lu", bitmask).BeginReading(),
+ statsObj, JSPROP_ENUMERATE)) {
+ return false;
+ }
+ if (stat->successCount && !JS_DefineProperty(cx, statsObj, "successCount",
+ stat->successCount,
+ JSPROP_ENUMERATE)) {
+ return false;
+ }
+ if (stat->failureCount && !JS_DefineProperty(cx, statsObj, "failureCount",
+ stat->failureCount,
+ JSPROP_ENUMERATE)) {
+ return false;
+ }
+ return true;
+}
+
+bool
+ReflectIceWebrtc(WebrtcTelemetry::WebrtcIceCandidateType *entry, JSContext *cx,
+ JS::Handle<JSObject*> obj)
+{
+ return ReflectIceEntry(entry, &entry->mData.webrtc, cx, obj);
+}
+
+bool
+WebrtcTelemetry::AddIceInfo(JSContext *cx, JS::Handle<JSObject*> iceObj)
+{
+ JS::Rooted<JSObject*> statsObj(cx, JS_NewPlainObject(cx));
+ if (!statsObj)
+ return false;
+
+ if (!mWebrtcIceCandidates.ReflectIntoJS(ReflectIceWebrtc, cx, statsObj)) {
+ return false;
+ }
+
+ return JS_DefineProperty(cx, iceObj, "webrtc",
+ statsObj, JSPROP_ENUMERATE);
+}
+
+bool
+WebrtcTelemetry::GetWebrtcStats(JSContext *cx, JS::MutableHandle<JS::Value> ret)
+{
+ JS::Rooted<JSObject*> root_obj(cx, JS_NewPlainObject(cx));
+ if (!root_obj)
+ return false;
+ ret.setObject(*root_obj);
+
+ JS::Rooted<JSObject*> ice_obj(cx, JS_NewPlainObject(cx));
+ if (!ice_obj)
+ return false;
+ JS_DefineProperty(cx, root_obj, "IceCandidatesStats", ice_obj,
+ JSPROP_ENUMERATE);
+
+ if (!AddIceInfo(cx, ice_obj))
+ return false;
+
+ return true;
+}
+
+size_t
+WebrtcTelemetry::SizeOfExcludingThis(mozilla::MallocSizeOf aMallocSizeOf) const
+{
+ return mWebrtcIceCandidates.ShallowSizeOfExcludingThis(aMallocSizeOf);
+}
diff --git a/toolkit/components/telemetry/WebrtcTelemetry.h b/toolkit/components/telemetry/WebrtcTelemetry.h
new file mode 100644
index 000000000..ed87c7107
--- /dev/null
+++ b/toolkit/components/telemetry/WebrtcTelemetry.h
@@ -0,0 +1,43 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef WebrtcTelemetry_h__
+#define WebrtcTelemetry_h__
+
+#include "nsBaseHashtable.h"
+#include "nsHashKeys.h"
+#include "TelemetryCommon.h"
+
+class WebrtcTelemetry {
+public:
+ struct WebrtcIceCandidateStats {
+ uint32_t successCount;
+ uint32_t failureCount;
+ WebrtcIceCandidateStats() :
+ successCount(0),
+ failureCount(0)
+ {
+ }
+ };
+ struct WebrtcIceStatsCategory {
+ struct WebrtcIceCandidateStats webrtc;
+ };
+ typedef nsBaseHashtableET<nsUint32HashKey, WebrtcIceStatsCategory> WebrtcIceCandidateType;
+
+ void RecordIceCandidateMask(const uint32_t iceCandidateBitmask, bool success);
+
+ bool GetWebrtcStats(JSContext *cx, JS::MutableHandle<JS::Value> ret);
+
+ size_t SizeOfExcludingThis(mozilla::MallocSizeOf aMallocSizeOf) const;
+
+private:
+
+ bool AddIceInfo(JSContext *cx, JS::Handle<JSObject*> rootObj);
+
+ mozilla::Telemetry::Common::AutoHashtable<WebrtcIceCandidateType> mWebrtcIceCandidates;
+};
+
+#endif // WebrtcTelemetry_h__
diff --git a/toolkit/components/telemetry/datareporting-prefs.js b/toolkit/components/telemetry/datareporting-prefs.js
new file mode 100644
index 000000000..6a61f1853
--- /dev/null
+++ b/toolkit/components/telemetry/datareporting-prefs.js
@@ -0,0 +1,12 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+pref("datareporting.policy.dataSubmissionEnabled", true);
+pref("datareporting.policy.dataSubmissionPolicyNotifiedTime", "0");
+pref("datareporting.policy.dataSubmissionPolicyAcceptedVersion", 0);
+pref("datareporting.policy.dataSubmissionPolicyBypassNotification", false);
+pref("datareporting.policy.currentPolicyVersion", 2);
+pref("datareporting.policy.minimumPolicyVersion", 1);
+pref("datareporting.policy.minimumPolicyVersion.channel-beta", 2);
+pref("datareporting.policy.firstRunURL", "");
diff --git a/toolkit/components/telemetry/docs/collection/custom-pings.rst b/toolkit/components/telemetry/docs/collection/custom-pings.rst
new file mode 100644
index 000000000..daad87bfe
--- /dev/null
+++ b/toolkit/components/telemetry/docs/collection/custom-pings.rst
@@ -0,0 +1,74 @@
+=======================
+Submitting custom pings
+=======================
+
+Custom pings can be submitted from JavaScript using:
+
+.. code-block:: js
+
+ TelemetryController.submitExternalPing(type, payload, options)
+
+- ``type`` - a ``string`` that is the type of the ping, limited to ``/^[a-z0-9][a-z0-9-]+[a-z0-9]$/i``.
+- ``payload`` - the actual payload data for the ping, has to be a JSON style object.
+- ``options`` - optional, an object containing additional options:
+ - ``addClientId``- whether to add the client id to the ping, defaults to ``false``
+ - ``addEnvironment`` - whether to add the environment data to the ping, defaults to ``false``
+ - ``overrideEnvironment`` - a JSON style object that overrides the environment data
+
+``TelemetryController`` will assemble a ping with the passed payload and the specified options.
+That ping will be archived locally for use with Shield and inspection in ``about:telemetry``.
+If the preferences allow upload of Telemetry pings, the ping will be uploaded at the next opportunity (this is subject to throttling, retry-on-failure, etc.).
+
+Submission constraints
+----------------------
+
+When submitting pings on shutdown, they should not be submitted after Telemetry shutdown.
+Pings should be submitted at the latest within:
+
+- the `observer notification <https://developer.mozilla.org/de/docs/Observer_Notifications#Application_shutdown>`_ ``"profile-before-change"``
+- the :ref:`AsyncShutdown phase <AsyncShutdown_phases>` ``sendTelemetry``
+
+There are other constraints that can lead to a ping submission getting dropped:
+
+- invalid ping type strings
+- invalid payload types: E.g. strings instead of objects.
+- oversized payloads: We currently only drop pings >1MB, but targeting sizes of <=10KB is recommended.
+
+Tools
+=====
+
+Helpful tools for designing new pings include:
+
+- `gzipServer <https://github.com/mozilla/gzipServer>`_ - a Python script that can run locally and receives and saves Telemetry pings. Making Firefox send to it allows inspecting outgoing pings easily.
+- ``about:telemetry`` - allows inspecting submitted pings from the local archive, including all custom ones.
+
+Designing custom pings
+======================
+
+In general, creating a new custom ping means you don't benefit automatically from the existing tooling. Further work is needed to make data show up in re:dash or other analysis tools.
+
+In addition to the `data collection review <https://wiki.mozilla.org/Firefox/Data_Collection>`_, questions to guide a new pings design are:
+
+- Submission interval & triggers:
+ - What events trigger ping submission?
+ - What interval is the ping submitted in?
+ - Is there a throttling mechanism?
+ - What is the desired latency? (submitting "at least daily" still leads to certain latency tails)
+ - Are pings submitted on a clock schedule? Or based on "time since session start", "time since last ping" etc.? (I.e. will we get sharp spikes in submission volume?)
+- Size and volume:
+ - What’s the size of the submitted payload?
+ - What's the full ping size including metadata in the pipeline?
+ - What’s the target population?
+ - What's the overall estimated volume?
+- Dataset:
+ - Is it opt-out?
+ - Does it need to be opt-out?
+ - Does it need to be in a separate ping? (why can’t the data live in probes?)
+- Privacy:
+ - Is there risk to leak PII?
+ - How is that risk mitigated?
+- Data contents:
+ - Does the submitted data answer the posed product questions?
+ - Does the shape of the data allow to answer the questions efficiently?
+  - Is the data limited to what's needed to answer the questions?
+ - Does the data use common formats? (i.e. can we re-use tooling or analysis know-how)
diff --git a/toolkit/components/telemetry/docs/collection/histograms.rst b/toolkit/components/telemetry/docs/collection/histograms.rst
new file mode 100644
index 000000000..8d0233dbf
--- /dev/null
+++ b/toolkit/components/telemetry/docs/collection/histograms.rst
@@ -0,0 +1,5 @@
+==========
+Histograms
+==========
+
+Recording into histograms is currently documented in `a MDN article <https://developer.mozilla.org/en-US/docs/Mozilla/Performance/Adding_a_new_Telemetry_probe>`_.
diff --git a/toolkit/components/telemetry/docs/collection/index.rst b/toolkit/components/telemetry/docs/collection/index.rst
new file mode 100644
index 000000000..e4084e62a
--- /dev/null
+++ b/toolkit/components/telemetry/docs/collection/index.rst
@@ -0,0 +1,35 @@
+===============
+Data collection
+===============
+
+There are different APIs and formats to collect data in Firefox, all suiting different use cases.
+
+In general, we aim to submit data in a common format where possible. This has several advantages; from common code and tooling to sharing analysis know-how.
+
+In cases where this isn't possible and more flexibility is needed, we can submit custom pings or consider adding different data formats to existing pings.
+
+*Note:* Every new data collection must go through a `data collection review <https://wiki.mozilla.org/Firefox/Data_Collection>`_.
+
+The current data collection possibilities include:
+
+* :doc:`scalars` allow recording of a single value (string, boolean, a number)
+* :doc:`histograms` can efficiently record multiple data points
+* ``environment`` data records information about the system and settings a session occurs in
+* ``TelemetryLog`` allows collecting ordered event entries (note: this does not have supporting analysis tools)
+* :doc:`measuring elapsed time <measuring-time>`
+* :doc:`custom pings <custom-pings>`
+
+.. toctree::
+ :maxdepth: 2
+ :titlesonly:
+ :hidden:
+ :glob:
+
+ scalars
+ histograms
+ measuring-time
+ custom-pings
+
+Browser Usage Telemetry
+~~~~~~~~~~~~~~~~~~~~~~~
+For more information, see :ref:`browserusagetelemetry`.
diff --git a/toolkit/components/telemetry/docs/collection/measuring-time.rst b/toolkit/components/telemetry/docs/collection/measuring-time.rst
new file mode 100644
index 000000000..918c8a85a
--- /dev/null
+++ b/toolkit/components/telemetry/docs/collection/measuring-time.rst
@@ -0,0 +1,74 @@
+======================
+Measuring elapsed time
+======================
+
+To make it easier to measure how long operations take, we have helpers for both JavaScript and C++.
+These helpers record the elapsed time into histograms, so you have to create suitable histograms for them first.
+
+From JavaScript
+===============
+JavaScript can measure elapsed time using `TelemetryStopwatch.jsm <https://dxr.mozilla.org/mozilla-central/source/toolkit/components/telemetry/TelemetryStopwatch.jsm>`_.
+
+``TelemetryStopwatch`` is a helper that simplifies recording elapsed time (in milliseconds) into histograms (plain or keyed).
+
+API:
+
+.. code-block:: js
+
+ TelemetryStopwatch = {
+ // Start, cancel & finish recording elapsed time into a histogram.
+ // |aObject| is optional. If specified, the timer is associated with this
+ // object, so multiple time measurements can be done concurrently.
+ start(histogramId, aObject);
+ cancel(histogramId, aObject);
+ finish(histogramId, aObject);
+ // Start, cancel & finish recording elapsed time into a keyed histogram.
+ // |key| specifies the key to record into.
+ // |aObject| is optional and used as above.
+ startKeyed(histogramId, key, aObject);
+ cancelKeyed(histogramId, key, aObject);
+ finishKeyed(histogramId, key, aObject);
+ };
+
+Example:
+
+.. code-block:: js
+
+ TelemetryStopwatch.start("SAMPLE_FILE_LOAD_TIME_MS");
+ // ... start loading file.
+ if (failedToOpenFile) {
+ // Cancel this if the operation failed early etc.
+ TelemetryStopwatch.cancel("SAMPLE_FILE_LOAD_TIME_MS");
+ return;
+ }
+ // ... do more work.
+ TelemetryStopwatch.finish("SAMPLE_FILE_LOAD_TIME_MS");
+
+From C++
+========
+
+API:
+
+.. code-block:: cpp
+
+ // This helper class is the preferred way to record elapsed time.
+ template<ID id, TimerResolution res = MilliSecond>
+ class AutoTimer {
+ // Record into a plain histogram.
+ explicit AutoTimer(TimeStamp aStart = TimeStamp::Now());
+ // Record into a keyed histogram, with key |aKey|.
+ explicit AutoTimer(const nsCString& aKey,
+ TimeStamp aStart = TimeStamp::Now());
+ };
+
+ void AccumulateTimeDelta(ID id, TimeStamp start, TimeStamp end = TimeStamp::Now());
+
+Example:
+
+.. code-block:: cpp
+
+ {
+ Telemetry::AutoTimer<Telemetry::FIND_PLUGINS> telemetry;
+ // ... scan disk for plugins.
+ }
+ // When leaving the scope, AutoTimer's destructor will record the time that passed.
diff --git a/toolkit/components/telemetry/docs/collection/scalars.rst b/toolkit/components/telemetry/docs/collection/scalars.rst
new file mode 100644
index 000000000..2c48601a4
--- /dev/null
+++ b/toolkit/components/telemetry/docs/collection/scalars.rst
@@ -0,0 +1,140 @@
+=======
+Scalars
+=======
+
+Historically we started to overload our histogram mechanism to also collect scalar data,
+such as flag values, counts, labels and others.
+The scalar measurement types are the suggested way to collect that kind of scalar data.
+We currently only support recording of scalars from the parent process.
+The serialized scalar data is submitted with the :doc:`main pings <../data/main-ping>`.
+
+The API
+=======
+Scalar probes can be managed either through the `nsITelemetry interface <https://dxr.mozilla.org/mozilla-central/source/toolkit/components/telemetry/nsITelemetry.idl>`_
+or the `C++ API <https://dxr.mozilla.org/mozilla-central/source/toolkit/components/telemetry/Telemetry.h>`_.
+
+JS API
+------
+Probes in privileged JavaScript code can use the following functions to manipulate scalars:
+
+.. code-block:: js
+
+ Services.telemetry.scalarAdd(aName, aValue);
+ Services.telemetry.scalarSet(aName, aValue);
+ Services.telemetry.scalarSetMaximum(aName, aValue);
+
+ Services.telemetry.keyedScalarAdd(aName, aKey, aValue);
+ Services.telemetry.keyedScalarSet(aName, aKey, aValue);
+ Services.telemetry.keyedScalarSetMaximum(aName, aKey, aValue);
+
+These functions can throw if, for example, an operation is performed on a scalar type that doesn't support it
+(e.g. calling scalarSetMaximum on a scalar of the string kind). Please look at the `code documentation <https://dxr.mozilla.org/mozilla-central/search?q=regexp%3ATelemetryScalar%3A%3A(Set%7CAdd)+file%3ATelemetryScalar.cpp&redirect=false>`_ for
+additional information.
+
+C++ API
+-------
+Probes in native code can use the more convenient helper functions declared in `Telemetry.h <https://dxr.mozilla.org/mozilla-central/source/toolkit/components/telemetry/Telemetry.h>`_:
+
+.. code-block:: cpp
+
+ void ScalarAdd(mozilla::Telemetry::ScalarID aId, uint32_t aValue);
+ void ScalarSet(mozilla::Telemetry::ScalarID aId, uint32_t aValue);
+ void ScalarSet(mozilla::Telemetry::ScalarID aId, const nsAString& aValue);
+ void ScalarSet(mozilla::Telemetry::ScalarID aId, bool aValue);
+ void ScalarSetMaximum(mozilla::Telemetry::ScalarID aId, uint32_t aValue);
+
+ void ScalarAdd(mozilla::Telemetry::ScalarID aId, const nsAString& aKey, uint32_t aValue);
+ void ScalarSet(mozilla::Telemetry::ScalarID aId, const nsAString& aKey, uint32_t aValue);
+ void ScalarSet(mozilla::Telemetry::ScalarID aId, const nsAString& aKey, bool aValue);
+ void ScalarSetMaximum(mozilla::Telemetry::ScalarID aId, const nsAString& aKey, uint32_t aValue);
+
+The YAML definition file
+========================
+Scalar probes are required to be registered, both for validation and transparency reasons,
+in the `Scalars.yaml <https://dxr.mozilla.org/mozilla-central/source/toolkit/components/telemetry/Scalars.yaml>`_
+definition file.
+
+The probes in the definition file are represented in a fixed-depth, two-level structure:
+
+.. code-block:: yaml
+
+ # The following is a group.
+ a.group.hierarchy:
+ a_probe_name:
+ kind: uint
+ ...
+ another_probe:
+ kind: string
+ ...
+ ...
+ group2:
+ probe:
+ kind: int
+ ...
+
+Group and probe names need to follow a few rules:
+
+- they cannot exceed 40 characters each;
+- group names must be alpha-numeric + ``.``, with no leading/trailing digit or ``.``;
+- probe names must be alpha-numeric + ``_``, with no leading/trailing digit or ``_``.
+
+A probe can be defined as follows:
+
+.. code-block:: yaml
+
+ a.group.hierarchy:
+ a_scalar:
+ bug_numbers:
+ - 1276190
+ description: A nice one-line description.
+ expires: never
+ kind: uint
+ notification_emails:
+ - telemetry-client-dev@mozilla.com
+
+Required Fields
+---------------
+
+- ``bug_numbers``: A list of unsigned integers representing the number of the bugs the probe was introduced in.
+- ``description``: A single or multi-line string describing what data the probe collects and when it gets collected.
+- ``expires``: The version number in which the scalar expires, e.g. "30"; a version number of type "N" and "N.0" is automatically converted to "N.0a1" in order to expire the scalar also in the development channels. A telemetry probe acting on an expired scalar will print a warning into the browser console. For scalars that never expire the value ``never`` can be used.
+- ``kind``: A string representing the scalar type. Allowed values are ``uint``, ``string`` and ``boolean``.
+- ``notification_emails``: A list of email addresses to notify with alerts of expiring probes. More importantly, these are used by the data steward to verify that the probe is still useful.
+
+Optional Fields
+---------------
+
+- ``cpp_guard``: A string that gets inserted as an ``#ifdef`` directive around the automatically generated C++ declaration. This is typically used for platform-specific scalars, e.g. ``ANDROID``.
+- ``release_channel_collection``: This can be either ``opt-in`` (default) or ``opt-out``. With the former the scalar is submitted by default on pre-release channels; on the release channel only if the user opted into additional data collection. With the latter the scalar is submitted by default on release and pre-release channels, unless the user opted out.
+- ``keyed``: A boolean that determines whether this is a keyed scalar. It defaults to ``False``.
+
+String type restrictions
+------------------------
+To prevent abuses, the content of a string scalar is limited to 50 characters in length. Trying
+to set a longer string will result in an error and no string being set.
+
+Keyed Scalars
+-------------
+Keyed scalars are collections of one of the available scalar types, indexed by a string key that can contain UTF8 characters and cannot be longer than 70 characters. Keyed scalars can contain up to 100 keys. This scalar type is for example useful when you want to break down certain counts by a name, like how often searches happen with which search engine.
+
+Keyed scalars should only be used if the set of keys are not known beforehand. If the keys are from a known set of strings, other options are preferred if suitable, like categorical histograms or splitting measurements up into separate scalars.
+
+The processor scripts
+=====================
+The scalar definition file is processed and checked for correctness at compile time. If it
+conforms to the specification, the processor scripts generate two C++ header files, included
+by the Telemetry C++ core.
+
+gen-scalar-data.py
+------------------
+This script is called by the build system to generate the ``TelemetryScalarData.h`` C++ header
+file out of the scalar definitions.
+This header file contains an array holding the scalar names and version strings, in addition
+to an array of ``ScalarInfo`` structures representing all the scalars.
+
+gen-scalar-enum.py
+------------------
+This script is called by the build system to generate the ``TelemetryScalarEnums.h`` C++ header
+file out of the scalar definitions.
+This header file contains an enum class with all the scalar identifiers used to access them
+from code through the C++ API.
diff --git a/toolkit/components/telemetry/docs/concepts/archiving.rst b/toolkit/components/telemetry/docs/concepts/archiving.rst
new file mode 100644
index 000000000..a2c57de43
--- /dev/null
+++ b/toolkit/components/telemetry/docs/concepts/archiving.rst
@@ -0,0 +1,12 @@
+=========
+Archiving
+=========
+
+When archiving is enabled through the relevant pref (``toolkit.telemetry.archive.enabled``), pings submitted to ``TelemetryController`` are also stored locally in the user profile directory, in ``<profile-dir>/datareporting/archived``.
+
+To allow for cheaper lookup of archived pings, storage follows a specific naming scheme for both the directory and the ping file name: `<YYYY-MM>/<timestamp>.<UUID>.<type>.jsonlz4`.
+
+* ``<YYYY-MM>`` - The subdirectory name, generated from the ping creation date.
+* ``<timestamp>`` - Timestamp of the ping creation date.
+* ``<UUID>`` - The ping identifier.
+* ``<type>`` - The ping type.
diff --git a/toolkit/components/telemetry/docs/concepts/crashes.rst b/toolkit/components/telemetry/docs/concepts/crashes.rst
new file mode 100644
index 000000000..c9f69a23b
--- /dev/null
+++ b/toolkit/components/telemetry/docs/concepts/crashes.rst
@@ -0,0 +1,23 @@
+=======
+Crashes
+=======
+
+There are many different kinds of crashes for Firefox, there is not a single system used to record all of them.
+
+Main process crashes
+====================
+
+If the Firefox main process dies, that should be recorded as an aborted session. We would submit a :doc:`main ping <../data/main-ping>` with the reason ``aborted-session``.
+If we have a crash dump for that crash, we should also submit a :doc:`crash ping <../data/crash-ping>`.
+
+The ``aborted-session`` information is first written to disk 60 seconds after startup, any earlier crashes will not trigger an ``aborted-session`` ping.
+Also, the ``aborted-session`` is updated at least every 5 minutes, so it may lag behind the last session state.
+
+Crashes during startup should be recorded in the next session's main ping in the ``STARTUP_CRASH_DETECTED`` histogram.
+
+Child process crashes
+=====================
+
+If a Firefox plugin, content or gmplugin process dies unexpectedly, this is recorded in the main ping's ``SUBPROCESS_ABNORMAL_ABORT`` keyed histogram.
+
+If we catch a crash report for this, then additionally the ``SUBPROCESS_CRASHES_WITH_DUMP`` keyed histogram is incremented.
diff --git a/toolkit/components/telemetry/docs/concepts/index.rst b/toolkit/components/telemetry/docs/concepts/index.rst
new file mode 100644
index 000000000..a49466f8d
--- /dev/null
+++ b/toolkit/components/telemetry/docs/concepts/index.rst
@@ -0,0 +1,23 @@
+========
+Concepts
+========
+
+There are common concepts used throughout Telemetry:
+
+* :doc:`pings <pings>` - the packets we use to submit data
+* :doc:`sessions & subsessions <sessions>` - how we slice a user's time in the browser
+* *measurements* - how we :doc:`collect data <../collection/index>`
+* *opt-in* & *opt-out* - the different sets of data we collect
+* :doc:`submission <submission>` - how we send data to the servers
+* :doc:`archiving <archiving>` - retaining ping data locally
+* :doc:`crashes <crashes>` - the different data crashes generate
+
+.. toctree::
+ :maxdepth: 2
+ :titlesonly:
+ :glob:
+ :hidden:
+
+ pings
+ crashes
+ *
diff --git a/toolkit/components/telemetry/docs/concepts/pings.rst b/toolkit/components/telemetry/docs/concepts/pings.rst
new file mode 100644
index 000000000..db7371b32
--- /dev/null
+++ b/toolkit/components/telemetry/docs/concepts/pings.rst
@@ -0,0 +1,32 @@
+.. _telemetry_pings:
+
+=====================
+Telemetry pings
+=====================
+
+A *Telemetry ping* is the data that we send to Mozilla's Telemetry servers.
+
+That data is stored as a JSON object client-side and contains common information to all pings and a payload specific to a certain *ping type*.
+
+The top-level structure is defined by the :doc:`common ping format <../data/common-ping>`.
+It contains:
+
+* some basic information shared between different ping types
+* the :doc:`environment data <../data/environment>` (optional)
+* the data specific to the *ping type*, the *payload*.
+
+Ping types
+==========
+
+We send Telemetry with different ping types. The :doc:`main <../data/main-ping>` ping is the ping that contains the bulk of the Telemetry measurements for Firefox. For more specific use-cases, we send other ping types.
+
+Pings sent from code that ships with Firefox are listed in the :doc:`data documentation <../data/index>`.
+
+Important examples are:
+
+* :doc:`main <../data/main-ping>` - contains the information collected by Telemetry (Histograms, hang stacks, ...)
+* :doc:`saved-session <../data/main-ping>` - has the same format as a main ping, but it contains the *"classic"* Telemetry payload with measurements covering the whole browser session. This is only a separate type to make storage of saved-session easier server-side. This is temporary and will be removed soon.
+* :doc:`crash <../data/crash-ping>` - a ping that is captured and sent after Firefox crashes.
+* ``activation`` - *planned* - sent right after installation or profile creation
+* ``upgrade`` - *planned* - sent right after an upgrade
+* :doc:`deletion <../data/deletion-ping>` - sent when FHR upload is disabled, requesting deletion of the data associated with this user
diff --git a/toolkit/components/telemetry/docs/concepts/sessions.rst b/toolkit/components/telemetry/docs/concepts/sessions.rst
new file mode 100644
index 000000000..088556978
--- /dev/null
+++ b/toolkit/components/telemetry/docs/concepts/sessions.rst
@@ -0,0 +1,40 @@
+========
+Sessions
+========
+
+A *session* is the time from when Firefox starts until it shuts down.
+A session can be very long-running. E.g. for Mac users who are used to always putting their laptops into sleep mode, Firefox may run for weeks.
+We slice the sessions into smaller logical units called *subsessions*.
+
+Subsessions
+===========
+
+The first subsession starts when the browser starts. After that, we split the subsession for different reasons:
+
+* ``daily``, when crossing local midnight. This keeps latency acceptable by triggering a ping at least daily for most active users.
+* ``environment-change``, when a change to the *environment* happens. This happens for important changes to the Firefox settings and when addons activate or deactivate.
+
+On a subsession split, a :doc:`main ping <../data/main-ping>` with that reason will be submitted. We store the reason in the ping's payload, to see what triggered it.
+
+A session always ends with a subsession with one of two reasons:
+
+* ``shutdown``, when the browser was cleanly shut down. To avoid delaying shutdown, we only save this ping to disk and send it at the next opportunity (typically the next browsing session).
+* ``aborted-session``, when the browser crashed. While Firefox is active, we write the current ``main`` ping data to disk every 5 minutes. If the browser crashes, we find this data on disk on the next start and send it with this reason.
+
+.. image:: subsession_triggers.png
+
+Subsession data
+===============
+
+A subsession's data consists of:
+
+* general information: the date the subsession started, how long it lasted, etc.
+* specific measurements: histogram & scalar data, etc.
+
+This has some advantages:
+
+* Latency - Sending a ping with all the data of a subsession immediately after it ends means we get the data from installs faster. For ``main`` pings, we aim to send a ping at least daily by starting a new subsession at local midnight.
+* Correlation - By starting new subsessions when fundamental settings change (i.e. changes to the *environment*), we can correlate a subsession's data better to those settings.
+
+
+
diff --git a/toolkit/components/telemetry/docs/concepts/submission.rst b/toolkit/components/telemetry/docs/concepts/submission.rst
new file mode 100644
index 000000000..165917d40
--- /dev/null
+++ b/toolkit/components/telemetry/docs/concepts/submission.rst
@@ -0,0 +1,34 @@
+==========
+Submission
+==========
+
+*Note:* The server-side behaviour is documented in the `HTTP Edge Server specification <https://wiki.mozilla.org/CloudServices/DataPipeline/HTTPEdgeServerSpecification>`_.
+
+Pings are submitted via a common API on ``TelemetryController``.
+If a ping fails to successfully submit to the server immediately (e.g. because
+of missing internet connection), Telemetry will store it on disk and retry to
+send it until the maximum ping age is exceeded (14 days).
+
+*Note:* the :doc:`main pings <../data/main-ping>` are kept locally even after successful submission to enable the HealthReport and SelfSupport features. They will be deleted after their retention period of 180 days.
+
+Submission logic
+================
+
+Sending of pending pings starts as soon as the delayed startup is finished. They are sent in batches, newest-first, with up
+to 10 persisted pings per batch plus all unpersisted pings.
+The send logic then waits for each batch to complete.
+
+If it succeeds we trigger the next send of a ping batch. This is delayed as needed to only trigger one batch send per minute.
+
+If ping sending encounters an error that means retrying later, a backoff timeout behavior is
+triggered, exponentially increasing the timeout for the next try from 1 minute up to a limit of 120 minutes.
+Any new ping submissions and "idle-daily" events reset this behavior as a safety mechanism and trigger immediate ping sending.
+
+Status codes
+============
+
+The telemetry server team is working towards `the common services status codes <https://wiki.mozilla.org/CloudServices/DataPipeline/HTTPEdgeServerSpecification#Server_Responses>`_, but for now the following logic is sufficient for Telemetry:
+
+* `2XX` - success, don't resubmit
+* `4XX` - there was some problem with the request - the client should not try to resubmit as it would just receive the same response
+* `5XX` - there was a server-side error, the client should try to resubmit later
diff --git a/toolkit/components/telemetry/docs/concepts/subsession_triggers.png b/toolkit/components/telemetry/docs/concepts/subsession_triggers.png
new file mode 100644
index 000000000..5717b00a9
--- /dev/null
+++ b/toolkit/components/telemetry/docs/concepts/subsession_triggers.png
Binary files differ
diff --git a/toolkit/components/telemetry/docs/data/addons-malware-ping.rst b/toolkit/components/telemetry/docs/data/addons-malware-ping.rst
new file mode 100644
index 000000000..18502d748
--- /dev/null
+++ b/toolkit/components/telemetry/docs/data/addons-malware-ping.rst
@@ -0,0 +1,42 @@
+
+Add-ons malware ping
+====================
+
+This ping is generated by an add-on created by Mozilla and shipped to users on older versions of Firefox (44-46). The ping contains information about the profile that might have been altered by a third party malicious add-on.
+
+Structure:
+
+.. code-block:: js
+
+ {
+ type: "malware-addon-states",
+ ...
+ clientId: <UUID>,
+ environment: { ... },
+ // Common ping data.
+ payload: {
+ // True if the blocklist was disabled at startup time.
+ blocklistDisabled: <bool>,
+ // True if the malicious add-on exists and is enabled. False if it
+ // exists and is disabled or null if the add-on was not found.
+ mainAddonActive: <bool | null>,
+ // A value of the malicious add-on block list state, or null if the
+ // add-on was not found.
+ mainAddonBlocked: <int | null>,
+ // True if a malicious user.js file was found in the profile.
+ foundUserJS: <bool>,
+ // If a malicious secmodd.db file was found, the extension ID that
+ // the file contained.
+ secmoddAddon: <string | null>,
+ // A list of IDs for extensions which were hidden by malicious CSS.
+ hiddenAddons: [
+ <string>,
+ ...
+ ],
+ // A mapping of installed add-on IDs with known malicious
+ // update URL patterns to their exact update URLs.
+ updateURLs: {
+ <extensionID>: <updateURL>,
+ ...
+ }
+ }
+ }
diff --git a/toolkit/components/telemetry/docs/data/common-ping.rst b/toolkit/components/telemetry/docs/data/common-ping.rst
new file mode 100644
index 000000000..445557efd
--- /dev/null
+++ b/toolkit/components/telemetry/docs/data/common-ping.rst
@@ -0,0 +1,42 @@
+
+Common ping format
+==================
+
+This defines the top-level structure of a Telemetry ping.
+It contains basic information shared between different ping types, which enables proper storage and processing of the raw pings server-side.
+
+It also contains optional further information:
+
+* the :doc:`environment data <../data/environment>`, which contains important info to correlate the measurements against
+* the ``clientId``, a UUID identifying a profile and allowing user-oriented correlation of data
+
+*Note:* Both are not submitted with all ping types due to privacy concerns. This and the data that it can be correlated against are inspected under the `data collection policy <https://wiki.mozilla.org/Firefox/Data_Collection>`_.
+
+Finally, the structure also contains the `payload`, which is the specific data submitted for the respective *ping type*.
+
+Structure:
+
+.. code-block:: js
+
+ {
+ type: <string>, // "main", "activation", "deletion", "saved-session", ...
+ id: <UUID>, // a UUID that identifies this ping
+ creationDate: <ISO date>, // the date the ping was generated
+ version: <number>, // the version of the ping format, currently 4
+
+ application: {
+ architecture: <string>, // build architecture, e.g. x86
+ buildId: <string>, // "20141126041045"
+ name: <string>, // "Firefox"
+ version: <string>, // "35.0"
+ displayVersion: <string>, // "35.0b3"
+ vendor: <string>, // "Mozilla"
+ platformVersion: <string>, // "35.0"
+ xpcomAbi: <string>, // e.g. "x86-msvc"
+ channel: <string>, // "beta"
+ },
+
+ clientId: <UUID>, // optional
+ environment: { ... }, // optional, not all pings contain the environment
+ payload: { ... }, // the actual payload data for this ping type
+ }
diff --git a/toolkit/components/telemetry/docs/data/core-ping.rst b/toolkit/components/telemetry/docs/data/core-ping.rst
new file mode 100644
index 000000000..7f38f2f7e
--- /dev/null
+++ b/toolkit/components/telemetry/docs/data/core-ping.rst
@@ -0,0 +1,191 @@
+
+"core" ping
+============
+
+This mobile-specific ping is intended to provide the most critical
+data in a concise format, allowing for frequent uploads.
+
+Since this ping is used to measure retention, it should be sent
+each time the browser is opened.
+
+Submission will be per the Edge server specification::
+
+ /submit/telemetry/docId/docType/appName/appVersion/appUpdateChannel/appBuildID
+
+* ``docId`` is a UUID for deduping
+* ``docType`` is “core”
+* ``appName`` is “Fennec”
+* ``appVersion`` is the version of the application (e.g. "46.0a1")
+* ``appUpdateChannel`` is “release”, “beta”, etc.
+* ``appBuildID`` is the build number
+
+Note: Counts below (e.g. search & usage times) are “since the last
+ping”, not total for the whole application lifetime.
+
+Structure:
+
+.. code-block:: js
+
+ {
+ "v": 7, // ping format version
+ "clientId": <string>, // client id, e.g.
+ // "c641eacf-c30c-4171-b403-f077724e848a"
+ "seq": <positive integer>, // running ping counter, e.g. 3
+ "locale": <string>, // application locale, e.g. "en-US"
+ "os": <string>, // OS name.
+ "osversion": <string>, // OS version.
+ "device": <string>, // Build.MANUFACTURER + " - " + Build.MODEL
+ // where manufacturer is truncated to 12 characters
+ // & model is truncated to 19 characters
+ "arch": <string>, // e.g. "arm", "x86"
+ "profileDate": <pos integer>, // Profile creation date in days since
+ // UNIX epoch.
+ "defaultSearch": <string>, // Identifier of the default search engine,
+ // e.g. "yahoo".
+ "distributionId": <string>, // Distribution identifier (optional)
+ "created": <string>, // date the ping was created
+ // in local time, "yyyy-mm-dd"
+ "tz": <integer>, // timezone offset (in minutes) of the
+ // device when the ping was created
+ "sessions": <integer>, // number of sessions since last upload
+ "durations": <integer>, // combined duration, in seconds, of all
+ // sessions since last upload
+ "searches": <object>, // Optional, object of search use counts in the
+ // format: { "engine.source": <pos integer> }
+ // e.g.: { "yahoo.suggestion": 3, "other.listitem": 1 }
+ "experiments": [<string>, /* … */], // Optional, array of identifiers
+ // for the active experiments
+ }
+
+Field details
+-------------
+
+device
+~~~~~~
+The ``device`` field is filled in with information specified by the hardware
+manufacturer. As such, it could be excessively long and use excessive amounts
+of limited user data. To avoid this, we limit the length of the field. We're
+more likely to have collisions for models within a manufacturer (e.g. "Galaxy S5"
+vs. "Galaxy Note") than we are for shortened manufacturer names so we provide
+more characters for the model than the manufacturer.
+
+distributionId
+~~~~~~~~~~~~~~
+The ``distributionId`` contains the distribution ID as specified by
+preferences.json for a given distribution. More information on distributions
+can be found `here <https://wiki.mozilla.org/Mobile/Distribution_Files>`_.
+
+It is optional.
+
+defaultSearch
+~~~~~~~~~~~~~
+On Android, this field may be ``null``. To get the engine, we rely on
+``SearchEngineManager#getDefaultEngine``, which searches in several places in
+order to find the search engine identifier:
+
+* Shared Preferences
+* The distribution (if it exists)
+* The localized default engine
+
+If the identifier could not be retrieved, this field is ``null``. If the
+identifier is retrieved, we attempt to create an instance of the search
+engine from the search plugins (in order):
+
+* In the distribution
+* From the localized plugins shipped with the browser
+* The third-party plugins that are installed in the profile directory
+
+If the plugins fail to create a search engine instance, this field is also
+``null``.
+
+This field can also be ``null`` when a custom search engine is set as the
+default.
+
+sessions & durations
+~~~~~~~~~~~~~~~~~~~~
+On Android, a session is the time when Firefox is focused in the foreground.
+`sessions` tracks the number of sessions since the last upload and
+`durations` is the accumulated duration in seconds of all of these
+sessions. Note that showing a dialog (including a Firefox dialog) will
+take Firefox out of focus & end the current session.
+
+An implementation that records a session when Firefox is completely hidden is
+preferable (e.g. to avoid the dialog issue above), however, it's more complex
+to implement and so we chose not to, at least for the initial implementation.
+
+profileDate
+~~~~~~~~~~~
+On Android, this value is created at profile creation time and retrieved or,
+for legacy profiles, taken from the package install time (note: this is not the
+same exact metric as profile creation time but we compromised in favor of ease
+of implementation).
+
+Additionally on Android, this field may be ``null`` in the unlikely event that
+all of the following events occur:
+
+#. The times.json file does not exist
+#. The package install date could not be persisted to disk
+
+The reason we don't just return the package install time even if the date could
+not be persisted to disk is to ensure the value doesn't change once we start
+sending it: we only want to send consistent values.
+
+searches
+~~~~~~~~
+In the case a search engine is added by a user, the engine identifier "other" is used, e.g. "other.<source>".
+
+Sources in Android are based on the existing UI telemetry values and are as
+follows:
+
+* actionbar: the user types in the url bar and hits enter to use the default
+ search engine
+* listitem: the user selects a search engine from the list of secondary search
+ engines at the bottom of the screen
+* suggestion: the user clicks on a search suggestion or, in the case that
+ suggestions are disabled, the row corresponding with the main engine
+
+Other parameters
+----------------
+
+HTTP "Date" header
+~~~~~~~~~~~~~~~~~~
+This header is used to track the submission date of the core ping in the format
+specified by
+`rfc 2616 sec 14.18 <https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.18>`_,
+et al (e.g. "Tue, 01 Feb 2011 14:00:00 GMT").
+
+
+Version history
+---------------
+* v7: added ``sessionCount`` & ``sessionDuration``
+* v6: added ``searches``
+* v5: added ``created`` & ``tz``
+* v4: ``profileDate`` will return package install time when times.json is not available
+* v3: added ``defaultSearch``
+* v2: added ``distributionId``
+* v1: initial version
+
+Notes
+~~~~~
+
+* ``distributionId`` (v2) actually landed after ``profileDate`` (v4) but was
+ uplifted to 46, whereas ``profileDate`` landed on 47. The version numbers in
+ code were updated to be increasing (bug 1264492) and the version history docs
+ rearranged accordingly.
+
+Android implementation notes
+----------------------------
+On Android, the uploader has a high probability of delivering the complete data
+for a given client but not a 100% probability. This was a conscious decision to
+keep the code simple. The cases where we can lose data:
+
+* Resetting the field measurements (including incrementing the sequence number)
+ and storing a ping for upload are not atomic. Android can kill our process
+ for memory pressure in between these distinct operations so we can just lose
+ a ping's worth of data. That sequence number will be missing on the server.
+* If we exceed some number of pings on disk that have not yet been uploaded,
+ we remove old pings to save storage space. For those pings, we will lose
+ their data and their sequence numbers will be missing on the server.
+
+Note: we never expect to drop data without also dropping a sequence number so
+we are able to determine when data loss occurs.
diff --git a/toolkit/components/telemetry/docs/data/crash-ping.rst b/toolkit/components/telemetry/docs/data/crash-ping.rst
new file mode 100644
index 000000000..3cdbc6030
--- /dev/null
+++ b/toolkit/components/telemetry/docs/data/crash-ping.rst
@@ -0,0 +1,144 @@
+
+"crash" ping
+============
+
+This ping is captured after the main Firefox process crashes, whether or not the crash report is submitted to crash-stats.mozilla.org. It includes non-identifying metadata about the crash.
+
+The environment block that is sent with this ping varies: if Firefox was running long enough to record the environment block before the crash, then the environment at the time of the crash will be recorded and ``hasCrashEnvironment`` will be true. If Firefox crashed before the environment was recorded, ``hasCrashEnvironment`` will be false and the recorded environment will be the environment at time of submission.
+
+The client ID is submitted with this ping.
+
+Structure:
+
+.. code-block:: js
+
+ {
+ version: 1,
+ type: "crash",
+ ... common ping data
+ clientId: <UUID>,
+ environment: { ... },
+ payload: {
+ crashDate: "YYYY-MM-DD",
+ sessionId: <UUID>, // may be missing for crashes that happen early
+ // in startup. Added in Firefox 48 with the
+ // intention of uplifting to Firefox 46
+ crashId: <UUID>, // Optional, ID of the associated crash
+ stackTraces: { ... }, // Optional, see below
+ metadata: { // Annotations saved while Firefox was running. See nsExceptionHandler.cpp for more information
+ ProductName: "Firefox",
+ ReleaseChannel: <channel>,
+ Version: <version number>,
+ BuildID: "YYYYMMDDHHMMSS",
+ AvailablePageFile: <size>, // Windows-only, available paging file
+ AvailablePhysicalMemory: <size>, // Windows-only, available physical memory
+ AvailableVirtualMemory: <size>, // Windows-only, available virtual memory
+ BlockedDllList: <list>, // Windows-only, see WindowsDllBlocklist.cpp for details
+ BlocklistInitFailed: 1, // Windows-only, present only if the DLL blocklist initialization failed
+ CrashTime: <time>, // Seconds since the Epoch
+ ContainsMemoryReport: 1, // Optional
+ EventLoopNestingLevel: <levels>, // Optional, present only if >0
+ IsGarbageCollecting: 1, // Optional, present only if set to 1
+ MozCrashReason: <reason>, // Optional, contains the string passed to MOZ_CRASH()
+ OOMAllocationSize: <size>, // Size of the allocation that caused an OOM
+ SecondsSinceLastCrash: <duration>, // Seconds elapsed since the last crash occurred
+ SystemMemoryUsePercentage: <percentage>, // Windows-only, percent of memory in use
+ TelemetrySessionId: <id>, // Active telemetry session ID when the crash was recorded
+ TextureUsage: <usage>, // Optional, usage of texture memory in bytes
+ TotalPageFile: <size>, // Windows-only, paging file in use
+ TotalPhysicalMemory: <size>, // Windows-only, physical memory in use
+ TotalVirtualMemory: <size>, // Windows-only, virtual memory in use
+ UptimeTS: <duration>, // Seconds since Firefox was started
+ User32BeforeBlocklist: 1, // Windows-only, present only if user32.dll was loaded before the DLL blocklist has been initialized
+ },
+ hasCrashEnvironment: bool
+ }
+ }
+
+Stack Traces
+------------
+
+The crash ping may contain a ``stackTraces`` field which has been populated
+with stack traces for all threads in the crashed process. The format of this
+field is similar to the one used by Socorro for representing a crash. The main
+differences are that redundant fields are not stored and that the module a
+frame belongs to is referenced by index in the module array rather than by its
+file name.
+
+Note that this field does not contain data from the application; only bare
+stack traces and module lists are stored.
+
+.. code-block:: js
+
+ {
+ status: <string>, // Status of the analysis, "OK" or an error message
+ crash_info: { // Basic crash information
+ type: <string>, // Type of crash, SIGSEGV, assertion, etc...
+      address: <addr>,         // Crash address, hex format, see the notes below
+ crashing_thread: <index> // Index in the thread array below
+ },
+    main_module: <index>, // Index of Firefox's executable in the module list
+ modules: [{
+ base_addr: <addr>, // Base address of the module, hex format
+ end_addr: <addr>, // End address of the module, hex format
+ code_id: <string>, // Unique ID of this module, see the notes below
+ debug_file: <string>, // Name of the file holding the debug information
+ debug_id: <string>, // ID or hash of the debug information file
+ filename: <string>, // File name
+ version: <string>, // Library/executable version
+ },
+ ... // List of modules ordered by base memory address
+ ],
+ threads: [{ // Stack traces for every thread
+ frames: [{
+ module_index: <index>, // Index of the module this frame belongs to
+ ip: <ip>, // Program counter, hex format
+ trust: <string> // Trust of this frame, see the notes below
+ },
+ ... // List of frames, the first frame is the topmost
+ ]
+ }]
+ }
+
+Notes
+~~~~~
+
+Memory addresses and instruction pointers are always stored as strings in
+hexadecimal format (e.g. "0x4000"). They can be made of up to 16 characters for
+64-bit addresses.
+
+The crash type is both OS and CPU dependent and can be either a descriptive
+string (e.g. SIGSEGV, EXCEPTION_ACCESS_VIOLATION) or a raw numeric value. The
+crash address meaning depends on the type of crash. In a segmentation fault the
+crash address will be the memory address whose access caused the fault; in a
+crash triggered by an illegal instruction exception the address will be the
+instruction pointer where the invalid instruction resides.
+See `breakpad <https://chromium.googlesource.com/breakpad/breakpad/+/c99d374dde62654a024840accfb357b2851daea0/src/processor/minidump_processor.cc#675>`_'s
+relevant code for further information.
+
+Since it's not always possible to establish with certainty the address of the
+previous frame while walking the stack, every frame has a trust value that
+represents how it was found and thus how certain we are that it's a real frame.
+The trust levels are (from least trusted to most trusted):
+
++---------------+---------------------------------------------------+
+| Trust | Description |
++===============+===================================================+
+| context | Given as instruction pointer in a context |
++---------------+---------------------------------------------------+
+| prewalked | Explicitly provided by some external stack walker |
++---------------+---------------------------------------------------+
+| cfi | Derived from call frame info |
++---------------+---------------------------------------------------+
+| frame_pointer | Derived from frame pointer |
++---------------+---------------------------------------------------+
+| cfi_scan | Found while scanning stack using call frame info |
++---------------+---------------------------------------------------+
+| scan | Scanned the stack, found this |
++---------------+---------------------------------------------------+
+| none | Unknown, this is most likely not a valid frame |
++---------------+---------------------------------------------------+
+
+The ``code_id`` field holds a unique ID used to distinguish between different
+versions and builds of the same module. See `breakpad <https://chromium.googlesource.com/breakpad/breakpad/+/24f5931c5e0120982c0cbf1896641e3ef2bdd52f/src/google_breakpad/processor/code_module.h#60>`_'s
+description for further information. This field is populated only on Windows.
diff --git a/toolkit/components/telemetry/docs/data/deletion-ping.rst b/toolkit/components/telemetry/docs/data/deletion-ping.rst
new file mode 100644
index 000000000..c4523ce54
--- /dev/null
+++ b/toolkit/components/telemetry/docs/data/deletion-ping.rst
@@ -0,0 +1,19 @@
+
+"deletion" ping
+===============
+
+This ping is generated when a user turns off FHR upload from the Preferences panel, changing the related ``datareporting.healthreport.uploadEnabled`` preference. This requests that all associated data from that user be deleted.
+
+This ping contains the client id and no environment data.
+
+Structure:
+
+.. code-block:: js
+
+ {
+ version: 4,
+ type: "deletion",
+ ... common ping data
+ clientId: <UUID>,
+ payload: { }
+ } \ No newline at end of file
diff --git a/toolkit/components/telemetry/docs/data/environment.rst b/toolkit/components/telemetry/docs/data/environment.rst
new file mode 100644
index 000000000..ff0d204a4
--- /dev/null
+++ b/toolkit/components/telemetry/docs/data/environment.rst
@@ -0,0 +1,373 @@
+
+Environment
+===========
+
+The environment consists of data that is expected to be characteristic for performance and other behavior and not expected to change too often.
+
+Changes to most of these data points are detected (where possible and sensible) and will lead to a session split in the :doc:`main-ping`.
+The environment data may also be submitted by other ping types.
+
+*Note:* This is not submitted with all ping types due to privacy concerns. This and other data is inspected under the `data collection policy <https://wiki.mozilla.org/Firefox/Data_Collection>`_.
+
+Some parts of the environment must be fetched asynchronously at startup. We don't want other Telemetry components to block on waiting for the environment, so some items may be missing from it until the async fetching finished.
+This currently affects the following sections:
+
+- profile
+- addons
+
+
+Structure:
+
+.. code-block:: js
+
+ {
+ build: {
+ applicationId: <string>, // nsIXULAppInfo.ID
+ applicationName: <string>, // "Firefox"
+ architecture: <string>, // e.g. "x86", build architecture for the active build
+ architecturesInBinary: <string>, // e.g. "i386-x86_64", from nsIMacUtils.architecturesInBinary, only present for mac universal builds
+ buildId: <string>, // e.g. "20141126041045"
+ version: <string>, // e.g. "35.0"
+ vendor: <string>, // e.g. "Mozilla"
+ platformVersion: <string>, // e.g. "35.0"
+ xpcomAbi: <string>, // e.g. "x86-msvc"
+ hotfixVersion: <string>, // e.g. "20141211.01"
+ },
+ settings: {
+ addonCompatibilityCheckEnabled: <bool>, // Whether application compatibility is respected for add-ons
+ blocklistEnabled: <bool>, // true on failure
+ isDefaultBrowser: <bool>, // null on failure, not available on Android
+ defaultSearchEngine: <string>, // e.g. "yahoo"
+        defaultSearchEngineData: { // data about the current default engine
+ name: <string>, // engine name, e.g. "Yahoo"; or "NONE" if no default
+ loadPath: <string>, // where the engine line is located; missing if no default
+ origin: <string>, // 'default', 'verified', 'unverified', or 'invalid'; based on the presence and validity of the engine's loadPath verification hash.
+ submissionURL: <string> // missing if no default or for user-installed engines
+ },
+ searchCohort: <string>, // optional, contains an identifier for any active search A/B experiments
+ e10sEnabled: <bool>, // whether e10s is on, i.e. browser tabs open by default in a different process
+ e10sCohort: <string>, // which e10s cohort was assigned for this user
+ telemetryEnabled: <bool>, // false on failure
+ locale: <string>, // e.g. "it", null on failure
+ update: {
+ channel: <string>, // e.g. "release", null on failure
+ enabled: <bool>, // true on failure
+ autoDownload: <bool>, // true on failure
+ },
+ userPrefs: {
+ // Only prefs which are changed from the default value are listed
+ // in this block
+ "pref.name.value": value // some prefs send the value
+ "pref.name.url": "<user-set>" // For some privacy-sensitive prefs
+ // only the fact that the value has been changed is recorded
+ },
+ attribution: { // optional, only present if the installation has attribution data
+ // all of these values are optional.
+ source: <string>, // referring partner domain, when install happens via a known partner
+ medium: <string>, // category of the source, such as "organic" for a search engine
+ campaign: <string>, // identifier of the particular campaign that led to the download of the product
+ content: <string>, // identifier to indicate the particular link within a campaign
+ },
+ },
+ profile: {
+ creationDate: <integer>, // integer days since UNIX epoch, e.g. 16446
+ resetDate: <integer>, // integer days since UNIX epoch, e.g. 16446 - optional
+ },
+ partner: { // This section may not be immediately available on startup
+ distributionId: <string>, // pref "distribution.id", null on failure
+ distributionVersion: <string>, // pref "distribution.version", null on failure
+ partnerId: <string>, // pref mozilla.partner.id, null on failure
+ distributor: <string>, // pref app.distributor, null on failure
+ distributorChannel: <string>, // pref app.distributor.channel, null on failure
+ partnerNames: [
+ // list from prefs app.partner.<name>=<name>
+ ],
+ },
+ system: {
+ memoryMB: <number>,
+ virtualMaxMB: <number>, // windows-only
+ isWow64: <bool>, // windows-only
+ cpu: {
+ count: <number>, // desktop only, e.g. 8, or null on failure - logical cpus
+ cores: <number>, // desktop only, e.g., 4, or null on failure - physical cores
+ vendor: <string>, // desktop only, e.g. "GenuineIntel", or null on failure
+ family: <number>, // desktop only, null on failure
+            model: <number>, // desktop only, null on failure
+ stepping: <number>, // desktop only, null on failure
+ l2cacheKB: <number>, // L2 cache size in KB, only on windows & mac
+ l3cacheKB: <number>, // desktop only, L3 cache size in KB
+ speedMHz: <number>, // desktop only, cpu clock speed in MHz
+ extensions: [
+ <string>,
+ ...
+ // as applicable:
+ // "MMX", "SSE", "SSE2", "SSE3", "SSSE3", "SSE4A", "SSE4_1",
+ // "SSE4_2", "AVX", "AVX2", "EDSP", "ARMv6", "ARMv7", "NEON"
+ ],
+ },
+ device: { // This section is only available on mobile devices.
+ model: <string>, // the "device" from FHR, null on failure
+ manufacturer: <string>, // null on failure
+ hardware: <string>, // null on failure
+ isTablet: <bool>, // null on failure
+ },
+ os: {
+ name: <string>, // "Windows_NT" or null on failure
+ version: <string>, // e.g. "6.1", null on failure
+ kernelVersion: <string>, // android/b2g only or null on failure
+ servicePackMajor: <number>, // windows only or null on failure
+ servicePackMinor: <number>, // windows only or null on failure
+ windowsBuildNumber: <number>, // windows 10 only or null on failure
+ windowsUBR: <number>, // windows 10 only or null on failure
+ installYear: <number>, // windows only or null on failure
+ locale: <string>, // "en" or null on failure
+ },
+ hdd: {
+ profile: { // hdd where the profile folder is located
+ model: <string>, // windows only or null on failure
+ revision: <string>, // windows only or null on failure
+ },
+ binary: { // hdd where the application binary is located
+ model: <string>, // windows only or null on failure
+ revision: <string>, // windows only or null on failure
+ },
+ system: { // hdd where the system files are located
+ model: <string>, // windows only or null on failure
+ revision: <string>, // windows only or null on failure
+ },
+ },
+ gfx: {
+ D2DEnabled: <bool>, // null on failure
+ DWriteEnabled: <bool>, // null on failure
+ //DWriteVersion: <string>, // temporarily removed, pending bug 1154500
+ adapters: [
+ {
+ description: <string>, // e.g. "Intel(R) HD Graphics 4600", null on failure
+ vendorID: <string>, // null on failure
+ deviceID: <string>, // null on failure
+ subsysID: <string>, // null on failure
+ RAM: <number>, // in MB, null on failure
+ driver: <string>, // null on failure
+ driverVersion: <string>, // null on failure
+ driverDate: <string>, // null on failure
+ GPUActive: <bool>, // currently always true for the first adapter
+ },
+ ...
+ ],
+ // Note: currently only added on Desktop. On Linux, only a single
+ // monitor is returned representing the entire virtual screen.
+ monitors: [
+ {
+ screenWidth: <number>, // screen width in pixels
+ screenHeight: <number>, // screen height in pixels
+ refreshRate: <number>, // refresh rate in hertz (present on Windows only).
+ // (values <= 1 indicate an unknown value)
+ pseudoDisplay: <bool>, // networked screen (present on Windows only)
+ scale: <number>, // backing scale factor (present on Mac only)
+ },
+ ...
+ ],
+ features: {
+ compositor: <string>, // Layers backend for compositing (eg "d3d11", "none", "opengl")
+
+          // Each of the following features can have one of the following statuses:
+ // "unused" - This feature has not been requested.
+ // "unavailable" - Safe Mode or OS restriction prevents use.
+ // "blocked" - Blocked due to an internal condition such as safe mode.
+ // "blacklisted" - Blocked due to a blacklist restriction.
+ // "disabled" - User explicitly disabled this default feature.
+ // "failed" - This feature was attempted but failed to initialize.
+ // "available" - User has this feature available.
+ "d3d11" { // This feature is Windows-only.
+ status: <string>,
+ warp: <bool>, // Software rendering (WARP) mode was chosen.
+ textureSharing: <bool> // Whether or not texture sharing works.
+ version: <number>, // The D3D11 device feature level.
+ blacklisted: <bool>, // Whether D3D11 is blacklisted; use to see whether WARP
+ // was blacklist induced or driver-failure induced.
+ },
+ "d2d" { // This feature is Windows-only.
+ status: <string>,
+ version: <string>, // Either "1.0" or "1.1".
+ },
+ },
+ },
+ },
+ addons: {
+ activeAddons: { // the currently enabled addons
+ <addon id>: {
+ blocklisted: <bool>,
+ description: <string>, // null if not available
+ name: <string>,
+ userDisabled: <bool>,
+ appDisabled: <bool>,
+ version: <string>,
+ scope: <integer>,
+ type: <string>, // "extension", "service", ...
+ foreignInstall: <bool>,
+ hasBinaryComponents: <bool>
+ installDay: <number>, // days since UNIX epoch, 0 on failure
+ updateDay: <number>, // days since UNIX epoch, 0 on failure
+ signedState: <integer>, // whether the add-on is signed by AMO, only present for extensions
+ isSystem: <bool>, // true if this is a System Add-on
+ },
+ ...
+ },
+ theme: { // the active theme
+ id: <string>,
+ blocklisted: <bool>,
+ description: <string>,
+ name: <string>,
+ userDisabled: <bool>,
+ appDisabled: <bool>,
+ version: <string>,
+ scope: <integer>,
+ foreignInstall: <bool>,
+ hasBinaryComponents: <bool>
+ installDay: <number>, // days since UNIX epoch, 0 on failure
+ updateDay: <number>, // days since UNIX epoch, 0 on failure
+ },
+ activePlugins: [
+ {
+ name: <string>,
+ version: <string>,
+ description: <string>,
+ blocklisted: <bool>,
+ disabled: <bool>,
+ clicktoplay: <bool>,
+ mimeTypes: [<string>, ...],
+ updateDay: <number>, // days since UNIX epoch, 0 on failure
+ },
+ ...
+ ],
+ activeGMPlugins: {
+ <gmp id>: {
+ version: <string>,
+ userDisabled: <bool>,
+ applyBackgroundUpdates: <integer>,
+ },
+ ...
+ },
+ activeExperiment: { // section is empty if there's no active experiment
+ id: <string>, // id
+ branch: <string>, // branch name
+ },
+ persona: <string>, // id of the current persona, null on GONK
+ },
+ }
+
+build
+-----
+
+buildId
+~~~~~~~
+Firefox builds downloaded from mozilla.org use a 14-digit buildId. Builds included in other distributions may have a different format (e.g. only 10 digits).
+
+Settings
+--------
+
+defaultSearchEngine
+~~~~~~~~~~~~~~~~~~~
+Note: Deprecated, use defaultSearchEngineData instead.
+
+Contains the string identifier or name of the default search engine provider. This will not be present in environment data collected before the Search Service initialization.
+
+The special value ``NONE`` could occur if there is no default search engine.
+
+The special value ``UNDEFINED`` could occur if a default search engine exists but its identifier could not be determined.
+
+This field's contents are ``Services.search.defaultEngine.identifier`` (if defined) or ``"other-"`` + ``Services.search.defaultEngine.name`` if not. In other words, search engines without an ``.identifier`` are prefixed with ``other-``.
+
+defaultSearchEngineData
+~~~~~~~~~~~~~~~~~~~~~~~
+Contains data identifying the engine currently set as the default.
+
+The object contains:
+
+- a ``name`` property with the name of the engine, or ``NONE`` if no
+ engine is currently set as the default.
+
+- a ``loadPath`` property: an anonymized path of the engine xml file, e.g.
+ jar:[app]/omni.ja!browser/engine.xml
+ (where 'browser' is the name of the chrome package, not a folder)
+ [profile]/searchplugins/engine.xml
+ [distribution]/searchplugins/common/engine.xml
+ [other]/engine.xml
+
+- an ``origin`` property: the value will be ``default`` for engines that are built-in or from distribution partners, ``verified`` for user-installed engines with valid verification hashes, ``unverified`` for non-default engines without verification hash, and ``invalid`` for engines with broken verification hashes.
+
+- a ``submissionURL`` property with the HTTP url we would use to search.
+ For privacy, we don't record this for user-installed engines.
+
+``loadPath`` and ``submissionURL`` are not present if ``name`` is ``NONE``.
+
+searchCohort
+~~~~~~~~~~~~
+
+If the user has been enrolled into a search default change experiment, this contains the string identifying the experiment the user is taking part in. Most user profiles will never be part of any search default change experiment, and will not send this value.
+
+userPrefs
+~~~~~~~~~
+
+This object contains user preferences.
+
+Each key in the object is the name of a preference. A key's value depends on the policy with which the preference was collected. There are two such policies, "value" and "state". For preferences collected under the "value" policy, the value will be the preference's value. For preferences collected under the "state" policy, the value will be an opaque marker signifying only that the preference has a user value. The "state" policy is therefore used when user privacy is a concern.
+
+The following is a partial list of collected preferences.
+
+- ``browser.search.suggest.enabled``: The "master switch" for search suggestions everywhere in Firefox (search bar, urlbar, etc.). Defaults to true.
+
+- ``browser.urlbar.suggest.searches``: True if search suggestions are enabled in the urlbar. Defaults to false.
+
+- ``browser.urlbar.userMadeSearchSuggestionsChoice``: True if the user has clicked Yes or No in the urlbar's opt-in notification. Defaults to false.
+
+- ``browser.zoom.full``: True if zoom is enabled for both text and images, that is if "Zoom Text Only" is not enabled. Defaults to true. Collection of this preference has been enabled in Firefox 50 and will be disabled again in Firefox 53 (`Bug 979323 <https://bugzilla.mozilla.org/show_bug.cgi?id=979323>`_).
+
+- ``security.sandbox.content.level``: The meanings of the values are OS dependent, but 0 means not sandboxed for all OS. Details of the meanings can be found in the `Firefox prefs file <http://hg.mozilla.org/mozilla-central/file/tip/browser/app/profile/firefox.js>`_.
+
+attribution
+~~~~~~~~~~~
+
+This object contains the attribution data for the product installation.
+
+Attribution data is used to link installations of Firefox with the source that the user arrived at the Firefox download page from. It would indicate, for instance, when a user executed a web search for Firefox and arrived at the download page from there, directly navigated to the site, clicked on a link from a particular social media campaign, etc.
+
+The attribution data is included in some versions of the default Firefox installer for Windows (the "stub" installer) and stored as part of the installation. All platforms other than Windows and also Windows installations that did not use the stub installer do not have this data and will not include the ``attribution`` object.
+
+partner
+-------
+
+If the user is using a partner repack, this contains information identifying the repack being used, otherwise "partnerNames" will be an empty array and other entries will be null. The information may be missing when the profile just becomes available. In Firefox for desktop, the information along with other customizations defined in distribution.ini are processed later in the startup phase, and will be fully applied when "distribution-customization-complete" notification is sent.
+
+Distributions are most reliably identified by the ``distributionId`` field. Partner information can be found in the `partner repacks <https://github.com/mozilla-partners>`_ (`the old one <http://hg.mozilla.org/build/partner-repacks/>`_ is deprecated): it contains one private repository per partner.
+Important values for ``distributionId`` include:
+
+- "MozillaOnline" for the Mozilla China repack.
+- "canonical", for the `Ubuntu Firefox repack <http://bazaar.launchpad.net/~mozillateam/firefox/firefox.trusty/view/head:/debian/distribution.ini>`_.
+- "yandex", for the Firefox Build by Yandex.
+
+system
+------
+
+os
+~~
+
+This object contains operating system information.
+
+- ``name``: the name of the OS.
+- ``version``: a string representing the OS version.
+- ``kernelVersion``: an Android/B2G only string representing the kernel version.
+- ``servicePackMajor``: the Windows only major version number for the installed service pack.
+- ``servicePackMinor``: the Windows only minor version number for the installed service pack.
+- ``windowsBuildNumber``: the Windows build number, only available for Windows >= 10.
+- ``windowsUBR``: the Windows UBR number, only available for Windows >= 10. This value is incremented by Windows cumulative updates patches.
+- ``installYear``: the Windows only integer representing the year the OS was installed.
+- ``locale``: the string representing the OS locale.
+
+addons
+------
+
+activeAddons
+~~~~~~~~~~~~
+
+Starting from Firefox 44, the length of the following string fields: ``name``, ``description`` and ``version`` is limited to 100 characters. The same limitation applies to the same fields in ``theme`` and ``activePlugins``.
diff --git a/toolkit/components/telemetry/docs/data/heartbeat-ping.rst b/toolkit/components/telemetry/docs/data/heartbeat-ping.rst
new file mode 100644
index 000000000..413da0376
--- /dev/null
+++ b/toolkit/components/telemetry/docs/data/heartbeat-ping.rst
@@ -0,0 +1,63 @@
+
+"heartbeat" ping
+=================
+
+This ping is submitted after a Firefox Heartbeat survey. Even if the user exits
+the browser, closes the survey window, or ignores the survey, Heartbeat will
+provide a ping to Telemetry for sending during the same session.
+
+The payload contains the user's survey response (if any) as well as timestamps
+of various Heartbeat events (survey shown, survey closed, link clicked, etc).
+
+The ping will also report the "surveyId", "surveyVersion" and "testing"
+Heartbeat survey parameters (if they are present in the survey config).
+These "meta fields" will be repeated verbatim in the payload section.
+
+The environment block and client ID are submitted with this ping.
+
+Structure:
+
+.. code-block:: js
+
+ {
+ type: "heartbeat",
+ version: 4,
+ clientId: <UUID>,
+ environment: { /* ... */ }
+ // ... common ping data
+ payload: {
+ version: 1,
+ flowId: <string>,
+ ... timestamps below ...
+ offeredTS: <integer epoch timestamp>,
+ learnMoreTS: <integer epoch timestamp>,
+ votedTS: <integer epoch timestamp>,
+ engagedTS: <integer epoch timestamp>,
+ closedTS: <integer epoch timestamp>,
+ expiredTS: <integer epoch timestamp>,
+ windowClosedTS: <integer epoch timestamp>,
+ // ... user's rating below
+ score: <integer>,
+ // ... survey meta fields below
+ surveyId: <string>,
+ surveyVersion: <integer>,
+ testing: <boolean>
+ }
+ }
+
+Notes:
+
+* Pings will **NOT** have all possible timestamps, timestamps are only reported for events that actually occurred.
+* Timestamp meanings:
+ * offeredTS: when the survey was shown to the user
+ * learnMoreTS: when the user clicked on the "Learn More" link
+ * votedTS: when the user voted
+ * engagedTS: when the user clicked on the survey-provided button (alternative to voting feature)
+ * closedTS: when the Heartbeat notification bar was closed
+ * expiredTS: indicates that the survey expired after 2 hours of no interaction (threshold regulated by "browser.uitour.surveyDuration" pref)
+ * windowClosedTS: the user closed the entire Firefox window containing the survey, thus ending the survey. This timestamp will also be reported when the survey is ended by the browser being shut down.
+* The surveyId/surveyVersion fields identify a specific survey (like a "1040EZ" tax paper form). The flowID is a UUID that uniquely identifies a single user's interaction with the survey. Think of it as a session token.
+* The self-support page cannot include additional data in this payload. Only the 4 flowId/surveyId/surveyVersion/testing fields are under the self-support page's control.
+
+See also: :doc:`common ping fields <common-ping>`
+
diff --git a/toolkit/components/telemetry/docs/data/index.rst b/toolkit/components/telemetry/docs/data/index.rst
new file mode 100644
index 000000000..a0467e9a1
--- /dev/null
+++ b/toolkit/components/telemetry/docs/data/index.rst
@@ -0,0 +1,18 @@
+==================
+Data documentation
+==================
+
+.. toctree::
+ :maxdepth: 2
+ :titlesonly:
+ :glob:
+
+ common-ping
+ environment
+ main-ping
+ deletion-ping
+ crash-ping
+ *-ping
+ addons-malware-ping
+
+The `mozilla-pipeline-schemas repository <https://github.com/mozilla-services/mozilla-pipeline-schemas/>`_ contains schemas for some of the pings.
diff --git a/toolkit/components/telemetry/docs/data/main-ping.rst b/toolkit/components/telemetry/docs/data/main-ping.rst
new file mode 100644
index 000000000..445090af9
--- /dev/null
+++ b/toolkit/components/telemetry/docs/data/main-ping.rst
@@ -0,0 +1,609 @@
+
+"main" ping
+===========
+
+.. toctree::
+ :maxdepth: 2
+
+This is the "main" Telemetry ping type, whose payload contains most of the measurements that are used to track the performance and health of Firefox in the wild.
+It includes the histograms and other performance and diagnostic data.
+
+This ping is triggered by different scenarios, which is documented by the ``reason`` field:
+
+* ``aborted-session`` - this ping is regularly saved to disk (every 5 minutes), overwriting itself, and deleted at shutdown. If a previous aborted session ping is found at startup, it gets sent to the server. The first aborted-session ping is generated as soon as Telemetry starts
+* ``environment-change`` - the :doc:`environment` changed, so the session measurements got reset and a new subsession starts
+* ``shutdown`` - triggered when the browser session ends
+* ``daily`` - a session split triggered in 24-hour intervals at local midnight. If an ``environment-change`` ping is generated by the time it should be sent, the daily ping is rescheduled for the next midnight
+* ``saved-session`` - the *"classic"* Telemetry payload with measurements covering the whole browser session (only submitted for a transition period)
+
+Most reasons lead to a session split, initiating a new *subsession*. We reset important measurements for those subsessions.
+
+After a new subsession split, the ``internal-telemetry-after-subsession-split`` topic is notified to all the observers. *This is an internal topic and is only meant for internal Telemetry usage.*
+
+*Note:* ``saved-session`` is sent with a different ping type (``saved-session``, not ``main``), but otherwise has the same format as discussed here.
+
+Structure:
+
+.. code-block:: js
+
+ {
+ version: 4,
+
+ info: {
+ reason: <string>, // what triggered this ping: "saved-session", "environment-change", "shutdown", ...
+ revision: <string>, // the Histograms.json revision
+ timezoneOffset: <integer>, // time-zone offset from UTC, in minutes, for the current locale
+ previousBuildId: <string>, // null if this is the first run, or the previous build ID is unknown
+
+ sessionId: <uuid>, // random session id, shared by subsessions
+ subsessionId: <uuid>, // random subsession id
+ previousSessionId: <uuid>, // session id of the previous session, null on first run.
+ previousSubsessionId: <uuid>, // subsession id of the previous subsession (even if it was in a different session),
+ // null on first run.
+
+ subsessionCounter: <unsigned integer>, // the running no. of this subsession since the start of the browser session
+ profileSubsessionCounter: <unsigned integer>, // the running no. of all subsessions for the whole profile life time
+
+ sessionStartDate: <ISO date>, // daily precision
+ subsessionStartDate: <ISO date>, // daily precision, ISO date in local time
+ sessionLength: <integer>, // the session length until now in seconds, monotonic
+ subsessionLength: <integer>, // the subsession length in seconds, monotonic
+
+ flashVersion: <string>, // obsolete, use ``environment.addons.activePlugins``
+ addons: <string>, // obsolete, use ``environment.addons``
+ },
+
+ processes: {...},
+ childPayloads: [...], // only present with e10s; reduced payloads from content processes, null on failure
+ simpleMeasurements: {...},
+
+ // The following properties may all be null if we fail to collect them.
+ histograms: {...},
+ keyedHistograms: {...},
+ chromeHangs: {...},
+ threadHangStats: [...],
+ log: [...],
+ webrtc: {...},
+ gc: {...},
+ fileIOReports: {...},
+ lateWrites: {...},
+ addonDetails: {...},
+ addonHistograms: {...},
+ UIMeasurements: [...],
+ slowSQL: {...},
+ slowSQLstartup: {...},
+ }
+
+info
+----
+
+sessionLength
+~~~~~~~~~~~~~
+The length of the current session so far in seconds.
+This uses a monotonic clock, so this may mismatch with other measurements that
+are not monotonic like calculations based on ``Date.now()``.
+
+If the monotonic clock failed, this will be ``-1``.
+
+subsessionLength
+~~~~~~~~~~~~~~~~
+The length of this subsession in seconds.
+This uses a monotonic clock, so this may mismatch with other measurements that are not monotonic (e.g. based on Date.now()).
+
+If ``sessionLength`` is ``-1``, the monotonic clock is not working.
+
+processes
+---------
+This section contains per-process data.
+
+Structure:
+
+.. code-block:: js
+
+ "processes" : {
+ ... other processes ...
+ "parent": {
+ scalars: {...},
+ },
+ "content": {
+ histograms: {...},
+ keyedHistograms: {...},
+ },
+ }
+
+histograms and keyedHistograms
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+This section contains histograms and keyed histograms accumulated on content processes. Histograms recorded on a content child process have different character than parent histograms. For instance, ``GC_MS`` will be much different in ``processes.content`` as it has to contend with web content, whereas the instance in ``payload.histograms`` has only to contend with browser JS. Also, some histograms may be absent if never recorded on a content child process (``EVENTLOOP_UI_ACTIVITY`` is parent-process-only).
+
+This format was adopted in Firefox 51 via bug 1218576.
+
+scalars
+~~~~~~~
+This section contains the :doc:`../collection/scalars` that are valid for the current platform. Scalars are not created nor submitted if no data was added to them, and are only reported with subsession pings. Scalar data is only currently reported for the main process. Their type and format is described by the ``Scalars.yaml`` file. Its most recent version is available `here <https://dxr.mozilla.org/mozilla-central/source/toolkit/components/telemetry/Scalars.yaml>`_. The ``info.revision`` field indicates the revision of the file that describes the reported scalars.
+
+childPayloads
+-------------
+The Telemetry payloads sent by child processes, recorded on child process shutdown (event ``content-child-shutdown`` observed). They are reduced session payloads, only available with e10s. Among some other things, they don't contain histograms, keyed histograms, addon details, addon histograms, or UI Telemetry.
+
+Note: Child payloads are not collected and cleared with subsession splits, they are currently only meaningful when analysed from ``saved-session`` or ``main`` pings with ``reason`` set to ``shutdown``.
+
+Note: Before Firefox 51 and bug 1218576, content process histograms and keyedHistograms were in the individual child payloads instead of being aggregated into ``processes.content``.
+
+simpleMeasurements
+------------------
+This section contains a list of simple measurements, or counters. In addition to the ones highlighted below, Telemetry timestamps (see `here <https://dxr.mozilla.org/mozilla-central/search?q=%22TelemetryTimestamps.add%22&redirect=false&case=true>`_ and `here <https://dxr.mozilla.org/mozilla-central/search?q=%22recordTimestamp%22&redirect=false&case=true>`_) can be reported.
+
+totalTime
+~~~~~~~~~
+A non-monotonic integer representing the number of seconds the session has been alive.
+
+uptime
+~~~~~~
+A non-monotonic integer representing the number of minutes the session has been alive.
+
+addonManager
+~~~~~~~~~~~~
+Only available in the extended set of measures, it contains a set of counters related to Addons. See `here <https://dxr.mozilla.org/mozilla-central/search?q=%22AddonManagerPrivate.recordSimpleMeasure%22&redirect=false&case=true>`_ for a list of recorded measures.
+
+UITelemetry
+~~~~~~~~~~~
+Only available in the extended set of measures. For more see :ref:`uitelemetry`.
+
+startupInterrupted
+~~~~~~~~~~~~~~~~~~
+A boolean set to true if startup was interrupted by an interactive prompt.
+
+js
+~~
+This section contains a series of counters from the JavaScript engine.
+
+Structure:
+
+.. code-block:: js
+
+ "js" : {
+ "setProto": <unsigned integer>, // Number of times __proto__ is set
+ "customIter": <unsigned integer> // Number of times __iterator__ is used (i.e., is found for a for-in loop)
+ }
+
+maximalNumberOfConcurrentThreads
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+An integer representing the highest number of threads encountered so far during the session.
+
+startupSessionRestoreReadBytes
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Windows-only integer representing the number of bytes read by the main process up until the session store has finished restoring the windows.
+
+startupSessionRestoreWriteBytes
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Windows-only integer representing the number of bytes written by the main process up until the session store has finished restoring the windows.
+
+startupWindowVisibleReadBytes
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Windows-only integer representing the number of bytes read by the main process up until after a XUL window is made visible.
+
+startupWindowVisibleWriteBytes
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Windows-only integer representing the number of bytes written by the main process up until after a XUL window is made visible.
+
+debuggerAttached
+~~~~~~~~~~~~~~~~
+A boolean set to true if a debugger is attached to the main process.
+
+shutdownDuration
+~~~~~~~~~~~~~~~~
+The time, in milliseconds, it took to complete the last shutdown.
+
+failedProfileLockCount
+~~~~~~~~~~~~~~~~~~~~~~
+The number of times the system failed to lock the user profile.
+
+savedPings
+~~~~~~~~~~
+Integer count of the number of pings that need to be sent.
+
+activeTicks
+~~~~~~~~~~~
+Integer count of the number of five-second intervals ('ticks') the user was considered 'active' (sending UI events to the window). An extra event is fired immediately when the user becomes active after being inactive. This is for some mouse and gamepad events, and all touch, keyboard, wheel, and pointer events (see `EventStateManager.cpp <https://dxr.mozilla.org/mozilla-central/rev/e6463ae7eda2775bc84593bb4a0742940bb87379/dom/events/EventStateManager.cpp#549>`_).
+This measure might be useful to give a trend of how much a user actually interacts with the browser when compared to overall session duration. It does not take into account whether or not the window has focus or is in the foreground. Just if it is receiving these interaction events.
+Note that in ``main`` pings, this measure is reset on subsession splits, while in ``saved-session`` pings it covers the whole browser session.
+
+pingsOverdue
+~~~~~~~~~~~~
+Integer count of pending pings that are overdue.
+
+histograms
+----------
+This section contains the histograms that are valid for the current platform. ``Flag`` and ``count`` histograms are always created and submitted, with their default value being respectively ``false`` and ``0``. Other histogram types (`see here <https://developer.mozilla.org/en-US/docs/Mozilla/Performance/Adding_a_new_Telemetry_probe#Choosing_a_Histogram_Type>`_) are not created nor submitted if no data was added to them. The type and format of the reported histograms is described by the ``Histograms.json`` file. Its most recent version is available `here <https://dxr.mozilla.org/mozilla-central/source/toolkit/components/telemetry/Histograms.json>`_. The ``info.revision`` field indicates the revision of the file that describes the reported histograms.
+
+keyedHistograms
+---------------
+This section contains the keyed histograms available for the current platform.
+
+As of Firefox 48, this section does not contain empty keyed histograms anymore.
+
+threadHangStats
+---------------
+Contains the statistics about the hangs in main and background threads. Note that hangs in this section capture the `C++ pseudostack <https://developer.mozilla.org/en-US/docs/Mozilla/Performance/Profiling_with_the_Built-in_Profiler#Native_stack_vs._Pseudo_stack>`_ and an incomplete JS stack, which is not 100% precise.
+
+To avoid submitting overly large payloads, some limits are applied:
+
+* Identical, adjacent "(chrome script)" or "(content script)" stack entries are collapsed together. If a stack is reduced, the "(reduced stack)" frame marker is added as the oldest frame.
+* The depth of the reported stacks is limited to 11 entries. This value represents the 99.9th percentile of the thread hangs stack depths reported by Telemetry.
+
+Structure:
+
+.. code-block:: js
+
+ "threadHangStats" : [
+ {
+ "name" : "Gecko",
+ "activity" : {...}, // a time histogram of all task run times
+ "hangs" : [
+ {
+ "stack" : [
+ "Startup::XRE_Main",
+ "Timer::Fire",
+ "(content script)",
+ "IPDL::PPluginScriptableObject::SendGetChildProperty",
+ ... up to 11 frames ...
+ ],
+ "nativeStack": [...], // optionally available
+ "histogram" : {...}, // the time histogram of the hang times
+ "annotations" : [
+ {
+ "pluginName" : "Shockwave Flash",
+ "pluginVersion" : "18.0.0.209"
+ },
+ ... other annotations ...
+ ]
+ },
+ ],
+ },
+ ... other threads ...
+ ]
+
+chromeHangs
+-----------
+Contains the statistics about the hangs happening exclusively on the main thread of the parent process. Precise C++ stacks are reported. This is only available on Nightly Release on Windows, when building using "--enable-profiling" switch.
+
+Some limits are applied:
+
+* Reported chrome hang stacks are limited in depth to 50 entries.
+* The maximum number of reported stacks is 50.
+
+Structure:
+
+.. code-block:: js
+
+ "chromeHangs" : {
+ "memoryMap" : [
+ ["wgdi32.pdb", "08A541B5942242BDB4AEABD8C87E4CFF2"],
+ ["igd10iumd32.pdb", "D36DEBF2E78149B5BE1856B772F1C3991"],
+ ... other entries in the format ["module name", "breakpad identifier"] ...
+ ],
+ "stacks" : [
+ [
+ [
+ 0, // the module index or -1 for invalid module indices
+ 190649 // the offset of this program counter in its module or an absolute pc
+ ],
+ [1, 2540075],
+ ... other frames, up to 50 ...
+ ],
+ ... other stacks, up to 50 ...
+ ],
+ "durations" : [8, ...], // the hang durations (in seconds)
+ "systemUptime" : [692, ...], // the system uptime (in minutes) at the time of the hang
+ "firefoxUptime" : [672, ...], // the Firefox uptime (in minutes) at the time of the hang
+ "annotations" : [
+ [
+ [0, ...], // the indices of the related hangs
+ {
+ "pluginName" : "Shockwave Flash",
+ "pluginVersion" : "18.0.0.209",
+ ... other annotations as key:value pairs ...
+ }
+ ],
+ ...
+ ]
+ },
+
+log
+---
+This section contains a log of important or unusual events reported through Telemetry.
+
+Structure:
+
+.. code-block:: js
+
+ "log": [
+ [
+ "Event_ID",
+ 3785, // the timestamp (in milliseconds) for the log entry
+ ... other data ...
+ ],
+ ...
+ ]
+
+
+webrtc
+------
+Contains special statistics gathered by WebRTC related components.
+
+So far only a bitmask for the ICE candidate type present in a successful or
+failed WebRTC connection is getting reported through C++ code as
+IceCandidatesStats, because the required bitmask is too big to be represented
+in a regular enum histogram. Further this data differentiates between Loop
+(aka Firefox Hello) connections and everything else, which is categorized as
+WebRTC.
+
+Note: in most cases the webrtc and loop dictionaries inside of
+IceCandidatesStats will simply be empty as the user has not used any WebRTC
+PeerConnection at all during the ping report time.
+
+Structure:
+
+.. code-block:: js
+
+ "webrtc": {
+ "IceCandidatesStats": {
+ "webrtc": {
+ "34526345": {
+ "successCount": 5
+ },
+ "2354353": {
+ "failureCount": 1
+ }
+ },
+ "loop": {
+ "2349346359": {
+ "successCount": 3
+ },
+ "73424": {
+ "successCount": 1,
+ "failureCount": 5
+ }
+ }
+ }
+ },
+
+gc
+--
+Contains statistics about selected garbage collections. To avoid
+bloating the ping, only a few GCs are included. There are two
+selection strategies. We always save the two GCs with the worst
+max_pause time. Additionally, in content processes, two collections
+are selected at random. If a GC runs for C milliseconds and the total
+time for all GCs since the session began is T milliseconds, then the
+GC has a C/T probability of being selected for one of these "slots".
+
+Structure:
+
+.. code-block:: js
+
+ "gc": {
+ "random": [
+ {
+ // Timestamps are in milliseconds since startup. All the times here
+ // are wall-clock times, which may not be monotonically increasing.
+ "timestamp": 294872.2,
+ // All durations are in milliseconds.
+ "max_pause": 73.629,
+ "total_time": 364.951, // Sum of all slice times.
+ "zones_collected": 9,
+ "total_zones": 9,
+ "total_compartments": 309,
+ "minor_gcs": 44,
+ "store_buffer_overflows": 19,
+ "mmu_20ms": 0,
+ "mmu_50ms": 0,
+ // Reasons include "None", "NonIncrementalRequested",
+ // "AbortRequested", "KeepAtomsSet", "IncrementalDisabled",
+ // "ModeChange", "MallocBytesTrigger", "GCBytesTrigger",
+ // "ZoneChange".
+ "nonincremental_reason": "None",
+ "allocated": 37, // In megabytes.
+ "added_chunks": 54,
+ "removed_chunks": 12,
+ // Total number of slices (some of which may not appear
+ // in the "slices" array).
+ "num_slices": 15,
+ // We record at most 4 slices.
+ "slices": [
+ {
+ "slice": 0, // The index of this slice.
+ "pause": 23.221, // How long the slice took.
+ "when": 0, // Milliseconds since the start of the GC.
+ "reason": "SET_NEW_DOCUMENT",
+ // GC state when the slice started
+ "initial_state": "NotActive",
+ // GC state when the slice ended
+ "final_state": "Mark",
+ // Budget is either "Xms", "work(Y)", or
+ // "unlimited".
+ "budget": "10ms",
+ // Number of page faults during the slice.
+ "page_faults": 0,
+ "start_timestamp": 294875,
+ "end_timestamp": 294879,
+ // Time taken by each phase. There are at most 65 possible
+ // phases, but usually only a few phases run in a given slice.
+ "times": {
+ "wait_background_thread": 0.012,
+ "mark_discard_code": 2.845,
+ "purge": 0.723,
+ "mark": 9.831,
+ "mark_roots": 0.102,
+ "buffer_gray_roots": 3.095,
+ "mark_cross_compartment_wrappers": 0.039,
+ "mark_c_and_js_stacks": 0.005,
+ "mark_runtime_wide_data": 2.313,
+ "mark_embedding": 0.117,
+ "mark_compartments": 2.27,
+ "unmark": 1.063,
+ "minor_gcs_to_evict_nursery": 8.701,
+ ...
+ }
+ },
+ { ... },
+ ],
+ // Sum of the phase times across all slices, including
+ // omitted slices. As before, there are <= 65 possible phases.
+ "totals": {
+ "wait_background_thread": 0.012,
+ "mark_discard_code": 2.845,
+ "purge": 0.723,
+ "mark": 9.831,
+ "mark_roots": 0.102,
+ "buffer_gray_roots": 3.095,
+ "mark_cross_compartment_wrappers": 0.039,
+ "mark_c_and_js_stacks": 0.005,
+ "mark_runtime_wide_data": 2.313,
+ "mark_embedding": 0.117,
+ "mark_compartments": 2.27,
+ "unmark": 1.063,
+ "minor_gcs_to_evict_nursery": 8.701,
+ ...
+ }
+ },
+ ... // Up to four more selected GCs follow.
+ ],
+ "worst": [
+ ... // Same as above, but the 2 worst GCs by max_pause.
+ ]
+ },
+
+fileIOReports
+-------------
+Contains the statistics of main-thread I/O recorded during the execution. Only the I/O stats for the XRE and the profile directories are currently reported, neither of them disclosing the full local path.
+
+Structure:
+
+.. code-block:: js
+
+ "fileIOReports": {
+ "{xre}": [
+ totalTime, // Accumulated duration of all operations
+ creates, // Number of create/open operations
+ reads, // Number of read operations
+ writes, // Number of write operations
+ fsyncs, // Number of fsync operations
+ stats, // Number of stat operations
+ ],
+ "{profile}": [ ... ],
+ ...
+ }
+
+lateWrites
+----------
+This section reports writes to the file system that happen during shutdown. The reported data contains the stack and the loaded libraries at the time the writes happened.
+
+Structure:
+
+.. code-block:: js
+
+ "lateWrites" : {
+ "memoryMap" : [
+ ["wgdi32.pdb", "08A541B5942242BDB4AEABD8C87E4CFF2"],
+ ... other entries in the format ["module name", "breakpad identifier"] ...
+ ],
+ "stacks" : [
+ [
+ [
+ 0, // the module index or -1 for invalid module indices
+ 190649 // the offset of this program counter in its module or an absolute pc
+ ],
+ [1, 2540075],
+ ... other frames ...
+ ],
+ ... other stacks ...
+ ],
+ },
+
+addonDetails
+------------
+This section contains per-addon telemetry details, as reported by each addon provider. The XPI provider is the only one reporting at the time of writing (`see DXR <https://dxr.mozilla.org/mozilla-central/search?q=setTelemetryDetails&case=true>`_). Telemetry does not manipulate or enforce a specific format for the supplied provider's data.
+
+Structure:
+
+.. code-block:: js
+
+ "addonDetails": {
+ "XPI": {
+ "adbhelper@mozilla.org": {
+ "scan_items": 24,
+ "scan_MS": 3,
+ "location": "app-profile",
+ "name": "ADB Helper",
+ "creator": "Mozilla & Android Open Source Project",
+ "startup_MS": 30
+ },
+ ...
+ },
+ ...
+ }
+
+addonHistograms
+---------------
+This section contains the histogram registered by the addons (`see here <https://dxr.mozilla.org/mozilla-central/rev/584870f1cbc5d060a57e147ce249f736956e2b62/toolkit/components/telemetry/nsITelemetry.idl#303>`_). This section is not present if no addon histogram is available.
+
+UITelemetry
+-----------
+See the ``UITelemetry data format`` documentation.
+
+slowSQL
+-------
+This section contains the information about the slow SQL queries for both the main and other threads. The execution of an SQL statement is considered slow if it takes 50ms or more on the main thread or 100ms or more on other threads. Slow SQL statements will be automatically trimmed to 1000 characters. This limit doesn't include the ellipsis and database name, which are appended at the end of the stored statement.
+
+Structure:
+
+.. code-block:: js
+
+ "slowSQL": {
+ "mainThread": {
+ "Sanitized SQL Statement": [
+ 1, // the number of times this statement was hit
+ 200 // the total time (in milliseconds) that was spent on this statement
+ ],
+ ...
+ },
+ "otherThreads": {
+ "VACUUM /* places.sqlite */": [
+ 1,
+ 330
+ ],
+ ...
+ }
+ },
+
+slowSQLStartup
+--------------
+This section contains the slow SQL statements gathered at startup (until the "sessionstore-windows-restored" event is fired). The structure of this section resembles the one for `slowSQL`_.
+
+UIMeasurements
+--------------
+This section contains UI specific telemetry measurements and events. This section is mainly populated with Android-specific data and events (`see here <https://dxr.mozilla.org/mozilla-central/search?q=regexp%3AUITelemetry.%28addEvent|startSession|stopSession%29&redirect=false&case=false>`_).
+
+Structure:
+
+.. code-block:: js
+
+ "UIMeasurements": [
+ {
+ "type": "event", // either "session" or "event"
+ "action": "action.1",
+ "method": "menu",
+ "sessions": [],
+ "timestamp": 12345,
+ "extras": "settings"
+ },
+ {
+ "type": "session",
+ "name": "awesomescreen.1",
+ "reason": "commit",
+ "start": 123,
+ "end": 456
+ }
+ ...
+ ],
diff --git a/toolkit/components/telemetry/docs/data/sync-ping.rst b/toolkit/components/telemetry/docs/data/sync-ping.rst
new file mode 100644
index 000000000..775ab008a
--- /dev/null
+++ b/toolkit/components/telemetry/docs/data/sync-ping.rst
@@ -0,0 +1,182 @@
+
+"sync" ping
+===========
+
+This is an aggregated format that contains information about each sync that occurred during a timeframe. It is submitted every 12 hours, and on browser shutdown, but only if the syncs property would not be empty. The ping does not contain the environment block, nor the clientId.
+
+Each item in the syncs property is generated after a sync is completed, for both successful and failed syncs, and contains measurements pertaining to sync performance and error information.
+
+A JSON-schema document describing the exact format of the ping's payload property can be found at `services/sync/tests/unit/sync\_ping\_schema.json <https://dxr.mozilla.org/mozilla-central/source/services/sync/tests/unit/sync_ping_schema.json>`_.
+
+Structure:
+
+.. code-block:: js
+
+ {
+ version: 4,
+ type: "sync",
+ ... common ping data
+ payload: {
+ version: 1,
+ discarded: <integer count> // Number of syncs discarded -- left out if zero.
+ why: <string>, // Why did we submit the ping? Either "shutdown" or "schedule".
+ // Array of recorded syncs. The ping is not submitted if this would be empty
+ syncs: [{
+ when: <integer milliseconds since epoch>,
+ took: <integer duration in milliseconds>,
+ uid: <string>, // Hashed FxA unique ID, or string of 32 zeros.
+ deviceID: <string>, // Hashed FxA Device ID, hex string of 64 characters, not included if the user is not logged in.
+ didLogin: <bool>, // Optional, is this the first sync after login? Excluded if we don't know.
+        why: <string>, // Optional, why the sync occurred, excluded if we don't know.
+
+ // Optional, excluded if there was no error.
+ failureReason: {
+ name: <string>, // "httperror", "networkerror", "shutdownerror", etc.
+ code: <integer>, // Only present for "httperror" and "networkerror".
+ error: <string>, // Only present for "othererror" and "unexpectederror".
+ from: <string>, // Optional, and only present for "autherror".
+ },
+
+ // Optional, excluded if we couldn't get a valid uid or local device id
+ devices: [{
+ os: <string>, // OS string as reported by Services.appinfo.OS,
+ version: <string>, // Firefox version, as reported by Services.appinfo.version
+ id: <string>, // Hashed FxA device id for device
+ }],
+
+ // Internal sync status information. Omitted if it would be empty.
+ status: {
+ sync: <string>, // The value of the Status.sync property, unless it indicates success.
+ service: <string>, // The value of the Status.service property, unless it indicates success.
+ },
+ // Information about each engine's sync.
+ engines: [
+ {
+ name: <string>, // "bookmarks", "tabs", etc.
+ took: <integer duration in milliseconds>, // Optional, values of 0 are omitted.
+
+ status: <string>, // The value of Status.engines, if it holds a non-success value.
+
+ // Optional, excluded if all items would be 0. A missing item indicates a value of 0.
+ incoming: {
+ applied: <integer>, // Number of records applied
+ succeeded: <integer>, // Number of records that applied without error
+ failed: <integer>, // Number of records that failed to apply
+ newFailed: <integer>, // Number of records that failed for the first time this sync
+ reconciled: <integer>, // Number of records that were reconciled
+ },
+
+ // Optional, excluded if it would be empty. Records that would be
+ // empty (e.g. 0 sent and 0 failed) are omitted.
+ outgoing: [
+ {
+ sent: <integer>, // Number of outgoing records sent. Zero values are omitted.
+ failed: <integer>, // Number that failed to send. Zero values are omitted.
+ }
+ ],
+ // Optional, excluded if there were no errors
+ failureReason: { ... }, // Same as above.
+
+ // Optional, excluded if it would be empty or if the engine cannot
+ // or did not run validation on itself.
+ validation: {
+ // Optional validator version, default of 0.
+ version: <integer>,
+ checked: <integer>,
+ took: <non-monotonic integer duration in milliseconds>,
+ // Entries with a count of 0 are excluded, the array is excluded if no problems are found.
+ problems: [
+ {
+ name: <string>, // The problem identified.
+ count: <integer>, // Number of times it occurred.
+ }
+ ],
+ // Format is same as above, this is only included if we tried and failed
+ // to run validation, and if it's present, all other fields in this object are optional.
+ failureReason: { ... },
+ }
+ }
+ ]
+ }]
+ }
+ }
+
+info
+----
+
+discarded
+~~~~~~~~~
+
+The ping may only contain a certain number of entries in the ``"syncs"`` array, currently 500 (it is determined by the ``"services.sync.telemetry.maxPayloadCount"`` preference). Entries beyond this are discarded, and recorded in the discarded count.
+
+syncs.took
+~~~~~~~~~~
+
+These values should be monotonic. If we can't get a monotonic timestamp, -1 will be reported on the payload, and the values will be omitted from the engines. Additionally, the value will be omitted from an engine if it would be 0 (either due to timer inaccuracy or finishing instantaneously).
+
+syncs.uid
+~~~~~~~~~
+
+This property contains a hash of the FxA account identifier, which is a 32 character hexadecimal string. In the case that we are unable to authenticate with FxA and have never authenticated in the past, it will be a placeholder string consisting of 32 repeated ``0`` characters.
+
+syncs.why
+~~~~~~~~~
+
+One of the following values:
+
+- ``startup``: This is the first sync triggered after browser startup.
+- ``schedule``: This is a sync triggered because it has been too long since the last sync.
+- ``score``: This sync is triggered by a high score value in one of sync's trackers, indicating that many changes have occurred since the last sync.
+- ``user``: The user manually triggered the sync.
+- ``tabs``: The user opened the synced tabs sidebar, which triggers a sync.
+
+syncs.status
+~~~~~~~~~~~~
+
+The ``engine.status``, ``payload.status.sync``, and ``payload.status.service`` properties are sync error codes, which are listed in `services/sync/modules/constants.js <https://dxr.mozilla.org/mozilla-central/source/services/sync/modules/constants.js>`_, and success values are not reported.
+
+syncs.failureReason
+~~~~~~~~~~~~~~~~~~~
+
+Stores error information, if any is present. Always contains the "name" property, which identifies the type of error it is. The types can be:
+
+- ``httperror``: Indicates that we received an HTTP error response code, but are unable to be more specific about the error. Contains the following properties:
+
+ - ``code``: Integer HTTP status code.
+
+- ``nserror``: Indicates that an exception with the provided error code caused sync to fail.
+
+ - ``code``: The nsresult error code (integer).
+
+- ``shutdownerror``: Indicates that the sync failed because we shut down before completion.
+
+- ``autherror``: Indicates an unrecoverable authentication error.
+
+ - ``from``: Where the authentication error occurred, one of the following values: ``tokenserver``, ``fxaccounts``, or ``hawkclient``.
+
+- ``othererror``: Indicates that it is a sync error code that we are unable to give more specific information on. As with the ``syncStatus`` property, it is a sync error code, which are listed in `services/sync/modules/constants.js <https://dxr.mozilla.org/mozilla-central/source/services/sync/modules/constants.js>`_.
+
+ - ``error``: String identifying which error was present.
+
+- ``unexpectederror``: Indicates that some other error caused sync to fail, typically an uncaught exception.
+
+ - ``error``: The message provided by the error.
+
+- ``sqlerror``: Indicates that we received a ``mozIStorageError`` from a database query.
+
+ - ``code``: Value of the ``error.result`` property, one of the constants listed `here <https://developer.mozilla.org/en-US/docs/Mozilla/Tech/XPCOM/Reference/Interface/MozIStorageError#Constants>`_.
+
+syncs.engine.name
+~~~~~~~~~~~~~~~~~
+
+Third-party engines are not reported, so only the following values are allowed: ``addons``, ``bookmarks``, ``clients``, ``forms``, ``history``, ``passwords``, ``prefs``, and ``tabs``.
+
+syncs.engine.validation.problems
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+For engines that can run validation on themselves, an array of objects describing validation errors that have occurred. Items that would have a count of 0 are excluded. Each engine will have its own set of items that it might put in the ``name`` field, but there are a finite number. See ``BookmarkProblemData.getSummary`` in `services/sync/modules/bookmark\_validator.js <https://dxr.mozilla.org/mozilla-central/source/services/sync/modules/bookmark_validator.js>`_ for an example.
+
+syncs.devices
+~~~~~~~~~~~~~
+
+The list of remote devices associated with this account, as reported by the clients collection. The ID of each device is hashed using the same algorithm as the local id.
diff --git a/toolkit/components/telemetry/docs/data/uitour-ping.rst b/toolkit/components/telemetry/docs/data/uitour-ping.rst
new file mode 100644
index 000000000..8d17ec55a
--- /dev/null
+++ b/toolkit/components/telemetry/docs/data/uitour-ping.rst
@@ -0,0 +1,26 @@
+
+"uitour-tag" ping
+=================
+
+This ping is submitted via the UITour setTreatmentTag API. It may be used by
+the tour to record what settings were made by a user or to track the result of
+A/B experiments.
+
+The client ID is submitted with this ping.
+
+Structure:
+
+.. code-block:: js
+
+ {
+ version: 1,
+ type: "uitour-tag",
+ clientId: <string>,
+ payload: {
+ tagName: <string>,
+ tagValue: <string>
+ }
+ }
+
+See also: :doc:`common ping fields <common-ping>`
+
diff --git a/toolkit/components/telemetry/docs/fhr/architecture.rst b/toolkit/components/telemetry/docs/fhr/architecture.rst
new file mode 100644
index 000000000..6a857d329
--- /dev/null
+++ b/toolkit/components/telemetry/docs/fhr/architecture.rst
@@ -0,0 +1,226 @@
+.. _healthreport_architecture:
+
+============
+Architecture
+============
+
+``healthreporter.jsm`` contains the main interface for FHR, the
+``HealthReporter`` type. An instance of this is created by the
+``data_reporting_service``.
+
+``providers.jsm`` contains numerous ``Metrics.Provider`` and
+``Metrics.Measurement`` used for collecting application metrics. If you
+are looking for the FHR probes, this is where they are.
+
+Storage
+=======
+
+Firefox Health Report stores data in 3 locations:
+
+* Metrics measurements and provider state is stored in a SQLite database
+ (via ``Metrics.Storage``).
+* Service state (such as the IDs of documents uploaded) is stored in a
+ JSON file on disk (via OS.File).
+* Lesser state and run-time options are stored in preferences.
+
+Preferences
+===========
+
+Preferences controlling behavior of Firefox Health Report live in the
+``datareporting.healthreport.*`` branch.
+
+Service and Data Control
+------------------------
+
+The following preferences control behavior of the service and data upload.
+
+service.enabled
+ Controls whether the entire health report service runs. The overall
+ service performs data collection, storing, and submission.
+
+ This is the primary kill switch for Firefox Health Report
+ outside of the build system variable. i.e. if you are using an
+ official Firefox build and wish to disable FHR, this is what you
+ should set to false to prevent FHR from not only submitting but
+ also collecting data.
+
+uploadEnabled
+ Whether uploading of data is enabled. This is the preference the
+ checkbox in the preferences UI reflects. If this is
+ disabled, FHR still collects data - it just doesn't upload it.
+
+service.loadDelayMsec
+ How long (in milliseconds) after initial application start should FHR
+ wait before initializing.
+
+ FHR may initialize sooner than this if the FHR service is requested.
+ This will happen if e.g. the user goes to ``about:healthreport``.
+
+service.loadDelayFirstRunMsec
+ How long (in milliseconds) FHR should wait to initialize on first
+ application run.
+
+ FHR waits longer than normal to initialize on first application run
+ because first-time initialization can use a lot of I/O to initialize
+ the SQLite database and this I/O should not interfere with the
+ first-run user experience.
+
+documentServerURI
+ The URI of a Bagheera server that FHR should interface with for
+ submitting documents.
+
+ You typically do not need to change this.
+
+documentServerNamespace
+ The namespace on the document server FHR should upload documents to.
+
+ You typically do not need to change this.
+
+infoURL
+  The URL of a page containing more info about FHR, its privacy
+ policy, etc.
+
+about.reportUrl
+ The URL to load in ``about:healthreport``.
+
+about.reportUrlUnified
+ The URL to load in ``about:healthreport``. This is used instead of ``reportUrl`` for UnifiedTelemetry when it is not opt-in.
+
+service.providerCategories
+ A comma-delimited list of category manager categories that contain
+ registered ``Metrics.Provider`` records. Read below for how provider
+ registration works.
+
+If the entire service is disabled, you lose data collection. This means
+that **local** data analysis won't be available because there is no data
+to analyze! Keep in mind that Firefox Health Report can be useful even
+if it's not submitting data to remote servers!
+
+Logging
+-------
+
+The following preferences allow you to control the logging behavior of
+Firefox Health Report.
+
+logging.consoleEnabled
+ Whether to write log messages to the web console. This is true by
+ default.
+
+logging.consoleLevel
+ The minimum log level FHR messages must have to be written to the
+ web console. By default, only FHR warnings or errors will be written
+ to the web console. During normal/expected operation, no messages of
+ this type should be produced.
+
+logging.dumpEnabled
+ Whether to write log messages via ``dump()``. If true, FHR will write
+ messages to stdout/stderr.
+
+ This is typically only enabled when developing FHR.
+
+logging.dumpLevel
+ The minimum log level messages must have to be written via
+ ``dump()``.
+
+State
+-----
+
+currentDaySubmissionFailureCount
+ How many submission failures the client has encountered while
+ attempting to upload the most recent document.
+
+lastDataSubmissionFailureTime
+ The time of the last failed document upload.
+
+lastDataSubmissionRequestedTime
+ The time of the last document upload attempt.
+
+lastDataSubmissionSuccessfulTime
+ The time of the last successful document upload.
+
+nextDataSubmissionTime
+ The time the next data submission is scheduled for. FHR will not
+ attempt to upload a new document before this time.
+
+pendingDeleteRemoteData
+ Whether the client currently has a pending request to delete remote
+ data. If true, the client will attempt to delete all remote data
+ before an upload is performed.
+
+FHR stores various state in preferences.
+
+Registering Providers
+=====================
+
+Firefox Health Report providers are registered via the category manager.
+See ``HealthReportComponents.manifest`` for providers defined in this
+directory.
+
+Essentially, the category manager receives the name of a JS type and the
+URI of a JSM to import that exports this symbol. At run-time, the
+providers registered in the category manager are instantiated.
+
+Providers are registered via the category manager to make registration
+simple and less prone to errors. Any XPCOM component can create a
+category manager entry. Therefore, new data providers can be added
+without having to touch core Firefox Health Report code. Additionally,
+category manager registration means providers are more likely to be
+registered on FHR's terms, when it wants. If providers were registered
+in code at application run-time, there would be the risk of other
+components prematurely instantiating FHR (causing a performance hit if
+performed at an inopportune time) or semi-complicated code around
+observers or listeners. Category manager entries are only 1 line per
+provider and leave FHR in control: they are simple and safe.
+
+Document Generation and Lifecycle
+=================================
+
+FHR will attempt to submit a JSON document containing data every 24 wall
+clock hours.
+
+At upload time, FHR will query the database for **all** information from
+the last 180 days and assemble this data into a JSON document. We
+attempt to upload this JSON document with a client-generated UUID to the
+configured server.
+
+Before we attempt upload, the generated UUID is stored in the JSON state
+file on local disk. At this point, the client assumes the document with
+that UUID has been successfully stored on the server.
+
+If the client is aware of other document UUIDs that presumably exist on
+the server, those UUIDs are sent with the upload request so the client
+can request those UUIDs be deleted. This helps ensure that each client
+only has 1 document/UUID on the server at any one time.
+
+Importance of Persisting UUIDs
+------------------------------
+
+The choices of how, where, and when document UUIDs are stored and updated
+are very important. One should not attempt to change things unless she
+has a very detailed understanding of why things are the way they are.
+
+The client is purposefully very conservative about forgetting about
+generated UUIDs. In other words, once a UUID is generated, the client
+deliberately holds on to that UUID until it's very confident that UUID
+is no longer stored on the server. The reason we do this is because
+*orphaned* documents/UUIDs on the server can lead to faulty analysis,
+such as over-reporting the number of Firefox installs that stop being
+used.
+
+When uploading a new UUID, we update the state and save the state file
+to disk *before* an upload attempt because if the upload succeeds but
+the response never makes it back to the client, we want the client to
+know about the uploaded UUID so it can delete it later to prevent an
+orphan.
+
+We maintain a list of UUIDs locally (not simply the last UUID) because
+multiple upload attempts could fail the same way as the previous
+paragraph describes and we have no way of knowing which (if any)
+actually succeeded. The safest approach is to assume every document
+produced managed to get uploaded somehow.
+
+We store the UUIDs on a file on disk and not anywhere else because we
+want storage to be robust. We originally stored UUIDs in preferences,
+which only flush to disk periodically. Writes to preferences were
+apparently getting lost. We switched to writing directly to files to
+eliminate this window.
diff --git a/toolkit/components/telemetry/docs/fhr/dataformat.rst b/toolkit/components/telemetry/docs/fhr/dataformat.rst
new file mode 100644
index 000000000..b067f9d0c
--- /dev/null
+++ b/toolkit/components/telemetry/docs/fhr/dataformat.rst
@@ -0,0 +1,1997 @@
+.. _healthreport_dataformat:
+
+==============
+Payload Format
+==============
+
+Currently, the Firefox Health Report is submitted as a compressed JSON
+document. The root JSON element is an object. A *version* field defines
+the version of the payload which in turn defines the expected contents
+the object.
+
+As of 2013-07-03, desktop submits Version 2, and Firefox for Android submits
+Version 3 payloads.
+
+Version 3
+=========
+
+Version 3 is a complete rebuild of the document format. Events are tracked in
+an "environment". Environments are computed from a large swath of local data
+(e.g., add-ons, CPU count, versions), and a new environment comes into being
+when one of its attributes changes.
+
+Client documents, then, will include descriptions of many environments, and
+measurements will be attributed to one particular environment.
+
+A map of environments is present at the top level of the document, with the
+current environment named "current" in the map. Each environment has a hash identifier and
+a set of attributes. The current environment is completely described, and has
+its hash present in a "hash" attribute. All other environments are represented
+as a tree diff from the current environment, with their hash as the key in the
+"environments" object.
+
+A removed add-on has the value 'null'.
+
+There is no "last" data at present.
+
+Daily data is hierarchical: by day, then by environment, and then by
+measurement, and is present in "data", just as in v2.
+
+Leading by example::
+
+ {
+ "lastPingDate": "2013-06-29",
+ "thisPingDate": "2013-07-03",
+ "version": 3,
+ "environments": {
+ "current": {
+ "org.mozilla.sysinfo.sysinfo": {
+ "memoryMB": 1567,
+ "cpuCount": 4,
+ "architecture": "armeabi-v7a",
+ "_v": 1,
+ "version": "4.1.2",
+ "name": "Android"
+ },
+ "org.mozilla.profile.age": {
+ "_v": 1,
+ "profileCreation": 15827
+ },
+ "org.mozilla.addons.active": {
+ "QuitNow@TWiGSoftware.com": {
+ "appDisabled": false,
+ "userDisabled": false,
+ "scope": 1,
+ "updateDay": 15885,
+ "foreignInstall": false,
+ "hasBinaryComponents": false,
+ "blocklistState": 0,
+ "type": "extension",
+ "installDay": 15885,
+ "version": "1.18.02"
+ },
+ "{dbbf9331-b713-6eda-1006-205efead09dc}": {
+ "appDisabled": false,
+ "userDisabled": "askToActivate",
+ "scope": 8,
+ "updateDay": 15779,
+ "foreignInstall": true,
+ "blocklistState": 0,
+ "type": "plugin",
+ "installDay": 15779,
+ "version": "11.1 r115"
+ },
+ "desktopbydefault@bnicholson.mozilla.org": {
+ "appDisabled": false,
+ "userDisabled": true,
+ "scope": 1,
+ "updateDay": 15870,
+ "foreignInstall": false,
+ "hasBinaryComponents": false,
+ "blocklistState": 0,
+ "type": "extension",
+ "installDay": 15870,
+ "version": "1.1"
+ },
+ "{6e092a7f-ba58-4abb-88c1-1a4e50b217e4}": {
+ "appDisabled": false,
+ "userDisabled": false,
+ "scope": 1,
+ "updateDay": 15828,
+ "foreignInstall": false,
+ "hasBinaryComponents": false,
+ "blocklistState": 0,
+ "type": "extension",
+ "installDay": 15828,
+ "version": "1.1.0"
+ },
+ "{46551EC9-40F0-4e47-8E18-8E5CF550CFB8}": {
+ "appDisabled": false,
+ "userDisabled": true,
+ "scope": 1,
+ "updateDay": 15879,
+ "foreignInstall": false,
+ "hasBinaryComponents": false,
+ "blocklistState": 0,
+ "type": "extension",
+ "installDay": 15879,
+ "version": "1.3.2"
+ },
+ "_v": 1
+ },
+ "org.mozilla.appInfo.appinfo": {
+ "_v": 3,
+ "appLocale": "en_us",
+ "osLocale": "en_us",
+ "distribution": "",
+ "acceptLangIsUserSet": 0,
+ "isTelemetryEnabled": 1,
+ "isBlocklistEnabled": 1
+ },
+ "geckoAppInfo": {
+ "updateChannel": "nightly",
+ "id": "{aa3c5121-dab2-40e2-81ca-7ea25febc110}",
+ "os": "Android",
+ "platformBuildID": "20130703031323",
+ "platformVersion": "25.0a1",
+ "vendor": "Mozilla",
+ "name": "fennec",
+ "xpcomabi": "arm-eabi-gcc3",
+ "appBuildID": "20130703031323",
+ "_v": 1,
+ "version": "25.0a1"
+ },
+ "hash": "tB4Pnnep9yTxnMDymc3dAB2RRB0=",
+ "org.mozilla.addons.counts": {
+ "extension": 4,
+ "plugin": 1,
+ "_v": 1,
+ "theme": 0
+ }
+ },
+ "k2O3hlreMeS7L1qtxeMsYWxgWWQ=": {
+ "geckoAppInfo": {
+ "platformBuildID": "20130630031138",
+ "appBuildID": "20130630031138",
+ "_v": 1
+ },
+ "org.mozilla.appInfo.appinfo": {
+        "_v": 2
+ }
+ },
+ "1+KN9TutMpzdl4TJEl+aCxK+xcw=": {
+ "geckoAppInfo": {
+ "platformBuildID": "20130626031100",
+ "appBuildID": "20130626031100",
+ "_v": 1
+ },
+ "org.mozilla.addons.active": {
+ "QuitNow@TWiGSoftware.com": null,
+ "{dbbf9331-b713-6eda-1006-205efead09dc}": null,
+ "desktopbydefault@bnicholson.mozilla.org": null,
+ "{6e092a7f-ba58-4abb-88c1-1a4e50b217e4}": null,
+ "{46551EC9-40F0-4e47-8E18-8E5CF550CFB8}": null,
+ "_v": 1
+ },
+ "org.mozilla.addons.counts": {
+ "extension": 0,
+ "plugin": 0,
+ "_v": 1
+ }
+ }
+ },
+ "data": {
+ "last": {},
+ "days": {
+ "2013-07-03": {
+ "tB4Pnnep9yTxnMDymc3dAB2RRB0=": {
+ "org.mozilla.appSessions": {
+ "normal": [
+ {
+ "r": "P",
+ "d": 2,
+ "sj": 653
+ },
+ {
+ "r": "P",
+ "d": 22
+ },
+ {
+ "r": "P",
+ "d": 5
+ },
+ {
+ "r": "P",
+ "d": 0
+ },
+ {
+ "r": "P",
+ "sg": 3560,
+ "d": 171,
+ "sj": 518
+ },
+ {
+ "r": "P",
+ "d": 16
+ },
+ {
+ "r": "P",
+ "d": 1079
+ }
+ ],
+ "_v": "4"
+ }
+ },
+ "k2O3hlreMeS7L1qtxeMsYWxgWWQ=": {
+ "org.mozilla.appSessions": {
+ "normal": [
+ {
+ "r": "P",
+ "d": 27
+ },
+ {
+ "r": "P",
+ "d": 19
+ },
+ {
+ "r": "P",
+ "d": 55
+ }
+ ],
+ "_v": "4"
+ },
+ "org.mozilla.searches.counts": {
+ "bartext": {
+ "google": 1
+ },
+ "_v": "4"
+ },
+ "org.mozilla.experiment": {
+            "lastActive": "some.experiment.id",
+ "_v": "1"
+ }
+ }
+ }
+ }
+ }
+ }
+
+App sessions in Version 3
+-------------------------
+
+Sessions are divided into "normal" and "abnormal". Session objects are stored as discrete JSON::
+
+ "org.mozilla.appSessions": {
+ _v: 4,
+ "normal": [
+ {"r":"P", "d": 123},
+ ],
+ "abnormal": [
+ {"r":"A", "oom": true, "stopped": false}
+ ]
+ }
+
+Keys are:
+
+"r"
+ reason. Values are "P" (activity paused), "A" (abnormal termination).
+"d"
+ duration. Value in seconds.
+"sg"
+ Gecko startup time (msec). Present if this is a clean launch. This
+ corresponds to the telemetry timer *FENNEC_STARTUP_TIME_GECKOREADY*.
+"sj"
+ Java activity init time (msec). Present if this is a clean launch. This
+ corresponds to the telemetry timer *FENNEC_STARTUP_TIME_JAVAUI*,
+ and includes initialization tasks beyond initial
+ *onWindowFocusChanged*.
+
+Abnormal terminations will be missing a duration and will feature these keys:
+
+"oom"
+ was the session killed by an OOM exception?
+"stopped"
+ was the session stopped gently?
+
+Version 3.3
+-----------
+
+As of Firefox 35, the search counts measurement is now bumped to v6, including the *activity* location for the search activity.
+
+Version 3.1
+-----------
+
+As of Firefox 27, *appinfo* is now bumped to v3, including *osLocale*,
+*appLocale* (currently always the same as *osLocale*), *distribution* (a string
+containing the distribution ID and version, separated by a colon), and
+*acceptLangIsUserSet*, an integer-boolean that describes whether the user set
+an *intl.accept_languages* preference.
+
+The search counts measurement is now at version 5, which indicates that
+non-partner searches are recorded. You'll see identifiers like "other-Foo Bar"
+rather than "other".
+
+
+Version 3.2
+-----------
+
+In Firefox 32, Firefox for Android includes a device configuration section
+in the environment description::
+
+ "org.mozilla.device.config": {
+ "hasHardwareKeyboard": false,
+ "screenXInMM": 58,
+ "screenLayout": 2,
+ "uiType": "default",
+ "screenYInMM": 103,
+ "_v": 1,
+ "uiMode": 1
+ }
+
+Of these, the only keys that need explanation are:
+
+uiType
+ One of "default", "smalltablet", "largetablet".
+uiMode
+ A mask of the Android *Configuration.uiMode* value, e.g.,
+ *UI_MODE_TYPE_CAR*.
+screenLayout
+ A mask of the Android *Configuration.screenLayout* value. One of the
+ *SCREENLAYOUT_SIZE_* constants.
+
+Note that screen dimensions can be incorrect due to device inaccuracies and platform limitations.
+
+Other notable differences from Version 2
+----------------------------------------
+
+* There is no default browser indicator on Android.
+* Add-ons include a *blocklistState* attribute, as returned by AddonManager.
+* Searches are now version 4, and are hierarchical: how the search was started
+ (bartext, barkeyword, barsuggest), and then counts per provider.
+
+Version 2
+=========
+
+Version 2 is the same as version 1 with the exception that it has an additional
+top-level field, *geckoAppInfo*, which contains basic application info.
+
+geckoAppInfo
+------------
+
+This field is an object that is a simple map of string keys and values
+describing basic application metadata. It is very similar to the *appinfo*
+measurement in the *last* section. The difference is this field is almost
+certainly guaranteed to exist whereas the one in the data part of the
+payload may be omitted in certain scenarios (such as catastrophic client
+error).
+
+Its keys are as follows:
+
+appBuildID
+ The build ID/date of the application. e.g. "20130314113542".
+
+version
+ The value of nsXREAppData.version. This is the application's version. e.g.
+ "21.0.0".
+
+vendor
+  The value of nsXREAppData.vendor. Can be an empty string. For
+ official Mozilla builds, this will be "Mozilla".
+
+name
+ The value of nsXREAppData.name. For official Firefox builds, this
+ will be "Firefox".
+
+id
+ The value of nsXREAppData.ID.
+
+platformVersion
+ The version of the Gecko platform (as opposed to the app version). For
+ Firefox, this is almost certainly equivalent to the *version* field.
+
+platformBuildID
+  The build ID/date of the Gecko platform (as opposed to the app version).
+ This is commonly equivalent to *appBuildID*.
+
+os
+ The name of the operating system the application is running on.
+
+xpcomabi
+ The binary architecture of the build.
+
+updateChannel
+ The name of the channel used for application updates. Official Mozilla
+ builds have one of the values {release, beta, aurora, nightly}. Local and
+ test builds have *default* as the channel.
+
+Version 1
+=========
+
+Top-level Properties
+--------------------
+
+The main JSON object contains the following properties:
+
+lastPingDate
+ UTC date of the last upload. If this is the first upload from this client,
+ this will not be present.
+
+thisPingDate
+ UTC date when this payload was constructed.
+
+version
+ Integer version of this payload format. Currently only 1 is defined.
+
+clientID
+ An identifier that identifies the client that is submitting data.
+
+ This property may not be present in older clients.
+
+ See :ref:`healthreport_identifiers` for more info on identifiers.
+
+clientIDVersion
+ Integer version associated with the generation semantics for the
+ ``clientID``.
+
+ If the value is ``1``, ``clientID`` is a randomly-generated UUID.
+
+ This property may not be present in older clients.
+
+data
+ Object holding data constituting health report.
+
+Data Properties
+---------------
+
+The bulk of the health report is contained within the *data* object. This
+object has the following keys:
+
+days
+ Object mapping UTC days to measurements from that day. Keys are in the
+ *YYYY-MM-DD* format. e.g. "2013-03-14"
+
+last
+ Object mapping measurement names to their values.
+
+
+The value of *days* and *last* are objects mapping measurement names to that
+measurement's values. The values are always objects. Each object contains
+a *_v* property. This property defines the version of this measurement.
+Additional non-underscore-prefixed properties are defined by the measurement
+itself (see sections below).
+
+Example
+-------
+
+Here is an example JSON document for version 1::
+
+ {
+ "version": 1,
+ "thisPingDate": "2013-03-11",
+ "lastPingDate": "2013-03-10",
+ "data": {
+ "last": {
+ "org.mozilla.addons.active": {
+ "masspasswordreset@johnathan.nightingale": {
+ "userDisabled": false,
+ "appDisabled": false,
+ "version": "1.05",
+ "type": "extension",
+ "scope": 1,
+ "foreignInstall": false,
+ "hasBinaryComponents": false,
+ "installDay": 14973,
+ "updateDay": 15317
+ },
+ "places-maintenance@bonardo.net": {
+ "userDisabled": false,
+ "appDisabled": false,
+ "version": "1.3",
+ "type": "extension",
+ "scope": 1,
+ "foreignInstall": false,
+ "hasBinaryComponents": false,
+ "installDay": 15268,
+ "updateDay": 15379
+ },
+ "_v": 1
+ },
+ "org.mozilla.appInfo.appinfo": {
+ "_v": 1,
+ "appBuildID": "20130309030841",
+ "distributionID": "",
+ "distributionVersion": "",
+ "hotfixVersion": "",
+ "id": "{ec8030f7-c20a-464f-9b0e-13a3a9e97384}",
+ "locale": "en-US",
+ "name": "Firefox",
+ "os": "Darwin",
+ "platformBuildID": "20130309030841",
+ "platformVersion": "22.0a1",
+ "updateChannel": "nightly",
+ "vendor": "Mozilla",
+ "version": "22.0a1",
+ "xpcomabi": "x86_64-gcc3"
+ },
+ "org.mozilla.profile.age": {
+ "_v": 1,
+ "profileCreation": 12444
+ },
+ "org.mozilla.appSessions.current": {
+ "_v": 3,
+ "startDay": 15773,
+ "activeTicks": 522,
+ "totalTime": 70858,
+ "main": 1245,
+ "firstPaint": 2695,
+ "sessionRestored": 3436
+ },
+ "org.mozilla.sysinfo.sysinfo": {
+ "_v": 1,
+ "cpuCount": 8,
+ "memoryMB": 16384,
+ "architecture": "x86-64",
+ "name": "Darwin",
+ "version": "12.2.1"
+ }
+ },
+ "days": {
+ "2013-03-11": {
+ "org.mozilla.addons.counts": {
+ "_v": 1,
+ "extension": 15,
+ "plugin": 12,
+ "theme": 1
+ },
+ "org.mozilla.places.places": {
+ "_v": 1,
+ "bookmarks": 757,
+ "pages": 104858
+ },
+ "org.mozilla.appInfo.appinfo": {
+ "_v": 1,
+ "isDefaultBrowser": 1
+ }
+ },
+ "2013-03-10": {
+ "org.mozilla.addons.counts": {
+ "_v": 1,
+ "extension": 15,
+ "plugin": 12,
+ "theme": 1
+ },
+ "org.mozilla.places.places": {
+ "_v": 1,
+ "bookmarks": 757,
+ "pages": 104857
+ },
+ "org.mozilla.searches.counts": {
+ "_v": 1,
+ "google.urlbar": 4
+ },
+ "org.mozilla.appInfo.appinfo": {
+ "_v": 1,
+ "isDefaultBrowser": 1
+ }
+ }
+ }
+ }
+ }
+
+Measurements
+============
+
+The bulk of payloads consists of measurement data. An individual measurement
+is merely a collection of related values e.g. *statistics about the Places
+database* or *system information*.
+
+Each measurement has an integer version number attached. When the fields in
+a measurement or the semantics of data within that measurement change, the
+version number is incremented.
+
+All measurements are defined alphabetically in the sections below.
+
+org.mozilla.addons.addons
+-------------------------
+
+This measurement contains information about the currently-installed add-ons.
+
+Version 2
+^^^^^^^^^
+
+This version adds the human-readable fields *name* and *description*, both
+coming directly from the Addon instance as most properties in version 1.
+Also, all plugin details are now in org.mozilla.addons.plugins.
+
+Version 1
+^^^^^^^^^
+
+The measurement object is a mapping of add-on IDs to objects containing
+add-on metadata.
+
+Each add-on contains the following properties:
+
+* userDisabled
+* appDisabled
+* version
+* type
+* scope
+* foreignInstall
+* hasBinaryComponents
+* installDay
+* updateDay
+
+With the exception of *installDay* and *updateDay*, all these properties
+come direct from the Addon instance. See https://developer.mozilla.org/en-US/docs/Addons/Add-on_Manager/Addon.
+*installDay* and *updateDay* are the number of days since UNIX epoch of
+the add-ons *installDate* and *updateDate* properties, respectively.
+
+Notes
+^^^^^
+
+Add-ons that have opted out of AMO updates via the
+*extensions._id_.getAddons.cache.enabled* preference are, since Bug 868306
+(Firefox 24), included in the list of submitted add-ons.
+
+Example
+^^^^^^^
+::
+
+ "org.mozilla.addons.addons": {
+ "_v": 2,
+ "{d10d0bf8-f5b5-c8b4-a8b2-2b9879e08c5d}": {
+ "userDisabled": false,
+ "appDisabled": false,
+ "name": "Adblock Plus",
+ "version": "2.4.1",
+ "type": "extension",
+ "scope": 1,
+ "description": "Ads were yesterday!",
+ "foreignInstall": false,
+ "hasBinaryComponents": false,
+ "installDay": 16093,
+ "updateDay": 16093
+ },
+ "{e4a8a97b-f2ed-450b-b12d-ee082ba24781}": {
+ "userDisabled": true,
+ "appDisabled": false,
+ "name": "Greasemonkey",
+ "version": "1.14",
+ "type": "extension",
+ "scope": 1,
+ "description": "A User Script Manager for Firefox",
+ "foreignInstall": false,
+ "hasBinaryComponents": false,
+ "installDay": 16093,
+ "updateDay": 16093
+ }
+ }
+
+org.mozilla.addons.plugins
+--------------------------
+
+This measurement contains information about the currently-installed plugins.
+
+Version 1
+^^^^^^^^^
+
+The measurement object is a mapping of plugin IDs to objects containing
+plugin metadata.
+
+The plugin ID is constructed from the plugin's filename, name, version and
+description. Every plugin has at least a filename and a name.
+
+Each plugin contains the following properties:
+
+* name
+* version
+* description
+* blocklisted
+* disabled
+* clicktoplay
+* mimeTypes
+* updateDay
+
+With the exception of *updateDay* and *mimeTypes*, all these properties come
+directly from ``nsIPluginTag`` via ``nsIPluginHost``.
+*updateDay* is the number of days since UNIX epoch of the plugin's last modified
+time.
+*mimeTypes* is the list of mimetypes the plugin supports, see
+``nsIPluginTag.getMimeTypes()``.
+
+Example
+^^^^^^^
+
+::
+
+ "org.mozilla.addons.plugins": {
+ "_v": 1,
+ "Flash Player.plugin:Shockwave Flash:12.0.0.38:Shockwave Flash 12.0 r0": {
+ "mimeTypes": [
+ "application/x-shockwave-flash",
+ "application/futuresplash"
+ ],
+ "name": "Shockwave Flash",
+ "version": "12.0.0.38",
+ "description": "Shockwave Flash 12.0 r0",
+ "blocklisted": false,
+ "disabled": false,
+ "clicktoplay": false
+ },
+ "Default Browser.plugin:Default Browser Helper:537:Provides information about the default web browser": {
+ "mimeTypes": [
+ "application/apple-default-browser"
+ ],
+ "name": "Default Browser Helper",
+ "version": "537",
+ "description": "Provides information about the default web browser",
+ "blocklisted": false,
+ "disabled": true,
+ "clicktoplay": false
+ }
+ }
+
+org.mozilla.addons.counts
+-------------------------
+
+This measurement contains information about historical add-on counts.
+
+Version 1
+^^^^^^^^^
+
+The measurement object consists of counts of different add-on types. The
+properties are:
+
+extension
+ Integer count of installed extensions.
+plugin
+ Integer count of installed plugins.
+theme
+ Integer count of installed themes.
+lwtheme
+  Integer count of installed lightweight themes.
+
+Notes
+^^^^^
+
+Add-ons opted out of AMO updates are included in the counts. This differs from
+the behavior of the active add-ons measurement.
+
+If no add-ons of a particular type are installed, the property for that type
+will not be present (as opposed to an explicit property with value of 0).
+
+Example
+^^^^^^^
+
+::
+
+ "2013-03-14": {
+ "org.mozilla.addons.counts": {
+ "_v": 1,
+ "extension": 21,
+ "plugin": 4,
+ "theme": 1
+ }
+ }
+
+
+
+org.mozilla.appInfo.appinfo
+---------------------------
+
+This measurement contains basic XUL application and Gecko platform
+information. It is reported in the *last* section.
+
+Version 2
+^^^^^^^^^
+
+In addition to fields present in version 1, this version has the following
+fields appearing in the *days* section:
+
+isBlocklistEnabled
+ Whether the blocklist ping is enabled. This is an integer, 0 or 1.
+ This does not indicate whether the blocklist ping was sent but merely
+ whether the application will try to send the blocklist ping.
+
+isTelemetryEnabled
+ Whether Telemetry is enabled. This is an integer, 0 or 1.
+
+Version 1
+^^^^^^^^^
+
+The measurement object contains mostly string values describing the
+current application and build. The properties are:
+
+* vendor
+* name
+* id
+* version
+* appBuildID
+* platformVersion
+* platformBuildID
+* os
+* xpcomabi
+* updateChannel
+* distributionID
+* distributionVersion
+* hotfixVersion
+* locale
+* isDefaultBrowser
+
+Notes
+^^^^^
+
+All of the properties appear in the *last* section except for
+*isDefaultBrowser*, which appears under *days*.
+
+Example
+^^^^^^^
+
+This example comes from an official OS X Nightly build::
+
+ "org.mozilla.appInfo.appinfo": {
+ "_v": 1,
+ "appBuildID": "20130311030946",
+ "distributionID": "",
+ "distributionVersion": "",
+ "hotfixVersion": "",
+ "id": "{ec8030f7-c20a-464f-9b0e-13a3a9e97384}",
+ "locale": "en-US",
+ "name": "Firefox",
+ "os": "Darwin",
+ "platformBuildID": "20130311030946",
+ "platformVersion": "22.0a1",
+ "updateChannel": "nightly",
+ "vendor": "Mozilla",
+ "version": "22.0a1",
+ "xpcomabi": "x86_64-gcc3"
+ },
+
+org.mozilla.appInfo.update
+--------------------------
+
+This measurement contains information about the application update mechanism
+in the application.
+
+Version 1
+^^^^^^^^^
+
+The following daily values are reported:
+
+enabled
+ Whether automatic application update checking is enabled. 1 for yes,
+ 0 for no.
+autoDownload
+ Whether automatic download of available updates is enabled.
+
+Notes
+^^^^^
+
+This measurement was merged to mozilla-central for JS FHR on 2013-07-15.
+
+Example
+^^^^^^^
+
+::
+
+ "2013-07-15": {
+ "org.mozilla.appInfo.update": {
+ "_v": 1,
+ "enabled": 1,
+      "autoDownload": 1
+ }
+ }
+
+org.mozilla.appInfo.versions
+----------------------------
+
+This measurement contains a history of application version numbers.
+
+Version 2
+^^^^^^^^^
+
+Version 2 reports more fields than version 1 and is not backwards compatible.
+The following fields are present in version 2:
+
+appVersion
+ An array of application version strings.
+appBuildID
+ An array of application build ID strings.
+platformVersion
+ An array of platform version strings.
+platformBuildID
+ An array of platform build ID strings.
+
+When the application is upgraded, the new version and/or build IDs are
+appended to their appropriate fields.
+
+Version 1
+^^^^^^^^^
+
+When the application version (*version* from *org.mozilla.appinfo.appinfo*)
+changes, we record the new version on the day the change was seen. The new
+versions for a day are recorded in an array under the *version* property.
+
+Notes
+^^^^^
+
+If the application isn't upgraded, this measurement will not be present.
+This means this measurement will not be present for most days if a user is
+on the release channel (since updates are typically released every 6 weeks).
+However, users on the Nightly and Aurora channels will likely have a lot
+of these entries since those builds are updated every day.
+
+Values for this measurement are collected when performing the daily
+collection (typically occurs at upload time). As a result, it's possible
+the actual upgrade day may not be attributed to the proper day - the
+reported day may lag behind.
+
+The app and platform versions and build IDs should be identical for most
+clients. If they are different, we are possibly looking at a *Frankenfox*.
+
+Example
+^^^^^^^
+
+::
+
+ "2013-03-27": {
+ "org.mozilla.appInfo.versions": {
+ "_v": 2,
+ "appVersion": [
+ "22.0.0"
+ ],
+ "appBuildID": [
+ "20130325031100"
+ ],
+ "platformVersion": [
+ "22.0.0"
+ ],
+ "platformBuildID": [
+ "20130325031100"
+ ]
+ }
+ }
+
+org.mozilla.appSessions.current
+-------------------------------
+
+This measurement contains information about the currently running XUL
+application's session.
+
+Version 3
+^^^^^^^^^
+
+This measurement has the following properties:
+
+startDay
+ Integer days since UNIX epoch when this session began.
+activeTicks
+ Integer count of *ticks* the session was active for. Gecko periodically
+ sends out a signal when the session is active. Session activity
+ involves keyboard or mouse interaction with the application. Each tick
+ represents a window of 5 seconds where there was interaction.
+totalTime
+ Integer seconds the session has been alive.
+main
+ Integer milliseconds it took for the Gecko process to start up.
+firstPaint
+ Integer milliseconds from process start to first paint.
+sessionRestored
+ Integer milliseconds from process start to session restore.
+
+Example
+^^^^^^^
+
+::
+
+ "org.mozilla.appSessions.current": {
+ "_v": 3,
+ "startDay": 15775,
+ "activeTicks": 4282,
+ "totalTime": 249422,
+ "main": 851,
+ "firstPaint": 3271,
+ "sessionRestored": 5998
+ }
+
+org.mozilla.appSessions.previous
+--------------------------------
+
+This measurement contains information about previous XUL application sessions.
+
+Version 3
+^^^^^^^^^
+
+This measurement contains per-day lists of all the sessions started on that
+day. The following properties may be present:
+
+cleanActiveTicks
+ Active ticks of sessions that were properly shut down.
+cleanTotalTime
+ Total number of seconds for sessions that were properly shut down.
+abortedActiveTicks
+ Active ticks of sessions that were not properly shut down.
+abortedTotalTime
+ Total number of seconds for sessions that were not properly shut down.
+main
+ Time in milliseconds from process start to main process initialization.
+firstPaint
+ Time in milliseconds from process start to first paint.
+sessionRestored
+ Time in milliseconds from process start to session restore.
+
+Notes
+^^^^^
+
+Sessions are recorded on the date on which they began.
+
+If a session was aborted/crashed, the total time may be less than the actual
+total time. This is because we don't always update total time during periods
+of inactivity and the abort/crash could occur after a long period of idle,
+before we've updated the total time.
+
+The lengths of the arrays for {cleanActiveTicks, cleanTotalTime},
+{abortedActiveTicks, abortedTotalTime}, and {main, firstPaint, sessionRestored}
+should all be identical.
+
+The length of the clean sessions plus the length of the aborted sessions should
+be equal to the length of the {main, firstPaint, sessionRestored} properties.
+
+It is not possible to distinguish the main, firstPaint, and sessionRestored
+values from a clean vs aborted session: they are all lumped together.
+
+For sessions spanning multiple UTC days, it's not possible to know which
+days the session was active for. It's possible a week long session only
+had activity for 2 days and there's no way for us to tell which days.
+
+Example
+^^^^^^^
+
+::
+
+ "org.mozilla.appSessions.previous": {
+ "_v": 3,
+ "cleanActiveTicks": [
+ 78,
+ 1785
+ ],
+ "cleanTotalTime": [
+ 4472,
+ 88908
+ ],
+ "main": [
+ 32,
+ 952
+ ],
+ "firstPaint": [
+ 2755,
+ 3497
+ ],
+ "sessionRestored": [
+ 5149,
+ 5520
+ ]
+ }
+
+org.mozilla.crashes.crashes
+---------------------------
+
+This measurement contains a historical record of application crashes.
+
+Version 6
+^^^^^^^^^
+
+This version adds tracking for out-of-memory (OOM) crashes in the main process.
+An OOM crash will be counted as both main-crash and main-crash-oom.
+
+This measurement will be reported on each day there was a crash or crash
+submission. Records may contain the following fields, whose values indicate
+the number of crashes, hangs, or submissions that occurred on the given day:
+
+* content-crash
+* content-crash-submission-succeeded
+* content-crash-submission-failed
+* content-hang
+* content-hang-submission-succeeded
+* content-hang-submission-failed
+* gmplugin-crash
+* gmplugin-crash-submission-succeeded
+* gmplugin-crash-submission-failed
+* main-crash
+* main-crash-oom
+* main-crash-submission-succeeded
+* main-crash-submission-failed
+* main-hang
+* main-hang-submission-succeeded
+* main-hang-submission-failed
+* plugin-crash
+* plugin-crash-submission-succeeded
+* plugin-crash-submission-failed
+* plugin-hang
+* plugin-hang-submission-succeeded
+* plugin-hang-submission-failed
+
+Version 5
+^^^^^^^^^
+
+This version adds support for Gecko media plugin (GMP) crashes.
+
+This measurement will be reported on each day there was a crash or crash
+submission. Records may contain the following fields, whose values indicate
+the number of crashes, hangs, or submissions that occurred on the given day:
+
+* content-crash
+* content-crash-submission-succeeded
+* content-crash-submission-failed
+* content-hang
+* content-hang-submission-succeeded
+* content-hang-submission-failed
+* gmplugin-crash
+* gmplugin-crash-submission-succeeded
+* gmplugin-crash-submission-failed
+* main-crash
+* main-crash-submission-succeeded
+* main-crash-submission-failed
+* main-hang
+* main-hang-submission-succeeded
+* main-hang-submission-failed
+* plugin-crash
+* plugin-crash-submission-succeeded
+* plugin-crash-submission-failed
+* plugin-hang
+* plugin-hang-submission-succeeded
+* plugin-hang-submission-failed
+
+Version 4
+^^^^^^^^^
+
+This version follows up from version 3, adding submissions which are now
+tracked by the :ref:`crashes_crashmanager`.
+
+This measurement will be reported on each day there was a crash or crash
+submission. Records may contain the following fields, whose values indicate
+the number of crashes, hangs, or submissions that occurred on the given day:
+
+* main-crash
+* main-crash-submission-succeeded
+* main-crash-submission-failed
+* main-hang
+* main-hang-submission-succeeded
+* main-hang-submission-failed
+* content-crash
+* content-crash-submission-succeeded
+* content-crash-submission-failed
+* content-hang
+* content-hang-submission-succeeded
+* content-hang-submission-failed
+* plugin-crash
+* plugin-crash-submission-succeeded
+* plugin-crash-submission-failed
+* plugin-hang
+* plugin-hang-submission-succeeded
+* plugin-hang-submission-failed
+
+Version 3
+^^^^^^^^^
+
+This version follows up from version 2, building on improvements to
+the :ref:`crashes_crashmanager`.
+
+This measurement will be reported on each day there was a
+crash. Records may contain the following fields, whose values indicate
+the number of crashes or hangs that occurred on the given day:
+
+* main-crash
+* main-hang
+* content-crash
+* content-hang
+* plugin-crash
+* plugin-hang
+
+Version 2
+^^^^^^^^^
+
+The switch to version 2 coincides with the introduction of the
+:ref:`crashes_crashmanager`, which provides a more robust source of
+crash data.
+
+This measurement will be reported on each day there was a crash. The
+following fields may be present in each record:
+
+mainCrash
+ The number of main process crashes that occurred on the given day.
+
+Yes, version 2 does not track submissions like version 1. It is very
+likely submissions will be re-added later.
+
+Also absent from version 2 are plugin crashes and hangs. These will be
+re-added, likely in version 3.
+
+Version 1
+^^^^^^^^^
+
+This measurement will be reported on each day there was a crash. The
+following properties are reported:
+
+pending
+ The number of crash reports that haven't been submitted.
+submitted
+ The number of crash reports that were submitted.
+
+Notes
+^^^^^
+
+Main process crashes are typically submitted immediately after they
+occur (by checking a box in the crash reporter, which should appear
+automatically after a crash). If the crash reporter submits the crash
+successfully, we get a submitted crash. Else, we leave it as pending.
+
+A pending crash does not mean it will eventually be submitted.
+
+Pending crash reports can be submitted post-crash by going to
+about:crashes.
+
+If a pending crash is submitted via about:crashes, the submitted count
+increments but the pending count does not decrement. This is because FHR
+does not know which pending crash was just submitted and therefore it does
+not know which day's pending crash to decrement.
+
+Example
+^^^^^^^
+
+::
+
+ "org.mozilla.crashes.crashes": {
+ "_v": 1,
+ "pending": 1,
+ "submitted": 2
+ },
+ "org.mozilla.crashes.crashes": {
+ "_v": 2,
+ "mainCrash": 2
+ }
+ "org.mozilla.crashes.crashes": {
+ "_v": 4,
+ "main-crash": 2,
+ "main-crash-submission-succeeded": 1,
+ "main-crash-submission-failed": 1,
+ "main-hang": 1,
+ "plugin-crash": 2
+ }
+
+org.mozilla.healthreport.submissions
+------------------------------------
+
+This measurement contains a history of FHR's own data submission activity.
+It was added in Firefox 23 in early May 2013.
+
+Version 2
+^^^^^^^^^
+
+This is the same as version 1 except an additional field has been added.
+
+uploadAlreadyInProgress
+ A request for upload was initiated while another upload was in progress.
+ This should not occur in well-behaving clients. It (along with a lock
+ preventing simultaneous upload) was added to ensure this never occurs.
+
+Version 1
+^^^^^^^^^
+
+Daily counts of upload events are recorded.
+
+firstDocumentUploadAttempt
+ An attempt was made to upload the client's first document to the server.
+ These are uploads where the client is not aware of a previous document ID
+ on the server. Unless the client had disabled upload, there should be at
+ most one of these in the history of the client.
+
+continuationUploadAttempt
+ An attempt was made to upload a document that replaces an existing document
+ on the server. Most upload attempts should be attributed to this as opposed
+ to *firstDocumentUploadAttempt*.
+
+uploadSuccess
+ The upload attempt recorded by *firstDocumentUploadAttempt* or
+ *continuationUploadAttempt* was successful.
+
+uploadTransportFailure
+ An upload attempt failed due to transport failure (network unavailable,
+ etc).
+
+uploadServerFailure
+ An upload attempt failed due to a server-reported failure. Ideally these
+ are failures reported by the FHR server itself. However, intermediate
+ proxies, firewalls, etc may trigger this depending on how things are
+ configured.
+
+uploadClientFailure
+  An upload attempt failed due to an error/exception in the client.
+ This almost certainly points to a bug in the client.
+
+The result for an upload attempt is always attributed to the same day as
+the attempt, even if the result occurred on a different day from the attempt.
+Therefore, the sum of the result counts should equal the sum of the attempt
+counts.
+
+org.mozilla.hotfix.update
+-------------------------
+
+This measurement contains results from the Firefox update hotfix.
+
+The Firefox update hotfix bypasses the built-in application update mechanism
+and installs a modern Firefox.
+
+Version 1
+^^^^^^^^^
+
+The fields in this measurement are dynamically created based on which
+versions of the update hotfix state file are found on disk.
+
+The general format of the fields is ``<version>.<thing>`` where ``version``
+is a hotfix version like ``v20140527`` and ``thing`` is a key from the
+hotfix state file, e.g. ``upgradedFrom``. Here are some of the ``things``
+that can be defined.
+
+upgradedFrom
+ String identifying the Firefox version that the hotfix upgraded from.
+ e.g. ``16.0`` or ``17.0.1``.
+
+uninstallReason
+ String with enumerated values identifying why the hotfix was uninstalled.
+ Value will be ``STILL_INSTALLED`` if the hotfix is still installed.
+
+downloadAttempts
+ Integer number of times the hotfix started downloading an installer.
+ Download resumes are part of this count.
+
+downloadFailures
+ Integer count of times a download supposedly completed but couldn't
+ be validated. This likely represents something wrong with the network
+ connection. The ratio of this to ``downloadAttempts`` should be low.
+
+installAttempts
+ Integer count of times the hotfix attempted to run the installer.
+ This should ideally be 1. It should only be greater than 1 if UAC
+ elevation was cancelled or not allowed.
+
+installFailures
+ Integer count of total installation failures this client experienced.
+ Can be 0. ``installAttempts - installFailures`` implies install successes.
+
+notificationsShown
+ Integer count of times a notification was displayed to the user that
+ they are running an older Firefox.
+
+org.mozilla.places.places
+-------------------------
+
+This measurement contains information about the Places database (where Firefox
+stores its history and bookmarks).
+
+Version 1
+^^^^^^^^^
+
+Daily counts of items in the database are reported in the following properties:
+
+bookmarks
+ Integer count of bookmarks present.
+pages
+ Integer count of pages in the history database.
+
+Example
+^^^^^^^
+
+::
+
+ "org.mozilla.places.places": {
+ "_v": 1,
+ "bookmarks": 388,
+ "pages": 94870
+ }
+
+org.mozilla.profile.age
+-----------------------
+
+This measurement contains information about the current profile's age (and
+in version 2, the profile's most recent reset date)
+
+Version 2
+^^^^^^^^^
+
+*profileCreation* and *profileReset* properties are present. Both define
+the integer days since UNIX epoch that the current profile was created or
+reset accordingly.
+
+Version 1
+^^^^^^^^^
+
+A single *profileCreation* property is present. It defines the integer
+days since UNIX epoch that the current profile was created.
+
+Notes
+^^^^^
+
+It is somewhat difficult to obtain a reliable *profile born date* due to a
+number of factors, but since Version 2, improvements have been made - on a
+"profile reset" we copy the profileCreation date from the old profile and
+record the time of the reset in profileReset.
+
+Example
+^^^^^^^
+
+::
+
+ "org.mozilla.profile.age": {
+ "_v": 2,
+      "profileCreation": 15176,
+ "profileReset": 15576
+ }
+
+org.mozilla.searches.counts
+---------------------------
+
+This measurement contains information about searches performed in the
+application.
+
+Version 6 (mobile)
+^^^^^^^^^^^^^^^^^^
+
+This adds two new search locations: *widget* and *activity*, corresponding to the search widget and search activity respectively.
+
+Version 2
+^^^^^^^^^
+
+This behaves like version 1 except we added all search engines that
+Mozilla has a partner agreement with. Like version 1, we concatenate
+a search engine ID with a search origin.
+
+Another difference with version 2 is we should no longer misattribute
+a search to the *other* bucket if the search engine name is localized.
+
+The set of search engine providers is:
+
+* amazon-co-uk
+* amazon-de
+* amazon-en-GB
+* amazon-france
+* amazon-it
+* amazon-jp
+* amazondotcn
+* amazondotcom
+* amazondotcom-de
+* aol-en-GB
+* aol-web-search
+* bing
+* eBay
+* eBay-de
+* eBay-en-GB
+* eBay-es
+* eBay-fi
+* eBay-france
+* eBay-hu
+* eBay-in
+* eBay-it
+* google
+* google-jp
+* google-ku
+* google-maps-zh-TW
+* mailru
+* mercadolibre-ar
+* mercadolibre-cl
+* mercadolibre-mx
+* seznam-cz
+* twitter
+* twitter-de
+* twitter-ja
+* yahoo
+* yahoo-NO
+* yahoo-answer-zh-TW
+* yahoo-ar
+* yahoo-bid-zh-TW
+* yahoo-br
+* yahoo-ch
+* yahoo-cl
+* yahoo-de
+* yahoo-en-GB
+* yahoo-es
+* yahoo-fi
+* yahoo-france
+* yahoo-fy-NL
+* yahoo-id
+* yahoo-in
+* yahoo-it
+* yahoo-jp
+* yahoo-jp-auctions
+* yahoo-mx
+* yahoo-sv-SE
+* yahoo-zh-TW
+* yandex
+* yandex-ru
+* yandex-slovari
+* yandex-tr
+* yandex.by
+* yandex.ru-be
+
+And of course, *other*.
+
+The sources for searches remain:
+
+* abouthome
+* contextmenu
+* searchbar
+* urlbar
+
+The measurement will only be populated with providers and sources that
+occurred that day.
+
+If a user switches locales, searches from default providers on the older
+locale will still be supported. However, if that same search engine is
+added by the user to the new build and is *not* a default search engine
+provider, its searches will be attributed to the *other* bucket.
+
+Version 1
+^^^^^^^^^
+
+We record counts of performed searches grouped by search engine and search
+origin. Only search engines with which Mozilla has a business relationship
+are explicitly counted. All other search engines are grouped into an
+*other* bucket.
+
+The following search engines are explicitly counted:
+
+* Amazon.com
+* Bing
+* Google
+* Yahoo
+* Other
+
+The following search origins are distinguished:
+
+about:home
+ Searches initiated from the search text box on about:home.
+context menu
+ Searches initiated from the context menu (highlight text, right click,
+ and select "search for...")
+search bar
+ Searches initiated from the search bar (the text field next to the
+ Awesomebar)
+url bar
+ Searches initiated from the awesomebar/url bar.
+
+Due to the localization of search engine names, non en-US locales may wrongly
+attribute searches to the *other* bucket. This is fixed in version 2.
+
+Example
+^^^^^^^
+
+::
+
+ "org.mozilla.searches.counts": {
+ "_v": 1,
+ "google.searchbar": 3,
+ "google.urlbar": 7
+ },
+
+org.mozilla.searches.engines
+----------------------------
+
+This measurement contains information about search engines.
+
+Version 1
+^^^^^^^^^
+
+This version debuted with Firefox 31 on desktop. It contains the
+following properties:
+
+default
+ Daily string identifier or name of the default search engine provider.
+
+ This field will only be collected if Telemetry is enabled. If
+ Telemetry is enabled and then later disabled, this field may
+ disappear from future days in the payload.
+
+ The special value ``NONE`` could occur if there is no default search
+ engine.
+
+ The special value ``UNDEFINED`` could occur if a default search
+ engine exists but its identifier could not be determined.
+
+ This field's contents are
+ ``Services.search.defaultEngine.identifier`` (if defined) or
+ ``"other-"`` + ``Services.search.defaultEngine.name`` if not.
+ In other words, search engines without an ``.identifier``
+ are prefixed with ``other-``.
+
+Version 2
+^^^^^^^^^
+
+Starting with Firefox 40, there is an additional optional value:
+
+cohort
+ Daily cohort string identifier, recorded if the user is part of
+ search defaults A/B testing.
+
+org.mozilla.sync.sync
+---------------------
+
+This daily measurement contains information about the Sync service.
+
+Values should be recorded for every day FHR measurements occurred.
+
+Version 1
+^^^^^^^^^
+
+This version debuted with Firefox 30 on desktop. It contains the following
+properties:
+
+enabled
+ Daily numeric indicating whether Sync is configured and enabled. 1 if so,
+ 0 otherwise.
+
+preferredProtocol
+ String version of the maximum Sync protocol version the client supports.
+  This will be ``1.1`` for legacy Sync and ``1.5`` for clients that
+ speak the Firefox Accounts protocol.
+
+actualProtocol
+ The actual Sync protocol version the client is configured to use.
+
+ This will be ``1.1`` if the client is configured with the legacy Sync
+ service or if the client only supports ``1.1``.
+
+ It will be ``1.5`` if the client supports ``1.5`` and either a) the
+ client is not configured b) the client is using Firefox Accounts Sync.
+
+syncStart
+ Count of sync operations performed.
+
+syncSuccess
+ Count of sync operations that completed successfully.
+
+syncError
+ Count of sync operations that did not complete successfully.
+
+ This is a measure of overall sync success. This does *not* reflect
+ recoverable errors (such as record conflict) that can occur during
+ sync. This is thus a rough proxy of whether the sync service is
+ operating without error.
+
+org.mozilla.sync.devices
+------------------------
+
+This daily measurement contains information about the device type composition
+for the configured Sync account.
+
+Version 1
+^^^^^^^^^
+
+Version 1 was introduced with Firefox 30.
+
+Field names are dynamic according to the client-reported device types from
+Sync records. All fields are daily last seen integer values corresponding to
+the number of devices of that type.
+
+Common values include:
+
+desktop
+ Corresponds to a Firefox desktop client.
+
+mobile
+ Corresponds to a Fennec client.
+
+org.mozilla.sync.migration
+--------------------------
+
+This daily measurement contains information about sync migration (that is, the
+semi-automated process of migrating a legacy sync account to an FxA account.)
+
+Measurements will start being recorded after a migration is offered by the
+sync server and stop after migration is complete or the user elects to "unlink"
+their sync account. In other words, it is expected that users with Sync setup
+for FxA or with sync unconfigured will not collect data, and that for users
+where data is collected, the collection will only be for a relatively short
+period.
+
+Version 1
+^^^^^^^^^
+
+Version 1 was introduced with Firefox 37 and includes the following properties:
+
+state
+ Corresponds to either a STATE_USER_* string or a STATE_INTERNAL_* string in
+ FxaMigration.jsm. This reflects a state where we are waiting for the user,
+ or waiting for some internal process to complete on the way to completing
+ the migration.
+
+declined
+ Corresponds to the number of times the user closed the migration infobar.
+
+unlinked
+ Set if the user declined to migrate and instead "unlinked" Sync from the
+ browser.
+
+accepted
+ Corresponds to the number of times the user explicitly elected to start or
+ continue the migration - it counts how often the user clicked on any UI
+ created specifically for migration. The "ideal" UX for migration would see
+ this at exactly 1, some known edge-cases (eg, browser restart required to
+ finish) could expect this to be 2, and anything more means we are doing
+ something wrong.
+
+org.mozilla.sysinfo.sysinfo
+---------------------------
+
+This measurement contains basic information about the system the application
+is running on.
+
+Version 2
+^^^^^^^^^
+
+This version debuted with Firefox 29 on desktop.
+
+A single property was introduced.
+
+isWow64
+ If present, this property indicates whether the machine supports WoW64.
+ This property can be used to identify whether the host machine is 64-bit.
+
+ This property is only present on Windows machines. It is the preferred way
+ to identify 32- vs 64-bit support in that environment.
+
+Version 1
+^^^^^^^^^
+
+The following properties may be available:
+
+cpuCount
+ Integer number of CPUs/cores in the machine.
+memoryMB
+ Integer megabytes of memory in the machine.
+manufacturer
+ The manufacturer of the device.
+device
+ The name of the device (like model number).
+hardware
+ Unknown.
+name
+ OS name.
+version
+ OS version.
+architecture
+ OS architecture that the application is built for. This is not the
+ actual system architecture.
+
+Example
+^^^^^^^
+
+::
+
+ "org.mozilla.sysinfo.sysinfo": {
+ "_v": 1,
+ "cpuCount": 8,
+ "memoryMB": 8192,
+ "architecture": "x86-64",
+ "name": "Darwin",
+ "version": "12.2.0"
+ }
+
+
+org.mozilla.translation.translation
+-----------------------------------
+
+This daily measurement contains information about the usage of the translation
+feature. It is a special telemetry measurement which will only be recorded in
+FHR if telemetry is enabled.
+
+Version 1
+^^^^^^^^^
+
+Daily counts are reported in the following properties:
+
+translationOpportunityCount
+ Integer count of the number of opportunities there were to translate a page.
+missedTranslationOpportunityCount
+ Integer count of the number of missed opportunities there were to translate a page.
+ A missed opportunity is when the page language is not supported by the translation
+ provider.
+pageTranslatedCount
+ Integer count of the number of pages translated.
+charactersTranslatedCount
+ Integer count of the number of characters translated.
+detectedLanguageChangedBefore
+ Integer count of the number of times the user manually adjusted the detected
+ language before translating.
+detectedLanguageChangedAfter
+ Integer count of the number of times the user manually adjusted the detected
+ language after having first translated the page.
+targetLanguageChanged
+ Integer count of the number of times the user manually adjusted the target
+ language.
+deniedTranslationOffer
+ Integer count of the number of times the user opted-out offered
+ page translation, either by the Not Now button or by the notification's
+ close button in the "offer" state.
+autoRejectedTranlationOffer
+ Integer count of the number of times the user is not offered page
+ translation because they had previously clicked "Never translate this
+ language" or "Never translate this site".
+showOriginalContent
+ Integer count of the number of times the user activated the Show Original
+ command.
+
+Additional daily counts broken down by language are reported in the following
+properties:
+
+translationOpportunityCountsByLanguage
+ A mapping from language to count of opportunities to translate that
+ language.
+missedTranslationOpportunityCountsByLanguage
+ A mapping from language to count of missed opportunities to translate that
+ language.
+pageTranslatedCountsByLanguage
+ A mapping from language to the counts of pages translated from that
+ language. Each language entry will be an object containing a "total" member
+ along with individual counts for each language translated to.
+
+Other properties:
+
+detectLanguageEnabled
+ Whether automatic language detection is enabled. This is an integer, 0 or 1.
+showTranslationUI
+ Whether the translation feature UI will be shown. This is an integer, 0 or 1.
+
+Example
+^^^^^^^
+
+::
+
+ "org.mozilla.translation.translation": {
+ "_v": 1,
+ "detectLanguageEnabled": 1,
+ "showTranslationUI": 1,
+ "translationOpportunityCount": 134,
+ "missedTranslationOpportunityCount": 32,
+ "pageTranslatedCount": 6,
+      "charactersTranslatedCount": 1126,
+ "detectedLanguageChangedBefore": 1,
+ "detectedLanguageChangedAfter": 2,
+ "targetLanguageChanged": 0,
+ "deniedTranslationOffer": 3,
+ "autoRejectedTranlationOffer": 1,
+ "showOriginalContent": 2,
+ "translationOpportunityCountsByLanguage": {
+ "fr": 100,
+ "es": 34
+ },
+ "missedTranslationOpportunityCountsByLanguage": {
+ "it": 20,
+ "nl": 10,
+ "fi": 2
+ },
+ "pageTranslatedCountsByLanguage": {
+ "fr": {
+ "total": 6,
+ "es": 5,
+ "en": 1
+ }
+ }
+ }
+
+
+org.mozilla.experiments.info
+----------------------------------
+
+Daily measurement reporting information about the Telemetry Experiments service.
+
+Version 1
+^^^^^^^^^
+
+Property:
+
+lastActive
+ ID of the final Telemetry Experiment that is active on a given day, if any.
+
+
+Version 2
+^^^^^^^^^
+
+Adds an additional optional property:
+
+lastActiveBranch
+ If the experiment uses branches, the branch identifier string.
+
+Example
+^^^^^^^
+
+::
+
+ "org.mozilla.experiments.info": {
+ "_v": 2,
+ "lastActive": "some.experiment.id",
+ "lastActiveBranch": "control"
+ }
+
+org.mozilla.uitour.treatment
+----------------------------
+
+Daily measurement reporting information about treatment tagging done
+by the UITour module.
+
+Version 1
+^^^^^^^^^
+
+Daily text values in the following properties:
+
+<tag>:
+ Array of discrete strings corresponding to calls for setTreatmentTag(tag, value).
+
+Example
+^^^^^^^
+
+::
+
+ "org.mozilla.uitour.treatment": {
+ "_v": 1,
+ "treatment": [
+ "optin",
+ "optin-DNT"
+ ],
+ "another-tag": [
+ "foobar-value"
+ ]
+ }
+
+org.mozilla.passwordmgr.passwordmgr
+-----------------------------------
+
+Daily measurement reporting information about the Password Manager
+
+Version 1
+^^^^^^^^^
+
+Property:
+
+numSavedPasswords
+ number of passwords saved in the Password Manager
+
+enabled
+  Whether or not the user has disabled the Password Manager in preferences
+
+Example
+^^^^^^^
+
+::
+
+ "org.mozilla.passwordmgr.passwordmgr": {
+ "_v": 1,
+ "numSavedPasswords": 5,
+      "enabled": 0
+ }
+
+Version 2
+^^^^^^^^^
+
+More detailed measurements of login forms & their behavior
+
+numNewSavedPasswordsInSession
+ Number of passwords saved to the password manager this session.
+
+numSuccessfulFills
+ Number of times the password manager filled in password fields for user this session.
+
+numTotalLoginsEncountered
+ Number of times a login form was encountered by the user in the session.
+
+Example
+^^^^^^^
+
+::
+
+  "org.mozilla.passwordmgr.passwordmgr": {
+ "_v": 2,
+ "numSavedPasswords": 32,
+ "enabled": 1,
+    "numNewSavedPasswordsInSession": 5,
+    "numSuccessfulFills": 11,
+    "numTotalLoginsEncountered": 23
+  }
diff --git a/toolkit/components/telemetry/docs/fhr/identifiers.rst b/toolkit/components/telemetry/docs/fhr/identifiers.rst
new file mode 100644
index 000000000..82ad0e49e
--- /dev/null
+++ b/toolkit/components/telemetry/docs/fhr/identifiers.rst
@@ -0,0 +1,83 @@
+.. _healthreport_identifiers:
+
+===========
+Identifiers
+===========
+
+Firefox Health Report records some identifiers to keep track of clients
+and uploaded documents.
+
+Identifier Types
+================
+
+Document/Upload IDs
+-------------------
+
+A random UUID called the *Document ID* or *Upload ID* is generated when the FHR
+client creates or uploads a new document.
+
+When clients generate a new *Document ID*, they persist this ID to disk
+**before** the upload attempt.
+
+As part of the upload, the client sends all old *Document IDs* to the server
+and asks the server to delete them. In well-behaving clients, the server
+has a single record for each client with a randomly-changing *Document ID*.
+
+Client IDs
+----------
+
+A *Client ID* is an identifier that **attempts** to uniquely identify an
+individual FHR client. Please note the emphasis on *attempts* in that last
+sentence: *Client IDs* do not guarantee uniqueness.
+
+The *Client ID* is generated when the client first runs or as needed.
+
+The *Client ID* is transferred to the server as part of every upload. The
+server is thus able to affiliate multiple document uploads with a single
+*Client ID*.
+
+Client ID Versions
+^^^^^^^^^^^^^^^^^^
+
+The semantics for how a *Client ID* is generated are versioned.
+
+Version 1
+ The *Client ID* is a randomly-generated UUID.
+
+History of Identifiers
+======================
+
+In the beginning, there were just *Document IDs*. The thinking was clients
+would clean up after themselves and leave at most 1 active document on the
+server.
+
+Unfortunately, this did not work out. Using brute force analysis to
+deduplicate records on the server, a number of interesting patterns emerged.
+
+Orphaning
+ Clients would upload a new payload while not deleting the old payload.
+
+Divergent records
+ Records would share data up to a certain date and then the data would
+ almost completely diverge. This appears to be indicative of profile
+ copying.
+
+Rollback
+ Records would share data up to a certain date. Each record in this set
+ would contain data for a day or two but no extra data. This could be
+ explained by filesystem rollback on the client.
+
+A significant percentage of the records on the server belonged to
+misbehaving clients. Identifying these records was extremely resource
+intensive and error-prone. These records were undermining the ability
+to use Firefox Health Report data.
+
+Thus, the *Client ID* was born. The intent of the *Client ID* was to
+uniquely identify clients so the extreme effort required and the
+questionable reliability of deduplicating server data would become
+problems of the past.
+
+The *Client ID* was originally a randomly-generated UUID (version 1). This
+allowed detection of orphaning and rollback. However, these version 1
+*Client IDs* were still susceptible to use on multiple profiles and
+machines if the profile was copied.
diff --git a/toolkit/components/telemetry/docs/fhr/index.rst b/toolkit/components/telemetry/docs/fhr/index.rst
new file mode 100644
index 000000000..497385dd8
--- /dev/null
+++ b/toolkit/components/telemetry/docs/fhr/index.rst
@@ -0,0 +1,34 @@
+================================
+Firefox Health Report (Obsolete)
+================================
+
+**Firefox Health Report (FHR) is obsolete and no longer ships with Firefox.
+This documentation will live here for a few more cycles.**
+
+Firefox Health Report is a background service that collects application
+metrics and periodically submits them to a central server. The core
+parts of the service are implemented in this directory. However, the
+actual XPCOM service is implemented in the
+``data_reporting_service``.
+
+The core types can actually be instantiated multiple times and used to
+power multiple data submission services within a single Gecko
+application. In other words, everything in this directory is effectively
+a reusable library. However, the terminology and some of the features
+are very specific to what the Firefox Health Report feature requires.
+
+.. toctree::
+ :maxdepth: 1
+
+ architecture
+ dataformat
+ identifiers
+
+Legal and Privacy Concerns
+==========================
+
+Because Firefox Health Report collects and submits data to remote
+servers and is an opt-out feature, there are legal and privacy
+concerns over what data may be collected and submitted. **Additions or
+changes to submitted data should be signed off by responsible
+parties.**
diff --git a/toolkit/components/telemetry/docs/index.rst b/toolkit/components/telemetry/docs/index.rst
new file mode 100644
index 000000000..5d30c5e92
--- /dev/null
+++ b/toolkit/components/telemetry/docs/index.rst
@@ -0,0 +1,25 @@
+.. _telemetry:
+
+=========
+Telemetry
+=========
+
+Telemetry is a feature that allows data collection. This is being used to collect performance metrics and other information about how Firefox performs in the wild.
+
+Client-side, this consists of:
+
+* data collection in `Histograms <https://developer.mozilla.org/en-US/docs/Mozilla/Performance/Adding_a_new_Telemetry_probe>`_, :doc:`collection/scalars` and other data structures
+* assembling :doc:`concepts/pings` with the general information and the data payload
+* sending them to the server and local ping retention
+
+*Note:* the `data collection policy <https://wiki.mozilla.org/Firefox/Data_Collection>`_ documents the process and requirements that are applied here.
+
+.. toctree::
+ :maxdepth: 5
+ :titlesonly:
+
+ concepts/index
+ collection/index
+ data/index
+ internals/index
+ fhr/index
diff --git a/toolkit/components/telemetry/docs/internals/index.rst b/toolkit/components/telemetry/docs/internals/index.rst
new file mode 100644
index 000000000..e912ea49a
--- /dev/null
+++ b/toolkit/components/telemetry/docs/internals/index.rst
@@ -0,0 +1,9 @@
+=========
+Internals
+=========
+
+.. toctree::
+ :maxdepth: 2
+ :titlesonly:
+
+ preferences
diff --git a/toolkit/components/telemetry/docs/internals/preferences.rst b/toolkit/components/telemetry/docs/internals/preferences.rst
new file mode 100644
index 000000000..c8af2f2d5
--- /dev/null
+++ b/toolkit/components/telemetry/docs/internals/preferences.rst
@@ -0,0 +1,119 @@
+Preferences
+===========
+
+Telemetry behaviour is controlled through the preferences listed here.
+
+Default behaviors
+-----------------
+
+Sending only happens on official builds (i.e. with ``MOZILLA_OFFICIAL`` set) with ``MOZ_TELEMETRY_REPORTING`` defined.
+All other builds drop all outgoing pings, so they will also not retry sending them later.
+
+Preferences
+-----------
+
+``toolkit.telemetry.unified``
+
+ This controls whether unified behavior is enabled. If true:
+
+ * Telemetry is always enabled and recording *base* data.
+ * Telemetry will send additional ``main`` pings.
+
+``toolkit.telemetry.enabled``
+
+ If ``unified`` is off, this controls whether the Telemetry module is enabled.
+ If ``unified`` is on, this controls whether to record *extended* data.
+ This preference is controlled through the `Preferences` dialog.
+
+ Note that the default value of this pref depends on the define ``RELEASE_OR_BETA`` and the channel.
+ If ``RELEASE_OR_BETA`` is set, ``MOZ_TELEMETRY_ON_BY_DEFAULT`` gets set, which means this pref will default to ``true``.
+ This is overridden by the preferences code on the "beta" channel; the pref also defaults to ``true`` there.
+
+``datareporting.healthreport.uploadEnabled``
+
+ Send the data we record if user has consented to FHR. This preference is controlled through the `Preferences` dialog.
+
+``toolkit.telemetry.archive.enabled``
+
+ Allow pings to be archived locally. This can only be enabled if ``unified`` is on.
+
+``toolkit.telemetry.server``
+
+ The server Telemetry pings are sent to.
+
+``toolkit.telemetry.log.level``
+
+ This sets the Telemetry logging verbosity per ``Log.jsm``, with ``Trace`` or ``0`` being the most verbose and the default being ``Warn``.
+ By default logging goes only to the console service.
+
+``toolkit.telemetry.log.dump``
+
+ Sets whether to dump Telemetry log messages to ``stdout`` too.
+
+Data-choices notification
+-------------------------
+
+``toolkit.telemetry.reportingpolicy.firstRun``
+
+ This preference is not present until the first run. Afterwards, its value is set to ``false``. This is used to show the infobar with a more aggressive timeout if it wasn't shown yet.
+
+``datareporting.policy.firstRunURL``
+
+ If set, a browser tab will be opened on first run instead of the infobar.
+
+``datareporting.policy.dataSubmissionEnabled``
+
+ This is the data submission master kill switch. If disabled, no policy is shown or upload takes place, ever.
+
+``datareporting.policy.dataSubmissionPolicyNotifiedTime``
+
+ Records the date user was shown the policy. This preference is also used on Android.
+
+``datareporting.policy.dataSubmissionPolicyAcceptedVersion``
+
+ Records the version of the policy notified to the user. This preference is also used on Android.
+
+``datareporting.policy.dataSubmissionPolicyBypassNotification``
+
+ Used in tests, it allows to skip the notification check.
+
+``datareporting.policy.currentPolicyVersion``
+
+ Stores the current policy version, overrides the default value defined in TelemetryReportingPolicy.jsm.
+
+``datareporting.policy.minimumPolicyVersion``
+
+ The minimum policy version that is accepted for the current policy. This can be set per channel.
+
+``datareporting.policy.minimumPolicyVersion.channel-NAME``
+
+ This is the only channel-specific version that we currently use for the minimum policy version.
+
+Testing
+-------
+
+The following prefs are for testing purpose only.
+
+``toolkit.telemetry.initDelay``
+
+ Delay before initializing telemetry (seconds).
+
+``toolkit.telemetry.minSubsessionLength``
+
+ Minimum length of a telemetry subsession (seconds).
+
+``toolkit.telemetry.collectInterval``
+
+ Minimum interval between data collection (seconds).
+
+``toolkit.telemetry.scheduler.tickInterval``
+
+ Interval between scheduler ticks (seconds).
+
+``toolkit.telemetry.scheduler.idleTickInterval``
+
+ Interval between scheduler ticks when the user is idle (seconds).
+
+``toolkit.telemetry.idleTimeout``
+
+ Timeout until we decide whether a user is idle or not (seconds).
diff --git a/toolkit/components/telemetry/gen-event-data.py b/toolkit/components/telemetry/gen-event-data.py
new file mode 100644
index 000000000..0884dbba0
--- /dev/null
+++ b/toolkit/components/telemetry/gen-event-data.py
@@ -0,0 +1,142 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+# Write out event information for C++. The events are defined
+# in a file provided as a command-line argument.
+
+from __future__ import print_function
+from shared_telemetry_utils import StringTable, static_assert
+
+import parse_events
+import sys
+import itertools
+
+# The banner/text at the top of the generated file.
+banner = """/* This file is auto-generated, only for internal use in TelemetryEvent.h,
+ see gen-event-data.py. */
+"""
+
+file_header = """\
+#ifndef mozilla_TelemetryEventData_h
+#define mozilla_TelemetryEventData_h
+#include "EventInfo.h"
+namespace {
+"""
+
+file_footer = """\
+} // namespace
+#endif // mozilla_TelemetryEventData_h
+"""
+
+def write_extra_table(events, output, string_table):
+ table_name = "gExtraKeysTable"
+ extra_table = []
+ extra_count = 0
+
+ print("const uint32_t %s[] = {" % table_name, file=output)
+
+ for e in events:
+ extra_index = 0
+ extra_keys = e.extra_keys
+ if len(extra_keys) > 0:
+ extra_index = extra_count
+ extra_count += len(extra_keys)
+ indexes = string_table.stringIndexes(extra_keys)
+
+ print(" // %s, [%s], [%s]" % (
+ e.category,
+ ", ".join(e.methods),
+ ", ".join(e.objects)),
+ file=output)
+ print(" // extra_keys: %s" % ", ".join(extra_keys), file=output)
+ print(" %s," % ", ".join(map(str, indexes)),
+ file=output)
+
+ extra_table.append((extra_index, len(extra_keys)))
+
+ print("};", file=output)
+ static_assert(output, "sizeof(%s) <= UINT32_MAX" % table_name,
+ "index overflow")
+
+ return extra_table
+
+def write_common_event_table(events, output, string_table, extra_table):
+ table_name = "gCommonEventInfo"
+ extra_count = 0
+
+ print("const CommonEventInfo %s[] = {" % table_name, file=output)
+ for e,extras in zip(events, extra_table):
+ # Write a comment to make the file human-readable.
+ print(" // category: %s" % e.category, file=output)
+ print(" // methods: [%s]" % ", ".join(e.methods), file=output)
+ print(" // objects: [%s]" % ", ".join(e.objects), file=output)
+
+ # Write the common info structure
+ print(" {%d, %d, %d, %d, %d, %s}," %
+ (string_table.stringIndex(e.category),
+ string_table.stringIndex(e.expiry_version),
+ extras[0], # extra keys index
+ extras[1], # extra keys count
+ e.expiry_day,
+ e.dataset),
+ file=output)
+
+ print("};", file=output)
+ static_assert(output, "sizeof(%s) <= UINT32_MAX" % table_name,
+ "index overflow")
+
+def write_event_table(events, output, string_table):
+ table_name = "gEventInfo"
+ print("const EventInfo %s[] = {" % table_name, file=output)
+
+ for common_info_index,e in enumerate(events):
+ for method_name, object_name in itertools.product(e.methods, e.objects):
+ print(" // category: %s, method: %s, object: %s" %
+ (e.category, method_name, object_name),
+ file=output)
+
+ print(" {gCommonEventInfo[%d], %d, %d}," %
+ (common_info_index,
+ string_table.stringIndex(method_name),
+ string_table.stringIndex(object_name)),
+ file=output)
+
+ print("};", file=output)
+ static_assert(output, "sizeof(%s) <= UINT32_MAX" % table_name,
+ "index overflow")
+
+def main(output, *filenames):
+ # Load the event data.
+ if len(filenames) > 1:
+ raise Exception('We don\'t support loading from more than one file.')
+ events = parse_events.load_events(filenames[0])
+
+ # Write the scalar data file.
+ print(banner, file=output)
+ print(file_header, file=output)
+
+ # Write the extra keys table.
+ string_table = StringTable()
+ extra_table = write_extra_table(events, output, string_table)
+ print("", file=output)
+
+ # Write a table with the common event data.
+ write_common_event_table(events, output, string_table, extra_table)
+ print("", file=output)
+
+ # Write the data for individual events.
+ write_event_table(events, output, string_table)
+ print("", file=output)
+
+ # Write the string table.
+ string_table_name = "gEventsStringTable"
+ string_table.writeDefinition(output, string_table_name)
+ static_assert(output, "sizeof(%s) <= UINT32_MAX" % string_table_name,
+ "index overflow")
+ print("", file=output)
+
+ print(file_footer, file=output)
+
+if __name__ == '__main__':
+ main(sys.stdout, *sys.argv[1:])
diff --git a/toolkit/components/telemetry/gen-event-enum.py b/toolkit/components/telemetry/gen-event-enum.py
new file mode 100644
index 000000000..775ff8475
--- /dev/null
+++ b/toolkit/components/telemetry/gen-event-enum.py
@@ -0,0 +1,73 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+# Write out C++ enum definitions that represent the different event types.
+#
+# The events are defined in files provided as command-line arguments.
+
+from __future__ import print_function
+
+import sys
+import parse_events
+
+banner = """/* This file is auto-generated, see gen-event-enum.py. */
+"""
+
+file_header = """\
+#ifndef mozilla_TelemetryEventEnums_h
+#define mozilla_TelemetryEventEnums_h
+namespace mozilla {
+namespace Telemetry {
+namespace EventID {
+"""
+
+file_footer = """\
+} // namespace EventID
+} // namespace Telemetry
+} // namespace mozilla
+#endif // mozilla_TelemetryEventEnums_h
+"""
+
+def main(output, *filenames):
+ # Load the events first.
+ if len(filenames) > 1:
+ raise Exception('We don\'t support loading from more than one file.')
+ events = parse_events.load_events(filenames[0])
+
+ grouped = dict()
+ index = 0
+ for e in events:
+ category = e.category
+ if not category in grouped:
+ grouped[category] = []
+ grouped[category].append((index, e))
+ index += len(e.enum_labels)
+
+ # Write the enum file.
+ print(banner, file=output)
+ print(file_header, file=output);
+
+ for category,indexed in grouped.iteritems():
+ category_cpp = indexed[0][1].category_cpp
+
+ print("// category: %s" % category, file=output)
+ print("enum class %s : uint32_t {" % category_cpp, file=output)
+
+ for event_index,e in indexed:
+ cpp_guard = e.cpp_guard
+ if cpp_guard:
+ print("#if defined(%s)" % cpp_guard, file=output)
+ for offset,label in enumerate(e.enum_labels):
+ print(" %s = %d," % (label, event_index + offset), file=output)
+ if cpp_guard:
+ print("#endif", file=output)
+
+ print("};\n", file=output)
+
+ print("const uint32_t EventCount = %d;\n" % index, file=output)
+
+ print(file_footer, file=output)
+
+if __name__ == '__main__':
+ main(sys.stdout, *sys.argv[1:])
diff --git a/toolkit/components/telemetry/gen-histogram-bucket-ranges.py b/toolkit/components/telemetry/gen-histogram-bucket-ranges.py
new file mode 100644
index 000000000..286bc0e7b
--- /dev/null
+++ b/toolkit/components/telemetry/gen-histogram-bucket-ranges.py
@@ -0,0 +1,52 @@
+#!/usr/bin/env python
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+# Write out detailed histogram information, including the ranges of the
+# buckets specified by each histogram.
+
+import sys
+import re
+import histogram_tools
+import json
+
+from collections import OrderedDict
+
+def main(argv):
+ filenames = argv
+
+ all_histograms = OrderedDict()
+
+ for histogram in histogram_tools.from_files(filenames):
+ name = histogram.name()
+ parameters = OrderedDict()
+ table = {
+ 'boolean': '2',
+ 'flag': '3',
+ 'enumerated': '1',
+ 'linear': '1',
+ 'exponential': '0',
+ 'count': '4',
+ }
+ # Use __setitem__ because Python lambdas are so limited.
+ histogram_tools.table_dispatch(histogram.kind(), table,
+ lambda k: parameters.__setitem__('kind', k))
+ if histogram.low() == 0:
+ parameters['min'] = 1
+ else:
+ parameters['min'] = histogram.low()
+
+ try:
+ buckets = histogram.ranges()
+ parameters['buckets'] = buckets
+ parameters['max'] = buckets[-1]
+ parameters['bucket_count'] = len(buckets)
+ except histogram_tools.DefinitionException:
+ continue
+
+ all_histograms.update({ name: parameters });
+
+ print json.dumps({ 'histograms': all_histograms})
+
+main(sys.argv[1:])
diff --git a/toolkit/components/telemetry/gen-histogram-data.py b/toolkit/components/telemetry/gen-histogram-data.py
new file mode 100644
index 000000000..8e227201d
--- /dev/null
+++ b/toolkit/components/telemetry/gen-histogram-data.py
@@ -0,0 +1,178 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+# Write out histogram information for C++. The histograms are defined
+# in a file provided as a command-line argument.
+
+from __future__ import print_function
+from shared_telemetry_utils import StringTable, static_assert
+
+import sys
+import histogram_tools
+import itertools
+
+banner = """/* This file is auto-generated, see gen-histogram-data.py. */
+"""
+
+def print_array_entry(output, histogram, name_index, exp_index, label_index, label_count):
+ cpp_guard = histogram.cpp_guard()
+ if cpp_guard:
+ print("#if defined(%s)" % cpp_guard, file=output)
+ print(" { %s, %s, %s, %s, %d, %d, %s, %d, %d, %s }," \
+ % (histogram.low(),
+ histogram.high(),
+ histogram.n_buckets(),
+ histogram.nsITelemetry_kind(),
+ name_index,
+ exp_index,
+ histogram.dataset(),
+ label_index,
+ label_count,
+ "true" if histogram.keyed() else "false"), file=output)
+ if cpp_guard:
+ print("#endif", file=output)
+
+def write_histogram_table(output, histograms):
+ string_table = StringTable()
+ label_table = []
+ label_count = 0
+
+ print("const HistogramInfo gHistograms[] = {", file=output)
+ for histogram in histograms:
+ name_index = string_table.stringIndex(histogram.name())
+ exp_index = string_table.stringIndex(histogram.expiration())
+
+ labels = histogram.labels()
+ label_index = 0
+ if len(labels) > 0:
+ label_index = label_count
+ label_table.append((histogram.name(), string_table.stringIndexes(labels)))
+ label_count += len(labels)
+
+ print_array_entry(output, histogram,
+ name_index, exp_index,
+ label_index, len(labels))
+ print("};\n", file=output)
+
+ strtab_name = "gHistogramStringTable"
+ string_table.writeDefinition(output, strtab_name)
+ static_assert(output, "sizeof(%s) <= UINT32_MAX" % strtab_name,
+ "index overflow")
+
+ print("\nconst uint32_t gHistogramLabelTable[] = {", file=output)
+ for name,indexes in label_table:
+ print("/* %s */ %s," % (name, ", ".join(map(str, indexes))), file=output)
+ print("};", file=output)
+
+
+# Write out static asserts for histogram data. We'd prefer to perform
+# these checks in this script itself, but since several histograms
+# (generally enumerated histograms) use compile-time constants for
+# their upper bounds, we have to let the compiler do the checking.
+
+def static_asserts_for_boolean(output, histogram):
+ pass
+
+def static_asserts_for_flag(output, histogram):
+ pass
+
+def static_asserts_for_count(output, histogram):
+ pass
+
+def static_asserts_for_enumerated(output, histogram):
+ n_values = histogram.high()
+ static_assert(output, "%s > 2" % n_values,
+ "Not enough values for %s" % histogram.name())
+
+def shared_static_asserts(output, histogram):
+ name = histogram.name()
+ low = histogram.low()
+ high = histogram.high()
+ n_buckets = histogram.n_buckets()
+ static_assert(output, "%s < %s" % (low, high), "low >= high for %s" % name)
+ static_assert(output, "%s > 2" % n_buckets, "Not enough values for %s" % name)
+ static_assert(output, "%s >= 1" % low, "Incorrect low value for %s" % name)
+ static_assert(output, "%s > %s" % (high, n_buckets),
+ "high must be > number of buckets for %s; you may want an enumerated histogram" % name)
+
+def static_asserts_for_linear(output, histogram):
+ shared_static_asserts(output, histogram)
+
+def static_asserts_for_exponential(output, histogram):
+ shared_static_asserts(output, histogram)
+
+def write_histogram_static_asserts(output, histograms):
+ print("""
+// Perform the checks at the beginning of HistogramGet at
+// compile time, so that incorrect histogram definitions
+// give compile-time errors, not runtime errors.""", file=output)
+
+ table = {
+ 'boolean' : static_asserts_for_boolean,
+ 'flag' : static_asserts_for_flag,
+ 'count': static_asserts_for_count,
+ 'enumerated' : static_asserts_for_enumerated,
+ 'categorical' : static_asserts_for_enumerated,
+ 'linear' : static_asserts_for_linear,
+ 'exponential' : static_asserts_for_exponential,
+ }
+
+ for histogram in histograms:
+ histogram_tools.table_dispatch(histogram.kind(), table,
+ lambda f: f(output, histogram))
+
+def write_debug_histogram_ranges(output, histograms):
+ ranges_lengths = []
+
+ # Collect all the range information from individual histograms.
+ # Write that information out as well.
+ print("#ifdef DEBUG", file=output)
+ print("const int gBucketLowerBounds[] = {", file=output)
+ for histogram in histograms:
+ ranges = []
+ try:
+ ranges = histogram.ranges()
+ except histogram_tools.DefinitionException:
+ pass
+ ranges_lengths.append(len(ranges))
+ # Note that we do not test cpp_guard here. We do this so we
+ # will have complete information about all the histograms in
+ # this array. Just having information about the ranges of
+ # histograms is not platform-specific; if there are histograms
+ # that have platform-specific constants in their definitions,
+ # those histograms will fail in the .ranges() call above and
+ # we'll have a zero-length array to deal with here.
+ if len(ranges) > 0:
+ print(','.join(map(str, ranges)), ',', file=output)
+ else:
+ print('/* Skipping %s */' % histogram.name(), file=output)
+ print("};", file=output)
+
+ # Write the offsets into gBucketLowerBounds.
+ print("struct bounds { int offset; int length; };", file=output)
+ print("const struct bounds gBucketLowerBoundIndex[] = {", file=output)
+ offset = 0
+ for (histogram, range_length) in itertools.izip(histograms, ranges_lengths):
+ cpp_guard = histogram.cpp_guard()
+ # We do test cpp_guard here, so that histogram IDs are valid
+ # indexes into this array.
+ if cpp_guard:
+ print("#if defined(%s)" % cpp_guard, file=output)
+ print("{ %d, %d }," % (offset, range_length), file=output)
+ if cpp_guard:
+ print("#endif", file=output)
+ offset += range_length
+ print("};", file=output)
+ print("#endif", file=output)
+
+def main(output, *filenames):
+ histograms = list(histogram_tools.from_files(filenames))
+
+ print(banner, file=output)
+ write_histogram_table(output, histograms)
+ write_histogram_static_asserts(output, histograms)
+ write_debug_histogram_ranges(output, histograms)
+
+if __name__ == '__main__':
+ main(sys.stdout, *sys.argv[1:])
diff --git a/toolkit/components/telemetry/gen-histogram-enum.py b/toolkit/components/telemetry/gen-histogram-enum.py
new file mode 100644
index 000000000..8e08bc484
--- /dev/null
+++ b/toolkit/components/telemetry/gen-histogram-enum.py
@@ -0,0 +1,107 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+# Write out a C++ enum definition whose members are the names of
+# histograms as well as the following other members:
+#
+# - HistogramCount
+# - HistogramFirstUseCounter
+# - HistogramLastUseCounter
+# - HistogramUseCounterCount
+#
+# The histograms are defined in files provided as command-line arguments.
+
+from __future__ import print_function
+
+import histogram_tools
+import itertools
+import sys
+
+banner = """/* This file is auto-generated, see gen-histogram-enum.py. */
+"""
+
+header = """
+#ifndef mozilla_TelemetryHistogramEnums_h
+#define mozilla_TelemetryHistogramEnums_h
+
+#include "mozilla/TemplateLib.h"
+
+namespace mozilla {
+namespace Telemetry {
+"""
+
+footer = """
+} // namespace Telemetry
+} // namespace mozilla
+#endif // mozilla_TelemetryHistogramEnums_h"""
+
+def main(output, *filenames):
+ # Print header.
+ print(banner, file=output)
+ print(header, file=output)
+
+ # Load the histograms.
+ all_histograms = list(histogram_tools.from_files(filenames))
+ groups = itertools.groupby(all_histograms,
+ lambda h: h.name().startswith("USE_COUNTER2_"))
+
+ # Print the histogram enums.
+ # Note that histogram_tools.py guarantees that all of the USE_COUNTER2_*
+ # histograms are defined in a contiguous block. We therefore assume
+ # that there's at most one group for which use_counter_group is true.
+ print("enum ID : uint32_t {", file=output)
+ seen_use_counters = False
+ for (use_counter_group, histograms) in groups:
+ if use_counter_group:
+ seen_use_counters = True
+
+ # The HistogramDUMMY* enum variables are used to make the computation
+ # of Histogram{First,Last}UseCounter easier. Otherwise, we'd have to
+ # special case the first and last histogram in the group.
+ if use_counter_group:
+ print(" HistogramFirstUseCounter,", file=output)
+ print(" HistogramDUMMY1 = HistogramFirstUseCounter - 1,", file=output)
+
+ for histogram in histograms:
+ cpp_guard = histogram.cpp_guard()
+ if cpp_guard:
+ print("#if defined(%s)" % cpp_guard, file=output)
+ print(" %s," % histogram.name(), file=output)
+ if cpp_guard:
+ print("#endif", file=output)
+
+ if use_counter_group:
+ print(" HistogramDUMMY2,", file=output)
+ print(" HistogramLastUseCounter = HistogramDUMMY2 - 1,", file=output)
+
+ print(" HistogramCount,", file=output)
+ if seen_use_counters:
+ print(" HistogramUseCounterCount = HistogramLastUseCounter - HistogramFirstUseCounter + 1", file=output)
+ else:
+ print(" HistogramFirstUseCounter = 0,", file=output)
+ print(" HistogramLastUseCounter = 0,", file=output)
+ print(" HistogramUseCounterCount = 0", file=output)
+ print("};", file=output)
+
+ # Write categorical label enums.
+ categorical = filter(lambda h: h.kind() == "categorical", all_histograms)
+ enums = [("LABELS_" + h.name(), h.labels(), h.name()) for h in categorical]
+ for name,labels,_ in enums:
+ print("\nenum class %s : uint32_t {" % name, file=output)
+ print(" %s" % ",\n ".join(labels), file=output)
+ print("};", file=output)
+
+ print("\ntemplate<class T> struct IsCategoricalLabelEnum : FalseType {};", file=output)
+ for name,_,_ in enums:
+ print("template<> struct IsCategoricalLabelEnum<%s> : TrueType {};" % name, file=output)
+
+ print("\ntemplate<class T> struct CategoricalLabelId {};", file=output)
+ for name,_,id in enums:
+ print("template<> struct CategoricalLabelId<%s> : IntegralConstant<uint32_t, %s> {};" % (name, id), file=output)
+
+ # Footer.
+ print(footer, file=output)
+
+if __name__ == '__main__':
+ main(sys.stdout, *sys.argv[1:])
diff --git a/toolkit/components/telemetry/gen-scalar-data.py b/toolkit/components/telemetry/gen-scalar-data.py
new file mode 100644
index 000000000..6c17c602f
--- /dev/null
+++ b/toolkit/components/telemetry/gen-scalar-data.py
@@ -0,0 +1,90 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+# Write out scalar information for C++. The scalars are defined
+# in a file provided as a command-line argument.
+
+from __future__ import print_function
+from shared_telemetry_utils import StringTable, static_assert
+
+import parse_scalars
+import sys
+
+# The banner/text at the top of the generated file.
+banner = """/* This file is auto-generated, only for internal use in TelemetryScalar.h,
+ see gen-scalar-data.py. */
+"""
+
+file_header = """\
+#ifndef mozilla_TelemetryScalarData_h
+#define mozilla_TelemetryScalarData_h
+#include "ScalarInfo.h"
+namespace {
+"""
+
+file_footer = """\
+} // namespace
+#endif // mozilla_TelemetryScalarData_h
+"""
+
+def write_scalar_info(scalar, output, name_index, expiration_index):
+ """Writes a scalar entry to the output file.
+
+ :param scalar: a ScalarType instance describing the scalar.
+ :param output: the output stream.
+ :param name_index: the index of the scalar name in the strings table.
+ :param expiration_index: the index of the expiration version in the strings table.
+ """
+ cpp_guard = scalar.cpp_guard
+ if cpp_guard:
+ print("#if defined(%s)" % cpp_guard, file=output)
+
+ print(" {{ {}, {}, {}, {}, {} }},"\
+ .format(scalar.nsITelemetry_kind,
+ name_index,
+ expiration_index,
+ scalar.dataset,
+ "true" if scalar.keyed else "false"),
+ file=output)
+
+ if cpp_guard:
+ print("#endif", file=output)
+
+def write_scalar_tables(scalars, output):
+ """Writes the scalar and strings tables to an header file.
+
+ :param scalars: a list of ScalarType instances describing the scalars.
+ :param output: the output stream.
+ """
+ string_table = StringTable()
+
+ print("const ScalarInfo gScalars[] = {", file=output)
+ for s in scalars:
+ # We add both the scalar label and the expiration string to the strings
+ # table.
+ name_index = string_table.stringIndex(s.label)
+ exp_index = string_table.stringIndex(s.expires)
+ # Write the scalar info entry.
+ write_scalar_info(s, output, name_index, exp_index)
+ print("};", file=output)
+
+ string_table_name = "gScalarsStringTable"
+ string_table.writeDefinition(output, string_table_name)
+ static_assert(output, "sizeof(%s) <= UINT32_MAX" % string_table_name,
+ "index overflow")
+
+def main(output, *filenames):
+ # Load the scalars first.
+ if len(filenames) > 1:
+ raise Exception('We don\'t support loading from more than one file.')
+ scalars = parse_scalars.load_scalars(filenames[0])
+
+ # Write the scalar data file.
+ print(banner, file=output)
+ print(file_header, file=output)
+ write_scalar_tables(scalars, output)
+ print(file_footer, file=output)
+
+if __name__ == '__main__':
+ main(sys.stdout, *sys.argv[1:])
diff --git a/toolkit/components/telemetry/gen-scalar-enum.py b/toolkit/components/telemetry/gen-scalar-enum.py
new file mode 100644
index 000000000..f0ca01d4b
--- /dev/null
+++ b/toolkit/components/telemetry/gen-scalar-enum.py
@@ -0,0 +1,56 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+# Write out a C++ enum definition whose members are the names of
+# scalar types.
+#
+# The scalars are defined in files provided as command-line arguments.
+
+from __future__ import print_function
+
+import sys
+import parse_scalars
+
+banner = """/* This file is auto-generated, see gen-scalar-enum.py. */
+"""
+
+file_header = """\
+#ifndef mozilla_TelemetryScalarEnums_h
+#define mozilla_TelemetryScalarEnums_h
+namespace mozilla {
+namespace Telemetry {
+enum class ScalarID : uint32_t {\
+"""
+
+file_footer = """\
+};
+} // namespace Telemetry
+} // namespace mozilla
+#endif // mozilla_TelemetryScalarEnums_h
+"""
+
+def main(output, *filenames):
+ # Load the scalars first.
+ if len(filenames) > 1:
+ raise Exception('We don\'t support loading from more than one file.')
+ scalars = parse_scalars.load_scalars(filenames[0])
+
+ # Write the enum file.
+ print(banner, file=output)
+ print(file_header, file=output);
+
+ for s in scalars:
+ cpp_guard = s.cpp_guard
+ if cpp_guard:
+ print("#if defined(%s)" % cpp_guard, file=output)
+ print(" %s," % s.enum_label, file=output)
+ if cpp_guard:
+ print("#endif", file=output)
+
+ print(" ScalarCount,", file=output)
+
+ print(file_footer, file=output)
+
+if __name__ == '__main__':
+ main(sys.stdout, *sys.argv[1:])
diff --git a/toolkit/components/telemetry/healthreport-prefs.js b/toolkit/components/telemetry/healthreport-prefs.js
new file mode 100644
index 000000000..021028e1c
--- /dev/null
+++ b/toolkit/components/telemetry/healthreport-prefs.js
@@ -0,0 +1,10 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+pref("datareporting.healthreport.infoURL", "https://www.mozilla.org/legal/privacy/firefox.html#health-report");
+
+// Health Report is enabled by default on all channels.
+pref("datareporting.healthreport.uploadEnabled", true);
+
+pref("datareporting.healthreport.about.reportUrl", "https://fhr.cdn.mozilla.net/%LOCALE%/v4/");
diff --git a/toolkit/components/telemetry/histogram-whitelists.json b/toolkit/components/telemetry/histogram-whitelists.json
new file mode 100644
index 000000000..52db33192
--- /dev/null
+++ b/toolkit/components/telemetry/histogram-whitelists.json
@@ -0,0 +1,1990 @@
+{
+ "alert_emails": [
+ "A11Y_CONSUMERS",
+ "A11Y_IATABLE_USAGE_FLAG",
+ "A11Y_INSTANTIATED_FLAG",
+ "A11Y_ISIMPLEDOM_USAGE_FLAG",
+ "A11Y_UPDATE_TIME",
+ "ADDON_SHIM_USAGE",
+ "AUDIOSTREAM_FIRST_OPEN_MS",
+ "AUDIOSTREAM_LATER_OPEN_MS",
+ "AUTO_REJECTED_TRANSLATION_OFFERS",
+ "BACKGROUNDFILESAVER_THREAD_COUNT",
+ "BAD_FALLBACK_FONT",
+ "BROWSERPROVIDER_XUL_IMPORT_BOOKMARKS",
+ "BROWSER_IS_ASSIST_DEFAULT",
+ "BROWSER_IS_USER_DEFAULT",
+ "BROWSER_IS_USER_DEFAULT_ERROR",
+ "BROWSER_SET_DEFAULT_ALWAYS_CHECK",
+ "BROWSER_SET_DEFAULT_DIALOG_PROMPT_RAWCOUNT",
+ "BROWSER_SET_DEFAULT_ERROR",
+ "BROWSER_SET_DEFAULT_RESULT",
+ "BROWSER_SET_DEFAULT_TIME_TO_COMPLETION_SECONDS",
+ "BR_9_2_1_SUBJECT_ALT_NAMES",
+ "BR_9_2_2_SUBJECT_COMMON_NAME",
+ "CACHE_DEVICE_SEARCH_2",
+ "CACHE_DISK_SEARCH_2",
+ "CACHE_LM_INCONSISTENT",
+ "CACHE_MEMORY_SEARCH_2",
+ "CACHE_OFFLINE_SEARCH_2",
+ "CACHE_SERVICE_LOCK_WAIT_2",
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_2",
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSASYNCDOOMEVENT_RUN",
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSBLOCKONCACHETHREADEVENT_RUN",
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHEENTRYDESCRIPTOR_CLOSE",
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHEENTRYDESCRIPTOR_DOOM",
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHEENTRYDESCRIPTOR_DOOMANDFAILPENDINGREQUESTS",
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHEENTRYDESCRIPTOR_GETCACHEELEMENT",
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHEENTRYDESCRIPTOR_GETCLIENTID",
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHEENTRYDESCRIPTOR_GETDATASIZE",
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHEENTRYDESCRIPTOR_GETDEVICEID",
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHEENTRYDESCRIPTOR_GETEXPIRATIONTIME",
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHEENTRYDESCRIPTOR_GETFETCHCOUNT",
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHEENTRYDESCRIPTOR_GETFILE",
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHEENTRYDESCRIPTOR_GETKEY",
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHEENTRYDESCRIPTOR_GETLASTFETCHED",
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHEENTRYDESCRIPTOR_GETLASTMODIFIED",
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHEENTRYDESCRIPTOR_GETMETADATAELEMENT",
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHEENTRYDESCRIPTOR_GETPREDICTEDDATASIZE",
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHEENTRYDESCRIPTOR_GETSECURITYINFO",
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHEENTRYDESCRIPTOR_GETSTORAGEDATASIZE",
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHEENTRYDESCRIPTOR_GETSTORAGEPOLICY",
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHEENTRYDESCRIPTOR_ISSTREAMBASED",
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHEENTRYDESCRIPTOR_MARKVALID",
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHEENTRYDESCRIPTOR_OPENINPUTSTREAM",
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHEENTRYDESCRIPTOR_OPENOUTPUTSTREAM",
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHEENTRYDESCRIPTOR_REQUESTDATASIZECHANGE",
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHEENTRYDESCRIPTOR_SETCACHEELEMENT",
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHEENTRYDESCRIPTOR_SETDATASIZE",
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHEENTRYDESCRIPTOR_SETEXPIRATIONTIME",
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHEENTRYDESCRIPTOR_SETMETADATAELEMENT",
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHEENTRYDESCRIPTOR_SETPREDICTEDDATASIZE",
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHEENTRYDESCRIPTOR_SETSECURITYINFO",
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHEENTRYDESCRIPTOR_SETSTORAGEPOLICY",
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHEENTRYDESCRIPTOR_VISITMETADATA",
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHESERVICE_CLOSEALLSTREAMS",
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHESERVICE_DISKDEVICEHEAPSIZE",
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHESERVICE_EVICTENTRIESFORCLIENT",
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHESERVICE_GETCACHEIOTARGET",
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHESERVICE_ISSTORAGEENABLEDFORPOLICY",
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHESERVICE_ONPROFILECHANGED",
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHESERVICE_ONPROFILESHUTDOWN",
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHESERVICE_OPENCACHEENTRY",
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHESERVICE_PROCESSREQUEST",
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHESERVICE_SETDISKCACHECAPACITY",
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHESERVICE_SETDISKCACHEENABLED",
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHESERVICE_SETDISKCACHEMAXENTRYSIZE",
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHESERVICE_SETDISKSMARTSIZE",
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHESERVICE_SETMEMORYCACHE",
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHESERVICE_SETMEMORYCACHEMAXENTRYSIZE",
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHESERVICE_SETOFFLINECACHECAPACITY",
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHESERVICE_SETOFFLINECACHEENABLED",
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHESERVICE_SHUTDOWN",
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHESERVICE_VISITENTRIES",
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCOMPRESSOUTPUTSTREAMWRAPPER_RELEASE",
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSDECOMPRESSINPUTSTREAMWRAPPER_RELEASE",
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSINPUTSTREAMWRAPPER_CLOSEINTERNAL",
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSINPUTSTREAMWRAPPER_LAZYINIT",
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSINPUTSTREAMWRAPPER_RELEASE",
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSOUTPUTSTREAMWRAPPER_CLOSEINTERNAL",
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSOUTPUTSTREAMWRAPPER_LAZYINIT",
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSOUTPUTSTREAMWRAPPER_RELEASE",
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSPROCESSREQUESTEVENT_RUN",
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSSETDISKSMARTSIZECALLBACK_NOTIFY",
+ "CANVAS_2D_USED",
+ "CANVAS_WEBGL_USED",
+ "CERT_CHAIN_KEY_SIZE_STATUS",
+ "CERT_CHAIN_SHA1_POLICY_STATUS",
+ "CERT_VALIDATION_SUCCESS_BY_CA",
+ "CHANGES_OF_DETECTED_LANGUAGE",
+ "CHANGES_OF_TARGET_LANGUAGE",
+ "CHARSET_OVERRIDE_SITUATION",
+ "CHARSET_OVERRIDE_USED",
+ "CHECK_ADDONS_MODIFIED_MS",
+ "CHECK_JAVA_ENABLED",
+ "COMPONENTS_SHIM_ACCESSED_BY_CONTENT",
+ "COMPOSITE_FRAME_ROUNDTRIP_TIME",
+ "COMPOSITE_TIME",
+ "CONTENT_DOCUMENTS_DESTROYED",
+ "CRASH_STORE_COMPRESSED_BYTES",
+ "DATABASE_LOCKED_EXCEPTION",
+ "DATABASE_SUCCESSFUL_UNLOCK",
+ "DATA_STORAGE_ENTRIES",
+ "DECODER_INSTANTIATED_IBM866",
+ "DECODER_INSTANTIATED_ISO2022JP",
+ "DECODER_INSTANTIATED_ISO_8859_5",
+ "DECODER_INSTANTIATED_KOI8R",
+ "DECODER_INSTANTIATED_KOI8U",
+ "DECODER_INSTANTIATED_MACARABIC",
+ "DECODER_INSTANTIATED_MACCE",
+ "DECODER_INSTANTIATED_MACCROATIAN",
+ "DECODER_INSTANTIATED_MACCYRILLIC",
+ "DECODER_INSTANTIATED_MACDEVANAGARI",
+ "DECODER_INSTANTIATED_MACFARSI",
+ "DECODER_INSTANTIATED_MACGREEK",
+ "DECODER_INSTANTIATED_MACGUJARATI",
+ "DECODER_INSTANTIATED_MACGURMUKHI",
+ "DECODER_INSTANTIATED_MACHEBREW",
+ "DECODER_INSTANTIATED_MACICELANDIC",
+ "DECODER_INSTANTIATED_MACROMANIAN",
+ "DECODER_INSTANTIATED_MACTURKISH",
+ "DEFECTIVE_PERMISSIONS_SQL_REMOVED",
+ "DEFERRED_FINALIZE_ASYNC",
+ "DENIED_TRANSLATION_OFFERS",
+ "DEVICE_RESET_REASON",
+ "DEVTOOLS_ANIMATIONINSPECTOR_TIME_ACTIVE_SECONDS",
+ "DEVTOOLS_BROWSERCONSOLE_TIME_ACTIVE_SECONDS",
+ "DEVTOOLS_CANVASDEBUGGER_TIME_ACTIVE_SECONDS",
+ "DEVTOOLS_COMPUTEDVIEW_TIME_ACTIVE_SECONDS",
+ "DEVTOOLS_CUSTOM_TIME_ACTIVE_SECONDS",
+ "DEVTOOLS_DEBUGGER_DISPLAY_SOURCE_LOCAL_MS",
+ "DEVTOOLS_DEBUGGER_DISPLAY_SOURCE_REMOTE_MS",
+ "DEVTOOLS_DEVELOPERTOOLBAR_TIME_ACTIVE_SECONDS",
+ "DEVTOOLS_FONTINSPECTOR_TIME_ACTIVE_SECONDS",
+ "DEVTOOLS_HEAP_SNAPSHOT_EDGE_COUNT",
+ "DEVTOOLS_HEAP_SNAPSHOT_NODE_COUNT",
+ "DEVTOOLS_INSPECTOR_TIME_ACTIVE_SECONDS",
+ "DEVTOOLS_JSBROWSERDEBUGGER_TIME_ACTIVE_SECONDS",
+ "DEVTOOLS_JSDEBUGGER_TIME_ACTIVE_SECONDS",
+ "DEVTOOLS_JSPROFILER_TIME_ACTIVE_SECONDS",
+ "DEVTOOLS_MEMORY_BREAKDOWN_CENSUS_COUNT",
+ "DEVTOOLS_MEMORY_BREAKDOWN_DOMINATOR_TREE_COUNT",
+ "DEVTOOLS_MEMORY_DIFF_CENSUS",
+ "DEVTOOLS_MEMORY_DOMINATOR_TREE_COUNT",
+ "DEVTOOLS_MEMORY_EXPORT_SNAPSHOT_COUNT",
+ "DEVTOOLS_MEMORY_FILTER_CENSUS",
+ "DEVTOOLS_MEMORY_IMPORT_SNAPSHOT_COUNT",
+ "DEVTOOLS_MEMORY_INVERTED_CENSUS",
+ "DEVTOOLS_MEMORY_TAKE_SNAPSHOT_COUNT",
+ "DEVTOOLS_MEMORY_TIME_ACTIVE_SECONDS",
+ "DEVTOOLS_NETMONITOR_TIME_ACTIVE_SECONDS",
+ "DEVTOOLS_OPTIONS_TIME_ACTIVE_SECONDS",
+ "DEVTOOLS_OS_ENUMERATED_PER_USER",
+ "DEVTOOLS_OS_IS_64_BITS_PER_USER",
+ "DEVTOOLS_PAINTFLASHING_TIME_ACTIVE_SECONDS",
+ "DEVTOOLS_PERFTOOLS_CONSOLE_RECORDING_COUNT",
+ "DEVTOOLS_PERFTOOLS_RECORDING_COUNT",
+ "DEVTOOLS_PERFTOOLS_RECORDING_DURATION_MS",
+ "DEVTOOLS_PERFTOOLS_RECORDING_EXPORT_FLAG",
+ "DEVTOOLS_PERFTOOLS_RECORDING_FEATURES_USED",
+ "DEVTOOLS_PERFTOOLS_RECORDING_IMPORT_FLAG",
+ "DEVTOOLS_PERFTOOLS_SELECTED_VIEW_MS",
+ "DEVTOOLS_READ_HEAP_SNAPSHOT_MS",
+ "DEVTOOLS_RESPONSIVE_TIME_ACTIVE_SECONDS",
+ "DEVTOOLS_RULEVIEW_TIME_ACTIVE_SECONDS",
+ "DEVTOOLS_SAVE_HEAP_SNAPSHOT_MS",
+ "DEVTOOLS_SCRATCHPAD_TIME_ACTIVE_SECONDS",
+ "DEVTOOLS_SCREEN_RESOLUTION_ENUMERATED_PER_USER",
+ "DEVTOOLS_SHADEREDITOR_TIME_ACTIVE_SECONDS",
+ "DEVTOOLS_STORAGE_TIME_ACTIVE_SECONDS",
+ "DEVTOOLS_STYLEEDITOR_TIME_ACTIVE_SECONDS",
+ "DEVTOOLS_TABS_OPEN_AVERAGE_LINEAR",
+ "DEVTOOLS_TABS_OPEN_PEAK_LINEAR",
+ "DEVTOOLS_TABS_PINNED_AVERAGE_LINEAR",
+ "DEVTOOLS_TABS_PINNED_PEAK_LINEAR",
+ "DEVTOOLS_TILT_TIME_ACTIVE_SECONDS",
+ "DEVTOOLS_TOOLBOX_TIME_ACTIVE_SECONDS",
+ "DEVTOOLS_WEBAUDIOEDITOR_TIME_ACTIVE_SECONDS",
+ "DEVTOOLS_WEBCONSOLE_TIME_ACTIVE_SECONDS",
+ "DEVTOOLS_WEBIDE_CONNECTED_RUNTIME_APP_TYPE",
+ "DEVTOOLS_WEBIDE_CONNECTED_RUNTIME_ID",
+ "DEVTOOLS_WEBIDE_CONNECTED_RUNTIME_OS",
+ "DEVTOOLS_WEBIDE_CONNECTED_RUNTIME_PLATFORM_VERSION",
+ "DEVTOOLS_WEBIDE_CONNECTED_RUNTIME_PROCESSOR",
+ "DEVTOOLS_WEBIDE_CONNECTED_RUNTIME_TYPE",
+ "DEVTOOLS_WEBIDE_CONNECTED_RUNTIME_VERSION",
+ "DEVTOOLS_WEBIDE_CONNECTION_DEBUG_USED",
+ "DEVTOOLS_WEBIDE_CONNECTION_PLAY_USED",
+ "DEVTOOLS_WEBIDE_CONNECTION_RESULT",
+ "DEVTOOLS_WEBIDE_CONNECTION_TIME_SECONDS",
+ "DEVTOOLS_WEBIDE_LOCAL_CONNECTION_RESULT",
+ "DEVTOOLS_WEBIDE_OTHER_CONNECTION_RESULT",
+ "DEVTOOLS_WEBIDE_PROJECT_EDITOR_TIME_ACTIVE_SECONDS",
+ "DEVTOOLS_WEBIDE_REMOTE_CONNECTION_RESULT",
+ "DEVTOOLS_WEBIDE_SIMULATOR_CONNECTION_RESULT",
+ "DEVTOOLS_WEBIDE_TIME_ACTIVE_SECONDS",
+ "DEVTOOLS_WEBIDE_USB_CONNECTION_RESULT",
+ "DEVTOOLS_WEBIDE_WIFI_CONNECTION_RESULT",
+ "DISPLAY_SCALING_LINUX",
+ "DISPLAY_SCALING_MSWIN",
+ "DISPLAY_SCALING_OSX",
+ "DNS_BLACKLIST_COUNT",
+ "DNS_CLEANUP_AGE",
+ "DNS_FAILED_LOOKUP_TIME",
+ "DNS_LOOKUP_METHOD2",
+ "DNS_LOOKUP_TIME",
+ "DNS_RENEWAL_TIME",
+ "DNS_RENEWAL_TIME_FOR_TTL",
+ "DNT_USAGE",
+ "DWRITEFONT_DELAYEDINITFONTLIST_COLLECT",
+ "DWRITEFONT_DELAYEDINITFONTLIST_COUNT",
+ "DWRITEFONT_DELAYEDINITFONTLIST_TOTAL",
+ "DWRITEFONT_INIT_PROBLEM",
+ "E10S_BLOCKED_FROM_RUNNING",
+ "E10S_WINDOW",
+ "ENABLE_PRIVILEGE_EVER_CALLED",
+ "FENNEC_DISTRIBUTION_CODE_CATEGORY",
+ "FENNEC_DISTRIBUTION_DOWNLOAD_TIME_MS",
+ "FENNEC_DISTRIBUTION_REFERRER_INVALID",
+ "FENNEC_GLOBALHISTORY_ADD_MS",
+ "FENNEC_GLOBALHISTORY_UPDATE_MS",
+ "FENNEC_GLOBALHISTORY_VISITED_BUILD_MS",
+ "FENNEC_HOMEPANELS_CUSTOM",
+ "FENNEC_READING_LIST_COUNT",
+ "FENNEC_RESTORING_ACTIVITY",
+ "FENNEC_SEARCH_LOADER_TIME_MS",
+ "FENNEC_STARTUP_TIME_GECKOREADY",
+ "FENNEC_STARTUP_TIME_JAVAUI",
+ "FENNEC_SYNC11_MIGRATIONS_COMPLETED",
+ "FENNEC_SYNC11_MIGRATIONS_FAILED",
+ "FENNEC_SYNC11_MIGRATIONS_SUCCEEDED",
+ "FENNEC_SYNC11_MIGRATION_NOTIFICATIONS_OFFERED",
+ "FENNEC_SYNC11_MIGRATION_SENTINELS_SEEN",
+ "FENNEC_SYNC_NUMBER_OF_SYNCS_COMPLETED",
+ "FENNEC_SYNC_NUMBER_OF_SYNCS_FAILED",
+ "FENNEC_SYNC_NUMBER_OF_SYNCS_FAILED_BACKOFF",
+ "FENNEC_SYNC_NUMBER_OF_SYNCS_STARTED",
+ "FENNEC_TABQUEUE_QUEUESIZE",
+ "FENNEC_TOPSITES_LOADER_TIME_MS",
+ "FENNEC_WAS_KILLED",
+ "FETCH_IS_MAINTHREAD",
+ "FLASH_PLUGIN_AREA",
+ "FLASH_PLUGIN_HEIGHT",
+ "FLASH_PLUGIN_INSTANCES_ON_PAGE",
+ "FLASH_PLUGIN_STATES",
+ "FLASH_PLUGIN_WIDTH",
+ "FONTLIST_INITFACENAMELISTS",
+ "FONTLIST_INITOTHERFAMILYNAMES",
+ "FONT_CACHE_HIT",
+ "FORCED_DEVICE_RESET_REASON",
+ "FX_BOOKMARKS_TOOLBAR_INIT_MS",
+ "FX_BROWSER_FULLSCREEN_USED",
+ "FX_GESTURE_COMPRESS_SNAPSHOT_OF_PAGE",
+ "FX_GESTURE_INSTALL_SNAPSHOT_OF_PAGE",
+ "FX_NEW_WINDOW_MS",
+ "FX_PAGE_LOAD_MS",
+ "FX_SESSION_RESTORE_DOM_STORAGE_SIZE_ESTIMATE_CHARS",
+ "FX_SESSION_RESTORE_NUMBER_OF_EAGER_TABS_RESTORED",
+ "FX_SESSION_RESTORE_NUMBER_OF_TABS_RESTORED",
+ "FX_SESSION_RESTORE_NUMBER_OF_WINDOWS_RESTORED",
+ "FX_TABLETMODE_PAGE_LOAD",
+ "FX_TAB_ANIM_ANY_FRAME_INTERVAL_MS",
+ "FX_TAB_ANIM_OPEN_FRAME_INTERVAL_MS",
+ "FX_TAB_ANIM_OPEN_PREVIEW_FRAME_INTERVAL_MS",
+ "FX_TAB_CLICK_MS",
+ "FX_TAB_SWITCH_SPINNER_VISIBLE_MS",
+ "FX_TAB_SWITCH_TOTAL_E10S_MS",
+ "FX_TAB_SWITCH_TOTAL_MS",
+ "FX_THUMBNAILS_BG_CAPTURE_CANVAS_DRAW_TIME_MS",
+ "FX_THUMBNAILS_BG_CAPTURE_DONE_REASON_2",
+ "FX_THUMBNAILS_BG_CAPTURE_PAGE_LOAD_TIME_MS",
+ "FX_THUMBNAILS_BG_CAPTURE_QUEUE_TIME_MS",
+ "FX_THUMBNAILS_BG_CAPTURE_SERVICE_TIME_MS",
+ "FX_THUMBNAILS_BG_QUEUE_SIZE_ON_CAPTURE",
+ "FX_THUMBNAILS_CAPTURE_TIME_MS",
+ "FX_THUMBNAILS_HIT_OR_MISS",
+ "FX_THUMBNAILS_STORE_TIME_MS",
+ "FX_TOTAL_TOP_VISITS",
+ "FX_TOUCH_USED",
+ "GDI_INITFONTLIST_TOTAL",
+ "GEOLOCATION_ACCURACY_EXPONENTIAL",
+ "GEOLOCATION_ERROR",
+ "GEOLOCATION_GETCURRENTPOSITION_SECURE_ORIGIN",
+ "GEOLOCATION_OSX_SOURCE_IS_MLS",
+ "GEOLOCATION_REQUEST_GRANTED",
+ "GEOLOCATION_WATCHPOSITION_SECURE_ORIGIN",
+ "GEOLOCATION_WIN8_SOURCE_IS_MLS",
+ "GFX_CRASH",
+ "GRADIENT_DURATION",
+ "GRADIENT_RETENTION_TIME",
+ "HISTORY_LASTVISITED_TREE_QUERY_TIME_MS",
+ "HTTPCONNMGR_TOTAL_SPECULATIVE_CONN",
+ "HTTPCONNMGR_UNUSED_SPECULATIVE_CONN",
+ "HTTPCONNMGR_USED_SPECULATIVE_CONN",
+ "HTTP_AUTH_DIALOG_STATS",
+ "HTTP_CACHE_DISPOSITION_2",
+ "HTTP_CACHE_DISPOSITION_2_V2",
+ "HTTP_CACHE_ENTRY_ALIVE_TIME",
+ "HTTP_CACHE_ENTRY_RELOAD_TIME",
+ "HTTP_CACHE_ENTRY_REUSE_COUNT",
+ "HTTP_CACHE_MISS_HALFLIFE_EXPERIMENT_2",
+ "HTTP_CONNECTION_ENTRY_CACHE_HIT_1",
+ "HTTP_CONTENT_ENCODING",
+ "HTTP_DISK_CACHE_DISPOSITION_2",
+ "HTTP_DISK_CACHE_OVERHEAD",
+ "HTTP_KBREAD_PER_CONN",
+ "HTTP_MEMORY_CACHE_DISPOSITION_2",
+ "HTTP_OFFLINE_CACHE_DISPOSITION_2",
+ "HTTP_OFFLINE_CACHE_DOCUMENT_LOAD",
+ "HTTP_PAGELOAD_IS_SSL",
+ "HTTP_PAGE_CACHE_READ_TIME",
+ "HTTP_PAGE_CACHE_READ_TIME_V2",
+ "HTTP_PAGE_COMPLETE_LOAD",
+ "HTTP_PAGE_COMPLETE_LOAD_CACHED",
+ "HTTP_PAGE_COMPLETE_LOAD_CACHED_V2",
+ "HTTP_PAGE_COMPLETE_LOAD_NET",
+ "HTTP_PAGE_COMPLETE_LOAD_NET_V2",
+ "HTTP_PAGE_COMPLETE_LOAD_V2",
+ "HTTP_PAGE_DNS_ISSUE_TIME",
+ "HTTP_PAGE_DNS_LOOKUP_TIME",
+ "HTTP_PAGE_FIRST_SENT_TO_LAST_RECEIVED",
+ "HTTP_PAGE_OPEN_TO_FIRST_FROM_CACHE",
+ "HTTP_PAGE_OPEN_TO_FIRST_FROM_CACHE_V2",
+ "HTTP_PAGE_OPEN_TO_FIRST_RECEIVED",
+ "HTTP_PAGE_OPEN_TO_FIRST_SENT",
+ "HTTP_PAGE_REVALIDATION",
+ "HTTP_PAGE_TCP_CONNECTION",
+ "HTTP_PROXY_TYPE",
+ "HTTP_REQUEST_PER_CONN",
+ "HTTP_REQUEST_PER_PAGE",
+ "HTTP_REQUEST_PER_PAGE_FROM_CACHE",
+ "HTTP_RESPONSE_VERSION",
+ "HTTP_SAW_QUIC_ALT_PROTOCOL",
+ "HTTP_SCHEME_UPGRADE",
+ "HTTP_SUBITEM_FIRST_BYTE_LATENCY_TIME",
+ "HTTP_SUBITEM_OPEN_LATENCY_TIME",
+ "HTTP_SUB_CACHE_READ_TIME",
+ "HTTP_SUB_CACHE_READ_TIME_V2",
+ "HTTP_SUB_COMPLETE_LOAD",
+ "HTTP_SUB_COMPLETE_LOAD_CACHED",
+ "HTTP_SUB_COMPLETE_LOAD_CACHED_V2",
+ "HTTP_SUB_COMPLETE_LOAD_NET",
+ "HTTP_SUB_COMPLETE_LOAD_NET_V2",
+ "HTTP_SUB_COMPLETE_LOAD_V2",
+ "HTTP_SUB_DNS_ISSUE_TIME",
+ "HTTP_SUB_DNS_LOOKUP_TIME",
+ "HTTP_SUB_FIRST_SENT_TO_LAST_RECEIVED",
+ "HTTP_SUB_OPEN_TO_FIRST_FROM_CACHE",
+ "HTTP_SUB_OPEN_TO_FIRST_FROM_CACHE_V2",
+ "HTTP_SUB_OPEN_TO_FIRST_RECEIVED",
+ "HTTP_SUB_OPEN_TO_FIRST_SENT",
+ "HTTP_SUB_REVALIDATION",
+ "HTTP_SUB_TCP_CONNECTION",
+ "HTTP_TRANSACTION_IS_SSL",
+ "HTTP_TRANSACTION_USE_ALTSVC",
+ "HTTP_TRANSACTION_USE_ALTSVC_OE",
+ "IMAGE_DECODE_CHUNKS",
+ "IMAGE_DECODE_COUNT",
+ "IMAGE_DECODE_LATENCY_US",
+ "IMAGE_DECODE_ON_DRAW_LATENCY",
+ "IMAGE_DECODE_SPEED_GIF",
+ "IMAGE_DECODE_SPEED_JPEG",
+ "IMAGE_DECODE_SPEED_PNG",
+ "IMAGE_DECODE_TIME",
+ "INNERWINDOWS_WITH_MUTATION_LISTENERS",
+ "IPC_SAME_PROCESS_MESSAGE_COPY_OOM_KB",
+ "IPV4_AND_IPV6_ADDRESS_CONNECTIVITY",
+ "JS_TELEMETRY_ADDON_EXCEPTIONS",
+ "LINK_ICON_SIZES_ATTR_DIMENSION",
+ "LINK_ICON_SIZES_ATTR_USAGE",
+ "LOCALDOMSTORAGE_CLEAR_BLOCKING_MS",
+ "LOCALDOMSTORAGE_GETALLKEYS_BLOCKING_MS",
+ "LOCALDOMSTORAGE_GETKEY_BLOCKING_MS",
+ "LOCALDOMSTORAGE_GETLENGTH_BLOCKING_MS",
+ "LOCALDOMSTORAGE_GETVALUE_BLOCKING_MS",
+ "LOCALDOMSTORAGE_PRELOAD_PENDING_ON_FIRST_ACCESS",
+ "LOCALDOMSTORAGE_REMOVEKEY_BLOCKING_MS",
+ "LOCALDOMSTORAGE_SESSIONONLY_PRELOAD_BLOCKING_MS",
+ "LOCALDOMSTORAGE_SETVALUE_BLOCKING_MS",
+ "LOCALDOMSTORAGE_SHUTDOWN_DATABASE_MS",
+ "LOCALDOMSTORAGE_UNLOAD_BLOCKING_MS",
+ "LONG_REFLOW_INTERRUPTIBLE",
+ "MAC_INITFONTLIST_TOTAL",
+ "MASTER_PASSWORD_ENABLED",
+ "MEDIA_WMF_DECODE_ERROR",
+ "MIXED_CONTENT_PAGE_LOAD",
+ "MIXED_CONTENT_UNBLOCK_COUNTER",
+ "MOZ_SQLITE_COOKIES_OPEN_READAHEAD_MS",
+ "MOZ_SQLITE_COOKIES_READ_B",
+ "MOZ_SQLITE_COOKIES_READ_MAIN_THREAD_MS",
+ "MOZ_SQLITE_COOKIES_READ_MS",
+ "MOZ_SQLITE_COOKIES_SYNC_MAIN_THREAD_MS",
+ "MOZ_SQLITE_COOKIES_SYNC_MS",
+ "MOZ_SQLITE_COOKIES_WRITE_B",
+ "MOZ_SQLITE_COOKIES_WRITE_MAIN_THREAD_MS",
+ "MOZ_SQLITE_COOKIES_WRITE_MS",
+ "MOZ_SQLITE_OPEN_MAIN_THREAD_MS",
+ "MOZ_SQLITE_OPEN_MS",
+ "MOZ_SQLITE_OTHER_READ_B",
+ "MOZ_SQLITE_OTHER_READ_MAIN_THREAD_MS",
+ "MOZ_SQLITE_OTHER_READ_MS",
+ "MOZ_SQLITE_OTHER_SYNC_MAIN_THREAD_MS",
+ "MOZ_SQLITE_OTHER_SYNC_MS",
+ "MOZ_SQLITE_OTHER_WRITE_B",
+ "MOZ_SQLITE_OTHER_WRITE_MAIN_THREAD_MS",
+ "MOZ_SQLITE_OTHER_WRITE_MS",
+ "MOZ_SQLITE_PLACES_READ_B",
+ "MOZ_SQLITE_PLACES_READ_MAIN_THREAD_MS",
+ "MOZ_SQLITE_PLACES_READ_MS",
+ "MOZ_SQLITE_PLACES_SYNC_MAIN_THREAD_MS",
+ "MOZ_SQLITE_PLACES_SYNC_MS",
+ "MOZ_SQLITE_PLACES_WRITE_B",
+ "MOZ_SQLITE_PLACES_WRITE_MAIN_THREAD_MS",
+ "MOZ_SQLITE_PLACES_WRITE_MS",
+ "MOZ_SQLITE_TRUNCATE_MAIN_THREAD_MS",
+ "MOZ_SQLITE_TRUNCATE_MS",
+ "MOZ_SQLITE_WEBAPPS_READ_B",
+ "MOZ_SQLITE_WEBAPPS_READ_MAIN_THREAD_MS",
+ "MOZ_SQLITE_WEBAPPS_READ_MS",
+ "MOZ_SQLITE_WEBAPPS_SYNC_MAIN_THREAD_MS",
+ "MOZ_SQLITE_WEBAPPS_SYNC_MS",
+ "MOZ_SQLITE_WEBAPPS_WRITE_B",
+ "MOZ_SQLITE_WEBAPPS_WRITE_MAIN_THREAD_MS",
+ "MOZ_SQLITE_WEBAPPS_WRITE_MS",
+ "NETWORK_CACHE_FS_TYPE",
+ "NETWORK_CACHE_HASH_STATS",
+ "NETWORK_CACHE_HIT_MISS_STAT_PER_CACHE_SIZE",
+ "NETWORK_CACHE_HIT_RATE_PER_CACHE_SIZE",
+ "NETWORK_CACHE_METADATA_FIRST_READ_SIZE",
+ "NETWORK_CACHE_METADATA_FIRST_READ_TIME_MS",
+ "NETWORK_CACHE_METADATA_SECOND_READ_TIME_MS",
+ "NETWORK_CACHE_METADATA_SIZE",
+ "NETWORK_CACHE_SIZE_FULL_FAT",
+ "NETWORK_CACHE_V1_HIT_TIME_MS",
+ "NETWORK_CACHE_V1_MISS_TIME_MS",
+ "NETWORK_CACHE_V1_TRUNCATE_TIME_MS",
+ "NETWORK_CACHE_V2_HIT_TIME_MS",
+ "NETWORK_CACHE_V2_INPUT_STREAM_STATUS",
+ "NETWORK_CACHE_V2_MISS_TIME_MS",
+ "NETWORK_CACHE_V2_OUTPUT_STREAM_STATUS",
+ "NETWORK_DISK_CACHE2_SHUTDOWN_CLEAR_PRIVATE",
+ "NETWORK_DISK_CACHE_DELETEDIR",
+ "NETWORK_DISK_CACHE_DELETEDIR_SHUTDOWN",
+ "NETWORK_DISK_CACHE_OPEN",
+ "NETWORK_DISK_CACHE_SHUTDOWN",
+ "NETWORK_DISK_CACHE_SHUTDOWN_CLEAR_PRIVATE",
+ "NETWORK_DISK_CACHE_SHUTDOWN_V2",
+ "NETWORK_DISK_CACHE_TRASHRENAME",
+ "NEWTAB_PAGE_BLOCKED_SITES_COUNT",
+ "NEWTAB_PAGE_ENABLED",
+ "NEWTAB_PAGE_ENHANCED",
+ "NEWTAB_PAGE_LIFE_SPAN",
+ "NEWTAB_PAGE_LIFE_SPAN_SUGGESTED",
+ "NEWTAB_PAGE_PINNED_SITES_COUNT",
+ "NEWTAB_PAGE_SHOWN",
+ "NEWTAB_PAGE_SITE_CLICKED",
+ "NTLM_MODULE_USED_2",
+ "ONBEFOREUNLOAD_PROMPT_ACTION",
+ "ONBEFOREUNLOAD_PROMPT_COUNT",
+ "OSFILE_WORKER_LAUNCH_MS",
+ "OSFILE_WORKER_READY_MS",
+ "OSFILE_WRITEATOMIC_JANK_MS",
+ "PAGE_FAULTS_HARD",
+ "PAINT_BUILD_DISPLAYLIST_TIME",
+ "PAINT_RASTERIZE_TIME",
+ "PDF_VIEWER_DOCUMENT_GENERATOR",
+ "PDF_VIEWER_DOCUMENT_SIZE_KB",
+ "PDF_VIEWER_DOCUMENT_VERSION",
+ "PDF_VIEWER_EMBED",
+ "PDF_VIEWER_FALLBACK_SHOWN",
+ "PDF_VIEWER_FONT_TYPES",
+ "PDF_VIEWER_FORM",
+ "PDF_VIEWER_PRINT",
+ "PDF_VIEWER_STREAM_TYPES",
+ "PDF_VIEWER_TIME_TO_VIEW_MS",
+ "PDF_VIEWER_USED",
+ "PERF_MONITORING_SLOW_ADDON_CPOW_US",
+ "PERF_MONITORING_SLOW_ADDON_JANK_US",
+ "PERMISSIONS_SQL_CORRUPTED",
+ "PLACES_ANNOS_BOOKMARKS_COUNT",
+ "PLACES_ANNOS_PAGES_COUNT",
+ "PLACES_AUTOCOMPLETE_1ST_RESULT_TIME_MS",
+ "PLACES_AUTOCOMPLETE_6_FIRST_RESULTS_TIME_MS",
+ "PLACES_AUTOCOMPLETE_URLINLINE_DOMAIN_QUERY_TIME_MS",
+ "PLACES_BACKUPS_BOOKMARKSTREE_MS",
+ "PLACES_BACKUPS_DAYSFROMLAST",
+ "PLACES_BACKUPS_TOJSON_MS",
+ "PLACES_BOOKMARKS_COUNT",
+ "PLACES_DATABASE_FILESIZE_MB",
+ "PLACES_DATABASE_PAGESIZE_B",
+ "PLACES_DATABASE_SIZE_PER_PAGE_B",
+ "PLACES_EXPIRATION_STEPS_TO_CLEAN2",
+ "PLACES_EXPORT_TOHTML_MS",
+ "PLACES_FAVICON_BMP_SIZES",
+ "PLACES_FAVICON_GIF_SIZES",
+ "PLACES_FAVICON_ICO_SIZES",
+ "PLACES_FAVICON_JPEG_SIZES",
+ "PLACES_FAVICON_OTHER_SIZES",
+ "PLACES_FAVICON_PNG_SIZES",
+ "PLACES_FAVICON_SVG_SIZES",
+ "PLACES_HISTORY_LIBRARY_SEARCH_TIME_MS",
+ "PLACES_IDLE_FRECENCY_DECAY_TIME_MS",
+ "PLACES_IDLE_MAINTENANCE_TIME_MS",
+ "PLACES_KEYWORDS_COUNT",
+ "PLACES_MAINTENANCE_DAYSFROMLAST",
+ "PLACES_PAGES_COUNT",
+ "PLACES_SORTED_BOOKMARKS_PERC",
+ "PLACES_TAGGED_BOOKMARKS_PERC",
+ "PLACES_TAGS_COUNT",
+ "PLUGINS_INFOBAR_ALLOW",
+ "PLUGINS_INFOBAR_BLOCK",
+ "PLUGINS_INFOBAR_SHOWN",
+ "PLUGINS_NOTIFICATION_PLUGIN_COUNT",
+ "PLUGINS_NOTIFICATION_SHOWN",
+ "PLUGINS_NOTIFICATION_USER_ACTION",
+ "PLUGIN_CALLED_DIRECTLY",
+ "PLUGIN_HANG_TIME",
+ "PLUGIN_HANG_UI_DONT_ASK",
+ "PLUGIN_HANG_UI_RESPONSE_TIME",
+ "PLUGIN_HANG_UI_USER_RESPONSE",
+ "PLUGIN_SHUTDOWN_MS",
+ "PRCLOSE_TCP_BLOCKING_TIME_CONNECTIVITY_CHANGE",
+ "PRCLOSE_TCP_BLOCKING_TIME_LINK_CHANGE",
+ "PRCLOSE_TCP_BLOCKING_TIME_NORMAL",
+ "PRCLOSE_TCP_BLOCKING_TIME_OFFLINE",
+ "PRCLOSE_TCP_BLOCKING_TIME_SHUTDOWN",
+ "PRCLOSE_UDP_BLOCKING_TIME_CONNECTIVITY_CHANGE",
+ "PRCLOSE_UDP_BLOCKING_TIME_LINK_CHANGE",
+ "PRCLOSE_UDP_BLOCKING_TIME_NORMAL",
+ "PRCLOSE_UDP_BLOCKING_TIME_OFFLINE",
+ "PRCLOSE_UDP_BLOCKING_TIME_SHUTDOWN",
+ "PRCONNECTCONTINUE_BLOCKING_TIME_CONNECTIVITY_CHANGE",
+ "PRCONNECTCONTINUE_BLOCKING_TIME_LINK_CHANGE",
+ "PRCONNECTCONTINUE_BLOCKING_TIME_NORMAL",
+ "PRCONNECTCONTINUE_BLOCKING_TIME_OFFLINE",
+ "PRCONNECTCONTINUE_BLOCKING_TIME_SHUTDOWN",
+ "PRCONNECT_BLOCKING_TIME_CONNECTIVITY_CHANGE",
+ "PRCONNECT_BLOCKING_TIME_LINK_CHANGE",
+ "PRCONNECT_BLOCKING_TIME_NORMAL",
+ "PRCONNECT_BLOCKING_TIME_OFFLINE",
+ "PRCONNECT_BLOCKING_TIME_SHUTDOWN",
+ "PREDICTOR_BASE_CONFIDENCE",
+ "PREDICTOR_CONFIDENCE",
+ "PREDICTOR_GLOBAL_DEGRADATION",
+ "PREDICTOR_LEARN_ATTEMPTS",
+ "PREDICTOR_LEARN_FULL_QUEUE",
+ "PREDICTOR_LEARN_WORK_TIME",
+ "PREDICTOR_PREDICTIONS_CALCULATED",
+ "PREDICTOR_PREDICT_ATTEMPTS",
+ "PREDICTOR_PREDICT_FULL_QUEUE",
+ "PREDICTOR_PREDICT_TIME_TO_ACTION",
+ "PREDICTOR_PREDICT_TIME_TO_INACTION",
+ "PREDICTOR_PREDICT_WORK_TIME",
+ "PREDICTOR_SUBRESOURCE_DEGRADATION",
+ "PREDICTOR_TOTAL_PRECONNECTS",
+ "PREDICTOR_TOTAL_PRECONNECTS_CREATED",
+ "PREDICTOR_TOTAL_PRECONNECTS_UNUSED",
+ "PREDICTOR_TOTAL_PRECONNECTS_USED",
+ "PREDICTOR_TOTAL_PREDICTIONS",
+ "PREDICTOR_TOTAL_PRERESOLVES",
+ "PREDICTOR_WAIT_TIME",
+ "PROCESS_CRASH_SUBMIT_ATTEMPT",
+ "PROCESS_CRASH_SUBMIT_SUCCESS",
+ "PWMGR_BLOCKLIST_NUM_SITES",
+ "PWMGR_FORM_AUTOFILL_RESULT",
+ "PWMGR_LOGIN_LAST_USED_DAYS",
+ "PWMGR_LOGIN_PAGE_SAFETY",
+ "PWMGR_MANAGE_COPIED_PASSWORD",
+ "PWMGR_MANAGE_COPIED_USERNAME",
+ "PWMGR_MANAGE_DELETED",
+ "PWMGR_MANAGE_DELETED_ALL",
+ "PWMGR_MANAGE_OPENED",
+ "PWMGR_MANAGE_SORTED",
+ "PWMGR_MANAGE_VISIBILITY_TOGGLED",
+ "PWMGR_NUM_HTTPAUTH_PASSWORDS",
+ "PWMGR_NUM_PASSWORDS_PER_HOSTNAME",
+ "PWMGR_NUM_SAVED_PASSWORDS",
+ "PWMGR_PASSWORD_INPUT_IN_FORM",
+ "PWMGR_PROMPT_REMEMBER_ACTION",
+ "PWMGR_PROMPT_UPDATE_ACTION",
+ "PWMGR_SAVING_ENABLED",
+ "PWMGR_USERNAME_PRESENT",
+ "REFRESH_DRIVER_TICK",
+ "REQUESTS_OF_ORIGINAL_CONTENT",
+ "SAFE_MODE_USAGE",
+ "SEARCH_COUNTS",
+ "SEARCH_SERVICE_INIT_MS",
+ "SECURITY_UI",
+ "SERVICE_WORKER_CONTROLLED_DOCUMENTS",
+ "SERVICE_WORKER_LIFE_TIME",
+ "SERVICE_WORKER_REGISTRATIONS",
+ "SERVICE_WORKER_REGISTRATION_LOADING",
+ "SERVICE_WORKER_REQUEST_PASSTHROUGH",
+ "SERVICE_WORKER_SPAWN_ATTEMPTS",
+ "SERVICE_WORKER_UPDATED",
+ "SERVICE_WORKER_WAS_SPAWNED",
+ "SHOULD_AUTO_DETECT_LANGUAGE",
+ "SHOULD_TRANSLATION_UI_APPEAR",
+ "SHUTDOWN_OK",
+ "SHUTDOWN_PHASE_DURATION_TICKS_PROFILE_BEFORE_CHANGE",
+ "SHUTDOWN_PHASE_DURATION_TICKS_PROFILE_CHANGE_TEARDOWN",
+ "SHUTDOWN_PHASE_DURATION_TICKS_QUIT_APPLICATION",
+ "SHUTDOWN_PHASE_DURATION_TICKS_XPCOM_WILL_SHUTDOWN",
+ "SLOW_ADDON_WARNING_RESPONSE_TIME",
+ "SLOW_ADDON_WARNING_STATES",
+ "SOCIAL_ENABLED_ON_SESSION",
+ "SOCIAL_PANEL_CLICKS",
+ "SOCIAL_SIDEBAR_OPEN_DURATION",
+ "SOCIAL_SIDEBAR_STATE",
+ "SOCIAL_TOOLBAR_BUTTONS",
+ "SPDY_CHUNK_RECVD",
+ "SPDY_GOAWAY_LOCAL",
+ "SPDY_GOAWAY_PEER",
+ "SPDY_KBREAD_PER_CONN",
+ "SPDY_NPN_CONNECT",
+ "SPDY_NPN_JOIN",
+ "SPDY_PARALLEL_STREAMS",
+ "SPDY_REQUEST_PER_CONN",
+ "SPDY_SERVER_INITIATED_STREAMS",
+ "SPDY_SETTINGS_CWND",
+ "SPDY_SETTINGS_DL_BW",
+ "SPDY_SETTINGS_IW",
+ "SPDY_SETTINGS_MAX_STREAMS",
+ "SPDY_SETTINGS_RETRANS",
+ "SPDY_SETTINGS_RTT",
+ "SPDY_SETTINGS_UL_BW",
+ "SPDY_SYN_RATIO",
+ "SPDY_SYN_REPLY_RATIO",
+ "SPDY_SYN_REPLY_SIZE",
+ "SPDY_SYN_SIZE",
+ "SPDY_VERSION2",
+ "STARTUP_CACHE_AGE_HOURS",
+ "STARTUP_CRASH_DETECTED",
+ "STARTUP_MEASUREMENT_ERRORS",
+ "STS_NUMBER_OF_ONSOCKETREADY_CALLS",
+ "STS_NUMBER_OF_PENDING_EVENTS",
+ "STS_NUMBER_OF_PENDING_EVENTS_IN_THE_LAST_CYCLE",
+ "STS_POLL_AND_EVENTS_CYCLE",
+ "STS_POLL_AND_EVENT_THE_LAST_CYCLE",
+ "STS_POLL_BLOCK_TIME",
+ "STS_POLL_CYCLE",
+ "STUMBLER_OBSERVATIONS_PER_DAY",
+ "STUMBLER_TIME_BETWEEN_RECEIVED_LOCATIONS_SEC",
+ "STUMBLER_TIME_BETWEEN_START_SEC",
+ "STUMBLER_TIME_BETWEEN_UPLOADS_SEC",
+ "STUMBLER_UPLOAD_BYTES",
+ "STUMBLER_UPLOAD_CELL_COUNT",
+ "STUMBLER_UPLOAD_OBSERVATION_COUNT",
+ "STUMBLER_UPLOAD_WIFI_AP_COUNT",
+ "STUMBLER_VOLUME_BYTES_UPLOADED_PER_SEC",
+ "SUBPROCESS_ABNORMAL_ABORT",
+ "SUBPROCESS_CRASHES_WITH_DUMP",
+ "SYSTEM_FONT_FALLBACK",
+ "SYSTEM_FONT_FALLBACK_FIRST",
+ "SYSTEM_FONT_FALLBACK_SCRIPT",
+ "TAB_SWITCH_CACHE_POSITION",
+ "TAP_TO_LOAD_ENABLED",
+ "TAP_TO_LOAD_IMAGE_SIZE",
+ "TELEMETRY_COMPRESS",
+ "TELEMETRY_STRINGIFY",
+ "TELEMETRY_SUCCESS",
+ "TELEMETRY_TEST_COUNT",
+ "TELEMETRY_TEST_COUNT_INIT_NO_RECORD",
+ "TELEMETRY_TEST_EXPIRED",
+ "TELEMETRY_TEST_FLAG",
+ "TELEMETRY_TEST_KEYED_COUNT",
+ "TELEMETRY_TEST_KEYED_COUNT_INIT_NO_RECORD",
+ "TELEMETRY_TEST_KEYED_FLAG",
+ "TELEMETRY_TEST_KEYED_RELEASE_OPTIN",
+ "TELEMETRY_TEST_KEYED_RELEASE_OPTOUT",
+ "TELEMETRY_TEST_RELEASE_OPTIN",
+ "TELEMETRY_TEST_RELEASE_OPTOUT",
+ "THUNDERBIRD_CONVERSATIONS_TIME_TO_2ND_GLODA_QUERY_MS",
+ "THUNDERBIRD_GLODA_SIZE_MB",
+ "THUNDERBIRD_INDEXING_RATE_MSG_PER_S",
+ "TLS_ERROR_REPORT_UI",
+ "TOP_LEVEL_CONTENT_DOCUMENTS_DESTROYED",
+ "TOUCH_ENABLED_DEVICE",
+ "TRACKING_PROTECTION_ENABLED",
+ "TRACKING_PROTECTION_EVENTS",
+ "TRACKING_PROTECTION_PBM_DISABLED",
+ "TRACKING_PROTECTION_SHIELD",
+ "TRANSACTION_WAIT_TIME_HTTP",
+ "TRANSACTION_WAIT_TIME_HTTP_PIPELINES",
+ "TRANSACTION_WAIT_TIME_SPDY",
+ "TRANSLATED_CHARACTERS",
+ "TRANSLATED_PAGES",
+ "TRANSLATED_PAGES_BY_LANGUAGE",
+ "TRANSLATION_OPPORTUNITIES",
+ "TRANSLATION_OPPORTUNITIES_BY_LANGUAGE",
+ "VIDEO_CANPLAYTYPE_H264_CONSTRAINT_SET_FLAG",
+ "VIDEO_CANPLAYTYPE_H264_LEVEL",
+ "VIDEO_CANPLAYTYPE_H264_PROFILE",
+ "VIDEO_DECODED_H264_SPS_CONSTRAINT_SET_FLAG",
+ "VIDEO_DECODED_H264_SPS_LEVEL",
+ "VIDEO_DECODED_H264_SPS_PROFILE",
+ "VIDEO_EME_PLAY_SUCCESS",
+ "VIDEO_H264_SPS_MAX_NUM_REF_FRAMES",
+ "WEAVE_COMPLETE_SUCCESS_COUNT",
+ "WEAVE_CONFIGURED",
+ "WEAVE_CONFIGURED_MASTER_PASSWORD",
+ "WEAVE_START_COUNT",
+ "WEBCRYPTO_ALG",
+ "WEBCRYPTO_EXTRACTABLE_ENC",
+ "WEBCRYPTO_EXTRACTABLE_GENERATE",
+ "WEBCRYPTO_EXTRACTABLE_IMPORT",
+ "WEBCRYPTO_EXTRACTABLE_SIG",
+ "WEBCRYPTO_METHOD",
+ "WEBCRYPTO_RESOLVED",
+ "WEBSOCKETS_HANDSHAKE_TYPE",
+ "WORD_CACHE_HITS_CHROME",
+ "WORD_CACHE_HITS_CONTENT",
+ "WORD_CACHE_MISSES_CHROME",
+ "WORD_CACHE_MISSES_CONTENT",
+ "XMLHTTPREQUEST_ASYNC_OR_SYNC",
+ "XUL_CACHE_DISABLED"
+ ],
+ "bug_numbers": [
+ "A11Y_CONSUMERS",
+ "A11Y_IATABLE_USAGE_FLAG",
+ "A11Y_INSTANTIATED_FLAG",
+ "A11Y_ISIMPLEDOM_USAGE_FLAG",
+ "A11Y_UPDATE_TIME",
+ "ADDON_SHIM_USAGE",
+ "APPLICATION_REPUTATION_COUNT",
+ "APPLICATION_REPUTATION_LOCAL",
+ "APPLICATION_REPUTATION_SERVER",
+ "APPLICATION_REPUTATION_SHOULD_BLOCK",
+ "AUDIOSTREAM_FIRST_OPEN_MS",
+ "AUDIOSTREAM_LATER_OPEN_MS",
+ "AUTO_REJECTED_TRANSLATION_OFFERS",
+ "BACKGROUNDFILESAVER_THREAD_COUNT",
+ "BAD_FALLBACK_FONT",
+ "BLOCKED_ON_PLUGINASYNCSURROGATE_WAITFORINIT_MS",
+ "BLOCKED_ON_PLUGIN_INSTANCE_DESTROY_MS",
+ "BLOCKED_ON_PLUGIN_INSTANCE_INIT_MS",
+ "BLOCKED_ON_PLUGIN_MODULE_INIT_MS",
+ "BLOCKED_ON_PLUGIN_STREAM_INIT_MS",
+ "BLOCKLIST_SYNC_FILE_LOAD",
+ "BROWSERPROVIDER_XUL_IMPORT_BOOKMARKS",
+ "BROWSER_IS_ASSIST_DEFAULT",
+ "BROWSER_IS_USER_DEFAULT",
+ "BROWSER_IS_USER_DEFAULT_ERROR",
+ "BROWSER_SET_DEFAULT_ALWAYS_CHECK",
+ "BROWSER_SET_DEFAULT_DIALOG_PROMPT_RAWCOUNT",
+ "BROWSER_SET_DEFAULT_ERROR",
+ "BROWSER_SET_DEFAULT_RESULT",
+ "BROWSER_SET_DEFAULT_TIME_TO_COMPLETION_SECONDS",
+ "BR_9_2_1_SUBJECT_ALT_NAMES",
+ "BR_9_2_2_SUBJECT_COMMON_NAME",
+ "BUCKET_ORDER_ERRORS",
+ "CACHE_DEVICE_SEARCH_2",
+ "CACHE_DISK_SEARCH_2",
+ "CACHE_LM_INCONSISTENT",
+ "CACHE_MEMORY_SEARCH_2",
+ "CACHE_OFFLINE_SEARCH_2",
+ "CACHE_SERVICE_LOCK_WAIT_2",
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_2",
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSASYNCDOOMEVENT_RUN",
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSBLOCKONCACHETHREADEVENT_RUN",
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHEENTRYDESCRIPTOR_CLOSE",
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHEENTRYDESCRIPTOR_DOOM",
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHEENTRYDESCRIPTOR_DOOMANDFAILPENDINGREQUESTS",
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHEENTRYDESCRIPTOR_GETCACHEELEMENT",
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHEENTRYDESCRIPTOR_GETCLIENTID",
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHEENTRYDESCRIPTOR_GETDATASIZE",
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHEENTRYDESCRIPTOR_GETDEVICEID",
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHEENTRYDESCRIPTOR_GETEXPIRATIONTIME",
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHEENTRYDESCRIPTOR_GETFETCHCOUNT",
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHEENTRYDESCRIPTOR_GETFILE",
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHEENTRYDESCRIPTOR_GETKEY",
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHEENTRYDESCRIPTOR_GETLASTFETCHED",
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHEENTRYDESCRIPTOR_GETLASTMODIFIED",
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHEENTRYDESCRIPTOR_GETMETADATAELEMENT",
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHEENTRYDESCRIPTOR_GETPREDICTEDDATASIZE",
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHEENTRYDESCRIPTOR_GETSECURITYINFO",
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHEENTRYDESCRIPTOR_GETSTORAGEDATASIZE",
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHEENTRYDESCRIPTOR_GETSTORAGEPOLICY",
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHEENTRYDESCRIPTOR_ISSTREAMBASED",
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHEENTRYDESCRIPTOR_MARKVALID",
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHEENTRYDESCRIPTOR_OPENINPUTSTREAM",
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHEENTRYDESCRIPTOR_OPENOUTPUTSTREAM",
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHEENTRYDESCRIPTOR_REQUESTDATASIZECHANGE",
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHEENTRYDESCRIPTOR_SETCACHEELEMENT",
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHEENTRYDESCRIPTOR_SETDATASIZE",
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHEENTRYDESCRIPTOR_SETEXPIRATIONTIME",
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHEENTRYDESCRIPTOR_SETMETADATAELEMENT",
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHEENTRYDESCRIPTOR_SETPREDICTEDDATASIZE",
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHEENTRYDESCRIPTOR_SETSECURITYINFO",
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHEENTRYDESCRIPTOR_SETSTORAGEPOLICY",
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHEENTRYDESCRIPTOR_VISITMETADATA",
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHESERVICE_CLOSEALLSTREAMS",
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHESERVICE_DISKDEVICEHEAPSIZE",
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHESERVICE_EVICTENTRIESFORCLIENT",
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHESERVICE_GETCACHEIOTARGET",
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHESERVICE_ISSTORAGEENABLEDFORPOLICY",
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHESERVICE_ONPROFILECHANGED",
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHESERVICE_ONPROFILESHUTDOWN",
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHESERVICE_OPENCACHEENTRY",
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHESERVICE_PROCESSREQUEST",
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHESERVICE_SETDISKCACHECAPACITY",
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHESERVICE_SETDISKCACHEENABLED",
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHESERVICE_SETDISKCACHEMAXENTRYSIZE",
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHESERVICE_SETDISKSMARTSIZE",
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHESERVICE_SETMEMORYCACHE",
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHESERVICE_SETMEMORYCACHEMAXENTRYSIZE",
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHESERVICE_SETOFFLINECACHECAPACITY",
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHESERVICE_SETOFFLINECACHEENABLED",
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHESERVICE_SHUTDOWN",
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCACHESERVICE_VISITENTRIES",
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSCOMPRESSOUTPUTSTREAMWRAPPER_RELEASE",
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSDECOMPRESSINPUTSTREAMWRAPPER_RELEASE",
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSINPUTSTREAMWRAPPER_CLOSEINTERNAL",
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSINPUTSTREAMWRAPPER_LAZYINIT",
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSINPUTSTREAMWRAPPER_RELEASE",
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSOUTPUTSTREAMWRAPPER_CLOSEINTERNAL",
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSOUTPUTSTREAMWRAPPER_LAZYINIT",
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSOUTPUTSTREAMWRAPPER_RELEASE",
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSPROCESSREQUESTEVENT_RUN",
+ "CACHE_SERVICE_LOCK_WAIT_MAINTHREAD_NSSETDISKSMARTSIZECALLBACK_NOTIFY",
+ "CANVAS_2D_USED",
+ "CANVAS_WEBGL_USED",
+ "CERT_CHAIN_KEY_SIZE_STATUS",
+ "CERT_CHAIN_SHA1_POLICY_STATUS",
+ "CERT_OCSP_ENABLED",
+ "CERT_OCSP_REQUIRED",
+ "CERT_PINNING_FAILURES_BY_CA",
+ "CERT_PINNING_MOZ_RESULTS",
+ "CERT_PINNING_MOZ_RESULTS_BY_HOST",
+ "CERT_PINNING_MOZ_TEST_RESULTS",
+ "CERT_PINNING_MOZ_TEST_RESULTS_BY_HOST",
+ "CERT_PINNING_RESULTS",
+ "CERT_PINNING_TEST_RESULTS",
+ "CERT_VALIDATION_HTTP_REQUEST_CANCELED_TIME",
+ "CERT_VALIDATION_HTTP_REQUEST_FAILED_TIME",
+ "CERT_VALIDATION_HTTP_REQUEST_RESULT",
+ "CERT_VALIDATION_HTTP_REQUEST_SUCCEEDED_TIME",
+ "CERT_VALIDATION_SUCCESS_BY_CA",
+ "CHANGES_OF_DETECTED_LANGUAGE",
+ "CHANGES_OF_TARGET_LANGUAGE",
+ "CHARSET_OVERRIDE_SITUATION",
+ "CHARSET_OVERRIDE_USED",
+ "CHECK_ADDONS_MODIFIED_MS",
+ "CHECK_JAVA_ENABLED",
+ "COMPONENTS_SHIM_ACCESSED_BY_CONTENT",
+ "COMPOSITE_FRAME_ROUNDTRIP_TIME",
+ "COMPOSITE_TIME",
+ "CONTENT_DOCUMENTS_DESTROYED",
+ "COOKIE_SCHEME_SECURITY",
+ "CRASH_STORE_COMPRESSED_BYTES",
+ "CYCLE_COLLECTOR",
+ "CYCLE_COLLECTOR_ASYNC_SNOW_WHITE_FREEING",
+ "CYCLE_COLLECTOR_COLLECTED",
+ "CYCLE_COLLECTOR_FINISH_IGC",
+ "CYCLE_COLLECTOR_FULL",
+ "CYCLE_COLLECTOR_MAX_PAUSE",
+ "CYCLE_COLLECTOR_NEED_GC",
+ "CYCLE_COLLECTOR_OOM",
+ "CYCLE_COLLECTOR_SYNC_SKIPPABLE",
+ "CYCLE_COLLECTOR_TIME_BETWEEN",
+ "CYCLE_COLLECTOR_VISITED_GCED",
+ "CYCLE_COLLECTOR_VISITED_REF_COUNTED",
+ "CYCLE_COLLECTOR_WORKER",
+ "CYCLE_COLLECTOR_WORKER_COLLECTED",
+ "CYCLE_COLLECTOR_WORKER_NEED_GC",
+ "CYCLE_COLLECTOR_WORKER_OOM",
+ "CYCLE_COLLECTOR_WORKER_VISITED_GCED",
+ "CYCLE_COLLECTOR_WORKER_VISITED_REF_COUNTED",
+ "D3D11_SYNC_HANDLE_FAILURE",
+ "DATABASE_LOCKED_EXCEPTION",
+ "DATABASE_SUCCESSFUL_UNLOCK",
+ "DATA_STORAGE_ENTRIES",
+ "DECODER_INSTANTIATED_IBM866",
+ "DECODER_INSTANTIATED_ISO2022JP",
+ "DECODER_INSTANTIATED_ISO_8859_5",
+ "DECODER_INSTANTIATED_KOI8R",
+ "DECODER_INSTANTIATED_KOI8U",
+ "DECODER_INSTANTIATED_MACARABIC",
+ "DECODER_INSTANTIATED_MACCE",
+ "DECODER_INSTANTIATED_MACCROATIAN",
+ "DECODER_INSTANTIATED_MACCYRILLIC",
+ "DECODER_INSTANTIATED_MACDEVANAGARI",
+ "DECODER_INSTANTIATED_MACFARSI",
+ "DECODER_INSTANTIATED_MACGREEK",
+ "DECODER_INSTANTIATED_MACGUJARATI",
+ "DECODER_INSTANTIATED_MACGURMUKHI",
+ "DECODER_INSTANTIATED_MACHEBREW",
+ "DECODER_INSTANTIATED_MACICELANDIC",
+ "DECODER_INSTANTIATED_MACROMANIAN",
+ "DECODER_INSTANTIATED_MACTURKISH",
+ "DEFECTIVE_PERMISSIONS_SQL_REMOVED",
+ "DEFERRED_FINALIZE_ASYNC",
+ "DENIED_TRANSLATION_OFFERS",
+ "DEVICE_RESET_REASON",
+ "DEVTOOLS_ABOUTDEBUGGING_TIME_ACTIVE_SECONDS",
+ "DEVTOOLS_ANIMATIONINSPECTOR_TIME_ACTIVE_SECONDS",
+ "DEVTOOLS_BROWSERCONSOLE_TIME_ACTIVE_SECONDS",
+ "DEVTOOLS_CANVASDEBUGGER_TIME_ACTIVE_SECONDS",
+ "DEVTOOLS_COMPUTEDVIEW_TIME_ACTIVE_SECONDS",
+ "DEVTOOLS_CUSTOM_TIME_ACTIVE_SECONDS",
+ "DEVTOOLS_DEBUGGER_DISPLAY_SOURCE_LOCAL_MS",
+ "DEVTOOLS_DEBUGGER_DISPLAY_SOURCE_REMOTE_MS",
+ "DEVTOOLS_DEVELOPERTOOLBAR_TIME_ACTIVE_SECONDS",
+ "DEVTOOLS_FONTINSPECTOR_TIME_ACTIVE_SECONDS",
+ "DEVTOOLS_HEAP_SNAPSHOT_EDGE_COUNT",
+ "DEVTOOLS_HEAP_SNAPSHOT_NODE_COUNT",
+ "DEVTOOLS_HUD_APP_MEMORY_CONTENTINTERACTIVE_V2",
+ "DEVTOOLS_HUD_APP_MEMORY_FULLYLOADED_V2",
+ "DEVTOOLS_HUD_APP_MEMORY_MEDIAENUMERATED_V2",
+ "DEVTOOLS_HUD_APP_MEMORY_NAVIGATIONINTERACTIVE_V2",
+ "DEVTOOLS_HUD_APP_MEMORY_NAVIGATIONLOADED_V2",
+ "DEVTOOLS_HUD_APP_MEMORY_SCANEND_V2",
+ "DEVTOOLS_HUD_APP_MEMORY_VISUALLYLOADED_V2",
+ "DEVTOOLS_HUD_APP_STARTUP_TIME_CONTENTINTERACTIVE",
+ "DEVTOOLS_HUD_APP_STARTUP_TIME_FULLYLOADED",
+ "DEVTOOLS_HUD_APP_STARTUP_TIME_MEDIAENUMERATED",
+ "DEVTOOLS_HUD_APP_STARTUP_TIME_NAVIGATIONINTERACTIVE",
+ "DEVTOOLS_HUD_APP_STARTUP_TIME_NAVIGATIONLOADED",
+ "DEVTOOLS_HUD_APP_STARTUP_TIME_SCANEND",
+ "DEVTOOLS_HUD_APP_STARTUP_TIME_VISUALLYLOADED",
+ "DEVTOOLS_HUD_ERRORS",
+ "DEVTOOLS_HUD_JANK",
+ "DEVTOOLS_HUD_REFLOWS",
+ "DEVTOOLS_HUD_REFLOW_DURATION",
+ "DEVTOOLS_HUD_SECURITY_CATEGORY",
+ "DEVTOOLS_HUD_USS",
+ "DEVTOOLS_HUD_WARNINGS",
+ "DEVTOOLS_INSPECTOR_TIME_ACTIVE_SECONDS",
+ "DEVTOOLS_JSBROWSERDEBUGGER_TIME_ACTIVE_SECONDS",
+ "DEVTOOLS_JSDEBUGGER_TIME_ACTIVE_SECONDS",
+ "DEVTOOLS_JSPROFILER_TIME_ACTIVE_SECONDS",
+ "DEVTOOLS_MEMORY_TIME_ACTIVE_SECONDS",
+ "DEVTOOLS_NETMONITOR_TIME_ACTIVE_SECONDS",
+ "DEVTOOLS_OPTIONS_TIME_ACTIVE_SECONDS",
+ "DEVTOOLS_OS_ENUMERATED_PER_USER",
+ "DEVTOOLS_OS_IS_64_BITS_PER_USER",
+ "DEVTOOLS_PAINTFLASHING_TIME_ACTIVE_SECONDS",
+ "DEVTOOLS_PERFTOOLS_CONSOLE_RECORDING_COUNT",
+ "DEVTOOLS_PERFTOOLS_RECORDING_COUNT",
+ "DEVTOOLS_PERFTOOLS_RECORDING_DURATION_MS",
+ "DEVTOOLS_PERFTOOLS_RECORDING_EXPORT_FLAG",
+ "DEVTOOLS_PERFTOOLS_RECORDING_FEATURES_USED",
+ "DEVTOOLS_PERFTOOLS_RECORDING_IMPORT_FLAG",
+ "DEVTOOLS_PERFTOOLS_SELECTED_VIEW_MS",
+ "DEVTOOLS_READ_HEAP_SNAPSHOT_MS",
+ "DEVTOOLS_RULEVIEW_TIME_ACTIVE_SECONDS",
+ "DEVTOOLS_SAVE_HEAP_SNAPSHOT_MS",
+ "DEVTOOLS_SCRATCHPAD_TIME_ACTIVE_SECONDS",
+ "DEVTOOLS_SCREEN_RESOLUTION_ENUMERATED_PER_USER",
+ "DEVTOOLS_SHADEREDITOR_TIME_ACTIVE_SECONDS",
+ "DEVTOOLS_STORAGE_TIME_ACTIVE_SECONDS",
+ "DEVTOOLS_STYLEEDITOR_TIME_ACTIVE_SECONDS",
+ "DEVTOOLS_TABS_OPEN_AVERAGE_LINEAR",
+ "DEVTOOLS_TABS_OPEN_PEAK_LINEAR",
+ "DEVTOOLS_TABS_PINNED_AVERAGE_LINEAR",
+ "DEVTOOLS_TABS_PINNED_PEAK_LINEAR",
+ "DEVTOOLS_TILT_TIME_ACTIVE_SECONDS",
+ "DEVTOOLS_TOOLBOX_TIME_ACTIVE_SECONDS",
+ "DEVTOOLS_WEBAUDIOEDITOR_TIME_ACTIVE_SECONDS",
+ "DEVTOOLS_WEBCONSOLE_TIME_ACTIVE_SECONDS",
+ "DEVTOOLS_WEBIDE_CONNECTED_RUNTIME_APP_TYPE",
+ "DEVTOOLS_WEBIDE_CONNECTED_RUNTIME_ID",
+ "DEVTOOLS_WEBIDE_CONNECTED_RUNTIME_OS",
+ "DEVTOOLS_WEBIDE_CONNECTED_RUNTIME_PLATFORM_VERSION",
+ "DEVTOOLS_WEBIDE_CONNECTED_RUNTIME_PROCESSOR",
+ "DEVTOOLS_WEBIDE_CONNECTED_RUNTIME_TYPE",
+ "DEVTOOLS_WEBIDE_CONNECTED_RUNTIME_VERSION",
+ "DEVTOOLS_WEBIDE_CONNECTION_DEBUG_USED",
+ "DEVTOOLS_WEBIDE_CONNECTION_PLAY_USED",
+ "DEVTOOLS_WEBIDE_CONNECTION_RESULT",
+ "DEVTOOLS_WEBIDE_CONNECTION_TIME_SECONDS",
+ "DEVTOOLS_WEBIDE_LOCAL_CONNECTION_RESULT",
+ "DEVTOOLS_WEBIDE_OTHER_CONNECTION_RESULT",
+ "DEVTOOLS_WEBIDE_PROJECT_EDITOR_TIME_ACTIVE_SECONDS",
+ "DEVTOOLS_WEBIDE_REMOTE_CONNECTION_RESULT",
+ "DEVTOOLS_WEBIDE_SIMULATOR_CONNECTION_RESULT",
+ "DEVTOOLS_WEBIDE_TIME_ACTIVE_SECONDS",
+ "DEVTOOLS_WEBIDE_USB_CONNECTION_RESULT",
+ "DEVTOOLS_WEBIDE_WIFI_CONNECTION_RESULT",
+ "DISPLAY_SCALING_LINUX",
+ "DISPLAY_SCALING_MSWIN",
+ "DISPLAY_SCALING_OSX",
+ "DNS_BLACKLIST_COUNT",
+ "DNS_CLEANUP_AGE",
+ "DNS_FAILED_LOOKUP_TIME",
+ "DNS_LOOKUP_METHOD2",
+ "DNS_LOOKUP_TIME",
+ "DNS_RENEWAL_TIME",
+ "DNS_RENEWAL_TIME_FOR_TTL",
+ "DNT_USAGE",
+ "DWRITEFONT_DELAYEDINITFONTLIST_COLLECT",
+ "DWRITEFONT_DELAYEDINITFONTLIST_COUNT",
+ "DWRITEFONT_DELAYEDINITFONTLIST_TOTAL",
+ "DWRITEFONT_INIT_PROBLEM",
+ "E10S_BLOCKED_FROM_RUNNING",
+ "E10S_WINDOW",
+ "ENABLE_PRIVILEGE_EVER_CALLED",
+ "FENNEC_DISTRIBUTION_CODE_CATEGORY",
+ "FENNEC_DISTRIBUTION_DOWNLOAD_TIME_MS",
+ "FENNEC_DISTRIBUTION_REFERRER_INVALID",
+ "FENNEC_GLOBALHISTORY_ADD_MS",
+ "FENNEC_GLOBALHISTORY_UPDATE_MS",
+ "FENNEC_GLOBALHISTORY_VISITED_BUILD_MS",
+ "FENNEC_READING_LIST_COUNT",
+ "FENNEC_RESTORING_ACTIVITY",
+ "FENNEC_SEARCH_LOADER_TIME_MS",
+ "FENNEC_STARTUP_TIME_GECKOREADY",
+ "FENNEC_STARTUP_TIME_JAVAUI",
+ "FENNEC_SYNC11_MIGRATIONS_COMPLETED",
+ "FENNEC_SYNC11_MIGRATIONS_FAILED",
+ "FENNEC_SYNC11_MIGRATIONS_SUCCEEDED",
+ "FENNEC_SYNC11_MIGRATION_NOTIFICATIONS_OFFERED",
+ "FENNEC_SYNC11_MIGRATION_SENTINELS_SEEN",
+ "FENNEC_SYNC_NUMBER_OF_SYNCS_COMPLETED",
+ "FENNEC_SYNC_NUMBER_OF_SYNCS_FAILED",
+ "FENNEC_SYNC_NUMBER_OF_SYNCS_FAILED_BACKOFF",
+ "FENNEC_SYNC_NUMBER_OF_SYNCS_STARTED",
+ "FENNEC_TABQUEUE_QUEUESIZE",
+ "FENNEC_TOPSITES_LOADER_TIME_MS",
+ "FENNEC_WAS_KILLED",
+ "FETCH_IS_MAINTHREAD",
+ "FIND_PLUGINS",
+ "FLASH_PLUGIN_AREA",
+ "FLASH_PLUGIN_HEIGHT",
+ "FLASH_PLUGIN_INSTANCES_ON_PAGE",
+ "FLASH_PLUGIN_STATES",
+ "FLASH_PLUGIN_WIDTH",
+ "FONTLIST_INITFACENAMELISTS",
+ "FONTLIST_INITOTHERFAMILYNAMES",
+ "FONT_CACHE_HIT",
+ "FORCED_DEVICE_RESET_REASON",
+ "FORGET_SKIPPABLE_MAX",
+ "FX_BOOKMARKS_TOOLBAR_INIT_MS",
+ "FX_BROWSER_FULLSCREEN_USED",
+ "FX_GESTURE_COMPRESS_SNAPSHOT_OF_PAGE",
+ "FX_GESTURE_INSTALL_SNAPSHOT_OF_PAGE",
+ "FX_NEW_WINDOW_MS",
+ "FX_PAGE_LOAD_MS",
+ "FX_SANITIZE_CACHE",
+ "FX_SANITIZE_COOKIES_2",
+ "FX_SANITIZE_DOWNLOADS",
+ "FX_SANITIZE_FORMDATA",
+ "FX_SANITIZE_HISTORY",
+ "FX_SANITIZE_OPENWINDOWS",
+ "FX_SANITIZE_SESSIONS",
+ "FX_SANITIZE_SITESETTINGS",
+ "FX_SANITIZE_TOTAL",
+ "FX_SESSION_RESTORE_ALL_FILES_CORRUPT",
+ "FX_SESSION_RESTORE_AUTO_RESTORE_DURATION_UNTIL_EAGER_TABS_RESTORED_MS",
+ "FX_SESSION_RESTORE_COLLECT_ALL_WINDOWS_DATA_MS",
+ "FX_SESSION_RESTORE_COLLECT_COOKIES_MS",
+ "FX_SESSION_RESTORE_COLLECT_DATA_LONGEST_OP_MS",
+ "FX_SESSION_RESTORE_COLLECT_DATA_MS",
+ "FX_SESSION_RESTORE_CONTENT_COLLECT_DATA_LONGEST_OP_MS",
+ "FX_SESSION_RESTORE_CORRUPT_FILE",
+ "FX_SESSION_RESTORE_DOM_STORAGE_SIZE_ESTIMATE_CHARS",
+ "FX_SESSION_RESTORE_FILE_SIZE_BYTES",
+ "FX_SESSION_RESTORE_MANUAL_RESTORE_DURATION_UNTIL_EAGER_TABS_RESTORED_MS",
+ "FX_SESSION_RESTORE_NUMBER_OF_EAGER_TABS_RESTORED",
+ "FX_SESSION_RESTORE_NUMBER_OF_TABS_RESTORED",
+ "FX_SESSION_RESTORE_NUMBER_OF_WINDOWS_RESTORED",
+ "FX_SESSION_RESTORE_READ_FILE_MS",
+ "FX_SESSION_RESTORE_RESTORE_WINDOW_MS",
+ "FX_SESSION_RESTORE_SEND_UPDATE_CAUSED_OOM",
+ "FX_SESSION_RESTORE_SERIALIZE_DATA_MS",
+ "FX_SESSION_RESTORE_STARTUP_INIT_SESSION_MS",
+ "FX_SESSION_RESTORE_STARTUP_ONLOAD_INITIAL_WINDOW_MS",
+ "FX_SESSION_RESTORE_WRITE_FILE_MS",
+ "FX_TABLETMODE_PAGE_LOAD",
+ "FX_TAB_ANIM_ANY_FRAME_INTERVAL_MS",
+ "FX_TAB_ANIM_OPEN_FRAME_INTERVAL_MS",
+ "FX_TAB_ANIM_OPEN_PREVIEW_FRAME_INTERVAL_MS",
+ "FX_TAB_CLICK_MS",
+ "FX_TAB_SWITCH_SPINNER_VISIBLE_MS",
+ "FX_TAB_SWITCH_TOTAL_E10S_MS",
+ "FX_TAB_SWITCH_TOTAL_MS",
+ "FX_TAB_SWITCH_UPDATE_MS",
+ "FX_THUMBNAILS_BG_CAPTURE_CANVAS_DRAW_TIME_MS",
+ "FX_THUMBNAILS_BG_CAPTURE_DONE_REASON_2",
+ "FX_THUMBNAILS_BG_CAPTURE_PAGE_LOAD_TIME_MS",
+ "FX_THUMBNAILS_BG_CAPTURE_QUEUE_TIME_MS",
+ "FX_THUMBNAILS_BG_CAPTURE_SERVICE_TIME_MS",
+ "FX_THUMBNAILS_BG_QUEUE_SIZE_ON_CAPTURE",
+ "FX_THUMBNAILS_CAPTURE_TIME_MS",
+ "FX_THUMBNAILS_HIT_OR_MISS",
+ "FX_THUMBNAILS_STORE_TIME_MS",
+ "FX_TOTAL_TOP_VISITS",
+ "FX_TOUCH_USED",
+ "GC_ANIMATION_MS",
+ "GC_BUDGET_MS",
+ "GC_COMPACT_MS",
+ "GC_INCREMENTAL_DISABLED",
+ "GC_IS_COMPARTMENTAL",
+ "GC_MARK_GRAY_MS",
+ "GC_MARK_MS",
+ "GC_MARK_ROOTS_MS",
+ "GC_MAX_PAUSE_MS",
+ "GC_MINOR_REASON",
+ "GC_MINOR_REASON_LONG",
+ "GC_MINOR_US",
+ "GC_MMU_50",
+ "GC_MS",
+ "GC_NON_INCREMENTAL",
+ "GC_REASON_2",
+ "GC_RESET",
+ "GC_SCC_SWEEP_MAX_PAUSE_MS",
+ "GC_SCC_SWEEP_TOTAL_MS",
+ "GC_SLICE_MS",
+ "GC_SLOW_PHASE",
+ "GC_SWEEP_MS",
+ "GDI_INITFONTLIST_TOTAL",
+ "GEOLOCATION_ACCURACY_EXPONENTIAL",
+ "GEOLOCATION_ERROR",
+ "GEOLOCATION_OSX_SOURCE_IS_MLS",
+ "GEOLOCATION_WIN8_SOURCE_IS_MLS",
+ "GFX_CONTENT_FAILED_TO_ACQUIRE_DEVICE",
+ "GFX_CRASH",
+ "GHOST_WINDOWS",
+ "GRADIENT_DURATION",
+ "GRADIENT_RETENTION_TIME",
+ "GRAPHICS_DRIVER_STARTUP_TEST",
+ "GRAPHICS_SANITY_TEST",
+ "GRAPHICS_SANITY_TEST_OS_SNAPSHOT",
+ "GRAPHICS_SANITY_TEST_REASON",
+ "HISTORY_LASTVISITED_TREE_QUERY_TIME_MS",
+ "HTTPCONNMGR_TOTAL_SPECULATIVE_CONN",
+ "HTTPCONNMGR_UNUSED_SPECULATIVE_CONN",
+ "HTTPCONNMGR_USED_SPECULATIVE_CONN",
+ "HTTP_AUTH_DIALOG_STATS",
+ "HTTP_CACHE_DISPOSITION_2",
+ "HTTP_CACHE_DISPOSITION_2_V2",
+ "HTTP_CACHE_ENTRY_ALIVE_TIME",
+ "HTTP_CACHE_ENTRY_RELOAD_TIME",
+ "HTTP_CACHE_ENTRY_REUSE_COUNT",
+ "HTTP_CACHE_MISS_HALFLIFE_EXPERIMENT_2",
+ "HTTP_CONNECTION_ENTRY_CACHE_HIT_1",
+ "HTTP_CONTENT_ENCODING",
+ "HTTP_DISK_CACHE_DISPOSITION_2",
+ "HTTP_DISK_CACHE_OVERHEAD",
+ "HTTP_KBREAD_PER_CONN",
+ "HTTP_MEMORY_CACHE_DISPOSITION_2",
+ "HTTP_OFFLINE_CACHE_DISPOSITION_2",
+ "HTTP_OFFLINE_CACHE_DOCUMENT_LOAD",
+ "HTTP_PAGELOAD_IS_SSL",
+ "HTTP_PAGE_CACHE_READ_TIME",
+ "HTTP_PAGE_CACHE_READ_TIME_V2",
+ "HTTP_PAGE_COMPLETE_LOAD",
+ "HTTP_PAGE_COMPLETE_LOAD_CACHED",
+ "HTTP_PAGE_COMPLETE_LOAD_CACHED_V2",
+ "HTTP_PAGE_COMPLETE_LOAD_NET",
+ "HTTP_PAGE_COMPLETE_LOAD_NET_V2",
+ "HTTP_PAGE_COMPLETE_LOAD_V2",
+ "HTTP_PAGE_DNS_ISSUE_TIME",
+ "HTTP_PAGE_DNS_LOOKUP_TIME",
+ "HTTP_PAGE_FIRST_SENT_TO_LAST_RECEIVED",
+ "HTTP_PAGE_OPEN_TO_FIRST_FROM_CACHE",
+ "HTTP_PAGE_OPEN_TO_FIRST_FROM_CACHE_V2",
+ "HTTP_PAGE_OPEN_TO_FIRST_RECEIVED",
+ "HTTP_PAGE_OPEN_TO_FIRST_SENT",
+ "HTTP_PAGE_REVALIDATION",
+ "HTTP_PAGE_TCP_CONNECTION",
+ "HTTP_PROXY_TYPE",
+ "HTTP_REQUEST_PER_CONN",
+ "HTTP_REQUEST_PER_PAGE",
+ "HTTP_REQUEST_PER_PAGE_FROM_CACHE",
+ "HTTP_RESPONSE_VERSION",
+ "HTTP_SAW_QUIC_ALT_PROTOCOL",
+ "HTTP_SCHEME_UPGRADE",
+ "HTTP_SUBITEM_FIRST_BYTE_LATENCY_TIME",
+ "HTTP_SUBITEM_OPEN_LATENCY_TIME",
+ "HTTP_SUB_CACHE_READ_TIME",
+ "HTTP_SUB_CACHE_READ_TIME_V2",
+ "HTTP_SUB_COMPLETE_LOAD",
+ "HTTP_SUB_COMPLETE_LOAD_CACHED",
+ "HTTP_SUB_COMPLETE_LOAD_CACHED_V2",
+ "HTTP_SUB_COMPLETE_LOAD_NET",
+ "HTTP_SUB_COMPLETE_LOAD_NET_V2",
+ "HTTP_SUB_COMPLETE_LOAD_V2",
+ "HTTP_SUB_DNS_ISSUE_TIME",
+ "HTTP_SUB_DNS_LOOKUP_TIME",
+ "HTTP_SUB_FIRST_SENT_TO_LAST_RECEIVED",
+ "HTTP_SUB_OPEN_TO_FIRST_FROM_CACHE",
+ "HTTP_SUB_OPEN_TO_FIRST_FROM_CACHE_V2",
+ "HTTP_SUB_OPEN_TO_FIRST_RECEIVED",
+ "HTTP_SUB_OPEN_TO_FIRST_SENT",
+ "HTTP_SUB_REVALIDATION",
+ "HTTP_SUB_TCP_CONNECTION",
+ "HTTP_TRANSACTION_IS_SSL",
+ "HTTP_TRANSACTION_USE_ALTSVC",
+ "HTTP_TRANSACTION_USE_ALTSVC_OE",
+ "IMAGE_DECODE_CHUNKS",
+ "IMAGE_DECODE_COUNT",
+ "IMAGE_DECODE_LATENCY_US",
+ "IMAGE_DECODE_ON_DRAW_LATENCY",
+ "IMAGE_DECODE_SPEED_GIF",
+ "IMAGE_DECODE_SPEED_JPEG",
+ "IMAGE_DECODE_SPEED_PNG",
+ "IMAGE_DECODE_TIME",
+ "INNERWINDOWS_WITH_MUTATION_LISTENERS",
+ "IPC_SAME_PROCESS_MESSAGE_COPY_OOM_KB",
+ "IPC_TRANSACTION_CANCEL",
+ "IPV4_AND_IPV6_ADDRESS_CONNECTIVITY",
+ "JS_DEPRECATED_LANGUAGE_EXTENSIONS_IN_ADDONS",
+ "JS_DEPRECATED_LANGUAGE_EXTENSIONS_IN_CONTENT",
+ "JS_TELEMETRY_ADDON_EXCEPTIONS",
+ "LINK_ICON_SIZES_ATTR_DIMENSION",
+ "LINK_ICON_SIZES_ATTR_USAGE",
+ "LOCALDOMSTORAGE_CLEAR_BLOCKING_MS",
+ "LOCALDOMSTORAGE_GETALLKEYS_BLOCKING_MS",
+ "LOCALDOMSTORAGE_GETKEY_BLOCKING_MS",
+ "LOCALDOMSTORAGE_GETLENGTH_BLOCKING_MS",
+ "LOCALDOMSTORAGE_GETVALUE_BLOCKING_MS",
+ "LOCALDOMSTORAGE_PRELOAD_PENDING_ON_FIRST_ACCESS",
+ "LOCALDOMSTORAGE_REMOVEKEY_BLOCKING_MS",
+ "LOCALDOMSTORAGE_SESSIONONLY_PRELOAD_BLOCKING_MS",
+ "LOCALDOMSTORAGE_SETVALUE_BLOCKING_MS",
+ "LOCALDOMSTORAGE_SHUTDOWN_DATABASE_MS",
+ "LOCALDOMSTORAGE_UNLOAD_BLOCKING_MS",
+ "LONG_REFLOW_INTERRUPTIBLE",
+ "LOW_MEMORY_EVENTS_COMMIT_SPACE",
+ "LOW_MEMORY_EVENTS_PHYSICAL",
+ "LOW_MEMORY_EVENTS_VIRTUAL",
+ "MAC_INITFONTLIST_TOTAL",
+ "MASTER_PASSWORD_ENABLED",
+ "MEDIA_CODEC_USED",
+ "MEDIA_WMF_DECODE_ERROR",
+ "MEMORY_FREE_PURGED_PAGES_MS",
+ "MEMORY_HEAP_ALLOCATED",
+ "MEMORY_HEAP_COMMITTED_UNUSED",
+ "MEMORY_IMAGES_CONTENT_USED_UNCOMPRESSED",
+ "MEMORY_JS_COMPARTMENTS_SYSTEM",
+ "MEMORY_JS_COMPARTMENTS_USER",
+ "MEMORY_JS_GC_HEAP",
+ "MEMORY_STORAGE_SQLITE",
+ "MEMORY_VSIZE",
+ "MEMORY_VSIZE_MAX_CONTIGUOUS",
+ "MIXED_CONTENT_HSTS",
+ "MIXED_CONTENT_PAGE_LOAD",
+ "MIXED_CONTENT_UNBLOCK_COUNTER",
+ "MOZ_SQLITE_COOKIES_OPEN_READAHEAD_MS",
+ "MOZ_SQLITE_COOKIES_READ_B",
+ "MOZ_SQLITE_COOKIES_READ_MAIN_THREAD_MS",
+ "MOZ_SQLITE_COOKIES_READ_MS",
+ "MOZ_SQLITE_COOKIES_SYNC_MAIN_THREAD_MS",
+ "MOZ_SQLITE_COOKIES_SYNC_MS",
+ "MOZ_SQLITE_COOKIES_WRITE_B",
+ "MOZ_SQLITE_COOKIES_WRITE_MAIN_THREAD_MS",
+ "MOZ_SQLITE_COOKIES_WRITE_MS",
+ "MOZ_SQLITE_OPEN_MAIN_THREAD_MS",
+ "MOZ_SQLITE_OPEN_MS",
+ "MOZ_SQLITE_OTHER_READ_B",
+ "MOZ_SQLITE_OTHER_READ_MAIN_THREAD_MS",
+ "MOZ_SQLITE_OTHER_READ_MS",
+ "MOZ_SQLITE_OTHER_SYNC_MAIN_THREAD_MS",
+ "MOZ_SQLITE_OTHER_SYNC_MS",
+ "MOZ_SQLITE_OTHER_WRITE_B",
+ "MOZ_SQLITE_OTHER_WRITE_MAIN_THREAD_MS",
+ "MOZ_SQLITE_OTHER_WRITE_MS",
+ "MOZ_SQLITE_PLACES_READ_B",
+ "MOZ_SQLITE_PLACES_READ_MAIN_THREAD_MS",
+ "MOZ_SQLITE_PLACES_READ_MS",
+ "MOZ_SQLITE_PLACES_SYNC_MAIN_THREAD_MS",
+ "MOZ_SQLITE_PLACES_SYNC_MS",
+ "MOZ_SQLITE_PLACES_WRITE_B",
+ "MOZ_SQLITE_PLACES_WRITE_MAIN_THREAD_MS",
+ "MOZ_SQLITE_PLACES_WRITE_MS",
+ "MOZ_SQLITE_TRUNCATE_MAIN_THREAD_MS",
+ "MOZ_SQLITE_TRUNCATE_MS",
+ "MOZ_SQLITE_WEBAPPS_READ_B",
+ "MOZ_SQLITE_WEBAPPS_READ_MAIN_THREAD_MS",
+ "MOZ_SQLITE_WEBAPPS_READ_MS",
+ "MOZ_SQLITE_WEBAPPS_SYNC_MAIN_THREAD_MS",
+ "MOZ_SQLITE_WEBAPPS_SYNC_MS",
+ "MOZ_SQLITE_WEBAPPS_WRITE_B",
+ "MOZ_SQLITE_WEBAPPS_WRITE_MAIN_THREAD_MS",
+ "MOZ_SQLITE_WEBAPPS_WRITE_MS",
+ "MOZ_STORAGE_ASYNC_REQUESTS_MS",
+ "MOZ_STORAGE_ASYNC_REQUESTS_SUCCESS",
+ "NETWORK_CACHE_FS_TYPE",
+ "NETWORK_CACHE_HASH_STATS",
+ "NETWORK_CACHE_HIT_MISS_STAT_PER_CACHE_SIZE",
+ "NETWORK_CACHE_HIT_RATE_PER_CACHE_SIZE",
+ "NETWORK_CACHE_METADATA_FIRST_READ_SIZE",
+ "NETWORK_CACHE_METADATA_FIRST_READ_TIME_MS",
+ "NETWORK_CACHE_METADATA_SECOND_READ_TIME_MS",
+ "NETWORK_CACHE_METADATA_SIZE",
+ "NETWORK_CACHE_SIZE_FULL_FAT",
+ "NETWORK_CACHE_V1_HIT_TIME_MS",
+ "NETWORK_CACHE_V1_MISS_TIME_MS",
+ "NETWORK_CACHE_V1_TRUNCATE_TIME_MS",
+ "NETWORK_CACHE_V2_HIT_TIME_MS",
+ "NETWORK_CACHE_V2_INPUT_STREAM_STATUS",
+ "NETWORK_CACHE_V2_MISS_TIME_MS",
+ "NETWORK_CACHE_V2_OUTPUT_STREAM_STATUS",
+ "NETWORK_DISK_CACHE2_SHUTDOWN_CLEAR_PRIVATE",
+ "NETWORK_DISK_CACHE_DELETEDIR",
+ "NETWORK_DISK_CACHE_DELETEDIR_SHUTDOWN",
+ "NETWORK_DISK_CACHE_OPEN",
+ "NETWORK_DISK_CACHE_SHUTDOWN",
+ "NETWORK_DISK_CACHE_SHUTDOWN_CLEAR_PRIVATE",
+ "NETWORK_DISK_CACHE_SHUTDOWN_V2",
+ "NETWORK_DISK_CACHE_TRASHRENAME",
+ "NEWTAB_PAGE_BLOCKED_SITES_COUNT",
+ "NEWTAB_PAGE_ENABLED",
+ "NEWTAB_PAGE_ENHANCED",
+ "NEWTAB_PAGE_LIFE_SPAN",
+ "NEWTAB_PAGE_LIFE_SPAN_SUGGESTED",
+ "NEWTAB_PAGE_PINNED_SITES_COUNT",
+ "NEWTAB_PAGE_SHOWN",
+ "NEWTAB_PAGE_SITE_CLICKED",
+ "NTLM_MODULE_USED_2",
+ "ONBEFOREUNLOAD_PROMPT_ACTION",
+ "ONBEFOREUNLOAD_PROMPT_COUNT",
+ "OSFILE_WORKER_LAUNCH_MS",
+ "OSFILE_WORKER_READY_MS",
+ "OSFILE_WRITEATOMIC_JANK_MS",
+ "PAGE_FAULTS_HARD",
+ "PAINT_BUILD_DISPLAYLIST_TIME",
+ "PAINT_RASTERIZE_TIME",
+ "PDF_VIEWER_DOCUMENT_GENERATOR",
+ "PDF_VIEWER_DOCUMENT_SIZE_KB",
+ "PDF_VIEWER_DOCUMENT_VERSION",
+ "PDF_VIEWER_EMBED",
+ "PDF_VIEWER_FALLBACK_SHOWN",
+ "PDF_VIEWER_FONT_TYPES",
+ "PDF_VIEWER_FORM",
+ "PDF_VIEWER_PRINT",
+ "PDF_VIEWER_STREAM_TYPES",
+ "PDF_VIEWER_TIME_TO_VIEW_MS",
+ "PDF_VIEWER_USED",
+ "PERF_MONITORING_SLOW_ADDON_CPOW_US",
+ "PERF_MONITORING_SLOW_ADDON_JANK_US",
+ "PERF_MONITORING_TEST_CPU_RESCHEDULING_PROPORTION_MOVED",
+ "PERMISSIONS_MIGRATION_7_ERROR",
+ "PERMISSIONS_REMIGRATION_COMPARISON",
+ "PERMISSIONS_SQL_CORRUPTED",
+ "PLACES_ANNOS_BOOKMARKS_COUNT",
+ "PLACES_ANNOS_PAGES_COUNT",
+ "PLACES_AUTOCOMPLETE_1ST_RESULT_TIME_MS",
+ "PLACES_AUTOCOMPLETE_6_FIRST_RESULTS_TIME_MS",
+ "PLACES_AUTOCOMPLETE_URLINLINE_DOMAIN_QUERY_TIME_MS",
+ "PLACES_BACKUPS_BOOKMARKSTREE_MS",
+ "PLACES_BACKUPS_DAYSFROMLAST",
+ "PLACES_BACKUPS_TOJSON_MS",
+ "PLACES_BOOKMARKS_COUNT",
+ "PLACES_DATABASE_FILESIZE_MB",
+ "PLACES_DATABASE_PAGESIZE_B",
+ "PLACES_DATABASE_SIZE_PER_PAGE_B",
+ "PLACES_EXPIRATION_STEPS_TO_CLEAN2",
+ "PLACES_EXPORT_TOHTML_MS",
+ "PLACES_FAVICON_BMP_SIZES",
+ "PLACES_FAVICON_GIF_SIZES",
+ "PLACES_FAVICON_ICO_SIZES",
+ "PLACES_FAVICON_JPEG_SIZES",
+ "PLACES_FAVICON_OTHER_SIZES",
+ "PLACES_FAVICON_PNG_SIZES",
+ "PLACES_FAVICON_SVG_SIZES",
+ "PLACES_HISTORY_LIBRARY_SEARCH_TIME_MS",
+ "PLACES_IDLE_FRECENCY_DECAY_TIME_MS",
+ "PLACES_IDLE_MAINTENANCE_TIME_MS",
+ "PLACES_KEYWORDS_COUNT",
+ "PLACES_MAINTENANCE_DAYSFROMLAST",
+ "PLACES_MOST_RECENT_EXPIRED_VISIT_DAYS",
+ "PLACES_PAGES_COUNT",
+ "PLACES_SORTED_BOOKMARKS_PERC",
+ "PLACES_TAGGED_BOOKMARKS_PERC",
+ "PLACES_TAGS_COUNT",
+ "PLUGINS_INFOBAR_ALLOW",
+ "PLUGINS_INFOBAR_BLOCK",
+ "PLUGINS_INFOBAR_SHOWN",
+ "PLUGINS_NOTIFICATION_PLUGIN_COUNT",
+ "PLUGINS_NOTIFICATION_SHOWN",
+ "PLUGINS_NOTIFICATION_USER_ACTION",
+ "PLUGIN_CALLED_DIRECTLY",
+ "PLUGIN_HANG_NOTICE_COUNT",
+ "PLUGIN_HANG_TIME",
+ "PLUGIN_HANG_UI_DONT_ASK",
+ "PLUGIN_HANG_UI_RESPONSE_TIME",
+ "PLUGIN_HANG_UI_USER_RESPONSE",
+ "PLUGIN_LOAD_METADATA",
+ "PLUGIN_SHUTDOWN_MS",
+ "PRCLOSE_TCP_BLOCKING_TIME_CONNECTIVITY_CHANGE",
+ "PRCLOSE_TCP_BLOCKING_TIME_LINK_CHANGE",
+ "PRCLOSE_TCP_BLOCKING_TIME_NORMAL",
+ "PRCLOSE_TCP_BLOCKING_TIME_OFFLINE",
+ "PRCLOSE_TCP_BLOCKING_TIME_SHUTDOWN",
+ "PRCLOSE_UDP_BLOCKING_TIME_CONNECTIVITY_CHANGE",
+ "PRCLOSE_UDP_BLOCKING_TIME_LINK_CHANGE",
+ "PRCLOSE_UDP_BLOCKING_TIME_NORMAL",
+ "PRCLOSE_UDP_BLOCKING_TIME_OFFLINE",
+ "PRCLOSE_UDP_BLOCKING_TIME_SHUTDOWN",
+ "PRCONNECTCONTINUE_BLOCKING_TIME_CONNECTIVITY_CHANGE",
+ "PRCONNECTCONTINUE_BLOCKING_TIME_LINK_CHANGE",
+ "PRCONNECTCONTINUE_BLOCKING_TIME_NORMAL",
+ "PRCONNECTCONTINUE_BLOCKING_TIME_OFFLINE",
+ "PRCONNECTCONTINUE_BLOCKING_TIME_SHUTDOWN",
+ "PRCONNECT_BLOCKING_TIME_CONNECTIVITY_CHANGE",
+ "PRCONNECT_BLOCKING_TIME_LINK_CHANGE",
+ "PRCONNECT_BLOCKING_TIME_NORMAL",
+ "PRCONNECT_BLOCKING_TIME_OFFLINE",
+ "PRCONNECT_BLOCKING_TIME_SHUTDOWN",
+ "PREDICTOR_BASE_CONFIDENCE",
+ "PREDICTOR_CONFIDENCE",
+ "PREDICTOR_GLOBAL_DEGRADATION",
+ "PREDICTOR_LEARN_ATTEMPTS",
+ "PREDICTOR_LEARN_FULL_QUEUE",
+ "PREDICTOR_LEARN_WORK_TIME",
+ "PREDICTOR_PREDICTIONS_CALCULATED",
+ "PREDICTOR_PREDICT_ATTEMPTS",
+ "PREDICTOR_PREDICT_FULL_QUEUE",
+ "PREDICTOR_PREDICT_TIME_TO_ACTION",
+ "PREDICTOR_PREDICT_TIME_TO_INACTION",
+ "PREDICTOR_PREDICT_WORK_TIME",
+ "PREDICTOR_SUBRESOURCE_DEGRADATION",
+ "PREDICTOR_TOTAL_PRECONNECTS",
+ "PREDICTOR_TOTAL_PRECONNECTS_CREATED",
+ "PREDICTOR_TOTAL_PRECONNECTS_UNUSED",
+ "PREDICTOR_TOTAL_PRECONNECTS_USED",
+ "PREDICTOR_TOTAL_PREDICTIONS",
+ "PREDICTOR_TOTAL_PRERESOLVES",
+ "PREDICTOR_WAIT_TIME",
+ "PROCESS_CRASH_SUBMIT_ATTEMPT",
+ "PROCESS_CRASH_SUBMIT_SUCCESS",
+ "PUSH_API_NOTIFICATION_RECEIVED",
+ "PUSH_API_NOTIFICATION_RECEIVED_BUT_DID_NOT_NOTIFY",
+ "PUSH_API_NOTIFY",
+ "PUSH_API_NOTIFY_REGISTRATION_LOST",
+ "PUSH_API_PERMISSION_DENIED",
+ "PUSH_API_PERMISSION_GRANTED",
+ "PUSH_API_PERMISSION_REQUESTED",
+ "PUSH_API_QUOTA_EXPIRATION_TIME",
+ "PUSH_API_QUOTA_RESET_TO",
+ "PUSH_API_SUBSCRIBE_ATTEMPT",
+ "PUSH_API_SUBSCRIBE_FAILED",
+ "PUSH_API_SUBSCRIBE_HTTP2_TIME",
+ "PUSH_API_SUBSCRIBE_SUCCEEDED",
+ "PUSH_API_SUBSCRIBE_WS_TIME",
+ "PUSH_API_UNSUBSCRIBE_ATTEMPT",
+ "PUSH_API_UNSUBSCRIBE_FAILED",
+ "PUSH_API_UNSUBSCRIBE_SUCCEEDED",
+ "PUSH_API_USED",
+ "PWMGR_BLOCKLIST_NUM_SITES",
+ "PWMGR_FORM_AUTOFILL_RESULT",
+ "PWMGR_LOGIN_LAST_USED_DAYS",
+ "PWMGR_LOGIN_PAGE_SAFETY",
+ "PWMGR_MANAGE_COPIED_PASSWORD",
+ "PWMGR_MANAGE_COPIED_USERNAME",
+ "PWMGR_MANAGE_DELETED",
+ "PWMGR_MANAGE_DELETED_ALL",
+ "PWMGR_MANAGE_OPENED",
+ "PWMGR_MANAGE_SORTED",
+ "PWMGR_MANAGE_VISIBILITY_TOGGLED",
+ "PWMGR_NUM_HTTPAUTH_PASSWORDS",
+ "PWMGR_NUM_PASSWORDS_PER_HOSTNAME",
+ "PWMGR_NUM_SAVED_PASSWORDS",
+ "PWMGR_PASSWORD_INPUT_IN_FORM",
+ "PWMGR_PROMPT_REMEMBER_ACTION",
+ "PWMGR_PROMPT_UPDATE_ACTION",
+ "PWMGR_SAVING_ENABLED",
+ "PWMGR_USERNAME_PRESENT",
+ "RANGE_CHECKSUM_ERRORS",
+ "READER_MODE_DOWNLOAD_RESULT",
+ "READER_MODE_PARSE_RESULT",
+ "REFRESH_DRIVER_TICK",
+ "REQUESTS_OF_ORIGINAL_CONTENT",
+ "SAFE_MODE_USAGE",
+ "SEARCH_COUNTS",
+ "SEARCH_SERVICE_COUNTRY_FETCH_CAUSED_SYNC_INIT",
+ "SEARCH_SERVICE_COUNTRY_FETCH_RESULT",
+ "SEARCH_SERVICE_COUNTRY_FETCH_TIME_MS",
+ "SEARCH_SERVICE_COUNTRY_TIMEOUT",
+ "SEARCH_SERVICE_INIT_MS",
+ "SEARCH_SERVICE_INIT_SYNC",
+ "SEARCH_SERVICE_NONUS_COUNTRY_MISMATCHED_PLATFORM_OSX",
+ "SEARCH_SERVICE_NONUS_COUNTRY_MISMATCHED_PLATFORM_WIN",
+ "SEARCH_SERVICE_US_COUNTRY_MISMATCHED_PLATFORM_OSX",
+ "SEARCH_SERVICE_US_COUNTRY_MISMATCHED_PLATFORM_WIN",
+ "SEARCH_SERVICE_US_COUNTRY_MISMATCHED_TIMEZONE",
+ "SEARCH_SERVICE_US_TIMEZONE_MISMATCHED_COUNTRY",
+ "SECURITY_UI",
+ "SERVICE_WORKER_CONTROLLED_DOCUMENTS",
+ "SERVICE_WORKER_LIFE_TIME",
+ "SERVICE_WORKER_REGISTRATIONS",
+ "SERVICE_WORKER_REGISTRATION_LOADING",
+ "SERVICE_WORKER_REQUEST_PASSTHROUGH",
+ "SERVICE_WORKER_SPAWN_ATTEMPTS",
+ "SERVICE_WORKER_UPDATED",
+ "SERVICE_WORKER_WAS_SPAWNED",
+ "SHOULD_AUTO_DETECT_LANGUAGE",
+ "SHOULD_TRANSLATION_UI_APPEAR",
+ "SHUTDOWN_OK",
+ "SHUTDOWN_PHASE_DURATION_TICKS_PROFILE_BEFORE_CHANGE",
+ "SHUTDOWN_PHASE_DURATION_TICKS_PROFILE_CHANGE_TEARDOWN",
+ "SHUTDOWN_PHASE_DURATION_TICKS_QUIT_APPLICATION",
+ "SHUTDOWN_PHASE_DURATION_TICKS_XPCOM_WILL_SHUTDOWN",
+ "SLOW_ADDON_WARNING_RESPONSE_TIME",
+ "SLOW_ADDON_WARNING_STATES",
+ "SLOW_SCRIPT_NOTICE_COUNT",
+ "SOCIAL_ENABLED_ON_SESSION",
+ "SOCIAL_PANEL_CLICKS",
+ "SOCIAL_SIDEBAR_OPEN_DURATION",
+ "SOCIAL_SIDEBAR_STATE",
+ "SOCIAL_TOOLBAR_BUTTONS",
+ "SPDY_CHUNK_RECVD",
+ "SPDY_GOAWAY_LOCAL",
+ "SPDY_GOAWAY_PEER",
+ "SPDY_KBREAD_PER_CONN",
+ "SPDY_NPN_CONNECT",
+ "SPDY_NPN_JOIN",
+ "SPDY_PARALLEL_STREAMS",
+ "SPDY_REQUEST_PER_CONN",
+ "SPDY_SERVER_INITIATED_STREAMS",
+ "SPDY_SETTINGS_CWND",
+ "SPDY_SETTINGS_DL_BW",
+ "SPDY_SETTINGS_IW",
+ "SPDY_SETTINGS_MAX_STREAMS",
+ "SPDY_SETTINGS_RETRANS",
+ "SPDY_SETTINGS_RTT",
+ "SPDY_SETTINGS_UL_BW",
+ "SPDY_SYN_RATIO",
+ "SPDY_SYN_REPLY_RATIO",
+ "SPDY_SYN_REPLY_SIZE",
+ "SPDY_SYN_SIZE",
+ "SPDY_VERSION2",
+ "SSL_AUTH_ALGORITHM_FULL",
+ "SSL_AUTH_ECDSA_CURVE_FULL",
+ "SSL_AUTH_RSA_KEY_SIZE_FULL",
+ "SSL_BYTES_BEFORE_CERT_CALLBACK",
+ "SSL_CERT_ERROR_OVERRIDES",
+ "SSL_CERT_VERIFICATION_ERRORS",
+ "SSL_CIPHER_SUITE_FULL",
+ "SSL_CIPHER_SUITE_RESUMED",
+ "SSL_HANDSHAKE_TYPE",
+ "SSL_INITIAL_FAILED_CERT_VALIDATION_TIME_MOZILLAPKIX",
+ "SSL_KEA_DHE_KEY_SIZE_FULL",
+ "SSL_KEA_ECDHE_CURVE_FULL",
+ "SSL_KEA_RSA_KEY_SIZE_FULL",
+ "SSL_KEY_EXCHANGE_ALGORITHM_FULL",
+ "SSL_KEY_EXCHANGE_ALGORITHM_RESUMED",
+ "SSL_NPN_TYPE",
+ "SSL_OBSERVED_END_ENTITY_CERTIFICATE_LIFETIME",
+ "SSL_OCSP_MAY_FETCH",
+ "SSL_OCSP_STAPLING",
+ "SSL_PERMANENT_CERT_ERROR_OVERRIDES",
+ "SSL_REASONS_FOR_NOT_FALSE_STARTING",
+ "SSL_RESUMED_SESSION",
+ "SSL_SERVER_AUTH_EKU",
+ "SSL_SUCCESFUL_CERT_VALIDATION_TIME_MOZILLAPKIX",
+ "SSL_SYMMETRIC_CIPHER_FULL",
+ "SSL_SYMMETRIC_CIPHER_RESUMED",
+ "SSL_TIME_UNTIL_HANDSHAKE_FINISHED",
+ "SSL_TIME_UNTIL_READY",
+ "SSL_TLS10_INTOLERANCE_REASON_POST",
+ "SSL_TLS10_INTOLERANCE_REASON_PRE",
+ "SSL_TLS11_INTOLERANCE_REASON_POST",
+ "SSL_TLS11_INTOLERANCE_REASON_PRE",
+ "SSL_TLS12_INTOLERANCE_REASON_POST",
+ "SSL_TLS12_INTOLERANCE_REASON_PRE",
+ "SSL_VERSION_FALLBACK_INAPPROPRIATE",
+ "SSL_WEAK_CIPHERS_FALLBACK",
+ "STARTUP_CACHE_AGE_HOURS",
+ "STARTUP_CACHE_INVALID",
+ "STARTUP_CRASH_DETECTED",
+ "STARTUP_MEASUREMENT_ERRORS",
+ "STS_NUMBER_OF_ONSOCKETREADY_CALLS",
+ "STS_NUMBER_OF_PENDING_EVENTS",
+ "STS_NUMBER_OF_PENDING_EVENTS_IN_THE_LAST_CYCLE",
+ "STS_POLL_AND_EVENTS_CYCLE",
+ "STS_POLL_AND_EVENT_THE_LAST_CYCLE",
+ "STS_POLL_BLOCK_TIME",
+ "STS_POLL_CYCLE",
+ "STUMBLER_OBSERVATIONS_PER_DAY",
+ "STUMBLER_TIME_BETWEEN_RECEIVED_LOCATIONS_SEC",
+ "STUMBLER_TIME_BETWEEN_START_SEC",
+ "STUMBLER_TIME_BETWEEN_UPLOADS_SEC",
+ "STUMBLER_UPLOAD_BYTES",
+ "STUMBLER_UPLOAD_CELL_COUNT",
+ "STUMBLER_UPLOAD_OBSERVATION_COUNT",
+ "STUMBLER_UPLOAD_WIFI_AP_COUNT",
+ "STUMBLER_VOLUME_BYTES_UPLOADED_PER_SEC",
+ "SUBJECT_PRINCIPAL_ACCESSED_WITHOUT_SCRIPT_ON_STACK",
+ "SUBPROCESS_ABNORMAL_ABORT",
+ "SUBPROCESS_CRASHES_WITH_DUMP",
+ "SYSTEM_FONT_FALLBACK",
+ "SYSTEM_FONT_FALLBACK_FIRST",
+ "SYSTEM_FONT_FALLBACK_SCRIPT",
+ "TELEMETRY_COMPRESS",
+ "TELEMETRY_DISCARDED_ARCHIVED_PINGS_SIZE_MB",
+ "TELEMETRY_DISCARDED_CONTENT_PINGS_COUNT",
+ "TELEMETRY_DISCARDED_PENDING_PINGS_SIZE_MB",
+ "TELEMETRY_DISCARDED_SEND_PINGS_SIZE_MB",
+ "TELEMETRY_INVALID_PING_TYPE_SUBMITTED",
+ "TELEMETRY_MEMORY_REPORTER_MS",
+ "TELEMETRY_PENDING_CHECKING_OVER_QUOTA_MS",
+ "TELEMETRY_PENDING_EVICTING_OVER_QUOTA_MS",
+ "TELEMETRY_PENDING_LOAD_FAILURE_PARSE",
+ "TELEMETRY_PENDING_LOAD_FAILURE_READ",
+ "TELEMETRY_PENDING_PINGS_AGE",
+ "TELEMETRY_PENDING_PINGS_EVICTED_OVER_QUOTA",
+ "TELEMETRY_PENDING_PINGS_SIZE_MB",
+ "TELEMETRY_PING_EVICTED_FOR_SERVER_ERRORS",
+ "TELEMETRY_PING_SIZE_EXCEEDED_ARCHIVED",
+ "TELEMETRY_PING_SIZE_EXCEEDED_PENDING",
+ "TELEMETRY_PING_SIZE_EXCEEDED_SEND",
+ "TELEMETRY_SESSIONDATA_FAILED_LOAD",
+ "TELEMETRY_SESSIONDATA_FAILED_PARSE",
+ "TELEMETRY_SESSIONDATA_FAILED_SAVE",
+ "TELEMETRY_SESSIONDATA_FAILED_VALIDATION",
+ "TELEMETRY_STRINGIFY",
+ "TELEMETRY_SUCCESS",
+ "TELEMETRY_TEST_COUNT",
+ "TELEMETRY_TEST_COUNT_INIT_NO_RECORD",
+ "TELEMETRY_TEST_EXPIRED",
+ "TELEMETRY_TEST_FLAG",
+ "TELEMETRY_TEST_KEYED_COUNT",
+ "TELEMETRY_TEST_KEYED_COUNT_INIT_NO_RECORD",
+ "TELEMETRY_TEST_KEYED_FLAG",
+ "TELEMETRY_TEST_KEYED_RELEASE_OPTIN",
+ "TELEMETRY_TEST_KEYED_RELEASE_OPTOUT",
+ "TELEMETRY_TEST_RELEASE_OPTIN",
+ "TELEMETRY_TEST_RELEASE_OPTOUT",
+ "THUNDERBIRD_CONVERSATIONS_TIME_TO_2ND_GLODA_QUERY_MS",
+ "THUNDERBIRD_GLODA_SIZE_MB",
+ "THUNDERBIRD_INDEXING_RATE_MSG_PER_S",
+ "TLS_ERROR_REPORT_UI",
+ "TOP_LEVEL_CONTENT_DOCUMENTS_DESTROYED",
+ "TOTAL_CONTENT_PAGE_LOAD_TIME",
+ "TOTAL_COUNT_HIGH_ERRORS",
+ "TOTAL_COUNT_LOW_ERRORS",
+ "TOUCH_ENABLED_DEVICE",
+ "TRACKING_PROTECTION_ENABLED",
+ "TRACKING_PROTECTION_EVENTS",
+ "TRACKING_PROTECTION_PBM_DISABLED",
+ "TRACKING_PROTECTION_SHIELD",
+ "TRANSACTION_WAIT_TIME_HTTP",
+ "TRANSACTION_WAIT_TIME_HTTP_PIPELINES",
+ "TRANSACTION_WAIT_TIME_SPDY",
+ "TRANSLATED_CHARACTERS",
+ "TRANSLATED_PAGES",
+ "TRANSLATED_PAGES_BY_LANGUAGE",
+ "TRANSLATION_OPPORTUNITIES",
+ "TRANSLATION_OPPORTUNITIES_BY_LANGUAGE",
+ "UPDATE_CANNOT_STAGE_EXTERNAL",
+ "UPDATE_CANNOT_STAGE_NOTIFY",
+ "UPDATE_CHECK_CODE_EXTERNAL",
+ "UPDATE_CHECK_CODE_NOTIFY",
+ "UPDATE_CHECK_EXTENDED_ERROR_EXTERNAL",
+ "UPDATE_CHECK_EXTENDED_ERROR_NOTIFY",
+ "UPDATE_CHECK_NO_UPDATE_EXTERNAL",
+ "UPDATE_CHECK_NO_UPDATE_NOTIFY",
+ "UPDATE_DOWNLOAD_CODE_COMPLETE",
+ "UPDATE_DOWNLOAD_CODE_PARTIAL",
+ "UPDATE_INVALID_LASTUPDATETIME_EXTERNAL",
+ "UPDATE_INVALID_LASTUPDATETIME_NOTIFY",
+ "UPDATE_LAST_NOTIFY_INTERVAL_DAYS_EXTERNAL",
+ "UPDATE_LAST_NOTIFY_INTERVAL_DAYS_NOTIFY",
+ "UPDATE_NOT_PREF_UPDATE_AUTO_EXTERNAL",
+ "UPDATE_NOT_PREF_UPDATE_AUTO_NOTIFY",
+ "UPDATE_NOT_PREF_UPDATE_ENABLED_EXTERNAL",
+ "UPDATE_NOT_PREF_UPDATE_ENABLED_NOTIFY",
+ "UPDATE_NOT_PREF_UPDATE_SERVICE_ENABLED_EXTERNAL",
+ "UPDATE_NOT_PREF_UPDATE_SERVICE_ENABLED_NOTIFY",
+ "UPDATE_NOT_PREF_UPDATE_STAGING_ENABLED_EXTERNAL",
+ "UPDATE_NOT_PREF_UPDATE_STAGING_ENABLED_NOTIFY",
+ "UPDATE_PING_COUNT_EXTERNAL",
+ "UPDATE_PING_COUNT_NOTIFY",
+ "UPDATE_PREF_SERVICE_ERRORS_EXTERNAL",
+ "UPDATE_PREF_SERVICE_ERRORS_NOTIFY",
+ "UPDATE_PREF_UPDATE_CANCELATIONS_EXTERNAL",
+ "UPDATE_PREF_UPDATE_CANCELATIONS_NOTIFY",
+ "UPDATE_SERVICE_INSTALLED_EXTERNAL",
+ "UPDATE_SERVICE_INSTALLED_NOTIFY",
+ "UPDATE_SERVICE_MANUALLY_UNINSTALLED_EXTERNAL",
+ "UPDATE_SERVICE_MANUALLY_UNINSTALLED_NOTIFY",
+ "UPDATE_STATE_CODE_COMPLETE_STAGE",
+ "UPDATE_STATE_CODE_COMPLETE_STARTUP",
+ "UPDATE_STATE_CODE_PARTIAL_STAGE",
+ "UPDATE_STATE_CODE_PARTIAL_STARTUP",
+ "UPDATE_STATE_CODE_UNKNOWN_STAGE",
+ "UPDATE_STATE_CODE_UNKNOWN_STARTUP",
+ "UPDATE_STATUS_ERROR_CODE_COMPLETE_STAGE",
+ "UPDATE_STATUS_ERROR_CODE_COMPLETE_STARTUP",
+ "UPDATE_STATUS_ERROR_CODE_PARTIAL_STAGE",
+ "UPDATE_STATUS_ERROR_CODE_PARTIAL_STARTUP",
+ "UPDATE_STATUS_ERROR_CODE_UNKNOWN_STAGE",
+ "UPDATE_STATUS_ERROR_CODE_UNKNOWN_STARTUP",
+ "UPDATE_UNABLE_TO_APPLY_EXTERNAL",
+ "UPDATE_UNABLE_TO_APPLY_NOTIFY",
+ "UPDATE_WIZ_LAST_PAGE_CODE",
+ "URLCLASSIFIER_CL_CHECK_TIME",
+ "URLCLASSIFIER_CL_UPDATE_TIME",
+ "URLCLASSIFIER_LC_COMPLETIONS",
+ "URLCLASSIFIER_LC_PREFIXES",
+ "URLCLASSIFIER_LOOKUP_TIME",
+ "URLCLASSIFIER_PS_CONSTRUCT_TIME",
+ "URLCLASSIFIER_PS_FALLOCATE_TIME",
+ "URLCLASSIFIER_PS_FILELOAD_TIME",
+ "VIDEO_CANPLAYTYPE_H264_CONSTRAINT_SET_FLAG",
+ "VIDEO_CANPLAYTYPE_H264_LEVEL",
+ "VIDEO_CANPLAYTYPE_H264_PROFILE",
+ "VIDEO_CAN_CREATE_AAC_DECODER",
+ "VIDEO_CAN_CREATE_H264_DECODER",
+ "VIDEO_DECODED_H264_SPS_CONSTRAINT_SET_FLAG",
+ "VIDEO_DECODED_H264_SPS_LEVEL",
+ "VIDEO_DECODED_H264_SPS_PROFILE",
+ "VIDEO_EME_PLAY_SUCCESS",
+ "VIDEO_EME_REQUEST_FAILURE_LATENCY_MS",
+ "VIDEO_EME_REQUEST_SUCCESS_LATENCY_MS",
+ "VIDEO_H264_SPS_MAX_NUM_REF_FRAMES",
+ "VIEW_SOURCE_EXTERNAL_RESULT_BOOLEAN",
+ "VIEW_SOURCE_IN_BROWSER_OPENED_BOOLEAN",
+ "VIEW_SOURCE_IN_WINDOW_OPENED_BOOLEAN",
+ "WEAVE_COMPLETE_SUCCESS_COUNT",
+ "WEAVE_CONFIGURED",
+ "WEAVE_CONFIGURED_MASTER_PASSWORD",
+ "WEAVE_START_COUNT",
+ "WEBCRYPTO_ALG",
+ "WEBCRYPTO_EXTRACTABLE_ENC",
+ "WEBCRYPTO_EXTRACTABLE_GENERATE",
+ "WEBCRYPTO_EXTRACTABLE_IMPORT",
+ "WEBCRYPTO_EXTRACTABLE_SIG",
+ "WEBCRYPTO_METHOD",
+ "WEBCRYPTO_RESOLVED",
+ "WEBFONT_COMPRESSION_WOFF",
+ "WEBFONT_COMPRESSION_WOFF2",
+ "WEBFONT_DOWNLOAD_TIME",
+ "WEBFONT_DOWNLOAD_TIME_AFTER_START",
+ "WEBFONT_FONTTYPE",
+ "WEBFONT_PER_PAGE",
+ "WEBFONT_SIZE",
+ "WEBFONT_SIZE_PER_PAGE",
+ "WEBFONT_SRCTYPE",
+ "WEBRTC_AUDIO_QUALITY_INBOUND_BANDWIDTH_KBITS",
+ "WEBRTC_AUDIO_QUALITY_INBOUND_JITTER",
+ "WEBRTC_AUDIO_QUALITY_INBOUND_PACKETLOSS_RATE",
+ "WEBRTC_AUDIO_QUALITY_OUTBOUND_BANDWIDTH_KBITS",
+ "WEBRTC_AUDIO_QUALITY_OUTBOUND_JITTER",
+ "WEBRTC_AUDIO_QUALITY_OUTBOUND_PACKETLOSS_RATE",
+ "WEBRTC_AUDIO_QUALITY_OUTBOUND_RTT",
+ "WEBRTC_AVSYNC_WHEN_AUDIO_LAGS_VIDEO_MS",
+ "WEBRTC_AVSYNC_WHEN_VIDEO_LAGS_AUDIO_MS",
+ "WEBRTC_CALL_COUNT",
+ "WEBRTC_CALL_DURATION",
+ "WEBRTC_CALL_TYPE",
+ "WEBRTC_DATACHANNEL_NEGOTIATED",
+ "WEBRTC_GET_USER_MEDIA_SECURE_ORIGIN",
+ "WEBRTC_GET_USER_MEDIA_TYPE",
+ "WEBRTC_ICE_ADD_CANDIDATE_ERRORS_GIVEN_FAILURE",
+ "WEBRTC_ICE_ADD_CANDIDATE_ERRORS_GIVEN_SUCCESS",
+ "WEBRTC_ICE_FAILURE_TIME",
+ "WEBRTC_ICE_FINAL_CONNECTION_STATE",
+ "WEBRTC_ICE_LATE_TRICKLE_ARRIVAL_TIME",
+ "WEBRTC_ICE_ON_TIME_TRICKLE_ARRIVAL_TIME",
+ "WEBRTC_ICE_SUCCESS_RATE",
+ "WEBRTC_ICE_SUCCESS_TIME",
+ "WEBRTC_LOAD_STATE_NORMAL",
+ "WEBRTC_LOAD_STATE_NORMAL_SHORT",
+ "WEBRTC_LOAD_STATE_RELAXED",
+ "WEBRTC_LOAD_STATE_RELAXED_SHORT",
+ "WEBRTC_LOAD_STATE_STRESSED",
+ "WEBRTC_LOAD_STATE_STRESSED_SHORT",
+ "WEBRTC_MAX_AUDIO_RECEIVE_TRACK",
+ "WEBRTC_MAX_AUDIO_SEND_TRACK",
+ "WEBRTC_MAX_VIDEO_RECEIVE_TRACK",
+ "WEBRTC_MAX_VIDEO_SEND_TRACK",
+ "WEBRTC_RENEGOTIATIONS",
+ "WEBRTC_STUN_RATE_LIMIT_EXCEEDED_BY_TYPE_GIVEN_FAILURE",
+ "WEBRTC_STUN_RATE_LIMIT_EXCEEDED_BY_TYPE_GIVEN_SUCCESS",
+ "WEBRTC_VIDEO_DECODER_BITRATE_AVG_PER_CALL_KBPS",
+ "WEBRTC_VIDEO_DECODER_BITRATE_STD_DEV_PER_CALL_KBPS",
+ "WEBRTC_VIDEO_DECODER_DISCARDED_PACKETS_PER_CALL_PPM",
+ "WEBRTC_VIDEO_DECODER_FRAMERATE_10X_STD_DEV_PER_CALL",
+ "WEBRTC_VIDEO_DECODER_FRAMERATE_AVG_PER_CALL",
+ "WEBRTC_VIDEO_DECODE_ERROR_TIME_PERMILLE",
+ "WEBRTC_VIDEO_ENCODER_BITRATE_AVG_PER_CALL_KBPS",
+ "WEBRTC_VIDEO_ENCODER_BITRATE_STD_DEV_PER_CALL_KBPS",
+ "WEBRTC_VIDEO_ENCODER_DROPPED_FRAMES_PER_CALL_FPM",
+ "WEBRTC_VIDEO_ENCODER_FRAMERATE_10X_STD_DEV_PER_CALL",
+ "WEBRTC_VIDEO_ENCODER_FRAMERATE_AVG_PER_CALL",
+ "WEBRTC_VIDEO_ERROR_RECOVERY_MS",
+ "WEBRTC_VIDEO_QUALITY_INBOUND_BANDWIDTH_KBITS",
+ "WEBRTC_VIDEO_QUALITY_INBOUND_JITTER",
+ "WEBRTC_VIDEO_QUALITY_INBOUND_PACKETLOSS_RATE",
+ "WEBRTC_VIDEO_QUALITY_OUTBOUND_BANDWIDTH_KBITS",
+ "WEBRTC_VIDEO_QUALITY_OUTBOUND_JITTER",
+ "WEBRTC_VIDEO_QUALITY_OUTBOUND_PACKETLOSS_RATE",
+ "WEBRTC_VIDEO_QUALITY_OUTBOUND_RTT",
+ "WEBRTC_VIDEO_RECOVERY_AFTER_ERROR_PER_MIN",
+ "WEBRTC_VIDEO_RECOVERY_BEFORE_ERROR_PER_MIN",
+ "WEBSOCKETS_HANDSHAKE_TYPE",
+ "WORD_CACHE_HITS_CHROME",
+ "WORD_CACHE_HITS_CONTENT",
+ "WORD_CACHE_MISSES_CHROME",
+ "WORD_CACHE_MISSES_CONTENT",
+ "XMLHTTPREQUEST_ASYNC_OR_SYNC",
+ "XUL_CACHE_DISABLED"
+ ],
+ "n_buckets": [
+ "MEMORY_JS_GC_HEAP",
+ "MEMORY_HEAP_ALLOCATED",
+ "SYSTEM_FONT_FALLBACK_SCRIPT",
+ "HTTP_REQUEST_PER_PAGE_FROM_CACHE",
+ "SSL_TIME_UNTIL_READY",
+ "SSL_TIME_UNTIL_HANDSHAKE_FINISHED",
+ "CERT_VALIDATION_HTTP_REQUEST_CANCELED_TIME",
+ "CERT_VALIDATION_HTTP_REQUEST_SUCCEEDED_TIME",
+ "CERT_VALIDATION_HTTP_REQUEST_FAILED_TIME",
+ "SSL_OBSERVED_END_ENTITY_CERTIFICATE_LIFETIME",
+ "SPDY_SERVER_INITIATED_STREAMS",
+ "STS_POLL_AND_EVENTS_CYCLE",
+ "STS_POLL_CYCLE",
+ "STS_POLL_AND_EVENT_THE_LAST_CYCLE",
+ "STS_POLL_BLOCK_TIME",
+ "PRCONNECT_BLOCKING_TIME_NORMAL",
+ "PRCONNECT_BLOCKING_TIME_SHUTDOWN",
+ "PRCONNECT_BLOCKING_TIME_CONNECTIVITY_CHANGE",
+ "PRCONNECT_BLOCKING_TIME_LINK_CHANGE",
+ "PRCONNECT_BLOCKING_TIME_OFFLINE",
+ "PRCONNECTCONTINUE_BLOCKING_TIME_NORMAL",
+ "PRCONNECTCONTINUE_BLOCKING_TIME_SHUTDOWN",
+ "PRCONNECTCONTINUE_BLOCKING_TIME_CONNECTIVITY_CHANGE",
+ "PRCONNECTCONTINUE_BLOCKING_TIME_LINK_CHANGE",
+ "PRCONNECTCONTINUE_BLOCKING_TIME_OFFLINE",
+ "PRCLOSE_TCP_BLOCKING_TIME_NORMAL",
+ "PRCLOSE_TCP_BLOCKING_TIME_SHUTDOWN",
+ "PRCLOSE_TCP_BLOCKING_TIME_CONNECTIVITY_CHANGE",
+ "PRCLOSE_TCP_BLOCKING_TIME_LINK_CHANGE",
+ "PRCLOSE_TCP_BLOCKING_TIME_OFFLINE",
+ "PRCLOSE_UDP_BLOCKING_TIME_NORMAL",
+ "PRCLOSE_UDP_BLOCKING_TIME_SHUTDOWN",
+ "PRCLOSE_UDP_BLOCKING_TIME_CONNECTIVITY_CHANGE",
+ "PRCLOSE_UDP_BLOCKING_TIME_LINK_CHANGE",
+ "PRCLOSE_UDP_BLOCKING_TIME_OFFLINE",
+ "UPDATE_PREF_UPDATE_CANCELATIONS_EXTERNAL",
+ "UPDATE_PREF_UPDATE_CANCELATIONS_NOTIFY",
+ "UPDATE_STATUS_ERROR_CODE_COMPLETE_STARTUP",
+ "UPDATE_STATUS_ERROR_CODE_PARTIAL_STARTUP",
+ "UPDATE_STATUS_ERROR_CODE_UNKNOWN_STARTUP",
+ "UPDATE_STATUS_ERROR_CODE_COMPLETE_STAGE",
+ "UPDATE_STATUS_ERROR_CODE_PARTIAL_STAGE",
+ "UPDATE_STATUS_ERROR_CODE_UNKNOWN_STAGE",
+ "SECURITY_UI",
+ "CRASH_STORE_COMPRESSED_BYTES",
+ "MEDIA_WMF_DECODE_ERROR",
+ "VIDEO_CANPLAYTYPE_H264_CONSTRAINT_SET_FLAG",
+ "VIDEO_CANPLAYTYPE_H264_PROFILE",
+ "VIDEO_DECODED_H264_SPS_CONSTRAINT_SET_FLAG",
+ "VIDEO_DECODED_H264_SPS_PROFILE",
+ "WEBRTC_AVSYNC_WHEN_AUDIO_LAGS_VIDEO_MS",
+ "WEBRTC_AVSYNC_WHEN_VIDEO_LAGS_AUDIO_MS",
+ "WEBRTC_VIDEO_QUALITY_INBOUND_BANDWIDTH_KBITS",
+ "WEBRTC_AUDIO_QUALITY_INBOUND_BANDWIDTH_KBITS",
+ "WEBRTC_VIDEO_QUALITY_OUTBOUND_BANDWIDTH_KBITS",
+ "WEBRTC_AUDIO_QUALITY_OUTBOUND_BANDWIDTH_KBITS",
+ "WEBRTC_AUDIO_QUALITY_INBOUND_JITTER",
+ "WEBRTC_VIDEO_QUALITY_OUTBOUND_JITTER",
+ "WEBRTC_AUDIO_QUALITY_OUTBOUND_JITTER",
+ "WEBRTC_VIDEO_ERROR_RECOVERY_MS",
+ "WEBRTC_VIDEO_RECOVERY_BEFORE_ERROR_PER_MIN",
+ "WEBRTC_VIDEO_RECOVERY_AFTER_ERROR_PER_MIN",
+ "WEBRTC_VIDEO_QUALITY_OUTBOUND_RTT",
+ "WEBRTC_AUDIO_QUALITY_OUTBOUND_RTT",
+ "WEBRTC_CALL_DURATION",
+ "DEVTOOLS_DEBUGGER_DISPLAY_SOURCE_LOCAL_MS",
+ "DEVTOOLS_DEBUGGER_DISPLAY_SOURCE_REMOTE_MS",
+ "DEVTOOLS_SAVE_HEAP_SNAPSHOT_MS",
+ "DEVTOOLS_READ_HEAP_SNAPSHOT_MS",
+ "DEVTOOLS_HEAP_SNAPSHOT_NODE_COUNT",
+ "DEVTOOLS_HEAP_SNAPSHOT_EDGE_COUNT",
+ "NETWORK_CACHE_HIT_RATE_PER_CACHE_SIZE",
+ "NETWORK_CACHE_METADATA_FIRST_READ_SIZE",
+ "NETWORK_CACHE_METADATA_SIZE",
+ "NETWORK_CACHE_HASH_STATS",
+ "SSL_CIPHER_SUITE_FULL",
+ "SSL_CIPHER_SUITE_RESUMED",
+ "SSL_HANDSHAKE_RESULT",
+ "SSL_REASONS_FOR_NOT_FALSE_STARTING",
+ "SSL_CERT_VERIFICATION_ERRORS",
+ "CERT_VALIDATION_SUCCESS_BY_CA",
+ "CERT_PINNING_FAILURES_BY_CA",
+ "CERT_PINNING_MOZ_RESULTS_BY_HOST",
+ "CERT_PINNING_MOZ_TEST_RESULTS_BY_HOST",
+ "GFX_CRASH",
+ "GC_REASON_2",
+ "GC_MINOR_REASON",
+ "GC_MINOR_REASON_LONG"
+ ],
+ "expiry_default": [
+ "A11Y_CONSUMERS",
+ "IDLE_NOTIFY_IDLE_MS",
+ "CACHE_MEMORY_SEARCH_2",
+ "OSFILE_WORKER_LAUNCH_MS",
+ "GEOLOCATION_OSX_SOURCE_IS_MLS",
+ "TRANSLATION_OPPORTUNITIES_BY_LANGUAGE",
+ "FX_THUMBNAILS_BG_CAPTURE_DONE_REASON_2",
+ "TRANSLATED_PAGES",
+ "FX_SESSION_RESTORE_SEND_UPDATE_CAUSED_OOM",
+ "SSL_PERMANENT_CERT_ERROR_OVERRIDES",
+ "FX_THUMBNAILS_BG_QUEUE_SIZE_ON_CAPTURE",
+ "AUTO_REJECTED_TRANSLATION_OFFERS",
+ "TRANSLATED_CHARACTERS",
+ "WEAVE_CONFIGURED",
+ "NEWTAB_PAGE_ENABLED",
+ "GRADIENT_DURATION",
+ "MOZ_SQLITE_OPEN_MS",
+ "SHOULD_TRANSLATION_UI_APPEAR",
+ "NEWTAB_PAGE_LIFE_SPAN",
+ "FX_TOTAL_TOP_VISITS",
+ "FX_SESSION_RESTORE_NUMBER_OF_EAGER_TABS_RESTORED",
+ "CACHE_DISK_SEARCH_2",
+ "FX_THUMBNAILS_BG_CAPTURE_QUEUE_TIME_MS",
+ "NEWTAB_PAGE_PINNED_SITES_COUNT",
+ "WEAVE_COMPLETE_SUCCESS_COUNT",
+ "A11Y_UPDATE_TIME",
+ "OSFILE_WRITEATOMIC_JANK_MS",
+ "STARTUP_MEASUREMENT_ERRORS",
+ "CERT_CHAIN_KEY_SIZE_STATUS",
+ "CHANGES_OF_TARGET_LANGUAGE",
+ "FX_NEW_WINDOW_MS",
+ "PDF_VIEWER_TIME_TO_VIEW_MS",
+ "SSL_OCSP_MAY_FETCH",
+ "MOZ_SQLITE_OTHER_READ_B",
+ "CHECK_JAVA_ENABLED",
+ "TRANSLATION_OPPORTUNITIES",
+ "FX_SESSION_RESTORE_CONTENT_COLLECT_DATA_LONGEST_OP_MS",
+ "NEWTAB_PAGE_BLOCKED_SITES_COUNT",
+ "FX_SESSION_RESTORE_NUMBER_OF_TABS_RESTORED",
+ "WEAVE_START_COUNT",
+ "FX_SESSION_RESTORE_RESTORE_WINDOW_MS",
+ "NEWTAB_PAGE_LIFE_SPAN_SUGGESTED",
+ "HTTP_DISK_CACHE_OVERHEAD",
+ "FX_SESSION_RESTORE_CORRUPT_FILE",
+ "FX_TAB_CLICK_MS",
+ "LOCALDOMSTORAGE_GETVALUE_BLOCKING_MS",
+ "PDF_VIEWER_DOCUMENT_VERSION",
+ "GEOLOCATION_ACCURACY_EXPONENTIAL",
+ "FX_SESSION_RESTORE_READ_FILE_MS",
+ "CHANGES_OF_DETECTED_LANGUAGE",
+ "OSFILE_WORKER_READY_MS",
+ "PDF_VIEWER_FORM",
+ "FX_SESSION_RESTORE_AUTO_RESTORE_DURATION_UNTIL_EAGER_TABS_RESTORED_MS",
+ "FX_SESSION_RESTORE_COLLECT_DATA_MS",
+ "FX_SESSION_RESTORE_FILE_SIZE_BYTES",
+ "STARTUP_CACHE_AGE_HOURS",
+ "FX_SESSION_RESTORE_DOM_STORAGE_SIZE_ESTIMATE_CHARS",
+ "DATA_STORAGE_ENTRIES",
+ "TRANSLATED_PAGES_BY_LANGUAGE",
+ "MOZ_SQLITE_OTHER_WRITE_B",
+ "LOCALDOMSTORAGE_SHUTDOWN_DATABASE_MS",
+ "SSL_CERT_VERIFICATION_ERRORS",
+ "FX_SESSION_RESTORE_NUMBER_OF_WINDOWS_RESTORED",
+ "MOZ_SQLITE_PLACES_WRITE_MS",
+ "FX_THUMBNAILS_BG_CAPTURE_CANVAS_DRAW_TIME_MS",
+ "FX_SESSION_RESTORE_STARTUP_INIT_SESSION_MS",
+ "FX_SESSION_RESTORE_WRITE_FILE_MS",
+ "FX_THUMBNAILS_BG_CAPTURE_PAGE_LOAD_TIME_MS",
+ "REQUESTS_OF_ORIGINAL_CONTENT",
+ "NEWTAB_PAGE_ENHANCED",
+ "CERT_CHAIN_SHA1_POLICY_STATUS",
+ "PDF_VIEWER_DOCUMENT_SIZE_KB",
+ "FX_THUMBNAILS_BG_CAPTURE_SERVICE_TIME_MS",
+ "SHUTDOWN_OK",
+ "PLACES_BACKUPS_TOJSON_MS",
+ "A11Y_ISIMPLEDOM_USAGE_FLAG",
+ "FX_SESSION_RESTORE_MANUAL_RESTORE_DURATION_UNTIL_EAGER_TABS_RESTORED_MS",
+ "PDF_VIEWER_DOCUMENT_GENERATOR",
+ "PDF_VIEWER_FALLBACK_SHOWN",
+ "FX_SESSION_RESTORE_ALL_FILES_CORRUPT",
+ "SHOULD_AUTO_DETECT_LANGUAGE",
+ "A11Y_IATABLE_USAGE_FLAG",
+ "FX_PAGE_LOAD_MS",
+ "LOCALDOMSTORAGE_PRELOAD_PENDING_ON_FIRST_ACCESS",
+ "DENIED_TRANSLATION_OFFERS",
+ "XUL_CACHE_DISABLED",
+ "PAGE_FAULTS_HARD",
+ "BROWSERPROVIDER_XUL_IMPORT_BOOKMARKS",
+ "PDF_VIEWER_USED",
+ "NETWORK_DISK_CACHE_OPEN",
+ "GEOLOCATION_WIN8_SOURCE_IS_MLS"
+ ]
+}
diff --git a/toolkit/components/telemetry/histogram_tools.py b/toolkit/components/telemetry/histogram_tools.py
new file mode 100644
index 000000000..db64be268
--- /dev/null
+++ b/toolkit/components/telemetry/histogram_tools.py
@@ -0,0 +1,513 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import collections
+import itertools
+import json
+import math
+import os
+import re
+import sys
+
+# Constants.
+MAX_LABEL_LENGTH = 20
+MAX_LABEL_COUNT = 100
+
+# histogram_tools.py is used by scripts from a mozilla-central build tree
+# and also by outside consumers, such as the telemetry server. We need
+# to ensure that importing things works in both contexts. Therefore,
+# unconditionally importing things that are local to the build tree, such
+# as buildconfig, is a no-no.
+try:
+ import buildconfig
+
+ # Need to update sys.path to be able to find usecounters.
+ sys.path.append(os.path.join(buildconfig.topsrcdir, 'dom/base/'))
+except ImportError:
+ # Must be in an out-of-tree usage scenario. Trust that whoever is
+ # running this script knows we need the usecounters module and has
+ # ensured it's in our sys.path.
+ pass
+
+from collections import OrderedDict
+
+def table_dispatch(kind, table, body):
+ """Call body with table[kind] if it exists. Raise an error otherwise."""
+ if kind in table:
+ return body(table[kind])
+ else:
+ raise BaseException, "don't know how to handle a histogram of kind %s" % kind
+
+class DefinitionException(BaseException):
+ pass
+
+def linear_buckets(dmin, dmax, n_buckets):
+ ret_array = [0] * n_buckets
+ dmin = float(dmin)
+ dmax = float(dmax)
+ for i in range(1, n_buckets):
+ linear_range = (dmin * (n_buckets - 1 - i) + dmax * (i - 1)) / (n_buckets - 2)
+ ret_array[i] = int(linear_range + 0.5)
+ return ret_array
+
+def exponential_buckets(dmin, dmax, n_buckets):
+ log_max = math.log(dmax);
+ bucket_index = 2;
+ ret_array = [0] * n_buckets
+ current = dmin
+ ret_array[1] = current
+ for bucket_index in range(2, n_buckets):
+ log_current = math.log(current)
+ log_ratio = (log_max - log_current) / (n_buckets - bucket_index)
+ log_next = log_current + log_ratio
+ next_value = int(math.floor(math.exp(log_next) + 0.5))
+ if next_value > current:
+ current = next_value
+ else:
+ current = current + 1
+ ret_array[bucket_index] = current
+ return ret_array
+
+always_allowed_keys = ['kind', 'description', 'cpp_guard', 'expires_in_version',
+ 'alert_emails', 'keyed', 'releaseChannelCollection',
+ 'bug_numbers']
+
+whitelists = None;
+try:
+ whitelist_path = os.path.join(os.path.abspath(os.path.realpath(os.path.dirname(__file__))), 'histogram-whitelists.json')
+ with open(whitelist_path, 'r') as f:
+ try:
+ whitelists = json.load(f)
+ for name, whitelist in whitelists.iteritems():
+ whitelists[name] = set(whitelist)
+ except ValueError, e:
+ raise BaseException, 'error parsing whitelist (%s)' % whitelist_path
+except IOError:
+ whitelists = None
+ print 'Unable to parse whitelist (%s). Assuming all histograms are acceptable.' % whitelist_path
+
+class Histogram:
+ """A class for representing a histogram definition."""
+
+ def __init__(self, name, definition, strict_type_checks=False):
+ """Initialize a histogram named name with the given definition.
+definition is a dict-like object that must contain at least the keys:
+
+ - 'kind': The kind of histogram. Must be one of 'boolean', 'flag',
+ 'count', 'enumerated', 'linear', or 'exponential'.
+ - 'description': A textual description of the histogram.
+ - 'strict_type_checks': A boolean indicating whether to use the new, stricter type checks.
+ The server-side still has to deal with old, oddly typed submissions,
+ so we have to skip them there by default.
+
+The key 'cpp_guard' is optional; if present, it denotes a preprocessor
+symbol that should guard C/C++ definitions associated with the histogram."""
+ self._strict_type_checks = strict_type_checks
+ self._is_use_counter = name.startswith("USE_COUNTER2_")
+ self.verify_attributes(name, definition)
+ self._name = name
+ self._description = definition['description']
+ self._kind = definition['kind']
+ self._cpp_guard = definition.get('cpp_guard')
+ self._keyed = definition.get('keyed', False)
+ self._expiration = definition.get('expires_in_version')
+ self._labels = definition.get('labels', [])
+ self.compute_bucket_parameters(definition)
+ table = {
+ 'boolean': 'BOOLEAN',
+ 'flag': 'FLAG',
+ 'count': 'COUNT',
+ 'enumerated': 'LINEAR',
+ 'categorical': 'CATEGORICAL',
+ 'linear': 'LINEAR',
+ 'exponential': 'EXPONENTIAL',
+ }
+ table_dispatch(self.kind(), table,
+ lambda k: self._set_nsITelemetry_kind(k))
+ datasets = { 'opt-in': 'DATASET_RELEASE_CHANNEL_OPTIN',
+ 'opt-out': 'DATASET_RELEASE_CHANNEL_OPTOUT' }
+ value = definition.get('releaseChannelCollection', 'opt-in')
+ if not value in datasets:
+ raise DefinitionException, "unknown release channel collection policy for " + name
+ self._dataset = "nsITelemetry::" + datasets[value]
+
+ def name(self):
+ """Return the name of the histogram."""
+ return self._name
+
+ def description(self):
+ """Return the description of the histogram."""
+ return self._description
+
+ def kind(self):
+ """Return the kind of the histogram.
+Will be one of 'boolean', 'flag', 'count', 'enumerated', 'categorical', 'linear',
+or 'exponential'."""
+ return self._kind
+
+ def expiration(self):
+ """Return the expiration version of the histogram."""
+ return self._expiration
+
+ def nsITelemetry_kind(self):
+ """Return the nsITelemetry constant corresponding to the kind of
+the histogram."""
+ return self._nsITelemetry_kind
+
+ def _set_nsITelemetry_kind(self, kind):
+ self._nsITelemetry_kind = "nsITelemetry::HISTOGRAM_%s" % kind
+
+ def low(self):
+ """Return the lower bound of the histogram."""
+ return self._low
+
+ def high(self):
+ """Return the high bound of the histogram."""
+ return self._high
+
+ def n_buckets(self):
+ """Return the number of buckets in the histogram."""
+ return self._n_buckets
+
+ def cpp_guard(self):
+ """Return the preprocessor symbol that should guard C/C++ definitions
+associated with the histogram. Returns None if no guarding is necessary."""
+ return self._cpp_guard
+
+ def keyed(self):
+ """Returns True if this a keyed histogram, false otherwise."""
+ return self._keyed
+
+ def dataset(self):
+ """Returns the dataset this histogram belongs into."""
+ return self._dataset
+
+ def labels(self):
+ """Returns a list of labels for a categorical histogram, [] for others."""
+ return self._labels
+
+ def ranges(self):
+ """Return an array of lower bounds for each bucket in the histogram."""
+ table = {
+ 'boolean': linear_buckets,
+ 'flag': linear_buckets,
+ 'count': linear_buckets,
+ 'enumerated': linear_buckets,
+ 'categorical': linear_buckets,
+ 'linear': linear_buckets,
+ 'exponential': exponential_buckets,
+ }
+ return table_dispatch(self.kind(), table,
+ lambda p: p(self.low(), self.high(), self.n_buckets()))
+
+ def compute_bucket_parameters(self, definition):
+ table = {
+ 'boolean': Histogram.boolean_flag_bucket_parameters,
+ 'flag': Histogram.boolean_flag_bucket_parameters,
+ 'count': Histogram.boolean_flag_bucket_parameters,
+ 'enumerated': Histogram.enumerated_bucket_parameters,
+ 'categorical': Histogram.categorical_bucket_parameters,
+ 'linear': Histogram.linear_bucket_parameters,
+ 'exponential': Histogram.exponential_bucket_parameters,
+ }
+ table_dispatch(self.kind(), table,
+ lambda p: self.set_bucket_parameters(*p(definition)))
+
+ def verify_attributes(self, name, definition):
+ global always_allowed_keys
+ general_keys = always_allowed_keys + ['low', 'high', 'n_buckets']
+
+ table = {
+ 'boolean': always_allowed_keys,
+ 'flag': always_allowed_keys,
+ 'count': always_allowed_keys,
+ 'enumerated': always_allowed_keys + ['n_values'],
+ 'categorical': always_allowed_keys + ['labels'],
+ 'linear': general_keys,
+ 'exponential': general_keys,
+ }
+ # We removed extended_statistics_ok on the client, but the server-side,
+ # where _strict_type_checks==False, has to deal with historical data.
+ if not self._strict_type_checks:
+ table['exponential'].append('extended_statistics_ok')
+
+ table_dispatch(definition['kind'], table,
+ lambda allowed_keys: Histogram.check_keys(name, definition, allowed_keys))
+
+ self.check_name(name)
+ self.check_field_types(name, definition)
+ self.check_whitelistable_fields(name, definition)
+ self.check_expiration(name, definition)
+ self.check_label_values(name, definition)
+
+ def check_name(self, name):
+ if '#' in name:
+ raise ValueError, '"#" not permitted for %s' % (name)
+
+ # Avoid C++ identifier conflicts between histogram enums and label enum names.
+ if name.startswith("LABELS_"):
+ raise ValueError, "Histogram name '%s' can not start with LABELS_" % (name)
+
+ # To make it easier to generate C++ identifiers from this etc., we restrict
+ # the histogram names to a strict pattern.
+ # We skip this on the server to avoid failures with old Histogram.json revisions.
+ if self._strict_type_checks:
+ pattern = '^[a-z][a-z0-9_]+[a-z0-9]$'
+ if not re.match(pattern, name, re.IGNORECASE):
+ raise ValueError, "Histogram name '%s' doesn't confirm to '%s'" % (name, pattern)
+
+ def check_expiration(self, name, definition):
+ field = 'expires_in_version'
+ expiration = definition.get(field)
+
+ if not expiration:
+ return
+
+ # We forbid new probes from using "expires_in_version" : "default" field/value pair.
+ # Old ones that use this are added to the whitelist.
+ if expiration == "default" and name not in whitelists['expiry_default']:
+ raise ValueError, 'New histogram "%s" cannot have "default" %s value.' % (name, field)
+
+ if re.match(r'^[1-9][0-9]*$', expiration):
+ expiration = expiration + ".0a1"
+ elif re.match(r'^[1-9][0-9]*\.0$', expiration):
+ expiration = expiration + "a1"
+
+ definition[field] = expiration
+
+ def check_label_values(self, name, definition):
+ labels = definition.get('labels')
+ if not labels:
+ return
+
+ invalid = filter(lambda l: len(l) > MAX_LABEL_LENGTH, labels)
+ if len(invalid) > 0:
+ raise ValueError, 'Label values for %s exceed length limit of %d: %s' % \
+ (name, MAX_LABEL_LENGTH, ', '.join(invalid))
+
+ if len(labels) > MAX_LABEL_COUNT:
+ raise ValueError, 'Label count for %s exceeds limit of %d' % \
+ (name, MAX_LABEL_COUNT)
+
+ # To make it easier to generate C++ identifiers from this etc., we restrict
+ # the label values to a strict pattern.
+ pattern = '^[a-z][a-z0-9_]+[a-z0-9]$'
+ invalid = filter(lambda l: not re.match(pattern, l, re.IGNORECASE), labels)
+ if len(invalid) > 0:
+ raise ValueError, 'Label values for %s are not matching pattern "%s": %s' % \
+ (name, pattern, ', '.join(invalid))
+
+ # Check for the presence of fields that old histograms are whitelisted for.
+ def check_whitelistable_fields(self, name, definition):
+ # Use counters don't have any mechanism to add the fields checked here,
+ # so skip the check for them.
+ # We also don't need to run any of these checks on the server.
+ if self._is_use_counter or not self._strict_type_checks:
+ return
+
+ # In the pipeline we don't have whitelists available.
+ if whitelists is None:
+ return
+
+ for field in ['alert_emails', 'bug_numbers']:
+ if field not in definition and name not in whitelists[field]:
+ raise KeyError, 'New histogram "%s" must have a %s field.' % (name, field)
+ if field in definition and name in whitelists[field]:
+ msg = 'Should remove histogram "%s" from the whitelist for "%s" in histogram-whitelists.json'
+ raise KeyError, msg % (name, field)
+
+ def check_field_types(self, name, definition):
+ # Define expected types for the histogram properties.
+ type_checked_fields = {
+ "n_buckets": int,
+ "n_values": int,
+ "low": int,
+ "high": int,
+ "keyed": bool,
+ "expires_in_version": basestring,
+ "kind": basestring,
+ "description": basestring,
+ "cpp_guard": basestring,
+ "releaseChannelCollection": basestring,
+ }
+
+ # For list fields we check the items types.
+ type_checked_list_fields = {
+ "bug_numbers": int,
+ "alert_emails": basestring,
+ "labels": basestring,
+ }
+
+ # For the server-side, where _strict_type_checks==False, we want to
+ # skip the stricter type checks for these fields for dealing with
+ # historical data.
+ coerce_fields = ["low", "high", "n_values", "n_buckets"]
+ if not self._strict_type_checks:
+ def try_to_coerce_to_number(v):
+ try:
+ return eval(v, {})
+ except:
+ return v
+ for key in [k for k in coerce_fields if k in definition]:
+ definition[key] = try_to_coerce_to_number(definition[key])
+ # This handles old "keyed":"true" definitions (bug 1271986).
+ if definition.get("keyed", None) == "true":
+ definition["keyed"] = True
+
+ def nice_type_name(t):
+ if t is basestring:
+ return "string"
+ return t.__name__
+
+ for key, key_type in type_checked_fields.iteritems():
+ if not key in definition:
+ continue
+ if not isinstance(definition[key], key_type):
+ raise ValueError, ('value for key "{0}" in Histogram "{1}" '
+ 'should be {2}').format(key, name, nice_type_name(key_type))
+
+ for key, key_type in type_checked_list_fields.iteritems():
+ if not key in definition:
+ continue
+ if not all(isinstance(x, key_type) for x in definition[key]):
+ raise ValueError, ('all values for list "{0}" in Histogram "{1}" '
+ 'should be {2}').format(key, name, nice_type_name(key_type))
+
+ @staticmethod
+ def check_keys(name, definition, allowed_keys):
+ for key in definition.iterkeys():
+ if key not in allowed_keys:
+ raise KeyError, '%s not permitted for %s' % (key, name)
+
+ def set_bucket_parameters(self, low, high, n_buckets):
+ self._low = low
+ self._high = high
+ self._n_buckets = n_buckets
+ if whitelists is not None and self._n_buckets > 100 and type(self._n_buckets) is int:
+ if self._name not in whitelists['n_buckets']:
+ raise KeyError, ('New histogram "%s" is not permitted to have more than 100 buckets. '
+ 'Histograms with large numbers of buckets use disproportionately high amounts of resources. '
+ 'Contact the Telemetry team (e.g. in #telemetry) if you think an exception ought to be made.' % self._name)
+
+ @staticmethod
+ def boolean_flag_bucket_parameters(definition):
+ return (1, 2, 3)
+
+ @staticmethod
+ def linear_bucket_parameters(definition):
+ return (definition.get('low', 1),
+ definition['high'],
+ definition['n_buckets'])
+
+ @staticmethod
+ def enumerated_bucket_parameters(definition):
+ n_values = definition['n_values']
+ return (1, n_values, n_values + 1)
+
+ @staticmethod
+ def categorical_bucket_parameters(definition):
+ n_values = len(definition['labels'])
+ return (1, n_values, n_values + 1)
+
+ @staticmethod
+ def exponential_bucket_parameters(definition):
+ return (definition.get('low', 1),
+ definition['high'],
+ definition['n_buckets'])
+
+# We support generating histograms from multiple different input files, not
+# just Histograms.json. For each file's basename, we have a specific
+# routine to parse that file, and return a dictionary mapping histogram
+# names to histogram parameters.
+def from_Histograms_json(filename):
+ with open(filename, 'r') as f:
+ try:
+ histograms = json.load(f, object_pairs_hook=OrderedDict)
+ except ValueError, e:
+ raise BaseException, "error parsing histograms in %s: %s" % (filename, e.message)
+ return histograms
+
+def from_UseCounters_conf(filename):
+ return usecounters.generate_histograms(filename)
+
+def from_nsDeprecatedOperationList(filename):
+ operation_regex = re.compile('^DEPRECATED_OPERATION\\(([^)]+)\\)')
+ histograms = collections.OrderedDict()
+
+ with open(filename, 'r') as f:
+ for line in f:
+ match = operation_regex.search(line)
+ if not match:
+ continue
+
+ op = match.group(1)
+
+ def add_counter(context):
+ name = 'USE_COUNTER2_DEPRECATED_%s_%s' % (op, context.upper())
+ histograms[name] = {
+ 'expires_in_version': 'never',
+ 'kind': 'boolean',
+ 'description': 'Whether a %s used %s' % (context, op)
+ }
+ add_counter('document')
+ add_counter('page')
+
+ return histograms
+
+FILENAME_PARSERS = {
+ 'Histograms.json': from_Histograms_json,
+ 'nsDeprecatedOperationList.h': from_nsDeprecatedOperationList,
+}
+
+# Similarly to the dance above with buildconfig, usecounters may not be
+# available, so handle that gracefully.
+try:
+ import usecounters
+
+ FILENAME_PARSERS['UseCounters.conf'] = from_UseCounters_conf
+except ImportError:
+ pass
+
+def from_files(filenames):
+ """Return an iterator that provides a sequence of Histograms for
+the histograms defined in filenames.
+ """
+ all_histograms = OrderedDict()
+ for filename in filenames:
+ parser = FILENAME_PARSERS[os.path.basename(filename)]
+ histograms = parser(filename)
+
+ # OrderedDicts are important, because then the iteration order over
+ # the parsed histograms is stable, which makes the insertion into
+ # all_histograms stable, which makes ordering in generated files
+ # stable, which makes builds more deterministic.
+ if not isinstance(histograms, OrderedDict):
+ raise BaseException, "histogram parser didn't provide an OrderedDict"
+
+ for (name, definition) in histograms.iteritems():
+ if all_histograms.has_key(name):
+ raise DefinitionException, "duplicate histogram name %s" % name
+ all_histograms[name] = definition
+
+ # We require that all USE_COUNTER2_* histograms be defined in a contiguous
+ # block.
+ use_counter_indices = filter(lambda x: x[1].startswith("USE_COUNTER2_"),
+ enumerate(all_histograms.iterkeys()));
+ if use_counter_indices:
+ lower_bound = use_counter_indices[0][0]
+ upper_bound = use_counter_indices[-1][0]
+ n_counters = upper_bound - lower_bound + 1
+ if n_counters != len(use_counter_indices):
+ raise DefinitionException, "use counter histograms must be defined in a contiguous block"
+
+ # Check that histograms that were removed from Histograms.json etc. are also removed from the whitelists.
+ if whitelists is not None:
+ all_whitelist_entries = itertools.chain.from_iterable(whitelists.itervalues())
+ orphaned = set(all_whitelist_entries) - set(all_histograms.keys())
+ if len(orphaned) > 0:
+ msg = 'The following entries are orphaned and should be removed from histogram-whitelists.json: %s'
+ raise BaseException, msg % (', '.join(sorted(orphaned)))
+
+ for (name, definition) in all_histograms.iteritems():
+ yield Histogram(name, definition, strict_type_checks=True)
diff --git a/toolkit/components/telemetry/moz.build b/toolkit/components/telemetry/moz.build
new file mode 100644
index 000000000..118d61b71
--- /dev/null
+++ b/toolkit/components/telemetry/moz.build
@@ -0,0 +1,130 @@
+# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*-
+# vim: set filetype=python:
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+HAS_MISC_RULE = True
+
+include('/ipc/chromium/chromium-config.mozbuild')
+
+FINAL_LIBRARY = 'xul'
+
+DEFINES['MOZ_APP_VERSION'] = '"%s"' % CONFIG['MOZ_APP_VERSION']
+
+LOCAL_INCLUDES += [
+ '/xpcom/build',
+ '/xpcom/threads',
+]
+
+SPHINX_TREES['telemetry'] = 'docs'
+
+if CONFIG['GNU_CXX']:
+ CXXFLAGS += ['-Wno-error=shadow']
+
+XPCSHELL_TESTS_MANIFESTS += ['tests/unit/xpcshell.ini']
+BROWSER_CHROME_MANIFESTS += ['tests/browser/browser.ini']
+
+XPIDL_SOURCES += [
+ 'nsITelemetry.idl',
+]
+
+XPIDL_MODULE = 'telemetry'
+
+EXPORTS.mozilla += [
+ '!TelemetryEventEnums.h',
+ '!TelemetryHistogramEnums.h',
+ '!TelemetryScalarEnums.h',
+ 'ProcessedStack.h',
+ 'Telemetry.h',
+ 'TelemetryComms.h',
+ 'ThreadHangStats.h',
+]
+
+SOURCES += [
+ 'Telemetry.cpp',
+ 'TelemetryCommon.cpp',
+ 'TelemetryEvent.cpp',
+ 'TelemetryHistogram.cpp',
+ 'TelemetryScalar.cpp',
+ 'WebrtcTelemetry.cpp',
+]
+
+EXTRA_COMPONENTS += [
+ 'TelemetryStartup.js',
+ 'TelemetryStartup.manifest'
+]
+
+EXTRA_JS_MODULES += [
+ 'GCTelemetry.jsm',
+ 'TelemetryArchive.jsm',
+ 'TelemetryController.jsm',
+ 'TelemetryEnvironment.jsm',
+ 'TelemetryLog.jsm',
+ 'TelemetryReportingPolicy.jsm',
+ 'TelemetrySend.jsm',
+ 'TelemetrySession.jsm',
+ 'TelemetryStopwatch.jsm',
+ 'TelemetryStorage.jsm',
+ 'TelemetryTimestamps.jsm',
+ 'TelemetryUtils.jsm',
+ 'ThirdPartyCookieProbe.jsm',
+ 'UITelemetry.jsm',
+]
+
+TESTING_JS_MODULES += [
+ 'tests/unit/TelemetryArchiveTesting.jsm',
+]
+
+GENERATED_FILES = [
+ 'TelemetryEventData.h',
+ 'TelemetryEventEnums.h',
+ 'TelemetryHistogramData.inc',
+ 'TelemetryHistogramEnums.h',
+ 'TelemetryScalarData.h',
+ 'TelemetryScalarEnums.h',
+]
+
+# Generate histogram files.
+histogram_files = [
+ 'Histograms.json',
+ '/dom/base/UseCounters.conf',
+ '/dom/base/nsDeprecatedOperationList.h',
+]
+
+data = GENERATED_FILES['TelemetryHistogramData.inc']
+data.script = 'gen-histogram-data.py'
+data.inputs = histogram_files
+
+enums = GENERATED_FILES['TelemetryHistogramEnums.h']
+enums.script = 'gen-histogram-enum.py'
+enums.inputs = histogram_files
+
+# Generate scalar files.
+scalar_files = [
+ 'Scalars.yaml',
+]
+
+scalar_data = GENERATED_FILES['TelemetryScalarData.h']
+scalar_data.script = 'gen-scalar-data.py'
+scalar_data.inputs = scalar_files
+
+scalar_enums = GENERATED_FILES['TelemetryScalarEnums.h']
+scalar_enums.script = 'gen-scalar-enum.py'
+scalar_enums.inputs = scalar_files
+
+# Generate event files.
+event_files = [
+ 'Events.yaml',
+]
+
+event_data = GENERATED_FILES['TelemetryEventData.h']
+event_data.script = 'gen-event-data.py'
+event_data.inputs = event_files
+
+event_enums = GENERATED_FILES['TelemetryEventEnums.h']
+event_enums.script = 'gen-event-enum.py'
+event_enums.inputs = event_files
+
+with Files('**'):
+ BUG_COMPONENT = ('Toolkit', 'Telemetry')
diff --git a/toolkit/components/telemetry/nsITelemetry.idl b/toolkit/components/telemetry/nsITelemetry.idl
new file mode 100644
index 000000000..3b74b2d1b
--- /dev/null
+++ b/toolkit/components/telemetry/nsITelemetry.idl
@@ -0,0 +1,469 @@
+/* -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil; tab-width: 8 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "nsISupports.idl"
+#include "nsIFile.idl"
+
+[scriptable,function, uuid(3d3b9075-5549-4244-9c08-b64fefa1dd60)]
+interface nsIFetchTelemetryDataCallback : nsISupports
+{
+ void complete();
+};
+
+[scriptable, uuid(273d2dd0-6c63-475a-b864-cb65160a1909)]
+interface nsITelemetry : nsISupports
+{
+ /**
+ * Histogram types:
+ * HISTOGRAM_EXPONENTIAL - buckets increase exponentially
+ * HISTOGRAM_LINEAR - buckets increase linearly
+ * HISTOGRAM_BOOLEAN - For storing 0/1 values
+ * HISTOGRAM_FLAG - For storing a single value; its count is always == 1.
+ * HISTOGRAM_COUNT - For storing counter values without bucketing.
+ * HISTOGRAM_CATEGORICAL - For storing enumerated values by label.
+ */
+ const unsigned long HISTOGRAM_EXPONENTIAL = 0;
+ const unsigned long HISTOGRAM_LINEAR = 1;
+ const unsigned long HISTOGRAM_BOOLEAN = 2;
+ const unsigned long HISTOGRAM_FLAG = 3;
+ const unsigned long HISTOGRAM_COUNT = 4;
+ const unsigned long HISTOGRAM_CATEGORICAL = 5;
+
+ /**
+ * Scalar types:
+ * SCALAR_COUNT - for storing a numeric value
+ * SCALAR_STRING - for storing a string value
+ * SCALAR_BOOLEAN - for storing a boolean value
+ */
+ const unsigned long SCALAR_COUNT = 0;
+ const unsigned long SCALAR_STRING = 1;
+ const unsigned long SCALAR_BOOLEAN = 2;
+
+ /**
+ * Dataset types:
+ * DATASET_RELEASE_CHANNEL_OPTOUT - the basic dataset that is on-by-default on all channels
+ * DATASET_RELEASE_CHANNEL_OPTIN - the extended dataset that is opt-in on release,
+ * opt-out on pre-release channels.
+ */
+ const unsigned long DATASET_RELEASE_CHANNEL_OPTOUT = 0;
+ const unsigned long DATASET_RELEASE_CHANNEL_OPTIN = 1;
+
+
+ /**
+ * An object containing a snapshot from all of the currently registered histograms.
+ * { name1: {data1}, name2:{data2}...}
+ * where data is consists of the following properties:
+ * min - Minimal bucket size
+ * max - Maximum bucket size
+ * histogram_type - HISTOGRAM_EXPONENTIAL, HISTOGRAM_LINEAR, HISTOGRAM_BOOLEAN
+ * or HISTOGRAM_COUNT
+ * counts - array representing contents of the buckets in the histogram
+ * ranges - an array with calculated bucket sizes
+ * sum - sum of the bucket contents
+ */
+ [implicit_jscontext]
+ readonly attribute jsval histogramSnapshots;
+
+ /**
+ * Get a snapshot of the internally duplicated subsession histograms.
+ * @param clear Whether to clear out the subsession histograms after snapshotting.
+ * @return An object as histogramSnapshots, except this contains the internally duplicated histograms for subsession telemetry.
+ */
+ [implicit_jscontext]
+ jsval snapshotSubsessionHistograms([optional] in boolean clear);
+
+ /**
+ * The amount of time, in milliseconds, that the last session took
+ * to shutdown. Reads as 0 to indicate failure.
+ */
+ readonly attribute uint32_t lastShutdownDuration;
+
+ /**
+ * The number of failed profile lock attempts that have occurred prior to
+ * successfully locking the profile
+ */
+ readonly attribute uint32_t failedProfileLockCount;
+
+ /*
+ * An object containing information about slow SQL statements.
+ *
+ * {
+ * mainThread: { "sqlString1": [<hit count>, <total time>], "sqlString2": [...], ... },
+ * otherThreads: { "sqlString3": [<hit count>, <total time>], "sqlString4": [...], ... }
+ * }
+ *
+ * where:
+ * mainThread: Slow statements that executed on the main thread
+ * otherThreads: Slow statements that executed on a non-main thread
+ * sqlString - String of the offending statement (see note)
+ * hit count - The number of times this statement required longer than the threshold time to execute
+ * total time - The sum of all execution times above the threshold time for this statement
+ *
+ * Note that dynamic SQL strings and SQL strings executed against addon DBs could contain private information.
+ * This property represents such SQL as aggregate database-level stats and the sqlString contains the database
+ * filename instead.
+ */
+ [implicit_jscontext]
+ readonly attribute jsval slowSQL;
+
+ /*
+ * See slowSQL above.
+ *
+ * An object containing full strings of every slow SQL statement if toolkit.telemetry.debugSlowSql = true
+ * The returned SQL strings may contain private information and should not be reported to Telemetry.
+ */
+ [implicit_jscontext]
+ readonly attribute jsval debugSlowSQL;
+
+ /*
+ * An object containing information about Webrtc related stats. For now it
+ * only contains local and remote ICE candidates avaiable when a Webrtc
+ * PeerConnection gets terminated.
+ */
+ [implicit_jscontext]
+ readonly attribute jsval webrtcStats;
+
+ /**
+ * A number representing the highest number of concurrent threads
+ * reached during this session.
+ */
+ readonly attribute uint32_t maximalNumberOfConcurrentThreads;
+
+ /*
+ * An array of chrome hang reports. Each element is a hang report represented
+ * as an object containing the hang duration, call stack PCs and information
+ * about modules in memory.
+ */
+ [implicit_jscontext]
+ readonly attribute jsval chromeHangs;
+
+ /*
+ * An array of thread hang stats,
+ * [<thread>, <thread>, ...]
+ * <thread> represents a single thread,
+ * {"name": "<name>",
+ * "activity": <time>,
+ * "hangs": [<hang>, <hang>, ...]}
+ * <time> represents a histogram of time intervals in milliseconds,
+ * with the same format as histogramSnapshots
+ * <hang> represents a particular hang,
+ * {"stack": <stack>, "nativeStack": <stack>, "histogram": <time>}
+ * <stack> represents the hang's stack,
+ * ["<frame_0>", "<frame_1>", ...]
+ */
+ [implicit_jscontext]
+ readonly attribute jsval threadHangStats;
+
+ /*
+ * An object with two fields: memoryMap and stacks.
+ * * memoryMap is a list of loaded libraries.
+ * * stacks is a list of stacks. Each stack is a list of pairs of the form
+ * [moduleIndex, offset]. The moduleIndex is an index into the memoryMap and
+ * offset is an offset in the library at memoryMap[moduleIndex].
+ * This format is used to make it easier to send the stacks to the
+ * symbolication server.
+ */
+ [implicit_jscontext]
+ readonly attribute jsval lateWrites;
+
+ /**
+ * Returns an array whose values are the names of histograms defined
+ * in Histograms.json.
+ *
+ * @param dataset - DATASET_RELEASE_CHANNEL_OPTOUT or DATASET_RELEASE_CHANNEL_OPTIN
+ */
+ void registeredHistograms(in uint32_t dataset,
+ out uint32_t count,
+ [retval, array, size_is(count)] out string histograms);
+
+ /**
+ * Create and return a histogram registered in TelemetryHistograms.h.
+ *
+ * @param id - unique identifier from TelemetryHistograms.h
+ * The returned object has the following functions:
+ * add(int) - Adds an int value to the appropriate bucket
+ * snapshot() - Returns a snapshot of the histogram with the same data fields as in histogramSnapshots()
+ * clear() - Zeros out the histogram's buckets and sum
+ * dataset() - identifies what dataset this is in: DATASET_RELEASE_CHANNEL_OPTOUT or ...OPTIN
+ */
+ [implicit_jscontext]
+ jsval getHistogramById(in ACString id);
+
+ /*
+ * An object containing a snapshot from all of the currently registered keyed histograms.
+ * { name1: {histogramData1}, name2:{histogramData2}...}
+ * where the histogramData is as described in histogramSnapshots.
+ */
+ [implicit_jscontext]
+ readonly attribute jsval keyedHistogramSnapshots;
+
+ /**
+ * Returns an array whose values are the names of histograms defined
+ * in Histograms.json.
+ *
+ * @param dataset - DATASET_RELEASE_CHANNEL_OPTOUT or ...OPTIN
+ */
+ void registeredKeyedHistograms(in uint32_t dataset, out uint32_t count,
+ [retval, array, size_is(count)] out string histograms);
+
+ /**
+ * Create and return a histogram registered in TelemetryHistograms.h.
+ *
+ * @param id - unique identifier from TelemetryHistograms.h
+ * The returned object has the following functions:
+ * add(string key, [optional] int) - Add an int value to the histogram for that key. If no histogram for that key exists yet, it is created.
+ * snapshot([optional] string key) - If key is provided, returns a snapshot for the histogram with that key or null. If key is not provided, returns the snapshots of all the registered keys in the form {key1: snapshot1, key2: snapshot2, ...}.
+ * keys() - Returns an array with the string keys of the currently registered histograms
+ * clear() - Clears the registered histograms from this.
+ * dataset() - identifies what dataset this is in: DATASET_RELEASE_CHANNEL_OPTOUT or ...OPTIN
+ */
+ [implicit_jscontext]
+ jsval getKeyedHistogramById(in ACString id);
+
+ /**
+ * A flag indicating if Telemetry can record base data (FHR data). This is true if the
+ * FHR data reporting service or self-support are enabled.
+ *
+ * In the unlikely event that adding a new base probe is needed, please check the data
+ * collection wiki at https://wiki.mozilla.org/Firefox/Data_Collection and talk to the
+ * Telemetry team.
+ */
+ attribute boolean canRecordBase;
+
+ /**
+ * A flag indicating if Telemetry is allowed to record extended data. Returns false if
+ * the user hasn't opted into "extended Telemetry" on the Release channel, when the
+ * user has explicitly opted out of Telemetry on Nightly/Aurora/Beta or if manually
+ * set to false during tests.
+ *
+ * Set this to false in tests to disable gathering of extended telemetry statistics.
+ */
+ attribute boolean canRecordExtended;
+
+ /**
+ * A flag indicating whether Telemetry can submit official results (for base or extended
+ * data). This is true on official, non-debug builds with built in support for Mozilla
+ * Telemetry reporting.
+ */
+ readonly attribute boolean isOfficialTelemetry;
+
+ /** Addon telemetry hooks */
+
+ /**
+ * Register a histogram for an addon. Throws an error if the
+ * histogram name has been registered previously.
+ *
+ * @param addon_id - Unique ID of the addon
+ * @param name - Unique histogram name
+ * @param histogram_type - HISTOGRAM_EXPONENTIAL, HISTOGRAM_LINEAR,
+ * HISTOGRAM_BOOLEAN or HISTOGRAM_COUNT
+ * @param min - Minimal bucket size
+ * @param max - Maximum bucket size
+ * @param bucket_count - number of buckets in the histogram
+ */
+ [optional_argc]
+ void registerAddonHistogram(in ACString addon_id, in ACString name,
+ in unsigned long histogram_type,
+ [optional] in uint32_t min,
+ [optional] in uint32_t max,
+ [optional] in uint32_t bucket_count);
+
+ /**
+ * Return a histogram previously registered via
+ * registerAddonHistogram. Throws an error if the id/name combo has
+ * not been registered via registerAddonHistogram.
+ *
+ * @param addon_id - Unique ID of the addon
+ * @param name - Registered histogram name
+ *
+ */
+ [implicit_jscontext]
+ jsval getAddonHistogram(in ACString addon_id, in ACString name);
+
+ /**
+ * Delete all histograms associated with the given addon id.
+ *
+ * @param addon_id - Unique ID of the addon
+ */
+ void unregisterAddonHistograms(in ACString addon_id);
+
+ /**
+ * Enable/disable recording for this histogram at runtime.
+ * Recording is enabled by default, unless listed at kRecordingInitiallyDisabledIDs[].
+ * Name must be a valid Histogram identifier, otherwise an assertion will be triggered.
+ *
+ * @param id - unique identifier from histograms.json
+ * @param enabled - whether or not to enable recording from now on.
+ */
+ void setHistogramRecordingEnabled(in ACString id, in boolean enabled);
+
+ /**
+ * An object containing a snapshot from all of the currently
+ * registered addon histograms.
+ * { addon-id1 : data1, ... }
+ *
+ * where data is an object whose properties are the names of the
+ * addon's histograms and whose corresponding values are as in
+ * histogramSnapshots.
+ */
+ [implicit_jscontext]
+ readonly attribute jsval addonHistogramSnapshots;
+
+ /**
+ * Read data from the previous run. After the callback is called, the last
+ * shutdown time is available in lastShutdownDuration and any late
+ * writes in lateWrites.
+ */
+ void asyncFetchTelemetryData(in nsIFetchTelemetryDataCallback aCallback);
+
+ /**
+ * Get statistics of file IO reports, null, if not recorded.
+ *
+ * The statistics are returned as an object whose propoerties are the names
+ * of the files that have been accessed and whose corresponding values are
+ * arrays of size three, representing startup, normal, and shutdown stages.
+ * Each stage's entry is either null or an array with the layout
+ * [total_time, #creates, #reads, #writes, #fsyncs, #stats]
+ */
+ [implicit_jscontext]
+ readonly attribute jsval fileIOReports;
+
+ /**
+ * Return the number of milliseconds since process start using monotonic
+ * timestamps (unaffected by system clock changes).
+ * @throws NS_ERROR_NOT_AVAILABLE if TimeStamp doesn't have the data.
+ */
+ double msSinceProcessStart();
+
+ /**
+ * Adds the value to the given scalar.
+ *
+ * @param aName The scalar name.
+ * @param aValue The numeric value to add to the scalar. Only unsigned integers supported.
+ */
+ [implicit_jscontext]
+ void scalarAdd(in ACString aName, in jsval aValue);
+
+ /**
+ * Sets the scalar to the given value.
+ *
+ * @param aName The scalar name.
+ * @param aValue The value to set the scalar to. If the type of aValue doesn't match the
+ * type of the scalar, the function will fail. For scalar string types, the this
+ * is truncated to 50 characters.
+ */
+ [implicit_jscontext]
+ void scalarSet(in ACString aName, in jsval aValue);
+
+ /**
+ * Sets the scalar to the maximum of the current and the passed value.
+ *
+ * @param aName The scalar name.
+ * @param aValue The numeric value to set the scalar to. Only unsigned integers supported.
+ */
+ [implicit_jscontext]
+ void scalarSetMaximum(in ACString aName, in jsval aValue);
+
+ /**
+ * Serializes the scalars from the given dataset to a JSON-style object and resets them.
+ * The returned structure looks like:
+ * { "group1.probe": 1, "group1.other_probe": false, ... }
+ *
+ * @param aDataset DATASET_RELEASE_CHANNEL_OPTOUT or DATASET_RELEASE_CHANNEL_OPTIN.
+ * @param [aClear=false] Whether to clear out the scalars after snapshotting.
+ */
+ [implicit_jscontext, optional_argc]
+ jsval snapshotScalars(in uint32_t aDataset, [optional] in boolean aClear);
+
+ /**
+ * Adds the value to the given keyed scalar.
+ *
+ * @param aName The scalar name.
+ * @param aKey The key name.
+ * @param aValue The numeric value to add to the scalar. Only unsigned integers supported.
+ */
+ [implicit_jscontext]
+ void keyedScalarAdd(in ACString aName, in AString aKey, in jsval aValue);
+
+ /**
+ * Sets the keyed scalar to the given value.
+ *
+ * @param aName The scalar name.
+ * @param aKey The key name.
+ * @param aValue The value to set the scalar to. If the type of aValue doesn't match the
+ * type of the scalar, the function will fail.
+ */
+ [implicit_jscontext]
+ void keyedScalarSet(in ACString aName, in AString aKey, in jsval aValue);
+
+ /**
+ * Sets the keyed scalar to the maximum of the current and the passed value.
+ *
+ * @param aName The scalar name.
+ * @param aKey The key name.
+ * @param aValue The numeric value to set the scalar to. Only unsigned integers supported.
+ */
+ [implicit_jscontext]
+ void keyedScalarSetMaximum(in ACString aName, in AString aKey, in jsval aValue);
+
+ /**
+ * Serializes the keyed scalars from the given dataset to a JSON-style object and
+ * resets them.
+ * The returned structure looks like:
+ * { "group1.probe": { "key_1": 2, "key_2": 1, ... }, ... }
+ *
+ * @param aDataset DATASET_RELEASE_CHANNEL_OPTOUT or DATASET_RELEASE_CHANNEL_OPTIN.
+ * @param [aClear=false] Whether to clear out the scalars after snapshotting.
+ */
+ [implicit_jscontext, optional_argc]
+ jsval snapshotKeyedScalars(in uint32_t aDataset, [optional] in boolean aClear);
+
+ /**
+ * Resets all the stored scalars. This is intended to be only used in tests.
+ */
+ void clearScalars();
+
+ /**
+ * Immediately sends any Telemetry batched on this process to the parent
+ * process. This is intended only to be used on process shutdown.
+ */
+ void flushBatchedChildTelemetry();
+
+ /**
+ * Record an event in Telemetry.
+ *
+ * @param aCategory The category name.
+ * @param aMethod The method name.
+ * @param aMethod The object name.
+ * @param aValue An optional string value to record.
+ * @param aExtra An optional object of the form (string -> string).
+ * It should only contain registered extra keys.
+ *
+ * @throws NS_ERROR_INVALID_ARG When trying to record an unknown event.
+ */
+ [implicit_jscontext, optional_argc]
+ void recordEvent(in ACString aCategory, in ACString aMethod, in ACString aObject, [optional] in jsval aValue, [optional] in jsval extra);
+
+ /**
+ * Serializes the recorded events to a JSON-appropriate array and optionally resets them.
+ * The returned structure looks like this:
+ * [
+ * // [timestamp, category, method, object, stringValue, extraValues]
+ * [43245, "category1", "method1", "object1", "string value", null],
+ * [43258, "category1", "method2", "object1", null, {"key1": "string value"}],
+ * ...
+ * ]
+ *
+ * @param aDataset DATASET_RELEASE_CHANNEL_OPTOUT or DATASET_RELEASE_CHANNEL_OPTIN.
+ * @param [aClear=false] Whether to clear out the scalars after snapshotting.
+ */
+ [implicit_jscontext, optional_argc]
+ jsval snapshotBuiltinEvents(in uint32_t aDataset, [optional] in boolean aClear);
+
+ /**
+ * Resets all the stored events. This is intended to be only used in tests.
+ */
+ void clearEvents();
+};
diff --git a/toolkit/components/telemetry/parse_events.py b/toolkit/components/telemetry/parse_events.py
new file mode 100644
index 000000000..b31e9bc04
--- /dev/null
+++ b/toolkit/components/telemetry/parse_events.py
@@ -0,0 +1,271 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import re
+import yaml
+import itertools
+import datetime
+import string
+from shared_telemetry_utils import add_expiration_postfix
+
+MAX_CATEGORY_NAME_LENGTH = 30
+MAX_METHOD_NAME_LENGTH = 20
+MAX_OBJECT_NAME_LENGTH = 20
+MAX_EXTRA_KEYS_COUNT = 10
+MAX_EXTRA_KEY_NAME_LENGTH = 15
+
+IDENTIFIER_PATTERN = r'^[a-zA-Z][a-zA-Z0-9_.]+[a-zA-Z0-9]$'
+DATE_PATTERN = r'^[0-9]{4}-[0-9]{2}-[0-9]{2}$'
+
+def nice_type_name(t):
+ if isinstance(t, basestring):
+ return "string"
+ return t.__name__
+
+def convert_to_cpp_identifier(s, sep):
+ return string.capwords(s, sep).replace(sep, "")
+
+class OneOf:
+ """This is a placeholder type for the TypeChecker below.
+ It signals that the checked value should match one of the following arguments
+ passed to the TypeChecker constructor.
+ """
+ pass
+
+class TypeChecker:
+ """This implements a convenience type TypeChecker to make the validation code more readable."""
+ def __init__(self, kind, *args):
+ """This takes 1-3 arguments, specifying the value type to check for.
+ It supports:
+ - atomic values, e.g.: TypeChecker(int)
+ - list values, e.g.: TypeChecker(list, basestring)
+ - dict values, e.g.: TypeChecker(dict, basestring, int)
+ - atomic values that can have different types, e.g.: TypeChecker(OneOf, int, date)"""
+ self._kind = kind
+ self._args = args
+
+ def check(self, key, value):
+ # Check fields that can be one of two different types.
+ if self._kind is OneOf:
+ if not isinstance(value, self._args[0]) and not isinstance(value, self._args[1]):
+ raise ValueError, "failed type check for %s - expected %s or %s, got %s" %\
+ (key,
+ nice_type_name(self._args[0]),
+ nice_type_name(self._args[1]),
+ nice_type_name(type(value)))
+ return
+
+ # Check basic type of value.
+ if not isinstance(value, self._kind):
+ raise ValueError, "failed type check for %s - expected %s, got %s" %\
+ (key,
+ nice_type_name(self._kind),
+ nice_type_name(type(value)))
+
+ # Check types of values in lists.
+ if self._kind is list:
+ if len(value) < 1:
+ raise ValueError, "failed check for %s - list should not be empty" % key
+ for x in value:
+ if not isinstance(x, self._args[0]):
+ raise ValueError, "failed type check for %s - expected list value type %s, got %s" %\
+ (key,
+ nice_type_name(self._args[0]),
+ nice_type_name(type(x)))
+ # Check types of keys and values in dictionaries.
+ elif self._kind is dict:
+ if len(value.keys()) < 1:
+ raise ValueError, "failed check for %s - dict should not be empty" % key
+ for x in value.iterkeys():
+ if not isinstance(x, self._args[0]):
+ raise ValueError, "failed dict type check for %s - expected key type %s, got %s" %\
+ (key,
+ nice_type_name(self._args[0]),
+ nice_type_name(type(x)))
+ for k,v in value.iteritems():
+ if not isinstance(x, self._args[1]):
+ raise ValueError, "failed dict type check for %s - expected value type %s for key %s, got %s" %\
+ (key,
+ nice_type_name(self._args[1]),
+ k,
+ nice_type_name(type(x)))
+
+def type_check_event_fields(category, definition):
+ """Perform a type/schema check on the event definition."""
+ REQUIRED_FIELDS = {
+ 'methods': TypeChecker(list, basestring),
+ 'objects': TypeChecker(list, basestring),
+ 'bug_numbers': TypeChecker(list, int),
+ 'notification_emails': TypeChecker(list, basestring),
+ 'description': TypeChecker(basestring),
+ }
+ OPTIONAL_FIELDS = {
+ 'release_channel_collection': TypeChecker(basestring),
+ 'expiry_date': TypeChecker(OneOf, basestring, datetime.date),
+ 'expiry_version': TypeChecker(basestring),
+ 'extra_keys': TypeChecker(dict, basestring, basestring),
+ }
+ ALL_FIELDS = REQUIRED_FIELDS.copy()
+ ALL_FIELDS.update(OPTIONAL_FIELDS)
+
+ # Check that all the required fields are available.
+ missing_fields = [f for f in REQUIRED_FIELDS.keys() if f not in definition]
+ if len(missing_fields) > 0:
+ raise KeyError(category + ' - missing required fields: ' + ', '.join(missing_fields))
+
+ # Is there any unknown field?
+ unknown_fields = [f for f in definition.keys() if f not in ALL_FIELDS]
+ if len(unknown_fields) > 0:
+ raise KeyError(category + ' - unknown fields: ' + ', '.join(unknown_fields))
+
+ # Type-check fields.
+ for k,v in definition.iteritems():
+ ALL_FIELDS[k].check(k, v)
+
+def string_check(category, field_name, value, min_length, max_length, regex=None):
+ # Length check.
+ if len(value) > max_length:
+ raise ValueError("Value '%s' for %s in %s exceeds maximum length of %d" %\
+ (value, field_name, category, max_length))
+ # Regex check.
+ if regex and not re.match(regex, value):
+ raise ValueError, 'String value for %s in %s is not matching pattern "%s": %s' % \
+ (field_name, category, regex, value)
+
+class EventData:
+ """A class representing one event."""
+
+ def __init__(self, category, definition):
+ type_check_event_fields(category, definition)
+
+ string_check(category, 'methods', definition.get('methods')[0], 1, MAX_METHOD_NAME_LENGTH, regex=IDENTIFIER_PATTERN)
+ string_check(category, 'objects', definition.get('objects')[0], 1, MAX_OBJECT_NAME_LENGTH, regex=IDENTIFIER_PATTERN)
+
+ # Check release_channel_collection
+ rcc_key = 'release_channel_collection'
+ rcc = definition.get(rcc_key, 'opt-in')
+ allowed_rcc = ["opt-in", "opt-out"]
+ if not rcc in allowed_rcc:
+ raise ValueError, "Value for %s in %s should be one of: %s" %\
+ (rcc_key, category, ", ".join(allowed_rcc))
+
+ # Check extra_keys.
+ extra_keys = definition.get('extra_keys', {})
+ if len(extra_keys.keys()) > MAX_EXTRA_KEYS_COUNT:
+ raise ValueError, "Number of extra_keys in %s exceeds limit %d" %\
+ (category, MAX_EXTRA_KEYS_COUNT)
+ for key in extra_keys.iterkeys():
+ string_check(category, 'extra_keys', key, 1, MAX_EXTRA_KEY_NAME_LENGTH, regex=IDENTIFIER_PATTERN)
+
+ # Check expiry.
+ if not 'expiry_version' in definition and not 'expiry_date' in definition:
+ raise KeyError, "Event in %s is missing an expiration - either expiry_version or expiry_date is required" %\
+ (category)
+ expiry_date = definition.get('expiry_date')
+ if expiry_date and isinstance(expiry_date, basestring) and expiry_date != 'never':
+ if not re.match(DATE_PATTERN, expiry_date):
+ raise ValueError, "Event in %s has invalid expiry_date, it should be either 'never' or match this format: %s" %\
+ (category, DATE_PATTERN)
+ # Parse into date.
+ definition['expiry_date'] = datetime.datetime.strptime(expiry_date, '%Y-%m-%d')
+
+ # Finish setup.
+ self._category = category
+ self._definition = definition
+ definition['expiry_version'] = add_expiration_postfix(definition.get('expiry_version', 'never'))
+
+ @property
+ def category(self):
+ return self._category
+
+ @property
+ def category_cpp(self):
+ # Transform e.g. category.example into CategoryExample.
+ return convert_to_cpp_identifier(self._category, ".")
+
+ @property
+ def methods(self):
+ return self._definition.get('methods')
+
+ @property
+ def objects(self):
+ return self._definition.get('objects')
+
+ @property
+ def expiry_version(self):
+ return self._definition.get('expiry_version')
+
+ @property
+ def expiry_day(self):
+ date = self._definition.get('expiry_date')
+ if not date:
+ return 0
+ if isinstance(date, basestring) and date == 'never':
+ return 0
+
+ # Convert date to days since UNIX epoch.
+ epoch = datetime.date(1970, 1, 1)
+ days = (date - epoch).total_seconds() / (24 * 60 * 60)
+ return round(days)
+
+ @property
+ def cpp_guard(self):
+ return self._definition.get('cpp_guard')
+
+ @property
+ def enum_labels(self):
+ def enum(method_name, object_name):
+ m = convert_to_cpp_identifier(method_name, "_")
+ o = convert_to_cpp_identifier(object_name, "_")
+ return m + '_' + o
+ combinations = itertools.product(self.methods, self.objects)
+ return [enum(t[0], t[1]) for t in combinations]
+
+ @property
+ def dataset(self):
+ """Get the nsITelemetry constant equivalent for release_channel_collection.
+ """
+ rcc = self._definition.get('release_channel_collection', 'opt-in')
+ if rcc == 'opt-out':
+ return 'nsITelemetry::DATASET_RELEASE_CHANNEL_OPTOUT'
+ else:
+ return 'nsITelemetry::DATASET_RELEASE_CHANNEL_OPTIN'
+
+ @property
+ def extra_keys(self):
+ return self._definition.get('extra_keys', {}).keys()
+
+def load_events(filename):
+ """Parses a YAML file containing the event definitions.
+
+ :param filename: the YAML file containing the event definitions.
+ :raises Exception: if the event file cannot be opened or parsed.
+ """
+
+ # Parse the event definitions from the YAML file.
+ events = None
+ try:
+ with open(filename, 'r') as f:
+ events = yaml.safe_load(f)
+ except IOError, e:
+ raise Exception('Error opening ' + filename + ': ' + e.message)
+ except ValueError, e:
+ raise Exception('Error parsing events in ' + filename + ': ' + e.message)
+
+ event_list = []
+
+ # Events are defined in a fixed two-level hierarchy within the definition file.
+ # The first level contains the category (group name), while the second level contains the
+ # event definitions (e.g. "category.name: [<event definition>, ...], ...").
+ for category_name,category in events.iteritems():
+ string_check('', 'category', category_name, 1, MAX_CATEGORY_NAME_LENGTH, regex=IDENTIFIER_PATTERN)
+
+ # Make sure that the category has at least one entry in it.
+ if not category or len(category) == 0:
+ raise ValueError(category_name + ' must contain at least one entry')
+
+ for entry in category:
+ event_list.append(EventData(category_name, entry))
+
+ return event_list
diff --git a/toolkit/components/telemetry/parse_scalars.py b/toolkit/components/telemetry/parse_scalars.py
new file mode 100644
index 000000000..a560a3013
--- /dev/null
+++ b/toolkit/components/telemetry/parse_scalars.py
@@ -0,0 +1,262 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import re
+import yaml
+from shared_telemetry_utils import add_expiration_postfix
+
# The map containing the allowed scalar types and their mapping to the
# nsITelemetry::SCALAR_* type constants used by the generated C++ code.
SCALAR_TYPES_MAP = {
    'uint': 'nsITelemetry::SCALAR_COUNT',
    'string': 'nsITelemetry::SCALAR_STRING',
    'boolean': 'nsITelemetry::SCALAR_BOOLEAN'
}
+
class ScalarType:
    """A class for representing a scalar definition.

    The constructor validates both the names and the definition dictionary,
    so a successfully constructed instance always holds a well-formed scalar.
    """

    def __init__(self, group_name, probe_name, definition):
        # Validate and set the name, so we don't need to pass it to the other
        # validation functions.
        self.validate_names(group_name, probe_name)
        self._name = probe_name
        self._group_name = group_name

        # Validating the scalar definition.
        self.validate_types(definition)
        self.validate_values(definition)

        # Everything is ok, set the rest of the data. Note that this
        # intentionally mutates the caller's dictionary to normalize the
        # expiration version in place.
        self._definition = definition
        definition['expires'] = add_expiration_postfix(definition['expires'])

    def validate_names(self, group_name, probe_name):
        """Validate the group and probe name:
        - Group name must be alpha-numeric + '.', no leading/trailing digit or '.'.
        - Probe name must be alpha-numeric + '_', no leading/trailing digit or '_'.

        :param group_name: the name of the group the probe is in.
        :param probe_name: the name of the scalar probe.
        :raises ValueError: if the length of the names exceeds the limit or they don't
                conform our name specification.
        """

        # Enforce a maximum length on group and probe names.
        MAX_NAME_LENGTH = 40
        for n in [group_name, probe_name]:
            if len(n) > MAX_NAME_LENGTH:
                raise ValueError("Name '{}' exceeds maximum name length of {} characters."\
                                 .format(n, MAX_NAME_LENGTH))

        def check_name(name, error_msg_prefix, allowed_char_regexp):
            # Check if we only have the allowed characters.
            chars_regxp = r'^[a-zA-Z0-9' + allowed_char_regexp + r']+$'
            if not re.search(chars_regxp, name):
                raise ValueError(error_msg_prefix + " name must be alpha-numeric. Got: '{}'".format(name))

            # Don't allow leading/trailing digits, '.' or '_'.
            if re.search(r'(^[\d\._])|([\d\._])$', name):
                raise ValueError(error_msg_prefix +
                    " name must not have a leading/trailing digit, a dot or underscore. Got: '{}'"\
                    .format(name))

        check_name(group_name, 'Group', r'\.')
        check_name(probe_name, 'Probe', r'_')

    def validate_types(self, definition):
        """This function performs some basic sanity checks on the scalar definition:
        - Checks that all the required fields are available.
        - Checks that all the fields have the expected types.

        :param definition: the dictionary containing the scalar properties.
        :raises TypeError: if a scalar definition field is of the wrong type.
        :raise KeyError: if a required field is missing or unknown fields are present.
        """

        # The required and optional fields in a scalar type definition.
        REQUIRED_FIELDS = {
            'bug_numbers': list,  # This contains ints. See LIST_FIELDS_CONTENT.
            'description': basestring,
            'expires': basestring,
            'kind': basestring,
            'notification_emails': list  # This contains strings. See LIST_FIELDS_CONTENT.
        }

        OPTIONAL_FIELDS = {
            'cpp_guard': basestring,
            'release_channel_collection': basestring,
            'keyed': bool
        }

        # The types for the data within the fields that hold lists.
        LIST_FIELDS_CONTENT = {
            'bug_numbers': int,
            'notification_emails': basestring
        }

        # Concatenate the required and optional field definitions.
        ALL_FIELDS = REQUIRED_FIELDS.copy()
        ALL_FIELDS.update(OPTIONAL_FIELDS)

        # Checks that all the required fields are available.
        missing_fields = [f for f in REQUIRED_FIELDS.keys() if f not in definition]
        if len(missing_fields) > 0:
            raise KeyError(self._name + ' - missing required fields: ' + ', '.join(missing_fields))

        # Do we have any unknown field?
        unknown_fields = [f for f in definition.keys() if f not in ALL_FIELDS]
        if len(unknown_fields) > 0:
            raise KeyError(self._name + ' - unknown fields: ' + ', '.join(unknown_fields))

        # Checks the type for all the fields.
        wrong_type_names = ['{} must be {}'.format(f, ALL_FIELDS[f].__name__) \
            for f in definition.keys() if not isinstance(definition[f], ALL_FIELDS[f])]
        if len(wrong_type_names) > 0:
            raise TypeError(self._name + ' - ' + ', '.join(wrong_type_names))

        # Check that the lists are not empty and that data in the lists
        # have the correct types.
        list_fields = [f for f in definition if isinstance(definition[f], list)]
        for field in list_fields:
            # Check for empty lists.
            if len(definition[field]) == 0:
                raise TypeError("Field '{}' for probe '{}' must not be empty."
                                .format(field, self._name))
            # Check the type of the list content.
            broken_types =\
                [not isinstance(v, LIST_FIELDS_CONTENT[field]) for v in definition[field]]
            if any(broken_types):
                raise TypeError("Field '{}' for probe '{}' must only contain values of type {}"
                                .format(field, self._name, LIST_FIELDS_CONTENT[field].__name__))

    def validate_values(self, definition):
        """This function checks that the fields have the correct values.

        :param definition: the dictionary containing the scalar properties.
        :raises ValueError: if a scalar definition field contains an unexpected value.
        """

        # Validate the scalar kind.
        scalar_kind = definition.get('kind')
        if scalar_kind not in SCALAR_TYPES_MAP:
            raise ValueError(self._name + ' - unknown scalar kind: ' + scalar_kind)

        # Validate the collection policy.
        collection_policy = definition.get('release_channel_collection', None)
        if collection_policy and collection_policy not in ['opt-in', 'opt-out']:
            raise ValueError(self._name + ' - unknown collection policy: ' + collection_policy)

        # Validate the cpp_guard. Use re.search rather than re.match: re.match
        # only anchors at the start of the string, so it would miss a non-word
        # character (e.g. a space) anywhere past the first position.
        cpp_guard = definition.get('cpp_guard')
        if cpp_guard and re.search(r'\W', cpp_guard):
            raise ValueError(self._name + ' - invalid cpp_guard: ' + cpp_guard)

    @property
    def name(self):
        """Get the scalar name"""
        return self._name

    @property
    def label(self):
        """Get the scalar label generated from the scalar and group names."""
        return self._group_name + '.' + self._name

    @property
    def enum_label(self):
        """Get the enum label generated from the scalar and group names. This is used to
        generate the enum tables."""

        # The scalar name can contain informations about its hierarchy (e.g. 'a.b.scalar').
        # We can't have dots in C++ enums, replace them with an underscore. Also, make the
        # label upper case for consistency with the histogram enums.
        return self.label.replace('.', '_').upper()

    @property
    def bug_numbers(self):
        """Get the list of related bug numbers"""
        return self._definition['bug_numbers']

    @property
    def description(self):
        """Get the scalar description"""
        return self._definition['description']

    @property
    def expires(self):
        """Get the scalar expiration"""
        return self._definition['expires']

    @property
    def kind(self):
        """Get the scalar kind"""
        return self._definition['kind']

    @property
    def keyed(self):
        """Boolean indicating whether this is a keyed scalar"""
        return self._definition.get('keyed', False)

    @property
    def nsITelemetry_kind(self):
        """Get the scalar kind constant defined in nsITelemetry"""
        return SCALAR_TYPES_MAP.get(self.kind)

    @property
    def notification_emails(self):
        """Get the list of notification emails"""
        return self._definition['notification_emails']

    @property
    def dataset(self):
        """Get the nsITelemetry constant equivalent to the chose release channel collection
        policy for the scalar.
        """
        # The collection policy is optional, but we still define a default
        # behaviour for it.
        release_channel_collection = \
            self._definition.get('release_channel_collection', 'opt-in')
        return 'nsITelemetry::' + ('DATASET_RELEASE_CHANNEL_OPTOUT' \
            if release_channel_collection == 'opt-out' else 'DATASET_RELEASE_CHANNEL_OPTIN')

    @property
    def cpp_guard(self):
        """Get the cpp guard for this scalar"""
        return self._definition.get('cpp_guard')
+
def load_scalars(filename):
    """Parses a YAML file containing the scalar definition.

    :param filename: the YAML file containing the scalars definition.
    :return: a list of ScalarType instances, one per parsed probe.
    :raises Exception: if the scalar file cannot be opened or parsed.
    """

    # Parse the scalar definitions from the YAML file.
    scalars = None
    try:
        with open(filename, 'r') as f:
            scalars = yaml.safe_load(f)
    except IOError as e:
        raise Exception('Error opening ' + filename + ': ' + str(e))
    except (yaml.YAMLError, ValueError) as e:
        # yaml.safe_load() signals malformed input with yaml.YAMLError, which
        # is not a ValueError subclass - catch it too so parse failures get
        # reported with the file name instead of escaping as raw YAML errors.
        raise Exception('Error parsing scalars in ' + filename + ': ' + str(e))

    scalar_list = []

    # Scalars are defined in a fixed two-level hierarchy within the definition file.
    # The first level contains the group name, while the second level contains the
    # probe name (e.g. "group.name: probe: ...").
    for group_name in scalars:
        group = scalars[group_name]

        # Make sure that the group has at least one probe in it.
        if not group:
            raise ValueError(group_name + ' must have at least a probe in it')

        for probe_name in group:
            # We found a scalar type. Go ahead and parse it.
            scalar_info = group[probe_name]
            scalar_list.append(ScalarType(group_name, probe_name, scalar_info))

    return scalar_list
diff --git a/toolkit/components/telemetry/schemas/core.schema.json b/toolkit/components/telemetry/schemas/core.schema.json
new file mode 100644
index 000000000..327cdc298
--- /dev/null
+++ b/toolkit/components/telemetry/schemas/core.schema.json
@@ -0,0 +1,41 @@
+{
+ "$schema" : "http://json-schema.org/draft-04/schema#",
+ "type" : "object",
+ "name" : "core",
+ "properties" : {
+ "arch" : {
+ "type" : "string"
+ },
+ "clientId" : {
+ "type" : "string",
+ "pattern": "^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}$"
+ },
+ "device" : {
+ "type" : "string"
+ },
+ "experiments" : {
+ "type" : "array",
+ "items" : {
+ "type" : "string"
+ }
+ },
+ "locale" : {
+ "type" : "string"
+ },
+ "os" : {
+ "type" : "string"
+ },
+ "osversion" : {
+ "type" : "string"
+ },
+ "seq" : {
+ "type" : "integer",
+ "minimum": 0
+ },
+ "v" : {
+ "type" : "integer",
+ "enum" : [ 1 ]
+ }
+ },
+ "required" : ["arch", "clientId", "device", "locale", "os", "osversion", "seq", "v"]
+}
diff --git a/toolkit/components/telemetry/shared_telemetry_utils.py b/toolkit/components/telemetry/shared_telemetry_utils.py
new file mode 100644
index 000000000..740c27e34
--- /dev/null
+++ b/toolkit/components/telemetry/shared_telemetry_utils.py
@@ -0,0 +1,103 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+# This file contains utility functions shared by the scalars and the histogram generation
+# scripts.
+
+from __future__ import print_function
+
+import re
+
class StringTable:
    """Manages a string table and allows C style serialization to a file.

    Strings are laid out back to back in one char array (each terminated by a
    null character); the table maps every interned string to its byte offset.
    """

    def __init__(self):
        # The byte offset the next interned string will receive.
        self.current_index = 0
        # Maps each interned string to its byte offset in the array.
        self.table = {}

    def c_strlen(self, string):
        """The length of a string including the null terminating character.
        :param string: the input string.
        """
        return len(string) + 1

    def stringIndex(self, string):
        """Returns the index in the table of the provided string. Adds the string to
        the table if it's not there.
        :param string: the input string.
        """
        if string in self.table:
            return self.table[string]
        else:
            result = self.current_index
            self.table[string] = result
            self.current_index += self.c_strlen(string)
            return result

    def stringIndexes(self, strings):
        """ Returns a list of indexes for the provided list of strings.
        Adds the strings to the table if they are not in it yet.
        :param strings: list of strings to put into the table.
        """
        return [self.stringIndex(s) for s in strings]

    def writeDefinition(self, f, name):
        """Writes the string table to a file as a C const char array.

        This writes out the string table as one single C char array for memory
        size reasons, separating the individual strings with '\\0' characters.
        This way we can index directly into the string array and avoid the additional
        storage costs for the pointers to them (and potential extra relocations for those).

        :param f: the output stream.
        :param name: the name of the output array.
        :raises ValueError: if a string contains the C comment terminator '*/'.
        """
        # Emit the strings ordered by their offset in the array. Use sorted()
        # instead of list.sort() so this also works when items() returns a
        # view (Python 3) rather than a list (Python 2).
        entries = sorted(self.table.items(), key=lambda entry: entry[1])

        # Avoid null-in-string warnings with GCC and potentially
        # overlong string constants; write everything out the long way.
        def explodeToCharArray(string):
            def toCChar(s):
                if s == "'":
                    return "'\\''"
                else:
                    return "'%s'" % s
            return ", ".join(map(toCChar, string))

        f.write("const char %s[] = {\n" % name)
        for (string, offset) in entries:
            # A '*/' inside the string would terminate the offset comment
            # emitted below and corrupt the generated source.
            if "*/" in string:
                raise ValueError("String in string table contains unexpected sequence '*/': %s" % string)

            e = explodeToCharArray(string)
            if e:
                # Reuse the representation computed above instead of exploding
                # the string a second time.
                f.write(" /* %5d - \"%s\" */ %s, '\\0',\n"
                        % (offset, string, e))
            else:
                f.write(" /* %5d - \"%s\" */ '\\0',\n" % (offset, string))
        f.write("};\n\n")
+
def static_assert(output, expression, message):
    """Writes a C++ compile-time assertion expression to a file.
    :param output: the output stream.
    :param expression: the expression to check.
    :param message: the string literal that will appear if the expression evaluates to
           false.
    """
    output.write('static_assert({}, "{}");\n'.format(expression, message))
+
def add_expiration_postfix(expiration):
    """Formats the expiration version and adds a version postfix if needed.

    A bare major version ("55") becomes "55.0a1" and a "<major>.0" version
    ("55.0") becomes "55.0a1"; anything else is returned unchanged.

    :param expiration: the expiration version string.
    :return: the modified expiration string.
    """
    version = re.match(r'^([1-9][0-9]*)(\.0)?$', expiration)
    if version is None:
        # Not a plain "<major>" or "<major>.0" version - leave it alone.
        return expiration
    return expiration + ('a1' if version.group(2) else '.0a1')
diff --git a/toolkit/components/telemetry/tests/addons/dictionary/install.rdf b/toolkit/components/telemetry/tests/addons/dictionary/install.rdf
new file mode 100644
index 000000000..ff0039b39
--- /dev/null
+++ b/toolkit/components/telemetry/tests/addons/dictionary/install.rdf
@@ -0,0 +1,25 @@
+<?xml version="1.0"?>
+
+<RDF xmlns="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+ xmlns:em="http://www.mozilla.org/2004/em-rdf#">
+
+ <Description about="urn:mozilla:install-manifest">
+ <em:id>telemetry-dictionary@tests.mozilla.org</em:id>
+ <em:version>1</em:version>
+ <em:type>64</em:type>
+
+ <em:targetApplication>
+ <Description>
+ <em:id>toolkit@mozilla.org</em:id>
+ <em:minVersion>0</em:minVersion>
+ <em:maxVersion>*</em:maxVersion>
+ </Description>
+ </em:targetApplication>
+
+ <!-- Front End MetaData -->
+ <em:name>Telemetry test dictionary</em:name>
+ <em:description>A nice dictionary to prevent all typos for Telemetry.</em:description>
+ <em:bootstrap>true</em:bootstrap>
+
+ </Description>
+</RDF>
diff --git a/toolkit/components/telemetry/tests/addons/experiment/install.rdf b/toolkit/components/telemetry/tests/addons/experiment/install.rdf
new file mode 100644
index 000000000..d12f06816
--- /dev/null
+++ b/toolkit/components/telemetry/tests/addons/experiment/install.rdf
@@ -0,0 +1,16 @@
+<?xml version="1.0"?>
+
+<RDF xmlns="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+ xmlns:em="http://www.mozilla.org/2004/em-rdf#">
+
+ <Description about="urn:mozilla:install-manifest">
+ <em:id>telemetry-experiment-1@tests.mozilla.org</em:id>
+ <em:version>1</em:version>
+ <em:type>128</em:type>
+
+ <!-- Front End MetaData -->
+ <em:name>Telemetry test experiment</em:name>
+ <em:description>Yet another experiment that experiments experimentally.</em:description>
+
+ </Description>
+</RDF>
diff --git a/toolkit/components/telemetry/tests/addons/extension-2/install.rdf b/toolkit/components/telemetry/tests/addons/extension-2/install.rdf
new file mode 100644
index 000000000..ddb5904f8
--- /dev/null
+++ b/toolkit/components/telemetry/tests/addons/extension-2/install.rdf
@@ -0,0 +1,16 @@
+<?xml version="1.0"?>
+
+<RDF xmlns="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+ xmlns:em="http://www.mozilla.org/2004/em-rdf#">
+
+ <Description about="urn:mozilla:install-manifest">
+ <em:id>telemetry-ext-2@tests.mozilla.org</em:id>
+ <em:version>2</em:version>
+ <em:type>2</em:type>
+
+ <!-- Front End MetaData -->
+ <em:name>Telemetry test extension 2</em:name>
+ <em:description>Yet another extension that extends twice.</em:description>
+
+ </Description>
+</RDF>
diff --git a/toolkit/components/telemetry/tests/addons/extension/install.rdf b/toolkit/components/telemetry/tests/addons/extension/install.rdf
new file mode 100644
index 000000000..4b1bd2da7
--- /dev/null
+++ b/toolkit/components/telemetry/tests/addons/extension/install.rdf
@@ -0,0 +1,16 @@
+<?xml version="1.0"?>
+
+<RDF xmlns="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+ xmlns:em="http://www.mozilla.org/2004/em-rdf#">
+
+ <Description about="urn:mozilla:install-manifest">
+ <em:id>telemetry-ext-1@tests.mozilla.org</em:id>
+ <em:version>1</em:version>
+ <em:type>2</em:type>
+
+ <!-- Front End MetaData -->
+ <em:name>Telemetry test extension</em:name>
+ <em:description>Yet another extension that extends.</em:description>
+
+ </Description>
+</RDF>
diff --git a/toolkit/components/telemetry/tests/addons/long-fields/install.rdf b/toolkit/components/telemetry/tests/addons/long-fields/install.rdf
new file mode 100644
index 000000000..23ca7523c
--- /dev/null
+++ b/toolkit/components/telemetry/tests/addons/long-fields/install.rdf
@@ -0,0 +1,24 @@
+<?xml version="1.0"?>
+
+<RDF xmlns="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+ xmlns:em="http://www.mozilla.org/2004/em-rdf#">
+
+ <Description about="urn:mozilla:install-manifest">
+ <em:id>tel-longfields-xpi@tests.mozilla.org</em:id>
+ <em:version>This is a really long addon version, that will get limited to 100 characters. We're much longer, we're at about 116.</em:version>
+
+ <em:targetApplication>
+ <Description>
+ <em:id>toolkit@mozilla.org</em:id>
+ <em:minVersion>0</em:minVersion>
+ <em:maxVersion>*</em:maxVersion>
+ </Description>
+ </em:targetApplication>
+
+ <!-- Front End MetaData -->
+ <em:name>This is a really long addon name, that will get limited to 100 characters. We're much longer, we're at about 219. Lorem ipsum dolor sit amet, consectetur adipiscing elit. Vivamus nullam sodales. Yeah, Latin placeholder.</em:name>
+ <em:description>This is a really long addon description, that will get limited to 100 characters. We're much longer, we're at about 200. Lorem ipsum dolor sit amet, consectetur adipiscing elit. Vivamus nullam sodales.</em:description>
+ <em:bootstrap>true</em:bootstrap>
+
+ </Description>
+</RDF>
diff --git a/toolkit/components/telemetry/tests/addons/restartless/install.rdf b/toolkit/components/telemetry/tests/addons/restartless/install.rdf
new file mode 100644
index 000000000..f6cda9f25
--- /dev/null
+++ b/toolkit/components/telemetry/tests/addons/restartless/install.rdf
@@ -0,0 +1,24 @@
+<?xml version="1.0"?>
+
+<RDF xmlns="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+ xmlns:em="http://www.mozilla.org/2004/em-rdf#">
+
+ <Description about="urn:mozilla:install-manifest">
+ <em:id>tel-restartless-xpi@tests.mozilla.org</em:id>
+ <em:version>1.0</em:version>
+
+ <em:targetApplication>
+ <Description>
+ <em:id>toolkit@mozilla.org</em:id>
+ <em:minVersion>0</em:minVersion>
+ <em:maxVersion>*</em:maxVersion>
+ </Description>
+ </em:targetApplication>
+
+ <!-- Front End MetaData -->
+ <em:name>XPI Telemetry Restartless Test</em:name>
+ <em:description>A restartless addon which gets enabled without a reboot.</em:description>
+ <em:bootstrap>true</em:bootstrap>
+
+ </Description>
+</RDF>
diff --git a/toolkit/components/telemetry/tests/addons/signed/META-INF/manifest.mf b/toolkit/components/telemetry/tests/addons/signed/META-INF/manifest.mf
new file mode 100644
index 000000000..e6e279dbc
--- /dev/null
+++ b/toolkit/components/telemetry/tests/addons/signed/META-INF/manifest.mf
@@ -0,0 +1,7 @@
+Manifest-Version: 1.0
+
+Name: install.rdf
+Digest-Algorithms: MD5 SHA1
+MD5-Digest: YEilRfaecTg2bMNPoYqexQ==
+SHA1-Digest: GEnQKM8Coyw83phx/z1oNh327+0=
+
diff --git a/toolkit/components/telemetry/tests/addons/signed/META-INF/mozilla.rsa b/toolkit/components/telemetry/tests/addons/signed/META-INF/mozilla.rsa
new file mode 100644
index 000000000..8e5a92650
--- /dev/null
+++ b/toolkit/components/telemetry/tests/addons/signed/META-INF/mozilla.rsa
Binary files differ
diff --git a/toolkit/components/telemetry/tests/addons/signed/META-INF/mozilla.sf b/toolkit/components/telemetry/tests/addons/signed/META-INF/mozilla.sf
new file mode 100644
index 000000000..21ce46081
--- /dev/null
+++ b/toolkit/components/telemetry/tests/addons/signed/META-INF/mozilla.sf
@@ -0,0 +1,4 @@
+Signature-Version: 1.0
+MD5-Digest-Manifest: Ko2bKTrwTXCdstWHWqCR4w==
+SHA1-Digest-Manifest: k6+jfNGFxXtDd1cSX0ZoIyQ1cww=
+
diff --git a/toolkit/components/telemetry/tests/addons/signed/install.rdf b/toolkit/components/telemetry/tests/addons/signed/install.rdf
new file mode 100644
index 000000000..5fdca172c
--- /dev/null
+++ b/toolkit/components/telemetry/tests/addons/signed/install.rdf
@@ -0,0 +1,24 @@
+<?xml version="1.0"?>
+
+<RDF xmlns="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+ xmlns:em="http://www.mozilla.org/2004/em-rdf#">
+
+ <Description about="urn:mozilla:install-manifest">
+ <em:id>tel-signed-xpi@tests.mozilla.org</em:id>
+ <em:version>1.0</em:version>
+
+ <em:targetApplication>
+ <Description>
+ <em:id>toolkit@mozilla.org</em:id>
+ <em:minVersion>0</em:minVersion>
+ <em:maxVersion>*</em:maxVersion>
+ </Description>
+ </em:targetApplication>
+
+ <!-- Front End MetaData -->
+ <em:name>XPI Telemetry Signed Test</em:name>
+ <em:description>A signed addon which gets enabled without a reboot.</em:description>
+ <em:bootstrap>true</em:bootstrap>
+
+ </Description>
+</RDF>
diff --git a/toolkit/components/telemetry/tests/addons/system/install.rdf b/toolkit/components/telemetry/tests/addons/system/install.rdf
new file mode 100644
index 000000000..12cb143a7
--- /dev/null
+++ b/toolkit/components/telemetry/tests/addons/system/install.rdf
@@ -0,0 +1,24 @@
+<?xml version="1.0"?>
+
+<RDF xmlns="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+ xmlns:em="http://www.mozilla.org/2004/em-rdf#">
+
+ <Description about="urn:mozilla:install-manifest">
+ <em:id>tel-system-xpi@tests.mozilla.org</em:id>
+ <em:version>1.0</em:version>
+
+ <em:targetApplication>
+ <Description>
+ <em:id>toolkit@mozilla.org</em:id>
+ <em:minVersion>0</em:minVersion>
+ <em:maxVersion>*</em:maxVersion>
+ </Description>
+ </em:targetApplication>
+
+ <!-- Front End MetaData -->
+ <em:name>XPI Telemetry System Add-on Test</em:name>
+ <em:description>A system addon which is shipped with Firefox.</em:description>
+ <em:bootstrap>true</em:bootstrap>
+
+ </Description>
+</RDF>
diff --git a/toolkit/components/telemetry/tests/addons/theme/install.rdf b/toolkit/components/telemetry/tests/addons/theme/install.rdf
new file mode 100644
index 000000000..a35249dba
--- /dev/null
+++ b/toolkit/components/telemetry/tests/addons/theme/install.rdf
@@ -0,0 +1,16 @@
+<?xml version="1.0"?>
+
+<RDF xmlns="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+ xmlns:em="http://www.mozilla.org/2004/em-rdf#">
+
+ <Description about="urn:mozilla:install-manifest">
+ <em:id>telemetry-theme@tests.mozilla.org</em:id>
+ <em:version>1</em:version>
+ <em:type>4</em:type>
+
+ <!-- Front End MetaData -->
+ <em:name>Telemetry test theme</em:name>
+ <em:description>A good looking test theme for Telemetry.</em:description>
+
+ </Description>
+</RDF>
diff --git a/toolkit/components/telemetry/tests/browser/browser.ini b/toolkit/components/telemetry/tests/browser/browser.ini
new file mode 100644
index 000000000..a1725d54d
--- /dev/null
+++ b/toolkit/components/telemetry/tests/browser/browser.ini
@@ -0,0 +1,5 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+[browser_TelemetryGC.js]
diff --git a/toolkit/components/telemetry/tests/browser/browser_TelemetryGC.js b/toolkit/components/telemetry/tests/browser/browser_TelemetryGC.js
new file mode 100644
index 000000000..262fd69ff
--- /dev/null
+++ b/toolkit/components/telemetry/tests/browser/browser_TelemetryGC.js
@@ -0,0 +1,193 @@
+"use strict";
+
+/*
+ *********************************************************************************
+ * *
+ * WARNING *
+ * *
+ * If you adjust any of the constants here (slice limit, number of keys, etc.) *
+ * make sure to update the JSON schema at: *
+ * https://github.com/mozilla-services/mozilla-pipeline-schemas/blob/master/ *
+ * telemetry/main.schema.json *
+ * *
+ * Otherwise, pings may be dropped by the telemetry backend! *
+ * *
+ ********************************************************************************/
+
+const {GCTelemetry} = Cu.import("resource://gre/modules/GCTelemetry.jsm", {});
+
/**
 * Validates the structure of the GC telemetry entries object returned by
 * GCTelemetry.entries(): it must contain exactly the "random" and "worst"
 * keys, each holding an array of at most 2 GC records. Each record is checked
 * against the size limits asserted below (key counts, slice counts, phase
 * counts) and must hold only primitive values outside of "slices"/"totals".
 * Failures are reported through the test harness ok() assertions.
 *
 * @param entries Object with "random" and "worst" arrays of GC records.
 */
function check(entries) {
  const FIELDS = ["random", "worst"];

  // Check that all FIELDS are in |entries|.
  for (let f of FIELDS) {
    ok(f in entries, `${f} found in entries`);
  }

  // Check that only FIELDS are in |entries|.
  for (let k of Object.keys(entries)) {
    ok(FIELDS.includes(k), `${k} found in FIELDS`);
  }

  // Counts every non-null GC record seen across both fields; the final
  // assertion requires at least one.
  let foundGCs = 0;

  for (let f of FIELDS) {
    ok(Array.isArray(entries[f]), "have an array of GCs");

    ok(entries[f].length <= 2, "not too many GCs");

    for (let gc of entries[f]) {
      ok(gc !== null, "GC is non-null");

      foundGCs++;

      ok(Object.keys(gc).length <= 25, "number of keys in GC is not too large");

      // Sanity check the GC data.
      ok("total_time" in gc, "total_time field present");
      ok("max_pause" in gc, "max_pause field present");

      ok("slices" in gc, "slices field present");
      ok(Array.isArray(gc.slices), "slices is an array");
      ok(gc.slices.length > 0, "slices array non-empty");
      ok(gc.slices.length <= 4, "slices array is not too long");

      ok("totals" in gc, "totals field present");
      ok(typeof(gc.totals) == "object", "totals is an object");
      ok(Object.keys(gc.totals).length <= 65, "totals array is not too long");

      // Make sure we don't skip any big objects.
      for (let key in gc) {
        if (key != "slices" && key != "totals") {
          ok(typeof(gc[key]) != "object", `${key} property should be primitive`);
        }
      }

      // Accumulates every phase name seen in any slice, so we can later
      // verify each one is also accounted for in gc.totals.
      let phases = new Set();

      for (let slice of gc.slices) {
        ok(Object.keys(slice).length <= 15, "slice is not too large");

        ok("pause" in slice, "pause field present in slice");
        ok("reason" in slice, "reason field present in slice");
        ok("times" in slice, "times field present in slice");

        // Make sure we don't skip any big objects.
        for (let key in slice) {
          if (key != "times") {
            ok(typeof(slice[key]) != "object", `${key} property should be primitive`);
          }
        }

        ok(Object.keys(slice.times).length <= 65, "no more than 65 phases");

        for (let phase in slice.times) {
          phases.add(phase);
          ok(typeof(slice.times[phase]) == "number", `${phase} property should be a number`);
        }
      }

      let totals = gc.totals;
      // Make sure we don't skip any big objects.
      for (let phase in totals) {
        ok(typeof(totals[phase]) == "number", `${phase} property should be a number`);
      }

      // Every phase observed in a slice must be accumulated in totals.
      for (let phase of phases) {
        ok(phase in totals, `${phase} is in totals`);
      }
    }
  }

  ok(foundGCs > 0, "saw at least one GC");
}
+
/*
 * End-to-end exercise of GC telemetry collection: initialize GCTelemetry in
 * the parent process (and in the content process when e10s is enabled), force
 * a GC in each process, wait for the statistics to be reported, validate the
 * collected entries with check(), and finally verify that a follow-up fetch
 * returns no entries (the first fetch's second argument appears to request a
 * reset - NOTE(review): confirm against GCTelemetry.jsm).
 */
add_task(function* test() {
  let multiprocess = Services.appinfo.browserTabsRemoteAutostart;

  // Set these prefs to ensure that we get measurements.
  const prefs = {"set": [["javascript.options.mem.notify", true]]};
  yield new Promise(resolve => SpecialPowers.pushPrefEnv(prefs, resolve));

  // Runs |f| inside the content process by serializing it into a data: URI
  // frame script.
  function runRemote(f) {
    gBrowser.selectedBrowser.messageManager.loadFrameScript(`data:,(${f})()`, false);
  }

  // Frame script body: starts GC telemetry in the content process and wires
  // up a message listener so the parent can ask it to shut down again.
  function initScript() {
    const {GCTelemetry} = Components.utils.import("resource://gre/modules/GCTelemetry.jsm", {});

    /*
     * Don't shut down GC telemetry if it was already running before the test!
     * Note: We need to use a multiline comment here since this code is turned into a data: URI.
     */
    let shutdown = GCTelemetry.init();

    function listener() {
      removeMessageListener("GCTelemTest:Shutdown", listener);
      if (shutdown) {
        GCTelemetry.shutdown();
      }
    }
    addMessageListener("GCTelemTest:Shutdown", listener);
  }

  if (multiprocess) {
    runRemote(initScript);
  }

  // Don't shut down GC telemetry if it was already running before the test!
  let shutdown = GCTelemetry.init();
  registerCleanupFunction(() => {
    if (shutdown) {
      GCTelemetry.shutdown();
    }

    gBrowser.selectedBrowser.messageManager.sendAsyncMessage("GCTelemTest:Shutdown");
  });

  // Resolves once the parent process reports GC statistics.
  let localPromise = new Promise(resolve => {
    function obs() {
      Services.obs.removeObserver(obs, "garbage-collection-statistics");
      resolve();
    }
    Services.obs.addObserver(obs, "garbage-collection-statistics", false);
  });

  // Resolves once the content process reports GC statistics (immediately in
  // single-process mode, where there is nothing to wait for).
  let remotePromise;
  if (multiprocess) {
    remotePromise = new Promise(resolve => {
      function obs() {
        Services.ppmm.removeMessageListener("Telemetry:GCStatistics", obs);
        resolve();
      }
      Services.ppmm.addMessageListener("Telemetry:GCStatistics", obs);
    });
  } else {
    remotePromise = Promise.resolve();
  }

  // Make sure we have a GC to work with in both processes.
  Cu.forceGC();
  if (multiprocess) {
    runRemote(() => Components.utils.forceGC());
  }

  info("Waiting for GCs");

  yield Promise.all([localPromise, remotePromise]);

  let localEntries = GCTelemetry.entries("main", true);
  let remoteEntries = multiprocess ? GCTelemetry.entries("content", true) : localEntries;

  check(localEntries);
  check(remoteEntries);

  // Fetch again without requesting data; both processes should now be empty.
  localEntries = GCTelemetry.entries("main", false);
  remoteEntries = multiprocess ? GCTelemetry.entries("content", false) : localEntries;

  is(localEntries.random.length, 0, "no random GCs after reset");
  is(localEntries.worst.length, 0, "no worst GCs after reset");

  is(remoteEntries.random.length, 0, "no random GCs after reset");
  is(remoteEntries.worst.length, 0, "no worst GCs after reset");
});
diff --git a/toolkit/components/telemetry/tests/search/chrome.manifest b/toolkit/components/telemetry/tests/search/chrome.manifest
new file mode 100644
index 000000000..ec412e050
--- /dev/null
+++ b/toolkit/components/telemetry/tests/search/chrome.manifest
@@ -0,0 +1,3 @@
+locale testsearchplugin ar jar:jar:searchTest.jar!/chrome/searchTest.jar!/
+content testsearchplugin ./
+
diff --git a/toolkit/components/telemetry/tests/search/searchTest.jar b/toolkit/components/telemetry/tests/search/searchTest.jar
new file mode 100644
index 000000000..b10fc0c3e
--- /dev/null
+++ b/toolkit/components/telemetry/tests/search/searchTest.jar
Binary files differ
diff --git a/toolkit/components/telemetry/tests/unit/.eslintrc.js b/toolkit/components/telemetry/tests/unit/.eslintrc.js
new file mode 100644
index 000000000..d35787cd2
--- /dev/null
+++ b/toolkit/components/telemetry/tests/unit/.eslintrc.js
@@ -0,0 +1,7 @@
"use strict";

// ESLint configuration for the telemetry xpcshell unit tests; all rules are
// inherited from the shared xpcshell test ruleset.
module.exports = {
  "extends": [
    "../../../../../testing/xpcshell/xpcshell.eslintrc.js"
  ]
};
diff --git a/toolkit/components/telemetry/tests/unit/TelemetryArchiveTesting.jsm b/toolkit/components/telemetry/tests/unit/TelemetryArchiveTesting.jsm
new file mode 100644
index 000000000..9be82c883
--- /dev/null
+++ b/toolkit/components/telemetry/tests/unit/TelemetryArchiveTesting.jsm
@@ -0,0 +1,86 @@
+const {utils: Cu} = Components;
+Cu.import("resource://gre/modules/TelemetryArchive.jsm");
+Cu.import("resource://testing-common/Assert.jsm");
+Cu.import("resource://gre/modules/Task.jsm");
+Cu.import("resource://gre/modules/Services.jsm");
+Cu.import("resource://gre/modules/TelemetryController.jsm");
+
+this.EXPORTED_SYMBOLS = [
+ "TelemetryArchiveTesting",
+];
+
+function checkForProperties(ping, expected) {
+ for (let [props, val] of expected) {
+ let test = ping;
+ for (let prop of props) {
+ test = test[prop];
+ if (test === undefined) {
+ return false;
+ }
+ }
+ if (test !== val) {
+ return false;
+ }
+ }
+ return true;
+}
+
+/**
+ * A helper object that allows test code to check whether a telemetry ping
+ * was properly saved. To use, first initialize to collect the starting pings
+ * and then check for new ping data.
+ */
+function Checker() {
+}
+Checker.prototype = {
+ promiseInit: function() {
+ this._pingMap = new Map();
+ return TelemetryArchive.promiseArchivedPingList().then((plist) => {
+ for (let ping of plist) {
+ this._pingMap.set(ping.id, ping);
+ }
+ });
+ },
+
+ /**
+ * Find and return a new ping with certain properties.
+ *
+ * @param expected: an array of [['prop'...], 'value'] to check
+ * For example:
+ * [
+ * [['environment', 'build', 'applicationId'], '20150101010101'],
+ * [['version'], 1],
+ * [['metadata', 'OOMAllocationSize'], 123456789],
+ * ]
+ * @returns a matching ping if found, or null
+ */
+ promiseFindPing: Task.async(function*(type, expected) {
+ let candidates = [];
+ let plist = yield TelemetryArchive.promiseArchivedPingList();
+ for (let ping of plist) {
+ if (this._pingMap.has(ping.id)) {
+ continue;
+ }
+ if (ping.type == type) {
+ candidates.push(ping);
+ }
+ }
+
+ for (let candidate of candidates) {
+ let ping = yield TelemetryArchive.promiseArchivedPingById(candidate.id);
+ if (checkForProperties(ping, expected)) {
+ return ping;
+ }
+ }
+ return null;
+ }),
+};
+
+const TelemetryArchiveTesting = {
+ setup: function() {
+ Services.prefs.setCharPref("toolkit.telemetry.log.level", "Trace");
+ Services.prefs.setBoolPref("toolkit.telemetry.archive.enabled", true);
+ },
+
+ Checker: Checker,
+};
diff --git a/toolkit/components/telemetry/tests/unit/engine.xml b/toolkit/components/telemetry/tests/unit/engine.xml
new file mode 100644
index 000000000..2304fcdd7
--- /dev/null
+++ b/toolkit/components/telemetry/tests/unit/engine.xml
@@ -0,0 +1,7 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<SearchPlugin xmlns="http://www.mozilla.org/2006/browser/search/">
+<ShortName>engine-telemetry</ShortName>
+<Url type="text/html" method="GET" template="http://www.example.com/search">
+ <Param name="q" value="{searchTerms}"/>
+</Url>
+</SearchPlugin>
diff --git a/toolkit/components/telemetry/tests/unit/head.js b/toolkit/components/telemetry/tests/unit/head.js
new file mode 100644
index 000000000..51be25766
--- /dev/null
+++ b/toolkit/components/telemetry/tests/unit/head.js
@@ -0,0 +1,319 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/ */
+
+var { classes: Cc, utils: Cu, interfaces: Ci, results: Cr } = Components;
+
+Cu.import("resource://gre/modules/TelemetryController.jsm", this);
+Cu.import("resource://gre/modules/Services.jsm", this);
+Cu.import("resource://gre/modules/PromiseUtils.jsm", this);
+Cu.import("resource://gre/modules/Task.jsm", this);
+Cu.import("resource://gre/modules/FileUtils.jsm", this);
+Cu.import("resource://gre/modules/XPCOMUtils.jsm", this);
+Cu.import("resource://testing-common/httpd.js", this);
+Cu.import("resource://gre/modules/AppConstants.jsm");
+
+XPCOMUtils.defineLazyModuleGetter(this, "AddonTestUtils",
+ "resource://testing-common/AddonTestUtils.jsm");
+XPCOMUtils.defineLazyModuleGetter(this, "OS",
+ "resource://gre/modules/osfile.jsm");
+
+const gIsWindows = AppConstants.platform == "win";
+const gIsMac = AppConstants.platform == "macosx";
+const gIsAndroid = AppConstants.platform == "android";
+const gIsGonk = AppConstants.platform == "gonk";
+const gIsLinux = AppConstants.platform == "linux";
+
+const Telemetry = Cc["@mozilla.org/base/telemetry;1"].getService(Ci.nsITelemetry);
+
+const MILLISECONDS_PER_MINUTE = 60 * 1000;
+const MILLISECONDS_PER_HOUR = 60 * MILLISECONDS_PER_MINUTE;
+const MILLISECONDS_PER_DAY = 24 * MILLISECONDS_PER_HOUR;
+
+const PREF_TELEMETRY_ENABLED = "toolkit.telemetry.enabled";
+
+const UUID_REGEX = /^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$/i;
+
+var gGlobalScope = this;
+
+const PingServer = {
+ _httpServer: null,
+ _started: false,
+ _defers: [ PromiseUtils.defer() ],
+ _currentDeferred: 0,
+
+ get port() {
+ return this._httpServer.identity.primaryPort;
+ },
+
+ get started() {
+ return this._started;
+ },
+
+ registerPingHandler: function(handler) {
+ const wrapped = wrapWithExceptionHandler(handler);
+ this._httpServer.registerPrefixHandler("/submit/telemetry/", wrapped);
+ },
+
+ resetPingHandler: function() {
+ this.registerPingHandler((request, response) => {
+ let deferred = this._defers[this._defers.length - 1];
+ this._defers.push(PromiseUtils.defer());
+ deferred.resolve(request);
+ });
+ },
+
+ start: function() {
+ this._httpServer = new HttpServer();
+ this._httpServer.start(-1);
+ this._started = true;
+ this.clearRequests();
+ this.resetPingHandler();
+ },
+
+ stop: function() {
+ return new Promise(resolve => {
+ this._httpServer.stop(resolve);
+ this._started = false;
+ });
+ },
+
+ clearRequests: function() {
+ this._defers = [ PromiseUtils.defer() ];
+ this._currentDeferred = 0;
+ },
+
+ promiseNextRequest: function() {
+ const deferred = this._defers[this._currentDeferred++];
+ // Send the ping to the consumer on the next tick, so that the completion gets
+ // signaled to Telemetry.
+ return new Promise(r => Services.tm.currentThread.dispatch(() => r(deferred.promise),
+ Ci.nsIThread.DISPATCH_NORMAL));
+ },
+
+ promiseNextPing: function() {
+ return this.promiseNextRequest().then(request => decodeRequestPayload(request));
+ },
+
+ promiseNextRequests: Task.async(function*(count) {
+ let results = [];
+ for (let i=0; i<count; ++i) {
+ results.push(yield this.promiseNextRequest());
+ }
+
+ return results;
+ }),
+
+ promiseNextPings: function(count) {
+ return this.promiseNextRequests(count).then(requests => {
+ return Array.from(requests, decodeRequestPayload);
+ });
+ },
+};
+
+/**
+ * Decode the payload of an HTTP request into a ping.
+ * @param {Object} request The data representing an HTTP request (nsIHttpRequest).
+ * @return {Object} The decoded ping payload.
+ */
+function decodeRequestPayload(request) {
+ let s = request.bodyInputStream;
+ let payload = null;
+ let decoder = Cc["@mozilla.org/dom/json;1"].createInstance(Ci.nsIJSON)
+
+ if (request.getHeader("content-encoding") == "gzip") {
+ let observer = {
+ buffer: "",
+ onStreamComplete: function(loader, context, status, length, result) {
+ this.buffer = String.fromCharCode.apply(this, result);
+ }
+ };
+
+ let scs = Cc["@mozilla.org/streamConverters;1"]
+ .getService(Ci.nsIStreamConverterService);
+ let listener = Cc["@mozilla.org/network/stream-loader;1"]
+ .createInstance(Ci.nsIStreamLoader);
+ listener.init(observer);
+ let converter = scs.asyncConvertData("gzip", "uncompressed",
+ listener, null);
+ converter.onStartRequest(null, null);
+ converter.onDataAvailable(null, null, s, 0, s.available());
+ converter.onStopRequest(null, null, null);
+ let unicodeConverter = Cc["@mozilla.org/intl/scriptableunicodeconverter"]
+ .createInstance(Ci.nsIScriptableUnicodeConverter);
+ unicodeConverter.charset = "UTF-8";
+ let utf8string = unicodeConverter.ConvertToUnicode(observer.buffer);
+ utf8string += unicodeConverter.Finish();
+ payload = JSON.parse(utf8string);
+ } else {
+ payload = decoder.decodeFromStream(s, s.available());
+ }
+
+ return payload;
+}
+
+function wrapWithExceptionHandler(f) {
+ function wrapper(...args) {
+ try {
+ f(...args);
+ } catch (ex) {
+ if (typeof(ex) != 'object') {
+ throw ex;
+ }
+ dump("Caught exception: " + ex.message + "\n");
+ dump(ex.stack);
+ do_test_finished();
+ }
+ }
+ return wrapper;
+}
+
+function loadAddonManager(...args) {
+ AddonTestUtils.init(gGlobalScope);
+ AddonTestUtils.overrideCertDB();
+ createAppInfo(...args);
+
+  // As we're not running in an application, we need to set up the features directory
+ // used by system add-ons.
+ const distroDir = FileUtils.getDir("ProfD", ["sysfeatures", "app0"], true);
+ AddonTestUtils.registerDirectory("XREAppFeat", distroDir);
+ return AddonTestUtils.promiseStartupManager();
+}
+
+var gAppInfo = null;
+
+function createAppInfo(ID="xpcshell@tests.mozilla.org", name="XPCShell",
+ version="1.0", platformVersion="1.0") {
+ AddonTestUtils.createAppInfo(ID, name, version, platformVersion);
+ gAppInfo = AddonTestUtils.appInfo;
+}
+
+// Fake the timeout functions for the TelemetryScheduler.
+function fakeSchedulerTimer(set, clear) {
+ let session = Cu.import("resource://gre/modules/TelemetrySession.jsm");
+ session.Policy.setSchedulerTickTimeout = set;
+ session.Policy.clearSchedulerTickTimeout = clear;
+}
+
+/**
+ * Fake the current date.
+ * This passes all received arguments to a new Date constructor and
+ * uses the resulting date to fake the time in Telemetry modules.
+ *
+ * @return Date The new faked date.
+ */
+function fakeNow(...args) {
+ const date = new Date(...args);
+ const modules = [
+ Cu.import("resource://gre/modules/TelemetrySession.jsm"),
+ Cu.import("resource://gre/modules/TelemetryEnvironment.jsm"),
+ Cu.import("resource://gre/modules/TelemetryController.jsm"),
+ Cu.import("resource://gre/modules/TelemetryStorage.jsm"),
+ Cu.import("resource://gre/modules/TelemetrySend.jsm"),
+ Cu.import("resource://gre/modules/TelemetryReportingPolicy.jsm"),
+ ];
+
+ for (let m of modules) {
+ m.Policy.now = () => date;
+ }
+
+ return new Date(date);
+}
+
+function fakeMonotonicNow(ms) {
+ const m = Cu.import("resource://gre/modules/TelemetrySession.jsm");
+ m.Policy.monotonicNow = () => ms;
+ return ms;
+}
+
+// Fake the timeout functions for TelemetryController sending.
+function fakePingSendTimer(set, clear) {
+ let module = Cu.import("resource://gre/modules/TelemetrySend.jsm");
+ let obj = Cu.cloneInto({set, clear}, module, {cloneFunctions:true});
+ module.Policy.setSchedulerTickTimeout = obj.set;
+ module.Policy.clearSchedulerTickTimeout = obj.clear;
+}
+
+function fakeMidnightPingFuzzingDelay(delayMs) {
+ let module = Cu.import("resource://gre/modules/TelemetrySend.jsm");
+ module.Policy.midnightPingFuzzingDelay = () => delayMs;
+}
+
+function fakeGeneratePingId(func) {
+ let module = Cu.import("resource://gre/modules/TelemetryController.jsm");
+ module.Policy.generatePingId = func;
+}
+
+function fakeCachedClientId(uuid) {
+ let module = Cu.import("resource://gre/modules/TelemetryController.jsm");
+ module.Policy.getCachedClientID = () => uuid;
+}
+
+// Return a date that is |offset| ms in the future from |date|.
+function futureDate(date, offset) {
+ return new Date(date.getTime() + offset);
+}
+
+function truncateToDays(aMsec) {
+ return Math.floor(aMsec / MILLISECONDS_PER_DAY);
+}
+
+// Returns a promise that resolves to true when the passed promise rejects,
+// false otherwise.
+function promiseRejects(promise) {
+ return promise.then(() => false, () => true);
+}
+
+// Generates a random string of exactly the given length.
+function generateRandomString(length) {
+ let string = "";
+
+ while (string.length < length) {
+ string += Math.random().toString(36);
+ }
+
+ return string.substring(0, length);
+}
+
+// Short-hand for retrieving the histogram with that id.
+function getHistogram(histogramId) {
+ return Telemetry.getHistogramById(histogramId);
+}
+
+// Short-hand for retrieving the snapshot of the Histogram with that id.
+function getSnapshot(histogramId) {
+ return Telemetry.getHistogramById(histogramId).snapshot();
+}
+
+// Helper for setting an empty list of Environment preferences to watch.
+function setEmptyPrefWatchlist() {
+ let TelemetryEnvironment =
+ Cu.import("resource://gre/modules/TelemetryEnvironment.jsm").TelemetryEnvironment;
+ return TelemetryEnvironment.onInitialized().then(() => {
+ TelemetryEnvironment.testWatchPreferences(new Map());
+ });
+}
+
+if (runningInParent) {
+ // Set logging preferences for all the tests.
+ Services.prefs.setCharPref("toolkit.telemetry.log.level", "Trace");
+ // Telemetry archiving should be on.
+ Services.prefs.setBoolPref("toolkit.telemetry.archive.enabled", true);
+ // Telemetry xpcshell tests cannot show the infobar.
+ Services.prefs.setBoolPref("datareporting.policy.dataSubmissionPolicyBypassNotification", true);
+ // FHR uploads should be enabled.
+ Services.prefs.setBoolPref("datareporting.healthreport.uploadEnabled", true);
+
+ fakePingSendTimer((callback, timeout) => {
+ Services.tm.mainThread.dispatch(() => callback(), Ci.nsIThread.DISPATCH_NORMAL);
+ },
+ () => {});
+
+ do_register_cleanup(() => TelemetrySend.shutdown());
+}
+
+TelemetryController.testInitLogging();
+
+// Avoid timers interrupting test behavior.
+fakeSchedulerTimer(() => {}, () => {});
+// Make ping sending predictable.
+fakeMidnightPingFuzzingDelay(0);
diff --git a/toolkit/components/telemetry/tests/unit/test_ChildHistograms.js b/toolkit/components/telemetry/tests/unit/test_ChildHistograms.js
new file mode 100644
index 000000000..11d730499
--- /dev/null
+++ b/toolkit/components/telemetry/tests/unit/test_ChildHistograms.js
@@ -0,0 +1,107 @@
+
+Cu.import("resource://gre/modules/TelemetryController.jsm", this);
+Cu.import("resource://gre/modules/TelemetrySession.jsm", this);
+Cu.import("resource://gre/modules/PromiseUtils.jsm", this);
+Cu.import("resource://testing-common/ContentTaskUtils.jsm", this);
+
+const MESSAGE_TELEMETRY_PAYLOAD = "Telemetry:Payload";
+const MESSAGE_TELEMETRY_GET_CHILD_PAYLOAD = "Telemetry:GetChildPayload";
+const MESSAGE_CHILD_TEST_DONE = "ChildTest:Done";
+
+const PLATFORM_VERSION = "1.9.2";
+const APP_VERSION = "1";
+const APP_ID = "xpcshell@tests.mozilla.org";
+const APP_NAME = "XPCShell";
+
+function run_child_test() {
+ // Setup histograms with some fixed values.
+ let flagHist = Telemetry.getHistogramById("TELEMETRY_TEST_FLAG");
+ flagHist.add(1);
+ let countHist = Telemetry.getHistogramById("TELEMETRY_TEST_COUNT");
+ Telemetry.setHistogramRecordingEnabled("TELEMETRY_TEST_COUNT", false);
+ countHist.add();
+ Telemetry.setHistogramRecordingEnabled("TELEMETRY_TEST_COUNT", true);
+ countHist.add();
+ countHist.add();
+ let categHist = Telemetry.getHistogramById("TELEMETRY_TEST_CATEGORICAL");
+ categHist.add("Label2");
+ categHist.add("Label3");
+
+ let flagKeyed = Telemetry.getKeyedHistogramById("TELEMETRY_TEST_KEYED_FLAG");
+ flagKeyed.add("a", 1);
+ flagKeyed.add("b", 1);
+ let countKeyed = Telemetry.getKeyedHistogramById("TELEMETRY_TEST_KEYED_COUNT");
+ Telemetry.setHistogramRecordingEnabled("TELEMETRY_TEST_KEYED_COUNT", false);
+ countKeyed.add("a");
+ countKeyed.add("b");
+ Telemetry.setHistogramRecordingEnabled("TELEMETRY_TEST_KEYED_COUNT", true);
+ countKeyed.add("a");
+ countKeyed.add("b");
+ countKeyed.add("b");
+}
+
+function check_histogram_values(payload) {
+ const hs = payload.histograms;
+ Assert.ok("TELEMETRY_TEST_COUNT" in hs, "Should have count test histogram.");
+ Assert.ok("TELEMETRY_TEST_FLAG" in hs, "Should have flag test histogram.");
+ Assert.ok("TELEMETRY_TEST_CATEGORICAL" in hs, "Should have categorical test histogram.");
+ Assert.equal(hs["TELEMETRY_TEST_COUNT"].sum, 2,
+ "Count test histogram should have the right value.");
+ Assert.equal(hs["TELEMETRY_TEST_FLAG"].sum, 1,
+ "Flag test histogram should have the right value.");
+ Assert.equal(hs["TELEMETRY_TEST_CATEGORICAL"].sum, 3,
+ "Categorical test histogram should have the right sum.");
+
+ const kh = payload.keyedHistograms;
+ Assert.ok("TELEMETRY_TEST_KEYED_COUNT" in kh, "Should have keyed count test histogram.");
+ Assert.ok("TELEMETRY_TEST_KEYED_FLAG" in kh, "Should have keyed flag test histogram.");
+ Assert.equal(kh["TELEMETRY_TEST_KEYED_COUNT"]["a"].sum, 1,
+ "Keyed count test histogram should have the right value.");
+ Assert.equal(kh["TELEMETRY_TEST_KEYED_COUNT"]["b"].sum, 2,
+ "Keyed count test histogram should have the right value.");
+ Assert.equal(kh["TELEMETRY_TEST_KEYED_FLAG"]["a"].sum, 1,
+ "Keyed flag test histogram should have the right value.");
+ Assert.equal(kh["TELEMETRY_TEST_KEYED_FLAG"]["b"].sum, 1,
+ "Keyed flag test histogram should have the right value.");
+}
+
+add_task(function*() {
+ if (!runningInParent) {
+ TelemetryController.testSetupContent();
+ run_child_test();
+ dump("... done with child test\n");
+ do_send_remote_message(MESSAGE_CHILD_TEST_DONE);
+ return;
+ }
+
+ // Setup.
+ do_get_profile(true);
+ loadAddonManager(APP_ID, APP_NAME, APP_VERSION, PLATFORM_VERSION);
+ Services.prefs.setBoolPref(PREF_TELEMETRY_ENABLED, true);
+ yield TelemetryController.testSetup();
+ if (runningInParent) {
+ // Make sure we don't generate unexpected pings due to pref changes.
+ yield setEmptyPrefWatchlist();
+ }
+
+ // Run test in child, don't wait for it to finish.
+ run_test_in_child("test_ChildHistograms.js");
+ yield do_await_remote_message(MESSAGE_CHILD_TEST_DONE);
+
+ yield ContentTaskUtils.waitForCondition(() => {
+ let payload = TelemetrySession.getPayload("test-ping");
+ return payload &&
+ "processes" in payload &&
+ "content" in payload.processes &&
+ "histograms" in payload.processes.content &&
+ "TELEMETRY_TEST_COUNT" in payload.processes.content.histograms;
+ });
+ const payload = TelemetrySession.getPayload("test-ping");
+ Assert.ok("processes" in payload, "Should have processes section");
+ Assert.ok("content" in payload.processes, "Should have child process section");
+ Assert.ok("histograms" in payload.processes.content, "Child process section should have histograms.");
+ Assert.ok("keyedHistograms" in payload.processes.content, "Child process section should have keyed histograms.");
+ check_histogram_values(payload.processes.content);
+
+ do_test_finished();
+});
diff --git a/toolkit/components/telemetry/tests/unit/test_PingAPI.js b/toolkit/components/telemetry/tests/unit/test_PingAPI.js
new file mode 100644
index 000000000..d4d79aad4
--- /dev/null
+++ b/toolkit/components/telemetry/tests/unit/test_PingAPI.js
@@ -0,0 +1,502 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/
+*/
+
+// This tests the public Telemetry API for submitting pings.
+
+"use strict";
+
+Cu.import("resource://gre/modules/ClientID.jsm", this);
+Cu.import("resource://gre/modules/TelemetryController.jsm", this);
+Cu.import("resource://gre/modules/TelemetryArchive.jsm", this);
+Cu.import("resource://gre/modules/XPCOMUtils.jsm", this);
+Cu.import("resource://gre/modules/osfile.jsm", this);
+Cu.import("resource://gre/modules/Task.jsm", this);
+Cu.import("resource://gre/modules/Services.jsm", this);
+
+XPCOMUtils.defineLazyGetter(this, "gPingsArchivePath", function() {
+ return OS.Path.join(OS.Constants.Path.profileDir, "datareporting", "archived");
+});
+
+/**
+ * Fakes the archive storage quota.
+ * @param {Integer} aArchiveQuota The new quota, in bytes.
+ */
+function fakeStorageQuota(aArchiveQuota) {
+ let storage = Cu.import("resource://gre/modules/TelemetryStorage.jsm");
+ storage.Policy.getArchiveQuota = () => aArchiveQuota;
+}
+
+/**
+ * Lists all the valid archived pings and their metadata, sorted by creation date.
+ *
+ * (Takes no parameters; scans the archive directory on disk.)
+ * @return {Object[]} A list of objects with the extracted data in the form:
+ * { timestamp: <number>,
+ * id: <string>,
+ * type: <string>,
+ * size: <integer> }
+ */
+var getArchivedPingsInfo = Task.async(function*() {
+ let dirIterator = new OS.File.DirectoryIterator(gPingsArchivePath);
+ let subdirs = (yield dirIterator.nextBatch()).filter(e => e.isDir);
+ let archivedPings = [];
+
+ // Iterate through the subdirs of |gPingsArchivePath|.
+ for (let dir of subdirs) {
+ let fileIterator = new OS.File.DirectoryIterator(dir.path);
+ let files = (yield fileIterator.nextBatch()).filter(e => !e.isDir);
+
+ // Then get a list of the files for the current subdir.
+ for (let f of files) {
+ let pingInfo = TelemetryStorage._testGetArchivedPingDataFromFileName(f.name);
+ if (!pingInfo) {
+ // This is not a valid archived ping, skip it.
+ continue;
+ }
+ // Find the size of the ping and then add the info to the array.
+ pingInfo.size = (yield OS.File.stat(f.path)).size;
+ archivedPings.push(pingInfo);
+ }
+ }
+
+ // Sort the list by creation date and then return it.
+ archivedPings.sort((a, b) => b.timestamp - a.timestamp);
+ return archivedPings;
+});
+
+add_task(function* test_setup() {
+ do_get_profile(true);
+ // Make sure we don't generate unexpected pings due to pref changes.
+ yield setEmptyPrefWatchlist();
+ Services.prefs.setBoolPref(PREF_TELEMETRY_ENABLED, true);
+});
+
+add_task(function* test_archivedPings() {
+ // TelemetryController should not be fully initialized at this point.
+ // Submitting pings should still work fine.
+
+ const PINGS = [
+ {
+ type: "test-ping-api-1",
+ payload: { foo: "bar"},
+ dateCreated: new Date(2010, 1, 1, 10, 0, 0),
+ },
+ {
+ type: "test-ping-api-2",
+ payload: { moo: "meh"},
+ dateCreated: new Date(2010, 2, 1, 10, 0, 0),
+ },
+ ];
+
+ // Submit pings and check the ping list.
+ let expectedPingList = [];
+
+ for (let data of PINGS) {
+ fakeNow(data.dateCreated);
+ data.id = yield TelemetryController.submitExternalPing(data.type, data.payload);
+ let list = yield TelemetryArchive.promiseArchivedPingList();
+
+ expectedPingList.push({
+ id: data.id,
+ type: data.type,
+ timestampCreated: data.dateCreated.getTime(),
+ });
+ Assert.deepEqual(list, expectedPingList, "Archived ping list should contain submitted pings");
+ }
+
+ // Check loading the archived pings.
+ let checkLoadingPings = Task.async(function*() {
+ for (let data of PINGS) {
+ let ping = yield TelemetryArchive.promiseArchivedPingById(data.id);
+ Assert.equal(ping.id, data.id, "Archived ping should have matching id");
+ Assert.equal(ping.type, data.type, "Archived ping should have matching type");
+ Assert.equal(ping.creationDate, data.dateCreated.toISOString(),
+ "Archived ping should have matching creation date");
+ }
+ });
+
+ yield checkLoadingPings();
+
+ // Check that we find the archived pings again by scanning after a restart.
+ yield TelemetryController.testReset();
+
+ let pingList = yield TelemetryArchive.promiseArchivedPingList();
+ Assert.deepEqual(expectedPingList, pingList,
+ "Should have submitted pings in archive list after restart");
+ yield checkLoadingPings();
+
+ // Write invalid pings into the archive with both valid and invalid names.
+ let writeToArchivedDir = Task.async(function*(dirname, filename, content, compressed) {
+ const dirPath = OS.Path.join(gPingsArchivePath, dirname);
+ yield OS.File.makeDir(dirPath, { ignoreExisting: true });
+ const filePath = OS.Path.join(dirPath, filename);
+ const options = { tmpPath: filePath + ".tmp", noOverwrite: false };
+ if (compressed) {
+ options.compression = "lz4";
+ }
+ yield OS.File.writeAtomic(filePath, content, options);
+ });
+
+ const FAKE_ID1 = "10000000-0123-0123-0123-0123456789a1";
+ const FAKE_ID2 = "20000000-0123-0123-0123-0123456789a2";
+ const FAKE_ID3 = "20000000-0123-0123-0123-0123456789a3";
+ const FAKE_TYPE = "foo";
+
+ // These should get rejected.
+ yield writeToArchivedDir("xx", "foo.json", "{}");
+ yield writeToArchivedDir("2010-02", "xx.xx.xx.json", "{}");
+ // This one should get picked up...
+ yield writeToArchivedDir("2010-02", "1." + FAKE_ID1 + "." + FAKE_TYPE + ".json", "{}");
+ // ... but get overwritten by this one.
+ yield writeToArchivedDir("2010-02", "2." + FAKE_ID1 + "." + FAKE_TYPE + ".json", "");
+ // This should get picked up fine.
+ yield writeToArchivedDir("2010-02", "3." + FAKE_ID2 + "." + FAKE_TYPE + ".json", "");
+ // This compressed ping should get picked up fine as well.
+ yield writeToArchivedDir("2010-02", "4." + FAKE_ID3 + "." + FAKE_TYPE + ".jsonlz4", "");
+
+ expectedPingList.push({
+ id: FAKE_ID1,
+ type: "foo",
+ timestampCreated: 2,
+ });
+ expectedPingList.push({
+ id: FAKE_ID2,
+ type: "foo",
+ timestampCreated: 3,
+ });
+ expectedPingList.push({
+ id: FAKE_ID3,
+ type: "foo",
+ timestampCreated: 4,
+ });
+ expectedPingList.sort((a, b) => a.timestampCreated - b.timestampCreated);
+
+ // Reset the TelemetryArchive so we scan the archived dir again.
+ yield TelemetryController.testReset();
+
+ // Check that we are still picking up the valid archived pings on disk,
+ // plus the valid ones above.
+ pingList = yield TelemetryArchive.promiseArchivedPingList();
+ Assert.deepEqual(expectedPingList, pingList, "Should have picked up valid archived pings");
+ yield checkLoadingPings();
+
+ // Now check that we fail to load the two invalid pings from above.
+ Assert.ok((yield promiseRejects(TelemetryArchive.promiseArchivedPingById(FAKE_ID1))),
+ "Should have rejected invalid ping");
+ Assert.ok((yield promiseRejects(TelemetryArchive.promiseArchivedPingById(FAKE_ID2))),
+ "Should have rejected invalid ping");
+});
+
+add_task(function* test_archiveCleanup() {
+ const PING_TYPE = "foo";
+
+ // Empty the archive.
+ yield OS.File.removeDir(gPingsArchivePath);
+
+ Telemetry.getHistogramById("TELEMETRY_ARCHIVE_SCAN_PING_COUNT").clear();
+ Telemetry.getHistogramById("TELEMETRY_ARCHIVE_DIRECTORIES_COUNT").clear();
+ // Also reset these histograms to make sure normal sized pings don't get counted.
+ Telemetry.getHistogramById("TELEMETRY_PING_SIZE_EXCEEDED_ARCHIVED").clear();
+ Telemetry.getHistogramById("TELEMETRY_DISCARDED_ARCHIVED_PINGS_SIZE_MB").clear();
+
+ // Build the cache. Nothing should be evicted as there's no ping directory.
+ yield TelemetryController.testReset();
+ yield TelemetryStorage.testCleanupTaskPromise();
+ yield TelemetryArchive.promiseArchivedPingList();
+
+ let h = Telemetry.getHistogramById("TELEMETRY_ARCHIVE_SCAN_PING_COUNT").snapshot();
+ Assert.equal(h.sum, 0, "Telemetry must report 0 pings scanned if no archive dir exists.");
+ // One directory out of four was removed as well.
+ h = Telemetry.getHistogramById("TELEMETRY_ARCHIVE_EVICTED_OLD_DIRS").snapshot();
+ Assert.equal(h.sum, 0, "Telemetry must report 0 evicted dirs if no archive dir exists.");
+
+ let expectedPrunedInfo = [];
+ let expectedNotPrunedInfo = [];
+
+ let checkArchive = Task.async(function*() {
+ // Check that the pruned pings are not on disk anymore.
+ for (let prunedInfo of expectedPrunedInfo) {
+ yield Assert.rejects(TelemetryArchive.promiseArchivedPingById(prunedInfo.id),
+ "Ping " + prunedInfo.id + " should have been pruned.");
+ const pingPath =
+ TelemetryStorage._testGetArchivedPingPath(prunedInfo.id, prunedInfo.creationDate, PING_TYPE);
+ Assert.ok(!(yield OS.File.exists(pingPath)), "The ping should not be on the disk anymore.");
+ }
+
+ // Check that the expected pings are there.
+ for (let expectedInfo of expectedNotPrunedInfo) {
+ Assert.ok((yield TelemetryArchive.promiseArchivedPingById(expectedInfo.id)),
+ "Ping" + expectedInfo.id + " should be in the archive.");
+ }
+ });
+
+ Telemetry.getHistogramById("TELEMETRY_ARCHIVE_SESSION_PING_COUNT").clear();
+
+ // Create a ping which should be pruned because it is past the retention period.
+ let date = fakeNow(2010, 1, 1, 1, 0, 0);
+ let firstDate = date;
+ let pingId = yield TelemetryController.submitExternalPing(PING_TYPE, {}, {});
+ expectedPrunedInfo.push({ id: pingId, creationDate: date });
+
+ // Create a ping which should be kept because it is within the retention period.
+ const oldestDirectoryDate = fakeNow(2010, 2, 1, 1, 0, 0);
+ pingId = yield TelemetryController.submitExternalPing(PING_TYPE, {}, {});
+ expectedNotPrunedInfo.push({ id: pingId, creationDate: oldestDirectoryDate });
+
+ // Create 20 other pings which are within the retention period, but would be affected
+ // by the disk quota.
+ for (let month of [3, 4]) {
+ for (let minute = 0; minute < 10; minute++) {
+ date = fakeNow(2010, month, 1, 1, minute, 0);
+ pingId = yield TelemetryController.submitExternalPing(PING_TYPE, {}, {});
+ expectedNotPrunedInfo.push({ id: pingId, creationDate: date });
+ }
+ }
+
+ // We expect all the pings we archived to be in this histogram.
+ h = Telemetry.getHistogramById("TELEMETRY_ARCHIVE_SESSION_PING_COUNT");
+ Assert.equal(h.snapshot().sum, 22, "All the pings must be live-accumulated in the histogram.");
+ // Reset the histogram that will be populated by the archive scan.
+ Telemetry.getHistogramById("TELEMETRY_ARCHIVE_EVICTED_OLD_DIRS").clear();
+ Telemetry.getHistogramById("TELEMETRY_ARCHIVE_OLDEST_DIRECTORY_AGE").clear();
+
+ // Move the current date 60 days ahead of the first ping.
+ fakeNow(futureDate(firstDate, 60 * MILLISECONDS_PER_DAY));
+ // Reset TelemetryArchive and TelemetryController to start the startup cleanup.
+ yield TelemetryController.testReset();
+ // Wait for the cleanup to finish.
+ yield TelemetryStorage.testCleanupTaskPromise();
+ // Then scan the archived dir.
+ yield TelemetryArchive.promiseArchivedPingList();
+
+ // Check that the archive is in the correct state.
+ yield checkArchive();
+
+ // Make sure the ping count is correct after the scan (one ping was removed).
+ h = Telemetry.getHistogramById("TELEMETRY_ARCHIVE_SCAN_PING_COUNT").snapshot();
+ Assert.equal(h.sum, 21, "The histogram must count all the pings in the archive.");
+ // One directory out of four was removed as well.
+ h = Telemetry.getHistogramById("TELEMETRY_ARCHIVE_EVICTED_OLD_DIRS").snapshot();
+ Assert.equal(h.sum, 1, "Telemetry must correctly report removed archive directories.");
+ // Check that the remaining directories are correctly counted.
+ h = Telemetry.getHistogramById("TELEMETRY_ARCHIVE_DIRECTORIES_COUNT").snapshot();
+ Assert.equal(h.sum, 3, "Telemetry must correctly report the remaining archive directories.");
+ // Check that the remaining directories are correctly counted.
+ const oldestAgeInMonths = 1;
+ h = Telemetry.getHistogramById("TELEMETRY_ARCHIVE_OLDEST_DIRECTORY_AGE").snapshot();
+ Assert.equal(h.sum, oldestAgeInMonths,
+ "Telemetry must correctly report age of the oldest directory in the archive.");
+
+ // We need to test the archive size before we hit the quota, otherwise a special
+ // value is recorded.
+ Telemetry.getHistogramById("TELEMETRY_ARCHIVE_SIZE_MB").clear();
+ Telemetry.getHistogramById("TELEMETRY_ARCHIVE_EVICTED_OVER_QUOTA").clear();
+ Telemetry.getHistogramById("TELEMETRY_ARCHIVE_EVICTING_OVER_QUOTA_MS").clear();
+
+ // Move the current date 60 days ahead of the second ping.
+ fakeNow(futureDate(oldestDirectoryDate, 60 * MILLISECONDS_PER_DAY));
+ // Reset TelemetryController and TelemetryArchive.
+ yield TelemetryController.testReset();
+ // Wait for the cleanup to finish.
+ yield TelemetryStorage.testCleanupTaskPromise();
+ // Then scan the archived dir again.
+ yield TelemetryArchive.promiseArchivedPingList();
+
+  // Move the oldest ping to the expected-pruned pings list.
+ expectedPrunedInfo.push(expectedNotPrunedInfo.shift());
+ // Check that the archive is in the correct state.
+ yield checkArchive();
+
+ // Find how much disk space the archive takes.
+ const archivedPingsInfo = yield getArchivedPingsInfo();
+ let archiveSizeInBytes =
+ archivedPingsInfo.reduce((lastResult, element) => lastResult + element.size, 0);
+
+ // Check that the correct values for quota probes are reported when no quota is hit.
+ h = Telemetry.getHistogramById("TELEMETRY_ARCHIVE_SIZE_MB").snapshot();
+ Assert.equal(h.sum, Math.round(archiveSizeInBytes / 1024 / 1024),
+ "Telemetry must report the correct archive size.");
+ h = Telemetry.getHistogramById("TELEMETRY_ARCHIVE_EVICTED_OVER_QUOTA").snapshot();
+ Assert.equal(h.sum, 0, "Telemetry must report 0 evictions if quota is not hit.");
+ h = Telemetry.getHistogramById("TELEMETRY_ARCHIVE_EVICTING_OVER_QUOTA_MS").snapshot();
+ Assert.equal(h.sum, 0, "Telemetry must report a null elapsed time if quota is not hit.");
+
+ // Set the quota to 80% of the space.
+ const testQuotaInBytes = archiveSizeInBytes * 0.8;
+ fakeStorageQuota(testQuotaInBytes);
+
+ // The storage prunes archived pings until we reach 90% of the requested storage quota.
+ // Based on that, find how many pings should be kept.
+ const safeQuotaSize = testQuotaInBytes * 0.9;
+ let sizeInBytes = 0;
+ let pingsWithinQuota = [];
+ let pingsOutsideQuota = [];
+
+ for (let pingInfo of archivedPingsInfo) {
+ sizeInBytes += pingInfo.size;
+ if (sizeInBytes >= safeQuotaSize) {
+ pingsOutsideQuota.push({ id: pingInfo.id, creationDate: new Date(pingInfo.timestamp) });
+ continue;
+ }
+ pingsWithinQuota.push({ id: pingInfo.id, creationDate: new Date(pingInfo.timestamp) });
+ }
+
+ expectedNotPrunedInfo = pingsWithinQuota;
+ expectedPrunedInfo = expectedPrunedInfo.concat(pingsOutsideQuota);
+
+ // Reset TelemetryArchive and TelemetryController to start the startup cleanup.
+ yield TelemetryController.testReset();
+ yield TelemetryStorage.testCleanupTaskPromise();
+ yield TelemetryArchive.promiseArchivedPingList();
+ // Check that the archive is in the correct state.
+ yield checkArchive();
+
+ h = Telemetry.getHistogramById("TELEMETRY_ARCHIVE_EVICTED_OVER_QUOTA").snapshot();
+ Assert.equal(h.sum, pingsOutsideQuota.length,
+ "Telemetry must correctly report the over quota pings evicted from the archive.");
+ h = Telemetry.getHistogramById("TELEMETRY_ARCHIVE_SIZE_MB").snapshot();
+ Assert.equal(h.sum, 300, "Archive quota was hit, a special size must be reported.");
+
+ // Trigger a cleanup again and make sure we're not removing anything.
+ yield TelemetryController.testReset();
+ yield TelemetryStorage.testCleanupTaskPromise();
+ yield TelemetryArchive.promiseArchivedPingList();
+ yield checkArchive();
+
+ const OVERSIZED_PING_ID = "9b21ec8f-f762-4d28-a2c1-44e1c4694f24";
+ // Create and archive an oversized, uncompressed, ping.
+ const OVERSIZED_PING = {
+ id: OVERSIZED_PING_ID,
+ type: PING_TYPE,
+ creationDate: (new Date()).toISOString(),
+ // Generate a ~2MB string to use as the payload.
+ payload: generateRandomString(2 * 1024 * 1024)
+ };
+ yield TelemetryArchive.promiseArchivePing(OVERSIZED_PING);
+
+ // Get the size of the archived ping.
+ const oversizedPingPath =
+ TelemetryStorage._testGetArchivedPingPath(OVERSIZED_PING.id, new Date(OVERSIZED_PING.creationDate), PING_TYPE) + "lz4";
+ const archivedPingSizeMB = Math.floor((yield OS.File.stat(oversizedPingPath)).size / 1024 / 1024);
+
+ // We expect the oversized ping to be pruned when scanning the archive.
+ expectedPrunedInfo.push({ id: OVERSIZED_PING_ID, creationDate: new Date(OVERSIZED_PING.creationDate) });
+
+ // Scan the archive.
+ yield TelemetryController.testReset();
+ yield TelemetryStorage.testCleanupTaskPromise();
+ yield TelemetryArchive.promiseArchivedPingList();
+ // The following also checks that non oversized pings are not removed.
+ yield checkArchive();
+
+ // Make sure we're correctly updating the related histograms.
+ h = Telemetry.getHistogramById("TELEMETRY_PING_SIZE_EXCEEDED_ARCHIVED").snapshot();
+ Assert.equal(h.sum, 1, "Telemetry must report 1 oversized ping in the archive.");
+ h = Telemetry.getHistogramById("TELEMETRY_DISCARDED_ARCHIVED_PINGS_SIZE_MB").snapshot();
+ Assert.equal(h.counts[archivedPingSizeMB], 1,
+ "Telemetry must report the correct size for the oversized ping.");
+});
+
+add_task(function* test_clientId() {
+ // Check that a ping submitted after the delayed telemetry initialization completed
+ // should get a valid client id.
+ yield TelemetryController.testReset();
+ const clientId = yield ClientID.getClientID();
+
+ let id = yield TelemetryController.submitExternalPing("test-type", {}, {addClientId: true});
+ let ping = yield TelemetryArchive.promiseArchivedPingById(id);
+
+ Assert.ok(!!ping, "Should have loaded the ping.");
+ Assert.ok("clientId" in ping, "Ping should have a client id.");
+ Assert.ok(UUID_REGEX.test(ping.clientId), "Client id is in UUID format.");
+ Assert.equal(ping.clientId, clientId, "Ping client id should match the global client id.");
+
+ // We should have cached the client id now. Lets confirm that by
+ // checking the client id on a ping submitted before the async
+ // controller setup is finished.
+ let promiseSetup = TelemetryController.testReset();
+ id = yield TelemetryController.submitExternalPing("test-type", {}, {addClientId: true});
+ ping = yield TelemetryArchive.promiseArchivedPingById(id);
+ Assert.equal(ping.clientId, clientId);
+
+ // Finish setup.
+ yield promiseSetup;
+});
+
+add_task(function* test_InvalidPingType() {
+ const TYPES = [
+ "a",
+ "-",
+ "¿€€€?",
+ "-foo-",
+ "-moo",
+ "zoo-",
+ ".bar",
+ "asfd.asdf",
+ ];
+
+ for (let type of TYPES) {
+ let histogram = Telemetry.getKeyedHistogramById("TELEMETRY_INVALID_PING_TYPE_SUBMITTED");
+ Assert.equal(histogram.snapshot(type).sum, 0,
+ "Should not have counted this invalid ping yet: " + type);
+ Assert.ok(promiseRejects(TelemetryController.submitExternalPing(type, {})),
+ "Ping type should have been rejected.");
+ Assert.equal(histogram.snapshot(type).sum, 1,
+ "Should have counted this as an invalid ping type.");
+ }
+});
+
+add_task(function* test_InvalidPayloadType() {
+ const PAYLOAD_TYPES = [
+ 19,
+ "string",
+ [1, 2, 3, 4],
+ null,
+ undefined,
+ ];
+
+ let histogram = Telemetry.getHistogramById("TELEMETRY_INVALID_PAYLOAD_SUBMITTED");
+ for (let i = 0; i < PAYLOAD_TYPES.length; i++) {
+ histogram.clear();
+ Assert.equal(histogram.snapshot().sum, 0,
+ "Should not have counted this invalid payload yet: " + JSON.stringify(PAYLOAD_TYPES[i]));
+ Assert.ok(yield promiseRejects(TelemetryController.submitExternalPing("payload-test", PAYLOAD_TYPES[i])),
+ "Payload type should have been rejected.");
+ Assert.equal(histogram.snapshot().sum, 1,
+ "Should have counted this as an invalid payload type.");
+ }
+});
+
+add_task(function* test_currentPingData() {
+ yield TelemetryController.testSetup();
+
+ // Setup test data.
+ let h = Telemetry.getHistogramById("TELEMETRY_TEST_RELEASE_OPTOUT");
+ h.clear();
+ h.add(1);
+ let k = Telemetry.getKeyedHistogramById("TELEMETRY_TEST_KEYED_RELEASE_OPTOUT");
+ k.clear();
+ k.add("a", 1);
+
+ // Get current ping data objects and check that their data is sane.
+ for (let subsession of [true, false]) {
+ let ping = TelemetryController.getCurrentPingData(subsession);
+
+ Assert.ok(!!ping, "Should have gotten a ping.");
+ Assert.equal(ping.type, "main", "Ping should have correct type.");
+ const expectedReason = subsession ? "gather-subsession-payload" : "gather-payload";
+ Assert.equal(ping.payload.info.reason, expectedReason, "Ping should have the correct reason.");
+
+ let id = "TELEMETRY_TEST_RELEASE_OPTOUT";
+ Assert.ok(id in ping.payload.histograms, "Payload should have test count histogram.");
+ Assert.equal(ping.payload.histograms[id].sum, 1, "Test count value should match.");
+ id = "TELEMETRY_TEST_KEYED_RELEASE_OPTOUT";
+ Assert.ok(id in ping.payload.keyedHistograms, "Payload should have keyed test histogram.");
+ Assert.equal(ping.payload.keyedHistograms[id]["a"].sum, 1, "Keyed test value should match.");
+ }
+});
+
+add_task(function* test_shutdown() {
+ yield TelemetryController.testShutdown();
+});
diff --git a/toolkit/components/telemetry/tests/unit/test_SubsessionChaining.js b/toolkit/components/telemetry/tests/unit/test_SubsessionChaining.js
new file mode 100644
index 000000000..c86fb0499
--- /dev/null
+++ b/toolkit/components/telemetry/tests/unit/test_SubsessionChaining.js
@@ -0,0 +1,236 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/
+*/
+
+Cu.import("resource://gre/modules/Preferences.jsm", this);
+Cu.import("resource://gre/modules/Promise.jsm", this);
+Cu.import("resource://gre/modules/Task.jsm", this);
+Cu.import("resource://gre/modules/TelemetryArchive.jsm", this);
+Cu.import("resource://gre/modules/TelemetryController.jsm", this);
+Cu.import("resource://gre/modules/TelemetryEnvironment.jsm", this);
+Cu.import("resource://gre/modules/osfile.jsm", this);
+Cu.import("resource://gre/modules/XPCOMUtils.jsm", this);
+
+const MS_IN_ONE_HOUR = 60 * 60 * 1000;
+const MS_IN_ONE_DAY = 24 * MS_IN_ONE_HOUR;
+
+const PREF_BRANCH = "toolkit.telemetry.";
+const PREF_ARCHIVE_ENABLED = PREF_BRANCH + "archive.enabled";
+
+const REASON_ABORTED_SESSION = "aborted-session";
+const REASON_DAILY = "daily";
+const REASON_ENVIRONMENT_CHANGE = "environment-change";
+const REASON_SHUTDOWN = "shutdown";
+
+XPCOMUtils.defineLazyGetter(this, "DATAREPORTING_PATH", function() {
+ return OS.Path.join(OS.Constants.Path.profileDir, "datareporting");
+});
+
+var promiseValidateArchivedPings = Task.async(function*(aExpectedReasons) {
+ // The list of ping reasons which mark the session end (and must reset the subsession
+ // count).
+ const SESSION_END_PING_REASONS = new Set([ REASON_ABORTED_SESSION, REASON_SHUTDOWN ]);
+
+ let list = yield TelemetryArchive.promiseArchivedPingList();
+
+ // We're just interested in the "main" pings.
+ list = list.filter(p => p.type == "main");
+
+ Assert.equal(aExpectedReasons.length, list.length, "All the expected pings must be received.");
+
+ let previousPing = yield TelemetryArchive.promiseArchivedPingById(list[0].id);
+ Assert.equal(aExpectedReasons.shift(), previousPing.payload.info.reason,
+ "Telemetry should only get pings with expected reasons.");
+ Assert.equal(previousPing.payload.info.previousSessionId, null,
+ "The first session must report a null previous session id.");
+ Assert.equal(previousPing.payload.info.previousSubsessionId, null,
+ "The first subsession must report a null previous subsession id.");
+ Assert.equal(previousPing.payload.info.profileSubsessionCounter, 1,
+ "profileSubsessionCounter must be 1 the first time.");
+ Assert.equal(previousPing.payload.info.subsessionCounter, 1,
+ "subsessionCounter must be 1 the first time.");
+
+ let expectedSubsessionCounter = 1;
+ let expectedPreviousSessionId = previousPing.payload.info.sessionId;
+
+ for (let i = 1; i < list.length; i++) {
+ let currentPing = yield TelemetryArchive.promiseArchivedPingById(list[i].id);
+ let currentInfo = currentPing.payload.info;
+ let previousInfo = previousPing.payload.info;
+ do_print("Archive entry " + i + " - id: " + currentPing.id + ", reason: " + currentInfo.reason);
+
+ Assert.equal(aExpectedReasons.shift(), currentInfo.reason,
+ "Telemetry should only get pings with expected reasons.");
+ Assert.equal(currentInfo.previousSessionId, expectedPreviousSessionId,
+ "Telemetry must correctly chain session identifiers.");
+ Assert.equal(currentInfo.previousSubsessionId, previousInfo.subsessionId,
+ "Telemetry must correctly chain subsession identifiers.");
+ Assert.equal(currentInfo.profileSubsessionCounter, previousInfo.profileSubsessionCounter + 1,
+ "Telemetry must correctly track the profile subsessions count.");
+ Assert.equal(currentInfo.subsessionCounter, expectedSubsessionCounter,
+ "The subsession counter should be monotonically increasing.");
+
+ // Store the current ping as previous.
+ previousPing = currentPing;
+ // Reset the expected subsession counter, if required. Otherwise increment the expected
+ // subsession counter.
+ // If this is the final subsession of a session we need to update expected values accordingly.
+ if (SESSION_END_PING_REASONS.has(currentInfo.reason)) {
+ expectedSubsessionCounter = 1;
+ expectedPreviousSessionId = currentInfo.sessionId;
+ } else {
+ expectedSubsessionCounter++;
+ }
+ }
+});
+
+add_task(function* test_setup() {
+ do_test_pending();
+
+ // Addon manager needs a profile directory
+ do_get_profile();
+ loadAddonManager("xpcshell@tests.mozilla.org", "XPCShell", "1", "1.9.2");
+ // Make sure we don't generate unexpected pings due to pref changes.
+ yield setEmptyPrefWatchlist();
+
+ Preferences.set(PREF_TELEMETRY_ENABLED, true);
+});
+
+add_task(function* test_subsessionsChaining() {
+ if (gIsAndroid) {
+ // We don't support subsessions yet on Android, so skip the next checks.
+ return;
+ }
+
+ const PREF_TEST = PREF_BRANCH + "test.pref1";
+ const PREFS_TO_WATCH = new Map([
+ [PREF_TEST, {what: TelemetryEnvironment.RECORD_PREF_VALUE}],
+ ]);
+ Preferences.reset(PREF_TEST);
+
+ // Fake the clock data to manually trigger an aborted-session ping and a daily ping.
+ // This is also helpful to make sure we get the archived pings in an expected order.
+ let now = fakeNow(2009, 9, 18, 0, 0, 0);
+ let monotonicNow = fakeMonotonicNow(1000);
+
+ let moveClockForward = (minutes) => {
+ let ms = minutes * MILLISECONDS_PER_MINUTE;
+ now = fakeNow(futureDate(now, ms));
+ monotonicNow = fakeMonotonicNow(monotonicNow + ms);
+ }
+
+ // Keep track of the ping reasons we're expecting in this test.
+ let expectedReasons = [];
+
+ // Start and shut down Telemetry. We expect a shutdown ping with profileSubsessionCounter: 1,
+ // subsessionCounter: 1, subsessionId: A, and previousSubsessionId: null to be archived.
+ yield TelemetryController.testSetup();
+ yield TelemetryController.testShutdown();
+ expectedReasons.push(REASON_SHUTDOWN);
+
+ // Start Telemetry but don't wait for it to initialise before shutting down. We expect a
+ // shutdown ping with profileSubsessionCounter: 2, subsessionCounter: 1, subsessionId: B
+ // and previousSubsessionId: A to be archived.
+ moveClockForward(30);
+ TelemetryController.testReset();
+ yield TelemetryController.testShutdown();
+ expectedReasons.push(REASON_SHUTDOWN);
+
+ // Start Telemetry and simulate an aborted-session ping. We expect an aborted-session ping
+ // with profileSubsessionCounter: 3, subsessionCounter: 1, subsessionId: C and
+ // previousSubsessionId: B to be archived.
+ let schedulerTickCallback = null;
+ fakeSchedulerTimer(callback => schedulerTickCallback = callback, () => {});
+ yield TelemetryController.testReset();
+ moveClockForward(6);
+ // Trigger an aborted-session ping save. When testing, we are not saving the aborted-session
+ // ping as soon as Telemetry starts, otherwise we would end up with unexpected pings being
+ // sent when calling |TelemetryController.testReset()|, thus breaking some tests.
+ Assert.ok(!!schedulerTickCallback);
+ yield schedulerTickCallback();
+ expectedReasons.push(REASON_ABORTED_SESSION);
+
+ // Start Telemetry and trigger an environment change through a pref modification. We expect
+ // an environment-change ping with profileSubsessionCounter: 4, subsessionCounter: 1,
+ // subsessionId: D and previousSubsessionId: C to be archived.
+ moveClockForward(30);
+ yield TelemetryController.testReset();
+ TelemetryEnvironment.testWatchPreferences(PREFS_TO_WATCH);
+ moveClockForward(30);
+ Preferences.set(PREF_TEST, 1);
+ expectedReasons.push(REASON_ENVIRONMENT_CHANGE);
+
+ // Shut down Telemetry. We expect a shutdown ping with profileSubsessionCounter: 5,
+ // subsessionCounter: 2, subsessionId: E and previousSubsessionId: D to be archived.
+ moveClockForward(30);
+ yield TelemetryController.testShutdown();
+ expectedReasons.push(REASON_SHUTDOWN);
+
+ // Start Telemetry and trigger a daily ping. We expect a daily ping with
+ // profileSubsessionCounter: 6, subsessionCounter: 1, subsessionId: F and
+ // previousSubsessionId: E to be archived.
+ moveClockForward(30);
+ yield TelemetryController.testReset();
+
+ // Move the clock one full day ahead, past local midnight, so the next scheduler tick fires the daily ping.
+ now = fakeNow(futureDate(now, MS_IN_ONE_DAY));
+ // Trigger the daily ping.
+ yield schedulerTickCallback();
+ expectedReasons.push(REASON_DAILY);
+
+ // Trigger an environment change ping. We expect an environment-changed ping with
+ // profileSubsessionCounter: 7, subsessionCounter: 2, subsessionId: G and
+ // previousSubsessionId: F to be archived.
+ moveClockForward(30);
+ Preferences.set(PREF_TEST, 0);
+ expectedReasons.push(REASON_ENVIRONMENT_CHANGE);
+
+ // Shut down Telemetry and trigger a shutdown ping.
+ moveClockForward(30);
+ yield TelemetryController.testShutdown();
+ expectedReasons.push(REASON_SHUTDOWN);
+
+ // Start Telemetry and trigger an environment change.
+ yield TelemetryController.testReset();
+ TelemetryEnvironment.testWatchPreferences(PREFS_TO_WATCH);
+ moveClockForward(30);
+ Preferences.set(PREF_TEST, 1);
+ expectedReasons.push(REASON_ENVIRONMENT_CHANGE);
+
+ // Don't shut down, instead trigger an aborted-session ping.
+ moveClockForward(6);
+ // Trigger an aborted-session ping save.
+ yield schedulerTickCallback();
+ expectedReasons.push(REASON_ABORTED_SESSION);
+
+ // Start Telemetry and trigger a daily ping.
+ moveClockForward(30);
+ yield TelemetryController.testReset();
+ // Move the clock one full day ahead, past local midnight, so the next scheduler tick fires the daily ping.
+ now = futureDate(now, MS_IN_ONE_DAY);
+ fakeNow(now);
+ // Trigger the daily ping.
+ yield schedulerTickCallback();
+ expectedReasons.push(REASON_DAILY);
+
+ // Trigger an environment change.
+ moveClockForward(30);
+ Preferences.set(PREF_TEST, 0);
+ expectedReasons.push(REASON_ENVIRONMENT_CHANGE);
+
+ // And an aborted-session ping again.
+ moveClockForward(6);
+ // Trigger an aborted-session ping save.
+ yield schedulerTickCallback();
+ expectedReasons.push(REASON_ABORTED_SESSION);
+
+ // Make sure the aborted-session ping gets archived.
+ yield TelemetryController.testReset();
+
+ yield promiseValidateArchivedPings(expectedReasons);
+});
+
+add_task(function* () {
+ yield TelemetryController.testShutdown();
+ do_test_finished();
+});
diff --git a/toolkit/components/telemetry/tests/unit/test_TelemetryController.js b/toolkit/components/telemetry/tests/unit/test_TelemetryController.js
new file mode 100644
index 000000000..b383de6bf
--- /dev/null
+++ b/toolkit/components/telemetry/tests/unit/test_TelemetryController.js
@@ -0,0 +1,507 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/
+*/
+/* This testcase triggers two telemetry pings.
+ *
+ * Telemetry code keeps histograms of past telemetry pings. The first
+ * ping populates these histograms. One of those histograms is then
+ * checked in the second request.
+ */
+
+Cu.import("resource://gre/modules/ClientID.jsm");
+Cu.import("resource://gre/modules/Services.jsm");
+Cu.import("resource://gre/modules/XPCOMUtils.jsm", this);
+Cu.import("resource://gre/modules/TelemetryController.jsm", this);
+Cu.import("resource://gre/modules/TelemetryStorage.jsm", this);
+Cu.import("resource://gre/modules/TelemetrySend.jsm", this);
+Cu.import("resource://gre/modules/TelemetryArchive.jsm", this);
+Cu.import("resource://gre/modules/Task.jsm", this);
+Cu.import("resource://gre/modules/Promise.jsm", this);
+Cu.import("resource://gre/modules/Preferences.jsm");
+
+const PING_FORMAT_VERSION = 4;
+const DELETION_PING_TYPE = "deletion";
+const TEST_PING_TYPE = "test-ping-type";
+
+const PLATFORM_VERSION = "1.9.2";
+const APP_VERSION = "1";
+const APP_NAME = "XPCShell";
+
+const PREF_BRANCH = "toolkit.telemetry.";
+const PREF_ENABLED = PREF_BRANCH + "enabled";
+const PREF_ARCHIVE_ENABLED = PREF_BRANCH + "archive.enabled";
+const PREF_FHR_UPLOAD_ENABLED = "datareporting.healthreport.uploadEnabled";
+const PREF_UNIFIED = PREF_BRANCH + "unified";
+
+var gClientID = null;
+
+function sendPing(aSendClientId, aSendEnvironment) {
+ if (PingServer.started) {
+ TelemetrySend.setServer("http://localhost:" + PingServer.port);
+ } else {
+ TelemetrySend.setServer("http://doesnotexist");
+ }
+
+ let options = {
+ addClientId: aSendClientId,
+ addEnvironment: aSendEnvironment,
+ };
+ return TelemetryController.submitExternalPing(TEST_PING_TYPE, {}, options);
+}
+
+function checkPingFormat(aPing, aType, aHasClientId, aHasEnvironment) {
+ const MANDATORY_PING_FIELDS = [
+ "type", "id", "creationDate", "version", "application", "payload"
+ ];
+
+ const APPLICATION_TEST_DATA = {
+ buildId: gAppInfo.appBuildID,
+ name: APP_NAME,
+ version: APP_VERSION,
+ displayVersion: AppConstants.MOZ_APP_VERSION_DISPLAY,
+ vendor: "Mozilla",
+ platformVersion: PLATFORM_VERSION,
+ xpcomAbi: "noarch-spidermonkey",
+ };
+
+ // Check that the ping contains all the mandatory fields.
+ for (let f of MANDATORY_PING_FIELDS) {
+ Assert.ok(f in aPing, f + " must be available.");
+ }
+
+ Assert.equal(aPing.type, aType, "The ping must have the correct type.");
+ Assert.equal(aPing.version, PING_FORMAT_VERSION, "The ping must have the correct version.");
+
+ // Test the application section.
+ for (let f in APPLICATION_TEST_DATA) {
+ Assert.equal(aPing.application[f], APPLICATION_TEST_DATA[f],
+ f + " must have the correct value.");
+ }
+
+ // We can't check the values for channel and architecture. Just make
+ // sure they are in.
+ Assert.ok("architecture" in aPing.application,
+ "The application section must have an architecture field.");
+ Assert.ok("channel" in aPing.application,
+ "The application section must have a channel field.");
+
+ // Check the clientId and environment fields, as needed.
+ Assert.equal("clientId" in aPing, aHasClientId);
+ Assert.equal("environment" in aPing, aHasEnvironment);
+}
+
+add_task(function* test_setup() {
+ // Addon manager needs a profile directory
+ do_get_profile();
+ loadAddonManager("xpcshell@tests.mozilla.org", "XPCShell", "1", "1.9.2");
+ // Make sure we don't generate unexpected pings due to pref changes.
+ yield setEmptyPrefWatchlist();
+
+ Services.prefs.setBoolPref(PREF_ENABLED, true);
+ Services.prefs.setBoolPref(PREF_FHR_UPLOAD_ENABLED, true);
+
+ yield new Promise(resolve =>
+ Telemetry.asyncFetchTelemetryData(wrapWithExceptionHandler(resolve)));
+});
+
+add_task(function* asyncSetup() {
+ yield TelemetryController.testSetup();
+});
+
+// Ensure that not overwriting an existing file fails silently
+add_task(function* test_overwritePing() {
+ let ping = {id: "foo"};
+ yield TelemetryStorage.savePing(ping, true);
+ yield TelemetryStorage.savePing(ping, false);
+ yield TelemetryStorage.cleanupPingFile(ping);
+});
+
+// Checks that a sent ping is correctly received by a dummy http server.
+add_task(function* test_simplePing() {
+ PingServer.start();
+ // Update the Telemetry Server preference with the address of the local server.
+ // Otherwise we might end up sending stuff to a non-existing server after
+ // |TelemetryController.testReset| is called.
+ Preferences.set(TelemetryController.Constants.PREF_SERVER, "http://localhost:" + PingServer.port);
+
+ yield sendPing(false, false);
+ let request = yield PingServer.promiseNextRequest();
+
+ // Check that we have a version query parameter in the URL.
+ Assert.notEqual(request.queryString, "");
+
+ // Make sure the version in the query string matches the new ping format version.
+ let params = request.queryString.split("&");
+ Assert.ok(params.find(p => p == ("v=" + PING_FORMAT_VERSION)));
+
+ let ping = decodeRequestPayload(request);
+ checkPingFormat(ping, TEST_PING_TYPE, false, false);
+});
+
+add_task(function* test_disableDataUpload() {
+ const isUnified = Preferences.get(PREF_UNIFIED, false);
+ if (!isUnified) {
+ // Skipping the test if unified telemetry is off, as no deletion ping will
+ // be generated.
+ return;
+ }
+
+ // Disable FHR upload: this should trigger a deletion ping.
+ Preferences.set(PREF_FHR_UPLOAD_ENABLED, false);
+
+ let ping = yield PingServer.promiseNextPing();
+ checkPingFormat(ping, DELETION_PING_TYPE, true, false);
+ // Wait on ping activity to settle.
+ yield TelemetrySend.testWaitOnOutgoingPings();
+
+ // Restore FHR Upload.
+ Preferences.set(PREF_FHR_UPLOAD_ENABLED, true);
+
+ // Simulate a failure in sending the deletion ping by disabling the HTTP server.
+ yield PingServer.stop();
+
+ // Try to send a ping. It will be saved as pending and get deleted when disabling upload.
+ TelemetryController.submitExternalPing(TEST_PING_TYPE, {});
+
+ // Disable FHR upload to send a deletion ping again.
+ Preferences.set(PREF_FHR_UPLOAD_ENABLED, false);
+
+ // Wait on sending activity to settle, as |TelemetryController.testReset()| doesn't do that.
+ yield TelemetrySend.testWaitOnOutgoingPings();
+ // Wait for the pending pings to be deleted. Resetting TelemetryController doesn't
+ // trigger the shutdown, so we need to call it ourselves.
+ yield TelemetryStorage.shutdown();
+ // Simulate a restart, and spin the send task.
+ yield TelemetryController.testReset();
+
+ // Disabling Telemetry upload must clear out all the pending pings.
+ let pendingPings = yield TelemetryStorage.loadPendingPingList();
+ Assert.equal(pendingPings.length, 1,
+ "All the pending pings but the deletion ping should have been deleted");
+
+ // Enable the ping server again.
+ PingServer.start();
+ // We set the new server using the pref, otherwise it would get reset with
+ // |TelemetryController.testReset|.
+ Preferences.set(TelemetryController.Constants.PREF_SERVER, "http://localhost:" + PingServer.port);
+
+ // Stop the sending task and then start it again.
+ yield TelemetrySend.shutdown();
+ // Reset the controller to spin the ping sending task.
+ yield TelemetryController.testReset();
+ ping = yield PingServer.promiseNextPing();
+ checkPingFormat(ping, DELETION_PING_TYPE, true, false);
+
+ // Wait on ping activity to settle before moving on to the next test. If we were
+ // to shut down telemetry, even though the PingServer caught the expected pings,
+ // TelemetrySend could still be processing them (clearing pings would happen in
+ // a couple of ticks). Shutting down would cancel the request and save them as
+ // pending pings.
+ yield TelemetrySend.testWaitOnOutgoingPings();
+ // Restore FHR Upload.
+ Preferences.set(PREF_FHR_UPLOAD_ENABLED, true);
+});
+
+add_task(function* test_pingHasClientId() {
+ const PREF_CACHED_CLIENTID = "toolkit.telemetry.cachedClientID";
+
+ // Make sure we have no cached client ID for this test: we'll try to send
+ // a ping with it while Telemetry is being initialized.
+ Preferences.reset(PREF_CACHED_CLIENTID);
+ yield TelemetryController.testShutdown();
+ yield ClientID._reset();
+ yield TelemetryStorage.testClearPendingPings();
+ // And also clear the counter histogram since we're here.
+ let h = Telemetry.getHistogramById("TELEMETRY_PING_SUBMISSION_WAITING_CLIENTID");
+ h.clear();
+
+ // Init telemetry and try to send a ping with a client ID.
+ let promisePingSetup = TelemetryController.testReset();
+ yield sendPing(true, false);
+ Assert.equal(h.snapshot().sum, 1,
+ "We must have a ping waiting for the clientId early during startup.");
+ // Wait until we are fully initialized. Pings will be assembled but won't get
+ // sent before then.
+ yield promisePingSetup;
+
+ let ping = yield PingServer.promiseNextPing();
+ // Fetch the client ID after initializing and fetching the ping, so we
+ // don't unintentionally trigger its loading. We'll still need the client ID
+ // to see if the ping looks sane.
+ gClientID = yield ClientID.getClientID();
+
+ checkPingFormat(ping, TEST_PING_TYPE, true, false);
+ Assert.equal(ping.clientId, gClientID, "The correct clientId must be reported.");
+
+ // Shutdown Telemetry so we can safely restart it.
+ yield TelemetryController.testShutdown();
+ yield TelemetryStorage.testClearPendingPings();
+
+ // We should have cached the client ID now. Lets confirm that by checking it before
+ // the async ping setup is finished.
+ h.clear();
+ promisePingSetup = TelemetryController.testReset();
+ yield sendPing(true, false);
+ yield promisePingSetup;
+
+ // Check that we received the cached client id.
+ Assert.equal(h.snapshot().sum, 0, "We must have used the cached clientId.");
+ ping = yield PingServer.promiseNextPing();
+ checkPingFormat(ping, TEST_PING_TYPE, true, false);
+ Assert.equal(ping.clientId, gClientID,
+ "Telemetry should report the correct cached clientId.");
+
+ // Check that sending a ping without relying on the cache, after the
+ // initialization, still works.
+ Preferences.reset(PREF_CACHED_CLIENTID);
+ yield TelemetryController.testShutdown();
+ yield TelemetryStorage.testClearPendingPings();
+ yield TelemetryController.testReset();
+ yield sendPing(true, false);
+ ping = yield PingServer.promiseNextPing();
+ checkPingFormat(ping, TEST_PING_TYPE, true, false);
+ Assert.equal(ping.clientId, gClientID, "The correct clientId must be reported.");
+ Assert.equal(h.snapshot().sum, 0, "No ping should have been waiting for a clientId.");
+});
+
+add_task(function* test_pingHasEnvironment() {
+ // Send a ping with the environment data.
+ yield sendPing(false, true);
+ let ping = yield PingServer.promiseNextPing();
+ checkPingFormat(ping, TEST_PING_TYPE, false, true);
+
+ // Test a field in the environment build section.
+ Assert.equal(ping.application.buildId, ping.environment.build.buildId);
+});
+
+add_task(function* test_pingHasEnvironmentAndClientId() {
+ // Send a ping with the environment data and client id.
+ yield sendPing(true, true);
+ let ping = yield PingServer.promiseNextPing();
+ checkPingFormat(ping, TEST_PING_TYPE, true, true);
+
+ // Test a field in the environment build section.
+ Assert.equal(ping.application.buildId, ping.environment.build.buildId);
+ // Test that we have the correct clientId.
+ Assert.equal(ping.clientId, gClientID, "The correct clientId must be reported.");
+});
+
+add_task(function* test_archivePings() {
+ let now = new Date(2009, 10, 18, 12, 0, 0);
+ fakeNow(now);
+
+ // Disable ping upload so that pings don't get sent.
+ // With unified telemetry the FHR upload pref controls this,
+ // with non-unified telemetry the Telemetry enabled pref.
+ const isUnified = Preferences.get(PREF_UNIFIED, false);
+ const uploadPref = isUnified ? PREF_FHR_UPLOAD_ENABLED : PREF_ENABLED;
+ Preferences.set(uploadPref, false);
+
+ // If we're using unified telemetry, disabling ping upload will generate a "deletion"
+ // ping. Catch it.
+ if (isUnified) {
+ let ping = yield PingServer.promiseNextPing();
+ checkPingFormat(ping, DELETION_PING_TYPE, true, false);
+ }
+
+ // Register a new Ping Handler that asserts if a ping is received, then send a ping.
+ PingServer.registerPingHandler(() => Assert.ok(false, "Telemetry must not send pings if not allowed to."));
+ let pingId = yield sendPing(true, true);
+
+ // Check that the ping was archived, even with upload disabled.
+ let ping = yield TelemetryArchive.promiseArchivedPingById(pingId);
+ Assert.equal(ping.id, pingId, "TelemetryController should still archive pings.");
+
+ // Check that pings don't get archived if not allowed to.
+ now = new Date(2010, 10, 18, 12, 0, 0);
+ fakeNow(now);
+ Preferences.set(PREF_ARCHIVE_ENABLED, false);
+ pingId = yield sendPing(true, true);
+ let promise = TelemetryArchive.promiseArchivedPingById(pingId);
+ Assert.ok((yield promiseRejects(promise)),
+ "TelemetryController should not archive pings if the archive pref is disabled.");
+
+ // Enable archiving and the upload so that pings get sent and archived again.
+ Preferences.set(uploadPref, true);
+ Preferences.set(PREF_ARCHIVE_ENABLED, true);
+
+ now = new Date(2014, 6, 18, 22, 0, 0);
+ fakeNow(now);
+ // Restore the non asserting ping handler.
+ PingServer.resetPingHandler();
+ pingId = yield sendPing(true, true);
+
+ // Check that we archive pings when successfully sending them.
+ yield PingServer.promiseNextPing();
+ ping = yield TelemetryArchive.promiseArchivedPingById(pingId);
+ Assert.equal(ping.id, pingId,
+ "TelemetryController should still archive pings if ping upload is enabled.");
+});
+
+// Test that we fuzz the submission time around midnight properly
+// to avoid overloading the telemetry servers.
+add_task(function* test_midnightPingSendFuzzing() {
+ const fuzzingDelay = 60 * 60 * 1000;
+ fakeMidnightPingFuzzingDelay(fuzzingDelay);
+ let now = new Date(2030, 5, 1, 11, 0, 0);
+ fakeNow(now);
+
+ let waitForTimer = () => new Promise(resolve => {
+ fakePingSendTimer((callback, timeout) => {
+ resolve([callback, timeout]);
+ }, () => {});
+ });
+
+ PingServer.clearRequests();
+ yield TelemetryController.testReset();
+
+ // A ping after midnight within the fuzzing delay should not get sent.
+ now = new Date(2030, 5, 2, 0, 40, 0);
+ fakeNow(now);
+ PingServer.registerPingHandler((req, res) => {
+ Assert.ok(false, "No ping should be received yet.");
+ });
+ let timerPromise = waitForTimer();
+ yield sendPing(true, true);
+ let [timerCallback, timerTimeout] = yield timerPromise;
+ Assert.ok(!!timerCallback);
+ Assert.deepEqual(futureDate(now, timerTimeout), new Date(2030, 5, 2, 1, 0, 0));
+
+ // A ping just before the end of the fuzzing delay should not get sent.
+ now = new Date(2030, 5, 2, 0, 59, 59);
+ fakeNow(now);
+ timerPromise = waitForTimer();
+ yield sendPing(true, true);
+ [timerCallback, timerTimeout] = yield timerPromise;
+ Assert.deepEqual(timerTimeout, 1 * 1000);
+
+ // Restore the previous ping handler.
+ PingServer.resetPingHandler();
+
+ // Setting the clock to after the fuzzing delay, we should trigger the two ping sends
+ // with the timer callback.
+ now = futureDate(now, timerTimeout);
+ fakeNow(now);
+ yield timerCallback();
+ const pings = yield PingServer.promiseNextPings(2);
+ for (let ping of pings) {
+ checkPingFormat(ping, TEST_PING_TYPE, true, true);
+ }
+ yield TelemetrySend.testWaitOnOutgoingPings();
+
+ // Moving the clock further we should still send pings immediately.
+ now = futureDate(now, 5 * 60 * 1000);
+ yield sendPing(true, true);
+ let ping = yield PingServer.promiseNextPing();
+ checkPingFormat(ping, TEST_PING_TYPE, true, true);
+ yield TelemetrySend.testWaitOnOutgoingPings();
+
+ // Check that pings shortly before midnight are immediately sent.
+ now = fakeNow(2030, 5, 3, 23, 59, 0);
+ yield sendPing(true, true);
+ ping = yield PingServer.promiseNextPing();
+ checkPingFormat(ping, TEST_PING_TYPE, true, true);
+ yield TelemetrySend.testWaitOnOutgoingPings();
+
+ // Clean-up.
+ fakeMidnightPingFuzzingDelay(0);
+ fakePingSendTimer(() => {}, () => {});
+});
+
+add_task(function* test_changePingAfterSubmission() {
+ // Submit a ping with a custom payload.
+ let payload = { canary: "test" };
+ let pingPromise = TelemetryController.submitExternalPing(TEST_PING_TYPE, payload, options);
+
+ // Change the payload with a predefined value.
+ payload.canary = "changed";
+
+ // Wait for the ping to be archived.
+ const pingId = yield pingPromise;
+
+ // Make sure our changes didn't affect the submitted payload.
+ let archivedCopy = yield TelemetryArchive.promiseArchivedPingById(pingId);
+ Assert.equal(archivedCopy.payload.canary, "test",
+ "The payload must not be changed after being submitted.");
+});
+
+add_task(function* test_telemetryEnabledUnexpectedValue() {
+ // Remove the default value for toolkit.telemetry.enabled from the default prefs.
+ // Otherwise, we wouldn't be able to set the pref to a string.
+ let defaultPrefBranch = Services.prefs.getDefaultBranch(null);
+ defaultPrefBranch.deleteBranch(PREF_ENABLED);
+
+ // Set the preferences controlling the Telemetry status to a string.
+ Preferences.set(PREF_ENABLED, "false");
+ // Check that Telemetry is not enabled.
+ yield TelemetryController.testReset();
+ Assert.equal(Telemetry.canRecordExtended, false,
+ "Invalid values must not enable Telemetry recording.");
+
+ // Delete the pref again.
+ defaultPrefBranch.deleteBranch(PREF_ENABLED);
+
+ // Make sure that flipping it to true works.
+ Preferences.set(PREF_ENABLED, true);
+ yield TelemetryController.testReset();
+ Assert.equal(Telemetry.canRecordExtended, true,
+ "True must enable Telemetry recording.");
+
+ // Also check that the false works as well.
+ Preferences.set(PREF_ENABLED, false);
+ yield TelemetryController.testReset();
+ Assert.equal(Telemetry.canRecordExtended, false,
+ "False must disable Telemetry recording.");
+});
+
+add_task(function* test_telemetryCleanFHRDatabase() {
+ const FHR_DBNAME_PREF = "datareporting.healthreport.dbName";
+ const CUSTOM_DB_NAME = "unlikely.to.be.used.sqlite";
+ const DEFAULT_DB_NAME = "healthreport.sqlite";
+
+ // Check that we're able to remove a FHR DB with a custom name.
+ const CUSTOM_DB_PATHS = [
+ OS.Path.join(OS.Constants.Path.profileDir, CUSTOM_DB_NAME),
+ OS.Path.join(OS.Constants.Path.profileDir, CUSTOM_DB_NAME + "-wal"),
+ OS.Path.join(OS.Constants.Path.profileDir, CUSTOM_DB_NAME + "-shm"),
+ ];
+ Preferences.set(FHR_DBNAME_PREF, CUSTOM_DB_NAME);
+
+ // Write fake DB files to the profile directory.
+ for (let dbFilePath of CUSTOM_DB_PATHS) {
+ yield OS.File.writeAtomic(dbFilePath, "some data");
+ }
+
+ // Trigger the cleanup and check that the files were removed.
+ yield TelemetryStorage.removeFHRDatabase();
+ for (let dbFilePath of CUSTOM_DB_PATHS) {
+ Assert.ok(!(yield OS.File.exists(dbFilePath)), "The DB must not be on the disk anymore: " + dbFilePath);
+ }
+
+ // We should not break anything if there's no DB file.
+ yield TelemetryStorage.removeFHRDatabase();
+
+ // Check that we're able to remove a FHR DB with the default name.
+ Preferences.reset(FHR_DBNAME_PREF);
+
+ const DEFAULT_DB_PATHS = [
+ OS.Path.join(OS.Constants.Path.profileDir, DEFAULT_DB_NAME),
+ OS.Path.join(OS.Constants.Path.profileDir, DEFAULT_DB_NAME + "-wal"),
+ OS.Path.join(OS.Constants.Path.profileDir, DEFAULT_DB_NAME + "-shm"),
+ ];
+
+ // Write fake DB files to the profile directory.
+ for (let dbFilePath of DEFAULT_DB_PATHS) {
+ yield OS.File.writeAtomic(dbFilePath, "some data");
+ }
+
+ // Trigger the cleanup and check that the files were removed.
+ yield TelemetryStorage.removeFHRDatabase();
+ for (let dbFilePath of DEFAULT_DB_PATHS) {
+ Assert.ok(!(yield OS.File.exists(dbFilePath)), "The DB must not be on the disk anymore: " + dbFilePath);
+ }
+});
+
+add_task(function* stopServer() {
+ yield PingServer.stop();
+});
diff --git a/toolkit/components/telemetry/tests/unit/test_TelemetryControllerBuildID.js b/toolkit/components/telemetry/tests/unit/test_TelemetryControllerBuildID.js
new file mode 100644
index 000000000..b8a88afa2
--- /dev/null
+++ b/toolkit/components/telemetry/tests/unit/test_TelemetryControllerBuildID.js
@@ -0,0 +1,70 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/
+*/
+/* Test inclusion of previous build ID in telemetry pings when build ID changes.
+ * bug 841028
+ *
+ * Cases to cover:
+ * 1) Run with no "previousBuildID" stored in prefs:
+ * -> no previousBuildID in telemetry system info, new value set in prefs.
+ * 2) previousBuildID in prefs, equal to current build ID:
+ * -> no previousBuildID in telemetry, prefs not updated.
+ * 3) previousBuildID in prefs, not equal to current build ID:
+ * -> previousBuildID in telemetry, new value set in prefs.
+ */
+
+"use strict";
+
+Cu.import("resource://gre/modules/Services.jsm", this);
+Cu.import("resource://gre/modules/TelemetryController.jsm", this);
+Cu.import("resource://gre/modules/TelemetrySession.jsm", this);
+Cu.import("resource://gre/modules/XPCOMUtils.jsm");
+
+// Force the Telemetry enabled preference so that TelemetrySession.testReset() doesn't exit early.
+Services.prefs.setBoolPref(PREF_TELEMETRY_ENABLED, true);
+
+// Set up our dummy AppInfo object so we can control the appBuildID.
+Cu.import("resource://testing-common/AppInfo.jsm", this);
+updateAppInfo();
+
+// Check that when run with no previous build ID stored, we update the pref but do not
+// put anything into the metadata.
+add_task(function* test_firstRun() {
+ yield TelemetryController.testReset();
+ let metadata = TelemetrySession.getMetadata();
+ do_check_false("previousBuildID" in metadata);
+ let appBuildID = getAppInfo().appBuildID;
+ let buildIDPref = Services.prefs.getCharPref(TelemetrySession.Constants.PREF_PREVIOUS_BUILDID);
+ do_check_eq(appBuildID, buildIDPref);
+});
+
+// Check that a subsequent run with the same build ID does not put prev build ID in
+// metadata. Assumes testFirstRun() has already been called to set the previousBuildID pref.
+add_task(function* test_secondRun() {
+ yield TelemetryController.testReset();
+ let metadata = TelemetrySession.getMetadata();
+ do_check_false("previousBuildID" in metadata);
+});
+
+// Set up telemetry with a different app build ID and check that the old build ID
+// is returned in the metadata and the pref is updated to the new build ID.
+// Assumes testFirstRun() has been called to set the previousBuildID pref.
+const NEW_BUILD_ID = "20130314";
+add_task(function* test_newBuild() {
+ let info = getAppInfo();
+ let oldBuildID = info.appBuildID;
+ info.appBuildID = NEW_BUILD_ID;
+ yield TelemetryController.testReset();
+ let metadata = TelemetrySession.getMetadata();
+ do_check_eq(metadata.previousBuildId, oldBuildID);
+ let buildIDPref = Services.prefs.getCharPref(TelemetrySession.Constants.PREF_PREVIOUS_BUILDID);
+ do_check_eq(NEW_BUILD_ID, buildIDPref);
+});
+
+
+function run_test() {
+ // Make sure we have a profile directory.
+ do_get_profile();
+
+ run_next_test();
+}
diff --git a/toolkit/components/telemetry/tests/unit/test_TelemetryControllerShutdown.js b/toolkit/components/telemetry/tests/unit/test_TelemetryControllerShutdown.js
new file mode 100644
index 000000000..391db0d9d
--- /dev/null
+++ b/toolkit/components/telemetry/tests/unit/test_TelemetryControllerShutdown.js
@@ -0,0 +1,70 @@
+/* Any copyright is dedicated to the Public Domain.
+ * http://creativecommons.org/publicdomain/zero/1.0/ */
+
+// Test that TelemetryController pings sent close to shutdown don't lead
+// to AsyncShutdown timeouts.
+
+"use strict";
+
+Cu.import("resource://gre/modules/Services.jsm", this);
+Cu.import("resource://gre/modules/TelemetryController.jsm", this);
+Cu.import("resource://gre/modules/TelemetrySend.jsm", this);
+Cu.import("resource://gre/modules/Timer.jsm", this);
+Cu.import("resource://gre/modules/XPCOMUtils.jsm", this);
+Cu.import("resource://gre/modules/AsyncShutdown.jsm", this);
+Cu.import("resource://testing-common/httpd.js", this);
+
+const PREF_BRANCH = "toolkit.telemetry.";
+const PREF_FHR_UPLOAD_ENABLED = "datareporting.healthreport.uploadEnabled";
+
+function contentHandler(metadata, response)
+{
+ dump("contentHandler called for path: " + metadata._path + "\n");
+ // We intentionally don't finish writing the response here to let the
+ // client time out.
+ response.processAsync();
+ response.setHeader("Content-Type", "text/plain");
+}
+
+add_task(function* test_setup() {
+ // Addon manager needs a profile directory
+ do_get_profile();
+ loadAddonManager("xpcshell@tests.mozilla.org", "XPCShell", "1", "1.9.2");
+ // Make sure we don't generate unexpected pings due to pref changes.
+ yield setEmptyPrefWatchlist();
+
+ Services.prefs.setBoolPref(PREF_TELEMETRY_ENABLED, true);
+ Services.prefs.setBoolPref(PREF_FHR_UPLOAD_ENABLED, true);
+});
+
+/**
+ * Ensures that TelemetryController does not hang processing shutdown
+ * phases. Assumes that Telemetry shutdown routines do not take longer than
+ * CRASH_TIMEOUT_MS to complete.
+ */
+add_task(function* test_sendTelemetryShutsDownWithinReasonableTimeout() {
+ const CRASH_TIMEOUT_MS = 5 * 1000;
+ // Enable testing mode for AsyncShutdown, otherwise some testing-only functionality
+ // is not available.
+ Services.prefs.setBoolPref("toolkit.asyncshutdown.testing", true);
+  // Reducing the max delay for waiting on phases to complete from 1 minute
+ // (standard) to 10 seconds to avoid blocking the tests in case of misbehavior.
+ Services.prefs.setIntPref("toolkit.asyncshutdown.crash_timeout", CRASH_TIMEOUT_MS);
+
+ let httpServer = new HttpServer();
+ httpServer.registerPrefixHandler("/", contentHandler);
+ httpServer.start(-1);
+
+ yield TelemetryController.testSetup();
+ TelemetrySend.setServer("http://localhost:" + httpServer.identity.primaryPort);
+ let submissionPromise = TelemetryController.submitExternalPing("test-ping-type", {});
+
+ // Trigger the AsyncShutdown phase TelemetryController hangs off.
+ AsyncShutdown.profileBeforeChange._trigger();
+ AsyncShutdown.sendTelemetry._trigger();
+ // Now wait for the ping submission.
+ yield submissionPromise;
+
+ // If we get here, we didn't time out in the shutdown routines.
+ Assert.ok(true, "Didn't time out on shutdown.");
+});
diff --git a/toolkit/components/telemetry/tests/unit/test_TelemetryController_idle.js b/toolkit/components/telemetry/tests/unit/test_TelemetryController_idle.js
new file mode 100644
index 000000000..ca5d1820b
--- /dev/null
+++ b/toolkit/components/telemetry/tests/unit/test_TelemetryController_idle.js
@@ -0,0 +1,73 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/ */
+
+// Check that TelemetrySession notifies correctly on idle-daily.
+
+Cu.import("resource://testing-common/httpd.js", this);
+Cu.import("resource://gre/modules/PromiseUtils.jsm", this);
+Cu.import("resource://gre/modules/Services.jsm", this);
+Cu.import("resource://gre/modules/Task.jsm", this);
+Cu.import("resource://gre/modules/TelemetryStorage.jsm", this);
+Cu.import("resource://gre/modules/TelemetryController.jsm", this);
+Cu.import("resource://gre/modules/TelemetrySession.jsm", this);
+Cu.import("resource://gre/modules/TelemetrySend.jsm", this);
+
+const PREF_FHR_UPLOAD_ENABLED = "datareporting.healthreport.uploadEnabled";
+
+var gHttpServer = null;
+
+add_task(function* test_setup() {
+ do_get_profile();
+
+ // Make sure we don't generate unexpected pings due to pref changes.
+ yield setEmptyPrefWatchlist();
+
+ Services.prefs.setBoolPref(PREF_TELEMETRY_ENABLED, true);
+ Services.prefs.setBoolPref(PREF_FHR_UPLOAD_ENABLED, true);
+
+ // Start the webserver to check if the pending ping correctly arrives.
+ gHttpServer = new HttpServer();
+ gHttpServer.start(-1);
+});
+
+add_task(function* testSendPendingOnIdleDaily() {
+ // Create a valid pending ping.
+ const PENDING_PING = {
+ id: "2133234d-4ea1-44f4-909e-ce8c6c41e0fc",
+ type: "test-ping",
+ version: 4,
+ application: {},
+ payload: {},
+ };
+ yield TelemetryStorage.savePing(PENDING_PING, true);
+
+ // Telemetry will not send this ping at startup, because it's not overdue.
+ yield TelemetryController.testSetup();
+ TelemetrySend.setServer("http://localhost:" + gHttpServer.identity.primaryPort);
+
+ let pendingPromise = new Promise(resolve =>
+ gHttpServer.registerPrefixHandler("/submit/telemetry/", request => resolve(request)));
+
+ let gatherPromise = PromiseUtils.defer();
+ Services.obs.addObserver(gatherPromise.resolve, "gather-telemetry", false);
+
+ // Check that we are correctly receiving the gather-telemetry notification.
+ TelemetrySession.observe(null, "idle-daily", null);
+ yield gatherPromise;
+ Assert.ok(true, "Received gather-telemetry notification.");
+
+ Services.obs.removeObserver(gatherPromise.resolve, "gather-telemetry");
+
+ // Check that the pending ping is correctly received.
+ let ns = {};
+ let module = Cu.import("resource://gre/modules/TelemetrySend.jsm", ns);
+ module.TelemetrySendImpl.observe(null, "idle-daily", null);
+ let request = yield pendingPromise;
+ let ping = decodeRequestPayload(request);
+
+ // Validate the ping data.
+ Assert.equal(ping.id, PENDING_PING.id);
+ Assert.equal(ping.type, PENDING_PING.type);
+
+ yield new Promise(resolve => gHttpServer.stop(resolve));
+});
diff --git a/toolkit/components/telemetry/tests/unit/test_TelemetryEnvironment.js b/toolkit/components/telemetry/tests/unit/test_TelemetryEnvironment.js
new file mode 100644
index 000000000..35181272a
--- /dev/null
+++ b/toolkit/components/telemetry/tests/unit/test_TelemetryEnvironment.js
@@ -0,0 +1,1528 @@
+/* Any copyright is dedicated to the Public Domain.
+ * http://creativecommons.org/publicdomain/zero/1.0/ */
+
+Cu.import("resource://gre/modules/AddonManager.jsm");
+Cu.import("resource://gre/modules/TelemetryEnvironment.jsm", this);
+Cu.import("resource://gre/modules/Preferences.jsm", this);
+Cu.import("resource://gre/modules/PromiseUtils.jsm", this);
+Cu.import("resource://gre/modules/XPCOMUtils.jsm", this);
+Cu.import("resource://testing-common/AddonManagerTesting.jsm");
+Cu.import("resource://testing-common/httpd.js");
+Cu.import("resource://testing-common/MockRegistrar.jsm", this);
+Cu.import("resource://gre/modules/FileUtils.jsm");
+
+// AttributionCode is only needed for Firefox
+XPCOMUtils.defineLazyModuleGetter(this, "AttributionCode",
+ "resource:///modules/AttributionCode.jsm");
+
+// Lazy load |LightweightThemeManager|, we won't be using it on Gonk.
+XPCOMUtils.defineLazyModuleGetter(this, "LightweightThemeManager",
+ "resource://gre/modules/LightweightThemeManager.jsm");
+
+XPCOMUtils.defineLazyModuleGetter(this, "ProfileAge",
+ "resource://gre/modules/ProfileAge.jsm");
+
+// The webserver hosting the addons.
+var gHttpServer = null;
+// The URL of the webserver root.
+var gHttpRoot = null;
+// The URL of the data directory, on the webserver.
+var gDataRoot = null;
+
+const PLATFORM_VERSION = "1.9.2";
+const APP_VERSION = "1";
+const APP_ID = "xpcshell@tests.mozilla.org";
+const APP_NAME = "XPCShell";
+const APP_HOTFIX_VERSION = "2.3.4a";
+
+const DISTRIBUTION_ID = "distributor-id";
+const DISTRIBUTION_VERSION = "4.5.6b";
+const DISTRIBUTOR_NAME = "Some Distributor";
+const DISTRIBUTOR_CHANNEL = "A Channel";
+const PARTNER_NAME = "test";
+const PARTNER_ID = "NicePartner-ID-3785";
+const DISTRIBUTION_CUSTOMIZATION_COMPLETE_TOPIC = "distribution-customization-complete";
+
+const GFX_VENDOR_ID = "0xabcd";
+const GFX_DEVICE_ID = "0x1234";
+
+// The profile reset date, in milliseconds (Today)
+const PROFILE_RESET_DATE_MS = Date.now();
+// The profile creation date, in milliseconds (Yesterday).
+const PROFILE_CREATION_DATE_MS = PROFILE_RESET_DATE_MS - MILLISECONDS_PER_DAY;
+
+const FLASH_PLUGIN_NAME = "Shockwave Flash";
+const FLASH_PLUGIN_DESC = "A mock flash plugin";
+const FLASH_PLUGIN_VERSION = "\u201c1.1.1.1\u201d";
+const PLUGIN_MIME_TYPE1 = "application/x-shockwave-flash";
+const PLUGIN_MIME_TYPE2 = "text/plain";
+
+const PLUGIN2_NAME = "Quicktime";
+const PLUGIN2_DESC = "A mock Quicktime plugin";
+const PLUGIN2_VERSION = "2.3";
+
+const PERSONA_ID = "3785";
+// Defined by LightweightThemeManager, it is appended to the PERSONA_ID.
+const PERSONA_ID_SUFFIX = "@personas.mozilla.org";
+const PERSONA_NAME = "Test Theme";
+const PERSONA_DESCRIPTION = "A nice theme/persona description.";
+
+const PLUGIN_UPDATED_TOPIC = "plugins-list-updated";
+
+// system add-ons are enabled at startup, so record date when the test starts
+const SYSTEM_ADDON_INSTALL_DATE = Date.now();
+
+// Valid attribution code to write so that settings.attribution can be tested.
+const ATTRIBUTION_CODE = "source%3Dgoogle.com";
+
+/**
+ * Used to mock plugin tags in our fake plugin host.
+ */
+function PluginTag(aName, aDescription, aVersion, aEnabled) {
+ this.name = aName;
+ this.description = aDescription;
+ this.version = aVersion;
+ this.disabled = !aEnabled;
+}
+
+PluginTag.prototype = {
+ name: null,
+ description: null,
+ version: null,
+ filename: null,
+ fullpath: null,
+ disabled: false,
+ blocklisted: false,
+ clicktoplay: true,
+
+ mimeTypes: [ PLUGIN_MIME_TYPE1, PLUGIN_MIME_TYPE2 ],
+
+ getMimeTypes: function(count) {
+ count.value = this.mimeTypes.length;
+ return this.mimeTypes;
+ }
+};
+
+// A container for the plugins handled by the fake plugin host.
+var gInstalledPlugins = [
+ new PluginTag("Java", "A mock Java plugin", "1.0", false /* Disabled */),
+ new PluginTag(FLASH_PLUGIN_NAME, FLASH_PLUGIN_DESC, FLASH_PLUGIN_VERSION, true),
+];
+
+// A fake plugin host for testing plugin telemetry environment.
+var PluginHost = {
+ getPluginTags: function(countRef) {
+ countRef.value = gInstalledPlugins.length;
+ return gInstalledPlugins;
+ },
+
+ QueryInterface: function(iid) {
+ if (iid.equals(Ci.nsIPluginHost)
+ || iid.equals(Ci.nsISupports))
+ return this;
+
+ throw Components.results.NS_ERROR_NO_INTERFACE;
+ }
+}
+
+function registerFakePluginHost() {
+ MockRegistrar.register("@mozilla.org/plugin/host;1", PluginHost);
+}
+
+var SysInfo = {
+ overrides: {},
+
+ getProperty(name) {
+ // Assert.ok(false, "Mock SysInfo: " + name + ", " + JSON.stringify(this.overrides));
+ if (name in this.overrides) {
+ return this.overrides[name];
+ }
+ try {
+ return this._genuine.getProperty(name);
+ } catch (ex) {
+ throw ex;
+ }
+ },
+
+ get(name) {
+ return this._genuine.get(name);
+ },
+
+ QueryInterface(iid) {
+ if (iid.equals(Ci.nsIPropertyBag2)
+ || iid.equals(Ci.nsISupports))
+ return this;
+
+ throw Cr.NS_ERROR_NO_INTERFACE;
+ }
+};
+
+function registerFakeSysInfo() {
+ MockRegistrar.register("@mozilla.org/system-info;1", SysInfo);
+}
+
+function MockAddonWrapper(aAddon) {
+ this.addon = aAddon;
+}
+MockAddonWrapper.prototype = {
+ get id() {
+ return this.addon.id;
+ },
+
+ get type() {
+ return "service";
+ },
+
+ get appDisabled() {
+ return false;
+ },
+
+ get isCompatible() {
+ return true;
+ },
+
+ get isPlatformCompatible() {
+ return true;
+ },
+
+ get scope() {
+ return AddonManager.SCOPE_PROFILE;
+ },
+
+ get foreignInstall() {
+ return false;
+ },
+
+ get providesUpdatesSecurely() {
+ return true;
+ },
+
+ get blocklistState() {
+ return 0; // Not blocked.
+ },
+
+ get pendingOperations() {
+ return AddonManager.PENDING_NONE;
+ },
+
+ get permissions() {
+ return AddonManager.PERM_CAN_UNINSTALL | AddonManager.PERM_CAN_DISABLE;
+ },
+
+ get isActive() {
+ return true;
+ },
+
+ get name() {
+ return this.addon.name;
+ },
+
+ get version() {
+ return this.addon.version;
+ },
+
+ get creator() {
+ return new AddonManagerPrivate.AddonAuthor(this.addon.author);
+ },
+
+ get userDisabled() {
+ return this.appDisabled;
+ },
+};
+
+function createMockAddonProvider(aName) {
+ let mockProvider = {
+ _addons: [],
+
+ get name() {
+ return aName;
+ },
+
+ addAddon: function(aAddon) {
+ this._addons.push(aAddon);
+ AddonManagerPrivate.callAddonListeners("onInstalled", new MockAddonWrapper(aAddon));
+ },
+
+ getAddonsByTypes: function (aTypes, aCallback) {
+ aCallback(this._addons.map(a => new MockAddonWrapper(a)));
+ },
+
+ shutdown() {
+ return Promise.resolve();
+ },
+ };
+
+ return mockProvider;
+}
+
+/**
+ * Used to spoof the Persona Id.
+ */
+function spoofTheme(aId, aName, aDesc) {
+ return {
+ id: aId,
+ name: aName,
+ description: aDesc,
+ headerURL: "http://lwttest.invalid/a.png",
+ footerURL: "http://lwttest.invalid/b.png",
+ textcolor: Math.random().toString(),
+ accentcolor: Math.random().toString()
+ };
+}
+
+function spoofGfxAdapter() {
+ try {
+ let gfxInfo = Cc["@mozilla.org/gfx/info;1"].getService(Ci.nsIGfxInfoDebug);
+ gfxInfo.spoofVendorID(GFX_VENDOR_ID);
+ gfxInfo.spoofDeviceID(GFX_DEVICE_ID);
+ } catch (x) {
+ // If we can't test gfxInfo, that's fine, we'll note it later.
+ }
+}
+
+function spoofProfileReset() {
+ let profileAccessor = new ProfileAge();
+
+ return profileAccessor.writeTimes({
+ created: PROFILE_CREATION_DATE_MS,
+ reset: PROFILE_RESET_DATE_MS
+ });
+}
+
+function spoofPartnerInfo() {
+ let prefsToSpoof = {};
+ prefsToSpoof["distribution.id"] = DISTRIBUTION_ID;
+ prefsToSpoof["distribution.version"] = DISTRIBUTION_VERSION;
+ prefsToSpoof["app.distributor"] = DISTRIBUTOR_NAME;
+ prefsToSpoof["app.distributor.channel"] = DISTRIBUTOR_CHANNEL;
+ prefsToSpoof["app.partner.test"] = PARTNER_NAME;
+ prefsToSpoof["mozilla.partner.id"] = PARTNER_ID;
+
+ // Spoof the preferences.
+ for (let pref in prefsToSpoof) {
+ Preferences.set(pref, prefsToSpoof[pref]);
+ }
+}
+
+function getAttributionFile() {
+ let file = Services.dirsvc.get("LocalAppData", Ci.nsIFile);
+ file.append("mozilla");
+ file.append(AppConstants.MOZ_APP_NAME);
+ file.append("postSigningData");
+ return file;
+}
+
+function spoofAttributionData() {
+ if (gIsWindows) {
+ AttributionCode._clearCache();
+ let stream = Cc["@mozilla.org/network/file-output-stream;1"].
+ createInstance(Ci.nsIFileOutputStream);
+ stream.init(getAttributionFile(), -1, -1, 0);
+ stream.write(ATTRIBUTION_CODE, ATTRIBUTION_CODE.length);
+ }
+}
+
+function cleanupAttributionData() {
+ if (gIsWindows) {
+ getAttributionFile().remove(false);
+ AttributionCode._clearCache();
+ }
+}
+
+/**
+ * Check that a value is a string and not empty.
+ *
+ * @param aValue The variable to check.
+ * @return True if |aValue| has type "string" and is not empty, False otherwise.
+ */
+function checkString(aValue) {
+ return (typeof aValue == "string") && (aValue != "");
+}
+
+/**
+ * If value is non-null, check if it's a valid string.
+ *
+ * @param aValue The variable to check.
+ * @return True if it's null or a valid string, false if it's non-null and an invalid
+ * string.
+ */
+function checkNullOrString(aValue) {
+ if (aValue) {
+ return checkString(aValue);
+ } else if (aValue === null) {
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * If value is non-null, check if it's a boolean.
+ *
+ * @param aValue The variable to check.
+ * @return True if it's null or a valid boolean, false if it's non-null and an invalid
+ * boolean.
+ */
+function checkNullOrBool(aValue) {
+ return aValue === null || (typeof aValue == "boolean");
+}
+
+function checkBuildSection(data) {
+ const expectedInfo = {
+ applicationId: APP_ID,
+ applicationName: APP_NAME,
+ buildId: gAppInfo.appBuildID,
+ version: APP_VERSION,
+ vendor: "Mozilla",
+ platformVersion: PLATFORM_VERSION,
+ xpcomAbi: "noarch-spidermonkey",
+ };
+
+ Assert.ok("build" in data, "There must be a build section in Environment.");
+
+ for (let f in expectedInfo) {
+ Assert.ok(checkString(data.build[f]), f + " must be a valid string.");
+ Assert.equal(data.build[f], expectedInfo[f], f + " must have the correct value.");
+ }
+
+ // Make sure architecture and hotfixVersion are in the environment.
+ Assert.ok(checkString(data.build.architecture));
+ Assert.ok(checkString(data.build.hotfixVersion));
+ Assert.equal(data.build.hotfixVersion, APP_HOTFIX_VERSION);
+
+ if (gIsMac) {
+ let macUtils = Cc["@mozilla.org/xpcom/mac-utils;1"].getService(Ci.nsIMacUtils);
+ if (macUtils && macUtils.isUniversalBinary) {
+ Assert.ok(checkString(data.build.architecturesInBinary));
+ }
+ }
+}
+
+function checkSettingsSection(data) {
+ const EXPECTED_FIELDS_TYPES = {
+ blocklistEnabled: "boolean",
+ e10sEnabled: "boolean",
+ e10sCohort: "string",
+ telemetryEnabled: "boolean",
+ locale: "string",
+ update: "object",
+ userPrefs: "object",
+ };
+
+ Assert.ok("settings" in data, "There must be a settings section in Environment.");
+
+ for (let f in EXPECTED_FIELDS_TYPES) {
+ Assert.equal(typeof data.settings[f], EXPECTED_FIELDS_TYPES[f],
+ f + " must have the correct type.");
+ }
+
+ // Check "addonCompatibilityCheckEnabled" separately, as it is not available
+ // on Gonk.
+ if (gIsGonk) {
+ Assert.ok(!("addonCompatibilityCheckEnabled" in data.settings), "Must not be available on Gonk.");
+ } else {
+ Assert.equal(data.settings.addonCompatibilityCheckEnabled, AddonManager.checkCompatibility);
+ }
+
+  // Check "isDefaultBrowser" separately, as it is not available on Android and can either be
+ // null or boolean on other platforms.
+ if (gIsAndroid) {
+ Assert.ok(!("isDefaultBrowser" in data.settings), "Must not be available on Android.");
+ } else {
+ Assert.ok(checkNullOrBool(data.settings.isDefaultBrowser));
+ }
+
+ // Check "channel" separately, as it can either be null or string.
+ let update = data.settings.update;
+ Assert.ok(checkNullOrString(update.channel));
+ Assert.equal(typeof update.enabled, "boolean");
+ Assert.equal(typeof update.autoDownload, "boolean");
+
+ // Check "defaultSearchEngine" separately, as it can either be undefined or string.
+ if ("defaultSearchEngine" in data.settings) {
+ checkString(data.settings.defaultSearchEngine);
+ Assert.equal(typeof data.settings.defaultSearchEngineData, "object");
+ }
+
+ if ("attribution" in data.settings) {
+ Assert.equal(typeof data.settings.attribution, "object");
+ Assert.equal(data.settings.attribution.source, "google.com");
+ }
+}
+
+function checkProfileSection(data) {
+ Assert.ok("profile" in data, "There must be a profile section in Environment.");
+ Assert.equal(data.profile.creationDate, truncateToDays(PROFILE_CREATION_DATE_MS));
+ Assert.equal(data.profile.resetDate, truncateToDays(PROFILE_RESET_DATE_MS));
+}
+
+function checkPartnerSection(data, isInitial) {
+ const EXPECTED_FIELDS = {
+ distributionId: DISTRIBUTION_ID,
+ distributionVersion: DISTRIBUTION_VERSION,
+ partnerId: PARTNER_ID,
+ distributor: DISTRIBUTOR_NAME,
+ distributorChannel: DISTRIBUTOR_CHANNEL,
+ };
+
+ Assert.ok("partner" in data, "There must be a partner section in Environment.");
+
+ for (let f in EXPECTED_FIELDS) {
+ let expected = isInitial ? null : EXPECTED_FIELDS[f];
+ Assert.strictEqual(data.partner[f], expected, f + " must have the correct value.");
+ }
+
+ // Check that "partnerNames" exists and contains the correct element.
+ Assert.ok(Array.isArray(data.partner.partnerNames));
+ if (isInitial) {
+ Assert.equal(data.partner.partnerNames.length, 0);
+ } else {
+ Assert.ok(data.partner.partnerNames.includes(PARTNER_NAME));
+ }
+}
+
+function checkGfxAdapter(data) {
+ const EXPECTED_ADAPTER_FIELDS_TYPES = {
+ description: "string",
+ vendorID: "string",
+ deviceID: "string",
+ subsysID: "string",
+ RAM: "number",
+ driver: "string",
+ driverVersion: "string",
+ driverDate: "string",
+ GPUActive: "boolean",
+ };
+
+ for (let f in EXPECTED_ADAPTER_FIELDS_TYPES) {
+ Assert.ok(f in data, f + " must be available.");
+
+ if (data[f]) {
+ // Since we have a non-null value, check if it has the correct type.
+ Assert.equal(typeof data[f], EXPECTED_ADAPTER_FIELDS_TYPES[f],
+ f + " must have the correct type.");
+ }
+ }
+}
+
+function checkSystemSection(data) {
+ const EXPECTED_FIELDS = [ "memoryMB", "cpu", "os", "hdd", "gfx" ];
+ const EXPECTED_HDD_FIELDS = [ "profile", "binary", "system" ];
+
+ Assert.ok("system" in data, "There must be a system section in Environment.");
+
+ // Make sure we have all the top level sections and fields.
+ for (let f of EXPECTED_FIELDS) {
+ Assert.ok(f in data.system, f + " must be available.");
+ }
+
+ Assert.ok(Number.isFinite(data.system.memoryMB), "MemoryMB must be a number.");
+
+ if (gIsWindows || gIsMac || gIsLinux) {
+ let EXTRA_CPU_FIELDS = ["cores", "model", "family", "stepping",
+ "l2cacheKB", "l3cacheKB", "speedMHz", "vendor"];
+
+ for (let f of EXTRA_CPU_FIELDS) {
+ // Note this is testing TelemetryEnvironment.js only, not that the
+ // values are valid - null is the fallback.
+ Assert.ok(f in data.system.cpu, f + " must be available under cpu.");
+ }
+
+ if (gIsWindows) {
+ Assert.equal(typeof data.system.isWow64, "boolean",
+ "isWow64 must be available on Windows and have the correct type.");
+ Assert.ok("virtualMaxMB" in data.system, "virtualMaxMB must be available.");
+ Assert.ok(Number.isFinite(data.system.virtualMaxMB),
+ "virtualMaxMB must be a number.");
+ }
+
+ // We insist these are available
+ for (let f of ["cores"]) {
+ Assert.ok(!(f in data.system.cpu) ||
+ Number.isFinite(data.system.cpu[f]),
+ f + " must be a number if non null.");
+ }
+
+ // These should be numbers if they are not null
+ for (let f of ["model", "family", "stepping", "l2cacheKB",
+ "l3cacheKB", "speedMHz"]) {
+ Assert.ok(!(f in data.system.cpu) ||
+ data.system.cpu[f] === null ||
+ Number.isFinite(data.system.cpu[f]),
+ f + " must be a number if non null.");
+ }
+ }
+
+ let cpuData = data.system.cpu;
+ Assert.ok(Number.isFinite(cpuData.count), "CPU count must be a number.");
+ Assert.ok(Array.isArray(cpuData.extensions), "CPU extensions must be available.");
+
+ // Device data is only available on Android or Gonk.
+ if (gIsAndroid || gIsGonk) {
+ let deviceData = data.system.device;
+ Assert.ok(checkNullOrString(deviceData.model));
+ Assert.ok(checkNullOrString(deviceData.manufacturer));
+ Assert.ok(checkNullOrString(deviceData.hardware));
+ Assert.ok(checkNullOrBool(deviceData.isTablet));
+ }
+
+ let osData = data.system.os;
+ Assert.ok(checkNullOrString(osData.name));
+ Assert.ok(checkNullOrString(osData.version));
+ Assert.ok(checkNullOrString(osData.locale));
+
+ // Service pack is only available on Windows.
+ if (gIsWindows) {
+ Assert.ok(Number.isFinite(osData["servicePackMajor"]),
+ "ServicePackMajor must be a number.");
+ Assert.ok(Number.isFinite(osData["servicePackMinor"]),
+ "ServicePackMinor must be a number.");
+ if ("windowsBuildNumber" in osData) {
+ // This might not be available on all Windows platforms.
+ Assert.ok(Number.isFinite(osData["windowsBuildNumber"]),
+ "windowsBuildNumber must be a number.");
+ }
+ if ("windowsUBR" in osData) {
+ // This might not be available on all Windows platforms.
+ Assert.ok((osData["windowsUBR"] === null) || Number.isFinite(osData["windowsUBR"]),
+ "windowsUBR must be null or a number.");
+ }
+ } else if (gIsAndroid || gIsGonk) {
+ Assert.ok(checkNullOrString(osData.kernelVersion));
+ }
+
+ let check = gIsWindows ? checkString : checkNullOrString;
+ for (let disk of EXPECTED_HDD_FIELDS) {
+ Assert.ok(check(data.system.hdd[disk].model));
+ Assert.ok(check(data.system.hdd[disk].revision));
+ }
+
+ let gfxData = data.system.gfx;
+ Assert.ok("D2DEnabled" in gfxData);
+ Assert.ok("DWriteEnabled" in gfxData);
+ // DWriteVersion is disabled due to main thread jank and will be enabled
+ // again as part of bug 1154500.
+ // Assert.ok("DWriteVersion" in gfxData);
+ if (gIsWindows) {
+ Assert.equal(typeof gfxData.D2DEnabled, "boolean");
+ Assert.equal(typeof gfxData.DWriteEnabled, "boolean");
+ // As above, will be enabled again as part of bug 1154500.
+ // Assert.ok(checkString(gfxData.DWriteVersion));
+ }
+
+ Assert.ok("adapters" in gfxData);
+ Assert.ok(gfxData.adapters.length > 0, "There must be at least one GFX adapter.");
+ for (let adapter of gfxData.adapters) {
+ checkGfxAdapter(adapter);
+ }
+ Assert.equal(typeof gfxData.adapters[0].GPUActive, "boolean");
+ Assert.ok(gfxData.adapters[0].GPUActive, "The first GFX adapter must be active.");
+
+ Assert.ok(Array.isArray(gfxData.monitors));
+ if (gIsWindows || gIsMac) {
+ Assert.ok(gfxData.monitors.length >= 1, "There is at least one monitor.");
+ Assert.equal(typeof gfxData.monitors[0].screenWidth, "number");
+ Assert.equal(typeof gfxData.monitors[0].screenHeight, "number");
+ if (gIsWindows) {
+ Assert.equal(typeof gfxData.monitors[0].refreshRate, "number");
+ Assert.equal(typeof gfxData.monitors[0].pseudoDisplay, "boolean");
+ }
+ if (gIsMac) {
+ Assert.equal(typeof gfxData.monitors[0].scale, "number");
+ }
+ }
+
+ Assert.equal(typeof gfxData.features, "object");
+ Assert.equal(typeof gfxData.features.compositor, "string");
+
+ try {
+ // If we've not got nsIGfxInfoDebug, then this will throw and stop us doing
+ // this test.
+ let gfxInfo = Cc["@mozilla.org/gfx/info;1"].getService(Ci.nsIGfxInfoDebug);
+
+ if (gIsWindows || gIsMac) {
+ Assert.equal(GFX_VENDOR_ID, gfxData.adapters[0].vendorID);
+ Assert.equal(GFX_DEVICE_ID, gfxData.adapters[0].deviceID);
+ }
+
+ let features = gfxInfo.getFeatures();
+ Assert.equal(features.compositor, gfxData.features.compositor);
+ Assert.equal(features.opengl, gfxData.features.opengl);
+ Assert.equal(features.webgl, gfxData.features.webgl);
+ }
+ catch (e) {}
+}
+
+function checkActiveAddon(data) {
+ let signedState = mozinfo.addon_signing ? "number" : "undefined";
+  // system add-ons have an undefined signedState
+ if (data.isSystem)
+ signedState = "undefined";
+
+ const EXPECTED_ADDON_FIELDS_TYPES = {
+ blocklisted: "boolean",
+ name: "string",
+ userDisabled: "boolean",
+ appDisabled: "boolean",
+ version: "string",
+ scope: "number",
+ type: "string",
+ foreignInstall: "boolean",
+ hasBinaryComponents: "boolean",
+ installDay: "number",
+ updateDay: "number",
+ signedState: signedState,
+ isSystem: "boolean",
+ };
+
+ for (let f in EXPECTED_ADDON_FIELDS_TYPES) {
+ Assert.ok(f in data, f + " must be available.");
+ Assert.equal(typeof data[f], EXPECTED_ADDON_FIELDS_TYPES[f],
+ f + " must have the correct type.");
+ }
+
+ // We check "description" separately, as it can be null.
+ Assert.ok(checkNullOrString(data.description));
+}
+
+function checkPlugin(data) {
+ const EXPECTED_PLUGIN_FIELDS_TYPES = {
+ name: "string",
+ version: "string",
+ description: "string",
+ blocklisted: "boolean",
+ disabled: "boolean",
+ clicktoplay: "boolean",
+ updateDay: "number",
+ };
+
+ for (let f in EXPECTED_PLUGIN_FIELDS_TYPES) {
+ Assert.ok(f in data, f + " must be available.");
+ Assert.equal(typeof data[f], EXPECTED_PLUGIN_FIELDS_TYPES[f],
+ f + " must have the correct type.");
+ }
+
+ Assert.ok(Array.isArray(data.mimeTypes));
+ for (let type of data.mimeTypes) {
+ Assert.ok(checkString(type));
+ }
+}
+
+function checkTheme(data) {
+ // "hasBinaryComponents" is not available when testing.
+ const EXPECTED_THEME_FIELDS_TYPES = {
+ id: "string",
+ blocklisted: "boolean",
+ name: "string",
+ userDisabled: "boolean",
+ appDisabled: "boolean",
+ version: "string",
+ scope: "number",
+ foreignInstall: "boolean",
+ installDay: "number",
+ updateDay: "number",
+ };
+
+ for (let f in EXPECTED_THEME_FIELDS_TYPES) {
+ Assert.ok(f in data, f + " must be available.");
+ Assert.equal(typeof data[f], EXPECTED_THEME_FIELDS_TYPES[f],
+ f + " must have the correct type.");
+ }
+
+ // We check "description" separately, as it can be null.
+ Assert.ok(checkNullOrString(data.description));
+}
+
+function checkActiveGMPlugin(data) {
+ // GMP plugin version defaults to null until GMPDownloader runs to update it.
+ if (data.version) {
+ Assert.equal(typeof data.version, "string");
+ }
+ Assert.equal(typeof data.userDisabled, "boolean");
+ Assert.equal(typeof data.applyBackgroundUpdates, "number");
+}
+
+function checkAddonsSection(data, expectBrokenAddons) {
+ const EXPECTED_FIELDS = [
+ "activeAddons", "theme", "activePlugins", "activeGMPlugins", "activeExperiment",
+ "persona",
+ ];
+
+ Assert.ok("addons" in data, "There must be an addons section in Environment.");
+ for (let f of EXPECTED_FIELDS) {
+ Assert.ok(f in data.addons, f + " must be available.");
+ }
+
+ // Check the active addons, if available.
+ if (!expectBrokenAddons) {
+ let activeAddons = data.addons.activeAddons;
+ for (let addon in activeAddons) {
+ checkActiveAddon(activeAddons[addon]);
+ }
+ }
+
+ // Check "theme" structure.
+ if (Object.keys(data.addons.theme).length !== 0) {
+ checkTheme(data.addons.theme);
+ }
+
+ // Check the active plugins.
+ Assert.ok(Array.isArray(data.addons.activePlugins));
+ for (let plugin of data.addons.activePlugins) {
+ checkPlugin(plugin);
+ }
+
+ // Check active GMPlugins
+ let activeGMPlugins = data.addons.activeGMPlugins;
+ for (let gmPlugin in activeGMPlugins) {
+ checkActiveGMPlugin(activeGMPlugins[gmPlugin]);
+ }
+
+ // Check the active Experiment
+ let experiment = data.addons.activeExperiment;
+ if (Object.keys(experiment).length !== 0) {
+ Assert.ok(checkString(experiment.id));
+ Assert.ok(checkString(experiment.branch));
+ }
+
+ // Check persona
+ Assert.ok(checkNullOrString(data.addons.persona));
+}
+
+function checkEnvironmentData(data, isInitial = false, expectBrokenAddons = false) {
+ checkBuildSection(data);
+ checkSettingsSection(data);
+ checkProfileSection(data);
+ checkPartnerSection(data, isInitial);
+ checkSystemSection(data);
+ checkAddonsSection(data, expectBrokenAddons);
+}
+
+add_task(function* setup() {
+ // Load a custom manifest to provide search engine loading from JAR files.
+ do_load_manifest("chrome.manifest");
+ registerFakeSysInfo();
+ spoofGfxAdapter();
+ do_get_profile();
+
+ // The system add-on must be installed before AddonManager is started.
+ const distroDir = FileUtils.getDir("ProfD", ["sysfeatures", "app0"], true);
+ do_get_file("system.xpi").copyTo(distroDir, "tel-system-xpi@tests.mozilla.org.xpi");
+ let system_addon = FileUtils.File(distroDir.path);
+ system_addon.append("tel-system-xpi@tests.mozilla.org.xpi");
+ system_addon.lastModifiedTime = SYSTEM_ADDON_INSTALL_DATE;
+ loadAddonManager(APP_ID, APP_NAME, APP_VERSION, PLATFORM_VERSION);
+
+ // Spoof the persona ID, but not on Gonk.
+ if (!gIsGonk) {
+ LightweightThemeManager.currentTheme =
+ spoofTheme(PERSONA_ID, PERSONA_NAME, PERSONA_DESCRIPTION);
+ }
+ // Register a fake plugin host for consistent flash version data.
+ registerFakePluginHost();
+
+ // Setup a webserver to serve Addons, Plugins, etc.
+ gHttpServer = new HttpServer();
+ gHttpServer.start(-1);
+ let port = gHttpServer.identity.primaryPort;
+ gHttpRoot = "http://localhost:" + port + "/";
+ gDataRoot = gHttpRoot + "data/";
+ gHttpServer.registerDirectory("/data/", do_get_cwd());
+ do_register_cleanup(() => gHttpServer.stop(() => {}));
+
+  // Spoof the hotfixVersion
+ Preferences.set("extensions.hotfix.lastVersion", APP_HOTFIX_VERSION);
+
+ // Create the attribution data file, so that settings.attribution will exist.
+ // The attribution functionality only exists in Firefox.
+ if (AppConstants.MOZ_BUILD_APP == "browser") {
+ spoofAttributionData();
+ do_register_cleanup(cleanupAttributionData);
+ }
+
+ yield spoofProfileReset();
+ TelemetryEnvironment.delayedInit();
+});
+
+add_task(function* test_checkEnvironment() {
+ let environmentData = yield TelemetryEnvironment.onInitialized();
+ checkEnvironmentData(environmentData, true);
+
+ spoofPartnerInfo();
+ Services.obs.notifyObservers(null, DISTRIBUTION_CUSTOMIZATION_COMPLETE_TOPIC, null);
+
+ environmentData = TelemetryEnvironment.currentEnvironment;
+ checkEnvironmentData(environmentData);
+});
+
+add_task(function* test_prefWatchPolicies() {
+ const PREF_TEST_1 = "toolkit.telemetry.test.pref_new";
+ const PREF_TEST_2 = "toolkit.telemetry.test.pref1";
+ const PREF_TEST_3 = "toolkit.telemetry.test.pref2";
+ const PREF_TEST_4 = "toolkit.telemetry.test.pref_old";
+ const PREF_TEST_5 = "toolkit.telemetry.test.requiresRestart";
+
+ const expectedValue = "some-test-value";
+ const unexpectedValue = "unexpected-test-value";
+
+ const PREFS_TO_WATCH = new Map([
+ [PREF_TEST_1, {what: TelemetryEnvironment.RECORD_PREF_VALUE}],
+ [PREF_TEST_2, {what: TelemetryEnvironment.RECORD_PREF_STATE}],
+ [PREF_TEST_3, {what: TelemetryEnvironment.RECORD_PREF_STATE}],
+ [PREF_TEST_4, {what: TelemetryEnvironment.RECORD_PREF_VALUE}],
+ [PREF_TEST_5, {what: TelemetryEnvironment.RECORD_PREF_VALUE, requiresRestart: true}],
+ ]);
+
+ Preferences.set(PREF_TEST_4, expectedValue);
+ Preferences.set(PREF_TEST_5, expectedValue);
+
+ // Set the Environment preferences to watch.
+ TelemetryEnvironment.testWatchPreferences(PREFS_TO_WATCH);
+ let deferred = PromiseUtils.defer();
+
+ // Check that the pref values are missing or present as expected
+ Assert.strictEqual(TelemetryEnvironment.currentEnvironment.settings.userPrefs[PREF_TEST_1], undefined);
+ Assert.strictEqual(TelemetryEnvironment.currentEnvironment.settings.userPrefs[PREF_TEST_4], expectedValue);
+ Assert.strictEqual(TelemetryEnvironment.currentEnvironment.settings.userPrefs[PREF_TEST_5], expectedValue);
+
+ TelemetryEnvironment.registerChangeListener("testWatchPrefs",
+ (reason, data) => deferred.resolve(data));
+ let oldEnvironmentData = TelemetryEnvironment.currentEnvironment;
+
+ // Trigger a change in the watched preferences.
+ Preferences.set(PREF_TEST_1, expectedValue);
+ Preferences.set(PREF_TEST_2, false);
+ Preferences.set(PREF_TEST_5, unexpectedValue);
+ let eventEnvironmentData = yield deferred.promise;
+
+ // Unregister the listener.
+ TelemetryEnvironment.unregisterChangeListener("testWatchPrefs");
+
+ // Check environment contains the correct data.
+ Assert.deepEqual(oldEnvironmentData, eventEnvironmentData);
+ let userPrefs = TelemetryEnvironment.currentEnvironment.settings.userPrefs;
+
+ Assert.equal(userPrefs[PREF_TEST_1], expectedValue,
+ "Environment contains the correct preference value.");
+ Assert.equal(userPrefs[PREF_TEST_2], "<user-set>",
+ "Report that the pref was user set but the value is not shown.");
+ Assert.ok(!(PREF_TEST_3 in userPrefs),
+ "Do not report if preference not user set.");
+ Assert.equal(userPrefs[PREF_TEST_5], expectedValue,
+ "The pref value in the environment data should still be the same");
+});
+
+add_task(function* test_prefWatch_prefReset() {
+ const PREF_TEST = "toolkit.telemetry.test.pref1";
+ const PREFS_TO_WATCH = new Map([
+ [PREF_TEST, {what: TelemetryEnvironment.RECORD_PREF_STATE}],
+ ]);
+
+ // Set the preference to a non-default value.
+ Preferences.set(PREF_TEST, false);
+
+ // Set the Environment preferences to watch.
+ TelemetryEnvironment.testWatchPreferences(PREFS_TO_WATCH);
+ let deferred = PromiseUtils.defer();
+ TelemetryEnvironment.registerChangeListener("testWatchPrefs_reset", deferred.resolve);
+
+ Assert.strictEqual(TelemetryEnvironment.currentEnvironment.settings.userPrefs[PREF_TEST], "<user-set>");
+
+ // Trigger a change in the watched preferences.
+ Preferences.reset(PREF_TEST);
+ yield deferred.promise;
+
+ Assert.strictEqual(TelemetryEnvironment.currentEnvironment.settings.userPrefs[PREF_TEST], undefined);
+
+ // Unregister the listener.
+ TelemetryEnvironment.unregisterChangeListener("testWatchPrefs_reset");
+});
+
+add_task(function* test_addonsWatch_InterestingChange() {
+ const ADDON_INSTALL_URL = gDataRoot + "restartless.xpi";
+ const ADDON_ID = "tel-restartless-xpi@tests.mozilla.org";
+ // We only expect a single notification for each install, uninstall, enable, disable.
+ const EXPECTED_NOTIFICATIONS = 4;
+
+ let receivedNotifications = 0;
+
+ let registerCheckpointPromise = (aExpected) => {
+ return new Promise(resolve => TelemetryEnvironment.registerChangeListener(
+ "testWatchAddons_Changes" + aExpected, (reason, data) => {
+ Assert.equal(reason, "addons-changed");
+ receivedNotifications++;
+ resolve();
+ }));
+ };
+
+ let assertCheckpoint = (aExpected) => {
+ Assert.equal(receivedNotifications, aExpected);
+ TelemetryEnvironment.unregisterChangeListener("testWatchAddons_Changes" + aExpected);
+ };
+
+ // Test for receiving one notification after each change.
+ let checkpointPromise = registerCheckpointPromise(1);
+ yield AddonManagerTesting.installXPIFromURL(ADDON_INSTALL_URL);
+ yield checkpointPromise;
+ assertCheckpoint(1);
+ Assert.ok(ADDON_ID in TelemetryEnvironment.currentEnvironment.addons.activeAddons);
+
+ checkpointPromise = registerCheckpointPromise(2);
+ let addon = yield AddonManagerTesting.getAddonById(ADDON_ID);
+ addon.userDisabled = true;
+ yield checkpointPromise;
+ assertCheckpoint(2);
+ Assert.ok(!(ADDON_ID in TelemetryEnvironment.currentEnvironment.addons.activeAddons));
+
+ checkpointPromise = registerCheckpointPromise(3);
+ addon.userDisabled = false;
+ yield checkpointPromise;
+ assertCheckpoint(3);
+ Assert.ok(ADDON_ID in TelemetryEnvironment.currentEnvironment.addons.activeAddons);
+
+ checkpointPromise = registerCheckpointPromise(4);
+ yield AddonManagerTesting.uninstallAddonByID(ADDON_ID);
+ yield checkpointPromise;
+ assertCheckpoint(4);
+ Assert.ok(!(ADDON_ID in TelemetryEnvironment.currentEnvironment.addons.activeAddons));
+
+ Assert.equal(receivedNotifications, EXPECTED_NOTIFICATIONS,
+ "We must only receive the notifications we expect.");
+});
+
+add_task(function* test_pluginsWatch_Add() {
+ if (gIsAndroid) {
+ Assert.ok(true, "Skipping: there is no Plugin Manager on Android.");
+ return;
+ }
+
+ Assert.equal(TelemetryEnvironment.currentEnvironment.addons.activePlugins.length, 1);
+
+ let newPlugin = new PluginTag(PLUGIN2_NAME, PLUGIN2_DESC, PLUGIN2_VERSION, true);
+ gInstalledPlugins.push(newPlugin);
+
+ let deferred = PromiseUtils.defer();
+ let receivedNotifications = 0;
+ let callback = (reason, data) => {
+ receivedNotifications++;
+ Assert.equal(reason, "addons-changed");
+ deferred.resolve();
+ };
+ TelemetryEnvironment.registerChangeListener("testWatchPlugins_Add", callback);
+
+ Services.obs.notifyObservers(null, PLUGIN_UPDATED_TOPIC, null);
+ yield deferred.promise;
+
+ Assert.equal(TelemetryEnvironment.currentEnvironment.addons.activePlugins.length, 2);
+
+ TelemetryEnvironment.unregisterChangeListener("testWatchPlugins_Add");
+
+ Assert.equal(receivedNotifications, 1, "We must only receive one notification.");
+});
+
+add_task(function* test_pluginsWatch_Remove() {
+ if (gIsAndroid) {
+ Assert.ok(true, "Skipping: there is no Plugin Manager on Android.");
+ return;
+ }
+
+ // Find the test plugin.
+ let plugin = gInstalledPlugins.find(p => (p.name == PLUGIN2_NAME));
+ Assert.ok(plugin, "The test plugin must exist.");
+
+ // Remove it from the PluginHost.
+ gInstalledPlugins = gInstalledPlugins.filter(p => p != plugin);
+
+ let deferred = PromiseUtils.defer();
+ let receivedNotifications = 0;
+ let callback = () => {
+ receivedNotifications++;
+ deferred.resolve();
+ };
+ TelemetryEnvironment.registerChangeListener("testWatchPlugins_Remove", callback);
+
+ Services.obs.notifyObservers(null, PLUGIN_UPDATED_TOPIC, null);
+ yield deferred.promise;
+
+ TelemetryEnvironment.unregisterChangeListener("testWatchPlugins_Remove");
+
+ Assert.equal(receivedNotifications, 1, "We must only receive one notification.");
+});
+
+add_task(function* test_addonsWatch_NotInterestingChange() {
+  // We are not interested in dictionary addon changes.
+ const DICTIONARY_ADDON_INSTALL_URL = gDataRoot + "dictionary.xpi";
+ const INTERESTING_ADDON_INSTALL_URL = gDataRoot + "restartless.xpi";
+
+ let receivedNotification = false;
+ let deferred = PromiseUtils.defer();
+ TelemetryEnvironment.registerChangeListener("testNotInteresting",
+ () => {
+ Assert.ok(!receivedNotification, "Should not receive multiple notifications");
+ receivedNotification = true;
+ deferred.resolve();
+ });
+
+ yield AddonManagerTesting.installXPIFromURL(DICTIONARY_ADDON_INSTALL_URL);
+ yield AddonManagerTesting.installXPIFromURL(INTERESTING_ADDON_INSTALL_URL);
+
+ yield deferred.promise;
+ Assert.ok(!("telemetry-dictionary@tests.mozilla.org" in
+ TelemetryEnvironment.currentEnvironment.addons.activeAddons),
+ "Dictionaries should not appear in active addons.");
+
+ TelemetryEnvironment.unregisterChangeListener("testNotInteresting");
+});
+
+add_task(function* test_addonsAndPlugins() {
+ const ADDON_INSTALL_URL = gDataRoot + "restartless.xpi";
+ const ADDON_ID = "tel-restartless-xpi@tests.mozilla.org";
+ const ADDON_INSTALL_DATE = truncateToDays(Date.now());
+ const EXPECTED_ADDON_DATA = {
+ blocklisted: false,
+ description: "A restartless addon which gets enabled without a reboot.",
+ name: "XPI Telemetry Restartless Test",
+ userDisabled: false,
+ appDisabled: false,
+ version: "1.0",
+ scope: 1,
+ type: "extension",
+ foreignInstall: false,
+ hasBinaryComponents: false,
+ installDay: ADDON_INSTALL_DATE,
+ updateDay: ADDON_INSTALL_DATE,
+ signedState: mozinfo.addon_signing ? AddonManager.SIGNEDSTATE_SIGNED : AddonManager.SIGNEDSTATE_NOT_REQUIRED,
+ isSystem: false,
+ };
+ const SYSTEM_ADDON_ID = "tel-system-xpi@tests.mozilla.org";
+ const EXPECTED_SYSTEM_ADDON_DATA = {
+ blocklisted: false,
+ description: "A system addon which is shipped with Firefox.",
+ name: "XPI Telemetry System Add-on Test",
+ userDisabled: false,
+ appDisabled: false,
+ version: "1.0",
+ scope: 1,
+ type: "extension",
+ foreignInstall: false,
+ hasBinaryComponents: false,
+ installDay: truncateToDays(SYSTEM_ADDON_INSTALL_DATE),
+ updateDay: truncateToDays(SYSTEM_ADDON_INSTALL_DATE),
+ signedState: undefined,
+ isSystem: true,
+ };
+
+ const EXPECTED_PLUGIN_DATA = {
+ name: FLASH_PLUGIN_NAME,
+ version: FLASH_PLUGIN_VERSION,
+ description: FLASH_PLUGIN_DESC,
+ blocklisted: false,
+ disabled: false,
+ clicktoplay: true,
+ };
+
+ // Install an addon so we have some data.
+ yield AddonManagerTesting.installXPIFromURL(ADDON_INSTALL_URL);
+
+ let data = TelemetryEnvironment.currentEnvironment;
+ checkEnvironmentData(data);
+
+ // Check addon data.
+ Assert.ok(ADDON_ID in data.addons.activeAddons, "We must have one active addon.");
+ let targetAddon = data.addons.activeAddons[ADDON_ID];
+ for (let f in EXPECTED_ADDON_DATA) {
+ Assert.equal(targetAddon[f], EXPECTED_ADDON_DATA[f], f + " must have the correct value.");
+ }
+
+ // Check system add-on data.
+ Assert.ok(SYSTEM_ADDON_ID in data.addons.activeAddons, "We must have one active system addon.");
+ let targetSystemAddon = data.addons.activeAddons[SYSTEM_ADDON_ID];
+ for (let f in EXPECTED_SYSTEM_ADDON_DATA) {
+ Assert.equal(targetSystemAddon[f], EXPECTED_SYSTEM_ADDON_DATA[f], f + " must have the correct value.");
+ }
+
+ // Check theme data.
+ let theme = data.addons.theme;
+ Assert.equal(theme.id, (PERSONA_ID + PERSONA_ID_SUFFIX));
+ Assert.equal(theme.name, PERSONA_NAME);
+ Assert.equal(theme.description, PERSONA_DESCRIPTION);
+
+ // Check plugin data.
+ Assert.equal(data.addons.activePlugins.length, 1, "We must have only one active plugin.");
+ let targetPlugin = data.addons.activePlugins[0];
+ for (let f in EXPECTED_PLUGIN_DATA) {
+ Assert.equal(targetPlugin[f], EXPECTED_PLUGIN_DATA[f], f + " must have the correct value.");
+ }
+
+ // Check plugin mime types.
+ Assert.ok(targetPlugin.mimeTypes.find(m => m == PLUGIN_MIME_TYPE1));
+ Assert.ok(targetPlugin.mimeTypes.find(m => m == PLUGIN_MIME_TYPE2));
+ Assert.ok(!targetPlugin.mimeTypes.find(m => m == "Not There."));
+
+ let personaId = (gIsGonk) ? null : PERSONA_ID;
+ Assert.equal(data.addons.persona, personaId, "The correct Persona Id must be reported.");
+
+ // Uninstall the addon.
+ yield AddonManagerTesting.uninstallAddonByID(ADDON_ID);
+});
+
+add_task(function* test_signedAddon() {
+ const ADDON_INSTALL_URL = gDataRoot + "signed.xpi";
+ const ADDON_ID = "tel-signed-xpi@tests.mozilla.org";
+ const ADDON_INSTALL_DATE = truncateToDays(Date.now());
+ const EXPECTED_ADDON_DATA = {
+ blocklisted: false,
+ description: "A signed addon which gets enabled without a reboot.",
+ name: "XPI Telemetry Signed Test",
+ userDisabled: false,
+ appDisabled: false,
+ version: "1.0",
+ scope: 1,
+ type: "extension",
+ foreignInstall: false,
+ hasBinaryComponents: false,
+ installDay: ADDON_INSTALL_DATE,
+ updateDay: ADDON_INSTALL_DATE,
+ signedState: mozinfo.addon_signing ? AddonManager.SIGNEDSTATE_SIGNED : AddonManager.SIGNEDSTATE_NOT_REQUIRED,
+ };
+
+ let deferred = PromiseUtils.defer();
+ TelemetryEnvironment.registerChangeListener("test_signedAddon", deferred.resolve);
+
+ // Install the addon.
+ yield AddonManagerTesting.installXPIFromURL(ADDON_INSTALL_URL);
+
+ yield deferred.promise;
+ // Unregister the listener.
+ TelemetryEnvironment.unregisterChangeListener("test_signedAddon");
+
+ let data = TelemetryEnvironment.currentEnvironment;
+ checkEnvironmentData(data);
+
+ // Check addon data.
+ Assert.ok(ADDON_ID in data.addons.activeAddons, "Add-on should be in the environment.");
+ let targetAddon = data.addons.activeAddons[ADDON_ID];
+ for (let f in EXPECTED_ADDON_DATA) {
+ Assert.equal(targetAddon[f], EXPECTED_ADDON_DATA[f], f + " must have the correct value.");
+ }
+});
+
+add_task(function* test_addonsFieldsLimit() {
+ const ADDON_INSTALL_URL = gDataRoot + "long-fields.xpi";
+ const ADDON_ID = "tel-longfields-xpi@tests.mozilla.org";
+
+ // Install the addon and wait for the TelemetryEnvironment to pick it up.
+ let deferred = PromiseUtils.defer();
+ TelemetryEnvironment.registerChangeListener("test_longFieldsAddon", deferred.resolve);
+ yield AddonManagerTesting.installXPIFromURL(ADDON_INSTALL_URL);
+ yield deferred.promise;
+ TelemetryEnvironment.unregisterChangeListener("test_longFieldsAddon");
+
+ let data = TelemetryEnvironment.currentEnvironment;
+ checkEnvironmentData(data);
+
+ // Check that the addon is available and that the string fields are limited.
+ Assert.ok(ADDON_ID in data.addons.activeAddons, "Add-on should be in the environment.");
+ let targetAddon = data.addons.activeAddons[ADDON_ID];
+
+ // TelemetryEnvironment limits the length of string fields for activeAddons to 100 chars,
+ // to mitigate misbehaving addons.
+ Assert.lessOrEqual(targetAddon.version.length, 100,
+ "The version string must have been limited");
+ Assert.lessOrEqual(targetAddon.name.length, 100,
+ "The name string must have been limited");
+ Assert.lessOrEqual(targetAddon.description.length, 100,
+ "The description string must have been limited");
+});
+
+add_task(function* test_collectionWithbrokenAddonData() {
+ const BROKEN_ADDON_ID = "telemetry-test2.example.com@services.mozilla.org";
+ const BROKEN_MANIFEST = {
+ id: "telemetry-test2.example.com@services.mozilla.org",
+ name: "telemetry broken addon",
+ origin: "https://telemetry-test2.example.com",
+ version: 1, // This is intentionally not a string.
+ signedState: AddonManager.SIGNEDSTATE_SIGNED,
+ };
+
+ const ADDON_INSTALL_URL = gDataRoot + "restartless.xpi";
+ const ADDON_ID = "tel-restartless-xpi@tests.mozilla.org";
+ const ADDON_INSTALL_DATE = truncateToDays(Date.now());
+ const EXPECTED_ADDON_DATA = {
+ blocklisted: false,
+ description: "A restartless addon which gets enabled without a reboot.",
+ name: "XPI Telemetry Restartless Test",
+ userDisabled: false,
+ appDisabled: false,
+ version: "1.0",
+ scope: 1,
+ type: "extension",
+ foreignInstall: false,
+ hasBinaryComponents: false,
+ installDay: ADDON_INSTALL_DATE,
+ updateDay: ADDON_INSTALL_DATE,
+ signedState: mozinfo.addon_signing ? AddonManager.SIGNEDSTATE_MISSING :
+ AddonManager.SIGNEDSTATE_NOT_REQUIRED,
+ };
+
+ let receivedNotifications = 0;
+
+ let registerCheckpointPromise = (aExpected) => {
+ return new Promise(resolve => TelemetryEnvironment.registerChangeListener(
+ "testBrokenAddon_collection" + aExpected, (reason, data) => {
+ Assert.equal(reason, "addons-changed");
+ receivedNotifications++;
+ resolve();
+ }));
+ };
+
+ let assertCheckpoint = (aExpected) => {
+ Assert.equal(receivedNotifications, aExpected);
+ TelemetryEnvironment.unregisterChangeListener("testBrokenAddon_collection" + aExpected);
+ };
+
+ // Register the broken provider and install the broken addon.
+ let checkpointPromise = registerCheckpointPromise(1);
+ let brokenAddonProvider = createMockAddonProvider("Broken Extensions Provider");
+ AddonManagerPrivate.registerProvider(brokenAddonProvider);
+ brokenAddonProvider.addAddon(BROKEN_MANIFEST);
+ yield checkpointPromise;
+ assertCheckpoint(1);
+
+ // Now install an addon which returns the correct information.
+ checkpointPromise = registerCheckpointPromise(2);
+ yield AddonManagerTesting.installXPIFromURL(ADDON_INSTALL_URL);
+ yield checkpointPromise;
+ assertCheckpoint(2);
+
+ // Check that the new environment contains the Social addon installed with the broken
+ // manifest and the rest of the data.
+ let data = TelemetryEnvironment.currentEnvironment;
+ checkEnvironmentData(data, false, true /* expect broken addons*/);
+
+ let activeAddons = data.addons.activeAddons;
+ Assert.ok(BROKEN_ADDON_ID in activeAddons,
+ "The addon with the broken manifest must be reported.");
+ Assert.equal(activeAddons[BROKEN_ADDON_ID].version, null,
+ "null should be reported for invalid data.");
+ Assert.ok(ADDON_ID in activeAddons,
+ "The valid addon must be reported.");
+ Assert.equal(activeAddons[ADDON_ID].description, EXPECTED_ADDON_DATA.description,
+ "The description for the valid addon should be correct.");
+
+ // Unregister the broken provider so we don't mess with other tests.
+ AddonManagerPrivate.unregisterProvider(brokenAddonProvider);
+
+ // Uninstall the valid addon.
+ yield AddonManagerTesting.uninstallAddonByID(ADDON_ID);
+});
+
+add_task(function* test_defaultSearchEngine() {
+ // Check that no default engine is in the environment before the search service is
+ // initialized.
+ let data = TelemetryEnvironment.currentEnvironment;
+ checkEnvironmentData(data);
+ Assert.ok(!("defaultSearchEngine" in data.settings));
+ Assert.ok(!("defaultSearchEngineData" in data.settings));
+
+ // Load the engines definitions from a custom JAR file: that's needed so that
+ // the search provider reports an engine identifier.
+ let url = "chrome://testsearchplugin/locale/searchplugins/";
+ let resProt = Services.io.getProtocolHandler("resource")
+ .QueryInterface(Ci.nsIResProtocolHandler);
+ resProt.setSubstitution("search-plugins",
+ Services.io.newURI(url, null, null));
+
+ // Initialize the search service.
+ yield new Promise(resolve => Services.search.init(resolve));
+
+ // Our default engine from the JAR file has an identifier. Check if it is correctly
+ // reported.
+ data = TelemetryEnvironment.currentEnvironment;
+ checkEnvironmentData(data);
+ Assert.equal(data.settings.defaultSearchEngine, "telemetrySearchIdentifier");
+ let expectedSearchEngineData = {
+ name: "telemetrySearchIdentifier",
+ loadPath: "jar:[other]/searchTest.jar!testsearchplugin/telemetrySearchIdentifier.xml",
+ origin: "default",
+ submissionURL: "http://ar.wikipedia.org/wiki/%D8%AE%D8%A7%D8%B5:%D8%A8%D8%AD%D8%AB?search=&sourceid=Mozilla-search"
+ };
+ Assert.deepEqual(data.settings.defaultSearchEngineData, expectedSearchEngineData);
+
+ // Remove all the search engines.
+ for (let engine of Services.search.getEngines()) {
+ Services.search.removeEngine(engine);
+ }
+ // The search service does not notify "engine-current" when removing a default engine.
+ // Manually force the notification.
+ // TODO: remove this when bug 1165341 is resolved.
+ Services.obs.notifyObservers(null, "browser-search-engine-modified", "engine-current");
+
+ // Then check that no default engine is reported if none is available.
+ data = TelemetryEnvironment.currentEnvironment;
+ checkEnvironmentData(data);
+ Assert.equal(data.settings.defaultSearchEngine, "NONE");
+ Assert.deepEqual(data.settings.defaultSearchEngineData, {name:"NONE"});
+
+ // Add a new search engine (this will have no engine identifier).
+ const SEARCH_ENGINE_ID = "telemetry_default";
+ const SEARCH_ENGINE_URL = "http://www.example.org/?search={searchTerms}";
+ Services.search.addEngineWithDetails(SEARCH_ENGINE_ID, "", null, "", "get", SEARCH_ENGINE_URL);
+
+ // Register a new change listener and then wait for the search engine change to be notified.
+ let deferred = PromiseUtils.defer();
+ TelemetryEnvironment.registerChangeListener("testWatch_SearchDefault", deferred.resolve);
+ Services.search.defaultEngine = Services.search.getEngineByName(SEARCH_ENGINE_ID);
+ yield deferred.promise;
+
+ data = TelemetryEnvironment.currentEnvironment;
+ checkEnvironmentData(data);
+
+ const EXPECTED_SEARCH_ENGINE = "other-" + SEARCH_ENGINE_ID;
+ Assert.equal(data.settings.defaultSearchEngine, EXPECTED_SEARCH_ENGINE);
+
+ const EXPECTED_SEARCH_ENGINE_DATA = {
+ name: "telemetry_default",
+ loadPath: "[other]addEngineWithDetails",
+ origin: "verified"
+ };
+ Assert.deepEqual(data.settings.defaultSearchEngineData, EXPECTED_SEARCH_ENGINE_DATA);
+ TelemetryEnvironment.unregisterChangeListener("testWatch_SearchDefault");
+
+ // Cleanly install an engine from an xml file, and check if origin is
+ // recorded as "verified".
+ let promise = new Promise(resolve => {
+ TelemetryEnvironment.registerChangeListener("testWatch_SearchDefault", resolve);
+ });
+ let engine = yield new Promise((resolve, reject) => {
+ Services.obs.addObserver(function obs(obsSubject, obsTopic, obsData) {
+ try {
+ let searchEngine = obsSubject.QueryInterface(Ci.nsISearchEngine);
+ do_print("Observed " + obsData + " for " + searchEngine.name);
+ if (obsData != "engine-added" || searchEngine.name != "engine-telemetry") {
+ return;
+ }
+
+ Services.obs.removeObserver(obs, "browser-search-engine-modified");
+ resolve(searchEngine);
+ } catch (ex) {
+ reject(ex);
+ }
+ }, "browser-search-engine-modified", false);
+ Services.search.addEngine("file://" + do_get_cwd().path + "/engine.xml",
+ null, null, false);
+ });
+ Services.search.defaultEngine = engine;
+ yield promise;
+ TelemetryEnvironment.unregisterChangeListener("testWatch_SearchDefault");
+ data = TelemetryEnvironment.currentEnvironment;
+ checkEnvironmentData(data);
+ Assert.deepEqual(data.settings.defaultSearchEngineData,
+ {"name":"engine-telemetry", "loadPath":"[other]/engine.xml", "origin":"verified"});
+
+ // Now break this engine's load path hash.
+ promise = new Promise(resolve => {
+ TelemetryEnvironment.registerChangeListener("testWatch_SearchDefault", resolve);
+ });
+ engine.wrappedJSObject.setAttr("loadPathHash", "broken");
+ Services.obs.notifyObservers(null, "browser-search-engine-modified", "engine-current");
+ yield promise;
+ TelemetryEnvironment.unregisterChangeListener("testWatch_SearchDefault");
+ data = TelemetryEnvironment.currentEnvironment;
+ Assert.equal(data.settings.defaultSearchEngineData.origin, "invalid");
+ Services.search.removeEngine(engine);
+
+ // Define and reset the test preference.
+ const PREF_TEST = "toolkit.telemetry.test.pref1";
+ const PREFS_TO_WATCH = new Map([
+ [PREF_TEST, {what: TelemetryEnvironment.RECORD_PREF_STATE}],
+ ]);
+ Preferences.reset(PREF_TEST);
+
+ // Watch the test preference.
+ TelemetryEnvironment.testWatchPreferences(PREFS_TO_WATCH);
+ deferred = PromiseUtils.defer();
+ TelemetryEnvironment.registerChangeListener("testSearchEngine_pref", deferred.resolve);
+ // Trigger an environment change.
+ Preferences.set(PREF_TEST, 1);
+ yield deferred.promise;
+ TelemetryEnvironment.unregisterChangeListener("testSearchEngine_pref");
+
+ // Check that the search engine information is correctly retained when prefs change.
+ data = TelemetryEnvironment.currentEnvironment;
+ checkEnvironmentData(data);
+ Assert.equal(data.settings.defaultSearchEngine, EXPECTED_SEARCH_ENGINE);
+
+ // Check that by default we are not sending a cohort identifier...
+ Assert.equal(data.settings.searchCohort, undefined);
+
+ // ... but that if a cohort identifier is set, we send it.
+ Services.prefs.setCharPref("browser.search.cohort", "testcohort");
+ Services.obs.notifyObservers(null, "browser-search-service", "init-complete");
+ data = TelemetryEnvironment.currentEnvironment;
+ Assert.equal(data.settings.searchCohort, "testcohort");
+});
+
+add_task(function* test_osstrings() {
+ // First test that numbers in sysinfo properties are converted to string fields
+ // in system.os.
+ SysInfo.overrides = {
+ version: 1,
+ name: 2,
+ kernel_version: 3,
+ };
+
+ yield TelemetryEnvironment.testCleanRestart().onInitialized();
+ let data = TelemetryEnvironment.currentEnvironment;
+ checkEnvironmentData(data);
+
+ Assert.equal(data.system.os.version, "1");
+ Assert.equal(data.system.os.name, "2");
+ if (AppConstants.platform == "android") {
+ Assert.equal(data.system.os.kernelVersion, "3");
+ }
+
+ // Check that null values are also handled.
+ SysInfo.overrides = {
+ version: null,
+ name: null,
+ kernel_version: null,
+ };
+
+ yield TelemetryEnvironment.testCleanRestart().onInitialized();
+ data = TelemetryEnvironment.currentEnvironment;
+ checkEnvironmentData(data);
+
+ Assert.equal(data.system.os.version, null);
+ Assert.equal(data.system.os.name, null);
+ if (AppConstants.platform == "android") {
+ Assert.equal(data.system.os.kernelVersion, null);
+ }
+
+ // Clean up.
+ SysInfo.overrides = {};
+ yield TelemetryEnvironment.testCleanRestart().onInitialized();
+});
+
+add_task(function* test_environmentShutdown() {
+ // Define and reset the test preference.
+ const PREF_TEST = "toolkit.telemetry.test.pref1";
+ const PREFS_TO_WATCH = new Map([
+ [PREF_TEST, {what: TelemetryEnvironment.RECORD_PREF_STATE}],
+ ]);
+ Preferences.reset(PREF_TEST);
+
+ // Set up the preferences and listener, then the trigger shutdown
+ TelemetryEnvironment.testWatchPreferences(PREFS_TO_WATCH);
+ TelemetryEnvironment.registerChangeListener("test_environmentShutdownChange", () => {
+    // Register a new change listener that asserts if change is propagated
+ Assert.ok(false, "No change should be propagated after shutdown.");
+ });
+ TelemetryEnvironment.shutdown();
+
+ // Flipping the test preference after shutdown should not trigger the listener
+ Preferences.set(PREF_TEST, 1);
+
+ // Unregister the listener.
+ TelemetryEnvironment.unregisterChangeListener("test_environmentShutdownChange");
+});
diff --git a/toolkit/components/telemetry/tests/unit/test_TelemetryEvents.js b/toolkit/components/telemetry/tests/unit/test_TelemetryEvents.js
new file mode 100644
index 000000000..2bfb62c14
--- /dev/null
+++ b/toolkit/components/telemetry/tests/unit/test_TelemetryEvents.js
@@ -0,0 +1,249 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/
+*/
+
+// Dataset constants used when requesting event snapshots.
+const OPTIN = Ci.nsITelemetry.DATASET_RELEASE_CHANNEL_OPTIN;
+const OPTOUT = Ci.nsITelemetry.DATASET_RELEASE_CHANNEL_OPTOUT;
+
+// Check that a snapshot of serialized events has the expected shape: an
+// array of [timestamp, category, method, object, value?, extra?] arrays,
+// where value is null or a string and extra is null or a string->string map.
+function checkEventFormat(events) {
+  Assert.ok(Array.isArray(events), "Events should be serialized to an array.");
+  for (let e of events) {
+    Assert.ok(Array.isArray(e), "Event should be an array.");
+    Assert.greaterOrEqual(e.length, 4, "Event should have at least 4 elements.");
+    Assert.lessOrEqual(e.length, 6, "Event should have at most 6 elements.");
+
+    Assert.equal(typeof(e[0]), "number", "Element 0 should be a number.");
+    Assert.equal(typeof(e[1]), "string", "Element 1 should be a string.");
+    Assert.equal(typeof(e[2]), "string", "Element 2 should be a string.");
+    Assert.equal(typeof(e[3]), "string", "Element 3 should be a string.");
+
+    if (e.length > 4) {
+      Assert.ok(e[4] === null || typeof(e[4]) == "string",
+                "Event element 4 should be null or a string.");
+    }
+    if (e.length > 5) {
+      Assert.ok(e[5] === null || typeof(e[5]) == "object",
+                "Event element 5 should be null or an object.");
+    }
+
+    let extra = e[5];
+    if (extra) {
+      Assert.ok(Object.keys(extra).every(k => typeof(k) == "string"),
+                "All extra keys should be strings.");
+      Assert.ok(Object.values(extra).every(v => typeof(v) == "string"),
+                "All extra values should be strings.");
+    }
+  }
+}
+
+add_task(function* test_recording() {
+ Telemetry.clearEvents();
+
+ // Record some events.
+ let expected = [
+ {optout: false, event: ["telemetry.test", "test1", "object1"]},
+ {optout: false, event: ["telemetry.test", "test2", "object2"]},
+
+ {optout: false, event: ["telemetry.test", "test1", "object1", "value"]},
+ {optout: false, event: ["telemetry.test", "test1", "object1", "value", null]},
+ {optout: false, event: ["telemetry.test", "test1", "object1", null, {"key1": "value1"}]},
+ {optout: false, event: ["telemetry.test", "test1", "object1", "value", {"key1": "value1", "key2": "value2"}]},
+
+ {optout: true, event: ["telemetry.test", "optout", "object1"]},
+ {optout: false, event: ["telemetry.test.second", "test", "object1"]},
+ {optout: false, event: ["telemetry.test.second", "test", "object1", null, {"key1": "value1"}]},
+ ];
+
+ for (let entry of expected) {
+ entry.tsBefore = Math.floor(Telemetry.msSinceProcessStart());
+ try {
+ Telemetry.recordEvent(...entry.event);
+ } catch (ex) {
+ Assert.ok(false, `Failed to record event ${JSON.stringify(entry.event)}: ${ex}`);
+ }
+ entry.tsAfter = Math.floor(Telemetry.msSinceProcessStart());
+ }
+
+ // Strip off trailing null values to match the serialized events.
+ for (let entry of expected) {
+ let e = entry.event;
+ while ((e.length >= 3) && (e[e.length - 1] === null)) {
+ e.pop();
+ }
+ }
+
+ // The following should not result in any recorded events.
+ Assert.throws(() => Telemetry.recordEvent("unknown.category", "test1", "object1"),
+ /Error: Unknown event: \["unknown.category", "test1", "object1"\]/,
+ "Should throw on unknown category.");
+ Assert.throws(() => Telemetry.recordEvent("telemetry.test", "unknown", "object1"),
+ /Error: Unknown event: \["telemetry.test", "unknown", "object1"\]/,
+ "Should throw on unknown method.");
+ Assert.throws(() => Telemetry.recordEvent("telemetry.test", "test1", "unknown"),
+ /Error: Unknown event: \["telemetry.test", "test1", "unknown"\]/,
+ "Should throw on unknown object.");
+
+ let checkEvents = (events, expectedEvents) => {
+ checkEventFormat(events);
+ Assert.equal(events.length, expectedEvents.length,
+ "Snapshot should have the right number of events.");
+
+ for (let i = 0; i < events.length; ++i) {
+ let {tsBefore, tsAfter} = expectedEvents[i];
+ let ts = events[i][0];
+ Assert.greaterOrEqual(ts, tsBefore, "The recorded timestamp should be greater than the one before recording.");
+ Assert.lessOrEqual(ts, tsAfter, "The recorded timestamp should be less than the one after recording.");
+
+ let recordedData = events[i].slice(1);
+ let expectedData = expectedEvents[i].event.slice();
+ Assert.deepEqual(recordedData, expectedData, "The recorded event data should match.");
+ }
+ };
+
+ // Check that the expected events were recorded.
+ let events = Telemetry.snapshotBuiltinEvents(OPTIN, false);
+ checkEvents(events, expected);
+
+ // Check serializing only opt-out events.
+ events = Telemetry.snapshotBuiltinEvents(OPTOUT, false);
+ filtered = expected.filter(e => e.optout == true);
+ checkEvents(events, filtered);
+});
+
+// Check that snapshotting with the clear flag set removes the stored events.
+add_task(function* test_clear() {
+  Telemetry.clearEvents();
+
+  const COUNT = 10;
+  for (let i = 0; i < COUNT; ++i) {
+    Telemetry.recordEvent("telemetry.test", "test1", "object1");
+    Telemetry.recordEvent("telemetry.test.second", "test", "object1");
+  }
+
+  // Check that events were recorded.
+  // The events are cleared by passing the respective flag.
+  let events = Telemetry.snapshotBuiltinEvents(OPTIN, true);
+  Assert.equal(events.length, 2 * COUNT, `Should have recorded ${2 * COUNT} events.`);
+
+  // Now the events should be cleared.
+  events = Telemetry.snapshotBuiltinEvents(OPTIN, false);
+  Assert.equal(events.length, 0, `Should have cleared the events.`);
+});
+
+// Check that recording calls for expired events are silently dropped.
+add_task(function* test_expiry() {
+  Telemetry.clearEvents();
+
+  // Recording call with event that is expired by version.
+  Telemetry.recordEvent("telemetry.test", "expired_version", "object1");
+  let events = Telemetry.snapshotBuiltinEvents(OPTIN, true);
+  Assert.equal(events.length, 0, "Should not record event with expired version.");
+
+  // Recording call with event that is expired by date.
+  Telemetry.recordEvent("telemetry.test", "expired_date", "object1");
+  events = Telemetry.snapshotBuiltinEvents(OPTIN, true);
+  Assert.equal(events.length, 0, "Should not record event with expired date.");
+
+  // Recording call with event that has expiry_version and expiry_date in the future.
+  Telemetry.recordEvent("telemetry.test", "not_expired_optout", "object1");
+  events = Telemetry.snapshotBuiltinEvents(OPTOUT, true);
+  Assert.equal(events.length, 1, "Should record event when date and version are not expired.");
+});
+
+// Check that recording calls with invalid value/extra arguments are dropped
+// without recording anything.
+add_task(function* test_invalidParams() {
+  Telemetry.clearEvents();
+
+  // Recording call with wrong type for value argument.
+  Telemetry.recordEvent("telemetry.test", "test1", "object1", 1);
+  let events = Telemetry.snapshotBuiltinEvents(OPTIN, true);
+  Assert.equal(events.length, 0, "Should not record event when value argument with invalid type is passed.");
+
+  // Recording call with wrong type for extra argument.
+  Telemetry.recordEvent("telemetry.test", "test1", "object1", null, "invalid");
+  events = Telemetry.snapshotBuiltinEvents(OPTIN, true);
+  Assert.equal(events.length, 0, "Should not record event when extra argument with invalid type is passed.");
+
+  // Recording call with unknown extra key.
+  Telemetry.recordEvent("telemetry.test", "test1", "object1", null, {"key3": "x"});
+  events = Telemetry.snapshotBuiltinEvents(OPTIN, true);
+  Assert.equal(events.length, 0, "Should not record event when extra argument with invalid key is passed.");
+
+  // Recording call with invalid value type.
+  Telemetry.recordEvent("telemetry.test", "test1", "object1", null, {"key3": 1});
+  events = Telemetry.snapshotBuiltinEvents(OPTIN, true);
+  Assert.equal(events.length, 0, "Should not record event when extra argument with invalid value type is passed.");
+});
+
+// Check that only the first LIMIT events are kept once the in-memory event
+// storage limit is hit.
+add_task(function* test_storageLimit() {
+  Telemetry.clearEvents();
+
+  // Record more events than the storage limit allows.
+  let LIMIT = 1000;
+  let COUNT = LIMIT + 10;
+  for (let i = 0; i < COUNT; ++i) {
+    Telemetry.recordEvent("telemetry.test", "test1", "object1", String(i));
+  }
+
+  // Check that the right events were recorded.
+  let events = Telemetry.snapshotBuiltinEvents(OPTIN, true);
+  Assert.equal(events.length, LIMIT, `Should have only recorded ${LIMIT} events`);
+  Assert.ok(events.every((e, idx) => e[4] === String(idx)),
+            "Should have recorded all events from before hitting the limit.");
+});
+
+// Check that over-long value and extra strings are truncated to the
+// per-string limit when recorded.
+add_task(function* test_valueLimits() {
+  Telemetry.clearEvents();
+
+  // Record values that are at or over the limits for string lengths.
+  let LIMIT = 80;
+  let expected = [
+    ["telemetry.test", "test1", "object1", "a".repeat(LIMIT - 10), null],
+    ["telemetry.test", "test1", "object1", "a".repeat(LIMIT     ), null],
+    ["telemetry.test", "test1", "object1", "a".repeat(LIMIT + 1), null],
+    ["telemetry.test", "test1", "object1", "a".repeat(LIMIT + 10), null],
+
+    ["telemetry.test", "test1", "object1", null, {key1: "a".repeat(LIMIT - 10)}],
+    ["telemetry.test", "test1", "object1", null, {key1: "a".repeat(LIMIT     )}],
+    ["telemetry.test", "test1", "object1", null, {key1: "a".repeat(LIMIT + 1)}],
+    ["telemetry.test", "test1", "object1", null, {key1: "a".repeat(LIMIT + 10)}],
+  ];
+
+  // Record each event, then truncate the expected strings to the limit,
+  // since that is what the recorded snapshot should contain.
+  for (let event of expected) {
+    Telemetry.recordEvent(...event);
+    if (event[3]) {
+      event[3] = event[3].substr(0, LIMIT);
+    }
+    if (event[4]) {
+      event[4].key1 = event[4].key1.substr(0, LIMIT);
+    }
+  }
+
+  // Strip off trailing null values to match the serialized events.
+  for (let e of expected) {
+    while ((e.length >= 3) && (e[e.length - 1] === null)) {
+      e.pop();
+    }
+  }
+
+  // Check that the right events were recorded.
+  let events = Telemetry.snapshotBuiltinEvents(OPTIN, true);
+  Assert.equal(events.length, expected.length,
+               "Should have recorded the expected number of events");
+  for (let i = 0; i < expected.length; ++i) {
+    Assert.deepEqual(events[i].slice(1), expected[i],
+                     "Should have recorded the expected event data.");
+  }
+});
+
+// Check that unicode strings round-trip through event value and extra fields.
+add_task(function* test_unicodeValues() {
+  Telemetry.clearEvents();
+
+  // Record string values containing unicode characters.
+  let value = "漢語";
+  Telemetry.recordEvent("telemetry.test", "test1", "object1", value);
+  Telemetry.recordEvent("telemetry.test", "test1", "object1", null, {"key1": value});
+
+  // Check that the values were correctly recorded.
+  let events = Telemetry.snapshotBuiltinEvents(OPTIN, true);
+  Assert.equal(events.length, 2, "Should have recorded 2 events.");
+  Assert.equal(events[0][4], value, "Should have recorded the right value.");
+  Assert.equal(events[1][5]["key1"], value, "Should have recorded the right extra value.");
+});
diff --git a/toolkit/components/telemetry/tests/unit/test_TelemetryFlagClear.js b/toolkit/components/telemetry/tests/unit/test_TelemetryFlagClear.js
new file mode 100644
index 000000000..712aceb3b
--- /dev/null
+++ b/toolkit/components/telemetry/tests/unit/test_TelemetryFlagClear.js
@@ -0,0 +1,14 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/ */
+
+// Check that a flag histogram starts in its default state ([1,0,0]), can be
+// set via add(), and is restored to the default state by clear().
+function run_test()
+{
+  let testFlag = Services.telemetry.getHistogramById("TELEMETRY_TEST_FLAG");
+  equal(JSON.stringify(testFlag.snapshot().counts), "[1,0,0]", "Original value is correct");
+  testFlag.add(1);
+  equal(JSON.stringify(testFlag.snapshot().counts), "[0,1,0]", "Value is correct after ping.");
+  testFlag.clear();
+  equal(JSON.stringify(testFlag.snapshot().counts), "[1,0,0]", "Value is correct after calling clear()");
+  testFlag.add(1);
+  equal(JSON.stringify(testFlag.snapshot().counts), "[0,1,0]", "Value is correct after ping.");
+}
diff --git a/toolkit/components/telemetry/tests/unit/test_TelemetryLateWrites.js b/toolkit/components/telemetry/tests/unit/test_TelemetryLateWrites.js
new file mode 100644
index 000000000..f2b2b3bba
--- /dev/null
+++ b/toolkit/components/telemetry/tests/unit/test_TelemetryLateWrites.js
@@ -0,0 +1,127 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/
+*/
+/* A testcase to make sure reading late writes stacks works. */
+
+Cu.import("resource://gre/modules/Services.jsm", this);
+
+// Constants from prio.h for nsIFileOutputStream.init
+const PR_WRONLY = 0x2;
+const PR_CREATE_FILE = 0x8;
+const PR_TRUNCATE = 0x20;
+const RW_OWNER = parseInt("0600", 8);
+
+// Suffixes for the late-write stack files created in the profile directory.
+const STACK_SUFFIX1 = "stack1.txt";
+const STACK_SUFFIX2 = "stack2.txt";
+const STACK_BOGUS_SUFFIX = "bogus.txt";
+const LATE_WRITE_PREFIX = "Telemetry.LateWriteFinal-";
+
+// The names and IDs don't matter, but the format of the IDs does.
+const LOADED_MODULES = {
+  '4759A7E6993548C89CAF716A67EC242D00': 'libtest.so',
+  'F77AF15BB8D6419FA875954B4A3506CA00': 'libxul.so',
+  '1E2F7FB590424E8F93D60BB88D66B8C500': 'libc.so'
+};
+const N_MODULES = Object.keys(LOADED_MODULES).length;
+
+// Format of individual items is [index, offset-in-library].
+const STACK1 = [
+  [ 0, 0 ],
+  [ 1, 1 ],
+  [ 2, 2 ]
+];
+const STACK2 = [
+  [ 0, 0 ],
+  [ 1, 5 ],
+  [ 2, 10 ],
+];
+// XXX The only error checking is for a zero-sized stack.
+const STACK_BOGUS = [];
+
+// Write |contents| to |file| using a safe file output stream.
+function write_string_to_file(file, contents) {
+  let ostream = Cc["@mozilla.org/network/safe-file-output-stream;1"]
+                .createInstance(Ci.nsIFileOutputStream);
+  ostream.init(file, PR_WRONLY | PR_CREATE_FILE | PR_TRUNCATE,
+               RW_OWNER, ostream.DEFER_OPEN);
+  ostream.write(contents, contents.length);
+  ostream.QueryInterface(Ci.nsISafeOutputStream).finish();
+  ostream.close();
+}
+
+// Return an nsIFile for the late-write file with the given suffix, located
+// in the profile directory.
+function construct_file(suffix) {
+  let profileDirectory = Services.dirsvc.get("ProfD", Ci.nsIFile);
+  let file = profileDirectory.clone();
+  file.append(LATE_WRITE_PREFIX + suffix);
+  return file;
+}
+
+// Write a late-writes file in the format the reader expects: module count,
+// one "<id> <name>" line per module, frame count, then one
+// "<module-index> <hex-offset>" line per stack frame.
+function write_late_writes_file(stack, suffix)
+{
+  let file = construct_file(suffix);
+  let contents = N_MODULES + "\n";
+  for (let id in LOADED_MODULES) {
+    contents += id + " " + LOADED_MODULES[id] + "\n";
+  }
+
+  contents += stack.length + "\n";
+  for (let element of stack) {
+    contents += element[0] + " " + element[1].toString(16) + "\n";
+  }
+
+  write_string_to_file(file, contents);
+}
+
+// Create two valid late-write stack files plus one bogus (empty) one, then
+// kick off the async fetch; the real assertions run in actual_test().
+function run_test() {
+  do_get_profile();
+
+  write_late_writes_file(STACK1, STACK_SUFFIX1);
+  write_late_writes_file(STACK2, STACK_SUFFIX2);
+  write_late_writes_file(STACK_BOGUS, STACK_BOGUS_SUFFIX);
+
+  // Before the async fetch completes, no late-write data should be loaded.
+  let lateWrites = Telemetry.lateWrites;
+  do_check_true("memoryMap" in lateWrites);
+  do_check_eq(lateWrites.memoryMap.length, 0);
+  do_check_true("stacks" in lateWrites);
+  do_check_eq(lateWrites.stacks.length, 0);
+
+  do_test_pending();
+  Telemetry.asyncFetchTelemetryData(function () {
+    actual_test();
+  });
+}
+
+function actual_test() {
+ do_check_false(construct_file(STACK_SUFFIX1).exists());
+ do_check_false(construct_file(STACK_SUFFIX2).exists());
+ do_check_false(construct_file(STACK_BOGUS_SUFFIX).exists());
+
+ let lateWrites = Telemetry.lateWrites;
+
+ do_check_true("memoryMap" in lateWrites);
+ do_check_eq(lateWrites.memoryMap.length, N_MODULES);
+ for (let id in LOADED_MODULES) {
+ let matchingLibrary = lateWrites.memoryMap.filter(function(library, idx, array) {
+ return library[1] == id;
+ });
+ do_check_eq(matchingLibrary.length, 1);
+ let library = matchingLibrary[0]
+ let name = library[0];
+ do_check_eq(LOADED_MODULES[id], name);
+ }
+
+ do_check_true("stacks" in lateWrites);
+ do_check_eq(lateWrites.stacks.length, 2);
+ let uneval_STACKS = [uneval(STACK1), uneval(STACK2)];
+ let first_stack = lateWrites.stacks[0];
+ let second_stack = lateWrites.stacks[1];
+ function stackChecker(canonicalStack) {
+ let unevalCanonicalStack = uneval(canonicalStack);
+ return function(obj, idx, array) {
+ return unevalCanonicalStack == obj;
+ }
+ }
+ do_check_eq(uneval_STACKS.filter(stackChecker(first_stack)).length, 1);
+ do_check_eq(uneval_STACKS.filter(stackChecker(second_stack)).length, 1);
+
+ do_test_finished();
+}
diff --git a/toolkit/components/telemetry/tests/unit/test_TelemetryLockCount.js b/toolkit/components/telemetry/tests/unit/test_TelemetryLockCount.js
new file mode 100644
index 000000000..808f2f3ec
--- /dev/null
+++ b/toolkit/components/telemetry/tests/unit/test_TelemetryLockCount.js
@@ -0,0 +1,53 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/
+*/
+/* A testcase to make sure reading the failed profile lock count works. */
+
+Cu.import("resource://gre/modules/Services.jsm", this);
+
+// Name of the file Telemetry reads the failed-profile-lock count from.
+const LOCK_FILE_NAME = "Telemetry.FailedProfileLocks.txt";
+const N_FAILED_LOCKS = 10;
+
+// Constants from prio.h for nsIFileOutputStream.init
+const PR_WRONLY = 0x2;
+const PR_CREATE_FILE = 0x8;
+const PR_TRUNCATE = 0x20;
+const RW_OWNER = parseInt("0600", 8);
+
+// Write |contents| to |file| using a safe file output stream.
+function write_string_to_file(file, contents) {
+  let ostream = Cc["@mozilla.org/network/safe-file-output-stream;1"]
+                .createInstance(Ci.nsIFileOutputStream);
+  ostream.init(file, PR_WRONLY | PR_CREATE_FILE | PR_TRUNCATE,
+               RW_OWNER, ostream.DEFER_OPEN);
+  ostream.write(contents, contents.length);
+  ostream.QueryInterface(Ci.nsISafeOutputStream).finish();
+  ostream.close();
+}
+
+// Return an nsIFile pointing at the lock-count file in the profile directory.
+function construct_file() {
+  let profileDirectory = Services.dirsvc.get("ProfD", Ci.nsIFile);
+  let file = profileDirectory.clone();
+  file.append(LOCK_FILE_NAME);
+  return file;
+}
+
+// Write a lock-count file, verify it is not read eagerly, then start the
+// async fetch; the real assertions run in actual_test().
+function run_test() {
+  do_get_profile();
+
+  do_check_eq(Telemetry.failedProfileLockCount, 0);
+
+  write_string_to_file(construct_file(), N_FAILED_LOCKS.toString());
+
+  // Make sure that we're not eagerly reading the count now that the
+  // file exists.
+  do_check_eq(Telemetry.failedProfileLockCount, 0);
+
+  do_test_pending();
+  Telemetry.asyncFetchTelemetryData(actual_test);
+}
+
+// After the async fetch, the count should be loaded and the file consumed.
+function actual_test() {
+  do_check_eq(Telemetry.failedProfileLockCount, N_FAILED_LOCKS);
+  do_check_false(construct_file().exists());
+  do_test_finished();
+}
diff --git a/toolkit/components/telemetry/tests/unit/test_TelemetryLog.js b/toolkit/components/telemetry/tests/unit/test_TelemetryLog.js
new file mode 100644
index 000000000..ea37a1bc5
--- /dev/null
+++ b/toolkit/components/telemetry/tests/unit/test_TelemetryLog.js
@@ -0,0 +1,51 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/ */
+
+Cu.import("resource://gre/modules/TelemetryLog.jsm", this);
+Cu.import("resource://gre/modules/TelemetrySession.jsm", this);
+
+const TEST_PREFIX = "TEST-";
+const TEST_REGEX = new RegExp("^" + TEST_PREFIX);
+
+function check_event(event, id, data)
+{
+ do_print("Checking message " + id);
+ do_check_eq(event[0], id);
+ do_check_true(event[1] > 0);
+
+ if (data === undefined) {
+ do_check_true(event.length == 2);
+ } else {
+ do_check_eq(event.length, data.length + 2);
+ for (var i = 0; i < data.length; ++i) {
+ do_check_eq(typeof(event[i + 2]), "string");
+ do_check_eq(event[i + 2], data[i]);
+ }
+ }
+}
+
+// Log some entries via TelemetryLog and verify they appear, stringified and
+// in order, in the session payload's log section.
+add_task(function* ()
+{
+  do_get_profile();
+  // TODO: After Bug 1254550 lands we should not need to set the pref here.
+  Services.prefs.setBoolPref(PREF_TELEMETRY_ENABLED, true);
+  yield TelemetryController.testSetup();
+
+  TelemetryLog.log(TEST_PREFIX + "1", ["val", 123, undefined]);
+  TelemetryLog.log(TEST_PREFIX + "2", []);
+  TelemetryLog.log(TEST_PREFIX + "3");
+
+  var log = TelemetrySession.getPayload().log.filter(function(e) {
+    // Only want events that were generated by the test.
+    return TEST_REGEX.test(e[0]);
+  });
+
+  do_check_eq(log.length, 3);
+  // Non-string data should have been coerced to strings when logged.
+  check_event(log[0], TEST_PREFIX + "1", ["val", "123", "undefined"]);
+  check_event(log[1], TEST_PREFIX + "2", []);
+  check_event(log[2], TEST_PREFIX + "3", undefined);
+  // Timestamps must be monotonically non-decreasing.
+  do_check_true(log[0][1] <= log[1][1]);
+  do_check_true(log[1][1] <= log[2][1]);
+
+  yield TelemetryController.testShutdown();
+});
diff --git a/toolkit/components/telemetry/tests/unit/test_TelemetryReportingPolicy.js b/toolkit/components/telemetry/tests/unit/test_TelemetryReportingPolicy.js
new file mode 100644
index 000000000..68606a98e
--- /dev/null
+++ b/toolkit/components/telemetry/tests/unit/test_TelemetryReportingPolicy.js
@@ -0,0 +1,268 @@
+/* Any copyright is dedicated to the Public Domain.
+ * http://creativecommons.org/publicdomain/zero/1.0/ */
+
+// Test that TelemetryController sends close to shutdown don't lead
+// to AsyncShutdown timeouts.
+
+"use strict";
+
+Cu.import("resource://gre/modules/Preferences.jsm", this);
+Cu.import("resource://gre/modules/Services.jsm", this);
+Cu.import("resource://gre/modules/TelemetryController.jsm", this);
+Cu.import("resource://gre/modules/TelemetrySend.jsm", this);
+Cu.import("resource://gre/modules/TelemetryReportingPolicy.jsm", this);
+Cu.import("resource://gre/modules/TelemetryUtils.jsm", this);
+Cu.import("resource://gre/modules/Timer.jsm", this);
+Cu.import("resource://gre/modules/XPCOMUtils.jsm", this);
+Cu.import("resource://gre/modules/UpdateUtils.jsm", this);
+
+const PREF_BRANCH = "toolkit.telemetry.";
+const PREF_SERVER = PREF_BRANCH + "server";
+
+const TEST_CHANNEL = "TestChannelABC";
+
+// Preferences controlling the data reporting policy state.
+const PREF_POLICY_BRANCH = "datareporting.policy.";
+const PREF_BYPASS_NOTIFICATION = PREF_POLICY_BRANCH + "dataSubmissionPolicyBypassNotification";
+const PREF_DATA_SUBMISSION_ENABLED = PREF_POLICY_BRANCH + "dataSubmissionEnabled";
+const PREF_CURRENT_POLICY_VERSION = PREF_POLICY_BRANCH + "currentPolicyVersion";
+const PREF_MINIMUM_POLICY_VERSION = PREF_POLICY_BRANCH + "minimumPolicyVersion";
+const PREF_MINIMUM_CHANNEL_POLICY_VERSION = PREF_MINIMUM_POLICY_VERSION + ".channel-" + TEST_CHANNEL;
+const PREF_ACCEPTED_POLICY_VERSION = PREF_POLICY_BRANCH + "dataSubmissionPolicyAcceptedVersion";
+const PREF_ACCEPTED_POLICY_DATE = PREF_POLICY_BRANCH + "dataSubmissionPolicyNotifiedTime";
+
+// Replace the reporting policy's infobar timeout functions with the given
+// set/clear callbacks, so tests can observe the scheduled timeout.
+function fakeShowPolicyTimeout(set, clear) {
+  let reportingPolicy = Cu.import("resource://gre/modules/TelemetryReportingPolicy.jsm");
+  reportingPolicy.Policy.setShowInfobarTimeout = set;
+  reportingPolicy.Policy.clearShowInfobarTimeout = clear;
+}
+
+// Reset the prefs tracking whether/when the user accepted the policy.
+function fakeResetAcceptedPolicy() {
+  Preferences.reset(PREF_ACCEPTED_POLICY_DATE);
+  Preferences.reset(PREF_ACCEPTED_POLICY_VERSION);
+}
+
+// Set the minimum policy version, preferring the channel-specific pref for
+// the current update channel when one is defined.
+function setMinimumPolicyVersion(aNewPolicyVersion) {
+  const CHANNEL_NAME = UpdateUtils.getUpdateChannel(false);
+  // We might have channel-dependent minimum policy versions.
+  const CHANNEL_DEPENDENT_PREF = PREF_MINIMUM_POLICY_VERSION + ".channel-" + CHANNEL_NAME;
+
+  // Does the channel-dependent pref exist? If so, set its value.
+  if (Preferences.get(CHANNEL_DEPENDENT_PREF, undefined)) {
+    Preferences.set(CHANNEL_DEPENDENT_PREF, aNewPolicyVersion);
+    return;
+  }
+
+  // We don't have a channel specific minimum, so set the common one.
+  Preferences.set(PREF_MINIMUM_POLICY_VERSION, aNewPolicyVersion);
+}
+
+// One-time setup: profile, addon manager, pref watchlist, and the reporting
+// policy itself. Notification bypass is disabled so later tasks can fake it.
+add_task(function* test_setup() {
+  // Addon manager needs a profile directory
+  do_get_profile(true);
+  loadAddonManager("xpcshell@tests.mozilla.org", "XPCShell", "1", "1.9.2");
+
+  // Make sure we don't generate unexpected pings due to pref changes.
+  yield setEmptyPrefWatchlist();
+
+  Services.prefs.setBoolPref(PREF_TELEMETRY_ENABLED, true);
+  // Don't bypass the notifications in this test, we'll fake it.
+  Services.prefs.setBoolPref(PREF_BYPASS_NOTIFICATION, false);
+
+  TelemetryReportingPolicy.setup();
+});
+
+// Check that the policy infobar is delayed 60s on the first run and only
+// 10s on subsequent runs.
+add_task(function* test_firstRun() {
+  const PREF_FIRST_RUN = "toolkit.telemetry.reportingpolicy.firstRun";
+  const FIRST_RUN_TIMEOUT_MSEC = 60 * 1000; // 60s
+  const OTHER_RUNS_TIMEOUT_MSEC = 10 * 1000; // 10s
+
+  Preferences.reset(PREF_FIRST_RUN);
+
+  // Capture the timeout the policy schedules instead of actually waiting.
+  let startupTimeout = 0;
+  fakeShowPolicyTimeout((callback, timeout) => startupTimeout = timeout, () => {});
+  TelemetryReportingPolicy.reset();
+
+  Services.obs.notifyObservers(null, "sessionstore-windows-restored", null);
+  Assert.equal(startupTimeout, FIRST_RUN_TIMEOUT_MSEC,
+               "The infobar display timeout should be 60s on the first run.");
+
+  // Run again, and check that we actually wait only 10 seconds.
+  TelemetryReportingPolicy.reset();
+  Services.obs.notifyObservers(null, "sessionstore-windows-restored", null);
+  Assert.equal(startupTimeout, OTHER_RUNS_TIMEOUT_MSEC,
+               "The infobar display timeout should be 10s on other runs.");
+});
+
+// Exercise the policy acceptance state machine: invalid acceptance dates,
+// valid acceptance, the data-submission kill switch, and global and
+// per-channel minimum policy version bumps.
+add_task(function* test_prefs() {
+  TelemetryReportingPolicy.reset();
+
+  let now = fakeNow(2009, 11, 18);
+
+  // If the date is not valid (earlier than 2012), we don't regard the policy as accepted.
+  TelemetryReportingPolicy.testInfobarShown();
+  Assert.ok(!TelemetryReportingPolicy.testIsUserNotified());
+  Assert.equal(Preferences.get(PREF_ACCEPTED_POLICY_DATE, null), 0,
+               "Invalid dates should not make the policy accepted.");
+
+  // Check that the notification date and version are correctly saved to the prefs.
+  now = fakeNow(2012, 11, 18);
+  TelemetryReportingPolicy.testInfobarShown();
+  Assert.equal(Preferences.get(PREF_ACCEPTED_POLICY_DATE, null), now.getTime(),
+               "A valid date must correctly be saved.");
+
+  // Now that user is notified, check if we are allowed to upload.
+  Assert.ok(TelemetryReportingPolicy.canUpload(),
+            "We must be able to upload after the policy is accepted.");
+
+  // Disable submission and check that we're no longer allowed to upload.
+  Preferences.set(PREF_DATA_SUBMISSION_ENABLED, false);
+  Assert.ok(!TelemetryReportingPolicy.canUpload(),
+            "We must not be able to upload if data submission is disabled.");
+
+  // Turn the submission back on.
+  Preferences.set(PREF_DATA_SUBMISSION_ENABLED, true);
+  Assert.ok(TelemetryReportingPolicy.canUpload(),
+            "We must be able to upload if data submission is enabled and the policy was accepted.");
+
+  // Set a new minimum policy version and check that user is no longer notified.
+  let newMinimum = Preferences.get(PREF_CURRENT_POLICY_VERSION, 1) + 1;
+  setMinimumPolicyVersion(newMinimum);
+  Assert.ok(!TelemetryReportingPolicy.testIsUserNotified(),
+            "A greater minimum policy version must invalidate the policy and disable upload.");
+
+  // Eventually accept the policy and make sure user is notified.
+  Preferences.set(PREF_CURRENT_POLICY_VERSION, newMinimum);
+  TelemetryReportingPolicy.testInfobarShown();
+  Assert.ok(TelemetryReportingPolicy.testIsUserNotified(),
+            "Accepting the policy again should show the user as notified.");
+  Assert.ok(TelemetryReportingPolicy.canUpload(),
+            "Accepting the policy again should let us upload data.");
+
+  // Set a new, per channel, minimum policy version. Start by setting a test current channel.
+  let defaultPrefs = new Preferences({ defaultBranch: true });
+  defaultPrefs.set("app.update.channel", TEST_CHANNEL);
+
+  // Increase and set the new minimum version, then check that we're not notified anymore.
+  newMinimum++;
+  Preferences.set(PREF_MINIMUM_CHANNEL_POLICY_VERSION, newMinimum);
+  Assert.ok(!TelemetryReportingPolicy.testIsUserNotified(),
+            "Increasing the minimum policy version should invalidate the policy.");
+
+  // Eventually accept the policy and make sure user is notified.
+  Preferences.set(PREF_CURRENT_POLICY_VERSION, newMinimum);
+  TelemetryReportingPolicy.testInfobarShown();
+  Assert.ok(TelemetryReportingPolicy.testIsUserNotified(),
+            "Accepting the policy again should show the user as notified.");
+  Assert.ok(TelemetryReportingPolicy.canUpload(),
+            "Accepting the policy again should let us upload data.");
+});
+
+// Check that deprecated FHR policy prefs are removed when the policy is set up.
+add_task(function* test_migratePrefs() {
+  const DEPRECATED_FHR_PREFS = {
+    "datareporting.policy.dataSubmissionPolicyAccepted": true,
+    "datareporting.policy.dataSubmissionPolicyBypassAcceptance": true,
+    "datareporting.policy.dataSubmissionPolicyResponseType": "foxyeah",
+    "datareporting.policy.dataSubmissionPolicyResponseTime": Date.now().toString(),
+  };
+
+  // Make sure the preferences are set before setting up the policy.
+  for (let name in DEPRECATED_FHR_PREFS) {
+    Preferences.set(name, DEPRECATED_FHR_PREFS[name]);
+  }
+  // Set up the policy.
+  TelemetryReportingPolicy.reset();
+  // They should have been removed by now.
+  for (let name in DEPRECATED_FHR_PREFS) {
+    Assert.ok(!Preferences.has(name), name + " should have been removed.");
+  }
+});
+
+// Check the notified-state transitions: default unnotified, version pref
+// alone is not enough, notification via the proper API works, and later
+// (but not earlier) policy versions remain accepted.
+add_task(function* test_userNotifiedOfCurrentPolicy() {
+  fakeResetAcceptedPolicy();
+  TelemetryReportingPolicy.reset();
+
+  // User should be reported as not notified by default.
+  Assert.ok(!TelemetryReportingPolicy.testIsUserNotified(),
+            "The initial state should be unnotified.");
+
+  // Forcing a policy version should not automatically make the user notified.
+  Preferences.set(PREF_ACCEPTED_POLICY_VERSION,
+                  TelemetryReportingPolicy.DEFAULT_DATAREPORTING_POLICY_VERSION);
+  Assert.ok(!TelemetryReportingPolicy.testIsUserNotified(),
+            "The default state of the date should have a time of 0 and it should therefore fail");
+
+  // Showing the notification bar should make the user notified.
+  fakeNow(2012, 11, 11);
+  TelemetryReportingPolicy.testInfobarShown();
+  Assert.ok(TelemetryReportingPolicy.testIsUserNotified(),
+            "Using the proper API causes user notification to report as true.");
+
+  // It is assumed that later versions of the policy will incorporate previous
+  // ones, therefore this should also return true.
+  let newVersion =
+    Preferences.get(PREF_CURRENT_POLICY_VERSION, 1) + 1;
+  Preferences.set(PREF_ACCEPTED_POLICY_VERSION, newVersion);
+  Assert.ok(TelemetryReportingPolicy.testIsUserNotified(),
+            "A future version of the policy should pass.");
+
+  newVersion =
+    Preferences.get(PREF_CURRENT_POLICY_VERSION, 1) - 1;
+  Preferences.set(PREF_ACCEPTED_POLICY_VERSION, newVersion);
+  Assert.ok(!TelemetryReportingPolicy.testIsUserNotified(),
+            "A previous version of the policy should fail.");
+});
+
+// Check that pings are held until the policy infobar is shown, then sent,
+// including pings that were pending across a (faked) restart.
+add_task(function* test_canSend() {
+  const TEST_PING_TYPE = "test-ping";
+
+  PingServer.start();
+  Preferences.set(PREF_SERVER, "http://localhost:" + PingServer.port);
+
+  yield TelemetryController.testReset();
+  TelemetryReportingPolicy.reset();
+
+  // User should be reported as not notified by default.
+  Assert.ok(!TelemetryReportingPolicy.testIsUserNotified(),
+            "The initial state should be unnotified.");
+
+  // Assert if we receive any ping before the policy is accepted.
+  PingServer.registerPingHandler(() => Assert.ok(false, "Should not have received any pings now"));
+  yield TelemetryController.submitExternalPing(TEST_PING_TYPE, {});
+
+  // Reset the ping handler.
+  PingServer.resetPingHandler();
+
+  // Fake the infobar: this should also trigger the ping send task.
+  TelemetryReportingPolicy.testInfobarShown();
+  let ping = yield PingServer.promiseNextPings(1);
+  Assert.equal(ping.length, 1, "We should have received one ping.");
+  Assert.equal(ping[0].type, TEST_PING_TYPE,
+               "We should have received the previous ping.");
+
+  // Submit another ping, to make sure it gets sent.
+  yield TelemetryController.submitExternalPing(TEST_PING_TYPE, {});
+
+  // Get the ping and check its type.
+  ping = yield PingServer.promiseNextPings(1);
+  Assert.equal(ping.length, 1, "We should have received one ping.");
+  Assert.equal(ping[0].type, TEST_PING_TYPE, "We should have received the new ping.");
+
+  // Fake a restart with a pending ping.
+  yield TelemetryController.addPendingPing(TEST_PING_TYPE, {});
+  yield TelemetryController.testReset();
+
+  // We should be immediately sending the ping out.
+  ping = yield PingServer.promiseNextPings(1);
+  Assert.equal(ping.length, 1, "We should have received one ping.");
+  Assert.equal(ping[0].type, TEST_PING_TYPE, "We should have received the pending ping.");
+
+  // Submit another ping, to make sure it gets sent.
+  yield TelemetryController.submitExternalPing(TEST_PING_TYPE, {});
+
+  // Get the ping and check its type.
+  ping = yield PingServer.promiseNextPings(1);
+  Assert.equal(ping.length, 1, "We should have received one ping.");
+  Assert.equal(ping[0].type, TEST_PING_TYPE, "We should have received the new ping.");
+
+  yield PingServer.stop();
+});
diff --git a/toolkit/components/telemetry/tests/unit/test_TelemetryScalars.js b/toolkit/components/telemetry/tests/unit/test_TelemetryScalars.js
new file mode 100644
index 000000000..5914a4235
--- /dev/null
+++ b/toolkit/components/telemetry/tests/unit/test_TelemetryScalars.js
@@ -0,0 +1,574 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/
+*/
+
// Names of the test-only scalar probes exercised by the tasks below.
// NOTE(review): these are presumably registered in Scalars.yaml under the
// "telemetry.test" section — confirm against the probe registry.
const UINT_SCALAR = "telemetry.test.unsigned_int_kind";
const STRING_SCALAR = "telemetry.test.string_kind";
const BOOLEAN_SCALAR = "telemetry.test.boolean_kind";
const KEYED_UINT_SCALAR = "telemetry.test.keyed_unsigned_int";
+
// Check that each scalar kind is serialized into the snapshot with the
// matching JS type (uint -> integer number, string -> string,
// boolean -> boolean) and that keyed scalars are kept out of the plain
// scalar snapshot.
add_task(function* test_serializationFormat() {
  Telemetry.clearScalars();

  // Set the scalars to a known value.
  const expectedUint = 3785;
  const expectedString = "some value";
  Telemetry.scalarSet(UINT_SCALAR, expectedUint);
  Telemetry.scalarSet(STRING_SCALAR, expectedString);
  Telemetry.scalarSet(BOOLEAN_SCALAR, true);
  Telemetry.keyedScalarSet(KEYED_UINT_SCALAR, "first_key", 1234);

  // Get a snapshot of the scalars.
  const scalars =
    Telemetry.snapshotScalars(Ci.nsITelemetry.DATASET_RELEASE_CHANNEL_OPTIN);

  // Check that they are serialized to the correct format.
  Assert.equal(typeof(scalars[UINT_SCALAR]), "number",
               UINT_SCALAR + " must be serialized to the correct format.");
  Assert.ok(Number.isInteger(scalars[UINT_SCALAR]),
            UINT_SCALAR + " must be a finite integer.");
  Assert.equal(scalars[UINT_SCALAR], expectedUint,
               UINT_SCALAR + " must have the correct value.");
  Assert.equal(typeof(scalars[STRING_SCALAR]), "string",
               STRING_SCALAR + " must be serialized to the correct format.");
  Assert.equal(scalars[STRING_SCALAR], expectedString,
               STRING_SCALAR + " must have the correct value.");
  Assert.equal(typeof(scalars[BOOLEAN_SCALAR]), "boolean",
               BOOLEAN_SCALAR + " must be serialized to the correct format.");
  Assert.equal(scalars[BOOLEAN_SCALAR], true,
               BOOLEAN_SCALAR + " must have the correct value.");
  // Keyed scalars only appear in snapshotKeyedScalars(), not here.
  Assert.ok(!(KEYED_UINT_SCALAR in scalars),
            "Keyed scalars must be reported in a separate section.");
});
+
// Check that keyed scalars serialize as an object keyed by the user-provided
// key (including non-ASCII keys) and that plain scalars stay out of the
// keyed snapshot.
add_task(function* test_keyedSerializationFormat() {
  Telemetry.clearScalars();

  const expectedKey = "first_key";
  const expectedOtherKey = "漢語";  // exercise a non-ASCII key
  const expectedUint = 3785;
  const expectedOtherValue = 1107;

  Telemetry.scalarSet(UINT_SCALAR, expectedUint);
  Telemetry.keyedScalarSet(KEYED_UINT_SCALAR, expectedKey, expectedUint);
  Telemetry.keyedScalarSet(KEYED_UINT_SCALAR, expectedOtherKey, expectedOtherValue);

  // Get a snapshot of the scalars.
  const keyedScalars =
    Telemetry.snapshotKeyedScalars(Ci.nsITelemetry.DATASET_RELEASE_CHANNEL_OPTIN);

  Assert.ok(!(UINT_SCALAR in keyedScalars),
            UINT_SCALAR + " must not be serialized with the keyed scalars.");
  Assert.ok(KEYED_UINT_SCALAR in keyedScalars,
            KEYED_UINT_SCALAR + " must be serialized with the keyed scalars.");
  Assert.equal(Object.keys(keyedScalars[KEYED_UINT_SCALAR]).length, 2,
               "The keyed scalar must contain exactly 2 keys.");
  Assert.ok(expectedKey in keyedScalars[KEYED_UINT_SCALAR],
            KEYED_UINT_SCALAR + " must contain the expected keys.");
  Assert.ok(expectedOtherKey in keyedScalars[KEYED_UINT_SCALAR],
            KEYED_UINT_SCALAR + " must contain the expected keys.");
  Assert.ok(Number.isInteger(keyedScalars[KEYED_UINT_SCALAR][expectedKey]),
            KEYED_UINT_SCALAR + "." + expectedKey + " must be a finite integer.");
  Assert.equal(keyedScalars[KEYED_UINT_SCALAR][expectedKey], expectedUint,
               KEYED_UINT_SCALAR + "." + expectedKey + " must have the correct value.");
  Assert.equal(keyedScalars[KEYED_UINT_SCALAR][expectedOtherKey], expectedOtherValue,
               KEYED_UINT_SCALAR + "." + expectedOtherKey + " must have the correct value.");
});
+
// Check that every operation on an unregistered scalar name throws
// NS_ERROR_ILLEGAL_VALUE, for both the plain and the keyed API, and that
// the unknown name never shows up in a snapshot.
add_task(function* test_nonexistingScalar() {
  const NON_EXISTING_SCALAR = "telemetry.test.non_existing";

  Telemetry.clearScalars();

  // Make sure we throw on any operation for non-existing plain scalars.
  Assert.throws(() => Telemetry.scalarAdd(NON_EXISTING_SCALAR, 11715),
                /NS_ERROR_ILLEGAL_VALUE/,
                "Adding to a non existing scalar must throw.");
  Assert.throws(() => Telemetry.scalarSet(NON_EXISTING_SCALAR, 11715),
                /NS_ERROR_ILLEGAL_VALUE/,
                "Setting a non existing scalar must throw.");
  Assert.throws(() => Telemetry.scalarSetMaximum(NON_EXISTING_SCALAR, 11715),
                /NS_ERROR_ILLEGAL_VALUE/,
                "Setting the maximum of a non existing scalar must throw.");

  // Make sure we throw on any operation for non-existing keyed scalars.
  Assert.throws(() => Telemetry.keyedScalarAdd(NON_EXISTING_SCALAR, "some_key", 11715),
                /NS_ERROR_ILLEGAL_VALUE/,
                "Adding to a non existing keyed scalar must throw.");
  Assert.throws(() => Telemetry.keyedScalarSet(NON_EXISTING_SCALAR, "some_key", 11715),
                /NS_ERROR_ILLEGAL_VALUE/,
                "Setting a non existing keyed scalar must throw.");
  Assert.throws(() => Telemetry.keyedScalarSetMaximum(NON_EXISTING_SCALAR, "some_key", 11715),
                /NS_ERROR_ILLEGAL_VALUE/,
                // Fixed the garbled word order in this message
                // (was "non keyed existing scalar").
                "Setting the maximum of a non existing keyed scalar must throw.");

  // Get a snapshot of the scalars.
  const scalars =
    Telemetry.snapshotScalars(Ci.nsITelemetry.DATASET_RELEASE_CHANNEL_OPTIN);

  Assert.ok(!(NON_EXISTING_SCALAR in scalars), "The non existing scalar must not be persisted.");

  const keyedScalars =
    Telemetry.snapshotKeyedScalars(Ci.nsITelemetry.DATASET_RELEASE_CHANNEL_OPTIN);

  Assert.ok(!(NON_EXISTING_SCALAR in keyedScalars),
            "The non existing keyed scalar must not be persisted.");
});
+
// Check that operations on expired scalars are silently dropped (no throw,
// no recording), while a scalar with a far-future expiration version still
// records normally.
add_task(function* test_expiredScalar() {
  const EXPIRED_SCALAR = "telemetry.test.expired";
  const EXPIRED_KEYED_SCALAR = "telemetry.test.keyed_expired";
  const UNEXPIRED_SCALAR = "telemetry.test.unexpired";

  Telemetry.clearScalars();

  // Try to set the expired scalar to some value. We will not be recording the value,
  // but we shouldn't throw.
  Telemetry.scalarAdd(EXPIRED_SCALAR, 11715);
  Telemetry.scalarSet(EXPIRED_SCALAR, 11715);
  Telemetry.scalarSetMaximum(EXPIRED_SCALAR, 11715);
  Telemetry.keyedScalarAdd(EXPIRED_KEYED_SCALAR, "some_key", 11715);
  Telemetry.keyedScalarSet(EXPIRED_KEYED_SCALAR, "some_key", 11715);
  Telemetry.keyedScalarSetMaximum(EXPIRED_KEYED_SCALAR, "some_key", 11715);

  // The unexpired scalar has an expiration version, but far away in the future.
  const expectedValue = 11716;
  Telemetry.scalarSet(UNEXPIRED_SCALAR, expectedValue);

  // Get a snapshot of the scalars.
  const scalars =
    Telemetry.snapshotScalars(Ci.nsITelemetry.DATASET_RELEASE_CHANNEL_OPTIN);
  const keyedScalars =
    Telemetry.snapshotKeyedScalars(Ci.nsITelemetry.DATASET_RELEASE_CHANNEL_OPTIN);

  Assert.ok(!(EXPIRED_SCALAR in scalars), "The expired scalar must not be persisted.");
  Assert.equal(scalars[UNEXPIRED_SCALAR], expectedValue,
               "The unexpired scalar must be persisted with the correct value.");
  Assert.ok(!(EXPIRED_KEYED_SCALAR in keyedScalars),
            "The expired keyed scalar must not be persisted.");
});
+
// Exercise the full uint scalar contract: add/set/setMaximum semantics,
// truncation of non-integer numbers, silent drop of negative values, and
// NS_ERROR_ILLEGAL_VALUE on values of the wrong type.
add_task(function* test_unsignedIntScalar() {
  // Assert the current snapshot value of UINT_SCALAR.
  let checkScalar = (expectedValue) => {
    const scalars =
      Telemetry.snapshotScalars(Ci.nsITelemetry.DATASET_RELEASE_CHANNEL_OPTIN);
    Assert.equal(scalars[UINT_SCALAR], expectedValue,
                 UINT_SCALAR + " must contain the expected value.");
  };

  Telemetry.clearScalars();

  // Let's start with an accumulation without a prior set.
  Telemetry.scalarAdd(UINT_SCALAR, 1);
  Telemetry.scalarAdd(UINT_SCALAR, 2);
  // Do we get what we expect?
  checkScalar(3);

  // Let's test setting the scalar to a value.
  Telemetry.scalarSet(UINT_SCALAR, 3785);
  checkScalar(3785);
  Telemetry.scalarAdd(UINT_SCALAR, 1);
  checkScalar(3786);

  // Does setMaximum work?
  Telemetry.scalarSet(UINT_SCALAR, 2);
  checkScalar(2);
  Telemetry.scalarSetMaximum(UINT_SCALAR, 5);
  checkScalar(5);
  // The value of the probe should still be 5, as the previous value
  // is greater than the one we want to set.
  Telemetry.scalarSetMaximum(UINT_SCALAR, 3);
  checkScalar(5);

  // Check that non-integer numbers get truncated and set.
  Telemetry.scalarSet(UINT_SCALAR, 3.785);
  checkScalar(3);

  // Setting or adding a negative number must report an error through
  // the console and drop the change (shouldn't throw).
  Telemetry.scalarAdd(UINT_SCALAR, -5);
  Telemetry.scalarSet(UINT_SCALAR, -5);
  Telemetry.scalarSetMaximum(UINT_SCALAR, -1);
  checkScalar(3);

  // What happens if we try to set a value of a different type?
  Telemetry.scalarSet(UINT_SCALAR, 1);
  Assert.throws(() => Telemetry.scalarSet(UINT_SCALAR, "unexpected value"),
                /NS_ERROR_ILLEGAL_VALUE/,
                "Setting the scalar to an unexpected value type must throw.");
  Assert.throws(() => Telemetry.scalarAdd(UINT_SCALAR, "unexpected value"),
                /NS_ERROR_ILLEGAL_VALUE/,
                "Adding an unexpected value type must throw.");
  Assert.throws(() => Telemetry.scalarSetMaximum(UINT_SCALAR, "unexpected value"),
                /NS_ERROR_ILLEGAL_VALUE/,
                "Setting the scalar to an unexpected value type must throw.");
  // The stored value must not be compromised.
  checkScalar(1);
});
+
// Exercise the string scalar contract: plain and non-ASCII values, the
// unsupported add/setMaximum operations, type checking, and truncation of
// over-long strings.
add_task(function* test_stringScalar() {
  // Assert the current snapshot value of STRING_SCALAR.
  let checkExpectedString = (expectedString) => {
    const scalars =
      Telemetry.snapshotScalars(Ci.nsITelemetry.DATASET_RELEASE_CHANNEL_OPTIN);
    Assert.equal(scalars[STRING_SCALAR], expectedString,
                 STRING_SCALAR + " must contain the expected string value.");
  };

  Telemetry.clearScalars();

  // Let's check simple strings...
  let expected = "test string";
  Telemetry.scalarSet(STRING_SCALAR, expected);
  checkExpectedString(expected);
  expected = "漢語";  // ...and a non-ASCII one.
  Telemetry.scalarSet(STRING_SCALAR, expected);
  checkExpectedString(expected);

  // We have some unsupported operations for strings.
  Assert.throws(() => Telemetry.scalarAdd(STRING_SCALAR, 1),
                /NS_ERROR_NOT_AVAILABLE/,
                "Using an unsupported operation must throw.");
  Assert.throws(() => Telemetry.scalarAdd(STRING_SCALAR, "string value"),
                /NS_ERROR_NOT_AVAILABLE/,
                "Using an unsupported operation must throw.");
  Assert.throws(() => Telemetry.scalarSetMaximum(STRING_SCALAR, 1),
                /NS_ERROR_NOT_AVAILABLE/,
                "Using an unsupported operation must throw.");
  Assert.throws(() => Telemetry.scalarSetMaximum(STRING_SCALAR, "string value"),
                /NS_ERROR_NOT_AVAILABLE/,
                "Using an unsupported operation must throw.");
  Assert.throws(() => Telemetry.scalarSet(STRING_SCALAR, 1),
                /NS_ERROR_ILLEGAL_VALUE/,
                "The string scalar must throw if we're not setting a string.");

  // Try to set the scalar to a string longer than the maximum length limit.
  // The test below shows the stored value is truncated to 50 characters.
  const LONG_STRING = "browser.qaxfiuosnzmhlg.rpvxicawolhtvmbkpnludhedobxvkjwqyeyvmv";
  Telemetry.scalarSet(STRING_SCALAR, LONG_STRING);
  checkExpectedString(LONG_STRING.substr(0, 50));
});
+
// Exercise the boolean scalar contract: setting booleans, implicit
// number-to-boolean conversion, and rejection of unsupported operations
// and non-convertible value types.
add_task(function* test_booleanScalar() {
  // Assert the current snapshot value of BOOLEAN_SCALAR.
  let checkExpectedBool = (expectedBoolean) => {
    const scalars =
      Telemetry.snapshotScalars(Ci.nsITelemetry.DATASET_RELEASE_CHANNEL_OPTIN);
    Assert.equal(scalars[BOOLEAN_SCALAR], expectedBoolean,
                 BOOLEAN_SCALAR + " must contain the expected boolean value.");
  };

  Telemetry.clearScalars();

  // Set a test boolean value.
  let expected = false;
  Telemetry.scalarSet(BOOLEAN_SCALAR, expected);
  checkExpectedBool(expected);
  expected = true;
  Telemetry.scalarSet(BOOLEAN_SCALAR, expected);
  checkExpectedBool(expected);

  // Check that setting a numeric value implicitly converts to boolean.
  Telemetry.scalarSet(BOOLEAN_SCALAR, 1);
  checkExpectedBool(true);
  Telemetry.scalarSet(BOOLEAN_SCALAR, 0);
  checkExpectedBool(false);
  Telemetry.scalarSet(BOOLEAN_SCALAR, 1.0);
  checkExpectedBool(true);
  Telemetry.scalarSet(BOOLEAN_SCALAR, 0.0);
  checkExpectedBool(false);

  // Check that unsupported operations for booleans throw.
  Assert.throws(() => Telemetry.scalarAdd(BOOLEAN_SCALAR, 1),
                /NS_ERROR_NOT_AVAILABLE/,
                "Using an unsupported operation must throw.");
  Assert.throws(() => Telemetry.scalarAdd(BOOLEAN_SCALAR, "string value"),
                /NS_ERROR_NOT_AVAILABLE/,
                "Using an unsupported operation must throw.");
  Assert.throws(() => Telemetry.scalarSetMaximum(BOOLEAN_SCALAR, 1),
                /NS_ERROR_NOT_AVAILABLE/,
                "Using an unsupported operation must throw.");
  Assert.throws(() => Telemetry.scalarSetMaximum(BOOLEAN_SCALAR, "string value"),
                /NS_ERROR_NOT_AVAILABLE/,
                "Using an unsupported operation must throw.");
  // Strings are not implicitly converted, only numbers are (see above).
  Assert.throws(() => Telemetry.scalarSet(BOOLEAN_SCALAR, "true"),
                /NS_ERROR_ILLEGAL_VALUE/,
                "The boolean scalar must throw if we're not setting a boolean.");
});
+
// Check that the canRecordBase/canRecordExtended switches gate plain scalar
// recording: nothing records with both off, only opt-out records with base
// on, and both record with extended on.
add_task(function* test_scalarRecording() {
  const OPTIN_SCALAR = "telemetry.test.release_optin";
  const OPTOUT_SCALAR = "telemetry.test.release_optout";

  // Assert that a scalar was recorded with the given value.
  let checkValue = (scalarName, expectedValue) => {
    const scalars =
      Telemetry.snapshotScalars(Ci.nsITelemetry.DATASET_RELEASE_CHANNEL_OPTIN);
    Assert.equal(scalars[scalarName], expectedValue,
                 scalarName + " must contain the expected value.");
  };

  // Assert that a scalar does not appear in the snapshot at all.
  let checkNotSerialized = (scalarName) => {
    const scalars =
      Telemetry.snapshotScalars(Ci.nsITelemetry.DATASET_RELEASE_CHANNEL_OPTIN);
    Assert.ok(!(scalarName in scalars), scalarName + " was not recorded.");
  };

  Telemetry.canRecordBase = false;
  Telemetry.canRecordExtended = false;
  Telemetry.clearScalars();

  // Check that no scalar is recorded if both base and extended recording are off.
  Telemetry.scalarSet(OPTOUT_SCALAR, 3);
  Telemetry.scalarSet(OPTIN_SCALAR, 3);
  checkNotSerialized(OPTOUT_SCALAR);
  checkNotSerialized(OPTIN_SCALAR);

  // Check that opt-out scalars are recorded, while opt-in are not.
  Telemetry.canRecordBase = true;
  Telemetry.scalarSet(OPTOUT_SCALAR, 3);
  Telemetry.scalarSet(OPTIN_SCALAR, 3);
  checkValue(OPTOUT_SCALAR, 3);
  checkNotSerialized(OPTIN_SCALAR);

  // Check that both opt-out and opt-in scalars are recorded.
  Telemetry.canRecordExtended = true;
  Telemetry.scalarSet(OPTOUT_SCALAR, 5);
  Telemetry.scalarSet(OPTIN_SCALAR, 6);
  checkValue(OPTOUT_SCALAR, 5);
  checkValue(OPTIN_SCALAR, 6);
});
+
// Same recording-policy checks as test_scalarRecording, but through the
// keyed scalar API.
add_task(function* test_keyedScalarRecording() {
  const OPTIN_SCALAR = "telemetry.test.keyed_release_optin";
  const OPTOUT_SCALAR = "telemetry.test.keyed_release_optout";
  const testKey = "policy_key";

  // Assert that the keyed scalar recorded the given value under testKey.
  let checkValue = (scalarName, expectedValue) => {
    const scalars =
      Telemetry.snapshotKeyedScalars(Ci.nsITelemetry.DATASET_RELEASE_CHANNEL_OPTIN);
    Assert.equal(scalars[scalarName][testKey], expectedValue,
                 scalarName + " must contain the expected value.");
  };

  // Assert that the keyed scalar does not appear in the snapshot at all.
  let checkNotSerialized = (scalarName) => {
    const scalars =
      Telemetry.snapshotKeyedScalars(Ci.nsITelemetry.DATASET_RELEASE_CHANNEL_OPTIN);
    Assert.ok(!(scalarName in scalars), scalarName + " was not recorded.");
  };

  Telemetry.canRecordBase = false;
  Telemetry.canRecordExtended = false;
  Telemetry.clearScalars();

  // Check that no scalar is recorded if both base and extended recording are off.
  Telemetry.keyedScalarSet(OPTOUT_SCALAR, testKey, 3);
  Telemetry.keyedScalarSet(OPTIN_SCALAR, testKey, 3);
  checkNotSerialized(OPTOUT_SCALAR);
  checkNotSerialized(OPTIN_SCALAR);

  // Check that opt-out scalars are recorded, while opt-in are not.
  Telemetry.canRecordBase = true;
  Telemetry.keyedScalarSet(OPTOUT_SCALAR, testKey, 3);
  Telemetry.keyedScalarSet(OPTIN_SCALAR, testKey, 3);
  checkValue(OPTOUT_SCALAR, 3);
  checkNotSerialized(OPTIN_SCALAR);

  // Check that both opt-out and opt-in scalars are recorded.
  Telemetry.canRecordExtended = true;
  Telemetry.keyedScalarSet(OPTOUT_SCALAR, testKey, 5);
  Telemetry.keyedScalarSet(OPTIN_SCALAR, testKey, 6);
  checkValue(OPTOUT_SCALAR, 5);
  checkValue(OPTIN_SCALAR, 6);
});
+
// Check subsession semantics: passing true as the second snapshot argument
// clears the subsession data, so a second snapshot taken without new
// recordings must be empty.
add_task(function* test_subsession() {
  Telemetry.clearScalars();

  // Set the scalars to a known value.
  Telemetry.scalarSet(UINT_SCALAR, 3785);
  Telemetry.scalarSet(STRING_SCALAR, "some value");
  Telemetry.scalarSet(BOOLEAN_SCALAR, false);
  Telemetry.keyedScalarSet(KEYED_UINT_SCALAR, "some_random_key", 12);

  // Get a snapshot and reset the subsession. The value we set must be there.
  let scalars =
    Telemetry.snapshotScalars(Ci.nsITelemetry.DATASET_RELEASE_CHANNEL_OPTIN, true);
  let keyedScalars =
    Telemetry.snapshotKeyedScalars(Ci.nsITelemetry.DATASET_RELEASE_CHANNEL_OPTIN, true);

  Assert.equal(scalars[UINT_SCALAR], 3785,
               UINT_SCALAR + " must contain the expected value.");
  Assert.equal(scalars[STRING_SCALAR], "some value",
               STRING_SCALAR + " must contain the expected value.");
  Assert.equal(scalars[BOOLEAN_SCALAR], false,
               BOOLEAN_SCALAR + " must contain the expected value.");
  Assert.equal(keyedScalars[KEYED_UINT_SCALAR]["some_random_key"], 12,
               KEYED_UINT_SCALAR + " must contain the expected value.");

  // Get a new snapshot and reset the subsession again. Since no new value
  // was set, the scalars should not be reported.
  scalars =
    Telemetry.snapshotScalars(Ci.nsITelemetry.DATASET_RELEASE_CHANNEL_OPTIN, true);
  keyedScalars =
    Telemetry.snapshotKeyedScalars(Ci.nsITelemetry.DATASET_RELEASE_CHANNEL_OPTIN, true);

  Assert.ok(!(UINT_SCALAR in scalars), UINT_SCALAR + " must be empty and not reported.");
  Assert.ok(!(STRING_SCALAR in scalars), STRING_SCALAR + " must be empty and not reported.");
  Assert.ok(!(BOOLEAN_SCALAR in scalars), BOOLEAN_SCALAR + " must be empty and not reported.");
  Assert.ok(!(KEYED_UINT_SCALAR in keyedScalars), KEYED_UINT_SCALAR + " must be empty and not reported.");
});
+
// Exercise the keyed uint scalar: per-key set/add/setMaximum and rejection
// of values of the wrong type.
add_task(function* test_keyed_uint() {
  Telemetry.clearScalars();

  const KEYS = [ "a_key", "another_key", "third_key" ];
  let expectedValues = [ 1, 1, 1 ];

  // Set all the keys to a baseline value.
  for (let key of KEYS) {
    Telemetry.keyedScalarSet(KEYED_UINT_SCALAR, key, 1);
  }

  // Increment only one key.
  Telemetry.keyedScalarAdd(KEYED_UINT_SCALAR, KEYS[1], 1);
  expectedValues[1]++;

  // Use SetMaximum on the third key.
  Telemetry.keyedScalarSetMaximum(KEYED_UINT_SCALAR, KEYS[2], 37);
  expectedValues[2] = 37;

  // Get a snapshot of the scalars and make sure the keys contain
  // the correct values.
  const keyedScalars =
    Telemetry.snapshotKeyedScalars(Ci.nsITelemetry.DATASET_RELEASE_CHANNEL_OPTIN);

  for (let k = 0; k < 3; k++) {
    const keyName = KEYS[k];
    Assert.equal(keyedScalars[KEYED_UINT_SCALAR][keyName], expectedValues[k],
                 KEYED_UINT_SCALAR + "." + keyName + " must contain the correct value.");
  }

  // Are we still throwing when doing unsupported things on uint keyed scalars?
  // Just test one single unsupported operation, the other are covered in the plain
  // unsigned scalar test.
  // Fixed: this must go through the keyed API (keyedScalarSet); the original
  // called the 2-argument plain scalarSet with 3 arguments.
  Assert.throws(() => Telemetry.keyedScalarSet(KEYED_UINT_SCALAR, "new_key", "unexpected value"),
                /NS_ERROR_ILLEGAL_VALUE/,
                "Setting the scalar to an unexpected value type must throw.");
});
+
// Exercise the keyed boolean scalar: per-key set, value flipping, and
// rejection of unsupported operations.
add_task(function* test_keyed_boolean() {
  Telemetry.clearScalars();

  const KEYED_BOOLEAN_TYPE = "telemetry.test.keyed_boolean_kind";
  const first_key = "first_key";
  const second_key = "second_key";

  // Small helper to grab a fresh keyed-scalar snapshot.
  let snapshotKeyed = () =>
    Telemetry.snapshotKeyedScalars(Ci.nsITelemetry.DATASET_RELEASE_CHANNEL_OPTIN);

  // Record an initial value for both keys, then verify the snapshot.
  Telemetry.keyedScalarSet(KEYED_BOOLEAN_TYPE, first_key, true);
  Telemetry.keyedScalarSet(KEYED_BOOLEAN_TYPE, second_key, false);

  let keyedScalars = snapshotKeyed();
  Assert.equal(keyedScalars[KEYED_BOOLEAN_TYPE][first_key], true,
               "The key must contain the expected value.");
  Assert.equal(keyedScalars[KEYED_BOOLEAN_TYPE][second_key], false,
               "The key must contain the expected value.");

  // Flip both values and check that the snapshot reflects the change.
  Telemetry.keyedScalarSet(KEYED_BOOLEAN_TYPE, first_key, false);
  Telemetry.keyedScalarSet(KEYED_BOOLEAN_TYPE, second_key, true);

  keyedScalars = snapshotKeyed();
  Assert.equal(keyedScalars[KEYED_BOOLEAN_TYPE][first_key], false,
               "The key must contain the expected value.");
  Assert.equal(keyedScalars[KEYED_BOOLEAN_TYPE][second_key], true,
               "The key must contain the expected value.");

  // Boolean keyed scalars must still reject unsupported operations. A single
  // case is enough here; the rest is covered by the plain boolean scalar test.
  Assert.throws(() => Telemetry.keyedScalarAdd(KEYED_BOOLEAN_TYPE, "somehey", 1),
                /NS_ERROR_NOT_AVAILABLE/,
                "Using an unsupported operation must throw.");
});
+
// Check that keyed scalar keys longer than 70 characters are rejected with
// NS_ERROR_ILLEGAL_VALUE and never recorded, while keys within the limit
// work normally.
add_task(function* test_keyed_keys_length() {
  Telemetry.clearScalars();

  const LONG_KEY_STRING =
    "browser.qaxfiuosnzmhlg.rpvxicawolhtvmbkpnludhedobxvkjwqyeyvmv.somemoresowereach70chars";
  const NORMAL_KEY = "a_key";

  // Set the value for a key within the length limits.
  Telemetry.keyedScalarSet(KEYED_UINT_SCALAR, NORMAL_KEY, 1);

  // Now try to set and modify the value for a very long key.
  Assert.throws(() => Telemetry.keyedScalarAdd(KEYED_UINT_SCALAR, LONG_KEY_STRING, 10),
                /NS_ERROR_ILLEGAL_VALUE/,
                "Using keys longer than 70 characters must throw.");
  Assert.throws(() => Telemetry.keyedScalarSet(KEYED_UINT_SCALAR, LONG_KEY_STRING, 1),
                /NS_ERROR_ILLEGAL_VALUE/,
                "Using keys longer than 70 characters must throw.");
  Assert.throws(() => Telemetry.keyedScalarSetMaximum(KEYED_UINT_SCALAR, LONG_KEY_STRING, 10),
                /NS_ERROR_ILLEGAL_VALUE/,
                "Using keys longer than 70 characters must throw.");

  // Make sure the key with the right length contains the expected value.
  let keyedScalars =
    Telemetry.snapshotKeyedScalars(Ci.nsITelemetry.DATASET_RELEASE_CHANNEL_OPTIN);
  Assert.equal(Object.keys(keyedScalars[KEYED_UINT_SCALAR]).length, 1,
               "The keyed scalar must contain exactly 1 key.");
  Assert.ok(NORMAL_KEY in keyedScalars[KEYED_UINT_SCALAR],
            "The keyed scalar must contain the expected key.");
  Assert.equal(keyedScalars[KEYED_UINT_SCALAR][NORMAL_KEY], 1,
               "The key must contain the expected value.");
  Assert.ok(!(LONG_KEY_STRING in keyedScalars[KEYED_UINT_SCALAR]),
            "The data for the long key should not have been recorded.");
});
+
// Check that a keyed scalar accepts at most 100 distinct keys: operations on
// further keys throw NS_ERROR_FAILURE and the snapshot only reports the
// first 100 keys with their values intact.
add_task(function* test_keyed_max_keys() {
  Telemetry.clearScalars();

  // Generate the names for the first 100 keys.
  let keyNamesSet = new Set();
  for (let k = 0; k < 100; k++) {
    keyNamesSet.add("key_" + k);
  }

  // Add 100 keys to the keyed scalar and set their initial value.
  let valueToSet = 0;
  keyNamesSet.forEach(keyName => {
    Telemetry.keyedScalarSet(KEYED_UINT_SCALAR, keyName, valueToSet++);
  });

  // Perform some operations on the 101st key. This should throw, as
  // we're not allowed to have more than 100 keys.
  const LAST_KEY_NAME = "overflowing_key";
  Assert.throws(() => Telemetry.keyedScalarAdd(KEYED_UINT_SCALAR, LAST_KEY_NAME, 10),
                /NS_ERROR_FAILURE/,
                "Using more than 100 keys must throw.");
  Assert.throws(() => Telemetry.keyedScalarSet(KEYED_UINT_SCALAR, LAST_KEY_NAME, 1),
                /NS_ERROR_FAILURE/,
                "Using more than 100 keys must throw.");
  Assert.throws(() => Telemetry.keyedScalarSetMaximum(KEYED_UINT_SCALAR, LAST_KEY_NAME, 10),
                /NS_ERROR_FAILURE/,
                "Using more than 100 keys must throw.");

  // Make sure all the keys except the last one are available and have the correct
  // values.
  let keyedScalars =
    Telemetry.snapshotKeyedScalars(Ci.nsITelemetry.DATASET_RELEASE_CHANNEL_OPTIN);

  // Check that the keyed scalar only contains the first 100 keys.
  // Fixed: the original asserted on the truthiness of two Array.filter()
  // results, which are always truthy (arrays), so it could never fail.
  // Assert real set equality instead.
  const reportedKeysSet = new Set(Object.keys(keyedScalars[KEYED_UINT_SCALAR]));
  Assert.ok([...keyNamesSet].every(x => reportedKeysSet.has(x)) &&
            [...reportedKeysSet].every(x => keyNamesSet.has(x)),
            "The keyed scalar must contain all the 100 keys, and drop the others.");

  // Check that all the keys recorded the expected values.
  let expectedValue = 0;
  keyNamesSet.forEach(keyName => {
    Assert.equal(keyedScalars[KEYED_UINT_SCALAR][keyName], expectedValue++,
                 "The key must contain the expected value.");
  });
});
diff --git a/toolkit/components/telemetry/tests/unit/test_TelemetrySend.js b/toolkit/components/telemetry/tests/unit/test_TelemetrySend.js
new file mode 100644
index 000000000..88ff8cf44
--- /dev/null
+++ b/toolkit/components/telemetry/tests/unit/test_TelemetrySend.js
@@ -0,0 +1,427 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/
+*/
+
+// This tests the public Telemetry API for submitting pings.
+
+"use strict";
+
+Cu.import("resource://gre/modules/TelemetryController.jsm", this);
+Cu.import("resource://gre/modules/TelemetrySession.jsm", this);
+Cu.import("resource://gre/modules/TelemetrySend.jsm", this);
+Cu.import("resource://gre/modules/TelemetryUtils.jsm", this);
+Cu.import("resource://gre/modules/Services.jsm", this);
+Cu.import("resource://gre/modules/Preferences.jsm", this);
+Cu.import("resource://gre/modules/osfile.jsm", this);
+
// Preference holding the URL of the telemetry ping server.
const PREF_TELEMETRY_SERVER = "toolkit.telemetry.server";

// One minute, in milliseconds; used for fake clock arithmetic below.
const MS_IN_A_MINUTE = 60 * 1000;
+
/**
 * Tally how many pings of each type are in the given list.
 *
 * @param {Array} pings - Ping objects, each with a `type` property.
 * @return {Map} Map from ping type to the number of pings of that type.
 */
function countPingTypes(pings) {
  const tally = new Map();
  for (const ping of pings) {
    const previous = tally.get(ping.type) || 0;
    tally.set(ping.type, previous + 1);
  }
  return tally;
}
+
// Overwrite the last-modified time of a stored pending ping file so tests
// can control the pending-ping ordering. Returns the OS.File promise.
function setPingLastModified(id, timestamp) {
  const path = OS.Path.join(TelemetryStorage.pingDirectoryPath, id);
  return OS.File.setDates(path, null, timestamp);
}
+
// Mock out the send timer activity.
// Resolves with [callback, timeout] the next time TelemetrySend schedules
// its ping-send timer (via the fakePingSendTimer test hook).
function waitForTimer() {
  return new Promise(resolve => {
    fakePingSendTimer((callback, timeout) => {
      resolve([callback, timeout]);
    }, () => {});
  });
}
+
// Allow easy faking of readable ping ids.
// This helps with debugging issues with e.g. ordering in the send logic.
// The generated UUID-shaped id embeds the type tag and a right-aligned
// number, e.g. ("a", 3) -> "93bd0011-2c8f-4e1c-bee0-a00000000003".
function fakePingId(type, number) {
  const HEAD = "93bd0011-2c8f-4e1c-bee0-";
  const TAIL = "000000000000";
  const N = String(number);
  const id = HEAD + type + TAIL.slice(type.length, - N.length) + N;
  // Make the next submitted ping use this id (test hook from head.js).
  fakeGeneratePingId(() => id);
  return id;
}
+
// Resolve to true iff every ping id in the list has a corresponding file in
// the pending-ping directory; logs each missing path for debugging.
var checkPingsSaved = Task.async(function* (pingIds) {
  let allFound = true;
  for (let id of pingIds) {
    const path = OS.Path.join(TelemetryStorage.pingDirectoryPath, id);
    let exists = false;
    try {
      exists = yield OS.File.exists(path);
    } catch (ex) {}  // treat any stat failure as "not found"

    if (!exists) {
      dump("checkPingsSaved - failed to find ping: " + path + "\n");
      allFound = false;
    }
  }

  return allFound;
});
+
/**
 * Sum all the bucket counts of a histogram snapshot.
 *
 * @param {Object} h - Histogram snapshot with a numeric `counts` array.
 * @return {Number} Total number of recorded values; 0 for an empty counts
 *         array. (Fixed: reduce without an initial value throws a TypeError
 *         on an empty array.)
 */
function histogramValueCount(h) {
  return h.counts.reduce((a, b) => a + b, 0);
}
+
// One-time setup: create the test profile, silence pref-watch pings and
// enable telemetry before the real tests run.
add_task(function* test_setup() {
  // Trigger a proper telemetry init.
  do_get_profile(true);
  // Make sure we don't generate unexpected pings due to pref changes.
  yield setEmptyPrefWatchlist();
  // NOTE(review): PREF_TELEMETRY_ENABLED is not defined in this file;
  // presumably provided by the shared test head — confirm.
  Services.prefs.setBoolPref(PREF_TELEMETRY_ENABLED, true);
});
+
+// Test the ping sending logic.
+add_task(function* test_sendPendingPings() {
+ const TYPE_PREFIX = "test-sendPendingPings-";
+ const TEST_TYPE_A = TYPE_PREFIX + "A";
+ const TEST_TYPE_B = TYPE_PREFIX + "B";
+
+ const TYPE_A_COUNT = 20;
+ const TYPE_B_COUNT = 5;
+
+ let histSuccess = Telemetry.getHistogramById("TELEMETRY_SUCCESS");
+ let histSendTimeSuccess = Telemetry.getHistogramById("TELEMETRY_SEND_SUCCESS");
+ let histSendTimeFail = Telemetry.getHistogramById("TELEMETRY_SEND_FAILURE");
+ histSuccess.clear();
+ histSendTimeSuccess.clear();
+ histSendTimeFail.clear();
+
+ // Fake a current date.
+ let now = TelemetryUtils.truncateToDays(new Date());
+ now = fakeNow(futureDate(now, 10 * 60 * MS_IN_A_MINUTE));
+
+ // Enable test-mode for TelemetrySend, otherwise we won't store pending pings
+ // before the module is fully initialized later.
+ TelemetrySend.setTestModeEnabled(true);
+
+ // Submit some pings without the server and telemetry started yet.
+ for (let i = 0; i < TYPE_A_COUNT; ++i) {
+ fakePingId("a", i);
+ const id = yield TelemetryController.submitExternalPing(TEST_TYPE_A, {});
+ yield setPingLastModified(id, now.getTime() + (i * 1000));
+ }
+
+ Assert.equal(TelemetrySend.pendingPingCount, TYPE_A_COUNT,
+ "Should have correct pending ping count");
+
+ // Submit some more pings of a different type.
+ now = fakeNow(futureDate(now, 5 * MS_IN_A_MINUTE));
+ for (let i = 0; i < TYPE_B_COUNT; ++i) {
+ fakePingId("b", i);
+ const id = yield TelemetryController.submitExternalPing(TEST_TYPE_B, {});
+ yield setPingLastModified(id, now.getTime() + (i * 1000));
+ }
+
+ Assert.equal(TelemetrySend.pendingPingCount, TYPE_A_COUNT + TYPE_B_COUNT,
+ "Should have correct pending ping count");
+
+ Assert.deepEqual(histSuccess.snapshot().counts, [0, 0, 0],
+ "Should not have recorded any sending in histograms yet.");
+ Assert.equal(histSendTimeSuccess.snapshot().sum, 0,
+ "Should not have recorded any sending in histograms yet.");
+ Assert.equal(histSendTimeFail.snapshot().sum, 0,
+ "Should not have recorded any sending in histograms yet.");
+
+ // Now enable sending to the ping server.
+ now = fakeNow(futureDate(now, MS_IN_A_MINUTE));
+ PingServer.start();
+ Preferences.set(PREF_TELEMETRY_SERVER, "http://localhost:" + PingServer.port);
+
+ let timerPromise = waitForTimer();
+ yield TelemetryController.testReset();
+ let [pingSendTimerCallback, pingSendTimeout] = yield timerPromise;
+ Assert.ok(!!pingSendTimerCallback, "Should have a timer callback");
+
+ // We should have received 10 pings from the first send batch:
+ // 5 of type B and 5 of type A, as sending is newest-first.
+ // The other pings should be delayed by the 10-pings-per-minute limit.
+ let pings = yield PingServer.promiseNextPings(10);
+ Assert.equal(TelemetrySend.pendingPingCount, TYPE_A_COUNT - 5,
+ "Should have correct pending ping count");
+ PingServer.registerPingHandler(() => Assert.ok(false, "Should not have received any pings now"));
+ let countByType = countPingTypes(pings);
+
+ Assert.equal(countByType.get(TEST_TYPE_B), TYPE_B_COUNT,
+ "Should have received the correct amount of type B pings");
+ Assert.equal(countByType.get(TEST_TYPE_A), 10 - TYPE_B_COUNT,
+ "Should have received the correct amount of type A pings");
+
+ Assert.deepEqual(histSuccess.snapshot().counts, [0, 10, 0],
+ "Should have recorded sending success in histograms.");
+ Assert.equal(histogramValueCount(histSendTimeSuccess.snapshot()), 10,
+ "Should have recorded successful send times in histograms.");
+ Assert.equal(histogramValueCount(histSendTimeFail.snapshot()), 0,
+ "Should not have recorded any failed sending in histograms yet.");
+
+ // As we hit the ping send limit and still have pending pings, a send tick should
+ // be scheduled in a minute.
+ Assert.ok(!!pingSendTimerCallback, "Timer callback should be set");
+ Assert.equal(pingSendTimeout, MS_IN_A_MINUTE, "Send tick timeout should be correct");
+
+ // Trigger the next tick - we should receive the next 10 type A pings.
+ PingServer.resetPingHandler();
+ now = fakeNow(futureDate(now, pingSendTimeout));
+ timerPromise = waitForTimer();
+ pingSendTimerCallback();
+ [pingSendTimerCallback, pingSendTimeout] = yield timerPromise;
+
+ pings = yield PingServer.promiseNextPings(10);
+ PingServer.registerPingHandler(() => Assert.ok(false, "Should not have received any pings now"));
+ countByType = countPingTypes(pings);
+
+ Assert.equal(countByType.get(TEST_TYPE_A), 10, "Should have received the correct amount of type A pings");
+
+ // We hit the ping send limit again and still have pending pings, a send tick should
+ // be scheduled in a minute.
+ Assert.equal(pingSendTimeout, MS_IN_A_MINUTE, "Send tick timeout should be correct");
+
+ // Trigger the next tick - we should receive the remaining type A pings.
+ PingServer.resetPingHandler();
+ now = fakeNow(futureDate(now, pingSendTimeout));
+ yield pingSendTimerCallback();
+
+ pings = yield PingServer.promiseNextPings(5);
+ PingServer.registerPingHandler(() => Assert.ok(false, "Should not have received any pings now"));
+ countByType = countPingTypes(pings);
+
+ Assert.equal(countByType.get(TEST_TYPE_A), 5, "Should have received the correct amount of type A pings");
+
+ yield TelemetrySend.testWaitOnOutgoingPings();
+ PingServer.resetPingHandler();
+});
+
// Check that ping requests carry a correctly formatted Date header matching
// the (faked) current time.
add_task(function* test_sendDateHeader() {
  fakeNow(new Date(Date.UTC(2011, 1, 1, 11, 0, 0)));  // Feb 1 2011 (month is 0-based)
  yield TelemetrySend.reset();

  let pingId = yield TelemetryController.submitExternalPing("test-send-date-header", {});
  let req = yield PingServer.promiseNextRequest();
  let ping = decodeRequestPayload(req);
  Assert.equal(req.getHeader("Date"), "Tue, 01 Feb 2011 11:00:00 GMT",
               "Telemetry should send the correct Date header with requests.");
  Assert.equal(ping.id, pingId, "Should have received the correct ping id.");
});
+
+// Test the backoff timeout behavior after send failures.
+add_task(function* test_backoffTimeout() {
+ const TYPE_PREFIX = "test-backoffTimeout-";
+ const TEST_TYPE_C = TYPE_PREFIX + "C";
+ const TEST_TYPE_D = TYPE_PREFIX + "D";
+ const TEST_TYPE_E = TYPE_PREFIX + "E";
+
+ let histSuccess = Telemetry.getHistogramById("TELEMETRY_SUCCESS");
+ let histSendTimeSuccess = Telemetry.getHistogramById("TELEMETRY_SEND_SUCCESS");
+ let histSendTimeFail = Telemetry.getHistogramById("TELEMETRY_SEND_FAILURE");
+
+ // Failing a ping send now should trigger backoff behavior.
+ let now = fakeNow(2010, 1, 1, 11, 0, 0);
+ yield TelemetrySend.reset();
+ PingServer.stop();
+
+ histSuccess.clear();
+ histSendTimeSuccess.clear();
+ histSendTimeFail.clear();
+
+ fakePingId("c", 0);
+ now = fakeNow(futureDate(now, MS_IN_A_MINUTE));
+ let sendAttempts = 0;
+ let timerPromise = waitForTimer();
+ yield TelemetryController.submitExternalPing(TEST_TYPE_C, {});
+ let [pingSendTimerCallback, pingSendTimeout] = yield timerPromise;
+ Assert.equal(TelemetrySend.pendingPingCount, 1, "Should have one pending ping.");
+ ++sendAttempts;
+
+ const MAX_BACKOFF_TIMEOUT = 120 * MS_IN_A_MINUTE;
+ for (let timeout = 2 * MS_IN_A_MINUTE; timeout <= MAX_BACKOFF_TIMEOUT; timeout *= 2) {
+ Assert.ok(!!pingSendTimerCallback, "Should have received a timer callback");
+ Assert.equal(pingSendTimeout, timeout, "Send tick timeout should be correct");
+
+ let callback = pingSendTimerCallback;
+ now = fakeNow(futureDate(now, pingSendTimeout));
+ timerPromise = waitForTimer();
+ yield callback();
+ [pingSendTimerCallback, pingSendTimeout] = yield timerPromise;
+ ++sendAttempts;
+ }
+
+ timerPromise = waitForTimer();
+ yield pingSendTimerCallback();
+ [pingSendTimerCallback, pingSendTimeout] = yield timerPromise;
+ Assert.equal(pingSendTimeout, MAX_BACKOFF_TIMEOUT, "Tick timeout should be capped");
+ ++sendAttempts;
+
+ Assert.deepEqual(histSuccess.snapshot().counts, [sendAttempts, 0, 0],
+ "Should have recorded sending failure in histograms.");
+ Assert.equal(histSendTimeSuccess.snapshot().sum, 0,
+ "Should not have recorded any sending success in histograms yet.");
+ Assert.greater(histSendTimeFail.snapshot().sum, 0,
+ "Should have recorded send failure times in histograms.");
+ Assert.equal(histogramValueCount(histSendTimeFail.snapshot()), sendAttempts,
+ "Should have recorded send failure times in histograms.");
+
+ // Submitting a new ping should reset the backoff behavior.
+ fakePingId("d", 0);
+ now = fakeNow(futureDate(now, MS_IN_A_MINUTE));
+ timerPromise = waitForTimer();
+ yield TelemetryController.submitExternalPing(TEST_TYPE_D, {});
+ [pingSendTimerCallback, pingSendTimeout] = yield timerPromise;
+ Assert.equal(pingSendTimeout, 2 * MS_IN_A_MINUTE, "Send tick timeout should be correct");
+ sendAttempts += 2;
+
+ // With the server running again, we should send out the pending pings immediately
+ // when a new ping is submitted.
+ PingServer.start();
+ TelemetrySend.setServer("http://localhost:" + PingServer.port);
+ fakePingId("e", 0);
+ now = fakeNow(futureDate(now, MS_IN_A_MINUTE));
+ timerPromise = waitForTimer();
+ yield TelemetryController.submitExternalPing(TEST_TYPE_E, {});
+
+ let pings = yield PingServer.promiseNextPings(3);
+ let countByType = countPingTypes(pings);
+
+ Assert.equal(countByType.get(TEST_TYPE_C), 1, "Should have received the correct amount of type C pings");
+ Assert.equal(countByType.get(TEST_TYPE_D), 1, "Should have received the correct amount of type D pings");
+ Assert.equal(countByType.get(TEST_TYPE_E), 1, "Should have received the correct amount of type E pings");
+
+ yield TelemetrySend.testWaitOnOutgoingPings();
+ Assert.equal(TelemetrySend.pendingPingCount, 0, "Should have no pending pings left");
+
+ Assert.deepEqual(histSuccess.snapshot().counts, [sendAttempts, 3, 0],
+ "Should have recorded sending failure in histograms.");
+ Assert.greater(histSendTimeSuccess.snapshot().sum, 0,
+ "Should have recorded sending success in histograms.");
+ Assert.equal(histogramValueCount(histSendTimeSuccess.snapshot()), 3,
+ "Should have recorded sending success in histograms.");
+ Assert.equal(histogramValueCount(histSendTimeFail.snapshot()), sendAttempts,
+ "Should have recorded send failure times in histograms.");
+});
+
+add_task(function* test_discardBigPings() {
+ const TEST_PING_TYPE = "test-ping-type";
+
+ let histSizeExceeded = Telemetry.getHistogramById("TELEMETRY_PING_SIZE_EXCEEDED_SEND");
+ let histDiscardedSize = Telemetry.getHistogramById("TELEMETRY_DISCARDED_SEND_PINGS_SIZE_MB");
+ let histSuccess = Telemetry.getHistogramById("TELEMETRY_SUCCESS");
+ let histSendTimeSuccess = Telemetry.getHistogramById("TELEMETRY_SEND_SUCCESS");
+ let histSendTimeFail = Telemetry.getHistogramById("TELEMETRY_SEND_FAILURE");
+ for (let h of [histSizeExceeded, histDiscardedSize, histSuccess, histSendTimeSuccess, histSendTimeFail]) {
+ h.clear();
+ }
+
+ // Generate a 2MB string and create an oversized payload.
+ const OVERSIZED_PAYLOAD = {"data": generateRandomString(2 * 1024 * 1024)};
+
+ // Reset the histograms.
+ Telemetry.getHistogramById("TELEMETRY_PING_SIZE_EXCEEDED_SEND").clear();
+ Telemetry.getHistogramById("TELEMETRY_DISCARDED_SEND_PINGS_SIZE_MB").clear();
+
+ // Submit a ping of a normal size and check that we don't count it in the histogram.
+ yield TelemetryController.submitExternalPing(TEST_PING_TYPE, { test: "test" });
+ yield TelemetrySend.testWaitOnOutgoingPings();
+
+ Assert.equal(histSizeExceeded.snapshot().sum, 0, "Telemetry must report no oversized ping submitted.");
+ Assert.equal(histDiscardedSize.snapshot().sum, 0, "Telemetry must report no oversized pings.");
+ Assert.deepEqual(histSuccess.snapshot().counts, [0, 1, 0], "Should have recorded sending success.");
+ Assert.equal(histogramValueCount(histSendTimeSuccess.snapshot()), 1, "Should have recorded send success time.");
+ Assert.greater(histSendTimeSuccess.snapshot().sum, 0, "Should have recorded send success time.");
+ Assert.equal(histogramValueCount(histSendTimeFail.snapshot()), 0, "Should not have recorded send failure time.");
+
+ // Submit an oversized ping and check that it gets discarded.
+ yield TelemetryController.submitExternalPing(TEST_PING_TYPE, OVERSIZED_PAYLOAD);
+ yield TelemetrySend.testWaitOnOutgoingPings();
+
+ Assert.equal(histSizeExceeded.snapshot().sum, 1, "Telemetry must report 1 oversized ping submitted.");
+ Assert.equal(histDiscardedSize.snapshot().counts[2], 1, "Telemetry must report a 2MB, oversized, ping submitted.");
+ Assert.deepEqual(histSuccess.snapshot().counts, [0, 1, 0], "Should have recorded sending success.");
+ Assert.equal(histogramValueCount(histSendTimeSuccess.snapshot()), 1, "Should have recorded send success time.");
+ Assert.greater(histSendTimeSuccess.snapshot().sum, 0, "Should have recorded send success time.");
+ Assert.equal(histogramValueCount(histSendTimeFail.snapshot()), 0, "Should not have recorded send failure time.");
+});
+
+add_task(function* test_evictedOnServerErrors() {
+ const TEST_TYPE = "test-evicted";
+
+ yield TelemetrySend.reset();
+
+ let histEvicted = Telemetry.getHistogramById("TELEMETRY_PING_EVICTED_FOR_SERVER_ERRORS");
+ let histSuccess = Telemetry.getHistogramById("TELEMETRY_SUCCESS");
+ let histSendTimeSuccess = Telemetry.getHistogramById("TELEMETRY_SEND_SUCCESS");
+ let histSendTimeFail = Telemetry.getHistogramById("TELEMETRY_SEND_FAILURE");
+ for (let h of [histEvicted, histSuccess, histSendTimeSuccess, histSendTimeFail]) {
+ h.clear();
+ }
+
+ // Write a custom ping handler which will return 403. This will trigger ping eviction
+ // on client side.
+ PingServer.registerPingHandler((req, res) => {
+ res.setStatusLine(null, 403, "Forbidden");
+ res.processAsync();
+ res.finish();
+ });
+
+ // Clear the histogram and submit a ping.
+ let pingId = yield TelemetryController.submitExternalPing(TEST_TYPE, {});
+ yield TelemetrySend.testWaitOnOutgoingPings();
+
+ Assert.equal(histEvicted.snapshot().sum, 1,
+ "Telemetry must report a ping evicted due to server errors");
+ Assert.deepEqual(histSuccess.snapshot().counts, [0, 1, 0]);
+ Assert.equal(histogramValueCount(histSendTimeSuccess.snapshot()), 1);
+ Assert.greater(histSendTimeSuccess.snapshot().sum, 0);
+ Assert.equal(histogramValueCount(histSendTimeFail.snapshot()), 0);
+
+ // The ping should not be persisted.
+ yield Assert.rejects(TelemetryStorage.loadPendingPing(pingId), "The ping must not be persisted.");
+
+ // Reset the ping handler and submit a new ping.
+ PingServer.resetPingHandler();
+ pingId = yield TelemetryController.submitExternalPing(TEST_TYPE, {});
+
+ let ping = yield PingServer.promiseNextPings(1);
+ Assert.equal(ping[0].id, pingId, "The correct ping must be received");
+
+ // We should not have updated the error histogram.
+ yield TelemetrySend.testWaitOnOutgoingPings();
+ Assert.equal(histEvicted.snapshot().sum, 1, "Telemetry must report only one ping evicted due to server errors");
+ Assert.deepEqual(histSuccess.snapshot().counts, [0, 2, 0]);
+ Assert.equal(histogramValueCount(histSendTimeSuccess.snapshot()), 2);
+ Assert.equal(histogramValueCount(histSendTimeFail.snapshot()), 0);
+});
+
+// Test that the current, non-persisted pending pings are properly saved on shutdown.
+add_task(function* test_persistCurrentPingsOnShutdown() {
+ const TEST_TYPE = "test-persistCurrentPingsOnShutdown";
+ const PING_COUNT = 5;
+ yield TelemetrySend.reset();
+ PingServer.stop();
+ Assert.equal(TelemetrySend.pendingPingCount, 0, "Should have no pending pings yet");
+
+ // Submit new pings that shouldn't be persisted yet.
+ let ids = [];
+ for (let i=0; i<5; ++i) {
+ ids.push(fakePingId("f", i));
+ TelemetryController.submitExternalPing(TEST_TYPE, {});
+ }
+
+ Assert.equal(TelemetrySend.pendingPingCount, PING_COUNT, "Should have the correct pending ping count");
+
+ // Triggering a shutdown should persist the pings.
+ yield TelemetrySend.shutdown();
+ Assert.ok((yield checkPingsSaved(ids)), "All pending pings should have been persisted");
+
+ // After a restart the pings should have been found when scanning.
+ yield TelemetrySend.reset();
+ Assert.equal(TelemetrySend.pendingPingCount, PING_COUNT, "Should have the correct pending ping count");
+});
diff --git a/toolkit/components/telemetry/tests/unit/test_TelemetrySendOldPings.js b/toolkit/components/telemetry/tests/unit/test_TelemetrySendOldPings.js
new file mode 100644
index 000000000..221b6bcab
--- /dev/null
+++ b/toolkit/components/telemetry/tests/unit/test_TelemetrySendOldPings.js
@@ -0,0 +1,547 @@
+/* Any copyright is dedicated to the Public Domain.
+   http://creativecommons.org/publicdomain/zero/1.0/ */
+
+/**
+ * This test case populates the profile with some fake stored
+ * pings, and checks that pending pings are immediately sent
+ * after delayed init.
+ */
+
+"use strict"
+
+Cu.import("resource://gre/modules/osfile.jsm", this);
+Cu.import("resource://gre/modules/Services.jsm", this);
+Cu.import("resource://gre/modules/Promise.jsm", this);
+Cu.import("resource://gre/modules/TelemetryStorage.jsm", this);
+Cu.import("resource://gre/modules/TelemetryController.jsm", this);
+Cu.import("resource://gre/modules/TelemetrySend.jsm", this);
+Cu.import("resource://gre/modules/Task.jsm", this);
+Cu.import("resource://gre/modules/XPCOMUtils.jsm");
+var {OS: {File, Path, Constants}} = Cu.import("resource://gre/modules/osfile.jsm", {});
+
+// We increment TelemetryStorage's MAX_PING_FILE_AGE and
+// OVERDUE_PING_FILE_AGE by 1 minute so that our test pings exceed
+// those points in time, even taking into account file system imprecision.
+const ONE_MINUTE_MS = 60 * 1000;
+const OVERDUE_PING_FILE_AGE = TelemetrySend.OVERDUE_PING_FILE_AGE + ONE_MINUTE_MS;
+
+const PING_SAVE_FOLDER = "saved-telemetry-pings";
+const PING_TIMEOUT_LENGTH = 5000;
+const OVERDUE_PINGS = 6;
+const OLD_FORMAT_PINGS = 4;
+const RECENT_PINGS = 4;
+
+const TOTAL_EXPECTED_PINGS = OVERDUE_PINGS + RECENT_PINGS + OLD_FORMAT_PINGS;
+
+const PREF_FHR_UPLOAD = "datareporting.healthreport.uploadEnabled";
+
+var gCreatedPings = 0;
+var gSeenPings = 0;
+
+/**
+ * Creates some Telemetry pings for the test and saves them to disk. Each ping gets a
+ * unique ID based on an incrementor.
+ *
+ * @param {Array} aPingInfos An array of ping type objects. Each entry must be an
+ * object containing a "num" field for the number of pings to create and
+ * an "age" field. The latter representing the age in milliseconds to offset
+ * from now. A value of 10 would make the ping 10ms older than now, for
+ * example.
+ * @returns Promise
+ * @resolve an Array with the created pings ids.
+ */
+var createSavedPings = Task.async(function* (aPingInfos) {
+ let pingIds = [];
+ let now = Date.now();
+
+ for (let type in aPingInfos) {
+ let num = aPingInfos[type].num;
+ let age = now - (aPingInfos[type].age || 0);
+ for (let i = 0; i < num; ++i) {
+ let pingId = yield TelemetryController.addPendingPing("test-ping", {}, { overwrite: true });
+ if (aPingInfos[type].age) {
+      // savePing writes to the file synchronously, so we're good to
+      // modify the lastModifiedTime now.
+ let filePath = getSavePathForPingId(pingId);
+ yield File.setDates(filePath, null, age);
+ }
+ gCreatedPings++;
+ pingIds.push(pingId);
+ }
+ }
+
+ return pingIds;
+});
+
+/**
+ * Deletes locally saved pings if they exist.
+ *
+ * @param aPingIds an Array of ping ids to delete.
+ * @returns Promise
+ */
+var clearPings = Task.async(function* (aPingIds) {
+ for (let pingId of aPingIds) {
+ yield TelemetryStorage.removePendingPing(pingId);
+ }
+});
+
+/**
+ * Fakes the pending pings storage quota.
+ * @param {Integer} aPendingQuota The new quota, in bytes.
+ */
+function fakePendingPingsQuota(aPendingQuota) {
+ let storage = Cu.import("resource://gre/modules/TelemetryStorage.jsm");
+ storage.Policy.getPendingPingsQuota = () => aPendingQuota;
+}
+
+/**
+ * Returns a handle for the file that a ping should be
+ * stored in locally.
+ *
+ * @returns path
+ */
+function getSavePathForPingId(aPingId) {
+ return Path.join(Constants.Path.profileDir, PING_SAVE_FOLDER, aPingId);
+}
+
+/**
+ * Check that the number of Telemetry pings received by the HttpServer is equal
+ * to aExpectedNum.
+ *
+ * @param aExpectedNum the number of pings we expect to receive.
+ */
+function assertReceivedPings(aExpectedNum) {
+ do_check_eq(gSeenPings, aExpectedNum);
+}
+
+/**
+ * Throws if any pings with the id in aPingIds is saved locally.
+ *
+ * @param aPingIds an Array of pings ids to check.
+ * @returns Promise
+ */
+var assertNotSaved = Task.async(function* (aPingIds) {
+ let saved = 0;
+ for (let id of aPingIds) {
+ let filePath = getSavePathForPingId(id);
+ if (yield File.exists(filePath)) {
+ saved++;
+ }
+ }
+ if (saved > 0) {
+ do_throw("Found " + saved + " unexpected saved pings.");
+ }
+});
+
+/**
+ * Our handler function for the HttpServer that simply
+ * increments the gSeenPings global when it successfully
+ * receives and decodes a Telemetry payload.
+ *
+ * @param aRequest the HTTP request sent from HttpServer.
+ */
+function pingHandler(aRequest) {
+ gSeenPings++;
+}
+
+add_task(function* test_setup() {
+ PingServer.start();
+ PingServer.registerPingHandler(pingHandler);
+ do_get_profile();
+ loadAddonManager("xpcshell@tests.mozilla.org", "XPCShell", "1", "1.9.2");
+ // Make sure we don't generate unexpected pings due to pref changes.
+ yield setEmptyPrefWatchlist();
+
+ Services.prefs.setBoolPref(PREF_TELEMETRY_ENABLED, true);
+ Services.prefs.setCharPref(TelemetryController.Constants.PREF_SERVER,
+ "http://localhost:" + PingServer.port);
+});
+
+/**
+ * Setup the tests by making sure the ping storage directory is available, otherwise
+ * |TelemetryController.testSaveDirectoryToFile| could fail.
+ */
+add_task(function* setupEnvironment() {
+ // The following tests assume this pref to be true by default.
+ Services.prefs.setBoolPref(PREF_FHR_UPLOAD, true);
+
+ yield TelemetryController.testSetup();
+
+ let directory = TelemetryStorage.pingDirectoryPath;
+ yield File.makeDir(directory, { ignoreExisting: true, unixMode: OS.Constants.S_IRWXU });
+
+ yield TelemetryStorage.testClearPendingPings();
+});
+
+/**
+ * Test that really recent pings are sent on Telemetry initialization.
+ */
+add_task(function* test_recent_pings_sent() {
+ let pingTypes = [{ num: RECENT_PINGS }];
+ yield createSavedPings(pingTypes);
+
+ yield TelemetryController.testReset();
+ yield TelemetrySend.testWaitOnOutgoingPings();
+ assertReceivedPings(RECENT_PINGS);
+
+ yield TelemetryStorage.testClearPendingPings();
+});
+
+/**
+ * Create an overdue ping in the old format and try to send it.
+ */
+add_task(function* test_overdue_old_format() {
+ // A test ping in the old, standard format.
+ const PING_OLD_FORMAT = {
+ slug: "1234567abcd",
+ reason: "test-ping",
+ payload: {
+ info: {
+ reason: "test-ping",
+ OS: "XPCShell",
+ appID: "SomeId",
+ appVersion: "1.0",
+ appName: "XPCShell",
+ appBuildID: "123456789",
+ appUpdateChannel: "Test",
+ platformBuildID: "987654321",
+ },
+ },
+ };
+
+ // A ping with no info section, but with a slug.
+ const PING_NO_INFO = {
+ slug: "1234-no-info-ping",
+ reason: "test-ping",
+ payload: {}
+ };
+
+ // A ping with no payload.
+ const PING_NO_PAYLOAD = {
+ slug: "5678-no-payload",
+ reason: "test-ping",
+ };
+
+ // A ping with no info and no slug.
+ const PING_NO_SLUG = {
+ reason: "test-ping",
+ payload: {}
+ };
+
+ const PING_FILES_PATHS = [
+ getSavePathForPingId(PING_OLD_FORMAT.slug),
+ getSavePathForPingId(PING_NO_INFO.slug),
+ getSavePathForPingId(PING_NO_PAYLOAD.slug),
+ getSavePathForPingId("no-slug-file"),
+ ];
+
+ // Write the ping to file and make it overdue.
+ yield TelemetryStorage.savePing(PING_OLD_FORMAT, true);
+ yield TelemetryStorage.savePing(PING_NO_INFO, true);
+ yield TelemetryStorage.savePing(PING_NO_PAYLOAD, true);
+ yield TelemetryStorage.savePingToFile(PING_NO_SLUG, PING_FILES_PATHS[3], true);
+
+ for (let f in PING_FILES_PATHS) {
+ yield File.setDates(PING_FILES_PATHS[f], null, Date.now() - OVERDUE_PING_FILE_AGE);
+ }
+
+ gSeenPings = 0;
+ yield TelemetryController.testReset();
+ yield TelemetrySend.testWaitOnOutgoingPings();
+ assertReceivedPings(OLD_FORMAT_PINGS);
+
+ // |TelemetryStorage.cleanup| doesn't know how to remove a ping with no slug or id,
+ // so remove it manually so that the next test doesn't fail.
+ yield OS.File.remove(PING_FILES_PATHS[3]);
+
+ yield TelemetryStorage.testClearPendingPings();
+});
+
+add_task(function* test_corrupted_pending_pings() {
+ const TEST_TYPE = "test_corrupted";
+
+ Telemetry.getHistogramById("TELEMETRY_PENDING_LOAD_FAILURE_READ").clear();
+ Telemetry.getHistogramById("TELEMETRY_PENDING_LOAD_FAILURE_PARSE").clear();
+
+ // Save a pending ping and get its id.
+ let pendingPingId = yield TelemetryController.addPendingPing(TEST_TYPE, {}, {});
+
+ // Try to load it: there should be no error.
+ yield TelemetryStorage.loadPendingPing(pendingPingId);
+
+ let h = Telemetry.getHistogramById("TELEMETRY_PENDING_LOAD_FAILURE_READ").snapshot();
+ Assert.equal(h.sum, 0, "Telemetry must not report a pending ping load failure");
+ h = Telemetry.getHistogramById("TELEMETRY_PENDING_LOAD_FAILURE_PARSE").snapshot();
+ Assert.equal(h.sum, 0, "Telemetry must not report a pending ping parse failure");
+
+ // Delete it from the disk, so that its id will be kept in the cache but it will
+ // fail loading the file.
+ yield OS.File.remove(getSavePathForPingId(pendingPingId));
+
+ // Try to load a pending ping which isn't there anymore.
+ yield Assert.rejects(TelemetryStorage.loadPendingPing(pendingPingId),
+ "Telemetry must fail loading a ping which isn't there");
+
+ h = Telemetry.getHistogramById("TELEMETRY_PENDING_LOAD_FAILURE_READ").snapshot();
+ Assert.equal(h.sum, 1, "Telemetry must report a pending ping load failure");
+ h = Telemetry.getHistogramById("TELEMETRY_PENDING_LOAD_FAILURE_PARSE").snapshot();
+ Assert.equal(h.sum, 0, "Telemetry must not report a pending ping parse failure");
+
+ // Save a new ping, so that it gets in the pending pings cache.
+ pendingPingId = yield TelemetryController.addPendingPing(TEST_TYPE, {}, {});
+ // Overwrite it with a corrupted JSON file and then try to load it.
+ const INVALID_JSON = "{ invalid,JSON { {1}";
+ yield OS.File.writeAtomic(getSavePathForPingId(pendingPingId), INVALID_JSON, { encoding: "utf-8" });
+
+ // Try to load the ping with the corrupted JSON content.
+ yield Assert.rejects(TelemetryStorage.loadPendingPing(pendingPingId),
+ "Telemetry must fail loading a corrupted ping");
+
+ h = Telemetry.getHistogramById("TELEMETRY_PENDING_LOAD_FAILURE_READ").snapshot();
+ Assert.equal(h.sum, 1, "Telemetry must report a pending ping load failure");
+ h = Telemetry.getHistogramById("TELEMETRY_PENDING_LOAD_FAILURE_PARSE").snapshot();
+ Assert.equal(h.sum, 1, "Telemetry must report a pending ping parse failure");
+
+ let exists = yield OS.File.exists(getSavePathForPingId(pendingPingId));
+ Assert.ok(!exists, "The unparseable ping should have been removed");
+
+ yield TelemetryStorage.testClearPendingPings();
+});
+
+/**
+ * Create some recent and overdue pings and verify that they get sent.
+ */
+add_task(function* test_overdue_pings_trigger_send() {
+ let pingTypes = [
+ { num: RECENT_PINGS },
+ { num: OVERDUE_PINGS, age: OVERDUE_PING_FILE_AGE },
+ ];
+ let pings = yield createSavedPings(pingTypes);
+ let recentPings = pings.slice(0, RECENT_PINGS);
+ let overduePings = pings.slice(-OVERDUE_PINGS);
+
+ yield TelemetryController.testReset();
+ yield TelemetrySend.testWaitOnOutgoingPings();
+ assertReceivedPings(TOTAL_EXPECTED_PINGS);
+
+ yield assertNotSaved(recentPings);
+ yield assertNotSaved(overduePings);
+
+ Assert.equal(TelemetrySend.overduePingsCount, overduePings.length,
+ "Should have tracked the correct amount of overdue pings");
+
+ yield TelemetryStorage.testClearPendingPings();
+});
+
+/**
+ * Create a ping in the old format, send it, and make sure the request URL contains
+ * the correct version query parameter.
+ */
+add_task(function* test_overdue_old_format() {
+ // A test ping in the old, standard format.
+ const PING_OLD_FORMAT = {
+ slug: "1234567abcd",
+ reason: "test-ping",
+ payload: {
+ info: {
+ reason: "test-ping",
+ OS: "XPCShell",
+ appID: "SomeId",
+ appVersion: "1.0",
+ appName: "XPCShell",
+ appBuildID: "123456789",
+ appUpdateChannel: "Test",
+ platformBuildID: "987654321",
+ },
+ },
+ };
+
+ const filePath =
+ Path.join(Constants.Path.profileDir, PING_SAVE_FOLDER, PING_OLD_FORMAT.slug);
+
+ // Write the ping to file and make it overdue.
+ yield TelemetryStorage.savePing(PING_OLD_FORMAT, true);
+ yield File.setDates(filePath, null, Date.now() - OVERDUE_PING_FILE_AGE);
+
+ let receivedPings = 0;
+ // Register a new prefix handler to validate the URL.
+ PingServer.registerPingHandler(request => {
+ // Check that we have a version query parameter in the URL.
+ Assert.notEqual(request.queryString, "");
+
+ // Make sure the version in the query string matches the old ping format version.
+ let params = request.queryString.split("&");
+ Assert.ok(params.find(p => p == "v=1"));
+
+ receivedPings++;
+ });
+
+ yield TelemetryController.testReset();
+ yield TelemetrySend.testWaitOnOutgoingPings();
+ Assert.equal(receivedPings, 1, "We must receive a ping in the old format.");
+
+ yield TelemetryStorage.testClearPendingPings();
+ PingServer.resetPingHandler();
+});
+
+add_task(function* test_pendingPingsQuota() {
+ const PING_TYPE = "foo";
+
+ // Disable upload so pings don't get sent and removed from the pending pings directory.
+ Services.prefs.setBoolPref(PREF_FHR_UPLOAD, false);
+
+ // Remove all the pending pings then startup and wait for the cleanup task to complete.
+ // There should be nothing to remove.
+ yield TelemetryStorage.testClearPendingPings();
+ yield TelemetryController.testReset();
+ yield TelemetrySend.testWaitOnOutgoingPings();
+ yield TelemetryStorage.testPendingQuotaTaskPromise();
+
+ // Remove the pending deletion ping generated when flipping FHR upload off.
+ yield TelemetryStorage.testClearPendingPings();
+
+ let expectedPrunedPings = [];
+ let expectedNotPrunedPings = [];
+
+ let checkPendingPings = Task.async(function*() {
+ // Check that the pruned pings are not on disk anymore.
+ for (let prunedPingId of expectedPrunedPings) {
+ yield Assert.rejects(TelemetryStorage.loadPendingPing(prunedPingId),
+ "Ping " + prunedPingId + " should have been pruned.");
+ const pingPath = getSavePathForPingId(prunedPingId);
+ Assert.ok(!(yield OS.File.exists(pingPath)), "The ping should not be on the disk anymore.");
+ }
+
+ // Check that the expected pings are there.
+ for (let expectedPingId of expectedNotPrunedPings) {
+ Assert.ok((yield TelemetryStorage.loadPendingPing(expectedPingId)),
+ "Ping" + expectedPingId + " should be among the pending pings.");
+ }
+ });
+
+ let pendingPingsInfo = [];
+ let pingsSizeInBytes = 0;
+
+ // Create 10 pings to test the pending pings quota.
+ for (let days = 1; days < 11; days++) {
+ const date = fakeNow(2010, 1, days, 1, 1, 0);
+ const pingId = yield TelemetryController.addPendingPing(PING_TYPE, {}, {});
+
+ // Find the size of the ping.
+ const pingFilePath = getSavePathForPingId(pingId);
+ const pingSize = (yield OS.File.stat(pingFilePath)).size;
+ // Add the info at the beginning of the array, so that most recent pings come first.
+ pendingPingsInfo.unshift({id: pingId, size: pingSize, timestamp: date.getTime() });
+
+ // Set the last modification date.
+ yield OS.File.setDates(pingFilePath, null, date.getTime());
+
+ // Add it to the pending ping directory size.
+ pingsSizeInBytes += pingSize;
+ }
+
+ // We need to test the pending pings size before we hit the quota, otherwise a special
+ // value is recorded.
+ Telemetry.getHistogramById("TELEMETRY_PENDING_PINGS_SIZE_MB").clear();
+ Telemetry.getHistogramById("TELEMETRY_PENDING_PINGS_EVICTED_OVER_QUOTA").clear();
+ Telemetry.getHistogramById("TELEMETRY_PENDING_EVICTING_OVER_QUOTA_MS").clear();
+
+ yield TelemetryController.testReset();
+ yield TelemetryStorage.testPendingQuotaTaskPromise();
+
+ // Check that the correct values for quota probes are reported when no quota is hit.
+ let h = Telemetry.getHistogramById("TELEMETRY_PENDING_PINGS_SIZE_MB").snapshot();
+ Assert.equal(h.sum, Math.round(pingsSizeInBytes / 1024 / 1024),
+ "Telemetry must report the correct pending pings directory size.");
+ h = Telemetry.getHistogramById("TELEMETRY_PENDING_PINGS_EVICTED_OVER_QUOTA").snapshot();
+ Assert.equal(h.sum, 0, "Telemetry must report 0 evictions if quota is not hit.");
+ h = Telemetry.getHistogramById("TELEMETRY_PENDING_EVICTING_OVER_QUOTA_MS").snapshot();
+ Assert.equal(h.sum, 0, "Telemetry must report a null elapsed time if quota is not hit.");
+
+ // Set the quota to 80% of the space.
+ const testQuotaInBytes = pingsSizeInBytes * 0.8;
+ fakePendingPingsQuota(testQuotaInBytes);
+
+ // The storage prunes pending pings until we reach 90% of the requested storage quota.
+ // Based on that, find how many pings should be kept.
+ const safeQuotaSize = Math.round(testQuotaInBytes * 0.9);
+ let sizeInBytes = 0;
+ let pingsWithinQuota = [];
+ let pingsOutsideQuota = [];
+
+ for (let pingInfo of pendingPingsInfo) {
+ sizeInBytes += pingInfo.size;
+ if (sizeInBytes >= safeQuotaSize) {
+ pingsOutsideQuota.push(pingInfo.id);
+ continue;
+ }
+ pingsWithinQuota.push(pingInfo.id);
+ }
+
+ expectedNotPrunedPings = pingsWithinQuota;
+ expectedPrunedPings = pingsOutsideQuota;
+
+ // Reset TelemetryController to start the pending pings cleanup.
+ yield TelemetryController.testReset();
+ yield TelemetryStorage.testPendingQuotaTaskPromise();
+ yield checkPendingPings();
+
+ h = Telemetry.getHistogramById("TELEMETRY_PENDING_PINGS_EVICTED_OVER_QUOTA").snapshot();
+ Assert.equal(h.sum, pingsOutsideQuota.length,
+ "Telemetry must correctly report the over quota pings evicted from the pending pings directory.");
+ h = Telemetry.getHistogramById("TELEMETRY_PENDING_PINGS_SIZE_MB").snapshot();
+ Assert.equal(h.sum, 17, "Pending pings quota was hit, a special size must be reported.");
+
+ // Trigger a cleanup again and make sure we're not removing anything.
+ yield TelemetryController.testReset();
+ yield TelemetryStorage.testPendingQuotaTaskPromise();
+ yield checkPendingPings();
+
+ const OVERSIZED_PING_ID = "9b21ec8f-f762-4d28-a2c1-44e1c4694f24";
+ // Create a pending oversized ping.
+ const OVERSIZED_PING = {
+ id: OVERSIZED_PING_ID,
+ type: PING_TYPE,
+ creationDate: (new Date()).toISOString(),
+ // Generate a 2MB string to use as the ping payload.
+ payload: generateRandomString(2 * 1024 * 1024),
+ };
+ yield TelemetryStorage.savePendingPing(OVERSIZED_PING);
+
+ // Reset the histograms.
+ Telemetry.getHistogramById("TELEMETRY_PING_SIZE_EXCEEDED_PENDING").clear();
+ Telemetry.getHistogramById("TELEMETRY_DISCARDED_PENDING_PINGS_SIZE_MB").clear();
+
+ // Try to manually load the oversized ping.
+ yield Assert.rejects(TelemetryStorage.loadPendingPing(OVERSIZED_PING_ID),
+ "The oversized ping should have been pruned.");
+ Assert.ok(!(yield OS.File.exists(getSavePathForPingId(OVERSIZED_PING_ID))),
+ "The ping should not be on the disk anymore.");
+
+ // Make sure we're correctly updating the related histograms.
+ h = Telemetry.getHistogramById("TELEMETRY_PING_SIZE_EXCEEDED_PENDING").snapshot();
+ Assert.equal(h.sum, 1, "Telemetry must report 1 oversized ping in the pending pings directory.");
+ h = Telemetry.getHistogramById("TELEMETRY_DISCARDED_PENDING_PINGS_SIZE_MB").snapshot();
+ Assert.equal(h.counts[2], 1, "Telemetry must report a 2MB, oversized, ping.");
+
+ // Save the ping again to check if it gets pruned when scanning the pings directory.
+ yield TelemetryStorage.savePendingPing(OVERSIZED_PING);
+ expectedPrunedPings.push(OVERSIZED_PING_ID);
+
+ // Scan the pending pings directory.
+ yield TelemetryController.testReset();
+ yield TelemetryStorage.testPendingQuotaTaskPromise();
+ yield checkPendingPings();
+
+ // Make sure we're correctly updating the related histograms.
+ h = Telemetry.getHistogramById("TELEMETRY_PING_SIZE_EXCEEDED_PENDING").snapshot();
+ Assert.equal(h.sum, 2, "Telemetry must report 1 oversized ping in the pending pings directory.");
+ h = Telemetry.getHistogramById("TELEMETRY_DISCARDED_PENDING_PINGS_SIZE_MB").snapshot();
+ Assert.equal(h.counts[2], 2, "Telemetry must report two 2MB, oversized, pings.");
+
+ Services.prefs.setBoolPref(PREF_FHR_UPLOAD, true);
+});
+
+add_task(function* teardown() {
+ yield PingServer.stop();
+});
diff --git a/toolkit/components/telemetry/tests/unit/test_TelemetrySession.js b/toolkit/components/telemetry/tests/unit/test_TelemetrySession.js
new file mode 100644
index 000000000..698133162
--- /dev/null
+++ b/toolkit/components/telemetry/tests/unit/test_TelemetrySession.js
@@ -0,0 +1,2029 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/
+*/
+/* This testcase triggers two telemetry pings.
+ *
+ * Telemetry code keeps histograms of past telemetry pings. The first
+ * ping populates these histograms. One of those histograms is then
+ * checked in the second request.
+ */
+
+Cu.import("resource://services-common/utils.js");
+Cu.import("resource://gre/modules/ClientID.jsm");
+Cu.import("resource://gre/modules/Services.jsm");
+Cu.import("resource://gre/modules/LightweightThemeManager.jsm", this);
+Cu.import("resource://gre/modules/XPCOMUtils.jsm", this);
+Cu.import("resource://gre/modules/TelemetryController.jsm", this);
+Cu.import("resource://gre/modules/TelemetrySession.jsm", this);
+Cu.import("resource://gre/modules/TelemetryStorage.jsm", this);
+Cu.import("resource://gre/modules/TelemetryEnvironment.jsm", this);
+Cu.import("resource://gre/modules/TelemetrySend.jsm", this);
+Cu.import("resource://gre/modules/Task.jsm", this);
+Cu.import("resource://gre/modules/Promise.jsm", this);
+Cu.import("resource://gre/modules/Preferences.jsm");
+Cu.import("resource://gre/modules/osfile.jsm", this);
+
+const PING_FORMAT_VERSION = 4;
+const PING_TYPE_MAIN = "main";
+const PING_TYPE_SAVED_SESSION = "saved-session";
+
+const REASON_ABORTED_SESSION = "aborted-session";
+const REASON_SAVED_SESSION = "saved-session";
+const REASON_SHUTDOWN = "shutdown";
+const REASON_TEST_PING = "test-ping";
+const REASON_DAILY = "daily";
+const REASON_ENVIRONMENT_CHANGE = "environment-change";
+
+const PLATFORM_VERSION = "1.9.2";
+const APP_VERSION = "1";
+const APP_ID = "xpcshell@tests.mozilla.org";
+const APP_NAME = "XPCShell";
+
+const IGNORE_HISTOGRAM_TO_CLONE = "MEMORY_HEAP_ALLOCATED";
+const IGNORE_CLONED_HISTOGRAM = "test::ignore_me_also";
+const ADDON_NAME = "Telemetry test addon";
+const ADDON_HISTOGRAM = "addon-histogram";
+// Add some unicode characters here to ensure that sending them works correctly.
+const SHUTDOWN_TIME = 10000;
+const FAILED_PROFILE_LOCK_ATTEMPTS = 2;
+
+// Constants from prio.h for nsIFileOutputStream.init
+const PR_WRONLY = 0x2;
+const PR_CREATE_FILE = 0x8;
+const PR_TRUNCATE = 0x20;
+const RW_OWNER = parseInt("0600", 8);
+
+const NUMBER_OF_THREADS_TO_LAUNCH = 30;
+var gNumberOfThreadsLaunched = 0;
+
+const MS_IN_ONE_HOUR = 60 * 60 * 1000;
+const MS_IN_ONE_DAY = 24 * MS_IN_ONE_HOUR;
+
+const PREF_BRANCH = "toolkit.telemetry.";
+const PREF_SERVER = PREF_BRANCH + "server";
+const PREF_FHR_UPLOAD_ENABLED = "datareporting.healthreport.uploadEnabled";
+
+const DATAREPORTING_DIR = "datareporting";
+const ABORTED_PING_FILE_NAME = "aborted-session-ping";
+const ABORTED_SESSION_UPDATE_INTERVAL_MS = 5 * 60 * 1000;
+
+XPCOMUtils.defineLazyGetter(this, "DATAREPORTING_PATH", function() {
+ return OS.Path.join(OS.Constants.Path.profileDir, DATAREPORTING_DIR);
+});
+
+var gClientID = null;
+var gMonotonicNow = 0;
+
+function generateUUID() {
+ let str = Cc["@mozilla.org/uuid-generator;1"].getService(Ci.nsIUUIDGenerator).generateUUID().toString();
+ // strip {}
+ return str.substring(1, str.length - 1);
+}
+
+function truncateDateToDays(date) {
+ return new Date(date.getFullYear(),
+ date.getMonth(),
+ date.getDate(),
+ 0, 0, 0, 0);
+}
+
+function sendPing() {
+ TelemetrySession.gatherStartup();
+ if (PingServer.started) {
+ TelemetrySend.setServer("http://localhost:" + PingServer.port);
+ return TelemetrySession.testPing();
+ }
+ TelemetrySend.setServer("http://doesnotexist");
+ return TelemetrySession.testPing();
+}
+
+function fakeGenerateUUID(sessionFunc, subsessionFunc) {
+ let session = Cu.import("resource://gre/modules/TelemetrySession.jsm");
+ session.Policy.generateSessionUUID = sessionFunc;
+ session.Policy.generateSubsessionUUID = subsessionFunc;
+}
+
+// Deliver a fake observer notification for |topic| (e.g. "idle") straight to
+// the internal TelemetryScheduler and return the observer's result.
+function fakeIdleNotification(topic) {
+  let session = Cu.import("resource://gre/modules/TelemetrySession.jsm");
+  return session.TelemetryScheduler.observe(null, topic, null);
+}
+
+function setupTestData() {
+
+ Services.startup.interrupted = true;
+ Telemetry.registerAddonHistogram(ADDON_NAME, ADDON_HISTOGRAM,
+ Telemetry.HISTOGRAM_LINEAR,
+ 1, 5, 6);
+ let h1 = Telemetry.getAddonHistogram(ADDON_NAME, ADDON_HISTOGRAM);
+ h1.add(1);
+ let h2 = Telemetry.getHistogramById("TELEMETRY_TEST_COUNT");
+ h2.add();
+
+ let k1 = Telemetry.getKeyedHistogramById("TELEMETRY_TEST_KEYED_COUNT");
+ k1.add("a");
+ k1.add("a");
+ k1.add("b");
+}
+
+function getSavedPingFile(basename) {
+ let tmpDir = Services.dirsvc.get("ProfD", Ci.nsIFile);
+ let pingFile = tmpDir.clone();
+ pingFile.append(basename);
+ if (pingFile.exists()) {
+ pingFile.remove(true);
+ }
+ do_register_cleanup(function () {
+ try {
+ pingFile.remove(true);
+ } catch (e) {
+ }
+ });
+ return pingFile;
+}
+
+function checkPingFormat(aPing, aType, aHasClientId, aHasEnvironment) {
+ const MANDATORY_PING_FIELDS = [
+ "type", "id", "creationDate", "version", "application", "payload"
+ ];
+
+ const APPLICATION_TEST_DATA = {
+ buildId: gAppInfo.appBuildID,
+ name: APP_NAME,
+ version: APP_VERSION,
+ vendor: "Mozilla",
+ platformVersion: PLATFORM_VERSION,
+ xpcomAbi: "noarch-spidermonkey",
+ };
+
+ // Check that the ping contains all the mandatory fields.
+ for (let f of MANDATORY_PING_FIELDS) {
+ Assert.ok(f in aPing, f + "must be available.");
+ }
+
+ Assert.equal(aPing.type, aType, "The ping must have the correct type.");
+ Assert.equal(aPing.version, PING_FORMAT_VERSION, "The ping must have the correct version.");
+
+ // Test the application section.
+ for (let f in APPLICATION_TEST_DATA) {
+ Assert.equal(aPing.application[f], APPLICATION_TEST_DATA[f],
+ f + " must have the correct value.");
+ }
+
+ // We can't check the values for channel and architecture. Just make
+ // sure they are in.
+ Assert.ok("architecture" in aPing.application,
+ "The application section must have an architecture field.");
+ Assert.ok("channel" in aPing.application,
+ "The application section must have a channel field.");
+
+ // Check the clientId and environment fields, as needed.
+ Assert.equal("clientId" in aPing, aHasClientId);
+ Assert.equal("environment" in aPing, aHasEnvironment);
+}
+
+// Validate the "info" section of a main ping payload: mandatory fields, their
+// types (UUIDs, ISO dates, numbers) and basic value sanity such as the
+// timezone range and counter ordering.
+function checkPayloadInfo(data) {
+  const ALLOWED_REASONS = [
+    "environment-change", "shutdown", "daily", "saved-session", "test-ping"
+  ];
+  // Small reusable validators; each returns a boolean.
+  let numberCheck = arg => { return (typeof arg == "number"); };
+  let positiveNumberCheck = arg => { return numberCheck(arg) && (arg >= 0); };
+  let stringCheck = arg => { return (typeof arg == "string") && (arg != ""); };
+  // Official builds must carry a non-empty revision string; otherwise any
+  // string (including empty) is accepted.
+  let revisionCheck = arg => {
+    return (Services.appinfo.isOfficial) ? stringCheck(arg) : (typeof arg == "string");
+  };
+  let uuidCheck = arg => {
+    return UUID_REGEX.test(arg);
+  };
+  let isoDateCheck = arg => {
+    // We expect use of this version of the ISO format:
+    // 2015-04-12T18:51:19.1+00:00
+    const isoDateRegEx = /^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d+[+-]\d{2}:\d{2}$/;
+    return stringCheck(arg) && !Number.isNaN(Date.parse(arg)) &&
+           isoDateRegEx.test(arg);
+  };
+
+  // Maps each mandatory info field to the validator applied to its value.
+  const EXPECTED_INFO_FIELDS_TYPES = {
+    reason: stringCheck,
+    revision: revisionCheck,
+    timezoneOffset: numberCheck,
+    sessionId: uuidCheck,
+    subsessionId: uuidCheck,
+    // Special cases: previousSessionId and previousSubsessionId are null on first run.
+    previousSessionId: (arg) => { return (arg) ? uuidCheck(arg) : true; },
+    previousSubsessionId: (arg) => { return (arg) ? uuidCheck(arg) : true; },
+    subsessionCounter: positiveNumberCheck,
+    profileSubsessionCounter: positiveNumberCheck,
+    sessionStartDate: isoDateCheck,
+    subsessionStartDate: isoDateCheck,
+    subsessionLength: positiveNumberCheck,
+  };
+
+  for (let f in EXPECTED_INFO_FIELDS_TYPES) {
+    Assert.ok(f in data, f + " must be available.");
+
+    let checkFunc = EXPECTED_INFO_FIELDS_TYPES[f];
+    Assert.ok(checkFunc(data[f]),
+              f + " must have the correct type and valid data " + data[f]);
+  }
+
+  // Previous buildId is not mandatory.
+  if (data.previousBuildId) {
+    Assert.ok(stringCheck(data.previousBuildId));
+  }
+
+  Assert.ok(ALLOWED_REASONS.find(r => r == data.reason),
+            "Payload must contain an allowed reason.");
+
+  // Subsessions start during (or at) their session; the profile-wide counter
+  // can only be >= the per-session one.
+  Assert.ok(Date.parse(data.subsessionStartDate) >= Date.parse(data.sessionStartDate));
+  Assert.ok(data.profileSubsessionCounter >= data.subsessionCounter);
+  Assert.ok(data.timezoneOffset >= -12*60, "The timezone must be in a valid range.");
+  Assert.ok(data.timezoneOffset <= 12*60, "The timezone must be in a valid range.");
+}
+
+function checkScalars(processes) {
+ // Check that the scalars section is available in the ping payload.
+ const parentProcess = processes.parent;
+ Assert.ok("scalars" in parentProcess, "The scalars section must be available in the parent process.");
+ Assert.ok("keyedScalars" in parentProcess, "The keyedScalars section must be available in the parent process.");
+ Assert.equal(typeof parentProcess.scalars, "object", "The scalars entry must be an object.");
+ Assert.equal(typeof parentProcess.keyedScalars, "object", "The keyedScalars entry must be an object.");
+
+ let checkScalar = function(scalar) {
+ // Check if the value is of a supported type.
+ const valueType = typeof(scalar);
+ switch (valueType) {
+ case "string":
+ Assert.ok(scalar.length <= 50,
+ "String values can't have more than 50 characters");
+ break;
+ case "number":
+ Assert.ok(scalar >= 0,
+ "We only support unsigned integer values in scalars.");
+ break;
+ case "boolean":
+ Assert.ok(true,
+ "Boolean scalar found.");
+ break;
+ default:
+ Assert.ok(false,
+ name + " contains an unsupported value type (" + valueType + ")");
+ }
+ }
+
+ // Check that we have valid scalar entries.
+ const scalars = parentProcess.scalars;
+ for (let name in scalars) {
+ Assert.equal(typeof name, "string", "Scalar names must be strings.");
+ checkScalar(scalar[name]);
+ }
+
+ // Check that we have valid keyed scalar entries.
+ const keyedScalars = parentProcess.keyedScalars;
+ for (let name in keyedScalars) {
+ Assert.equal(typeof name, "string", "Scalar names must be strings.");
+ Assert.ok(Object.keys(keyedScalars[name]).length,
+ "The reported keyed scalars must contain at least 1 key.");
+ for (let key in keyedScalars[name]) {
+ Assert.equal(typeof key, "string", "Keyed scalar keys must be strings.");
+ Assert.ok(key.length <= 70, "Keyed scalar keys can't have more than 70 characters.");
+ checkScalar(scalar[name][key]);
+ }
+ }
+}
+
+// Verify the events section of the parent process in a ping payload: it must
+// be an array of [timestamp, category, method, object, value, extra] records
+// whose fields respect the documented type and length limits.
+function checkEvents(processes) {
+  // Check that the events section is available in the ping payload.
+  const parent = processes.parent;
+  Assert.ok("events" in parent, "The events section must be available in the parent process.");
+
+  // Check that the events section has the right format.
+  Assert.ok(Array.isArray(parent.events), "The events entry must be an array.");
+  for (let [ts, category, method, object, value, extra] of parent.events) {
+    Assert.equal(typeof(ts), "number", "Timestamp field should be a number.");
+    Assert.greaterOrEqual(ts, 0, "Timestamp should be >= 0.");
+
+    Assert.equal(typeof(category), "string", "Category should have the right type.");
+    Assert.lessOrEqual(category.length, 100, "Category should have the right string length.");
+
+    Assert.equal(typeof(method), "string", "Method should have the right type.");
+    Assert.lessOrEqual(method.length, 40, "Method should have the right string length.");
+
+    Assert.equal(typeof(object), "string", "Object should have the right type.");
+    Assert.lessOrEqual(object.length, 40, "Object should have the right string length.");
+
+    // value and extra are optional trailing fields; they are null (or absent,
+    // yielding undefined here) when not recorded.
+    Assert.ok(value === null || typeof(value) === "string",
+              "Value should be null or a string.");
+    if (value) {
+      Assert.lessOrEqual(value.length, 100, "Value should have the right string length.");
+    }
+
+    Assert.ok(extra === null || typeof(extra) === "object",
+              "Extra should be null or an object.");
+    if (extra) {
+      // Extra is a string->string dictionary with bounded key count and sizes.
+      let keys = Object.keys(extra);
+      let keyTypes = keys.map(k => typeof(k));
+      Assert.lessOrEqual(keys.length, 20, "Should not have too many extra keys.");
+      Assert.ok(keyTypes.every(t => t === "string"),
+                "All extra keys should be strings.");
+      Assert.ok(keys.every(k => k.length <= 20),
+                "All extra keys should have the right string length.");
+
+      let values = Object.values(extra);
+      let valueTypes = values.map(v => typeof(v));
+      Assert.ok(valueTypes.every(t => t === "string"),
+                "All extra values should be strings.");
+      Assert.ok(values.every(v => v.length <= 100),
+                "All extra values should have the right string length.");
+    }
+  }
+}
+
+// Verify a full main-ping payload: info section, simple measurements,
+// histograms (plain, keyed and addon), slow SQL / webrtc sections, and the
+// per-process scalars and events sections.
+// NOTE(review): the |reason| parameter is currently unused here; the payload
+// reason is validated by checkPayloadInfo() against its allowed list instead.
+function checkPayload(payload, reason, successfulPings, savedPings) {
+  Assert.ok("info" in payload, "Payload must contain an info section.");
+  checkPayloadInfo(payload.info);
+
+  // Simple measurements seeded by test_setup()/setupTestData().
+  Assert.ok(payload.simpleMeasurements.totalTime >= 0);
+  Assert.ok(payload.simpleMeasurements.uptime >= 0);
+  Assert.equal(payload.simpleMeasurements.startupInterrupted, 1);
+  Assert.equal(payload.simpleMeasurements.shutdownDuration, SHUTDOWN_TIME);
+  Assert.equal(payload.simpleMeasurements.savedPings, savedPings);
+  Assert.ok("maximalNumberOfConcurrentThreads" in payload.simpleMeasurements);
+  Assert.ok(payload.simpleMeasurements.maximalNumberOfConcurrentThreads >= gNumberOfThreadsLaunched);
+
+  let activeTicks = payload.simpleMeasurements.activeTicks;
+  Assert.ok(activeTicks >= 0);
+
+  // The fake failed-profile-locks file must have been consumed (and deleted)
+  // by Telemetry on startup.
+  Assert.equal(payload.simpleMeasurements.failedProfileLockCount,
+               FAILED_PROFILE_LOCK_ATTEMPTS);
+  let profileDirectory = Services.dirsvc.get("ProfD", Ci.nsIFile);
+  let failedProfileLocksFile = profileDirectory.clone();
+  failedProfileLocksFile.append("Telemetry.FailedProfileLocks.txt");
+  Assert.ok(!failedProfileLocksFile.exists());
+
+
+  // Startup session-restore I/O counters are only gathered on Windows.
+  let isWindows = ("@mozilla.org/windows-registry-key;1" in Components.classes);
+  if (isWindows) {
+    Assert.ok(payload.simpleMeasurements.startupSessionRestoreReadBytes > 0);
+    Assert.ok(payload.simpleMeasurements.startupSessionRestoreWriteBytes > 0);
+  }
+
+  const TELEMETRY_SEND_SUCCESS = "TELEMETRY_SEND_SUCCESS";
+  const TELEMETRY_SUCCESS = "TELEMETRY_SUCCESS";
+  const TELEMETRY_TEST_FLAG = "TELEMETRY_TEST_FLAG";
+  const TELEMETRY_TEST_COUNT = "TELEMETRY_TEST_COUNT";
+  const TELEMETRY_TEST_KEYED_FLAG = "TELEMETRY_TEST_KEYED_FLAG";
+  const TELEMETRY_TEST_KEYED_COUNT = "TELEMETRY_TEST_KEYED_COUNT";
+
+  if (successfulPings > 0) {
+    Assert.ok(TELEMETRY_SEND_SUCCESS in payload.histograms);
+  }
+  Assert.ok(TELEMETRY_TEST_FLAG in payload.histograms);
+  Assert.ok(TELEMETRY_TEST_COUNT in payload.histograms);
+
+  // Cloned addon histograms must not leak into the regular histogram section.
+  Assert.ok(!(IGNORE_CLONED_HISTOGRAM in payload.histograms));
+
+  // Flag histograms should automagically spring to life.
+  const expected_flag = {
+    range: [1, 2],
+    bucket_count: 3,
+    histogram_type: 3,
+    values: {0:1, 1:0},
+    sum: 0
+  };
+  let flag = payload.histograms[TELEMETRY_TEST_FLAG];
+  Assert.equal(uneval(flag), uneval(expected_flag));
+
+  // We should have a test count.
+  const expected_count = {
+    range: [1, 2],
+    bucket_count: 3,
+    histogram_type: 4,
+    values: {0:1, 1:0},
+    sum: 1,
+  };
+  let count = payload.histograms[TELEMETRY_TEST_COUNT];
+  Assert.equal(uneval(count), uneval(expected_count));
+
+  // There should be one successful report from the previous telemetry ping.
+  if (successfulPings > 0) {
+    const expected_tc = {
+      range: [1, 2],
+      bucket_count: 3,
+      histogram_type: 2,
+      values: {0:2, 1:successfulPings, 2:0},
+      sum: successfulPings
+    };
+    let tc = payload.histograms[TELEMETRY_SUCCESS];
+    Assert.equal(uneval(tc), uneval(expected_tc));
+  }
+
+  // The ping should include data from memory reporters. We can't check that
+  // this data is correct, because we can't control the values returned by the
+  // memory reporters. But we can at least check that the data is there.
+  //
+  // It's important to check for the presence of reporters with a mix of units,
+  // because TelemetryController has separate logic for each one. But we can't
+  // currently check UNITS_COUNT_CUMULATIVE or UNITS_PERCENTAGE because
+  // Telemetry doesn't touch a memory reporter with these units that's
+  // available on all platforms.
+
+  Assert.ok('MEMORY_JS_GC_HEAP' in payload.histograms); // UNITS_BYTES
+  Assert.ok('MEMORY_JS_COMPARTMENTS_SYSTEM' in payload.histograms); // UNITS_COUNT
+
+  // We should have included addon histograms.
+  Assert.ok("addonHistograms" in payload);
+  Assert.ok(ADDON_NAME in payload.addonHistograms);
+  Assert.ok(ADDON_HISTOGRAM in payload.addonHistograms[ADDON_NAME]);
+
+  Assert.ok(("mainThread" in payload.slowSQL) &&
+            ("otherThreads" in payload.slowSQL));
+
+  Assert.ok(("IceCandidatesStats" in payload.webrtc) &&
+            ("webrtc" in payload.webrtc.IceCandidatesStats));
+
+  // Check keyed histogram payload.
+
+  Assert.ok("keyedHistograms" in payload);
+  let keyedHistograms = payload.keyedHistograms;
+  // The keyed flag histogram was never touched, so it must not be reported.
+  Assert.ok(!(TELEMETRY_TEST_KEYED_FLAG in keyedHistograms));
+  Assert.ok(TELEMETRY_TEST_KEYED_COUNT in keyedHistograms);
+
+  // Expected snapshot after setupTestData(): key "a" added twice, "b" once.
+  const expected_keyed_count = {
+    "a": {
+      range: [1, 2],
+      bucket_count: 3,
+      histogram_type: 4,
+      values: {0:2, 1:0},
+      sum: 2,
+    },
+    "b": {
+      range: [1, 2],
+      bucket_count: 3,
+      histogram_type: 4,
+      values: {0:1, 1:0},
+      sum: 1,
+    },
+  };
+  Assert.deepEqual(expected_keyed_count, keyedHistograms[TELEMETRY_TEST_KEYED_COUNT]);
+
+  Assert.ok("processes" in payload, "The payload must have a processes section.");
+  Assert.ok("parent" in payload.processes, "There must be at least a parent process.");
+  checkScalars(payload.processes);
+  checkEvents(payload.processes);
+}
+
+// Synchronously write |contents| to |file| through a safe file output stream,
+// so a partially written temp file never replaces the target.
+function writeStringToFile(file, contents) {
+  let ostream = Cc["@mozilla.org/network/safe-file-output-stream;1"]
+                .createInstance(Ci.nsIFileOutputStream);
+  // Owner read/write only (0600); DEFER_OPEN delays opening until first write.
+  ostream.init(file, PR_WRONLY | PR_CREATE_FILE | PR_TRUNCATE,
+               RW_OWNER, ostream.DEFER_OPEN);
+  ostream.write(contents, contents.length);
+  // finish() commits the data atomically before the stream is closed.
+  ostream.QueryInterface(Ci.nsISafeOutputStream).finish();
+  ostream.close();
+}
+
+function write_fake_shutdown_file() {
+ let profileDirectory = Services.dirsvc.get("ProfD", Ci.nsIFile);
+ let file = profileDirectory.clone();
+ file.append("Telemetry.ShutdownTime.txt");
+ let contents = "" + SHUTDOWN_TIME;
+ writeStringToFile(file, contents);
+}
+
+function write_fake_failedprofilelocks_file() {
+ let profileDirectory = Services.dirsvc.get("ProfD", Ci.nsIFile);
+ let file = profileDirectory.clone();
+ file.append("Telemetry.FailedProfileLocks.txt");
+ let contents = "" + FAILED_PROFILE_LOCK_ATTEMPTS;
+ writeStringToFile(file, contents);
+}
+
+// One-time environment setup: profile, addon manager, prefs, fake shutdown /
+// profile-lock files, and a burst of threads to exercise the
+// maximalNumberOfConcurrentThreads measurement.
+add_task(function* test_setup() {
+  // Addon manager needs a profile directory
+  do_get_profile();
+  loadAddonManager(APP_ID, APP_NAME, APP_VERSION, PLATFORM_VERSION);
+  // Make sure we don't generate unexpected pings due to pref changes.
+  yield setEmptyPrefWatchlist();
+
+  Services.prefs.setBoolPref(PREF_TELEMETRY_ENABLED, true);
+  Services.prefs.setBoolPref(PREF_FHR_UPLOAD_ENABLED, true);
+
+  // Make it look like we've previously failed to lock a profile a couple times.
+  write_fake_failedprofilelocks_file();
+
+  // Make it look like we've shutdown before.
+  write_fake_shutdown_file();
+
+  let currentMaxNumberOfThreads = Telemetry.maximalNumberOfConcurrentThreads;
+  do_check_true(currentMaxNumberOfThreads > 0);
+
+  // Try to augment the maximal number of threads currently launched
+  let threads = [];
+  try {
+    for (let i = 0; i < currentMaxNumberOfThreads + 10; ++i) {
+      threads.push(Services.tm.newThread(0));
+    }
+  } catch (ex) {
+    // If memory is too low, it is possible that not all threads will be launched.
+  }
+  gNumberOfThreadsLaunched = threads.length;
+
+  do_check_true(Telemetry.maximalNumberOfConcurrentThreads >= gNumberOfThreadsLaunched);
+
+  do_register_cleanup(function() {
+    threads.forEach(function(thread) {
+      thread.shutdown();
+    });
+  });
+
+  // Wait for the load of saved telemetry data to complete before proceeding.
+  yield new Promise(resolve =>
+    Telemetry.asyncFetchTelemetryData(wrapWithExceptionHandler(resolve)));
+});
+
+// Initialize TelemetryController and cache the client ID used for ping
+// sanity checks in later tasks.
+add_task(function* asyncSetup() {
+  yield TelemetryController.testSetup();
+  // Load the client ID from the client ID provider to check for pings sanity.
+  gClientID = yield ClientID.getClientID();
+});
+
+// Ensures that expired histograms are not part of the payload.
+add_task(function* test_expiredHistogram() {
+
+  let dummy = Telemetry.getHistogramById("TELEMETRY_TEST_EXPIRED");
+
+  dummy.add(1);
+
+  // Even though a value was recorded, the expired probe must be dropped.
+  do_check_eq(TelemetrySession.getPayload()["histograms"]["TELEMETRY_TEST_EXPIRED"], undefined);
+});
+
+// Sends a ping to a non existing server. If we remove this test, we won't get
+// all the histograms we need in the main ping.
+add_task(function* test_noServerPing() {
+  yield sendPing();
+  // We need two pings in order to make sure STARTUP_MEMORY_STORAGE_SQLITE
+  // histograms are initialised (name hedged from the original "SQLIE" —
+  // TODO confirm against Histograms.json). See bug 1131585.
+  yield sendPing();
+  // Allowing Telemetry to persist unsent pings as pending. If omitted may cause
+  // problems to the consequent tests.
+  yield TelemetryController.testShutdown();
+});
+
+// Checks that a sent ping is correctly received by a dummy http server.
+add_task(function* test_simplePing() {
+  // Start from a clean slate and point Telemetry at the local ping server.
+  yield TelemetryStorage.testClearPendingPings();
+  PingServer.start();
+  Preferences.set(PREF_SERVER, "http://localhost:" + PingServer.port);
+
+  // Fake the clock: session starts at noon, so the (day-truncated) session
+  // start date reported in the ping should be midnight of the same day.
+  let now = new Date(2020, 1, 1, 12, 0, 0);
+  let expectedDate = new Date(2020, 1, 1, 0, 0, 0);
+  fakeNow(now);
+  gMonotonicNow = fakeMonotonicNow(gMonotonicNow + 5000);
+
+  // Force deterministic session/subsession ids so they can be asserted below.
+  const expectedSessionUUID = "bd314d15-95bf-4356-b682-b6c4a8942202";
+  const expectedSubsessionUUID = "3e2e5f6c-74ba-4e4d-a93f-a48af238a8c7";
+  fakeGenerateUUID(() => expectedSessionUUID, () => expectedSubsessionUUID);
+  yield TelemetryController.testReset();
+
+  // Session and subsession start dates are faked during TelemetrySession setup. We can
+  // now fake the session duration.
+  const SESSION_DURATION_IN_MINUTES = 15;
+  fakeNow(new Date(2020, 1, 1, 12, SESSION_DURATION_IN_MINUTES, 0));
+  gMonotonicNow = fakeMonotonicNow(gMonotonicNow + SESSION_DURATION_IN_MINUTES * 60 * 1000);
+
+  yield sendPing();
+  let ping = yield PingServer.promiseNextPing();
+
+  checkPingFormat(ping, PING_TYPE_MAIN, true, true);
+
+  // Check that we get the data we expect.
+  let payload = ping.payload;
+  Assert.equal(payload.info.sessionId, expectedSessionUUID);
+  Assert.equal(payload.info.subsessionId, expectedSubsessionUUID);
+  let sessionStartDate = new Date(payload.info.sessionStartDate);
+  Assert.equal(sessionStartDate.toISOString(), expectedDate.toISOString());
+  let subsessionStartDate = new Date(payload.info.subsessionStartDate);
+  Assert.equal(subsessionStartDate.toISOString(), expectedDate.toISOString());
+  // Subsession length is reported in seconds.
+  Assert.equal(payload.info.subsessionLength, SESSION_DURATION_IN_MINUTES * 60);
+
+  // Restore the UUID generator so we don't mess with other tests.
+  fakeGenerateUUID(generateUUID, generateUUID);
+});
+
+// Saves the current session histograms, reloads them, performs a ping
+// and checks that the dummy http server received both the previously
+// saved ping and the new one.
+add_task(function* test_saveLoadPing() {
+  // Let's start out with a defined state.
+  yield TelemetryStorage.testClearPendingPings();
+  yield TelemetryController.testReset();
+  PingServer.clearRequests();
+
+  // Setup test data and trigger pings.
+  setupTestData();
+  yield TelemetrySession.testSavePendingPing();
+  yield sendPing();
+
+  // Get requests received by dummy server.
+  const requests = yield PingServer.promiseNextRequests(2);
+
+  for (let req of requests) {
+    Assert.equal(req.getHeader("content-type"), "application/json; charset=UTF-8",
+                 "The request must have the correct content-type.");
+  }
+
+  // We decode both requests to check for the |reason|.
+  let pings = Array.from(requests, decodeRequestPayload);
+
+  // Check we have the correct two requests. Ordering is not guaranteed. The ping type
+  // is encoded in the URL.
+  if (pings[0].type != PING_TYPE_MAIN) {
+    pings.reverse();
+  }
+
+  // First ping: the freshly sent test ping (1 pending ping was saved before
+  // it); second: the saved-session ping persisted earlier.
+  checkPingFormat(pings[0], PING_TYPE_MAIN, true, true);
+  checkPayload(pings[0].payload, REASON_TEST_PING, 0, 1);
+  checkPingFormat(pings[1], PING_TYPE_SAVED_SESSION, true, true);
+  checkPayload(pings[1].payload, REASON_SAVED_SESSION, 0, 0);
+});
+
+// Scalars must appear only in subsession payloads (e.g. "main") and must be
+// cleared when a subsession ends.
+add_task(function* test_checkSubsessionScalars() {
+  if (gIsAndroid) {
+    // We don't support subsessions yet on Android.
+    return;
+  }
+
+  // Clear the scalars.
+  Telemetry.clearScalars();
+  yield TelemetryController.testReset();
+
+  // Set some scalars.
+  const UINT_SCALAR = "telemetry.test.unsigned_int_kind";
+  const STRING_SCALAR = "telemetry.test.string_kind";
+  let expectedUint = 37;
+  let expectedString = "Test value. Yay.";
+  Telemetry.scalarSet(UINT_SCALAR, expectedUint);
+  Telemetry.scalarSet(STRING_SCALAR, expectedString);
+
+  // Check that scalars are not available in classic pings but are in subsession
+  // pings. Also clear the subsession.
+  let classic = TelemetrySession.getPayload();
+  let subsession = TelemetrySession.getPayload("environment-change", true);
+
+  const TEST_SCALARS = [ UINT_SCALAR, STRING_SCALAR ];
+  for (let name of TEST_SCALARS) {
+    // Scalar must be reported in subsession pings (e.g. main).
+    Assert.ok(name in subsession.processes.parent.scalars,
+              name + " must be reported in a subsession ping.");
+  }
+  // No scalar must be reported in classic pings (e.g. saved-session).
+  Assert.ok(Object.keys(classic.processes.parent.scalars).length == 0,
+            "Scalars must not be reported in a classic ping.");
+
+  // And make sure that we're getting the right values in the
+  // subsession ping.
+  Assert.equal(subsession.processes.parent.scalars[UINT_SCALAR], expectedUint,
+               UINT_SCALAR + " must contain the expected value.");
+  Assert.equal(subsession.processes.parent.scalars[STRING_SCALAR], expectedString,
+               STRING_SCALAR + " must contain the expected value.");
+
+  // Since we cleared the subsession in the last getPayload(), check that
+  // breaking subsessions clears the scalars.
+  subsession = TelemetrySession.getPayload("environment-change");
+  for (let name of TEST_SCALARS) {
+    Assert.ok(!(name in subsession.processes.parent.scalars),
+              name + " must be cleared with the new subsession.");
+  }
+
+  // Check if setting the scalars again works as expected.
+  expectedUint = 85;
+  expectedString = "A creative different value";
+  Telemetry.scalarSet(UINT_SCALAR, expectedUint);
+  Telemetry.scalarSet(STRING_SCALAR, expectedString);
+  subsession = TelemetrySession.getPayload("environment-change");
+  Assert.equal(subsession.processes.parent.scalars[UINT_SCALAR], expectedUint,
+               UINT_SCALAR + " must contain the expected value.");
+  Assert.equal(subsession.processes.parent.scalars[STRING_SCALAR], expectedString,
+               STRING_SCALAR + " must contain the expected value.");
+});
+
+// Events must appear only in subsession payloads and must be cleared when the
+// subsession is broken.
+add_task(function* test_checkSubsessionEvents() {
+  if (gIsAndroid) {
+    // We don't support subsessions yet on Android.
+    return;
+  }
+
+  // Clear the events.
+  Telemetry.clearEvents();
+  yield TelemetryController.testReset();
+
+  // Record some events.
+  let expected = [
+    ["telemetry.test", "test1", "object1", "a", null],
+    ["telemetry.test", "test1", "object1", null, {key1: "value"}],
+  ];
+  for (let event of expected) {
+    Telemetry.recordEvent(...event);
+  }
+
+  // Strip off trailing null values to match the serialized events.
+  for (let e of expected) {
+    while ((e.length >= 3) && (e[e.length - 1] === null)) {
+      e.pop();
+    }
+  }
+
+  // Check that events are not available in classic pings but are in subsession
+  // pings. Also clear the subsession.
+  let classic = TelemetrySession.getPayload();
+  let subsession = TelemetrySession.getPayload("environment-change", true);
+
+  Assert.ok("events" in classic.processes.parent, "Should have an events field in classic payload.");
+  Assert.ok("events" in subsession.processes.parent, "Should have an events field in subsession payload.");
+
+  // They should be empty in the classic payload.
+  Assert.deepEqual(classic.processes.parent.events, [], "Events in classic payload should be empty.");
+
+  // In the subsession payload, they should contain the recorded test events.
+  // Serialized events are [timestamp, category, method, object, ...]; index 1
+  // is the category, and slice(1) drops the non-deterministic timestamp.
+  let events = subsession.processes.parent.events.filter(e => e[1] === "telemetry.test");
+  Assert.equal(events.length, expected.length, "Should have the right amount of events in the payload.");
+  for (let i = 0; i < expected.length; ++i) {
+    Assert.deepEqual(events[i].slice(1), expected[i],
+                     "Should have the right event data in the ping.");
+  }
+
+  // As we cleared the subsession above, the events entry should now be empty.
+  subsession = TelemetrySession.getPayload("environment-change", false);
+  Assert.ok("events" in subsession.processes.parent, "Should have an events field in subsession payload.");
+  events = subsession.processes.parent.events.filter(e => e[1] === "telemetry.test");
+  Assert.equal(events.length, 0, "Should have no test events in the subsession payload now.");
+});
+
+add_task(function* test_checkSubsessionHistograms() {
+ if (gIsAndroid) {
+ // We don't support subsessions yet on Android.
+ return;
+ }
+
+ let now = new Date(2020, 1, 1, 12, 0, 0);
+ let expectedDate = new Date(2020, 1, 1, 0, 0, 0);
+ fakeNow(now);
+ yield TelemetryController.testReset();
+
+ const COUNT_ID = "TELEMETRY_TEST_COUNT";
+ const KEYED_ID = "TELEMETRY_TEST_KEYED_COUNT";
+ const count = Telemetry.getHistogramById(COUNT_ID);
+ const keyed = Telemetry.getKeyedHistogramById(KEYED_ID);
+ const registeredIds =
+ new Set(Telemetry.registeredHistograms(Ci.nsITelemetry.DATASET_RELEASE_CHANNEL_OPTIN, []));
+
+ const stableHistograms = new Set([
+ "TELEMETRY_TEST_FLAG",
+ "TELEMETRY_TEST_COUNT",
+ "TELEMETRY_TEST_RELEASE_OPTOUT",
+ "TELEMETRY_TEST_RELEASE_OPTIN",
+ "STARTUP_CRASH_DETECTED",
+ ]);
+
+ const stableKeyedHistograms = new Set([
+ "TELEMETRY_TEST_KEYED_FLAG",
+ "TELEMETRY_TEST_KEYED_COUNT",
+ "TELEMETRY_TEST_KEYED_RELEASE_OPTIN",
+ "TELEMETRY_TEST_KEYED_RELEASE_OPTOUT",
+ ]);
+
+ // Compare the two sets of histograms.
+ // The "subsession" histograms should match the registered
+ // "classic" histograms. However, histograms can change
+ // between us collecting the different payloads, so we only
+ // check for deep equality on known stable histograms.
+ checkHistograms = (classic, subsession) => {
+ for (let id of Object.keys(classic)) {
+ if (!registeredIds.has(id)) {
+ continue;
+ }
+
+ Assert.ok(id in subsession);
+ if (stableHistograms.has(id)) {
+ Assert.deepEqual(classic[id],
+ subsession[id]);
+ } else {
+ Assert.equal(classic[id].histogram_type,
+ subsession[id].histogram_type);
+ }
+ }
+ };
+
+ // Same as above, except for keyed histograms.
+ checkKeyedHistograms = (classic, subsession) => {
+ for (let id of Object.keys(classic)) {
+ if (!registeredIds.has(id)) {
+ continue;
+ }
+
+ Assert.ok(id in subsession);
+ if (stableKeyedHistograms.has(id)) {
+ Assert.deepEqual(classic[id],
+ subsession[id]);
+ }
+ }
+ };
+
+ // Both classic and subsession payload histograms should start the same.
+ // The payloads should be identical for now except for the reason.
+ count.clear();
+ keyed.clear();
+ let classic = TelemetrySession.getPayload();
+ let subsession = TelemetrySession.getPayload("environment-change");
+
+ Assert.equal(classic.info.reason, "gather-payload");
+ Assert.equal(subsession.info.reason, "environment-change");
+ Assert.ok(!(COUNT_ID in classic.histograms));
+ Assert.ok(!(COUNT_ID in subsession.histograms));
+ Assert.ok(!(KEYED_ID in classic.keyedHistograms));
+ Assert.ok(!(KEYED_ID in subsession.keyedHistograms));
+
+ checkHistograms(classic.histograms, subsession.histograms);
+ checkKeyedHistograms(classic.keyedHistograms, subsession.keyedHistograms);
+
+ // Adding values should get picked up in both.
+ count.add(1);
+ keyed.add("a", 1);
+ keyed.add("b", 1);
+ classic = TelemetrySession.getPayload();
+ subsession = TelemetrySession.getPayload("environment-change");
+
+ Assert.ok(COUNT_ID in classic.histograms);
+ Assert.ok(COUNT_ID in subsession.histograms);
+ Assert.ok(KEYED_ID in classic.keyedHistograms);
+ Assert.ok(KEYED_ID in subsession.keyedHistograms);
+ Assert.equal(classic.histograms[COUNT_ID].sum, 1);
+ Assert.equal(classic.keyedHistograms[KEYED_ID]["a"].sum, 1);
+ Assert.equal(classic.keyedHistograms[KEYED_ID]["b"].sum, 1);
+
+ checkHistograms(classic.histograms, subsession.histograms);
+ checkKeyedHistograms(classic.keyedHistograms, subsession.keyedHistograms);
+
+ // Values should still reset properly.
+ count.clear();
+ keyed.clear();
+ classic = TelemetrySession.getPayload();
+ subsession = TelemetrySession.getPayload("environment-change");
+
+ Assert.ok(!(COUNT_ID in classic.histograms));
+ Assert.ok(!(COUNT_ID in subsession.histograms));
+ Assert.ok(!(KEYED_ID in classic.keyedHistograms));
+ Assert.ok(!(KEYED_ID in subsession.keyedHistograms));
+
+ checkHistograms(classic.histograms, subsession.histograms);
+ checkKeyedHistograms(classic.keyedHistograms, subsession.keyedHistograms);
+
+ // Adding values should get picked up in both.
+ count.add(1);
+ keyed.add("a", 1);
+ keyed.add("b", 1);
+ classic = TelemetrySession.getPayload();
+ subsession = TelemetrySession.getPayload("environment-change");
+
+ Assert.ok(COUNT_ID in classic.histograms);
+ Assert.ok(COUNT_ID in subsession.histograms);
+ Assert.ok(KEYED_ID in classic.keyedHistograms);
+ Assert.ok(KEYED_ID in subsession.keyedHistograms);
+ Assert.equal(classic.histograms[COUNT_ID].sum, 1);
+ Assert.equal(classic.keyedHistograms[KEYED_ID]["a"].sum, 1);
+ Assert.equal(classic.keyedHistograms[KEYED_ID]["b"].sum, 1);
+
+ checkHistograms(classic.histograms, subsession.histograms);
+ checkKeyedHistograms(classic.keyedHistograms, subsession.keyedHistograms);
+
+ // We should be able to reset only the subsession histograms.
+ // First check that "snapshot and clear" still returns the old state...
+ classic = TelemetrySession.getPayload();
+ subsession = TelemetrySession.getPayload("environment-change", true);
+
+ let subsessionStartDate = new Date(classic.info.subsessionStartDate);
+ Assert.equal(subsessionStartDate.toISOString(), expectedDate.toISOString());
+ subsessionStartDate = new Date(subsession.info.subsessionStartDate);
+ Assert.equal(subsessionStartDate.toISOString(), expectedDate.toISOString());
+ checkHistograms(classic.histograms, subsession.histograms);
+ checkKeyedHistograms(classic.keyedHistograms, subsession.keyedHistograms);
+
+ // ... then check that the next snapshot shows the subsession
+ // histograms got reset.
+ classic = TelemetrySession.getPayload();
+ subsession = TelemetrySession.getPayload("environment-change");
+
+ Assert.ok(COUNT_ID in classic.histograms);
+ Assert.ok(COUNT_ID in subsession.histograms);
+ Assert.equal(classic.histograms[COUNT_ID].sum, 1);
+ Assert.equal(subsession.histograms[COUNT_ID].sum, 0);
+
+ Assert.ok(KEYED_ID in classic.keyedHistograms);
+ Assert.ok(!(KEYED_ID in subsession.keyedHistograms));
+ Assert.equal(classic.keyedHistograms[KEYED_ID]["a"].sum, 1);
+ Assert.equal(classic.keyedHistograms[KEYED_ID]["b"].sum, 1);
+
+ // Adding values should get picked up in both again.
+ count.add(1);
+ keyed.add("a", 1);
+ keyed.add("b", 1);
+ classic = TelemetrySession.getPayload();
+ subsession = TelemetrySession.getPayload("environment-change");
+
+ Assert.ok(COUNT_ID in classic.histograms);
+ Assert.ok(COUNT_ID in subsession.histograms);
+ Assert.equal(classic.histograms[COUNT_ID].sum, 2);
+ Assert.equal(subsession.histograms[COUNT_ID].sum, 1);
+
+ Assert.ok(KEYED_ID in classic.keyedHistograms);
+ Assert.ok(KEYED_ID in subsession.keyedHistograms);
+ Assert.equal(classic.keyedHistograms[KEYED_ID]["a"].sum, 2);
+ Assert.equal(classic.keyedHistograms[KEYED_ID]["b"].sum, 2);
+ Assert.equal(subsession.keyedHistograms[KEYED_ID]["a"].sum, 1);
+ Assert.equal(subsession.keyedHistograms[KEYED_ID]["b"].sum, 1);
+});
+
+add_task(function* test_checkSubsessionData() {
+ if (gIsAndroid) {
+ // We don't support subsessions yet on Android.
+ return;
+ }
+
+ // Keep track of the active ticks count if the session recorder is available.
+ let sessionRecorder = TelemetryController.getSessionRecorder();
+ let activeTicksAtSubsessionStart = sessionRecorder.activeTicks;
+ let expectedActiveTicks = activeTicksAtSubsessionStart;
+
+ incrementActiveTicks = () => {
+ sessionRecorder.incrementActiveTicks();
+ ++expectedActiveTicks;
+ }
+
+ yield TelemetryController.testReset();
+
+ // Both classic and subsession payload data should be the same on the first subsession.
+ incrementActiveTicks();
+ let classic = TelemetrySession.getPayload();
+ let subsession = TelemetrySession.getPayload("environment-change");
+ Assert.equal(classic.simpleMeasurements.activeTicks, expectedActiveTicks,
+ "Classic pings must count active ticks since the beginning of the session.");
+ Assert.equal(subsession.simpleMeasurements.activeTicks, expectedActiveTicks,
+ "Subsessions must count active ticks as classic pings on the first subsession.");
+
+ // Start a new subsession and check that the active ticks are correctly reported.
+ incrementActiveTicks();
+ activeTicksAtSubsessionStart = sessionRecorder.activeTicks;
+ classic = TelemetrySession.getPayload();
+ subsession = TelemetrySession.getPayload("environment-change", true);
+ Assert.equal(classic.simpleMeasurements.activeTicks, expectedActiveTicks,
+ "Classic pings must count active ticks since the beginning of the session.");
+ Assert.equal(subsession.simpleMeasurements.activeTicks, expectedActiveTicks,
+ "Pings must not loose the tick count when starting a new subsession.");
+
+ // Get a new subsession payload without clearing the subsession.
+ incrementActiveTicks();
+ classic = TelemetrySession.getPayload();
+ subsession = TelemetrySession.getPayload("environment-change");
+ Assert.equal(classic.simpleMeasurements.activeTicks, expectedActiveTicks,
+ "Classic pings must count active ticks since the beginning of the session.");
+ Assert.equal(subsession.simpleMeasurements.activeTicks,
+ expectedActiveTicks - activeTicksAtSubsessionStart,
+ "Subsessions must count active ticks since the last new subsession.");
+});
+
+// Checks the "daily" main ping: it must fire once the fake clock crosses
+// midnight, carry the accumulated histogram state, and reset the subsession
+// histograms after each collection.
+add_task(function* test_dailyCollection() {
+  if (gIsAndroid) {
+    // We don't do daily collections yet on Android.
+    return;
+  }
+
+  let now = new Date(2030, 1, 1, 12, 0, 0);
+  let nowDay = new Date(2030, 1, 1, 0, 0, 0);
+  let schedulerTickCallback = null;
+
+  PingServer.clearRequests();
+
+  fakeNow(now);
+
+  // Fake scheduler functions to control daily collection flow in tests.
+  fakeSchedulerTimer(callback => schedulerTickCallback = callback, () => {});
+
+  // Init and check timer.
+  yield TelemetryStorage.testClearPendingPings();
+  yield TelemetryController.testSetup();
+  TelemetrySend.setServer("http://localhost:" + PingServer.port);
+
+  // Set histograms to expected state.
+  const COUNT_ID = "TELEMETRY_TEST_COUNT";
+  const KEYED_ID = "TELEMETRY_TEST_KEYED_COUNT";
+  const count = Telemetry.getHistogramById(COUNT_ID);
+  const keyed = Telemetry.getKeyedHistogramById(KEYED_ID);
+
+  count.clear();
+  keyed.clear();
+  count.add(1);
+  keyed.add("a", 1);
+  keyed.add("b", 1);
+  keyed.add("b", 1);
+
+  // Make sure the daily ping gets triggered.
+  let expectedDate = nowDay;
+  now = futureDate(nowDay, MS_IN_ONE_DAY);
+  fakeNow(now);
+
+  Assert.ok(!!schedulerTickCallback);
+  // Run a scheduler tick: it should trigger the daily ping.
+  yield schedulerTickCallback();
+
+  // Collect the daily ping.
+  let ping = yield PingServer.promiseNextPing();
+  Assert.ok(!!ping);
+
+  Assert.equal(ping.type, PING_TYPE_MAIN);
+  Assert.equal(ping.payload.info.reason, REASON_DAILY);
+  let subsessionStartDate = new Date(ping.payload.info.subsessionStartDate);
+  Assert.equal(subsessionStartDate.toISOString(), expectedDate.toISOString());
+
+  // "b" was added twice above, hence a sum of 2.
+  Assert.equal(ping.payload.histograms[COUNT_ID].sum, 1);
+  Assert.equal(ping.payload.keyedHistograms[KEYED_ID]["a"].sum, 1);
+  Assert.equal(ping.payload.keyedHistograms[KEYED_ID]["b"].sum, 2);
+
+  // The daily ping is rescheduled for "tomorrow".
+  expectedDate = futureDate(expectedDate, MS_IN_ONE_DAY);
+  now = futureDate(now, MS_IN_ONE_DAY);
+  fakeNow(now);
+
+  // Run a scheduler tick. Trigger and collect another ping. The histograms should be reset.
+  yield schedulerTickCallback();
+
+  ping = yield PingServer.promiseNextPing();
+  Assert.ok(!!ping);
+
+  Assert.equal(ping.type, PING_TYPE_MAIN);
+  Assert.equal(ping.payload.info.reason, REASON_DAILY);
+  subsessionStartDate = new Date(ping.payload.info.subsessionStartDate);
+  Assert.equal(subsessionStartDate.toISOString(), expectedDate.toISOString());
+
+  // No values were recorded since the previous daily collection, so the
+  // count histogram is empty and the keyed histogram is absent entirely.
+  Assert.equal(ping.payload.histograms[COUNT_ID].sum, 0);
+  Assert.ok(!(KEYED_ID in ping.payload.keyedHistograms));
+
+  // Trigger and collect another daily ping, with the histograms being set again.
+  count.add(1);
+  keyed.add("a", 1);
+  keyed.add("b", 1);
+
+  // The daily ping is rescheduled for "tomorrow".
+  expectedDate = futureDate(expectedDate, MS_IN_ONE_DAY);
+  now = futureDate(now, MS_IN_ONE_DAY);
+  fakeNow(now);
+
+  yield schedulerTickCallback();
+  ping = yield PingServer.promiseNextPing();
+  Assert.ok(!!ping);
+
+  Assert.equal(ping.type, PING_TYPE_MAIN);
+  Assert.equal(ping.payload.info.reason, REASON_DAILY);
+  subsessionStartDate = new Date(ping.payload.info.subsessionStartDate);
+  Assert.equal(subsessionStartDate.toISOString(), expectedDate.toISOString());
+
+  Assert.equal(ping.payload.histograms[COUNT_ID].sum, 1);
+  Assert.equal(ping.payload.keyedHistograms[KEYED_ID]["a"].sum, 1);
+  Assert.equal(ping.payload.keyedHistograms[KEYED_ID]["b"].sum, 1);
+
+  // Shutdown to cleanup the aborted-session if it gets created.
+  yield TelemetryController.testShutdown();
+});
+
+// Checks that only one daily ping is sent per day: after the midnight
+// collection, a later tick on the same day must not produce a duplicate.
+add_task(function* test_dailyDuplication() {
+  if (gIsAndroid) {
+    // We don't do daily collections yet on Android.
+    return;
+  }
+
+  yield TelemetrySend.reset();
+  yield TelemetryStorage.testClearPendingPings();
+  PingServer.clearRequests();
+
+  let schedulerTickCallback = null;
+  let now = new Date(2030, 1, 1, 0, 0, 0);
+  fakeNow(now);
+  // Fake scheduler functions to control daily collection flow in tests.
+  fakeSchedulerTimer(callback => schedulerTickCallback = callback, () => {});
+  yield TelemetryController.testReset();
+
+  // Make sure the daily ping gets triggered at midnight.
+  // We need to make sure that we trigger this after the period where we wait for
+  // the user to become idle.
+  let firstDailyDue = new Date(2030, 1, 2, 0, 0, 0);
+  fakeNow(firstDailyDue);
+
+  // Run a scheduler tick: it should trigger the daily ping.
+  Assert.ok(!!schedulerTickCallback);
+  yield schedulerTickCallback();
+
+  // Get the first daily ping.
+  let ping = yield PingServer.promiseNextPing();
+  Assert.ok(!!ping);
+
+  Assert.equal(ping.type, PING_TYPE_MAIN);
+  Assert.equal(ping.payload.info.reason, REASON_DAILY);
+
+  // We don't expect to receive any other daily ping in this test, so assert if we do.
+  PingServer.registerPingHandler((req, res) => {
+    Assert.ok(false, "No more daily pings should be sent/received in this test.");
+  });
+
+  // Set the current time to a bit after midnight.
+  let secondDailyDue = new Date(firstDailyDue);
+  secondDailyDue.setHours(0);
+  secondDailyDue.setMinutes(15);
+  fakeNow(secondDailyDue);
+
+  // Run a scheduler tick: it should NOT trigger the daily ping.
+  Assert.ok(!!schedulerTickCallback);
+  yield schedulerTickCallback();
+
+  // Shutdown to cleanup the aborted-session if it gets created.
+  PingServer.resetPingHandler();
+  yield TelemetryController.testShutdown();
+});
+
+// Checks the overdue-daily path: a tick before midnight sends nothing, but a
+// tick far past the last daily collection triggers the ping even away from
+// midnight.
+add_task(function* test_dailyOverdue() {
+  if (gIsAndroid) {
+    // We don't do daily collections yet on Android.
+    return;
+  }
+
+  let schedulerTickCallback = null;
+  let now = new Date(2030, 1, 1, 11, 0, 0);
+  fakeNow(now);
+  // Fake scheduler functions to control daily collection flow in tests.
+  fakeSchedulerTimer(callback => schedulerTickCallback = callback, () => {});
+  yield TelemetryStorage.testClearPendingPings();
+  yield TelemetryController.testReset();
+
+  // Skip one hour ahead: nothing should be due.
+  now.setHours(now.getHours() + 1);
+  fakeNow(now);
+
+  // Assert if we receive something!
+  PingServer.registerPingHandler((req, res) => {
+    Assert.ok(false, "No daily ping should be received if not overdue!.");
+  });
+
+  // This tick should not trigger any daily ping.
+  Assert.ok(!!schedulerTickCallback);
+  yield schedulerTickCallback();
+
+  // Restore the non asserting ping handler.
+  PingServer.resetPingHandler();
+  PingServer.clearRequests();
+
+  // Simulate an overdue ping: we're not close to midnight, but the last daily ping
+  // time is too long ago.
+  let dailyOverdue = new Date(2030, 1, 2, 13, 0, 0);
+  fakeNow(dailyOverdue);
+
+  // Run a scheduler tick: it should trigger the daily ping.
+  Assert.ok(!!schedulerTickCallback);
+  yield schedulerTickCallback();
+
+  // Get the first daily ping.
+  let ping = yield PingServer.promiseNextPing();
+  Assert.ok(!!ping);
+
+  Assert.equal(ping.type, PING_TYPE_MAIN);
+  Assert.equal(ping.payload.info.reason, REASON_DAILY);
+
+  // Shutdown to cleanup the aborted-session if it gets created.
+  yield TelemetryController.testShutdown();
+});
+
+// Checks that changing a watched preference splits the subsession: an
+// "environment-change" main ping is sent carrying the pre-change environment
+// and the histogram state, and subsession histograms reset afterwards.
+add_task(function* test_environmentChange() {
+  if (gIsAndroid) {
+    // We don't split subsessions on environment changes yet on Android.
+    return;
+  }
+
+  yield TelemetryStorage.testClearPendingPings();
+  PingServer.clearRequests();
+
+  let now = fakeNow(2040, 1, 1, 12, 0, 0);
+  gMonotonicNow = fakeMonotonicNow(gMonotonicNow + 10 * MILLISECONDS_PER_MINUTE);
+
+  const PREF_TEST = "toolkit.telemetry.test.pref1";
+  Preferences.reset(PREF_TEST);
+
+  const PREFS_TO_WATCH = new Map([
+    [PREF_TEST, {what: TelemetryEnvironment.RECORD_PREF_VALUE}],
+  ]);
+
+  // Setup.
+  yield TelemetryController.testReset();
+  TelemetrySend.setServer("http://localhost:" + PingServer.port);
+  TelemetryEnvironment.testWatchPreferences(PREFS_TO_WATCH);
+
+  // Set histograms to expected state.
+  const COUNT_ID = "TELEMETRY_TEST_COUNT";
+  const KEYED_ID = "TELEMETRY_TEST_KEYED_COUNT";
+  const count = Telemetry.getHistogramById(COUNT_ID);
+  const keyed = Telemetry.getKeyedHistogramById(KEYED_ID);
+
+  count.clear();
+  keyed.clear();
+  count.add(1);
+  keyed.add("a", 1);
+  keyed.add("b", 1);
+
+  // Trigger and collect environment-change ping.
+  gMonotonicNow = fakeMonotonicNow(gMonotonicNow + 10 * MILLISECONDS_PER_MINUTE);
+  let startDay = truncateDateToDays(now);
+  now = fakeNow(futureDate(now, 10 * MILLISECONDS_PER_MINUTE));
+
+  Preferences.set(PREF_TEST, 1);
+  let ping = yield PingServer.promiseNextPing();
+  Assert.ok(!!ping);
+
+  Assert.equal(ping.type, PING_TYPE_MAIN);
+  // The ping carries the environment from *before* the change, so the watched
+  // pref must not be recorded yet.
+  Assert.equal(ping.environment.settings.userPrefs[PREF_TEST], undefined);
+  Assert.equal(ping.payload.info.reason, REASON_ENVIRONMENT_CHANGE);
+  let subsessionStartDate = new Date(ping.payload.info.subsessionStartDate);
+  Assert.equal(subsessionStartDate.toISOString(), startDay.toISOString());
+
+  Assert.equal(ping.payload.histograms[COUNT_ID].sum, 1);
+  Assert.equal(ping.payload.keyedHistograms[KEYED_ID]["a"].sum, 1);
+
+  // Trigger and collect another ping. The histograms should be reset.
+  startDay = truncateDateToDays(now);
+  gMonotonicNow = fakeMonotonicNow(gMonotonicNow + 10 * MILLISECONDS_PER_MINUTE);
+  now = fakeNow(futureDate(now, 10 * MILLISECONDS_PER_MINUTE));
+
+  Preferences.set(PREF_TEST, 2);
+  ping = yield PingServer.promiseNextPing();
+  Assert.ok(!!ping);
+
+  Assert.equal(ping.type, PING_TYPE_MAIN);
+  // This second ping's environment reflects the first change (pref == 1).
+  Assert.equal(ping.environment.settings.userPrefs[PREF_TEST], 1);
+  Assert.equal(ping.payload.info.reason, REASON_ENVIRONMENT_CHANGE);
+  subsessionStartDate = new Date(ping.payload.info.subsessionStartDate);
+  Assert.equal(subsessionStartDate.toISOString(), startDay.toISOString());
+
+  Assert.equal(ping.payload.histograms[COUNT_ID].sum, 0);
+  Assert.ok(!(KEYED_ID in ping.payload.keyedHistograms));
+});
+
+// Checks that shutting Telemetry down stores the session pings, and that they
+// are sent with the correct reason ("saved-session" / "shutdown") and client
+// id on the next startup.
+add_task(function* test_savedPingsOnShutdown() {
+  // On desktop, we expect both "saved-session" and "shutdown" pings. We only expect
+  // the former on Android.
+  const expectedPingCount = (gIsAndroid) ? 1 : 2;
+  // Assure that we store the ping properly when saving sessions on shutdown.
+  // We make the TelemetryController shutdown to trigger a session save.
+  const dir = TelemetryStorage.pingDirectoryPath;
+  yield OS.File.removeDir(dir, {ignoreAbsent: true});
+  yield OS.File.makeDir(dir);
+  yield TelemetryController.testShutdown();
+
+  PingServer.clearRequests();
+  yield TelemetryController.testReset();
+
+  const pings = yield PingServer.promiseNextPings(expectedPingCount);
+
+  for (let ping of pings) {
+    Assert.ok("type" in ping);
+
+    let expectedReason =
+      (ping.type == PING_TYPE_SAVED_SESSION) ? REASON_SAVED_SESSION : REASON_SHUTDOWN;
+
+    checkPingFormat(ping, ping.type, true, true);
+    Assert.equal(ping.payload.info.reason, expectedReason);
+    Assert.equal(ping.clientId, gClientID);
+  }
+});
+
+// Checks that session-state.json is loaded on startup, the profile subsession
+// counter continues from the stored value, and the updated state is written
+// back on shutdown.
+add_task(function* test_savedSessionData() {
+  // Create the directory which will contain the data file, if it doesn't already
+  // exist.
+  yield OS.File.makeDir(DATAREPORTING_PATH);
+  getHistogram("TELEMETRY_SESSIONDATA_FAILED_LOAD").clear();
+  getHistogram("TELEMETRY_SESSIONDATA_FAILED_PARSE").clear();
+  getHistogram("TELEMETRY_SESSIONDATA_FAILED_VALIDATION").clear();
+
+  // Write test data to the session data file.
+  const dataFilePath = OS.Path.join(DATAREPORTING_PATH, "session-state.json");
+  const sessionState = {
+    sessionId: null,
+    subsessionId: null,
+    profileSubsessionCounter: 3785,
+  };
+  yield CommonUtils.writeJSON(sessionState, dataFilePath);
+
+  const PREF_TEST = "toolkit.telemetry.test.pref1";
+  Preferences.reset(PREF_TEST);
+  const PREFS_TO_WATCH = new Map([
+    [PREF_TEST, {what: TelemetryEnvironment.RECORD_PREF_VALUE}],
+  ]);
+
+  // We expect one new subsession when starting TelemetrySession and one after triggering
+  // an environment change.
+  const expectedSubsessions = sessionState.profileSubsessionCounter + 2;
+  const expectedSessionUUID = "ff602e52-47a1-b7e8-4c1a-ffffffffc87a";
+  const expectedSubsessionUUID = "009fd1ad-b85e-4817-b3e5-000000003785";
+  fakeGenerateUUID(() => expectedSessionUUID, () => expectedSubsessionUUID);
+
+  if (gIsAndroid) {
+    // We don't support subsessions yet on Android, so skip the next checks.
+    return;
+  }
+
+  // Start TelemetrySession so that it loads the session data file.
+  yield TelemetryController.testReset();
+  Assert.equal(0, getSnapshot("TELEMETRY_SESSIONDATA_FAILED_LOAD").sum);
+  Assert.equal(0, getSnapshot("TELEMETRY_SESSIONDATA_FAILED_PARSE").sum);
+  Assert.equal(0, getSnapshot("TELEMETRY_SESSIONDATA_FAILED_VALIDATION").sum);
+
+  // Watch a test preference, trigger an environment change and wait for it to propagate.
+  // _watchPreferences triggers a subsession notification
+  gMonotonicNow = fakeMonotonicNow(gMonotonicNow + 10 * MILLISECONDS_PER_MINUTE);
+  fakeNow(new Date(2050, 1, 1, 12, 0, 0));
+  TelemetryEnvironment.testWatchPreferences(PREFS_TO_WATCH);
+  let changePromise = new Promise(resolve =>
+    TelemetryEnvironment.registerChangeListener("test_fake_change", resolve));
+  Preferences.set(PREF_TEST, 1);
+  yield changePromise;
+  TelemetryEnvironment.unregisterChangeListener("test_fake_change");
+
+  let payload = TelemetrySession.getPayload();
+  Assert.equal(payload.info.profileSubsessionCounter, expectedSubsessions);
+  yield TelemetryController.testShutdown();
+
+  // Restore the UUID generator so we don't mess with other tests.
+  fakeGenerateUUID(generateUUID, generateUUID);
+
+  // Load back the serialised session data.
+  let data = yield CommonUtils.readJSON(dataFilePath);
+  Assert.equal(data.profileSubsessionCounter, expectedSubsessions);
+  Assert.equal(data.sessionId, expectedSessionUUID);
+  Assert.equal(data.subsessionId, expectedSubsessionUUID);
+});
+
+// Checks that a very short session (shutdown before setup finishes) still
+// persists session state, so the next startup can chain previousSessionId /
+// previousSubsessionId and keep the subsession counter consistent.
+add_task(function* test_sessionData_ShortSession() {
+  if (gIsAndroid) {
+    // We don't support subsessions yet on Android, so skip the next checks.
+    return;
+  }
+
+  const SESSION_STATE_PATH = OS.Path.join(DATAREPORTING_PATH, "session-state.json");
+
+  // Shut down Telemetry and remove the session state file.
+  yield TelemetryController.testReset();
+  yield OS.File.remove(SESSION_STATE_PATH, { ignoreAbsent: true });
+  getHistogram("TELEMETRY_SESSIONDATA_FAILED_LOAD").clear();
+  getHistogram("TELEMETRY_SESSIONDATA_FAILED_PARSE").clear();
+  getHistogram("TELEMETRY_SESSIONDATA_FAILED_VALIDATION").clear();
+
+  const expectedSessionUUID = "ff602e52-47a1-b7e8-4c1a-ffffffffc87a";
+  const expectedSubsessionUUID = "009fd1ad-b85e-4817-b3e5-000000003785";
+  fakeGenerateUUID(() => expectedSessionUUID, () => expectedSubsessionUUID);
+
+  // We intentionally don't wait for the setup to complete and shut down to simulate
+  // short sessions. We expect the profile subsession counter to be 1.
+  TelemetryController.testReset();
+  yield TelemetryController.testShutdown();
+
+  // The state file was removed above, so exactly one failed load is expected.
+  Assert.equal(1, getSnapshot("TELEMETRY_SESSIONDATA_FAILED_LOAD").sum);
+  Assert.equal(0, getSnapshot("TELEMETRY_SESSIONDATA_FAILED_PARSE").sum);
+  Assert.equal(0, getSnapshot("TELEMETRY_SESSIONDATA_FAILED_VALIDATION").sum);
+
+  // Restore the UUID generation functions.
+  fakeGenerateUUID(generateUUID, generateUUID);
+
+  // Start TelemetryController so that it loads the session data file. We expect the profile
+  // subsession counter to be incremented by 1 again.
+  yield TelemetryController.testReset();
+
+  // We expect 2 profile subsession counter updates.
+  let payload = TelemetrySession.getPayload();
+  Assert.equal(payload.info.profileSubsessionCounter, 2);
+  Assert.equal(payload.info.previousSessionId, expectedSessionUUID);
+  Assert.equal(payload.info.previousSubsessionId, expectedSubsessionUUID);
+  Assert.equal(1, getSnapshot("TELEMETRY_SESSIONDATA_FAILED_LOAD").sum);
+  Assert.equal(0, getSnapshot("TELEMETRY_SESSIONDATA_FAILED_PARSE").sum);
+  Assert.equal(0, getSnapshot("TELEMETRY_SESSIONDATA_FAILED_VALIDATION").sum);
+});
+
+add_task(function* test_invalidSessionData() {
+ // Create the directory which will contain the data file, if it doesn't already
+ // exist.
+ yield OS.File.makeDir(DATAREPORTING_PATH);
+ getHistogram("TELEMETRY_SESSIONDATA_FAILED_LOAD").clear();
+ getHistogram("TELEMETRY_SESSIONDATA_FAILED_PARSE").clear();
+ getHistogram("TELEMETRY_SESSIONDATA_FAILED_VALIDATION").clear();
+
+ // Write test data to the session data file. This should fail to parse.
+ const dataFilePath = OS.Path.join(DATAREPORTING_PATH, "session-state.json");
+ const unparseableData = "{asdf:@äü";
+ OS.File.writeAtomic(dataFilePath, unparseableData,
+ {encoding: "utf-8", tmpPath: dataFilePath + ".tmp"});
+
+ // Start TelemetryController so that it loads the session data file.
+ yield TelemetryController.testReset();
+
+ // The session data file should not load. Only expect the current subsession.
+ Assert.equal(0, getSnapshot("TELEMETRY_SESSIONDATA_FAILED_LOAD").sum);
+ Assert.equal(1, getSnapshot("TELEMETRY_SESSIONDATA_FAILED_PARSE").sum);
+ Assert.equal(0, getSnapshot("TELEMETRY_SESSIONDATA_FAILED_VALIDATION").sum);
+
+ // Write test data to the session data file. This should fail validation.
+ const sessionState = {
+ profileSubsessionCounter: "not-a-number?",
+ someOtherField: 12,
+ };
+ yield CommonUtils.writeJSON(sessionState, dataFilePath);
+
+ // The session data file should not load. Only expect the current subsession.
+ const expectedSubsessions = 1;
+ const expectedSessionUUID = "ff602e52-47a1-b7e8-4c1a-ffffffffc87a";
+ const expectedSubsessionUUID = "009fd1ad-b85e-4817-b3e5-000000003785";
+ fakeGenerateUUID(() => expectedSessionUUID, () => expectedSubsessionUUID);
+
+ // Start TelemetryController so that it loads the session data file.
+ yield TelemetryController.testReset();
+
+ let payload = TelemetrySession.getPayload();
+ Assert.equal(payload.info.profileSubsessionCounter, expectedSubsessions);
+ Assert.equal(0, getSnapshot("TELEMETRY_SESSIONDATA_FAILED_LOAD").sum);
+ Assert.equal(1, getSnapshot("TELEMETRY_SESSIONDATA_FAILED_PARSE").sum);
+ Assert.equal(1, getSnapshot("TELEMETRY_SESSIONDATA_FAILED_VALIDATION").sum);
+
+ yield TelemetryController.testShutdown();
+
+ // Restore the UUID generator so we don't mess with other tests.
+ fakeGenerateUUID(generateUUID, generateUUID);
+
+ // Load back the serialised session data.
+ let data = yield CommonUtils.readJSON(dataFilePath);
+ Assert.equal(data.profileSubsessionCounter, expectedSubsessions);
+ Assert.equal(data.sessionId, expectedSessionUUID);
+ Assert.equal(data.subsessionId, expectedSubsessionUUID);
+});
+
+// Checks the aborted-session ping lifecycle: periodic saves overwrite the
+// file, a clean shutdown removes it, and a leftover file is picked up and
+// sent as a main ping with reason "aborted-session" on the next startup.
+add_task(function* test_abortedSession() {
+  if (gIsAndroid || gIsGonk) {
+    // We don't have the aborted session ping here.
+    return;
+  }
+
+  const ABORTED_FILE = OS.Path.join(DATAREPORTING_PATH, ABORTED_PING_FILE_NAME);
+
+  // Make sure the aborted sessions directory does not exist to test its creation.
+  yield OS.File.removeDir(DATAREPORTING_PATH, { ignoreAbsent: true });
+
+  let schedulerTickCallback = null;
+  let now = new Date(2040, 1, 1, 0, 0, 0);
+  fakeNow(now);
+  // Fake scheduler functions to control aborted-session flow in tests.
+  fakeSchedulerTimer(callback => schedulerTickCallback = callback, () => {});
+  yield TelemetryController.testReset();
+
+  Assert.ok((yield OS.File.exists(DATAREPORTING_PATH)),
+            "Telemetry must create the aborted session directory when starting.");
+
+  // Fake now again so that the scheduled aborted-session save takes place.
+  now = futureDate(now, ABORTED_SESSION_UPDATE_INTERVAL_MS);
+  fakeNow(now);
+  // The first aborted session checkpoint must take place right after the initialisation.
+  Assert.ok(!!schedulerTickCallback);
+  // Execute one scheduler tick.
+  yield schedulerTickCallback();
+  // Check that the aborted session is due at the correct time.
+  Assert.ok((yield OS.File.exists(ABORTED_FILE)),
+            "There must be an aborted session ping.");
+
+  // This ping is not yet in the pending pings folder, so we can't access it using
+  // TelemetryStorage.popPendingPings().
+  let pingContent = yield OS.File.read(ABORTED_FILE, { encoding: "utf-8" });
+  let abortedSessionPing = JSON.parse(pingContent);
+
+  // Validate the ping.
+  checkPingFormat(abortedSessionPing, PING_TYPE_MAIN, true, true);
+  Assert.equal(abortedSessionPing.payload.info.reason, REASON_ABORTED_SESSION);
+
+  // Trigger another aborted-session ping and check that it overwrites the previous one.
+  now = futureDate(now, ABORTED_SESSION_UPDATE_INTERVAL_MS);
+  fakeNow(now);
+  yield schedulerTickCallback();
+
+  pingContent = yield OS.File.read(ABORTED_FILE, { encoding: "utf-8" });
+  let updatedAbortedSessionPing = JSON.parse(pingContent);
+  checkPingFormat(updatedAbortedSessionPing, PING_TYPE_MAIN, true, true);
+  Assert.equal(updatedAbortedSessionPing.payload.info.reason, REASON_ABORTED_SESSION);
+  Assert.notEqual(abortedSessionPing.id, updatedAbortedSessionPing.id);
+  Assert.notEqual(abortedSessionPing.creationDate, updatedAbortedSessionPing.creationDate);
+
+  yield TelemetryController.testShutdown();
+  Assert.ok(!(yield OS.File.exists(ABORTED_FILE)),
+            "No aborted session ping must be available after a shutdown.");
+
+  // Write the ping to the aborted-session file. TelemetrySession will add it to the
+  // saved pings directory when it starts.
+  yield TelemetryStorage.savePingToFile(abortedSessionPing, ABORTED_FILE, false);
+  Assert.ok((yield OS.File.exists(ABORTED_FILE)),
+            "The aborted session ping must exist in the aborted session ping directory.");
+
+  yield TelemetryStorage.testClearPendingPings();
+  PingServer.clearRequests();
+  yield TelemetryController.testReset();
+
+  Assert.ok(!(yield OS.File.exists(ABORTED_FILE)),
+            "The aborted session ping must be removed from the aborted session ping directory.");
+
+  // Restarting Telemetry again to trigger sending pings in TelemetrySend.
+  yield TelemetryController.testReset();
+
+  // We should have received an aborted-session ping.
+  const receivedPing = yield PingServer.promiseNextPing();
+  Assert.equal(receivedPing.type, PING_TYPE_MAIN, "Should have the correct type");
+  Assert.equal(receivedPing.payload.info.reason, REASON_ABORTED_SESSION, "Ping should have the correct reason");
+
+  yield TelemetryController.testShutdown();
+});
+
+// Checks that shutdown survives a missing aborted-session file: removing the
+// file before shutting down must not raise (e.g. file-not-found) errors.
+add_task(function* test_abortedSession_Shutdown() {
+  if (gIsAndroid || gIsGonk) {
+    // We don't have the aborted session ping here.
+    return;
+  }
+
+  const ABORTED_FILE = OS.Path.join(DATAREPORTING_PATH, ABORTED_PING_FILE_NAME);
+
+  let schedulerTickCallback = null;
+  let now = fakeNow(2040, 1, 1, 0, 0, 0);
+  // Fake scheduler functions to control aborted-session flow in tests.
+  fakeSchedulerTimer(callback => schedulerTickCallback = callback, () => {});
+  yield TelemetryController.testReset();
+
+  Assert.ok((yield OS.File.exists(DATAREPORTING_PATH)),
+            "Telemetry must create the aborted session directory when starting.");
+
+  // Fake now again so that the scheduled aborted-session save takes place.
+  fakeNow(futureDate(now, ABORTED_SESSION_UPDATE_INTERVAL_MS));
+  // The first aborted session checkpoint must take place right after the initialisation.
+  Assert.ok(!!schedulerTickCallback);
+  // Execute one scheduler tick.
+  yield schedulerTickCallback();
+  // Check that the aborted session is due at the correct time.
+  Assert.ok((yield OS.File.exists(ABORTED_FILE)), "There must be an aborted session ping.");
+
+  // Remove the aborted session file and then shut down to make sure exceptions (e.g file
+  // not found) do not compromise the shutdown.
+  yield OS.File.remove(ABORTED_FILE);
+
+  yield TelemetryController.testShutdown();
+});
+
+// Checks that an aborted-session save scheduled around midnight coalesces with
+// the daily collection: both must share the same session and subsession ids.
+add_task(function* test_abortedDailyCoalescing() {
+  if (gIsAndroid || gIsGonk) {
+    // We don't have the aborted session or the daily ping here.
+    return;
+  }
+
+  const ABORTED_FILE = OS.Path.join(DATAREPORTING_PATH, ABORTED_PING_FILE_NAME);
+
+  // Make sure the aborted sessions directory does not exist to test its creation.
+  yield OS.File.removeDir(DATAREPORTING_PATH, { ignoreAbsent: true });
+
+  let schedulerTickCallback = null;
+  PingServer.clearRequests();
+
+  let nowDate = new Date(2009, 10, 18, 0, 0, 0);
+  fakeNow(nowDate);
+
+  // Fake scheduler functions to control aborted-session flow in tests.
+  fakeSchedulerTimer(callback => schedulerTickCallback = callback, () => {});
+  yield TelemetryStorage.testClearPendingPings();
+  PingServer.clearRequests();
+  yield TelemetryController.testReset();
+
+  Assert.ok((yield OS.File.exists(DATAREPORTING_PATH)),
+            "Telemetry must create the aborted session directory when starting.");
+
+  // Delay the callback around midnight so that the aborted-session ping gets merged with the
+  // daily ping.
+  let dailyDueDate = futureDate(nowDate, MS_IN_ONE_DAY);
+  fakeNow(dailyDueDate);
+  // Trigger both the daily ping and the saved-session.
+  Assert.ok(!!schedulerTickCallback);
+  // Execute one scheduler tick.
+  yield schedulerTickCallback();
+
+  // Wait for the daily ping.
+  let dailyPing = yield PingServer.promiseNextPing();
+  Assert.equal(dailyPing.payload.info.reason, REASON_DAILY);
+
+  // Check that an aborted session ping was also written to disk.
+  Assert.ok((yield OS.File.exists(ABORTED_FILE)),
+            "There must be an aborted session ping.");
+
+  // Read aborted session ping and check that the session/subsession ids equal the
+  // ones in the daily ping.
+  let pingContent = yield OS.File.read(ABORTED_FILE, { encoding: "utf-8" });
+  let abortedSessionPing = JSON.parse(pingContent);
+  Assert.equal(abortedSessionPing.payload.info.sessionId, dailyPing.payload.info.sessionId);
+  Assert.equal(abortedSessionPing.payload.info.subsessionId, dailyPing.payload.info.subsessionId);
+
+  yield TelemetryController.testShutdown();
+});
+
+// Simulates the machine sleeping past one or more midnights: a late scheduler
+// tick and a "wake_notification" must each still produce a daily ping with
+// the correct creation date.
+add_task(function* test_schedulerComputerSleep() {
+  if (gIsAndroid || gIsGonk) {
+    // We don't have the aborted session or the daily ping here.
+    return;
+  }
+
+  const ABORTED_FILE = OS.Path.join(DATAREPORTING_PATH, ABORTED_PING_FILE_NAME);
+
+  yield TelemetryStorage.testClearPendingPings();
+  yield TelemetryController.testReset();
+  PingServer.clearRequests();
+
+  // Remove any aborted-session ping from the previous tests.
+  yield OS.File.removeDir(DATAREPORTING_PATH, { ignoreAbsent: true });
+
+  // Set a fake current date and start Telemetry.
+  let nowDate = fakeNow(2009, 10, 18, 0, 0, 0);
+  let schedulerTickCallback = null;
+  fakeSchedulerTimer(callback => schedulerTickCallback = callback, () => {});
+  yield TelemetryController.testReset();
+
+  // Set the current time 3 days in the future at midnight, before running the callback.
+  nowDate = fakeNow(futureDate(nowDate, 3 * MS_IN_ONE_DAY));
+  Assert.ok(!!schedulerTickCallback);
+  // Execute one scheduler tick.
+  yield schedulerTickCallback();
+
+  let dailyPing = yield PingServer.promiseNextPing();
+  Assert.equal(dailyPing.payload.info.reason, REASON_DAILY,
+               "The wake notification should have triggered a daily ping.");
+  Assert.equal(dailyPing.creationDate, nowDate.toISOString(),
+               "The daily ping date should be correct.");
+
+  Assert.ok((yield OS.File.exists(ABORTED_FILE)),
+            "There must be an aborted session ping.");
+
+  // Now also test if we are sending a daily ping if we wake up on the next
+  // day even when the timer doesn't trigger.
+  // This can happen due to timeouts not running out during sleep times,
+  // see bug 1262386, bug 1204823 et al.
+  // Note that we don't get wake notifications on Linux due to bug 758848.
+  nowDate = fakeNow(futureDate(nowDate, 1 * MS_IN_ONE_DAY));
+
+  // We emulate the mentioned timeout behavior by sending the wake notification
+  // instead of triggering the timeout callback.
+  // This should trigger a daily ping, because we passed midnight.
+  Services.obs.notifyObservers(null, "wake_notification", null);
+
+  dailyPing = yield PingServer.promiseNextPing();
+  Assert.equal(dailyPing.payload.info.reason, REASON_DAILY,
+               "The wake notification should have triggered a daily ping.");
+  Assert.equal(dailyPing.creationDate, nowDate.toISOString(),
+               "The daily ping date should be correct.");
+
+  yield TelemetryController.testShutdown();
+});
+
add_task(function* test_schedulerEnvironmentReschedules() {
  if (gIsAndroid || gIsGonk) {
    // We don't have the aborted session or the daily ping here.
    return;
  }

  // Reset the test preference.
  const PREF_TEST = "toolkit.telemetry.test.pref1";
  Preferences.reset(PREF_TEST);
  const PREFS_TO_WATCH = new Map([
    [PREF_TEST, {what: TelemetryEnvironment.RECORD_PREF_VALUE}],
  ]);

  yield TelemetryStorage.testClearPendingPings();
  PingServer.clearRequests();
  yield TelemetryController.testReset();

  // Set a fake current date and start Telemetry.
  let nowDate = fakeNow(2060, 10, 18, 0, 0, 0);
  gMonotonicNow = fakeMonotonicNow(gMonotonicNow + 10 * MILLISECONDS_PER_MINUTE);
  let schedulerTickCallback = null;
  // Capture the scheduler tick callback so it can be invoked manually below.
  fakeSchedulerTimer(callback => schedulerTickCallback = callback, () => {});
  yield TelemetryController.testReset();
  TelemetryEnvironment.testWatchPreferences(PREFS_TO_WATCH);

  // Set the current time at midnight.
  fakeNow(futureDate(nowDate, MS_IN_ONE_DAY));
  gMonotonicNow = fakeMonotonicNow(gMonotonicNow + 10 * MILLISECONDS_PER_MINUTE);

  // Trigger the environment change.
  Preferences.set(PREF_TEST, 1);

  // Wait for the environment-changed ping.
  yield PingServer.promiseNextPing();

  // We don't expect to receive any daily ping in this test, so assert if we do.
  PingServer.registerPingHandler((req, res) => {
    Assert.ok(false, "No ping should be sent/received in this test.");
  });

  // Execute one scheduler tick. It should not trigger a daily ping: the
  // environment-change ping at midnight is expected to have rescheduled it.
  Assert.ok(!!schedulerTickCallback);
  yield schedulerTickCallback();
  yield TelemetryController.testShutdown();
});
+
add_task(function* test_schedulerNothingDue() {
  if (gIsAndroid || gIsGonk) {
    // We don't have the aborted session or the daily ping here.
    return;
  }

  const ABORTED_FILE = OS.Path.join(DATAREPORTING_PATH, ABORTED_PING_FILE_NAME);

  // Remove any aborted-session ping from the previous tests.
  yield OS.File.removeDir(DATAREPORTING_PATH, { ignoreAbsent: true });
  yield TelemetryStorage.testClearPendingPings();
  yield TelemetryController.testReset();

  // We don't expect to receive any ping in this test, so assert if we do.
  PingServer.registerPingHandler((req, res) => {
    Assert.ok(false, "No ping should be sent/received in this test.");
  });

  // Set a current date/time away from midnight, so that the daily ping doesn't get
  // sent.
  let nowDate = new Date(2009, 10, 18, 11, 0, 0);
  fakeNow(nowDate);
  let schedulerTickCallback = null;
  // Capture the scheduler tick callback so it can be invoked manually.
  fakeSchedulerTimer(callback => schedulerTickCallback = callback, () => {});
  yield TelemetryController.testReset();

  // Delay the callback execution to a time when no ping should be due.
  let nothingDueDate = futureDate(nowDate, ABORTED_SESSION_UPDATE_INTERVAL_MS / 2);
  fakeNow(nothingDueDate);
  Assert.ok(!!schedulerTickCallback);
  // Execute one scheduler tick.
  yield schedulerTickCallback();

  // Check that no aborted session ping was written to disk.
  Assert.ok(!(yield OS.File.exists(ABORTED_FILE)));

  // Restore the default ping handler for the tests that follow.
  yield TelemetryController.testShutdown();
  PingServer.resetPingHandler();
});
+
add_task(function* test_pingExtendedStats() {
  // Payload sections that must only be present when extended recording is on.
  const EXTENDED_PAYLOAD_FIELDS = [
    "chromeHangs", "threadHangStats", "log", "slowSQL", "fileIOReports", "lateWrites",
    "addonHistograms", "addonDetails", "UIMeasurements", "webrtc"
  ];

  // Reset telemetry and disable sending extended statistics.
  yield TelemetryStorage.testClearPendingPings();
  PingServer.clearRequests();
  yield TelemetryController.testReset();
  Telemetry.canRecordExtended = false;

  yield sendPing();

  let ping = yield PingServer.promiseNextPing();
  checkPingFormat(ping, PING_TYPE_MAIN, true, true);

  // Check that the payload does not contain extended statistics fields.
  // (for...of over the field names; for...in would iterate array indices.)
  for (let field of EXTENDED_PAYLOAD_FIELDS) {
    Assert.ok(!(field in ping.payload),
              field + " must not be in the payload if the extended set is off.");
  }

  // We check this one separately so that we can reuse EXTENDED_PAYLOAD_FIELDS below, since
  // slowSQLStartup might not be there.
  Assert.ok(!("slowSQLStartup" in ping.payload),
            "slowSQLStartup must not be sent if the extended set is off");

  Assert.ok(!("addonManager" in ping.payload.simpleMeasurements),
            "addonManager must not be sent if the extended set is off.");
  Assert.ok(!("UITelemetry" in ping.payload.simpleMeasurements),
            "UITelemetry must not be sent if the extended set is off.");

  // Restore the preference.
  Telemetry.canRecordExtended = true;

  // Send a new ping that should contain the extended data.
  yield sendPing();
  ping = yield PingServer.promiseNextPing();
  checkPingFormat(ping, PING_TYPE_MAIN, true, true);

  // Check that the payload now contains extended statistics fields.
  for (let field of EXTENDED_PAYLOAD_FIELDS) {
    Assert.ok(field in ping.payload,
              field + " must be in the payload if the extended set is on.");
  }

  Assert.ok("addonManager" in ping.payload.simpleMeasurements,
            "addonManager must be sent if the extended set is on.");
  Assert.ok("UITelemetry" in ping.payload.simpleMeasurements,
            "UITelemetry must be sent if the extended set is on.");
});
+
add_task(function* test_schedulerUserIdle() {
  if (gIsAndroid || gIsGonk) {
    // We don't have the aborted session or the daily ping here.
    return;
  }

  // Expected tick intervals while the user is active (5 min) and idle (1 h).
  const SCHEDULER_TICK_INTERVAL_MS = 5 * 60 * 1000;
  const SCHEDULER_TICK_IDLE_INTERVAL_MS = 60 * 60 * 1000;

  let now = new Date(2010, 1, 1, 11, 0, 0);
  fakeNow(now);

  // Record the timeout the scheduler requests each time it re-arms its timer.
  let schedulerTimeout = 0;
  fakeSchedulerTimer((callback, timeout) => {
    schedulerTimeout = timeout;
  }, () => {});
  yield TelemetryController.testReset();
  yield TelemetryStorage.testClearPendingPings();
  PingServer.clearRequests();

  // When not idle, the scheduler should have a 5 minutes tick interval.
  Assert.equal(schedulerTimeout, SCHEDULER_TICK_INTERVAL_MS);

  // Send an "idle" notification to the scheduler.
  fakeIdleNotification("idle");

  // When idle, the scheduler should have a 1hr tick interval.
  Assert.equal(schedulerTimeout, SCHEDULER_TICK_IDLE_INTERVAL_MS);

  // Send an "active" notification to the scheduler.
  fakeIdleNotification("active");

  // When user is back active, the scheduler tick should be 5 minutes again.
  Assert.equal(schedulerTimeout, SCHEDULER_TICK_INTERVAL_MS);

  // We should not miss midnight when going to idle.
  // At 23:50 the 1h idle interval would overshoot midnight, so the scheduler
  // is expected to clamp the timeout to the 10 minutes remaining.
  now.setHours(23);
  now.setMinutes(50);
  fakeNow(now);
  fakeIdleNotification("idle");
  Assert.equal(schedulerTimeout, 10 * 60 * 1000);

  yield TelemetryController.testShutdown();
});
+
add_task(function* test_DailyDueAndIdle() {
  if (gIsAndroid || gIsGonk) {
    // We don't have the aborted session or the daily ping here.
    return;
  }

  yield TelemetryStorage.testClearPendingPings();
  PingServer.clearRequests();

  let receivedPingRequest = null;
  // Register a ping handler that will assert when receiving multiple daily pings.
  PingServer.registerPingHandler(req => {
    Assert.ok(!receivedPingRequest, "Telemetry must only send one daily ping.");
    receivedPingRequest = req;
  });

  // Faking scheduler timer has to happen before resetting TelemetryController
  // to be effective.
  let schedulerTickCallback = null;
  let now = new Date(2030, 1, 1, 0, 0, 0);
  fakeNow(now);
  // Fake scheduler functions to control daily collection flow in tests.
  fakeSchedulerTimer(callback => schedulerTickCallback = callback, () => {});
  yield TelemetryController.testReset();

  // Trigger the daily ping.
  let firstDailyDue = new Date(2030, 1, 2, 0, 0, 0);
  fakeNow(firstDailyDue);

  // Run a scheduler tick: it should trigger the daily ping.
  Assert.ok(!!schedulerTickCallback);
  let tickPromise = schedulerTickCallback();

  // Send an idle and then an active user notification while the tick is
  // still in flight; neither must produce an additional daily ping.
  fakeIdleNotification("idle");
  fakeIdleNotification("active");

  // Wait on the tick promise.
  yield tickPromise;

  yield TelemetrySend.testWaitOnOutgoingPings();

  // Decode the ping contained in the request and check that's a daily ping.
  Assert.ok(receivedPingRequest, "Telemetry must send one daily ping.");
  const receivedPing = decodeRequestPayload(receivedPingRequest);
  checkPingFormat(receivedPing, PING_TYPE_MAIN, true, true);
  Assert.equal(receivedPing.payload.info.reason, REASON_DAILY);

  yield TelemetryController.testShutdown();
});
+
add_task(function* test_userIdleAndSchedlerTick() {
  // NOTE(review): "Schedler" looks like a typo for "Scheduler"; kept as-is to
  // avoid changing the registered test name.
  if (gIsAndroid || gIsGonk) {
    // We don't have the aborted session or the daily ping here.
    return;
  }

  let receivedPingRequest = null;
  // Register a ping handler that will assert when receiving multiple daily pings.
  PingServer.registerPingHandler(req => {
    Assert.ok(!receivedPingRequest, "Telemetry must only send one daily ping.");
    receivedPingRequest = req;
  });

  let schedulerTickCallback = null;
  let now = new Date(2030, 1, 1, 0, 0, 0);
  fakeNow(now);
  // Fake scheduler functions to control daily collection flow in tests.
  fakeSchedulerTimer(callback => schedulerTickCallback = callback, () => {});
  yield TelemetryStorage.testClearPendingPings();
  yield TelemetryController.testReset();
  PingServer.clearRequests();

  // Move the current date/time to midnight.
  let firstDailyDue = new Date(2030, 1, 2, 0, 0, 0);
  fakeNow(firstDailyDue);

  // The active notification should trigger a scheduler tick. The latter will send the
  // due daily ping.
  fakeIdleNotification("active");

  // Immediately running another tick should not send a daily ping again.
  Assert.ok(!!schedulerTickCallback);
  yield schedulerTickCallback();

  // A new "idle" notification should not send a new daily ping.
  fakeIdleNotification("idle");

  yield TelemetrySend.testWaitOnOutgoingPings();

  // Decode the ping contained in the request and check that's a daily ping.
  Assert.ok(receivedPingRequest, "Telemetry must send one daily ping.");
  const receivedPing = decodeRequestPayload(receivedPingRequest);
  checkPingFormat(receivedPing, PING_TYPE_MAIN, true, true);
  Assert.equal(receivedPing.payload.info.reason, REASON_DAILY);

  // Restore the default ping handler for the tests that follow.
  PingServer.resetPingHandler();
  yield TelemetryController.testShutdown();
});
+
add_task(function* test_changeThrottling() {
  if (gIsAndroid) {
    // We don't support subsessions yet on Android.
    return;
  }

  // A watched-pref change splits a new subsession, so the subsession counter
  // tells us whether a change notification actually fired.
  let getSubsessionCount = () => {
    return TelemetrySession.getPayload().info.subsessionCounter;
  };

  const PREF_TEST = "toolkit.telemetry.test.pref1";
  const PREFS_TO_WATCH = new Map([
    [PREF_TEST, {what: TelemetryEnvironment.RECORD_PREF_STATE}],
  ]);
  Preferences.reset(PREF_TEST);

  let now = fakeNow(2050, 1, 2, 0, 0, 0);
  gMonotonicNow = fakeMonotonicNow(gMonotonicNow + 10 * MILLISECONDS_PER_MINUTE);
  yield TelemetryController.testReset();
  Assert.equal(getSubsessionCount(), 1);

  // Set the Environment preferences to watch.
  TelemetryEnvironment.testWatchPreferences(PREFS_TO_WATCH);

  // The first pref change should not trigger a notification.
  Preferences.set(PREF_TEST, 1);
  Assert.equal(getSubsessionCount(), 1);

  // We should get a change notification after the 5min throttling interval.
  fakeNow(futureDate(now, 5 * MILLISECONDS_PER_MINUTE + 1));
  gMonotonicNow = fakeMonotonicNow(gMonotonicNow + 5 * MILLISECONDS_PER_MINUTE + 1);
  Preferences.set(PREF_TEST, 2);
  Assert.equal(getSubsessionCount(), 2);

  // After that, changes should be throttled again.
  now = fakeNow(futureDate(now, 1 * MILLISECONDS_PER_MINUTE));
  gMonotonicNow = fakeMonotonicNow(gMonotonicNow + 1 * MILLISECONDS_PER_MINUTE);
  Preferences.set(PREF_TEST, 3);
  Assert.equal(getSubsessionCount(), 2);

  // ... for 5min.
  now = fakeNow(futureDate(now, 4 * MILLISECONDS_PER_MINUTE + 1));
  gMonotonicNow = fakeMonotonicNow(gMonotonicNow + 4 * MILLISECONDS_PER_MINUTE + 1);
  Preferences.set(PREF_TEST, 4);
  Assert.equal(getSubsessionCount(), 3);

  // Unregister the listener.
  TelemetryEnvironment.unregisterChangeListener("testWatchPrefs_throttling");
});
+
// Final task: shut down the ping server used by all the tests above.
add_task(function* stopServer() {
  yield PingServer.stop();
});
diff --git a/toolkit/components/telemetry/tests/unit/test_TelemetryStopwatch.js b/toolkit/components/telemetry/tests/unit/test_TelemetryStopwatch.js
new file mode 100644
index 000000000..d162d9b17
--- /dev/null
+++ b/toolkit/components/telemetry/tests/unit/test_TelemetryStopwatch.js
@@ -0,0 +1,156 @@
+/* Any copyright is dedicated to the Public Domain.
+ * http://creativecommons.org/publicdomain/zero/1.0/ */
+
// Import TelemetryStopwatch into a temporary scope so the test can use it
// without polluting the shared global.
var tmpScope = {};
Cu.import("resource://gre/modules/TelemetryStopwatch.jsm", tmpScope);
var TelemetryStopwatch = tmpScope.TelemetryStopwatch;

// Histograms used to verify that stopwatch timings get recorded.
const HIST_NAME = "TELEMETRY_SEND_SUCCESS";
const HIST_NAME2 = "RANGE_CHECKSUM_ERRORS";
const KEYED_HIST = { id: "TELEMETRY_INVALID_PING_TYPE_SUBMITTED", key: "TEST" };

// Distinct reference objects used to associate independent timers with the
// same histogram id.
var refObj = {}, refObj2 = {};

// Baseline bucket counts captured in run_test() and compared in finishTest().
// originalCount3 was previously assigned in run_test() without a declaration,
// creating an implicit global; declare it here alongside the others.
var originalCount1, originalCount2, originalCount3;
+
function run_test() {
  // Record baseline bucket counts for the histograms exercised below;
  // finishTest() compares against these to verify how many samples the
  // stopwatch calls added.
  let histogram = Telemetry.getHistogramById(HIST_NAME);
  let snapshot = histogram.snapshot();
  originalCount1 = snapshot.counts.reduce((a, b) => a += b);

  histogram = Telemetry.getHistogramById(HIST_NAME2);
  snapshot = histogram.snapshot();
  originalCount2 = snapshot.counts.reduce((a, b) => a += b);

  histogram = Telemetry.getKeyedHistogramById(KEYED_HIST.id);
  snapshot = histogram.snapshot(KEYED_HIST.key);
  // NOTE(review): unlike originalCount1/2, originalCount3 is not declared at
  // the top of the file, so this assignment creates an implicit global.
  originalCount3 = snapshot.counts.reduce((a, b) => a += b);

  // Invalid argument types/combinations must be rejected.
  do_check_false(TelemetryStopwatch.start(3));
  do_check_false(TelemetryStopwatch.start({}));
  do_check_false(TelemetryStopwatch.start("", 3));
  do_check_false(TelemetryStopwatch.start("", ""));
  do_check_false(TelemetryStopwatch.start({}, {}));

  do_check_true(TelemetryStopwatch.start("mark1"));
  do_check_true(TelemetryStopwatch.start("mark2"));

  // The same timer name can be started independently per reference object.
  do_check_true(TelemetryStopwatch.start("mark1", refObj));
  do_check_true(TelemetryStopwatch.start("mark2", refObj));

  // Same timer can't be re-started before being stopped
  do_check_false(TelemetryStopwatch.start("mark1"));
  do_check_false(TelemetryStopwatch.start("mark1", refObj));

  // Can't stop a timer that was accidentally started twice
  do_check_false(TelemetryStopwatch.finish("mark1"));
  do_check_false(TelemetryStopwatch.finish("mark1", refObj));

  do_check_true(TelemetryStopwatch.start("NON-EXISTENT_HISTOGRAM"));
  do_check_false(TelemetryStopwatch.finish("NON-EXISTENT_HISTOGRAM"));

  do_check_true(TelemetryStopwatch.start("NON-EXISTENT_HISTOGRAM", refObj));
  do_check_false(TelemetryStopwatch.finish("NON-EXISTENT_HISTOGRAM", refObj));

  // Valid histograms can be started/finished, with and without ref objects.
  do_check_true(TelemetryStopwatch.start(HIST_NAME));
  do_check_true(TelemetryStopwatch.start(HIST_NAME2));
  do_check_true(TelemetryStopwatch.start(HIST_NAME, refObj));
  do_check_true(TelemetryStopwatch.start(HIST_NAME2, refObj));
  do_check_true(TelemetryStopwatch.start(HIST_NAME, refObj2));
  do_check_true(TelemetryStopwatch.start(HIST_NAME2, refObj2));

  do_check_true(TelemetryStopwatch.finish(HIST_NAME));
  do_check_true(TelemetryStopwatch.finish(HIST_NAME2));
  do_check_true(TelemetryStopwatch.finish(HIST_NAME, refObj));
  do_check_true(TelemetryStopwatch.finish(HIST_NAME2, refObj));
  do_check_true(TelemetryStopwatch.finish(HIST_NAME, refObj2));
  do_check_true(TelemetryStopwatch.finish(HIST_NAME2, refObj2));

  // Verify that TS.finish deleted the timers
  do_check_false(TelemetryStopwatch.finish(HIST_NAME));
  do_check_false(TelemetryStopwatch.finish(HIST_NAME, refObj));

  // Verify that they can be used again
  do_check_true(TelemetryStopwatch.start(HIST_NAME));
  do_check_true(TelemetryStopwatch.start(HIST_NAME, refObj));
  do_check_true(TelemetryStopwatch.finish(HIST_NAME));
  do_check_true(TelemetryStopwatch.finish(HIST_NAME, refObj));

  do_check_false(TelemetryStopwatch.finish("unknown-mark")); // Unknown marker
  do_check_false(TelemetryStopwatch.finish("unknown-mark", {})); // Unknown object
  do_check_false(TelemetryStopwatch.finish(HIST_NAME, {})); // Known mark on unknown object

  // Test cancel
  do_check_true(TelemetryStopwatch.start(HIST_NAME));
  do_check_true(TelemetryStopwatch.start(HIST_NAME, refObj));
  do_check_true(TelemetryStopwatch.cancel(HIST_NAME));
  do_check_true(TelemetryStopwatch.cancel(HIST_NAME, refObj));

  // Verify that can not cancel twice
  do_check_false(TelemetryStopwatch.cancel(HIST_NAME));
  do_check_false(TelemetryStopwatch.cancel(HIST_NAME, refObj));

  // Verify that cancel removes the timers
  do_check_false(TelemetryStopwatch.finish(HIST_NAME));
  do_check_false(TelemetryStopwatch.finish(HIST_NAME, refObj));

  // Verify that keyed stopwatch reject invalid keys.
  for (let key of [3, {}, ""]) {
    do_check_false(TelemetryStopwatch.startKeyed(KEYED_HIST.id, key));
  }

  // Verify that keyed histograms can be started.
  do_check_true(TelemetryStopwatch.startKeyed("HISTOGRAM", "KEY1"));
  do_check_true(TelemetryStopwatch.startKeyed("HISTOGRAM", "KEY2"));
  do_check_true(TelemetryStopwatch.startKeyed("HISTOGRAM", "KEY1", refObj));
  do_check_true(TelemetryStopwatch.startKeyed("HISTOGRAM", "KEY2", refObj));

  // Restarting keyed histograms should fail.
  do_check_false(TelemetryStopwatch.startKeyed("HISTOGRAM", "KEY1"));
  do_check_false(TelemetryStopwatch.startKeyed("HISTOGRAM", "KEY1", refObj));

  // Finishing a stopwatch of a non existing histogram should return false.
  do_check_false(TelemetryStopwatch.finishKeyed("HISTOGRAM", "KEY2"));
  do_check_false(TelemetryStopwatch.finishKeyed("HISTOGRAM", "KEY2", refObj));

  // Starting & finishing a keyed stopwatch for an existing histogram should work.
  do_check_true(TelemetryStopwatch.startKeyed(KEYED_HIST.id, KEYED_HIST.key));
  do_check_true(TelemetryStopwatch.finishKeyed(KEYED_HIST.id, KEYED_HIST.key));
  // Verify that TS.finish deleted the timers
  do_check_false(TelemetryStopwatch.finishKeyed(KEYED_HIST.id, KEYED_HIST.key));

  // Verify that they can be used again
  do_check_true(TelemetryStopwatch.startKeyed(KEYED_HIST.id, KEYED_HIST.key));
  do_check_true(TelemetryStopwatch.finishKeyed(KEYED_HIST.id, KEYED_HIST.key));

  do_check_false(TelemetryStopwatch.finishKeyed("unknown-mark", "unknown-key"));
  do_check_false(TelemetryStopwatch.finishKeyed(KEYED_HIST.id, "unknown-key"));

  // Verify that keyed histograms can only be canceled through "keyed" API.
  do_check_true(TelemetryStopwatch.startKeyed(KEYED_HIST.id, KEYED_HIST.key));
  do_check_false(TelemetryStopwatch.cancel(KEYED_HIST.id, KEYED_HIST.key));
  do_check_true(TelemetryStopwatch.cancelKeyed(KEYED_HIST.id, KEYED_HIST.key));
  do_check_false(TelemetryStopwatch.cancelKeyed(KEYED_HIST.id, KEYED_HIST.key));

  // Compare the histogram counts against the baselines recorded above.
  finishTest();
}
+
// Verify that the stopwatch calls in run_test() added the expected number of
// samples to each histogram, relative to the baselines captured up front.
function finishTest() {
  // Sum all bucket counts of a histogram snapshot.
  const totalCount = (snap) => snap.counts.reduce((a, b) => a + b);

  let newCount = totalCount(Telemetry.getHistogramById(HIST_NAME).snapshot());
  do_check_eq(newCount - originalCount1, 5, "The correct number of histograms were added for histogram 1.");

  newCount = totalCount(Telemetry.getHistogramById(HIST_NAME2).snapshot());
  do_check_eq(newCount - originalCount2, 3, "The correct number of histograms were added for histogram 2.");

  newCount = totalCount(Telemetry.getKeyedHistogramById(KEYED_HIST.id).snapshot(KEYED_HIST.key));
  do_check_eq(newCount - originalCount3, 2, "The correct number of histograms were added for histogram 3.");
}
diff --git a/toolkit/components/telemetry/tests/unit/test_TelemetryTimestamps.js b/toolkit/components/telemetry/tests/unit/test_TelemetryTimestamps.js
new file mode 100644
index 000000000..75bf3157a
--- /dev/null
+++ b/toolkit/components/telemetry/tests/unit/test_TelemetryTimestamps.js
@@ -0,0 +1,77 @@
+/* Any copyright is dedicated to the Public Domain.
+ * http://creativecommons.org/publicdomain/zero/1.0/ */
+
// Shorthands for the XPCOM component globals.
var Cu = Components.utils;
var Cc = Components.classes;
var Ci = Components.interfaces;
Cu.import("resource://gre/modules/Services.jsm");
Cu.import("resource://gre/modules/TelemetryController.jsm", this);
Cu.import("resource://gre/modules/TelemetrySession.jsm", this);
Cu.import('resource://gre/modules/XPCOMUtils.jsm');

// The @mozilla/xre/app-info;1 XPCOM object provided by the xpcshell test harness doesn't
// implement the nsIXULAppInfo interface, which is needed by Services.jsm and
// TelemetrySession.jsm. updateAppInfo() creates and registers a minimal mock app-info.
Cu.import("resource://testing-common/AppInfo.jsm");
updateAppInfo();

// Handle to this file's global scope.
var gGlobalScope = this;
+
// Return the simpleMeasurements section of the current session payload.
function getSimpleMeasurementsFromTelemetryController() {
  const payload = TelemetrySession.getPayload();
  return payload.simpleMeasurements;
}
+
add_task(function* test_setup() {
  // Telemetry needs the AddonManager.
  loadAddonManager();
  // Make profile available for |TelemetryController.testShutdown()|.
  do_get_profile();

  // Make sure we don't generate unexpected pings due to pref changes.
  yield setEmptyPrefWatchlist();

  // Wait for Telemetry's asynchronous data fetch to complete before the
  // actual test runs.
  yield new Promise(resolve =>
    Services.telemetry.asyncFetchTelemetryData(resolve));
});
+
add_task(function* actualTest() {
  yield TelemetryController.testSetup();

  // Test the module logic
  let tmp = {};
  Cu.import("resource://gre/modules/TelemetryTimestamps.jsm", tmp);
  let TelemetryTimestamps = tmp.TelemetryTimestamps;
  let now = Date.now();
  TelemetryTimestamps.add("foo");
  do_check_true(TelemetryTimestamps.get().foo != null); // foo was added
  do_check_true(TelemetryTimestamps.get().foo >= now); // foo has a reasonable value

  // Add timestamp with value
  // Use a value far in the future since TelemetryController subtracts the time of
  // process initialization.
  const YEAR_4000_IN_MS = 64060588800000;
  TelemetryTimestamps.add("bar", YEAR_4000_IN_MS);
  do_check_eq(TelemetryTimestamps.get().bar, YEAR_4000_IN_MS); // bar has the right value

  // Can't add the same timestamp twice
  TelemetryTimestamps.add("bar", 2);
  do_check_eq(TelemetryTimestamps.get().bar, YEAR_4000_IN_MS); // bar wasn't overwritten

  // Non-numeric timestamp values must throw and not be recorded.
  let threw = false;
  try {
    TelemetryTimestamps.add("baz", "this isn't a number");
  } catch (ex) {
    threw = true;
  }
  do_check_true(threw); // adding non-number threw
  do_check_null(TelemetryTimestamps.get().baz); // no baz was added

  // Test that the data gets added to the telemetry ping properly
  let simpleMeasurements = getSimpleMeasurementsFromTelemetryController();
  do_check_true(simpleMeasurements != null); // got simple measurements from ping data
  do_check_true(simpleMeasurements.foo > 1); // foo was included
  do_check_true(simpleMeasurements.bar > 1); // bar was included
  do_check_eq(undefined, simpleMeasurements.baz); // baz wasn't included since it wasn't added

  yield TelemetryController.testShutdown();
});
diff --git a/toolkit/components/telemetry/tests/unit/test_ThreadHangStats.js b/toolkit/components/telemetry/tests/unit/test_ThreadHangStats.js
new file mode 100644
index 000000000..e8c9f868a
--- /dev/null
+++ b/toolkit/components/telemetry/tests/unit/test_ThreadHangStats.js
@@ -0,0 +1,102 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/ */
+
+Cu.import("resource://gre/modules/Services.jsm");
+
// Return the hang stats entry for the main ("Gecko") thread, or undefined if
// it is not present.
function getMainThreadHangStats() {
  const perThreadStats = Services.telemetry.threadHangStats;
  return perThreadStats.find((thread) => thread.name === "Gecko");
}
+
function run_test() {
  let startHangs = getMainThreadHangStats();

  // We disable hang reporting in several situations (e.g. debug builds,
  // official releases). In those cases, we don't have hang stats available
  // and should exit the test early.
  if (!startHangs) {
    // NOTE(review): ok() is passed a message string here (always truthy);
    // this looks like it was meant to only log a message.
    ok("Hang reporting not enabled.");
    return;
  }

  if (Services.appinfo.OS === 'Linux' || Services.appinfo.OS === 'Android') {
    // We use the rt_tgsigqueueinfo syscall on Linux which requires a
    // certain kernel version. It's not an error if the system running
    // the test is older than that.
    let kernel = Services.sysinfo.get('kernel_version') ||
                 Services.sysinfo.get('version');
    if (Services.vc.compare(kernel, '2.6.31') < 0) {
      ok("Hang reporting not supported for old kernel.");
      return;
    }
  }

  // Run three events in the event loop:
  // the first event causes a transient hang;
  // the second event causes a permanent hang;
  // the third event checks results from previous events.

  do_execute_soon(() => {
    // Cause a hang lasting 1 second (transient hang) by busy-waiting.
    let startTime = Date.now();
    while ((Date.now() - startTime) < 1000);
  });

  do_execute_soon(() => {
    // Cause a hang lasting 10 seconds (permanent hang) by busy-waiting.
    let startTime = Date.now();
    while ((Date.now() - startTime) < 10000);
  });

  do_execute_soon(() => {
    do_test_pending();

    let check_results = () => {
      let endHangs = getMainThreadHangStats();

      // Because hangs are recorded asynchronously, if we don't see new hangs,
      // we should wait for pending hangs to be recorded. On the other hand,
      // if hang monitoring is broken, this test will time out.
      if (endHangs.hangs.length === startHangs.hangs.length) {
        do_timeout(100, check_results);
        return;
      }

      // Validate the shape of a single histogram object in the hang stats.
      let check_histogram = (histogram) => {
        equal(typeof histogram, "object");
        equal(histogram.histogram_type, 0);
        equal(typeof histogram.min, "number");
        equal(typeof histogram.max, "number");
        equal(typeof histogram.sum, "number");
        ok(Array.isArray(histogram.ranges));
        ok(Array.isArray(histogram.counts));
        equal(histogram.counts.length, histogram.ranges.length);
      };

      // Make sure the hang stats structure is what we expect.
      equal(typeof endHangs, "object");
      check_histogram(endHangs.activity);

      ok(Array.isArray(endHangs.hangs));
      notEqual(endHangs.hangs.length, 0);

      ok(Array.isArray(endHangs.hangs[0].stack));
      notEqual(endHangs.hangs[0].stack.length, 0);
      equal(typeof endHangs.hangs[0].stack[0], "string");

      // Make sure one of the hangs is a permanent
      // hang containing a native stack.
      ok(endHangs.hangs.some((hang) => (
        Array.isArray(hang.nativeStack) &&
        hang.nativeStack.length !== 0 &&
        typeof hang.nativeStack[0] === "string"
      )));

      check_histogram(endHangs.hangs[0].histogram);

      do_test_finished();
    };

    check_results();
  });
}
diff --git a/toolkit/components/telemetry/tests/unit/test_nsITelemetry.js b/toolkit/components/telemetry/tests/unit/test_nsITelemetry.js
new file mode 100644
index 000000000..8dc552604
--- /dev/null
+++ b/toolkit/components/telemetry/tests/unit/test_nsITelemetry.js
@@ -0,0 +1,883 @@
+/* Any copyright is dedicated to the Public Domain.
+ http://creativecommons.org/publicdomain/zero/1.0/ */
+
+const INT_MAX = 0x7FFFFFFF;
+
+Cu.import("resource://gre/modules/Services.jsm", this);
+Cu.import("resource://gre/modules/TelemetryUtils.jsm", this);
+
// Build the array of consecutive integers in the half-open range
// [lower, upper). Returns [] when upper <= lower.
function numberRange(lower, upper)
{
  return Array.from({ length: upper - lower }, (unused, offset) => lower + offset);
}
+
// Assert (via the xpcshell harness) that calling f() throws.
function expect_fail(f) {
  let threw = false;
  try {
    f();
  } catch (e) {
    threw = true;
  }
  do_check_true(threw);
}
+
// Assert (via the xpcshell harness) that calling f() does NOT throw.
function expect_success(f) {
  let completed = false;
  try {
    f();
    completed = true;
  } catch (e) {
    completed = false;
  }
  do_check_true(completed);
}
+
// Assert that two histogram objects have identical snapshots: same type,
// bounds, sum, and element-for-element equal counts and ranges.
function compareHistograms(h1, h2) {
  const s1 = h1.snapshot();
  const s2 = h2.snapshot();

  for (const prop of ["histogram_type", "min", "max", "sum"]) {
    do_check_eq(s1[prop], s2[prop]);
  }

  do_check_eq(s1.counts.length, s2.counts.length);
  s1.counts.forEach((count, idx) => do_check_eq(count, s2.counts[idx]));

  do_check_eq(s1.ranges.length, s2.ranges.length);
  s1.ranges.forEach((range, idx) => do_check_eq(range, s2.ranges[idx]));
}
+
/**
 * Exercises the basic nsITelemetry histogram API for the histogram |name|:
 * fills one value per bucket, checks boolean coercion on a numeric
 * histogram, checks clear(), and checks the type/min/max reflected through
 * Telemetry.histogramSnapshots.
 *
 * Fixes: added the semicolons the original omitted (it relied on ASI) and
 * made the declarations consistently block-scoped. Behavior is unchanged.
 *
 * @param histogram_type Expected nsITelemetry histogram type constant.
 * @param name           Histogram ID passed to getHistogramById.
 * @param min            Expected minimum reflected in the global snapshot.
 * @param max            Expected maximum reflected in the global snapshot.
 * @param bucket_count   Unused; kept for call-site compatibility.
 */
function check_histogram(histogram_type, name, min, max, bucket_count) {
  let h = Telemetry.getHistogramById(name);
  let r = h.snapshot().ranges;
  let sum = 0;
  // Add each bucket's lower bound exactly once; the snapshot's sum must
  // then equal the sum of the added values.
  for (let i = 0; i < r.length; i++) {
    let v = r[i];
    sum += v;
    h.add(v);
  }
  let s = h.snapshot();
  // verify properties
  do_check_eq(sum, s.sum);

  // there should be exactly one element per bucket
  for (let i of s.counts) {
    do_check_eq(i, 1);
  }
  let hgrams = Telemetry.histogramSnapshots;
  let gh = hgrams[name];
  do_check_eq(gh.histogram_type, histogram_type);

  do_check_eq(gh.min, min);
  do_check_eq(gh.max, max);

  // Check that booleans work with nonboolean histograms:
  // false/true are accumulated as 0/1.
  h.add(false);
  h.add(true);
  s = h.snapshot().counts;
  do_check_eq(s[0], 2);
  do_check_eq(s[1], 2);

  // Check that clearing works: all counts and the sum go back to zero.
  h.clear();
  s = h.snapshot();
  for (let i of s.counts) {
    do_check_eq(i, 0);
  }
  do_check_eq(s.sum, 0);

  h.add(0);
  h.add(1);
  let c = h.snapshot().counts;
  do_check_eq(c[0], 1);
  do_check_eq(c[1], 1);
}
+
// This MUST be the very first test of this file.
// NOTE(review): skipped on Android (gIsAndroid); presumably subsession
// histograms are not supported there — confirm against the harness setup.
add_task({
  skip_if: () => gIsAndroid
},
function* test_instantiate() {
  const ID = "TELEMETRY_TEST_COUNT";
  let h = Telemetry.getHistogramById(ID);

  // Instantiate the subsession histogram through |add| and make sure they match.
  // This MUST be the first use of "TELEMETRY_TEST_COUNT" in this file, otherwise
  // |add| will not instantiate the histogram.
  h.add(1);
  let snapshot = h.snapshot();
  let subsession = Telemetry.snapshotSubsessionHistograms();
  Assert.equal(snapshot.sum, subsession[ID].sum,
               "Histogram and subsession histogram sum must match.");
  // Clear the histogram, so we don't void the assumptions from the other tests.
  h.clear();
});
+
// Sanity-check the exponential and linear test histograms via
// check_histogram(): one value per bucket, boolean coercion, clearing,
// and the type/min/max reflected in the global snapshot.
// Fix: added the semicolons the original omitted (ASI reliance).
add_task(function* test_parameterChecks() {
  let kinds = [Telemetry.HISTOGRAM_EXPONENTIAL, Telemetry.HISTOGRAM_LINEAR];
  let testNames = ["TELEMETRY_TEST_EXPONENTIAL", "TELEMETRY_TEST_LINEAR"];
  for (let i = 0; i < kinds.length; i++) {
    let histogram_type = kinds[i];
    let test_type = testNames[i];
    let [min, max, bucket_count] = [1, INT_MAX - 1, 10];
    check_histogram(histogram_type, test_type, min, max, bucket_count);
  }
});
+
add_task(function* test_noSerialization() {
  // Instantiate the storage for this histogram and make sure it doesn't
  // get reflected into JS, as it has no interesting data in it.
  // (getHistogramById alone allocates storage; no values were accumulated.)
  Telemetry.getHistogramById("NEWTAB_PAGE_PINNED_SITES_COUNT");
  do_check_false("NEWTAB_PAGE_PINNED_SITES_COUNT" in Telemetry.histogramSnapshots);
});
+
// Check the accumulation rules of boolean histograms: fixed [0, 1, 2]
// buckets, and every added value is normalized to 0 or 1.
add_task(function* test_boolean_histogram() {
  var h = Telemetry.getHistogramById("TELEMETRY_TEST_BOOLEAN");
  var r = h.snapshot().ranges;
  // boolean histograms ignore numeric parameters
  do_check_eq(uneval(r), uneval([0, 1, 2]))
  // Adds 0, 1 and 2; the 2 is normalized into the "true" bucket.
  for (var i=0;i<r.length;i++) {
    var v = r[i];
    h.add(v);
  }
  h.add(true);
  h.add(false);
  var s = h.snapshot();
  do_check_eq(s.histogram_type, Telemetry.HISTOGRAM_BOOLEAN);
  // last bucket should always be 0 since .add parameters are normalized to either 0 or 1
  do_check_eq(s.counts[2], 0);
  do_check_eq(s.sum, 3);
  do_check_eq(s.counts[0], 2);
});
+
// Check flag histogram semantics: starts with an implicit 0, a single add(1)
// flips it to 1, and further adds are ignored.
add_task(function* test_flag_histogram() {
  var h = Telemetry.getHistogramById("TELEMETRY_TEST_FLAG");
  var r = h.snapshot().ranges;
  // Flag histograms ignore numeric parameters.
  do_check_eq(uneval(r), uneval([0, 1, 2]));
  // Should already have a 0 counted.
  var c = h.snapshot().counts;
  var s = h.snapshot().sum;
  do_check_eq(uneval(c), uneval([1, 0, 0]));
  do_check_eq(s, 0);
  // Should switch counts.
  h.add(1);
  var c2 = h.snapshot().counts;
  var s2 = h.snapshot().sum;
  do_check_eq(uneval(c2), uneval([0, 1, 0]));
  do_check_eq(s2, 1);
  // Should only switch counts once.
  h.add(1);
  var c3 = h.snapshot().counts;
  var s3 = h.snapshot().sum;
  do_check_eq(uneval(c3), uneval([0, 1, 0]));
  do_check_eq(s3, 1);
  do_check_eq(h.snapshot().histogram_type, Telemetry.HISTOGRAM_FLAG);
});
+
// Check count histogram semantics: add() takes no argument and each call
// increments the first bucket and the sum by one.
add_task(function* test_count_histogram() {
  let h = Telemetry.getHistogramById("TELEMETRY_TEST_COUNT2");
  let s = h.snapshot();
  do_check_eq(uneval(s.ranges), uneval([0, 1, 2]));
  do_check_eq(uneval(s.counts), uneval([0, 0, 0]));
  do_check_eq(s.sum, 0);
  h.add();
  s = h.snapshot();
  do_check_eq(uneval(s.counts), uneval([1, 0, 0]));
  do_check_eq(s.sum, 1);
  h.add();
  s = h.snapshot();
  do_check_eq(uneval(s.counts), uneval([2, 0, 0]));
  do_check_eq(s.sum, 2);
});
+
// Check categorical histograms: labels can be added by name or by index,
// and unknown labels (or numeric strings) are rejected.
add_task(function* test_categorical_histogram()
{
  let h1 = Telemetry.getHistogramById("TELEMETRY_TEST_CATEGORICAL");
  for (let v of ["CommonLabel", "Label2", "Label3", "Label3", 0, 0, 1]) {
    h1.add(v);
  }
  // Labels not declared for this histogram must throw.
  for (let s of ["", "Label4", "1234"]) {
    Assert.throws(() => h1.add(s));
  }

  let snapshot = h1.snapshot();
  Assert.equal(snapshot.sum, 6);
  Assert.deepEqual(snapshot.ranges, [0, 1, 2, 3]);
  Assert.deepEqual(snapshot.counts, [3, 2, 2, 0]);

  let h2 = Telemetry.getHistogramById("TELEMETRY_TEST_CATEGORICAL_OPTOUT");
  for (let v of ["CommonLabel", "CommonLabel", "Label4", "Label5", "Label6", 0, 1]) {
    h2.add(v);
  }
  for (let s of ["", "Label3", "1234"]) {
    Assert.throws(() => h2.add(s));
  }

  snapshot = h2.snapshot();
  Assert.equal(snapshot.sum, 7);
  Assert.deepEqual(snapshot.ranges, [0, 1, 2, 3, 4]);
  Assert.deepEqual(snapshot.counts, [3, 2, 1, 1, 0]);
});
+
// Check getHistogramById: unknown IDs throw, and a known histogram
// (CYCLE_COLLECTOR) reflects its registered type and bounds.
add_task(function* test_getHistogramById() {
  try {
    Telemetry.getHistogramById("nonexistent");
    do_throw("This can't happen");
  } catch (e) {
    // Expected: "nonexistent" is not a registered histogram ID.
  }
  var h = Telemetry.getHistogramById("CYCLE_COLLECTOR");
  var s = h.snapshot();
  do_check_eq(s.histogram_type, Telemetry.HISTOGRAM_EXPONENTIAL);
  do_check_eq(s.min, 1);
  do_check_eq(s.max, 10000);
});
+
// Check that the slow-SQL report exposes both thread categories.
add_task(function* test_getSlowSQL() {
  var slow = Telemetry.slowSQL;
  do_check_true(("mainThread" in slow) && ("otherThreads" in slow));
});
+
// Check the shape of the WebRTC stats object exposed by Telemetry.
add_task(function* test_getWebrtc() {
  var webrtc = Telemetry.webrtcStats;
  do_check_true("IceCandidatesStats" in webrtc);
  var icestats = webrtc.IceCandidatesStats;
  do_check_true("webrtc" in icestats);
});
+
// Check that telemetry doesn't record in private mode
// (toggled here through Telemetry.canRecordExtended).
add_task(function* test_privateMode() {
  var h = Telemetry.getHistogramById("TELEMETRY_TEST_BOOLEAN");
  var orig = h.snapshot();
  Telemetry.canRecordExtended = false;
  h.add(1);
  // The add above must have been dropped: the snapshot is unchanged.
  do_check_eq(uneval(orig), uneval(h.snapshot()));
  Telemetry.canRecordExtended = true;
  h.add(1);
  do_check_neq(uneval(orig), uneval(h.snapshot()));
});
+
// Check that telemetry records only when it is supposed to.
// Order matters throughout: the canRecordBase/canRecordExtended globals
// are toggled step by step and each assertion depends on the previous adds.
add_task(function* test_histogramRecording() {
  // Check that no histogram is recorded if both base and extended recording are off.
  Telemetry.canRecordBase = false;
  Telemetry.canRecordExtended = false;

  let h = Telemetry.getHistogramById("TELEMETRY_TEST_RELEASE_OPTOUT");
  h.clear();
  let orig = h.snapshot();
  h.add(1);
  Assert.equal(orig.sum, h.snapshot().sum);

  // Check that only base histograms are recorded.
  Telemetry.canRecordBase = true;
  h.add(1);
  Assert.equal(orig.sum + 1, h.snapshot().sum,
               "Histogram value should have incremented by 1 due to recording.");

  // Extended histograms should not be recorded.
  h = Telemetry.getHistogramById("TELEMETRY_TEST_RELEASE_OPTIN");
  orig = h.snapshot();
  h.add(1);
  Assert.equal(orig.sum, h.snapshot().sum,
               "Histograms should be equal after recording.");

  // Runtime created histograms should not be recorded.
  h = Telemetry.getHistogramById("TELEMETRY_TEST_BOOLEAN");
  orig = h.snapshot();
  h.add(1);
  Assert.equal(orig.sum, h.snapshot().sum,
               "Histograms should be equal after recording.");

  // Check that extended histograms are recorded when required.
  Telemetry.canRecordExtended = true;

  h.add(1);
  Assert.equal(orig.sum + 1, h.snapshot().sum,
               "Runtime histogram value should have incremented by 1 due to recording.");

  h = Telemetry.getHistogramById("TELEMETRY_TEST_RELEASE_OPTIN");
  orig = h.snapshot();
  h.add(1);
  Assert.equal(orig.sum + 1, h.snapshot().sum,
               "Histogram value should have incremented by 1 due to recording.");

  // Check that base histograms are still being recorded.
  h = Telemetry.getHistogramById("TELEMETRY_TEST_RELEASE_OPTOUT");
  h.clear();
  orig = h.snapshot();
  h.add(1);
  Assert.equal(orig.sum + 1, h.snapshot().sum,
               "Histogram value should have incremented by 1 due to recording.");
});
+
// Exercise the addon histogram API: registration, duplicate rejection,
// reflection into addonHistogramSnapshots, and unregistration.
// The sequence is stateful; assertions depend on the registration order.
add_task(function* test_addons() {
  var addon_id = "testing-addon";
  var fake_addon_id = "fake-addon";
  var name1 = "testing-histogram1";
  var register = Telemetry.registerAddonHistogram;
  expect_success(() =>
    register(addon_id, name1, Telemetry.HISTOGRAM_LINEAR, 1, 5, 6));
  // Can't register the same histogram multiple times.
  expect_fail(() =>
    register(addon_id, name1, Telemetry.HISTOGRAM_LINEAR, 1, 5, 6));
  // Make sure we can't get at it with another name.
  expect_fail(() => Telemetry.getAddonHistogram(fake_addon_id, name1));

  // Check for reflection capabilities.
  var h1 = Telemetry.getAddonHistogram(addon_id, name1);
  // Verify that although we've created storage for it, we don't reflect it into JS.
  var snapshots = Telemetry.addonHistogramSnapshots;
  do_check_false(name1 in snapshots[addon_id]);
  h1.add(1);
  h1.add(3);
  var s1 = h1.snapshot();
  do_check_eq(s1.histogram_type, Telemetry.HISTOGRAM_LINEAR);
  do_check_eq(s1.min, 1);
  do_check_eq(s1.max, 5);
  do_check_eq(s1.counts[1], 1);
  do_check_eq(s1.counts[3], 1);

  var name2 = "testing-histogram2";
  expect_success(() =>
    register(addon_id, name2, Telemetry.HISTOGRAM_LINEAR, 2, 4, 4));

  var h2 = Telemetry.getAddonHistogram(addon_id, name2);
  h2.add(2);
  h2.add(3);
  var s2 = h2.snapshot();
  do_check_eq(s2.histogram_type, Telemetry.HISTOGRAM_LINEAR);
  do_check_eq(s2.min, 2);
  do_check_eq(s2.max, 4);
  do_check_eq(s2.counts[1], 1);
  do_check_eq(s2.counts[2], 1);

  // Check that we can register histograms for a different addon with
  // identical names.
  var extra_addon = "testing-extra-addon";
  expect_success(() =>
    register(extra_addon, name1, Telemetry.HISTOGRAM_BOOLEAN));

  // Check that we can register flag histograms.
  var flag_addon = "testing-flag-addon";
  var flag_histogram = "flag-histogram";
  expect_success(() =>
    register(flag_addon, flag_histogram, Telemetry.HISTOGRAM_FLAG));
  expect_success(() =>
    register(flag_addon, name2, Telemetry.HISTOGRAM_LINEAR, 2, 4, 4));

  // Check that we reflect registered addons and histograms.
  snapshots = Telemetry.addonHistogramSnapshots;
  do_check_true(addon_id in snapshots)
  do_check_true(extra_addon in snapshots);
  do_check_true(flag_addon in snapshots);

  // Check that we have data for our created histograms.
  do_check_true(name1 in snapshots[addon_id]);
  do_check_true(name2 in snapshots[addon_id]);
  var s1_alt = snapshots[addon_id][name1];
  var s2_alt = snapshots[addon_id][name2];
  do_check_eq(s1_alt.min, s1.min);
  do_check_eq(s1_alt.max, s1.max);
  do_check_eq(s1_alt.histogram_type, s1.histogram_type);
  do_check_eq(s2_alt.min, s2.min);
  do_check_eq(s2_alt.max, s2.max);
  do_check_eq(s2_alt.histogram_type, s2.histogram_type);

  // Even though we've registered it, it shouldn't show up until data is added to it.
  do_check_false(name1 in snapshots[extra_addon]);

  // Flag histograms should show up automagically.
  do_check_true(flag_histogram in snapshots[flag_addon]);
  do_check_false(name2 in snapshots[flag_addon]);

  // Check that we can remove addon histograms.
  Telemetry.unregisterAddonHistograms(addon_id);
  snapshots = Telemetry.addonHistogramSnapshots;
  do_check_false(addon_id in snapshots);
  // Make sure other addons are unaffected.
  do_check_true(extra_addon in snapshots);
});
+
// Check that expired histograms accept adds but are reflected neither in
// snapshots nor in the registered-histograms list.
add_task(function* test_expired_histogram() {
  var test_expired_id = "TELEMETRY_TEST_EXPIRED";
  var dummy = Telemetry.getHistogramById(test_expired_id);
  var rh = Telemetry.registeredHistograms(Ci.nsITelemetry.DATASET_RELEASE_CHANNEL_OPTIN, []);
  Assert.ok(!!rh);

  dummy.add(1);

  do_check_eq(Telemetry.histogramSnapshots["__expired__"], undefined);
  do_check_eq(Telemetry.histogramSnapshots[test_expired_id], undefined);
  do_check_eq(rh[test_expired_id], undefined);
});
+
add_task(function* test_keyed_histogram() {
  // Check that invalid names get rejected.

  let threw = false;
  try {
    // Only the first argument is meaningful here; the extra arguments
    // mirror a misuse and must not prevent the ID check from throwing.
    Telemetry.getKeyedHistogramById("test::unknown histogram", "never", Telemetry.HISTOGRAM_BOOLEAN);
  } catch (e) {
    // This should throw as it is an unknown ID
    threw = true;
  }
  Assert.ok(threw, "getKeyedHistogramById should have thrown");
});
+
// Check keyed boolean histograms: per-key accumulation (including a
// non-ASCII key), snapshot reflection, and clear().
add_task(function* test_keyed_boolean_histogram() {
  const KEYED_ID = "TELEMETRY_TEST_KEYED_BOOLEAN";
  let KEYS = numberRange(0, 2).map(i => "key" + (i + 1));
  KEYS.push("漢語");
  // Expected per-key snapshot after a single add(key, true).
  let histogramBase = {
    "min": 1,
    "max": 2,
    "histogram_type": 2,
    "sum": 1,
    "ranges": [0, 1, 2],
    "counts": [0, 1, 0]
  };
  let testHistograms = numberRange(0, 3).map(i => JSON.parse(JSON.stringify(histogramBase)));
  let testKeys = [];
  let testSnapShot = {};

  let h = Telemetry.getKeyedHistogramById(KEYED_ID);
  for (let i=0; i<2; ++i) {
    let key = KEYS[i];
    h.add(key, true);
    testSnapShot[key] = testHistograms[i];
    testKeys.push(key);

    Assert.deepEqual(h.keys().sort(), testKeys);
    Assert.deepEqual(h.snapshot(), testSnapShot);
  }

  h = Telemetry.getKeyedHistogramById(KEYED_ID);
  Assert.deepEqual(h.keys().sort(), testKeys);
  Assert.deepEqual(h.snapshot(), testSnapShot);

  // Adding false goes into the first bucket and contributes 0 to the sum.
  let key = KEYS[2];
  h.add(key, false);
  testKeys.push(key);
  testSnapShot[key] = testHistograms[2];
  testSnapShot[key].sum = 0;
  testSnapShot[key].counts = [1, 0, 0];
  Assert.deepEqual(h.keys().sort(), testKeys);
  Assert.deepEqual(h.snapshot(), testSnapShot);

  let allSnapshots = Telemetry.keyedHistogramSnapshots;
  Assert.deepEqual(allSnapshots[KEYED_ID], testSnapShot);

  h.clear();
  Assert.deepEqual(h.keys(), []);
  Assert.deepEqual(h.snapshot(), {});
});
+
// Check keyed count histograms: each key accumulates an independent count,
// reflected both per-key and in the full snapshot.
add_task(function* test_keyed_count_histogram() {
  const KEYED_ID = "TELEMETRY_TEST_KEYED_COUNT";
  const KEYS = numberRange(0, 5).map(i => "key" + (i + 1));
  // Expected per-key snapshot before any adds for that key.
  let histogramBase = {
    "min": 1,
    "max": 2,
    "histogram_type": 4,
    "sum": 0,
    "ranges": [0, 1, 2],
    "counts": [1, 0, 0]
  };
  let testHistograms = numberRange(0, 5).map(i => JSON.parse(JSON.stringify(histogramBase)));
  let testKeys = [];
  let testSnapShot = {};

  let h = Telemetry.getKeyedHistogramById(KEYED_ID);
  // Give each key a distinct odd count (1, 3, 5, 7).
  for (let i=0; i<4; ++i) {
    let key = KEYS[i];
    let value = i*2 + 1;

    for (let k=0; k<value; ++k) {
      h.add(key);
    }
    testHistograms[i].counts[0] = value;
    testHistograms[i].sum = value;
    testSnapShot[key] = testHistograms[i];
    testKeys.push(key);

    Assert.deepEqual(h.keys().sort(), testKeys);
    Assert.deepEqual(h.snapshot(key), testHistograms[i]);
    Assert.deepEqual(h.snapshot(), testSnapShot);
  }

  h = Telemetry.getKeyedHistogramById(KEYED_ID);
  Assert.deepEqual(h.keys().sort(), testKeys);
  Assert.deepEqual(h.snapshot(), testSnapShot);

  let key = KEYS[4];
  h.add(key);
  testKeys.push(key);
  testHistograms[4].counts[0] = 1;
  testHistograms[4].sum = 1;
  testSnapShot[key] = testHistograms[4];

  Assert.deepEqual(h.keys().sort(), testKeys);
  Assert.deepEqual(h.snapshot(), testSnapShot);

  let allSnapshots = Telemetry.keyedHistogramSnapshots;
  Assert.deepEqual(allSnapshots[KEYED_ID], testSnapShot);

  h.clear();
  Assert.deepEqual(h.keys(), []);
  Assert.deepEqual(h.snapshot(), {});
});
+
// Check keyed flag histograms: setting the flag for one key yields the
// expected single-key snapshot, and clear() empties the histogram.
add_task(function* test_keyed_flag_histogram() {
  const KEYED_ID = "TELEMETRY_TEST_KEYED_FLAG";
  let h = Telemetry.getKeyedHistogramById(KEYED_ID);

  const KEY = "default";
  h.add(KEY, true);

  let testSnapshot = {};
  testSnapshot[KEY] = {
    "min": 1,
    "max": 2,
    "histogram_type": 3,
    "sum": 1,
    "ranges": [0, 1, 2],
    "counts": [0, 1, 0]
  };

  Assert.deepEqual(h.keys().sort(), [KEY]);
  Assert.deepEqual(h.snapshot(), testSnapshot);

  let allSnapshots = Telemetry.keyedHistogramSnapshots;
  Assert.deepEqual(allSnapshots[KEYED_ID], testSnapshot);

  h.clear();
  Assert.deepEqual(h.keys(), []);
  Assert.deepEqual(h.snapshot(), {});
});
+
// Keyed-histogram counterpart of test_histogramRecording: verify the
// canRecordBase/canRecordExtended gating. Order of toggles is significant.
add_task(function* test_keyed_histogram_recording() {
  // Check that no histogram is recorded if both base and extended recording are off.
  Telemetry.canRecordBase = false;
  Telemetry.canRecordExtended = false;

  const TEST_KEY = "record_foo";
  let h = Telemetry.getKeyedHistogramById("TELEMETRY_TEST_KEYED_RELEASE_OPTOUT");
  h.clear();
  h.add(TEST_KEY, 1);
  Assert.equal(h.snapshot(TEST_KEY).sum, 0);

  // Check that only base histograms are recorded.
  Telemetry.canRecordBase = true;
  h.add(TEST_KEY, 1);
  Assert.equal(h.snapshot(TEST_KEY).sum, 1,
               "The keyed histogram should record the correct value.");

  // Extended set keyed histograms should not be recorded.
  h = Telemetry.getKeyedHistogramById("TELEMETRY_TEST_KEYED_RELEASE_OPTIN");
  h.clear();
  h.add(TEST_KEY, 1);
  Assert.equal(h.snapshot(TEST_KEY).sum, 0,
               "The keyed histograms should not record any data.");

  // Check that extended histograms are recorded when required.
  Telemetry.canRecordExtended = true;

  h.add(TEST_KEY, 1);
  Assert.equal(h.snapshot(TEST_KEY).sum, 1,
               "The runtime keyed histogram should record the correct value.");

  h = Telemetry.getKeyedHistogramById("TELEMETRY_TEST_KEYED_RELEASE_OPTIN");
  h.clear();
  h.add(TEST_KEY, 1);
  Assert.equal(h.snapshot(TEST_KEY).sum, 1,
               "The keyed histogram should record the correct value.");

  // Check that base histograms are still being recorded.
  h = Telemetry.getKeyedHistogramById("TELEMETRY_TEST_KEYED_RELEASE_OPTOUT");
  h.clear();
  h.add(TEST_KEY, 1);
  Assert.equal(h.snapshot(TEST_KEY).sum, 1);
});
+
// Check per-histogram recording control via setHistogramRecordingEnabled,
// for histograms that record by default and ones that don't.
add_task(function* test_histogram_recording_enabled() {
  Telemetry.canRecordBase = true;
  Telemetry.canRecordExtended = true;

  // Check that a "normal" histogram respects recording-enabled on/off
  var h = Telemetry.getHistogramById("TELEMETRY_TEST_COUNT");
  var orig = h.snapshot();

  h.add(1);
  Assert.equal(orig.sum + 1, h.snapshot().sum,
               "add should record by default.");

  // Check that when recording is disabled - add is ignored
  // (the sum stays at orig.sum + 1 from the previous add).
  Telemetry.setHistogramRecordingEnabled("TELEMETRY_TEST_COUNT", false);
  h.add(1);
  Assert.equal(orig.sum + 1, h.snapshot().sum,
               "When recording is disabled add should not record.");

  // Check that we're back to normal after recording is enabled
  Telemetry.setHistogramRecordingEnabled("TELEMETRY_TEST_COUNT", true);
  h.add(1);
  Assert.equal(orig.sum + 2, h.snapshot().sum,
               "When recording is re-enabled add should record.");

  // Check that we're correctly accumulating values other than 1.
  h.clear();
  h.add(3);
  Assert.equal(3, h.snapshot().sum, "Recording counts greater than 1 should work.");

  // Check that a histogram with recording disabled by default behaves correctly
  h = Telemetry.getHistogramById("TELEMETRY_TEST_COUNT_INIT_NO_RECORD");
  orig = h.snapshot();

  h.add(1);
  Assert.equal(orig.sum, h.snapshot().sum,
               "When recording is disabled by default, add should not record by default.");

  Telemetry.setHistogramRecordingEnabled("TELEMETRY_TEST_COUNT_INIT_NO_RECORD", true);
  h.add(1);
  Assert.equal(orig.sum + 1, h.snapshot().sum,
               "When recording is enabled add should record.");

  // Restore to disabled
  Telemetry.setHistogramRecordingEnabled("TELEMETRY_TEST_COUNT_INIT_NO_RECORD", false);
  h.add(1);
  Assert.equal(orig.sum + 1, h.snapshot().sum,
               "When recording is disabled add should not record.");
});
+
// Keyed-histogram counterpart of test_histogram_recording_enabled:
// per-histogram recording control for keyed histograms.
add_task(function* test_keyed_histogram_recording_enabled() {
  Telemetry.canRecordBase = true;
  Telemetry.canRecordExtended = true;

  // Check RecordingEnabled for keyed histograms which are recording by default
  const TEST_KEY = "record_foo";
  let h = Telemetry.getKeyedHistogramById("TELEMETRY_TEST_KEYED_RELEASE_OPTOUT");

  h.clear();
  h.add(TEST_KEY, 1);
  Assert.equal(h.snapshot(TEST_KEY).sum, 1,
               "Keyed histogram add should record by default");

  // Sum stays at 1 from the previous add; the disabled add is dropped.
  Telemetry.setHistogramRecordingEnabled("TELEMETRY_TEST_KEYED_RELEASE_OPTOUT", false);
  h.add(TEST_KEY, 1);
  Assert.equal(h.snapshot(TEST_KEY).sum, 1,
               "Keyed histogram add should not record when recording is disabled");

  Telemetry.setHistogramRecordingEnabled("TELEMETRY_TEST_KEYED_RELEASE_OPTOUT", true);
  h.clear();
  h.add(TEST_KEY, 1);
  Assert.equal(h.snapshot(TEST_KEY).sum, 1,
               "Keyed histogram add should record when recording is re-enabled");

  // Check that a histogram with recording disabled by default behaves correctly
  h = Telemetry.getKeyedHistogramById("TELEMETRY_TEST_KEYED_COUNT_INIT_NO_RECORD");
  h.clear();

  h.add(TEST_KEY, 1);
  Assert.equal(h.snapshot(TEST_KEY).sum, 0,
               "Keyed histogram add should not record by default for histograms which don't record by default");

  Telemetry.setHistogramRecordingEnabled("TELEMETRY_TEST_KEYED_COUNT_INIT_NO_RECORD", true);
  h.add(TEST_KEY, 1);
  Assert.equal(h.snapshot(TEST_KEY).sum, 1,
               "Keyed histogram add should record when recording is enabled");

  // Restore to disabled
  Telemetry.setHistogramRecordingEnabled("TELEMETRY_TEST_KEYED_COUNT_INIT_NO_RECORD", false);
  h.add(TEST_KEY, 1);
  Assert.equal(h.snapshot(TEST_KEY).sum, 1,
               "Keyed histogram add should not record when recording is disabled");
});
+
add_task(function* test_datasets() {
  // Check that datasets work as expected: histograms report the dataset
  // they belong to, and registeredHistograms filters by dataset.

  const RELEASE_CHANNEL_OPTOUT = Ci.nsITelemetry.DATASET_RELEASE_CHANNEL_OPTOUT;
  const RELEASE_CHANNEL_OPTIN = Ci.nsITelemetry.DATASET_RELEASE_CHANNEL_OPTIN;

  // Histograms should default to the extended dataset
  let h = Telemetry.getHistogramById("TELEMETRY_TEST_FLAG");
  Assert.equal(h.dataset(), RELEASE_CHANNEL_OPTIN);
  h = Telemetry.getKeyedHistogramById("TELEMETRY_TEST_KEYED_FLAG");
  Assert.equal(h.dataset(), RELEASE_CHANNEL_OPTIN);

  // Check test histograms with explicit dataset definitions
  h = Telemetry.getHistogramById("TELEMETRY_TEST_RELEASE_OPTIN");
  Assert.equal(h.dataset(), RELEASE_CHANNEL_OPTIN);
  h = Telemetry.getHistogramById("TELEMETRY_TEST_RELEASE_OPTOUT");
  Assert.equal(h.dataset(), RELEASE_CHANNEL_OPTOUT);
  h = Telemetry.getKeyedHistogramById("TELEMETRY_TEST_KEYED_RELEASE_OPTIN");
  Assert.equal(h.dataset(), RELEASE_CHANNEL_OPTIN);
  h = Telemetry.getKeyedHistogramById("TELEMETRY_TEST_KEYED_RELEASE_OPTOUT");
  Assert.equal(h.dataset(), RELEASE_CHANNEL_OPTOUT);

  // Check that registeredHistogram works properly:
  // the opt-in set includes opt-out histograms, but not vice versa.
  let registered = Telemetry.registeredHistograms(RELEASE_CHANNEL_OPTIN, []);
  registered = new Set(registered);
  Assert.ok(registered.has("TELEMETRY_TEST_FLAG"));
  Assert.ok(registered.has("TELEMETRY_TEST_RELEASE_OPTIN"));
  Assert.ok(registered.has("TELEMETRY_TEST_RELEASE_OPTOUT"));
  registered = Telemetry.registeredHistograms(RELEASE_CHANNEL_OPTOUT, []);
  registered = new Set(registered);
  Assert.ok(!registered.has("TELEMETRY_TEST_FLAG"));
  Assert.ok(!registered.has("TELEMETRY_TEST_RELEASE_OPTIN"));
  Assert.ok(registered.has("TELEMETRY_TEST_RELEASE_OPTOUT"));

  // Check that registeredKeyedHistograms works properly
  registered = Telemetry.registeredKeyedHistograms(RELEASE_CHANNEL_OPTIN, []);
  registered = new Set(registered);
  Assert.ok(registered.has("TELEMETRY_TEST_KEYED_FLAG"));
  Assert.ok(registered.has("TELEMETRY_TEST_KEYED_RELEASE_OPTOUT"));
  registered = Telemetry.registeredKeyedHistograms(RELEASE_CHANNEL_OPTOUT, []);
  registered = new Set(registered);
  Assert.ok(!registered.has("TELEMETRY_TEST_KEYED_FLAG"));
  Assert.ok(registered.has("TELEMETRY_TEST_KEYED_RELEASE_OPTOUT"));
});
+
// Check session vs. subsession histogram snapshots: both accumulate the
// same adds, but clear(true) and snapshotSubsessionHistograms(true) reset
// only the subsession copy. The assertions depend on this exact sequence.
add_task({
  skip_if: () => gIsAndroid
},
function* test_subsession() {
  const ID = "TELEMETRY_TEST_COUNT";
  const FLAG = "TELEMETRY_TEST_FLAG";
  let h = Telemetry.getHistogramById(ID);
  let flag = Telemetry.getHistogramById(FLAG);

  // Both original and duplicate should start out the same.
  h.clear();
  let snapshot = Telemetry.histogramSnapshots;
  let subsession = Telemetry.snapshotSubsessionHistograms();
  Assert.ok(!(ID in snapshot));
  Assert.ok(!(ID in subsession));

  // They should instantiate and pick-up the count.
  h.add(1);
  snapshot = Telemetry.histogramSnapshots;
  subsession = Telemetry.snapshotSubsessionHistograms();
  Assert.ok(ID in snapshot);
  Assert.ok(ID in subsession);
  Assert.equal(snapshot[ID].sum, 1);
  Assert.equal(subsession[ID].sum, 1);

  // They should still reset properly.
  h.clear();
  snapshot = Telemetry.histogramSnapshots;
  subsession = Telemetry.snapshotSubsessionHistograms();
  Assert.ok(!(ID in snapshot));
  Assert.ok(!(ID in subsession));

  // Both should instantiate and pick-up the count.
  h.add(1);
  snapshot = Telemetry.histogramSnapshots;
  subsession = Telemetry.snapshotSubsessionHistograms();
  Assert.equal(snapshot[ID].sum, 1);
  Assert.equal(subsession[ID].sum, 1);

  // Check that we are able to only reset the duplicate histogram
  // (clear(true) clears just the subsession copy).
  h.clear(true);
  snapshot = Telemetry.histogramSnapshots;
  subsession = Telemetry.snapshotSubsessionHistograms();
  Assert.ok(ID in snapshot);
  Assert.ok(ID in subsession);
  Assert.equal(snapshot[ID].sum, 1);
  Assert.equal(subsession[ID].sum, 0);

  // Both should register the next count.
  h.add(1);
  snapshot = Telemetry.histogramSnapshots;
  subsession = Telemetry.snapshotSubsessionHistograms();
  Assert.equal(snapshot[ID].sum, 2);
  Assert.equal(subsession[ID].sum, 1);

  // Retrieve a subsession snapshot and pass the flag to
  // clear subsession histograms too.
  h.clear();
  flag.clear();
  h.add(1);
  flag.add(1);
  snapshot = Telemetry.histogramSnapshots;
  subsession = Telemetry.snapshotSubsessionHistograms(true);
  Assert.ok(ID in snapshot);
  Assert.ok(ID in subsession);
  Assert.ok(FLAG in snapshot);
  Assert.ok(FLAG in subsession);
  Assert.equal(snapshot[ID].sum, 1);
  Assert.equal(subsession[ID].sum, 1);
  Assert.equal(snapshot[FLAG].sum, 1);
  Assert.equal(subsession[FLAG].sum, 1);

  // The next subsesssion snapshot should show the histograms
  // got reset.
  snapshot = Telemetry.histogramSnapshots;
  subsession = Telemetry.snapshotSubsessionHistograms();
  Assert.ok(ID in snapshot);
  Assert.ok(ID in subsession);
  Assert.ok(FLAG in snapshot);
  Assert.ok(FLAG in subsession);
  Assert.equal(snapshot[ID].sum, 1);
  Assert.equal(subsession[ID].sum, 0);
  Assert.equal(snapshot[FLAG].sum, 1);
  Assert.equal(subsession[FLAG].sum, 0);
});
+
// Keyed counterpart of test_subsession: per-key session vs. subsession
// snapshots, subsession-only clearing, and snapshotSubsessionAndClear().
add_task({
  skip_if: () => gIsAndroid
},
function* test_keyed_subsession() {
  let h = Telemetry.getKeyedHistogramById("TELEMETRY_TEST_KEYED_FLAG");
  const KEY = "foo";

  // Both original and subsession should start out the same.
  h.clear();
  Assert.ok(!(KEY in h.snapshot()));
  Assert.ok(!(KEY in h.subsessionSnapshot()));
  Assert.equal(h.snapshot(KEY).sum, 0);
  Assert.equal(h.subsessionSnapshot(KEY).sum, 0);

  // Both should register the flag.
  h.add(KEY, 1);
  Assert.ok(KEY in h.snapshot());
  Assert.ok(KEY in h.subsessionSnapshot());
  Assert.equal(h.snapshot(KEY).sum, 1);
  Assert.equal(h.subsessionSnapshot(KEY).sum, 1);

  // Check that we are able to only reset the subsession histogram.
  h.clear(true);
  Assert.ok(KEY in h.snapshot());
  Assert.ok(!(KEY in h.subsessionSnapshot()));
  Assert.equal(h.snapshot(KEY).sum, 1);
  Assert.equal(h.subsessionSnapshot(KEY).sum, 0);

  // Setting the flag again should make both match again.
  h.add(KEY, 1);
  Assert.ok(KEY in h.snapshot());
  Assert.ok(KEY in h.subsessionSnapshot());
  Assert.equal(h.snapshot(KEY).sum, 1);
  Assert.equal(h.subsessionSnapshot(KEY).sum, 1);

  // Check that "snapshot and clear" works properly: it returns the
  // subsession data and then resets only the subsession copy.
  let snapshot = h.snapshot();
  let subsession = h.snapshotSubsessionAndClear();
  Assert.ok(KEY in snapshot);
  Assert.ok(KEY in subsession);
  Assert.equal(snapshot[KEY].sum, 1);
  Assert.equal(subsession[KEY].sum, 1);

  subsession = h.subsessionSnapshot();
  Assert.ok(!(KEY in subsession));
  Assert.equal(h.subsessionSnapshot(KEY).sum, 0);
});
diff --git a/toolkit/components/telemetry/tests/unit/xpcshell.ini b/toolkit/components/telemetry/tests/unit/xpcshell.ini
new file mode 100644
index 000000000..74067580a
--- /dev/null
+++ b/toolkit/components/telemetry/tests/unit/xpcshell.ini
@@ -0,0 +1,63 @@
+[DEFAULT]
+head = head.js
+tail =
+firefox-appdir = browser
+# The *.xpi files are only needed for test_TelemetryEnvironment.js, but
+# xpcshell fails to install tests if we move them under the test entry.
+support-files =
+ ../search/chrome.manifest
+ ../search/searchTest.jar
+ dictionary.xpi
+ experiment.xpi
+ extension.xpi
+ extension-2.xpi
+ engine.xml
+ system.xpi
+ restartless.xpi
+ theme.xpi
+ !/toolkit/mozapps/extensions/test/xpcshell/head_addons.js
+generated-files =
+ dictionary.xpi
+ experiment.xpi
+ extension.xpi
+ extension-2.xpi
+ system.xpi
+ restartless.xpi
+ theme.xpi
+
+[test_nsITelemetry.js]
+[test_SubsessionChaining.js]
+tags = addons
+[test_TelemetryEnvironment.js]
+skip-if = os == "android"
+tags = addons
+[test_PingAPI.js]
+skip-if = os == "android"
+[test_TelemetryFlagClear.js]
+[test_TelemetryLateWrites.js]
+[test_TelemetryLockCount.js]
+[test_TelemetryLog.js]
+[test_TelemetryController.js]
+tags = addons
+[test_TelemetryController_idle.js]
+[test_TelemetryControllerShutdown.js]
+tags = addons
+[test_TelemetryStopwatch.js]
+[test_TelemetryControllerBuildID.js]
+[test_TelemetrySendOldPings.js]
+skip-if = os == "android" # Disabled due to intermittent orange on Android
+tags = addons
+[test_TelemetrySession.js]
+tags = addons
+[test_ThreadHangStats.js]
+run-sequentially = Bug 1046307, test can fail intermittently when CPU load is high
+[test_TelemetrySend.js]
+[test_ChildHistograms.js]
+skip-if = os == "android"
+tags = addons
+[test_TelemetryReportingPolicy.js]
+tags = addons
+[test_TelemetryScalars.js]
+[test_TelemetryTimestamps.js]
+skip-if = toolkit == 'android'
+[test_TelemetryEvents.js]