summaryrefslogtreecommitdiffstats
path: root/services/sync/modules
diff options
context:
space:
mode:
authorMatt A. Tobin <mattatobin@localhost.localdomain>2018-02-02 04:16:08 -0500
committerMatt A. Tobin <mattatobin@localhost.localdomain>2018-02-02 04:16:08 -0500
commit5f8de423f190bbb79a62f804151bc24824fa32d8 (patch)
tree10027f336435511475e392454359edea8e25895d /services/sync/modules
parent49ee0794b5d912db1f95dce6eb52d781dc210db5 (diff)
downloadUXP-5f8de423f190bbb79a62f804151bc24824fa32d8.tar
UXP-5f8de423f190bbb79a62f804151bc24824fa32d8.tar.gz
UXP-5f8de423f190bbb79a62f804151bc24824fa32d8.tar.lz
UXP-5f8de423f190bbb79a62f804151bc24824fa32d8.tar.xz
UXP-5f8de423f190bbb79a62f804151bc24824fa32d8.zip
Add m-esr52 at 52.6.0
Diffstat (limited to 'services/sync/modules')
-rw-r--r--services/sync/modules/FxaMigrator.jsm99
-rw-r--r--services/sync/modules/SyncedTabs.jsm301
-rw-r--r--services/sync/modules/addonsreconciler.js676
-rw-r--r--services/sync/modules/addonutils.js506
-rw-r--r--services/sync/modules/bookmark_validator.js784
-rw-r--r--services/sync/modules/browserid_identity.js869
-rw-r--r--services/sync/modules/collection_validator.js204
-rw-r--r--services/sync/modules/constants.js198
-rw-r--r--services/sync/modules/engines.js1813
-rw-r--r--services/sync/modules/engines/addons.js813
-rw-r--r--services/sync/modules/engines/bookmarks.js1378
-rw-r--r--services/sync/modules/engines/clients.js782
-rw-r--r--services/sync/modules/engines/extension-storage.js277
-rw-r--r--services/sync/modules/engines/forms.js305
-rw-r--r--services/sync/modules/engines/history.js442
-rw-r--r--services/sync/modules/engines/passwords.js371
-rw-r--r--services/sync/modules/engines/prefs.js273
-rw-r--r--services/sync/modules/engines/tabs.js393
-rw-r--r--services/sync/modules/identity.js605
-rw-r--r--services/sync/modules/jpakeclient.js773
-rw-r--r--services/sync/modules/keys.js214
-rw-r--r--services/sync/modules/main.js30
-rw-r--r--services/sync/modules/policies.js983
-rw-r--r--services/sync/modules/record.js1039
-rw-r--r--services/sync/modules/resource.js669
-rw-r--r--services/sync/modules/rest.js90
-rw-r--r--services/sync/modules/service.js1756
-rw-r--r--services/sync/modules/stages/cluster.js113
-rw-r--r--services/sync/modules/stages/declined.js76
-rw-r--r--services/sync/modules/stages/enginesync.js449
-rw-r--r--services/sync/modules/status.js145
-rw-r--r--services/sync/modules/telemetry.js578
-rw-r--r--services/sync/modules/userapi.js224
-rw-r--r--services/sync/modules/util.js797
34 files changed, 19025 insertions, 0 deletions
diff --git a/services/sync/modules/FxaMigrator.jsm b/services/sync/modules/FxaMigrator.jsm
new file mode 100644
index 000000000..735b60144
--- /dev/null
+++ b/services/sync/modules/FxaMigrator.jsm
@@ -0,0 +1,99 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+// Strict-mode directive. The original read `"use strict;"` -- with the
+// semicolon INSIDE the string -- which is just an inert expression statement,
+// so strict mode was never actually enabled for this module.
+"use strict";
+
+// Note that this module used to supervise the step-by-step migration from
+// a legacy Sync account to a FxA-based Sync account. In bug 1205928, this
+// changed to automatically disconnect the legacy Sync account.
+
+const {classes: Cc, interfaces: Ci, results: Cr, utils: Cu} = Components;
+
+Cu.import("resource://gre/modules/XPCOMUtils.jsm");
+Cu.import("resource://gre/modules/Log.jsm");
+Cu.import("resource://gre/modules/Services.jsm");
+
+// Lazily resolve the Weave XPCOM service; the getter only runs on first
+// access, avoiding any startup cost if migration never happens.
+XPCOMUtils.defineLazyGetter(this, "WeaveService", function() {
+  return Cc["@mozilla.org/weave/service;1"]
+         .getService(Components.interfaces.nsISupports)
+         .wrappedJSObject;
+});
+
+// Lazily import the Sync entry point (exposes Weave.Service etc.).
+XPCOMUtils.defineLazyModuleGetter(this, "Weave",
+                                  "resource://services-sync/main.js");
+
+// We send this notification when we perform the disconnection. The browser
+// window will show a one-off notification bar.
+const OBSERVER_STATE_CHANGE_TOPIC = "fxa-migration:state-changed";
+
+const OBSERVER_TOPICS = [
+ "xpcom-shutdown",
+ "weave:eol",
+];
+
+// Constructor: registers this object as an observer for "xpcom-shutdown"
+// (cleanup) and "weave:eol" (triggers the legacy-Sync disconnect in
+// observe() below).
+function Migrator() {
+  // Leave the log-level as Debug - Sync will setup log appenders such that
+  // these messages generally will not be seen unless other log related
+  // prefs are set.
+  this.log.level = Log.Level.Debug;
+
+  for (let topic of OBSERVER_TOPICS) {
+    Services.obs.addObserver(this, topic, false);
+  }
+}
+
+Migrator.prototype = {
+  log: Log.repository.getLogger("Sync.SyncMigration"),
+
+  // Unregister all observers added by the constructor. Called automatically
+  // on xpcom-shutdown; safe to call manually once.
+  finalize() {
+    for (let topic of OBSERVER_TOPICS) {
+      Services.obs.removeObserver(this, topic);
+    }
+  },
+
+  // nsIObserver entry point for OBSERVER_TOPICS. Any topic other than
+  // "xpcom-shutdown" (i.e. "weave:eol") falls into the default branch.
+  observe(subject, topic, data) {
+    this.log.debug("observed " + topic);
+    switch (topic) {
+      case "xpcom-shutdown":
+        this.finalize();
+        break;
+
+      default:
+        // this notification when configured with legacy Sync means we want to
+        // disconnect
+        if (!WeaveService.fxAccountsEnabled) {
+          this.log.info("Disconnecting from legacy Sync");
+          // Set up an observer for when the disconnection is complete.
+          let observe;
+          Services.obs.addObserver(observe = () => {
+            this.log.info("observed that startOver is complete");
+            Services.obs.removeObserver(observe, "weave:service:start-over:finish");
+            // Send the notification for the UI.
+            Services.obs.notifyObservers(null, OBSERVER_STATE_CHANGE_TOPIC, null);
+          }, "weave:service:start-over:finish", false);
+
+          // Do the disconnection.
+          Weave.Service.startOver();
+        }
+    }
+  },
+
+  // Returns {text, href} describing the EOL "learn more" support link, or
+  // null when the support-base-URL pref is missing.
+  get learnMoreLink() {
+    try {
+      // `var` (not `let`) is deliberate: the declaration hoists to function
+      // scope so `url` remains usable after the try block.
+      var url = Services.prefs.getCharPref("app.support.baseURL");
+    } catch (err) {
+      return null;
+    }
+    url += "sync-upgrade";
+    let sb = Services.strings.createBundle("chrome://weave/locale/services/sync.properties");
+    return {
+      text: sb.GetStringFromName("sync.eol.learnMore.label"),
+      href: Services.urlFormatter.formatURL(url),
+    };
+  },
+};
+
+// We expose a singleton
+this.EXPORTED_SYMBOLS = ["fxaMigrator"];
+var fxaMigrator = new Migrator();
diff --git a/services/sync/modules/SyncedTabs.jsm b/services/sync/modules/SyncedTabs.jsm
new file mode 100644
index 000000000..1a69e3564
--- /dev/null
+++ b/services/sync/modules/SyncedTabs.jsm
@@ -0,0 +1,301 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+"use strict";
+
+this.EXPORTED_SYMBOLS = ["SyncedTabs"];
+
+
+const { classes: Cc, interfaces: Ci, results: Cr, utils: Cu } = Components;
+
+Cu.import("resource://gre/modules/Services.jsm");
+Cu.import("resource://gre/modules/XPCOMUtils.jsm");
+Cu.import("resource://gre/modules/Task.jsm");
+Cu.import("resource://gre/modules/Log.jsm");
+Cu.import("resource://gre/modules/PlacesUtils.jsm", this);
+Cu.import("resource://services-sync/main.js");
+Cu.import("resource://gre/modules/Preferences.jsm");
+
+// The Sync XPCOM service, resolved lazily on first access (used below to
+// check `weaveXPCService.ready` before touching Sync state).
+XPCOMUtils.defineLazyGetter(this, "weaveXPCService", function() {
+  return Cc["@mozilla.org/weave/service;1"]
+         .getService(Ci.nsISupports)
+         .wrappedJSObject;
+});
+
+// from MDN...
+// Escape every regex metacharacter in `string` so it can be embedded in a
+// RegExp pattern and match literally ("$&" re-inserts the matched character
+// preceded by a backslash). Used by _tabMatchesFilter below.
+function escapeRegExp(string) {
+  return string.replace(/[.*+?^${}()|[\]\\]/g, "\\$&");
+}
+
+// A topic we fire whenever we have new tabs available. This might be due
+// to a request made by this module to refresh the tab list, or as the result
+// of a regularly scheduled sync. The intent is that consumers just listen
+// for this notification and update their UI in response.
+const TOPIC_TABS_CHANGED = "services.sync.tabs.changed";
+
+// The interval, in seconds, before which we consider the existing list
+// of tabs "fresh enough" and don't force a new sync.
+const TABS_FRESH_ENOUGH_INTERVAL = 30;
+
+let log = Log.repository.getLogger("Sync.RemoteTabs");
+// A new scope to do the logging thang...
+// A dump appender is attached only when the services.sync.log.logger.tabs
+// pref is set; an unrecognized level name falls back to Debug.
+(function() {
+  let level = Preferences.get("services.sync.log.logger.tabs");
+  if (level) {
+    let appender = new Log.DumpAppender();
+    log.level = appender.level = Log.Level[level] || Log.Level.Debug;
+    log.addAppender(appender);
+  }
+})();
+
+
+// A private singleton that does the work.
+let SyncedTabsInternal = {
+  /* Make a "tab" record. Returns a promise */
+  _makeTab: Task.async(function* (client, tab, url, showRemoteIcons) {
+    let icon;
+    if (showRemoteIcons) {
+      icon = tab.icon;
+    }
+    // Fall back to the locally-known favicon for this URL when the remote
+    // record carried none (or remote icons are disabled by pref).
+    if (!icon) {
+      try {
+        icon = (yield PlacesUtils.promiseFaviconLinkUrl(url)).spec;
+      } catch (ex) { /* no favicon available */ }
+    }
+    if (!icon) {
+      icon = "";
+    }
+    return {
+      type: "tab",
+      title: tab.title || url,
+      url,
+      icon,
+      client: client.id,
+      lastUsed: tab.lastUsed,
+    };
+  }),
+
+  /* Make a "client" record. Returns a promise for consistency with _makeTab */
+  _makeClient: Task.async(function* (client) {
+    return {
+      id: client.id,
+      type: "client",
+      name: Weave.Service.clientsEngine.getClientName(client.id),
+      isMobile: Weave.Service.clientsEngine.isMobile(client.id),
+      lastModified: client.lastModified * 1000, // sec to ms
+      tabs: []
+    };
+  }),
+
+  // Case-insensitive substring match of |filter| against a tab's URL or
+  // title; the filter is regex-escaped first so it matches verbatim.
+  _tabMatchesFilter(tab, filter) {
+    let reFilter = new RegExp(escapeRegExp(filter), "i");
+    return tab.url.match(reFilter) || tab.title.match(reFilter);
+  },
+
+  // Build the deduplicated list of client records (each with a .tabs array),
+  // optionally filtered. Returns a promise for an array; empty if Sync is
+  // not yet ready.
+  getTabClients: Task.async(function* (filter) {
+    log.info("Generating tab list with filter", filter);
+    let result = [];
+
+    // If Sync isn't ready, don't try and get anything.
+    if (!weaveXPCService.ready) {
+      log.debug("Sync isn't yet ready, so returning an empty tab list");
+      return result;
+    }
+
+    // A boolean that controls whether we should show the icon from the remote tab.
+    const showRemoteIcons = Preferences.get("services.sync.syncedTabs.showRemoteIcons", true);
+
+    let engine = Weave.Service.engineManager.get("tabs");
+
+    let seenURLs = new Set();
+    // NOTE(review): parentIndex is never used below, and the |guid| key of
+    // each getAllClients() entry is likewise unused -- apparent leftovers.
+    let parentIndex = 0;
+    let ntabs = 0;
+
+    for (let [guid, client] of Object.entries(engine.getAllClients())) {
+      if (!Weave.Service.clientsEngine.remoteClientExists(client.id)) {
+        continue;
+      }
+      let clientRepr = yield this._makeClient(client);
+      log.debug("Processing client", clientRepr);
+
+      for (let tab of client.tabs) {
+        let url = tab.urlHistory[0];
+        log.debug("remote tab", url);
+        // Note there are some issues with tracking "seen" tabs, including:
+        // * We really can't return the entire urlHistory record as we are
+        // only checking the first entry - others might be different.
+        // * We don't update the |lastUsed| timestamp to reflect the
+        // most-recently-seen time.
+        // In a followup we should consider simply dropping this |seenUrls|
+        // check and return duplicate records - it seems the user will be more
+        // confused by tabs not showing up on a device (because it was detected
+        // as a dupe so it only appears on a different device) than being
+        // confused by seeing the same tab on different clients.
+        if (!url || seenURLs.has(url)) {
+          continue;
+        }
+        let tabRepr = yield this._makeTab(client, tab, url, showRemoteIcons);
+        if (filter && !this._tabMatchesFilter(tabRepr, filter)) {
+          continue;
+        }
+        seenURLs.add(url);
+        clientRepr.tabs.push(tabRepr);
+      }
+      // We return all clients, even those without tabs - the consumer should
+      // filter it if they care.
+      ntabs += clientRepr.tabs.length;
+      result.push(clientRepr);
+    }
+    log.info(`Final tab list has ${result.length} clients with ${ntabs} tabs.`);
+    return result;
+  }),
+
+  // Trigger a tabs-only sync. Resolves with true if a sync was performed,
+  // false if it was skipped (recent enough, or Sync not configured).
+  syncTabs(force) {
+    if (!force) {
+      // Don't bother refetching tabs if we already did so recently
+      let lastFetch = Preferences.get("services.sync.lastTabFetch", 0);
+      let now = Math.floor(Date.now() / 1000);
+      if (now - lastFetch < TABS_FRESH_ENOUGH_INTERVAL) {
+        log.info("_refetchTabs was done recently, do not doing it again");
+        return Promise.resolve(false);
+      }
+    }
+
+    // If Sync isn't configured don't try and sync, else we will get reports
+    // of a login failure.
+    if (Weave.Status.checkSetup() == Weave.CLIENT_NOT_CONFIGURED) {
+      log.info("Sync client is not configured, so not attempting a tab sync");
+      return Promise.resolve(false);
+    }
+    // Ask Sync to just do the tabs engine if it can.
+    // Sync is currently synchronous, so do it after an event-loop spin to help
+    // keep the UI responsive.
+    return new Promise((resolve, reject) => {
+      Services.tm.currentThread.dispatch(() => {
+        try {
+          log.info("Doing a tab sync.");
+          Weave.Service.sync(["tabs"]);
+          resolve(true);
+        } catch (ex) {
+          log.error("Sync failed", ex);
+          reject(ex);
+        };
+      }, Ci.nsIThread.DISPATCH_NORMAL);
+    });
+  },
+
+  // nsIObserver: translates Sync/pref notifications into TOPIC_TABS_CHANGED
+  // so UI consumers have a single topic to watch.
+  observe(subject, topic, data) {
+    log.trace(`observed topic=${topic}, data=${data}, subject=${subject}`);
+    switch (topic) {
+      case "weave:engine:sync:finish":
+        if (data != "tabs") {
+          return;
+        }
+        // The tabs engine just finished syncing
+        // Set our lastTabFetch pref here so it tracks both explicit sync calls
+        // and normally scheduled ones.
+        Preferences.set("services.sync.lastTabFetch", Math.floor(Date.now() / 1000));
+        Services.obs.notifyObservers(null, TOPIC_TABS_CHANGED, null);
+        break;
+      case "weave:service:start-over":
+        // start-over needs to notify so consumers find no tabs.
+        Preferences.reset("services.sync.lastTabFetch");
+        Services.obs.notifyObservers(null, TOPIC_TABS_CHANGED, null);
+        break;
+      case "nsPref:changed":
+        Services.obs.notifyObservers(null, TOPIC_TABS_CHANGED, null);
+        break;
+      default:
+        break;
+    }
+  },
+
+  // Returns true if Sync is configured to Sync tabs, false otherwise
+  get isConfiguredToSyncTabs() {
+    if (!weaveXPCService.ready) {
+      log.debug("Sync isn't yet ready; assuming tab engine is enabled");
+      return true;
+    }
+
+    let engine = Weave.Service.engineManager.get("tabs");
+    return engine && engine.enabled;
+  },
+
+  // True once the tabs engine has completed at least one sync this session.
+  get hasSyncedThisSession() {
+    let engine = Weave.Service.engineManager.get("tabs");
+    return engine && engine.hasSyncedThisSession;
+  },
+};
+
+Services.obs.addObserver(SyncedTabsInternal, "weave:engine:sync:finish", false);
+Services.obs.addObserver(SyncedTabsInternal, "weave:service:start-over", false);
+// Observe the pref that indicates the state of the tabs engine has changed.
+// This will force consumers to re-evaluate the state of sync and update
+// accordingly.
+Services.prefs.addObserver("services.sync.engine.tabs", SyncedTabsInternal, false);
+
+// The public interface.
+this.SyncedTabs = {
+  // A mock-point for tests.
+  _internal: SyncedTabsInternal,
+
+  // We make the topic for the observer notification public.
+  TOPIC_TABS_CHANGED,
+
+  // Returns true if Sync is configured to Sync tabs, false otherwise
+  get isConfiguredToSyncTabs() {
+    return this._internal.isConfiguredToSyncTabs;
+  },
+
+  // Returns true if a tab sync has completed once this session. If this
+  // returns false, then getting back no clients/tabs possibly just means we
+  // are waiting for that first sync to complete.
+  get hasSyncedThisSession() {
+    return this._internal.hasSyncedThisSession;
+  },
+
+  // Return a promise that resolves with an array of client records, each with
+  // a .tabs array. Note that part of the contract for this module is that the
+  // returned objects are not shared between invocations, so callers are free
+  // to mutate the returned objects (eg, sort, truncate) however they see fit.
+  getTabClients(query) {
+    return this._internal.getTabClients(query);
+  },
+
+  // Starts a background request to start syncing tabs. Returns a promise that
+  // resolves when the sync is complete, but there's no resolved value -
+  // callers should be listening for TOPIC_TABS_CHANGED.
+  // If |force| is true we always sync. If false, we only sync if the most
+  // recent sync wasn't "recently".
+  syncTabs(force) {
+    return this._internal.syncTabs(force);
+  },
+
+  // In-place: sort each client's tabs most-recent-first (truncated to
+  // maxTabs when finite), then order the clients by their newest tab;
+  // clients with no tabs sort to the end.
+  sortTabClientsByLastUsed(clients, maxTabs = Infinity) {
+    // First sort and filter the list of tabs for each client. Note that
+    // this module promises that the objects it returns are never
+    // shared, so we are free to mutate those objects directly.
+    for (let client of clients) {
+      let tabs = client.tabs;
+      tabs.sort((a, b) => b.lastUsed - a.lastUsed);
+      if (Number.isFinite(maxTabs)) {
+        client.tabs = tabs.slice(0, maxTabs);
+      }
+    }
+    // Now sort the clients - the clients are sorted in the order of the
+    // most recent tab for that client (ie, it is important the tabs for
+    // each client are already sorted.)
+    clients.sort((a, b) => {
+      if (a.tabs.length == 0) {
+        return 1; // b comes first.
+      }
+      if (b.tabs.length == 0) {
+        return -1; // a comes first.
+      }
+      return b.tabs[0].lastUsed - a.tabs[0].lastUsed;
+    });
+  },
+};
+
diff --git a/services/sync/modules/addonsreconciler.js b/services/sync/modules/addonsreconciler.js
new file mode 100644
index 000000000..a60fc8d56
--- /dev/null
+++ b/services/sync/modules/addonsreconciler.js
@@ -0,0 +1,676 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/**
+ * This file contains middleware to reconcile state of AddonManager for
+ * purposes of tracking events for Sync. The content in this file exists
+ * because AddonManager does not have a getChangesSinceX() API and adding
+ * that functionality properly was deemed too time-consuming at the time
+ * add-on sync was originally written. If/when AddonManager adds this API,
+ * this file can go away and the add-ons engine can be rewritten to use it.
+ *
+ * It was decided to have this tracking functionality exist in a separate
+ * standalone file so it could be more easily understood, tested, and
+ * hopefully ported.
+ */
+
+"use strict";
+
+var Cu = Components.utils;
+
+Cu.import("resource://gre/modules/Log.jsm");
+Cu.import("resource://services-sync/util.js");
+Cu.import("resource://gre/modules/AddonManager.jsm");
+
+const DEFAULT_STATE_FILE = "addonsreconciler";
+
+this.CHANGE_INSTALLED = 1;
+this.CHANGE_UNINSTALLED = 2;
+this.CHANGE_ENABLED = 3;
+this.CHANGE_DISABLED = 4;
+
+this.EXPORTED_SYMBOLS = ["AddonsReconciler", "CHANGE_INSTALLED",
+ "CHANGE_UNINSTALLED", "CHANGE_ENABLED",
+ "CHANGE_DISABLED"];
+/**
+ * Maintains state of add-ons.
+ *
+ * State is maintained in 2 data structures, an object mapping add-on IDs
+ * to metadata and an array of changes over time. The object mapping can be
+ * thought of as a minimal copy of data from AddonManager which is needed for
+ * Sync. The array is effectively a log of changes over time.
+ *
+ * The data structures are persisted to disk by serializing to a JSON file in
+ * the current profile. The data structures are updated by 2 mechanisms. First,
+ * they can be refreshed from the global state of the AddonManager. This is a
+ * sure-fire way of ensuring the reconciler is up to date. Second, the
+ * reconciler adds itself as an AddonManager listener. When it receives change
+ * notifications, it updates its internal state incrementally.
+ *
+ * The internal state is persisted to a JSON file in the profile directory.
+ *
+ * An instance of this is bound to an AddonsEngine instance. In reality, it
+ * likely exists as a singleton. To AddonsEngine, it functions as a store and
+ * an entity which emits events for tracking.
+ *
+ * The usage pattern for instances of this class is:
+ *
+ * let reconciler = new AddonsReconciler();
+ * reconciler.loadState(null, function(error) { ... });
+ *
+ * // At this point, your instance should be ready to use.
+ *
+ * When you are finished with the instance, please call:
+ *
+ * reconciler.stopListening();
+ * reconciler.saveState(...);
+ *
+ * There are 2 classes of listeners in the AddonManager: AddonListener and
+ * InstallListener. This class is a listener for both (member functions just
+ * get called directly).
+ *
+ * When an add-on is installed, listeners are called in the following order:
+ *
+ * IL.onInstallStarted, AL.onInstalling, IL.onInstallEnded, AL.onInstalled
+ *
+ * For non-restartless add-ons, an application restart may occur between
+ * IL.onInstallEnded and AL.onInstalled. Unfortunately, Sync likely will
+ * not be loaded when AL.onInstalled is fired shortly after application
+ * start, so it won't see this event. Therefore, for add-ons requiring a
+ * restart, Sync treats the IL.onInstallEnded event as good enough to
+ * indicate an install. For restartless add-ons, Sync assumes AL.onInstalled
+ * will follow shortly after IL.onInstallEnded and thus it ignores
+ * IL.onInstallEnded.
+ *
+ * The listeners can also see events related to the download of the add-on.
+ * This class isn't interested in those. However, there are failure events,
+ * IL.onDownloadFailed and IL.onDownloadCanceled which get called if a
+ * download doesn't complete successfully.
+ *
+ * For uninstalls, we see AL.onUninstalling then AL.onUninstalled. Like
+ * installs, the events could be separated by an application restart and Sync
+ * may not see the onUninstalled event. Again, if we require a restart, we
+ * react to onUninstalling. If not, we assume we'll get onUninstalled.
+ *
+ * Enabling and disabling work by sending:
+ *
+ * AL.onEnabling, AL.onEnabled
+ * AL.onDisabling, AL.onDisabled
+ *
+ * Again, they may be separated by a restart, so we heed the requiresRestart
+ * flag.
+ *
+ * Actions can be undone. All undoable actions notify the same
+ * AL.onOperationCancelled event. We treat this event like any other.
+ *
+ * Restartless add-ons have interesting behavior during uninstall. These
+ * add-ons are first disabled then they are actually uninstalled. So, we will
+ * see AL.onDisabling and AL.onDisabled. The onUninstalling and onUninstalled
+ * events only come after the Addon Manager is closed or another view is
+ * switched to. In the case of Sync performing the uninstall, the uninstall
+ * events will occur immediately. However, we still see disabling events and
+ * heed them like they were normal. In the end, the state is proper.
+ */
+// Constructor: sets up logging (level from the addonsreconciler log pref,
+// default Debug) and arranges for stopListening() to run at xpcom-shutdown
+// so AddonManager listeners are always removed.
+this.AddonsReconciler = function AddonsReconciler() {
+  this._log = Log.repository.getLogger("Sync.AddonsReconciler");
+  let level = Svc.Prefs.get("log.logger.addonsreconciler", "Debug");
+  this._log.level = Log.Level[level];
+
+  Svc.Obs.add("xpcom-shutdown", this.stopListening, this);
+};
+AddonsReconciler.prototype = {
+  /** Flag indicating whether we are listening to AddonManager events. */
+  _listening: false,
+
+  /**
+   * Whether state has been loaded from a file.
+   *
+   * State is loaded on demand if an operation requires it.
+   */
+  _stateLoaded: false,
+
+  /**
+   * Define this as false if the reconciler should not persist state
+   * to disk when handling events.
+   *
+   * This allows test code to avoid spinning to write during observer
+   * notifications and xpcom shutdown, which appears to cause hangs on WinXP
+   * (Bug 873861).
+   */
+  _shouldPersist: true,
+
+  /** Log logger instance */
+  _log: null,
+
+  /**
+   * Container for add-on metadata.
+   *
+   * Keys are add-on IDs. Values are objects which describe the state of the
+   * add-on. This is a minimal mirror of data that can be queried from
+   * AddonManager. In some cases, we retain data longer than AddonManager.
+   *
+   * NOTE(review): this default object lives on the prototype and would be
+   * shared between instances until loadState() reassigns it per instance;
+   * the same applies to _changes and (always) _listeners. Fine for the
+   * intended singleton use -- confirm if multiple instances are ever made.
+   */
+  _addons: {},
+
+  /**
+   * List of add-on changes over time.
+   *
+   * Each element is an array of [time, change, id].
+   */
+  _changes: [],
+
+  /**
+   * Objects subscribed to changes made to this instance.
+   */
+  _listeners: [],
+
+  /**
+   * Accessor for add-ons in this object.
+   *
+   * Returns an object mapping add-on IDs to objects containing metadata.
+   * Lazily loads persisted state on first use.
+   */
+  get addons() {
+    this._ensureStateLoaded();
+    return this._addons;
+  },
+
+  /**
+   * Load reconciler state from a file.
+   *
+   * The path is relative to the weave directory in the profile. If no
+   * path is given, the default one is used.
+   *
+   * If the file does not exist or there was an error parsing the file, the
+   * state will be transparently defined as empty.
+   *
+   * @param path
+   *        Path to load. ".json" is appended automatically. If not defined,
+   *        a default path will be consulted.
+   * @param callback
+   *        Callback to be executed upon file load. The callback receives a
+   *        truthy error argument signifying whether an error occurred and a
+   *        boolean indicating whether data was loaded.
+   */
+  loadState: function loadState(path, callback) {
+    let file = path || DEFAULT_STATE_FILE;
+    Utils.jsonLoad(file, this, function(json) {
+      // Always reset in-memory state first so a missing/invalid file leaves
+      // us with clean, empty state rather than stale data.
+      this._addons = {};
+      this._changes = [];
+
+      if (!json) {
+        this._log.debug("No data seen in loaded file: " + file);
+        if (callback) {
+          callback(null, false);
+        }
+
+        return;
+      }
+
+      // Only on-disk format version 1 is understood.
+      let version = json.version;
+      if (!version || version != 1) {
+        this._log.error("Could not load JSON file because version not " +
+                        "supported: " + version);
+        if (callback) {
+          callback(null, false);
+        }
+
+        return;
+      }
+
+      this._addons = json.addons;
+      // Timestamps are persisted as epoch milliseconds; rehydrate to Dates.
+      for (let id in this._addons) {
+        let record = this._addons[id];
+        record.modified = new Date(record.modified);
+      }
+
+      for (let [time, change, id] of json.changes) {
+        this._changes.push([new Date(time), change, id]);
+      }
+
+      if (callback) {
+        callback(null, true);
+      }
+    });
+  },
+
+  /**
+   * Saves the current state to a file in the local profile.
+   *
+   * @param path
+   *        String path in profile to save to. If not defined, the default
+   *        will be used.
+   * @param callback
+   *        Function to be invoked on save completion. No parameters will be
+   *        passed to callback.
+   */
+  saveState: function saveState(path, callback) {
+    let file = path || DEFAULT_STATE_FILE;
+    let state = {version: 1, addons: {}, changes: []};
+
+    for (let [id, record] of Object.entries(this._addons)) {
+      state.addons[id] = {};
+      for (let [k, v] of Object.entries(record)) {
+        // Date objects are serialized as epoch milliseconds so the state is
+        // plain JSON; loadState() converts them back.
+        if (k == "modified") {
+          state.addons[id][k] = v.getTime();
+        }
+        else {
+          state.addons[id][k] = v;
+        }
+      }
+    }
+
+    for (let [time, change, id] of this._changes) {
+      state.changes.push([time.getTime(), change, id]);
+    }
+
+    this._log.info("Saving reconciler state to file: " + file);
+    Utils.jsonSave(file, this, state, callback);
+  },
+
+  /**
+   * Registers a change listener with this instance.
+   *
+   * Change listeners are called every time a change is recorded. The listener
+   * is an object with the function "changeListener" that takes 3 arguments,
+   * the Date at which the change happened, the type of change (a CHANGE_*
+   * constant), and the add-on state object reflecting the current state of
+   * the add-on at the time of the change.
+   *
+   * @param listener
+   *        Object containing changeListener function.
+   */
+  addChangeListener: function addChangeListener(listener) {
+    // De-duplicate: registering the same listener twice is a no-op.
+    if (this._listeners.indexOf(listener) == -1) {
+      this._log.debug("Adding change listener.");
+      this._listeners.push(listener);
+    }
+  },
+
+  /**
+   * Removes a previously-installed change listener from the instance.
+   *
+   * Removing a listener that was never registered is a harmless no-op.
+   *
+   * @param listener
+   *        Listener instance to remove.
+   */
+  removeChangeListener: function removeChangeListener(listener) {
+    // Rebuild the array without the listener (identity comparison).
+    this._listeners = this._listeners.filter(function(element) {
+      if (element == listener) {
+        this._log.debug("Removing change listener.");
+        return false;
+      } else {
+        return true;
+      }
+    }.bind(this));
+  },
+
+  /**
+   * Tells the instance to start listening for AddonManager changes.
+   *
+   * This is typically called automatically when Sync is loaded.
+   * Idempotent: a second call while already listening does nothing.
+   */
+  startListening: function startListening() {
+    if (this._listening) {
+      return;
+    }
+
+    this._log.info("Registering as Add-on Manager listener.");
+    // This object acts as both an AddonListener and an InstallListener.
+    AddonManager.addAddonListener(this);
+    AddonManager.addInstallListener(this);
+    this._listening = true;
+  },
+
+  /**
+   * Tells the instance to stop listening for AddonManager changes.
+   *
+   * The reconciler should always be listening. This should only be called when
+   * the instance is being destroyed.
+   *
+   * This function will get called automatically on XPCOM shutdown. However, it
+   * is a best practice to call it yourself. Idempotent when not listening.
+   */
+  stopListening: function stopListening() {
+    if (!this._listening) {
+      return;
+    }
+
+    this._log.debug("Stopping listening and removing AddonManager listeners.");
+    AddonManager.removeInstallListener(this);
+    AddonManager.removeAddonListener(this);
+    this._listening = false;
+  },
+
+  /**
+   * Refreshes the global state of add-ons by querying the AddonManager.
+   *
+   * Rectifies every known add-on, marks locally-known add-ons that have
+   * vanished (and are not mid-install) as uninstalled, then persists state
+   * (unless _shouldPersist is false) and invokes callback.
+   */
+  refreshGlobalState: function refreshGlobalState(callback) {
+    this._log.info("Refreshing global state from AddonManager.");
+    this._ensureStateLoaded();
+
+    // Lazily-fetched list of in-flight AddonInstalls; only populated if we
+    // encounter a locally-known add-on missing from the AddonManager.
+    let installs;
+
+    AddonManager.getAllAddons(function (addons) {
+      let ids = {};
+
+      for (let addon of addons) {
+        ids[addon.id] = true;
+        this.rectifyStateFromAddon(addon);
+      }
+
+      // Look for locally-defined add-ons that no longer exist and update their
+      // record.
+      for (let [id, addon] of Object.entries(this._addons)) {
+        if (id in ids) {
+          continue;
+        }
+
+        // If the id isn't in ids, it means that the add-on has been deleted or
+        // the add-on is in the process of being installed. We detect the
+        // latter by seeing if an AddonInstall is found for this add-on.
+
+        if (!installs) {
+          // NOTE(review): Async is not brought in by any Cu.import visible in
+          // this file -- presumably exported via util.js; confirm. The
+          // makeSyncCallback/waitForSyncCallback pair spins synchronously.
+          let cb = Async.makeSyncCallback();
+          AddonManager.getAllInstalls(cb);
+          installs = Async.waitForSyncCallback(cb);
+        }
+
+        let installFound = false;
+        for (let install of installs) {
+          if (install.addon && install.addon.id == id &&
+              install.state == AddonManager.STATE_INSTALLED) {
+
+            installFound = true;
+            break;
+          }
+        }
+
+        if (installFound) {
+          continue;
+        }
+
+        if (addon.installed) {
+          addon.installed = false;
+          this._log.debug("Adding change because add-on not present in " +
+                          "Add-on Manager: " + id);
+          this._addChange(new Date(), CHANGE_UNINSTALLED, addon);
+        }
+      }
+
+      // See note for _shouldPersist.
+      if (this._shouldPersist) {
+        this.saveState(null, callback);
+      } else {
+        callback();
+      }
+    }.bind(this));
+  },
+
+  /**
+   * Rectifies the state of an add-on from an Addon instance.
+   *
+   * This basically says "given an Addon instance, assume it is truth and
+   * apply changes to the local state to reflect it."
+   *
+   * This function could result in change listeners being called if the local
+   * state differs from the passed add-on's state.
+   *
+   * @param addon
+   *        Addon instance being updated.
+   */
+  rectifyStateFromAddon: function rectifyStateFromAddon(addon) {
+    this._log.debug(`Rectifying state for addon ${addon.name} (version=${addon.version}, id=${addon.id})`);
+    this._ensureStateLoaded();
+
+    let id = addon.id;
+    let enabled = !addon.userDisabled;
+    let guid = addon.syncGUID;
+    let now = new Date();
+
+    // First sighting of this add-on: create its record and emit
+    // CHANGE_INSTALLED.
+    if (!(id in this._addons)) {
+      let record = {
+        id: id,
+        guid: guid,
+        enabled: enabled,
+        installed: true,
+        modified: now,
+        type: addon.type,
+        scope: addon.scope,
+        foreignInstall: addon.foreignInstall,
+        isSyncable: addon.isSyncable,
+      };
+      this._addons[id] = record;
+      this._log.debug("Adding change because add-on not present locally: " +
+                      id);
+      this._addChange(now, CHANGE_INSTALLED, record);
+      return;
+    }
+
+    let record = this._addons[id];
+    record.isSyncable = addon.isSyncable;
+
+    if (!record.installed) {
+      // It is possible the record is marked as uninstalled because an
+      // uninstall is pending.
+      if (!(addon.pendingOperations & AddonManager.PENDING_UNINSTALL)) {
+        record.installed = true;
+        record.modified = now;
+      }
+    }
+
+    if (record.enabled != enabled) {
+      record.enabled = enabled;
+      record.modified = now;
+      let change = enabled ? CHANGE_ENABLED : CHANGE_DISABLED;
+      this._log.debug("Adding change because enabled state changed: " + id);
+      this._addChange(new Date(), change, record);
+    }
+
+    if (record.guid != guid) {
+      record.guid = guid;
+      // We don't record a change because the Sync engine rectifies this on its
+      // own. This is tightly coupled with Sync. If this code is ever lifted
+      // outside of Sync, this exception should likely be removed.
+    }
+  },
+
+  /**
+   * Record a change in add-on state.
+   *
+   * @param date
+   *        Date at which the change occurred.
+   * @param change
+   *        The type of the change. A CHANGE_* constant.
+   * @param state
+   *        The new state of the add-on. From this.addons.
+   */
+  _addChange: function _addChange(date, change, state) {
+    this._log.info("Change recorded for " + state.id);
+    this._changes.push([date, change, state.id]);
+
+    // Notify listeners; a misbehaving listener must not break the others,
+    // so exceptions are logged and swallowed.
+    for (let listener of this._listeners) {
+      try {
+        listener.changeListener.call(listener, date, change, state);
+      } catch (ex) {
+        this._log.warn("Exception calling change listener", ex);
+      }
+    }
+  },
+
+  /**
+   * Obtain the set of changes to add-ons since the date passed.
+   *
+   * This will return an array of arrays. Each entry in the array has the
+   * elements [date, change_type, id], where
+   *
+   *   date - Date instance representing when the change occurred.
+   *   change_type - One of CHANGE_* constants.
+   *   id - ID of add-on that changed.
+   */
+  getChangesSinceDate: function getChangesSinceDate(date) {
+    this._ensureStateLoaded();
+
+    // _changes is appended chronologically, so everything from the first
+    // entry at/after |date| onward is the answer.
+    let length = this._changes.length;
+    for (let i = 0; i < length; i++) {
+      if (this._changes[i][0] >= date) {
+        return this._changes.slice(i);
+      }
+    }
+
+    return [];
+  },
+
+ /**
+ * Prunes all recorded changes from before the specified Date.
+ *
+ * @param date
+ * Entries older than this Date will be removed.
+ */
+ pruneChangesBeforeDate: function pruneChangesBeforeDate(date) {
+ this._ensureStateLoaded();
+
+ this._changes = this._changes.filter(function test_age(change) {
+ return change[0] >= date;
+ });
+ },
+
  /**
   * Obtains the set of all known add-on identifiers.
   *
   * NOTE(review): despite the name and the original doc comment ("guids as
   * keys"), the returned object is keyed by the add-on *IDs* that key
   * this.addons, not by Sync GUIDs — confirm against callers before relying
   * on either interpretation or renaming this method.
   *
   * @return Object with add-on IDs as keys and values of true.
   */
  getAllSyncGUIDs: function getAllSyncGUIDs() {
    let result = {};
    // this.addons is keyed by add-on ID; copy the keys into a set-like map.
    for (let id in this.addons) {
      result[id] = true;
    }

    return result;
  },
+
+ /**
+ * Obtain the add-on state record for an add-on by Sync GUID.
+ *
+ * If the add-on could not be found, returns null.
+ *
+ * @param guid
+ * Sync GUID of add-on to retrieve.
+ * @return Object on success on null on failure.
+ */
+ getAddonStateFromSyncGUID: function getAddonStateFromSyncGUID(guid) {
+ for (let id in this.addons) {
+ let addon = this.addons[id];
+ if (addon.guid == guid) {
+ return addon;
+ }
+ }
+
+ return null;
+ },
+
  /**
   * Ensures that state is loaded before continuing.
   *
   * This is called internally by anything that accesses the internal data
   * structures. It effectively just-in-time loads serialized state.
   *
   * NOTE: this blocks synchronously on loadState() via a spinning callback,
   * so it must only run where spinning the event loop is acceptable.
   */
  _ensureStateLoaded: function _ensureStateLoaded() {
    // Fast path: state has already been loaded by a previous call.
    if (this._stateLoaded) {
      return;
    }

    let cb = Async.makeSpinningCallback();
    this.loadState(null, cb);
    // Spins the event loop until loadState invokes the callback.
    cb.wait();
    this._stateLoaded = true;
  },
+
+ /**
+ * Handler that is invoked as part of the AddonManager listeners.
+ */
+ _handleListener: function _handlerListener(action, addon, requiresRestart) {
+ // Since this is called as an observer, we explicitly trap errors and
+ // log them to ourselves so we don't see errors reported elsewhere.
+ try {
+ let id = addon.id;
+ this._log.debug("Add-on change: " + action + " to " + id);
+
+ // We assume that every event for non-restartless add-ons is
+ // followed by another event and that this follow-up event is the most
+ // appropriate to react to. Currently we ignore onEnabling, onDisabling,
+ // and onUninstalling for non-restartless add-ons.
+ if (requiresRestart === false) {
+ this._log.debug("Ignoring " + action + " for restartless add-on.");
+ return;
+ }
+
+ switch (action) {
+ case "onEnabling":
+ case "onEnabled":
+ case "onDisabling":
+ case "onDisabled":
+ case "onInstalled":
+ case "onInstallEnded":
+ case "onOperationCancelled":
+ this.rectifyStateFromAddon(addon);
+ break;
+
+ case "onUninstalling":
+ case "onUninstalled":
+ let id = addon.id;
+ let addons = this.addons;
+ if (id in addons) {
+ let now = new Date();
+ let record = addons[id];
+ record.installed = false;
+ record.modified = now;
+ this._log.debug("Adding change because of uninstall listener: " +
+ id);
+ this._addChange(now, CHANGE_UNINSTALLED, record);
+ }
+ }
+
+ // See note for _shouldPersist.
+ if (this._shouldPersist) {
+ let cb = Async.makeSpinningCallback();
+ this.saveState(null, cb);
+ cb.wait();
+ }
+ }
+ catch (ex) {
+ this._log.warn("Exception", ex);
+ }
+ },
+
  // AddonListeners
  //
  // Thin adapters that forward AddonManager listener events to
  // _handleListener. The "-ing" events pass requiresRestart through; the
  // completed events omit it, so _handleListener receives `undefined` (not
  // strictly false) for them and therefore always processes them.
  onEnabling: function onEnabling(addon, requiresRestart) {
    this._handleListener("onEnabling", addon, requiresRestart);
  },
  onEnabled: function onEnabled(addon) {
    this._handleListener("onEnabled", addon);
  },
  onDisabling: function onDisabling(addon, requiresRestart) {
    this._handleListener("onDisabling", addon, requiresRestart);
  },
  onDisabled: function onDisabled(addon) {
    this._handleListener("onDisabled", addon);
  },
  onInstalling: function onInstalling(addon, requiresRestart) {
    this._handleListener("onInstalling", addon, requiresRestart);
  },
  onInstalled: function onInstalled(addon) {
    this._handleListener("onInstalled", addon);
  },
  onUninstalling: function onUninstalling(addon, requiresRestart) {
    this._handleListener("onUninstalling", addon, requiresRestart);
  },
  onUninstalled: function onUninstalled(addon) {
    this._handleListener("onUninstalled", addon);
  },
  onOperationCancelled: function onOperationCancelled(addon) {
    this._handleListener("onOperationCancelled", addon);
  },
+
  // InstallListeners
  //
  // Invoked when an AddonInstall finishes; handled like the other
  // state-changing events and rectified inside _handleListener.
  onInstallEnded: function onInstallEnded(install, addon) {
    this._handleListener("onInstallEnded", addon);
  }
+};
diff --git a/services/sync/modules/addonutils.js b/services/sync/modules/addonutils.js
new file mode 100644
index 000000000..95da6be0a
--- /dev/null
+++ b/services/sync/modules/addonutils.js
@@ -0,0 +1,506 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+"use strict";
+
+this.EXPORTED_SYMBOLS = ["AddonUtils"];
+
+var {interfaces: Ci, utils: Cu} = Components;
+
+Cu.import("resource://gre/modules/XPCOMUtils.jsm");
+Cu.import("resource://gre/modules/Log.jsm");
+Cu.import("resource://services-sync/util.js");
+
+XPCOMUtils.defineLazyModuleGetter(this, "AddonManager",
+ "resource://gre/modules/AddonManager.jsm");
+XPCOMUtils.defineLazyModuleGetter(this, "AddonRepository",
+ "resource://gre/modules/addons/AddonRepository.jsm");
+
/**
 * Internal implementation behind the lazily-created AddonUtils singleton.
 *
 * Sets up a logger whose verbosity is controlled by the
 * "log.logger.addonutils" preference.
 */
function AddonUtilsInternal() {
  this._log = Log.repository.getLogger("Sync.AddonUtils");
  // Log.jsm loggers expose a lowercase `level` property; the previous
  // capitalized `Level` assignment created an unused property and left the
  // logger at its default level, ignoring the preference.
  this._log.level = Log.Level[Svc.Prefs.get("log.logger.addonutils")];
}
+AddonUtilsInternal.prototype = {
+ /**
+ * Obtain an AddonInstall object from an AddonSearchResult instance.
+ *
+ * The callback will be invoked with the result of the operation. The
+ * callback receives 2 arguments, error and result. Error will be falsy
+ * on success or some kind of error value otherwise. The result argument
+ * will be an AddonInstall on success or null on failure. It is possible
+ * for the error to be falsy but result to be null. This could happen if
+ * an install was not found.
+ *
+ * @param addon
+ * AddonSearchResult to obtain install from.
+ * @param cb
+ * Function to be called with result of operation.
+ */
+ getInstallFromSearchResult:
+ function getInstallFromSearchResult(addon, cb) {
+
+ this._log.debug("Obtaining install for " + addon.id);
+
+ // We should theoretically be able to obtain (and use) addon.install if
+ // it is available. However, the addon.sourceURI rewriting won't be
+ // reflected in the AddonInstall, so we can't use it. If we ever get rid
+ // of sourceURI rewriting, we can avoid having to reconstruct the
+ // AddonInstall.
+ AddonManager.getInstallForURL(
+ addon.sourceURI.spec,
+ function handleInstall(install) {
+ cb(null, install);
+ },
+ "application/x-xpinstall",
+ undefined,
+ addon.name,
+ addon.iconURL,
+ addon.version
+ );
+ },
+
  /**
   * Installs an add-on from an AddonSearchResult instance.
   *
   * The options argument defines extra options to control the install.
   * Recognized keys in this map are:
   *
   *   syncGUID - Sync GUID to use for the new add-on.
   *   enabled - Boolean indicating whether the add-on should be enabled upon
   *             install.
   *
   * When complete it calls a callback with 2 arguments, error and result.
   *
   * If error is falsy, result is an object. If error is truthy, result is
   * null.
   *
   * The result object has the following keys:
   *
   *   id      ID of add-on that was installed.
   *   install AddonInstall that was installed.
   *   addon   Addon that was installed.
   *
   * The callback is invoked exactly once per outcome: immediately on lookup
   * failure, or later from one of the install listener events.
   *
   * @param addon
   *        AddonSearchResult to install add-on from.
   * @param options
   *        Object with additional metadata describing how to install add-on.
   * @param cb
   *        Function to be invoked with result of operation.
   */
  installAddonFromSearchResult:
    function installAddonFromSearchResult(addon, options, cb) {
    this._log.info("Trying to install add-on from search result: " + addon.id);

    this.getInstallFromSearchResult(addon, function onResult(error, install) {
      if (error) {
        cb(error, null);
        return;
      }

      if (!install) {
        cb(new Error("AddonInstall not available: " + addon.id), null);
        return;
      }

      try {
        this._log.info("Installing " + addon.id);
        // The listener callbacks below run without our `this`, so capture
        // the logger in a local.
        let log = this._log;

        let listener = {
          // Applies the caller-supplied syncGUID and enabled state as soon
          // as the install starts, before the add-on becomes active.
          onInstallStarted: function onInstallStarted(install) {
            if (!options) {
              return;
            }

            if (options.syncGUID) {
              log.info("Setting syncGUID of " + install.name +": " +
                       options.syncGUID);
              install.addon.syncGUID = options.syncGUID;
            }

            // We only need to change userDisabled if it is disabled because
            // enabled is the default.
            if ("enabled" in options && !options.enabled) {
              log.info("Marking add-on as disabled for install: " +
                       install.name);
              install.addon.userDisabled = true;
            }
          },
          onInstallEnded: function(install, addon) {
            install.removeListener(listener);

            cb(null, {id: addon.id, install: install, addon: addon});
          },
          onInstallFailed: function(install) {
            install.removeListener(listener);

            cb(new Error("Install failed: " + install.error), null);
          },
          onDownloadFailed: function(install) {
            install.removeListener(listener);

            cb(new Error("Download failed: " + install.error), null);
          }
        };
        install.addListener(listener);
        install.install();
      }
      catch (ex) {
        this._log.error("Error installing add-on", ex);
        cb(ex, null);
      }
    }.bind(this));
  },
+
  /**
   * Uninstalls the Addon instance and invoke a callback when it is done.
   *
   * A global AddonManager listener is registered before triggering the
   * uninstall and removed once an event for this add-on's id arrives, so
   * events for unrelated add-ons are ignored.
   *
   * @param addon
   *        Addon instance to uninstall.
   * @param cb
   *        Function to be invoked when uninstall has finished. It receives a
   *        truthy value signifying error and the add-on which was uninstalled.
   */
  uninstallAddon: function uninstallAddon(addon, cb) {
    let listener = {
      onUninstalling: function(uninstalling, needsRestart) {
        if (addon.id != uninstalling.id) {
          return;
        }

        // We assume restartless add-ons will send the onUninstalled event
        // soon.
        if (!needsRestart) {
          return;
        }

        // For non-restartless add-ons, we issue the callback on uninstalling
        // because we will likely never see the uninstalled event.
        AddonManager.removeAddonListener(listener);
        cb(null, addon);
      },
      onUninstalled: function(uninstalled) {
        if (addon.id != uninstalled.id) {
          return;
        }

        AddonManager.removeAddonListener(listener);
        cb(null, addon);
      }
    };
    AddonManager.addAddonListener(listener);
    addon.uninstall();
  },
+
+ /**
+ * Installs multiple add-ons specified by metadata.
+ *
+ * The first argument is an array of objects. Each object must have the
+ * following keys:
+ *
+ * id - public ID of the add-on to install.
+ * syncGUID - syncGUID for new add-on.
+ * enabled - boolean indicating whether the add-on should be enabled.
+ * requireSecureURI - Boolean indicating whether to require a secure
+ * URI when installing from a remote location. This defaults to
+ * true.
+ *
+ * The callback will be called when activity on all add-ons is complete. The
+ * callback receives 2 arguments, error and result.
+ *
+ * If error is truthy, it contains a string describing the overall error.
+ *
+ * The 2nd argument to the callback is always an object with details on the
+ * overall execution state. It contains the following keys:
+ *
+ * installedIDs Array of add-on IDs that were installed.
+ * installs Array of AddonInstall instances that were installed.
+ * addons Array of Addon instances that were installed.
+ * errors Array of errors encountered. Only has elements if error is
+ * truthy.
+ *
+ * @param installs
+ * Array of objects describing add-ons to install.
+ * @param cb
+ * Function to be called when all actions are complete.
+ */
+ installAddons: function installAddons(installs, cb) {
+ if (!cb) {
+ throw new Error("Invalid argument: cb is not defined.");
+ }
+
+ let ids = [];
+ for (let addon of installs) {
+ ids.push(addon.id);
+ }
+
+ AddonRepository.getAddonsByIDs(ids, {
+ searchSucceeded: function searchSucceeded(addons, addonsLength, total) {
+ this._log.info("Found " + addonsLength + "/" + ids.length +
+ " add-ons during repository search.");
+
+ let ourResult = {
+ installedIDs: [],
+ installs: [],
+ addons: [],
+ skipped: [],
+ errors: []
+ };
+
+ if (!addonsLength) {
+ cb(null, ourResult);
+ return;
+ }
+
+ let expectedInstallCount = 0;
+ let finishedCount = 0;
+ let installCallback = function installCallback(error, result) {
+ finishedCount++;
+
+ if (error) {
+ ourResult.errors.push(error);
+ } else {
+ ourResult.installedIDs.push(result.id);
+ ourResult.installs.push(result.install);
+ ourResult.addons.push(result.addon);
+ }
+
+ if (finishedCount >= expectedInstallCount) {
+ if (ourResult.errors.length > 0) {
+ cb(new Error("1 or more add-ons failed to install"), ourResult);
+ } else {
+ cb(null, ourResult);
+ }
+ }
+ }.bind(this);
+
+ let toInstall = [];
+
+ // Rewrite the "src" query string parameter of the source URI to note
+ // that the add-on was installed by Sync and not something else so
+ // server-side metrics aren't skewed (bug 708134). The server should
+ // ideally send proper URLs, but this solution was deemed too
+ // complicated at the time the functionality was implemented.
+ for (let addon of addons) {
+ // Find the specified options for this addon.
+ let options;
+ for (let install of installs) {
+ if (install.id == addon.id) {
+ options = install;
+ break;
+ }
+ }
+ if (!this.canInstallAddon(addon, options)) {
+ ourResult.skipped.push(addon.id);
+ continue;
+ }
+
+ // We can go ahead and attempt to install it.
+ toInstall.push(addon);
+
+ // We should always be able to QI the nsIURI to nsIURL. If not, we
+ // still try to install the add-on, but we don't rewrite the URL,
+ // potentially skewing metrics.
+ try {
+ addon.sourceURI.QueryInterface(Ci.nsIURL);
+ } catch (ex) {
+ this._log.warn("Unable to QI sourceURI to nsIURL: " +
+ addon.sourceURI.spec);
+ continue;
+ }
+
+ let params = addon.sourceURI.query.split("&").map(
+ function rewrite(param) {
+
+ if (param.indexOf("src=") == 0) {
+ return "src=sync";
+ } else {
+ return param;
+ }
+ });
+
+ addon.sourceURI.query = params.join("&");
+ }
+
+ expectedInstallCount = toInstall.length;
+
+ if (!expectedInstallCount) {
+ cb(null, ourResult);
+ return;
+ }
+
+ // Start all the installs asynchronously. They will report back to us
+ // as they finish, eventually triggering the global callback.
+ for (let addon of toInstall) {
+ let options = {};
+ for (let install of installs) {
+ if (install.id == addon.id) {
+ options = install;
+ break;
+ }
+ }
+
+ this.installAddonFromSearchResult(addon, options, installCallback);
+ }
+
+ }.bind(this),
+
+ searchFailed: function searchFailed() {
+ cb(new Error("AddonRepository search failed"), null);
+ },
+ });
+ },
+
+ /**
+ * Returns true if we are able to install the specified addon, false
+ * otherwise. It is expected that this will log the reason if it returns
+ * false.
+ *
+ * @param addon
+ * (Addon) Add-on instance to check.
+ * @param options
+ * (object) The options specified for this addon. See installAddons()
+ * for the valid elements.
+ */
+ canInstallAddon(addon, options) {
+ // sourceURI presence isn't enforced by AddonRepository. So, we skip
+ // add-ons without a sourceURI.
+ if (!addon.sourceURI) {
+ this._log.info("Skipping install of add-on because missing " +
+ "sourceURI: " + addon.id);
+ return false;
+ }
+ // Verify that the source URI uses TLS. We don't allow installs from
+ // insecure sources for security reasons. The Addon Manager ensures
+ // that cert validation etc is performed.
+ // (We should also consider just dropping this entirely and calling
+ // XPIProvider.isInstallAllowed, but that has additional semantics we might
+ // need to think through...)
+ let requireSecureURI = true;
+ if (options && options.requireSecureURI !== undefined) {
+ requireSecureURI = options.requireSecureURI;
+ }
+
+ if (requireSecureURI) {
+ let scheme = addon.sourceURI.scheme;
+ if (scheme != "https") {
+ this._log.info(`Skipping install of add-on "${addon.id}" because sourceURI's scheme of "${scheme}" is not trusted`);
+ return false;
+ }
+ }
+ this._log.info(`Add-on "${addon.id}" is able to be installed`);
+ return true;
+ },
+
+
+ /**
+ * Update the user disabled flag for an add-on.
+ *
+ * The supplied callback will be called when the operation is
+ * complete. If the new flag matches the existing or if the add-on
+ * isn't currently active, the function will fire the callback
+ * immediately. Else, the callback is invoked when the AddonManager
+ * reports the change has taken effect or has been registered.
+ *
+ * The callback receives as arguments:
+ *
+ * (Error) Encountered error during operation or null on success.
+ * (Addon) The add-on instance being operated on.
+ *
+ * @param addon
+ * (Addon) Add-on instance to operate on.
+ * @param value
+ * (bool) New value for add-on's userDisabled property.
+ * @param cb
+ * (function) Callback to be invoked on completion.
+ */
+ updateUserDisabled: function updateUserDisabled(addon, value, cb) {
+ if (addon.userDisabled == value) {
+ cb(null, addon);
+ return;
+ }
+
+ let listener = {
+ onEnabling: function onEnabling(wrapper, needsRestart) {
+ this._log.debug("onEnabling: " + wrapper.id);
+ if (wrapper.id != addon.id) {
+ return;
+ }
+
+ // We ignore the restartless case because we'll get onEnabled shortly.
+ if (!needsRestart) {
+ return;
+ }
+
+ AddonManager.removeAddonListener(listener);
+ cb(null, wrapper);
+ }.bind(this),
+
+ onEnabled: function onEnabled(wrapper) {
+ this._log.debug("onEnabled: " + wrapper.id);
+ if (wrapper.id != addon.id) {
+ return;
+ }
+
+ AddonManager.removeAddonListener(listener);
+ cb(null, wrapper);
+ }.bind(this),
+
+ onDisabling: function onDisabling(wrapper, needsRestart) {
+ this._log.debug("onDisabling: " + wrapper.id);
+ if (wrapper.id != addon.id) {
+ return;
+ }
+
+ if (!needsRestart) {
+ return;
+ }
+
+ AddonManager.removeAddonListener(listener);
+ cb(null, wrapper);
+ }.bind(this),
+
+ onDisabled: function onDisabled(wrapper) {
+ this._log.debug("onDisabled: " + wrapper.id);
+ if (wrapper.id != addon.id) {
+ return;
+ }
+
+ AddonManager.removeAddonListener(listener);
+ cb(null, wrapper);
+ }.bind(this),
+
+ onOperationCancelled: function onOperationCancelled(wrapper) {
+ this._log.debug("onOperationCancelled: " + wrapper.id);
+ if (wrapper.id != addon.id) {
+ return;
+ }
+
+ AddonManager.removeAddonListener(listener);
+ cb(new Error("Operation cancelled"), wrapper);
+ }.bind(this)
+ };
+
+ // The add-on listeners are only fired if the add-on is active. If not, the
+ // change is silently updated and made active when/if the add-on is active.
+
+ if (!addon.appDisabled) {
+ AddonManager.addAddonListener(listener);
+ }
+
+ this._log.info("Updating userDisabled flag: " + addon.id + " -> " + value);
+ addon.userDisabled = !!value;
+
+ if (!addon.appDisabled) {
+ cb(null, addon);
+ return;
+ }
+ // Else the listener will handle invoking the callback.
+ },
+
+};
+
// Lazily construct the singleton so importing this module stays cheap.
XPCOMUtils.defineLazyGetter(this, "AddonUtils", () => new AddonUtilsInternal());
diff --git a/services/sync/modules/bookmark_validator.js b/services/sync/modules/bookmark_validator.js
new file mode 100644
index 000000000..2a94ba043
--- /dev/null
+++ b/services/sync/modules/bookmark_validator.js
@@ -0,0 +1,784 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+"use strict";
+
+const Cu = Components.utils;
+
+Cu.import("resource://gre/modules/PlacesUtils.jsm");
+Cu.import("resource://gre/modules/PlacesSyncUtils.jsm");
+Cu.import("resource://gre/modules/Task.jsm");
+Cu.import("resource://gre/modules/XPCOMUtils.jsm");
+
+
+this.EXPORTED_SYMBOLS = ["BookmarkValidator", "BookmarkProblemData"];
+
// Annotations marking the left-pane organizer root and its queries; nodes
// carrying either annotation are never synced.
const LEFT_PANE_ROOT_ANNO = "PlacesOrganizer/OrganizerFolder";
const LEFT_PANE_QUERY_ANNO = "PlacesOrganizer/OrganizerQuery";

// Indicates if a local bookmark tree node should be excluded from syncing.
function isNodeIgnored(treeNode) {
  let { annos } = treeNode;
  return annos && annos.some(anno => anno.name == LEFT_PANE_ROOT_ANNO ||
                                     anno.name == LEFT_PANE_QUERY_ANNO);
}
+const BOOKMARK_VALIDATOR_VERSION = 1;
+
+/**
+ * Result of bookmark validation. Contains the following fields which describe
+ * server-side problems unless otherwise specified.
+ *
+ * - missingIDs (number): # of objects with missing ids
+ * - duplicates (array of ids): ids seen more than once
+ * - parentChildMismatches (array of {parent: parentid, child: childid}):
+ * instances where the child's parentid and the parent's children array
+ * do not match
+ * - cycles (array of array of ids). List of cycles found in the server-side tree.
+ * - clientCycles (array of array of ids). List of cycles found in the client-side tree.
+ * - orphans (array of {id: string, parent: string}): List of nodes with
+ * either no parentid, or where the parent could not be found.
+ * - missingChildren (array of {parent: id, child: id}):
+ * List of parent/children where the child id couldn't be found
+ * - deletedChildren (array of { parent: id, child: id }):
+ * List of parent/children where child id was a deleted item (but still showed up
+ * in the children array)
+ * - multipleParents (array of {child: id, parents: array of ids}):
+ * List of children that were part of multiple parent arrays
+ * - deletedParents (array of ids) : List of records that aren't deleted but
+ * had deleted parents
+ * - childrenOnNonFolder (array of ids): list of non-folders that still have
+ * children arrays
+ * - duplicateChildren (array of ids): list of records who have the same
+ * child listed multiple times in their children array
+ * - parentNotFolder (array of ids): list of records that have parents that
+ * aren't folders
+ * - rootOnServer (boolean): true if the root came from the server
+ * - badClientRoots (array of ids): Contains any client-side root ids where
+ * the root is missing or isn't a (direct) child of the places root.
+ *
+ * - clientMissing: Array of ids on the server missing from the client
+ * - serverMissing: Array of ids on the client missing from the server
+ * - serverDeleted: Array of ids on the client that the server had marked as deleted.
+ * - serverUnexpected: Array of ids that appear on the server but shouldn't
+ * because the client attempts to never upload them.
+ * - differences: Array of {id: string, differences: string array} recording
+ * the non-structural properties that are different between the client and server
+ * - structuralDifferences: As above, but contains the items where the differences were
+ * structural, that is, they contained childGUIDs or parentid
+ */
/**
 * Accumulates per-category problem records for one bookmark validation run.
 * See the comment above for the meaning of every field.
 */
class BookmarkProblemData {
  constructor() {
    this.rootOnServer = false;
    this.missingIDs = 0;

    // Every remaining field is a list of problem entries, initially empty.
    const listFields = [
      "duplicates", "parentChildMismatches", "cycles", "clientCycles",
      "orphans", "missingChildren", "deletedChildren", "multipleParents",
      "deletedParents", "childrenOnNonFolder", "duplicateChildren",
      "parentNotFolder",

      "badClientRoots", "clientMissing", "serverMissing", "serverDeleted",
      "serverUnexpected", "differences", "structuralDifferences",
    ];
    for (let field of listFields) {
      this[field] = [];
    }
  }

  /**
   * Convert ("difference", [{ differences: ["tags", "name"] }, { differences: ["name"] }]) into
   * [{ name: "difference:tags", count: 1}, { name: "difference:name", count: 2 }], etc.
   */
  _summarizeDifferences(prefix, diffs) {
    let counts = new Map();
    for (let { differences } of diffs) {
      for (let type of differences) {
        let key = `${prefix}:${type}`;
        counts.set(key, (counts.get(key) || 0) + 1);
      }
    }
    return [...counts].map(([name, count]) => ({ name, count }));
  }

  /**
   * Produce a list summarizing problems found. Each entry contains {name, count},
   * where name is the field name for the problem, and count is the number of times
   * the problem was encountered.
   *
   * Validation has failed if all counts are not 0.
   *
   * If the `full` argument is truthy, we also include information about which
   * properties we saw structural differences in. Currently, this means either
   * "sdiff:parentid" and "sdiff:childGUIDs" may be present.
   */
  getSummary(full) {
    let result = [
      "clientMissing", "serverMissing", "serverDeleted", "serverUnexpected",

      "structuralDifferences", "differences",
    ].map(name => ({ name, count: this[name].length }));

    result.push({ name: "missingIDs", count: this.missingIDs });
    result.push({ name: "rootOnServer", count: this.rootOnServer ? 1 : 0 });

    for (let name of ["duplicates", "parentChildMismatches", "cycles",
                      "clientCycles", "badClientRoots", "orphans",
                      "missingChildren", "deletedChildren", "multipleParents",
                      "deletedParents", "childrenOnNonFolder",
                      "duplicateChildren", "parentNotFolder"]) {
      result.push({ name, count: this[name].length });
    }

    if (full) {
      result.push(...this._summarizeDifferences("sdiff", this.structuralDifferences));
    }
    return result;
  }
}
+
// Defined lazily to avoid initializing PlacesUtils.bookmarks too soon.
// These are the four bookmark roots whose subtrees Sync mirrors; nodes
// outside them are treated as unsynced in createClientRecordsFromTree.
XPCOMUtils.defineLazyGetter(this, "SYNCED_ROOTS", () => [
  PlacesUtils.bookmarks.menuGuid,
  PlacesUtils.bookmarks.toolbarGuid,
  PlacesUtils.bookmarks.unfiledGuid,
  PlacesUtils.bookmarks.mobileGuid,
]);
+
+class BookmarkValidator {
+
  /**
   * Resolves "query" records (place: URIs) to the concrete items they point
   * at, attaching the target record as `entry.concrete`.
   *
   * @param recordMap
   *        Map of guid -> client record, as built by
   *        createClientRecordsFromTree (keyed by the "real" Places guid).
   */
  _followQueries(recordMap) {
    for (let [guid, entry] of recordMap) {
      // Only records typed as queries, or whose URI is a place: URI, are
      // candidates for resolution.
      if (entry.type !== "query" && (!entry.bmkUri || !entry.bmkUri.startsWith("place:"))) {
        continue;
      }
      // Might be worth trying to parse the place: query instead so that this
      // works "automatically" with things like aboutsync.
      // NOTE(review): getFolderContents is passed the record object itself
      // here — confirm this is the intended argument form for that API.
      let queryNodeParent = PlacesUtils.getFolderContents(entry, false, true);
      if (!queryNodeParent || !queryNodeParent.root.hasChildren) {
        continue;
      }
      queryNodeParent = queryNodeParent.root;
      let queryNode = null;
      let numSiblings = 0;
      // Remember the container's open state so we can restore it afterwards.
      let containerWasOpen = queryNodeParent.containerOpen;
      queryNodeParent.containerOpen = true;
      try {
        try {
          numSiblings = queryNodeParent.childCount;
        } catch (e) {
          // This throws when we can't actually get the children. This is the
          // case for history containers, tag queries, ...
          continue;
        }
        // Scan the container's children for the node matching our guid.
        for (let i = 0; i < numSiblings && !queryNode; ++i) {
          let child = queryNodeParent.getChild(i);
          if (child && child.bookmarkGuid && child.bookmarkGuid === guid) {
            queryNode = child;
          }
        }
      } finally {
        queryNodeParent.containerOpen = containerWasOpen;
      }
      if (!queryNode) {
        continue;
      }

      let concreteId = PlacesUtils.getConcreteItemGuid(queryNode);
      if (!concreteId) {
        continue;
      }
      let concreteItem = recordMap.get(concreteId);
      if (!concreteItem) {
        continue;
      }
      entry.concrete = concreteItem;
    }
  }
+
  /**
   * Converts a local Places bookmark tree into a flat list of records shaped
   * like the server-side representation.
   *
   * NOTE: the input tree nodes are mutated in place (id, type, tags, pos,
   * bmkUri, ignored, parent/parentid, childGUIDs are (re)assigned) and the
   * same node objects are returned in the flattened list.
   *
   * @param clientTree
   *        Root tree node as produced by PlacesUtils.promiseBookmarksTree
   *        (or a compatible structure) — assumed shape; confirm at callers.
   * @return Array of mutated tree nodes, one per visited node.
   */
  createClientRecordsFromTree(clientTree) {
    // Iterate over the treeNode, converting it to something more similar to what
    // the server stores.
    let records = [];
    let recordsByGuid = new Map();
    let syncedRoots = SYNCED_ROOTS;
    function traverse(treeNode, synced) {
      // A node is synced once we pass through one of the synced roots, and
      // becomes unsynced again below an ignored (left-pane) node.
      if (!synced) {
        synced = syncedRoots.includes(treeNode.guid);
      } else if (isNodeIgnored(treeNode)) {
        synced = false;
      }
      let guid = PlacesSyncUtils.bookmarks.guidToSyncId(treeNode.guid);
      let itemType = 'item';
      treeNode.ignored = !synced;
      treeNode.id = guid;
      switch (treeNode.type) {
        case PlacesUtils.TYPE_X_MOZ_PLACE:
          // A place: URI with a smart-bookmark annotation is a "query";
          // everything else is a plain bookmark.
          let query = null;
          if (treeNode.annos && treeNode.uri.startsWith("place:")) {
            query = treeNode.annos.find(({name}) =>
              name === PlacesSyncUtils.bookmarks.SMART_BOOKMARKS_ANNO);
          }
          if (query && query.value) {
            itemType = 'query';
          } else {
            itemType = 'bookmark';
          }
          break;
        case PlacesUtils.TYPE_X_MOZ_PLACE_CONTAINER:
          // Containers carrying livemark feed/site annotations are
          // livemarks; plain containers are folders.
          let isLivemark = false;
          if (treeNode.annos) {
            for (let anno of treeNode.annos) {
              if (anno.name === PlacesUtils.LMANNO_FEEDURI) {
                isLivemark = true;
                treeNode.feedUri = anno.value;
              } else if (anno.name === PlacesUtils.LMANNO_SITEURI) {
                isLivemark = true;
                treeNode.siteUri = anno.value;
              }
            }
          }
          itemType = isLivemark ? "livemark" : "folder";
          break;
        case PlacesUtils.TYPE_X_MOZ_PLACE_SEPARATOR:
          itemType = 'separator';
          break;
      }

      // Normalize the comma-separated tag string into an array.
      if (treeNode.tags) {
        treeNode.tags = treeNode.tags.split(",");
      } else {
        treeNode.tags = [];
      }
      treeNode.type = itemType;
      treeNode.pos = treeNode.index;
      treeNode.bmkUri = treeNode.uri;
      records.push(treeNode);
      // We want to use the "real" guid here.
      recordsByGuid.set(treeNode.guid, treeNode);
      if (treeNode.type === 'folder') {
        treeNode.childGUIDs = [];
        if (!treeNode.children) {
          treeNode.children = [];
        }
        for (let child of treeNode.children) {
          traverse(child, synced);
          child.parent = treeNode;
          child.parentid = guid;
          treeNode.childGUIDs.push(child.guid);
        }
      }
    }
    traverse(clientTree, false);
    clientTree.id = 'places';
    this._followQueries(recordsByGuid);
    return records;
  }
+
+ /**
+ * Process the server-side list. Mainly this builds the records into a tree,
+ * but it also records information about problems, and produces arrays of the
+ * deleted and non-deleted nodes.
+ *
+ * Returns an object containing:
+ * - records:Array of non-deleted records. Each record contains the following
+ * properties
+ * - childGUIDs (array of strings, only present if type is 'folder'): the
+ * list of child GUIDs stored on the server.
+ * - children (array of records, only present if type is 'folder'):
+ * each record has these same properties. This may differ in content
+ * from what you may expect from the childGUIDs list, as it won't
+ * contain any records that could not be found.
+ * - parent (record): The parent to this record.
+ * - Unchanged properties send down from the server: id, title, type,
+ * parentName, parentid, bmkURI, keyword, tags, pos, queryId, loadInSidebar
+ * - root: Root of the server-side bookmark tree. Has the same properties as
+ * above.
+ * - deletedRecords: As above, but only contains items that the server sent
+ * where it also sent indication that the item should be deleted.
+ * - problemData: a BookmarkProblemData object, with the caveat that
+ * the fields describing client/server relationship will not have been filled
+ * out yet.
+ */
+  inspectServerRecords(serverRecords) {
+    // Ids for which the server sent a tombstone (record.deleted).
+    let deletedItemIds = new Set();
+    let idToRecord = new Map();
+    let deletedRecords = [];
+
+    let folders = [];
+    let problems = [];
+
+    let problemData = new BookmarkProblemData();
+
+    let resultRecords = [];
+
+    // First pass: index records by id, normalize folder children, and record
+    // per-record problems (missing ids, duplicates, children on non-folders).
+    for (let record of serverRecords) {
+      if (!record.id) {
+        ++problemData.missingIDs;
+        continue;
+      }
+      if (record.deleted) {
+        deletedItemIds.add(record.id);
+      } else {
+        if (idToRecord.has(record.id)) {
+          problemData.duplicates.push(record.id);
+          continue;
+        }
+      }
+      idToRecord.set(record.id, record);
+
+      if (record.children) {
+        if (record.type !== "folder") {
+          // Due to implementation details in engines/bookmarks.js, (Livemark
+          // subclassing BookmarkFolder) Livemarks will have a children array,
+          // but it should still be empty.
+          if (!record.children.length) {
+            continue;
+          }
+          // Otherwise we mark it as an error and still try to resolve the children
+          problemData.childrenOnNonFolder.push(record.id);
+        }
+        folders.push(record);
+
+        // A Set collapses duplicates, so a size mismatch means repeated children.
+        if (new Set(record.children).size !== record.children.length) {
+          problemData.duplicateChildren.push(record.id)
+        }
+
+        // The children array stores special guids as their local guid values,
+        // e.g. 'menu________' instead of 'menu', but all other parts of the
+        // serverside bookmark info stores it as the special value ('menu').
+        record.childGUIDs = record.children;
+        record.children = record.children.map(childID => {
+          return PlacesSyncUtils.bookmarks.guidToSyncId(childID);
+        });
+      }
+    }
+
+    // Flag records for which we also saw a tombstone as deleted.
+    for (let deletedId of deletedItemIds) {
+      let record = idToRecord.get(deletedId);
+      if (record && !record.isDeleted) {
+        deletedRecords.push(record);
+        record.isDeleted = true;
+      }
+    }
+
+    let root = idToRecord.get('places');
+
+    if (!root) {
+      // Fabricate a root. We want to remember that it's fake so that we can
+      // avoid complaining about stuff like it missing its childGUIDs later.
+      root = { id: 'places', children: [], type: 'folder', title: '', fake: true };
+      resultRecords.push(root);
+      idToRecord.set('places', root);
+    } else {
+      problemData.rootOnServer = true;
+    }
+
+    // Build the tree, find orphans, and record most problems having to do with
+    // the tree structure.
+    for (let [id, record] of idToRecord) {
+      if (record === root) {
+        continue;
+      }
+
+      if (record.isDeleted) {
+        continue;
+      }
+
+      let parentID = record.parentid;
+      if (!parentID) {
+        problemData.orphans.push({id: record.id, parent: parentID});
+        continue;
+      }
+
+      let parent = idToRecord.get(parentID);
+      if (!parent) {
+        problemData.orphans.push({id: record.id, parent: parentID});
+        continue;
+      }
+
+      if (parent.type !== 'folder') {
+        problemData.parentNotFolder.push(record.id);
+        // Give the non-folder parent empty child lists so the code below can
+        // still treat it like a folder.
+        if (!parent.children) {
+          parent.children = [];
+        }
+        if (!parent.childGUIDs) {
+          parent.childGUIDs = [];
+        }
+      }
+
+      if (!record.isDeleted) {
+        resultRecords.push(record);
+      }
+
+      record.parent = parent;
+      if (parent !== root || problemData.rootOnServer) {
+        // Replace the child's entry in the parent's children array with the
+        // record itself; a missing entry is a parent/child mismatch.
+        let childIndex = parent.children.indexOf(id);
+        if (childIndex < 0) {
+          problemData.parentChildMismatches.push({parent: parent.id, child: record.id});
+        } else {
+          parent.children[childIndex] = record;
+        }
+      } else {
+        // The fabricated root starts with no children, so just accumulate.
+        parent.children.push(record);
+      }
+
+      if (parent.isDeleted && !record.isDeleted) {
+        problemData.deletedParents.push(record.id);
+      }
+
+      // We used to check if the parentName on the server matches the actual
+      // local parent name, but given this is used only for de-duping a record
+      // the first time it is seen and expensive to keep up-to-date, we decided
+      // to just stop recording it. See bug 1276969 for more.
+    }
+
+    // Check that we aren't missing any children.
+    for (let folder of folders) {
+      // Children that remain strings at this point were never resolved above,
+      // so they are either missing, deleted, or claimed by another parent.
+      folder.unfilteredChildren = folder.children;
+      folder.children = [];
+      for (let ci = 0; ci < folder.unfilteredChildren.length; ++ci) {
+        let child = folder.unfilteredChildren[ci];
+        let childObject;
+        if (typeof child == "string") {
+          // This can happen the parent refers to a child that has a different
+          // parentid, or if it refers to a missing or deleted child. It shouldn't
+          // be possible with totally valid bookmarks.
+          childObject = idToRecord.get(child);
+          if (!childObject) {
+            problemData.missingChildren.push({parent: folder.id, child});
+          } else {
+            folder.unfilteredChildren[ci] = childObject;
+            if (childObject.isDeleted) {
+              problemData.deletedChildren.push({ parent: folder.id, child });
+            }
+          }
+        } else {
+          childObject = child;
+        }
+
+        if (!childObject) {
+          continue;
+        }
+
+        if (childObject.parentid === folder.id) {
+          folder.children.push(childObject);
+          continue;
+        }
+
+        // The child is very probably in multiple `children` arrays --
+        // see if we already have a problem record about it.
+        let currentProblemRecord = problemData.multipleParents.find(pr =>
+          pr.child === child);
+
+        if (currentProblemRecord) {
+          currentProblemRecord.parents.push(folder.id);
+          continue;
+        }
+
+        let otherParent = idToRecord.get(childObject.parentid);
+        // it's really an ... orphan ... sort of.
+        if (!otherParent) {
+          // if we never end up adding to this parent's list, we filter it out after this loop.
+          problemData.multipleParents.push({
+            child,
+            parents: [folder.id]
+          });
+          if (!problemData.orphans.some(r => r.id === child)) {
+            problemData.orphans.push({
+              id: child,
+              parent: childObject.parentid
+            });
+          }
+          continue;
+        }
+
+        if (otherParent.isDeleted) {
+          if (!problemData.deletedParents.includes(child)) {
+            problemData.deletedParents.push(child);
+          }
+          continue;
+        }
+
+        if (otherParent.childGUIDs && !otherParent.childGUIDs.includes(child)) {
+          if (!problemData.parentChildMismatches.some(r => r.child === child)) {
+            // Might not be possible to get here.
+            problemData.parentChildMismatches.push({ child, parent: folder.id });
+          }
+        }
+
+        problemData.multipleParents.push({
+          child,
+          parents: [childObject.parentid, folder.id]
+        });
+      }
+    }
+    // Entries with a single recorded parent were speculative (see the
+    // missing-otherParent case above) and are not real problems.
+    problemData.multipleParents = problemData.multipleParents.filter(record =>
+      record.parents.length >= 2);
+
+    problemData.cycles = this._detectCycles(resultRecords);
+
+    return {
+      deletedRecords,
+      records: resultRecords,
+      problemData,
+      root,
+    };
+  }
+
+  // helper for inspectServerRecords
+  // Depth-first search over `records` (following .children, plus .concrete
+  // when present). Returns an array of cycles; each cycle is the list of
+  // record ids along the cyclic path.
+  _detectCycles(records) {
+    // currentPath and pathLookup contain the same data. pathLookup is faster to
+    // query, but currentPath gives us the order of traversal that we need in
+    // order to report the members of the cycles.
+    let pathLookup = new Set();
+    let currentPath = [];
+    let cycles = [];
+    let seenEver = new Set();
+    const traverse = node => {
+      if (pathLookup.has(node)) {
+        // Back-edge: node is already on the current path, so we found a cycle.
+        let cycleStart = currentPath.lastIndexOf(node);
+        let cyclePath = currentPath.slice(cycleStart).map(n => n.id);
+        cycles.push(cyclePath);
+        return;
+      } else if (seenEver.has(node)) {
+        // If we're checking the server, this is a problem, but it should already be reported.
+        // On the client, this could happen due to including `node.concrete` in the child list.
+        return;
+      }
+      seenEver.add(node);
+      let children = node.children || [];
+      if (node.concrete) {
+        // NOTE(review): when node.children exists, `children` aliases it, so
+        // this push mutates node.children in place -- confirm that's intended.
+        children.push(node.concrete);
+      }
+      if (children) {
+        pathLookup.add(node);
+        currentPath.push(node);
+        for (let child of children) {
+          traverse(child);
+        }
+        currentPath.pop();
+        pathLookup.delete(node);
+      }
+    };
+    for (let record of records) {
+      if (!seenEver.has(record)) {
+        traverse(record);
+      }
+    }
+
+    return cycles;
+  }
+
+  // Perform client-side sanity checking that doesn't involve server data
+  // Records client-side cycles in problemData.clientCycles and checks that
+  // each synced root exists locally with 'places' as its parent; failures
+  // are pushed onto problemData.badClientRoots.
+  _validateClient(problemData, clientRecords) {
+    problemData.clientCycles = this._detectCycles(clientRecords);
+    for (let rootGUID of SYNCED_ROOTS) {
+      let record = clientRecords.find(record =>
+        record.guid === rootGUID);
+      if (!record || record.parentid !== "places") {
+        problemData.badClientRoots.push(rootGUID);
+      }
+    }
+  }
+
+ /**
+ * Compare the list of server records with the client tree.
+ *
+ * Returns the same data as described in the inspectServerRecords comment,
+ * with the following additional fields.
+ * - clientRecords: an array of client records in a similar format to
+ * the .records (ie, server records) entry.
+ * - problemData is the same as for inspectServerRecords, except all properties
+ * will be filled out.
+ */
+  compareServerWithClient(serverRecords, clientTree) {
+
+    let clientRecords = this.createClientRecordsFromTree(clientTree);
+    let inspectionInfo = this.inspectServerRecords(serverRecords);
+    inspectionInfo.clientRecords = clientRecords;
+
+    // Mainly do this to remove deleted items and normalize child guids.
+    serverRecords = inspectionInfo.records;
+    let problemData = inspectionInfo.problemData;
+
+    this._validateClient(problemData, clientRecords);
+
+    let matches = [];
+
+    // Pair up records from both sides: id -> { client, server }.
+    let allRecords = new Map();
+    let serverDeletedLookup = new Set(inspectionInfo.deletedRecords.map(r => r.id));
+
+    for (let sr of serverRecords) {
+      // Skip the fabricated root (see inspectServerRecords).
+      if (sr.fake) {
+        continue;
+      }
+      allRecords.set(sr.id, {client: null, server: sr});
+    }
+
+    for (let cr of clientRecords) {
+      let unified = allRecords.get(cr.id);
+      if (!unified) {
+        allRecords.set(cr.id, {client: cr, server: null});
+      } else {
+        unified.client = cr;
+      }
+    }
+
+
+    for (let [id, {client, server}] of allRecords) {
+      if (!client && server) {
+        problemData.clientMissing.push(id);
+        continue;
+      }
+      if (!server && client) {
+        if (serverDeletedLookup.has(id)) {
+          problemData.serverDeleted.push(id);
+        } else if (!client.ignored && client.id != "places") {
+          problemData.serverMissing.push(id);
+        }
+        continue;
+      }
+      if (server && client && client.ignored) {
+        problemData.serverUnexpected.push(id);
+      }
+      // Value differences vs. structural (tree-shape) differences are
+      // reported separately.
+      let differences = [];
+      let structuralDifferences = [];
+
+      // Don't bother comparing titles of roots. It's okay if locally it's
+      // "Mobile Bookmarks", but the server thinks it's "mobile".
+      // TODO: We probably should be handing other localized bookmarks (e.g.
+      // default bookmarks) here as well, see bug 1316041.
+      if (!SYNCED_ROOTS.includes(client.guid)) {
+        // We want to treat undefined, null and an empty string as identical
+        if ((client.title || "") !== (server.title || "")) {
+          differences.push("title");
+        }
+      }
+
+      if (client.parentid || server.parentid) {
+        if (client.parentid !== server.parentid) {
+          structuralDifferences.push('parentid');
+        }
+      }
+
+      if (client.tags || server.tags) {
+        let cl = client.tags || [];
+        let sl = server.tags || [];
+        // Order-insensitive comparison of the two tag lists.
+        if (cl.length !== sl.length || !cl.every((tag, i) => sl.indexOf(tag) >= 0)) {
+          differences.push('tags');
+        }
+      }
+
+      let sameType = client.type === server.type;
+      if (!sameType) {
+        // A local "place:" bookmark is represented on the server as a query.
+        if (server.type === "query" && client.type === "bookmark" && client.bmkUri.startsWith("place:")) {
+          sameType = true;
+        }
+      }
+
+
+      if (!sameType) {
+        differences.push('type');
+      } else {
+        switch (server.type) {
+          case 'bookmark':
+          case 'query':
+            if (server.bmkUri !== client.bmkUri) {
+              differences.push('bmkUri');
+            }
+            break;
+          case "livemark":
+            if (server.feedUri != client.feedUri) {
+              differences.push("feedUri");
+            }
+            if (server.siteUri != client.siteUri) {
+              differences.push("siteUri");
+            }
+            break;
+          case 'folder':
+            if (server.id === 'places' && !problemData.rootOnServer) {
+              // It's the fabricated places root. It won't have the GUIDs, but
+              // it doesn't matter.
+              break;
+            }
+            if (client.childGUIDs || server.childGUIDs) {
+              let cl = client.childGUIDs || [];
+              let sl = server.childGUIDs || [];
+              // Child order matters for folders, so compare positionally.
+              if (cl.length !== sl.length || !cl.every((id, i) => sl[i] === id)) {
+                structuralDifferences.push('childGUIDs');
+              }
+            }
+            break;
+        }
+      }
+
+      if (differences.length) {
+        problemData.differences.push({id, differences});
+      }
+      if (structuralDifferences.length) {
+        problemData.structuralDifferences.push({ id, differences: structuralDifferences });
+      }
+    }
+    return inspectionInfo;
+  }
+
+  // Fetch every record in the engine's collection from the server, decrypting
+  // each with the engine's collection key. Returns an array of cleartext
+  // records; throws the response object if the fetch was not successful.
+  _getServerState(engine) {
+    let collection = engine.itemSource();
+    let collectionKey = engine.service.collectionKeys.keyForCollection(engine.name);
+    collection.full = true;
+    let items = [];
+    collection.recordHandler = function(item) {
+      item.decrypt(collectionKey);
+      items.push(item.cleartext);
+    };
+    let resp = collection.getBatched();
+    if (!resp.success) {
+      throw resp;
+    }
+    return items;
+  }
+
+  /**
+   * Run a full validation for `engine`: fetch the server records and the
+   * local bookmarks tree, compare them, and resolve with an object of the
+   * form { duration, version, problems, recordCount }.
+   */
+  validate(engine) {
+    let self = this;
+    return Task.spawn(function*() {
+      let start = Date.now();
+      let clientTree = yield PlacesUtils.promiseBookmarksTree("", {
+        includeItemIds: true
+      });
+      let serverState = self._getServerState(engine);
+      let serverRecordCount = serverState.length;
+      let result = self.compareServerWithClient(serverState, clientTree);
+      let end = Date.now();
+      let duration = end-start;
+      return {
+        duration,
+        version: self.version,
+        problems: result.problemData,
+        recordCount: serverRecordCount
+      };
+    });
+  }
+
+};
+
+// Validator version, reported by validate() alongside the problem data.
+BookmarkValidator.prototype.version = BOOKMARK_VALIDATOR_VERSION;
+
diff --git a/services/sync/modules/browserid_identity.js b/services/sync/modules/browserid_identity.js
new file mode 100644
index 000000000..db3821518
--- /dev/null
+++ b/services/sync/modules/browserid_identity.js
@@ -0,0 +1,869 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+"use strict";
+
+this.EXPORTED_SYMBOLS = ["BrowserIDManager", "AuthenticationError"];
+
+var {classes: Cc, interfaces: Ci, utils: Cu, results: Cr} = Components;
+
+Cu.import("resource://gre/modules/Log.jsm");
+Cu.import("resource://services-common/async.js");
+Cu.import("resource://services-common/utils.js");
+Cu.import("resource://services-common/tokenserverclient.js");
+Cu.import("resource://services-crypto/utils.js");
+Cu.import("resource://services-sync/identity.js");
+Cu.import("resource://services-sync/util.js");
+Cu.import("resource://services-common/tokenserverclient.js");
+Cu.import("resource://gre/modules/Services.jsm");
+Cu.import("resource://services-sync/constants.js");
+Cu.import("resource://gre/modules/Promise.jsm");
+Cu.import("resource://services-sync/stages/cluster.js");
+Cu.import("resource://gre/modules/FxAccounts.jsm");
+
+// Lazy imports to prevent unnecessary load on startup.
+XPCOMUtils.defineLazyModuleGetter(this, "Weave",
+ "resource://services-sync/main.js");
+
+XPCOMUtils.defineLazyModuleGetter(this, "BulkKeyBundle",
+ "resource://services-sync/keys.js");
+
+XPCOMUtils.defineLazyModuleGetter(this, "fxAccounts",
+ "resource://gre/modules/FxAccounts.jsm");
+
+XPCOMUtils.defineLazyGetter(this, 'log', function() {
+ let log = Log.repository.getLogger("Sync.BrowserIDManager");
+ log.level = Log.Level[Svc.Prefs.get("log.logger.identity")] || Log.Level.Error;
+ return log;
+});
+
+// FxAccountsCommon.js doesn't use a "namespace", so create one here.
+var fxAccountsCommon = {};
+Cu.import("resource://gre/modules/FxAccountsCommon.js", fxAccountsCommon);
+
+const OBSERVER_TOPICS = [
+ fxAccountsCommon.ONLOGIN_NOTIFICATION,
+ fxAccountsCommon.ONLOGOUT_NOTIFICATION,
+ fxAccountsCommon.ON_ACCOUNT_STATE_CHANGE_NOTIFICATION,
+];
+
+const PREF_SYNC_SHOW_CUSTOMIZATION = "services.sync-setup.ui.showCustomizationDialog";
+
+/**
+ * Derive a Sync key bundle from the FxA key material kB via HKDF
+ * (64 bytes: 32 for encryption, 32 for HMAC). Returns a BulkKeyBundle.
+ */
+function deriveKeyBundle(kB) {
+  let out = CryptoUtils.hkdf(kB, undefined,
+                             "identity.mozilla.com/picl/v1/oldsync", 2*32);
+  let bundle = new BulkKeyBundle();
+  // [encryptionKey, hmacKey]
+  bundle.keyPair = [out.slice(0, 32), out.slice(32, 64)];
+  return bundle;
+}
+
+/*
+  General authentication error for abstracting authentication
+  errors from multiple sources (e.g., from FxAccounts, TokenServer).
+  details is additional details about the error - it might be a string, or
+  some other error object (which should do the right thing when toString() is
+  called on it).
+  source identifies which component the error originated from.
+*/
+function AuthenticationError(details, source) {
+  this.details = details;
+  this.source = source;
+}
+
+AuthenticationError.prototype = {
+  toString: function() {
+    return "AuthenticationError(" + this.details + ")";
+  }
+}
+
+// Identity manager backed by Firefox Accounts and the token server.
+this.BrowserIDManager = function BrowserIDManager() {
+  // NOTE: _fxaService and _tokenServerClient are replaced with mocks by
+  // the test suite.
+  this._fxaService = fxAccounts;
+  this._tokenServerClient = new TokenServerClient();
+  this._tokenServerClient.observerPrefix = "weave:service";
+  // will be a promise that resolves when we are ready to authenticate
+  this.whenReadyToAuthenticate = null;
+  this._log = log;
+};
+
+this.BrowserIDManager.prototype = {
+  // Inherit the generic identity-manager behavior and override the
+  // FxA-specific parts below.
+  __proto__: IdentityManager.prototype,
+
+  _fxaService: null,
+  _tokenServerClient: null,
+  // The token-server token for the current user, or null.
+  // https://docs.services.mozilla.com/token/apis.html
+  _token: null,
+  _signedInUser: null, // the signedinuser we got from FxAccounts.
+
+  // null if no error, otherwise a LOGIN_FAILED_* value that indicates why
+  // we failed to authenticate (but note it might not be an actual
+  // authentication problem, just a transient network error or similar)
+  _authFailureReason: null,
+
+  // it takes some time to fetch a sync key bundle, so until this flag is set,
+  // we don't consider the lack of a keybundle as a failure state.
+  _shouldHaveSyncKeyBundle: false,
+
+  // Whether the user asked to customize what gets synced during FxA signup
+  // (consumed and cleared in initializeWithCurrentIdentity).
+  get needsCustomization() {
+    try {
+      return Services.prefs.getBoolPref(PREF_SYNC_SHOW_CUSTOMIZATION);
+    } catch (e) {
+      // Pref is not set.
+      return false;
+    }
+  },
+
+  // The hashed FxA uid from the token-server token; throws if we don't have
+  // a token yet.
+  hashedUID() {
+    if (!this._token) {
+      throw new Error("hashedUID: Don't have token");
+    }
+    return this._token.hashed_fxa_uid
+  },
+
+  // The FxA device id, or a falsy value when no user is signed in.
+  deviceID() {
+    return this._signedInUser && this._signedInUser.deviceId;
+  },
+
+  // Register FxA observers and prime this.account from the signed-in user.
+  initialize: function() {
+    for (let topic of OBSERVER_TOPICS) {
+      Services.obs.addObserver(this, topic, false);
+    }
+    // and a background fetch of account data just so we can set this.account,
+    // so we have a username available before we've actually done a login.
+    // XXX - this is actually a hack just for tests and really shouldn't be
+    // necessary. Also, you'd think it would be safe to allow this.account to
+    // be set to null when there's no user logged in, but argue with the test
+    // suite, not with me :)
+    this._fxaService.getSignedInUser().then(accountData => {
+      if (accountData) {
+        this.account = accountData.email;
+      }
+    }).catch(err => {
+      // As above, this is only for tests so it is safe to ignore.
+    });
+  },
+
+  /**
+   * Ensure the user is logged in. Returns a promise that resolves when
+   * the user is logged in, or is rejected if the login attempt has failed.
+   */
+  ensureLoggedIn: function() {
+    if (!this._shouldHaveSyncKeyBundle && this.whenReadyToAuthenticate) {
+      // We are already in the process of logging in.
+      return this.whenReadyToAuthenticate.promise;
+    }
+
+    // If we are already happy then there is nothing more to do.
+    if (this._syncKeyBundle) {
+      return Promise.resolve();
+    }
+
+    // Similarly, if we have a previous failure that implies an explicit
+    // re-entering of credentials by the user is necessary we don't take any
+    // further action - an observer will fire when the user does that.
+    if (Weave.Status.login == LOGIN_FAILED_LOGIN_REJECTED) {
+      return Promise.reject(new Error("User needs to re-authenticate"));
+    }
+
+    // So - we've a previous auth problem and aren't currently attempting to
+    // log in - so fire that off.
+    this.initializeWithCurrentIdentity();
+    return this.whenReadyToAuthenticate.promise;
+  },
+
+  // Tear down: remove observers and drop all credentials and user state.
+  finalize: function() {
+    // After this is called, we can expect Service.identity != this.
+    for (let topic of OBSERVER_TOPICS) {
+      Services.obs.removeObserver(this, topic);
+    }
+    this.resetCredentials();
+    this._signedInUser = null;
+  },
+
+  // Show the modal "choose what to sync" dialog and return its result object
+  // ({accepted: boolean}, filled in by the dialog).
+  offerSyncOptions: function () {
+    // If the user chose to "Customize sync options" when signing
+    // up with Firefox Accounts, ask them to choose what to sync.
+    const url = "chrome://browser/content/sync/customize.xul";
+    const features = "centerscreen,chrome,modal,dialog,resizable=no";
+    let win = Services.wm.getMostRecentWindow("navigator:browser");
+
+    let data = {accepted: false};
+    win.openDialog(url, "_blank", features, data);
+
+    return data;
+  },
+
+  // Kick off (re-)authentication for the currently signed-in FxA user.
+  // Resolves once the auth process has *started*; completion is signalled
+  // via this.whenReadyToAuthenticate.promise.
+  initializeWithCurrentIdentity: function(isInitialSync=false) {
+    // While this function returns a promise that resolves once we've started
+    // the auth process, that process is complete when
+    // this.whenReadyToAuthenticate.promise resolves.
+    this._log.trace("initializeWithCurrentIdentity");
+
+    // Reset the world before we do anything async.
+    // NOTE(review): Promise.defer() is a legacy deferred API - confirm it is
+    // still available on this platform version.
+    this.whenReadyToAuthenticate = Promise.defer();
+    this.whenReadyToAuthenticate.promise.catch(err => {
+      this._log.error("Could not authenticate", err);
+    });
+
+    // initializeWithCurrentIdentity() can be called after the
+    // identity module was first initialized, e.g., after the
+    // user completes a force authentication, so we should make
+    // sure all credentials are reset before proceeding.
+    this.resetCredentials();
+    this._authFailureReason = null;
+
+    return this._fxaService.getSignedInUser().then(accountData => {
+      if (!accountData) {
+        this._log.info("initializeWithCurrentIdentity has no user logged in");
+        this.account = null;
+        // and we are as ready as we can ever be for auth.
+        this._shouldHaveSyncKeyBundle = true;
+        this.whenReadyToAuthenticate.reject("no user is logged in");
+        return;
+      }
+
+      this.account = accountData.email;
+      this._updateSignedInUser(accountData);
+      // The user must be verified before we can do anything at all; we kick
+      // this and the rest of initialization off in the background (ie, we
+      // don't return the promise)
+      this._log.info("Waiting for user to be verified.");
+      this._fxaService.whenVerified(accountData).then(accountData => {
+        this._updateSignedInUser(accountData);
+        this._log.info("Starting fetch for key bundle.");
+        if (this.needsCustomization) {
+          let data = this.offerSyncOptions();
+          if (data.accepted) {
+            Services.prefs.clearUserPref(PREF_SYNC_SHOW_CUSTOMIZATION);
+
+            // Mark any non-selected engines as declined.
+            Weave.Service.engineManager.declineDisabled();
+          } else {
+            // Log out if the user canceled the dialog.
+            return this._fxaService.signOut();
+          }
+        }
+      }).then(() => {
+        return this._fetchTokenForUser();
+      }).then(token => {
+        this._token = token;
+        this._shouldHaveSyncKeyBundle = true; // and we should actually have one...
+        this.whenReadyToAuthenticate.resolve();
+        this._log.info("Background fetch for key bundle done");
+        Weave.Status.login = LOGIN_SUCCEEDED;
+        if (isInitialSync) {
+          this._log.info("Doing initial sync actions");
+          Svc.Prefs.set("firstSync", "resetClient");
+          Services.obs.notifyObservers(null, "weave:service:setup-complete", null);
+          Weave.Utils.nextTick(Weave.Service.sync, Weave.Service);
+        }
+      }).catch(authErr => {
+        // report what failed...
+        this._log.error("Background fetch for key bundle failed", authErr);
+        this._shouldHaveSyncKeyBundle = true; // but we probably don't have one...
+        this.whenReadyToAuthenticate.reject(authErr);
+      });
+      // and we are done - the fetch continues on in the background...
+    }).catch(err => {
+      this._log.error("Processing logged in account", err);
+    });
+  },
+
+  // Record (or augment) the signed-in user's data; throws if called with a
+  // different user's data than previously seen.
+  _updateSignedInUser: function(userData) {
+    // This object should only ever be used for a single user. It is an
+    // error to update the data if the user changes (but updates are still
+    // necessary, as each call may add more attributes to the user).
+    // We start with no user, so an initial update is always ok.
+    if (this._signedInUser && this._signedInUser.email != userData.email) {
+      throw new Error("Attempting to update to a different user.")
+    }
+    this._signedInUser = userData;
+  },
+
+  logout: function() {
+    // This will be called when sync fails (or when the account is being
+    // unlinked etc). It may have failed because we got a 401 from a sync
+    // server, so we nuke the token. Next time sync runs and wants an
+    // authentication header, we will notice the lack of the token and fetch a
+    // new one.
+    this._token = null;
+  },
+
+  // nsIObserver entry point for the FxA notifications registered in
+  // initialize() (login, logout, account state change).
+  observe: function (subject, topic, data) {
+    this._log.debug("observed " + topic);
+    switch (topic) {
+      case fxAccountsCommon.ONLOGIN_NOTIFICATION:
+        // This should only happen if we've been initialized without a current
+        // user - otherwise we'd have seen the LOGOUT notification and been
+        // thrown away.
+        // The exception is when we've initialized with a user that needs to
+        // reauth with the server - in that case we will also get here, but
+        // should have the same identity.
+        // initializeWithCurrentIdentity will throw and log if these constraints
+        // aren't met (indirectly, via _updateSignedInUser()), so just go ahead
+        // and do the init.
+        this.initializeWithCurrentIdentity(true);
+        break;
+
+      case fxAccountsCommon.ONLOGOUT_NOTIFICATION:
+        Weave.Service.startOver();
+        // startOver will cause this instance to be thrown away, so there's
+        // nothing else to do.
+        break;
+
+      case fxAccountsCommon.ON_ACCOUNT_STATE_CHANGE_NOTIFICATION:
+        // throw away token and fetch a new one
+        this.resetCredentials();
+        this._ensureValidToken().catch(err =>
+          this._log.error("Error while fetching a new token", err));
+        break;
+    }
+  },
+
+  /**
+   * Compute the sha256 of the message bytes. Return bytes.
+   */
+  _sha256: function(message) {
+    let hasher = Cc["@mozilla.org/security/hash;1"]
+                 .createInstance(Ci.nsICryptoHash);
+    hasher.init(hasher.SHA256);
+    return CryptoUtils.digestBytes(message, hasher);
+  },
+
+  /**
+   * Compute the X-Client-State header given the byte string kB.
+   *
+   * Return string: hex(first16Bytes(sha256(kBbytes)))
+   */
+  _computeXClientState: function(kBbytes) {
+    return CommonUtils.bytesAsHex(this._sha256(kBbytes).slice(0, 16), false);
+  },
+
+  /**
+   * Provide override point for testing token expiration.
+   */
+  _now: function() {
+    return this._fxaService.now()
+  },
+
+  // Clock-skew offset, delegated to the FxA service.
+  get _localtimeOffsetMsec() {
+    return this._fxaService.localtimeOffsetMsec;
+  },
+
+  usernameFromAccount: function(val) {
+    // we don't differentiate between "username" and "account"
+    return val;
+  },
+
+  /**
+   * Obtains the HTTP Basic auth password.
+   *
+   * Returns a string if set or null if it is not set.
+   * Not applicable for FxA-based identity; logs an error and returns null.
+   */
+  get basicPassword() {
+    this._log.error("basicPassword getter should be not used in BrowserIDManager");
+    return null;
+  },
+
+  /**
+   * Set the HTTP basic password to use.
+   *
+   * Changes will not persist unless persistSyncCredentials() is called.
+   * Not applicable for FxA-based identity; always throws.
+   */
+  set basicPassword(value) {
+    throw "basicPassword setter should be not used in BrowserIDManager";
+  },
+
+  /**
+   * Obtain the Sync Key.
+   *
+   * This returns a 26 character "friendly" Base32 encoded string on success or
+   * null if no Sync Key could be found.
+   *
+   * If the Sync Key hasn't been set in this session, this will look in the
+   * password manager for the sync key.
+   */
+  get syncKey() {
+    if (this.syncKeyBundle) {
+      // TODO: This is probably fine because the code shouldn't be
+      // using the sync key directly (it should use the sync key
+      // bundle), but I don't like it. We should probably refactor
+      // code that is inspecting this to not do validation on this
+      // field directly and instead call a isSyncKeyValid() function
+      // that we can override.
+      return "99999999999999999999999999";
+    }
+    else {
+      return null;
+    }
+  },
+
+  set syncKey(value) {
+    throw "syncKey setter should be not used in BrowserIDManager";
+  },
+
+  get syncKeyBundle() {
+    return this._syncKeyBundle;
+  },
+
+  /**
+   * Resets/Drops all credentials we hold for the current user.
+   */
+  resetCredentials: function() {
+    this.resetSyncKey();
+    this._token = null;
+    // The cluster URL comes from the token, so resetting it to empty will
+    // force Sync to not accidentally use a value from an earlier token.
+    Weave.Service.clusterURL = null;
+  },
+
+  /**
+   * Resets/Drops the sync key we hold for the current user.
+   */
+  resetSyncKey: function() {
+    this._syncKey = null;
+    this._syncKeyBundle = null;
+    this._syncKeyUpdated = true;
+    this._shouldHaveSyncKeyBundle = false;
+  },
+
+  /**
+   * Pre-fetches any information that might help with migration away from this
+   * identity. Called after every sync and is really just an optimization that
+   * allows us to avoid a network request for when we actually need the
+   * migration info.
+   */
+  prefetchMigrationSentinel: function(service) {
+    // nothing to do here until we decide to migrate away from FxA.
+  },
+
+  /**
+   * Return credentials hosts for this identity only.
+   */
+  _getSyncCredentialsHosts: function() {
+    return Utils.getSyncCredentialsHostsFxA();
+  },
+
+  /**
+   * The current state of the auth credentials.
+   *
+   * This essentially validates that enough credentials are available to use
+   * Sync. It doesn't check we have all the keys we need as the master-password
+   * may have been locked when we tried to get them - we rely on
+   * unlockAndVerifyAuthState to check that for us.
+   *
+   * Returns a previously-recorded failure reason, LOGIN_FAILED_NO_USERNAME,
+   * or STATUS_OK.
+   */
+  get currentAuthState() {
+    if (this._authFailureReason) {
+      this._log.info("currentAuthState returning " + this._authFailureReason +
+                     " due to previous failure");
+      return this._authFailureReason;
+    }
+    // TODO: need to revisit this. Currently this isn't ready to go until
+    // both the username and syncKeyBundle are both configured and having no
+    // username seems to make things fail fast so that's good.
+    if (!this.username) {
+      return LOGIN_FAILED_NO_USERNAME;
+    }
+
+    return STATUS_OK;
+  },
+
+  // Do we currently have keys, or do we have enough that we should be able
+  // to successfully fetch them?
+  _canFetchKeys: function() {
+    let userData = this._signedInUser;
+    // a keyFetchToken means we can almost certainly grab them.
+    // kA and kB means we already have them.
+    return userData && (userData.keyFetchToken || (userData.kA && userData.kB));
+  },
+
+  /**
+   * Verify the current auth state, unlocking the master-password if necessary.
+   *
+   * Returns a promise that resolves with the current auth state after
+   * attempting to unlock: STATUS_OK, MASTER_PASSWORD_LOCKED, or
+   * LOGIN_FAILED_LOGIN_REJECTED.
+   */
+  unlockAndVerifyAuthState: function() {
+    if (this._canFetchKeys()) {
+      log.debug("unlockAndVerifyAuthState already has (or can fetch) sync keys");
+      return Promise.resolve(STATUS_OK);
+    }
+    // so no keys - ensure MP unlocked.
+    if (!Utils.ensureMPUnlocked()) {
+      // user declined to unlock, so we don't know if they are stored there.
+      log.debug("unlockAndVerifyAuthState: user declined to unlock master-password");
+      return Promise.resolve(MASTER_PASSWORD_LOCKED);
+    }
+    // now we are unlocked we must re-fetch the user data as we may now have
+    // the details that were previously locked away.
+    return this._fxaService.getSignedInUser().then(
+      accountData => {
+        this._updateSignedInUser(accountData);
+        // If we still can't get keys it probably means the user authenticated
+        // without unlocking the MP or cleared the saved logins, so we've now
+        // lost them - the user will need to reauth before continuing.
+        let result;
+        if (this._canFetchKeys()) {
+          result = STATUS_OK;
+        } else {
+          result = LOGIN_FAILED_LOGIN_REJECTED;
+        }
+        log.debug("unlockAndVerifyAuthState re-fetched credentials and is returning", result);
+        return result;
+      }
+    );
+  },
+
+  /**
+   * Do we have a non-null, not yet expired token for the user currently
+   * signed in?
+   */
+  hasValidToken: function() {
+    // If pref is set to ignore cached authentication credentials for debugging,
+    // then return false to force the fetching of a new token.
+    let ignoreCachedAuthCredentials = false;
+    try {
+      ignoreCachedAuthCredentials = Svc.Prefs.get("debug.ignoreCachedAuthCredentials");
+    } catch(e) {
+      // Pref doesn't exist
+    }
+    if (ignoreCachedAuthCredentials) {
+      return false;
+    }
+    if (!this._token) {
+      return false;
+    }
+    // Expired tokens are treated the same as missing ones.
+    if (this._token.expiration < this._now()) {
+      return false;
+    }
+    return true;
+  },
+
+  // Get our tokenServerURL - a private helper. Returns a string with any
+  // trailing slashes stripped.
+  get _tokenServerUrl() {
+    // We used to support services.sync.tokenServerURI but this was a
+    // pain-point for people using non-default servers as Sync may auto-reset
+    // all services.sync prefs. So if that still exists, it wins.
+    let url = Svc.Prefs.get("tokenServerURI"); // Svc.Prefs "root" is services.sync
+    if (!url) {
+      url = Services.prefs.getCharPref("identity.sync.tokenserver.uri");
+    }
+    while (url.endsWith("/")) { // trailing slashes cause problems...
+      url = url.slice(0, -1);
+    }
+    return url;
+  },
+
  // Refresh the sync token for our user. Returns a promise that resolves
  // with a token (which may be null in one sad edge-case), or rejects with an
  // error.
  _fetchTokenForUser: function() {
    // tokenServerURI is mis-named - convention is uri means nsISomething...
    let tokenServerURI = this._tokenServerUrl;
    let log = this._log;
    let client = this._tokenServerClient;
    let fxa = this._fxaService;
    let userData = this._signedInUser;

    // We need kA and kB for things to work. If we don't have them, just
    // return null for the token - sync calling unlockAndVerifyAuthState()
    // before actually syncing will setup the error states if necessary.
    if (!this._canFetchKeys()) {
      log.info("Unable to fetch keys (master-password locked?), so aborting token fetch");
      return Promise.resolve(null);
    }

    let maybeFetchKeys = () => {
      // This is called at login time and every time we need a new token - in
      // the latter case we already have kA and kB, so optimise that case.
      // (Returning undefined rather than a promise is fine for the .then()
      // link in the chain below.)
      if (userData.kA && userData.kB) {
        return;
      }
      log.info("Fetching new keys");
      return this._fxaService.getKeys().then(
        newUserData => {
          userData = newUserData;
          this._updateSignedInUser(userData); // throws if the user changed.
        }
      );
    }

    // Adapt the callback-style token server client API into a promise.
    let getToken = assertion => {
      log.debug("Getting a token");
      let deferred = Promise.defer();
      let cb = function (err, token) {
        if (err) {
          return deferred.reject(err);
        }
        log.debug("Successfully got a sync token");
        return deferred.resolve(token);
      };

      // The X-Client-State header is derived from kB so the token server can
      // detect when the user's key material has changed.
      let kBbytes = CommonUtils.hexToBytes(userData.kB);
      let headers = {"X-Client-State": this._computeXClientState(kBbytes)};
      client.getTokenFromBrowserIDAssertion(tokenServerURI, assertion, cb, headers);
      return deferred.promise;
    }

    let getAssertion = () => {
      log.info("Getting an assertion from", tokenServerURI);
      let audience = Services.io.newURI(tokenServerURI, null, null).prePath;
      return fxa.getAssertion(audience);
    };

    // wait until the account email is verified and we know that
    // getAssertion() will return a real assertion (not null).
    return fxa.whenVerified(this._signedInUser)
      .then(() => maybeFetchKeys())
      .then(() => getAssertion())
      .then(assertion => getToken(assertion))
      .catch(err => {
        // If we get a 401 fetching the token it may be that our certificate
        // needs to be regenerated.
        if (!err.response || err.response.status !== 401) {
          return Promise.reject(err);
        }
        log.warn("Token server returned 401, refreshing certificate and retrying token fetch");
        return fxa.invalidateCertificate()
          .then(() => getAssertion())
          .then(assertion => getToken(assertion))
      })
      .then(token => {
        // TODO: Make it be only 80% of the duration, so refresh the token
        // before it actually expires. This is to avoid sync storage errors
        // otherwise, we get a nasty notification bar briefly. Bug 966568.
        token.expiration = this._now() + (token.duration * 1000) * 0.80;
        if (!this._syncKeyBundle) {
          // We are given kA/kB as hex.
          this._syncKeyBundle = deriveKeyBundle(Utils.hexToBytes(userData.kB));
        }
        return token;
      })
      .catch(err => {
        // Classify the failure so Weave.Status.login reflects the right state.
        // TODO: unify these errors - we need to handle errors thrown by
        // both tokenserverclient and hawkclient.
        // A tokenserver error thrown based on a bad response.
        if (err.response && err.response.status === 401) {
          err = new AuthenticationError(err, "tokenserver");
        // A hawkclient error.
        } else if (err.code && err.code === 401) {
          err = new AuthenticationError(err, "hawkclient");
        // An FxAccounts.jsm error.
        } else if (err.message == fxAccountsCommon.ERROR_AUTH_ERROR) {
          err = new AuthenticationError(err, "fxaccounts");
        }

        // TODO: write tests to make sure that different auth error cases are handled here
        // properly: auth error getting assertion, auth error getting token (invalid generation
        // and client-state error)
        if (err instanceof AuthenticationError) {
          this._log.error("Authentication error in _fetchTokenForUser", err);
          // set it to the "fatal" LOGIN_FAILED_LOGIN_REJECTED reason.
          this._authFailureReason = LOGIN_FAILED_LOGIN_REJECTED;
        } else {
          this._log.error("Non-authentication error in _fetchTokenForUser", err);
          // for now assume it is just a transient network related problem
          // (although sadly, it might also be a regular unhandled exception)
          this._authFailureReason = LOGIN_FAILED_NETWORK_ERROR;
        }
        // this._authFailureReason being set to be non-null in the above if clause
        // ensures we are in the correct currentAuthState, and
        // this._shouldHaveSyncKeyBundle being true ensures everything that cares knows
        // that there is no authentication dance still under way.
        this._shouldHaveSyncKeyBundle = true;
        Weave.Status.login = this._authFailureReason;
        throw err;
      });
  },
+
+ // Returns a promise that is resolved when we have a valid token for the
+ // current user stored in this._token. When resolved, this._token is valid.
+ _ensureValidToken: function() {
+ if (this.hasValidToken()) {
+ this._log.debug("_ensureValidToken already has one");
+ return Promise.resolve();
+ }
+ const notifyStateChanged =
+ () => Services.obs.notifyObservers(null, "weave:service:login:change", null);
+ // reset this._token as a safety net to reduce the possibility of us
+ // repeatedly attempting to use an invalid token if _fetchTokenForUser throws.
+ this._token = null;
+ return this._fetchTokenForUser().then(
+ token => {
+ this._token = token;
+ notifyStateChanged();
+ },
+ error => {
+ notifyStateChanged();
+ throw error
+ }
+ );
+ },
+
+ getResourceAuthenticator: function () {
+ return this._getAuthenticationHeader.bind(this);
+ },
+
+ /**
+ * Obtain a function to be used for adding auth to RESTRequest instances.
+ */
+ getRESTRequestAuthenticator: function() {
+ return this._addAuthenticationHeader.bind(this);
+ },
+
  /**
   * @return a Hawk HTTP Authorization Header, lightly wrapped, for the .uri
   * of a RESTRequest or AsyncResponse object.
   *
   * Note this blocks (spins the event loop via Async.makeSpinningCallback)
   * until a token is available, and returns null rather than throwing when
   * no token could be obtained.
   */
  _getAuthenticationHeader: function(httpObject, method) {
    let cb = Async.makeSpinningCallback();
    // then(cb, cb): on success the resolved value (undefined) lands in the
    // callback's "error" slot and is treated as no-error; a rejection lands
    // there too and is rethrown by cb.wait() below.
    this._ensureValidToken().then(cb, cb);
    // Note that in failure states we return null, causing the request to be
    // made without authorization headers, thereby presumably causing a 401,
    // which causes Sync to log out. If we throw, this may not happen as
    // expected.
    try {
      cb.wait();
    } catch (ex) {
      if (Async.isShutdownException(ex)) {
        throw ex;
      }
      this._log.error("Failed to fetch a token for authentication", ex);
      return null;
    }
    if (!this._token) {
      return null;
    }
    let credentials = {algorithm: "sha256",
                       id: this._token.id,
                       key: this._token.key,
                      };
    method = method || httpObject.method;

    // Get the local clock offset from the Firefox Accounts server. This should
    // be close to the offset from the storage server.
    let options = {
      now: this._now(),
      localtimeOffsetMsec: this._localtimeOffsetMsec,
      credentials: credentials,
    };

    let headerValue = CryptoUtils.computeHAWK(httpObject.uri, method, options);
    return {headers: {authorization: headerValue.field}};
  },
+
+ _addAuthenticationHeader: function(request, method) {
+ let header = this._getAuthenticationHeader(request, method);
+ if (!header) {
+ return null;
+ }
+ request.setHeader("authorization", header.headers.authorization);
+ return request;
+ },
+
+ createClusterManager: function(service) {
+ return new BrowserIDClusterManager(service);
+ },
+
  // Tell Sync what the login status should be if it saw a 401 fetching
  // info/collections as part of login verification (typically immediately
  // after login.)
  // In our case, it almost certainly means a transient error fetching a token
  // (and hitting this will cause us to logout, which will correctly handle an
  // authoritative login issue.)
  // Returning LOGIN_FAILED_NETWORK_ERROR (rather than a "rejected" status)
  // means the failure is treated as transient.
  loginStatusFromVerification404() {
    return LOGIN_FAILED_NETWORK_ERROR;
  },
+};
+
/* An implementation of the ClusterManager for this identity
 */

/**
 * @param service the Weave Service instance this cluster manager serves;
 *        forwarded to the base ClusterManager constructor.
 */
function BrowserIDClusterManager(service) {
  ClusterManager.call(this, service);
}
+
BrowserIDClusterManager.prototype = {
  __proto__: ClusterManager.prototype,

  // Determine the storage endpoint for this user. Blocks on a spinning
  // callback until the identity is ready and a token is available. Returns
  // the endpoint URL string, null on an authoritative auth failure, or
  // throws for transient errors (see the long comment in the error handler
  // below for why the distinction matters).
  _findCluster: function() {
    let endPointFromIdentityToken = function() {
      // The only reason (in theory ;) that we can end up with a null token
      // is when this.identity._canFetchKeys() returned false. In turn, this
      // should only happen if the master-password is locked or the credentials
      // storage is screwed, and in those cases we shouldn't have started
      // syncing so shouldn't get here anyway.
      // But better safe than sorry! To keep things clearer, throw an explicit
      // exception - the message will appear in the logs and the error will be
      // treated as transient.
      if (!this.identity._token) {
        throw new Error("Can't get a cluster URL as we can't fetch keys.");
      }
      let endpoint = this.identity._token.endpoint;
      // For Sync 1.5 storage endpoints, we use the base endpoint verbatim.
      // However, it should end in "/" because we will extend it with
      // well known path components. So we add a "/" if it's missing.
      if (!endpoint.endsWith("/")) {
        endpoint += "/";
      }
      // NOTE(review): `log` here is assumed to be a module-level logger
      // defined earlier in this file, not a property of this object.
      log.debug("_findCluster returning " + endpoint);
      return endpoint;
    }.bind(this);

    // Spinningly ensure we are ready to authenticate and have a valid token.
    let promiseClusterURL = function() {
      return this.identity.whenReadyToAuthenticate.promise.then(
        () => {
          // We need to handle node reassignment here. If we are being asked
          // for a clusterURL while the service already has a clusterURL, then
          // it's likely a 401 was received using the existing token - in which
          // case we just discard the existing token and fetch a new one.
          if (this.service.clusterURL) {
            log.debug("_findCluster has a pre-existing clusterURL, so discarding the current token");
            this.identity._token = null;
          }
          return this.identity._ensureValidToken();
        }
      ).then(endPointFromIdentityToken
      );
    }.bind(this);

    let cb = Async.makeSpinningCallback();
    promiseClusterURL().then(function (clusterURL) {
      cb(null, clusterURL);
    }).then(
      null, err => {
        log.info("Failed to fetch the cluster URL", err);
      // service.js's verifyLogin() method will attempt to fetch a cluster
      // URL when it sees a 401. If it gets null, it treats it as a "real"
      // auth error and sets Status.login to LOGIN_FAILED_LOGIN_REJECTED, which
      // in turn causes a notification bar to appear informing the user they
      // need to re-authenticate.
      // On the other hand, if fetching the cluster URL fails with an exception,
      // verifyLogin() assumes it is a transient error, and thus doesn't show
      // the notification bar under the assumption the issue will resolve
      // itself.
      // Thus:
      // * On a real 401, we must return null.
      // * On any other problem we must let an exception bubble up.
      if (err instanceof AuthenticationError) {
        // callback with no error and a null result - cb.wait() returns null.
        cb(null, null);
      } else {
        // callback with an error - cb.wait() completes by raising an exception.
        cb(err);
      }
    });
    return cb.wait();
  },

  // Return the base URL for the current user's storage node.
  getUserBaseURL: function() {
    // Legacy Sync and FxA Sync construct the userBaseURL differently. Legacy
    // Sync appends path components onto an empty path, and in FxA Sync the
    // token server constructs this for us in an opaque manner. Since the
    // cluster manager already sets the clusterURL on Service and also has
    // access to the current identity, we added this functionality here.
    return this.service.clusterURL;
  }
}
diff --git a/services/sync/modules/collection_validator.js b/services/sync/modules/collection_validator.js
new file mode 100644
index 000000000..41141bba3
--- /dev/null
+++ b/services/sync/modules/collection_validator.js
@@ -0,0 +1,204 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+"use strict";
+
+const Cu = Components.utils;
+
+Cu.import("resource://services-sync/record.js");
+Cu.import("resource://services-sync/main.js");
+
+this.EXPORTED_SYMBOLS = ["CollectionValidator", "CollectionProblemData"];
+
class CollectionProblemData {
  /**
   * Accumulates validation problems for one collection. The array-valued
   * fields hold ids (or, for `differences`, {id, differences} entries);
   * missingIDs is a plain counter.
   */
  constructor() {
    this.missingIDs = 0;
    this.duplicates = [];
    this.clientMissing = [];
    this.serverMissing = [];
    this.serverDeleted = [];
    this.serverUnexpected = [];
    this.differences = [];
  }

  /**
   * Produce a list summarizing problems found. Each entry contains {name, count},
   * where name is the field name for the problem, and count is the number of
   * times the problem was encountered.
   *
   * Validation has failed if all counts are not 0.
   */
  getSummary() {
    const arrayFields = ["clientMissing", "serverMissing", "serverDeleted",
                         "serverUnexpected", "differences"];
    const summary = arrayFields.map(name => ({ name, count: this[name].length }));
    summary.push({ name: "missingIDs", count: this.missingIDs });
    summary.push({ name: "duplicates", count: this.duplicates.length });
    return summary;
  }
}
+
class CollectionValidator {
  // Construct a generic collection validator. This is intended to be called by
  // subclasses.
  // - name: Name of the engine
  // - idProp: Property that identifies a record. That is, if a client and server
  //   record have the same value for the idProp property, they should be
  //   compared against each other.
  // - props: Array of properties that should be compared
  constructor(name, idProp, props) {
    this.name = name;
    this.props = props;
    this.idProp = idProp;
  }

  // Should a custom ProblemData type be needed, return it here.
  emptyProblemData() {
    return new CollectionProblemData();
  }

  // Fetch and decrypt every record in the engine's collection. Throws the
  // response object when the fetch was not successful.
  getServerItems(engine) {
    let collection = engine.itemSource();
    let collectionKey = engine.service.collectionKeys.keyForCollection(engine.name);
    collection.full = true;
    let items = [];
    collection.recordHandler = function(item) {
      item.decrypt(collectionKey);
      items.push(item.cleartext);
    };
    let resp = collection.getBatched();
    if (!resp.success) {
      throw resp;
    }
    return items;
  }

  // Should return a promise that resolves to an array of client items.
  getClientItems() {
    return Promise.reject("Must implement");
  }

  // Turn the client item into something that can be compared with the server item,
  // and is also safe to mutate.
  normalizeClientItem(item) {
    return Cu.cloneInto(item, {});
  }

  // Turn the server item into something that can be easily compared with the client
  // items.
  normalizeServerItem(item) {
    return item;
  }

  // Return whether or not a server item should be present on the client. Expected
  // to be overridden.
  clientUnderstands(item) {
    return true;
  }

  // Return whether or not a client item should be present on the server. Expected
  // to be overridden.
  syncedByClient(item) {
    return true;
  }

  // Compare the server item and the client item, and return a list of property
  // names that are different. Can be overridden if needed.
  getDifferences(client, server) {
    let differences = [];
    for (let prop of this.props) {
      let clientProp = client[prop];
      let serverProp = server[prop];
      // Falsy values (null, undefined, "", 0, false) are treated as equal.
      if ((clientProp || "") !== (serverProp || "")) {
        differences.push(prop);
      }
    }
    return differences;
  }

  // Returns an object containing
  // problemData: an instance of the class returned by emptyProblemData(),
  // clientRecords: Normalized client records
  // records: Normalized server records,
  // deletedRecords: Array of server records that were marked as deleted.
  // (Note: normalized records are annotated in place - server records get an
  // `understood` flag and client records a `shouldSync` flag.)
  compareClientWithServer(clientItems, serverItems) {
    clientItems = clientItems.map(item => this.normalizeClientItem(item));
    serverItems = serverItems.map(item => this.normalizeServerItem(item));
    let problems = this.emptyProblemData();
    let seenServer = new Map();
    let serverDeleted = new Set();
    let allRecords = new Map();

    for (let record of serverItems) {
      let id = record[this.idProp];
      if (!id) {
        ++problems.missingIDs;
        continue;
      }
      if (record.deleted) {
        serverDeleted.add(record);
      } else {
        let possibleDupe = seenServer.get(id);
        if (possibleDupe) {
          problems.duplicates.push(id);
        } else {
          seenServer.set(id, record);
          allRecords.set(id, { server: record, client: null, });
        }
        record.understood = this.clientUnderstands(record);
      }
    }

    // (The unused `recordPairs` and `seenClient` locals the original version
    // built here were dead code and have been removed.)
    for (let record of clientItems) {
      let id = record[this.idProp];
      record.shouldSync = this.syncedByClient(record);
      let combined = allRecords.get(id);
      if (combined) {
        combined.client = record;
      } else {
        allRecords.set(id, { client: record, server: null });
      }
    }

    for (let [id, { server, client }] of allRecords) {
      if (!client && !server) {
        throw new Error("Impossible: no client or server record for " + id);
      } else if (server && !client) {
        if (server.understood) {
          problems.clientMissing.push(id);
        }
      } else if (client && !server) {
        if (client.shouldSync) {
          problems.serverMissing.push(id);
        }
      } else {
        if (!client.shouldSync) {
          if (!problems.serverUnexpected.includes(id)) {
            problems.serverUnexpected.push(id);
          }
          continue;
        }
        let differences = this.getDifferences(client, server);
        if (differences && differences.length) {
          problems.differences.push({ id, differences });
        }
      }
    }
    return {
      problemData: problems,
      clientRecords: clientItems,
      records: serverItems,
      deletedRecords: [...serverDeleted]
    };
  }
}

// Default to 0, some engines may override.
CollectionValidator.prototype.version = 0;
diff --git a/services/sync/modules/constants.js b/services/sync/modules/constants.js
new file mode 100644
index 000000000..f70bbd61c
--- /dev/null
+++ b/services/sync/modules/constants.js
@@ -0,0 +1,198 @@
+#filter substitution
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
// Process each item in the "constants hash" to add to "global" and give a name.
// Every key below becomes both a property of this module's global scope and an
// entry in EXPORTED_SYMBOLS, so consumers can import any constant by name.
// NOTE: "@weave_version@" is replaced at build time via the
// "#filter substitution" preprocessor directive at the top of this file.
this.EXPORTED_SYMBOLS = [];
for (let [key, val] of Object.entries({

WEAVE_VERSION: "@weave_version@",

// Sync Server API version that the client supports.
SYNC_API_VERSION: "1.1",
USER_API_VERSION: "1.0",
MISC_API_VERSION: "1.0",

// Version of the data format this client supports. The data format describes
// how records are packaged; this is separate from the Server API version and
// the per-engine cleartext formats.
STORAGE_VERSION: 5,
PREFS_BRANCH: "services.sync.",

// Host "key" to access Weave Identity in the password manager
PWDMGR_HOST: "chrome://weave",
PWDMGR_PASSWORD_REALM: "Mozilla Services Password",
PWDMGR_PASSPHRASE_REALM: "Mozilla Services Encryption Passphrase",
PWDMGR_KEYBUNDLE_REALM: "Mozilla Services Key Bundles",

// Put in [] because those aren't allowed in a collection name.
DEFAULT_KEYBUNDLE_NAME: "[default]",

// Our extra input to SHA256-HMAC in generateEntry.
// This includes the full crypto spec; change this when our algo changes.
HMAC_INPUT: "Sync-AES_256_CBC-HMAC256",

// Key dimensions.
SYNC_KEY_ENCODED_LENGTH: 26,
SYNC_KEY_DECODED_LENGTH: 16,
SYNC_KEY_HYPHENATED_LENGTH: 31, // 26 chars, 5 hyphens.

NO_SYNC_NODE_INTERVAL: 10 * 60 * 1000, // 10 minutes

MAX_ERROR_COUNT_BEFORE_BACKOFF: 3,
MAX_IGNORE_ERROR_COUNT: 5,

// Backoff intervals
MINIMUM_BACKOFF_INTERVAL: 15 * 60 * 1000, // 15 minutes
MAXIMUM_BACKOFF_INTERVAL: 8 * 60 * 60 * 1000, // 8 hours

// HMAC event handling timeout.
// 10 minutes: a compromise between the multi-desktop sync interval
// and the mobile sync interval.
HMAC_EVENT_INTERVAL: 600000,

// How long to wait between sync attempts if the Master Password is locked.
MASTER_PASSWORD_LOCKED_RETRY_INTERVAL: 15 * 60 * 1000, // 15 minutes

// The default for how long we "block" sync from running when doing a migration.
DEFAULT_BLOCK_PERIOD: 2 * 24 * 60 * 60 * 1000, // 2 days

// Separate from the ID fetch batch size to allow tuning for mobile.
MOBILE_BATCH_SIZE: 50,

// 50 is hardcoded here because of URL length restrictions.
// (GUIDs can be up to 64 chars long.)
// Individual engines can set different values for their limit if their
// identifiers are shorter.
DEFAULT_GUID_FETCH_BATCH_SIZE: 50,
DEFAULT_MOBILE_GUID_FETCH_BATCH_SIZE: 50,

// Default batch size for applying incoming records.
DEFAULT_STORE_BATCH_SIZE: 1,
HISTORY_STORE_BATCH_SIZE: 50, // same as MOBILE_BATCH_SIZE
FORMS_STORE_BATCH_SIZE: 50, // same as MOBILE_BATCH_SIZE
PASSWORDS_STORE_BATCH_SIZE: 50, // same as MOBILE_BATCH_SIZE
ADDONS_STORE_BATCH_SIZE: 1000000, // process all addons at once
APPS_STORE_BATCH_SIZE: 50, // same as MOBILE_BATCH_SIZE

// Default batch size for download batching
// (how many records are fetched at a time from the server when batching is used).
DEFAULT_DOWNLOAD_BATCH_SIZE: 1000,

// score thresholds for early syncs
SINGLE_USER_THRESHOLD: 1000,
MULTI_DEVICE_THRESHOLD: 300,

// Other score increment constants
SCORE_INCREMENT_SMALL: 1,
SCORE_INCREMENT_MEDIUM: 10,

// Instant sync score increment
SCORE_INCREMENT_XLARGE: 300 + 1, //MULTI_DEVICE_THRESHOLD + 1

// Delay before incrementing global score
SCORE_UPDATE_DELAY: 100,

// Delay for the back observer debouncer. This is chosen to be longer than any
// observed spurious idle/back events and short enough to pre-empt user activity.
IDLE_OBSERVER_BACK_DELAY: 100,

// Max number of records or bytes to upload in a single POST - we'll do multiple POSTS if either
// MAX_UPLOAD_RECORDS or MAX_UPLOAD_BYTES is hit)
MAX_UPLOAD_RECORDS: 100,
MAX_UPLOAD_BYTES: 1024 * 1023, // just under 1MB
MAX_HISTORY_UPLOAD: 5000,
MAX_HISTORY_DOWNLOAD: 5000,

// TTL of the message sent to another device when sending a tab
NOTIFY_TAB_SENT_TTL_SECS: 1 * 3600, // 1 hour

// Top-level statuses:
STATUS_OK: "success.status_ok",
SYNC_FAILED: "error.sync.failed",
LOGIN_FAILED: "error.login.failed",
SYNC_FAILED_PARTIAL: "error.sync.failed_partial",
CLIENT_NOT_CONFIGURED: "service.client_not_configured",
STATUS_DISABLED: "service.disabled",
MASTER_PASSWORD_LOCKED: "service.master_password_locked",

// success states
LOGIN_SUCCEEDED: "success.login",
SYNC_SUCCEEDED: "success.sync",
ENGINE_SUCCEEDED: "success.engine",

// login failure status codes:
LOGIN_FAILED_NO_USERNAME: "error.login.reason.no_username",
LOGIN_FAILED_NO_PASSWORD: "error.login.reason.no_password2",
LOGIN_FAILED_NO_PASSPHRASE: "error.login.reason.no_recoverykey",
LOGIN_FAILED_NETWORK_ERROR: "error.login.reason.network",
LOGIN_FAILED_SERVER_ERROR: "error.login.reason.server",
LOGIN_FAILED_INVALID_PASSPHRASE: "error.login.reason.recoverykey",
LOGIN_FAILED_LOGIN_REJECTED: "error.login.reason.account",

// sync failure status codes
METARECORD_DOWNLOAD_FAIL: "error.sync.reason.metarecord_download_fail",
VERSION_OUT_OF_DATE: "error.sync.reason.version_out_of_date",
DESKTOP_VERSION_OUT_OF_DATE: "error.sync.reason.desktop_version_out_of_date",
SETUP_FAILED_NO_PASSPHRASE: "error.sync.reason.setup_failed_no_passphrase",
CREDENTIALS_CHANGED: "error.sync.reason.credentials_changed",
ABORT_SYNC_COMMAND: "aborting sync, process commands said so",
NO_SYNC_NODE_FOUND: "error.sync.reason.no_node_found",
OVER_QUOTA: "error.sync.reason.over_quota",
PROLONGED_SYNC_FAILURE: "error.sync.prolonged_failure",
SERVER_MAINTENANCE: "error.sync.reason.serverMaintenance",

RESPONSE_OVER_QUOTA: "14",

// engine failure status codes
ENGINE_UPLOAD_FAIL: "error.engine.reason.record_upload_fail",
ENGINE_DOWNLOAD_FAIL: "error.engine.reason.record_download_fail",
ENGINE_UNKNOWN_FAIL: "error.engine.reason.unknown_fail",
ENGINE_APPLY_FAIL: "error.engine.reason.apply_fail",
ENGINE_METARECORD_DOWNLOAD_FAIL: "error.engine.reason.metarecord_download_fail",
ENGINE_METARECORD_UPLOAD_FAIL: "error.engine.reason.metarecord_upload_fail",
// an upload failure where the batch was interrupted with a 412
ENGINE_BATCH_INTERRUPTED: "error.engine.reason.batch_interrupted",

JPAKE_ERROR_CHANNEL: "jpake.error.channel",
JPAKE_ERROR_NETWORK: "jpake.error.network",
JPAKE_ERROR_SERVER: "jpake.error.server",
JPAKE_ERROR_TIMEOUT: "jpake.error.timeout",
JPAKE_ERROR_INTERNAL: "jpake.error.internal",
JPAKE_ERROR_INVALID: "jpake.error.invalid",
JPAKE_ERROR_NODATA: "jpake.error.nodata",
JPAKE_ERROR_KEYMISMATCH: "jpake.error.keymismatch",
JPAKE_ERROR_WRONGMESSAGE: "jpake.error.wrongmessage",
JPAKE_ERROR_USERABORT: "jpake.error.userabort",
JPAKE_ERROR_DELAYUNSUPPORTED: "jpake.error.delayunsupported",

// info types for Service.getStorageInfo
INFO_COLLECTIONS: "collections",
INFO_COLLECTION_USAGE: "collection_usage",
INFO_COLLECTION_COUNTS: "collection_counts",
INFO_QUOTA: "quota",

// Ways that a sync can be disabled (messages only to be printed in debug log)
kSyncMasterPasswordLocked: "User elected to leave Master Password locked",
kSyncWeaveDisabled: "Weave is disabled",
kSyncNetworkOffline: "Network is offline",
kSyncBackoffNotMet: "Trying to sync before the server said it's okay",
kFirstSyncChoiceNotMade: "User has not selected an action for first sync",

// Application IDs
FIREFOX_ID: "{ec8030f7-c20a-464f-9b0e-13a3a9e97384}",
FENNEC_ID: "{a23983c0-fd0e-11dc-95ff-0800200c9a66}",
SEAMONKEY_ID: "{92650c4d-4b8e-4d2a-b7eb-24ecf4f6b63a}",
TEST_HARNESS_ID: "xuth@mozilla.org",

MIN_PP_LENGTH: 12,
MIN_PASS_LENGTH: 8,

DEVICE_TYPE_DESKTOP: "desktop",
DEVICE_TYPE_MOBILE: "mobile",

// End of the constants hash: bind each entry in module scope and export it.
})) {
  this[key] = val;
  this.EXPORTED_SYMBOLS.push(key);
}
diff --git a/services/sync/modules/engines.js b/services/sync/modules/engines.js
new file mode 100644
index 000000000..1eaa1863a
--- /dev/null
+++ b/services/sync/modules/engines.js
@@ -0,0 +1,1813 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
// Public surface of this module: engine framework classes used by Sync.
this.EXPORTED_SYMBOLS = [
  "EngineManager",
  "Engine",
  "SyncEngine",
  "Tracker",
  "Store",
  "Changeset"
];

var {classes: Cc, interfaces: Ci, results: Cr, utils: Cu} = Components;

Cu.import("resource://services-common/async.js");
Cu.import("resource://gre/modules/Log.jsm");
Cu.import("resource://services-common/observers.js");
Cu.import("resource://services-sync/constants.js");
Cu.import("resource://services-sync/identity.js");
Cu.import("resource://services-sync/record.js");
Cu.import("resource://services-sync/resource.js");
Cu.import("resource://services-sync/util.js");

// NOTE(review): XPCOMUtils (used here and in Store below) is not imported
// directly; it is presumably re-exported by one of the imports above
// (likely util.js) - confirm.
XPCOMUtils.defineLazyModuleGetter(this, "fxAccounts",
                                  "resource://gre/modules/FxAccounts.jsm");
+
/*
 * Trackers are associated with a single engine and deal with
 * listening for changes to their particular data type.
 *
 * There are two things they keep track of:
 * 1) A score, indicating how urgently the engine wants to sync
 * 2) A list of IDs for all the changed items that need to be synced
 * and updating their 'score', indicating how urgently they
 * want to sync.
 *
 * @param name   String used for logging and (lowercased) as the on-disk name
 *               of the changed-IDs file; defaults to "Unnamed".
 * @param engine The Engine instance this tracker belongs to (required;
 *               throws otherwise).
 */
this.Tracker = function Tracker(name, engine) {
  if (!engine) {
    throw new Error("Tracker must be associated with an Engine instance.");
  }

  name = name || "Unnamed";
  this.name = this.file = name.toLowerCase();
  this.engine = engine;

  this._log = Log.repository.getLogger("Sync.Tracker." + name);
  let level = Svc.Prefs.get("log.logger.engine." + this.name, "Debug");
  this._log.level = Log.Level[level];

  this._score = 0;
  this._ignored = [];      // IDs to skip; in-memory only (see ignoreID).
  this.ignoreAll = false;  // When true, add/removeChangedID are no-ops.
  this.changedIDs = {};
  this.loadChangedIDs();   // Restore changedIDs from disk via Utils.jsonLoad.

  Svc.Obs.add("weave:engine:start-tracking", this);
  Svc.Obs.add("weave:engine:stop-tracking", this);

  // Watch the engine's enabled pref so tracking follows it (see observe()).
  Svc.Prefs.observe("engine." + this.engine.prefName, this);
};
+
Tracker.prototype = {
  /*
   * Score can be called as often as desired to decide which engines to sync
   *
   * Valid values for score:
   * -1: Do not sync unless the user specifically requests it (almost disabled)
   * 0: Nothing has changed
   * 100: Please sync me ASAP!
   *
   * Setting it to other values should (but doesn't currently) throw an exception
   */
  get score() {
    return this._score;
  },

  set score(value) {
    this._score = value;
    Observers.notify("weave:engine:score:updated", this.name);
  },

  // Should be called by service everytime a sync has been done for an engine.
  // Deliberately bypasses the setter, so no score:updated notification fires.
  resetScore: function () {
    this._score = 0;
  },

  // Whether changed IDs are written to disk; subclasses may set this to false
  // for data that should not survive a restart.
  persistChangedIDs: true,

  /**
   * Persist changedIDs to disk at a later date.
   * Optionally pass a callback to be invoked when the write has occurred.
   */
  saveChangedIDs: function (cb) {
    if (!this.persistChangedIDs) {
      this._log.debug("Not saving changedIDs.");
      return;
    }
    // Debounced via a named timer: repeated calls within 1s coalesce into a
    // single write.
    Utils.namedTimer(function () {
      this._log.debug("Saving changed IDs to " + this.file);
      Utils.jsonSave("changes/" + this.file, this, this.changedIDs, cb);
    }, 1000, this, "_lazySave");
  },

  // Restore changedIDs from disk. `cb`, if given, is invoked with the loaded
  // JSON object, or with null when the file was absent or held a non-object.
  loadChangedIDs: function (cb) {
    Utils.jsonLoad("changes/" + this.file, this, function(json) {
      if (json && (typeof(json) == "object")) {
        this.changedIDs = json;
      } else if (json !== null) {
        this._log.warn("Changed IDs file " + this.file + " contains non-object value.");
        json = null;
      }
      if (cb) {
        cb.call(this, json);
      }
    });
  },

  // ignore/unignore specific IDs. Useful for ignoring items that are
  // being processed, or that shouldn't be synced.
  // But note: not persisted to disk

  ignoreID: function (id) {
    this.unignoreID(id);  // avoid duplicate entries in the ignore list
    this._ignored.push(id);
  },

  unignoreID: function (id) {
    let index = this._ignored.indexOf(id);
    if (index != -1)
      this._ignored.splice(index, 1);
  },

  // Record `when` as the change time for `id` and schedule a lazy save.
  _saveChangedID(id, when) {
    this._log.trace(`Adding changed ID: ${id}, ${JSON.stringify(when)}`);
    this.changedIDs[id] = when;
    // NOTE(review): onSavedChangedIDs is not defined on this object; it
    // appears to be an optional (test) hook that is normally undefined.
    this.saveChangedIDs(this.onSavedChangedIDs);
  },

  /**
   * Mark `id` as changed at time `when` (seconds; defaults to now).
   * Returns false when the ID is falsy or currently ignored, true otherwise.
   * The stored entry is only updated when `when` is newer than the recorded
   * change time.
   */
  addChangedID: function (id, when) {
    if (!id) {
      this._log.warn("Attempted to add undefined ID to tracker");
      return false;
    }

    if (this.ignoreAll || this._ignored.includes(id)) {
      return false;
    }

    // Default to the current time in seconds if no time is provided.
    if (when == null) {
      when = this._now();
    }

    // Add/update the entry if we have a newer time.
    if ((this.changedIDs[id] || -Infinity) < when) {
      this._saveChangedID(id, when);
    }

    return true;
  },

  /**
   * Remove `id` from the changed set (scheduling a save) unless it is
   * currently ignored. Returns false when the ID is falsy or ignored.
   */
  removeChangedID: function (id) {
    if (!id) {
      // (Message fixed: previously read "... undefined ID to tracker".)
      this._log.warn("Attempted to remove undefined ID from tracker");
      return false;
    }
    if (this.ignoreAll || this._ignored.includes(id)) {
      return false;
    }
    if (this.changedIDs[id] != null) {
      this._log.trace("Removing changed ID " + id);
      delete this.changedIDs[id];
      this.saveChangedIDs();
    }
    return true;
  },

  clearChangedIDs: function () {
    this._log.trace("Clearing changed ID list");
    this.changedIDs = {};
    this.saveChangedIDs();
  },

  // Current time in *seconds* - change times are stored in seconds, not ms.
  _now() {
    return Date.now() / 1000;
  },

  _isTracking: false,

  // Override these in your subclasses.
  startTracking: function () {
  },

  stopTracking: function () {
  },

  engineIsEnabled: function () {
    if (!this.engine) {
      // Can't tell -- we must be running in a test!
      return true;
    }
    return this.engine.enabled;
  },

  // Start or stop tracking to match the engine's new enabled state. A newly
  // disabled engine also drops any accumulated changed IDs.
  onEngineEnabledChanged: function (engineEnabled) {
    if (engineEnabled == this._isTracking) {
      return;
    }

    if (engineEnabled) {
      this.startTracking();
      this._isTracking = true;
    } else {
      this.stopTracking();
      this._isTracking = false;
      this.clearChangedIDs();
    }
  },

  // nsIObserver entry point: reacts to the global start/stop-tracking
  // notifications and to flips of this engine's enabled pref.
  observe: function (subject, topic, data) {
    switch (topic) {
      case "weave:engine:start-tracking":
        if (!this.engineIsEnabled()) {
          return;
        }
        this._log.trace("Got start-tracking.");
        if (!this._isTracking) {
          this.startTracking();
          this._isTracking = true;
        }
        return;
      case "weave:engine:stop-tracking":
        this._log.trace("Got stop-tracking.");
        if (this._isTracking) {
          this.stopTracking();
          this._isTracking = false;
        }
        return;
      case "nsPref:changed":
        if (data == PREFS_BRANCH + "engine." + this.engine.prefName) {
          this.onEngineEnabledChanged(this.engine.enabled);
        }
        return;
    }
  }
};
+
+
+
/**
 * The Store serves as the interface between Sync and stored data.
 *
 * The name "store" is slightly a misnomer because it doesn't actually "store"
 * anything. Instead, it serves as a gateway to something that actually does
 * the "storing."
 *
 * The store is responsible for record management inside an engine. It tells
 * Sync what items are available for Sync, converts items to and from Sync's
 * record format, and applies records from Sync into changes on the underlying
 * store.
 *
 * Store implementations require a number of functions to be implemented. These
 * are all documented below.
 *
 * For stores that deal with many records or which have expensive store access
 * routines, it is highly recommended to implement a custom applyIncomingBatch
 * and/or applyIncoming function on top of the basic APIs.
 *
 * @param name   String used for logging; defaults to "Unnamed".
 * @param engine The Engine instance this store belongs to (required;
 *               throws otherwise).
 */

this.Store = function Store(name, engine) {
  if (!engine) {
    throw new Error("Store must be associated with an Engine instance.");
  }

  name = name || "Unnamed";
  this.name = name.toLowerCase();
  this.engine = engine;

  this._log = Log.repository.getLogger("Sync.Store." + name);
  let level = Svc.Prefs.get("log.logger.engine." + this.name, "Debug");
  this._log.level = Log.Level[level];

  // Lazily-created one-shot nsITimer, used by _sleep().
  XPCOMUtils.defineLazyGetter(this, "_timer", function() {
    return Cc["@mozilla.org/timer;1"].createInstance(Ci.nsITimer);
  });
}
+Store.prototype = {
+
+ // Block synchronously for `delay` milliseconds by spinning on an Async
+ // callback armed by a one-shot timer. Used to yield between batches.
+ _sleep: function _sleep(delay) {
+ let cb = Async.makeSyncCallback();
+ this._timer.initWithCallback(cb, delay, Ci.nsITimer.TYPE_ONE_SHOT);
+ Async.waitForSyncCallback(cb);
+ },
+
+ /**
+ * Apply multiple incoming records against the store.
+ *
+ * This is called with a set of incoming records to process. The function
+ * should look at each record, reconcile with the current local state, and
+ * make the local changes required to bring its state in alignment with the
+ * record.
+ *
+ * The default implementation simply iterates over all records and calls
+ * applyIncoming(). Store implementations may overwrite this function
+ * if desired.
+ *
+ * @param records Array of records to apply
+ * @return Array of record IDs which did not apply cleanly
+ */
+ applyIncomingBatch: function (records) {
+ let failed = [];
+ for (let record of records) {
+ try {
+ this.applyIncoming(record);
+ } catch (ex) {
+ if (ex.code == Engine.prototype.eEngineAbortApplyIncoming) {
+ // This kind of exception should have a 'cause' attribute, which is an
+ // originating exception.
+ // ex.cause will carry its stack with it when rethrown.
+ throw ex.cause;
+ }
+ if (Async.isShutdownException(ex)) {
+ throw ex;
+ }
+ // Anything else: record the failure and keep going.
+ this._log.warn("Failed to apply incoming record " + record.id, ex);
+ this.engine._noteApplyFailure();
+ failed.push(record.id);
+ }
+ };
+ return failed;
+ },
+
+ /**
+ * Apply a single record against the store.
+ *
+ * This takes a single record and makes the local changes required so the
+ * local state matches what's in the record.
+ *
+ * The default implementation calls one of remove(), create(), or update()
+ * depending on the state obtained from the store itself. Store
+ * implementations may overwrite this function if desired.
+ *
+ * @param record
+ * Record to apply
+ */
+ applyIncoming: function (record) {
+ if (record.deleted)
+ this.remove(record);
+ else if (!this.itemExists(record.id))
+ this.create(record);
+ else
+ this.update(record);
+ },
+
+ // override these in derived objects
+
+ /**
+ * Create an item in the store from a record.
+ *
+ * This is called by the default implementation of applyIncoming(). If using
+ * applyIncomingBatch(), this won't be called unless your store calls it.
+ *
+ * @param record
+ * The store record to create an item from
+ */
+ create: function (record) {
+ throw "override create in a subclass";
+ },
+
+ /**
+ * Remove an item in the store from a record.
+ *
+ * This is called by the default implementation of applyIncoming(). If using
+ * applyIncomingBatch(), this won't be called unless your store calls it.
+ *
+ * @param record
+ * The store record to delete an item from
+ */
+ remove: function (record) {
+ throw "override remove in a subclass";
+ },
+
+ /**
+ * Update an item from a record.
+ *
+ * This is called by the default implementation of applyIncoming(). If using
+ * applyIncomingBatch(), this won't be called unless your store calls it.
+ *
+ * @param record
+ * The record to use to update an item from
+ */
+ update: function (record) {
+ throw "override update in a subclass";
+ },
+
+ /**
+ * Determine whether a record with the specified ID exists.
+ *
+ * Takes a string record ID and returns a booleans saying whether the record
+ * exists.
+ *
+ * @param id
+ * string record ID
+ * @return boolean indicating whether record exists locally
+ */
+ itemExists: function (id) {
+ throw "override itemExists in a subclass";
+ },
+
+ /**
+ * Create a record from the specified ID.
+ *
+ * If the ID is known, the record should be populated with metadata from
+ * the store. If the ID is not known, the record should be created with the
+ * delete field set to true.
+ *
+ * @param id
+ * string record ID
+ * @param collection
+ * Collection to add record to. This is typically passed into the
+ * constructor for the newly-created record.
+ * @return record type for this engine
+ */
+ createRecord: function (id, collection) {
+ throw "override createRecord in a subclass";
+ },
+
+ /**
+ * Change the ID of a record.
+ *
+ * @param oldID
+ * string old/current record ID
+ * @param newID
+ * string new record ID
+ */
+ changeItemID: function (oldID, newID) {
+ throw "override changeItemID in a subclass";
+ },
+
+ /**
+ * Obtain the set of all known record IDs.
+ *
+ * @return Object with ID strings as keys and values of true. The values
+ * are ignored.
+ */
+ getAllIDs: function () {
+ throw "override getAllIDs in a subclass";
+ },
+
+ /**
+ * Wipe all data in the store.
+ *
+ * This function is called during remote wipes or when replacing local data
+ * with remote data.
+ *
+ * This function should delete all local data that the store is managing. It
+ * can be thought of as clearing out all state and restoring the "new
+ * browser" state.
+ */
+ wipe: function () {
+ throw "override wipe in a subclass";
+ }
+};
+
+// Registry of engine instances for a Service. Tracks registered engines by
+// name plus the set of engine names the user has declined to sync.
+this.EngineManager = function EngineManager(service) {
+ this.service = service;
+
+ this._engines = {};
+
+ // This will be populated by Service on startup.
+ this._declined = new Set();
+ this._log = Log.repository.getLogger("Sync.EngineManager");
+ this._log.level = Log.Level[Svc.Prefs.get("log.logger.service.engines", "Debug")];
+}
+EngineManager.prototype = {
+ // Look up an engine by name. Given an array of names, returns an array of
+ // the engines that could be resolved (unknown names are logged + skipped).
+ get: function (name) {
+ // Return an array of engines if we have an array of names
+ if (Array.isArray(name)) {
+ let engines = [];
+ name.forEach(function(name) {
+ let engine = this.get(name);
+ if (engine) {
+ engines.push(engine);
+ }
+ }, this);
+ return engines;
+ }
+
+ let engine = this._engines[name];
+ if (!engine) {
+ this._log.debug("Could not get engine: " + name);
+ if (Object.keys) {
+ this._log.debug("Engines are: " + JSON.stringify(Object.keys(this._engines)));
+ }
+ }
+ return engine;
+ },
+
+ // All registered engines, in registration-object iteration order.
+ getAll: function () {
+ let engines = [];
+ for (let [, engine] of Object.entries(this._engines)) {
+ engines.push(engine);
+ }
+ return engines;
+ },
+
+ /**
+ * N.B., does not pay attention to the declined list.
+ */
+ getEnabled: function () {
+ return this.getAll()
+ .filter((engine) => engine.enabled)
+ .sort((a, b) => a.syncPriority - b.syncPriority);
+ },
+
+ get enabledEngineNames() {
+ return this.getEnabled().map(e => e.name);
+ },
+
+ // Write the declined set to the "declinedEngines" pref (comma-joined).
+ persistDeclined: function () {
+ Svc.Prefs.set("declinedEngines", [...this._declined].join(","));
+ },
+
+ /**
+ * Returns an array.
+ */
+ getDeclined: function () {
+ return [...this._declined];
+ },
+
+ setDeclined: function (engines) {
+ this._declined = new Set(engines);
+ this.persistDeclined();
+ },
+
+ isDeclined: function (engineName) {
+ return this._declined.has(engineName);
+ },
+
+ /**
+ * Accepts a Set or an array.
+ */
+ decline: function (engines) {
+ for (let e of engines) {
+ this._declined.add(e);
+ }
+ this.persistDeclined();
+ },
+
+ undecline: function (engines) {
+ for (let e of engines) {
+ this._declined.delete(e);
+ }
+ this.persistDeclined();
+ },
+
+ /**
+ * Mark any non-enabled engines as declined.
+ *
+ * This is useful after initial customization during setup.
+ */
+ declineDisabled: function () {
+ for (let e of this.getAll()) {
+ if (!e.enabled) {
+ this._log.debug("Declining disabled engine " + e.name);
+ this._declined.add(e.name);
+ }
+ }
+ this.persistDeclined();
+ },
+
+ /**
+ * Register an Engine to the service. Alternatively, give an array of engine
+ * objects to register.
+ *
+ * @param engineObject
+ * Engine object used to get an instance of the engine
+ * @return The engine object if anything failed
+ */
+ register: function (engineObject) {
+ if (Array.isArray(engineObject)) {
+ return engineObject.map(this.register, this);
+ }
+
+ try {
+ let engine = new engineObject(this.service);
+ let name = engine.name;
+ if (name in this._engines) {
+ this._log.error("Engine '" + name + "' is already registered!");
+ } else {
+ this._engines[name] = engine;
+ }
+ } catch (ex) {
+ // Best-effort extraction of the constructor's name for the log; each
+ // step falls back to "" if the shape isn't what we expect.
+ let name = engineObject || "";
+ name = name.prototype || "";
+ name = name.name || "";
+
+ this._log.error(`Could not initialize engine ${name}`, ex);
+ return engineObject;
+ }
+ },
+
+ // Remove an engine by instance or by name.
+ unregister: function (val) {
+ let name = val;
+ if (val instanceof Engine) {
+ name = val.name;
+ }
+ delete this._engines[name];
+ },
+
+ // Drop every registered engine (does not touch the declined set).
+ clear: function () {
+ for (let name in this._engines) {
+ delete this._engines[name];
+ }
+ },
+};
+
+// Base Engine constructor. `Name` keeps the original casing for display and
+// logger names; `name` is the lowercased identifier used for prefs.
+this.Engine = function Engine(name, service) {
+ if (!service) {
+ throw new Error("Engine must be associated with a Service instance.");
+ }
+
+ this.Name = name || "Unnamed";
+ this.name = name.toLowerCase();
+ this.service = service;
+
+ this._notify = Utils.notify("weave:engine:");
+ this._log = Log.repository.getLogger("Sync.Engine." + this.Name);
+ let level = Svc.Prefs.get("log.logger.engine." + this.name, "Debug");
+ this._log.level = Log.Level[level];
+
+ this._tracker; // initialize tracker to load previously changed IDs
+ this._log.debug("Engine initialized");
+}
+Engine.prototype = {
+ // _storeObj, and _trackerObj should to be overridden in subclasses
+ _storeObj: Store,
+ _trackerObj: Tracker,
+
+ // Local 'constant'.
+ // Signal to the engine that processing further records is pointless.
+ eEngineAbortApplyIncoming: "error.engine.abort.applyincoming",
+
+ // Should we keep syncing if we find a record that cannot be uploaded (ever)?
+ // If this is false, we'll throw, otherwise, we'll ignore the record and
+ // continue. This currently can only happen due to the record being larger
+ // than the record upload limit.
+ allowSkippedRecord: true,
+
+ // Pref suffix for this engine; "engine." + prefName is the enabled pref.
+ get prefName() {
+ return this.name;
+ },
+
+ get enabled() {
+ return Svc.Prefs.get("engine." + this.prefName, false);
+ },
+
+ set enabled(val) {
+ Svc.Prefs.set("engine." + this.prefName, !!val);
+ },
+
+ get score() {
+ return this._tracker.score;
+ },
+
+ // Lazily instantiate the store on first access, then shadow this getter
+ // with one returning the memoized instance.
+ get _store() {
+ let store = new this._storeObj(this.Name, this);
+ this.__defineGetter__("_store", () => store);
+ return store;
+ },
+
+ // Same lazy-memoization pattern as _store.
+ get _tracker() {
+ let tracker = new this._trackerObj(this.Name, this);
+ this.__defineGetter__("_tracker", () => tracker);
+ return tracker;
+ },
+
+ // Run a sync, wrapped in "weave:engine:sync:*" notifications. No-op when
+ // the engine is disabled; throws (a string, legacy style) if the subclass
+ // did not implement _sync.
+ sync: function () {
+ if (!this.enabled) {
+ return;
+ }
+
+ if (!this._sync) {
+ throw "engine does not implement _sync method";
+ }
+
+ this._notify("sync", this.name, this._sync)();
+ },
+
+ /**
+ * Get rid of any local meta-data.
+ */
+ resetClient: function () {
+ if (!this._resetClient) {
+ throw "engine does not implement _resetClient method";
+ }
+
+ this._notify("reset-client", this.name, this._resetClient)();
+ },
+
+ // Reset metadata, then wipe the store with tracking suppressed so the
+ // deletions don't get recorded as local changes.
+ _wipeClient: function () {
+ this.resetClient();
+ this._log.debug("Deleting all local data");
+ this._tracker.ignoreAll = true;
+ this._store.wipe();
+ this._tracker.ignoreAll = false;
+ this._tracker.clearChangedIDs();
+ },
+
+ wipeClient: function () {
+ this._notify("wipe-client", this.name, this._wipeClient)();
+ },
+
+ /**
+ * If one exists, initialize and return a validator for this engine (which
+ * must have a `validate(engine)` method that returns a promise to an object
+ * with a getSummary method). Otherwise return null.
+ */
+ getValidator: function () {
+ return null;
+ }
+};
+
+// Engine subclass implementing the full server-sync protocol. Restores the
+// persisted backlog (toFetch) and previously-failed record IDs on creation.
+this.SyncEngine = function SyncEngine(name, service) {
+ Engine.call(this, name || "SyncEngine", service);
+
+ this.loadToFetch();
+ this.loadPreviousFailed();
+}
+
+// Enumeration to define approaches to handling bad records.
+// Attached to the constructor to allow use as a kind of static enumeration.
+SyncEngine.kRecoveryStrategy = {
+ ignore: "ignore",
+ retry: "retry",
+ error: "error"
+};
+
+SyncEngine.prototype = {
+ __proto__: Engine.prototype,
+ _recordObj: CryptoWrapper,
+ version: 1,
+
+ // Which sortindex to use when retrieving records for this engine.
+ _defaultSort: undefined,
+
+ // A relative priority to use when computing an order
+ // for engines to be synced. Higher-priority engines
+ // (lower numbers) are synced first.
+ // It is recommended that a unique value be used for each engine,
+ // in order to guarantee a stable sequence.
+ syncPriority: 0,
+
+ // How many records to pull in a single sync. This is primarily to avoid very
+ // long first syncs against profiles with many history records.
+ downloadLimit: null,
+
+ // How many records to pull at one time when specifying IDs. This is to avoid
+ // URI length limitations.
+ guidFetchBatchSize: DEFAULT_GUID_FETCH_BATCH_SIZE,
+ mobileGUIDFetchBatchSize: DEFAULT_MOBILE_GUID_FETCH_BATCH_SIZE,
+
+ // How many records to process in a single batch.
+ applyIncomingBatchSize: DEFAULT_STORE_BATCH_SIZE,
+
+ get storageURL() {
+ return this.service.storageURL;
+ },
+
+ // Collection URL for this engine's records.
+ get engineURL() {
+ return this.storageURL + this.name;
+ },
+
+ get cryptoKeysURL() {
+ return this.storageURL + "crypto/keys";
+ },
+
+ get metaURL() {
+ return this.storageURL + "meta/global";
+ },
+
+ get syncID() {
+ // Generate a random syncID if we don't have one
+ let syncID = Svc.Prefs.get(this.name + ".syncID", "");
+ // Note: assigning through the setter persists the freshly minted GUID.
+ return syncID == "" ? this.syncID = Utils.makeGUID() : syncID;
+ },
+ set syncID(value) {
+ Svc.Prefs.set(this.name + ".syncID", value);
+ },
+
+ /*
+ * lastSync is a timestamp in server time.
+ */
+ get lastSync() {
+ return parseFloat(Svc.Prefs.get(this.name + ".lastSync", "0"));
+ },
+ set lastSync(value) {
+ // Reset the pref in-case it's a number instead of a string
+ Svc.Prefs.reset(this.name + ".lastSync");
+ // Store the value as a string to keep floating point precision
+ Svc.Prefs.set(this.name + ".lastSync", value.toString());
+ },
+ // Zero both server-time and local-time sync markers, forcing a full sync.
+ resetLastSync: function () {
+ this._log.debug("Resetting " + this.name + " last sync time");
+ Svc.Prefs.reset(this.name + ".lastSync");
+ Svc.Prefs.set(this.name + ".lastSync", "0");
+ this.lastSyncLocal = 0;
+ },
+
+ // Backlog of record GUIDs still to be downloaded. Writes are debounced via
+ // namedTimer and persisted to "toFetch/<engine>".
+ get toFetch() {
+ return this._toFetch;
+ },
+ set toFetch(val) {
+ let cb = (error) => {
+ if (error) {
+ // NOTE(review): this callback fires on a failed *save*, but the
+ // message says "read" — misleading, though harmless.
+ this._log.error("Failed to read JSON records to fetch", error);
+ }
+ }
+ // Coerce the array to a string for more efficient comparison.
+ if (val + "" == this._toFetch) {
+ return;
+ }
+ this._toFetch = val;
+ Utils.namedTimer(function () {
+ Utils.jsonSave("toFetch/" + this.name, this, val, cb);
+ }, 0, this, "_toFetchDelay");
+ },
+
+ loadToFetch: function () {
+ // Initialize to empty if there's no file.
+ this._toFetch = [];
+ Utils.jsonLoad("toFetch/" + this.name, this, function(toFetch) {
+ if (toFetch) {
+ this._toFetch = toFetch;
+ }
+ });
+ },
+
+ // GUIDs that failed to apply last sync; persisted to "failed/<engine>" so
+ // they are retried on the next sync. Same debounced-save pattern as toFetch.
+ get previousFailed() {
+ return this._previousFailed;
+ },
+ set previousFailed(val) {
+ let cb = (error) => {
+ if (error) {
+ this._log.error("Failed to set previousFailed", error);
+ } else {
+ this._log.debug("Successfully wrote previousFailed.");
+ }
+ }
+ // Coerce the array to a string for more efficient comparison.
+ if (val + "" == this._previousFailed) {
+ return;
+ }
+ this._previousFailed = val;
+ Utils.namedTimer(function () {
+ Utils.jsonSave("failed/" + this.name, this, val, cb);
+ }, 0, this, "_previousFailedDelay");
+ },
+
+ loadPreviousFailed: function () {
+ // Initialize to empty if there's no file
+ this._previousFailed = [];
+ Utils.jsonLoad("failed/" + this.name, this, function(previousFailed) {
+ if (previousFailed) {
+ this._previousFailed = previousFailed;
+ }
+ });
+ },
+
+ /*
+ * lastSyncLocal is a timestamp in local time.
+ */
+ get lastSyncLocal() {
+ return parseInt(Svc.Prefs.get(this.name + ".lastSyncLocal", "0"), 10);
+ },
+ set lastSyncLocal(value) {
+ // Store as a string because pref can only store C longs as numbers.
+ Svc.Prefs.set(this.name + ".lastSyncLocal", value.toString());
+ },
+
+ /*
+ * Returns a changeset for this sync. Engine implementations can override this
+ * method to bypass the tracker for certain or all changed items.
+ */
+ getChangedIDs: function () {
+ return this._tracker.changedIDs;
+ },
+
+ // Create a new record using the store and add in crypto fields.
+ _createRecord: function (id) {
+ let record = this._store.createRecord(id, this.name);
+ record.id = id;
+ record.collection = this.name;
+ return record;
+ },
+
+ // Any setup that needs to happen at the beginning of each sync.
+ // Checks engine version/syncID against meta/global (wiping or resetting as
+ // needed), then snapshots outgoing changes into this._modified.
+ _syncStartup: function () {
+
+ // Determine if we need to wipe on outdated versions
+ let metaGlobal = this.service.recordManager.get(this.metaURL);
+ let engines = metaGlobal.payload.engines || {};
+ let engineData = engines[this.name] || {};
+
+ let needsWipe = false;
+
+ // Assume missing versions are 0 and wipe the server
+ if ((engineData.version || 0) < this.version) {
+ this._log.debug("Old engine data: " + [engineData.version, this.version]);
+
+ // Prepare to clear the server and upload everything
+ needsWipe = true;
+ this.syncID = "";
+
+ // Set the newer version and newly generated syncID
+ engineData.version = this.version;
+ engineData.syncID = this.syncID;
+
+ // Put the new data back into meta/global and mark for upload
+ engines[this.name] = engineData;
+ metaGlobal.payload.engines = engines;
+ metaGlobal.changed = true;
+ }
+ // Don't sync this engine if the server has newer data
+ else if (engineData.version > this.version) {
+ let error = new String("New data: " + [engineData.version, this.version]);
+ error.failureCode = VERSION_OUT_OF_DATE;
+ throw error;
+ }
+ // Changes to syncID mean we'll need to upload everything
+ else if (engineData.syncID != this.syncID) {
+ this._log.debug("Engine syncIDs: " + [engineData.syncID, this.syncID]);
+ this.syncID = engineData.syncID;
+ this._resetClient();
+ };
+
+ // Delete any existing data and reupload on bad version or missing meta.
+ // No crypto component here...? We could regenerate per-collection keys...
+ if (needsWipe) {
+ this.wipeServer();
+ }
+
+ // Save objects that need to be uploaded in this._modified. We also save
+ // the timestamp of this fetch in this.lastSyncLocal. As we successfully
+ // upload objects we remove them from this._modified. If an error occurs
+ // or any objects fail to upload, they will remain in this._modified. At
+ // the end of a sync, or after an error, we add all objects remaining in
+ // this._modified to the tracker.
+ this.lastSyncLocal = Date.now();
+ if (this.lastSync) {
+ this._modified = this.pullNewChanges();
+ } else {
+ this._log.debug("First sync, uploading all items");
+ this._modified = this.pullAllChanges();
+ }
+ // Clear the tracker now. If the sync fails we'll add the ones we failed
+ // to upload back.
+ this._tracker.clearChangedIDs();
+
+ this._log.info(this._modified.count() +
+ " outgoing items pre-reconciliation");
+
+ // Keep track of what to delete at the end of sync
+ this._delete = {};
+ },
+
+ /**
+ * A tiny abstraction to make it easier to test incoming record
+ * application.
+ *
+ * @return a fresh Collection pointed at this engine's URL.
+ */
+ itemSource: function () {
+ return new Collection(this.engineURL, this._recordObj, this.service);
+ },
+
+ /**
+ * Process incoming records.
+ * In the most awful and untestable way possible.
+ * This now accepts something that makes testing vaguely less impossible.
+ *
+ * Downloads changed records (newer than lastSync), decrypts them (with one
+ * HMAC-mismatch retry), reconciles each against local state, and applies
+ * them in batches of applyIncomingBatchSize. Records that fail are stored
+ * in previousFailed; records beyond the download limit are stored in
+ * toFetch and backfilled in ID-batches at the end.
+ */
+ _processIncoming: function (newitems) {
+ this._log.trace("Downloading & applying server changes");
+
+ // Figure out how many total items to fetch this sync; do less on mobile.
+ let batchSize = this.downloadLimit || Infinity;
+ let isMobile = (Svc.Prefs.get("client.type") == "mobile");
+
+ if (!newitems) {
+ newitems = this.itemSource();
+ }
+
+ if (this._defaultSort) {
+ newitems.sort = this._defaultSort;
+ }
+
+ if (isMobile) {
+ batchSize = MOBILE_BATCH_SIZE;
+ }
+ newitems.newer = this.lastSync;
+ newitems.full = true;
+ newitems.limit = batchSize;
+
+ // applied => number of items that should be applied.
+ // failed => number of items that failed in this sync.
+ // newFailed => number of items that failed for the first time in this sync.
+ // reconciled => number of items that were reconciled.
+ let count = {applied: 0, failed: 0, newFailed: 0, reconciled: 0};
+ let handled = [];
+ let applyBatch = [];
+ let failed = [];
+ let failedInPreviousSync = this.previousFailed;
+ let fetchBatch = Utils.arrayUnion(this.toFetch, failedInPreviousSync);
+ // Reset previousFailed for each sync since previously failed items may not fail again.
+ this.previousFailed = [];
+
+ // Used (via exceptions) to allow the record handler/reconciliation/etc.
+ // methods to signal that they would like processing of incoming records to
+ // cease.
+ let aborting = undefined;
+
+ // Flush the pending applyBatch through the store, with tracking
+ // suppressed so applied records aren't re-flagged as local changes.
+ function doApplyBatch() {
+ this._tracker.ignoreAll = true;
+ try {
+ failed = failed.concat(this._store.applyIncomingBatch(applyBatch));
+ } catch (ex) {
+ if (Async.isShutdownException(ex)) {
+ throw ex;
+ }
+ // Catch any error that escapes from applyIncomingBatch. At present
+ // those will all be abort events.
+ this._log.warn("Got exception, aborting processIncoming", ex);
+ aborting = ex;
+ }
+ this._tracker.ignoreAll = false;
+ applyBatch = [];
+ }
+
+ function doApplyBatchAndPersistFailed() {
+ // Apply remaining batch.
+ if (applyBatch.length) {
+ doApplyBatch.call(this);
+ }
+ // Persist failed items so we refetch them.
+ if (failed.length) {
+ this.previousFailed = Utils.arrayUnion(failed, this.previousFailed);
+ count.failed += failed.length;
+ this._log.debug("Records that failed to apply: " + failed);
+ failed = [];
+ }
+ }
+
+ let key = this.service.collectionKeys.keyForCollection(this.name);
+
+ // Not binding this method to 'this' for performance reasons. It gets
+ // called for every incoming record.
+ let self = this;
+
+ newitems.recordHandler = function(item) {
+ if (aborting) {
+ return;
+ }
+
+ // Grab a later last modified if possible
+ if (self.lastModified == null || item.modified > self.lastModified)
+ self.lastModified = item.modified;
+
+ // Track the collection for the WBO.
+ item.collection = self.name;
+
+ // Remember which records were processed
+ handled.push(item.id);
+
+ try {
+ try {
+ item.decrypt(key);
+ } catch (ex) {
+ if (!Utils.isHMACMismatch(ex)) {
+ throw ex;
+ }
+ // HMAC mismatch: ask the engine how to recover, possibly refetch
+ // collection keys and retry the decrypt exactly once.
+ let strategy = self.handleHMACMismatch(item, true);
+ if (strategy == SyncEngine.kRecoveryStrategy.retry) {
+ // You only get one retry.
+ try {
+ // Try decrypting again, typically because we've got new keys.
+ self._log.info("Trying decrypt again...");
+ key = self.service.collectionKeys.keyForCollection(self.name);
+ item.decrypt(key);
+ strategy = null;
+ } catch (ex) {
+ if (!Utils.isHMACMismatch(ex)) {
+ throw ex;
+ }
+ strategy = self.handleHMACMismatch(item, false);
+ }
+ }
+
+ switch (strategy) {
+ case null:
+ // Retry succeeded! No further handling.
+ break;
+ case SyncEngine.kRecoveryStrategy.retry:
+ self._log.debug("Ignoring second retry suggestion.");
+ // Fall through to error case.
+ case SyncEngine.kRecoveryStrategy.error:
+ self._log.warn("Error decrypting record", ex);
+ self._noteApplyFailure();
+ failed.push(item.id);
+ return;
+ case SyncEngine.kRecoveryStrategy.ignore:
+ self._log.debug("Ignoring record " + item.id +
+ " with bad HMAC: already handled.");
+ return;
+ }
+ }
+ } catch (ex) {
+ if (Async.isShutdownException(ex)) {
+ throw ex;
+ }
+ self._log.warn("Error decrypting record", ex);
+ self._noteApplyFailure();
+ failed.push(item.id);
+ return;
+ }
+
+ let shouldApply;
+ try {
+ shouldApply = self._reconcile(item);
+ } catch (ex) {
+ if (ex.code == Engine.prototype.eEngineAbortApplyIncoming) {
+ self._log.warn("Reconciliation failed: aborting incoming processing.");
+ self._noteApplyFailure();
+ failed.push(item.id);
+ aborting = ex.cause;
+ } else if (!Async.isShutdownException(ex)) {
+ self._log.warn("Failed to reconcile incoming record " + item.id, ex);
+ self._noteApplyFailure();
+ failed.push(item.id);
+ return;
+ } else {
+ throw ex;
+ }
+ }
+
+ if (shouldApply) {
+ count.applied++;
+ applyBatch.push(item);
+ } else {
+ count.reconciled++;
+ self._log.trace("Skipping reconciled incoming item " + item.id);
+ }
+
+ if (applyBatch.length == self.applyIncomingBatchSize) {
+ doApplyBatch.call(self);
+ }
+ // Yield to the event loop between records to keep the UI responsive.
+ self._store._sleep(0);
+ };
+
+ // Only bother getting data from the server if there's new things
+ if (this.lastModified == null || this.lastModified > this.lastSync) {
+ let resp = newitems.getBatched();
+ doApplyBatchAndPersistFailed.call(this);
+ if (!resp.success) {
+ resp.failureCode = ENGINE_DOWNLOAD_FAIL;
+ throw resp;
+ }
+
+ if (aborting) {
+ throw aborting;
+ }
+ }
+
+ // Mobile: check if we got the maximum that we requested; get the rest if so.
+ if (handled.length == newitems.limit) {
+ let guidColl = new Collection(this.engineURL, null, this.service);
+
+ // Sort and limit so that on mobile we only get the last X records.
+ guidColl.limit = this.downloadLimit;
+ guidColl.newer = this.lastSync;
+
+ // index: Orders by the sortindex descending (highest weight first).
+ guidColl.sort = "index";
+
+ let guids = guidColl.get();
+ if (!guids.success)
+ throw guids;
+
+ // Figure out which guids weren't just fetched then remove any guids that
+ // were already waiting and prepend the new ones
+ let extra = Utils.arraySub(guids.obj, handled);
+ if (extra.length > 0) {
+ fetchBatch = Utils.arrayUnion(extra, fetchBatch);
+ this.toFetch = Utils.arrayUnion(extra, this.toFetch);
+ }
+ }
+
+ // Fast-foward the lastSync timestamp since we have stored the
+ // remaining items in toFetch.
+ if (this.lastSync < this.lastModified) {
+ this.lastSync = this.lastModified;
+ }
+
+ // Process any backlog of GUIDs.
+ // At this point we impose an upper limit on the number of items to fetch
+ // in a single request, even for desktop, to avoid hitting URI limits.
+ batchSize = isMobile ? this.mobileGUIDFetchBatchSize :
+ this.guidFetchBatchSize;
+
+ while (fetchBatch.length && !aborting) {
+ // Reuse the original query, but get rid of the restricting params
+ // and batch remaining records.
+ newitems.limit = 0;
+ newitems.newer = 0;
+ newitems.ids = fetchBatch.slice(0, batchSize);
+
+ // Reuse the existing record handler set earlier
+ let resp = newitems.get();
+ if (!resp.success) {
+ resp.failureCode = ENGINE_DOWNLOAD_FAIL;
+ throw resp;
+ }
+
+ // This batch was successfully applied. Not using
+ // doApplyBatchAndPersistFailed() here to avoid writing toFetch twice.
+ fetchBatch = fetchBatch.slice(batchSize);
+ this.toFetch = Utils.arraySub(this.toFetch, newitems.ids);
+ this.previousFailed = Utils.arrayUnion(this.previousFailed, failed);
+ if (failed.length) {
+ count.failed += failed.length;
+ this._log.debug("Records that failed to apply: " + failed);
+ }
+ failed = [];
+
+ if (aborting) {
+ throw aborting;
+ }
+
+ if (this.lastSync < this.lastModified) {
+ this.lastSync = this.lastModified;
+ }
+ }
+
+ // Apply remaining items.
+ doApplyBatchAndPersistFailed.call(this);
+
+ // Count IDs that are newly failing (not failed in the previous sync).
+ count.newFailed = this.previousFailed.reduce((count, engine) => {
+ if (failedInPreviousSync.indexOf(engine) == -1) {
+ count++;
+ this._noteApplyNewFailure();
+ }
+ return count;
+ }, 0);
+ count.succeeded = Math.max(0, count.applied - count.failed);
+ this._log.info(["Records:",
+ count.applied, "applied,",
+ count.succeeded, "successfully,",
+ count.failed, "failed to apply,",
+ count.newFailed, "newly failed to apply,",
+ count.reconciled, "reconciled."].join(" "));
+ Observers.notify("weave:engine:sync:applied", count, this.name);
+ },
+
+ // Hook called once per record that fails to decrypt/reconcile/apply.
+ _noteApplyFailure: function () {
+ // here would be a good place to record telemetry...
+ },
+
+ // Hook called once per record that failed for the first time this sync.
+ _noteApplyNewFailure: function () {
+ // here would be a good place to record telemetry...
+ },
+
+ /**
+ * Find a GUID of an item that is a duplicate of the incoming item but happens
+ * to have a different GUID
+ *
+ * @return GUID of the similar item; falsy otherwise
+ */
+ _findDupe: function (item) {
+ // By default, assume there's no dupe items for the engine
+ },
+
+ // Called when the server has a record marked as deleted, but locally we've
+ // changed it more recently than the deletion. If we return false, the
+ // record will be deleted locally. If we return true, we'll reupload the
+ // record to the server -- any extra work that's needed as part of this
+ // process should be done at this point (such as mark the record's parent
+ // for reuploading in the case of bookmarks).
+ _shouldReviveRemotelyDeletedRecord(remoteItem) {
+ return true;
+ },
+
+ // Queue `id` for server-side deletion at the end of the sync, and stop
+ // tracking it as a local change.
+ _deleteId: function (id) {
+ this._tracker.removeChangedID(id);
+
+ // Remember this id to delete at the end of sync
+ if (this._delete.ids == null)
+ this._delete.ids = [id];
+ else
+ this._delete.ids.push(id);
+ },
+
+ // Resolve a duplicate: delete the local GUID on the server and rename the
+ // local item to the incoming GUID.
+ _switchItemToDupe(localDupeGUID, incomingItem) {
+ // The local, duplicate ID is always deleted on the server.
+ this._deleteId(localDupeGUID);
+
+ // We unconditionally change the item's ID in case the engine knows of
+ // an item but doesn't expose it through itemExists. If the API
+ // contract were stronger, this could be changed.
+ this._log.debug("Switching local ID to incoming: " + localDupeGUID + " -> " +
+ incomingItem.id);
+ this._store.changeItemID(localDupeGUID, incomingItem.id);
+ },
+
+ /**
+ * Reconcile incoming record with local state.
+ *
+ * This function essentially determines whether to apply an incoming record.
+ *
+ * @param item
+ * Record from server to be tested for application.
+ * @return boolean
+ * Truthy if incoming record should be applied. False if not.
+ */
+ _reconcile: function (item) {
+ if (this._log.level <= Log.Level.Trace) {
+ this._log.trace("Incoming: " + item);
+ }
+
+ // We start reconciling by collecting a bunch of state. We do this here
+ // because some state may change during the course of this function and we
+ // need to operate on the original values.
+ let existsLocally = this._store.itemExists(item.id);
+ let locallyModified = this._modified.has(item.id);
+
+ // TODO Handle clock drift better. Tracked in bug 721181.
+ let remoteAge = AsyncResource.serverTime - item.modified;
+ let localAge = locallyModified ?
+ (Date.now() / 1000 - this._modified.getModifiedTimestamp(item.id)) : null;
+ let remoteIsNewer = remoteAge < localAge;
+
+ this._log.trace("Reconciling " + item.id + ". exists=" +
+ existsLocally + "; modified=" + locallyModified +
+ "; local age=" + localAge + "; incoming age=" +
+ remoteAge);
+
+ // We handle deletions first so subsequent logic doesn't have to check
+ // deleted flags.
+ if (item.deleted) {
+ // If the item doesn't exist locally, there is nothing for us to do. We
+ // can't check for duplicates because the incoming record has no data
+ // which can be used for duplicate detection.
+ if (!existsLocally) {
+ this._log.trace("Ignoring incoming item because it was deleted and " +
+ "the item does not exist locally.");
+ return false;
+ }
+
+ // We decide whether to process the deletion by comparing the record
+ // ages. If the item is not modified locally, the remote side wins and
+ // the deletion is processed. If it is modified locally, we take the
+ // newer record.
+ if (!locallyModified) {
+ this._log.trace("Applying incoming delete because the local item " +
+ "exists and isn't modified.");
+ return true;
+ }
+ this._log.trace("Incoming record is deleted but we had local changes.");
+
+ if (remoteIsNewer) {
+ this._log.trace("Remote record is newer -- deleting local record.");
+ return true;
+ }
+ // If the local record is newer, we defer to individual engines for
+ // how to handle this. By default, we revive the record.
+ let willRevive = this._shouldReviveRemotelyDeletedRecord(item);
+ this._log.trace("Local record is newer -- reviving? " + willRevive);
+
+ return !willRevive;
+ }
+
+ // At this point the incoming record is not for a deletion and must have
+ // data. If the incoming record does not exist locally, we check for a local
+ // duplicate existing under a different ID. The default implementation of
+ // _findDupe() is empty, so engines have to opt in to this functionality.
+ //
+ // If we find a duplicate, we change the local ID to the incoming ID and we
+ // refresh the metadata collected above. See bug 710448 for the history
+ // of this logic.
+ if (!existsLocally) {
+ let localDupeGUID = this._findDupe(item);
+ if (localDupeGUID) {
+ this._log.trace("Local item " + localDupeGUID + " is a duplicate for " +
+ "incoming item " + item.id);
+
+ // The current API contract does not mandate that the ID returned by
+ // _findDupe() actually exists. Therefore, we have to perform this
+ // check.
+ existsLocally = this._store.itemExists(localDupeGUID);
+
+ // If the local item was modified, we carry its metadata forward so
+ // appropriate reconciling can be performed.
+ if (this._modified.has(localDupeGUID)) {
+ locallyModified = true;
+ localAge = this._tracker._now() - this._modified.getModifiedTimestamp(localDupeGUID);
+ remoteIsNewer = remoteAge < localAge;
+
+ this._modified.swap(localDupeGUID, item.id);
+ } else {
+ locallyModified = false;
+ localAge = null;
+ }
+
+ // Tell the engine to do whatever it needs to switch the items.
+ this._switchItemToDupe(localDupeGUID, item);
+
+ this._log.debug("Local item after duplication: age=" + localAge +
+ "; modified=" + locallyModified + "; exists=" +
+ existsLocally);
+ } else {
+ this._log.trace("No duplicate found for incoming item: " + item.id);
+ }
+ }
+
+ // At this point we've performed duplicate detection. But, nothing here
+ // should depend on duplicate detection as the above should have updated
+ // state seamlessly.
+
+ if (!existsLocally) {
+ // If the item doesn't exist locally and we have no local modifications
+ // to the item (implying that it was not deleted), always apply the remote
+ // item.
+ if (!locallyModified) {
+ this._log.trace("Applying incoming because local item does not exist " +
+ "and was not deleted.");
+ return true;
+ }
+
+ // If the item was modified locally but isn't present, it must have
+ // been deleted. If the incoming record is younger, we restore from
+ // that record.
+ if (remoteIsNewer) {
+ this._log.trace("Applying incoming because local item was deleted " +
+ "before the incoming item was changed.");
+ this._modified.delete(item.id);
+ return true;
+ }
+
+ this._log.trace("Ignoring incoming item because the local item's " +
+ "deletion is newer.");
+ return false;
+ }
+
+ // If the remote and local records are the same, there is nothing to be
+ // done, so we don't do anything. In the ideal world, this logic wouldn't
+ // be here and the engine would take a record and apply it. The reason we
+ // want to defer this logic is because it would avoid a redundant and
+ // possibly expensive dip into the storage layer to query item state.
+ // This should get addressed in the async rewrite, so we ignore it for now.
+ let localRecord = this._createRecord(item.id);
+ let recordsEqual = Utils.deepEquals(item.cleartext,
+ localRecord.cleartext);
+
+ // If the records are the same, we don't need to do anything. This does
+ // potentially throw away a local modification time. But, if the records
+ // are the same, does it matter?
+ if (recordsEqual) {
+ this._log.trace("Ignoring incoming item because the local item is " +
+ "identical.");
+
+ this._modified.delete(item.id);
+ return false;
+ }
+
+ // At this point the records are different.
+
+ // If we have no local modifications, always take the server record.
+ if (!locallyModified) {
+ this._log.trace("Applying incoming record because no local conflicts.");
+ return true;
+ }
+
+ // At this point, records are different and the local record is modified.
+ // We resolve conflicts by record age, where the newest one wins. This does
+ // result in data loss and should be handled by giving the engine an
+ // opportunity to merge the records. Bug 720592 tracks this feature.
+ this._log.warn("DATA LOSS: Both local and remote changes to record: " +
+ item.id);
+ return remoteIsNewer;
+ },
+
+ // Upload outgoing records.
+ _uploadOutgoing: function () {
+ this._log.trace("Uploading local changes to server.");
+
+ let modifiedIDs = this._modified.ids();
+ if (modifiedIDs.length) {
+ this._log.trace("Preparing " + modifiedIDs.length +
+ " outgoing records");
+
+ let counts = { sent: modifiedIDs.length, failed: 0 };
+
+ // collection we'll upload
+ let up = new Collection(this.engineURL, null, this.service);
+
+ let failed = [];
+ let successful = [];
+ let handleResponse = (resp, batchOngoing = false) => {
+ // Note: We don't want to update this.lastSync, or this._modified until
+ // the batch is complete, however we want to remember success/failure
+ // indicators for when that happens.
+ if (!resp.success) {
+ this._log.debug("Uploading records failed: " + resp);
+ resp.failureCode = resp.status == 412 ? ENGINE_BATCH_INTERRUPTED : ENGINE_UPLOAD_FAIL;
+ throw resp;
+ }
+
+ // Update server timestamp from the upload.
+ failed = failed.concat(Object.keys(resp.obj.failed));
+ successful = successful.concat(resp.obj.success);
+
+ if (batchOngoing) {
+ // Nothing to do yet
+ return;
+ }
+ // Advance lastSync since we've finished the batch.
+ let modified = resp.headers["x-weave-timestamp"];
+ if (modified > this.lastSync) {
+ this.lastSync = modified;
+ }
+ if (failed.length && this._log.level <= Log.Level.Debug) {
+ this._log.debug("Records that will be uploaded again because "
+ + "the server couldn't store them: "
+ + failed.join(", "));
+ }
+
+ counts.failed += failed.length;
+
+ for (let id of successful) {
+ this._modified.delete(id);
+ }
+
+ this._onRecordsWritten(successful, failed);
+
+ // clear for next batch
+ failed.length = 0;
+ successful.length = 0;
+ };
+
+ let postQueue = up.newPostQueue(this._log, this.lastSync, handleResponse);
+
+ for (let id of modifiedIDs) {
+ let out;
+ let ok = false;
+ try {
+ out = this._createRecord(id);
+ if (this._log.level <= Log.Level.Trace)
+ this._log.trace("Outgoing: " + out);
+
+ out.encrypt(this.service.collectionKeys.keyForCollection(this.name));
+ ok = true;
+ } catch (ex) {
+ if (Async.isShutdownException(ex)) {
+ throw ex;
+ }
+ this._log.warn("Error creating record", ex);
+ }
+ if (ok) {
+ let { enqueued, error } = postQueue.enqueue(out);
+ if (!enqueued) {
+ ++counts.failed;
+ if (!this.allowSkippedRecord) {
+ throw error;
+ }
+ }
+ }
+ this._store._sleep(0);
+ }
+ postQueue.flush(true);
+ Observers.notify("weave:engine:sync:uploaded", counts, this.name);
+ }
+ },
+
+ _onRecordsWritten(succeeded, failed) {
+ // Implement this method to take specific actions against successfully
+ // uploaded records and failed records.
+ },
+
+ // Any cleanup necessary.
+ // Save the current snapshot so as to calculate changes at next sync
+ _syncFinish: function () {
+ this._log.trace("Finishing up sync");
+ this._tracker.resetScore();
+
+ let doDelete = Utils.bind2(this, function(key, val) {
+ let coll = new Collection(this.engineURL, this._recordObj, this.service);
+ coll[key] = val;
+ coll.delete();
+ });
+
+ for (let [key, val] of Object.entries(this._delete)) {
+ // Remove the key for future uses
+ delete this._delete[key];
+
+ // Send a simple delete for the property
+ if (key != "ids" || val.length <= 100)
+ doDelete(key, val);
+ else {
+ // For many ids, split into chunks of at most 100
+ while (val.length > 0) {
+ doDelete(key, val.slice(0, 100));
+ val = val.slice(100);
+ }
+ }
+ }
+ },
+
+ _syncCleanup: function () {
+ if (!this._modified) {
+ return;
+ }
+
+ // Mark failed WBOs as changed again so they are reuploaded next time.
+ this.trackRemainingChanges();
+ this._modified.clear();
+ },
+
+ _sync: function () {
+ try {
+ this._syncStartup();
+ Observers.notify("weave:engine:sync:status", "process-incoming");
+ this._processIncoming();
+ Observers.notify("weave:engine:sync:status", "upload-outgoing");
+ this._uploadOutgoing();
+ this._syncFinish();
+ } finally {
+ this._syncCleanup();
+ }
+ },
+
+ canDecrypt: function () {
+ // Report failure even if there's nothing to decrypt
+ let canDecrypt = false;
+
+ // Fetch the most recently uploaded record and try to decrypt it
+ let test = new Collection(this.engineURL, this._recordObj, this.service);
+ test.limit = 1;
+ test.sort = "newest";
+ test.full = true;
+
+ let key = this.service.collectionKeys.keyForCollection(this.name);
+ test.recordHandler = function recordHandler(record) {
+ record.decrypt(key);
+ canDecrypt = true;
+ }.bind(this);
+
+ // Any failure fetching/decrypting will just result in false
+ try {
+ this._log.trace("Trying to decrypt a record from the server..");
+ test.get();
+ } catch (ex) {
+ if (Async.isShutdownException(ex)) {
+ throw ex;
+ }
+ this._log.debug("Failed test decrypt", ex);
+ }
+
+ return canDecrypt;
+ },
+
+ _resetClient: function () {
+ this.resetLastSync();
+ this.previousFailed = [];
+ this.toFetch = [];
+ },
+
+ wipeServer: function () {
+ let response = this.service.resource(this.engineURL).delete();
+ if (response.status != 200 && response.status != 404) {
+ throw response;
+ }
+ this._resetClient();
+ },
+
+ removeClientData: function () {
+ // Implement this method in engines that store client specific data
+ // on the server.
+ },
+
+ /*
+ * Decide on (and partially effect) an error-handling strategy.
+ *
+ * Asks the Service to respond to an HMAC error, which might result in keys
+ * being downloaded. That call returns true if an action which might allow a
+ * retry to occur.
+ *
+ * If `mayRetry` is truthy, and the Service suggests a retry,
+ * handleHMACMismatch returns kRecoveryStrategy.retry. Otherwise, it returns
+ * kRecoveryStrategy.error.
+ *
+ * Subclasses of SyncEngine can override this method to allow for different
+ * behavior -- e.g., to delete and ignore erroneous entries.
+ *
+ * All return values will be part of the kRecoveryStrategy enumeration.
+ */
+ handleHMACMismatch: function (item, mayRetry) {
+ // By default we either try again, or bail out noisily.
+ return (this.service.handleHMACEvent() && mayRetry) ?
+ SyncEngine.kRecoveryStrategy.retry :
+ SyncEngine.kRecoveryStrategy.error;
+ },
+
+ /**
+ * Returns a changeset containing all items in the store. The default
+ * implementation returns a changeset with timestamps from long ago, to
+ * ensure we always use the remote version if one exists.
+ *
+ * This function is only called for the first sync. Subsequent syncs call
+ * `pullNewChanges`.
+ *
+ * @return A `Changeset` object.
+ */
+ pullAllChanges() {
+ let changeset = new Changeset();
+ for (let id in this._store.getAllIDs()) {
+ changeset.set(id, 0);
+ }
+ return changeset;
+ },
+
+ /*
+ * Returns a changeset containing entries for all currently tracked items.
+ * The default implementation returns a changeset with timestamps indicating
+ * when the item was added to the tracker.
+ *
+ * @return A `Changeset` object.
+ */
+ pullNewChanges() {
+ return new Changeset(this.getChangedIDs());
+ },
+
+ /**
+ * Adds all remaining changeset entries back to the tracker, typically for
+ * items that failed to upload. This method is called at the end of each sync.
+ *
+ */
+ trackRemainingChanges() {
+ for (let [id, change] of this._modified.entries()) {
+ this._tracker.addChangedID(id, change);
+ }
+ },
+};
+
+/**
+ * A changeset is created for each sync in `Engine::get{Changed, All}IDs`,
+ * and stores opaque change data for tracked IDs. The default implementation
+ * only records timestamps, though engines can extend this to store additional
+ * data for each entry.
+ */
+class Changeset {
+ // Creates a changeset with an initial set of tracked entries.
+ constructor(changes = {}) {
+ this.changes = changes;
+ }
+
+ // Returns the last modified time, in seconds, for an entry in the changeset.
+ // `id` is guaranteed to be in the set.
+ getModifiedTimestamp(id) {
+ return this.changes[id];
+ }
+
+ // Adds a change for a tracked ID to the changeset.
+ set(id, change) {
+ this.changes[id] = change;
+ }
+
+ // Indicates whether an entry is in the changeset.
+ has(id) {
+ return id in this.changes;
+ }
+
+ // Deletes an entry from the changeset. Used to clean up entries for
+ // reconciled and successfully uploaded records.
+ delete(id) {
+ delete this.changes[id];
+ }
+
+ // Swaps two entries in the changeset. Used when reconciling duplicates that
+ // have local changes.
+ swap(oldID, newID) {
+ this.changes[newID] = this.changes[oldID];
+ delete this.changes[oldID];
+ }
+
+ // Returns an array of all tracked IDs in this changeset.
+ ids() {
+ return Object.keys(this.changes);
+ }
+
+ // Returns an array of `[id, change]` tuples. Used to repopulate the tracker
+ // with entries for failed uploads at the end of a sync.
+ entries() {
+ return Object.entries(this.changes);
+ }
+
+ // Returns the number of entries in this changeset.
+ count() {
+ return this.ids().length;
+ }
+
+ // Clears the changeset.
+ clear() {
+ this.changes = {};
+ }
+}
diff --git a/services/sync/modules/engines/addons.js b/services/sync/modules/engines/addons.js
new file mode 100644
index 000000000..01dab58d1
--- /dev/null
+++ b/services/sync/modules/engines/addons.js
@@ -0,0 +1,813 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * This file defines the add-on sync functionality.
+ *
+ * There are currently a number of known limitations:
+ * - We only sync XPI extensions and themes available from addons.mozilla.org.
+ * We hope to expand support for other add-ons eventually.
+ * - We only attempt syncing of add-ons between applications of the same type.
+ * This means add-ons will not synchronize between Firefox desktop and
+ * Firefox mobile, for example. This is because of significant add-on
+ * incompatibility between application types.
+ *
+ * Add-on records exist for each known {add-on, app-id} pair in the Sync client
+ * set. Each record has a randomly chosen GUID. The records then contain
+ * basic metadata about the add-on.
+ *
+ * We currently synchronize:
+ *
+ * - Installations
+ * - Uninstallations
+ * - User enabling and disabling
+ *
+ * Synchronization is influenced by the following preferences:
+ *
+ * - services.sync.addons.ignoreUserEnabledChanges
+ * - services.sync.addons.trustedSourceHostnames
+ *
+ * and also influenced by whether addons have repository caching enabled and
+ * whether they allow installation of addons from insecure options (both of
+ * which are themselves influenced by the "extensions." pref branch)
+ *
+ * See the documentation in services-sync.js for the behavior of these prefs.
+ */
+"use strict";
+
+var {classes: Cc, interfaces: Ci, utils: Cu} = Components;
+
+Cu.import("resource://services-sync/addonutils.js");
+Cu.import("resource://services-sync/addonsreconciler.js");
+Cu.import("resource://services-sync/engines.js");
+Cu.import("resource://services-sync/record.js");
+Cu.import("resource://services-sync/util.js");
+Cu.import("resource://services-sync/constants.js");
+Cu.import("resource://services-sync/collection_validator.js");
+Cu.import("resource://services-common/async.js");
+
+Cu.import("resource://gre/modules/Preferences.jsm");
+Cu.import("resource://gre/modules/XPCOMUtils.jsm");
+XPCOMUtils.defineLazyModuleGetter(this, "AddonManager",
+ "resource://gre/modules/AddonManager.jsm");
+XPCOMUtils.defineLazyModuleGetter(this, "AddonRepository",
+ "resource://gre/modules/addons/AddonRepository.jsm");
+
+this.EXPORTED_SYMBOLS = ["AddonsEngine", "AddonValidator"];
+
+// 7 days in milliseconds.
+const PRUNE_ADDON_CHANGES_THRESHOLD = 60 * 60 * 24 * 7 * 1000;
+
+/**
+ * AddonRecord represents the state of an add-on in an application.
+ *
+ * Each add-on has its own record for each application ID it is installed
+ * on.
+ *
+ * The ID of add-on records is a randomly-generated GUID. It is random instead
+ * of deterministic so the URIs of the records cannot be guessed and so
+ * compromised server credentials won't result in disclosure of the specific
+ * add-ons present in a Sync account.
+ *
+ * The record contains the following fields:
+ *
+ * addonID
+ * ID of the add-on. This correlates to the "id" property on an Addon type.
+ *
+ * applicationID
+ * The application ID this record is associated with.
+ *
+ * enabled
+ * Boolean stating whether add-on is enabled or disabled by the user.
+ *
+ * source
+ * String indicating where an add-on is from. Currently, we only support
+ * the value "amo" which indicates that the add-on came from the official
+ * add-ons repository, addons.mozilla.org. In the future, we may support
+ * installing add-ons from other sources. This provides a future-compatible
+ * mechanism for clients to only apply records they know how to handle.
+ */
+function AddonRecord(collection, id) {
+ CryptoWrapper.call(this, collection, id);
+}
+AddonRecord.prototype = {
+ __proto__: CryptoWrapper.prototype,
+ _logName: "Record.Addon"
+};
+
+Utils.deferGetSet(AddonRecord, "cleartext", ["addonID",
+ "applicationID",
+ "enabled",
+ "source"]);
+
+/**
+ * The AddonsEngine handles synchronization of add-ons between clients.
+ *
+ * The engine maintains an instance of an AddonsReconciler, which is the entity
+ * maintaining state for add-ons. It provides the history and tracking APIs
+ * that AddonManager doesn't.
+ *
+ * The engine instance overrides a handful of functions on the base class. The
+ * rationale for each is documented by that function.
+ */
+this.AddonsEngine = function AddonsEngine(service) {
+ SyncEngine.call(this, "Addons", service);
+
+ this._reconciler = new AddonsReconciler();
+}
+AddonsEngine.prototype = {
+ __proto__: SyncEngine.prototype,
+ _storeObj: AddonsStore,
+ _trackerObj: AddonsTracker,
+ _recordObj: AddonRecord,
+ version: 1,
+
+ syncPriority: 5,
+
+ _reconciler: null,
+
+ /**
+ * Override parent method to find add-ons by their public ID, not Sync GUID.
+ */
+ _findDupe: function _findDupe(item) {
+ let id = item.addonID;
+
+ // The reconciler should have been updated at the top of the sync, so we
+ // can assume it is up to date when this function is called.
+ let addons = this._reconciler.addons;
+ if (!(id in addons)) {
+ return null;
+ }
+
+ let addon = addons[id];
+ if (addon.guid != item.id) {
+ return addon.guid;
+ }
+
+ return null;
+ },
+
+ /**
+ * Override getChangedIDs to pull in tracker changes plus changes from the
+ * reconciler log.
+ */
+ getChangedIDs: function getChangedIDs() {
+ let changes = {};
+ for (let [id, modified] of Object.entries(this._tracker.changedIDs)) {
+ changes[id] = modified;
+ }
+
+ let lastSyncDate = new Date(this.lastSync * 1000);
+
+ // The reconciler should have been refreshed at the beginning of a sync and
+ // we assume this function is only called from within a sync.
+ let reconcilerChanges = this._reconciler.getChangesSinceDate(lastSyncDate);
+ let addons = this._reconciler.addons;
+ for (let change of reconcilerChanges) {
+ let changeTime = change[0];
+ let id = change[2];
+
+ if (!(id in addons)) {
+ continue;
+ }
+
+ // Keep newest modified time.
+ if (id in changes && changeTime < changes[id]) {
+ continue;
+ }
+
+ if (!this.isAddonSyncable(addons[id])) {
+ continue;
+ }
+
+ this._log.debug("Adding changed add-on from changes log: " + id);
+ let addon = addons[id];
+ changes[addon.guid] = changeTime.getTime() / 1000;
+ }
+
+ return changes;
+ },
+
+ /**
+ * Override start of sync function to refresh reconciler.
+ *
+ * Many functions in this class assume the reconciler is refreshed at the
+ * top of a sync. If this ever changes, those functions should be revisited.
+ *
+ * Technically speaking, we don't need to refresh the reconciler on every
+ * sync since it is installed as an AddonManager listener. However, add-ons
+ * are complicated and we force a full refresh, just in case the listeners
+ * missed something.
+ */
+ _syncStartup: function _syncStartup() {
+ // We refresh state before calling parent because syncStartup in the parent
+ // looks for changed IDs, which is dependent on add-on state being up to
+ // date.
+ this._refreshReconcilerState();
+
+ SyncEngine.prototype._syncStartup.call(this);
+ },
+
+ /**
+ * Override end of sync to perform a little housekeeping on the reconciler.
+ *
+ * We prune changes to prevent the reconciler state from growing without
+ * bound. Even if it grows unbounded, there would have to be many add-on
+ * changes (thousands) for it to slow things down significantly. This is
+ * highly unlikely to occur. Still, we exercise defense just in case.
+ */
+ _syncCleanup: function _syncCleanup() {
+ let ms = 1000 * this.lastSync - PRUNE_ADDON_CHANGES_THRESHOLD;
+ this._reconciler.pruneChangesBeforeDate(new Date(ms));
+
+ SyncEngine.prototype._syncCleanup.call(this);
+ },
+
+ /**
+ * Helper function to ensure reconciler is up to date.
+ *
+ * This will synchronously load the reconciler's state from the file
+ * system (if needed) and refresh the state of the reconciler.
+ */
+ _refreshReconcilerState: function _refreshReconcilerState() {
+ this._log.debug("Refreshing reconciler state");
+ let cb = Async.makeSpinningCallback();
+ this._reconciler.refreshGlobalState(cb);
+ cb.wait();
+ },
+
+ isAddonSyncable(addon, ignoreRepoCheck) {
+ return this._store.isAddonSyncable(addon, ignoreRepoCheck);
+ }
+};
+
+/**
+ * This is the primary interface between Sync and the Addons Manager.
+ *
+ * In addition to the core store APIs, we provide convenience functions to wrap
+ * Add-on Manager APIs with Sync-specific semantics.
+ */
+function AddonsStore(name, engine) {
+ Store.call(this, name, engine);
+}
+AddonsStore.prototype = {
+ __proto__: Store.prototype,
+
+ // Define the add-on types (.type) that we support.
+ _syncableTypes: ["extension", "theme"],
+
+ _extensionsPrefs: new Preferences("extensions."),
+
+ get reconciler() {
+ return this.engine._reconciler;
+ },
+
+ /**
+ * Override applyIncoming to filter out records we can't handle.
+ */
+ applyIncoming: function applyIncoming(record) {
+ // The fields we look at aren't present when the record is deleted.
+ if (!record.deleted) {
+ // Ignore records not belonging to our application ID because that is the
+ // current policy.
+ if (record.applicationID != Services.appinfo.ID) {
+ this._log.info("Ignoring incoming record from other App ID: " +
+ record.id);
+ return;
+ }
+
+ // Ignore records that aren't from the official add-on repository, as that
+ // is our current policy.
+ if (record.source != "amo") {
+ this._log.info("Ignoring unknown add-on source (" + record.source + ")" +
+ " for " + record.id);
+ return;
+ }
+ }
+
+ // Ignore incoming records for which an existing non-syncable addon
+ // exists.
+ let existingMeta = this.reconciler.addons[record.addonID];
+ if (existingMeta && !this.isAddonSyncable(existingMeta)) {
+ this._log.info("Ignoring incoming record for an existing but non-syncable addon", record.addonID);
+ return;
+ }
+
+ Store.prototype.applyIncoming.call(this, record);
+ },
+
+
+ /**
+ * Provides core Store API to create/install an add-on from a record.
+ */
+ create: function create(record) {
+ let cb = Async.makeSpinningCallback();
+ AddonUtils.installAddons([{
+ id: record.addonID,
+ syncGUID: record.id,
+ enabled: record.enabled,
+ requireSecureURI: this._extensionsPrefs.get("install.requireSecureOrigin", true),
+ }], cb);
+
+ // This will throw if there was an error. This will get caught by the sync
+ // engine and the record will try to be applied later.
+ let results = cb.wait();
+
+ if (results.skipped.includes(record.addonID)) {
+ this._log.info("Add-on skipped: " + record.addonID);
+ // Just early-return for skipped addons - we don't want to arrange to
+ // try again next time because the condition that caused up to skip
+ // will remain true for this addon forever.
+ return;
+ }
+
+ let addon;
+ for (let a of results.addons) {
+ if (a.id == record.addonID) {
+ addon = a;
+ break;
+ }
+ }
+
+ // This should never happen, but is present as a fail-safe.
+ if (!addon) {
+ throw new Error("Add-on not found after install: " + record.addonID);
+ }
+
+ this._log.info("Add-on installed: " + record.addonID);
+ },
+
+ /**
+ * Provides core Store API to remove/uninstall an add-on from a record.
+ */
+ remove: function remove(record) {
+ // If this is called, the payload is empty, so we have to find by GUID.
+ let addon = this.getAddonByGUID(record.id);
+ if (!addon) {
+ // We don't throw because if the add-on could not be found then we assume
+ // it has already been uninstalled and there is nothing for this function
+ // to do.
+ return;
+ }
+
+ this._log.info("Uninstalling add-on: " + addon.id);
+ let cb = Async.makeSpinningCallback();
+ AddonUtils.uninstallAddon(addon, cb);
+ cb.wait();
+ },
+
+ /**
+ * Provides core Store API to update an add-on from a record.
+ */
+ update: function update(record) {
+ let addon = this.getAddonByID(record.addonID);
+
+ // update() is called if !this.itemExists. And, since itemExists consults
+ // the reconciler only, we need to take care of some corner cases.
+ //
+ // First, the reconciler could know about an add-on that was uninstalled
+ // and no longer present in the add-ons manager.
+ if (!addon) {
+ this.create(record);
+ return;
+ }
+
+ // It's also possible that the add-on is non-restartless and has pending
+ // install/uninstall activity.
+ //
+ // We wouldn't get here if the incoming record was for a deletion. So,
+ // check for pending uninstall and cancel if necessary.
+ if (addon.pendingOperations & AddonManager.PENDING_UNINSTALL) {
+ addon.cancelUninstall();
+
+ // We continue with processing because there could be state or ID change.
+ }
+
+ let cb = Async.makeSpinningCallback();
+ this.updateUserDisabled(addon, !record.enabled, cb);
+ cb.wait();
+ },
+
+ /**
+ * Provide core Store API to determine if a record exists.
+ */
+ itemExists: function itemExists(guid) {
+ let addon = this.reconciler.getAddonStateFromSyncGUID(guid);
+
+ return !!addon;
+ },
+
+ /**
+ * Create an add-on record from its GUID.
+ *
+ * @param guid
+ * Add-on GUID (from extensions DB)
+ * @param collection
+ * Collection to add record to.
+ *
+ * @return AddonRecord instance
+ */
+ createRecord: function createRecord(guid, collection) {
+ let record = new AddonRecord(collection, guid);
+ record.applicationID = Services.appinfo.ID;
+
+ let addon = this.reconciler.getAddonStateFromSyncGUID(guid);
+
+ // If we don't know about this GUID or if it has been uninstalled, we mark
+ // the record as deleted.
+ if (!addon || !addon.installed) {
+ record.deleted = true;
+ return record;
+ }
+
+ record.modified = addon.modified.getTime() / 1000;
+
+ record.addonID = addon.id;
+ record.enabled = addon.enabled;
+
+ // This needs to be dynamic when add-ons don't come from AddonRepository.
+ record.source = "amo";
+
+ return record;
+ },
+
+ /**
+ * Changes the id of an add-on.
+ *
+ * This implements a core API of the store.
+ */
+ changeItemID: function changeItemID(oldID, newID) {
+ // We always update the GUID in the reconciler because it will be
+ // referenced later in the sync process.
+ let state = this.reconciler.getAddonStateFromSyncGUID(oldID);
+ if (state) {
+ state.guid = newID;
+ let cb = Async.makeSpinningCallback();
+ this.reconciler.saveState(null, cb);
+ cb.wait();
+ }
+
+ let addon = this.getAddonByGUID(oldID);
+ if (!addon) {
+ this._log.debug("Cannot change item ID (" + oldID + ") in Add-on " +
+ "Manager because old add-on not present: " + oldID);
+ return;
+ }
+
+ addon.syncGUID = newID;
+ },
+
+ /**
+ * Obtain the set of all syncable add-on Sync GUIDs.
+ *
+ * This implements a core Store API.
+ */
+ getAllIDs: function getAllIDs() {
+ let ids = {};
+
+ let addons = this.reconciler.addons;
+ for (let id in addons) {
+ let addon = addons[id];
+ if (this.isAddonSyncable(addon)) {
+ ids[addon.guid] = true;
+ }
+ }
+
+ return ids;
+ },
+
+ /**
+ * Wipe engine data.
+ *
+ * This uninstalls all syncable addons from the application. In case of
+ * error, it logs the error and keeps trying with other add-ons.
+ */
+ wipe: function wipe() {
+ this._log.info("Processing wipe.");
+
+ this.engine._refreshReconcilerState();
+
+ // We only wipe syncable add-ons. Wipe is a Sync feature not a security
+ // feature.
+ for (let guid in this.getAllIDs()) {
+ let addon = this.getAddonByGUID(guid);
+ if (!addon) {
+ this._log.debug("Ignoring add-on because it couldn't be obtained: " +
+ guid);
+ continue;
+ }
+
+ this._log.info("Uninstalling add-on as part of wipe: " + addon.id);
+ Utils.catch.call(this, () => addon.uninstall())();
+ }
+ },
+
+ /***************************************************************************
+ * Functions below are unique to this store and not part of the Store API *
+ ***************************************************************************/
+
+ /**
+ * Synchronously obtain an add-on from its public ID.
+ *
+ * @param id
+ * Add-on ID
+ * @return Addon or undefined if not found
+ */
+ getAddonByID: function getAddonByID(id) {
+ let cb = Async.makeSyncCallback();
+ AddonManager.getAddonByID(id, cb);
+ return Async.waitForSyncCallback(cb);
+ },
+
+ /**
+ * Synchronously obtain an add-on from its Sync GUID.
+ *
+ * @param guid
+ * Add-on Sync GUID
+ * @return DBAddonInternal or null
+ */
+ getAddonByGUID: function getAddonByGUID(guid) {
+ let cb = Async.makeSyncCallback();
+ AddonManager.getAddonBySyncGUID(guid, cb);
+ return Async.waitForSyncCallback(cb);
+ },
+
+ /**
+ * Determines whether an add-on is suitable for Sync.
+ *
+ * @param addon
+ * Addon instance
+ * @param ignoreRepoCheck
+ * Should we skip checking the Addons repository (primarily useful
+ * for testing and validation).
+ * @return Boolean indicating whether it is appropriate for Sync
+ */
+ isAddonSyncable: function isAddonSyncable(addon, ignoreRepoCheck = false) {
+ // Currently, we limit syncable add-ons to those that are:
+ // 1) In a well-defined set of types
+ // 2) Installed in the current profile
+ // 3) Not installed by a foreign entity (i.e. installed by the app)
+ // since they act like global extensions.
+ // 4) Is not a hotfix.
+ // 5) The addons XPIProvider doesn't veto it (i.e not being installed in
+ // the profile directory, or any other reasons it says the addon can't
+ // be synced)
+ // 6) Are installed from AMO
+
+ // We could represent the test as a complex boolean expression. We go the
+ // verbose route so the failure reason is logged.
+ if (!addon) {
+ this._log.debug("Null object passed to isAddonSyncable.");
+ return false;
+ }
+
+ if (this._syncableTypes.indexOf(addon.type) == -1) {
+ this._log.debug(addon.id + " not syncable: type not in whitelist: " +
+ addon.type);
+ return false;
+ }
+
+ if (!(addon.scope & AddonManager.SCOPE_PROFILE)) {
+ this._log.debug(addon.id + " not syncable: not installed in profile.");
+ return false;
+ }
+
+ // If the addon manager says it's not syncable, we skip it.
+ if (!addon.isSyncable) {
+ this._log.debug(addon.id + " not syncable: vetoed by the addon manager.");
+ return false;
+ }
+
+ // This may be too aggressive. If an add-on is downloaded from AMO and
+ // manually placed in the profile directory, foreignInstall will be set.
+ // Arguably, that add-on should be syncable.
+ // TODO Address the edge case and come up with more robust heuristics.
+ if (addon.foreignInstall) {
+ this._log.debug(addon.id + " not syncable: is foreign install.");
+ return false;
+ }
+
+ // Ignore hotfix extensions (bug 741670). The pref may not be defined.
+ // XXX - note that addon.isSyncable will be false for hotfix addons, so
+ // this check isn't strictly necessary - except for Sync tests which aren't
+ // setup to create a "real" hotfix addon. This can be removed once those
+ // tests are fixed (but keeping it doesn't hurt either)
+ if (this._extensionsPrefs.get("hotfix.id", null) == addon.id) {
+ this._log.debug(addon.id + " not syncable: is a hotfix.");
+ return false;
+ }
+
+ // If the AddonRepository's cache isn't enabled (which it typically isn't
+ // in tests), getCachedAddonByID always returns null - so skip the check
+ // in that case. We also provide a way to specifically opt-out of the check
+ // even if the cache is enabled, which is used by the validators.
+ if (ignoreRepoCheck || !AddonRepository.cacheEnabled) {
+ return true;
+ }
+
+ let cb = Async.makeSyncCallback();
+ AddonRepository.getCachedAddonByID(addon.id, cb);
+ let result = Async.waitForSyncCallback(cb);
+
+ if (!result) {
+ this._log.debug(addon.id + " not syncable: add-on not found in add-on " +
+ "repository.");
+ return false;
+ }
+
+ return this.isSourceURITrusted(result.sourceURI);
+ },
+
+ /**
+ * Determine whether an add-on's sourceURI field is trusted and the add-on
+ * can be installed.
+ *
+ * This function should only ever be called from isAddonSyncable(). It is
+ * exposed as a separate function to make testing easier.
+ *
+ * @param uri
+ * nsIURI instance to validate
+ * @return bool
+ */
+ isSourceURITrusted: function isSourceURITrusted(uri) {
+ // For security reasons, we currently limit synced add-ons to those
+ // installed from trusted hostname(s). We additionally require TLS with
+ // the add-ons site to help prevent forgeries.
+ let trustedHostnames = Svc.Prefs.get("addons.trustedSourceHostnames", "")
+ .split(",");
+
+ if (!uri) {
+ this._log.debug("Undefined argument to isSourceURITrusted().");
+ return false;
+ }
+
+ // Scheme is validated before the hostname because uri.host may not be
+ // populated for certain schemes. It appears to always be populated for
+ // https, so we avoid the potential NS_ERROR_FAILURE on field access.
+ if (uri.scheme != "https") {
+ this._log.debug("Source URI not HTTPS: " + uri.spec);
+ return false;
+ }
+
+ if (trustedHostnames.indexOf(uri.host) == -1) {
+ this._log.debug("Source hostname not trusted: " + uri.host);
+ return false;
+ }
+
+ return true;
+ },
+
+ /**
+ * Update the userDisabled flag on an add-on.
+ *
+ * This will enable or disable an add-on and call the supplied callback when
+ * the action is complete. If no action is needed, the callback gets called
+ * immediately.
+ *
+ * @param addon
+ * Addon instance to manipulate.
+ * @param value
+ * Boolean to which to set userDisabled on the passed Addon.
+ * @param callback
+ * Function to be called when action is complete. Will receive 2
+ * arguments, a truthy value that signifies error, and the Addon
+ * instance passed to this function.
+ */
+ updateUserDisabled: function updateUserDisabled(addon, value, callback) {
+ if (addon.userDisabled == value) {
+ callback(null, addon);
+ return;
+ }
+
+ // A pref allows changes to the enabled flag to be ignored.
+ if (Svc.Prefs.get("addons.ignoreUserEnabledChanges", false)) {
+ this._log.info("Ignoring enabled state change due to preference: " +
+ addon.id);
+ callback(null, addon);
+ return;
+ }
+
+ AddonUtils.updateUserDisabled(addon, value, callback);
+ },
+};
+
/**
 * The add-ons tracker keeps track of real-time changes to add-ons.
 *
 * Rather than observing the AddonManager directly, it registers with the
 * reconciler and receives change notifications from it.
 */
function AddonsTracker(name, engine) {
  Tracker.call(this, name, engine);
}
AddonsTracker.prototype = {
  __proto__: Tracker.prototype,

  get reconciler() {
    return this.engine._reconciler;
  },

  get store() {
    return this.engine._store;
  },

  /**
   * Executed whenever the AddonsReconciler sends out a change notification.
   * See AddonsReconciler.addChangeListener().
   */
  changeListener: function changeHandler(date, change, addon) {
    this._log.debug("changeListener invoked: " + change + " " + addon.id);

    // Changes made while a sync is applying records would otherwise echo
    // right back into the tracker.
    if (this.ignoreAll)
      return;

    if (!this.store.isAddonSyncable(addon)) {
      this._log.debug("Ignoring change because add-on isn't syncable: " +
                      addon.id);
      return;
    }

    this.addChangedID(addon.guid, date.getTime() / 1000);
    this.score += SCORE_INCREMENT_XLARGE;
  },

  startTracking() {
    if (this.engine.enabled) {
      this.reconciler.startListening();
    }
    this.reconciler.addChangeListener(this);
  },

  stopTracking() {
    this.reconciler.removeChangeListener(this);
    this.reconciler.stopListening();
  },
};
+
// Validates that the client's add-on state matches the server's records.
class AddonValidator extends CollectionValidator {
  constructor(engine = null) {
    super("addons", "id", [
      "addonID",
      "enabled",
      "applicationID",
      "source"
    ]);
    this.engine = engine;
  }

  getClientItems() {
    let allAddons = new Promise(resolve =>
      AddonManager.getAllAddons(resolve));
    let pendingAddons = new Promise(resolve =>
      AddonManager.getAddonsWithOperationsByTypes(["extension", "theme"], resolve));
    return Promise.all([allAddons, pendingAddons]).then(results => {
      let [installed, withPendingOps] = results;
      // Addons pending install won't be in the first list, but addons pending
      // uninstall/enable/disable will be in both lists; later writes win.
      let byID = new Map();
      for (let addon of installed) {
        byID.set(addon.id, addon);
      }
      for (let addon of withPendingOps) {
        byID.set(addon.id, addon);
      }
      // Materialize as an array since Map.prototype.values is an iterable.
      return Array.from(byID.values());
    });
  }

  normalizeClientItem(item) {
    // Pending operations override the current userDisabled state.
    let pending = item.pendingOperations;
    let enabled;
    if (pending & AddonManager.PENDING_ENABLE) {
      enabled = true;
    } else if (pending & AddonManager.PENDING_DISABLE) {
      enabled = false;
    } else {
      enabled = !item.userDisabled;
    }
    return {
      enabled,
      id: item.syncGUID,
      addonID: item.id,
      applicationID: Services.appinfo.ID,
      source: "amo", // check item.foreignInstall?
      original: item
    };
  }

  normalizeServerItem(item) {
    let dupeGuid = this.engine._findDupe(item);
    if (dupeGuid) {
      item.id = dupeGuid;
    }
    return item;
  }

  clientUnderstands(item) {
    return item.applicationID === Services.appinfo.ID;
  }

  syncedByClient(item) {
    let addon = item.original;
    if (addon.hidden || addon.isSystem) {
      return false;
    }
    if (addon.pendingOperations & AddonManager.PENDING_UNINSTALL) {
      return false;
    }
    return this.engine.isAddonSyncable(addon, true);
  }
}
diff --git a/services/sync/modules/engines/bookmarks.js b/services/sync/modules/engines/bookmarks.js
new file mode 100644
index 000000000..76a198a8b
--- /dev/null
+++ b/services/sync/modules/engines/bookmarks.js
@@ -0,0 +1,1378 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+this.EXPORTED_SYMBOLS = ['BookmarksEngine', "PlacesItem", "Bookmark",
+ "BookmarkFolder", "BookmarkQuery",
+ "Livemark", "BookmarkSeparator"];
+
+var Cc = Components.classes;
+var Ci = Components.interfaces;
+var Cu = Components.utils;
+
+Cu.import("resource://gre/modules/PlacesUtils.jsm");
+Cu.import("resource://gre/modules/PlacesSyncUtils.jsm");
+Cu.import("resource://gre/modules/XPCOMUtils.jsm");
+Cu.import("resource://services-common/async.js");
+Cu.import("resource://services-sync/constants.js");
+Cu.import("resource://services-sync/engines.js");
+Cu.import("resource://services-sync/record.js");
+Cu.import("resource://services-sync/util.js");
+Cu.import("resource://gre/modules/Task.jsm");
+Cu.import("resource://gre/modules/PlacesBackups.jsm");
+XPCOMUtils.defineLazyModuleGetter(this, "BookmarkValidator",
+ "resource://services-sync/bookmark_validator.js");
+XPCOMUtils.defineLazyGetter(this, "PlacesBundle", () => {
+ let bundleService = Cc["@mozilla.org/intl/stringbundle;1"]
+ .getService(Ci.nsIStringBundleService);
+ return bundleService.createBundle("chrome://places/locale/places.properties");
+});
+
// Annotations relevant to Sync (descriptions, sidebar flag, livemark URIs).
const ANNOS_TO_TRACK = [PlacesSyncUtils.bookmarks.DESCRIPTION_ANNO,
                        PlacesSyncUtils.bookmarks.SIDEBAR_ANNO,
                        PlacesUtils.LMANNO_FEEDURI, PlacesUtils.LMANNO_SITEURI];

const SERVICE_NOT_SUPPORTED = "Service not supported on this platform";
// NOTE(review): presumably the fixed sortindex assigned to folder records —
// confirm against where sortindex is computed.
const FOLDER_SORTINDEX = 1000000;
const {
  SOURCE_SYNC,
  SOURCE_IMPORT,
  SOURCE_IMPORT_REPLACE,
} = Ci.nsINavBookmarksService;

// SQLite's bound-parameter limit; used to chunk queries in pullNewChanges().
const SQLITE_MAX_VARIABLE_NUMBER = 999;

const ORGANIZERQUERY_ANNO = "PlacesOrganizer/OrganizerQuery";
const ALLBOOKMARKS_ANNO = "AllBookmarks";
const MOBILE_ANNO = "MobileBookmarks";

// The tracker ignores changes made by bookmark import and restore, and
// changes made by Sync. We don't need to exclude `SOURCE_IMPORT`, but both
// import and restore fire `bookmarks-restore-*` observer notifications, and
// the tracker doesn't currently distinguish between the two.
const IGNORED_SOURCES = [SOURCE_SYNC, SOURCE_IMPORT, SOURCE_IMPORT_REPLACE];
+
// Maps a bookmark record type string to its record constructor.
// Returns null for unrecognized types. Constructors are referenced lazily so
// an unknown type never touches the record classes.
function getTypeObject(type) {
  if (type == "bookmark" || type == "microsummary") {
    return Bookmark;
  }
  if (type == "query") {
    return BookmarkQuery;
  }
  if (type == "folder") {
    return BookmarkFolder;
  }
  if (type == "livemark") {
    return Livemark;
  }
  if (type == "separator") {
    return BookmarkSeparator;
  }
  if (type == "item") {
    return PlacesItem;
  }
  return null;
}
+
// Abstract base record for all bookmark item types. decrypt() specializes an
// instance to the concrete record type named in the payload's `type` field.
this.PlacesItem = function PlacesItem(collection, id, type) {
  CryptoWrapper.call(this, collection, id);
  this.type = type || "item";
}
PlacesItem.prototype = {
  decrypt: function PlacesItem_decrypt(keyBundle) {
    // Do the normal CryptoWrapper decrypt, but change types before returning
    let clear = CryptoWrapper.prototype.decrypt.call(this, keyBundle);

    // Convert the abstract places item to the actual object type by swapping
    // this instance's prototype. Tombstones keep the abstract type.
    if (!this.deleted)
      this.__proto__ = this.getTypeObject(this.type).prototype;

    return clear;
  },

  // Resolves a type string to a record constructor; throws for unknown types
  // (getTypeObject() at module scope returns null in that case).
  getTypeObject: function PlacesItem_getTypeObject(type) {
    let recordObj = getTypeObject(type);
    if (!recordObj) {
      throw new Error("Unknown places item object type: " + type);
    }
    return recordObj;
  },

  __proto__: CryptoWrapper.prototype,
  _logName: "Sync.Record.PlacesItem",

  // Converts the record to a Sync bookmark object that can be passed to
  // `PlacesSyncUtils.bookmarks.{insert, update}`.
  toSyncBookmark() {
    return {
      kind: this.type,
      syncId: this.id,
      parentSyncId: this.parentid,
    };
  },

  // Populates the record from a Sync bookmark object returned from
  // `PlacesSyncUtils.bookmarks.fetch`.
  fromSyncBookmark(item) {
    this.parentid = item.parentSyncId;
    this.parentName = item.parentTitle;
  },
};

Utils.deferGetSet(PlacesItem,
                  "cleartext",
                  ["hasDupe", "parentid", "parentName", "type"]);
+
// Record type for ordinary bookmarks (and legacy microsummaries).
this.Bookmark = function Bookmark(collection, id, type) {
  PlacesItem.call(this, collection, id, type || "bookmark");
}
Bookmark.prototype = {
  __proto__: PlacesItem.prototype,
  _logName: "Sync.Record.Bookmark",

  toSyncBookmark() {
    let info = PlacesItem.prototype.toSyncBookmark.call(this);
    Object.assign(info, {
      title: this.title,
      url: this.bmkUri,
      description: this.description,
      loadInSidebar: this.loadInSidebar,
      tags: this.tags,
      keyword: this.keyword,
    });
    return info;
  },

  fromSyncBookmark(item) {
    PlacesItem.prototype.fromSyncBookmark.call(this, item);
    Object.assign(this, {
      title: item.title,
      bmkUri: item.url.href,
      description: item.description,
      loadInSidebar: item.loadInSidebar,
      tags: item.tags,
      keyword: item.keyword,
    });
  },
};

Utils.deferGetSet(Bookmark,
                  "cleartext",
                  ["title", "bmkUri", "description",
                   "loadInSidebar", "tags", "keyword"]);
+
// Record type for Places query bookmarks (place: URIs).
this.BookmarkQuery = function BookmarkQuery(collection, id) {
  Bookmark.call(this, collection, id, "query");
}
BookmarkQuery.prototype = {
  __proto__: Bookmark.prototype,
  _logName: "Sync.Record.BookmarkQuery",

  toSyncBookmark() {
    let info = Bookmark.prototype.toSyncBookmark.call(this);
    Object.assign(info, {
      folder: this.folderName,
      query: this.queryId,
    });
    return info;
  },

  fromSyncBookmark(item) {
    Bookmark.prototype.fromSyncBookmark.call(this, item);
    Object.assign(this, {
      folderName: item.folder,
      queryId: item.query,
    });
  },
};

Utils.deferGetSet(BookmarkQuery,
                  "cleartext",
                  ["folderName", "queryId"]);
+
// Record type for bookmark folders; carries an ordered list of child IDs.
this.BookmarkFolder = function BookmarkFolder(collection, id, type) {
  PlacesItem.call(this, collection, id, type || "folder");
}
BookmarkFolder.prototype = {
  __proto__: PlacesItem.prototype,
  _logName: "Sync.Record.Folder",

  toSyncBookmark() {
    let info = PlacesItem.prototype.toSyncBookmark.call(this);
    Object.assign(info, {
      description: this.description,
      title: this.title,
    });
    return info;
  },

  fromSyncBookmark(item) {
    PlacesItem.prototype.fromSyncBookmark.call(this, item);
    Object.assign(this, {
      title: item.title,
      description: item.description,
      children: item.childSyncIds,
    });
  },
};

Utils.deferGetSet(BookmarkFolder, "cleartext", ["description", "title",
                                                "children"]);
+
// Record type for livemarks: folders with feed and (optional) site URIs.
this.Livemark = function Livemark(collection, id) {
  BookmarkFolder.call(this, collection, id, "livemark");
}
Livemark.prototype = {
  __proto__: BookmarkFolder.prototype,
  _logName: "Sync.Record.Livemark",

  toSyncBookmark() {
    let info = BookmarkFolder.prototype.toSyncBookmark.call(this);
    Object.assign(info, {
      feed: this.feedUri,
      site: this.siteUri,
    });
    return info;
  },

  fromSyncBookmark(item) {
    BookmarkFolder.prototype.fromSyncBookmark.call(this, item);
    this.feedUri = item.feed.href;
    // The site URI is optional; only copy it when present.
    if (item.site) {
      this.siteUri = item.site.href;
    }
  },
};

Utils.deferGetSet(Livemark, "cleartext", ["siteUri", "feedUri"]);
+
// Record type for separators; the only payload is the position within the
// parent folder.
this.BookmarkSeparator = function BookmarkSeparator(collection, id) {
  PlacesItem.call(this, collection, id, "separator");
}
BookmarkSeparator.prototype = {
  __proto__: PlacesItem.prototype,
  _logName: "Sync.Record.Separator",

  fromSyncBookmark(item) {
    PlacesItem.prototype.fromSyncBookmark.call(this, item);
    // A separator is identified solely by its index in the parent.
    this.pos = item.index;
  },
};

Utils.deferGetSet(BookmarkSeparator, "cleartext", "pos");
+
// Sync engine for the bookmarks collection.
this.BookmarksEngine = function BookmarksEngine(service) {
  SyncEngine.call(this, "Bookmarks", service);
}
BookmarksEngine.prototype = {
  __proto__: SyncEngine.prototype,
  // Incoming payloads start as the abstract PlacesItem; its decrypt()
  // specializes them to the concrete record type.
  _recordObj: PlacesItem,
  _storeObj: BookmarksStore,
  _trackerObj: BookmarksTracker,
  version: 2,
  // NOTE(review): presumably the sort order requested when fetching records
  // from the server — confirm against SyncEngine.
  _defaultSort: "index",

  syncPriority: 4,
  // NOTE(review): bookmarks don't tolerate skipped records — confirm how
  // SyncEngine consumes this flag.
  allowSkippedRecord: false,
+
+ // A diagnostic helper to get the string value for a bookmark's URL given
+ // its ID. Always returns a string - on error will return a string in the
+ // form of "<description of error>" as this is purely for, eg, logging.
+ // (This means hitting the DB directly and we don't bother using a cached
+ // statement - we should rarely hit this.)
+ _getStringUrlForId(id) {
+ let url;
+ try {
+ let stmt = this._store._getStmt(`
+ SELECT h.url
+ FROM moz_places h
+ JOIN moz_bookmarks b ON h.id = b.fk
+ WHERE b.id = :id`);
+ stmt.params.id = id;
+ let rows = Async.querySpinningly(stmt, ["url"]);
+ url = rows.length == 0 ? "<not found>" : rows[0].url;
+ } catch (ex) {
+ if (Async.isShutdownException(ex)) {
+ throw ex;
+ }
+ if (ex instanceof Ci.mozIStorageError) {
+ url = `<failed: Storage error: ${ex.message} (${ex.result})>`;
+ } else {
+ url = `<failed: ${ex.toString()}>`;
+ }
+ }
+ return url;
+ },
+
+ _guidMapFailed: false,
+ _buildGUIDMap: function _buildGUIDMap() {
+ let store = this._store;
+ let guidMap = {};
+ let tree = Async.promiseSpinningly(PlacesUtils.promiseBookmarksTree("", {
+ includeItemIds: true
+ }));
+ function* walkBookmarksTree(tree, parent=null) {
+ if (tree) {
+ // Skip root node
+ if (parent) {
+ yield [tree, parent];
+ }
+ if (tree.children) {
+ for (let child of tree.children) {
+ store._sleep(0); // avoid jank while looping.
+ yield* walkBookmarksTree(child, tree);
+ }
+ }
+ }
+ }
+
+ function* walkBookmarksRoots(tree, rootIDs) {
+ for (let id of rootIDs) {
+ let bookmarkRoot = tree.children.find(child => child.id === id);
+ if (bookmarkRoot === null) {
+ continue;
+ }
+ yield* walkBookmarksTree(bookmarkRoot, tree);
+ }
+ }
+
+ let rootsToWalk = getChangeRootIds();
+
+ for (let [node, parent] of walkBookmarksRoots(tree, rootsToWalk)) {
+ let {guid, id, type: placeType} = node;
+ guid = PlacesSyncUtils.bookmarks.guidToSyncId(guid);
+ let key;
+ switch (placeType) {
+ case PlacesUtils.TYPE_X_MOZ_PLACE:
+ // Bookmark
+ let query = null;
+ if (node.annos && node.uri.startsWith("place:")) {
+ query = node.annos.find(({name}) =>
+ name === PlacesSyncUtils.bookmarks.SMART_BOOKMARKS_ANNO);
+ }
+ if (query && query.value) {
+ key = "q" + query.value;
+ } else {
+ key = "b" + node.uri + ":" + (node.title || "");
+ }
+ break;
+ case PlacesUtils.TYPE_X_MOZ_PLACE_CONTAINER:
+ // Folder
+ key = "f" + (node.title || "");
+ break;
+ case PlacesUtils.TYPE_X_MOZ_PLACE_SEPARATOR:
+ // Separator
+ key = "s" + node.index;
+ break;
+ default:
+ this._log.error("Unknown place type: '"+placeType+"'");
+ continue;
+ }
+
+ let parentName = parent.title || "";
+ if (guidMap[parentName] == null)
+ guidMap[parentName] = {};
+
+ // If the entry already exists, remember that there are explicit dupes.
+ let entry = new String(guid);
+ entry.hasDupe = guidMap[parentName][key] != null;
+
+ // Remember this item's GUID for its parent-name/key pair.
+ guidMap[parentName][key] = entry;
+ this._log.trace("Mapped: " + [parentName, key, entry, entry.hasDupe]);
+ }
+
+ return guidMap;
+ },
+
  // Helper function to get a dupe GUID for an item.
  // Looks up the lazily-built `_guidMap` by the item's parent title and a
  // type-specific key; returns a `new String(guid)` entry (with a `hasDupe`
  // flag) or undefined when no match exists.
  _mapDupe: function _mapDupe(item) {
    // Figure out if we have something to key with.
    let key;
    let altKey;
    switch (item.type) {
      case "query":
        // Prior to Bug 610501, records didn't carry their Smart Bookmark
        // anno, so we won't be able to dupe them correctly. This altKey
        // hack should get them to dupe correctly.
        if (item.queryId) {
          key = "q" + item.queryId;
          altKey = "b" + item.bmkUri + ":" + (item.title || "");
          break;
        }
        // No queryID? Fall through to the regular bookmark case.
        // (Deliberate switch fall-through.)
      case "bookmark":
      case "microsummary":
        key = "b" + item.bmkUri + ":" + (item.title || "");
        break;
      case "folder":
      case "livemark":
        key = "f" + (item.title || "");
        break;
      case "separator":
        key = "s" + item.pos;
        break;
      default:
        // Unknown types can't be duped.
        return;
    }

    // Figure out if we have a map to use!
    // This will throw in some circumstances. That's fine.
    let guidMap = this._guidMap;

    // Give the GUID if we have the matching pair.
    let parentName = item.parentName || "";
    this._log.trace("Finding mapping: " + parentName + ", " + key);
    let parent = guidMap[parentName];

    if (!parent) {
      this._log.trace("No parent => no dupe.");
      return undefined;
    }

    let dupe = parent[key];

    if (dupe) {
      this._log.trace("Mapped dupe: " + dupe);
      return dupe;
    }

    if (altKey) {
      dupe = parent[altKey];
      if (dupe) {
        this._log.trace("Mapped dupe using altKey " + altKey + ": " + dupe);
        return dupe;
      }
    }

    this._log.trace("No dupe found for key " + key + "/" + altKey + ".");
    return undefined;
  },
+
  // Engine startup hook: backs up bookmarks on a first sync, installs the
  // lazy `_guidMap` getter, and resets per-sync store state.
  _syncStartup: function _syncStart() {
    SyncEngine.prototype._syncStartup.call(this);

    let cb = Async.makeSpinningCallback();
    Task.spawn(function* () {
      // For first-syncs, make a backup for the user to restore
      if (this.lastSync == 0) {
        this._log.debug("Bookmarks backup starting.");
        yield PlacesBackups.create(null, true);
        this._log.debug("Bookmarks backup done.");
      }
    }.bind(this)).then(
      cb, ex => {
        // Failure to create a backup is somewhat bad, but probably not bad
        // enough to prevent syncing of bookmarks - so just log the error and
        // continue.
        this._log.warn("Error while backing up bookmarks, but continuing with sync", ex);
        cb();
      }
    );

    // Block until the backup attempt (success or failure) has completed.
    cb.wait();

    this.__defineGetter__("_guidMap", function() {
      // Create a mapping of folder titles and separator positions to GUID.
      // We do this lazily so that we don't do any work unless we reconcile
      // incoming items.
      let guidMap;
      try {
        guidMap = this._buildGUIDMap();
      } catch (ex) {
        if (Async.isShutdownException(ex)) {
          throw ex;
        }
        this._log.warn("Error while building GUID map, skipping all other incoming items", ex);
        throw {code: Engine.prototype.eEngineAbortApplyIncoming,
               cause: ex};
      }
      // Replace the getter with the computed map on first access.
      delete this._guidMap;
      return this._guidMap = guidMap;
    });

    this._store._childrenToOrder = {};
    this._store.clearPendingDeletions();
  },
+
+ _deletePending() {
+ // Delete pending items -- See the comment above BookmarkStore's deletePending
+ let newlyModified = Async.promiseSpinningly(this._store.deletePending());
+ let now = this._tracker._now();
+ this._log.debug("Deleted pending items", newlyModified);
+ for (let modifiedSyncID of newlyModified) {
+ if (!this._modified.has(modifiedSyncID)) {
+ this._modified.set(modifiedSyncID, { timestamp: now, deleted: false });
+ }
+ }
+ },
+
+ // We avoid reviving folders since reviving them properly would require
+ // reviving their children as well. Unfortunately, this is the wrong choice
+ // in the case of a bookmark restore where wipeServer failed -- if the
+ // server has the folder as deleted, we *would* want to reupload this folder.
+ // This is mitigated by the fact that we move any undeleted children to the
+ // grandparent when deleting the parent.
+ _shouldReviveRemotelyDeletedRecord(item) {
+ let kind = Async.promiseSpinningly(
+ PlacesSyncUtils.bookmarks.getKindForSyncId(item.id));
+ if (kind === PlacesSyncUtils.bookmarks.KINDS.FOLDER) {
+ return false;
+ }
+
+ // In addition to preventing the deletion of this record (handled by the caller),
+ // we need to mark the parent of this record for uploading next sync, in order
+ // to ensure its children array is accurate.
+ let modifiedTimestamp = this._modified.getModifiedTimestamp(item.id);
+ if (!modifiedTimestamp) {
+ // We only expect this to be called with items locally modified, so
+ // something strange is going on - play it safe and don't revive it.
+ this._log.error("_shouldReviveRemotelyDeletedRecord called on unmodified item: " + item.id);
+ return false;
+ }
+
+ let localID = this._store.idForGUID(item.id);
+ let localParentID = PlacesUtils.bookmarks.getFolderIdForItem(localID);
+ let localParentSyncID = this._store.GUIDForId(localParentID);
+
+ this._log.trace(`Reviving item "${item.id}" and marking parent ${localParentSyncID} as modified.`);
+
+ if (!this._modified.has(localParentSyncID)) {
+ this._modified.set(localParentSyncID, {
+ timestamp: modifiedTimestamp,
+ deleted: false
+ });
+ }
+ return true
+ },
+
+ _processIncoming: function (newitems) {
+ try {
+ SyncEngine.prototype._processIncoming.call(this, newitems);
+ } finally {
+ try {
+ this._deletePending();
+ } finally {
+ // Reorder children.
+ this._store._orderChildren();
+ delete this._store._childrenToOrder;
+ }
+ }
+ },
+
+ _syncFinish: function _syncFinish() {
+ SyncEngine.prototype._syncFinish.call(this);
+ this._tracker._ensureMobileQuery();
+ },
+
+ _syncCleanup: function _syncCleanup() {
+ SyncEngine.prototype._syncCleanup.call(this);
+ delete this._guidMap;
+ },
+
+ _createRecord: function _createRecord(id) {
+ // Create the record as usual, but mark it as having dupes if necessary.
+ let record = SyncEngine.prototype._createRecord.call(this, id);
+ let entry = this._mapDupe(record);
+ if (entry != null && entry.hasDupe) {
+ record.hasDupe = true;
+ }
+ return record;
+ },
+
+ _findDupe: function _findDupe(item) {
+ this._log.trace("Finding dupe for " + item.id +
+ " (already duped: " + item.hasDupe + ").");
+
+ // Don't bother finding a dupe if the incoming item has duplicates.
+ if (item.hasDupe) {
+ this._log.trace(item.id + " already a dupe: not finding one.");
+ return;
+ }
+ let mapped = this._mapDupe(item);
+ this._log.debug(item.id + " mapped to " + mapped);
+ // We must return a string, not an object, and the entries in the GUIDMap
+ // are created via "new String()" making them an object.
+ return mapped ? mapped.toString() : mapped;
+ },
+
+ pullAllChanges() {
+ return new BookmarksChangeset(this._store.getAllIDs());
+ },
+
  // Returns a changeset of tracked changes, after pruning tracked IDs that
  // turn out not to live under any syncable root (tags, organizer queries,
  // and other untracked descendants).
  pullNewChanges() {
    let modifiedGUIDs = this._getModifiedGUIDs();
    if (!modifiedGUIDs.length) {
      return new BookmarksChangeset(this._tracker.changedIDs);
    }

    // We don't use `PlacesUtils.promiseDBConnection` here because
    // `getChangedIDs` might be called while we're in a batch, meaning we
    // won't see any changes until the batch finishes and the transaction
    // commits.
    let db = PlacesUtils.history.QueryInterface(Ci.nsPIPlacesDatabase)
                        .DBConnection;

    // Filter out tags, organizer queries, and other descendants that we're
    // not tracking. We chunk `modifiedGUIDs` because SQLite limits the number
    // of bound parameters per query.
    for (let startIndex = 0;
         startIndex < modifiedGUIDs.length;
         startIndex += SQLITE_MAX_VARIABLE_NUMBER) {

      let chunkLength = Math.min(SQLITE_MAX_VARIABLE_NUMBER,
                                 modifiedGUIDs.length - startIndex);

      // Recursive CTE: `syncedItems` is the set of ids reachable from the
      // syncable roots; modified GUIDs NOT in that set are untracked.
      let query = `
        WITH RECURSIVE
        modifiedGuids(guid) AS (
          VALUES ${new Array(chunkLength).fill("(?)").join(", ")}
        ),
        syncedItems(id) AS (
          VALUES ${getChangeRootIds().map(id => `(${id})`).join(", ")}
          UNION ALL
          SELECT b.id
          FROM moz_bookmarks b
          JOIN syncedItems s ON b.parent = s.id
        )
        SELECT b.guid
        FROM modifiedGuids m
        JOIN moz_bookmarks b ON b.guid = m.guid
        LEFT JOIN syncedItems s ON b.id = s.id
        WHERE s.id IS NULL
      `;

      let statement = db.createAsyncStatement(query);
      try {
        for (let i = 0; i < chunkLength; i++) {
          statement.bindByIndex(i, modifiedGUIDs[startIndex + i]);
        }
        let results = Async.querySpinningly(statement, ["guid"]);
        // Every GUID the query returned is outside the synced roots; drop it
        // from the tracker so it isn't uploaded.
        for (let { guid } of results) {
          let syncID = PlacesSyncUtils.bookmarks.guidToSyncId(guid);
          this._tracker.removeChangedID(syncID);
        }
      } finally {
        statement.finalize();
      }
    }

    return new BookmarksChangeset(this._tracker.changedIDs);
  },
+
+ // Returns an array of Places GUIDs for all changed items. Ignores deletions,
+ // which won't exist in the DB and shouldn't be removed from the tracker.
+ _getModifiedGUIDs() {
+ let guids = [];
+ for (let syncID in this._tracker.changedIDs) {
+ if (this._tracker.changedIDs[syncID].deleted === true) {
+ // The `===` check also filters out old persisted timestamps,
+ // which won't have a `deleted` property.
+ continue;
+ }
+ let guid = PlacesSyncUtils.bookmarks.syncIdToGuid(syncID);
+ guids.push(guid);
+ }
+ return guids;
+ },
+
  // Called when _findDupe returns a dupe item and the engine has decided to
  // switch the existing item to the new incoming item.
  _switchItemToDupe(localDupeGUID, incomingItem) {
    // We unconditionally change the item's ID in case the engine knows of
    // an item but doesn't expose it through itemExists. If the API
    // contract were stronger, this could be changed.
    this._log.debug("Switching local ID to incoming: " + localDupeGUID + " -> " +
                    incomingItem.id);
    this._store.changeItemID(localDupeGUID, incomingItem.id);

    // And mark the parent as being modified. Given we de-dupe based on the
    // parent *name* it's possible the item having its GUID changed has a
    // different parent from the incoming record.
    // So we need to find the GUID of the local parent.
    // NOTE(review): these entries use a `modified:` key while other call
    // sites (_deletePending, _shouldReviveRemotelyDeletedRecord) use
    // `timestamp:` -- confirm which key BookmarksChangeset expects.
    let now = this._tracker._now();
    let localID = this._store.idForGUID(incomingItem.id);
    let localParentID = PlacesUtils.bookmarks.getFolderIdForItem(localID);
    let localParentGUID = this._store.GUIDForId(localParentID);
    this._modified.set(localParentGUID, { modified: now, deleted: false });

    // And we also add the parent as reflected in the incoming record as the
    // de-dupe process might have used an existing item in a different folder.
    // But only if the parent exists, otherwise we will upload a deleted item
    // when it might actually be valid, just unknown to us. Note that this
    // scenario will still leave us with inconsistent client and server states;
    // the incoming record on the server references a parent that isn't the
    // actual parent locally - see bug 1297955.
    if (localParentGUID != incomingItem.parentid) {
      let remoteParentID = this._store.idForGUID(incomingItem.parentid);
      if (remoteParentID > 0) {
        // The parent specified in the record does exist, so we are going to
        // attempt a move when we come to applying the record. Mark the parent
        // as being modified so we will later upload it with the new child
        // reference.
        this._modified.set(incomingItem.parentid, { modified: now, deleted: false });
      } else {
        // We aren't going to do a move as we don't have the parent (yet?).
        // When applying the record we will add our special PARENT_ANNO
        // annotation, so if it arrives in the future (either this Sync or a
        // later one) it will be reparented.
        this._log.debug(`Incoming duplicate item ${incomingItem.id} specifies ` +
                        `non-existing parent ${incomingItem.parentid}`);
      }
    }

    // The local, duplicate ID is always deleted on the server - but for
    // bookmarks it is a logical delete.
    // Simply adding this (now non-existing) ID to the tracker is enough.
    this._modified.set(localDupeGUID, { modified: now, deleted: true });
  },
  // Returns a fresh validator for checking local/server bookmark consistency.
  getValidator() {
    return new BookmarkValidator();
  }
+};
+
// Store that applies incoming bookmark records to Places.
function BookmarksStore(name, engine) {
  Store.call(this, name, engine);
  // Sync IDs of items whose deletion is buffered until deletePending().
  this._foldersToDelete = new Set();
  this._atomsToDelete = new Set();
  // Finalize our cached statements at Places shutdown so we don't leak them.
  Svc.Obs.add("places-shutdown", function() {
    for (let query in this._stmts) {
      let stmt = this._stmts[query];
      stmt.finalize();
    }
    this._stmts = {};
  }, this);
}
+BookmarksStore.prototype = {
+ __proto__: Store.prototype,
+
+ itemExists: function BStore_itemExists(id) {
+ return this.idForGUID(id) > 0;
+ },
+
  // Applies one incoming record: handles tombstones, special (root) folders,
  // and malformed queries before delegating to the base Store.
  applyIncoming: function BStore_applyIncoming(record) {
    this._log.debug("Applying record " + record.id);
    let isSpecial = PlacesSyncUtils.bookmarks.ROOTS.includes(record.id);

    if (record.deleted) {
      if (isSpecial) {
        this._log.warn("Ignoring deletion for special record " + record.id);
        return;
      }

      // Don't bother with pre and post-processing for deletions.
      Store.prototype.applyIncoming.call(this, record);
      return;
    }

    // For special folders we're only interested in child ordering.
    if (isSpecial && record.children) {
      this._log.debug("Processing special node: " + record.id);
      // Reorder children later
      this._childrenToOrder[record.id] = record.children;
      return;
    }

    // Skip malformed records. (Bug 806460.)
    if (record.type == "query" &&
        !record.bmkUri) {
      this._log.warn("Skipping malformed query bookmark: " + record.id);
      return;
    }

    // Figure out the local id of the parent GUID if available
    let parentGUID = record.parentid;
    if (!parentGUID) {
      // NOTE(review): throws a bare string, not an Error; callers may match
      // on it, so it is deliberately left untouched here.
      throw "Record " + record.id + " has invalid parentid: " + parentGUID;
    }
    this._log.debug("Remote parent is " + parentGUID);

    // Do the normal processing of incoming records
    Store.prototype.applyIncoming.call(this, record);

    // Folder children are reordered in one pass after all records apply.
    if (record.type == "folder" && record.children) {
      this._childrenToOrder[record.id] = record.children;
    }
  },
+
+ create: function BStore_create(record) {
+ let info = record.toSyncBookmark();
+ // This can throw if we're inserting an invalid or incomplete bookmark.
+ // That's fine; the exception will be caught by `applyIncomingBatch`
+ // without aborting further processing.
+ let item = Async.promiseSpinningly(PlacesSyncUtils.bookmarks.insert(info));
+ if (item) {
+ this._log.debug(`Created ${item.kind} ${item.syncId} under ${
+ item.parentSyncId}`, item);
+ }
+ },
+
+ remove: function BStore_remove(record) {
+ if (PlacesSyncUtils.bookmarks.isRootSyncID(record.id)) {
+ this._log.warn("Refusing to remove special folder " + record.id);
+ return;
+ }
+ let recordKind = Async.promiseSpinningly(
+ PlacesSyncUtils.bookmarks.getKindForSyncId(record.id));
+ let isFolder = recordKind === PlacesSyncUtils.bookmarks.KINDS.FOLDER;
+ this._log.trace(`Buffering removal of item "${record.id}" of type "${recordKind}".`);
+ if (isFolder) {
+ this._foldersToDelete.add(record.id);
+ } else {
+ this._atomsToDelete.add(record.id);
+ }
+ },
+
+ update: function BStore_update(record) {
+ let info = record.toSyncBookmark();
+ let item = Async.promiseSpinningly(PlacesSyncUtils.bookmarks.update(info));
+ if (item) {
+ this._log.debug(`Updated ${item.kind} ${item.syncId} under ${
+ item.parentSyncId}`, item);
+ }
+ },
+
+ _orderChildren: function _orderChildren() {
+ let promises = Object.keys(this._childrenToOrder).map(syncID => {
+ let children = this._childrenToOrder[syncID];
+ return PlacesSyncUtils.bookmarks.order(syncID, children).catch(ex => {
+ this._log.debug(`Could not order children for ${syncID}`, ex);
+ });
+ });
+ Async.promiseSpinningly(Promise.all(promises));
+ },
+
+ // There's some complexity here around pending deletions. Our goals:
+ //
+ // - Don't delete any bookmarks a user has created but not explicitly deleted
+ // (This includes any bookmark that was not a child of the folder at the
+ // time the deletion was recorded, and also bookmarks restored from a backup).
+ // - Don't undelete any bookmark without ensuring the server structure
+ // includes it (see `BookmarkEngine.prototype._shouldReviveRemotelyDeletedRecord`)
+ //
+  // This leads to the following approach:
+ //
+ // - Additions, moves, and updates are processed before deletions.
+ // - To do this, all deletion operations are buffered during a sync. Folders
+ // we plan on deleting have their sync id's stored in `this._foldersToDelete`,
+ // and non-folders we plan on deleting have their sync id's stored in
+ // `this._atomsToDelete`.
+ // - The exception to this is the moves that occur to fix the order of bookmark
+ // children, which are performed after we process deletions.
+ // - Non-folders are deleted before folder deletions, so that when we process
+ // folder deletions we know the correct state.
+ // - Remote deletions always win for folders, but do not result in recursive
+ // deletion of children. This is a hack because we're not able to distinguish
+ // between value changes and structural changes to folders, and we don't even
+ // have the old server record to compare to. See `BookmarkEngine`'s
+ // `_shouldReviveRemotelyDeletedRecord` method.
+ // - When a folder is deleted, its remaining children are moved in order to
+ // their closest living ancestor. If this is interrupted (unlikely, but
+ // possible given that we don't perform this operation in a transaction),
+ // we revive the folder.
+ // - Remote deletions can lose for non-folders, but only until we handle
+ // bookmark restores correctly (removing stale state from the server -- this
+ // is to say, if bug 1230011 is fixed, we should never revive bookmarks).
+
+ deletePending: Task.async(function* deletePending() {
+ yield this._deletePendingAtoms();
+ let guidsToUpdate = yield this._deletePendingFolders();
+ this.clearPendingDeletions();
+ return guidsToUpdate;
+ }),
+
+ clearPendingDeletions() {
+ this._foldersToDelete.clear();
+ this._atomsToDelete.clear();
+ },
+
+ _deleteAtom: Task.async(function* _deleteAtom(syncID) {
+ try {
+ let info = yield PlacesSyncUtils.bookmarks.remove(syncID, {
+ preventRemovalOfNonEmptyFolders: true
+ });
+ this._log.trace(`Removed item ${syncID} with type ${info.type}`);
+ } catch (ex) {
+ // Likely already removed.
+ this._log.trace(`Error removing ${syncID}`, ex);
+ }
+ }),
+
+ _deletePendingAtoms() {
+ return Promise.all(
+ [...this._atomsToDelete.values()]
+ .map(syncID => this._deleteAtom(syncID)));
+ },
+
+ // Returns an array of sync ids that need updates.
+ _deletePendingFolders: Task.async(function* _deletePendingFolders() {
+ // To avoid data loss, we don't want to just delete the folder outright,
+ // so we buffer folder deletions and process them at the end (now).
+ //
+ // At this point, any member in the folder that remains is either a folder
+ // pending deletion (which we'll get to in this function), or an item that
+ // should not be deleted. To avoid deleting these items, we first move them
+ // to the parent of the folder we're about to delete.
+ let needUpdate = new Set();
+ for (let syncId of this._foldersToDelete) {
+ let childSyncIds = yield PlacesSyncUtils.bookmarks.fetchChildSyncIds(syncId);
+ if (!childSyncIds.length) {
+ // No children -- just delete the folder.
+ yield this._deleteAtom(syncId)
+ continue;
+ }
+ // We could avoid some redundant work here by finding the nearest
+ // grandparent who isn't present in `this._toDelete`...
+
+ let grandparentSyncId = this.GUIDForId(
+ PlacesUtils.bookmarks.getFolderIdForItem(
+ this.idForGUID(PlacesSyncUtils.bookmarks.syncIdToGuid(syncId))));
+
+ this._log.trace(`Moving ${childSyncIds.length} children of "${syncId}" to ` +
+ `grandparent "${grandparentSyncId}" before deletion.`);
+
+ // Move children out of the parent and into the grandparent
+ yield Promise.all(childSyncIds.map(child => PlacesSyncUtils.bookmarks.update({
+ syncId: child,
+ parentSyncId: grandparentSyncId
+ })));
+
+ // Delete the (now empty) parent
+ try {
+ yield PlacesSyncUtils.bookmarks.remove(syncId, {
+ preventRemovalOfNonEmptyFolders: true
+ });
+ } catch (e) {
+ // We failed, probably because someone added something to this folder
+ // between when we got the children and now (or the database is corrupt,
+ // or something else happened...) This is unlikely, but possible. To
+ // avoid corruption in this case, we need to reupload the record to the
+ // server.
+ //
+ // (Ideally this whole operation would be done in a transaction, and this
+ // wouldn't be possible).
+ needUpdate.add(syncId);
+ }
+
+ // Add children (for parentid) and grandparent (for children list) to set
+ // of records needing an update, *unless* they're marked for deletion.
+ if (!this._foldersToDelete.has(grandparentSyncId)) {
+ needUpdate.add(grandparentSyncId);
+ }
+ for (let childSyncId of childSyncIds) {
+ if (!this._foldersToDelete.has(childSyncId)) {
+ needUpdate.add(childSyncId);
+ }
+ }
+ }
+ return [...needUpdate];
+ }),
+
+ changeItemID: function BStore_changeItemID(oldID, newID) {
+ this._log.debug("Changing GUID " + oldID + " to " + newID);
+
+ Async.promiseSpinningly(PlacesSyncUtils.bookmarks.changeGuid(oldID, newID));
+ },
+
+ // Create a record starting from the weave id (places guid)
+ createRecord: function createRecord(id, collection) {
+ let item = Async.promiseSpinningly(PlacesSyncUtils.bookmarks.fetch(id));
+ if (!item) { // deleted item
+ let record = new PlacesItem(collection, id);
+ record.deleted = true;
+ return record;
+ }
+
+ let recordObj = getTypeObject(item.kind);
+ if (!recordObj) {
+ this._log.warn("Unknown item type, cannot serialize: " + item.kind);
+ recordObj = PlacesItem;
+ }
+ let record = new recordObj(collection, id);
+ record.fromSyncBookmark(item);
+
+ record.sortindex = this._calculateIndex(record);
+
+ return record;
+ },
+
+  // Cache of compiled mozStorage statements, keyed by SQL text. NOTE: this
+  // object lives on the prototype, so it is shared across instances -- fine
+  // here since the store is effectively a singleton.
+  _stmts: {},
+  _getStmt: function(query) {
+    // Compile `query` once and memoize the async statement for reuse.
+    if (query in this._stmts) {
+      return this._stmts[query];
+    }
+
+    this._log.trace("Creating SQL statement: " + query);
+    let db = PlacesUtils.history.QueryInterface(Ci.nsPIPlacesDatabase)
+                                .DBConnection;
+    return this._stmts[query] = db.createAsyncStatement(query);
+  },
+
+  // Looks up a page's frecency by URL; used by _calculateIndex.
+  get _frecencyStm() {
+    return this._getStmt(
+      "SELECT frecency " +
+      "FROM moz_places " +
+      "WHERE url_hash = hash(:url) AND url = :url " +
+      "LIMIT 1");
+  },
+  _frecencyCols: ["frecency"],
+
+ GUIDForId: function GUIDForId(id) {
+ let guid = Async.promiseSpinningly(PlacesUtils.promiseItemGuid(id));
+ return PlacesSyncUtils.bookmarks.guidToSyncId(guid);
+ },
+
+ idForGUID: function idForGUID(guid) {
+ // guid might be a String object rather than a string.
+ guid = PlacesSyncUtils.bookmarks.syncIdToGuid(guid.toString());
+
+ return Async.promiseSpinningly(PlacesUtils.promiseItemId(guid).catch(
+ ex => -1));
+ },
+
+ _calculateIndex: function _calculateIndex(record) {
+ // Ensure folders have a very high sort index so they're not synced last.
+ if (record.type == "folder")
+ return FOLDER_SORTINDEX;
+
+ // For anything directly under the toolbar, give it a boost of more than an
+ // unvisited bookmark
+ let index = 0;
+ if (record.parentid == "toolbar")
+ index += 150;
+
+ // Add in the bookmark's frecency if we have something.
+ if (record.bmkUri != null) {
+ this._frecencyStm.params.url = record.bmkUri;
+ let result = Async.querySpinningly(this._frecencyStm, this._frecencyCols);
+ if (result.length)
+ index += result[0].frecency;
+ }
+
+ return index;
+ },
+
+  // Returns a map of syncID -> { modified, deleted } for every bookmark
+  // under the syncable roots (see getChangeRootIds).
+  getAllIDs: function BStore_getAllIDs() {
+    let items = {};
+
+    // The interpolated VALUES come from getChangeRootIds(), which returns
+    // internal numeric Places folder ids, so string interpolation into the
+    // SQL is safe here.
+    let query = `
+      WITH RECURSIVE
+      changeRootContents(id) AS (
+        VALUES ${getChangeRootIds().map(id => `(${id})`).join(", ")}
+        UNION ALL
+        SELECT b.id
+        FROM moz_bookmarks b
+        JOIN changeRootContents c ON b.parent = c.id
+      )
+      SELECT guid
+      FROM changeRootContents
+      JOIN moz_bookmarks USING (id)
+    `;
+
+    let statement = this._getStmt(query);
+    let results = Async.querySpinningly(statement, ["guid"]);
+    for (let { guid } of results) {
+      let syncID = PlacesSyncUtils.bookmarks.guidToSyncId(guid);
+      // modified: 0 means "unknown"; the engine fills in real timestamps.
+      items[syncID] = { modified: 0, deleted: false };
+    }
+
+    return items;
+  },
+
+  wipe: function BStore_wipe() {
+    // Drop any buffered deletions; they're moot once everything is wiped.
+    this.clearPendingDeletions();
+    Async.promiseSpinningly(Task.spawn(function* () {
+      // Save a backup before clearing out all bookmarks.
+      yield PlacesBackups.create(null, true);
+      yield PlacesUtils.bookmarks.eraseEverything({
+        source: SOURCE_SYNC,
+      });
+    }));
+  }
+};
+
+function BookmarksTracker(name, engine) {
+  // Depth of nested Places batch operations; while > 0, score increments are
+  // deferred until the outermost batch ends (see onBeginUpdateBatch below).
+  this._batchDepth = 0;
+  this._batchSawScoreIncrement = false;
+  Tracker.call(this, name, engine);
+
+  Svc.Obs.add("places-shutdown", this);
+}
+BookmarksTracker.prototype = {
+  __proto__: Tracker.prototype,
+
+  // `_ignore` checks the change source for each observer notification, so we
+  // don't want to let the engine ignore all changes during a sync.
+  get ignoreAll() {
+    return false;
+  },
+
+  // Define an empty setter so that the engine doesn't throw a `TypeError`
+  // setting a read-only property.
+  set ignoreAll(value) {},
+
+ startTracking: function() {
+ PlacesUtils.bookmarks.addObserver(this, true);
+ Svc.Obs.add("bookmarks-restore-begin", this);
+ Svc.Obs.add("bookmarks-restore-success", this);
+ Svc.Obs.add("bookmarks-restore-failed", this);
+ },
+
+ stopTracking: function() {
+ PlacesUtils.bookmarks.removeObserver(this);
+ Svc.Obs.remove("bookmarks-restore-begin", this);
+ Svc.Obs.remove("bookmarks-restore-success", this);
+ Svc.Obs.remove("bookmarks-restore-failed", this);
+ },
+
+  observe: function observe(subject, topic, data) {
+    Tracker.prototype.observe.call(this, subject, topic, data);
+
+    switch (topic) {
+      case "bookmarks-restore-begin":
+        this._log.debug("Ignoring changes from importing bookmarks.");
+        break;
+      case "bookmarks-restore-success":
+        this._log.debug("Tracking all items on successful import.");
+
+        // A restore replaces the whole local tree, so local state wins:
+        // reset this engine, wipe the server, and tell other clients to
+        // wipe their copies too.
+        this._log.debug("Restore succeeded: wiping server and other clients.");
+        this.engine.service.resetClient([this.name]);
+        this.engine.service.wipeServer([this.name]);
+        this.engine.service.clientsEngine.sendCommand("wipeEngine", [this.name]);
+        break;
+      case "bookmarks-restore-failed":
+        this._log.debug("Tracking all items on failed import.");
+        break;
+    }
+  },
+
+  // nsISupportsWeakReference lets Places hold us without keeping us alive;
+  // matches the `addObserver(this, true)` call in startTracking.
+  QueryInterface: XPCOMUtils.generateQI([
+    Ci.nsINavBookmarkObserver,
+    Ci.nsINavBookmarkObserver_MOZILLA_1_9_1_ADDITIONS,
+    Ci.nsISupportsWeakReference
+  ]),
+
+  // Record that `id` changed, unless an equal-or-newer change is already
+  // tracked. Returns true when the id is accepted for tracking.
+  addChangedID(id, change) {
+    if (!id) {
+      this._log.warn("Attempted to add undefined ID to tracker");
+      return false;
+    }
+    if (this._ignored.includes(id)) {
+      return false;
+    }
+    let shouldSaveChange = false;
+    let currentChange = this.changedIDs[id];
+    if (currentChange) {
+      if (typeof currentChange == "number") {
+        // Allow raw timestamps for backward-compatibility with persisted
+        // changed IDs. The new format uses tuples to track deleted items.
+        shouldSaveChange = currentChange < change.modified;
+      } else {
+        shouldSaveChange = currentChange.modified < change.modified ||
+                           currentChange.deleted != change.deleted;
+      }
+    } else {
+      shouldSaveChange = true;
+    }
+    if (shouldSaveChange) {
+      this._saveChangedID(id, change);
+    }
+    return true;
+  },
+
+  /**
+   * Add a bookmark GUID to be uploaded and bump up the sync score.
+   *
+   * @param itemId
+   *        The Places item ID of the bookmark to upload.
+   * @param guid
+   *        The Places GUID of the bookmark to upload.
+   * @param isTombstone
+   *        Whether we're uploading a tombstone for a removed bookmark.
+   */
+  _add: function BMT__add(itemId, guid, isTombstone = false) {
+    // NOTE(review): `itemId` is unused here; tracking is keyed on the GUID's
+    // sync id only.
+    let syncID = PlacesSyncUtils.bookmarks.guidToSyncId(guid);
+    let info = { modified: Date.now() / 1000, deleted: isTombstone };
+    if (this.addChangedID(syncID, info)) {
+      this._upScore();
+    }
+  },
+
+  /* Every add/remove/change will trigger a sync for MULTI_DEVICE (except in
+     a batch operation, where we do it at the end of the batch) */
+  _upScore: function BMT__upScore() {
+    if (this._batchDepth == 0) {
+      this.score += SCORE_INCREMENT_XLARGE;
+    } else {
+      this._batchSawScoreIncrement = true;
+    }
+  },
+
+  onItemAdded: function BMT_onItemAdded(itemId, folder, index,
+                                        itemType, uri, title, dateAdded,
+                                        guid, parentGuid, source) {
+    // Skip changes made by Sync itself (and other ignored sources).
+    if (IGNORED_SOURCES.includes(source)) {
+      return;
+    }
+
+    this._log.trace("onItemAdded: " + itemId);
+    // Track both the new item and its parent, whose child list changed.
+    this._add(itemId, guid);
+    this._add(folder, parentGuid);
+  },
+
+  onItemRemoved: function (itemId, parentId, index, type, uri,
+                           guid, parentGuid, source) {
+    if (IGNORED_SOURCES.includes(source)) {
+      return;
+    }
+
+    // Ignore changes to tags (folders under the tags folder).
+    if (parentId == PlacesUtils.tagsFolderId) {
+      return;
+    }
+
+    let grandParentId = -1;
+    try {
+      grandParentId = PlacesUtils.bookmarks.getFolderIdForItem(parentId);
+    } catch (ex) {
+      // `getFolderIdForItem` can throw if the item no longer exists, such as
+      // when we've removed a subtree using `removeFolderChildren`.
+      return;
+    }
+
+    // Ignore tag items (the actual instance of a tag for a bookmark).
+    if (grandParentId == PlacesUtils.tagsFolderId) {
+      return;
+    }
+
+    /**
+     * The above checks are incomplete: we can still write tombstones for
+     * items that we don't track, and upload extraneous roots.
+     *
+     * Consider the left pane root: it's a child of the Places root, and has
+     * children and grandchildren. `PlacesUIUtils` can create, delete, and
+     * recreate it as needed. We can't determine ancestors when the root or its
+     * children are deleted, because they've already been removed from the
+     * database when `onItemRemoved` is called. Likewise, we can't check their
+     * "exclude from backup" annos, because they've *also* been removed.
+     *
+     * So, we end up writing tombstones for the left pane queries and left
+     * pane root. For good measure, we'll also upload the Places root, because
+     * it's the parent of the left pane root.
+     *
+     * As a workaround, we can track the parent GUID and reconstruct the item's
+     * ancestry at sync time. This is complicated, and the previous behavior was
+     * already wrong, so we'll wait for bug 1258127 to fix this generally.
+     */
+    this._log.trace("onItemRemoved: " + itemId);
+    // Tombstone for the removed item, plus an update for its parent.
+    this._add(itemId, guid, /* isTombstone */ true);
+    this._add(parentId, parentGuid);
+  },
+
+  // Keeps the left-pane "Mobile Bookmarks" organizer query in step with the
+  // mobile root: creates it when the root has children, removes it when the
+  // root is empty, and keeps its URL/title current.
+  _ensureMobileQuery: function _ensureMobileQuery() {
+    let find = val =>
+      PlacesUtils.annotations.getItemsWithAnnotation(ORGANIZERQUERY_ANNO, {}).filter(
+        id => PlacesUtils.annotations.getItemAnnotation(id, ORGANIZERQUERY_ANNO) == val
+      );
+
+    // Don't continue if the Library isn't ready
+    let all = find(ALLBOOKMARKS_ANNO);
+    if (all.length == 0)
+      return;
+
+    let mobile = find(MOBILE_ANNO);
+    let queryURI = Utils.makeURI("place:folder=" + PlacesUtils.mobileFolderId);
+    let title = PlacesBundle.GetStringFromName("MobileBookmarksFolderTitle");
+
+    // Don't add OR remove the mobile bookmarks if there's nothing.
+    if (PlacesUtils.bookmarks.getIdForItemAt(PlacesUtils.mobileFolderId, 0) == -1) {
+      if (mobile.length != 0)
+        PlacesUtils.bookmarks.removeItem(mobile[0], SOURCE_SYNC);
+    }
+    // Add the mobile bookmarks query if it doesn't exist
+    else if (mobile.length == 0) {
+      let query = PlacesUtils.bookmarks.insertBookmark(all[0], queryURI, -1, title, /* guid */ null, SOURCE_SYNC);
+      PlacesUtils.annotations.setItemAnnotation(query, ORGANIZERQUERY_ANNO, MOBILE_ANNO, 0,
+                                                PlacesUtils.annotations.EXPIRE_NEVER, SOURCE_SYNC);
+      PlacesUtils.annotations.setItemAnnotation(query, PlacesUtils.EXCLUDE_FROM_BACKUP_ANNO, 1, 0,
+                                                PlacesUtils.annotations.EXPIRE_NEVER, SOURCE_SYNC);
+    }
+    // Make sure the existing query URL and title are correct
+    else {
+      if (!PlacesUtils.bookmarks.getBookmarkURI(mobile[0]).equals(queryURI)) {
+        PlacesUtils.bookmarks.changeBookmarkURI(mobile[0], queryURI,
+                                                SOURCE_SYNC);
+      }
+      let queryTitle = PlacesUtils.bookmarks.getItemTitle(mobile[0]);
+      if (queryTitle != title) {
+        PlacesUtils.bookmarks.setItemTitle(mobile[0], title, SOURCE_SYNC);
+      }
+      let rootTitle =
+        PlacesUtils.bookmarks.getItemTitle(PlacesUtils.mobileFolderId);
+      if (rootTitle != title) {
+        PlacesUtils.bookmarks.setItemTitle(PlacesUtils.mobileFolderId, title,
+                                           SOURCE_SYNC);
+      }
+    }
+  },
+
+  // This method is oddly structured, but the idea is to return as quickly as
+  // possible -- this handler gets called *every time* a bookmark changes, for
+  // *each change*.
+  onItemChanged: function BMT_onItemChanged(itemId, property, isAnno, value,
+                                            lastModified, itemType, parentId,
+                                            guid, parentGuid, oldValue,
+                                            source) {
+    if (IGNORED_SOURCES.includes(source)) {
+      return;
+    }
+
+    if (isAnno && (ANNOS_TO_TRACK.indexOf(property) == -1))
+      // Ignore annotations except for the ones that we sync.
+      return;
+
+    // Ignore favicon changes to avoid unnecessary churn.
+    if (property == "favicon")
+      return;
+
+    this._log.trace("onItemChanged: " + itemId +
+                    (", " + property + (isAnno? " (anno)" : "")) +
+                    (value ? (" = \"" + value + "\"") : ""));
+    this._add(itemId, guid);
+  },
+
+  onItemMoved: function BMT_onItemMoved(itemId, oldParent, oldIndex,
+                                        newParent, newIndex, itemType,
+                                        guid, oldParentGuid, newParentGuid,
+                                        source) {
+    if (IGNORED_SOURCES.includes(source)) {
+      return;
+    }
+
+    this._log.trace("onItemMoved: " + itemId);
+    // The old parent's child list always changed; the item itself and the
+    // new parent only changed if the move crossed folders.
+    this._add(oldParent, oldParentGuid);
+    if (oldParent != newParent) {
+      this._add(itemId, guid);
+      this._add(newParent, newParentGuid);
+    }
+
+    // Remove any position annotations now that the user moved the item
+    PlacesUtils.annotations.removeItemAnnotation(itemId,
+      PlacesSyncUtils.bookmarks.SYNC_PARENT_ANNO, SOURCE_SYNC);
+  },
+
+  // Places batch notifications: defer score bumps until the outermost batch
+  // ends, so a whole batch counts as a single change for scheduling.
+  onBeginUpdateBatch: function () {
+    ++this._batchDepth;
+  },
+  onEndUpdateBatch: function () {
+    if (--this._batchDepth === 0 && this._batchSawScoreIncrement) {
+      this.score += SCORE_INCREMENT_XLARGE;
+      this._batchSawScoreIncrement = false;
+    }
+  },
+  // Visits are tracked by the history engine, not here.
+  onItemVisited: function () {}
+};
+
+// Returns an array of root IDs to recursively query for synced bookmarks.
+// Items in other roots, including tags and organizer queries, will be
+// ignored.
+function getChangeRootIds() {
+ return [
+ PlacesUtils.bookmarksMenuFolderId,
+ PlacesUtils.toolbarFolderId,
+ PlacesUtils.unfiledBookmarksFolderId,
+ PlacesUtils.mobileFolderId,
+ ];
+}
+
+class BookmarksChangeset extends Changeset {
+ getModifiedTimestamp(id) {
+ let change = this.changes[id];
+ return change ? change.modified : Number.NaN;
+ }
+}
diff --git a/services/sync/modules/engines/clients.js b/services/sync/modules/engines/clients.js
new file mode 100644
index 000000000..3dd679570
--- /dev/null
+++ b/services/sync/modules/engines/clients.js
@@ -0,0 +1,782 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/**
+ * How does the clients engine work?
+ *
+ * - We use 2 files - commands.json and commands-syncing.json.
+ *
+ * - At sync upload time, we attempt a rename of commands.json to
+ * commands-syncing.json, and ignore errors (helps for crash during sync!).
+ * - We load commands-syncing.json and stash the contents in
+ * _currentlySyncingCommands which lives for the duration of the upload process.
+ * - We use _currentlySyncingCommands to build the outgoing records
+ * - Immediately after successful upload, we delete commands-syncing.json from
+ * disk (and clear _currentlySyncingCommands). We reconcile our local records
+ * with what we just wrote in the server, and add failed IDs commands
+ * back in commands.json
+ * - Any time we need to "save" a command for future syncs, we load
+ * commands.json, update it, and write it back out.
+ */
+
+this.EXPORTED_SYMBOLS = [
+ "ClientEngine",
+ "ClientsRec"
+];
+
+var {classes: Cc, interfaces: Ci, utils: Cu} = Components;
+
+Cu.import("resource://services-common/async.js");
+Cu.import("resource://services-common/stringbundle.js");
+Cu.import("resource://services-sync/constants.js");
+Cu.import("resource://services-sync/engines.js");
+Cu.import("resource://services-sync/record.js");
+Cu.import("resource://services-sync/resource.js");
+Cu.import("resource://services-sync/util.js");
+Cu.import("resource://gre/modules/Services.jsm");
+
+XPCOMUtils.defineLazyModuleGetter(this, "fxAccounts",
+ "resource://gre/modules/FxAccounts.jsm");
+
+// Storage-server TTL assigned to client records (see ClientsRec.ttl).
+const CLIENTS_TTL = 1814400; // 21 days
+// Reupload our own client record if it's older than this (see _syncStartup).
+const CLIENTS_TTL_REFRESH = 604800; // 7 days
+// Same-named clients that haven't synced in this long are marked stale.
+const STALE_CLIENT_REMOTE_AGE = 604800; // 7 days
+
+const SUPPORTED_PROTOCOL_VERSIONS = ["1.1", "1.5"];
+
+function hasDupeCommand(commands, action) {
+ if (!commands) {
+ return false;
+ }
+ return commands.some(other => other.command == action.command &&
+ Utils.deepEquals(other.args, action.args));
+}
+
+this.ClientsRec = function ClientsRec(collection, id) {
+ CryptoWrapper.call(this, collection, id);
+}
+ClientsRec.prototype = {
+ __proto__: CryptoWrapper.prototype,
+ _logName: "Sync.Record.Clients",
+ ttl: CLIENTS_TTL
+};
+
+Utils.deferGetSet(ClientsRec,
+ "cleartext",
+ ["name", "type", "commands",
+ "version", "protocols",
+ "formfactor", "os", "appPackage", "application", "device",
+ "fxaDeviceId"]);
+
+
+this.ClientEngine = function ClientEngine(service) {
+ SyncEngine.call(this, "Clients", service);
+
+ // Reset the last sync timestamp on every startup so that we fetch all clients
+ this.resetLastSync();
+}
+ClientEngine.prototype = {
+  __proto__: SyncEngine.prototype,
+  _storeObj: ClientStore,
+  _recordObj: ClientsRec,
+  _trackerObj: ClientsTracker,
+  // Skipped (e.g. oversized) records are fatal for this engine.
+  allowSkippedRecord: false,
+
+  // Always sync client data as it controls other sync behavior
+  get enabled() {
+    return true;
+  },
+
+  // Timestamp (seconds) of our last own-record upload, persisted in prefs.
+  get lastRecordUpload() {
+    return Svc.Prefs.get(this.name + ".lastRecordUpload", 0);
+  },
+  set lastRecordUpload(value) {
+    Svc.Prefs.set(this.name + ".lastRecordUpload", Math.floor(value));
+  },
+
+  get remoteClients() {
+    // return all non-stale clients for external consumption.
+    return Object.values(this._store._remoteClients).filter(v => !v.stale);
+  },
+
+ remoteClientExists(id) {
+ let client = this._store._remoteClients[id];
+ return !!(client && !client.stale);
+ },
+
+  // Aggregate some stats on the composition of clients on this account
+  // (counts this device plus all non-stale remote clients).
+  get stats() {
+    let stats = {
+      hasMobile: this.localType == DEVICE_TYPE_MOBILE,
+      names: [this.localName],
+      numClients: 1,
+    };
+
+    for (let id in this._store._remoteClients) {
+      let {name, type, stale} = this._store._remoteClients[id];
+      if (!stale) {
+        stats.hasMobile = stats.hasMobile || type == DEVICE_TYPE_MOBILE;
+        stats.names.push(name);
+        stats.numClients++;
+      }
+    }
+
+    return stats;
+  },
+
+  /**
+   * Obtain information about device types.
+   *
+   * Returns a Map of device types to integer counts.
+   */
+  get deviceTypes() {
+    let counts = new Map();
+
+    // Count ourselves first.
+    counts.set(this.localType, 1);
+
+    for (let id in this._store._remoteClients) {
+      let record = this._store._remoteClients[id];
+      if (record.stale) {
+        continue; // pretend "stale" records don't exist.
+      }
+      let type = record.type;
+      if (!counts.has(type)) {
+        counts.set(type, 0);
+      }
+
+      counts.set(type, counts.get(type) + 1);
+    }
+
+    return counts;
+  },
+
+  get localID() {
+    // Generate a random GUID if we don't have one; the setter persists it.
+    let localID = Svc.Prefs.get("client.GUID", "");
+    return localID == "" ? this.localID = Utils.makeGUID() : localID;
+  },
+  set localID(value) {
+    Svc.Prefs.set("client.GUID", value);
+  },
+
+ get brandName() {
+ let brand = new StringBundle("chrome://branding/locale/brand.properties");
+ return brand.get("brandShortName");
+ },
+
+  get localName() {
+    let name = Utils.getDeviceName();
+    // If `getDeviceName` returns the default name, set the pref. FxA registers
+    // the device before syncing, so we don't need to update the registration
+    // in this case.
+    Svc.Prefs.set("client.name", name);
+    return name;
+  },
+  set localName(value) {
+    Svc.Prefs.set("client.name", value);
+    // Update the registration in the background.
+    fxAccounts.updateDeviceRegistration().catch(error => {
+      this._log.warn("failed to update fxa device registration", error);
+    });
+  },
+
+  // Device type (e.g. "desktop"/"mobile", cf. _syncFinish); reads live,
+  // writes persist to prefs.
+  get localType() {
+    return Utils.getDeviceType();
+  },
+  set localType(value) {
+    Svc.Prefs.set("client.type", value);
+  },
+
+ getClientName(id) {
+ if (id == this.localID) {
+ return this.localName;
+ }
+ let client = this._store._remoteClients[id];
+ return client ? client.name : "";
+ },
+
+ getClientFxaDeviceId(id) {
+ if (this._store._remoteClients[id]) {
+ return this._store._remoteClients[id].fxaDeviceId;
+ }
+ return null;
+ },
+
+ isMobile: function isMobile(id) {
+ if (this._store._remoteClients[id])
+ return this._store._remoteClients[id].type == DEVICE_TYPE_MOBILE;
+ return false;
+ },
+
+  // Synchronously load commands.json; {} when the file is missing/unreadable.
+  _readCommands() {
+    let cb = Async.makeSpinningCallback();
+    Utils.jsonLoad("commands", this, commands => cb(null, commands));
+    return cb.wait() || {};
+  },
+
+  /**
+   * Low level function, do not use directly (use _addClientCommand instead).
+   */
+  _saveCommands(commands) {
+    let cb = Async.makeSpinningCallback();
+    Utils.jsonSave("commands", this, commands, error => {
+      if (error) {
+        this._log.error("Failed to save JSON outgoing commands", error);
+      }
+      cb();
+    });
+    cb.wait();
+  },
+
+  // Rename commands.json to commands-syncing.json (so a crash mid-upload
+  // can't lose queued commands) and return the parsed contents.
+  _prepareCommandsForUpload() {
+    let cb = Async.makeSpinningCallback();
+    Utils.jsonMove("commands", "commands-syncing", this).catch(() => {}) // Ignore errors
+      .then(() => {
+        Utils.jsonLoad("commands-syncing", this, commands => cb(null, commands));
+      });
+    return cb.wait() || {};
+  },
+
+  // After a successful upload: drop the in-memory snapshot and delete the
+  // commands-syncing.json file that backed it.
+  _deleteUploadedCommands() {
+    delete this._currentlySyncingCommands;
+    Async.promiseSpinningly(
+      Utils.jsonRemove("commands-syncing", this).catch(err => {
+        this._log.error("Failed to delete syncing-commands file", err);
+      })
+    );
+  },
+
+ _addClientCommand(clientId, command) {
+ const allCommands = this._readCommands();
+ const clientCommands = allCommands[clientId] || [];
+ if (hasDupeCommand(clientCommands, command)) {
+ return;
+ }
+ allCommands[clientId] = clientCommands.concat(command);
+ this._saveCommands(allCommands);
+ },
+
+  _syncStartup: function _syncStartup() {
+    // Reupload new client record periodically.
+    if (Date.now() / 1000 - this.lastRecordUpload > CLIENTS_TTL_REFRESH) {
+      this._tracker.addChangedID(this.localID);
+      this.lastRecordUpload = Date.now() / 1000;
+    }
+    SyncEngine.prototype._syncStartup.call(this);
+  },
+
+  _processIncoming() {
+    // Fetch all records from the server.
+    this.lastSync = 0;
+    // Populated by _reconcile with every server record's modified time.
+    this._incomingClients = {};
+    try {
+      SyncEngine.prototype._processIncoming.call(this);
+      // Since clients are synced unconditionally, any records in the local store
+      // that don't exist on the server must be for disconnected clients. Remove
+      // them, so that we don't upload records with commands for clients that will
+      // never see them. We also do this to filter out stale clients from the
+      // tabs collection, since showing their list of tabs is confusing.
+      for (let id in this._store._remoteClients) {
+        if (!this._incomingClients[id]) {
+          this._log.info(`Removing local state for deleted client ${id}`);
+          this._removeRemoteClient(id);
+        }
+      }
+      // Bug 1264498: Mobile clients don't remove themselves from the clients
+      // collection when the user disconnects Sync, so we mark as stale clients
+      // with the same name that haven't synced in over a week.
+      // (Note we can't simply delete them, or we re-apply them next sync - see
+      // bug 1287687)
+      delete this._incomingClients[this.localID];
+      let names = new Set([this.localName]);
+      for (let id in this._incomingClients) {
+        let record = this._store._remoteClients[id];
+        // First client seen with a given name is kept; later same-named ones
+        // are only marked stale if they're old enough.
+        if (!names.has(record.name)) {
+          names.add(record.name);
+          continue;
+        }
+        let remoteAge = AsyncResource.serverTime - this._incomingClients[id];
+        if (remoteAge > STALE_CLIENT_REMOTE_AGE) {
+          this._log.info(`Hiding stale client ${id} with age ${remoteAge}`);
+          record.stale = true;
+        }
+      }
+    } finally {
+      this._incomingClients = null;
+    }
+  },
+
+  _uploadOutgoing() {
+    // Snapshot pending commands (renames commands.json -> commands-syncing.json).
+    this._currentlySyncingCommands = this._prepareCommandsForUpload();
+    const clientWithPendingCommands = Object.keys(this._currentlySyncingCommands);
+    for (let clientId of clientWithPendingCommands) {
+      // Mark each targeted client (including ourselves) as modified so the
+      // engine reuploads their records with the new commands attached.
+      if (this._store._remoteClients[clientId] || this.localID == clientId) {
+        this._modified.set(clientId, 0);
+      }
+    }
+    SyncEngine.prototype._uploadOutgoing.call(this);
+  },
+
+  _onRecordsWritten(succeeded, failed) {
+    // Reconcile the status of the local records with what we just wrote on the
+    // server
+    for (let id of succeeded) {
+      const commandChanges = this._currentlySyncingCommands[id];
+      if (id == this.localID) {
+        // Commands we just uploaded for ourselves are now handled; drop them.
+        if (this.localCommands) {
+          this.localCommands = this.localCommands.filter(command => !hasDupeCommand(commandChanges, command));
+        }
+      } else {
+        const clientRecord = this._store._remoteClients[id];
+        if (!commandChanges || !clientRecord) {
+          // should be impossible, else we wouldn't have been writing it.
+          this._log.warn("No command/No record changes for a client we uploaded");
+          continue;
+        }
+        // fixup the client record, so our copy of _remoteClients matches what we uploaded.
+        // NOTE(review): this assigns the full record object returned by
+        // createRecord (not its `commands` array) to `clientRecord.commands`;
+        // confirm against ClientStore.createRecord's return value.
+        clientRecord.commands = this._store.createRecord(id);
+        // we could do better and pass the reference to the record we just uploaded,
+        // but this will do for now
+      }
+    }
+
+    // Re-add failed commands
+    for (let id of failed) {
+      const commandChanges = this._currentlySyncingCommands[id];
+      if (!commandChanges) {
+        continue;
+      }
+      this._addClientCommand(id, commandChanges);
+    }
+
+    this._deleteUploadedCommands();
+
+    // Notify other devices that their own client collection changed
+    const idsToNotify = succeeded.reduce((acc, id) => {
+      if (id == this.localID) {
+        return acc;
+      }
+      const fxaDeviceId = this.getClientFxaDeviceId(id);
+      return fxaDeviceId ? acc.concat(fxaDeviceId) : acc;
+    }, []);
+    if (idsToNotify.length > 0) {
+      this._notifyCollectionChanged(idsToNotify);
+    }
+  },
+
+  // Push an FxA notification telling the given devices their `clients`
+  // collection changed.
+  _notifyCollectionChanged(ids) {
+    const message = {
+      version: 1,
+      command: "sync:collection_changed",
+      data: {
+        collections: ["clients"]
+      }
+    };
+    fxAccounts.notifyDevices(ids, message, NOTIFY_TAB_SENT_TTL_SECS);
+  },
+
+  _syncFinish() {
+    // Record histograms for our device types, and also write them to a pref
+    // so non-histogram telemetry (eg, UITelemetry) has easy access to them.
+    for (let [deviceType, count] of this.deviceTypes) {
+      let hid;
+      let prefName = this.name + ".devices.";
+      switch (deviceType) {
+        case "desktop":
+          hid = "WEAVE_DEVICE_COUNT_DESKTOP";
+          prefName += "desktop";
+          break;
+        case "mobile":
+          hid = "WEAVE_DEVICE_COUNT_MOBILE";
+          prefName += "mobile";
+          break;
+        default:
+          this._log.warn(`Unexpected deviceType "${deviceType}" recording device telemetry.`);
+          continue;
+      }
+      Services.telemetry.getHistogramById(hid).add(count);
+      Svc.Prefs.set(prefName, count);
+    }
+    SyncEngine.prototype._syncFinish.call(this);
+  },
+
+  _reconcile: function _reconcile(item) {
+    // Every incoming record is reconciled, so we use this to track the
+    // contents of the collection on the server.
+    this._incomingClients[item.id] = item.modified;
+
+    if (!this._store.itemExists(item.id)) {
+      // New client: apply the record normally.
+      return true;
+    }
+    // Clients are synced unconditionally, so we'll always have new records.
+    // Unfortunately, this will cause the scheduler to use the immediate sync
+    // interval for the multi-device case, instead of the active interval. We
+    // work around this by updating the record during reconciliation, and
+    // returning false to indicate that the record doesn't need to be applied
+    // later.
+    this._store.update(item);
+    return false;
+  },
+
+  // Treat reset the same as wiping for locally cached clients
+  _resetClient() {
+    this._wipeClient();
+  },
+
+ _wipeClient: function _wipeClient() {
+ SyncEngine.prototype._resetClient.call(this);
+ delete this.localCommands;
+ this._store.wipe();
+ const logRemoveError = err => this._log.warn("Could not delete json file", err);
+ Async.promiseSpinningly(
+ Utils.jsonRemove("commands", this).catch(logRemoveError)
+ .then(Utils.jsonRemove("commands-syncing", this).catch(logRemoveError))
+ );
+ },
+
+ removeClientData: function removeClientData() {
+ let res = this.service.resource(this.engineURL + "/" + this.localID);
+ res.delete();
+ },
+
+ // Override the default behavior to delete bad records from the server.
+ handleHMACMismatch: function handleHMACMismatch(item, mayRetry) {
+ this._log.debug("Handling HMAC mismatch for " + item.id);
+
+ let base = SyncEngine.prototype.handleHMACMismatch.call(this, item, mayRetry);
+ if (base != SyncEngine.kRecoveryStrategy.error)
+ return base;
+
+ // It's a bad client record. Save it to be deleted at the end of the sync.
+ this._log.debug("Bad client record detected. Scheduling for deletion.");
+ this._deleteId(item.id);
+
+ // Neither try again nor error; we're going to delete it.
+ return SyncEngine.kRecoveryStrategy.ignore;
+ },
+
+  /**
+   * A hash of valid commands that the client knows about. The key is a command
+   * and the value is a hash containing information about the command such as
+   * number of arguments and description.
+   */
+  _commands: {
+    resetAll: { args: 0, desc: "Clear temporary local data for all engines" },
+    resetEngine: { args: 1, desc: "Clear temporary local data for engine" }, // args: [engineName]
+    wipeAll: { args: 0, desc: "Delete all client data for all engines" },
+    wipeEngine: { args: 1, desc: "Delete all client data for engine" }, // args: [engineName]
+    logout: { args: 0, desc: "Log out client" },
+    displayURI: { args: 3, desc: "Instruct a client to display a URI" }, // args: [uri, senderId, title]
+  },
+
+  /**
+   * Sends a command+args pair to a specific client.
+   *
+   * @param command Command string
+   * @param args Array of arguments/data for command
+   * @param clientId Client to send command to
+   * @throws Error if clientId is not a known remote client, or if the
+   *         cached record for that client is marked stale.
+   */
+  _sendCommandToClient: function sendCommandToClient(command, args, clientId) {
+    this._log.trace("Sending " + command + " to " + clientId);
+
+    let client = this._store._remoteClients[clientId];
+    if (!client) {
+      throw new Error("Unknown remote client ID: '" + clientId + "'.");
+    }
+    if (client.stale) {
+      throw new Error("Stale remote client ID: '" + clientId + "'.");
+    }
+
+    let action = {
+      command: command,
+      args: args,
+    };
+
+    this._log.trace("Client " + clientId + " got a new action: " + [command, args]);
+    // Queue the command for upload and mark the target's record changed so
+    // the next sync pushes it.
+    this._addClientCommand(clientId, action);
+    this._tracker.addChangedID(clientId);
+  },
+
+ /**
+ * Check if the local client has any remote commands and perform them.
+ *
+ * @return false to abort sync
+ */
+ processIncomingCommands: function processIncomingCommands() {
+ return this._notify("clients:process-commands", "", function() {
+ if (!this.localCommands) {
+ return true;
+ }
+
+ const clearedCommands = this._readCommands()[this.localID];
+ const commands = this.localCommands.filter(command => !hasDupeCommand(clearedCommands, command));
+
+ let URIsToDisplay = [];
+ // Process each command in order.
+ for (let rawCommand of commands) {
+ let {command, args} = rawCommand;
+ this._log.debug("Processing command: " + command + "(" + args + ")");
+
+ let engines = [args[0]];
+ switch (command) {
+ case "resetAll":
+ engines = null;
+ // Fallthrough
+ case "resetEngine":
+ this.service.resetClient(engines);
+ break;
+ case "wipeAll":
+ engines = null;
+ // Fallthrough
+ case "wipeEngine":
+ this.service.wipeClient(engines);
+ break;
+ case "logout":
+ this.service.logout();
+ return false;
+ case "displayURI":
+ let [uri, clientId, title] = args;
+ URIsToDisplay.push({ uri, clientId, title });
+ break;
+ default:
+ this._log.debug("Received an unknown command: " + command);
+ break;
+ }
+ // Add the command to the "cleared" commands list
+ this._addClientCommand(this.localID, rawCommand)
+ }
+ this._tracker.addChangedID(this.localID);
+
+ if (URIsToDisplay.length) {
+ this._handleDisplayURIs(URIsToDisplay);
+ }
+
+ return true;
+ })();
+ },
+
+ /**
+ * Validates and sends a command to a client or all clients.
+ *
+ * Calling this does not actually sync the command data to the server. If the
+ * client already has the command/args pair, it won't receive a duplicate
+ * command.
+ *
+ * @param command
+ * Command to invoke on remote clients
+ * @param args
+ * Array of arguments to give to the command
+ * @param clientId
+ * Client ID to send command to. If undefined, send to all remote
+ * clients.
+ */
+ sendCommand: function sendCommand(command, args, clientId) {
+ let commandData = this._commands[command];
+ // Don't send commands that we don't know about.
+ if (!commandData) {
+ this._log.error("Unknown command to send: " + command);
+ return;
+ }
+ // Don't send a command with the wrong number of arguments.
+ else if (!args || args.length != commandData.args) {
+ this._log.error("Expected " + commandData.args + " args for '" +
+ command + "', but got " + args);
+ return;
+ }
+
+ if (clientId) {
+ this._sendCommandToClient(command, args, clientId);
+ } else {
+ for (let [id, record] of Object.entries(this._store._remoteClients)) {
+ if (!record.stale) {
+ this._sendCommandToClient(command, args, id);
+ }
+ }
+ }
+ },
+
+  /**
+   * Send a URI to another client for display.
+   *
+   * A side effect is the score is increased dramatically to incur an
+   * immediate sync.
+   *
+   * If an unknown client ID is specified, sendCommand() will throw an
+   * Error object.
+   *
+   * @param uri
+   *        URI (as a string) to send and display on the remote client
+   * @param clientId
+   *        ID of client to send the command to. If not defined, will be sent
+   *        to all remote clients.
+   * @param title
+   *        Title of the page being sent.
+   */
+  sendURIToClientForDisplay: function sendURIToClientForDisplay(uri, clientId, title) {
+    this._log.info("Sending URI to client: " + uri + " -> " +
+                   clientId + " (" + title + ")");
+    // args layout matches _commands.displayURI: [uri, senderId, title].
+    this.sendCommand("displayURI", [uri, this.localID, title], clientId);
+
+    // Bump the score enough to trigger an immediate sync.
+    this._tracker.score += SCORE_INCREMENT_XLARGE;
+  },
+
+  /**
+   * Handle a bunch of received 'displayURI' commands.
+   *
+   * Interested parties should observe the "weave:engine:clients:display-uris"
+   * topic. The callback will receive an array as the subject parameter
+   * containing objects with the following keys:
+   *
+   *   uri       URI (string) that is requested for display.
+   *   clientId  ID of client that sent the command.
+   *   title     Title of page that loaded URI (likely) corresponds to.
+   *
+   * The 'data' parameter to the callback will not be defined.
+   *
+   * @param uris
+   *        An array containing URI objects to display
+   * @param uris[].uri
+   *        String URI that was received
+   * @param uris[].clientId
+   *        ID of client that sent URI
+   * @param uris[].title
+   *        String title of page that URI corresponds to. Older clients may not
+   *        send this.
+   */
+  _handleDisplayURIs: function _handleDisplayURIs(uris) {
+    // The array itself is passed as the notification subject, unmodified.
+    Svc.Obs.notify("weave:engine:clients:display-uris", uris);
+  },
+
+  // Forget a remote client: drop its cached record from the store and
+  // discard any pending tracker change for its ID.
+  _removeRemoteClient(id) {
+    delete this._store._remoteClients[id];
+    this._tracker.removeChangedID(id);
+  },
+};
+
+function ClientStore(name, engine) {
+ Store.call(this, name, engine);
+}
+ClientStore.prototype = {
+ __proto__: Store.prototype,
+
+ _remoteClients: {},
+
+ create(record) {
+ this.update(record);
+ },
+
+ update: function update(record) {
+ if (record.id == this.engine.localID) {
+ // Only grab commands from the server; local name/type always wins
+ this.engine.localCommands = record.commands;
+ } else {
+ this._remoteClients[record.id] = record.cleartext;
+ }
+ },
+
+ createRecord: function createRecord(id, collection) {
+ let record = new ClientsRec(collection, id);
+
+ const commandsChanges = this.engine._currentlySyncingCommands ?
+ this.engine._currentlySyncingCommands[id] :
+ [];
+
+ // Package the individual components into a record for the local client
+ if (id == this.engine.localID) {
+ let cb = Async.makeSpinningCallback();
+ fxAccounts.getDeviceId().then(id => cb(null, id), cb);
+ try {
+ record.fxaDeviceId = cb.wait();
+ } catch(error) {
+ this._log.warn("failed to get fxa device id", error);
+ }
+ record.name = this.engine.localName;
+ record.type = this.engine.localType;
+ record.version = Services.appinfo.version;
+ record.protocols = SUPPORTED_PROTOCOL_VERSIONS;
+
+ // Substract the commands we recorded that we've already executed
+ if (commandsChanges && commandsChanges.length &&
+ this.engine.localCommands && this.engine.localCommands.length) {
+ record.commands = this.engine.localCommands.filter(command => !hasDupeCommand(commandsChanges, command));
+ }
+
+ // Optional fields.
+ record.os = Services.appinfo.OS; // "Darwin"
+ record.appPackage = Services.appinfo.ID;
+ record.application = this.engine.brandName // "Nightly"
+
+ // We can't compute these yet.
+ // record.device = ""; // Bug 1100723
+ // record.formfactor = ""; // Bug 1100722
+ } else {
+ record.cleartext = this._remoteClients[id];
+
+ // Add the commands we have to send
+ if (commandsChanges && commandsChanges.length) {
+ const recordCommands = record.cleartext.commands || [];
+ const newCommands = commandsChanges.filter(command => !hasDupeCommand(recordCommands, command));
+ record.cleartext.commands = recordCommands.concat(newCommands);
+ }
+
+ if (record.cleartext.stale) {
+ // It's almost certainly a logic error for us to upload a record we
+ // consider stale, so make log noise, but still remove the flag.
+ this._log.error(`Preparing to upload record ${id} that we consider stale`);
+ delete record.cleartext.stale;
+ }
+ }
+
+ return record;
+ },
+
+ itemExists(id) {
+ return id in this.getAllIDs();
+ },
+
+ getAllIDs: function getAllIDs() {
+ let ids = {};
+ ids[this.engine.localID] = true;
+ for (let id in this._remoteClients)
+ ids[id] = true;
+ return ids;
+ },
+
+ wipe: function wipe() {
+ this._remoteClients = {};
+ },
+};
+
+/**
+ * Tracker for the clients engine: watches the "client.name" pref while
+ * tracking is enabled and marks the local client record changed when the
+ * user renames this device.
+ */
+function ClientsTracker(name, engine) {
+  Tracker.call(this, name, engine);
+  Svc.Obs.add("weave:engine:start-tracking", this);
+  Svc.Obs.add("weave:engine:stop-tracking", this);
+}
+ClientsTracker.prototype = {
+  __proto__: Tracker.prototype,
+
+  // Whether we are currently observing the "client.name" pref.
+  _enabled: false,
+
+  observe: function observe(subject, topic, data) {
+    switch (topic) {
+      case "weave:engine:start-tracking":
+        if (!this._enabled) {
+          Svc.Prefs.observe("client.name", this);
+          this._enabled = true;
+        }
+        break;
+      case "weave:engine:stop-tracking":
+        if (this._enabled) {
+          Svc.Prefs.ignore("client.name", this);
+          this._enabled = false;
+        }
+        break;
+      case "nsPref:changed":
+        // The device was renamed; mark the local client record (identified
+        // by the "client.GUID" pref) changed and bump the score so a sync
+        // happens soon.
+        this._log.debug("client.name preference changed");
+        this.addChangedID(Svc.Prefs.get("client.GUID"));
+        this.score += SCORE_INCREMENT_XLARGE;
+        break;
+    }
+  }
+};
diff --git a/services/sync/modules/engines/extension-storage.js b/services/sync/modules/engines/extension-storage.js
new file mode 100644
index 000000000..f8f15b128
--- /dev/null
+++ b/services/sync/modules/engines/extension-storage.js
@@ -0,0 +1,277 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+"use strict";
+
+this.EXPORTED_SYMBOLS = ['ExtensionStorageEngine', 'EncryptionRemoteTransformer',
+ 'KeyRingEncryptionRemoteTransformer'];
+
+const {classes: Cc, interfaces: Ci, utils: Cu} = Components;
+
+Cu.import("resource://services-crypto/utils.js");
+Cu.import("resource://services-sync/constants.js");
+Cu.import("resource://services-sync/engines.js");
+Cu.import("resource://services-sync/keys.js");
+Cu.import("resource://services-sync/util.js");
+Cu.import("resource://services-common/async.js");
+XPCOMUtils.defineLazyModuleGetter(this, "ExtensionStorageSync",
+ "resource://gre/modules/ExtensionStorageSync.jsm");
+XPCOMUtils.defineLazyModuleGetter(this, "fxAccounts",
+ "resource://gre/modules/FxAccounts.jsm");
+XPCOMUtils.defineLazyModuleGetter(this, "Task",
+ "resource://gre/modules/Task.jsm");
+
+/**
+ * The Engine that manages syncing for the web extension "storage"
+ * API, and in particular ext.storage.sync.
+ *
+ * ext.storage.sync is implemented using Kinto, so it has mechanisms
+ * for syncing that we do not need to integrate in the Firefox Sync
+ * framework, so this is something of a stub.
+ */
+this.ExtensionStorageEngine = function ExtensionStorageEngine(service) {
+ SyncEngine.call(this, "Extension-Storage", service);
+};
+ExtensionStorageEngine.prototype = {
+ __proto__: SyncEngine.prototype,
+ _trackerObj: ExtensionStorageTracker,
+ // we don't need these since we implement our own sync logic
+ _storeObj: undefined,
+ _recordObj: undefined,
+
+ syncPriority: 10,
+ allowSkippedRecord: false,
+
+ _sync: function () {
+ return Async.promiseSpinningly(ExtensionStorageSync.syncAll());
+ },
+
+ get enabled() {
+ // By default, we sync extension storage if we sync addons. This
+ // lets us simplify the UX since users probably don't consider
+ // "extension preferences" a separate category of syncing.
+ // However, we also respect engine.extension-storage.force, which
+ // can be set to true or false, if a power user wants to customize
+ // the behavior despite the lack of UI.
+ const forced = Svc.Prefs.get("engine." + this.prefName + ".force", undefined);
+ if (forced !== undefined) {
+ return forced;
+ }
+ return Svc.Prefs.get("engine.addons", false);
+ },
+};
+
+/**
+ * Tracker for the extension-storage engine. It only bumps the sync score on
+ * "ext.storage.sync-changed" notifications; per-ID change tracking is
+ * deliberately stubbed out (see below).
+ */
+function ExtensionStorageTracker(name, engine) {
+  Tracker.call(this, name, engine);
+}
+ExtensionStorageTracker.prototype = {
+  __proto__: Tracker.prototype,
+
+  startTracking: function () {
+    Svc.Obs.add("ext.storage.sync-changed", this);
+  },
+
+  stopTracking: function () {
+    Svc.Obs.remove("ext.storage.sync-changed", this);
+  },
+
+  observe: function (subject, topic, data) {
+    Tracker.prototype.observe.call(this, subject, topic, data);
+
+    if (this.ignoreAll) {
+      return;
+    }
+
+    if (topic !== "ext.storage.sync-changed") {
+      return;
+    }
+
+    // Single adds, removes and changes are not so important on their
+    // own, so let's just increment score a bit.
+    this.score += SCORE_INCREMENT_MEDIUM;
+  },
+
+  // Override a bunch of methods which don't do anything for us.
+  // This is a performance hack: this engine never consults changed IDs
+  // (Kinto tracks changes itself), so skip the bookkeeping entirely.
+  saveChangedIDs: function() {
+  },
+  loadChangedIDs: function() {
+  },
+  ignoreID: function() {
+  },
+  unignoreID: function() {
+  },
+  addChangedID: function() {
+  },
+  removeChangedID: function() {
+  },
+  clearChangedIDs: function() {
+  },
+};
+
+/**
+ * Utility function to enforce an order of fields when computing an HMAC.
+ */
+function ciphertextHMAC(keyBundle, id, IV, ciphertext) {
+ const hasher = keyBundle.sha256HMACHasher;
+ return Utils.bytesAsHex(Utils.digestUTF8(id + IV + ciphertext, hasher));
+}
+
+/**
+ * A "remote transformer" that the Kinto library will use to
+ * encrypt/decrypt records when syncing.
+ *
+ * This is an "abstract base class". Subclass this and override
+ * getKeys() to use it.
+ */
+class EncryptionRemoteTransformer {
+  // Encrypt a record for upload. Returns a Promise resolving to
+  // {ciphertext, IV, hmac, id} (plus last_modified when present on the
+  // input). Throws if the record is already encrypted or has no id.
+  encode(record) {
+    const self = this;
+    return Task.spawn(function* () {
+      const keyBundle = yield self.getKeys();
+      if (record.ciphertext) {
+        throw new Error("Attempt to reencrypt??");
+      }
+      let id = record.id;
+      if (!record.id) {
+        throw new Error("Record ID is missing or invalid");
+      }
+
+      let IV = Svc.Crypto.generateRandomIV();
+      // The whole record (as JSON) is the plaintext; the HMAC binds
+      // id + IV + ciphertext (see ciphertextHMAC).
+      let ciphertext = Svc.Crypto.encrypt(JSON.stringify(record),
+                                          keyBundle.encryptionKeyB64, IV);
+      let hmac = ciphertextHMAC(keyBundle, id, IV, ciphertext);
+      const encryptedResult = {ciphertext, IV, hmac, id};
+      // last_modified lives outside the encrypted payload; carry it over.
+      if (record.hasOwnProperty("last_modified")) {
+        encryptedResult.last_modified = record.last_modified;
+      }
+      return encryptedResult;
+    });
+  }
+
+  // Decrypt a downloaded record. Verifies the HMAC before decrypting and
+  // checks that the decrypted payload's id matches the envelope's id.
+  decode(record) {
+    const self = this;
+    return Task.spawn(function* () {
+      if (!record.ciphertext) {
+        // This can happen for tombstones if a record is deleted.
+        if (record.deleted) {
+          return record;
+        }
+        throw new Error("No ciphertext: nothing to decrypt?");
+      }
+      const keyBundle = yield self.getKeys();
+      // Authenticate the encrypted blob with the expected HMAC
+      let computedHMAC = ciphertextHMAC(keyBundle, record.id, record.IV, record.ciphertext);
+
+      if (computedHMAC != record.hmac) {
+        Utils.throwHMACMismatch(record.hmac, computedHMAC);
+      }
+
+      // Handle invalid data here. Elsewhere we assume that cleartext is an object.
+      let cleartext = Svc.Crypto.decrypt(record.ciphertext,
+                                         keyBundle.encryptionKeyB64, record.IV);
+      let jsonResult = JSON.parse(cleartext);
+      if (!jsonResult || typeof jsonResult !== "object") {
+        throw new Error("Decryption failed: result is <" + jsonResult + ">, not an object.");
+      }
+
+      // Verify that the encrypted id matches the requested record's id.
+      // This should always be true, because we compute the HMAC over
+      // the original record's ID, and that was verified already (above).
+      if (jsonResult.id != record.id) {
+        throw new Error("Record id mismatch: " + jsonResult.id + " != " + record.id);
+      }
+
+      // As in encode(), last_modified is carried outside the payload.
+      if (record.hasOwnProperty("last_modified")) {
+        jsonResult.last_modified = record.last_modified;
+      }
+
+      return jsonResult;
+    });
+  }
+
+  /**
+   * Retrieve keys to use during encryption.
+   *
+   * Returns a Promise<KeyBundle>.
+   */
+  getKeys() {
+    throw new Error("override getKeys in a subclass");
+  }
+}
+// You can inject this (e.g. from tests) to substitute a different FxA service.
+EncryptionRemoteTransformer.prototype._fxaService = fxAccounts;
+
+/**
+ * An EncryptionRemoteTransformer that provides a keybundle derived
+ * from the user's kB, suitable for encrypting a keyring.
+ */
+class KeyRingEncryptionRemoteTransformer extends EncryptionRemoteTransformer {
+  // Derive the key bundle from the signed-in user's kB via HKDF:
+  // 64 bytes of key material, split into a 32-byte encryption key and a
+  // 32-byte HMAC key. Rejects if no user is signed in or kB is missing.
+  getKeys() {
+    const self = this;
+    return Task.spawn(function* () {
+      const user = yield self._fxaService.getSignedInUser();
+      // FIXME: we should permit this if the user is self-hosting
+      // their storage
+      if (!user) {
+        throw new Error("user isn't signed in to FxA; can't sync");
+      }
+
+      if (!user.kB) {
+        throw new Error("user doesn't have kB");
+      }
+
+      let kB = Utils.hexToBytes(user.kB);
+
+      let keyMaterial = CryptoUtils.hkdf(kB, undefined,
+                                       "identity.mozilla.com/picl/v1/chrome.storage.sync", 2*32);
+      let bundle = new BulkKeyBundle();
+      // [encryptionKey, hmacKey]
+      bundle.keyPair = [keyMaterial.slice(0, 32), keyMaterial.slice(32, 64)];
+      return bundle;
+    });
+  }
+  // Pass through the kbHash field from the unencrypted record. If
+  // encryption fails, we can use this to try to detect whether we are
+  // being compromised or if the record here was encoded with a
+  // different kB.
+  encode(record) {
+    const encodePromise = super.encode(record);
+    return Task.spawn(function* () {
+      const encoded = yield encodePromise;
+      encoded.kbHash = record.kbHash;
+      return encoded;
+    });
+  }
+
+  decode(record) {
+    const decodePromise = super.decode(record);
+    return Task.spawn(function* () {
+      try {
+        return yield decodePromise;
+      } catch (e) {
+        // An HMAC mismatch may mean the record was written under a different
+        // kB; compare kB hashes so we can raise the more precise error.
+        if (Utils.isHMACMismatch(e)) {
+          const currentKBHash = yield ExtensionStorageSync.getKBHash();
+          if (record.kbHash != currentKBHash) {
+            // Some other client encoded this with a kB that we don't
+            // have access to.
+            KeyRingEncryptionRemoteTransformer.throwOutdatedKB(currentKBHash, record.kbHash);
+          }
+        }
+        throw e;
+      }
+    });
+  }
+
+  // Generator and discriminator for KB-is-outdated exceptions.
+  static throwOutdatedKB(shouldBe, is) {
+    throw new Error(`kB hash on record is outdated: should be ${shouldBe}, is ${is}`);
+  }
+
+  static isOutdatedKB(exc) {
+    const kbMessage = "kB hash on record is outdated: ";
+    return exc && exc.message && exc.message.indexOf &&
+           (exc.message.indexOf(kbMessage) == 0);
+  }
+}
diff --git a/services/sync/modules/engines/forms.js b/services/sync/modules/engines/forms.js
new file mode 100644
index 000000000..43f79d4f7
--- /dev/null
+++ b/services/sync/modules/engines/forms.js
@@ -0,0 +1,305 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+this.EXPORTED_SYMBOLS = ['FormEngine', 'FormRec', 'FormValidator'];
+
+var Cc = Components.classes;
+var Ci = Components.interfaces;
+var Cu = Components.utils;
+
+Cu.import("resource://gre/modules/XPCOMUtils.jsm");
+Cu.import("resource://services-sync/engines.js");
+Cu.import("resource://services-sync/record.js");
+Cu.import("resource://services-common/async.js");
+Cu.import("resource://services-sync/util.js");
+Cu.import("resource://services-sync/constants.js");
+Cu.import("resource://services-sync/collection_validator.js");
+Cu.import("resource://gre/modules/Log.jsm");
+
+const FORMS_TTL = 3 * 365 * 24 * 60 * 60; // Three years in seconds.
+
+this.FormRec = function FormRec(collection, id) {
+ CryptoWrapper.call(this, collection, id);
+}
+FormRec.prototype = {
+ __proto__: CryptoWrapper.prototype,
+ _logName: "Sync.Record.Form",
+ ttl: FORMS_TTL
+};
+
+Utils.deferGetSet(FormRec, "cleartext", ["name", "value"]);
+
+
+var FormWrapper = {
+ _log: Log.repository.getLogger("Sync.Engine.Forms"),
+
+ _getEntryCols: ["fieldname", "value"],
+ _guidCols: ["guid"],
+
+ _promiseSearch: function(terms, searchData) {
+ return new Promise(resolve => {
+ let results = [];
+ let callbacks = {
+ handleResult(result) {
+ results.push(result);
+ },
+ handleCompletion(reason) {
+ resolve(results);
+ }
+ };
+ Svc.FormHistory.search(terms, searchData, callbacks);
+ })
+ },
+
+ // Do a "sync" search by spinning the event loop until it completes.
+ _searchSpinningly: function(terms, searchData) {
+ return Async.promiseSpinningly(this._promiseSearch(terms, searchData));
+ },
+
+ _updateSpinningly: function(changes) {
+ if (!Svc.FormHistory.enabled) {
+ return; // update isn't going to do anything.
+ }
+ let cb = Async.makeSpinningCallback();
+ let callbacks = {
+ handleCompletion: function(reason) {
+ cb();
+ }
+ };
+ Svc.FormHistory.update(changes, callbacks);
+ return cb.wait();
+ },
+
+ getEntry: function (guid) {
+ let results = this._searchSpinningly(this._getEntryCols, {guid: guid});
+ if (!results.length) {
+ return null;
+ }
+ return {name: results[0].fieldname, value: results[0].value};
+ },
+
+ getGUID: function (name, value) {
+ // Query for the provided entry.
+ let query = { fieldname: name, value: value };
+ let results = this._searchSpinningly(this._guidCols, query);
+ return results.length ? results[0].guid : null;
+ },
+
+ hasGUID: function (guid) {
+ // We could probably use a count function here, but searchSpinningly exists...
+ return this._searchSpinningly(this._guidCols, {guid: guid}).length != 0;
+ },
+
+ replaceGUID: function (oldGUID, newGUID) {
+ let changes = {
+ op: "update",
+ guid: oldGUID,
+ newGuid: newGUID,
+ }
+ this._updateSpinningly(changes);
+ }
+
+};
+
+this.FormEngine = function FormEngine(service) {
+ SyncEngine.call(this, "Forms", service);
+}
+FormEngine.prototype = {
+ __proto__: SyncEngine.prototype,
+ _storeObj: FormStore,
+ _trackerObj: FormTracker,
+ _recordObj: FormRec,
+ applyIncomingBatchSize: FORMS_STORE_BATCH_SIZE,
+
+ syncPriority: 6,
+
+ get prefName() {
+ return "history";
+ },
+
+ _findDupe: function _findDupe(item) {
+ return FormWrapper.getGUID(item.name, item.value);
+ }
+};
+
+function FormStore(name, engine) {
+ Store.call(this, name, engine);
+}
+FormStore.prototype = {
+ __proto__: Store.prototype,
+
+ _processChange: function (change) {
+ // If this._changes is defined, then we are applying a batch, so we
+ // can defer it.
+ if (this._changes) {
+ this._changes.push(change);
+ return;
+ }
+
+ // Otherwise we must handle the change synchronously, right now.
+ FormWrapper._updateSpinningly(change);
+ },
+
+ applyIncomingBatch: function (records) {
+ // We collect all the changes to be made then apply them all at once.
+ this._changes = [];
+ let failures = Store.prototype.applyIncomingBatch.call(this, records);
+ if (this._changes.length) {
+ FormWrapper._updateSpinningly(this._changes);
+ }
+ delete this._changes;
+ return failures;
+ },
+
+ getAllIDs: function () {
+ let results = FormWrapper._searchSpinningly(["guid"], [])
+ let guids = {};
+ for (let result of results) {
+ guids[result.guid] = true;
+ }
+ return guids;
+ },
+
+ changeItemID: function (oldID, newID) {
+ FormWrapper.replaceGUID(oldID, newID);
+ },
+
+ itemExists: function (id) {
+ return FormWrapper.hasGUID(id);
+ },
+
+ createRecord: function (id, collection) {
+ let record = new FormRec(collection, id);
+ let entry = FormWrapper.getEntry(id);
+ if (entry != null) {
+ record.name = entry.name;
+ record.value = entry.value;
+ } else {
+ record.deleted = true;
+ }
+ return record;
+ },
+
+ create: function (record) {
+ this._log.trace("Adding form record for " + record.name);
+ let change = {
+ op: "add",
+ fieldname: record.name,
+ value: record.value
+ };
+ this._processChange(change);
+ },
+
+ remove: function (record) {
+ this._log.trace("Removing form record: " + record.id);
+ let change = {
+ op: "remove",
+ guid: record.id
+ };
+ this._processChange(change);
+ },
+
+ update: function (record) {
+ this._log.trace("Ignoring form record update request!");
+ },
+
+ wipe: function () {
+ let change = {
+ op: "remove"
+ };
+ FormWrapper._updateSpinningly(change);
+ }
+};
+
+function FormTracker(name, engine) {
+ Tracker.call(this, name, engine);
+}
+FormTracker.prototype = {
+ __proto__: Tracker.prototype,
+
+ QueryInterface: XPCOMUtils.generateQI([
+ Ci.nsIObserver,
+ Ci.nsISupportsWeakReference]),
+
+ startTracking: function() {
+ Svc.Obs.add("satchel-storage-changed", this);
+ },
+
+ stopTracking: function() {
+ Svc.Obs.remove("satchel-storage-changed", this);
+ },
+
+ observe: function (subject, topic, data) {
+ Tracker.prototype.observe.call(this, subject, topic, data);
+ if (this.ignoreAll) {
+ return;
+ }
+ switch (topic) {
+ case "satchel-storage-changed":
+ if (data == "formhistory-add" || data == "formhistory-remove") {
+ let guid = subject.QueryInterface(Ci.nsISupportsString).toString();
+ this.trackEntry(guid);
+ }
+ break;
+ }
+ },
+
+ trackEntry: function (guid) {
+ this.addChangedID(guid);
+ this.score += SCORE_INCREMENT_MEDIUM;
+ },
+};
+
+
+class FormsProblemData extends CollectionProblemData {
+ getSummary() {
+ // We don't support syncing deleted form data, so "clientMissing" isn't a problem
+ return super.getSummary().filter(entry =>
+ entry.name !== "clientMissing");
+ }
+}
+
+/**
+ * Validator that compares local form-history entries against the server's
+ * "forms" collection, matching on id and the (name, value) pair.
+ */
+class FormValidator extends CollectionValidator {
+  constructor() {
+    super("forms", "id", ["name", "value"]);
+  }
+
+  emptyProblemData() {
+    return new FormsProblemData();
+  }
+
+  getClientItems() {
+    // Fetch every local entry with the columns needed for comparison.
+    return FormWrapper._promiseSearch(["guid", "fieldname", "value"], {});
+  }
+
+  // Map a local form-history row onto the shared comparison shape.
+  normalizeClientItem(item) {
+    return {
+      id: item.guid,
+      guid: item.guid,
+      name: item.fieldname,
+      fieldname: item.fieldname,
+      value: item.value,
+      original: item,
+    };
+  }
+
+  // Map a server record onto the comparison shape, resolving it to a local
+  // GUID when a matching (name, value) entry already exists locally.
+  normalizeServerItem(item) {
+    let res = Object.assign({
+      guid: item.id,
+      fieldname: item.name,
+      original: item,
+    }, item);
+    // Missing `name` or `value` causes the getGUID call to throw
+    if (item.name !== undefined && item.value !== undefined) {
+      let guid = FormWrapper.getGUID(item.name, item.value);
+      if (guid) {
+        res.guid = guid;
+        res.id = guid;
+        res.duped = true;
+      }
+    }
+
+    return res;
+  }
+} \ No newline at end of file
diff --git a/services/sync/modules/engines/history.js b/services/sync/modules/engines/history.js
new file mode 100644
index 000000000..307d484c1
--- /dev/null
+++ b/services/sync/modules/engines/history.js
@@ -0,0 +1,442 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+this.EXPORTED_SYMBOLS = ['HistoryEngine', 'HistoryRec'];
+
+var Cc = Components.classes;
+var Ci = Components.interfaces;
+var Cu = Components.utils;
+var Cr = Components.results;
+
+const HISTORY_TTL = 5184000; // 60 days
+
+Cu.import("resource://gre/modules/PlacesUtils.jsm", this);
+Cu.import("resource://gre/modules/XPCOMUtils.jsm");
+Cu.import("resource://services-common/async.js");
+Cu.import("resource://gre/modules/Log.jsm");
+Cu.import("resource://services-sync/constants.js");
+Cu.import("resource://services-sync/engines.js");
+Cu.import("resource://services-sync/record.js");
+Cu.import("resource://services-sync/util.js");
+
// A Sync record holding the visit history (URI, title, visits) for one page.
this.HistoryRec = function HistoryRec(collection, id) {
  CryptoWrapper.call(this, collection, id);
}
HistoryRec.prototype = {
  __proto__: CryptoWrapper.prototype,
  _logName: "Sync.Record.History",
  // History records expire from the server after HISTORY_TTL (60 days).
  ttl: HISTORY_TTL
};

// Expose the encrypted payload fields as lazily-defined plain properties.
Utils.deferGetSet(HistoryRec, "cleartext", ["histUri", "title", "visits"]);
+
+
// Sync engine for browsing history. Wires up the history-specific record,
// store, and tracker, and wraps incoming processing in a Places batch.
this.HistoryEngine = function HistoryEngine(service) {
  SyncEngine.call(this, "History", service);
}
HistoryEngine.prototype = {
  __proto__: SyncEngine.prototype,
  _recordObj: HistoryRec,
  _storeObj: HistoryStore,
  _trackerObj: HistoryTracker,
  downloadLimit: MAX_HISTORY_DOWNLOAD,
  applyIncomingBatchSize: HISTORY_STORE_BATCH_SIZE,

  syncPriority: 7,

  _processIncoming: function (newitems) {
    // We want to notify history observers that a batch operation is underway
    // so they don't do lots of work for each incoming record.
    let observers = PlacesUtils.history.getObservers();
    function notifyHistoryObservers(notification) {
      for (let observer of observers) {
        try {
          observer[notification]();
        } catch (ex) {
          // An observer failure must not abort the sync; ignore it.
        }
      }
    }
    notifyHistoryObservers("onBeginUpdateBatch");
    try {
      return SyncEngine.prototype._processIncoming.call(this, newitems);
    } finally {
      // Always close the batch, even if the base implementation threw.
      notifyHistoryObservers("onEndUpdateBatch");
    }
  },
};
+
// Store backing the history engine. Caches compiled SQL statements in
// this._stmts and finalizes them when Places shuts down.
function HistoryStore(name, engine) {
  Store.call(this, name, engine);

  // Explicitly nullify our references to our cached services so we don't leak
  Svc.Obs.add("places-shutdown", function() {
    for (let query in this._stmts) {
      // Bug fix: finalize each cached statement. The original read
      // `this._stmts` (the cache object itself), which has no finalize()
      // method, so shutdown threw and no statement was ever finalized.
      let stmt = this._stmts[query];
      stmt.finalize();
    }
    this._stmts = {};
  }, this);
}
HistoryStore.prototype = {
  __proto__: Store.prototype,

  // Lazily-resolved mozIAsyncHistory service, used for batched visit inserts.
  __asyncHistory: null,
  get _asyncHistory() {
    if (!this.__asyncHistory) {
      this.__asyncHistory = Cc["@mozilla.org/browser/history;1"]
                              .getService(Ci.mozIAsyncHistory);
    }
    return this.__asyncHistory;
  },

  // Cache of compiled async statements keyed by SQL text; finalized on
  // "places-shutdown" (see the constructor).
  _stmts: {},
  _getStmt: function(query) {
    if (query in this._stmts) {
      return this._stmts[query];
    }

    this._log.trace("Creating SQL statement: " + query);
    let db = PlacesUtils.history.QueryInterface(Ci.nsPIPlacesDatabase)
                        .DBConnection;
    return this._stmts[query] = db.createAsyncStatement(query);
  },

  // Assigns a GUID to the moz_places row matching a URL.
  get _setGUIDStm() {
    return this._getStmt(
      "UPDATE moz_places " +
      "SET guid = :guid " +
      "WHERE url_hash = hash(:page_url) AND url = :page_url");
  },

  // Some helper functions to handle GUIDs
  // Set (or mint, when `guid` is falsy) the GUID for `uri`; returns the GUID.
  setGUID: function setGUID(uri, guid) {
    // Accept either an nsIURI or a plain URL string.
    uri = uri.spec ? uri.spec : uri;

    if (!guid) {
      guid = Utils.makeGUID();
    }

    let stmt = this._setGUIDStm;
    stmt.params.guid = guid;
    stmt.params.page_url = uri;
    // Spins the event loop until the async statement completes.
    Async.querySpinningly(stmt);
    return guid;
  },

  get _guidStm() {
    return this._getStmt(
      "SELECT guid " +
      "FROM moz_places " +
      "WHERE url_hash = hash(:page_url) AND url = :page_url");
  },
  _guidCols: ["guid"],

  // Look up the GUID for a URI. Returns undefined for unknown pages unless
  // `create` is true, in which case a fresh GUID is assigned and returned.
  GUIDForUri: function GUIDForUri(uri, create) {
    let stm = this._guidStm;
    stm.params.page_url = uri.spec ? uri.spec : uri;

    // Use the existing GUID if it exists
    let result = Async.querySpinningly(stm, this._guidCols)[0];
    if (result && result.guid)
      return result.guid;

    // Give the uri a GUID if it doesn't have one
    if (create)
      return this.setGUID(uri);
  },

  // The 20 most recent visits for a URL, newest first.
  get _visitStm() {
    return this._getStmt(`/* do not warn (bug 599936) */
      SELECT visit_type type, visit_date date
      FROM moz_historyvisits
      JOIN moz_places h ON h.id = place_id
      WHERE url_hash = hash(:url) AND url = :url
      ORDER BY date DESC LIMIT 20`);
  },
  _visitCols: ["date", "type"],

  get _urlStm() {
    return this._getStmt(
      "SELECT url, title, frecency " +
      "FROM moz_places " +
      "WHERE guid = :guid");
  },
  _urlCols: ["url", "title", "frecency"],

  // Recently-visited URLs ordered by frecency, bounded by :max_results.
  get _allUrlStm() {
    return this._getStmt(
      "SELECT url " +
      "FROM moz_places " +
      "WHERE last_visit_date > :cutoff_date " +
      "ORDER BY frecency DESC " +
      "LIMIT :max_results");
  },
  _allUrlCols: ["url"],

  // See bug 320831 for why we use SQL here
  _getVisits: function HistStore__getVisits(uri) {
    this._visitStm.params.url = uri;
    return Async.querySpinningly(this._visitStm, this._visitCols);
  },

  // See bug 468732 for why we use SQL here
  // Returns {url, title, frecency} for a GUID, or undefined if unknown.
  _findURLByGUID: function HistStore__findURLByGUID(guid) {
    this._urlStm.params.guid = guid;
    return Async.querySpinningly(this._urlStm, this._urlCols)[0];
  },

  changeItemID: function HStore_changeItemID(oldID, newID) {
    // NOTE(review): throws (TypeError on undefined) if oldID doesn't resolve
    // to a local page — presumably callers guarantee existence; confirm.
    this.setGUID(this._findURLByGUID(oldID).url, newID);
  },


  // Map of GUID -> URL for uploadable pages, minting GUIDs where needed.
  getAllIDs: function HistStore_getAllIDs() {
    // Only get places visited within the last 30 days (30*24*60*60*1000ms)
    this._allUrlStm.params.cutoff_date = (Date.now() - 2592000000) * 1000;
    this._allUrlStm.params.max_results = MAX_HISTORY_UPLOAD;

    let urls = Async.querySpinningly(this._allUrlStm, this._allUrlCols);
    let self = this;
    return urls.reduce(function(ids, item) {
      ids[self.GUIDForUri(item.url, true)] = item.url;
      return ids;
    }, {});
  },

  // Apply a batch of incoming records; returns the IDs of records that failed.
  applyIncomingBatch: function applyIncomingBatch(records) {
    let failed = [];

    // Convert incoming records to mozIPlaceInfo objects. Some records can be
    // ignored or handled directly, so we're rewriting the array in-place.
    let i, k;
    for (i = 0, k = 0; i < records.length; i++) {
      let record = records[k] = records[i];
      let shouldApply;

      // This is still synchronous I/O for now.
      try {
        if (record.deleted) {
          // Consider using nsIBrowserHistory::removePages() here.
          this.remove(record);
          // No further processing needed. Remove it from the list.
          shouldApply = false;
        } else {
          shouldApply = this._recordToPlaceInfo(record);
        }
      } catch (ex) {
        // Shutdown must propagate; any other failure just marks this record.
        if (Async.isShutdownException(ex)) {
          throw ex;
        }
        failed.push(record.id);
        shouldApply = false;
      }

      if (shouldApply) {
        k += 1;
      }
    }
    records.length = k; // truncate array

    // Nothing to do.
    if (!records.length) {
      return failed;
    }

    // Hand the surviving records to Places in one async call, spinning until
    // completion so this method stays synchronous for its callers.
    let updatePlacesCallback = {
      handleResult: function handleResult() {},
      handleError: function handleError(resultCode, placeInfo) {
        failed.push(placeInfo.guid);
      },
      handleCompletion: Async.makeSyncCallback()
    };
    this._asyncHistory.updatePlaces(records, updatePlacesCallback);
    Async.waitForSyncCallback(updatePlacesCallback.handleCompletion);
    return failed;
  },

  /**
   * Converts a Sync history record to a mozIPlaceInfo.
   *
   * Throws if an invalid record is encountered (invalid URI, etc.),
   * returns true if the record is to be applied, false otherwise
   * (no visits to add, etc.),
   */
  _recordToPlaceInfo: function _recordToPlaceInfo(record) {
    // Sort out invalid URIs and ones Places just simply doesn't want.
    record.uri = Utils.makeURI(record.histUri);
    if (!record.uri) {
      this._log.warn("Attempted to process invalid URI, skipping.");
      // NOTE(review): throws a bare string rather than an Error; the only
      // caller routes it through Async.isShutdownException and records a
      // failure, so this is tolerated — but an Error would carry a stack.
      throw "Invalid URI in record";
    }

    if (!Utils.checkGUID(record.id)) {
      this._log.warn("Encountered record with invalid GUID: " + record.id);
      return false;
    }
    record.guid = record.id;

    if (!PlacesUtils.history.canAddURI(record.uri)) {
      this._log.trace("Ignoring record " + record.id + " with URI "
                      + record.uri.spec + ": can't add this URI.");
      return false;
    }

    // We dupe visits by date and type. So an incoming visit that has
    // the same timestamp and type as a local one won't get applied.
    // To avoid creating new objects, we rewrite the query result so we
    // can simply check for containment below.
    let curVisits = this._getVisits(record.histUri);
    let i, k;
    for (i = 0; i < curVisits.length; i++) {
      curVisits[i] = curVisits[i].date + "," + curVisits[i].type;
    }

    // Walk through the visits, make sure we have sound data, and eliminate
    // dupes. The latter is done by rewriting the array in-place.
    for (i = 0, k = 0; i < record.visits.length; i++) {
      let visit = record.visits[k] = record.visits[i];

      if (!visit.date || typeof visit.date != "number") {
        this._log.warn("Encountered record with invalid visit date: "
                       + visit.date);
        continue;
      }

      if (!visit.type ||
          !Object.values(PlacesUtils.history.TRANSITIONS).includes(visit.type)) {
        this._log.warn("Encountered record with invalid visit type: " +
                       visit.type + "; ignoring.");
        continue;
      }

      // Dates need to be integers.
      visit.date = Math.round(visit.date);

      if (curVisits.indexOf(visit.date + "," + visit.type) != -1) {
        // Visit is a dupe, don't increment 'k' so the element will be
        // overwritten.
        continue;
      }

      // Rename to the property names mozIAsyncHistory.updatePlaces expects.
      visit.visitDate = visit.date;
      visit.transitionType = visit.type;
      k += 1;
    }
    record.visits.length = k; // truncate array

    // No update if there aren't any visits to apply.
    // mozIAsyncHistory::updatePlaces() wants at least one visit.
    // In any case, the only thing we could change would be the title
    // and that shouldn't change without a visit.
    if (!record.visits.length) {
      this._log.trace("Ignoring record " + record.id + " with URI "
                      + record.uri.spec + ": no visits to add.");
      return false;
    }

    return true;
  },

  // Remove the page identified by record.id from local history (no-op if
  // it's already gone).
  remove: function HistStore_remove(record) {
    let page = this._findURLByGUID(record.id);
    if (page == null) {
      this._log.debug("Page already removed: " + record.id);
      return;
    }

    let uri = Utils.makeURI(page.url);
    PlacesUtils.history.removePage(uri);
    this._log.trace("Removed page: " + [record.id, page.url, page.title]);
  },

  itemExists: function HistStore_itemExists(id) {
    return !!this._findURLByGUID(id);
  },

  // Build the outgoing record for `id`; marks it deleted if the page is
  // no longer in local history.
  createRecord: function createRecord(id, collection) {
    let foo = this._findURLByGUID(id);
    let record = new HistoryRec(collection, id);
    if (foo) {
      record.histUri = foo.url;
      record.title = foo.title;
      // Frecency doubles as the server-side sort index.
      record.sortindex = foo.frecency;
      record.visits = this._getVisits(record.histUri);
    } else {
      record.deleted = true;
    }

    return record;
  },

  // Clear all local history, spinning until the async clear() resolves.
  wipe: function HistStore_wipe() {
    let cb = Async.makeSyncCallback();
    PlacesUtils.history.clear().then(result => {cb(null, result)}, err => {cb(err)});
    return Async.waitForSyncCallback(cb);
  }
};
+
// Tracks Places history changes (visits, removals, clears) so the engine
// knows when and what to sync.
function HistoryTracker(name, engine) {
  Tracker.call(this, name, engine);
}
HistoryTracker.prototype = {
  __proto__: Tracker.prototype,

  startTracking: function() {
    this._log.info("Adding Places observer.");
    // Second argument: hold the observer weakly (we implement
    // nsISupportsWeakReference below).
    PlacesUtils.history.addObserver(this, true);
  },

  stopTracking: function() {
    this._log.info("Removing Places observer.");
    PlacesUtils.history.removeObserver(this);
  },

  QueryInterface: XPCOMUtils.generateQI([
    Ci.nsINavHistoryObserver,
    Ci.nsISupportsWeakReference
  ]),

  // Shared handler for deletion notifications: track the affected GUID and
  // bump the score, unless we're ignoring events or the page merely expired.
  onDeleteAffectsGUID: function (uri, guid, reason, source, increment) {
    if (this.ignoreAll || reason == Ci.nsINavHistoryObserver.REASON_EXPIRED) {
      return;
    }
    this._log.trace(source + ": " + uri.spec + ", reason " + reason);
    // addChangedID returns true only when the ID wasn't already tracked.
    if (this.addChangedID(guid)) {
      this.score += increment;
    }
  },

  onDeleteVisits: function (uri, visitTime, guid, reason) {
    this.onDeleteAffectsGUID(uri, guid, reason, "onDeleteVisits", SCORE_INCREMENT_SMALL);
  },

  onDeleteURI: function (uri, guid, reason) {
    this.onDeleteAffectsGUID(uri, guid, reason, "onDeleteURI", SCORE_INCREMENT_XLARGE);
  },

  onVisit: function (uri, vid, time, session, referrer, trans, guid) {
    if (this.ignoreAll) {
      this._log.trace("ignoreAll: ignoring visit for " + guid);
      return;
    }

    this._log.trace("onVisit: " + uri.spec);
    if (this.addChangedID(guid)) {
      this.score += SCORE_INCREMENT_SMALL;
    }
  },

  onClearHistory: function () {
    this._log.trace("onClearHistory");
    // Note that we're going to trigger a sync, but none of the cleared
    // pages are tracked, so the deletions will not be propagated.
    // See Bug 578694.
    this.score += SCORE_INCREMENT_XLARGE;
  },

  // Remaining nsINavHistoryObserver notifications are irrelevant to sync.
  onBeginUpdateBatch: function () {},
  onEndUpdateBatch: function () {},
  onPageChanged: function () {},
  onTitleChanged: function () {},
  onBeforeDeleteURI: function () {},
};
diff --git a/services/sync/modules/engines/passwords.js b/services/sync/modules/engines/passwords.js
new file mode 100644
index 000000000..51db49a0a
--- /dev/null
+++ b/services/sync/modules/engines/passwords.js
@@ -0,0 +1,371 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+this.EXPORTED_SYMBOLS = ['PasswordEngine', 'LoginRec', 'PasswordValidator'];
+
+var {classes: Cc, interfaces: Ci, utils: Cu} = Components;
+
+Cu.import("resource://services-sync/record.js");
+Cu.import("resource://services-sync/constants.js");
+Cu.import("resource://services-sync/collection_validator.js");
+Cu.import("resource://services-sync/engines.js");
+Cu.import("resource://services-sync/util.js");
+Cu.import("resource://services-common/async.js");
+
// A Sync record representing one saved login.
this.LoginRec = function LoginRec(collection, id) {
  CryptoWrapper.call(this, collection, id);
}
LoginRec.prototype = {
  __proto__: CryptoWrapper.prototype,
  _logName: "Sync.Record.Login",
};

// Expose the encrypted payload fields as lazily-defined plain properties.
Utils.deferGetSet(LoginRec, "cleartext", [
    "hostname", "formSubmitURL",
    "httpRealm", "username", "password", "usernameField", "passwordField",
    "timeCreated", "timePasswordChanged",
  ]);
+
+
// Sync engine for saved logins.
this.PasswordEngine = function PasswordEngine(service) {
  SyncEngine.call(this, "Passwords", service);
}
PasswordEngine.prototype = {
  __proto__: SyncEngine.prototype,
  _storeObj: PasswordStore,
  _trackerObj: PasswordTracker,
  _recordObj: LoginRec,

  applyIncomingBatchSize: PASSWORDS_STORE_BATCH_SIZE,

  // After a sync, make a one-time attempt to delete Sync's own stored
  // credentials from the server; success is remembered in a pref.
  syncPriority: 2,

  _syncFinish: function () {
    SyncEngine.prototype._syncFinish.call(this);

    // Delete the Weave credentials from the server once.
    if (!Svc.Prefs.get("deletePwdFxA", false)) {
      try {
        let ids = [];
        for (let host of Utils.getSyncCredentialsHosts()) {
          for (let info of Services.logins.findLogins({}, host, "", "")) {
            ids.push(info.QueryInterface(Components.interfaces.nsILoginMetaInfo).guid);
          }
        }
        if (ids.length) {
          let coll = new Collection(this.engineURL, null, this.service);
          coll.ids = ids;
          let ret = coll.delete();
          this._log.debug("Delete result: " + ret);
          if (!ret.success && ret.status != 400) {
            // A non-400 failure means try again next time.
            return;
          }
        } else {
          this._log.debug("Didn't find any passwords to delete");
        }
        // If there were no ids to delete, or we succeeded, or got a 400,
        // record success.
        Svc.Prefs.set("deletePwdFxA", true);
        Svc.Prefs.reset("deletePwd"); // The old prefname we previously used.
      } catch (ex) {
        if (Async.isShutdownException(ex)) {
          throw ex;
        }
        // Best-effort cleanup: log and retry on a future sync.
        this._log.debug("Password deletes failed", ex);
      }
    }
  },

  // Return the GUID of a local login that duplicates `item` (same
  // hostname/form/realm/username; password ignored), or undefined.
  _findDupe: function (item) {
    let login = this._store._nsLoginInfoFromRecord(item);
    if (!login) {
      return;
    }

    let logins = Services.logins.findLogins({}, login.hostname, login.formSubmitURL, login.httpRealm);

    this._store._sleep(0); // Yield back to main thread after synchronous operation.

    // Look for existing logins that match the hostname, but ignore the password.
    for (let local of logins) {
      if (login.matches(local, true) && local instanceof Ci.nsILoginMetaInfo) {
        return local.guid;
      }
    }
  },
};
+
// Store backing the password engine; bridges LoginRec records to the
// login manager (Services.logins).
function PasswordStore(name, engine) {
  Store.call(this, name, engine);
  // Bound constructor for nsILoginInfo instances (init signature).
  this._nsLoginInfo = new Components.Constructor("@mozilla.org/login-manager/loginInfo;1", Ci.nsILoginInfo, "init");
}
PasswordStore.prototype = {
  __proto__: Store.prototype,

  // Fresh writable property bag, used to address logins by GUID.
  _newPropertyBag: function () {
    return Cc["@mozilla.org/hash-property-bag;1"].createInstance(Ci.nsIWritablePropertyBag2);
  },

  /**
   * Return an instance of nsILoginInfo (and, implicitly, nsILoginMetaInfo).
   * Returns null for malformed records (both formSubmitURL and httpRealm set).
   */
  _nsLoginInfoFromRecord: function (record) {
    function nullUndefined(x) {
      return (x == undefined) ? null : x;
    }

    if (record.formSubmitURL && record.httpRealm) {
      this._log.warn("Record " + record.id + " has both formSubmitURL and httpRealm. Skipping.");
      return null;
    }

    // Passing in "undefined" results in an empty string, which later
    // counts as a value. Explicitly `|| null` these fields according to JS
    // truthiness. Records with empty strings or null will be unmolested.
    let info = new this._nsLoginInfo(record.hostname,
                                     nullUndefined(record.formSubmitURL),
                                     nullUndefined(record.httpRealm),
                                     record.username,
                                     record.password,
                                     record.usernameField,
                                     record.passwordField);

    // Carry over sync metadata (GUID and timestamps) when present.
    info.QueryInterface(Ci.nsILoginMetaInfo);
    info.guid = record.id;
    if (record.timeCreated) {
      info.timeCreated = record.timeCreated;
    }
    if (record.timePasswordChanged) {
      info.timePasswordChanged = record.timePasswordChanged;
    }

    return info;
  },

  // Look up a stored login by GUID; returns null if none matches.
  _getLoginFromGUID: function (id) {
    let prop = this._newPropertyBag();
    prop.setPropertyAsAUTF8String("guid", id);

    let logins = Services.logins.searchLogins({}, prop);
    this._sleep(0); // Yield back to main thread after synchronous operation.

    if (logins.length > 0) {
      this._log.trace(logins.length + " items matching " + id + " found.");
      return logins[0];
    }

    this._log.trace("No items matching " + id + " found. Ignoring");
    return null;
  },

  // Map of GUID -> login for all local logins, excluding Sync's own.
  getAllIDs: function () {
    let items = {};
    let logins = Services.logins.getAllLogins({});

    for (let i = 0; i < logins.length; i++) {
      // Skip over Weave password/passphrase entries.
      let metaInfo = logins[i].QueryInterface(Ci.nsILoginMetaInfo);
      if (Utils.getSyncCredentialsHosts().has(metaInfo.hostname)) {
        continue;
      }

      items[metaInfo.guid] = metaInfo;
    }

    return items;
  },

  // Re-GUID a login; silently ignored if oldID is missing or newID is taken.
  changeItemID: function (oldID, newID) {
    this._log.trace("Changing item ID: " + oldID + " to " + newID);

    let oldLogin = this._getLoginFromGUID(oldID);
    if (!oldLogin) {
      this._log.trace("Can't change item ID: item doesn't exist");
      return;
    }
    if (this._getLoginFromGUID(newID)) {
      this._log.trace("Can't change item ID: new ID already in use");
      return;
    }

    let prop = this._newPropertyBag();
    prop.setPropertyAsAUTF8String("guid", newID);

    Services.logins.modifyLogin(oldLogin, prop);
  },

  itemExists: function (id) {
    return !!this._getLoginFromGUID(id);
  },

  // Build the outgoing record for `id`; marks it deleted if the login is gone.
  createRecord: function (id, collection) {
    let record = new LoginRec(collection, id);
    let login = this._getLoginFromGUID(id);

    if (!login) {
      record.deleted = true;
      return record;
    }

    record.hostname = login.hostname;
    record.formSubmitURL = login.formSubmitURL;
    record.httpRealm = login.httpRealm;
    record.username = login.username;
    record.password = login.password;
    record.usernameField = login.usernameField;
    record.passwordField = login.passwordField;

    // Optional fields.
    login.QueryInterface(Ci.nsILoginMetaInfo);
    record.timeCreated = login.timeCreated;
    record.timePasswordChanged = login.timePasswordChanged;

    return record;
  },

  // Apply an incoming record by adding a new login locally.
  create: function (record) {
    let login = this._nsLoginInfoFromRecord(record);
    if (!login) {
      return;
    }

    this._log.debug("Adding login for " + record.hostname);
    this._log.trace("httpRealm: " + JSON.stringify(login.httpRealm) + "; " +
                    "formSubmitURL: " + JSON.stringify(login.formSubmitURL));
    try {
      Services.logins.addLogin(login);
    } catch(ex) {
      // e.g. duplicate login; log and continue with the rest of the batch.
      this._log.debug(`Adding record ${record.id} resulted in exception`, ex);
    }
  },

  // Apply an incoming tombstone by removing the matching local login.
  remove: function (record) {
    this._log.trace("Removing login " + record.id);

    let loginItem = this._getLoginFromGUID(record.id);
    if (!loginItem) {
      this._log.trace("Asked to remove record that doesn't exist, ignoring");
      return;
    }

    Services.logins.removeLogin(loginItem);
  },

  // Apply an incoming change to an existing local login.
  update: function (record) {
    let loginItem = this._getLoginFromGUID(record.id);
    if (!loginItem) {
      this._log.debug("Skipping update for unknown item: " + record.hostname);
      return;
    }

    this._log.debug("Updating " + record.hostname);
    let newinfo = this._nsLoginInfoFromRecord(record);
    if (!newinfo) {
      return;
    }

    try {
      Services.logins.modifyLogin(loginItem, newinfo);
    } catch(ex) {
      this._log.debug(`Modifying record ${record.id} resulted in exception; not modifying`, ex);
    }
  },

  wipe: function () {
    Services.logins.removeAllLogins();
  },
};
+
// Tracks login-manager storage changes so the engine knows what to sync.
function PasswordTracker(name, engine) {
  Tracker.call(this, name, engine);
  Svc.Obs.add("weave:engine:start-tracking", this);
  Svc.Obs.add("weave:engine:stop-tracking", this);
}
PasswordTracker.prototype = {
  __proto__: Tracker.prototype,

  startTracking: function () {
    Svc.Obs.add("passwordmgr-storage-changed", this);
  },

  stopTracking: function () {
    Svc.Obs.remove("passwordmgr-storage-changed", this);
  },

  observe: function (subject, topic, data) {
    Tracker.prototype.observe.call(this, subject, topic, data);

    if (this.ignoreAll) {
      return;
    }

    // A single add, remove or change or removing all items
    // will trigger a sync for MULTI_DEVICE.
    switch (data) {
      case "modifyLogin":
        // For modifications the subject is a 2-element array; index 1 holds
        // the new login. Deliberately falls through to the shared handling.
        subject = subject.QueryInterface(Ci.nsIArray).queryElementAt(1, Ci.nsILoginMetaInfo);
        // Fall through.
      case "addLogin":
      case "removeLogin":
        // Skip over Weave password/passphrase changes.
        subject.QueryInterface(Ci.nsILoginMetaInfo).QueryInterface(Ci.nsILoginInfo);
        if (Utils.getSyncCredentialsHosts().has(subject.hostname)) {
          break;
        }

        this.score += SCORE_INCREMENT_XLARGE;
        this._log.trace(data + ": " + subject.guid);
        this.addChangedID(subject.guid);
        break;
      case "removeAllLogins":
        // No per-item GUIDs available; just schedule a sync.
        this._log.trace(data);
        this.score += SCORE_INCREMENT_XLARGE;
        break;
    }
  },
};
+
/**
 * Validates the local login collection against the server's.
 * Compares logins field-by-field using the common validator shape.
 */
class PasswordValidator extends CollectionValidator {
  constructor() {
    super("passwords", "id", [
      "hostname",
      "formSubmitURL",
      "httpRealm",
      "password",
      "passwordField",
      "username",
      "usernameField",
    ]);
  }

  /**
   * Return all local logins (as nsILoginMetaInfo), excluding Sync's own
   * stored credentials.
   */
  getClientItems() {
    let logins = Services.logins.getAllLogins({});
    // Idiom fix: terminate statements explicitly rather than relying on
    // automatic semicolon insertion.
    let syncHosts = Utils.getSyncCredentialsHosts();
    let result = logins.map(l => l.QueryInterface(Ci.nsILoginMetaInfo))
                       .filter(l => !syncHosts.has(l.hostname));
    return Promise.resolve(result);
  }

  // Map a local login onto the common comparison shape.
  normalizeClientItem(item) {
    return {
      id: item.guid,
      guid: item.guid,
      hostname: item.hostname,
      formSubmitURL: item.formSubmitURL,
      httpRealm: item.httpRealm,
      password: item.password,
      passwordField: item.passwordField,
      username: item.username,
      usernameField: item.usernameField,
      original: item,
    };
  }

  // Server records already use the comparison field names; just add `guid`.
  normalizeServerItem(item) {
    return Object.assign({ guid: item.id }, item);
  }
}
+
+
diff --git a/services/sync/modules/engines/prefs.js b/services/sync/modules/engines/prefs.js
new file mode 100644
index 000000000..9ceeb9ac6
--- /dev/null
+++ b/services/sync/modules/engines/prefs.js
@@ -0,0 +1,273 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+this.EXPORTED_SYMBOLS = ['PrefsEngine', 'PrefRec'];
+
+var Cc = Components.classes;
+var Ci = Components.interfaces;
+var Cu = Components.utils;
+
+const PREF_SYNC_PREFS_PREFIX = "services.sync.prefs.sync.";
+
+Cu.import("resource://services-sync/engines.js");
+Cu.import("resource://services-sync/record.js");
+Cu.import("resource://services-sync/util.js");
+Cu.import("resource://services-sync/constants.js");
+Cu.import("resource://services-common/utils.js");
+Cu.import("resource://gre/modules/LightweightThemeManager.jsm");
+Cu.import("resource://gre/modules/Preferences.jsm");
+
+const PREFS_GUID = CommonUtils.encodeBase64URL(Services.appinfo.ID);
+
// The single Sync record holding all synced preferences for this app
// (one WBO whose `value` maps pref names to values).
this.PrefRec = function PrefRec(collection, id) {
  CryptoWrapper.call(this, collection, id);
}
PrefRec.prototype = {
  __proto__: CryptoWrapper.prototype,
  _logName: "Sync.Record.Pref",
};

// Expose the encrypted `value` payload field as a lazily-defined property.
Utils.deferGetSet(PrefRec, "cleartext", ["value"]);
+
+
// Sync engine for preferences. All prefs live in one record (PREFS_GUID).
this.PrefsEngine = function PrefsEngine(service) {
  SyncEngine.call(this, "Prefs", service);
}
PrefsEngine.prototype = {
  __proto__: SyncEngine.prototype,
  _storeObj: PrefStore,
  _trackerObj: PrefTracker,
  _recordObj: PrefRec,
  version: 2,

  syncPriority: 1,
  allowSkippedRecord: false,

  getChangedIDs: function () {
    // There is only one record and no per-pref conflict resolution, so a
    // real timestamp is unnecessary.
    let changed = {};
    if (this._tracker.modified) {
      changed[PREFS_GUID] = 0;
    }
    return changed;
  },

  _wipeClient: function () {
    SyncEngine.prototype._wipeClient.call(this);
    // Remember the wipe so the next reconcile accepts incoming data as-is.
    this.justWiped = true;
  },

  _reconcile: function (item) {
    // Right after a wipe there is no local data worth keeping; accept the
    // incoming record unconditionally.
    if (!this.justWiped) {
      return SyncEngine.prototype._reconcile.call(this, item);
    }
    this.justWiped = false;
    return true;
  }
};
+
+
// Store backing the prefs engine; reads and writes the pref branch.
function PrefStore(name, engine) {
  Store.call(this, name, engine);
  // Drop the cached Preferences wrapper at shutdown so we don't leak it.
  Svc.Obs.add("profile-before-change", function () {
    this.__prefs = null;
  }, this);
}
PrefStore.prototype = {
  __proto__: Store.prototype,

  // Lazily-created Preferences wrapper; nulled on profile shutdown.
  __prefs: null,
  get _prefs() {
    if (!this.__prefs) {
      this.__prefs = new Preferences();
    }
    return this.__prefs;
  },

  /**
   * Return candidate pref names: every pref that has a
   * "services.sync.prefs.sync.<name>" control pref, plus those control prefs
   * themselves (so the choice of what to sync is itself synced).
   */
  _getSyncPrefs: function () {
    let syncPrefs = Cc["@mozilla.org/preferences-service;1"]
                      .getService(Ci.nsIPrefService)
                      .getBranch(PREF_SYNC_PREFS_PREFIX)
                      .getChildList("", {});
    // Also sync preferences that determine which prefs get synced.
    let controlPrefs = syncPrefs.map(pref => PREF_SYNC_PREFS_PREFIX + pref);
    return controlPrefs.concat(syncPrefs);
  },

  // A pref is synced if it is a control pref, or its control pref is true.
  _isSynced: function (pref) {
    return pref.startsWith(PREF_SYNC_PREFS_PREFIX) ||
           this._prefs.get(PREF_SYNC_PREFS_PREFIX + pref, false);
  },

  /**
   * Collect the values of all synced prefs into a plain object.
   * Missing and default-valued prefs are represented as null.
   */
  _getAllPrefs: function () {
    let values = {};
    for (let pref of this._getSyncPrefs()) {
      if (this._isSynced(pref)) {
        // Missing and default prefs get the null value.
        values[pref] = this._prefs.isSet(pref) ? this._prefs.get(pref, null) : null;
      }
    }
    return values;
  },

  // Apply (or clear, when themeID is falsy) the selected lightweight theme.
  _updateLightWeightTheme (themeID) {
    let themeObject = null;
    if (themeID) {
      themeObject = LightweightThemeManager.getUsedTheme(themeID);
    }
    LightweightThemeManager.currentTheme = themeObject;
  },

  /**
   * Apply an incoming set of pref values.
   *
   * @param values  Object mapping pref names to values; null means the pref
   *                has gone missing remotely and is reset locally.
   */
  _setAllPrefs: function (values) {
    let selectedThemeIDPref = "lightweightThemes.selectedThemeID";
    let selectedThemeIDBefore = this._prefs.get(selectedThemeIDPref, null);
    let selectedThemeIDAfter = selectedThemeIDBefore;

    // Update 'services.sync.prefs.sync.foo.pref' before 'foo.pref', otherwise
    // _isSynced returns false when 'foo.pref' doesn't exist (e.g., on a new
    // device). Bug fix: the original passed a one-argument arrow function to
    // sort(), an inconsistent comparator whose resulting order is
    // implementation-defined; compare both elements so control prefs
    // reliably sort first.
    let prefs = Object.keys(values).sort((a, b) =>
      b.startsWith(PREF_SYNC_PREFS_PREFIX) - a.startsWith(PREF_SYNC_PREFS_PREFIX));
    for (let pref of prefs) {
      if (!this._isSynced(pref)) {
        continue;
      }

      let value = values[pref];

      switch (pref) {
        // Some special prefs we don't want to set directly.
        case selectedThemeIDPref:
          selectedThemeIDAfter = value;
          break;

        // default is to just set the pref
        default:
          if (value == null) {
            // Pref has gone missing. The best we can do is reset it.
            this._prefs.reset(pref);
          } else {
            try {
              this._prefs.set(pref, value);
            } catch(ex) {
              // e.g. type mismatch with an existing default; skip this pref.
              this._log.trace("Failed to set pref: " + pref + ": " + ex);
            }
          }
      }
    }

    // Notify the lightweight theme manager if the selected theme has changed.
    if (selectedThemeIDBefore != selectedThemeIDAfter) {
      this._updateLightWeightTheme(selectedThemeIDAfter);
    }
  },

  getAllIDs: function () {
    /* We store all prefs in just one WBO, with just one GUID */
    let allprefs = {};
    allprefs[PREFS_GUID] = true;
    return allprefs;
  },

  changeItemID: function (oldID, newID) {
    this._log.trace("PrefStore GUID is constant!");
  },

  itemExists: function (id) {
    return (id === PREFS_GUID);
  },

  // Build the single outgoing prefs record; any other id is a tombstone.
  createRecord: function (id, collection) {
    let record = new PrefRec(collection, id);

    if (id == PREFS_GUID) {
      record.value = this._getAllPrefs();
    } else {
      record.deleted = true;
    }

    return record;
  },

  create: function (record) {
    this._log.trace("Ignoring create request");
  },

  remove: function (record) {
    this._log.trace("Ignoring remove request");
  },

  // Apply incoming pref values; records for other apps are ignored.
  update: function (record) {
    // Silently ignore pref updates that are for other apps.
    if (record.id != PREFS_GUID)
      return;

    this._log.trace("Received pref updates, applying...");
    this._setAllPrefs(record.value);
  },

  wipe: function () {
    this._log.trace("Ignoring wipe request");
  }
};
+
// Tracks pref changes. Only a single "modified" flag is kept (persisted in
// a pref) since all synced prefs share one record.
function PrefTracker(name, engine) {
  Tracker.call(this, name, engine);
  Svc.Obs.add("profile-before-change", this);
  Svc.Obs.add("weave:engine:start-tracking", this);
  Svc.Obs.add("weave:engine:stop-tracking", this);
}
PrefTracker.prototype = {
  __proto__: Tracker.prototype,

  // Dirty flag persisted across restarts via a Sync pref.
  get modified() {
    return Svc.Prefs.get("engine.prefs.modified", false);
  },
  set modified(value) {
    Svc.Prefs.set("engine.prefs.modified", value);
  },

  loadChangedIDs: function loadChangedIDs() {
    // Don't read changed IDs from disk at start up.
  },

  clearChangedIDs: function clearChangedIDs() {
    this.modified = false;
  },

  // Lazily-created Preferences wrapper; dropped when tracking stops.
  __prefs: null,
  get _prefs() {
    if (!this.__prefs) {
      this.__prefs = new Preferences();
    }
    return this.__prefs;
  },

  startTracking: function () {
    // Observe the whole pref tree; relevance is filtered in observe().
    Services.prefs.addObserver("", this, false);
  },

  stopTracking: function () {
    this.__prefs = null;
    Services.prefs.removeObserver("", this);
  },

  observe: function (subject, topic, data) {
    Tracker.prototype.observe.call(this, subject, topic, data);

    switch (topic) {
      case "profile-before-change":
        this.stopTracking();
        break;
      case "nsPref:changed":
        // Trigger a sync for MULTI-DEVICE for a change that determines
        // which prefs are synced or a regular pref change.
        if (data.indexOf(PREF_SYNC_PREFS_PREFIX) == 0 ||
            this._prefs.get(PREF_SYNC_PREFS_PREFIX + data, false)) {
          this.score += SCORE_INCREMENT_XLARGE;
          this.modified = true;
          this._log.trace("Preference " + data + " changed");
        }
        break;
    }
  }
};
diff --git a/services/sync/modules/engines/tabs.js b/services/sync/modules/engines/tabs.js
new file mode 100644
index 000000000..45ece4a23
--- /dev/null
+++ b/services/sync/modules/engines/tabs.js
@@ -0,0 +1,393 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+this.EXPORTED_SYMBOLS = ["TabEngine", "TabSetRecord"];
+
+var {classes: Cc, interfaces: Ci, utils: Cu} = Components;
+
+const TABS_TTL = 604800; // 7 days.
+const TAB_ENTRIES_LIMIT = 25; // How many URLs to include in tab history.
+
+Cu.import("resource://gre/modules/Preferences.jsm");
+Cu.import("resource://gre/modules/XPCOMUtils.jsm");
+Cu.import("resource://services-sync/engines.js");
+Cu.import("resource://services-sync/engines/clients.js");
+Cu.import("resource://services-sync/record.js");
+Cu.import("resource://services-sync/util.js");
+Cu.import("resource://services-sync/constants.js");
+
+XPCOMUtils.defineLazyModuleGetter(this, "PrivateBrowsingUtils",
+ "resource://gre/modules/PrivateBrowsingUtils.jsm");
+
+// Wire record holding one client's tab list ({clientName, tabs}).
+// Expires server-side after TABS_TTL (7 days).
+this.TabSetRecord = function TabSetRecord(collection, id) {
+ CryptoWrapper.call(this, collection, id);
+}
+TabSetRecord.prototype = {
+ __proto__: CryptoWrapper.prototype,
+ _logName: "Sync.Record.Tabs",
+ ttl: TABS_TTL,
+};
+
+// Expose clientName/tabs as lazy getters/setters over the cleartext payload.
+Utils.deferGetSet(TabSetRecord, "cleartext", ["clientName", "tabs"]);
+
+
+// Sync engine that uploads this client's open tabs as a single record and
+// caches other clients' tab records for the "Synced Tabs" UI.
+this.TabEngine = function TabEngine(service) {
+ SyncEngine.call(this, "Tabs", service);
+
+ // Reset the client on every startup so that we fetch recent tabs.
+ this._resetClient();
+}
+TabEngine.prototype = {
+ __proto__: SyncEngine.prototype,
+ _storeObj: TabStore,
+ _trackerObj: TabTracker,
+ _recordObj: TabSetRecord,
+ // A flag to indicate if we have synced in this session. This is to help
+ // consumers of remote tabs that may want to differentiate between "I've an
+ // empty tab list as I haven't yet synced" vs "I've an empty tab list
+ // as there really are no tabs"
+ hasSyncedThisSession: false,
+
+ syncPriority: 3,
+
+ // The whole engine syncs as one record keyed by the local client ID, so
+ // the changed-ID set is either empty or that single ID.
+ getChangedIDs: function () {
+ // No need for a proper timestamp (no conflict resolution needed).
+ let changedIDs = {};
+ if (this._tracker.modified)
+ changedIDs[this.service.clientsEngine.localID] = 0;
+ return changedIDs;
+ },
+
+ // API for use by Sync UI code to give user choices of tabs to open.
+ getAllClients: function () {
+ return this._store._remoteClients;
+ },
+
+ getClientById: function (id) {
+ return this._store._remoteClients[id];
+ },
+
+ _resetClient: function () {
+ SyncEngine.prototype._resetClient.call(this);
+ this._store.wipe();
+ // Mark dirty so the next sync re-uploads our tab record.
+ this._tracker.modified = true;
+ this.hasSyncedThisSession = false;
+ },
+
+ removeClientData: function () {
+ // Delete only this client's record from the server collection.
+ let url = this.engineURL + "/" + this.service.clientsEngine.localID;
+ this.service.resource(url).delete();
+ },
+
+ /**
+ * Return a Set of open URLs.
+ */
+ getOpenURLs: function () {
+ let urls = new Set();
+ for (let entry of this._store.getAllTabs()) {
+ urls.add(entry.urlHistory[0]);
+ }
+ return urls;
+ },
+
+ _reconcile: function (item) {
+ // Skip our own record.
+ // TabStore.itemExists tests only against our local client ID.
+ if (this._store.itemExists(item.id)) {
+ this._log.trace("Ignoring incoming tab item because of its id: " + item.id);
+ return false;
+ }
+
+ return SyncEngine.prototype._reconcile.call(this, item);
+ },
+
+ _syncFinish() {
+ this.hasSyncedThisSession = true;
+ return SyncEngine.prototype._syncFinish.call(this);
+ },
+};
+
+
+// Store for the tabs engine: reads local open tabs from the session store
+// and keeps an in-memory cache of remote clients' tab records.
+function TabStore(name, engine) {
+ Store.call(this, name, engine);
+}
+TabStore.prototype = {
+ __proto__: Store.prototype,
+
+ // Only our own record "exists" locally; used by TabEngine._reconcile.
+ itemExists: function (id) {
+ return id == this.engine.service.clientsEngine.localID;
+ },
+
+ // Split out for testability; overridden in tests.
+ getWindowEnumerator: function () {
+ return Services.wm.getEnumerator("navigator:browser");
+ },
+
+ shouldSkipWindow: function (win) {
+ return win.closed ||
+ PrivateBrowsingUtils.isWindowPrivate(win);
+ },
+
+ // Session-store state for one tab, parsed from JSON.
+ getTabState: function (tab) {
+ return JSON.parse(Svc.Session.getTabState(tab));
+ },
+
+ // Collect {title, urlHistory, icon, lastUsed} for every syncable tab in
+ // every non-private window. When `filter` is truthy, URLs matching the
+ // engine.tabs.filteredUrls pref regex are dropped.
+ getAllTabs: function (filter) {
+ let filteredUrls = new RegExp(Svc.Prefs.get("engine.tabs.filteredUrls"), "i");
+
+ let allTabs = [];
+
+ let winEnum = this.getWindowEnumerator();
+ while (winEnum.hasMoreElements()) {
+ let win = winEnum.getNext();
+ if (this.shouldSkipWindow(win)) {
+ continue;
+ }
+
+ for (let tab of win.gBrowser.tabs) {
+ let tabState = this.getTabState(tab);
+
+ // Make sure there are history entries to look at.
+ if (!tabState || !tabState.entries.length) {
+ continue;
+ }
+
+ let acceptable = !filter ? (url) => url :
+ (url) => url && !filteredUrls.test(url);
+
+ let entries = tabState.entries;
+ let index = tabState.index;
+ let current = entries[index - 1];
+
+ // We ignore the tab completely if the current entry url is
+ // not acceptable (we need something accurate to open).
+ if (!acceptable(current.url)) {
+ continue;
+ }
+
+ if (current.url.length >= (MAX_UPLOAD_BYTES - 1000)) {
+ this._log.trace("Skipping over-long URL.");
+ continue;
+ }
+
+ // The session-store `index` is 1-based, so `entries[index - 1]` is the
+ // current page. Entries before it are previously visited URLs;
+ // entries after it are the 'forward' stack, which we can't represent
+ // in Sync, so we truncate at `index`.
+ let candidates = (entries.length == index) ?
+ entries :
+ entries.slice(0, index);
+
+ let urls = candidates.map((entry) => entry.url)
+ .filter(acceptable)
+ .reverse(); // Because Sync puts current at index 0, and history after.
+
+ // Truncate if necessary.
+ if (urls.length > TAB_ENTRIES_LIMIT) {
+ urls.length = TAB_ENTRIES_LIMIT;
+ }
+
+ allTabs.push({
+ title: current.title || "",
+ urlHistory: urls,
+ icon: tabState.image ||
+ (tabState.attributes && tabState.attributes.image) ||
+ "",
+ // lastAccessed is in milliseconds; Sync stores seconds.
+ lastUsed: Math.floor((tabState.lastAccessed || 0) / 1000),
+ });
+ }
+ }
+
+ return allTabs;
+ },
+
+ // Build this client's TabSetRecord, keeping the most recently used tabs
+ // that fit within the payload size budget.
+ createRecord: function (id, collection) {
+ let record = new TabSetRecord(collection, id);
+ record.clientName = this.engine.service.clientsEngine.localName;
+
+ // Sort tabs in descending-used order to grab the most recently used
+ let tabs = this.getAllTabs(true).sort(function (a, b) {
+ return b.lastUsed - a.lastUsed;
+ });
+
+ // Figure out how many tabs we can pack into a payload. Starting with a 28KB
+ // payload, we can estimate various overheads from encryption/JSON/WBO.
+ let size = JSON.stringify(tabs).length;
+ let origLength = tabs.length;
+ const MAX_TAB_SIZE = 20000;
+ if (size > MAX_TAB_SIZE) {
+ // Estimate a little more than the direct fraction to maximize packing
+ let cutoff = Math.ceil(tabs.length * MAX_TAB_SIZE / size);
+ tabs = tabs.slice(0, cutoff + 1);
+
+ // Keep dropping off the last entry until the data fits
+ while (JSON.stringify(tabs).length > MAX_TAB_SIZE)
+ tabs.pop();
+ }
+
+ this._log.trace("Created tabs " + tabs.length + " of " + origLength);
+ tabs.forEach(function (tab) {
+ this._log.trace("Wrapping tab: " + JSON.stringify(tab));
+ }, this);
+
+ record.tabs = tabs;
+ return record;
+ },
+
+ getAllIDs: function () {
+ // Don't report any tabs if all windows are in private browsing for
+ // first syncs.
+ let ids = {};
+ let allWindowsArePrivate = false;
+ let wins = Services.wm.getEnumerator("navigator:browser");
+ while (wins.hasMoreElements()) {
+ if (PrivateBrowsingUtils.isWindowPrivate(wins.getNext())) {
+ // Ensure that at least there is a private window.
+ allWindowsArePrivate = true;
+ } else {
+ // A non-private window exists, so we have something to sync: stop.
+ allWindowsArePrivate = false;
+ break;
+ }
+ }
+
+ // With permanent private browsing, private windows are still synced.
+ if (allWindowsArePrivate &&
+ !PrivateBrowsingUtils.permanentPrivateBrowsing) {
+ return ids;
+ }
+
+ ids[this.engine.service.clientsEngine.localID] = true;
+ return ids;
+ },
+
+ // Drop the in-memory cache of remote clients' tab records.
+ wipe: function () {
+ this._remoteClients = {};
+ },
+
+ // Cache an incoming remote record keyed by client ID, stamping the
+ // server-side modification time for the Synced Tabs UI.
+ create: function (record) {
+ this._log.debug("Adding remote tabs from " + record.clientName);
+ this._remoteClients[record.id] = Object.assign({}, record.cleartext, {
+ lastModified: record.modified
+ });
+ },
+
+ update: function (record) {
+ this._log.trace("Ignoring tab updates as local ones win");
+ },
+};
+
+
+// Tracker for the tabs engine: bumps the sync score on tab open/close/
+// select/pageshow events and on top-level navigation.
+function TabTracker(name, engine) {
+ Tracker.call(this, name, engine);
+ Svc.Obs.add("weave:engine:start-tracking", this);
+ Svc.Obs.add("weave:engine:stop-tracking", this);
+
+ // Make sure "this" pointer is always set correctly for event listeners.
+ this.onTab = Utils.bind2(this, this.onTab);
+ this._unregisterListeners = Utils.bind2(this, this._unregisterListeners);
+}
+TabTracker.prototype = {
+ __proto__: Tracker.prototype,
+
+ QueryInterface: XPCOMUtils.generateQI([Ci.nsIObserver]),
+
+ loadChangedIDs: function () {
+ // Don't read changed IDs from disk at start up.
+ },
+
+ clearChangedIDs: function () {
+ this.modified = false;
+ },
+
+ // DOM events that mark the local tab set as changed.
+ _topics: ["pageshow", "TabOpen", "TabClose", "TabSelect"],
+
+ _registerListenersForWindow: function (window) {
+ this._log.trace("Registering tab listeners in window");
+ for (let topic of this._topics) {
+ window.addEventListener(topic, this.onTab, false);
+ }
+ window.addEventListener("unload", this._unregisterListeners, false);
+ // If it's got a tab browser we can listen for things like navigation.
+ if (window.gBrowser) {
+ window.gBrowser.addProgressListener(this);
+ }
+ },
+
+ // "unload" handler: clean up the listeners added above.
+ _unregisterListeners: function (event) {
+ this._unregisterListenersForWindow(event.target);
+ },
+
+ _unregisterListenersForWindow: function (window) {
+ this._log.trace("Removing tab listeners in window");
+ window.removeEventListener("unload", this._unregisterListeners, false);
+ for (let topic of this._topics) {
+ window.removeEventListener(topic, this.onTab, false);
+ }
+ if (window.gBrowser) {
+ window.gBrowser.removeProgressListener(this);
+ }
+ },
+
+ startTracking: function () {
+ // Watch for new windows, and hook every browser window already open.
+ Svc.Obs.add("domwindowopened", this);
+ let wins = Services.wm.getEnumerator("navigator:browser");
+ while (wins.hasMoreElements()) {
+ this._registerListenersForWindow(wins.getNext());
+ }
+ },
+
+ stopTracking: function () {
+ Svc.Obs.remove("domwindowopened", this);
+ let wins = Services.wm.getEnumerator("navigator:browser");
+ while (wins.hasMoreElements()) {
+ this._unregisterListenersForWindow(wins.getNext());
+ }
+ },
+
+ observe: function (subject, topic, data) {
+ Tracker.prototype.observe.call(this, subject, topic, data);
+
+ switch (topic) {
+ case "domwindowopened":
+ let onLoad = () => {
+ subject.removeEventListener("load", onLoad, false);
+ // Only register after the window is done loading to avoid unloads.
+ this._registerListenersForWindow(subject);
+ };
+
+ // Add tab listeners now that a window has opened.
+ subject.addEventListener("load", onLoad, false);
+ break;
+ }
+ },
+
+ onTab: function (event) {
+ // Ignore events from private windows unless all browsing is private.
+ if (event.originalTarget.linkedBrowser) {
+ let browser = event.originalTarget.linkedBrowser;
+ if (PrivateBrowsingUtils.isBrowserPrivate(browser) &&
+ !PrivateBrowsingUtils.permanentPrivateBrowsing) {
+ this._log.trace("Ignoring tab event from private browsing.");
+ return;
+ }
+ }
+
+ this._log.trace("onTab event: " + event.type);
+ this.modified = true;
+
+ // For page shows, bump the score 10% of the time, emulating a partial
+ // score. We don't want to sync too frequently. For all other page
+ // events, always bump the score.
+ if (event.type != "pageshow" || Math.random() < .1) {
+ this.score += SCORE_INCREMENT_SMALL;
+ }
+ },
+
+ // web progress listeners.
+ onLocationChange: function (webProgress, request, location, flags) {
+ // We only care about top-level location changes which are not in the same
+ // document.
+ if (webProgress.isTopLevel &&
+ ((flags & Ci.nsIWebProgressListener.LOCATION_CHANGE_SAME_DOCUMENT) == 0)) {
+ this.modified = true;
+ }
+ },
+};
diff --git a/services/sync/modules/identity.js b/services/sync/modules/identity.js
new file mode 100644
index 000000000..b4da8c0bb
--- /dev/null
+++ b/services/sync/modules/identity.js
@@ -0,0 +1,605 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+"use strict";
+
+this.EXPORTED_SYMBOLS = ["IdentityManager"];
+
+var {classes: Cc, interfaces: Ci, utils: Cu, results: Cr} = Components;
+
+Cu.import("resource://gre/modules/XPCOMUtils.jsm");
+Cu.import("resource://gre/modules/Promise.jsm");
+Cu.import("resource://services-sync/constants.js");
+Cu.import("resource://gre/modules/Log.jsm");
+Cu.import("resource://services-sync/util.js");
+Cu.import("resource://services-common/async.js");
+
+// Lazy import to prevent unnecessary load on startup.
+for (let symbol of ["BulkKeyBundle", "SyncKeyBundle"]) {
+ XPCOMUtils.defineLazyModuleGetter(this, symbol,
+ "resource://services-sync/keys.js",
+ symbol);
+}
+
+/**
+ * Manages "legacy" identity and authentication for Sync.
+ * See browserid_identity for the Firefox Accounts based identity manager.
+ *
+ * The following entities are managed:
+ *
+ * account - The main Sync/services account. This is typically an email
+ * address.
+ * username - A normalized version of your account. This is what's
+ * transmitted to the server.
+ * basic password - UTF-8 password used for authenticating when using HTTP
+ * basic authentication.
+ * sync key - The main encryption key used by Sync.
+ * sync key bundle - A representation of your sync key.
+ *
+ * When changes are made to entities that are stored in the password manager
+ * (basic password, sync key), those changes are merely staged. To commit them
+ * to the password manager, you'll need to call persistCredentials().
+ *
+ * This type also manages authenticating Sync's network requests. Sync's
+ * network code calls into getRESTRequestAuthenticator and
+ * getResourceAuthenticator (depending on the network layer being used). Each
+ * returns a function which can be used to add authentication information to an
+ * outgoing request.
+ *
+ * In theory, this type supports arbitrary identity and authentication
+ * mechanisms. You can add support for them by monkeypatching the global
+ * instance of this type. Specifically, you'll need to redefine the
+ * aforementioned network code functions to do whatever your authentication
+ * mechanism needs them to do. In addition, you may wish to install custom
+ * functions to support your API. Although, that is certainly not required.
+ * If you do monkeypatch, please be advised that Sync expects the core
+ * attributes to have values. You will need to carry at least account and
+ * username forward. If you do not wish to support one of the built-in
+ * authentication mechanisms, you'll probably want to redefine currentAuthState
+ * and any other function that involves the built-in functionality.
+ */
+this.IdentityManager = function IdentityManager() {
+ this._log = Log.repository.getLogger("Sync.Identity");
+ this._log.Level = Log.Level[Svc.Prefs.get("log.logger.identity")];
+
+ this._basicPassword = null;
+ this._basicPasswordAllowLookup = true;
+ this._basicPasswordUpdated = false;
+ this._syncKey = null;
+ this._syncKeyAllowLookup = true;
+ this._syncKeySet = false;
+ this._syncKeyBundle = null;
+}
+IdentityManager.prototype = {
+ _log: null,
+
+ // Staged basic-auth password and its lookup/dirty bookkeeping.
+ _basicPassword: null,
+ _basicPasswordAllowLookup: true,
+ _basicPasswordUpdated: false,
+
+ _syncKey: null,
+ _syncKeyAllowLookup: true,
+ // NOTE(review): persistCredentials() and the syncKey setter use
+ // _syncKeyUpdated, not _syncKeySet; this flag appears unused — confirm.
+ _syncKeySet: false,
+
+ // SyncKeyBundle derived from _syncKey; regenerated by the syncKey setter.
+ _syncKeyBundle: null,
+
+ /**
+ * Initialize the identity provider.
+ */
+ initialize: function() {
+ // Nothing to do for this identity provider.
+ },
+
+ finalize: function() {
+ // Nothing to do for this identity provider.
+ },
+
+ /**
+ * Called whenever Service.logout() is called.
+ */
+ logout: function() {
+ // nothing to do for this identity provider.
+ },
+
+ /**
+ * Ensure the user is logged in. Returns a promise that resolves when
+ * the user is logged in, or is rejected if the login attempt has failed.
+ */
+ ensureLoggedIn: function() {
+ // nothing to do for this identity provider
+ return Promise.resolve();
+ },
+
+ get account() {
+ return Svc.Prefs.get("account", this.username);
+ },
+
+ /**
+ * Sets the active account name.
+ *
+ * This should almost always be called in favor of setting username, as
+ * username is derived from account.
+ *
+ * Changing the account name has the side-effect of wiping out stored
+ * credentials. Keep in mind that persistCredentials() will need to be called
+ * to flush the changes to disk.
+ *
+ * Set this value to null to clear out identity information.
+ */
+ set account(value) {
+ if (value) {
+ value = value.toLowerCase();
+ Svc.Prefs.set("account", value);
+ } else {
+ Svc.Prefs.reset("account");
+ }
+
+ this.username = this.usernameFromAccount(value);
+ },
+
+ get username() {
+ return Svc.Prefs.get("username", null);
+ },
+
+ /**
+ * Set the username value.
+ *
+ * Changing the username has the side-effect of wiping credentials.
+ */
+ set username(value) {
+ if (value) {
+ value = value.toLowerCase();
+
+ if (value == this.username) {
+ return;
+ }
+
+ Svc.Prefs.set("username", value);
+ } else {
+ Svc.Prefs.reset("username");
+ }
+
+ // If we change the username, we interpret this as a major change event
+ // and wipe out the credentials.
+ this._log.info("Username changed. Removing stored credentials.");
+ this.resetCredentials();
+ },
+
+ /**
+ * Resets/Drops all credentials we hold for the current user.
+ */
+ resetCredentials: function() {
+ this.basicPassword = null;
+ this.resetSyncKey();
+ },
+
+ /**
+ * Resets/Drops the sync key we hold for the current user.
+ */
+ resetSyncKey: function() {
+ this.syncKey = null;
+ // syncKeyBundle cleared as a result of setting syncKey.
+ },
+
+ /**
+ * Obtains the HTTP Basic auth password.
+ *
+ * Returns a string if set or null if it is not set.
+ */
+ get basicPassword() {
+ if (this._basicPasswordAllowLookup) {
+ // We need a username to find the credentials.
+ let username = this.username;
+ if (!username) {
+ return null;
+ }
+
+ for (let login of this._getLogins(PWDMGR_PASSWORD_REALM)) {
+ if (login.username.toLowerCase() == username) {
+ // It should already be UTF-8 encoded, but we don't take any chances.
+ this._basicPassword = Utils.encodeUTF8(login.password);
+ }
+ }
+
+ this._basicPasswordAllowLookup = false;
+ }
+
+ return this._basicPassword;
+ },
+
+ /**
+ * Set the HTTP basic password to use.
+ *
+ * Changes will not persist unless persistSyncCredentials() is called.
+ */
+ set basicPassword(value) {
+ // Wiping out value.
+ if (!value) {
+ this._log.info("Basic password has no value. Removing.");
+ this._basicPassword = null;
+ this._basicPasswordUpdated = true;
+ this._basicPasswordAllowLookup = false;
+ return;
+ }
+
+ let username = this.username;
+ if (!username) {
+ throw new Error("basicPassword cannot be set before username.");
+ }
+
+ this._log.info("Basic password being updated.");
+ this._basicPassword = Utils.encodeUTF8(value);
+ this._basicPasswordUpdated = true;
+ },
+
+ /**
+ * Obtain the Sync Key.
+ *
+ * This returns a 26 character "friendly" Base32 encoded string on success or
+ * null if no Sync Key could be found.
+ *
+ * If the Sync Key hasn't been set in this session, this will look in the
+ * password manager for the sync key.
+ */
+ get syncKey() {
+ if (this._syncKeyAllowLookup) {
+ let username = this.username;
+ if (!username) {
+ return null;
+ }
+
+ for (let login of this._getLogins(PWDMGR_PASSPHRASE_REALM)) {
+ if (login.username.toLowerCase() == username) {
+ this._syncKey = login.password;
+ }
+ }
+
+ this._syncKeyAllowLookup = false;
+ }
+
+ return this._syncKey;
+ },
+
+ /**
+ * Set the active Sync Key.
+ *
+ * If being set to null, the Sync Key and its derived SyncKeyBundle are
+ * removed. However, the Sync Key won't be deleted from the password manager
+ * until persistSyncCredentials() is called.
+ *
+ * If a value is provided, it should be a 26 or 32 character "friendly"
+ * Base32 string for which Utils.isPassphrase() returns true.
+ *
+ * A side-effect of setting the Sync Key is that a SyncKeyBundle is
+ * generated. For historical reasons, this will silently error out if the
+ * value is not a proper Sync Key (!Utils.isPassphrase()). This should be
+ * fixed in the future (once service.js is more sane) to throw if the passed
+ * value is not valid.
+ */
+ set syncKey(value) {
+ if (!value) {
+ this._log.info("Sync Key has no value. Deleting.");
+ this._syncKey = null;
+ this._syncKeyBundle = null;
+ this._syncKeyUpdated = true;
+ return;
+ }
+
+ if (!this.username) {
+ throw new Error("syncKey cannot be set before username.");
+ }
+
+ this._log.info("Sync Key being updated.");
+ this._syncKey = value;
+
+ // Clear any cached Sync Key Bundle and regenerate it.
+ this._syncKeyBundle = null;
+ let bundle = this.syncKeyBundle;
+
+ this._syncKeyUpdated = true;
+ },
+
+ /**
+ * Obtain the active SyncKeyBundle.
+ *
+ * This returns a SyncKeyBundle representing a key pair derived from the
+ * Sync Key on success. If no Sync Key is present or if the Sync Key is not
+ * valid, this returns null.
+ *
+ * The SyncKeyBundle should be treated as immutable.
+ */
+ get syncKeyBundle() {
+ // We can't obtain a bundle without a username set.
+ if (!this.username) {
+ this._log.warn("Attempted to obtain Sync Key Bundle with no username set!");
+ return null;
+ }
+
+ if (!this.syncKey) {
+ this._log.warn("Attempted to obtain Sync Key Bundle with no Sync Key " +
+ "set!");
+ return null;
+ }
+
+ if (!this._syncKeyBundle) {
+ try {
+ this._syncKeyBundle = new SyncKeyBundle(this.username, this.syncKey);
+ } catch (ex) {
+ this._log.warn("Failed to create sync bundle", ex);
+ return null;
+ }
+ }
+
+ return this._syncKeyBundle;
+ },
+
+ /**
+ * The current state of the auth credentials.
+ *
+ * This essentially validates that enough credentials are available to use
+ * Sync.
+ */
+ get currentAuthState() {
+ if (!this.username) {
+ return LOGIN_FAILED_NO_USERNAME;
+ }
+
+ if (Utils.mpLocked()) {
+ return STATUS_OK;
+ }
+
+ if (!this.basicPassword) {
+ return LOGIN_FAILED_NO_PASSWORD;
+ }
+
+ if (!this.syncKey) {
+ return LOGIN_FAILED_NO_PASSPHRASE;
+ }
+
+ // If we have a Sync Key but no bundle, bundle creation failed, which
+ // implies a bad Sync Key.
+ if (!this.syncKeyBundle) {
+ return LOGIN_FAILED_INVALID_PASSPHRASE;
+ }
+
+ return STATUS_OK;
+ },
+
+ /**
+ * Verify the current auth state, unlocking the master-password if necessary.
+ *
+ * Returns a promise that resolves with the current auth state after
+ * attempting to unlock.
+ */
+ unlockAndVerifyAuthState: function() {
+ // Try to fetch the passphrase - this will prompt for MP unlock as a
+ // side-effect...
+ try {
+ this.syncKey;
+ } catch (ex) {
+ this._log.debug("Fetching passphrase threw " + ex +
+ "; assuming master password locked.");
+ return Promise.resolve(MASTER_PASSWORD_LOCKED);
+ }
+ return Promise.resolve(STATUS_OK);
+ },
+
+ /**
+ * Persist credentials to password store.
+ *
+ * When credentials are updated, they are changed in memory only. This will
+ * need to be called to save them to the underlying password store.
+ *
+ * If the password store is locked (e.g. if the master password hasn't been
+ * entered), this could throw an exception.
+ */
+ persistCredentials: function persistCredentials(force) {
+ if (this._basicPasswordUpdated || force) {
+ if (this._basicPassword) {
+ this._setLogin(PWDMGR_PASSWORD_REALM, this.username,
+ this._basicPassword);
+ } else {
+ for (let login of this._getLogins(PWDMGR_PASSWORD_REALM)) {
+ Services.logins.removeLogin(login);
+ }
+ }
+
+ this._basicPasswordUpdated = false;
+ }
+
+ if (this._syncKeyUpdated || force) {
+ if (this._syncKey) {
+ this._setLogin(PWDMGR_PASSPHRASE_REALM, this.username, this._syncKey);
+ } else {
+ for (let login of this._getLogins(PWDMGR_PASSPHRASE_REALM)) {
+ Services.logins.removeLogin(login);
+ }
+ }
+
+ this._syncKeyUpdated = false;
+ }
+
+ },
+
+ /**
+ * Deletes the Sync Key from the system.
+ */
+ deleteSyncKey: function deleteSyncKey() {
+ this.syncKey = null;
+ this.persistCredentials();
+ },
+
+ hasBasicCredentials: function hasBasicCredentials() {
+ // Because JavaScript.
+ return this.username && this.basicPassword && true;
+ },
+
+ /**
+ * Pre-fetches any information that might help with migration away from this
+ * identity. Called after every sync and is really just an optimization that
+ * allows us to avoid a network request for when we actually need the
+ * migration info.
+ */
+ prefetchMigrationSentinel: function(service) {
+ // Try and fetch the migration sentinel - it will end up in the recordManager
+ // cache.
+ try {
+ service.recordManager.get(service.storageURL + "meta/fxa_credentials");
+ } catch (ex) {
+ if (Async.isShutdownException(ex)) {
+ throw ex;
+ }
+ this._log.warn("Failed to pre-fetch the migration sentinel", ex);
+ }
+ },
+
+ /**
+ * Obtains the array of basic logins for the given realm from the login
+ * manager (Services.logins, nsILoginManager).
+ */
+ _getLogins: function _getLogins(realm) {
+ return Services.logins.findLogins({}, PWDMGR_HOST, null, realm);
+ },
+
+ /**
+ * Set a login in the password manager.
+ *
+ * This has the side-effect of deleting any other logins for the specified
+ * realm.
+ */
+ _setLogin: function _setLogin(realm, username, password) {
+ let exists = false;
+ for (let login of this._getLogins(realm)) {
+ if (login.username == username && login.password == password) {
+ exists = true;
+ } else {
+ this._log.debug("Pruning old login for " + username + " from " + realm);
+ Services.logins.removeLogin(login);
+ }
+ }
+
+ if (exists) {
+ return;
+ }
+
+ this._log.debug("Updating saved password for " + username + " in " +
+ realm);
+
+ let loginInfo = new Components.Constructor(
+ "@mozilla.org/login-manager/loginInfo;1", Ci.nsILoginInfo, "init");
+ let login = new loginInfo(PWDMGR_HOST, null, realm, username,
+ password, "", "");
+ Services.logins.addLogin(login);
+ },
+
+ /**
+ * Return credentials hosts for this identity only.
+ */
+ _getSyncCredentialsHosts: function() {
+ return Utils.getSyncCredentialsHostsLegacy();
+ },
+
+ /**
+ * Deletes Sync credentials from the password manager.
+ */
+ deleteSyncCredentials: function deleteSyncCredentials() {
+ for (let host of this._getSyncCredentialsHosts()) {
+ let logins = Services.logins.findLogins({}, host, "", "");
+ for (let login of logins) {
+ Services.logins.removeLogin(login);
+ }
+ }
+
+ // Wait until after store is updated in case it fails.
+ this._basicPassword = null;
+ this._basicPasswordAllowLookup = true;
+ this._basicPasswordUpdated = false;
+
+ this._syncKey = null;
+ // NOTE(review): this assigns the backing field directly, so the syncKey
+ // setter does not run and this._syncKeyBundle is NOT cleared here. That
+ // looks harmless because the syncKeyBundle getter returns null while
+ // syncKey is unset — confirm no caller reads _syncKeyBundle directly.
+ this._syncKeyAllowLookup = true;
+ this._syncKeyUpdated = false;
+ },
+
+ usernameFromAccount: function usernameFromAccount(value) {
+ // If we encounter characters not allowed by the API (as found for
+ // instance in an email address), hash the value.
+ if (value && value.match(/[^A-Z0-9._-]/i)) {
+ return Utils.sha1Base32(value.toLowerCase()).toLowerCase();
+ }
+
+ return value ? value.toLowerCase() : value;
+ },
+
+ /**
+ * Obtain a function to be used for adding auth to Resource HTTP requests.
+ */
+ getResourceAuthenticator: function getResourceAuthenticator() {
+ if (this.hasBasicCredentials()) {
+ return this._onResourceRequestBasic.bind(this);
+ }
+
+ return null;
+ },
+
+ /**
+ * Helper method to return an authenticator for basic Resource requests.
+ */
+ getBasicResourceAuthenticator:
+ function getBasicResourceAuthenticator(username, password) {
+
+ return function basicAuthenticator(resource) {
+ let value = "Basic " + btoa(username + ":" + password);
+ return {headers: {authorization: value}};
+ };
+ },
+
+ _onResourceRequestBasic: function _onResourceRequestBasic(resource) {
+ let value = "Basic " + btoa(this.username + ":" + this.basicPassword);
+ return {headers: {authorization: value}};
+ },
+
+ _onResourceRequestMAC: function _onResourceRequestMAC(resource, method) {
+ // TODO Get identifier and key from somewhere.
+ let identifier;
+ let key;
+ let result = Utils.computeHTTPMACSHA1(identifier, key, method, resource.uri);
+
+ return {headers: {authorization: result.header}};
+ },
+
+ /**
+ * Obtain a function to be used for adding auth to RESTRequest instances.
+ */
+ getRESTRequestAuthenticator: function getRESTRequestAuthenticator() {
+ if (this.hasBasicCredentials()) {
+ return this.onRESTRequestBasic.bind(this);
+ }
+
+ return null;
+ },
+
+ onRESTRequestBasic: function onRESTRequestBasic(request) {
+ let up = this.username + ":" + this.basicPassword;
+ request.setHeader("authorization", "Basic " + btoa(up));
+ },
+
+ createClusterManager: function(service) {
+ Cu.import("resource://services-sync/stages/cluster.js");
+ return new ClusterManager(service);
+ },
+
+ offerSyncOptions: function () {
+ // Do nothing for Sync 1.1.
+ return {accepted: true};
+ },
+
+ // Tell Sync what the login status should be if it saw a 401 fetching
+ // info/collections as part of login verification (typically immediately
+ // after login.)
+ // In our case it means an authoritative "password is incorrect".
+ loginStatusFromVerification404() {
+ return LOGIN_FAILED_LOGIN_REJECTED;
+ }
+
+};
diff --git a/services/sync/modules/jpakeclient.js b/services/sync/modules/jpakeclient.js
new file mode 100644
index 000000000..625dc91b6
--- /dev/null
+++ b/services/sync/modules/jpakeclient.js
@@ -0,0 +1,773 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+this.EXPORTED_SYMBOLS = ["JPAKEClient", "SendCredentialsController"];
+
+var {classes: Cc, interfaces: Ci, results: Cr, utils: Cu} = Components;
+
+Cu.import("resource://gre/modules/Log.jsm");
+Cu.import("resource://services-common/rest.js");
+Cu.import("resource://services-sync/constants.js");
+Cu.import("resource://services-sync/util.js");
+
+const REQUEST_TIMEOUT = 60; // 1 minute
+const KEYEXCHANGE_VERSION = 3;
+
+const JPAKE_SIGNERID_SENDER = "sender";
+const JPAKE_SIGNERID_RECEIVER = "receiver";
+const JPAKE_LENGTH_SECRET = 8;
+const JPAKE_LENGTH_CLIENTID = 256;
+const JPAKE_VERIFY_VALUE = "0123456789ABCDEF";
+
+
+/**
+ * Client to exchange encrypted data using the J-PAKE algorithm.
+ * The exchange between two clients of this type looks like this:
+ *
+ *
+ * Mobile Server Desktop
+ * ===================================================================
+ * |
+ * retrieve channel <---------------|
+ * generate random secret |
+ * show PIN = secret + channel | ask user for PIN
+ * upload Mobile's message 1 ------>|
+ * |----> retrieve Mobile's message 1
+ * |<----- upload Desktop's message 1
+ * retrieve Desktop's message 1 <---|
+ * upload Mobile's message 2 ------>|
+ * |----> retrieve Mobile's message 2
+ * | compute key
+ * |<----- upload Desktop's message 2
+ * retrieve Desktop's message 2 <---|
+ * compute key |
+ * encrypt known value ------------>|
+ * |-------> retrieve encrypted value
+ * | verify against local known value
+ *
+ * At this point Desktop knows whether the PIN was entered correctly.
+ * If it wasn't, Desktop deletes the session. If it was, the account
+ * setup can proceed. If Desktop doesn't yet have an account set up,
+ * it will keep the channel open and let the user connect to or
+ * create an account.
+ *
+ * | encrypt credentials
+ * |<------------- upload credentials
+ * retrieve credentials <-----------|
+ * verify HMAC |
+ * decrypt credentials |
+ * delete session ----------------->|
+ * start syncing |
+ *
+ *
+ * Create a client object like so:
+ *
+ * let client = new JPAKEClient(controller);
+ *
+ * The 'controller' object must implement the following methods:
+ *
+ * displayPIN(pin) -- Called when a PIN has been generated and is ready to
+ * be displayed to the user. Only called on the client where the pairing
+ * was initiated with 'receiveNoPIN()'.
+ *
+ * onPairingStart() -- Called when the pairing has started and messages are
+ * being sent back and forth over the channel. Only called on the client
+ * where the pairing was initiated with 'receiveNoPIN()'.
+ *
+ * onPaired() -- Called when the device pairing has been established and
+ * we're ready to send the credentials over. To do that, the controller
+ * must call 'sendAndComplete()' while the channel is active.
+ *
+ * onComplete(data) -- Called after transfer has been completed. On
+ * the sending side this is called with no parameter and as soon as the
+ * data has been uploaded. This does not mean the receiving side has
+ * actually retrieved them yet.
+ *
+ * onAbort(error) -- Called whenever an error is encountered. All errors lead
+ * to an abort and the process has to be started again on both sides.
+ *
+ * To start the data transfer on the receiving side, call
+ *
+ * client.receiveNoPIN();
+ *
+ * This will allocate a new channel on the server, generate a PIN, have it
+ * displayed and then do the transfer once the protocol has been completed
+ * with the sending side.
+ *
+ * To initiate the transfer from the sending side, call
+ *
+ * client.pairWithPIN(pin, true);
+ *
+ * Once the pairing has been established, the controller's 'onPaired()' method
+ * will be called. To then transmit the data, call
+ *
+ * client.sendAndComplete(data);
+ *
+ * To abort the process, call
+ *
+ * client.abort();
+ *
+ * Note that after completion or abort, the 'client' instance may not be reused.
+ * You will have to create a new one in case you'd like to restart the process.
+ */
+this.JPAKEClient = function JPAKEClient(controller) {
+  this.controller = controller;
+
+  this._log = Log.repository.getLogger("Sync.JPAKEClient");
+  this._log.level = Log.Level[Svc.Prefs.get(
+    "log.logger.service.jpakeclient", "Debug")];
+
+  // Server/polling parameters all come from prefs; _maxTries is later
+  // overridden per-phase by receiveNoPIN().
+  this._serverURL = Svc.Prefs.get("jpake.serverURL");
+  this._pollInterval = Svc.Prefs.get("jpake.pollInterval");
+  this._maxTries = Svc.Prefs.get("jpake.maxTries");
+  // Normalize the server URL so channel IDs can be appended directly.
+  if (this._serverURL.slice(-1) != "/") {
+    this._serverURL += "/";
+  }
+
+  this._jpake = Cc["@mozilla.org/services-crypto/sync-jpake;1"]
+                  .createInstance(Ci.nsISyncJPAKE);
+
+  this._setClientID();
+}
+JPAKEClient.prototype = {
+
+  // Async.chain sequences the callback-style steps below; each step receives
+  // a `callback` to invoke to advance the chain.
+  _chain: Async.chain,
+
+  /*
+   * Public API
+   */
+
+  /**
+   * Initiate pairing and receive data without providing a PIN. The PIN will
+   * be generated and passed on to the controller to be displayed to the user.
+   *
+   * This is typically called on mobile devices where typing is tedious.
+   */
+  receiveNoPIN: function receiveNoPIN() {
+    this._my_signerid = JPAKE_SIGNERID_RECEIVER;
+    this._their_signerid = JPAKE_SIGNERID_SENDER;
+
+    this._secret = this._createSecret();
+
+    // Allow a large number of tries first while we wait for the PIN
+    // to be entered on the other device.
+    this._maxTries = Svc.Prefs.get("jpake.firstMsgMaxTries");
+    this._chain(this._getChannel,
+                this._computeStepOne,
+                this._putStep,
+                this._getStep,
+                function(callback) {
+                  // We fetched the first response from the other client.
+                  // Notify controller of the pairing starting.
+                  Utils.nextTick(this.controller.onPairingStart,
+                                 this.controller);
+
+                  // Now we can switch back to the smaller timeout.
+                  this._maxTries = Svc.Prefs.get("jpake.maxTries");
+                  callback();
+                },
+                this._computeStepTwo,
+                this._putStep,
+                this._getStep,
+                this._computeFinal,
+                this._computeKeyVerification,
+                this._putStep,
+                function(callback) {
+                  // Allow longer time-out for the last message.
+                  this._maxTries = Svc.Prefs.get("jpake.lastMsgMaxTries");
+                  callback();
+                },
+                this._getStep,
+                this._decryptData,
+                this._complete)();
+  },
+
+  /**
+   * Initiate pairing based on the PIN entered by the user.
+   *
+   * This is typically called on desktop devices where typing is easier than
+   * on mobile.
+   *
+   * @param pin
+   *        12 character string (in human-friendly base32) containing the PIN
+   *        entered by the user.
+   * @param expectDelay
+   *        Flag that indicates that a significant delay between the pairing
+   *        and the sending should be expected. v2 and earlier of the protocol
+   *        did not allow for this and the pairing to a v2 or earlier client
+   *        will be aborted if this flag is 'true'.
+   */
+  pairWithPIN: function pairWithPIN(pin, expectDelay) {
+    this._my_signerid = JPAKE_SIGNERID_SENDER;
+    this._their_signerid = JPAKE_SIGNERID_RECEIVER;
+
+    // The PIN is secret + channel ID concatenated; split it back apart.
+    this._channel = pin.slice(JPAKE_LENGTH_SECRET);
+    this._channelURL = this._serverURL + this._channel;
+    this._secret = pin.slice(0, JPAKE_LENGTH_SECRET);
+
+    this._chain(this._computeStepOne,
+                this._getStep,
+                function (callback) {
+                  // Ensure that the other client can deal with a delay for
+                  // the last message if that's requested by the caller.
+                  if (!expectDelay) {
+                    return callback();
+                  }
+                  if (!this._incoming.version || this._incoming.version < 3) {
+                    return this.abort(JPAKE_ERROR_DELAYUNSUPPORTED);
+                  }
+                  return callback();
+                },
+                this._putStep,
+                this._computeStepTwo,
+                this._getStep,
+                this._putStep,
+                this._computeFinal,
+                this._getStep,
+                this._verifyPairing)();
+  },
+
+  /**
+   * Send data after a successful pairing.
+   *
+   * Only valid after _verifyPairing() has set this._paired and before the
+   * exchange is finished/aborted.
+   *
+   * @param obj
+   *        Object containing the data to send. It will be serialized as JSON.
+   */
+  sendAndComplete: function sendAndComplete(obj) {
+    if (!this._paired || this._finished) {
+      this._log.error("Can't send data, no active pairing!");
+      // NOTE(review): throws a bare string rather than an Error; callers that
+      // inspect the exception depend on this exact value.
+      throw "No active pairing!";
+    }
+    this._data = JSON.stringify(obj);
+    this._chain(this._encryptData,
+                this._putStep,
+                this._complete)();
+  },
+
+  /**
+   * Abort the current pairing. The channel on the server will be deleted
+   * if the abort wasn't due to a network or server error. The controller's
+   * 'onAbort()' method is notified in all cases.
+   *
+   * @param error [optional]
+   *        Error constant indicating the reason for the abort. Defaults to
+   *        user abort.
+   */
+  abort: function abort(error) {
+    this._log.debug("Aborting...");
+    this._finished = true;
+    let self = this;
+
+    // Default to "user aborted".
+    if (!error) {
+      error = JPAKE_ERROR_USERABORT;
+    }
+
+    // For channel/network/no-data failures the server can't be reached (or
+    // the channel is gone), so skip the failure report and notify directly.
+    if (error == JPAKE_ERROR_CHANNEL ||
+        error == JPAKE_ERROR_NETWORK ||
+        error == JPAKE_ERROR_NODATA) {
+      Utils.nextTick(function() { this.controller.onAbort(error); }, this);
+    } else {
+      this._reportFailure(error, function() { self.controller.onAbort(error); });
+    }
+  },
+
+  /*
+   * Utilities
+   */
+
+  // Generate a random hex client ID (JPAKE_LENGTH_CLIENTID hex chars) used in
+  // the X-KeyExchange-Id header of every request.
+  _setClientID: function _setClientID() {
+    let rng = Cc["@mozilla.org/security/random-generator;1"]
+                .createInstance(Ci.nsIRandomGenerator);
+    let bytes = rng.generateRandomBytes(JPAKE_LENGTH_CLIENTID / 2);
+    this._clientID = bytes.map(byte => ("0" + byte.toString(16)).slice(-2)).join("");
+  },
+
+  _createSecret: function _createSecret() {
+    // 0-9a-z without 1,l,o,0
+    const key = "23456789abcdefghijkmnpqrstuvwxyz";
+    let rng = Cc["@mozilla.org/security/random-generator;1"]
+                .createInstance(Ci.nsIRandomGenerator);
+    let bytes = rng.generateRandomBytes(JPAKE_LENGTH_SECRET);
+    // Map each random byte onto the 32-char alphabet (256 is divisible by 32,
+    // so the mapping is uniform).
+    return bytes.map(byte => key[Math.floor(byte * key.length / 256)]).join("");
+  },
+
+  // Build a RESTRequest with the client ID header and standard timeout set.
+  _newRequest: function _newRequest(uri) {
+    let request = new RESTRequest(uri);
+    request.setHeader("X-KeyExchange-Id", this._clientID);
+    request.timeout = REQUEST_TIMEOUT;
+    return request;
+  },
+
+  /*
+   * Steps of J-PAKE procedure
+   */
+
+  // Ask the server for a fresh channel ID, then hand secret+channel to the
+  // controller as the displayable PIN.
+  _getChannel: function _getChannel(callback) {
+    this._log.trace("Requesting channel.");
+    let request = this._newRequest(this._serverURL + "new_channel");
+    request.get(Utils.bind2(this, function handleChannel(error) {
+      // Bail silently if the exchange was aborted while the request was
+      // in flight.
+      if (this._finished) {
+        return;
+      }
+
+      if (error) {
+        this._log.error("Error acquiring channel ID. " + error);
+        this.abort(JPAKE_ERROR_CHANNEL);
+        return;
+      }
+      if (request.response.status != 200) {
+        this._log.error("Error acquiring channel ID. Server responded with HTTP "
+                        + request.response.status);
+        this.abort(JPAKE_ERROR_CHANNEL);
+        return;
+      }
+
+      try {
+        this._channel = JSON.parse(request.response.body);
+      } catch (ex) {
+        this._log.error("Server responded with invalid JSON.");
+        this.abort(JPAKE_ERROR_CHANNEL);
+        return;
+      }
+      this._log.debug("Using channel " + this._channel);
+      this._channelURL = this._serverURL + this._channel;
+
+      // Don't block on UI code.
+      let pin = this._secret + this._channel;
+      Utils.nextTick(function() { this.controller.displayPIN(pin); }, this);
+      callback();
+    }));
+  },
+
+  // Generic handler for uploading data.
+  _putStep: function _putStep(callback) {
+    this._log.trace("Uploading message " + this._outgoing.type);
+    let request = this._newRequest(this._channelURL);
+    // Use conditional headers so we only overwrite the message we last read
+    // (If-Match), or require an empty channel for the first upload
+    // (If-None-Match: *).
+    if (this._their_etag) {
+      request.setHeader("If-Match", this._their_etag);
+    } else {
+      request.setHeader("If-None-Match", "*");
+    }
+    request.put(this._outgoing, Utils.bind2(this, function (error) {
+      if (this._finished) {
+        return;
+      }
+
+      if (error) {
+        this._log.error("Error uploading data. " + error);
+        this.abort(JPAKE_ERROR_NETWORK);
+        return;
+      }
+      if (request.response.status != 200) {
+        this._log.error("Could not upload data. Server responded with HTTP "
+                        + request.response.status);
+        this.abort(JPAKE_ERROR_SERVER);
+        return;
+      }
+      // There's no point in returning early here since the next step will
+      // always be a GET so let's pause for twice the poll interval.
+      this._my_etag = request.response.headers["etag"];
+      Utils.namedTimer(function () { callback(); }, this._pollInterval * 2,
+                       this, "_pollTimer");
+    }));
+  },
+
+  // Generic handler for polling for and retrieving data.
+  // _pollTries counts consecutive 304 responses; it is reset once fresh data
+  // arrives and compared against this._maxTries (which varies by phase).
+  _pollTries: 0,
+  _getStep: function _getStep(callback) {
+    this._log.trace("Retrieving next message.");
+    let request = this._newRequest(this._channelURL);
+    // If-None-Match with our own ETag means "only give me a message newer
+    // than the one we uploaded".
+    if (this._my_etag) {
+      request.setHeader("If-None-Match", this._my_etag);
+    }
+
+    request.get(Utils.bind2(this, function (error) {
+      if (this._finished) {
+        return;
+      }
+
+      if (error) {
+        this._log.error("Error fetching data. " + error);
+        this.abort(JPAKE_ERROR_NETWORK);
+        return;
+      }
+
+      if (request.response.status == 304) {
+        this._log.trace("Channel hasn't been updated yet. Will try again later.");
+        if (this._pollTries >= this._maxTries) {
+          this._log.error("Tried for " + this._pollTries + " times, aborting.");
+          this.abort(JPAKE_ERROR_TIMEOUT);
+          return;
+        }
+        this._pollTries += 1;
+        Utils.namedTimer(function() { this._getStep(callback); },
+                         this._pollInterval, this, "_pollTimer");
+        return;
+      }
+      this._pollTries = 0;
+
+      if (request.response.status == 404) {
+        this._log.error("No data found in the channel.");
+        this.abort(JPAKE_ERROR_NODATA);
+        return;
+      }
+      if (request.response.status != 200) {
+        this._log.error("Could not retrieve data. Server responded with HTTP "
+                        + request.response.status);
+        this.abort(JPAKE_ERROR_SERVER);
+        return;
+      }
+
+      this._their_etag = request.response.headers["etag"];
+      if (!this._their_etag) {
+        this._log.error("Server did not supply ETag for message: "
+                        + request.response.body);
+        this.abort(JPAKE_ERROR_SERVER);
+        return;
+      }
+
+      try {
+        this._incoming = JSON.parse(request.response.body);
+      } catch (ex) {
+        this._log.error("Server responded with invalid JSON.");
+        this.abort(JPAKE_ERROR_INVALID);
+        return;
+      }
+      this._log.trace("Fetched message " + this._incoming.type);
+      callback();
+    }));
+  },
+
+  // Best-effort: tell the server why the exchange failed so it can tear down
+  // the channel; always invokes `callback`, even if the report itself fails.
+  _reportFailure: function _reportFailure(reason, callback) {
+    this._log.debug("Reporting failure to server.");
+    let request = this._newRequest(this._serverURL + "report");
+    request.setHeader("X-KeyExchange-Cid", this._channel);
+    request.setHeader("X-KeyExchange-Log", reason);
+    request.post("", Utils.bind2(this, function (error) {
+      if (error) {
+        this._log.warn("Report failed: " + error);
+      } else if (request.response.status != 200) {
+        this._log.warn("Report failed. Server responded with HTTP "
+                       + request.response.status);
+      }
+
+      // Do not block on errors, we're done or aborted by now anyway.
+      callback();
+    }));
+  },
+
+  // J-PAKE round 1: generate our two public values plus their zero-knowledge
+  // proofs and package them as the outgoing "<signerid>1" message.
+  _computeStepOne: function _computeStepOne(callback) {
+    this._log.trace("Computing round 1.");
+    // Out-params for the XPCOM call; values arrive on the .value property.
+    let gx1 = {};
+    let gv1 = {};
+    let r1 = {};
+    let gx2 = {};
+    let gv2 = {};
+    let r2 = {};
+    try {
+      this._jpake.round1(this._my_signerid, gx1, gv1, r1, gx2, gv2, r2);
+    } catch (ex) {
+      this._log.error("JPAKE round 1 threw: " + ex);
+      this.abort(JPAKE_ERROR_INTERNAL);
+      return;
+    }
+    let one = {gx1: gx1.value,
+               gx2: gx2.value,
+               zkp_x1: {gr: gv1.value, b: r1.value, id: this._my_signerid},
+               zkp_x2: {gr: gv2.value, b: r2.value, id: this._my_signerid}};
+    this._outgoing = {type: this._my_signerid + "1",
+                      version: KEYEXCHANGE_VERSION,
+                      payload: one};
+    this._log.trace("Generated message " + this._outgoing.type);
+    callback();
+  },
+
+  // J-PAKE round 2: validate the peer's round-1 message, feed it and the
+  // shared secret into round2(), and build the outgoing "<signerid>2" message.
+  _computeStepTwo: function _computeStepTwo(callback) {
+    this._log.trace("Computing round 2.");
+    if (this._incoming.type != this._their_signerid + "1") {
+      this._log.error("Invalid round 1 message: "
+                      + JSON.stringify(this._incoming));
+      this.abort(JPAKE_ERROR_WRONGMESSAGE);
+      return;
+    }
+
+    let step1 = this._incoming.payload;
+    if (!step1 || !step1.zkp_x1 || step1.zkp_x1.id != this._their_signerid
+        || !step1.zkp_x2 || step1.zkp_x2.id != this._their_signerid) {
+      this._log.error("Invalid round 1 payload: " + JSON.stringify(step1));
+      this.abort(JPAKE_ERROR_WRONGMESSAGE);
+      return;
+    }
+
+    let A = {};
+    let gvA = {};
+    let rA = {};
+
+    try {
+      this._jpake.round2(this._their_signerid, this._secret,
+                         step1.gx1, step1.zkp_x1.gr, step1.zkp_x1.b,
+                         step1.gx2, step1.zkp_x2.gr, step1.zkp_x2.b,
+                         A, gvA, rA);
+    } catch (ex) {
+      this._log.error("JPAKE round 2 threw: " + ex);
+      this.abort(JPAKE_ERROR_INTERNAL);
+      return;
+    }
+    let two = {A: A.value,
+               zkp_A: {gr: gvA.value, b: rA.value, id: this._my_signerid}};
+    this._outgoing = {type: this._my_signerid + "2",
+                      version: KEYEXCHANGE_VERSION,
+                      payload: two};
+    this._log.trace("Generated message " + this._outgoing.type);
+    callback();
+  },
+
+ _computeFinal: function _computeFinal(callback) {
+ if (this._incoming.type != this._their_signerid + "2") {
+ this._log.error("Invalid round 2 message: "
+ + JSON.stringify(this._incoming));
+ this.abort(JPAKE_ERROR_WRONGMESSAGE);
+ return;
+ }
+
+ let step2 = this._incoming.payload;
+ if (!step2 || !step2.zkp_A || step2.zkp_A.id != this._their_signerid) {
+ this._log.error("Invalid round 2 payload: " + JSON.stringify(step1));
+ this.abort(JPAKE_ERROR_WRONGMESSAGE);
+ return;
+ }
+
+ let aes256Key = {};
+ let hmac256Key = {};
+
+ try {
+ this._jpake.final(step2.A, step2.zkp_A.gr, step2.zkp_A.b, HMAC_INPUT,
+ aes256Key, hmac256Key);
+ } catch (ex) {
+ this._log.error("JPAKE final round threw: " + ex);
+ this.abort(JPAKE_ERROR_INTERNAL);
+ return;
+ }
+
+ this._crypto_key = aes256Key.value;
+ let hmac_key = Utils.makeHMACKey(Utils.safeAtoB(hmac256Key.value));
+ this._hmac_hasher = Utils.makeHMACHasher(Ci.nsICryptoHMAC.SHA256, hmac_key);
+
+ callback();
+ },
+
+  // Encrypt the well-known verification value with the derived key so the
+  // peer can confirm both sides computed the same key (receiver side).
+  _computeKeyVerification: function _computeKeyVerification(callback) {
+    this._log.trace("Encrypting key verification value.");
+    let iv, ciphertext;
+    try {
+      iv = Svc.Crypto.generateRandomIV();
+      ciphertext = Svc.Crypto.encrypt(JPAKE_VERIFY_VALUE,
+                                      this._crypto_key, iv);
+    } catch (ex) {
+      this._log.error("Failed to encrypt key verification value.");
+      this.abort(JPAKE_ERROR_INTERNAL);
+      return;
+    }
+    this._outgoing = {type: this._my_signerid + "3",
+                      version: KEYEXCHANGE_VERSION,
+                      payload: {ciphertext: ciphertext, IV: iv}};
+    this._log.trace("Generated message " + this._outgoing.type);
+    callback();
+  },
+
+  // Sender side: re-encrypt the known verification value with the peer's IV
+  // and compare ciphertexts to prove both sides derived the same key.
+  _verifyPairing: function _verifyPairing(callback) {
+    this._log.trace("Verifying their key.");
+    if (this._incoming.type != this._their_signerid + "3") {
+      this._log.error("Invalid round 3 data: " +
+                      JSON.stringify(this._incoming));
+      this.abort(JPAKE_ERROR_WRONGMESSAGE);
+      return;
+    }
+    let step3 = this._incoming.payload;
+    let ciphertext;
+    try {
+      ciphertext = Svc.Crypto.encrypt(JPAKE_VERIFY_VALUE,
+                                      this._crypto_key, step3.IV);
+      if (ciphertext != step3.ciphertext) {
+        throw "Key mismatch!";
+      }
+    } catch (ex) {
+      this._log.error("Keys don't match!");
+      this.abort(JPAKE_ERROR_KEYMISMATCH);
+      return;
+    }
+
+    this._log.debug("Verified pairing!");
+    // _paired gates sendAndComplete(); notify the controller off-stack.
+    this._paired = true;
+    Utils.nextTick(function () { this.controller.onPaired(); }, this);
+    callback();
+  },
+
+  // Encrypt this._data with the session key and attach an HMAC over the
+  // ciphertext (encrypt-then-MAC) in the outgoing round-3 message.
+  _encryptData: function _encryptData(callback) {
+    this._log.trace("Encrypting data.");
+    let iv, ciphertext, hmac;
+    try {
+      iv = Svc.Crypto.generateRandomIV();
+      ciphertext = Svc.Crypto.encrypt(this._data, this._crypto_key, iv);
+      hmac = Utils.bytesAsHex(Utils.digestUTF8(ciphertext, this._hmac_hasher));
+    } catch (ex) {
+      this._log.error("Failed to encrypt data.");
+      this.abort(JPAKE_ERROR_INTERNAL);
+      return;
+    }
+    this._outgoing = {type: this._my_signerid + "3",
+                      version: KEYEXCHANGE_VERSION,
+                      payload: {ciphertext: ciphertext, IV: iv, hmac: hmac}};
+    this._log.trace("Generated message " + this._outgoing.type);
+    callback();
+  },
+
+ _decryptData: function _decryptData(callback) {
+ this._log.trace("Verifying their key.");
+ if (this._incoming.type != this._their_signerid + "3") {
+ this._log.error("Invalid round 3 data: "
+ + JSON.stringify(this._incoming));
+ this.abort(JPAKE_ERROR_WRONGMESSAGE);
+ return;
+ }
+ let step3 = this._incoming.payload;
+ try {
+ let hmac = Utils.bytesAsHex(
+ Utils.digestUTF8(step3.ciphertext, this._hmac_hasher));
+ if (hmac != step3.hmac) {
+ throw "HMAC validation failed!";
+ }
+ } catch (ex) {
+ this._log.error("HMAC validation failed.");
+ this.abort(JPAKE_ERROR_KEYMISMATCH);
+ return;
+ }
+
+ this._log.trace("Decrypting data.");
+ let cleartext;
+ try {
+ cleartext = Svc.Crypto.decrypt(step3.ciphertext, this._crypto_key,
+ step3.IV);
+ } catch (ex) {
+ this._log.error("Failed to decrypt data.");
+ this.abort(JPAKE_ERROR_INTERNAL);
+ return;
+ }
+
+ try {
+ this._newData = JSON.parse(cleartext);
+ } catch (ex) {
+ this._log.error("Invalid data data: " + JSON.stringify(cleartext));
+ this.abort(JPAKE_ERROR_INVALID);
+ return;
+ }
+
+ this._log.trace("Decrypted data.");
+ callback();
+ },
+
+  // Final chain step: mark the exchange finished and notify the controller.
+  // On the sending side this._newData was never set, so onComplete receives
+  // undefined -- matching the documented "no parameter" behavior.
+  _complete: function _complete() {
+    this._log.debug("Exchange completed.");
+    this._finished = true;
+    Utils.nextTick(function () { this.controller.onComplete(this._newData); },
+                   this);
+  }
+
+};
+
+
+/**
+ * Send credentials over an active J-PAKE channel.
+ *
+ * This object is designed to take over as the JPAKEClient controller,
+ * presumably replacing one that is UI-based which would either cause
+ * DOM objects to leak or the JPAKEClient to be GC'ed when the DOM
+ * context disappears. This object stays alive for the duration of the
+ * transfer by being strong-ref'ed as an nsIObserver.
+ *
+ * Credentials are sent after the first sync has been completed
+ * (successfully or not.)
+ *
+ * Usage:
+ *
+ * jpakeclient.controller = new SendCredentialsController(jpakeclient,
+ * service);
+ *
+ */
+this.SendCredentialsController =
+ function SendCredentialsController(jpakeclient, service) {
+  this._log = Log.repository.getLogger("Sync.SendCredentialsController");
+  this._log.level = Log.Level[Svc.Prefs.get("log.logger.service.main")];
+
+  this._log.trace("Loading.");
+  this.jpakeclient = jpakeclient;
+  this.service = service;
+
+  // Register ourselves as observers the first Sync finishing (either
+  // successfully or unsuccessfully, we don't care) or for removing
+  // this device's sync configuration, in case that happens while we
+  // haven't finished the first sync yet.
+  // NOTE(review): `Services` is not imported at the top of this file;
+  // presumably it reaches this scope via util.js's re-exports -- confirm.
+  Services.obs.addObserver(this, "weave:service:sync:finish", false);
+  Services.obs.addObserver(this, "weave:service:sync:error", false);
+  Services.obs.addObserver(this, "weave:service:start-over", false);
+}
+SendCredentialsController.prototype = {
+
+  // Drop our observer registrations; safe to call more than once.
+  unload: function unload() {
+    this._log.trace("Unloading.");
+    try {
+      Services.obs.removeObserver(this, "weave:service:sync:finish");
+      Services.obs.removeObserver(this, "weave:service:sync:error");
+      Services.obs.removeObserver(this, "weave:service:start-over");
+    } catch (ex) {
+      // Ignore.
+    }
+  },
+
+  // nsIObserver: send credentials once the first sync settles, or abort the
+  // exchange if the user starts over before that happens.
+  observe: function observe(subject, topic, data) {
+    switch (topic) {
+      case "weave:service:sync:finish":
+      case "weave:service:sync:error":
+        Utils.nextTick(this.sendCredentials, this);
+        break;
+      case "weave:service:start-over":
+        // This will call onAbort which will call unload().
+        this.jpakeclient.abort();
+        break;
+    }
+  },
+
+  // Ship account, password, Sync Key and server URL over the J-PAKE channel.
+  sendCredentials: function sendCredentials() {
+    this._log.trace("Sending credentials.");
+    let credentials = {account: this.service.identity.account,
+                       password: this.service.identity.basicPassword,
+                       synckey: this.service.identity.syncKey,
+                       serverURL: this.service.serverURL};
+    this.jpakeclient.sendAndComplete(credentials);
+  },
+
+  // JPAKEClient controller API
+
+  onComplete: function onComplete() {
+    this._log.debug("Exchange was completed successfully!");
+    this.unload();
+
+    // Schedule a Sync for soonish to fetch the data uploaded by the
+    // device with which we just paired.
+    this.service.scheduler.scheduleNextSync(this.service.scheduler.activeInterval);
+  },
+
+  onAbort: function onAbort(error) {
+    // It doesn't really matter why we aborted, but the channel is closed
+    // for sure, so we won't be able to do anything with it.
+    this._log.debug("Exchange was aborted with error: " + error);
+    this.unload();
+  },
+
+  // Irrelevant methods for this controller:
+  displayPIN: function displayPIN() {},
+  onPairingStart: function onPairingStart() {},
+  onPaired: function onPaired() {},
+};
diff --git a/services/sync/modules/keys.js b/services/sync/modules/keys.js
new file mode 100644
index 000000000..b93de7f31
--- /dev/null
+++ b/services/sync/modules/keys.js
@@ -0,0 +1,214 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+"use strict";
+
+this.EXPORTED_SYMBOLS = [
+ "BulkKeyBundle",
+ "SyncKeyBundle"
+];
+
+var {classes: Cc, interfaces: Ci, utils: Cu, results: Cr} = Components;
+
+Cu.import("resource://services-sync/constants.js");
+Cu.import("resource://gre/modules/Log.jsm");
+Cu.import("resource://services-sync/util.js");
+
+/**
+ * Represents a pair of keys.
+ *
+ * Each key stored in a key bundle is 256 bits. One key is used for symmetric
+ * encryption. The other is used for HMAC.
+ *
+ * A KeyBundle by itself is just an anonymous pair of keys. Other types
+ * deriving from this one add semantics, such as associated collections or
+ * generating a key bundle via HKDF from another key.
+ */
+// Base constructor: initializes the raw/base64 encryption and HMAC key slots
+// plus the cached HMAC key object and SHA-256 hasher, all unset.
+function KeyBundle() {
+  this._encrypt = null;
+  this._encryptB64 = null;
+  this._hmac = null;
+  this._hmacB64 = null;
+  this._hmacObj = null;
+  this._sha256HMACHasher = null;
+}
+KeyBundle.prototype = {
+ _encrypt: null,
+ _encryptB64: null,
+ _hmac: null,
+ _hmacB64: null,
+ _hmacObj: null,
+ _sha256HMACHasher: null,
+
+ equals: function equals(bundle) {
+ return bundle &&
+ (bundle.hmacKey == this.hmacKey) &&
+ (bundle.encryptionKey == this.encryptionKey);
+ },
+
+ /*
+ * Accessors for the two keys.
+ */
+ get encryptionKey() {
+ return this._encrypt;
+ },
+
+ set encryptionKey(value) {
+ if (!value || typeof value != "string") {
+ throw new Error("Encryption key can only be set to string values.");
+ }
+
+ if (value.length < 16) {
+ throw new Error("Encryption key must be at least 128 bits long.");
+ }
+
+ this._encrypt = value;
+ this._encryptB64 = btoa(value);
+ },
+
+ get encryptionKeyB64() {
+ return this._encryptB64;
+ },
+
+ get hmacKey() {
+ return this._hmac;
+ },
+
+ set hmacKey(value) {
+ if (!value || typeof value != "string") {
+ throw new Error("HMAC key can only be set to string values.");
+ }
+
+ if (value.length < 16) {
+ throw new Error("HMAC key must be at least 128 bits long.");
+ }
+
+ this._hmac = value;
+ this._hmacB64 = btoa(value);
+ this._hmacObj = value ? Utils.makeHMACKey(value) : null;
+ this._sha256HMACHasher = value ? Utils.makeHMACHasher(
+ Ci.nsICryptoHMAC.SHA256, this._hmacObj) : null;
+ },
+
+ get hmacKeyB64() {
+ return this._hmacB64;
+ },
+
+ get hmacKeyObject() {
+ return this._hmacObj;
+ },
+
+ get sha256HMACHasher() {
+ return this._sha256HMACHasher;
+ },
+
+ /**
+ * Populate this key pair with 2 new, randomly generated keys.
+ */
+ generateRandom: function generateRandom() {
+ let generatedHMAC = Svc.Crypto.generateRandomKey();
+ let generatedEncr = Svc.Crypto.generateRandomKey();
+ this.keyPairB64 = [generatedEncr, generatedHMAC];
+ },
+
+};
+
+/**
+ * Represents a KeyBundle associated with a collection.
+ *
+ * This is just a KeyBundle with a collection attached.
+ */
+this.BulkKeyBundle = function BulkKeyBundle(collection) {
+  let log = Log.repository.getLogger("Sync.BulkKeyBundle");
+  log.info("BulkKeyBundle being created for " + collection);
+  // Initialize the base KeyBundle fields, then remember our collection.
+  KeyBundle.call(this);
+
+  this._collection = collection;
+}
+
+BulkKeyBundle.prototype = {
+  __proto__: KeyBundle.prototype,
+
+  // The collection (engine name) this key bundle is associated with.
+  get collection() {
+    return this._collection;
+  },
+
+  /**
+   * Obtain the key pair in this key bundle.
+   *
+   * The returned keys are represented as raw byte strings.
+   */
+  get keyPair() {
+    return [this.encryptionKey, this.hmacKey];
+  },
+
+  // Expects [encryptionKey, hmacKey] as raw byte strings; validation is
+  // delegated to the KeyBundle setters.
+  set keyPair(value) {
+    if (!Array.isArray(value) || value.length != 2) {
+      throw new Error("BulkKeyBundle.keyPair value must be array of 2 keys.");
+    }
+
+    this.encryptionKey = value[0];
+    this.hmacKey = value[1];
+  },
+
+  get keyPairB64() {
+    return [this.encryptionKeyB64, this.hmacKeyB64];
+  },
+
+  // Expects [encryptionKey, hmacKey] base64-encoded; decoded before storing.
+  set keyPairB64(value) {
+    if (!Array.isArray(value) || value.length != 2) {
+      throw new Error("BulkKeyBundle.keyPairB64 value must be an array of 2 " +
+                      "keys.");
+    }
+
+    this.encryptionKey = Utils.safeAtoB(value[0]);
+    this.hmacKey = Utils.safeAtoB(value[1]);
+  },
+};
+
+/**
+ * Represents a key pair derived from a Sync Key via HKDF.
+ *
+ * Instances of this type should be considered immutable. You create an
+ * instance by specifying the username and 26 character "friendly" Base32
+ * encoded Sync Key. The Sync Key is derived at instance creation time.
+ *
+ * If the username or Sync Key is invalid, an Error will be thrown.
+ */
+this.SyncKeyBundle = function SyncKeyBundle(username, syncKey) {
+  let log = Log.repository.getLogger("Sync.SyncKeyBundle");
+  log.info("SyncKeyBundle being created.");
+  KeyBundle.call(this);
+
+  // Derive both keys immediately; throws if username/syncKey are invalid.
+  this.generateFromKey(username, syncKey);
+}
+SyncKeyBundle.prototype = {
+  __proto__: KeyBundle.prototype,
+
+  /*
+   * If we've got a string, hash it into keys and store them.
+   *
+   * @param username  string the keys are bound to (mixed into the HKDF info).
+   * @param syncKey   26-char friendly base32 Sync Key (passphrase format).
+   * @throws Error on non-string input or a syncKey that isn't a passphrase.
+   */
+  generateFromKey: function generateFromKey(username, syncKey) {
+    if (!username || (typeof username != "string")) {
+      throw new Error("Sync Key cannot be generated from non-string username.");
+    }
+
+    if (!syncKey || (typeof syncKey != "string")) {
+      throw new Error("Sync Key cannot be generated from non-string key.");
+    }
+
+    if (!Utils.isPassphrase(syncKey)) {
+      throw new Error("Provided key is not a passphrase, cannot derive Sync " +
+                      "Key Bundle.");
+    }
+
+    // Expand the base32 Sync Key to an AES 256 and 256 bit HMAC key.
+    // First 32 bytes of the HKDF output become the encryption key, the
+    // second 32 bytes the HMAC key.
+    let prk = Utils.decodeKeyBase32(syncKey);
+    let info = HMAC_INPUT + username;
+    let okm = Utils.hkdfExpand(prk, info, 32 * 2);
+    this.encryptionKey = okm.slice(0, 32);
+    this.hmacKey = okm.slice(32, 64);
+  },
+};
+
diff --git a/services/sync/modules/main.js b/services/sync/modules/main.js
new file mode 100644
index 000000000..af3399e7a
--- /dev/null
+++ b/services/sync/modules/main.js
@@ -0,0 +1,30 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+this.EXPORTED_SYMBOLS = ['Weave'];
+
+// The Weave namespace: constants are imported eagerly, everything else is
+// attached as lazy getters that import their module on first access.
+this.Weave = {};
+Components.utils.import("resource://services-sync/constants.js", Weave);
+// Map of module file name -> symbols to expose lazily on Weave.
+var lazies = {
+  "jpakeclient.js": ["JPAKEClient", "SendCredentialsController"],
+  "service.js": ["Service"],
+  "status.js": ["Status"],
+  "util.js": ['Utils', 'Svc']
+};
+
+function lazyImport(module, dest, props) {
+  function getter(prop) {
+    return function() {
+      let ns = {};
+      Components.utils.import(module, ns);
+      // Replace the getter with the resolved value so subsequent accesses
+      // are plain property reads.
+      delete dest[prop];
+      return dest[prop] = ns[prop];
+    };
+  }
+  props.forEach(function (prop) { dest.__defineGetter__(prop, getter(prop)); });
+}
+
+for (let mod in lazies) {
+  lazyImport("resource://services-sync/" + mod, Weave, lazies[mod]);
+}
diff --git a/services/sync/modules/policies.js b/services/sync/modules/policies.js
new file mode 100644
index 000000000..a3933426d
--- /dev/null
+++ b/services/sync/modules/policies.js
@@ -0,0 +1,983 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+this.EXPORTED_SYMBOLS = [
+ "ErrorHandler",
+ "SyncScheduler",
+];
+
+var {classes: Cc, interfaces: Ci, utils: Cu, results: Cr} = Components;
+
Cu.import("resource://gre/modules/Log.jsm");
Cu.import("resource://gre/modules/Services.jsm");
Cu.import("resource://gre/modules/XPCOMUtils.jsm");
Cu.import("resource://services-common/async.js");
Cu.import("resource://services-common/logmanager.js");
Cu.import("resource://services-sync/constants.js");
Cu.import("resource://services-sync/engines.js");
Cu.import("resource://services-sync/util.js");
+
+XPCOMUtils.defineLazyModuleGetter(this, "Status",
+ "resource://services-sync/status.js");
+XPCOMUtils.defineLazyModuleGetter(this, "AddonManager",
+ "resource://gre/modules/AddonManager.jsm");
+
+// Get the value for an interval that's stored in preferences. To save users
+// from themselves (and us from them!) the minimum time they can specify
+// is 60s.
+function getThrottledIntervalPreference(prefName) {
+ return Math.max(Svc.Prefs.get(prefName), 60) * 1000;
+}
+
+this.SyncScheduler = function SyncScheduler(service) {
+ this.service = service;
+ this.init();
+}
+SyncScheduler.prototype = {
+ _log: Log.repository.getLogger("Sync.SyncScheduler"),
+
+ _fatalLoginStatus: [LOGIN_FAILED_NO_USERNAME,
+ LOGIN_FAILED_NO_PASSWORD,
+ LOGIN_FAILED_NO_PASSPHRASE,
+ LOGIN_FAILED_INVALID_PASSPHRASE,
+ LOGIN_FAILED_LOGIN_REJECTED],
+
+ /**
+ * The nsITimer object that schedules the next sync. See scheduleNextSync().
+ */
+ syncTimer: null,
+
+ setDefaults: function setDefaults() {
+ this._log.trace("Setting SyncScheduler policy values to defaults.");
+
+ let service = Cc["@mozilla.org/weave/service;1"]
+ .getService(Ci.nsISupports)
+ .wrappedJSObject;
+
+ let part = service.fxAccountsEnabled ? "fxa" : "sync11";
+ let prefSDInterval = "scheduler." + part + ".singleDeviceInterval";
+ this.singleDeviceInterval = getThrottledIntervalPreference(prefSDInterval);
+
+ this.idleInterval = getThrottledIntervalPreference("scheduler.idleInterval");
+ this.activeInterval = getThrottledIntervalPreference("scheduler.activeInterval");
+ this.immediateInterval = getThrottledIntervalPreference("scheduler.immediateInterval");
+ this.eolInterval = getThrottledIntervalPreference("scheduler.eolInterval");
+
+ // A user is non-idle on startup by default.
+ this.idle = false;
+
+ this.hasIncomingItems = false;
+
+ this.clearSyncTriggers();
+ },
+
+ // nextSync is in milliseconds, but prefs can't hold that much
+ get nextSync() {
+ return Svc.Prefs.get("nextSync", 0) * 1000;
+ },
+ set nextSync(value) {
+ Svc.Prefs.set("nextSync", Math.floor(value / 1000));
+ },
+
+ get syncInterval() {
+ return Svc.Prefs.get("syncInterval", this.singleDeviceInterval);
+ },
+ set syncInterval(value) {
+ Svc.Prefs.set("syncInterval", value);
+ },
+
+ get syncThreshold() {
+ return Svc.Prefs.get("syncThreshold", SINGLE_USER_THRESHOLD);
+ },
+ set syncThreshold(value) {
+ Svc.Prefs.set("syncThreshold", value);
+ },
+
+ get globalScore() {
+ return Svc.Prefs.get("globalScore", 0);
+ },
+ set globalScore(value) {
+ Svc.Prefs.set("globalScore", value);
+ },
+
+ get numClients() {
+ return Svc.Prefs.get("numClients", 0);
+ },
+ set numClients(value) {
+ Svc.Prefs.set("numClients", value);
+ },
+
+ init: function init() {
+ this._log.level = Log.Level[Svc.Prefs.get("log.logger.service.main")];
+ this.setDefaults();
+ Svc.Obs.add("weave:engine:score:updated", this);
+ Svc.Obs.add("network:offline-status-changed", this);
+ Svc.Obs.add("weave:service:sync:start", this);
+ Svc.Obs.add("weave:service:sync:finish", this);
+ Svc.Obs.add("weave:engine:sync:finish", this);
+ Svc.Obs.add("weave:engine:sync:error", this);
+ Svc.Obs.add("weave:service:login:error", this);
+ Svc.Obs.add("weave:service:logout:finish", this);
+ Svc.Obs.add("weave:service:sync:error", this);
+ Svc.Obs.add("weave:service:backoff:interval", this);
+ Svc.Obs.add("weave:service:ready", this);
+ Svc.Obs.add("weave:engine:sync:applied", this);
+ Svc.Obs.add("weave:service:setup-complete", this);
+ Svc.Obs.add("weave:service:start-over", this);
+ Svc.Obs.add("FxA:hawk:backoff:interval", this);
+
+ if (Status.checkSetup() == STATUS_OK) {
+ Svc.Obs.add("wake_notification", this);
+ Svc.Idle.addIdleObserver(this, Svc.Prefs.get("scheduler.idleTime"));
+ }
+ },
+
+ observe: function observe(subject, topic, data) {
+ this._log.trace("Handling " + topic);
+ switch(topic) {
+ case "weave:engine:score:updated":
+ if (Status.login == LOGIN_SUCCEEDED) {
+ Utils.namedTimer(this.calculateScore, SCORE_UPDATE_DELAY, this,
+ "_scoreTimer");
+ }
+ break;
+ case "network:offline-status-changed":
+ // Whether online or offline, we'll reschedule syncs
+ this._log.trace("Network offline status change: " + data);
+ this.checkSyncStatus();
+ break;
+ case "weave:service:sync:start":
+ // Clear out any potentially pending syncs now that we're syncing
+ this.clearSyncTriggers();
+
+ // reset backoff info, if the server tells us to continue backing off,
+ // we'll handle that later
+ Status.resetBackoff();
+
+ this.globalScore = 0;
+ break;
+ case "weave:service:sync:finish":
+ this.nextSync = 0;
+ this.adjustSyncInterval();
+
+ if (Status.service == SYNC_FAILED_PARTIAL && this.requiresBackoff) {
+ this.requiresBackoff = false;
+ this.handleSyncError();
+ return;
+ }
+
+ let sync_interval;
+ this._syncErrors = 0;
+ if (Status.sync == NO_SYNC_NODE_FOUND) {
+ this._log.trace("Scheduling a sync at interval NO_SYNC_NODE_FOUND.");
+ sync_interval = NO_SYNC_NODE_INTERVAL;
+ }
+ this.scheduleNextSync(sync_interval);
+ break;
+ case "weave:engine:sync:finish":
+ if (data == "clients") {
+ // Update the client mode because it might change what we sync.
+ this.updateClientMode();
+ }
+ break;
+ case "weave:engine:sync:error":
+ // `subject` is the exception thrown by an engine's sync() method.
+ let exception = subject;
+ if (exception.status >= 500 && exception.status <= 504) {
+ this.requiresBackoff = true;
+ }
+ break;
+ case "weave:service:login:error":
+ this.clearSyncTriggers();
+
+ if (Status.login == MASTER_PASSWORD_LOCKED) {
+ // Try again later, just as if we threw an error... only without the
+ // error count.
+ this._log.debug("Couldn't log in: master password is locked.");
+ this._log.trace("Scheduling a sync at MASTER_PASSWORD_LOCKED_RETRY_INTERVAL");
+ this.scheduleAtInterval(MASTER_PASSWORD_LOCKED_RETRY_INTERVAL);
+ } else if (this._fatalLoginStatus.indexOf(Status.login) == -1) {
+ // Not a fatal login error, just an intermittent network or server
+ // issue. Keep on syncin'.
+ this.checkSyncStatus();
+ }
+ break;
+ case "weave:service:logout:finish":
+ // Start or cancel the sync timer depending on if
+ // logged in or logged out
+ this.checkSyncStatus();
+ break;
+ case "weave:service:sync:error":
+ // There may be multiple clients but if the sync fails, client mode
+ // should still be updated so that the next sync has a correct interval.
+ this.updateClientMode();
+ this.adjustSyncInterval();
+ this.nextSync = 0;
+ this.handleSyncError();
+ break;
+ case "FxA:hawk:backoff:interval":
+ case "weave:service:backoff:interval":
+ let requested_interval = subject * 1000;
+ this._log.debug("Got backoff notification: " + requested_interval + "ms");
+ // Leave up to 25% more time for the back off.
+ let interval = requested_interval * (1 + Math.random() * 0.25);
+ Status.backoffInterval = interval;
+ Status.minimumNextSync = Date.now() + requested_interval;
+ this._log.debug("Fuzzed minimum next sync: " + Status.minimumNextSync);
+ break;
+ case "weave:service:ready":
+ // Applications can specify this preference if they want autoconnect
+ // to happen after a fixed delay.
+ let delay = Svc.Prefs.get("autoconnectDelay");
+ if (delay) {
+ this.delayedAutoConnect(delay);
+ }
+ break;
+ case "weave:engine:sync:applied":
+ let numItems = subject.succeeded;
+ this._log.trace("Engine " + data + " successfully applied " + numItems +
+ " items.");
+ if (numItems) {
+ this.hasIncomingItems = true;
+ }
+ break;
+ case "weave:service:setup-complete":
+ Services.prefs.savePrefFile(null);
+ Svc.Idle.addIdleObserver(this, Svc.Prefs.get("scheduler.idleTime"));
+ Svc.Obs.add("wake_notification", this);
+ break;
+ case "weave:service:start-over":
+ this.setDefaults();
+ try {
+ Svc.Idle.removeIdleObserver(this, Svc.Prefs.get("scheduler.idleTime"));
+ } catch (ex) {
+ if (ex.result != Cr.NS_ERROR_FAILURE) {
+ throw ex;
+ }
+ // In all likelihood we didn't have an idle observer registered yet.
+ // It's all good.
+ }
+ break;
+ case "idle":
+ this._log.trace("We're idle.");
+ this.idle = true;
+ // Adjust the interval for future syncs. This won't actually have any
+ // effect until the next pending sync (which will happen soon since we
+ // were just active.)
+ this.adjustSyncInterval();
+ break;
+ case "active":
+ this._log.trace("Received notification that we're back from idle.");
+ this.idle = false;
+ Utils.namedTimer(function onBack() {
+ if (this.idle) {
+ this._log.trace("... and we're idle again. " +
+ "Ignoring spurious back notification.");
+ return;
+ }
+
+ this._log.trace("Genuine return from idle. Syncing.");
+ // Trigger a sync if we have multiple clients.
+ if (this.numClients > 1) {
+ this.scheduleNextSync(0);
+ }
+ }, IDLE_OBSERVER_BACK_DELAY, this, "idleDebouncerTimer");
+ break;
+ case "wake_notification":
+ this._log.debug("Woke from sleep.");
+ Utils.nextTick(() => {
+ // Trigger a sync if we have multiple clients. We give it 5 seconds
+ // incase the network is still in the process of coming back up.
+ if (this.numClients > 1) {
+ this._log.debug("More than 1 client. Will sync in 5s.");
+ this.scheduleNextSync(5000);
+ }
+ });
+ break;
+ }
+ },
+
+ adjustSyncInterval: function adjustSyncInterval() {
+ if (Status.eol) {
+ this._log.debug("Server status is EOL; using eolInterval.");
+ this.syncInterval = this.eolInterval;
+ return;
+ }
+
+ if (this.numClients <= 1) {
+ this._log.trace("Adjusting syncInterval to singleDeviceInterval.");
+ this.syncInterval = this.singleDeviceInterval;
+ return;
+ }
+
+ // Only MULTI_DEVICE clients will enter this if statement
+ // since SINGLE_USER clients will be handled above.
+ if (this.idle) {
+ this._log.trace("Adjusting syncInterval to idleInterval.");
+ this.syncInterval = this.idleInterval;
+ return;
+ }
+
+ if (this.hasIncomingItems) {
+ this._log.trace("Adjusting syncInterval to immediateInterval.");
+ this.hasIncomingItems = false;
+ this.syncInterval = this.immediateInterval;
+ } else {
+ this._log.trace("Adjusting syncInterval to activeInterval.");
+ this.syncInterval = this.activeInterval;
+ }
+ },
+
+ calculateScore: function calculateScore() {
+ let engines = [this.service.clientsEngine].concat(this.service.engineManager.getEnabled());
+ for (let i = 0;i < engines.length;i++) {
+ this._log.trace(engines[i].name + ": score: " + engines[i].score);
+ this.globalScore += engines[i].score;
+ engines[i]._tracker.resetScore();
+ }
+
+ this._log.trace("Global score updated: " + this.globalScore);
+ this.checkSyncStatus();
+ },
+
+ /**
+ * Process the locally stored clients list to figure out what mode to be in
+ */
+ updateClientMode: function updateClientMode() {
+ // Nothing to do if it's the same amount
+ let numClients = this.service.clientsEngine.stats.numClients;
+ if (this.numClients == numClients)
+ return;
+
+ this._log.debug("Client count: " + this.numClients + " -> " + numClients);
+ this.numClients = numClients;
+
+ if (numClients <= 1) {
+ this._log.trace("Adjusting syncThreshold to SINGLE_USER_THRESHOLD");
+ this.syncThreshold = SINGLE_USER_THRESHOLD;
+ } else {
+ this._log.trace("Adjusting syncThreshold to MULTI_DEVICE_THRESHOLD");
+ this.syncThreshold = MULTI_DEVICE_THRESHOLD;
+ }
+ this.adjustSyncInterval();
+ },
+
+ /**
+ * Check if we should be syncing and schedule the next sync, if it's not scheduled
+ */
+ checkSyncStatus: function checkSyncStatus() {
+ // Should we be syncing now, if not, cancel any sync timers and return
+ // if we're in backoff, we'll schedule the next sync.
+ let ignore = [kSyncBackoffNotMet, kSyncMasterPasswordLocked];
+ let skip = this.service._checkSync(ignore);
+ this._log.trace("_checkSync returned \"" + skip + "\".");
+ if (skip) {
+ this.clearSyncTriggers();
+ return;
+ }
+
+ // Only set the wait time to 0 if we need to sync right away
+ let wait;
+ if (this.globalScore > this.syncThreshold) {
+ this._log.debug("Global Score threshold hit, triggering sync.");
+ wait = 0;
+ }
+ this.scheduleNextSync(wait);
+ },
+
+ /**
+ * Call sync() if Master Password is not locked.
+ *
+ * Otherwise, reschedule a sync for later.
+ */
+ syncIfMPUnlocked: function syncIfMPUnlocked() {
+ // No point if we got kicked out by the master password dialog.
+ if (Status.login == MASTER_PASSWORD_LOCKED &&
+ Utils.mpLocked()) {
+ this._log.debug("Not initiating sync: Login status is " + Status.login);
+
+ // If we're not syncing now, we need to schedule the next one.
+ this._log.trace("Scheduling a sync at MASTER_PASSWORD_LOCKED_RETRY_INTERVAL");
+ this.scheduleAtInterval(MASTER_PASSWORD_LOCKED_RETRY_INTERVAL);
+ return;
+ }
+
+ Utils.nextTick(this.service.sync, this.service);
+ },
+
+ /**
+ * Set a timer for the next sync
+ */
+ scheduleNextSync: function scheduleNextSync(interval) {
+ // If no interval was specified, use the current sync interval.
+ if (interval == null) {
+ interval = this.syncInterval;
+ }
+
+ // Ensure the interval is set to no less than the backoff.
+ if (Status.backoffInterval && interval < Status.backoffInterval) {
+ this._log.trace("Requested interval " + interval +
+ " ms is smaller than the backoff interval. " +
+ "Using backoff interval " +
+ Status.backoffInterval + " ms instead.");
+ interval = Status.backoffInterval;
+ }
+
+ if (this.nextSync != 0) {
+ // There's already a sync scheduled. Don't reschedule if there's already
+ // a timer scheduled for sooner than requested.
+ let currentInterval = this.nextSync - Date.now();
+ this._log.trace("There's already a sync scheduled in " +
+ currentInterval + " ms.");
+ if (currentInterval < interval && this.syncTimer) {
+ this._log.trace("Ignoring scheduling request for next sync in " +
+ interval + " ms.");
+ return;
+ }
+ }
+
+ // Start the sync right away if we're already late.
+ if (interval <= 0) {
+ this._log.trace("Requested sync should happen right away.");
+ this.syncIfMPUnlocked();
+ return;
+ }
+
+ this._log.debug("Next sync in " + interval + " ms.");
+ Utils.namedTimer(this.syncIfMPUnlocked, interval, this, "syncTimer");
+
+ // Save the next sync time in-case sync is disabled (logout/offline/etc.)
+ this.nextSync = Date.now() + interval;
+ },
+
+
+ /**
+ * Incorporates the backoff/retry logic used in error handling and elective
+ * non-syncing.
+ */
+ scheduleAtInterval: function scheduleAtInterval(minimumInterval) {
+ let interval = Utils.calculateBackoff(this._syncErrors,
+ MINIMUM_BACKOFF_INTERVAL,
+ Status.backoffInterval);
+ if (minimumInterval) {
+ interval = Math.max(minimumInterval, interval);
+ }
+
+ this._log.debug("Starting client-initiated backoff. Next sync in " +
+ interval + " ms.");
+ this.scheduleNextSync(interval);
+ },
+
+ /**
+ * Automatically start syncing after the given delay (in seconds).
+ *
+ * Applications can define the `services.sync.autoconnectDelay` preference
+ * to have this called automatically during start-up with the pref value as
+ * the argument. Alternatively, they can call it themselves to control when
+ * Sync should first start to sync.
+ */
+ delayedAutoConnect: function delayedAutoConnect(delay) {
+ if (this.service._checkSetup() == STATUS_OK) {
+ Utils.namedTimer(this.autoConnect, delay * 1000, this, "_autoTimer");
+ }
+ },
+
+ autoConnect: function autoConnect() {
+ if (this.service._checkSetup() == STATUS_OK && !this.service._checkSync()) {
+ // Schedule a sync based on when a previous sync was scheduled.
+ // scheduleNextSync() will do the right thing if that time lies in
+ // the past.
+ this.scheduleNextSync(this.nextSync - Date.now());
+ }
+
+ // Once autoConnect is called we no longer need _autoTimer.
+ if (this._autoTimer) {
+ this._autoTimer.clear();
+ }
+ },
+
+ _syncErrors: 0,
+ /**
+ * Deal with sync errors appropriately
+ */
+ handleSyncError: function handleSyncError() {
+ this._log.trace("In handleSyncError. Error count: " + this._syncErrors);
+ this._syncErrors++;
+
+ // Do nothing on the first couple of failures, if we're not in
+ // backoff due to 5xx errors.
+ if (!Status.enforceBackoff) {
+ if (this._syncErrors < MAX_ERROR_COUNT_BEFORE_BACKOFF) {
+ this.scheduleNextSync();
+ return;
+ }
+ this._log.debug("Sync error count has exceeded " +
+ MAX_ERROR_COUNT_BEFORE_BACKOFF + "; enforcing backoff.");
+ Status.enforceBackoff = true;
+ }
+
+ this.scheduleAtInterval();
+ },
+
+
+ /**
+ * Remove any timers/observers that might trigger a sync
+ */
+ clearSyncTriggers: function clearSyncTriggers() {
+ this._log.debug("Clearing sync triggers and the global score.");
+ this.globalScore = this.nextSync = 0;
+
+ // Clear out any scheduled syncs
+ if (this.syncTimer)
+ this.syncTimer.clear();
+ },
+
+};
+
+this.ErrorHandler = function ErrorHandler(service) {
+ this.service = service;
+ this.init();
+}
+ErrorHandler.prototype = {
+ MINIMUM_ALERT_INTERVAL_MSEC: 604800000, // One week.
+
+ /**
+ * Flag that turns on error reporting for all errors, incl. network errors.
+ */
+ dontIgnoreErrors: false,
+
+ /**
+ * Flag that indicates if we have already reported a prolonged failure.
+ * Once set, we don't report it again, meaning this error is only reported
+ * one per run.
+ */
+ didReportProlongedError: false,
+
+ init: function init() {
+ Svc.Obs.add("weave:engine:sync:applied", this);
+ Svc.Obs.add("weave:engine:sync:error", this);
+ Svc.Obs.add("weave:service:login:error", this);
+ Svc.Obs.add("weave:service:sync:error", this);
+ Svc.Obs.add("weave:service:sync:finish", this);
+
+ this.initLogs();
+ },
+
+ initLogs: function initLogs() {
+ this._log = Log.repository.getLogger("Sync.ErrorHandler");
+ this._log.level = Log.Level[Svc.Prefs.get("log.logger.service.main")];
+
+ let root = Log.repository.getLogger("Sync");
+ root.level = Log.Level[Svc.Prefs.get("log.rootLogger")];
+
+ let logs = ["Sync", "FirefoxAccounts", "Hawk", "Common.TokenServerClient",
+ "Sync.SyncMigration", "browserwindow.syncui",
+ "Services.Common.RESTRequest", "Services.Common.RESTRequest",
+ "BookmarkSyncUtils"
+ ];
+
+ this._logManager = new LogManager(Svc.Prefs, logs, "sync");
+ },
+
+ observe: function observe(subject, topic, data) {
+ this._log.trace("Handling " + topic);
+ switch(topic) {
+ case "weave:engine:sync:applied":
+ if (subject.newFailed) {
+ // An engine isn't able to apply one or more incoming records.
+ // We don't fail hard on this, but it usually indicates a bug,
+ // so for now treat it as sync error (c.f. Service._syncEngine())
+ Status.engines = [data, ENGINE_APPLY_FAIL];
+ this._log.debug(data + " failed to apply some records.");
+ }
+ break;
+ case "weave:engine:sync:error": {
+ let exception = subject; // exception thrown by engine's sync() method
+ let engine_name = data; // engine name that threw the exception
+
+ this.checkServerError(exception);
+
+ Status.engines = [engine_name, exception.failureCode || ENGINE_UNKNOWN_FAIL];
+ if (Async.isShutdownException(exception)) {
+ this._log.debug(engine_name + " was interrupted due to the application shutting down");
+ } else {
+ this._log.debug(engine_name + " failed", exception);
+ Services.telemetry.getKeyedHistogramById("WEAVE_ENGINE_SYNC_ERRORS")
+ .add(engine_name);
+ }
+ break;
+ }
+ case "weave:service:login:error":
+ this._log.error("Sync encountered a login error");
+ this.resetFileLog();
+
+ if (this.shouldReportError()) {
+ this.notifyOnNextTick("weave:ui:login:error");
+ } else {
+ this.notifyOnNextTick("weave:ui:clear-error");
+ }
+
+ this.dontIgnoreErrors = false;
+ break;
+ case "weave:service:sync:error": {
+ if (Status.sync == CREDENTIALS_CHANGED) {
+ this.service.logout();
+ }
+
+ let exception = subject;
+ if (Async.isShutdownException(exception)) {
+ // If we are shutting down we just log the fact, attempt to flush
+ // the log file and get out of here!
+ this._log.error("Sync was interrupted due to the application shutting down");
+ this.resetFileLog();
+ break;
+ }
+
+ // Not a shutdown related exception...
+ this._log.error("Sync encountered an error", exception);
+ this.resetFileLog();
+
+ if (this.shouldReportError()) {
+ this.notifyOnNextTick("weave:ui:sync:error");
+ } else {
+ this.notifyOnNextTick("weave:ui:sync:finish");
+ }
+
+ this.dontIgnoreErrors = false;
+ break;
+ }
+ case "weave:service:sync:finish":
+ this._log.trace("Status.service is " + Status.service);
+
+ // Check both of these status codes: in the event of a failure in one
+ // engine, Status.service will be SYNC_FAILED_PARTIAL despite
+ // Status.sync being SYNC_SUCCEEDED.
+ // *facepalm*
+ if (Status.sync == SYNC_SUCCEEDED &&
+ Status.service == STATUS_OK) {
+ // Great. Let's clear our mid-sync 401 note.
+ this._log.trace("Clearing lastSyncReassigned.");
+ Svc.Prefs.reset("lastSyncReassigned");
+ }
+
+ if (Status.service == SYNC_FAILED_PARTIAL) {
+ this._log.error("Some engines did not sync correctly.");
+ this.resetFileLog();
+
+ if (this.shouldReportError()) {
+ this.dontIgnoreErrors = false;
+ this.notifyOnNextTick("weave:ui:sync:error");
+ break;
+ }
+ } else {
+ this.resetFileLog();
+ }
+ this.dontIgnoreErrors = false;
+ this.notifyOnNextTick("weave:ui:sync:finish");
+ break;
+ }
+ },
+
+ notifyOnNextTick: function notifyOnNextTick(topic) {
+ Utils.nextTick(function() {
+ this._log.trace("Notifying " + topic +
+ ". Status.login is " + Status.login +
+ ". Status.sync is " + Status.sync);
+ Svc.Obs.notify(topic);
+ }, this);
+ },
+
+ /**
+ * Trigger a sync and don't muffle any errors, particularly network errors.
+ */
+ syncAndReportErrors: function syncAndReportErrors() {
+ this._log.debug("Beginning user-triggered sync.");
+
+ this.dontIgnoreErrors = true;
+ Utils.nextTick(this.service.sync, this.service);
+ },
+
+ _dumpAddons: function _dumpAddons() {
+ // Just dump the items that sync may be concerned with. Specifically,
+ // active extensions that are not hidden.
+ let addonPromise = new Promise(resolve => {
+ try {
+ AddonManager.getAddonsByTypes(["extension"], resolve);
+ } catch (e) {
+ this._log.warn("Failed to dump addons", e)
+ resolve([])
+ }
+ });
+
+ return addonPromise.then(addons => {
+ let relevantAddons = addons.filter(x => x.isActive && !x.hidden);
+ this._log.debug("Addons installed", relevantAddons.length);
+ for (let addon of relevantAddons) {
+ this._log.debug(" - ${name}, version ${version}, id ${id}", addon);
+ }
+ });
+ },
+
+ /**
+ * Generate a log file for the sync that just completed
+ * and refresh the input & output streams.
+ */
+ resetFileLog: function resetFileLog() {
+ let onComplete = logType => {
+ Svc.Obs.notify("weave:service:reset-file-log");
+ this._log.trace("Notified: " + Date.now());
+ if (logType == this._logManager.ERROR_LOG_WRITTEN) {
+ Cu.reportError("Sync encountered an error - see about:sync-log for the log file.");
+ }
+ };
+
+ // If we're writing an error log, dump extensions that may be causing problems.
+ let beforeResetLog;
+ if (this._logManager.sawError) {
+ beforeResetLog = this._dumpAddons();
+ } else {
+ beforeResetLog = Promise.resolve();
+ }
+ // Note we do not return the promise here - the caller doesn't need to wait
+ // for this to complete.
+ beforeResetLog
+ .then(() => this._logManager.resetFileLog())
+ .then(onComplete, onComplete);
+ },
+
+ /**
+ * Translates server error codes to meaningful strings.
+ *
+ * @param code
+ * server error code as an integer
+ */
+ errorStr: function errorStr(code) {
+ switch (code.toString()) {
+ case "1":
+ return "illegal-method";
+ case "2":
+ return "invalid-captcha";
+ case "3":
+ return "invalid-username";
+ case "4":
+ return "cannot-overwrite-resource";
+ case "5":
+ return "userid-mismatch";
+ case "6":
+ return "json-parse-failure";
+ case "7":
+ return "invalid-password";
+ case "8":
+ return "invalid-record";
+ case "9":
+ return "weak-password";
+ default:
+ return "generic-server-error";
+ }
+ },
+
+ // A function to indicate if Sync errors should be "reported" - which in this
+ // context really means "should be notify observers of an error" - but note
+ // that since bug 1180587, no one is going to surface an error to the user.
+ shouldReportError: function shouldReportError() {
+ if (Status.login == MASTER_PASSWORD_LOCKED) {
+ this._log.trace("shouldReportError: false (master password locked).");
+ return false;
+ }
+
+ if (this.dontIgnoreErrors) {
+ return true;
+ }
+
+ if (Status.login == LOGIN_FAILED_LOGIN_REJECTED) {
+ // An explicit LOGIN_REJECTED state is always reported (bug 1081158)
+ this._log.trace("shouldReportError: true (login was rejected)");
+ return true;
+ }
+
+ let lastSync = Svc.Prefs.get("lastSync");
+ if (lastSync && ((Date.now() - Date.parse(lastSync)) >
+ Svc.Prefs.get("errorhandler.networkFailureReportTimeout") * 1000)) {
+ Status.sync = PROLONGED_SYNC_FAILURE;
+ if (this.didReportProlongedError) {
+ this._log.trace("shouldReportError: false (prolonged sync failure, but" +
+ " we've already reported it).");
+ return false;
+ }
+ this._log.trace("shouldReportError: true (first prolonged sync failure).");
+ this.didReportProlongedError = true;
+ return true;
+ }
+
+ // We got a 401 mid-sync. Wait for the next sync before actually handling
+ // an error. This assumes that we'll get a 401 again on a login fetch in
+ // order to report the error.
+ if (!this.service.clusterURL) {
+ this._log.trace("shouldReportError: false (no cluster URL; " +
+ "possible node reassignment).");
+ return false;
+ }
+
+
+ let result = ([Status.login, Status.sync].indexOf(SERVER_MAINTENANCE) == -1 &&
+ [Status.login, Status.sync].indexOf(LOGIN_FAILED_NETWORK_ERROR) == -1);
+ this._log.trace("shouldReportError: ${result} due to login=${login}, sync=${sync}",
+ {result, login: Status.login, sync: Status.sync});
+ return result;
+ },
+
+ get currentAlertMode() {
+ return Svc.Prefs.get("errorhandler.alert.mode");
+ },
+
+ set currentAlertMode(str) {
+ return Svc.Prefs.set("errorhandler.alert.mode", str);
+ },
+
+ get earliestNextAlert() {
+ return Svc.Prefs.get("errorhandler.alert.earliestNext", 0) * 1000;
+ },
+
+ set earliestNextAlert(msec) {
+ return Svc.Prefs.set("errorhandler.alert.earliestNext", msec / 1000);
+ },
+
+ clearServerAlerts: function () {
+ // If we have any outstanding alerts, apparently they're no longer relevant.
+ Svc.Prefs.resetBranch("errorhandler.alert");
+ },
+
+ /**
+ * X-Weave-Alert headers can include a JSON object:
+ *
+ * {
+ * "code": // One of "hard-eol", "soft-eol".
+ * "url": // For "Learn more" link.
+ * "message": // Logged in Sync logs.
+ * }
+ */
+ handleServerAlert: function (xwa) {
+ if (!xwa.code) {
+ this._log.warn("Got structured X-Weave-Alert, but no alert code.");
+ return;
+ }
+
+ switch (xwa.code) {
+ // Gently and occasionally notify the user that this service will be
+ // shutting down.
+ case "soft-eol":
+ // Fall through.
+
+ // Tell the user that this service has shut down, and drop our syncing
+ // frequency dramatically.
+ case "hard-eol":
+ // Note that both of these alerts should be subservient to future "sign
+ // in with your Firefox Account" storage alerts.
+ if ((this.currentAlertMode != xwa.code) ||
+ (this.earliestNextAlert < Date.now())) {
+ Utils.nextTick(function() {
+ Svc.Obs.notify("weave:eol", xwa);
+ }, this);
+ this._log.error("X-Weave-Alert: " + xwa.code + ": " + xwa.message);
+ this.earliestNextAlert = Date.now() + this.MINIMUM_ALERT_INTERVAL_MSEC;
+ this.currentAlertMode = xwa.code;
+ }
+ break;
+ default:
+ this._log.debug("Got unexpected X-Weave-Alert code: " + xwa.code);
+ }
+ },
+
+ /**
+ * Handle HTTP response results or exceptions and set the appropriate
+ * Status.* bits.
+ *
+ * This method also looks for "side-channel" warnings.
+ */
+ checkServerError: function (resp) {
+ switch (resp.status) {
+ case 200:
+ case 404:
+ case 513:
+ let xwa = resp.headers['x-weave-alert'];
+
+ // Only process machine-readable alerts.
+ if (!xwa || !xwa.startsWith("{")) {
+ this.clearServerAlerts();
+ return;
+ }
+
+ try {
+ xwa = JSON.parse(xwa);
+ } catch (ex) {
+ this._log.warn("Malformed X-Weave-Alert from server: " + xwa);
+ return;
+ }
+
+ this.handleServerAlert(xwa);
+ break;
+
+ case 400:
+ if (resp == RESPONSE_OVER_QUOTA) {
+ Status.sync = OVER_QUOTA;
+ }
+ break;
+
+ case 401:
+ this.service.logout();
+ this._log.info("Got 401 response; resetting clusterURL.");
+ this.service.clusterURL = null;
+
+ let delay = 0;
+ if (Svc.Prefs.get("lastSyncReassigned")) {
+ // We got a 401 in the middle of the previous sync, and we just got
+ // another. Login must have succeeded in order for us to get here, so
+ // the password should be correct.
+ // This is likely to be an intermittent server issue, so back off and
+ // give it time to recover.
+ this._log.warn("Last sync also failed for 401. Delaying next sync.");
+ delay = MINIMUM_BACKOFF_INTERVAL;
+ } else {
+ this._log.debug("New mid-sync 401 failure. Making a note.");
+ Svc.Prefs.set("lastSyncReassigned", true);
+ }
+ this._log.info("Attempting to schedule another sync.");
+ this.service.scheduler.scheduleNextSync(delay);
+ break;
+
+ case 500:
+ case 502:
+ case 503:
+ case 504:
+ Status.enforceBackoff = true;
+ if (resp.status == 503 && resp.headers["retry-after"]) {
+ let retryAfter = resp.headers["retry-after"];
+ this._log.debug("Got Retry-After: " + retryAfter);
+ if (this.service.isLoggedIn) {
+ Status.sync = SERVER_MAINTENANCE;
+ } else {
+ Status.login = SERVER_MAINTENANCE;
+ }
+ Svc.Obs.notify("weave:service:backoff:interval",
+ parseInt(retryAfter, 10));
+ }
+ break;
+ }
+
+ switch (resp.result) {
+ case Cr.NS_ERROR_UNKNOWN_HOST:
+ case Cr.NS_ERROR_CONNECTION_REFUSED:
+ case Cr.NS_ERROR_NET_TIMEOUT:
+ case Cr.NS_ERROR_NET_RESET:
+ case Cr.NS_ERROR_NET_INTERRUPT:
+ case Cr.NS_ERROR_PROXY_CONNECTION_REFUSED:
+ // The constant says it's about login, but in fact it just
+ // indicates general network error.
+ if (this.service.isLoggedIn) {
+ Status.sync = LOGIN_FAILED_NETWORK_ERROR;
+ } else {
+ Status.login = LOGIN_FAILED_NETWORK_ERROR;
+ }
+ break;
+ }
+ },
+};
diff --git a/services/sync/modules/record.js b/services/sync/modules/record.js
new file mode 100644
index 000000000..02f7f281a
--- /dev/null
+++ b/services/sync/modules/record.js
@@ -0,0 +1,1039 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+this.EXPORTED_SYMBOLS = [
+ "WBORecord",
+ "RecordManager",
+ "CryptoWrapper",
+ "CollectionKeyManager",
+ "Collection",
+];
+
+var Cc = Components.classes;
+var Ci = Components.interfaces;
+var Cr = Components.results;
+var Cu = Components.utils;
+
+const CRYPTO_COLLECTION = "crypto";
+const KEYS_WBO = "keys";
+
+Cu.import("resource://gre/modules/Log.jsm");
+Cu.import("resource://services-sync/constants.js");
+Cu.import("resource://services-sync/keys.js");
+Cu.import("resource://services-sync/resource.js");
+Cu.import("resource://services-sync/util.js");
+Cu.import("resource://services-common/async.js");
+
this.WBORecord = function WBORecord(collection, id) {
  // Both arguments are optional; they may be filled in later, e.g. by
  // deserialize() or by callers that only need an in-memory record.
  this.collection = collection;
  this.id = id;
  this.data = {};
  this.payload = {};
}
WBORecord.prototype = {
  _logName: "Sync.Record.WBO",

  // Server-assigned sort hint; 0 when the record carries none.
  get sortindex() {
    if (this.data.sortindex)
      return this.data.sortindex;
    return 0;
  },

  /**
   * GET thyself from the given Resource, then deserialize the response
   * body into this record. The raw response is stored on this.response so
   * callers can inspect status/success.
   *
   * @param resource a Resource instance addressing this WBO.
   * @returns this
   */
  fetch: function fetch(resource) {
    // Fixed operator precedence: the previous `!resource instanceof Resource`
    // negated `resource` first, so the guard could never trip.
    if (!(resource instanceof Resource)) {
      throw new Error("First argument must be a Resource instance.");
    }

    let r = resource.get();
    if (r.success) {
      this.deserialize(r); // Warning! Muffles exceptions!
    }
    this.response = r;
    return this;
  },

  /**
   * PUT this record to the given Resource.
   *
   * @param resource a Resource instance addressing this WBO.
   * @returns the PUT response object.
   */
  upload: function upload(resource) {
    // Same precedence fix as in fetch() above.
    if (!(resource instanceof Resource)) {
      throw new Error("First argument must be a Resource instance.");
    }

    return resource.put(this);
  },

  // Take a base URI string, with trailing slash, and return the URI of this
  // WBO based on collection and ID.
  uri: function(base) {
    if (this.collection && this.id) {
      let url = Utils.makeURI(base + this.collection + "/" + this.id);
      url.QueryInterface(Ci.nsIURL);
      return url;
    }
    return null;
  },

  /**
   * Populate this.data from a JSON string (or an already-parsed object),
   * then try to parse the payload field as JSON; if that fails the payload
   * stays a string.
   */
  deserialize: function deserialize(json) {
    this.data = json.constructor.toString() == String ? JSON.parse(json) : json;

    try {
      // The payload is likely to be JSON, but if not, keep it as a string
      this.payload = JSON.parse(this.payload);
    } catch(ex) {}
  },

  toJSON: function toJSON() {
    // Copy fields from data to be stringified, making sure payload is a string
    let obj = {};
    for (let [key, val] of Object.entries(this.data))
      obj[key] = key == "payload" ? JSON.stringify(val) : val;
    if (this.ttl)
      obj.ttl = this.ttl;
    return obj;
  },

  toString: function toString() {
    return "{ " +
      "id: " + this.id + " " +
      "index: " + this.sortindex + " " +
      "modified: " + this.modified + " " +
      "ttl: " + this.ttl + " " +
      "payload: " + JSON.stringify(this.payload) +
      " }";
  }
};
+
+Utils.deferGetSet(WBORecord, "data", ["id", "modified", "sortindex", "payload"]);
+
this.CryptoWrapper = function CryptoWrapper(collection, id) {
  // NOTE: cleartext must exist before the WBORecord super-call: assigning
  // this.id there runs CryptoWrapper's custom id setter, which mirrors the
  // id into this.cleartext.id.
  this.cleartext = {};
  WBORecord.call(this, collection, id);
  this.ciphertext = null;
  this.id = id;
}
CryptoWrapper.prototype = {
  __proto__: WBORecord.prototype,
  _logName: "Sync.Record.CryptoWrapper",

  // Compute the hex HMAC of this.ciphertext using the bundle's SHA-256 HMAC
  // hasher. Throws (a string) if the bundle carries no hasher.
  ciphertextHMAC: function ciphertextHMAC(keyBundle) {
    let hasher = keyBundle.sha256HMACHasher;
    if (!hasher) {
      throw "Cannot compute HMAC without an HMAC key.";
    }

    return Utils.bytesAsHex(Utils.digestUTF8(this.ciphertext, hasher));
  },

  /*
   * Don't directly use the sync key. Instead, grab a key for this
   * collection, which is decrypted with the sync key.
   *
   * Cache those keys; invalidate the cache if the time on the keys collection
   * changes, or other auth events occur.
   *
   * Optional key bundle overrides the collection key lookup.
   */
  encrypt: function encrypt(keyBundle) {
    if (!keyBundle) {
      throw new Error("A key bundle must be supplied to encrypt.");
    }

    // Fresh random IV per encryption; the cleartext is JSON-serialized and
    // then discarded so only ciphertext + HMAC remain on the record.
    this.IV = Svc.Crypto.generateRandomIV();
    this.ciphertext = Svc.Crypto.encrypt(JSON.stringify(this.cleartext),
                                         keyBundle.encryptionKeyB64, this.IV);
    this.hmac = this.ciphertextHMAC(keyBundle);
    this.cleartext = null;
  },

  // Optional key bundle.
  decrypt: function decrypt(keyBundle) {
    if (!this.ciphertext) {
      throw "No ciphertext: nothing to decrypt?";
    }

    if (!keyBundle) {
      throw new Error("A key bundle must be supplied to decrypt.");
    }

    // Authenticate the encrypted blob with the expected HMAC
    let computedHMAC = this.ciphertextHMAC(keyBundle);

    if (computedHMAC != this.hmac) {
      Utils.throwHMACMismatch(this.hmac, computedHMAC);
    }

    // Handle invalid data here. Elsewhere we assume that cleartext is an object.
    let cleartext = Svc.Crypto.decrypt(this.ciphertext,
                                       keyBundle.encryptionKeyB64, this.IV);
    let json_result = JSON.parse(cleartext);

    if (json_result && (json_result instanceof Object)) {
      // Decryption succeeded; drop the ciphertext to mirror encrypt()'s
      // one-or-the-other invariant.
      this.cleartext = json_result;
      this.ciphertext = null;
    } else {
      throw "Decryption failed: result is <" + json_result + ">, not an object.";
    }

    // Verify that the encrypted id matches the requested record's id.
    if (this.cleartext.id != this.id)
      throw "Record id mismatch: " + this.cleartext.id + " != " + this.id;

    return this.cleartext;
  },

  toString: function toString() {
    let payload = this.deleted ? "DELETED" : JSON.stringify(this.cleartext);

    return "{ " +
      "id: " + this.id + " " +
      "index: " + this.sortindex + " " +
      "modified: " + this.modified + " " +
      "ttl: " + this.ttl + " " +
      "payload: " + payload + " " +
      "collection: " + (this.collection || "undefined") +
      " }";
  },

  // The custom setter below masks the parent's getter, so explicitly call it :(
  get id() {
    return WBORecord.prototype.__lookupGetter__("id").call(this);
  },

  // Keep both plaintext and encrypted versions of the id to verify integrity
  set id(val) {
    WBORecord.prototype.__lookupSetter__("id").call(this, val);
    return this.cleartext.id = val;
  },
};
+
+Utils.deferGetSet(CryptoWrapper, "payload", ["ciphertext", "IV", "hmac"]);
+Utils.deferGetSet(CryptoWrapper, "cleartext", "deleted");
+
/**
 * An interface and caching layer for records.
 */
this.RecordManager = function RecordManager(service) {
  // URL-spec-keyed cache of records fetched so far.
  this._records = {};
  this._log = Log.repository.getLogger(this._logName);
  this.service = service;
}
RecordManager.prototype = {
  _recordType: CryptoWrapper,
  _logName: "Sync.RecordManager",

  /**
   * Fetch a record from the network (bypassing the cache) and store it.
   * Returns null on any failure; the last response (or {} when the GET
   * itself threw) is kept on this.response for inspection.
   */
  import: function RecordMgr_import(url) {
    this._log.trace("Importing record: " + (url.spec ? url.spec : url));
    try {
      // Clear out the last response with empty object if GET fails
      this.response = {};
      this.response = this.service.resource(url).get();

      // Don't parse and save the record on failure
      if (!this.response.success)
        return null;

      let record = new this._recordType(url);
      record.deserialize(this.response);

      return this.set(url, record);
    } catch (ex) {
      if (Async.isShutdownException(ex)) {
        throw ex;
      }
      this._log.debug("Failed to import record", ex);
      return null;
    }
  },

  // Return the cached record for this URL, importing it on a cache miss.
  get: function RecordMgr_get(url) {
    // Use a url string as the key to the hash
    let spec = url.spec ? url.spec : url;
    if (spec in this._records)
      return this._records[spec];
    return this.import(url);
  },

  set: function RecordMgr_set(url, record) {
    let spec = url.spec ? url.spec : url;
    return this._records[spec] = record;
  },

  contains: function RecordMgr_contains(url) {
    if ((url.spec || url) in this._records)
      return true;
    return false;
  },

  clearCache: function recordMgr_clearCache() {
    this._records = {};
  },

  del: function RecordMgr_del(url) {
    // Consistency fix: normalize URI objects to their spec string, matching
    // the key used by get/set/contains. Previously an nsIURI argument was
    // stringified differently and the cached entry survived deletion.
    delete this._records[url.spec ? url.spec : url];
  }
};
+
/**
 * Keeps track of mappings between collection names ('tabs') and KeyBundles.
 *
 * You can update this thing simply by giving it /info/collections. It'll
 * use the last modified time to bring itself up to date.
 */
this.CollectionKeyManager = function CollectionKeyManager(lastModified, default_, collections) {
  this._log = Log.repository.getLogger("Sync.CollectionKeyManager");

  // Server timestamp of the crypto/keys data we last ingested.
  this.lastModified = lastModified || 0;
  // Fallback KeyBundle for collections without a dedicated key.
  this._default = default_ || null;
  // Map of collection name -> KeyBundle.
  this._collections = collections || {};
}
+
// TODO: persist this locally as an Identity. Bug 610913.
// Note that the last modified time needs to be preserved.
CollectionKeyManager.prototype = {

  /**
   * Generate a new CollectionKeyManager that has the same attributes
   * as this one.
   */
  clone() {
    // Shallow copy: the KeyBundle values are shared with this instance.
    const newCollections = {};
    for (let c in this._collections) {
      newCollections[c] = this._collections[c];
    }

    return new CollectionKeyManager(this.lastModified, this._default, newCollections);
  },

  // Return information about old vs new keys:
  // * same: true if two collections are equal
  // * changed: an array of collection names that changed.
  _compareKeyBundleCollections: function _compareKeyBundleCollections(m1, m2) {
    let changed = [];

    // A name counts as changed if it is missing on either side or the two
    // bundles fail equals().
    function process(m1, m2) {
      for (let k1 in m1) {
        let v1 = m1[k1];
        let v2 = m2[k1];
        if (!(v1 && v2 && v1.equals(v2)))
          changed.push(k1);
      }
    }

    // Diffs both ways.
    process(m1, m2);
    process(m2, m1);

    // Return a sorted, unique array.
    changed.sort();
    let last;
    changed = changed.filter(x => (x != last) && (last = x));
    return {same: changed.length == 0,
            changed: changed};
  },

  // True when no default key is present (the state after clear()).
  get isClear() {
   return !this._default;
  },

  clear: function clear() {
    this._log.info("Clearing collection keys...");
    this.lastModified = 0;
    this._collections = {};
    this._default = null;
  },

  // Return the key for |collection|, falling back to the default bundle.
  keyForCollection: function(collection) {
    if (collection && this._collections[collection])
      return this._collections[collection];

    return this._default;
  },

  /**
   * If `collections` (an array of strings) is provided, iterate
   * over it and generate random keys for each collection.
   * Create a WBO for the given data.
   */
  _makeWBO: function(collections, defaultBundle) {
    let wbo = new CryptoWrapper(CRYPTO_COLLECTION, KEYS_WBO);
    let c = {};
    for (let k in collections) {
      c[k] = collections[k].keyPairB64;
    }
    wbo.cleartext = {
      "default": defaultBundle ? defaultBundle.keyPairB64 : null,
      "collections": c,
      "collection": CRYPTO_COLLECTION,
      "id": KEYS_WBO
    };
    return wbo;
  },

  /**
   * Create a WBO for the current keys.
   */
  asWBO: function(collection, id) {
    // NOTE(review): the collection/id arguments are accepted but unused;
    // the WBO is always crypto/keys.
    return this._makeWBO(this._collections, this._default);
  },

  /**
   * Compute a new default key, and new keys for any specified collections.
   */
  newKeys: function(collections) {
    let newDefaultKeyBundle = this.newDefaultKeyBundle();

    let newColls = {};
    if (collections) {
      collections.forEach(function (c) {
        let b = new BulkKeyBundle(c);
        b.generateRandom();
        newColls[c] = b;
      });
    }
    return [newDefaultKeyBundle, newColls];
  },

  /**
   * Generates new keys, but does not replace our local copy. Use this to
   * verify an upload before storing.
   */
  generateNewKeysWBO: function(collections) {
    let newDefaultKey, newColls;
    [newDefaultKey, newColls] = this.newKeys(collections);

    return this._makeWBO(newColls, newDefaultKey);
  },

  /**
   * Create a new default key.
   *
   * @returns {BulkKeyBundle}
   */
  newDefaultKeyBundle() {
    const key = new BulkKeyBundle(DEFAULT_KEYBUNDLE_NAME);
    key.generateRandom();
    return key;
  },

  /**
   * Create a new default key and store it as this._default, since without one you cannot use setContents.
   */
  generateDefaultKey() {
    this._default = this.newDefaultKeyBundle();
  },

  /**
   * Return true if keys are already present for each of the given
   * collections.
   */
  hasKeysFor(collections) {
    // We can't use filter() here because sometimes collections is an iterator.
    for (let collection of collections) {
      if (!this._collections[collection]) {
        return false;
      }
    }
    return true;
  },

  /**
   * Return a new CollectionKeyManager that has keys for each of the
   * given collections (creating new ones for collections where we
   * don't already have keys).
   */
  ensureKeysFor(collections) {
    const newKeys = Object.assign({}, this._collections);
    for (let c of collections) {
      if (newKeys[c]) {
        continue; // don't replace existing keys
      }

      const b = new BulkKeyBundle(c);
      b.generateRandom();
      newKeys[c] = b;
    }
    // This instance is left untouched; callers adopt the returned manager.
    return new CollectionKeyManager(this.lastModified, this._default, newKeys);
  },

  // Take the fetched info/collections WBO, checking the change
  // time of the crypto collection.
  updateNeeded: function(info_collections) {

    this._log.info("Testing for updateNeeded. Last modified: " + this.lastModified);

    // No local record of modification time? Need an update.
    if (!this.lastModified)
      return true;

    // No keys on the server? We need an update, though our
    // update handling will be a little more drastic...
    if (!(CRYPTO_COLLECTION in info_collections))
      return true;

    // Otherwise, we need an update if our modification time is stale.
    return (info_collections[CRYPTO_COLLECTION] > this.lastModified);
  },

  //
  // Set our keys and modified time to the values fetched from the server.
  // Returns one of three values:
  //
  // * If the default key was modified, return true.
  // * If the default key was not modified, but per-collection keys were,
  //   return an array of such.
  // * Otherwise, return false -- we were up-to-date.
  //
  setContents: function setContents(payload, modified) {

    let self = this;

    this._log.info("Setting collection keys contents. Our last modified: " +
                   this.lastModified + ", input modified: " + modified + ".");

    if (!payload)
      throw "No payload in CollectionKeyManager.setContents().";

    if (!payload.default) {
      this._log.warn("No downloaded default key: this should not occur.");
      this._log.warn("Not clearing local keys.");
      throw "No default key in CollectionKeyManager.setContents(). Cannot proceed.";
    }

    // Process the incoming default key.
    let b = new BulkKeyBundle(DEFAULT_KEYBUNDLE_NAME);
    b.keyPairB64 = payload.default;
    let newDefault = b;

    // Process the incoming collections.
    let newCollections = {};
    if ("collections" in payload) {
      this._log.info("Processing downloaded per-collection keys.");
      let colls = payload.collections;
      for (let k in colls) {
        let v = colls[k];
        if (v) {
          let keyObj = new BulkKeyBundle(k);
          keyObj.keyPairB64 = v;
          newCollections[k] = keyObj;
        }
      }
    }

    // Check to see if these are already our keys.
    let sameDefault = (this._default && this._default.equals(newDefault));
    let collComparison = this._compareKeyBundleCollections(newCollections, this._collections);
    let sameColls = collComparison.same;

    if (sameDefault && sameColls) {
      self._log.info("New keys are the same as our old keys!");
      if (modified) {
        self._log.info("Bumped local modified time.");
        self.lastModified = modified;
      }
      return false;
    }

    // Make sure things are nice and tidy before we set.
    this.clear();

    this._log.info("Saving downloaded keys.");
    this._default = newDefault;
    this._collections = newCollections;

    // Always trust the server.
    if (modified) {
      self._log.info("Bumping last modified to " + modified);
      self.lastModified = modified;
    }

    return sameDefault ? collComparison.changed : true;
  },

  // Decrypt the storage/crypto/keys WBO with the sync key bundle and feed
  // the result to setContents(). Rethrows decryption failures.
  updateContents: function updateContents(syncKeyBundle, storage_keys) {
    let log = this._log;
    log.info("Updating collection keys...");

    // storage_keys is a WBO, fetched from storage/crypto/keys.
    // Its payload is the default key, and a map of collections to keys.
    // We lazily compute the key objects from the strings we're given.

    let payload;
    try {
      payload = storage_keys.decrypt(syncKeyBundle);
    } catch (ex) {
      log.warn("Got exception \"" + ex + "\" decrypting storage keys with sync key.");
      log.info("Aborting updateContents. Rethrowing.");
      throw ex;
    }

    let r = this.setContents(payload, storage_keys.modified);
    log.info("Collection keys updated.");
    return r;
  }
}
+
this.Collection = function Collection(uri, recordObj, service) {
  // A service is mandatory; every other field has a sensible default.
  if (!service) {
    throw new Error("Collection constructor requires a service.");
  }

  Resource.call(this, uri);

  // Borrow the authenticator from a service-built resource for the same
  // URI. This is a bit hacky, but gets the job done.
  const serviceResource = service.resource(uri);
  this.authenticator = serviceResource.authenticator;

  this._service = service;
  this._recordObj = recordObj;

  // Query-string state; the prototype setters rebuild the URL from these.
  this._full = false;
  this._ids = null;
  this._limit = 0;
  this._older = 0;
  this._newer = 0;
  this._data = [];
  // Optional members used by batch upload operations.
  this._batch = null;
  this._commit = false;
  // Batch download state -- explicitly an opaque value, not (necessarily)
  // a number.
  this._offset = null;
}
Collection.prototype = {
  __proto__: Resource.prototype,
  _logName: "Sync.Collection",

  // Recompute this.uri's query string from the current parameter fields.
  // Called by every setter below.
  _rebuildURL: function Coll__rebuildURL() {
    // XXX should consider what happens if it's not a URL...
    this.uri.QueryInterface(Ci.nsIURL);

    let args = [];
    if (this.older)
      args.push('older=' + this.older);
    else if (this.newer) {
      args.push('newer=' + this.newer);
    }
    if (this.full)
      args.push('full=1');
    if (this.sort)
      args.push('sort=' + this.sort);
    if (this.ids != null)
      args.push("ids=" + this.ids);
    if (this.limit > 0 && this.limit != Infinity)
      args.push("limit=" + this.limit);
    if (this._batch)
      args.push("batch=" + encodeURIComponent(this._batch));
    if (this._commit)
      args.push("commit=true");
    if (this._offset)
      args.push("offset=" + encodeURIComponent(this._offset));

    this.uri.query = (args.length > 0)? '?' + args.join('&') : '';
  },

  // get full items
  get full() { return this._full; },
  set full(value) {
    this._full = value;
    this._rebuildURL();
  },

  // Apply the action to a certain set of ids
  get ids() { return this._ids; },
  set ids(value) {
    this._ids = value;
    this._rebuildURL();
  },

  // Limit how many records to get
  get limit() { return this._limit; },
  set limit(value) {
    this._limit = value;
    this._rebuildURL();
  },

  // get only items modified before some date
  get older() { return this._older; },
  set older(value) {
    this._older = value;
    this._rebuildURL();
  },

  // get only items modified since some date
  get newer() { return this._newer; },
  set newer(value) {
    this._newer = value;
    this._rebuildURL();
  },

  // get items sorted by some criteria. valid values:
  // oldest (oldest first)
  // newest (newest first)
  // index
  get sort() { return this._sort; },
  set sort(value) {
    this._sort = value;
    this._rebuildURL();
  },

  get offset() { return this._offset; },
  set offset(value) {
    this._offset = value;
    this._rebuildURL();
  },

  // Set information about the batch for this request.
  get batch() { return this._batch; },
  set batch(value) {
    this._batch = value;
    this._rebuildURL();
  },

  get commit() { return this._commit; },
  set commit(value) {
    // `value && true` coerces truthy values to the boolean true.
    this._commit = value && true;
    this._rebuildURL();
  },

  // Similar to get(), but will page through the items `batchSize` at a time,
  // deferring calling the record handler until we've gotten them all.
  //
  // Returns the last response processed, and doesn't run the record handler
  // on any items if a non-success status is received while downloading the
  // records (or if a network error occurs).
  getBatched(batchSize = DEFAULT_DOWNLOAD_BATCH_SIZE) {
    let totalLimit = Number(this.limit) || Infinity;
    if (batchSize <= 0 || batchSize >= totalLimit) {
      // Invalid batch sizes should arguably be an error, but they're easy to handle
      return this.get();
    }

    if (!this.full) {
      throw new Error("getBatched is unimplemented for guid-only GETs");
    }

    // _onComplete and _onProgress are reset after each `get` by AsyncResource.
    // We overwrite _onRecord to something that stores the data in an array
    // until the end.
    let { _onComplete, _onProgress, _onRecord } = this;
    let recordBuffer = [];
    let resp;
    try {
      // Buffer records instead of dispatching them; the real handler runs
      // only after the whole download succeeded.
      this._onRecord = r => recordBuffer.push(r);
      let lastModifiedTime;
      this.limit = batchSize;

      do {
        this._onProgress = _onProgress;
        this._onComplete = _onComplete;
        // Shrink the final page so we never exceed the caller's total limit.
        if (batchSize + recordBuffer.length > totalLimit) {
          this.limit = totalLimit - recordBuffer.length;
        }
        this._log.trace("Performing batched GET", { limit: this.limit, offset: this.offset });
        // Actually perform the request
        resp = this.get();
        if (!resp.success) {
          break;
        }

        // Initialize last modified, or check that something broken isn't happening.
        let lastModified = resp.headers["x-last-modified"];
        if (!lastModifiedTime) {
          lastModifiedTime = lastModified;
          this.setHeader("X-If-Unmodified-Since", lastModified);
        } else if (lastModified != lastModifiedTime) {
          // Should be impossible -- We'd get a 412 in this case.
          throw new Error("X-Last-Modified changed in the middle of a download batch! " +
                          `${lastModified} => ${lastModifiedTime}`)
        }

        // If this is missing, we're finished.
        this.offset = resp.headers["x-weave-next-offset"];
      } while (this.offset && totalLimit > recordBuffer.length);
    } finally {
      // Ensure we undo any temporary state so that subsequent calls to get()
      // or getBatched() work properly. We do this before calling the record
      // handler so that we can more convincingly pretend to be a normal get()
      // call. Note: we're resetting these to the values they had before this
      // function was called.
      this._onRecord = _onRecord;
      this._limit = totalLimit;
      this._offset = null;
      delete this._headers["x-if-unmodified-since"];
      this._rebuildURL();
    }
    if (resp.success && Async.checkAppReady()) {
      // call the original _onRecord (e.g. the user supplied record handler)
      // for each record we've stored
      for (let record of recordBuffer) {
        this._onRecord(record);
      }
    }
    return resp;
  },

  // Install an incremental record handler: switches the response format to
  // newline-separated JSON and parses each completed line as it arrives.
  set recordHandler(onRecord) {
    // Save this because onProgress is called with this as the ChannelListener
    let coll = this;

    // Switch to newline separated records for incremental parsing
    coll.setHeader("Accept", "application/newlines");

    this._onRecord = onRecord;

    this._onProgress = function() {
      let newline;
      while ((newline = this._data.indexOf("\n")) > 0) {
        // Split the json record from the rest of the data
        let json = this._data.slice(0, newline);
        this._data = this._data.slice(newline + 1);

        // Deserialize a record from json and give it to the callback
        let record = new coll._recordObj();
        record.deserialize(json);
        coll._onRecord(record);
      }
    };
  },

  // This object only supports posting via the postQueue object.
  post() {
    throw new Error("Don't directly post to a collection - use newPostQueue instead");
  },

  // Build a PostQueue wired to POST against this collection, sized from the
  // server-advertised configuration (with local defaults as fallback).
  newPostQueue(log, timestamp, postCallback) {
    let poster = (data, headers, batch, commit) => {
      this.batch = batch;
      this.commit = commit;
      for (let [header, value] of headers) {
        this.setHeader(header, value);
      }
      return Resource.prototype.post.call(this, data);
    }
    let getConfig = (name, defaultVal) => {
      if (this._service.serverConfiguration && this._service.serverConfiguration.hasOwnProperty(name)) {
        return this._service.serverConfiguration[name];
      }
      return defaultVal;
    }

    let config = {
      max_post_bytes: getConfig("max_post_bytes", MAX_UPLOAD_BYTES),
      max_post_records: getConfig("max_post_records", MAX_UPLOAD_RECORDS),

      max_batch_bytes: getConfig("max_total_bytes", Infinity),
      max_batch_records: getConfig("max_total_records", Infinity),
    }

    // Handle config edge cases
    if (config.max_post_records <= 0) { config.max_post_records = MAX_UPLOAD_RECORDS; }
    if (config.max_batch_records <= 0) { config.max_batch_records = Infinity; }
    if (config.max_post_bytes <= 0) { config.max_post_bytes = MAX_UPLOAD_BYTES; }
    if (config.max_batch_bytes <= 0) { config.max_batch_bytes = Infinity; }

    // Max size of BSO payload is 256k. This assumes at most 4k of overhead,
    // which sounds like plenty. If the server says it can't handle this, we
    // might have valid records we can't sync, so we give up on syncing.
    let requiredMax = 260 * 1024;
    if (config.max_post_bytes < requiredMax) {
      this._log.error("Server configuration max_post_bytes is too low", config);
      throw new Error("Server configuration max_post_bytes is too low");
    }

    return new PostQueue(poster, timestamp, config, log, postCallback);
  },
};
+
/* A helper to manage the posting of records while respecting the various
   size limits.

   This supports the concept of a server-side "batch". The general idea is:
   * We queue as many records as allowed in memory, then make a single POST.
   * This first POST (optionally) gives us a batch ID, which we use for
     all subsequent posts, until...
   * At some point we hit a batch-maximum, and jump through a few hoops to
     commit the current batch (ie, all previous POSTs) and start a new one.
   * Eventually commit the final batch.

   In most cases we expect there to be exactly 1 batch consisting of possibly
   multiple POSTs.
*/
function PostQueue(poster, timestamp, config, log, postCallback) {
  // The function that performs the actual POST when we flush.
  this.poster = poster;
  this.log = log;

  // Limits; expected fields are "max_post_records", "max_batch_records",
  // "max_post_bytes" and "max_batch_bytes".
  this.config = config;

  // Invoked with each POST response (possibly several times per queue, from
  // enqueue() or the final flush()). Its second argument is true while a
  // server-side batch is still open, false once the batch is committed or
  // the server lacks batch semantics. The callback must not enqueue new
  // items.
  this.postCallback = postCallback;

  // Serialized records accumulated for the next POST. Deliberately kept as
  // invalid JSON: the closing "]" is only appended at flush time.
  this.queued = "";

  // How many records sit in |queued| awaiting a POST.
  this.numQueued = 0;

  // Records/bytes already POSTed in the current batch. Excludes anything
  // still sitting in |queued|.
  this.numAlreadyBatched = 0;
  this.bytesAlreadyBatched = 0;

  // Batch state: undefined means no POST has been made yet this batch (so we
  // don't know whether the server batches); null means the first POST showed
  // the server has no batch semantics; otherwise the server-issued batch ID.
  this.batchID = undefined;

  // Timestamp for X-If-Unmodified-Since -- should come from the last GET.
  this.lastModified = timestamp;
}
+
PostQueue.prototype = {
  /**
   * Add one record to the queue, flushing first if adding it would exceed
   * the per-POST or per-batch limits. Returns { enqueued: true } on success
   * or { enqueued: false, error } when a single record is too large to ever
   * be submitted.
   */
  enqueue(record) {
    // We want to ensure the record has a .toJSON() method defined - even
    // though JSON.stringify() would implicitly call it, the stringify might
    // still work even if it isn't defined, which isn't what we want.
    let jsonRepr = record.toJSON();
    if (!jsonRepr) {
      throw new Error("You must only call this with objects that explicitly support JSON");
    }
    let bytes = JSON.stringify(jsonRepr);

    // Do a flush if we can't add this record without exceeding our single-request
    // limits, or without exceeding the total limit for a single batch.
    let newLength = this.queued.length + bytes.length + 2; // extras for leading "[" / "," and trailing "]"

    // Cap at the 256k BSO limit regardless of what the server advertises.
    let maxAllowedBytes = Math.min(256 * 1024, this.config.max_post_bytes);

    let postSizeExceeded = this.numQueued >= this.config.max_post_records ||
                           newLength >= maxAllowedBytes;

    let batchSizeExceeded = (this.numQueued + this.numAlreadyBatched) >= this.config.max_batch_records ||
                            (newLength + this.bytesAlreadyBatched) >= this.config.max_batch_bytes;

    let singleRecordTooBig = bytes.length + 2 > maxAllowedBytes;

    if (postSizeExceeded || batchSizeExceeded) {
      this.log.trace(`PostQueue flushing due to postSizeExceeded=${postSizeExceeded}, batchSizeExceeded=${batchSizeExceeded}` +
                     `, max_batch_bytes: ${this.config.max_batch_bytes}, max_post_bytes: ${this.config.max_post_bytes}`);

      if (singleRecordTooBig) {
        return { enqueued: false, error: new Error("Single record too large to submit to server") };
      }

      // We need to write the queue out before handling this one, but we only
      // commit the batch (and thus start a new one) if the batch is full.
      // Note that if a single record is too big for the batch or post, then
      // the batch may be empty, and so we don't flush in that case.
      if (this.numQueued) {
        this.flush(batchSizeExceeded || singleRecordTooBig);
      }
    }
    // Either a ',' or a '[' depending on whether this is the first record.
    this.queued += this.numQueued ? "," : "[";
    this.queued += bytes;
    this.numQueued++;
    return { enqueued: true };
  },

  /**
   * POST everything currently queued. When finalBatchPost is true this also
   * commits the server-side batch. Invokes this.postCallback with each
   * response; updates batchID/lastModified from the server's reply.
   */
  flush(finalBatchPost) {
    if (!this.queued) {
      // nothing queued - we can't be in a batch, and something has gone very
      // bad if we think we are.
      if (this.batchID) {
        throw new Error(`Flush called when no queued records but we are in a batch ${this.batchID}`);
      }
      return;
    }
    // the batch query-param and headers we'll send.
    let batch;
    let headers = [];
    if (this.batchID === undefined) {
      // First commit in a (possible) batch.
      batch = "true";
    } else if (this.batchID) {
      // We have an existing batch.
      batch = this.batchID;
    } else {
      // Not the first post and we know we have no batch semantics.
      batch = null;
    }

    headers.push(["x-if-unmodified-since", this.lastModified]);

    this.log.info(`Posting ${this.numQueued} records of ${this.queued.length+1} bytes with batch=${batch}`);
    // Close the JSON array that enqueue() has been building up.
    let queued = this.queued + "]";
    if (finalBatchPost) {
      this.bytesAlreadyBatched = 0;
      this.numAlreadyBatched = 0;
    } else {
      this.bytesAlreadyBatched += queued.length;
      this.numAlreadyBatched += this.numQueued;
    }
    this.queued = "";
    this.numQueued = 0;
    // Only send commit=true when committing a batch the server knows about.
    let response = this.poster(queued, headers, batch, !!(finalBatchPost && this.batchID !== null));

    if (!response.success) {
      this.log.trace("Server error response during a batch", response);
      // not clear what we should do here - we expect the consumer of this to
      // abort by throwing in the postCallback below.
      return this.postCallback(response, !finalBatchPost);
    }

    if (finalBatchPost) {
      this.log.trace("Committed batch", this.batchID);
      this.batchID = undefined; // we are now in "first post for the batch" state.
      this.lastModified = response.headers["x-last-modified"];
      return this.postCallback(response, false);
    }

    if (response.status != 202) {
      if (this.batchID) {
        throw new Error("Server responded non-202 success code while a batch was in progress");
      }
      this.batchID = null; // no batch semantics are in place.
      this.lastModified = response.headers["x-last-modified"];
      return this.postCallback(response, false);
    }

    // this response is saying the server has batch semantics - we should
    // always have a batch ID in the response.
    let responseBatchID = response.obj.batch;
    this.log.trace("Server responsed 202 with batch", responseBatchID);
    if (!responseBatchID) {
      this.log.error("Invalid server response: 202 without a batch ID", response);
      throw new Error("Invalid server response: 202 without a batch ID");
    }

    if (this.batchID === undefined) {
      this.batchID = responseBatchID;
      if (!this.lastModified) {
        this.lastModified = response.headers["x-last-modified"];
        if (!this.lastModified) {
          throw new Error("Batch response without x-last-modified");
        }
      }
    }

    if (this.batchID != responseBatchID) {
      throw new Error(`Invalid client/server batch state - client has ${this.batchID}, server has ${responseBatchID}`);
    }

    this.postCallback(response, true);
  },
}
diff --git a/services/sync/modules/resource.js b/services/sync/modules/resource.js
new file mode 100644
index 000000000..bf7066b9f
--- /dev/null
+++ b/services/sync/modules/resource.js
@@ -0,0 +1,669 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+this.EXPORTED_SYMBOLS = [
+ "AsyncResource",
+ "Resource"
+];
+
+var Cc = Components.classes;
+var Ci = Components.interfaces;
+var Cr = Components.results;
+var Cu = Components.utils;
+
+Cu.import("resource://gre/modules/Preferences.jsm");
+Cu.import("resource://gre/modules/NetUtil.jsm");
+Cu.import("resource://services-common/async.js");
+Cu.import("resource://gre/modules/Log.jsm");
+Cu.import("resource://services-common/observers.js");
+Cu.import("resource://services-common/utils.js");
+Cu.import("resource://services-sync/constants.js");
+Cu.import("resource://services-sync/util.js");
+
+const DEFAULT_LOAD_FLAGS =
+ // Always validate the cache:
+ Ci.nsIRequest.LOAD_BYPASS_CACHE |
+ Ci.nsIRequest.INHIBIT_CACHING |
+ // Don't send user cookies over the wire (Bug 644734).
+ Ci.nsIRequest.LOAD_ANONYMOUS;
+
+/*
+ * AsyncResource represents a remote network resource, identified by a URI.
+ * Create an instance like so:
+ *
+ * let resource = new AsyncResource("http://foobar.com/path/to/resource");
+ *
+ * The 'resource' object has the following methods to issue HTTP requests
+ * of the corresponding HTTP methods:
+ *
+ * get(callback)
+ * put(data, callback)
+ * post(data, callback)
+ * delete(callback)
+ *
+ * 'callback' is a function with the following signature:
+ *
+ * function callback(error, result) {...}
+ *
+ * 'error' will be null on successful requests. Likewise, result will not be
+ * passed (=undefined) when an error occurs. Note that this is independent of
+ * the status of the HTTP response.
+ */
+// Constructor: sets up the per-instance logger, URI, and an empty header map,
+// and binds _onComplete so it can be handed to ChannelListener as a bare
+// callback while keeping `this`.
+this.AsyncResource = function AsyncResource(uri) {
+  this._log = Log.repository.getLogger(this._logName);
+  this._log.level =
+    Log.Level[Svc.Prefs.get("log.logger.network.resources")];
+  this.uri = uri;
+  this._headers = {};
+  // Utils.bind2 fixes `this` so the channel listener can call it directly.
+  this._onComplete = Utils.bind2(this, this._onComplete);
+}
+AsyncResource.prototype = {
+  _logName: "Sync.AsyncResource",
+
+  // ** {{{ AsyncResource.serverTime }}} **
+  //
+  // Caches the latest server timestamp (X-Weave-Timestamp header).
+  // NOTE(review): ChannelListener.onStartRequest assigns this as a static
+  // property on the AsyncResource constructor, not on instances, so instance
+  // reads keep seeing this prototype default — confirm consumers read
+  // AsyncResource.serverTime directly.
+  serverTime: null,
+
+  /**
+   * Callback to be invoked at request time to add authentication details.
+   *
+   * By default, a global authenticator is provided. If this is set, it will
+   * be used instead of the global one.
+   */
+  authenticator: null,
+
+  // Wait 5 minutes before killing a request.
+  ABORT_TIMEOUT: 300000,
+
+  // ** {{{ AsyncResource.headers }}} **
+  //
+  // Headers to be included when making a request for the resource.
+  // Note: Header names should be all lower case, there's no explicit
+  // check for duplicates due to case!
+  get headers() {
+    return this._headers;
+  },
+  set headers(value) {
+    this._headers = value;
+  },
+  // Store a single header, normalizing the name to lower case.
+  setHeader: function Res_setHeader(header, value) {
+    this._headers[header.toLowerCase()] = value;
+  },
+  // Names of all currently-set headers (lower-cased keys).
+  get headerNames() {
+    return Object.keys(this.headers);
+  },
+
+  // ** {{{ AsyncResource.uri }}} **
+  //
+  // URI representing this resource. Accepts either an nsIURI or a string,
+  // which is converted via CommonUtils.makeURI.
+  get uri() {
+    return this._uri;
+  },
+  set uri(value) {
+    if (typeof value == 'string')
+      this._uri = CommonUtils.makeURI(value);
+    else
+      this._uri = value;
+  },
+
+  // ** {{{ AsyncResource.spec }}} **
+  //
+  // Get the string representation of the URI, or null if no URI is set.
+  get spec() {
+    if (this._uri)
+      return this._uri.spec;
+    return null;
+  },
+
+  // ** {{{ AsyncResource.data }}} **
+  //
+  // Get and set the data encapsulated in the resource (the last request or
+  // response body).
+  _data: null,
+  get data() {
+    return this._data;
+  },
+  set data(value) {
+    this._data = value;
+  },
+
+  // ** {{{ AsyncResource._createRequest }}} **
+  //
+  // This method returns a new IO Channel for requests to be made
+  // through. It is never called directly, only {{{_doRequest}}} uses it
+  // to obtain a request channel.
+  //
+  // @param method HTTP method name, passed to the authenticator so it can
+  //               sign the request.
+  // @returns a configured nsIHttpChannel with load flags, notification
+  //          callbacks, UA, and all headers applied.
+  _createRequest: function Res__createRequest(method) {
+    let channel = NetUtil.newChannel({uri: this.spec, loadUsingSystemPrincipal: true})
+                         .QueryInterface(Ci.nsIRequest)
+                         .QueryInterface(Ci.nsIHttpChannel);
+
+    // Bypass/inhibit caching and stay anonymous (no cookies) — see
+    // DEFAULT_LOAD_FLAGS above.
+    channel.loadFlags |= DEFAULT_LOAD_FLAGS;
+
+    // Setup a callback to handle channel notifications (redirects, cert
+    // problems); the listener copies our headers across internal redirects.
+    let listener = new ChannelNotificationListener(this.headerNames);
+    channel.notificationCallbacks = listener;
+
+    // Compose a UA string fragment from the various available identifiers.
+    if (Svc.Prefs.get("sendVersionInfo", true)) {
+      channel.setRequestHeader("user-agent", Utils.userAgent, false);
+    }
+
+    let headers = this.headers;
+
+    if (this.authenticator) {
+      let result = this.authenticator(this, method);
+      if (result && result.headers) {
+        // NOTE(review): `headers` aliases this._headers, so authenticator
+        // headers are persisted on the resource, not just this request —
+        // confirm that is intended.
+        for (let [k, v] of Object.entries(result.headers)) {
+          headers[k.toLowerCase()] = v;
+        }
+      }
+    } else {
+      this._log.debug("No authenticator found.");
+    }
+
+    // Apply all headers to the channel, suppressing the authorization value
+    // in trace logs.
+    for (let [key, value] of Object.entries(headers)) {
+      if (key == 'authorization')
+        this._log.trace("HTTP Header " + key + ": ***** (suppressed)");
+      else
+        this._log.trace("HTTP Header " + key + ": " + headers[key]);
+      channel.setRequestHeader(key, headers[key], false);
+    }
+    return channel;
+  },
+
+  // Progress hook invoked from ChannelListener.onDataAvailable; no-op here,
+  // subclasses/instances may override.
+  _onProgress: function Res__onProgress(channel) {},
+
+  // Perform an HTTP request of the given `action` (method name). `data` is
+  // the request body for PUT/POST (omitted otherwise); `callback(error,
+  // result)` is stashed on the instance and invoked from _onComplete.
+  _doRequest: function _doRequest(action, data, callback) {
+    this._log.trace("In _doRequest.");
+    this._callback = callback;
+    let channel = this._createRequest(action);
+
+    // Only overwrite the cached body if a new one was explicitly supplied.
+    if ("undefined" != typeof(data))
+      this._data = data;
+
+    // PUT and POST are treated differently because they have payload data.
+    if ("PUT" == action || "POST" == action) {
+      // Convert non-string bodies into JSON. Both sides of this comparison
+      // coerce to the String function's source text, so it is effectively
+      // "constructor is not String".
+      if (this._data.constructor.toString() != String)
+        this._data = JSON.stringify(this._data);
+
+      this._log.debug(action + " Length: " + this._data.length);
+      this._log.trace(action + " Body: " + this._data);
+
+      let type = ('content-type' in this._headers) ?
+        this._headers['content-type'] : 'text/plain';
+
+      // NOTE(review): setData is given the JS string length (UTF-16 code
+      // units), which presumably assumes ASCII-safe payloads — confirm.
+      let stream = Cc["@mozilla.org/io/string-input-stream;1"].
+        createInstance(Ci.nsIStringInputStream);
+      stream.setData(this._data, this._data.length);
+
+      channel.QueryInterface(Ci.nsIUploadChannel);
+      channel.setUploadStream(stream, type, this._data.length);
+    }
+
+    // Setup a channel listener so that the actual network operation
+    // is performed asynchronously.
+    let listener = new ChannelListener(this._onComplete, this._onProgress,
+                                       this._log, this.ABORT_TIMEOUT);
+    channel.requestMethod = action;
+    try {
+      channel.asyncOpen2(listener);
+    } catch (ex) {
+      // asyncOpen2 can throw in a bunch of cases -- e.g., a forbidden port.
+      this._log.warn("Caught an error in asyncOpen2", ex);
+      CommonUtils.nextTick(callback.bind(this, ex));
+    }
+  },
+
+  // Channel completion callback (invoked via ChannelListener). On error,
+  // forwards it to the pending request callback. On success, builds a
+  // String-object result carrying url/status/success/headers plus a lazy
+  // `obj` getter that JSON-parses the body, then invokes the callback with
+  // (null, result).
+  //
+  // @param error   Error/exception from the channel, or null on success.
+  // @param data    Response body text (null when the body was empty).
+  // @param channel The nsIHttpChannel the request was made on.
+  _onComplete: function _onComplete(error, data, channel) {
+    this._log.trace("In _onComplete. Error is " + error + ".");
+
+    if (error) {
+      this._callback(error);
+      return;
+    }
+
+    this._data = data;
+    let action = channel.requestMethod;
+
+    this._log.trace("Channel: " + channel);
+    this._log.trace("Action: " + action);
+
+    // Process status and success first. This way a problem with headers
+    // doesn't fail to include accurate status information.
+    let status = 0;
+    let success = false;
+
+    try {
+      status = channel.responseStatus;
+      success = channel.requestSucceeded;    // HTTP status.
+
+      this._log.trace("Status: " + status);
+      this._log.trace("Success: " + success);
+
+      // Log the status of the request, truncated so a huge URI can't flood
+      // the log. (A previous stray debug line logged the untruncated message
+      // first, defeating the truncation — removed.)
+      let mesg = [action, success ? "success" : "fail", status,
+                  channel.URI.spec].join(" ");
+      if (mesg.length > 200)
+        mesg = mesg.substr(0, 200) + "…";
+      this._log.debug(mesg);
+
+      // Additionally give the full response body when Trace logging.
+      if (this._log.level <= Log.Level.Trace)
+        this._log.trace(action + " body: " + data);
+
+    } catch (ex) {
+      // Got a response, but an exception occurred during processing.
+      // This shouldn't occur.
+      this._log.warn("Caught unexpected exception in _oncomplete", ex);
+    }
+
+    // Process headers. They can be empty, or the call can otherwise fail, so
+    // put this in its own try block.
+    let headers = {};
+    try {
+      this._log.trace("Processing response headers.");
+
+      // Read out the response headers if available, lower-casing the names.
+      channel.visitResponseHeaders({
+        visitHeader: function visitHeader(header, value) {
+          headers[header.toLowerCase()] = value;
+        }
+      });
+
+      // This is a server-side safety valve to allow slowing down
+      // clients without hurting performance.
+      if (headers["x-weave-backoff"]) {
+        let backoff = headers["x-weave-backoff"];
+        this._log.debug("Got X-Weave-Backoff: " + backoff);
+        Observers.notify("weave:service:backoff:interval",
+                         parseInt(backoff, 10));
+      }
+
+      if (success && headers["x-weave-quota-remaining"]) {
+        Observers.notify("weave:service:quota:remaining",
+                         parseInt(headers["x-weave-quota-remaining"], 10));
+      }
+
+      // Warn (but don't fail) when the body length disagrees with the
+      // advertised content-length.
+      let contentLength = headers["content-length"];
+      if (success && contentLength && data &&
+          contentLength != data.length) {
+        this._log.warn("The response body's length of: " + data.length +
+                       " doesn't match the header's content-length of: " +
+                       contentLength + ".");
+      }
+    } catch (ex) {
+      this._log.debug("Caught exception visiting headers in _onComplete", ex);
+    }
+
+    // A String object (not primitive) so extra properties can be attached.
+    let ret     = new String(data);
+    ret.url     = channel.URI.spec;
+    ret.status  = status;
+    ret.success = success;
+    ret.headers = headers;
+
+    if (!success) {
+      this._log.warn(`${action} request to ${ret.url} failed with status ${status}`);
+    }
+    // Make a lazy getter to convert the json response into an object.
+    // Note that this can cause a parse error to be thrown far away from the
+    // actual fetch, so be warned!
+    XPCOMUtils.defineLazyGetter(ret, "obj", function() {
+      try {
+        return JSON.parse(ret);
+      } catch (ex) {
+        this._log.warn("Got exception parsing response body", ex);
+        // Stringify to avoid possibly printing non-printable characters.
+        this._log.debug("Parse fail: Response body starts: \"" +
+                        JSON.stringify((ret + "").slice(0, 100)) +
+                        "\".");
+        throw ex;
+      }
+    }.bind(this));
+
+    this._callback(null, ret);
+  },
+
+  // Asynchronous HTTP verb helpers. Each takes a node-style callback
+  // (error, result). put/post also accept body data; when the data argument
+  // is omitted (a function is passed first), _doRequest falls back to the
+  // resource's cached this._data.
+  get: function get(callback) {
+    this._doRequest("GET", undefined, callback);
+  },
+
+  put: function put(data, callback) {
+    if (typeof data == "function")
+      [data, callback] = [undefined, data];
+    this._doRequest("PUT", data, callback);
+  },
+
+  post: function post(data, callback) {
+    if (typeof data == "function")
+      [data, callback] = [undefined, data];
+    this._doRequest("POST", data, callback);
+  },
+
+  delete: function delete_(callback) {
+    this._doRequest("DELETE", undefined, callback);
+  }
+};
+
+
+/*
+ * Represent a remote network resource, identified by a URI, with a
+ * synchronous API.
+ *
+ * 'Resource' is not recommended for new code. Use the asynchronous API of
+ * 'AsyncResource' instead.
+ */
+// Constructor: delegates all setup to AsyncResource.
+this.Resource = function Resource(uri) {
+  AsyncResource.call(this, uri);
+}
+Resource.prototype = {
+
+  __proto__: AsyncResource.prototype,
+
+  _logName: "Sync.Resource",
+
+  // ** {{{ Resource._request }}} **
+  //
+  // Perform a particular HTTP request on the resource. This method
+  // is never called directly, but is used by the high-level
+  // {{{get}}}, {{{put}}}, {{{post}}} and {{{delete}}} methods.
+  //
+  // Blocks by spinning the event loop (Async.waitForSyncCallback) until the
+  // async request completes; returns the result object or throws.
+  _request: function Res__request(action, data) {
+    let cb = Async.makeSyncCallback();
+    function callback(error, ret) {
+      if (error)
+        cb.throw(error);
+      else
+        cb(ret);
+    }
+
+    // The channel listener might get a failure code
+    try {
+      this._doRequest(action, data, callback);
+      return Async.waitForSyncCallback(cb);
+    } catch (ex) {
+      // Don't mask shutdown — callers must see it to stop syncing.
+      if (Async.isShutdownException(ex)) {
+        throw ex;
+      }
+      // These are plain quotes on purpose: Log.jsm substitutes ${...} from
+      // the params object — do not convert to a template literal.
+      this._log.warn("${action} request to ${url} failed: ${ex}",
+                     { action, url: this.uri.spec, ex });
+      // Combine the channel stack with this request stack. Need to create
+      // a new error object for that.
+      let error = Error(ex.message);
+      error.result = ex.result;
+      let chanStack = [];
+      if (ex.stack)
+        chanStack = ex.stack.trim().split(/\n/).slice(1);
+      let requestStack = error.stack.split(/\n/).slice(1);
+
+      // Strip out the args for the last 2 frames because they're usually HUGE!
+      for (let i = 0; i <= 1; i++)
+        requestStack[i] = requestStack[i].replace(/\(".*"\)@/, "(...)@");
+
+      error.stack = chanStack.concat(requestStack).join("\n");
+      throw error;
+    }
+  },
+
+  // ** {{{ Resource.get }}} **
+  //
+  // Perform a synchronous (event-loop-spinning) HTTP GET for this resource.
+  get: function Res_get() {
+    return this._request("GET");
+  },
+
+  // ** {{{ Resource.put }}} **
+  //
+  // Perform a HTTP PUT for this resource.
+  put: function Res_put(data) {
+    return this._request("PUT", data);
+  },
+
+  // ** {{{ Resource.post }}} **
+  //
+  // Perform a HTTP POST for this resource.
+  post: function Res_post(data) {
+    return this._request("POST", data);
+  },
+
+  // ** {{{ Resource.delete }}} **
+  //
+  // Perform a HTTP DELETE for this resource.
+  delete: function Res_delete() {
+    return this._request("DELETE");
+  }
+};
+
+// = ChannelListener =
+//
+// This object implements the {{{nsIStreamListener}}} interface
+// and is called as the network operation proceeds.
+// Constructor. @param onComplete callback(error, data, channel);
+// @param onProgress per-chunk progress hook; @param logger Log.jsm logger;
+// @param timeout inactivity timeout in ms. The abort timer starts
+// immediately so a channel that never calls back still gets killed.
+function ChannelListener(onComplete, onProgress, logger, timeout) {
+  this._onComplete = onComplete;
+  this._onProgress = onProgress;
+  this._log = logger;
+  this._timeout = timeout;
+  this.delayAbort();
+}
+ChannelListener.prototype = {
+
+  onStartRequest: function Channel_onStartRequest(channel) {
+    this._log.trace("onStartRequest called for channel " + channel + ".");
+
+    try {
+      channel.QueryInterface(Ci.nsIHttpChannel);
+    } catch (ex) {
+      this._log.error("Unexpected error: channel is not a nsIHttpChannel!");
+      channel.cancel(Cr.NS_BINDING_ABORTED);
+      return;
+    }
+
+    // Save the latest server timestamp when possible. Note this is set as a
+    // static property on the AsyncResource constructor ("- 0" coerces the
+    // header to a number); the getResponseHeader throw (no header) is
+    // deliberately swallowed.
+    try {
+      AsyncResource.serverTime = channel.getResponseHeader("X-Weave-Timestamp") - 0;
+    }
+    catch(ex) {}
+
+    this._log.trace("onStartRequest: " + channel.requestMethod + " " +
+                    channel.URI.spec);
+    // Accumulates the response body across onDataAvailable calls.
+    this._data = '';
+    this.delayAbort();
+  },
+
+  onStopRequest: function Channel_onStopRequest(channel, context, status) {
+    // Clear the abort timer now that the channel is done.
+    this.abortTimer.clear();
+
+    if (!this._onComplete) {
+      this._log.error("Unexpected error: _onComplete not defined in onStopRequest.");
+      this._onProgress = null;
+      return;
+    }
+
+    try {
+      channel.QueryInterface(Ci.nsIHttpChannel);
+    } catch (ex) {
+      this._log.error("Unexpected error: channel is not a nsIHttpChannel!");
+
+      this._onComplete(ex, this._data, channel);
+      this._onComplete = this._onProgress = null;
+      return;
+    }
+
+    let statusSuccess = Components.isSuccessCode(status);
+    let uri = channel && channel.URI && channel.URI.spec || "<unknown>";
+    this._log.trace("Channel for " + channel.requestMethod + " " + uri + ": " +
+                    "isSuccessCode(" + status + ")? " + statusSuccess);
+
+    // Normalize an empty body to null so callers can distinguish "no body".
+    if (this._data == '') {
+      this._data = null;
+    }
+
+    // Pass back the failure code and stop execution. Use Components.Exception()
+    // instead of Error() so the exception is QI-able and can be passed across
+    // XPCOM borders while preserving the status code.
+    if (!statusSuccess) {
+      let message = Components.Exception("", status).name;
+      let error   = Components.Exception(message, status);
+
+      this._onComplete(error, undefined, channel);
+      this._onComplete = this._onProgress = null;
+      return;
+    }
+
+    this._log.trace("Channel: flags = " + channel.loadFlags +
+                    ", URI = " + uri +
+                    ", HTTP success? " + channel.requestSucceeded);
+    this._onComplete(null, this._data, channel);
+    // Drop references so a stray second call is detected, not double-fired.
+    this._onComplete = this._onProgress = null;
+  },
+
+  onDataAvailable: function Channel_onDataAvail(req, cb, stream, off, count) {
+    let siStream;
+    try {
+      siStream = Cc["@mozilla.org/scriptableinputstream;1"].createInstance(Ci.nsIScriptableInputStream);
+      siStream.init(stream);
+    } catch (ex) {
+      this._log.warn("Exception creating nsIScriptableInputStream", ex);
+      this._log.debug("Parameters: " + req.URI.spec + ", " + stream + ", " + off + ", " + count);
+      // Cannot proceed, so rethrow and allow the channel to cancel itself.
+      throw ex;
+    }
+
+    try {
+      this._data += siStream.read(count);
+    } catch (ex) {
+      this._log.warn("Exception thrown reading " + count + " bytes from " + siStream + ".");
+      throw ex;
+    }
+
+    try {
+      this._onProgress();
+    } catch (ex) {
+      if (Async.isShutdownException(ex)) {
+        throw ex;
+      }
+      this._log.warn("Got exception calling onProgress handler during fetch of "
+                     + req.URI.spec, ex);
+      this._log.trace("Rethrowing; expect a failure code from the HTTP channel.");
+      throw ex;
+    }
+
+    // Data arrived, so push the inactivity deadline out again.
+    this.delayAbort();
+  },
+
+  /**
+   * Create or push back the abort timer that kills this request.
+   */
+  delayAbort: function delayAbort() {
+    try {
+      CommonUtils.namedTimer(this.abortRequest, this._timeout, this, "abortTimer");
+    } catch (ex) {
+      this._log.warn("Got exception extending abort timer", ex);
+    }
+  },
+
+  // Fired by the abort timer after this._timeout ms of channel inactivity:
+  // neuters onStopRequest and reports NS_ERROR_NET_TIMEOUT to the caller.
+  abortRequest: function abortRequest() {
+    // Ignore any callbacks if we happen to get any now
+    this.onStopRequest = function() {};
+    let error = Components.Exception("Aborting due to channel inactivity.",
+                                     Cr.NS_ERROR_NET_TIMEOUT);
+    if (!this._onComplete) {
+      this._log.error("Unexpected error: _onComplete not defined in " +
+                      "abortRequest.");
+      return;
+    }
+    this._onComplete(error);
+  }
+};
+
+/**
+ * This class handles channel notification events.
+ *
+ * An instance of this class is bound to each created channel.
+ *
+ * Optionally pass an array of header names. Each header named
+ * in this array will be copied between the channels in the
+ * event of a redirect.
+ */
+// Constructor. @param headersToCopy array of (lower-case) request header
+// names to re-apply to the new channel on safe internal redirects.
+function ChannelNotificationListener(headersToCopy) {
+  this._headersToCopy = headersToCopy;
+
+  this._log = Log.repository.getLogger(this._logName);
+  this._log.level = Log.Level[Svc.Prefs.get("log.logger.network.resources")];
+}
+ChannelNotificationListener.prototype = {
+  _logName: "Sync.Resource",
+
+  getInterface: function(aIID) {
+    return this.QueryInterface(aIID);
+  },
+
+  // Manual QI: this object serves as its own bad-cert listener,
+  // interface requestor, and channel event sink.
+  QueryInterface: function(aIID) {
+    if (aIID.equals(Ci.nsIBadCertListener2) ||
+        aIID.equals(Ci.nsIInterfaceRequestor) ||
+        aIID.equals(Ci.nsISupports) ||
+        aIID.equals(Ci.nsIChannelEventSink))
+      return this;
+
+    throw Cr.NS_ERROR_NO_INTERFACE;
+  },
+
+  notifyCertProblem: function certProblem(socketInfo, sslStatus, targetHost) {
+    let log = Log.repository.getLogger("Sync.CertListener");
+    log.warn("Invalid HTTPS certificate encountered!");
+
+    // This suppresses the UI warning only. The request is still cancelled.
+    return true;
+  },
+
+  // nsIChannelEventSink: called for every redirect. Re-applies our load
+  // flags to the new channel, copies caller-set headers for internal
+  // same-URI redirects only, and always allows the redirect to proceed.
+  asyncOnChannelRedirect:
+    function asyncOnChannelRedirect(oldChannel, newChannel, flags, callback) {
+
+    let oldSpec = (oldChannel && oldChannel.URI) ? oldChannel.URI.spec : "<undefined>";
+    let newSpec = (newChannel && newChannel.URI) ? newChannel.URI.spec : "<undefined>";
+    this._log.debug("Channel redirect: " + oldSpec + ", " + newSpec + ", " + flags);
+
+    this._log.debug("Ensuring load flags are set.");
+    newChannel.loadFlags |= DEFAULT_LOAD_FLAGS;
+
+    // For internal redirects, copy the headers that our caller set.
+    try {
+      if ((flags & Ci.nsIChannelEventSink.REDIRECT_INTERNAL) &&
+          newChannel.URI.equals(oldChannel.URI)) {
+        this._log.debug("Copying headers for safe internal redirect.");
+
+        // QI the channel so we can set headers on it.
+        try {
+          newChannel.QueryInterface(Ci.nsIHttpChannel);
+        } catch (ex) {
+          this._log.error("Unexpected error: channel is not a nsIHttpChannel!");
+          throw ex;
+        }
+
+        for (let header of this._headersToCopy) {
+          let value = oldChannel.getRequestHeader(header);
+          if (value) {
+            // Never log the authorization value itself.
+            let printed = (header == "authorization") ? "****" : value;
+            this._log.debug("Header: " + header + " = " + printed);
+            newChannel.setRequestHeader(header, value, false);
+          } else {
+            this._log.warn("No value for header " + header);
+          }
+        }
+      }
+    } catch (ex) {
+      this._log.error("Error copying headers", ex);
+    }
+
+    // We let all redirects proceed.
+    try {
+      callback.onRedirectVerifyCallback(Cr.NS_OK);
+    } catch (ex) {
+      this._log.error("onRedirectVerifyCallback threw!", ex);
+    }
+  }
+};
diff --git a/services/sync/modules/rest.js b/services/sync/modules/rest.js
new file mode 100644
index 000000000..94c096dba
--- /dev/null
+++ b/services/sync/modules/rest.js
@@ -0,0 +1,90 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+var {classes: Cc, interfaces: Ci, utils: Cu, results: Cr} = Components;
+
+Cu.import("resource://gre/modules/Log.jsm");
+Cu.import("resource://services-common/rest.js");
+Cu.import("resource://services-sync/util.js");
+Cu.import("resource://services-sync/constants.js");
+
+this.EXPORTED_SYMBOLS = ["SyncStorageRequest"];
+
+const STORAGE_REQUEST_TIMEOUT = 5 * 60; // 5 minutes
+
+/**
+ * RESTRequest variant for use against a Sync storage server.
+ */
+// Constructor: a RESTRequest with Sync-specific auth and header handling.
+this.SyncStorageRequest = function SyncStorageRequest(uri) {
+  RESTRequest.call(this, uri);
+
+  this.authenticator = null;
+}
+SyncStorageRequest.prototype = {
+
+  __proto__: RESTRequest.prototype,
+
+  _logName: "Sync.StorageRequest",
+
+  /**
+   * Wait 5 minutes before killing a request.
+   */
+  timeout: STORAGE_REQUEST_TIMEOUT,
+
+  // Applies UA and authenticator headers, then defers to RESTRequest.
+  dispatch: function dispatch(method, data, onComplete, onProgress) {
+    // Compose a UA string fragment from the various available identifiers.
+    if (Svc.Prefs.get("sendVersionInfo", true)) {
+      this.setHeader("user-agent", Utils.userAgent);
+    }
+
+    if (this.authenticator) {
+      this.authenticator(this);
+    } else {
+      this._log.debug("No authenticator found.");
+    }
+
+    return RESTRequest.prototype.dispatch.apply(this, arguments);
+  },
+
+  // Captures server timestamp and broadcasts backoff/quota notifications
+  // as soon as response headers are available.
+  onStartRequest: function onStartRequest(channel) {
+    RESTRequest.prototype.onStartRequest.call(this, channel);
+    if (this.status == this.ABORTED) {
+      return;
+    }
+
+    let headers = this.response.headers;
+    // Save the latest server timestamp when possible.
+    if (headers["x-weave-timestamp"]) {
+      SyncStorageRequest.serverTime = parseFloat(headers["x-weave-timestamp"]);
+    }
+
+    // This is a server-side safety valve to allow slowing down
+    // clients without hurting performance.
+    if (headers["x-weave-backoff"]) {
+      Svc.Obs.notify("weave:service:backoff:interval",
+                     parseInt(headers["x-weave-backoff"], 10));
+    }
+
+    if (this.response.success && headers["x-weave-quota-remaining"]) {
+      Svc.Obs.notify("weave:service:quota:remaining",
+                     parseInt(headers["x-weave-quota-remaining"], 10));
+    }
+  },
+
+  // Sanity-checks content-length against the received body before chaining
+  // to the base implementation.
+  onStopRequest: function onStopRequest(channel, context, statusCode) {
+    if (this.status != this.ABORTED) {
+      let resp = this.response;
+      let contentLength = resp.headers ? resp.headers["content-length"] : "";
+
+      if (resp.success && contentLength &&
+          contentLength != resp.body.length) {
+        this._log.warn("The response body's length of: " + resp.body.length +
+                       " doesn't match the header's content-length of: " +
+                       contentLength + ".");
+      }
+    }
+
+    RESTRequest.prototype.onStopRequest.apply(this, arguments);
+  }
+};
diff --git a/services/sync/modules/service.js b/services/sync/modules/service.js
new file mode 100644
index 000000000..5fc0fa7a7
--- /dev/null
+++ b/services/sync/modules/service.js
@@ -0,0 +1,1756 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+this.EXPORTED_SYMBOLS = ["Service"];
+
+var Cc = Components.classes;
+var Ci = Components.interfaces;
+var Cr = Components.results;
+var Cu = Components.utils;
+
+// How long before refreshing the cluster
+const CLUSTER_BACKOFF = 5 * 60 * 1000; // 5 minutes
+
+// How long a key to generate from an old passphrase.
+const PBKDF2_KEY_BYTES = 16;
+
+const CRYPTO_COLLECTION = "crypto";
+const KEYS_WBO = "keys";
+
+Cu.import("resource://gre/modules/Preferences.jsm");
+Cu.import("resource://gre/modules/XPCOMUtils.jsm");
+Cu.import("resource://gre/modules/Log.jsm");
+Cu.import("resource://services-sync/constants.js");
+Cu.import("resource://services-sync/engines.js");
+Cu.import("resource://services-sync/engines/clients.js");
+Cu.import("resource://services-sync/identity.js");
+Cu.import("resource://services-sync/policies.js");
+Cu.import("resource://services-sync/record.js");
+Cu.import("resource://services-sync/resource.js");
+Cu.import("resource://services-sync/rest.js");
+Cu.import("resource://services-sync/stages/enginesync.js");
+Cu.import("resource://services-sync/stages/declined.js");
+Cu.import("resource://services-sync/status.js");
+Cu.import("resource://services-sync/telemetry.js");
+Cu.import("resource://services-sync/userapi.js");
+Cu.import("resource://services-sync/util.js");
+
+const ENGINE_MODULES = {
+ Addons: "addons.js",
+ Bookmarks: "bookmarks.js",
+ Form: "forms.js",
+ History: "history.js",
+ Password: "passwords.js",
+ Prefs: "prefs.js",
+ Tab: "tabs.js",
+ ExtensionStorage: "extension-storage.js",
+};
+
+const STORAGE_INFO_TYPES = [INFO_COLLECTIONS,
+ INFO_COLLECTION_USAGE,
+ INFO_COLLECTION_COUNTS,
+ INFO_QUOTA];
+
+// Constructor: _notify wraps operations in weave:service:* observer events.
+function Sync11Service() {
+  this._notify = Utils.notify("weave:service:");
+}
+Sync11Service.prototype = {
+
+  _lock: Utils.lock,
+  _locked: false,
+  _loggedIn: false,
+
+  // Cached per-user URLs, populated by _updateCachedURLs.
+  infoURL: null,
+  storageURL: null,
+  metaURL: null,
+  // NOTE(review): _updateCachedURLs reads/writes this.cryptoKeysURL (plural);
+  // this declared default appears to be unused — confirm and reconcile.
+  cryptoKeyURL: null,
+  // The cluster URL comes via the ClusterManager object, which in the FxA
+  // world is embedded in the token returned from the token server.
+  _clusterURL: null,
+
+  get serverURL() {
+    return Svc.Prefs.get("serverURL");
+  },
+  // Normalizes a trailing slash and invalidates the cluster URL, since a
+  // different server most likely uses a different cluster.
+  set serverURL(value) {
+    if (!value.endsWith("/")) {
+      value += "/";
+    }
+
+    // Only do work if it's actually changing
+    if (value == this.serverURL)
+      return;
+
+    Svc.Prefs.set("serverURL", value);
+
+    // A new server most likely uses a different cluster, so clear that.
+    this._clusterURL = null;
+  },
+
+  get clusterURL() {
+    return this._clusterURL || "";
+  },
+  // Setting the cluster recomputes all cached per-user URLs.
+  set clusterURL(value) {
+    if (value != null && typeof value != "string") {
+      throw new Error("cluster must be a string, got " + (typeof value));
+    }
+    this._clusterURL = value;
+    this._updateCachedURLs();
+  },
+
+  get miscAPI() {
+    // Append to the serverURL if it's a relative fragment
+    let misc = Svc.Prefs.get("miscURL");
+    if (misc.indexOf(":") == -1)
+      misc = this.serverURL + misc;
+    return misc + MISC_API_VERSION + "/";
+  },
+
+  /**
+   * The URI of the User API service.
+   *
+   * This is the base URI of the service as applicable to all users up to
+   * and including the server version path component, complete with trailing
+   * forward slash.
+   */
+  get userAPIURI() {
+    // Append to the serverURL if it's a relative fragment.
+    let url = Svc.Prefs.get("userURL");
+    if (!url.includes(":")) {
+      url = this.serverURL + url;
+    }
+
+    return url + USER_API_VERSION + "/";
+  },
+
+  get pwResetURL() {
+    return this.serverURL + "weave-password-reset";
+  },
+
+  // Lazily generates and persists a random syncID on first read.
+  get syncID() {
+    // Generate a random syncID id we don't have one
+    let syncID = Svc.Prefs.get("client.syncID", "");
+    return syncID == "" ? this.syncID = Utils.makeGUID() : syncID;
+  },
+  set syncID(value) {
+    Svc.Prefs.set("client.syncID", value);
+  },
+
+  get isLoggedIn() { return this._loggedIn; },
+
+  get locked() { return this._locked; },
+  // Returns false when already locked; true when the lock was acquired.
+  lock: function lock() {
+    if (this._locked)
+      return false;
+    this._locked = true;
+    return true;
+  },
+  unlock: function unlock() {
+    this._locked = false;
+  },
+
+  // A specialized variant of Utils.catch.
+  // This provides a more informative error message when we're already syncing:
+  // see Bug 616568.
+  //
+  // @param func the function to wrap; lock exceptions are logged instead of
+  //             propagating (other exceptions follow Utils.catch behavior).
+  _catch: function _catch(func) {
+    function lockExceptions(ex) {
+      if (Utils.isLockException(ex)) {
+        // This only happens if we're syncing already.
+        this._log.info("Cannot start sync: already syncing?");
+      }
+    }
+
+    return Utils.catch.call(this, func, lockExceptions);
+  },
+
+  // Per-user storage base URL, or null before the cluster manager exists.
+  get userBaseURL() {
+    if (!this._clusterManager) {
+      return null;
+    }
+    return this._clusterManager.getUserBaseURL();
+  },
+
+  // Recomputes (or clears) the cached storage URLs derived from the current
+  // cluster URL and signed-in user.
+  _updateCachedURLs: function _updateCachedURLs() {
+    // Nothing to cache yet if we don't have the building blocks
+    if (!this.clusterURL || !this.identity.username) {
+      // Also reset all other URLs used by Sync to ensure we aren't accidentally
+      // using one cached earlier - if there's no cluster URL any cached ones
+      // are invalid.
+      this.infoURL = undefined;
+      this.storageURL = undefined;
+      this.metaURL = undefined;
+      this.cryptoKeysURL = undefined;
+      return;
+    }
+
+    this._log.debug("Caching URLs under storage user base: " + this.userBaseURL);
+
+    // Generate and cache various URLs under the storage API for this user
+    this.infoURL = this.userBaseURL + "info/collections";
+    this.storageURL = this.userBaseURL + "storage/";
+    this.metaURL = this.storageURL + "meta/global";
+    this.cryptoKeysURL = this.storageURL + CRYPTO_COLLECTION + "/" + KEYS_WBO;
+  },
+
+  // Smoke-tests the crypto component by generating a random IV; returns
+  // true when crypto is usable.
+  _checkCrypto: function _checkCrypto() {
+    let ok = false;
+
+    try {
+      let iv = Svc.Crypto.generateRandomIV();
+      if (iv.length == 24)
+        ok = true;
+
+    } catch (e) {
+      this._log.debug("Crypto check failed: " + e);
+    }
+
+    return ok;
+  },
+
+  /**
+   * Here is a disgusting yet reasonable way of handling HMAC errors deep in
+   * the guts of Sync. The astute reader will note that this is a hacky way of
+   * implementing something like continuable conditions.
+   *
+   * A handler function is glued to each engine. If the engine discovers an
+   * HMAC failure, we fetch keys from the server and update our keys, just as
+   * we would on startup.
+   *
+   * If our key collection changed, we signal to the engine (via our return
+   * value) that it should retry decryption.
+   *
+   * If our key collection did not change, it means that we already had the
+   * correct keys... and thus a different client has the wrong ones. Reupload
+   * the bundle that we fetched, which will bump the modified time on the
+   * server and (we hope) prompt a broken client to fix itself.
+   *
+   * We keep track of the time at which we last applied this reasoning, because
+   * thrashing doesn't solve anything. We keep a reasonable interval between
+   * these remedial actions.
+   */
+  lastHMACEvent: 0,
+
+  /*
+   * Returns whether to try again (true = keys changed, retry decryption;
+   * false = throttled, fetch failed, or keys unchanged).
+   */
+  handleHMACEvent: function handleHMACEvent() {
+    let now = Date.now();
+
+    // Leave a sizable delay between HMAC recovery attempts. This gives us
+    // time for another client to fix themselves if we touch the record.
+    if ((now - this.lastHMACEvent) < HMAC_EVENT_INTERVAL)
+      return false;
+
+    this._log.info("Bad HMAC event detected. Attempting recovery " +
+                   "or signaling to other clients.");
+
+    // Set the last handled time so that we don't act again.
+    this.lastHMACEvent = now;
+
+    // Fetch keys.
+    let cryptoKeys = new CryptoWrapper(CRYPTO_COLLECTION, KEYS_WBO);
+    try {
+      let cryptoResp = cryptoKeys.fetch(this.resource(this.cryptoKeysURL)).response;
+
+      // Save out the ciphertext for when we reupload. If there's a bug in
+      // CollectionKeyManager, this will prevent us from uploading junk.
+      let cipherText = cryptoKeys.ciphertext;
+
+      if (!cryptoResp.success) {
+        this._log.warn("Failed to download keys.");
+        return false;
+      }
+
+      // skipReset=true: don't wipe the client just for comparing keys here.
+      let keysChanged = this.handleFetchedKeys(this.identity.syncKeyBundle,
+                                               cryptoKeys, true);
+      if (keysChanged) {
+        // Did they change? If so, carry on.
+        this._log.info("Suggesting retry.");
+        return true;              // Try again.
+      }
+
+      // If not, reupload them and continue the current sync.
+      cryptoKeys.ciphertext = cipherText;
+      cryptoKeys.cleartext  = null;
+
+      let uploadResp = cryptoKeys.upload(this.resource(this.cryptoKeysURL));
+      if (uploadResp.success)
+        this._log.info("Successfully re-uploaded keys. Continuing sync.");
+      else
+        this._log.warn("Got error response re-uploading keys. " +
+                       "Continuing sync; let's try again later.");
+
+      return false;            // Don't try again: same keys.
+
+    } catch (ex) {
+      this._log.warn("Got exception \"" + ex + "\" fetching and handling " +
+                     "crypto keys. Will try again later.");
+      return false;
+    }
+  },
+
+  // Merges freshly-fetched crypto/keys into collectionKeys. Returns true if
+  // the keys changed (optionally resetting affected engines, or the whole
+  // client on a default-key change, unless skipReset or starting from blank).
+  handleFetchedKeys: function handleFetchedKeys(syncKey, cryptoKeys, skipReset) {
+    // Don't want to wipe if we're just starting up!
+    let wasBlank = this.collectionKeys.isClear;
+    let keysChanged = this.collectionKeys.updateContents(syncKey, cryptoKeys);
+
+    if (keysChanged && !wasBlank) {
+      this._log.debug("Keys changed: " + JSON.stringify(keysChanged));
+
+      if (!skipReset) {
+        this._log.info("Resetting client to reflect key change.");
+
+        if (keysChanged.length) {
+          // Collection keys only. Reset individual engines.
+          this.resetClient(keysChanged);
+        }
+        else {
+          // Default key changed: wipe it all.
+          this.resetClient();
+        }
+
+        this._log.info("Downloaded new keys, client reset. Proceeding.");
+      }
+      return true;
+    }
+    return false;
+  },
+
+  /**
+   * Prepare to initialize the rest of Weave after waiting a little bit
+   *
+   * Wires the service to the shared Status singleton and its identity
+   * manager, creates the key/record/error/scheduler machinery, registers
+   * engines, hooks observers, and finally flips the `ready` flags on the
+   * next tick so observers can register before the notification fires.
+   */
+  onStartup: function onStartup() {
+    this._migratePrefs();
+
+    // Status is instantiated before us and is the first to grab an instance of
+    // the IdentityManager. We use that instance because IdentityManager really
+    // needs to be a singleton. Ideally, the longer-lived object would spawn
+    // this service instance.
+    if (!Status || !Status._authManager) {
+      throw new Error("Status or Status._authManager not initialized.");
+    }
+
+    this.status = Status;
+    this.identity = Status._authManager;
+    this.collectionKeys = new CollectionKeyManager();
+
+    this.errorHandler = new ErrorHandler(this);
+
+    this._log = Log.repository.getLogger("Sync.Service");
+    this._log.level =
+      Log.Level[Svc.Prefs.get("log.logger.service.main")];
+
+    this._log.info("Loading Weave " + WEAVE_VERSION);
+
+    this._clusterManager = this.identity.createClusterManager(this);
+    this.recordManager = new RecordManager(this);
+
+    this.enabled = true;
+
+    this._registerEngines();
+
+    // Log the UA string for diagnostics in sync logs.
+    let ua = Cc["@mozilla.org/network/protocol;1?name=http"].
+             getService(Ci.nsIHttpProtocolHandler).userAgent;
+    this._log.info(ua);
+
+    // Without working crypto, sync cannot function; disable the service.
+    if (!this._checkCrypto()) {
+      this.enabled = false;
+      this._log.info("Could not load the Weave crypto component. Disabling " +
+                     "Weave, since it will not work correctly.");
+    }
+
+    Svc.Obs.add("weave:service:setup-complete", this);
+    Svc.Obs.add("sync:collection_changed", this); // Pulled from FxAccountsCommon
+    Svc.Prefs.observe("engine.", this);
+
+    this.scheduler = new SyncScheduler(this);
+
+    if (!this.enabled) {
+      this._log.info("Firefox Sync disabled.");
+    }
+
+    this._updateCachedURLs();
+
+    // Start change tracking only if the client is actually configured.
+    let status = this._checkSetup();
+    if (status != STATUS_DISABLED && status != CLIENT_NOT_CONFIGURED) {
+      Svc.Obs.notify("weave:engine:start-tracking");
+    }
+
+    // Send an event now that Weave service is ready. We don't do this
+    // synchronously so that observers can import this module before
+    // registering an observer.
+    Utils.nextTick(function onNextTick() {
+      this.status.ready = true;
+
+      // UI code uses the flag on the XPCOM service so it doesn't have
+      // to load a bunch of modules.
+      let xps = Cc["@mozilla.org/weave/service;1"]
+                  .getService(Ci.nsISupports)
+                  .wrappedJSObject;
+      xps.ready = true;
+
+      Svc.Obs.notify("weave:service:ready");
+    }.bind(this));
+  },
+
+ _checkSetup: function _checkSetup() {
+ if (!this.enabled) {
+ return this.status.service = STATUS_DISABLED;
+ }
+ return this.status.checkSetup();
+ },
+
+ _migratePrefs: function _migratePrefs() {
+ // Migrate old debugLog prefs.
+ let logLevel = Svc.Prefs.get("log.appender.debugLog");
+ if (logLevel) {
+ Svc.Prefs.set("log.appender.file.level", logLevel);
+ Svc.Prefs.reset("log.appender.debugLog");
+ }
+ if (Svc.Prefs.get("log.appender.debugLog.enabled")) {
+ Svc.Prefs.set("log.appender.file.logOnSuccess", true);
+ Svc.Prefs.reset("log.appender.debugLog.enabled");
+ }
+
+ // Migrate old extensions.weave.* prefs if we haven't already tried.
+ if (Svc.Prefs.get("migrated", false))
+ return;
+
+ // Grab the list of old pref names
+ let oldPrefBranch = "extensions.weave.";
+ let oldPrefNames = Cc["@mozilla.org/preferences-service;1"].
+ getService(Ci.nsIPrefService).
+ getBranch(oldPrefBranch).
+ getChildList("", {});
+
+ // Map each old pref to the current pref branch
+ let oldPref = new Preferences(oldPrefBranch);
+ for (let pref of oldPrefNames)
+ Svc.Prefs.set(pref, oldPref.get(pref));
+
+ // Remove all the old prefs and remember that we've migrated
+ oldPref.resetBranch("");
+ Svc.Prefs.set("migrated", true);
+ },
+
+ /**
+ * Register the built-in engines for certain applications
+ */
+ _registerEngines: function _registerEngines() {
+ this.engineManager = new EngineManager(this);
+
+ let engines = [];
+ // Applications can provide this preference (comma-separated list)
+ // to specify which engines should be registered on startup.
+ let pref = Svc.Prefs.get("registerEngines");
+ if (pref) {
+ engines = pref.split(",");
+ }
+
+ let declined = [];
+ pref = Svc.Prefs.get("declinedEngines");
+ if (pref) {
+ declined = pref.split(",");
+ }
+
+ this.clientsEngine = new ClientEngine(this);
+
+ for (let name of engines) {
+ if (!name in ENGINE_MODULES) {
+ this._log.info("Do not know about engine: " + name);
+ continue;
+ }
+
+ let ns = {};
+ try {
+ Cu.import("resource://services-sync/engines/" + ENGINE_MODULES[name], ns);
+
+ let engineName = name + "Engine";
+ if (!(engineName in ns)) {
+ this._log.warn("Could not find exported engine instance: " + engineName);
+ continue;
+ }
+
+ this.engineManager.register(ns[engineName]);
+ } catch (ex) {
+ this._log.warn("Could not register engine " + name, ex);
+ }
+ }
+
+ this.engineManager.setDeclined(declined);
+ },
+
+  // XPCOM interface glue: we implement nsIObserver (see observe() below)
+  // and advertise weak-reference support so observer registrations do not
+  // keep this object alive on their own.
+  QueryInterface: XPCOMUtils.generateQI([Ci.nsIObserver,
+                                         Ci.nsISupportsWeakReference]),
+
+  // nsIObserver
+
+ observe: function observe(subject, topic, data) {
+ switch (topic) {
+ // Ideally this observer should be in the SyncScheduler, but it would require
+ // some work to know about the sync specific engines. We should move this there once it does.
+ case "sync:collection_changed":
+ if (data.includes("clients")) {
+ this.sync([]); // [] = clients collection only
+ }
+ break;
+ case "weave:service:setup-complete":
+ let status = this._checkSetup();
+ if (status != STATUS_DISABLED && status != CLIENT_NOT_CONFIGURED)
+ Svc.Obs.notify("weave:engine:start-tracking");
+ break;
+ case "nsPref:changed":
+ if (this._ignorePrefObserver)
+ return;
+ let engine = data.slice((PREFS_BRANCH + "engine.").length);
+ this._handleEngineStatusChanged(engine);
+ break;
+ }
+ },
+
+ _handleEngineStatusChanged: function handleEngineDisabled(engine) {
+ this._log.trace("Status for " + engine + " engine changed.");
+ if (Svc.Prefs.get("engineStatusChanged." + engine, false)) {
+ // The enabled status being changed back to what it was before.
+ Svc.Prefs.reset("engineStatusChanged." + engine);
+ } else {
+ // Remember that the engine status changed locally until the next sync.
+ Svc.Prefs.set("engineStatusChanged." + engine, true);
+ }
+ },
+
+ /**
+ * Obtain a Resource instance with authentication credentials.
+ */
+ resource: function resource(url) {
+ let res = new Resource(url);
+ res.authenticator = this.identity.getResourceAuthenticator();
+
+ return res;
+ },
+
+ /**
+ * Obtain a SyncStorageRequest instance with authentication credentials.
+ */
+ getStorageRequest: function getStorageRequest(url) {
+ let request = new SyncStorageRequest(url);
+ request.authenticator = this.identity.getRESTRequestAuthenticator();
+
+ return request;
+ },
+
+ /**
+ * Perform the info fetch as part of a login or key fetch, or
+ * inside engine sync.
+ */
+ _fetchInfo: function (url) {
+ let infoURL = url || this.infoURL;
+
+ this._log.trace("In _fetchInfo: " + infoURL);
+ let info;
+ try {
+ info = this.resource(infoURL).get();
+ } catch (ex) {
+ this.errorHandler.checkServerError(ex);
+ throw ex;
+ }
+
+ // Always check for errors; this is also where we look for X-Weave-Alert.
+ this.errorHandler.checkServerError(info);
+ if (!info.success) {
+ this._log.error("Aborting sync: failed to get collections.")
+ throw info;
+ }
+ return info;
+ },
+
+ verifyAndFetchSymmetricKeys: function verifyAndFetchSymmetricKeys(infoResponse) {
+
+ this._log.debug("Fetching and verifying -- or generating -- symmetric keys.");
+
+ // Don't allow empty/missing passphrase.
+ // Furthermore, we assume that our sync key is already upgraded,
+ // and fail if that assumption is invalidated.
+
+ if (!this.identity.syncKey) {
+ this.status.login = LOGIN_FAILED_NO_PASSPHRASE;
+ this.status.sync = CREDENTIALS_CHANGED;
+ return false;
+ }
+
+ let syncKeyBundle = this.identity.syncKeyBundle;
+ if (!syncKeyBundle) {
+ this._log.error("Sync Key Bundle not set. Invalid Sync Key?");
+
+ this.status.login = LOGIN_FAILED_INVALID_PASSPHRASE;
+ this.status.sync = CREDENTIALS_CHANGED;
+ return false;
+ }
+
+ try {
+ if (!infoResponse)
+ infoResponse = this._fetchInfo(); // Will throw an exception on failure.
+
+ // This only applies when the server is already at version 4.
+ if (infoResponse.status != 200) {
+ this._log.warn("info/collections returned non-200 response. Failing key fetch.");
+ this.status.login = LOGIN_FAILED_SERVER_ERROR;
+ this.errorHandler.checkServerError(infoResponse);
+ return false;
+ }
+
+ let infoCollections = infoResponse.obj;
+
+ this._log.info("Testing info/collections: " + JSON.stringify(infoCollections));
+
+ if (this.collectionKeys.updateNeeded(infoCollections)) {
+ this._log.info("collection keys reports that a key update is needed.");
+
+ // Don't always set to CREDENTIALS_CHANGED -- we will probably take care of this.
+
+ // Fetch storage/crypto/keys.
+ let cryptoKeys;
+
+ if (infoCollections && (CRYPTO_COLLECTION in infoCollections)) {
+ try {
+ cryptoKeys = new CryptoWrapper(CRYPTO_COLLECTION, KEYS_WBO);
+ let cryptoResp = cryptoKeys.fetch(this.resource(this.cryptoKeysURL)).response;
+
+ if (cryptoResp.success) {
+ let keysChanged = this.handleFetchedKeys(syncKeyBundle, cryptoKeys);
+ return true;
+ }
+ else if (cryptoResp.status == 404) {
+ // On failure, ask to generate new keys and upload them.
+ // Fall through to the behavior below.
+ this._log.warn("Got 404 for crypto/keys, but 'crypto' in info/collections. Regenerating.");
+ cryptoKeys = null;
+ }
+ else {
+ // Some other problem.
+ this.status.login = LOGIN_FAILED_SERVER_ERROR;
+ this.errorHandler.checkServerError(cryptoResp);
+ this._log.warn("Got status " + cryptoResp.status + " fetching crypto keys.");
+ return false;
+ }
+ }
+ catch (ex) {
+ this._log.warn("Got exception \"" + ex + "\" fetching cryptoKeys.");
+ // TODO: Um, what exceptions might we get here? Should we re-throw any?
+
+ // One kind of exception: HMAC failure.
+ if (Utils.isHMACMismatch(ex)) {
+ this.status.login = LOGIN_FAILED_INVALID_PASSPHRASE;
+ this.status.sync = CREDENTIALS_CHANGED;
+ }
+ else {
+ // In the absence of further disambiguation or more precise
+ // failure constants, just report failure.
+ this.status.login = LOGIN_FAILED;
+ }
+ return false;
+ }
+ }
+ else {
+ this._log.info("... 'crypto' is not a reported collection. Generating new keys.");
+ }
+
+ if (!cryptoKeys) {
+ this._log.info("No keys! Generating new ones.");
+
+ // Better make some and upload them, and wipe the server to ensure
+ // consistency. This is all achieved via _freshStart.
+ // If _freshStart fails to clear the server or upload keys, it will
+ // throw.
+ this._freshStart();
+ return true;
+ }
+
+ // Last-ditch case.
+ return false;
+ }
+ else {
+ // No update needed: we're good!
+ return true;
+ }
+
+ } catch (ex) {
+ // This means no keys are present, or there's a network error.
+ this._log.debug("Failed to fetch and verify keys", ex);
+ this.errorHandler.checkServerError(ex);
+ return false;
+ }
+ },
+
+  /**
+   * Verify that we can authenticate against our storage node.
+   *
+   * @param allow40XRecovery
+   *        (bool) on a 401/404 response, re-fetch the cluster once and
+   *        retry the verification (default true; the retry passes false).
+   * @return bool — true when login (and, on a 200, remote setup) succeeded;
+   *         failure reasons are recorded in this.status.login / .sync.
+   */
+  verifyLogin: function verifyLogin(allow40XRecovery = true) {
+    if (!this.identity.username) {
+      this._log.warn("No username in verifyLogin.");
+      this.status.login = LOGIN_FAILED_NO_USERNAME;
+      return false;
+    }
+
+    // Attaching auth credentials to a request requires access to
+    // passwords, which means that Resource.get can throw MP-related
+    // exceptions!
+    // So we ask the identity to verify the login state after unlocking the
+    // master password (ie, this call is expected to prompt for MP unlock
+    // if necessary) while we still have control.
+    let cb = Async.makeSpinningCallback();
+    this.identity.unlockAndVerifyAuthState().then(
+      result => cb(null, result),
+      cb
+    );
+    let unlockedState = cb.wait();
+    this._log.debug("Fetching unlocked auth state returned " + unlockedState);
+    if (unlockedState != STATUS_OK) {
+      this.status.login = unlockedState;
+      return false;
+    }
+
+    try {
+      // Make sure we have a cluster to verify against.
+      // This is a little weird, if we don't get a node we pretend
+      // to succeed, since that probably means we just don't have storage.
+      if (this.clusterURL == "" && !this._clusterManager.setCluster()) {
+        this.status.sync = NO_SYNC_NODE_FOUND;
+        return true;
+      }
+
+      // Fetch collection info on every startup.
+      let test = this.resource(this.infoURL).get();
+
+      switch (test.status) {
+        case 200:
+          // The user is authenticated.
+
+          // We have no way of verifying the passphrase right now,
+          // so wait until remoteSetup to do so.
+          // Just make the most trivial checks.
+          if (!this.identity.syncKey) {
+            this._log.warn("No passphrase in verifyLogin.");
+            this.status.login = LOGIN_FAILED_NO_PASSPHRASE;
+            return false;
+          }
+
+          // Go ahead and do remote setup, so that we can determine
+          // conclusively that our passphrase is correct.
+          if (this._remoteSetup(test)) {
+            // Username/password verified.
+            this.status.login = LOGIN_SUCCEEDED;
+            return true;
+          }
+
+          this._log.warn("Remote setup failed.");
+          // Remote setup must have failed.
+          return false;
+
+        case 401:
+          this._log.warn("401: login failed.");
+          // Fall through to the 404 case.
+          // (Deliberate fall-through: 401 and 404 share the recovery path.)
+
+        case 404:
+          // Check that we're verifying with the correct cluster
+          if (allow40XRecovery && this._clusterManager.setCluster()) {
+            return this.verifyLogin(false);
+          }
+
+          // We must have the right cluster, but the server doesn't expect us.
+          // The implications of this depend on the identity being used - for
+          // the legacy identity, it's an authoritatively "incorrect password",
+          // (ie, LOGIN_FAILED_LOGIN_REJECTED) but for FxA it probably means
+          // "transient error fetching auth token".
+          this.status.login = this.identity.loginStatusFromVerification404();
+          return false;
+
+        default:
+          // Server didn't respond with something that we expected
+          this.status.login = LOGIN_FAILED_SERVER_ERROR;
+          this.errorHandler.checkServerError(test);
+          return false;
+      }
+    } catch (ex) {
+      // Must have failed on some network issue
+      this._log.debug("verifyLogin failed", ex);
+      this.status.login = LOGIN_FAILED_NETWORK_ERROR;
+      this.errorHandler.checkServerError(ex);
+      return false;
+    }
+  },
+
+  /**
+   * Generate a brand-new set of collection keys, encrypt them with the
+   * sync key bundle, upload them, verify info/collections reflects the
+   * upload, then download and install the keys locally.
+   * Throws an Error (after routing responses through checkServerError)
+   * if any step fails.
+   */
+  generateNewSymmetricKeys: function generateNewSymmetricKeys() {
+    this._log.info("Generating new keys WBO...");
+    let wbo = this.collectionKeys.generateNewKeysWBO();
+    this._log.info("Encrypting new key bundle.");
+    wbo.encrypt(this.identity.syncKeyBundle);
+
+    this._log.info("Uploading...");
+    let uploadRes = wbo.upload(this.resource(this.cryptoKeysURL));
+    if (uploadRes.status != 200) {
+      this._log.warn("Got status " + uploadRes.status + " uploading new keys. What to do? Throw!");
+      this.errorHandler.checkServerError(uploadRes);
+      throw new Error("Unable to upload symmetric keys.");
+    }
+    this._log.info("Got status " + uploadRes.status + " uploading keys.");
+    let serverModified = uploadRes.obj; // Modified timestamp according to server.
+    this._log.debug("Server reports crypto modified: " + serverModified);
+
+    // Now verify that info/collections shows them!
+    this._log.debug("Verifying server collection records.");
+    let info = this._fetchInfo();
+    this._log.debug("info/collections is: " + info);
+
+    if (info.status != 200) {
+      this._log.warn("Non-200 info/collections response. Aborting.");
+      throw new Error("Unable to upload symmetric keys.");
+    }
+
+    info = info.obj;
+    if (!(CRYPTO_COLLECTION in info)) {
+      this._log.error("Consistency failure: info/collections excludes " +
+                      "crypto after successful upload.");
+      throw new Error("Symmetric key upload failed.");
+    }
+
+    // Can't check against local modified: clock drift.
+    if (info[CRYPTO_COLLECTION] < serverModified) {
+      this._log.error("Consistency failure: info/collections crypto entry " +
+                      "is stale after successful upload.");
+      throw new Error("Symmetric key upload failed.");
+    }
+
+    // Doesn't matter if the timestamp is ahead.
+
+    // Download and install them.
+    let cryptoKeys = new CryptoWrapper(CRYPTO_COLLECTION, KEYS_WBO);
+    let cryptoResp = cryptoKeys.fetch(this.resource(this.cryptoKeysURL)).response;
+    if (cryptoResp.status != 200) {
+      this._log.warn("Failed to download keys.");
+      throw new Error("Symmetric key download failed.");
+    }
+    // skipReset=true: we just generated these keys; nothing to wipe locally.
+    let keysChanged = this.handleFetchedKeys(this.identity.syncKeyBundle,
+                                             cryptoKeys, true);
+    if (keysChanged) {
+      this._log.info("Downloaded keys differed, as expected.");
+    }
+  },
+
+ changePassword: function changePassword(newPassword) {
+ let client = new UserAPI10Client(this.userAPIURI);
+ let cb = Async.makeSpinningCallback();
+ client.changePassword(this.identity.username,
+ this.identity.basicPassword, newPassword, cb);
+
+ try {
+ cb.wait();
+ } catch (ex) {
+ this._log.debug("Password change failed", ex);
+ return false;
+ }
+
+ // Save the new password for requests and login manager.
+ this.identity.basicPassword = newPassword;
+ this.persistLogin();
+ return true;
+ },
+
+ changePassphrase: function changePassphrase(newphrase) {
+ return this._catch(function doChangePasphrase() {
+ /* Wipe. */
+ this.wipeServer();
+
+ this.logout();
+
+ /* Set this so UI is updated on next run. */
+ this.identity.syncKey = newphrase;
+ this.persistLogin();
+
+ /* We need to re-encrypt everything, so reset. */
+ this.resetClient();
+ this.collectionKeys.clear();
+
+ /* Login and sync. This also generates new keys. */
+ this.sync();
+
+ Svc.Obs.notify("weave:service:change-passphrase", true);
+
+ return true;
+ })();
+ },
+
+  /**
+   * Disconnect this profile from Sync: delete our client data from the
+   * server (when we have a cluster), drop the sync key, log out, reset
+   * all engines/keys/prefs, and re-initialize the identity manager
+   * (unless the startOverKeepIdentity test pref says otherwise).
+   * Always ends by notifying "weave:service:start-over:finish".
+   */
+  startOver: function startOver() {
+    this._log.trace("Invoking Service.startOver.");
+    Svc.Obs.notify("weave:engine:stop-tracking");
+    this.status.resetSync();
+
+    // Deletion doesn't make sense if we aren't set up yet!
+    if (this.clusterURL != "") {
+      // Clear client-specific data from the server, including disabled engines.
+      for (let engine of [this.clientsEngine].concat(this.engineManager.getAll())) {
+        try {
+          engine.removeClientData();
+        } catch(ex) {
+          this._log.warn(`Deleting client data for ${engine.name} failed`, ex);
+        }
+      }
+      this._log.debug("Finished deleting client data.");
+    } else {
+      this._log.debug("Skipping client data removal: no cluster URL.");
+    }
+
+    // We want let UI consumers of the following notification know as soon as
+    // possible, so let's fake for the CLIENT_NOT_CONFIGURED status for now
+    // by emptying the passphrase (we still need the password).
+    this._log.info("Service.startOver dropping sync key and logging out.");
+    this.identity.resetSyncKey();
+    this.status.login = LOGIN_FAILED_NO_PASSPHRASE;
+    this.logout();
+    Svc.Obs.notify("weave:service:start-over");
+
+    // Reset all engines and clear keys.
+    this.resetClient();
+    this.collectionKeys.clear();
+    this.status.resetBackoff();
+
+    // Reset Weave prefs.
+    this._ignorePrefObserver = true;
+    Svc.Prefs.resetBranch("");
+    this._ignorePrefObserver = false;
+    this.clusterURL = null;
+
+    Svc.Prefs.set("lastversion", WEAVE_VERSION);
+
+    this.identity.deleteSyncCredentials();
+
+    // If necessary, reset the identity manager, then re-initialize it so the
+    // FxA manager is used. This is configurable via a pref - mainly for tests.
+    let keepIdentity = false;
+    try {
+      keepIdentity = Services.prefs.getBoolPref("services.sync-testing.startOverKeepIdentity");
+    } catch (_) { /* no such pref */ }
+    if (keepIdentity) {
+      Svc.Obs.notify("weave:service:start-over:finish");
+      return;
+    }
+
+    try {
+      this.identity.finalize();
+      // an observer so the FxA migration code can take some action before
+      // the new identity is created.
+      Svc.Obs.notify("weave:service:start-over:init-identity");
+      this.identity.username = "";
+      this.status.__authManager = null;
+      this.identity = Status._authManager;
+      this._clusterManager = this.identity.createClusterManager(this);
+      Svc.Obs.notify("weave:service:start-over:finish");
+    } catch (err) {
+      this._log.error("startOver failed to re-initialize the identity manager: " + err);
+      // Still send the observer notification so the current state is
+      // reflected in the UI.
+      Svc.Obs.notify("weave:service:start-over:finish");
+    }
+  },
+
+  /**
+   * Ask the identity manager to persist credentials to storage.
+   * Failures are logged and otherwise ignored.
+   */
+  persistLogin: function persistLogin() {
+    try {
+      this.identity.persistCredentials(true);
+    } catch (ex) {
+      this._log.info("Unable to persist credentials: " + ex);
+    }
+  },
+
+  /**
+   * Log in to Sync, optionally setting credentials first. Runs under the
+   * service lock and wrapped in login notifications.
+   *
+   * @param username   optional username to store before logging in.
+   * @param password   optional password to store before logging in.
+   * @param passphrase optional sync key to store before logging in.
+   * @return bool — true on success; otherwise throws.
+   *
+   * NOTE(review): this method throws bare strings rather than Error
+   * objects; callers appear to rely on that convention — confirm before
+   * changing.
+   */
+  login: function login(username, password, passphrase) {
+    function onNotify() {
+      this._loggedIn = false;
+      if (Services.io.offline) {
+        this.status.login = LOGIN_FAILED_NETWORK_ERROR;
+        throw "Application is offline, login should not be called";
+      }
+
+      // Capture setup state before storing credentials so we can tell
+      // whether this call completed first-time setup.
+      let initialStatus = this._checkSetup();
+      if (username) {
+        this.identity.username = username;
+      }
+      if (password) {
+        this.identity.basicPassword = password;
+      }
+      if (passphrase) {
+        this.identity.syncKey = passphrase;
+      }
+
+      if (this._checkSetup() == CLIENT_NOT_CONFIGURED) {
+        throw "Aborting login, client not configured.";
+      }
+
+      // Ask the identity manager to explicitly login now.
+      this._log.info("Logging in the user.");
+      let cb = Async.makeSpinningCallback();
+      this.identity.ensureLoggedIn().then(
+        () => cb(null),
+        err => cb(err || "ensureLoggedIn failed")
+      );
+
+      // Just let any errors bubble up - they've more context than we do!
+      cb.wait();
+
+      // Calling login() with parameters when the client was
+      // previously not configured means setup was completed.
+      if (initialStatus == CLIENT_NOT_CONFIGURED
+          && (username || password || passphrase)) {
+        Svc.Obs.notify("weave:service:setup-complete");
+      }
+      this._updateCachedURLs();
+
+      this._log.info("User logged in successfully - verifying login.");
+      if (!this.verifyLogin()) {
+        // verifyLogin sets the failure states here.
+        throw "Login failed: " + this.status.login;
+      }
+
+      this._loggedIn = true;
+
+      return true;
+    }
+
+    let notifier = this._notify("login", "", onNotify.bind(this));
+    return this._catch(this._lock("service.js: login", notifier))();
+  },
+
+  /**
+   * Drop our logged-in state, ask the identity layer to forget cached
+   * credentials, and notify observers that logout finished.
+   */
+  logout: function logout() {
+    // If we failed during login, we aren't going to have this._loggedIn set,
+    // but we still want to ask the identity to logout, so it doesn't try and
+    // reuse any old credentials next time we sync.
+    this._log.info("Logging out");
+    this.identity.logout();
+    this._loggedIn = false;
+
+    Svc.Obs.notify("weave:service:logout:finish");
+  },
+
+ checkAccount: function checkAccount(account) {
+ let client = new UserAPI10Client(this.userAPIURI);
+ let cb = Async.makeSpinningCallback();
+
+ let username = this.identity.usernameFromAccount(account);
+ client.usernameExists(username, cb);
+
+ try {
+ let exists = cb.wait();
+ return exists ? "notAvailable" : "available";
+ } catch (ex) {
+ // TODO fix API convention.
+ return this.errorHandler.errorStr(ex);
+ }
+ },
+
+ createAccount: function createAccount(email, password,
+ captchaChallenge, captchaResponse) {
+ let client = new UserAPI10Client(this.userAPIURI);
+
+ // Hint to server to allow scripted user creation or otherwise
+ // ignore captcha.
+ if (Svc.Prefs.isSet("admin-secret")) {
+ client.adminSecret = Svc.Prefs.get("admin-secret", "");
+ }
+
+ let cb = Async.makeSpinningCallback();
+
+ client.createAccount(email, password, captchaChallenge, captchaResponse,
+ cb);
+
+ try {
+ cb.wait();
+ return null;
+ } catch (ex) {
+ return this.errorHandler.errorStr(ex.body);
+ }
+ },
+
+ // Note: returns false if we failed for a reason other than the server not yet
+ // supporting the api.
+ _fetchServerConfiguration() {
+ // This is similar to _fetchInfo, but with different error handling.
+
+ let infoURL = this.userBaseURL + "info/configuration";
+ this._log.debug("Fetching server configuration", infoURL);
+ let configResponse;
+ try {
+ configResponse = this.resource(infoURL).get();
+ } catch (ex) {
+ // This is probably a network or similar error.
+ this._log.warn("Failed to fetch info/configuration", ex);
+ this.errorHandler.checkServerError(ex);
+ return false;
+ }
+
+ if (configResponse.status == 404) {
+ // This server doesn't support the URL yet - that's OK.
+ this._log.debug("info/configuration returned 404 - using default upload semantics");
+ } else if (configResponse.status != 200) {
+ this._log.warn(`info/configuration returned ${configResponse.status} - using default configuration`);
+ this.errorHandler.checkServerError(configResponse);
+ return false;
+ } else {
+ this.serverConfiguration = configResponse.obj;
+ }
+ this._log.trace("info/configuration for this server", this.serverConfiguration);
+ return true;
+ },
+
+  // Stuff we need to do after login, before we can really do
+  // anything (e.g. key setup).
+  //
+  // Refreshes the cached meta/global record when info/collections says it
+  // changed, creates one on 404, triggers a fresh start (server wipe) when
+  // the record is missing/incompatible, reconciles sync IDs, and finally
+  // verifies/fetches collection keys. Returns bool for success.
+  _remoteSetup: function _remoteSetup(infoResponse) {
+    // NOTE(review): `reset` is written below but never read in this method —
+    // possibly vestigial; confirm before removing.
+    let reset = false;
+
+    if (!this._fetchServerConfiguration()) {
+      return false;
+    }
+
+    this._log.debug("Fetching global metadata record");
+    let meta = this.recordManager.get(this.metaURL);
+
+    // Checking modified time of the meta record.
+    if (infoResponse &&
+        (infoResponse.obj.meta != this.metaModified) &&
+        (!meta || !meta.isNew)) {
+
+      // Delete the cached meta record...
+      this._log.debug("Clearing cached meta record. metaModified is " +
+          JSON.stringify(this.metaModified) + ", setting to " +
+          JSON.stringify(infoResponse.obj.meta));
+
+      this.recordManager.del(this.metaURL);
+
+      // ... fetch the current record from the server, and COPY THE FLAGS.
+      let newMeta = this.recordManager.get(this.metaURL);
+
+      // If we got a 401, we do not want to create a new meta/global - we
+      // should be able to get the existing meta after we get a new node.
+      if (this.recordManager.response.status == 401) {
+        this._log.debug("Fetching meta/global record on the server returned 401.");
+        this.errorHandler.checkServerError(this.recordManager.response);
+        return false;
+      }
+
+      if (this.recordManager.response.status == 404) {
+        this._log.debug("No meta/global record on the server. Creating one.");
+        newMeta = new WBORecord("meta", "global");
+        newMeta.payload.syncID = this.syncID;
+        newMeta.payload.storageVersion = STORAGE_VERSION;
+        newMeta.payload.declined = this.engineManager.getDeclined();
+
+        newMeta.isNew = true;
+
+        this.recordManager.set(this.metaURL, newMeta);
+        let uploadRes = newMeta.upload(this.resource(this.metaURL));
+        if (!uploadRes.success) {
+          this._log.warn("Unable to upload new meta/global. Failing remote setup.");
+          this.errorHandler.checkServerError(uploadRes);
+          return false;
+        }
+      } else if (!newMeta) {
+        this._log.warn("Unable to get meta/global. Failing remote setup.");
+        this.errorHandler.checkServerError(this.recordManager.response);
+        return false;
+      } else {
+        // If newMeta, then it stands to reason that meta != null.
+        newMeta.isNew = meta.isNew;
+        newMeta.changed = meta.changed;
+      }
+
+      // Switch in the new meta object and record the new time.
+      meta = newMeta;
+      this.metaModified = infoResponse.obj.meta;
+    }
+
+    let remoteVersion = (meta && meta.payload.storageVersion)?
+      meta.payload.storageVersion : "";
+
+    this._log.debug(["Weave Version:", WEAVE_VERSION, "Local Storage:",
+      STORAGE_VERSION, "Remote Storage:", remoteVersion].join(" "));
+
+    // Check for cases that require a fresh start. When comparing remoteVersion,
+    // we need to convert it to a number as older clients used it as a string.
+    if (!meta || !meta.payload.storageVersion || !meta.payload.syncID ||
+        STORAGE_VERSION > parseFloat(remoteVersion)) {
+
+      this._log.info("One of: no meta, no meta storageVersion, or no meta syncID. Fresh start needed.");
+
+      // abort the server wipe if the GET status was anything other than 404 or 200
+      let status = this.recordManager.response.status;
+      if (status != 200 && status != 404) {
+        this.status.sync = METARECORD_DOWNLOAD_FAIL;
+        this.errorHandler.checkServerError(this.recordManager.response);
+        this._log.warn("Unknown error while downloading metadata record. " +
+                       "Aborting sync.");
+        return false;
+      }
+
+      if (!meta)
+        this._log.info("No metadata record, server wipe needed");
+      if (meta && !meta.payload.syncID)
+        this._log.warn("No sync id, server wipe needed");
+
+      reset = true;
+
+      this._log.info("Wiping server data");
+      this._freshStart();
+
+      if (status == 404)
+        this._log.info("Metadata record not found, server was wiped to ensure " +
+                       "consistency.");
+      else // 200
+        this._log.info("Wiped server; incompatible metadata: " + remoteVersion);
+
+      return true;
+    }
+    // NOTE(review): remoteVersion may still be a string here, so this relies
+    // on relational coercion — confirm behavior for non-numeric values.
+    else if (remoteVersion > STORAGE_VERSION) {
+      this.status.sync = VERSION_OUT_OF_DATE;
+      this._log.warn("Upgrade required to access newer storage version.");
+      return false;
+    }
+    else if (meta.payload.syncID != this.syncID) {
+
+      this._log.info("Sync IDs differ. Local is " + this.syncID + ", remote is " + meta.payload.syncID);
+      this.resetClient();
+      this.collectionKeys.clear();
+      this.syncID = meta.payload.syncID;
+      this._log.debug("Clear cached values and take syncId: " + this.syncID);
+
+      if (!this.upgradeSyncKey(meta.payload.syncID)) {
+        this._log.warn("Failed to upgrade sync key. Failing remote setup.");
+        return false;
+      }
+
+      if (!this.verifyAndFetchSymmetricKeys(infoResponse)) {
+        this._log.warn("Failed to fetch symmetric keys. Failing remote setup.");
+        return false;
+      }
+
+      // bug 545725 - re-verify creds and fail sanely
+      if (!this.verifyLogin()) {
+        this.status.sync = CREDENTIALS_CHANGED;
+        this._log.info("Credentials have changed, aborting sync and forcing re-login.");
+        return false;
+      }
+
+      return true;
+    }
+    else {
+      if (!this.upgradeSyncKey(meta.payload.syncID)) {
+        this._log.warn("Failed to upgrade sync key. Failing remote setup.");
+        return false;
+      }
+
+      if (!this.verifyAndFetchSymmetricKeys(infoResponse)) {
+        this._log.warn("Failed to fetch symmetric keys. Failing remote setup.");
+        return false;
+      }
+
+      return true;
+    }
+  },
+
+ /**
+ * Return whether we should attempt login at the start of a sync.
+ *
+ * Note that this function has strong ties to _checkSync: callers
+ * of this function should typically use _checkSync to verify that
+ * any necessary login took place.
+ */
+ _shouldLogin: function _shouldLogin() {
+ return this.enabled &&
+ !Services.io.offline &&
+ !this.isLoggedIn;
+ },
+
+ /**
+ * Determine if a sync should run.
+ *
+ * @param ignore [optional]
+ * array of reasons to ignore when checking
+ *
+ * @return Reason for not syncing; not-truthy if sync should run
+ */
+ _checkSync: function _checkSync(ignore) {
+ let reason = "";
+ if (!this.enabled)
+ reason = kSyncWeaveDisabled;
+ else if (Services.io.offline)
+ reason = kSyncNetworkOffline;
+ else if (this.status.minimumNextSync > Date.now())
+ reason = kSyncBackoffNotMet;
+ else if ((this.status.login == MASTER_PASSWORD_LOCKED) &&
+ Utils.mpLocked())
+ reason = kSyncMasterPasswordLocked;
+ else if (Svc.Prefs.get("firstSync") == "notReady")
+ reason = kFirstSyncChoiceNotMade;
+
+ if (ignore && ignore.indexOf(reason) != -1)
+ return "";
+
+ return reason;
+ },
+
+ sync: function sync(engineNamesToSync) {
+ let dateStr = Utils.formatTimestamp(new Date());
+ this._log.debug("User-Agent: " + Utils.userAgent);
+ this._log.info("Starting sync at " + dateStr);
+ this._catch(function () {
+ // Make sure we're logged in.
+ if (this._shouldLogin()) {
+ this._log.debug("In sync: should login.");
+ if (!this.login()) {
+ this._log.debug("Not syncing: login returned false.");
+ return;
+ }
+ }
+ else {
+ this._log.trace("In sync: no need to login.");
+ }
+ return this._lockedSync(engineNamesToSync);
+ })();
+ },
+
+  /**
+   * Sync up engines with the server.
+   *
+   * Runs under the service lock and wrapped in "sync" notifications.
+   * Telemetry start/success counters bracket the engine synchronization,
+   * and the declined-engine state is reconciled afterwards.
+   *
+   * @param engineNamesToSync [optional]
+   *        Array of engine names, forwarded to EngineSynchronizer.sync.
+   */
+  _lockedSync: function _lockedSync(engineNamesToSync) {
+    return this._lock("service.js: sync",
+                      this._notify("sync", "", function onNotify() {
+
+      let histogram = Services.telemetry.getHistogramById("WEAVE_START_COUNT");
+      histogram.add(1);
+
+      // Spin the event loop until the synchronizer reports completion.
+      let synchronizer = new EngineSynchronizer(this);
+      let cb = Async.makeSpinningCallback();
+      synchronizer.onComplete = cb;
+
+      synchronizer.sync(engineNamesToSync);
+      // wait() throws if the first argument is truthy, which is exactly what
+      // we want.
+      let result = cb.wait();
+
+      histogram = Services.telemetry.getHistogramById("WEAVE_COMPLETE_SUCCESS_COUNT");
+      histogram.add(1);
+
+      // We successfully synchronized.
+      // Check if the identity wants to pre-fetch a migration sentinel from
+      // the server.
+      // If we have no clusterURL, we are probably doing a node reassignment
+      // so don't attempt to get it in that case.
+      if (this.clusterURL) {
+        this.identity.prefetchMigrationSentinel(this);
+      }
+
+      // Now let's update our declined engines (but only if we have a metaURL;
+      // if Sync failed due to no node we will not have one)
+      if (this.metaURL) {
+        let meta = this.recordManager.get(this.metaURL);
+        if (!meta) {
+          this._log.warn("No meta/global; can't update declined state.");
+          return;
+        }
+
+        let declinedEngines = new DeclinedEngines(this);
+        let didChange = declinedEngines.updateDeclined(meta, this.engineManager);
+        if (!didChange) {
+          this._log.info("No change to declined engines. Not reuploading meta/global.");
+          return;
+        }
+
+        this.uploadMetaGlobal(meta);
+      }
+    }))();
+  },
+
+  /**
+   * Upload meta/global, throwing the response on failure.
+   *
+   * @param meta
+   *        The meta/global record to PUT to this.metaURL.
+   * @throws the HTTP response object when the PUT does not succeed.
+   */
+  uploadMetaGlobal: function (meta) {
+    this._log.debug("Uploading meta/global: " + JSON.stringify(meta));
+
+    // It would be good to set the X-If-Unmodified-Since header to `timestamp`
+    // for this PUT to ensure at least some level of transactionality.
+    // Unfortunately, the servers don't support it after a wipe right now
+    // (bug 693893), so we're going to defer this until bug 692700.
+    let res = this.resource(this.metaURL);
+    let response = res.put(meta);
+    if (!response.success) {
+      throw response;
+    }
+    // Cache what we uploaded so subsequent reads see the same record.
+    this.recordManager.set(this.metaURL, meta);
+  },
+
+  /**
+   * Get a migration sentinel for the Firefox Accounts migration.
+   * Returns a JSON blob - it is up to callers of this to make sense of the
+   * data.
+   *
+   * Reads meta/fxa_credentials via the record cache and decrypts it with
+   * the current syncKeyBundle.
+   *
+   * Returns a promise that resolves with the sentinel, or null.
+   */
+  getFxAMigrationSentinel: function() {
+    if (this._shouldLogin()) {
+      this._log.debug("In getFxAMigrationSentinel: should login.");
+      if (!this.login()) {
+        this._log.debug("Can't get migration sentinel: login returned false.");
+        return Promise.resolve(null);
+      }
+    }
+    if (!this.identity.syncKeyBundle) {
+      this._log.error("Can't get migration sentinel: no syncKeyBundle.");
+      return Promise.resolve(null);
+    }
+    try {
+      let collectionURL = this.storageURL + "meta/fxa_credentials";
+      let cryptoWrapper = this.recordManager.get(collectionURL);
+      if (!cryptoWrapper || !cryptoWrapper.payload) {
+        // nothing to decrypt - .decrypt is noisy in that case, so just bail
+        // now.
+        return Promise.resolve(null);
+      }
+      // If the payload has a sentinel it means we must have put back the
+      // decrypted version last time we were called.
+      if (cryptoWrapper.payload.sentinel) {
+        return Promise.resolve(cryptoWrapper.payload.sentinel);
+      }
+      // If decryption fails it almost certainly means the key is wrong - but
+      // it's not clear if we need to take special action for that case?
+      let payload = cryptoWrapper.decrypt(this.identity.syncKeyBundle);
+      // After decrypting the ciphertext is lost, so we just stash the
+      // decrypted payload back into the wrapper.
+      cryptoWrapper.payload = payload;
+      return Promise.resolve(payload.sentinel);
+    } catch (ex) {
+      this._log.error("Failed to fetch the migration sentinel: ${}", ex);
+      return Promise.resolve(null);
+    }
+  },
+
+  /**
+   * Set a migration sentinel for the Firefox Accounts migration.
+   * Accepts a JSON blob - it is up to callers of this to make sense of the
+   * data.
+   *
+   * Encrypts the blob with the current syncKeyBundle and PUTs it to
+   * meta/fxa_credentials, updating the record cache on success.
+   *
+   * Returns a promise that resolves with a boolean which indicates if the
+   * sentinel was successfully written.
+   */
+  setFxAMigrationSentinel: function(sentinel) {
+    if (this._shouldLogin()) {
+      this._log.debug("In setFxAMigrationSentinel: should login.");
+      if (!this.login()) {
+        this._log.debug("Can't set migration sentinel: login returned false.");
+        return Promise.resolve(false);
+      }
+    }
+    if (!this.identity.syncKeyBundle) {
+      this._log.error("Can't set migration sentinel: no syncKeyBundle.");
+      return Promise.resolve(false);
+    }
+    try {
+      let collectionURL = this.storageURL + "meta/fxa_credentials";
+      let cryptoWrapper = new CryptoWrapper("meta", "fxa_credentials");
+      cryptoWrapper.cleartext.sentinel = sentinel;
+
+      cryptoWrapper.encrypt(this.identity.syncKeyBundle);
+
+      let res = this.resource(collectionURL);
+      let response = res.put(cryptoWrapper.toJSON());
+
+      if (!response.success) {
+        throw response;
+      }
+      // Keep the cached copy in step with what the server now holds.
+      this.recordManager.set(collectionURL, cryptoWrapper);
+    } catch (ex) {
+      this._log.error("Failed to set the migration sentinel: ${}", ex);
+      return Promise.resolve(false);
+    }
+    return Promise.resolve(true);
+  },
+
+  /**
+   * If we have a passphrase, rather than a 25-alphadigit sync key,
+   * use the provided sync ID to bootstrap it using PBKDF2.
+   *
+   * Store the new 'passphrase' back into the identity manager.
+   *
+   * We can check this as often as we want, because once it's done the
+   * check will no longer succeed. It only matters that it happens after
+   * we decide to bump the server storage version.
+   *
+   * @param syncID
+   *        Sync ID used (base64-encoded) as the PBKDF2 salt.
+   * @return true when the key was already upgraded or the upgrade
+   *         succeeded; false when there is no key or derivation failed.
+   */
+  upgradeSyncKey: function upgradeSyncKey(syncID) {
+    let p = this.identity.syncKey;
+
+    if (!p) {
+      return false;
+    }
+
+    // Check whether it's already a key that we generated.
+    if (Utils.isPassphrase(p)) {
+      this._log.info("Sync key is up-to-date: no need to upgrade.");
+      return true;
+    }
+
+    // Otherwise, let's upgrade it.
+    // N.B., we persist the sync key without testing it first...
+
+    let s = btoa(syncID); // It's what WeaveCrypto expects. *sigh*
+    let k = Utils.derivePresentableKeyFromPassphrase(p, s, PBKDF2_KEY_BYTES); // Base 32.
+
+    if (!k) {
+      this._log.error("No key resulted from derivePresentableKeyFromPassphrase. Failing upgrade.");
+      return false;
+    }
+
+    this._log.info("Upgrading sync key...");
+    this.identity.syncKey = k;
+    this._log.info("Saving upgraded sync key...");
+    this.persistLogin();
+    this._log.info("Done saving.");
+    return true;
+  },
+
+  /**
+   * Start over from scratch: reset the client, wipe the server, upload a
+   * fresh meta/global, and generate new symmetric keys.
+   */
+  _freshStart: function _freshStart() {
+    this._log.info("Fresh start. Resetting client and considering key upgrade.");
+    this.resetClient();
+    this.collectionKeys.clear();
+    this.upgradeSyncKey(this.syncID);
+
+    // Wipe the server.
+    let wipeTimestamp = this.wipeServer();
+
+    // Upload a new meta/global record.
+    let meta = new WBORecord("meta", "global");
+    meta.payload.syncID = this.syncID;
+    meta.payload.storageVersion = STORAGE_VERSION;
+    meta.payload.declined = this.engineManager.getDeclined();
+    meta.isNew = true;
+
+    // uploadMetaGlobal throws on failure -- including race conditions.
+    // If we got into a race condition, we'll abort the sync this way, too.
+    // That's fine. We'll just wait till the next sync. The client that we're
+    // racing is probably busy uploading stuff right now anyway.
+    this.uploadMetaGlobal(meta);
+
+    // Wipe everything we know about except meta because we just uploaded it
+    let engines = [this.clientsEngine].concat(this.engineManager.getAll());
+    let collections = engines.map(engine => engine.name);
+    // TODO: there's a bug here. We should be calling resetClient, no?
+    // NOTE(review): `collections` is computed but never used -- the
+    // upstream TODO above acknowledges this; confirm intent before removing.
+
+    // Generate, upload, and download new keys. Do this last so we don't wipe
+    // them...
+    this.generateNewSymmetricKeys();
+  },
+
+  /**
+   * Wipe user data from the server.
+   *
+   * @param collections [optional]
+   *        Array of collections to wipe. If not given, all collections are
+   *        wiped by issuing a DELETE request for `storageURL`.
+   *
+   * @return the server's timestamp of the (last) DELETE.
+   * @throws the caught exception or non-200/404 response; the
+   *         WEAVE_WIPE_SERVER_SUCCEEDED histogram records the outcome.
+   */
+  wipeServer: function wipeServer(collections) {
+    let response;
+    let histogram = Services.telemetry.getHistogramById("WEAVE_WIPE_SERVER_SUCCEEDED");
+    if (!collections) {
+      // Strip the trailing slash.
+      let res = this.resource(this.storageURL.slice(0, -1));
+      res.setHeader("X-Confirm-Delete", "1");
+      try {
+        response = res.delete();
+      } catch (ex) {
+        this._log.debug("Failed to wipe server", ex);
+        histogram.add(false);
+        throw ex;
+      }
+      // 404 is fine: nothing was there to delete.
+      if (response.status != 200 && response.status != 404) {
+        this._log.debug("Aborting wipeServer. Server responded with " +
+                        response.status + " response for " + this.storageURL);
+        histogram.add(false);
+        throw response;
+      }
+      histogram.add(true);
+      return response.headers["x-weave-timestamp"];
+    }
+
+    // Per-collection wipe: DELETE each named collection in turn.
+    let timestamp;
+    for (let name of collections) {
+      let url = this.storageURL + name;
+      try {
+        response = this.resource(url).delete();
+      } catch (ex) {
+        this._log.debug("Failed to wipe '" + name + "' collection", ex);
+        histogram.add(false);
+        throw ex;
+      }
+
+      if (response.status != 200 && response.status != 404) {
+        this._log.debug("Aborting wipeServer. Server responded with " +
+                        response.status + " response for " + url);
+        histogram.add(false);
+        throw response;
+      }
+
+      if ("x-weave-timestamp" in response.headers) {
+        timestamp = response.headers["x-weave-timestamp"];
+      }
+    }
+    histogram.add(true);
+    return timestamp;
+  },
+
+ /**
+ * Wipe all local user data.
+ *
+ * @param engines [optional]
+ * Array of engine names to wipe. If not given, all engines are used.
+ */
+ wipeClient: function wipeClient(engines) {
+ // If we don't have any engines, reset the service and wipe all engines
+ if (!engines) {
+ // Clear out any service data
+ this.resetService();
+
+ engines = [this.clientsEngine].concat(this.engineManager.getAll());
+ }
+ // Convert the array of names into engines
+ else {
+ engines = this.engineManager.get(engines);
+ }
+
+ // Fully wipe each engine if it's able to decrypt data
+ for (let engine of engines) {
+ if (engine.canDecrypt()) {
+ engine.wipeClient();
+ }
+ }
+
+ // Save the password/passphrase just in-case they aren't restored by sync
+ this.persistLogin();
+ },
+
+  /**
+   * Wipe all remote user data by wiping the server then telling each remote
+   * client to wipe itself.
+   *
+   * @param engines [optional]
+   *        Array of engine names to wipe. If not given, all engines are used.
+   * @throws rethrows any failure after routing it through
+   *         errorHandler.checkServerError.
+   */
+  wipeRemote: function wipeRemote(engines) {
+    try {
+      // Make sure stuff gets uploaded.
+      this.resetClient(engines);
+
+      // Clear out any server data.
+      this.wipeServer(engines);
+
+      // Only wipe the engines provided.
+      if (engines) {
+        engines.forEach(function(e) {
+          this.clientsEngine.sendCommand("wipeEngine", [e]);
+        }, this);
+      }
+      // Tell the remote machines to wipe themselves.
+      else {
+        this.clientsEngine.sendCommand("wipeAll", []);
+      }
+
+      // Make sure the changed clients get updated.
+      this.clientsEngine.sync();
+    } catch (ex) {
+      this.errorHandler.checkServerError(ex);
+      throw ex;
+    }
+  },
+
+  /**
+   * Reset local service information like logs, sync times, caches.
+   *
+   * Runs inside _catch, so errors are handled by the service's shared
+   * error guard rather than propagating to the caller.
+   */
+  resetService: function resetService() {
+    this._catch(function reset() {
+      this._log.info("Service reset.");
+
+      // Pretend we've never synced to the server and drop cached data
+      this.syncID = "";
+      this.recordManager.clearCache();
+    })();
+  },
+
+  /**
+   * Reset the client by getting rid of any local server data and client data.
+   *
+   * @param engines [optional]
+   *        Array of engine names to reset. If not given, all engines are used.
+   */
+  resetClient: function resetClient(engines) {
+    this._catch(function doResetClient() {
+      // If we don't have any engines, reset everything including the service
+      if (!engines) {
+        // Clear out any service data
+        this.resetService();
+
+        engines = [this.clientsEngine].concat(this.engineManager.getAll());
+      }
+      // Convert the array of names into engines
+      else {
+        engines = this.engineManager.get(engines);
+      }
+
+      // Have each engine drop any temporary meta data
+      for (let engine of engines) {
+        engine.resetClient();
+      }
+    })();
+  },
+
+  /**
+   * Fetch storage info from the server.
+   *
+   * @param type
+   *        String specifying what info to fetch from the server. Must be one
+   *        of the INFO_* values. See Sync Storage Server API spec for details.
+   * @param callback
+   *        Callback function with signature (error, data) where `data' is
+   *        the return value from the server already parsed as JSON.
+   *
+   * @return RESTRequest instance representing the request, allowing callers
+   *         to cancel the request.
+   */
+  getStorageInfo: function getStorageInfo(type, callback) {
+    if (STORAGE_INFO_TYPES.indexOf(type) == -1) {
+      // NOTE(review): throws a bare string, not an Error; callers may rely
+      // on that shape -- confirm before changing.
+      throw "Invalid value for 'type': " + type;
+    }
+
+    let info_type = "info/" + type;
+    this._log.trace("Retrieving '" + info_type + "'...");
+    let url = this.userBaseURL + info_type;
+    return this.getStorageRequest(url).get(function onComplete(error) {
+      // Note: 'this' is the request.
+      if (error) {
+        this._log.debug("Failed to retrieve '" + info_type + "'", error);
+        return callback(error);
+      }
+      if (this.response.status != 200) {
+        this._log.debug("Failed to retrieve '" + info_type +
+                        "': server responded with HTTP" +
+                        this.response.status);
+        return callback(this.response);
+      }
+
+      let result;
+      try {
+        result = JSON.parse(this.response.body);
+      } catch (ex) {
+        this._log.debug("Server returned invalid JSON for '" + info_type +
+                        "': " + this.response.body);
+        return callback(ex);
+      }
+      this._log.trace("Successfully retrieved '" + info_type + "'.");
+      return callback(null, result);
+    });
+  },
+};
+
+// Module-level singleton: the one Sync service instance, started on load.
+this.Service = new Sync11Service();
+Service.onStartup();
diff --git a/services/sync/modules/stages/cluster.js b/services/sync/modules/stages/cluster.js
new file mode 100644
index 000000000..7665ce825
--- /dev/null
+++ b/services/sync/modules/stages/cluster.js
@@ -0,0 +1,113 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+this.EXPORTED_SYMBOLS = ["ClusterManager"];
+
+var {utils: Cu} = Components;
+
+Cu.import("resource://gre/modules/Log.jsm");
+Cu.import("resource://services-sync/constants.js");
+Cu.import("resource://services-sync/policies.js");
+Cu.import("resource://services-sync/util.js");
+
+/**
+ * Contains code for managing the Sync cluster we are in.
+ */
+this.ClusterManager = function ClusterManager(service) {
+ this._log = Log.repository.getLogger("Sync.Service");
+ this._log.level = Log.Level[Svc.Prefs.get("log.logger.service.main")];
+
+ this.service = service;
+}
+ClusterManager.prototype = {
+  // Convenience accessor for the service's identity manager.
+  get identity() {
+    return this.service.identity;
+  },
+
+  /**
+   * Obtain the cluster for the current user.
+   *
+   * Issues a GET to <userAPIURI><username>/node/weave and interprets the
+   * status code. Throws the failure reason (a string or exception) when the
+   * cluster cannot be determined.
+   *
+   * Returns the string URL of the cluster or null on error.
+   */
+  _findCluster: function _findCluster() {
+    this._log.debug("Finding cluster for user " + this.identity.username);
+
+    // This should ideally use UserAPI10Client but the legacy hackiness is
+    // strong with this code.
+    let fail;
+    let url = this.service.userAPIURI + this.identity.username + "/node/weave";
+    let res = this.service.resource(url);
+    try {
+      let node = res.get();
+      switch (node.status) {
+        case 400:
+          this.service.status.login = LOGIN_FAILED_LOGIN_REJECTED;
+          fail = "Find cluster denied: " + this.service.errorHandler.errorStr(node);
+          break;
+        case 404:
+          this._log.debug("Using serverURL as data cluster (multi-cluster support disabled)");
+          return this.service.serverURL;
+        // status 0 is treated the same as 200 here -- presumably a quirk of
+        // the resource layer; confirm before changing.
+        case 0:
+        case 200:
+          if (node == "null") {
+            node = null;
+          }
+          this._log.trace("_findCluster successfully returning " + node);
+          return node;
+        default:
+          this.service.errorHandler.checkServerError(node);
+          fail = "Unexpected response code: " + node.status;
+          break;
+      }
+    } catch (e) {
+      this._log.debug("Network error on findCluster");
+      this.service.status.login = LOGIN_FAILED_NETWORK_ERROR;
+      this.service.errorHandler.checkServerError(e);
+      fail = e;
+    }
+    throw fail;
+  },
+
+  /**
+   * Determine the cluster for the current user and update state.
+   *
+   * Returns true when the service's clusterURL was changed; false when the
+   * cluster could not be determined or is already current.
+   */
+  setCluster: function setCluster() {
+    // Make sure we didn't get some unexpected response for the cluster.
+    let cluster = this._findCluster();
+    this._log.debug("Cluster value = " + cluster);
+    if (cluster == null) {
+      return false;
+    }
+
+    // Convert from the funky "String object with additional properties" that
+    // resource.js returns to a plain-old string.
+    cluster = cluster.toString();
+    // Don't update stuff if we already have the right cluster
+    if (cluster == this.service.clusterURL) {
+      return false;
+    }
+
+    this._log.debug("Setting cluster to " + cluster);
+    this.service.clusterURL = cluster;
+
+    return true;
+  },
+
+  getUserBaseURL: function getUserBaseURL() {
+    // Legacy Sync and FxA Sync construct the userBaseURL differently. Legacy
+    // Sync appends path components onto an empty path, and in FxA Sync, the
+    // token server constructs this for us in an opaque manner. Since the
+    // cluster manager already sets the clusterURL on Service and also has
+    // access to the current identity, we added this functionality here.
+
+    // If the clusterURL hasn't been set, the userBaseURL shouldn't be set
+    // either. Some tests expect "undefined" to be returned here.
+    if (!this.service.clusterURL) {
+      return undefined;
+    }
+    let storageAPI = this.service.clusterURL + SYNC_API_VERSION + "/";
+    return storageAPI + this.identity.username + "/";
+  }
+};
+Object.freeze(ClusterManager.prototype);
diff --git a/services/sync/modules/stages/declined.js b/services/sync/modules/stages/declined.js
new file mode 100644
index 000000000..ff8a14181
--- /dev/null
+++ b/services/sync/modules/stages/declined.js
@@ -0,0 +1,76 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/**
+ * This file contains code for maintaining the set of declined engines,
+ * in conjunction with EngineManager.
+ */
+
+"use strict";
+
+this.EXPORTED_SYMBOLS = ["DeclinedEngines"];
+
+var {utils: Cu} = Components;
+
+Cu.import("resource://services-sync/constants.js");
+Cu.import("resource://gre/modules/Log.jsm");
+Cu.import("resource://services-common/utils.js");
+Cu.import("resource://services-common/observers.js");
+Cu.import("resource://gre/modules/Preferences.jsm");
+
+
+
+this.DeclinedEngines = function (service) {
+ this._log = Log.repository.getLogger("Sync.Declined");
+ this._log.level = Log.Level[new Preferences(PREFS_BRANCH).get("log.logger.declined")];
+
+ this.service = service;
+}
+this.DeclinedEngines.prototype = {
+  /**
+   * Merge the remote declined list from `meta` with local state.
+   *
+   * @param meta
+   *        The meta/global record; meta.payload.declined is read, and both
+   *        meta.payload.declined and meta.changed are updated when the
+   *        merged set differs from the remote one.
+   * @param engineManager [optional]
+   *        EngineManager to read/update; defaults to the service's manager.
+   * @return true when the declined set changed (meta needs reupload).
+   */
+  updateDeclined: function (meta, engineManager=this.service.engineManager) {
+    let enabled = new Set(engineManager.getEnabled().map(e => e.name));
+    let known = new Set(engineManager.getAll().map(e => e.name));
+    let remoteDeclined = new Set(meta.payload.declined || []);
+    let localDeclined = new Set(engineManager.getDeclined());
+
+    this._log.debug("Handling remote declined: " + JSON.stringify([...remoteDeclined]));
+    this._log.debug("Handling local declined: " + JSON.stringify([...localDeclined]));
+
+    // Any engines that are locally enabled should be removed from the remote
+    // declined list.
+    //
+    // Any engines that are locally declined should be added to the remote
+    // declined list.
+    let newDeclined = CommonUtils.union(localDeclined, CommonUtils.difference(remoteDeclined, enabled));
+
+    // If our declined set has changed, put it into the meta object and mark
+    // it as changed.
+    let declinedChanged = !CommonUtils.setEqual(newDeclined, remoteDeclined);
+    this._log.debug("Declined changed? " + declinedChanged);
+    if (declinedChanged) {
+      meta.changed = true;
+      meta.payload.declined = [...newDeclined];
+    }
+
+    // Update the engine manager regardless.
+    engineManager.setDeclined(newDeclined);
+
+    // Any engines that are locally known, locally disabled, and not remotely
+    // or locally declined, are candidates for enablement.
+    let undecided = CommonUtils.difference(CommonUtils.difference(known, enabled), newDeclined);
+    if (undecided.size) {
+      let subject = {
+        declined: newDeclined,
+        enabled: enabled,
+        known: known,
+        undecided: undecided,
+      };
+      // Notify asynchronously so observers run outside this call stack.
+      CommonUtils.nextTick(() => {
+        Observers.notify("weave:engines:notdeclined", subject);
+      });
+    }
+
+    return declinedChanged;
+  },
+};
diff --git a/services/sync/modules/stages/enginesync.js b/services/sync/modules/stages/enginesync.js
new file mode 100644
index 000000000..a00a2f48b
--- /dev/null
+++ b/services/sync/modules/stages/enginesync.js
@@ -0,0 +1,449 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/**
+ * This file contains code for synchronizing engines.
+ */
+
+this.EXPORTED_SYMBOLS = ["EngineSynchronizer"];
+
+var {utils: Cu} = Components;
+
+Cu.import("resource://gre/modules/Log.jsm");
+Cu.import("resource://gre/modules/Services.jsm");
+Cu.import("resource://gre/modules/Task.jsm");
+Cu.import("resource://services-common/async.js");
+Cu.import("resource://services-common/observers.js");
+Cu.import("resource://services-sync/constants.js");
+Cu.import("resource://services-sync/engines.js");
+Cu.import("resource://services-sync/policies.js");
+Cu.import("resource://services-sync/util.js");
+
+/**
+ * Perform synchronization of engines.
+ *
+ * This was originally split out of service.js. The API needs lots of love.
+ */
+this.EngineSynchronizer = function EngineSynchronizer(service) {
+ this._log = Log.repository.getLogger("Sync.Synchronizer");
+ this._log.level = Log.Level[Svc.Prefs.get("log.logger.synchronizer")];
+
+ this.service = service;
+
+ this.onComplete = null;
+}
+
+EngineSynchronizer.prototype = {
+ sync: function sync(engineNamesToSync) {
+ if (!this.onComplete) {
+ throw new Error("onComplete handler not installed.");
+ }
+
+ let startTime = Date.now();
+
+ this.service.status.resetSync();
+
+ // Make sure we should sync or record why we shouldn't.
+ let reason = this.service._checkSync();
+ if (reason) {
+ if (reason == kSyncNetworkOffline) {
+ this.service.status.sync = LOGIN_FAILED_NETWORK_ERROR;
+ }
+
+ // this is a purposeful abort rather than a failure, so don't set
+ // any status bits
+ reason = "Can't sync: " + reason;
+ this.onComplete(new Error("Can't sync: " + reason));
+ return;
+ }
+
+ // If we don't have a node, get one. If that fails, retry in 10 minutes.
+ if (!this.service.clusterURL && !this.service._clusterManager.setCluster()) {
+ this.service.status.sync = NO_SYNC_NODE_FOUND;
+ this._log.info("No cluster URL found. Cannot sync.");
+ this.onComplete(null);
+ return;
+ }
+
+ // Ping the server with a special info request once a day.
+ let infoURL = this.service.infoURL;
+ let now = Math.floor(Date.now() / 1000);
+ let lastPing = Svc.Prefs.get("lastPing", 0);
+ if (now - lastPing > 86400) { // 60 * 60 * 24
+ infoURL += "?v=" + WEAVE_VERSION;
+ Svc.Prefs.set("lastPing", now);
+ }
+
+ let engineManager = this.service.engineManager;
+
+ // Figure out what the last modified time is for each collection
+ let info = this.service._fetchInfo(infoURL);
+
+ // Convert the response to an object and read out the modified times
+ for (let engine of [this.service.clientsEngine].concat(engineManager.getAll())) {
+ engine.lastModified = info.obj[engine.name] || 0;
+ }
+
+ if (!(this.service._remoteSetup(info))) {
+ this.onComplete(new Error("Aborting sync, remote setup failed"));
+ return;
+ }
+
+ // Make sure we have an up-to-date list of clients before sending commands
+ this._log.debug("Refreshing client list.");
+ if (!this._syncEngine(this.service.clientsEngine)) {
+ // Clients is an engine like any other; it can fail with a 401,
+ // and we can elect to abort the sync.
+ this._log.warn("Client engine sync failed. Aborting.");
+ this.onComplete(null);
+ return;
+ }
+
+ // We only honor the "hint" of what engines to Sync if this isn't
+ // a first sync.
+ let allowEnginesHint = false;
+ // Wipe data in the desired direction if necessary
+ switch (Svc.Prefs.get("firstSync")) {
+ case "resetClient":
+ this.service.resetClient(engineManager.enabledEngineNames);
+ break;
+ case "wipeClient":
+ this.service.wipeClient(engineManager.enabledEngineNames);
+ break;
+ case "wipeRemote":
+ this.service.wipeRemote(engineManager.enabledEngineNames);
+ break;
+ default:
+ allowEnginesHint = true;
+ break;
+ }
+
+ if (this.service.clientsEngine.localCommands) {
+ try {
+ if (!(this.service.clientsEngine.processIncomingCommands())) {
+ this.service.status.sync = ABORT_SYNC_COMMAND;
+ this.onComplete(new Error("Processed command aborted sync."));
+ return;
+ }
+
+ // Repeat remoteSetup in-case the commands forced us to reset
+ if (!(this.service._remoteSetup(info))) {
+ this.onComplete(new Error("Remote setup failed after processing commands."));
+ return;
+ }
+ }
+ finally {
+ // Always immediately attempt to push back the local client (now
+ // without commands).
+ // Note that we don't abort here; if there's a 401 because we've
+ // been reassigned, we'll handle it around another engine.
+ this._syncEngine(this.service.clientsEngine);
+ }
+ }
+
+ // Update engines because it might change what we sync.
+ try {
+ this._updateEnabledEngines();
+ } catch (ex) {
+ this._log.debug("Updating enabled engines failed", ex);
+ this.service.errorHandler.checkServerError(ex);
+ this.onComplete(ex);
+ return;
+ }
+
+ // If the engines to sync has been specified, we sync in the order specified.
+ let enginesToSync;
+ if (allowEnginesHint && engineNamesToSync) {
+ this._log.info("Syncing specified engines", engineNamesToSync);
+ enginesToSync = engineManager.get(engineNamesToSync).filter(e => e.enabled);
+ } else {
+ this._log.info("Syncing all enabled engines.");
+ enginesToSync = engineManager.getEnabled();
+ }
+ try {
+ // We don't bother validating engines that failed to sync.
+ let enginesToValidate = [];
+ for (let engine of enginesToSync) {
+ // If there's any problems with syncing the engine, report the failure
+ if (!(this._syncEngine(engine)) || this.service.status.enforceBackoff) {
+ this._log.info("Aborting sync for failure in " + engine.name);
+ break;
+ }
+ enginesToValidate.push(engine);
+ }
+
+ // If _syncEngine fails for a 401, we might not have a cluster URL here.
+ // If that's the case, break out of this immediately, rather than
+ // throwing an exception when trying to fetch metaURL.
+ if (!this.service.clusterURL) {
+ this._log.debug("Aborting sync, no cluster URL: " +
+ "not uploading new meta/global.");
+ this.onComplete(null);
+ return;
+ }
+
+ // Upload meta/global if any engines changed anything.
+ let meta = this.service.recordManager.get(this.service.metaURL);
+ if (meta.isNew || meta.changed) {
+ this._log.info("meta/global changed locally: reuploading.");
+ try {
+ this.service.uploadMetaGlobal(meta);
+ delete meta.isNew;
+ delete meta.changed;
+ } catch (error) {
+ this._log.error("Unable to upload meta/global. Leaving marked as new.");
+ }
+ }
+
+ Async.promiseSpinningly(this._tryValidateEngines(enginesToValidate));
+
+ // If there were no sync engine failures
+ if (this.service.status.service != SYNC_FAILED_PARTIAL) {
+ Svc.Prefs.set("lastSync", new Date().toString());
+ this.service.status.sync = SYNC_SUCCEEDED;
+ }
+ } finally {
+ Svc.Prefs.reset("firstSync");
+
+ let syncTime = ((Date.now() - startTime) / 1000).toFixed(2);
+ let dateStr = Utils.formatTimestamp(new Date());
+ this._log.info("Sync completed at " + dateStr
+ + " after " + syncTime + " secs.");
+ }
+
+ this.onComplete(null);
+ },
+
+ _tryValidateEngines: Task.async(function* (recentlySyncedEngines) {
+ if (!Services.telemetry.canRecordBase || !Svc.Prefs.get("validation.enabled", false)) {
+ this._log.info("Skipping validation: validation or telemetry reporting is disabled");
+ return;
+ }
+
+ let lastValidation = Svc.Prefs.get("validation.lastTime", 0);
+ let validationInterval = Svc.Prefs.get("validation.interval");
+ let nowSeconds = Math.floor(Date.now() / 1000);
+
+ if (nowSeconds - lastValidation < validationInterval) {
+ this._log.info("Skipping validation: too recent since last validation attempt");
+ return;
+ }
+ // Update the time now, even if we may return false still. We don't want to
+ // check the rest of these more frequently than once a day.
+ Svc.Prefs.set("validation.lastTime", nowSeconds);
+
+ // Validation only occurs a certain percentage of the time.
+ let validationProbability = Svc.Prefs.get("validation.percentageChance", 0) / 100.0;
+ if (validationProbability < Math.random()) {
+ this._log.info("Skipping validation: Probability threshold not met");
+ return;
+ }
+ let maxRecords = Svc.Prefs.get("validation.maxRecords");
+ if (!maxRecords) {
+ // Don't bother asking the server for the counts if we know validation
+ // won't happen anyway.
+ return;
+ }
+
+ // maxRecords of -1 means "any number", so we can skip asking the server.
+ // Used for tests.
+ let info;
+ if (maxRecords < 0) {
+ info = {};
+ for (let e of recentlySyncedEngines) {
+ info[e.name] = 1; // needs to be < maxRecords
+ }
+ maxRecords = 2;
+ } else {
+
+ let collectionCountsURL = this.service.userBaseURL + "info/collection_counts";
+ try {
+ let infoResp = this.service._fetchInfo(collectionCountsURL);
+ if (!infoResp.success) {
+ this._log.error("Can't run validation: request to info/collection_counts responded with "
+ + resp.status);
+ return;
+ }
+ info = infoResp.obj; // might throw because obj is a getter which parses json.
+ } catch (e) {
+ // Not running validation is totally fine, so we just write an error log and return.
+ this._log.error("Can't run validation: Caught error when fetching counts", e);
+ return;
+ }
+ }
+
+ if (!info) {
+ return;
+ }
+
+ let engineLookup = new Map(recentlySyncedEngines.map(e => [e.name, e]));
+ let toRun = [];
+ for (let [engineName, recordCount] of Object.entries(info)) {
+ let engine = engineLookup.get(engineName);
+ if (recordCount > maxRecords || !engine) {
+ this._log.debug(`Skipping validation for ${engineName} because it's not an engine or ` +
+ `the number of records (${recordCount}) is greater than the maximum allowed (${maxRecords}).`);
+ continue;
+ }
+ let validator = engine.getValidator();
+ if (!validator) {
+ continue;
+ }
+ // Put this in an array so that we know how many we're going to do, so we
+ // don't tell users we're going to run some validators when we aren't.
+ toRun.push({ engine, validator });
+ }
+
+ if (!toRun.length) {
+ return;
+ }
+ Services.console.logStringMessage(
+ "Sync is about to run a consistency check. This may be slow, and " +
+ "can be controlled using the pref \"services.sync.validation.enabled\".\n" +
+ "If you encounter any problems because of this, please file a bug.");
+ for (let { validator, engine } of toRun) {
+ try {
+ let result = yield validator.validate(engine);
+ Observers.notify("weave:engine:validate:finish", result, engine.name);
+ } catch (e) {
+ this._log.error(`Failed to run validation on ${engine.name}!`, e);
+ Observers.notify("weave:engine:validate:error", e, engine.name)
+ // Keep validating -- there's no reason to think that a failure for one
+ // validator would mean the others will fail.
+ }
+ }
+ }),
+
+ // Returns true if sync should proceed.
+ // false / no return value means sync should be aborted.
+ _syncEngine: function _syncEngine(engine) {
+ try {
+ engine.sync();
+ }
+ catch(e) {
+ if (e.status == 401) {
+ // Maybe a 401, cluster update perhaps needed?
+ // We rely on ErrorHandler observing the sync failure notification to
+ // schedule another sync and clear node assignment values.
+ // Here we simply want to muffle the exception and return an
+ // appropriate value.
+ return false;
+ }
+ }
+
+ return true;
+ },
+
+ _updateEnabledFromMeta: function (meta, numClients, engineManager=this.service.engineManager) {
+ this._log.info("Updating enabled engines: " +
+ numClients + " clients.");
+
+ if (meta.isNew || !meta.payload.engines) {
+ this._log.debug("meta/global isn't new, or is missing engines. Not updating enabled state.");
+ return;
+ }
+
+ // If we're the only client, and no engines are marked as enabled,
+ // thumb our noses at the server data: it can't be right.
+ // Belt-and-suspenders approach to Bug 615926.
+ let hasEnabledEngines = false;
+ for (let e in meta.payload.engines) {
+ if (e != "clients") {
+ hasEnabledEngines = true;
+ break;
+ }
+ }
+
+ if ((numClients <= 1) && !hasEnabledEngines) {
+ this._log.info("One client and no enabled engines: not touching local engine status.");
+ return;
+ }
+
+ this.service._ignorePrefObserver = true;
+
+ let enabled = engineManager.enabledEngineNames;
+
+ let toDecline = new Set();
+ let toUndecline = new Set();
+
+ for (let engineName in meta.payload.engines) {
+ if (engineName == "clients") {
+ // Clients is special.
+ continue;
+ }
+ let index = enabled.indexOf(engineName);
+ if (index != -1) {
+ // The engine is enabled locally. Nothing to do.
+ enabled.splice(index, 1);
+ continue;
+ }
+ let engine = engineManager.get(engineName);
+ if (!engine) {
+ // The engine doesn't exist locally. Nothing to do.
+ continue;
+ }
+
+ let attemptedEnable = false;
+ // If the engine was enabled remotely, enable it locally.
+ if (!Svc.Prefs.get("engineStatusChanged." + engine.prefName, false)) {
+ this._log.trace("Engine " + engineName + " was enabled. Marking as non-declined.");
+ toUndecline.add(engineName);
+ this._log.trace(engineName + " engine was enabled remotely.");
+ engine.enabled = true;
+ // Note that setting engine.enabled to true might not have worked for
+ // the password engine if a master-password is enabled. However, it's
+ // still OK that we added it to undeclined - the user *tried* to enable
+ // it remotely - so it still winds up as not being flagged as declined
+ // even though it's disabled remotely.
+ attemptedEnable = true;
+ }
+
+ // If either the engine was disabled locally or enabling the engine
+ // failed (see above re master-password) then wipe server data and
+ // disable it everywhere.
+ if (!engine.enabled) {
+ this._log.trace("Wiping data for " + engineName + " engine.");
+ engine.wipeServer();
+ delete meta.payload.engines[engineName];
+ meta.changed = true; // the new enabled state must propagate
+ // We also here mark the engine as declined, because the pref
+ // was explicitly changed to false - unless we tried, and failed,
+ // to enable it - in which case we leave the declined state alone.
+ if (!attemptedEnable) {
+ // This will be reflected in meta/global in the next stage.
+ this._log.trace("Engine " + engineName + " was disabled locally. Marking as declined.");
+ toDecline.add(engineName);
+ }
+ }
+ }
+
+ // Any remaining engines were either enabled locally or disabled remotely.
+ for (let engineName of enabled) {
+ let engine = engineManager.get(engineName);
+ if (Svc.Prefs.get("engineStatusChanged." + engine.prefName, false)) {
+ this._log.trace("The " + engineName + " engine was enabled locally.");
+ toUndecline.add(engineName);
+ } else {
+ this._log.trace("The " + engineName + " engine was disabled remotely.");
+
+ // Don't automatically mark it as declined!
+ engine.enabled = false;
+ }
+ }
+
+ engineManager.decline(toDecline);
+ engineManager.undecline(toUndecline);
+
+ Svc.Prefs.resetBranch("engineStatusChanged.");
+ this.service._ignorePrefObserver = false;
+ },
+
+ _updateEnabledEngines: function () {
+ let meta = this.service.recordManager.get(this.service.metaURL);
+ let numClients = this.service.scheduler.numClients;
+ let engineManager = this.service.engineManager;
+
+ this._updateEnabledFromMeta(meta, numClients, engineManager);
+ },
+};
+Object.freeze(EngineSynchronizer.prototype);
diff --git a/services/sync/modules/status.js b/services/sync/modules/status.js
new file mode 100644
index 000000000..100bc7965
--- /dev/null
+++ b/services/sync/modules/status.js
@@ -0,0 +1,145 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+this.EXPORTED_SYMBOLS = ["Status"];
+
+var Cc = Components.classes;
+var Ci = Components.interfaces;
+var Cr = Components.results;
+var Cu = Components.utils;
+
+Cu.import("resource://services-sync/constants.js");
+Cu.import("resource://gre/modules/Log.jsm");
+Cu.import("resource://services-sync/identity.js");
+Cu.import("resource://services-sync/browserid_identity.js");
+Cu.import("resource://gre/modules/Services.jsm");
+Cu.import("resource://services-common/async.js");
+
+this.Status = {
+ _log: Log.repository.getLogger("Sync.Status"),
+ __authManager: null,
+ ready: false,
+
+ get _authManager() {
+ if (this.__authManager) {
+ return this.__authManager;
+ }
+ let service = Components.classes["@mozilla.org/weave/service;1"]
+ .getService(Components.interfaces.nsISupports)
+ .wrappedJSObject;
+ let idClass = service.fxAccountsEnabled ? BrowserIDManager : IdentityManager;
+ this.__authManager = new idClass();
+ this.__authManager.initialize();
+ return this.__authManager;
+ },
+
+ get service() {
+ return this._service;
+ },
+
+ set service(code) {
+ this._log.debug("Status.service: " + (this._service || undefined) + " => " + code);
+ this._service = code;
+ },
+
+ get login() {
+ return this._login;
+ },
+
+ set login(code) {
+ this._log.debug("Status.login: " + this._login + " => " + code);
+ this._login = code;
+
+ if (code == LOGIN_FAILED_NO_USERNAME ||
+ code == LOGIN_FAILED_NO_PASSWORD ||
+ code == LOGIN_FAILED_NO_PASSPHRASE) {
+ this.service = CLIENT_NOT_CONFIGURED;
+ } else if (code != LOGIN_SUCCEEDED) {
+ this.service = LOGIN_FAILED;
+ } else {
+ this.service = STATUS_OK;
+ }
+ },
+
+ get sync() {
+ return this._sync;
+ },
+
+ set sync(code) {
+ this._log.debug("Status.sync: " + this._sync + " => " + code);
+ this._sync = code;
+ this.service = code == SYNC_SUCCEEDED ? STATUS_OK : SYNC_FAILED;
+ },
+
+ get eol() {
+ let modePref = PREFS_BRANCH + "errorhandler.alert.mode";
+ try {
+ return Services.prefs.getCharPref(modePref) == "hard-eol";
+ } catch (ex) {
+ return false;
+ }
+ },
+
+ get engines() {
+ return this._engines;
+ },
+
+ set engines([name, code]) {
+ this._log.debug("Status for engine " + name + ": " + code);
+ this._engines[name] = code;
+
+ if (code != ENGINE_SUCCEEDED) {
+ this.service = SYNC_FAILED_PARTIAL;
+ }
+ },
+
+ // Implement toString because adding a logger introduces a cyclic object
+ // value, so we can't trivially debug-print Status as JSON.
+ toString: function toString() {
+ return "<Status" +
+ ": login: " + Status.login +
+ ", service: " + Status.service +
+ ", sync: " + Status.sync + ">";
+ },
+
+ checkSetup: function checkSetup() {
+ let result = this._authManager.currentAuthState;
+ if (result == STATUS_OK) {
+ Status.service = result;
+ return result;
+ }
+
+ Status.login = result;
+ return Status.service;
+ },
+
+ resetBackoff: function resetBackoff() {
+ this.enforceBackoff = false;
+ this.backoffInterval = 0;
+ this.minimumNextSync = 0;
+ },
+
+ resetSync: function resetSync() {
+ // Logger setup.
+ let logPref = PREFS_BRANCH + "log.logger.status";
+ let logLevel = "Trace";
+ try {
+ logLevel = Services.prefs.getCharPref(logPref);
+ } catch (ex) {
+ // Use default.
+ }
+ this._log.level = Log.Level[logLevel];
+
+ this._log.info("Resetting Status.");
+ this.service = STATUS_OK;
+ this._login = LOGIN_SUCCEEDED;
+ this._sync = SYNC_SUCCEEDED;
+ this._engines = {};
+ this.partial = false;
+ }
+};
+
+// Initialize various status values.
+Status.resetBackoff();
+Status.resetSync();
diff --git a/services/sync/modules/telemetry.js b/services/sync/modules/telemetry.js
new file mode 100644
index 000000000..c311387f7
--- /dev/null
+++ b/services/sync/modules/telemetry.js
@@ -0,0 +1,578 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+"use strict";
+
+const {classes: Cc, interfaces: Ci, utils: Cu, results: Cr} = Components;
+
+this.EXPORTED_SYMBOLS = ["SyncTelemetry"];
+
+Cu.import("resource://services-sync/browserid_identity.js");
+Cu.import("resource://services-sync/main.js");
+Cu.import("resource://services-sync/status.js");
+Cu.import("resource://services-sync/util.js");
+Cu.import("resource://services-common/observers.js");
+Cu.import("resource://services-common/async.js");
+Cu.import("resource://gre/modules/Log.jsm");
+Cu.import("resource://gre/modules/TelemetryController.jsm");
+Cu.import("resource://gre/modules/FxAccounts.jsm");
+Cu.import("resource://gre/modules/XPCOMUtils.jsm");
+Cu.import("resource://gre/modules/osfile.jsm", this);
+
+let constants = {};
+Cu.import("resource://services-sync/constants.js", constants);
+
+var fxAccountsCommon = {};
+Cu.import("resource://gre/modules/FxAccountsCommon.js", fxAccountsCommon);
+
+XPCOMUtils.defineLazyServiceGetter(this, "Telemetry",
+ "@mozilla.org/base/telemetry;1",
+ "nsITelemetry");
+
+const log = Log.repository.getLogger("Sync.Telemetry");
+
+const TOPICS = [
+ "profile-before-change",
+ "weave:service:sync:start",
+ "weave:service:sync:finish",
+ "weave:service:sync:error",
+
+ "weave:engine:sync:start",
+ "weave:engine:sync:finish",
+ "weave:engine:sync:error",
+ "weave:engine:sync:applied",
+ "weave:engine:sync:uploaded",
+ "weave:engine:validate:finish",
+ "weave:engine:validate:error",
+];
+
+const PING_FORMAT_VERSION = 1;
+
+// The set of engines we record telemetry for - any other engines are ignored.
+const ENGINES = new Set(["addons", "bookmarks", "clients", "forms", "history",
+ "passwords", "prefs", "tabs", "extension-storage"]);
+
+// A regex we can use to replace the profile dir in error messages. We use a
+// regexp so we can simply replace all case-insensitive occurrences.
+// This escaping function is from:
+// https://developer.mozilla.org/en/docs/Web/JavaScript/Guide/Regular_Expressions
+const reProfileDir = new RegExp(
+ OS.Constants.Path.profileDir.replace(/[.*+?^${}()|[\]\\]/g, "\\$&"),
+ "gi");
+
+function transformError(error, engineName) {
+ if (Async.isShutdownException(error)) {
+ return { name: "shutdownerror" };
+ }
+
+ if (typeof error === "string") {
+ if (error.startsWith("error.")) {
+ // This is hacky, but I can't imagine that it's not also accurate.
+ return { name: "othererror", error };
+ }
+ // There's a chance the profiledir is in the error string which is PII we
+ // want to avoid including in the ping.
+ error = error.replace(reProfileDir, "[profileDir]");
+ return { name: "unexpectederror", error };
+ }
+
+ if (error.failureCode) {
+ return { name: "othererror", error: error.failureCode };
+ }
+
+ if (error instanceof AuthenticationError) {
+ return { name: "autherror", from: error.source };
+ }
+
+ if (error instanceof Ci.mozIStorageError) {
+ return { name: "sqlerror", code: error.result };
+ }
+
+ let httpCode = error.status ||
+ (error.response && error.response.status) ||
+ error.code;
+
+ if (httpCode) {
+ return { name: "httperror", code: httpCode };
+ }
+
+ if (error.result) {
+ return { name: "nserror", code: error.result };
+ }
+
+ return {
+ name: "unexpectederror",
+ // as above, remove the profile dir value.
+ error: String(error).replace(reProfileDir, "[profileDir]")
+ }
+}
+
+function tryGetMonotonicTimestamp() {
+ try {
+ return Telemetry.msSinceProcessStart();
+ } catch (e) {
+ log.warn("Unable to get a monotonic timestamp!");
+ return -1;
+ }
+}
+
+function timeDeltaFrom(monotonicStartTime) {
+ let now = tryGetMonotonicTimestamp();
+ if (monotonicStartTime !== -1 && now !== -1) {
+ return Math.round(now - monotonicStartTime);
+ }
+ return -1;
+}
+
+class EngineRecord {
+ constructor(name) {
+ // startTime is in ms from process start, but is monotonic (unlike Date.now())
+ // so we need to keep both it and when.
+ this.startTime = tryGetMonotonicTimestamp();
+ this.name = name;
+ }
+
+ toJSON() {
+ let result = Object.assign({}, this);
+ delete result.startTime;
+ return result;
+ }
+
+ finished(error) {
+ let took = timeDeltaFrom(this.startTime);
+ if (took > 0) {
+ this.took = took;
+ }
+ if (error) {
+ this.failureReason = transformError(error, this.name);
+ }
+ }
+
+ recordApplied(counts) {
+ if (this.incoming) {
+ log.error(`Incoming records applied multiple times for engine ${this.name}!`);
+ return;
+ }
+ if (this.name === "clients" && !counts.failed) {
+ // ignore successful application of client records
+ // since otherwise they show up every time and are meaningless.
+ return;
+ }
+
+ let incomingData = {};
+ let properties = ["applied", "failed", "newFailed", "reconciled"];
+ // Only record non-zero properties and only record incoming at all if
+ // there's at least one property we care about.
+ for (let property of properties) {
+ if (counts[property]) {
+ incomingData[property] = counts[property];
+ this.incoming = incomingData;
+ }
+ }
+ }
+
+ recordValidation(validationResult) {
+ if (this.validation) {
+ log.error(`Multiple validations occurred for engine ${this.name}!`);
+ return;
+ }
+ let { problems, version, duration, recordCount } = validationResult;
+ let validation = {
+ version: version || 0,
+ checked: recordCount || 0,
+ };
+ if (duration > 0) {
+ validation.took = Math.round(duration);
+ }
+ let summarized = problems.getSummary(true).filter(({count}) => count > 0);
+ if (summarized.length) {
+ validation.problems = summarized;
+ }
+ this.validation = validation;
+ }
+
+ recordValidationError(e) {
+ if (this.validation) {
+ log.error(`Multiple validations occurred for engine ${this.name}!`);
+ return;
+ }
+
+ this.validation = {
+ failureReason: transformError(e)
+ };
+ }
+
+ recordUploaded(counts) {
+ if (counts.sent || counts.failed) {
+ if (!this.outgoing) {
+ this.outgoing = [];
+ }
+ this.outgoing.push({
+ sent: counts.sent || undefined,
+ failed: counts.failed || undefined,
+ });
+ }
+ }
+}
+
+class TelemetryRecord {
+ constructor(allowedEngines) {
+ this.allowedEngines = allowedEngines;
+ // Our failure reason. This property only exists in the generated ping if an
+ // error actually occurred.
+ this.failureReason = undefined;
+ this.uid = "";
+ this.when = Date.now();
+ this.startTime = tryGetMonotonicTimestamp();
+ this.took = 0; // will be set later.
+
+ // All engines that have finished (ie, does not include the "current" one)
+ // We omit this from the ping if it's empty.
+ this.engines = [];
+ // The engine that has started but not yet stopped.
+ this.currentEngine = null;
+ }
+
+ toJSON() {
+ let result = {
+ when: this.when,
+ uid: this.uid,
+ took: this.took,
+ failureReason: this.failureReason,
+ status: this.status,
+ deviceID: this.deviceID,
+ devices: this.devices,
+ };
+ let engines = [];
+ for (let engine of this.engines) {
+ engines.push(engine.toJSON());
+ }
+ if (engines.length > 0) {
+ result.engines = engines;
+ }
+ return result;
+ }
+
+ finished(error) {
+ this.took = timeDeltaFrom(this.startTime);
+ if (this.currentEngine != null) {
+ log.error("Finished called for the sync before the current engine finished");
+ this.currentEngine.finished(null);
+ this.onEngineStop(this.currentEngine.name);
+ }
+ if (error) {
+ this.failureReason = transformError(error);
+ }
+
+ // We don't bother including the "devices" field if we can't come up with a
+ // UID or device ID for *this* device -- If that's the case, any data we'd
+ // put there would be likely to be full of garbage anyway.
+ let includeDeviceInfo = false;
+ try {
+ this.uid = Weave.Service.identity.hashedUID();
+ let deviceID = Weave.Service.identity.deviceID();
+ if (deviceID) {
+ // Combine the raw device id with the metrics uid to create a stable
+ // unique identifier that can't be mapped back to the user's FxA
+ // identity without knowing the metrics HMAC key.
+ this.deviceID = Utils.sha256(deviceID + this.uid);
+ includeDeviceInfo = true;
+ }
+ } catch (e) {
+ this.uid = "0".repeat(32);
+ this.deviceID = undefined;
+ }
+
+ if (includeDeviceInfo) {
+ let remoteDevices = Weave.Service.clientsEngine.remoteClients;
+ this.devices = remoteDevices.map(device => {
+ return {
+ os: device.os,
+ version: device.version,
+ id: Utils.sha256(device.id + this.uid)
+ };
+ });
+ }
+
+ // Check for engine statuses. -- We do this now, and not in engine.finished
+ // to make sure any statuses that get set "late" are recorded
+ for (let engine of this.engines) {
+ let status = Status.engines[engine.name];
+ if (status && status !== constants.ENGINE_SUCCEEDED) {
+ engine.status = status;
+ }
+ }
+
+ let statusObject = {};
+
+ let serviceStatus = Status.service;
+ if (serviceStatus && serviceStatus !== constants.STATUS_OK) {
+ statusObject.service = serviceStatus;
+ this.status = statusObject;
+ }
+ let syncStatus = Status.sync;
+ if (syncStatus && syncStatus !== constants.SYNC_SUCCEEDED) {
+ statusObject.sync = syncStatus;
+ this.status = statusObject;
+ }
+ }
+
+ onEngineStart(engineName) {
+ if (this._shouldIgnoreEngine(engineName, false)) {
+ return;
+ }
+
+ if (this.currentEngine) {
+ log.error(`Being told that engine ${engineName} has started, but current engine ${
+ this.currentEngine.name} hasn't stopped`);
+ // Just discard the current engine rather than making up data for it.
+ }
+ this.currentEngine = new EngineRecord(engineName);
+ }
+
+ onEngineStop(engineName, error) {
+ // We only care if it's the current engine if we have a current engine.
+ if (this._shouldIgnoreEngine(engineName, !!this.currentEngine)) {
+ return;
+ }
+ if (!this.currentEngine) {
+ // It's possible for us to get an error before the start message of an engine
+ // (somehow), in which case we still want to record that error.
+ if (!error) {
+ return;
+ }
+ log.error(`Error triggered on ${engineName} when no current engine exists: ${error}`);
+ this.currentEngine = new EngineRecord(engineName);
+ }
+ this.currentEngine.finished(error);
+ this.engines.push(this.currentEngine);
+ this.currentEngine = null;
+ }
+
+ onEngineApplied(engineName, counts) {
+ if (this._shouldIgnoreEngine(engineName)) {
+ return;
+ }
+ this.currentEngine.recordApplied(counts);
+ }
+
+ onEngineValidated(engineName, validationData) {
+ if (this._shouldIgnoreEngine(engineName, false)) {
+ return;
+ }
+ let engine = this.engines.find(e => e.name === engineName);
+ if (!engine && this.currentEngine && engineName === this.currentEngine.name) {
+ engine = this.currentEngine;
+ }
+ if (engine) {
+ engine.recordValidation(validationData);
+ } else {
+ log.warn(`Validation event triggered for engine ${engineName}, which hasn't been synced!`);
+ }
+ }
+
+ onEngineValidateError(engineName, error) {
+ if (this._shouldIgnoreEngine(engineName, false)) {
+ return;
+ }
+ let engine = this.engines.find(e => e.name === engineName);
+ if (!engine && this.currentEngine && engineName === this.currentEngine.name) {
+ engine = this.currentEngine;
+ }
+ if (engine) {
+ engine.recordValidationError(error);
+ } else {
+ log.warn(`Validation failure event triggered for engine ${engineName}, which hasn't been synced!`);
+ }
+ }
+
+ onEngineUploaded(engineName, counts) {
+ if (this._shouldIgnoreEngine(engineName)) {
+ return;
+ }
+ this.currentEngine.recordUploaded(counts);
+ }
+
+ _shouldIgnoreEngine(engineName, shouldBeCurrent = true) {
+ if (!this.allowedEngines.has(engineName)) {
+ log.info(`Notification for engine ${engineName}, but we aren't recording telemetry for it`);
+ return true;
+ }
+ if (shouldBeCurrent) {
+ if (!this.currentEngine || engineName != this.currentEngine.name) {
+ log.error(`Notification for engine ${engineName} but it isn't current`);
+ return true;
+ }
+ }
+ return false;
+ }
+}
+
+class SyncTelemetryImpl {
+ constructor(allowedEngines) {
+ log.level = Log.Level[Svc.Prefs.get("log.logger.telemetry", "Trace")];
+ // This is accessible so we can enable custom engines during tests.
+ this.allowedEngines = allowedEngines;
+ this.current = null;
+ this.setupObservers();
+
+ this.payloads = [];
+ this.discarded = 0;
+ this.maxPayloadCount = Svc.Prefs.get("telemetry.maxPayloadCount");
+ this.submissionInterval = Svc.Prefs.get("telemetry.submissionInterval") * 1000;
+ this.lastSubmissionTime = Telemetry.msSinceProcessStart();
+ }
+
+ getPingJSON(reason) {
+ return {
+ why: reason,
+ discarded: this.discarded || undefined,
+ version: PING_FORMAT_VERSION,
+ syncs: this.payloads.slice(),
+ };
+ }
+
+ finish(reason) {
+ // Note that we might be in the middle of a sync right now, and so we don't
+ // want to touch this.current.
+ let result = this.getPingJSON(reason);
+ this.payloads = [];
+ this.discarded = 0;
+ this.submit(result);
+ }
+
+ setupObservers() {
+ for (let topic of TOPICS) {
+ Observers.add(topic, this, this);
+ }
+ }
+
+ shutdown() {
+ this.finish("shutdown");
+ for (let topic of TOPICS) {
+ Observers.remove(topic, this, this);
+ }
+ }
+
+ submit(record) {
+ // We still call submit() with possibly illegal payloads so that tests can
+ // know that the ping was built. We don't end up submitting them, however.
+ if (record.syncs.length) {
+ log.trace(`submitting ${record.syncs.length} sync record(s) to telemetry`);
+ TelemetryController.submitExternalPing("sync", record);
+ }
+ }
+
+
+ onSyncStarted() {
+ if (this.current) {
+ log.warn("Observed weave:service:sync:start, but we're already recording a sync!");
+ // Just discard the old record, consistent with our handling of engines, above.
+ this.current = null;
+ }
+ this.current = new TelemetryRecord(this.allowedEngines);
+ }
+
+ _checkCurrent(topic) {
+ if (!this.current) {
+ log.warn(`Observed notification ${topic} but no current sync is being recorded.`);
+ return false;
+ }
+ return true;
+ }
+
+ onSyncFinished(error) {
+ if (!this.current) {
+ log.warn("onSyncFinished but we aren't recording");
+ return;
+ }
+ this.current.finished(error);
+ if (this.payloads.length < this.maxPayloadCount) {
+ this.payloads.push(this.current.toJSON());
+ } else {
+ ++this.discarded;
+ }
+ this.current = null;
+ if ((Telemetry.msSinceProcessStart() - this.lastSubmissionTime) > this.submissionInterval) {
+ this.finish("schedule");
+ this.lastSubmissionTime = Telemetry.msSinceProcessStart();
+ }
+ }
+
+ observe(subject, topic, data) {
+ log.trace(`observed ${topic} ${data}`);
+
+ switch (topic) {
+ case "profile-before-change":
+ this.shutdown();
+ break;
+
+ /* sync itself state changes */
+ case "weave:service:sync:start":
+ this.onSyncStarted();
+ break;
+
+ case "weave:service:sync:finish":
+ if (this._checkCurrent(topic)) {
+ this.onSyncFinished(null);
+ }
+ break;
+
+ case "weave:service:sync:error":
+ // argument needs to be truthy (this should always be the case)
+ this.onSyncFinished(subject || "Unknown");
+ break;
+
+ /* engine sync state changes */
+ case "weave:engine:sync:start":
+ if (this._checkCurrent(topic)) {
+ this.current.onEngineStart(data);
+ }
+ break;
+ case "weave:engine:sync:finish":
+ if (this._checkCurrent(topic)) {
+ this.current.onEngineStop(data, null);
+ }
+ break;
+
+ case "weave:engine:sync:error":
+ if (this._checkCurrent(topic)) {
+ // argument needs to be truthy (this should always be the case)
+ this.current.onEngineStop(data, subject || "Unknown");
+ }
+ break;
+
+ /* engine counts */
+ case "weave:engine:sync:applied":
+ if (this._checkCurrent(topic)) {
+ this.current.onEngineApplied(data, subject);
+ }
+ break;
+
+ case "weave:engine:sync:uploaded":
+ if (this._checkCurrent(topic)) {
+ this.current.onEngineUploaded(data, subject);
+ }
+ break;
+
+ case "weave:engine:validate:finish":
+ if (this._checkCurrent(topic)) {
+ this.current.onEngineValidated(data, subject);
+ }
+ break;
+
+ case "weave:engine:validate:error":
+ if (this._checkCurrent(topic)) {
+ this.current.onEngineValidateError(data, subject || "Unknown");
+ }
+ break;
+
+ default:
+ log.warn(`unexpected observer topic ${topic}`);
+ break;
+ }
+ }
+}
+
+this.SyncTelemetry = new SyncTelemetryImpl(ENGINES);
diff --git a/services/sync/modules/userapi.js b/services/sync/modules/userapi.js
new file mode 100644
index 000000000..e906440bd
--- /dev/null
+++ b/services/sync/modules/userapi.js
@@ -0,0 +1,224 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+"use strict";
+
+this.EXPORTED_SYMBOLS = [
+ "UserAPI10Client",
+];
+
+var {utils: Cu} = Components;
+
+Cu.import("resource://gre/modules/Log.jsm");
+Cu.import("resource://services-common/rest.js");
+Cu.import("resource://services-common/utils.js");
+Cu.import("resource://services-sync/identity.js");
+Cu.import("resource://services-sync/util.js");
+
+/**
+ * A generic client for the user API 1.0 service.
+ *
+ * http://docs.services.mozilla.com/reg/apis.html
+ *
+ * Instances are constructed with the base URI of the service.
+ */
+this.UserAPI10Client = function UserAPI10Client(baseURI) {
+ this._log = Log.repository.getLogger("Sync.UserAPI");
+ this._log.level = Log.Level[Svc.Prefs.get("log.logger.userapi")];
+
+ this.baseURI = baseURI;
+}
+UserAPI10Client.prototype = {
+ USER_CREATE_ERROR_CODES: {
+ 2: "Incorrect or missing captcha.",
+ 4: "User exists.",
+ 6: "JSON parse failure.",
+ 7: "Missing password field.",
+ 9: "Requested password not strong enough.",
+ 12: "No email address on file.",
+ },
+
+ /**
+ * Determine whether a specified username exists.
+ *
+ * Callback receives the following arguments:
+ *
+ * (Error) Describes error that occurred or null if request was
+ * successful.
+ * (boolean) True if user exists. False if not. null if there was an error.
+ */
+ usernameExists: function usernameExists(username, cb) {
+ if (typeof(cb) != "function") {
+ throw new Error("cb must be a function.");
+ }
+
+ let url = this.baseURI + username;
+ let request = new RESTRequest(url);
+ request.get(this._onUsername.bind(this, cb, request));
+ },
+
+ /**
+ * Obtain the Weave (Sync) node for a specified user.
+ *
+ * The callback receives the following arguments:
+ *
+ * (Error) Describes error that occurred or null if request was successful.
+ * (string) Username request is for.
+ * (string) URL of user's node. If null and there is no error, no node could
+ * be assigned at the time of the request.
+ */
+ getWeaveNode: function getWeaveNode(username, password, cb) {
+ if (typeof(cb) != "function") {
+ throw new Error("cb must be a function.");
+ }
+
+ let request = this._getRequest(username, "/node/weave", password);
+ request.get(this._onWeaveNode.bind(this, cb, request));
+ },
+
+ /**
+ * Change a password for the specified user.
+ *
+ * @param username
+ * (string) The username whose password to change.
+ * @param oldPassword
+ * (string) The old, current password.
+ * @param newPassword
+ * (string) The new password to switch to.
+ */
+ changePassword: function changePassword(username, oldPassword, newPassword, cb) {
+ let request = this._getRequest(username, "/password", oldPassword);
+ request.onComplete = this._onChangePassword.bind(this, cb, request);
+ request.post(CommonUtils.encodeUTF8(newPassword));
+ },
+
+ createAccount: function createAccount(email, password, captchaChallenge,
+ captchaResponse, cb) {
+ let username = IdentityManager.prototype.usernameFromAccount(email);
+ let body = JSON.stringify({
+ "email": email,
+ "password": Utils.encodeUTF8(password),
+ "captcha-challenge": captchaChallenge,
+ "captcha-response": captchaResponse
+ });
+
+ let url = this.baseURI + username;
+ let request = new RESTRequest(url);
+
+ if (this.adminSecret) {
+ request.setHeader("X-Weave-Secret", this.adminSecret);
+ }
+
+ request.onComplete = this._onCreateAccount.bind(this, cb, request);
+ request.put(body);
+ },
+
+ _getRequest: function _getRequest(username, path, password=null) {
+ let url = this.baseURI + username + path;
+ let request = new RESTRequest(url);
+
+ if (password) {
+ let up = username + ":" + password;
+ request.setHeader("authorization", "Basic " + btoa(up));
+ }
+
+ return request;
+ },
+
+ _onUsername: function _onUsername(cb, request, error) {
+ if (error) {
+ cb(error, null);
+ return;
+ }
+
+ let body = request.response.body;
+ if (body == "0") {
+ cb(null, false);
+ return;
+ } else if (body == "1") {
+ cb(null, true);
+ return;
+ } else {
+ cb(new Error("Unknown response from server: " + body), null);
+ return;
+ }
+ },
+
+ _onWeaveNode: function _onWeaveNode(cb, request, error) {
+ if (error) {
+ cb.network = true;
+ cb(error, null);
+ return;
+ }
+
+ let response = request.response;
+
+ if (response.status == 200) {
+ let body = response.body;
+ if (body == "null") {
+ cb(null, null);
+ return;
+ }
+
+ cb(null, body);
+ return;
+ }
+
+ error = new Error("Sync node retrieval failed.");
+ switch (response.status) {
+ case 400:
+ error.denied = true;
+ break;
+ case 404:
+ error.notFound = true;
+ break;
+ default:
+ error.message = "Unexpected response code: " + response.status;
+ }
+
+ cb(error, null);
+ return;
+ },
+
+ _onChangePassword: function _onChangePassword(cb, request, error) {
+ this._log.info("Password change response received: " +
+ request.response.status);
+ if (error) {
+ cb(error);
+ return;
+ }
+
+ let response = request.response;
+ if (response.status != 200) {
+ cb(new Error("Password changed failed: " + response.body));
+ return;
+ }
+
+ cb(null);
+ },
+
+ _onCreateAccount: function _onCreateAccount(cb, request, error) {
+ let response = request.response;
+
+ this._log.info("Create account response: " + response.status + " " +
+ response.body);
+
+ if (error) {
+ cb(new Error("HTTP transport error."), null);
+ return;
+ }
+
+ if (response.status == 200) {
+ cb(null, response.body);
+ return;
+ }
+
+ error = new Error("Could not create user.");
+ error.body = response.body;
+
+ cb(error, null);
+ return;
+ },
+};
+Object.freeze(UserAPI10Client.prototype);
diff --git a/services/sync/modules/util.js b/services/sync/modules/util.js
new file mode 100644
index 000000000..e9dbcb37d
--- /dev/null
+++ b/services/sync/modules/util.js
@@ -0,0 +1,797 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+this.EXPORTED_SYMBOLS = ["XPCOMUtils", "Services", "Utils", "Async", "Svc", "Str"];
+
+var {classes: Cc, interfaces: Ci, results: Cr, utils: Cu} = Components;
+
+Cu.import("resource://gre/modules/Log.jsm");
+Cu.import("resource://services-common/observers.js");
+Cu.import("resource://services-common/stringbundle.js");
+Cu.import("resource://services-common/utils.js");
+Cu.import("resource://services-common/async.js", this);
+Cu.import("resource://services-crypto/utils.js");
+Cu.import("resource://services-sync/constants.js");
+Cu.import("resource://gre/modules/Preferences.jsm");
+Cu.import("resource://gre/modules/Services.jsm", this);
+Cu.import("resource://gre/modules/XPCOMUtils.jsm", this);
+Cu.import("resource://gre/modules/osfile.jsm", this);
+Cu.import("resource://gre/modules/Task.jsm", this);
+
// FxAccountsCommon.js doesn't use a "namespace", so create one here.
// Defined lazily so merely importing util.js doesn't pull in FxAccounts.
XPCOMUtils.defineLazyGetter(this, "FxAccountsCommon", function() {
  let FxAccountsCommon = {};
  Cu.import("resource://gre/modules/FxAccountsCommon.js", FxAccountsCommon);
  return FxAccountsCommon;
});
+
/*
 * Utility functions
 */

this.Utils = {
  // Alias in functions from CommonUtils. These previously were defined here.
  // In the ideal world, references to these would be removed.
  nextTick: CommonUtils.nextTick,
  namedTimer: CommonUtils.namedTimer,
  makeURI: CommonUtils.makeURI,
  encodeUTF8: CommonUtils.encodeUTF8,
  decodeUTF8: CommonUtils.decodeUTF8,
  safeAtoB: CommonUtils.safeAtoB,
  byteArrayToString: CommonUtils.byteArrayToString,
  bytesAsHex: CommonUtils.bytesAsHex,
  hexToBytes: CommonUtils.hexToBytes,
  encodeBase32: CommonUtils.encodeBase32,
  decodeBase32: CommonUtils.decodeBase32,

  // Aliases from CryptoUtils.
  generateRandomBytes: CryptoUtils.generateRandomBytes,
  computeHTTPMACSHA1: CryptoUtils.computeHTTPMACSHA1,
  digestUTF8: CryptoUtils.digestUTF8,
  digestBytes: CryptoUtils.digestBytes,
  sha1: CryptoUtils.sha1,
  sha1Base32: CryptoUtils.sha1Base32,
  sha256: CryptoUtils.sha256,
  makeHMACKey: CryptoUtils.makeHMACKey,
  makeHMACHasher: CryptoUtils.makeHMACHasher,
  hkdfExpand: CryptoUtils.hkdfExpand,
  pbkdf2Generate: CryptoUtils.pbkdf2Generate,
  deriveKeyFromPassphrase: CryptoUtils.deriveKeyFromPassphrase,
  getHTTPMACSHA1Header: CryptoUtils.getHTTPMACSHA1Header,

  /**
   * The string to use as the base User-Agent in Sync requests.
   * This string will look something like
   *
   *   Firefox/49.0a1 (Windows NT 6.1; WOW64; rv:46.0) FxSync/1.51.0.20160516142357.desktop
   */
  _userAgent: null,
  get userAgent() {
    if (!this._userAgent) {
      // Build and cache the static prefix once; only the "client.type"
      // suffix below is re-read on every access.
      let hph = Cc["@mozilla.org/network/protocol;1?name=http"].getService(Ci.nsIHttpProtocolHandler);
      this._userAgent =
        Services.appinfo.name + "/" + Services.appinfo.version + // Product.
        " (" + hph.oscpu + ")" + // (oscpu)
        " FxSync/" + WEAVE_VERSION + "." + // Sync.
        Services.appinfo.appBuildID + "."; // Build.
    }
    return this._userAgent + Svc.Prefs.get("client.type", "desktop");
  },
+
  /**
   * Wrap a function to catch all exceptions and log them
   *
   * @usage MyObj._catch = Utils.catch;
   *        MyObj.foo = function() { this._catch(func)(); }
   *
   * Optionally pass a function which will be called if an
   * exception occurs.
   */
  catch: function Utils_catch(func, exceptionCallback) {
    let thisArg = this;
    return function WrappedCatch() {
      try {
        return func.call(thisArg);
      }
      catch(ex) {
        thisArg._log.debug("Exception calling " + (func.name || "anonymous function"), ex);
        if (exceptionCallback) {
          return exceptionCallback.call(thisArg, ex);
        }
        // Without a callback the exception is swallowed and the wrapped
        // call yields null.
        return null;
      }
    };
  },

  /**
   * Wrap a function to call lock before calling the function then unlock.
   *
   * @usage MyObj._lock = Utils.lock;
   *        MyObj.foo = function() { this._lock(func)(); }
   */
  lock: function lock(label, func) {
    let thisArg = this;
    return function WrappedLock() {
      if (!thisArg.lock()) {
        // Deliberately a bare string: isLockException() below matches on
        // this exact prefix, so don't convert it to an Error object.
        throw "Could not acquire lock. Label: \"" + label + "\".";
      }

      try {
        return func.call(thisArg);
      }
      finally {
        thisArg.unlock();
      }
    };
  },

  // True iff `ex` is the string thrown by Utils.lock on lock failure.
  isLockException: function isLockException(ex) {
    return ex && ex.indexOf && ex.indexOf("Could not acquire lock.") == 0;
  },
+
  /**
   * Wrap functions to notify when it starts and finishes executing or if it
   * threw an error.
   *
   * The message is a combination of a provided prefix, the local name, and
   * the event. Possible events are: "start", "finish", "error". The subject
   * is the function's return value on "finish" or the caught exception on
   * "error". The data argument is the predefined data value.
   *
   * Example:
   *
   * @usage function MyObj(name) {
   *          this.name = name;
   *          this._notify = Utils.notify("obj:");
   *        }
   *        MyObj.prototype = {
   *          foo: function() this._notify("func", "data-arg", function () {
   *            //...
   *          }(),
   *        };
   */
  notify: function Utils_notify(prefix) {
    return function NotifyMaker(name, data, func) {
      let thisArg = this;
      // Broadcast "<prefix><name>:<state>" through the Observers service.
      let notify = function(state, subject) {
        let mesg = prefix + name + ":" + state;
        thisArg._log.trace("Event: " + mesg);
        Observers.notify(mesg, subject, data);
      };

      return function WrappedNotify() {
        try {
          notify("start", null);
          let ret = func.call(thisArg);
          notify("finish", ret);
          return ret;
        }
        catch(ex) {
          // Report the failure, then rethrow so callers still see it.
          notify("error", ex);
          throw ex;
        }
      };
    };
  },
+
  /**
   * GUIDs are 9 random bytes encoded with base64url (RFC 4648).
   * That makes them 12 characters long with 72 bits of entropy.
   */
  makeGUID: function makeGUID() {
    return CommonUtils.encodeBase64URL(Utils.generateRandomBytes(9));
  },

  // Exactly 12 base64url characters (letters, digits, "-", "_"); the /i
  // flag lets the lowercase ranges also cover uppercase.
  _base64url_regex: /^[-abcdefghijklmnopqrstuvwxyz0123456789_]{12}$/i,
  checkGUID: function checkGUID(guid) {
    return !!guid && this._base64url_regex.test(guid);
  },
+
  /**
   * Add a simple getter/setter to an object that defers access of a property
   * to an inner property.
   *
   * @param obj
   *        Object to add properties to defer in its prototype
   * @param defer
   *        Property of obj to defer to
   * @param prop
   *        Property name to defer (or an array of property names)
   */
  deferGetSet: function Utils_deferGetSet(obj, defer, prop) {
    if (Array.isArray(prop))
      return prop.map(prop => Utils.deferGetSet(obj, defer, prop));

    let prot = obj.prototype;

    // Create a getter if it doesn't exist yet.
    // NOTE(review): __lookupGetter__/__defineGetter__ are legacy accessors
    // that consult the whole prototype chain; kept as-is to preserve the
    // "don't clobber an inherited getter" behavior.
    if (!prot.__lookupGetter__(prop)) {
      prot.__defineGetter__(prop, function () {
        return this[defer][prop];
      });
    }

    // Create a setter if it doesn't exist yet
    if (!prot.__lookupSetter__(prop)) {
      prot.__defineSetter__(prop, function (val) {
        this[defer][prop] = val;
      });
    }
  },

  // Return a thunk (for XPCOMUtils.defineLazyGetter) that loads the named
  // weave string bundle on first use.
  lazyStrings: function Weave_lazyStrings(name) {
    let bundle = "chrome://weave/locale/services/" + name + ".properties";
    return () => new StringBundle(bundle);
  },
+
  // Structural equality: primitives via ===, objects by recursively
  // comparing keys from both sides. Not cycle-safe — self-referential
  // structures will recurse without bound.
  deepEquals: function eq(a, b) {
    // If they're triple equals, then it must be equals!
    if (a === b)
      return true;

    // If they weren't equal, they must be objects to be different
    if (typeof a != "object" || typeof b != "object")
      return false;

    // But null objects won't have properties to compare
    if (a === null || b === null)
      return false;

    // Make sure all of a's keys have a matching value in b
    for (let k in a)
      if (!eq(a[k], b[k]))
        return false;

    // Do the same for b's keys but skip those that we already checked
    for (let k in b)
      if (!(k in a) && !eq(a[k], b[k]))
        return false;

    return true;
  },
+
  // Generator and discriminator for HMAC exceptions.
  // Split these out in case we want to make them richer in future, and to
  // avoid inevitable confusion if the message changes.
  // The thrown value is intentionally a bare string: isHMACMismatch below
  // keys off this exact message prefix.
  throwHMACMismatch: function throwHMACMismatch(shouldBe, is) {
    throw "Record SHA256 HMAC mismatch: should be " + shouldBe + ", is " + is;
  },

  isHMACMismatch: function isHMACMismatch(ex) {
    const hmacFail = "Record SHA256 HMAC mismatch: ";
    return ex && ex.indexOf && (ex.indexOf(hmacFail) == 0);
  },
+
+ /**
+ * Turn RFC 4648 base32 into our own user-friendly version.
+ * ABCDEFGHIJKLMNOPQRSTUVWXYZ234567
+ * becomes
+ * abcdefghijk8mn9pqrstuvwxyz234567
+ */
+ base32ToFriendly: function base32ToFriendly(input) {
+ return input.toLowerCase()
+ .replace(/l/g, '8')
+ .replace(/o/g, '9');
+ },
+
+ base32FromFriendly: function base32FromFriendly(input) {
+ return input.toUpperCase()
+ .replace(/8/g, 'L')
+ .replace(/9/g, 'O');
+ },
+
  /**
   * Key manipulation.
   */

  // Return an octet string in friendly base32 *with no trailing =*.
  encodeKeyBase32: function encodeKeyBase32(keyData) {
    return Utils.base32ToFriendly(
             Utils.encodeBase32(keyData))
           .slice(0, SYNC_KEY_ENCODED_LENGTH);
  },

  // Inverse of encodeKeyBase32. Accepts presentable (hyphenated) input
  // because it normalizes before decoding.
  decodeKeyBase32: function decodeKeyBase32(encoded) {
    return Utils.decodeBase32(
             Utils.base32FromFriendly(
               Utils.normalizePassphrase(encoded)))
           .slice(0, SYNC_KEY_DECODED_LENGTH);
  },

  // Base64-encode a raw octet string.
  base64Key: function base64Key(keyData) {
    return btoa(keyData);
  },

  /**
   * N.B., salt should be base64 encoded, even though we have to decode
   * it later!
   */
  derivePresentableKeyFromPassphrase : function derivePresentableKeyFromPassphrase(passphrase, salt, keyLength, forceJS) {
    let k = CryptoUtils.deriveKeyFromPassphrase(passphrase, salt, keyLength,
                                                forceJS);
    return Utils.encodeKeyBase32(k);
  },

  /**
   * N.B., salt should be base64 encoded, even though we have to decode
   * it later!
   */
  deriveEncodedKeyFromPassphrase : function deriveEncodedKeyFromPassphrase(passphrase, salt, keyLength, forceJS) {
    let k = CryptoUtils.deriveKeyFromPassphrase(passphrase, salt, keyLength,
                                                forceJS);
    return Utils.base64Key(k);
  },

  /**
   * Take a base64-encoded 128-bit AES key, returning it as five groups of five
   * uppercase alphanumeric characters, separated by hyphens.
   * A.K.A. base64-to-base32 encoding.
   */
  presentEncodedKeyAsSyncKey : function presentEncodedKeyAsSyncKey(encodedKey) {
    return Utils.encodeKeyBase32(atob(encodedKey));
  },
+
  /**
   * Load a JSON file from disk in the profile directory.
   *
   * @param filePath
   *        JSON file path load from profile. Loaded file will be
   *        <profile>/<filePath>.json. i.e. Do not specify the ".json"
   *        extension.
   * @param that
   *        Object to use for logging and "this" for callback.
   * @param callback
   *        Function to process json object as its first argument. If the file
   *        could not be loaded, the first argument will be undefined.
   */
  jsonLoad: Task.async(function*(filePath, that, callback) {
    let path = OS.Path.join(OS.Constants.Path.profileDir, "weave", filePath + ".json");

    if (that._log) {
      that._log.trace("Loading json from disk: " + filePath);
    }

    let json;

    try {
      json = yield CommonUtils.readJSON(path);
    } catch (e) {
      if (e instanceof OS.File.Error && e.becauseNoSuchFile) {
        // Ignore non-existent files, but explicitly return null.
        json = null;
      } else {
        // Any other failure (I/O, parse error) is logged and leaves json
        // undefined; the callback still runs either way.
        if (that._log) {
          that._log.debug("Failed to load json", e);
        }
      }
    }

    callback.call(that, json);
  }),
+
  /**
   * Save a json-able object to disk in the profile directory.
   *
   * @param filePath
   *        JSON file path save to <filePath>.json
   * @param that
   *        Object to use for logging and "this" for callback
   * @param obj
   *        Function to provide json-able object to save. If this isn't a
   *        function, it'll be used as the object to make a json string.
   * @param callback
   *        Function called when the write has been performed. Optional.
   *        The first argument will be a Components.results error
   *        constant on error or null if no error was encountered (and
   *        the file saved successfully).
   */
  jsonSave: Task.async(function*(filePath, that, obj, callback) {
    // filePath may contain "/" separators; each becomes a path component
    // under <profile>/weave.
    let path = OS.Path.join(OS.Constants.Path.profileDir, "weave",
                            ...(filePath + ".json").split("/"));
    let dir = OS.Path.dirname(path);
    let error = null;

    try {
      // Create any missing intermediate directories first.
      yield OS.File.makeDir(dir, { from: OS.Constants.Path.profileDir });

      if (that._log) {
        that._log.trace("Saving json to disk: " + path);
      }

      let json = typeof obj == "function" ? obj.call(that) : obj;

      yield CommonUtils.writeJSON(json, path);
    } catch (e) {
      error = e
    }

    if (typeof callback == "function") {
      callback.call(that, error);
    }
  }),
+
  /**
   * Move a json file in the profile directory. Will fail if a file exists at the
   * destination.
   *
   * @returns a promise that resolves to undefined on success, or rejects on failure
   *
   * @param aFrom
   *        Current path to the JSON file saved on disk, relative to profileDir/weave
   *        .json will be appended to the file name.
   * @param aTo
   *        New path to the JSON file saved on disk, relative to profileDir/weave
   *        .json will be appended to the file name.
   * @param that
   *        Object to use for logging
   */
  jsonMove(aFrom, aTo, that) {
    let pathFrom = OS.Path.join(OS.Constants.Path.profileDir, "weave",
                                ...(aFrom + ".json").split("/"));
    let pathTo = OS.Path.join(OS.Constants.Path.profileDir, "weave",
                              ...(aTo + ".json").split("/"));
    if (that._log) {
      that._log.trace("Moving " + pathFrom + " to " + pathTo);
    }
    // noOverwrite makes the returned promise reject if the target exists.
    return OS.File.move(pathFrom, pathTo, { noOverwrite: true });
  },

  /**
   * Removes a json file in the profile directory.
   *
   * @returns a promise that resolves to undefined on success, or rejects on failure
   *
   * @param filePath
   *        Current path to the JSON file saved on disk, relative to profileDir/weave
   *        .json will be appended to the file name.
   * @param that
   *        Object to use for logging
   */
  jsonRemove(filePath, that) {
    let path = OS.Path.join(OS.Constants.Path.profileDir, "weave",
                            ...(filePath + ".json").split("/"));
    if (that._log) {
      that._log.trace("Deleting " + path);
    }
    // ignoreAbsent: deleting a file that is already gone is a success.
    return OS.File.remove(path, { ignoreAbsent: true });
  },
+
  // Look up a localized error string by key, with optional format args;
  // unknown keys fall back to the generic "unknown error" string.
  getErrorString: function Utils_getErrorString(error, args) {
    try {
      return Str.errors.get(error, args || null);
    } catch (e) {}

    // basically returns "Unknown Error"
    return Str.errors.get("error.reason.unknown");
  },

  /**
   * Generate 26 characters.
   */
  generatePassphrase: function generatePassphrase() {
    // Note that this is a different base32 alphabet to the one we use for
    // other tasks. It's lowercase, uses different letters, and needs to be
    // decoded with decodeKeyBase32, not just decodeBase32.
    return Utils.encodeKeyBase32(CryptoUtils.generateRandomBytes(16));
  },
+
  /**
   * The following are the methods supported for UI use:
   *
   * * isPassphrase:
   *     determines whether a string is either a normalized or presentable
   *     passphrase.
   * * hyphenatePassphrase:
   *     present a normalized passphrase for display. This might actually
   *     perform work beyond just hyphenation; sorry.
   * * hyphenatePartialPassphrase:
   *     present a fragment of a normalized passphrase for display.
   * * normalizePassphrase:
   *     take a presentable passphrase and reduce it to a normalized
   *     representation for storage. normalizePassphrase can safely be called
   *     on normalized input.
   * * normalizeAccount:
   *     take user input for account/username, cleaning up appropriately.
   */

  // Exactly 26 characters drawn from the "friendly" base32 alphabet
  // (no l/o; 8 and 9 stand in for them — see base32ToFriendly).
  isPassphrase: function(s) {
    if (s) {
      return /^[abcdefghijkmnpqrstuvwxyz23456789]{26}$/.test(Utils.normalizePassphrase(s));
    }
    return false;
  },
+
  /**
   * Hyphenate a passphrase (26 characters) into groups.
   *   abbbbccccddddeeeeffffggggh
   * =>
   *   a-bbbbc-cccdd-ddeee-effff-ggggh
   */
  hyphenatePassphrase: function hyphenatePassphrase(passphrase) {
    // For now, these are the same.
    return Utils.hyphenatePartialPassphrase(passphrase, true);
  },

  // Hyphenate a possibly-incomplete passphrase for display while the user
  // is typing. omitTrailingDash suppresses the dash appended after a
  // complete group.
  hyphenatePartialPassphrase: function hyphenatePartialPassphrase(passphrase, omitTrailingDash) {
    if (!passphrase)
      return null;

    // Get the raw data input. Just base32.
    let data = passphrase.toLowerCase().replace(/[^abcdefghijkmnpqrstuvwxyz23456789]/g, "");

    // This is the neatest way to do this.
    if ((data.length == 1) && !omitTrailingDash)
      return data + "-";

    // Hyphenate it: one leading character, then dash-prefixed groups of
    // up to five.
    let y = data.substr(0,1);
    let z = data.substr(1).replace(/(.{1,5})/g, "-$1");

    // Correct length? We're done.
    if ((z.length == 30) || omitTrailingDash)
      return y + z;

    // Add a trailing dash if appropriate.
    return (y + z.replace(/([^-]{5})$/, "$1-")).substr(0, SYNC_KEY_HYPHENATED_LENGTH);
  },
+
+ normalizePassphrase: function normalizePassphrase(pp) {
+ // Short var name... have you seen the lines below?!
+ // Allow leading and trailing whitespace.
+ pp = pp.trim().toLowerCase();
+
+ // 20-char sync key.
+ if (pp.length == 23 &&
+ [5, 11, 17].every(i => pp[i] == '-')) {
+
+ return pp.slice(0, 5) + pp.slice(6, 11)
+ + pp.slice(12, 17) + pp.slice(18, 23);
+ }
+
+ // "Modern" 26-char key.
+ if (pp.length == 31 &&
+ [1, 7, 13, 19, 25].every(i => pp[i] == '-')) {
+
+ return pp.slice(0, 1) + pp.slice(2, 7)
+ + pp.slice(8, 13) + pp.slice(14, 19)
+ + pp.slice(20, 25) + pp.slice(26, 31);
+ }
+
+ // Something else -- just return.
+ return pp;
+ },
+
  // Canonicalize user-entered account names; currently just trims
  // surrounding whitespace.
  normalizeAccount: function normalizeAccount(acc) {
    return acc.trim();
  },
+
+ /**
+ * Create an array like the first but without elements of the second. Reuse
+ * arrays if possible.
+ */
+ arraySub: function arraySub(minuend, subtrahend) {
+ if (!minuend.length || !subtrahend.length)
+ return minuend;
+ return minuend.filter(i => subtrahend.indexOf(i) == -1);
+ },
+
+ /**
+ * Build the union of two arrays. Reuse arrays if possible.
+ */
+ arrayUnion: function arrayUnion(foo, bar) {
+ if (!foo.length)
+ return bar;
+ if (!bar.length)
+ return foo;
+ return foo.concat(Utils.arraySub(bar, foo));
+ },
+
+ bind2: function Async_bind2(object, method) {
+ return function innerBind() { return method.apply(object, arguments); };
+ },
+
  /**
   * Is there a master password configured, regardless of current lock state?
   */
  mpEnabled: function mpEnabled() {
    let modules = Cc["@mozilla.org/security/pkcs11moduledb;1"]
                    .getService(Ci.nsIPKCS11ModuleDB);
    let sdrSlot = modules.findSlotByName("");
    let status = sdrSlot.status;
    let slots = Ci.nsIPKCS11Slot;

    // Anything other than "uninitialized" or "ready" means a master
    // password has been set on the softoken slot.
    return status != slots.SLOT_UNINITIALIZED && status != slots.SLOT_READY;
  },

  /**
   * Is there a master password configured and currently locked?
   */
  mpLocked: function mpLocked() {
    let modules = Cc["@mozilla.org/security/pkcs11moduledb;1"]
                    .getService(Ci.nsIPKCS11ModuleDB);
    let sdrSlot = modules.findSlotByName("");
    let status = sdrSlot.status;
    let slots = Ci.nsIPKCS11Slot;

    if (status == slots.SLOT_READY || status == slots.SLOT_LOGGED_IN
                                   || status == slots.SLOT_UNINITIALIZED)
      return false;

    if (status == slots.SLOT_NOT_LOGGED_IN)
      return true;

    // something wacky happened, pretend MP is locked
    return true;
  },

  // If Master Password is enabled and locked, present a dialog to unlock it.
  // Return whether the system is unlocked.
  ensureMPUnlocked: function ensureMPUnlocked() {
    if (!Utils.mpLocked()) {
      return true;
    }
    let sdr = Cc["@mozilla.org/security/sdr;1"]
                .getService(Ci.nsISecretDecoderRing);
    try {
      // Encrypting anything triggers the master-password prompt if locked;
      // success means the user unlocked it.
      sdr.encryptString("bacon");
      return true;
    } catch(e) {}
    return false;
  },
+
  /**
   * Return a value for a backoff interval. Maximum is eight hours, unless
   * Status.backoffInterval is higher.
   *
   * @param attempts       number of failed attempts so far
   * @param baseInterval   base interval (ms); the jittered result lies in
   *                       [attempts * baseInterval, 2 * attempts * baseInterval)
   * @param statusInterval server-requested minimum backoff (ms); always
   *                       honored even above the cap
   */
  calculateBackoff: function calculateBackoff(attempts, baseInterval,
                                              statusInterval) {
    let backoffInterval = attempts *
                          (Math.floor(Math.random() * baseInterval) +
                           baseInterval);
    return Math.max(Math.min(backoffInterval, MAXIMUM_BACKOFF_INTERVAL),
                    statusInterval);
  },
+
  /**
   * Return a set of hostnames (including the protocol) which may have
   * credentials for sync itself stored in the login manager.
   *
   * In general, these hosts will not have their passwords synced, will be
   * reset when we drop sync credentials, etc.
   */
  getSyncCredentialsHosts: function() {
    // Union of the legacy and FxA credential hosts.
    let result = new Set(this.getSyncCredentialsHostsLegacy());
    for (let host of this.getSyncCredentialsHostsFxA()) {
      result.add(host);
    }
    return result;
  },

  /*
   * Get the "legacy" identity hosts.
   */
  getSyncCredentialsHostsLegacy: function() {
    // the legacy sync host
    return new Set([PWDMGR_HOST]);
  },

  /*
   * Get the FxA identity hosts.
   */
  getSyncCredentialsHostsFxA: function() {
    let result = new Set();
    // the FxA host
    result.add(FxAccountsCommon.FXA_PWDMGR_HOST);
    // We used to include the FxA hosts (hence the Set() result) but we now
    // don't give them special treatment (hence the Set() with exactly 1 item)
    return result;
  },
+
  /**
   * Compute a human-readable default name for this device from the OS
   * user name, the application name, and a machine identifier, via the
   * localized "client.name2" format string.
   */
  getDefaultDeviceName() {
    // Generate a client name if we don't have a useful one yet
    let env = Cc["@mozilla.org/process/environment;1"]
                .getService(Ci.nsIEnvironment);
    // Fall back through env vars and sync prefs for the user portion.
    let user = env.get("USER") || env.get("USERNAME") ||
               Svc.Prefs.get("account") || Svc.Prefs.get("username");
    // A little hack for people using the the moz-build environment on Windows
    // which sets USER to the literal "%USERNAME%" (yes, really)
    if (user == "%USERNAME%" && env.get("USERNAME")) {
      user = env.get("USERNAME");
    }

    let brand = new StringBundle("chrome://branding/locale/brand.properties");
    let brandName = brand.get("brandShortName");

    let appName;
    try {
      // Browser-only bundle; missing in other apps, hence the try/catch.
      let syncStrings = new StringBundle("chrome://browser/locale/sync.properties");
      appName = syncStrings.getFormattedString("sync.defaultAccountApplication", [brandName]);
    } catch (ex) {}
    appName = appName || brandName;

    let system =
      // 'device' is defined on unix systems
      Cc["@mozilla.org/system-info;1"].getService(Ci.nsIPropertyBag2).get("device") ||
      // hostname of the system, usually assigned by the user or admin
      Cc["@mozilla.org/system-info;1"].getService(Ci.nsIPropertyBag2).get("host") ||
      // fall back on ua info string
      Cc["@mozilla.org/network/protocol;1?name=http"].getService(Ci.nsIHttpProtocolHandler).oscpu;

    return Str.sync.get("client.name2", [user, appName, system]);
  },

  // The user-configured device name, or the computed default when the
  // "client.name" pref is unset/empty.
  getDeviceName() {
    const deviceName = Svc.Prefs.get("client.name", "");

    if (deviceName === "") {
      return this.getDefaultDeviceName();
    }

    return deviceName;
  },

  // Device type pref, defaulting to desktop.
  getDeviceType() {
    return Svc.Prefs.get("client.type", DEVICE_TYPE_DESKTOP);
  },
+
+ formatTimestamp(date) {
+ // Format timestamp as: "%Y-%m-%d %H:%M:%S"
+ let year = String(date.getFullYear());
+ let month = String(date.getMonth() + 1).padStart(2, "0");
+ let day = String(date.getDate()).padStart(2, "0");
+ let hours = String(date.getHours()).padStart(2, "0");
+ let minutes = String(date.getMinutes()).padStart(2, "0");
+ let seconds = String(date.getSeconds()).padStart(2, "0");
+
+ return `${year}-${month}-${day} ${hours}:${minutes}:${seconds}`;
+ }
+};
+
// Lazily-created UTF-8 converter shared by Utils helpers.
XPCOMUtils.defineLazyGetter(Utils, "_utf8Converter", function() {
  let converter = Cc["@mozilla.org/intl/scriptableunicodeconverter"]
                    .createInstance(Ci.nsIScriptableUnicodeConverter);
  converter.charset = "UTF-8";
  return converter;
});
+
/*
 * Commonly-used services
 */
this.Svc = {};
Svc.Prefs = new Preferences(PREFS_BRANCH);
Svc.DefaultPrefs = new Preferences({branch: PREFS_BRANCH, defaultBranch: true});
Svc.Obs = Observers;

// SeaMonkey exposes session store under a different contract ID.
var _sessionCID = Services.appinfo.ID == SEAMONKEY_ID ?
  "@mozilla.org/suite/sessionstore;1" :
  "@mozilla.org/browser/sessionstore;1";

[
 ["Idle", "@mozilla.org/widget/idleservice;1", "nsIIdleService"],
 ["Session", _sessionCID, "nsISessionStore"]
].forEach(function([name, contract, iface]) {
  XPCOMUtils.defineLazyServiceGetter(Svc, name, contract, iface);
});

XPCOMUtils.defineLazyModuleGetter(Svc, "FormHistory", "resource://gre/modules/FormHistory.jsm");

// Lazily instantiate WeaveCrypto; the getter replaces itself with the
// instance on first access.
Svc.__defineGetter__("Crypto", function() {
  let cryptoSvc;
  let ns = {};
  Cu.import("resource://services-crypto/WeaveCrypto.js", ns);
  cryptoSvc = new ns.WeaveCrypto();
  delete Svc.Crypto;
  return Svc.Crypto = cryptoSvc;
});

this.Str = {};
["errors", "sync"].forEach(function(lazy) {
  XPCOMUtils.defineLazyGetter(Str, lazy, Utils.lazyStrings(lazy));
});

// Drop service references at shutdown so they aren't leaked past XPCOM
// teardown.
Svc.Obs.add("xpcom-shutdown", function () {
  for (let name in Svc)
    delete Svc[name];
});