path: root/services/sync/tests/unit/test_syncengine_sync.js
author     wolfbeast <mcwerewolf@gmail.com>  2018-10-06 06:57:51 +0200
committer  wolfbeast <mcwerewolf@gmail.com>  2018-10-06 06:57:51 +0200
commit     0c47c83e1b3b7d95681a43fbb0de0e17b2cd5b25 (patch)
tree       c321601f04cbfd02fb6e12878e745dc49a612c86 /services/sync/tests/unit/test_syncengine_sync.js
parent     8860eddcee1417483cafd114f3a9ec127e0f1f74 (diff)
Import Tycho weave client
Diffstat (limited to 'services/sync/tests/unit/test_syncengine_sync.js')
-rw-r--r--  services/sync/tests/unit/test_syncengine_sync.js  100
1 file changed, 19 insertions, 81 deletions
diff --git a/services/sync/tests/unit/test_syncengine_sync.js b/services/sync/tests/unit/test_syncengine_sync.js
index 97289962f..6a6d047bf 100644
--- a/services/sync/tests/unit/test_syncengine_sync.js
+++ b/services/sync/tests/unit/test_syncengine_sync.js
@@ -15,22 +15,13 @@ function makeRotaryEngine() {
return new RotaryEngine(Service);
}
-function clean() {
+function cleanAndGo(server) {
Svc.Prefs.resetBranch("");
Svc.Prefs.set("log.logger.engine.rotary", "Trace");
Service.recordManager.clearCache();
-}
-
-function cleanAndGo(server) {
- clean();
server.stop(run_next_test);
}
-function promiseClean(server) {
- clean();
- return new Promise(resolve => server.stop(resolve));
-}
-
function configureService(server, username, password) {
Service.clusterURL = server.baseURI;
@@ -181,7 +172,7 @@ add_test(function test_syncStartup_syncIDMismatchResetsClient() {
try {
// Confirm initial environment
- do_check_eq(engine.syncID, 'fake-guid-00');
+ do_check_eq(engine.syncID, 'fake-guid-0');
do_check_eq(engine._tracker.changedIDs["rekolok"], undefined);
engine.lastSync = Date.now() / 1000;
@@ -676,7 +667,7 @@ add_test(function test_processIncoming_mobile_batchSize() {
});
-add_task(function *test_processIncoming_store_toFetch() {
+add_test(function test_processIncoming_store_toFetch() {
_("If processIncoming fails in the middle of a batch on mobile, state is saved in toFetch and lastSync.");
Service.identity.username = "foo";
Svc.Prefs.set("client.type", "mobile");
@@ -723,10 +714,11 @@ add_task(function *test_processIncoming_store_toFetch() {
let error;
try {
- yield sync_engine_and_validate_telem(engine, true);
+ engine.sync();
} catch (ex) {
error = ex;
}
+ do_check_true(!!error);
// Only the first two batches have been applied.
do_check_eq(Object.keys(engine._store.items).length,
@@ -738,7 +730,7 @@ add_task(function *test_processIncoming_store_toFetch() {
do_check_eq(engine.lastSync, collection.wbo("record-no-99").modified);
} finally {
- yield promiseClean(server);
+ cleanAndGo(server);
}
});
@@ -1229,7 +1221,7 @@ add_test(function test_processIncoming_failed_records() {
});
-add_task(function *test_processIncoming_decrypt_failed() {
+add_test(function test_processIncoming_decrypt_failed() {
_("Ensure that records failing to decrypt are either replaced or refetched.");
Service.identity.username = "foo";
@@ -1288,10 +1280,7 @@ add_task(function *test_processIncoming_decrypt_failed() {
});
engine.lastSync = collection.wbo("nojson").modified - 1;
- let ping = yield sync_engine_and_validate_telem(engine, true);
- do_check_eq(ping.engines[0].incoming.applied, 2);
- do_check_eq(ping.engines[0].incoming.failed, 4);
- do_check_eq(ping.engines[0].incoming.newFailed, 4);
+ engine.sync();
do_check_eq(engine.previousFailed.length, 4);
do_check_eq(engine.previousFailed[0], "nojson");
@@ -1305,7 +1294,7 @@ add_task(function *test_processIncoming_decrypt_failed() {
do_check_eq(observerSubject.failed, 4);
} finally {
- yield promiseClean(server);
+ cleanAndGo(server);
}
});
@@ -1369,7 +1358,7 @@ add_test(function test_uploadOutgoing_toEmptyServer() {
});
-add_task(function *test_uploadOutgoing_failed() {
+add_test(function test_uploadOutgoing_failed() {
_("SyncEngine._uploadOutgoing doesn't clear the tracker of objects that failed to upload.");
Service.identity.username = "foo";
@@ -1412,7 +1401,7 @@ add_task(function *test_uploadOutgoing_failed() {
do_check_eq(engine._tracker.changedIDs['peppercorn'], PEPPERCORN_CHANGED);
engine.enabled = true;
- yield sync_engine_and_validate_telem(engine, true);
+ engine.sync();
// Local timestamp has been set.
do_check_true(engine.lastSyncLocal > 0);
@@ -1427,14 +1416,11 @@ add_task(function *test_uploadOutgoing_failed() {
do_check_eq(engine._tracker.changedIDs['peppercorn'], PEPPERCORN_CHANGED);
} finally {
- yield promiseClean(server);
+ cleanAndGo(server);
}
});
-/* A couple of "functional" tests to ensure we split records into appropriate
- POST requests. More comprehensive unit-tests for this "batching" are in
- test_postqueue.js.
-*/
+
add_test(function test_uploadOutgoing_MAX_UPLOAD_RECORDS() {
_("SyncEngine._uploadOutgoing uploads in batches of MAX_UPLOAD_RECORDS");
@@ -1444,18 +1430,9 @@ add_test(function test_uploadOutgoing_MAX_UPLOAD_RECORDS() {
// Let's count how many times the client posts to the server
var noOfUploads = 0;
collection.post = (function(orig) {
- return function(data, request) {
- // This test doesn't arrange for batch semantics - so we expect the
- // first request to come in with batch=true and the others to have no
- // batch related headers at all (as the first response did not provide
- // a batch ID)
- if (noOfUploads == 0) {
- do_check_eq(request.queryString, "batch=true");
- } else {
- do_check_eq(request.queryString, "");
- }
+ return function() {
noOfUploads++;
- return orig.call(this, data, request);
+ return orig.apply(this, arguments);
};
}(collection.post));
@@ -1500,44 +1477,6 @@ add_test(function test_uploadOutgoing_MAX_UPLOAD_RECORDS() {
}
});
-add_test(function test_uploadOutgoing_largeRecords() {
- _("SyncEngine._uploadOutgoing throws on records larger than MAX_UPLOAD_BYTES");
-
- Service.identity.username = "foo";
- let collection = new ServerCollection();
-
- let engine = makeRotaryEngine();
- engine.allowSkippedRecord = false;
- engine._store.items["large-item"] = "Y".repeat(MAX_UPLOAD_BYTES*2);
- engine._tracker.addChangedID("large-item", 0);
- collection.insert("large-item");
-
-
- let meta_global = Service.recordManager.set(engine.metaURL,
- new WBORecord(engine.metaURL));
- meta_global.payload.engines = {rotary: {version: engine.version,
- syncID: engine.syncID}};
-
- let server = sync_httpd_setup({
- "/1.1/foo/storage/rotary": collection.handler()
- });
-
- let syncTesting = new SyncTestingInfrastructure(server);
-
- try {
- engine._syncStartup();
- let error = null;
- try {
- engine._uploadOutgoing();
- } catch (e) {
- error = e;
- }
- ok(!!error);
- } finally {
- cleanAndGo(server);
- }
-});
-
add_test(function test_syncFinish_noDelete() {
_("SyncEngine._syncFinish resets tracker's score");
@@ -1667,7 +1606,7 @@ add_test(function test_syncFinish_deleteLotsInBatches() {
});
-add_task(function *test_sync_partialUpload() {
+add_test(function test_sync_partialUpload() {
_("SyncEngine.sync() keeps changedIDs that couldn't be uploaded.");
Service.identity.username = "foo";
@@ -1715,12 +1654,11 @@ add_task(function *test_sync_partialUpload() {
engine.enabled = true;
let error;
try {
- yield sync_engine_and_validate_telem(engine, true);
+ engine.sync();
} catch (ex) {
error = ex;
}
-
- ok(!!error);
+ do_check_true(!!error);
// The timestamp has been updated.
do_check_true(engine.lastSyncLocal > 456);
@@ -1738,7 +1676,7 @@ add_task(function *test_sync_partialUpload() {
}
} finally {
- yield promiseClean(server);
+ cleanAndGo(server);
}
});