dexie-cloud-addon 1.0.0-beta.10 → 1.0.0-beta.11
- package/dist/modern/dexie-cloud-addon.js +189 -158
- package/dist/modern/dexie-cloud-addon.js.map +1 -1
- package/dist/modern/dexie-cloud-addon.min.js +1 -1
- package/dist/modern/dexie-cloud-addon.min.js.map +1 -1
- package/dist/modern/service-worker.js +1208 -1176
- package/dist/modern/service-worker.js.map +1 -1
- package/dist/modern/service-worker.min.js +1 -1
- package/dist/modern/service-worker.min.js.map +1 -1
- package/dist/module-es5/dexie-cloud-addon.js +259 -207
- package/dist/module-es5/dexie-cloud-addon.js.map +1 -1
- package/dist/module-es5/dexie-cloud-addon.min.js +1 -1
- package/dist/module-es5/dexie-cloud-addon.min.js.map +1 -1
- package/dist/types/WSObservable.d.ts +11 -6
- package/dist/types/WebSocketStatus.d.ts +1 -0
- package/dist/types/helpers/BroadcastedLocalEvent.d.ts +8 -0
- package/dist/types/helpers/visibleState.d.ts +1 -0
- package/dist/types/sync/syncServerToClientOnly.d.ts +3 -0
- package/dist/types/types/CloudConnectionStatus.d.ts +0 -0
- package/dist/types/types/ConnectionStatus.d.ts +0 -0
- package/dist/types/types/LoginState.d.ts +41 -0
- package/dist/types/types/SyncConnectionStatus.d.ts +1 -0
- package/dist/types/types/SyncFlowStatus.d.ts +6 -0
- package/dist/types/types/SyncStatus.d.ts +6 -0
- package/dist/umd/dexie-cloud-addon.js +259 -207
- package/dist/umd/dexie-cloud-addon.js.map +1 -1
- package/dist/umd/dexie-cloud-addon.min.js +1 -1
- package/dist/umd/dexie-cloud-addon.min.js.map +1 -1
- package/dist/umd/service-worker.js +1208 -1176
- package/dist/umd/service-worker.js.map +1 -1
- package/dist/umd/service-worker.min.js +1 -1
- package/dist/umd/service-worker.min.js.map +1 -1
- package/dist/umd-modern/dexie-cloud-addon.js +188 -157
- package/dist/umd-modern/dexie-cloud-addon.js.map +1 -1
- package/package.json +2 -2
@@ -8,7 +8,7 @@
  *
  * ==========================================================================
  *
- * Version 1.0.0-beta.10, Wed
+ * Version 1.0.0-beta.10, Wed Oct 06 2021
  *
  * https://dexie.org
  *
@@ -2951,6 +2951,105 @@ class BroadcastedAndLocalEvent extends Observable$1 {
     }
 }

+async function computeRealmSetHash({ realms, inviteRealms, }) {
+    const data = JSON.stringify([
+        ...realms.map((realmId) => ({ realmId, accepted: true })),
+        ...inviteRealms.map((realmId) => ({ realmId, accepted: false })),
+    ].sort((a, b) => a.realmId < b.realmId ? -1 : a.realmId > b.realmId ? 1 : 0));
+    const byteArray = new TextEncoder().encode(data);
+    const digestBytes = await crypto.subtle.digest('SHA-1', byteArray);
+    const base64 = b64encode(digestBytes);
+    return base64;
+}
+
+function getSyncableTables(db) {
+    return Object.entries(db.cloud.schema || {})
+        .filter(([, { markedForSync }]) => markedForSync)
+        .map(([tbl]) => db.table(tbl));
+}
+
+function getMutationTable(tableName) {
+    return `$${tableName}_mutations`;
+}
+
+function getTableFromMutationTable(mutationTable) {
+    var _a;
+    const tableName = (_a = /^\$(.*)_mutations$/.exec(mutationTable)) === null || _a === void 0 ? void 0 : _a[1];
+    if (!tableName)
+        throw new Error(`Given mutationTable ${mutationTable} is not correct`);
+    return tableName;
+}
+
+async function listClientChanges(mutationTables, db, { since = {}, limit = Infinity } = {}) {
+    const allMutsOnTables = await Promise.all(mutationTables.map(async (mutationTable) => {
+        const tableName = getTableFromMutationTable(mutationTable.name);
+        const lastRevision = since[tableName];
+        let query = lastRevision
+            ? mutationTable.where("rev").above(lastRevision)
+            : mutationTable;
+        if (limit < Infinity)
+            query = query.limit(limit);
+        const muts = await query.toArray();
+        //const objTable = db.table(tableName);
+        /*for (const mut of muts) {
+            if (mut.type === "insert" || mut.type === "upsert") {
+                mut.values = await objTable.bulkGet(mut.keys);
+            }
+        }*/
+        return {
+            table: tableName,
+            muts,
+        };
+    }));
+    // Filter out those tables that doesn't have any mutations:
+    return allMutsOnTables.filter(({ muts }) => muts.length > 0);
+}
+
+async function listSyncifiedChanges(tablesToSyncify, currentUser, schema, alreadySyncedRealms) {
+    if (currentUser.isLoggedIn) {
+        if (tablesToSyncify.length > 0) {
+            const ignoredRealms = new Set(alreadySyncedRealms || []);
+            const inserts = await Promise.all(tablesToSyncify.map(async (table) => {
+                const { extractKey } = table.core.schema.primaryKey;
+                if (!extractKey)
+                    return { table: table.name, muts: [] }; // Outbound tables are not synced.
+                const dexieCloudTableSchema = schema[table.name];
+                const query = (dexieCloudTableSchema === null || dexieCloudTableSchema === void 0 ? void 0 : dexieCloudTableSchema.generatedGlobalId)
+                    ? table.filter((item) => !ignoredRealms.has(item.realmId || "") && isValidSyncableID(extractKey(item)))
+                    : table.filter((item) => !ignoredRealms.has(item.realmId || "") && isValidAtID(extractKey(item), dexieCloudTableSchema === null || dexieCloudTableSchema === void 0 ? void 0 : dexieCloudTableSchema.idPrefix));
+                const unsyncedObjects = await query.toArray();
+                if (unsyncedObjects.length > 0) {
+                    const mut = {
+                        type: "insert",
+                        values: unsyncedObjects,
+                        keys: unsyncedObjects.map(extractKey),
+                        userId: currentUser.userId,
+                    };
+                    return {
+                        table: table.name,
+                        muts: [mut],
+                    };
+                }
+                else {
+                    return {
+                        table: table.name,
+                        muts: []
+                    };
+                }
+            }));
+            return inserts.filter(op => op.muts.length > 0);
+        }
+    }
+    return [];
+}
+
+function getTablesToSyncify(db, syncState) {
+    const syncedTables = (syncState === null || syncState === void 0 ? void 0 : syncState.syncedTables) || [];
+    const syncableTables = getSyncableTables(db);
+    const tablesToSyncify = syncableTables.filter((tbl) => !syncedTables.includes(tbl.name));
+    return tablesToSyncify;
+}
+
 const { toString: toStr } = {};
 function getToStringTag(val) {
     return toStr.call(val).slice(8, -1);
@@ -3439,22 +3538,6 @@ var undefinedDef = {
 // else
 //   serverRev.rev = new FakeBigInt(server.rev)
 const hasBigIntSupport = typeof BigInt(0) === 'bigint';
-function getValueOfBigInt(x) {
-    if (typeof x === 'bigint') {
-        return x;
-    }
-    if (hasBigIntSupport) {
-        return typeof x === 'string' ? BigInt(x) : BigInt(x.v);
-    }
-    else {
-        return typeof x === 'string' ? Number(x) : Number(x.v);
-    }
-}
-function compareBigInts(a, b) {
-    const valA = getValueOfBigInt(a);
-    const valB = getValueOfBigInt(b);
-    return valA < valB ? -1 : valA > valB ? 1 : 0;
-}
 class FakeBigInt {
     constructor(value) {
         this.v = value;
@@ -3483,105 +3566,6 @@ const defs = {
 const TSON = TypesonSimplified(builtin, defs);
 const BISON = Bison(defs);

-async function computeRealmSetHash({ realms, inviteRealms, }) {
-    const data = JSON.stringify([
-        ...realms.map((realmId) => ({ realmId, accepted: true })),
-        ...inviteRealms.map((realmId) => ({ realmId, accepted: false })),
-    ].sort((a, b) => a.realmId < b.realmId ? -1 : a.realmId > b.realmId ? 1 : 0));
-    const byteArray = new TextEncoder().encode(data);
-    const digestBytes = await crypto.subtle.digest('SHA-1', byteArray);
-    const base64 = b64encode(digestBytes);
-    return base64;
-}
-
-function getSyncableTables(db) {
-    return Object.entries(db.cloud.schema || {})
-        .filter(([, { markedForSync }]) => markedForSync)
-        .map(([tbl]) => db.table(tbl));
-}
-
-function getMutationTable(tableName) {
-    return `$${tableName}_mutations`;
-}
-
-function getTableFromMutationTable(mutationTable) {
-    var _a;
-    const tableName = (_a = /^\$(.*)_mutations$/.exec(mutationTable)) === null || _a === void 0 ? void 0 : _a[1];
-    if (!tableName)
-        throw new Error(`Given mutationTable ${mutationTable} is not correct`);
-    return tableName;
-}
-
-async function listClientChanges(mutationTables, db, { since = {}, limit = Infinity } = {}) {
-    const allMutsOnTables = await Promise.all(mutationTables.map(async (mutationTable) => {
-        const tableName = getTableFromMutationTable(mutationTable.name);
-        const lastRevision = since[tableName];
-        let query = lastRevision
-            ? mutationTable.where("rev").above(lastRevision)
-            : mutationTable;
-        if (limit < Infinity)
-            query = query.limit(limit);
-        const muts = await query.toArray();
-        //const objTable = db.table(tableName);
-        /*for (const mut of muts) {
-            if (mut.type === "insert" || mut.type === "upsert") {
-                mut.values = await objTable.bulkGet(mut.keys);
-            }
-        }*/
-        return {
-            table: tableName,
-            muts,
-        };
-    }));
-    // Filter out those tables that doesn't have any mutations:
-    return allMutsOnTables.filter(({ muts }) => muts.length > 0);
-}
-
-async function listSyncifiedChanges(tablesToSyncify, currentUser, schema, alreadySyncedRealms) {
-    if (currentUser.isLoggedIn) {
-        if (tablesToSyncify.length > 0) {
-            const ignoredRealms = new Set(alreadySyncedRealms || []);
-            const inserts = await Promise.all(tablesToSyncify.map(async (table) => {
-                const { extractKey } = table.core.schema.primaryKey;
-                if (!extractKey)
-                    return { table: table.name, muts: [] }; // Outbound tables are not synced.
-                const dexieCloudTableSchema = schema[table.name];
-                const query = (dexieCloudTableSchema === null || dexieCloudTableSchema === void 0 ? void 0 : dexieCloudTableSchema.generatedGlobalId)
-                    ? table.filter((item) => !ignoredRealms.has(item.realmId || "") && isValidSyncableID(extractKey(item)))
-                    : table.filter((item) => !ignoredRealms.has(item.realmId || "") && isValidAtID(extractKey(item), dexieCloudTableSchema === null || dexieCloudTableSchema === void 0 ? void 0 : dexieCloudTableSchema.idPrefix));
-                const unsyncedObjects = await query.toArray();
-                if (unsyncedObjects.length > 0) {
-                    const mut = {
-                        type: "insert",
-                        values: unsyncedObjects,
-                        keys: unsyncedObjects.map(extractKey),
-                        userId: currentUser.userId,
-                    };
-                    return {
-                        table: table.name,
-                        muts: [mut],
-                    };
-                }
-                else {
-                    return {
-                        table: table.name,
-                        muts: []
-                    };
-                }
-            }));
-            return inserts.filter(op => op.muts.length > 0);
-        }
-    }
-    return [];
-}
-
-function getTablesToSyncify(db, syncState) {
-    const syncedTables = (syncState === null || syncState === void 0 ? void 0 : syncState.syncedTables) || [];
-    const syncableTables = getSyncableTables(db);
-    const tablesToSyncify = syncableTables.filter((tbl) => !syncedTables.includes(tbl.name));
-    return tablesToSyncify;
-}
-
 //import {BisonWebStreamReader} from "dreambase-library/dist/typeson-simplified/BisonWebStreamReader";
 async function syncWithServer(changes, syncState, baseRevs, db, databaseUrl, schema, clientIdentity) {
     //
@@ -3596,6 +3580,7 @@ async function syncWithServer(changes, syncState, baseRevs, db, databaseUrl, schema, clientIdentity) {
         headers.Authorization = `Bearer ${accessToken}`;
     }
     const syncRequest = {
+        v: 2,
         dbID: syncState === null || syncState === void 0 ? void 0 : syncState.remoteDbId,
         clientIdentity,
         schema: schema || {},
@@ -3722,8 +3707,8 @@ async function updateBaseRevs(db, schema, latestRevisions, serverRev) {

 function getLatestRevisionsPerTable(clientChangeSet, lastRevisions = {}) {
     for (const { table, muts } of clientChangeSet) {
-        const lastRev = muts.length > 0 ? muts[muts.length - 1].rev
-        lastRevisions[table] = lastRev;
+        const lastRev = muts.length > 0 ? muts[muts.length - 1].rev : null;
+        lastRevisions[table] = lastRev || lastRevisions[table] || 0;
     }
     return lastRevisions;
 }
@@ -4044,16 +4029,35 @@ function MessagesFromServerConsumer(db) {
     const readyToServe = new BehaviorSubject(true);
     const event = new BehaviorSubject(null);
     let isWorking = false;
+    let loopWarning = 0;
+    let loopDetection = [0, 0, 0, 0, 0, 0, 0, 0, 0, Date.now()];
     event.subscribe(async () => {
         if (isWorking)
             return;
         if (queue.length > 0) {
             isWorking = true;
+            loopDetection.shift();
+            loopDetection.push(Date.now());
             readyToServe.next(false);
             try {
                 await consumeQueue();
             }
             finally {
+                if (loopDetection[loopDetection.length - 1] - loopDetection[0] < 10000) {
+                    // Ten loops within 10 seconds. Slow down!
+                    if (Date.now() - loopWarning < 5000) {
+                        // Last time we did this, we ended up here too. Wait for a minute.
+                        console.warn(`Slowing down websocket loop for one minute`);
+                        loopWarning = Date.now() + 60000;
+                        await new Promise(resolve => setTimeout(resolve, 60000));
+                    }
+                    else {
+                        // This is a one-time event. Just pause 10 seconds.
+                        console.warn(`Slowing down websocket loop for 10 seconds`);
+                        loopWarning = Date.now() + 10000;
+                        await new Promise(resolve => setTimeout(resolve, 10000));
+                    }
+                }
                 isWorking = false;
                 readyToServe.next(true);
             }
@@ -4069,6 +4073,9 @@ function MessagesFromServerConsumer(db) {
         const msg = queue.shift();
         try {
             console.debug('processing msg', msg);
+            // If the sync worker or service worker is syncing, wait 'til thei're done.
+            // It's no need to have two channels at the same time - even though it wouldnt
+            // be a problem - this is an optimization.
             await db.cloud.syncState
                 .pipe(filter(({ phase }) => phase === 'in-sync' || phase === 'error'), take(1))
                 .toPromise();
@@ -4092,26 +4099,23 @@ function MessagesFromServerConsumer(db) {
                     // in turn will lead to that connectWebSocket.ts will reconnect the socket with the
                     // new token. So we don't need to do anything more here.
                     break;
-                case 'rev':
-                    if (!(persistedSyncState === null || persistedSyncState === void 0 ? void 0 : persistedSyncState.serverRevision) ||
-                        compareBigInts(persistedSyncState.serverRevision, msg.rev) < 0) {
-                        triggerSync(db, "pull");
-                    }
-                    break;
                 case 'realm-added':
                     if (!((_a = persistedSyncState === null || persistedSyncState === void 0 ? void 0 : persistedSyncState.realms) === null || _a === void 0 ? void 0 : _a.includes(msg.realm))) {
-                        triggerSync(db,
+                        triggerSync(db, 'pull');
                     }
                     break;
                 case 'realm-removed':
                     if ((_b = persistedSyncState === null || persistedSyncState === void 0 ? void 0 : persistedSyncState.realms) === null || _b === void 0 ? void 0 : _b.includes(msg.realm)) {
-                        triggerSync(db,
+                        triggerSync(db, 'pull');
                     }
                     break;
+                case 'realms-changed':
+                    triggerSync(db, 'pull');
+                    break;
                 case 'changes':
                     console.debug('changes');
                     if (((_c = db.cloud.syncState.value) === null || _c === void 0 ? void 0 : _c.phase) === 'error') {
-                        triggerSync(db,
+                        triggerSync(db, 'pull');
                         break;
                     }
                     await db.transaction('rw', db.dx.tables, async (tx) => {
@@ -4134,16 +4138,34 @@ function MessagesFromServerConsumer(db) {
                             return; // Initial sync must have taken place - otherwise, ignore this.
                         }
                         // Verify again in ACID tx that we're on same server revision.
-                        if (
+                        if (msg.baseRev !== syncState.serverRevision) {
                             console.debug(`baseRev (${msg.baseRev}) differs from our serverRevision in syncState (${syncState.serverRevision})`);
+                            // Should we trigger a sync now? No. This is a normal case
+                            // when another local peer (such as the SW or a websocket channel on other tab) has
+                            // updated syncState from new server information but we are not aware yet. It would
+                            // be unnescessary to do a sync in that case. Instead, the caller of this consumeQueue()
+                            // function will do readyToServe.next(true) right after this return, which will lead
+                            // to a "ready" message being sent to server with the new accurate serverRev we have,
+                            // so that the next message indeed will be correct.
+                            if (typeof msg.baseRev === 'string' && // v2 format
+                                (typeof syncState.serverRevision === 'bigint' || // v1 format
+                                    typeof syncState.serverRevision === 'object') // v1 format old browser
+                            ) {
+                                // The reason for the diff seems to be that server has migrated the revision format.
+                                // Do a full sync to update revision format.
+                                // If we don't do a sync request now, we could stuck in an endless loop.
+                                triggerSync(db, 'pull');
+                            }
                             return; // Ignore message
                         }
                         // Verify also that the message is based on the exact same set of realms
-                        const ourRealmSetHash = await Dexie.waitFor(
+                        const ourRealmSetHash = await Dexie.waitFor(
+                        // Keep TX in non-IDB work
+                        computeRealmSetHash(syncState));
                         console.debug('ourRealmSetHash', ourRealmSetHash);
                         if (ourRealmSetHash !== msg.realmSetHash) {
                             console.debug('not same realmSetHash', msg.realmSetHash);
-                            triggerSync(db,
+                            triggerSync(db, 'pull');
                             // The message isn't based on the same realms.
                             // Trigger a sync instead to resolve all things up.
                             return;
@@ -4155,12 +4177,14 @@ function MessagesFromServerConsumer(db) {
                             clientChanges = await listClientChanges(mutationTables, db);
                             console.debug('msg queue: client changes', clientChanges);
                         }
-
-
-
-
-
-
+                        if (msg.changes.length > 0) {
+                            const filteredChanges = filterServerChangesThroughAddedClientChanges(msg.changes, clientChanges);
+                            //
+                            // apply server changes
+                            //
+                            console.debug('applying filtered server changes', filteredChanges);
+                            await applyServerChanges(filteredChanges, db);
+                        }
                         // Update latest revisions per table in case there are unsynced changes
                         // This can be a real case in future when we allow non-eagery sync.
                         // And it can actually be realistic now also, but very rare.
@@ -5036,18 +5060,19 @@ const SERVER_PING_TIMEOUT = 20000;
 const CLIENT_PING_INTERVAL = 30000;
 const FAIL_RETRY_WAIT_TIME = 60000;
 class WSObservable extends Observable$1 {
-    constructor(databaseUrl, rev, clientIdentity, messageProducer, webSocketStatus, token, tokenExpiration) {
-        super((subscriber) => new WSConnection(databaseUrl, rev, clientIdentity, token, tokenExpiration, subscriber, messageProducer, webSocketStatus));
+    constructor(databaseUrl, rev, realmSetHash, clientIdentity, messageProducer, webSocketStatus, token, tokenExpiration) {
+        super((subscriber) => new WSConnection(databaseUrl, rev, realmSetHash, clientIdentity, token, tokenExpiration, subscriber, messageProducer, webSocketStatus));
     }
 }
 let counter = 0;
 class WSConnection extends Subscription$1 {
-    constructor(databaseUrl, rev, clientIdentity, token, tokenExpiration, subscriber, messageProducer, webSocketStatus) {
+    constructor(databaseUrl, rev, realmSetHash, clientIdentity, token, tokenExpiration, subscriber, messageProducer, webSocketStatus) {
         super(() => this.teardown());
         this.id = ++counter;
         console.debug('New WebSocket Connection', this.id, token ? 'authorized' : 'unauthorized');
         this.databaseUrl = databaseUrl;
         this.rev = rev;
+        this.realmSetHash = realmSetHash;
         this.clientIdentity = clientIdentity;
         this.token = token;
         this.tokenExpiration = tokenExpiration;
@@ -5152,7 +5177,9 @@ class WSConnection extends Subscription$1 {
         const searchParams = new URLSearchParams();
         if (this.subscriber.closed)
             return;
+        searchParams.set('v', "2");
         searchParams.set('rev', this.rev);
+        searchParams.set('realmsHash', this.realmSetHash);
         searchParams.set('clientId', this.clientIdentity);
         if (this.token) {
             searchParams.set('token', this.token);
@@ -5246,12 +5273,12 @@ function connectWebSocket(db) {
     function createObservable() {
         return db.cloud.persistedSyncState.pipe(filter(syncState => syncState === null || syncState === void 0 ? void 0 : syncState.serverRevision), // Don't connect before there's no initial sync performed.
         take(1), // Don't continue waking up whenever syncState change
-        switchMap(() => db.cloud.currentUser), switchMap((userLogin) => userIsReallyActive.pipe(map((isActive) =>
+        switchMap((syncState) => db.cloud.currentUser.pipe(map(userLogin => [userLogin, syncState]))), switchMap(([userLogin, syncState]) => userIsReallyActive.pipe(map((isActive) => [isActive ? userLogin : null, syncState]))), switchMap(async ([userLogin, syncState]) => [userLogin, await computeRealmSetHash(syncState)]), switchMap(([userLogin, realmSetHash]) =>
         // Let server end query changes from last entry of same client-ID and forward.
         // If no new entries, server won't bother the client. If new entries, server sends only those
         // and the baseRev of the last from same client-ID.
         userLogin
-            ? new WSObservable(db.cloud.options.databaseUrl, db.cloud.persistedSyncState.value.serverRevision, db.cloud.persistedSyncState.value.clientIdentity, messageProducer, db.cloud.webSocketStatus, userLogin.accessToken, userLogin.accessTokenExpiration)
+            ? new WSObservable(db.cloud.options.databaseUrl, db.cloud.persistedSyncState.value.serverRevision, realmSetHash, db.cloud.persistedSyncState.value.clientIdentity, messageProducer, db.cloud.webSocketStatus, userLogin.accessToken, userLogin.accessTokenExpiration)
             : from$1([])), catchError((error) => {
             if ((error === null || error === void 0 ? void 0 : error.name) === 'TokenExpiredError') {
                 console.debug('WebSocket observable: Token expired. Refreshing token...');
@@ -5371,22 +5398,26 @@ function LocalSyncWorker(db, cloudOptions, cloudSchema) {
     //let periodicSyncHandler: ((event: Event) => void) | null = null;
     let cancelToken = { cancelled: false };
     function syncAndRetry(purpose, retryNum = 1) {
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+        // Use setTimeout() to get onto a clean stack and
+        // break free from possible active transaction:
+        setTimeout(() => {
+            syncIfPossible(db, cloudOptions, cloudSchema, {
+                cancelToken,
+                retryImmediatelyOnFetchError: true,
+                purpose,
+            }).catch((e) => {
+                console.error('error in syncIfPossible()', e);
+                if (cancelToken.cancelled) {
+                    stop();
+                }
+                else if (retryNum < 3) {
+                    // Mimic service worker sync event: retry 3 times
+                    // * first retry after 5 minutes
+                    // * second retry 15 minutes later
+                    setTimeout(() => syncAndRetry(purpose, retryNum + 1), [0, 5, 15][retryNum] * MINUTES);
+                }
+            });
+        }, 0);
     }
     const start = () => {
         // Sync eagerly whenever a change has happened (+ initially when there's no syncState yet)
@@ -5394,7 +5425,7 @@ function LocalSyncWorker(db, cloudOptions, cloudSchema) {
         console.debug('Starting LocalSyncWorker', db.localSyncEvent['id']);
         localSyncEventSubscription = db.localSyncEvent.subscribe(({ purpose }) => {
             try {
-                syncAndRetry(purpose ||
+                syncAndRetry(purpose || 'pull');
             }
             catch (err) {
                 console.error('What-the....', err);