dexie-cloud-addon 1.0.0-beta.10 → 1.0.0-beta.11

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (34) hide show
  1. package/dist/modern/dexie-cloud-addon.js +189 -158
  2. package/dist/modern/dexie-cloud-addon.js.map +1 -1
  3. package/dist/modern/dexie-cloud-addon.min.js +1 -1
  4. package/dist/modern/dexie-cloud-addon.min.js.map +1 -1
  5. package/dist/modern/service-worker.js +1208 -1176
  6. package/dist/modern/service-worker.js.map +1 -1
  7. package/dist/modern/service-worker.min.js +1 -1
  8. package/dist/modern/service-worker.min.js.map +1 -1
  9. package/dist/module-es5/dexie-cloud-addon.js +259 -207
  10. package/dist/module-es5/dexie-cloud-addon.js.map +1 -1
  11. package/dist/module-es5/dexie-cloud-addon.min.js +1 -1
  12. package/dist/module-es5/dexie-cloud-addon.min.js.map +1 -1
  13. package/dist/types/WSObservable.d.ts +11 -6
  14. package/dist/types/WebSocketStatus.d.ts +1 -0
  15. package/dist/types/helpers/BroadcastedLocalEvent.d.ts +8 -0
  16. package/dist/types/helpers/visibleState.d.ts +1 -0
  17. package/dist/types/sync/syncServerToClientOnly.d.ts +3 -0
  18. package/dist/types/types/CloudConnectionStatus.d.ts +0 -0
  19. package/dist/types/types/ConnectionStatus.d.ts +0 -0
  20. package/dist/types/types/LoginState.d.ts +41 -0
  21. package/dist/types/types/SyncConnectionStatus.d.ts +1 -0
  22. package/dist/types/types/SyncFlowStatus.d.ts +6 -0
  23. package/dist/types/types/SyncStatus.d.ts +6 -0
  24. package/dist/umd/dexie-cloud-addon.js +259 -207
  25. package/dist/umd/dexie-cloud-addon.js.map +1 -1
  26. package/dist/umd/dexie-cloud-addon.min.js +1 -1
  27. package/dist/umd/dexie-cloud-addon.min.js.map +1 -1
  28. package/dist/umd/service-worker.js +1208 -1176
  29. package/dist/umd/service-worker.js.map +1 -1
  30. package/dist/umd/service-worker.min.js +1 -1
  31. package/dist/umd/service-worker.min.js.map +1 -1
  32. package/dist/umd-modern/dexie-cloud-addon.js +188 -157
  33. package/dist/umd-modern/dexie-cloud-addon.js.map +1 -1
  34. package/package.json +2 -2
@@ -2958,6 +2958,105 @@
2958
2958
  }
2959
2959
  }
2960
2960
 
2961
+ async function computeRealmSetHash({ realms, inviteRealms, }) {
2962
+ const data = JSON.stringify([
2963
+ ...realms.map((realmId) => ({ realmId, accepted: true })),
2964
+ ...inviteRealms.map((realmId) => ({ realmId, accepted: false })),
2965
+ ].sort((a, b) => a.realmId < b.realmId ? -1 : a.realmId > b.realmId ? 1 : 0));
2966
+ const byteArray = new TextEncoder().encode(data);
2967
+ const digestBytes = await crypto.subtle.digest('SHA-1', byteArray);
2968
+ const base64 = b64encode(digestBytes);
2969
+ return base64;
2970
+ }
2971
+
2972
+ function getSyncableTables(db) {
2973
+ return Object.entries(db.cloud.schema || {})
2974
+ .filter(([, { markedForSync }]) => markedForSync)
2975
+ .map(([tbl]) => db.table(tbl));
2976
+ }
2977
+
2978
+ function getMutationTable(tableName) {
2979
+ return `$${tableName}_mutations`;
2980
+ }
2981
+
2982
+ function getTableFromMutationTable(mutationTable) {
2983
+ var _a;
2984
+ const tableName = (_a = /^\$(.*)_mutations$/.exec(mutationTable)) === null || _a === void 0 ? void 0 : _a[1];
2985
+ if (!tableName)
2986
+ throw new Error(`Given mutationTable ${mutationTable} is not correct`);
2987
+ return tableName;
2988
+ }
2989
+
2990
+ async function listClientChanges(mutationTables, db, { since = {}, limit = Infinity } = {}) {
2991
+ const allMutsOnTables = await Promise.all(mutationTables.map(async (mutationTable) => {
2992
+ const tableName = getTableFromMutationTable(mutationTable.name);
2993
+ const lastRevision = since[tableName];
2994
+ let query = lastRevision
2995
+ ? mutationTable.where("rev").above(lastRevision)
2996
+ : mutationTable;
2997
+ if (limit < Infinity)
2998
+ query = query.limit(limit);
2999
+ const muts = await query.toArray();
3000
+ //const objTable = db.table(tableName);
3001
+ /*for (const mut of muts) {
3002
+ if (mut.type === "insert" || mut.type === "upsert") {
3003
+ mut.values = await objTable.bulkGet(mut.keys);
3004
+ }
3005
+ }*/
3006
+ return {
3007
+ table: tableName,
3008
+ muts,
3009
+ };
3010
+ }));
3011
+ // Filter out those tables that don't have any mutations:
3012
+ return allMutsOnTables.filter(({ muts }) => muts.length > 0);
3013
+ }
3014
+
3015
+ async function listSyncifiedChanges(tablesToSyncify, currentUser, schema, alreadySyncedRealms) {
3016
+ if (currentUser.isLoggedIn) {
3017
+ if (tablesToSyncify.length > 0) {
3018
+ const ignoredRealms = new Set(alreadySyncedRealms || []);
3019
+ const inserts = await Promise.all(tablesToSyncify.map(async (table) => {
3020
+ const { extractKey } = table.core.schema.primaryKey;
3021
+ if (!extractKey)
3022
+ return { table: table.name, muts: [] }; // Outbound tables are not synced.
3023
+ const dexieCloudTableSchema = schema[table.name];
3024
+ const query = (dexieCloudTableSchema === null || dexieCloudTableSchema === void 0 ? void 0 : dexieCloudTableSchema.generatedGlobalId)
3025
+ ? table.filter((item) => !ignoredRealms.has(item.realmId || "") && isValidSyncableID(extractKey(item)))
3026
+ : table.filter((item) => !ignoredRealms.has(item.realmId || "") && isValidAtID(extractKey(item), dexieCloudTableSchema === null || dexieCloudTableSchema === void 0 ? void 0 : dexieCloudTableSchema.idPrefix));
3027
+ const unsyncedObjects = await query.toArray();
3028
+ if (unsyncedObjects.length > 0) {
3029
+ const mut = {
3030
+ type: "insert",
3031
+ values: unsyncedObjects,
3032
+ keys: unsyncedObjects.map(extractKey),
3033
+ userId: currentUser.userId,
3034
+ };
3035
+ return {
3036
+ table: table.name,
3037
+ muts: [mut],
3038
+ };
3039
+ }
3040
+ else {
3041
+ return {
3042
+ table: table.name,
3043
+ muts: []
3044
+ };
3045
+ }
3046
+ }));
3047
+ return inserts.filter(op => op.muts.length > 0);
3048
+ }
3049
+ }
3050
+ return [];
3051
+ }
3052
+
3053
+ function getTablesToSyncify(db, syncState) {
3054
+ const syncedTables = (syncState === null || syncState === void 0 ? void 0 : syncState.syncedTables) || [];
3055
+ const syncableTables = getSyncableTables(db);
3056
+ const tablesToSyncify = syncableTables.filter((tbl) => !syncedTables.includes(tbl.name));
3057
+ return tablesToSyncify;
3058
+ }
3059
+
2961
3060
  const { toString: toStr } = {};
2962
3061
  function getToStringTag(val) {
2963
3062
  return toStr.call(val).slice(8, -1);
@@ -3446,22 +3545,6 @@
3446
3545
  // else
3447
3546
  // serverRev.rev = new FakeBigInt(server.rev)
3448
3547
  const hasBigIntSupport = typeof BigInt(0) === 'bigint';
3449
- function getValueOfBigInt(x) {
3450
- if (typeof x === 'bigint') {
3451
- return x;
3452
- }
3453
- if (hasBigIntSupport) {
3454
- return typeof x === 'string' ? BigInt(x) : BigInt(x.v);
3455
- }
3456
- else {
3457
- return typeof x === 'string' ? Number(x) : Number(x.v);
3458
- }
3459
- }
3460
- function compareBigInts(a, b) {
3461
- const valA = getValueOfBigInt(a);
3462
- const valB = getValueOfBigInt(b);
3463
- return valA < valB ? -1 : valA > valB ? 1 : 0;
3464
- }
3465
3548
  class FakeBigInt {
3466
3549
  constructor(value) {
3467
3550
  this.v = value;
@@ -3490,105 +3573,6 @@
3490
3573
  const TSON = TypesonSimplified(builtin, defs);
3491
3574
  const BISON = Bison(defs);
3492
3575
 
3493
- async function computeRealmSetHash({ realms, inviteRealms, }) {
3494
- const data = JSON.stringify([
3495
- ...realms.map((realmId) => ({ realmId, accepted: true })),
3496
- ...inviteRealms.map((realmId) => ({ realmId, accepted: false })),
3497
- ].sort((a, b) => a.realmId < b.realmId ? -1 : a.realmId > b.realmId ? 1 : 0));
3498
- const byteArray = new TextEncoder().encode(data);
3499
- const digestBytes = await crypto.subtle.digest('SHA-1', byteArray);
3500
- const base64 = b64encode(digestBytes);
3501
- return base64;
3502
- }
3503
-
3504
- function getSyncableTables(db) {
3505
- return Object.entries(db.cloud.schema || {})
3506
- .filter(([, { markedForSync }]) => markedForSync)
3507
- .map(([tbl]) => db.table(tbl));
3508
- }
3509
-
3510
- function getMutationTable(tableName) {
3511
- return `$${tableName}_mutations`;
3512
- }
3513
-
3514
- function getTableFromMutationTable(mutationTable) {
3515
- var _a;
3516
- const tableName = (_a = /^\$(.*)_mutations$/.exec(mutationTable)) === null || _a === void 0 ? void 0 : _a[1];
3517
- if (!tableName)
3518
- throw new Error(`Given mutationTable ${mutationTable} is not correct`);
3519
- return tableName;
3520
- }
3521
-
3522
- async function listClientChanges(mutationTables, db, { since = {}, limit = Infinity } = {}) {
3523
- const allMutsOnTables = await Promise.all(mutationTables.map(async (mutationTable) => {
3524
- const tableName = getTableFromMutationTable(mutationTable.name);
3525
- const lastRevision = since[tableName];
3526
- let query = lastRevision
3527
- ? mutationTable.where("rev").above(lastRevision)
3528
- : mutationTable;
3529
- if (limit < Infinity)
3530
- query = query.limit(limit);
3531
- const muts = await query.toArray();
3532
- //const objTable = db.table(tableName);
3533
- /*for (const mut of muts) {
3534
- if (mut.type === "insert" || mut.type === "upsert") {
3535
- mut.values = await objTable.bulkGet(mut.keys);
3536
- }
3537
- }*/
3538
- return {
3539
- table: tableName,
3540
- muts,
3541
- };
3542
- }));
3543
- // Filter out those tables that don't have any mutations:
3544
- return allMutsOnTables.filter(({ muts }) => muts.length > 0);
3545
- }
3546
-
3547
- async function listSyncifiedChanges(tablesToSyncify, currentUser, schema, alreadySyncedRealms) {
3548
- if (currentUser.isLoggedIn) {
3549
- if (tablesToSyncify.length > 0) {
3550
- const ignoredRealms = new Set(alreadySyncedRealms || []);
3551
- const inserts = await Promise.all(tablesToSyncify.map(async (table) => {
3552
- const { extractKey } = table.core.schema.primaryKey;
3553
- if (!extractKey)
3554
- return { table: table.name, muts: [] }; // Outbound tables are not synced.
3555
- const dexieCloudTableSchema = schema[table.name];
3556
- const query = (dexieCloudTableSchema === null || dexieCloudTableSchema === void 0 ? void 0 : dexieCloudTableSchema.generatedGlobalId)
3557
- ? table.filter((item) => !ignoredRealms.has(item.realmId || "") && isValidSyncableID(extractKey(item)))
3558
- : table.filter((item) => !ignoredRealms.has(item.realmId || "") && isValidAtID(extractKey(item), dexieCloudTableSchema === null || dexieCloudTableSchema === void 0 ? void 0 : dexieCloudTableSchema.idPrefix));
3559
- const unsyncedObjects = await query.toArray();
3560
- if (unsyncedObjects.length > 0) {
3561
- const mut = {
3562
- type: "insert",
3563
- values: unsyncedObjects,
3564
- keys: unsyncedObjects.map(extractKey),
3565
- userId: currentUser.userId,
3566
- };
3567
- return {
3568
- table: table.name,
3569
- muts: [mut],
3570
- };
3571
- }
3572
- else {
3573
- return {
3574
- table: table.name,
3575
- muts: []
3576
- };
3577
- }
3578
- }));
3579
- return inserts.filter(op => op.muts.length > 0);
3580
- }
3581
- }
3582
- return [];
3583
- }
3584
-
3585
- function getTablesToSyncify(db, syncState) {
3586
- const syncedTables = (syncState === null || syncState === void 0 ? void 0 : syncState.syncedTables) || [];
3587
- const syncableTables = getSyncableTables(db);
3588
- const tablesToSyncify = syncableTables.filter((tbl) => !syncedTables.includes(tbl.name));
3589
- return tablesToSyncify;
3590
- }
3591
-
3592
3576
  //import {BisonWebStreamReader} from "dreambase-library/dist/typeson-simplified/BisonWebStreamReader";
3593
3577
  async function syncWithServer(changes, syncState, baseRevs, db, databaseUrl, schema, clientIdentity) {
3594
3578
  //
@@ -3603,6 +3587,7 @@
3603
3587
  headers.Authorization = `Bearer ${accessToken}`;
3604
3588
  }
3605
3589
  const syncRequest = {
3590
+ v: 2,
3606
3591
  dbID: syncState === null || syncState === void 0 ? void 0 : syncState.remoteDbId,
3607
3592
  clientIdentity,
3608
3593
  schema: schema || {},
@@ -3729,8 +3714,8 @@
3729
3714
 
3730
3715
  function getLatestRevisionsPerTable(clientChangeSet, lastRevisions = {}) {
3731
3716
  for (const { table, muts } of clientChangeSet) {
3732
- const lastRev = muts.length > 0 ? muts[muts.length - 1].rev || 0 : 0;
3733
- lastRevisions[table] = lastRev;
3717
+ const lastRev = muts.length > 0 ? muts[muts.length - 1].rev : null;
3718
+ lastRevisions[table] = lastRev || lastRevisions[table] || 0;
3734
3719
  }
3735
3720
  return lastRevisions;
3736
3721
  }
@@ -4051,16 +4036,35 @@
4051
4036
  const readyToServe = new rxjs.BehaviorSubject(true);
4052
4037
  const event = new rxjs.BehaviorSubject(null);
4053
4038
  let isWorking = false;
4039
+ let loopWarning = 0;
4040
+ let loopDetection = [0, 0, 0, 0, 0, 0, 0, 0, 0, Date.now()];
4054
4041
  event.subscribe(async () => {
4055
4042
  if (isWorking)
4056
4043
  return;
4057
4044
  if (queue.length > 0) {
4058
4045
  isWorking = true;
4046
+ loopDetection.shift();
4047
+ loopDetection.push(Date.now());
4059
4048
  readyToServe.next(false);
4060
4049
  try {
4061
4050
  await consumeQueue();
4062
4051
  }
4063
4052
  finally {
4053
+ if (loopDetection[loopDetection.length - 1] - loopDetection[0] < 10000) {
4054
+ // Ten loops within 10 seconds. Slow down!
4055
+ if (Date.now() - loopWarning < 5000) {
4056
+ // Last time we did this, we ended up here too. Wait for a minute.
4057
+ console.warn(`Slowing down websocket loop for one minute`);
4058
+ loopWarning = Date.now() + 60000;
4059
+ await new Promise(resolve => setTimeout(resolve, 60000));
4060
+ }
4061
+ else {
4062
+ // This is a one-time event. Just pause 10 seconds.
4063
+ console.warn(`Slowing down websocket loop for 10 seconds`);
4064
+ loopWarning = Date.now() + 10000;
4065
+ await new Promise(resolve => setTimeout(resolve, 10000));
4066
+ }
4067
+ }
4064
4068
  isWorking = false;
4065
4069
  readyToServe.next(true);
4066
4070
  }
@@ -4076,6 +4080,9 @@
4076
4080
  const msg = queue.shift();
4077
4081
  try {
4078
4082
  console.debug('processing msg', msg);
4083
+ // If the sync worker or service worker is syncing, wait 'til they're done.
4084
+ // There's no need to have two channels at the same time - even though it wouldn't
4085
+ // be a problem - this is an optimization.
4079
4086
  await db.cloud.syncState
4080
4087
  .pipe(filter(({ phase }) => phase === 'in-sync' || phase === 'error'), take(1))
4081
4088
  .toPromise();
@@ -4099,26 +4106,23 @@
4099
4106
  // in turn will lead to that connectWebSocket.ts will reconnect the socket with the
4100
4107
  // new token. So we don't need to do anything more here.
4101
4108
  break;
4102
- case 'rev':
4103
- if (!(persistedSyncState === null || persistedSyncState === void 0 ? void 0 : persistedSyncState.serverRevision) ||
4104
- compareBigInts(persistedSyncState.serverRevision, msg.rev) < 0) {
4105
- triggerSync(db, "pull");
4106
- }
4107
- break;
4108
4109
  case 'realm-added':
4109
4110
  if (!((_a = persistedSyncState === null || persistedSyncState === void 0 ? void 0 : persistedSyncState.realms) === null || _a === void 0 ? void 0 : _a.includes(msg.realm))) {
4110
- triggerSync(db, "pull");
4111
+ triggerSync(db, 'pull');
4111
4112
  }
4112
4113
  break;
4113
4114
  case 'realm-removed':
4114
4115
  if ((_b = persistedSyncState === null || persistedSyncState === void 0 ? void 0 : persistedSyncState.realms) === null || _b === void 0 ? void 0 : _b.includes(msg.realm)) {
4115
- triggerSync(db, "pull");
4116
+ triggerSync(db, 'pull');
4116
4117
  }
4117
4118
  break;
4119
+ case 'realms-changed':
4120
+ triggerSync(db, 'pull');
4121
+ break;
4118
4122
  case 'changes':
4119
4123
  console.debug('changes');
4120
4124
  if (((_c = db.cloud.syncState.value) === null || _c === void 0 ? void 0 : _c.phase) === 'error') {
4121
- triggerSync(db, "pull");
4125
+ triggerSync(db, 'pull');
4122
4126
  break;
4123
4127
  }
4124
4128
  await db.transaction('rw', db.dx.tables, async (tx) => {
@@ -4141,16 +4145,34 @@
4141
4145
  return; // Initial sync must have taken place - otherwise, ignore this.
4142
4146
  }
4143
4147
  // Verify again in ACID tx that we're on same server revision.
4144
- if (compareBigInts(msg.baseRev, syncState.serverRevision) !== 0) {
4148
+ if (msg.baseRev !== syncState.serverRevision) {
4145
4149
  console.debug(`baseRev (${msg.baseRev}) differs from our serverRevision in syncState (${syncState.serverRevision})`);
4150
+ // Should we trigger a sync now? No. This is a normal case
4151
+ // when another local peer (such as the SW or a websocket channel on another tab) has
4152
+ // updated syncState from new server information but we are not aware yet. It would
4153
+ // be unnescessary to do a sync in that case. Instead, the caller of this consumeQueue()
4154
+ // function will do readyToServe.next(true) right after this return, which will lead
4155
+ // to a "ready" message being sent to server with the new accurate serverRev we have,
4156
+ // so that the next message indeed will be correct.
4157
+ if (typeof msg.baseRev === 'string' && // v2 format
4158
+ (typeof syncState.serverRevision === 'bigint' || // v1 format
4159
+ typeof syncState.serverRevision === 'object') // v1 format old browser
4160
+ ) {
4161
+ // The reason for the diff seems to be that server has migrated the revision format.
4162
+ // Do a full sync to update revision format.
4163
+ // If we don't do a sync request now, we could stuck in an endless loop.
4164
+ triggerSync(db, 'pull');
4165
+ }
4146
4166
  return; // Ignore message
4147
4167
  }
4148
4168
  // Verify also that the message is based on the exact same set of realms
4149
- const ourRealmSetHash = await Dexie__default['default'].waitFor(computeRealmSetHash(syncState));
4169
+ const ourRealmSetHash = await Dexie__default['default'].waitFor(
4170
+ // Keep TX in non-IDB work
4171
+ computeRealmSetHash(syncState));
4150
4172
  console.debug('ourRealmSetHash', ourRealmSetHash);
4151
4173
  if (ourRealmSetHash !== msg.realmSetHash) {
4152
4174
  console.debug('not same realmSetHash', msg.realmSetHash);
4153
- triggerSync(db, "pull");
4175
+ triggerSync(db, 'pull');
4154
4176
  // The message isn't based on the same realms.
4155
4177
  // Trigger a sync instead to resolve all things up.
4156
4178
  return;
@@ -4162,12 +4184,14 @@
4162
4184
  clientChanges = await listClientChanges(mutationTables, db);
4163
4185
  console.debug('msg queue: client changes', clientChanges);
4164
4186
  }
4165
- const filteredChanges = filterServerChangesThroughAddedClientChanges(msg.changes, clientChanges);
4166
- //
4167
- // apply server changes
4168
- //
4169
- console.debug('applying filtered server changes', filteredChanges);
4170
- await applyServerChanges(filteredChanges, db);
4187
+ if (msg.changes.length > 0) {
4188
+ const filteredChanges = filterServerChangesThroughAddedClientChanges(msg.changes, clientChanges);
4189
+ //
4190
+ // apply server changes
4191
+ //
4192
+ console.debug('applying filtered server changes', filteredChanges);
4193
+ await applyServerChanges(filteredChanges, db);
4194
+ }
4171
4195
  // Update latest revisions per table in case there are unsynced changes
4172
4196
  // This can be a real case in future when we allow non-eagery sync.
4173
4197
  // And it can actually be realistic now also, but very rare.
@@ -5043,18 +5067,19 @@
5043
5067
  const CLIENT_PING_INTERVAL = 30000;
5044
5068
  const FAIL_RETRY_WAIT_TIME = 60000;
5045
5069
  class WSObservable extends rxjs.Observable {
5046
- constructor(databaseUrl, rev, clientIdentity, messageProducer, webSocketStatus, token, tokenExpiration) {
5047
- super((subscriber) => new WSConnection(databaseUrl, rev, clientIdentity, token, tokenExpiration, subscriber, messageProducer, webSocketStatus));
5070
+ constructor(databaseUrl, rev, realmSetHash, clientIdentity, messageProducer, webSocketStatus, token, tokenExpiration) {
5071
+ super((subscriber) => new WSConnection(databaseUrl, rev, realmSetHash, clientIdentity, token, tokenExpiration, subscriber, messageProducer, webSocketStatus));
5048
5072
  }
5049
5073
  }
5050
5074
  let counter = 0;
5051
5075
  class WSConnection extends rxjs.Subscription {
5052
- constructor(databaseUrl, rev, clientIdentity, token, tokenExpiration, subscriber, messageProducer, webSocketStatus) {
5076
+ constructor(databaseUrl, rev, realmSetHash, clientIdentity, token, tokenExpiration, subscriber, messageProducer, webSocketStatus) {
5053
5077
  super(() => this.teardown());
5054
5078
  this.id = ++counter;
5055
5079
  console.debug('New WebSocket Connection', this.id, token ? 'authorized' : 'unauthorized');
5056
5080
  this.databaseUrl = databaseUrl;
5057
5081
  this.rev = rev;
5082
+ this.realmSetHash = realmSetHash;
5058
5083
  this.clientIdentity = clientIdentity;
5059
5084
  this.token = token;
5060
5085
  this.tokenExpiration = tokenExpiration;
@@ -5159,7 +5184,9 @@
5159
5184
  const searchParams = new URLSearchParams();
5160
5185
  if (this.subscriber.closed)
5161
5186
  return;
5187
+ searchParams.set('v', "2");
5162
5188
  searchParams.set('rev', this.rev);
5189
+ searchParams.set('realmsHash', this.realmSetHash);
5163
5190
  searchParams.set('clientId', this.clientIdentity);
5164
5191
  if (this.token) {
5165
5192
  searchParams.set('token', this.token);
@@ -5253,12 +5280,12 @@
5253
5280
  function createObservable() {
5254
5281
  return db.cloud.persistedSyncState.pipe(filter(syncState => syncState === null || syncState === void 0 ? void 0 : syncState.serverRevision), // Don't connect before there's no initial sync performed.
5255
5282
  take(1), // Don't continue waking up whenever syncState change
5256
- switchMap(() => db.cloud.currentUser), switchMap((userLogin) => userIsReallyActive.pipe(map((isActive) => (isActive ? userLogin : null)))), switchMap((userLogin) =>
5283
+ switchMap((syncState) => db.cloud.currentUser.pipe(map(userLogin => [userLogin, syncState]))), switchMap(([userLogin, syncState]) => userIsReallyActive.pipe(map((isActive) => [isActive ? userLogin : null, syncState]))), switchMap(async ([userLogin, syncState]) => [userLogin, await computeRealmSetHash(syncState)]), switchMap(([userLogin, realmSetHash]) =>
5257
5284
  // Let server end query changes from last entry of same client-ID and forward.
5258
5285
  // If no new entries, server won't bother the client. If new entries, server sends only those
5259
5286
  // and the baseRev of the last from same client-ID.
5260
5287
  userLogin
5261
- ? new WSObservable(db.cloud.options.databaseUrl, db.cloud.persistedSyncState.value.serverRevision, db.cloud.persistedSyncState.value.clientIdentity, messageProducer, db.cloud.webSocketStatus, userLogin.accessToken, userLogin.accessTokenExpiration)
5288
+ ? new WSObservable(db.cloud.options.databaseUrl, db.cloud.persistedSyncState.value.serverRevision, realmSetHash, db.cloud.persistedSyncState.value.clientIdentity, messageProducer, db.cloud.webSocketStatus, userLogin.accessToken, userLogin.accessTokenExpiration)
5262
5289
  : rxjs.from([])), catchError((error) => {
5263
5290
  if ((error === null || error === void 0 ? void 0 : error.name) === 'TokenExpiredError') {
5264
5291
  console.debug('WebSocket observable: Token expired. Refreshing token...');
@@ -5378,22 +5405,26 @@
5378
5405
  //let periodicSyncHandler: ((event: Event) => void) | null = null;
5379
5406
  let cancelToken = { cancelled: false };
5380
5407
  function syncAndRetry(purpose, retryNum = 1) {
5381
- syncIfPossible(db, cloudOptions, cloudSchema, {
5382
- cancelToken,
5383
- retryImmediatelyOnFetchError: true,
5384
- purpose
5385
- }).catch((e) => {
5386
- console.error('error in syncIfPossible()', e);
5387
- if (cancelToken.cancelled) {
5388
- stop();
5389
- }
5390
- else if (retryNum < 3) {
5391
- // Mimic service worker sync event: retry 3 times
5392
- // * first retry after 5 minutes
5393
- // * second retry 15 minutes later
5394
- setTimeout(() => syncAndRetry(purpose, retryNum + 1), [0, 5, 15][retryNum] * MINUTES);
5395
- }
5396
- });
5408
+ // Use setTimeout() to get onto a clean stack and
5409
+ // break free from possible active transaction:
5410
+ setTimeout(() => {
5411
+ syncIfPossible(db, cloudOptions, cloudSchema, {
5412
+ cancelToken,
5413
+ retryImmediatelyOnFetchError: true,
5414
+ purpose,
5415
+ }).catch((e) => {
5416
+ console.error('error in syncIfPossible()', e);
5417
+ if (cancelToken.cancelled) {
5418
+ stop();
5419
+ }
5420
+ else if (retryNum < 3) {
5421
+ // Mimic service worker sync event: retry 3 times
5422
+ // * first retry after 5 minutes
5423
+ // * second retry 15 minutes later
5424
+ setTimeout(() => syncAndRetry(purpose, retryNum + 1), [0, 5, 15][retryNum] * MINUTES);
5425
+ }
5426
+ });
5427
+ }, 0);
5397
5428
  }
5398
5429
  const start = () => {
5399
5430
  // Sync eagerly whenever a change has happened (+ initially when there's no syncState yet)
@@ -5401,7 +5432,7 @@
5401
5432
  console.debug('Starting LocalSyncWorker', db.localSyncEvent['id']);
5402
5433
  localSyncEventSubscription = db.localSyncEvent.subscribe(({ purpose }) => {
5403
5434
  try {
5404
- syncAndRetry(purpose || "pull");
5435
+ syncAndRetry(purpose || 'pull');
5405
5436
  }
5406
5437
  catch (err) {
5407
5438
  console.error('What-the....', err);