dexie-cloud-addon 4.1.0-beta.44 → 4.1.0-beta.45
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/modern/WSObservable.d.ts +4 -3
- package/dist/modern/dexie-cloud-addon.js +220 -130
- package/dist/modern/dexie-cloud-addon.js.map +1 -1
- package/dist/modern/dexie-cloud-addon.min.js +1 -1
- package/dist/modern/dexie-cloud-addon.min.js.map +1 -1
- package/dist/modern/service-worker.js +206 -124
- package/dist/modern/service-worker.js.map +1 -1
- package/dist/modern/service-worker.min.js +1 -1
- package/dist/modern/service-worker.min.js.map +1 -1
- package/dist/modern/yjs/getUpdatesTable.d.ts +1 -1
- package/dist/umd/WSObservable.d.ts +4 -3
- package/dist/umd/dexie-cloud-addon.js +220 -130
- package/dist/umd/dexie-cloud-addon.js.map +1 -1
- package/dist/umd/dexie-cloud-addon.min.js +1 -1
- package/dist/umd/dexie-cloud-addon.min.js.map +1 -1
- package/dist/umd/service-worker.js +206 -124
- package/dist/umd/service-worker.js.map +1 -1
- package/dist/umd/service-worker.min.js +1 -1
- package/dist/umd/service-worker.min.js.map +1 -1
- package/dist/umd/yjs/getUpdatesTable.d.ts +1 -1
- package/package.json +3 -3
- package/dist/modern/helpers/dbOnClosed.d.ts +0 -2
- package/dist/umd/helpers/dbOnClosed.d.ts +0 -2
@@ -42,7 +42,7 @@ export interface TokenExpiredMessage {
     type: 'token-expired';
 }
 export declare class WSObservable extends Observable<WSConnectionMsg> {
-    constructor(db: DexieCloudDB, rev: string, realmSetHash: string, clientIdentity: string, messageProducer: Observable<WSClientToServerMsg>, webSocketStatus: BehaviorSubject<DXCWebSocketStatus>, user: UserLogin);
+    constructor(db: DexieCloudDB, rev: string | undefined, yrev: string | undefined, realmSetHash: string, clientIdentity: string, messageProducer: Observable<WSClientToServerMsg>, webSocketStatus: BehaviorSubject<DXCWebSocketStatus>, user: UserLogin);
 }
 export declare class WSConnection extends Subscription {
     db: DexieCloudDB;
@@ -51,7 +51,8 @@ export declare class WSConnection extends Subscription {
     lastUserActivity: Date;
     lastPing: Date;
     databaseUrl: string;
-    rev: string;
+    rev: string | undefined;
+    yrev: string | undefined;
     realmSetHash: string;
     clientIdentity: string;
     user: UserLogin;
@@ -62,7 +63,7 @@ export declare class WSConnection extends Subscription {
     id: number;
     private pinger;
     private subscriptions;
-    constructor(db: DexieCloudDB, rev: string, realmSetHash: string, clientIdentity: string, user: UserLogin, subscriber: Subscriber<WSConnectionMsg>, messageProducer: Observable<WSClientToServerMsg>, webSocketStatus: BehaviorSubject<DXCWebSocketStatus>);
+    constructor(db: DexieCloudDB, rev: string | undefined, yrev: string | undefined, realmSetHash: string, clientIdentity: string, user: UserLogin, subscriber: Subscriber<WSConnectionMsg>, messageProducer: Observable<WSClientToServerMsg>, webSocketStatus: BehaviorSubject<DXCWebSocketStatus>);
    private teardown;
    private disconnect;
    reconnecting: boolean;
@@ -8,7 +8,7 @@
  *
  * ==========================================================================
  *
- * Version 4.1.0-beta.
+ * Version 4.1.0-beta.45, Mon Mar 31 2025
  *
  * https://dexie.org
  *
@@ -1014,6 +1014,7 @@ function encodeYMessage(msg) {
             break;
         case 'u-s':
             writeVarUint8Array(encoder, msg.u);
+            writeVarString(encoder, msg.r || '');
             break;
     }
 }
@@ -1400,7 +1401,8 @@ function decodeYMessage(a) {
                 table,
                 prop,
                 k,
-                u: readVarUint8Array(decoder)
+                u: readVarUint8Array(decoder),
+                r: (decoder.pos < decoder.arr.length && readVarString(decoder)) || undefined,
             };
         default:
             throw new TypeError(`Unknown message type: ${type}`);
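
The 'u-s' (update-from-server) message gains an optional trailing revision string: the encoder always appends it (empty when absent) and the decoder only reads it when bytes remain, so payloads from older peers still decode. Below is a minimal sketch of that optional-trailing-field pattern; it uses a simplified single-length-byte layout rather than the library's lib0-style varint codecs, so treat it as illustrative only.

```ts
const textEncoder = new TextEncoder();
const textDecoder = new TextDecoder();

// Encode an update plus an optional revision. The sketch assumes the update is
// shorter than 256 bytes so a single length byte suffices.
function encodeWithOptionalRev(update: Uint8Array, rev?: string): Uint8Array {
  const revBytes = textEncoder.encode(rev ?? '');
  const out = new Uint8Array(1 + update.length + revBytes.length);
  out[0] = update.length;
  out.set(update, 1);
  out.set(revBytes, 1 + update.length);
  return out;
}

// Decode the update and, only if trailing bytes exist, the revision.
function decodeWithOptionalRev(buf: Uint8Array): { update: Uint8Array; rev?: string } {
  const len = buf[0];
  const update = buf.slice(1, 1 + len);
  const rest = buf.slice(1 + len);
  const rev = rest.length > 0 ? textDecoder.decode(rest) : undefined;
  return { update, rev };
}
```
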
@@ -4766,6 +4768,7 @@ function syncWithServer(changes, y, syncState, baseRevs, db, databaseUrl, schema
         baseRevs,
         changes: encodeIdsForServer(db.dx.core.schema, currentUser, changes),
         y,
+        dxcv: db.cloud.version
     };
     console.debug('Sync request', syncRequest);
     db.syncStateChangedEvent.next({
@@ -4908,9 +4911,11 @@ function applyServerChanges(changes, db) {
     return __awaiter(this, void 0, void 0, function* () {
         console.debug('Applying server changes', changes, Dexie.currentTransaction);
         for (const { table: tableName, muts } of changes) {
+            if (!db.dx._allTables[tableName]) {
+                console.debug(`Server sent changes for table ${tableName} that we don't have. Ignoring.`);
+                continue;
+            }
             const table = db.table(tableName);
-            if (!table)
-                continue; // If server sends changes on a table we don't have, ignore it.
             const { primaryKey } = table.core.schema;
             const keyDecoder = (key) => {
                 switch (key[0]) {
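
The guard now checks the local schema up front instead of relying on the return value of db.table(), so changes for tables this client doesn't declare are skipped before any table access. A rough sketch of the same guard with hypothetical Dexie-like shapes (the names here are illustrative, not the addon's internals):

```ts
interface TableLike { name: string }
interface DbLike {
  _allTables: Record<string, TableLike>;
  table(name: string): TableLike; // assumed to throw for unknown tables in this sketch
}
type ServerChange = { table: string; muts: unknown[] };

function* resolveKnownTables(db: DbLike, changes: ServerChange[]): Generator<[TableLike, ServerChange]> {
  for (const change of changes) {
    if (!db._allTables[change.table]) {
      // The server can know tables that an older or trimmed client schema lacks.
      console.debug(`Ignoring changes for unknown table ${change.table}`);
      continue;
    }
    yield [db.table(change.table), change];
  }
}
```
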
@@ -5096,9 +5101,15 @@ function listYClientMessagesAndStateVector(db, tablesToSync) {
 
 function getUpdatesTable(db, table, ydocProp) {
     var _a, _b, _c;
+    if (!db.dx._allTables[table])
+        return undefined;
     const utbl = (_c = (_b = (_a = db.table(table)) === null || _a === void 0 ? void 0 : _a.schema.yProps) === null || _b === void 0 ? void 0 : _b.find(p => p.prop === ydocProp)) === null || _c === void 0 ? void 0 : _c.updatesTable;
-    if (!utbl)
-
+    if (!utbl) {
+        console.debug(`No updatesTable found for ${table}.${ydocProp}`);
+        return undefined;
+    }
+    if (!db.dx._allTables[utbl])
+        return undefined;
     return db.table(utbl);
 }
 
@@ -5109,74 +5120,91 @@ function applyYServerMessages(yMessages, db) {
         let resyncNeeded = false;
         let yServerRevision;
         for (const m of yMessages) {
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-                    .
-
-
-                    break;
-                }
-                case 'u-reject': {
-                    // Acces control or constraint rejected the update.
-                    // We delete it. It's not going to be sent again.
-                    // What's missing is a way to notify consumers, such as Tiptap editor, that the update was rejected.
-                    // This is only an issue when the document is open. We could find the open document and
-                    // in a perfect world, we should send a reverse update to the open document to undo the change.
-                    // See my question in https://discuss.yjs.dev/t/generate-an-inverse-update/2765
-                    console.debug(`Y update rejected. Deleting it.`);
-                    const utbl = getUpdatesTable(db, m.table, m.prop);
-                    // Delete the rejected update and all local updates since (avoid holes in the CRDT)
-                    // and destroy it's open document if there is one.
-                    const primaryKey = (_a = (yield utbl.get(m.i))) === null || _a === void 0 ? void 0 : _a.k;
-                    if (primaryKey != null) {
-                        yield db.transaction('rw', utbl, (tx) => {
-                            // @ts-ignore
-                            tx.idbtrans._rejecting_y_ypdate = true; // Inform ydoc triggers that we delete because of a rejection and not GC
-                            return utbl
-                                .where('i')
-                                .aboveOrEqual(m.i)
-                                .filter((u) => cmp(u.k, primaryKey) === 0 && ((u.f || 0) & 1) === 1)
-                                .delete();
-                        });
-                        // Destroy active doc
-                        const activeDoc = DexieYProvider.getDocCache(db.dx).find(m.table, primaryKey, m.prop);
-                        if (activeDoc)
-                            activeDoc.destroy(); // Destroy the document so that editors don't continue to work on it
+            try {
+                switch (m.type) {
+                    case 'u-s': {
+                        const utbl = getUpdatesTable(db, m.table, m.prop);
+                        if (utbl) {
+                            const updateRow = {
+                                k: m.k,
+                                u: m.u,
+                            };
+                            if (m.r) {
+                                // @ts-ignore
+                                updateRow.r = m.r;
+                                yServerRevision = m.r;
+                            }
+                            receivedUntils[utbl.name] = yield utbl.add(updateRow);
+                        }
+                        break;
                     }
-
-
-
-
-
-
+                    case 'u-ack': {
+                        const utbl = getUpdatesTable(db, m.table, m.prop);
+                        if (utbl) {
+                            yield db.transaction('rw', utbl, (tx) => __awaiter(this, void 0, void 0, function* () {
+                                let syncer = (yield tx
+                                    .table(utbl.name)
+                                    .get(DEXIE_CLOUD_SYNCER_ID));
+                                yield tx.table(utbl.name).put(Object.assign(Object.assign({}, (syncer || { i: DEXIE_CLOUD_SYNCER_ID })), { unsentFrom: Math.max((syncer === null || syncer === void 0 ? void 0 : syncer.unsentFrom) || 1, m.i + 1) }));
+                            }));
+                        }
+                        break;
                     }
-
-
-
-
-
+                    case 'u-reject': {
+                        // Acces control or constraint rejected the update.
+                        // We delete it. It's not going to be sent again.
+                        // What's missing is a way to notify consumers, such as Tiptap editor, that the update was rejected.
+                        // This is only an issue when the document is open. We could find the open document and
+                        // in a perfect world, we should send a reverse update to the open document to undo the change.
+                        // See my question in https://discuss.yjs.dev/t/generate-an-inverse-update/2765
+                        console.debug(`Y update rejected. Deleting it.`);
+                        const utbl = getUpdatesTable(db, m.table, m.prop);
+                        if (!utbl)
+                            break;
+                        // Delete the rejected update and all local updates since (avoid holes in the CRDT)
+                        // and destroy it's open document if there is one.
+                        const primaryKey = (_a = (yield utbl.get(m.i))) === null || _a === void 0 ? void 0 : _a.k;
+                        if (primaryKey != null) {
+                            yield db.transaction('rw', utbl, (tx) => {
+                                // @ts-ignore
+                                tx.idbtrans._rejecting_y_ypdate = true; // Inform ydoc triggers that we delete because of a rejection and not GC
+                                return utbl
+                                    .where('i')
+                                    .aboveOrEqual(m.i)
+                                    .filter((u) => cmp(u.k, primaryKey) === 0 && ((u.f || 0) & 1) === 1)
+                                    .delete();
+                            });
+                            // Destroy active doc
+                            const activeDoc = DexieYProvider.getDocCache(db.dx).find(m.table, primaryKey, m.prop);
+                            if (activeDoc)
+                                activeDoc.destroy(); // Destroy the document so that editors don't continue to work on it
+                        }
+                        break;
+                    }
+                    case 'in-sync': {
+                        const doc = DexieYProvider.getDocCache(db.dx).find(m.table, m.k, m.prop);
+                        if (doc && !doc.isSynced) {
+                            doc.emit('sync', [true]);
+                        }
+                        break;
+                    }
+                    case 'y-complete-sync-done': {
+                        yServerRevision = m.yServerRev;
+                        break;
+                    }
+                    case 'outdated-server-rev':
+                        resyncNeeded = true;
+                        break;
                 }
-
-
-
+            }
+            catch (e) {
+                console.error(`Failed to apply YMessage`, m, e);
             }
         }
         return {
             receivedUntils,
             resyncNeeded,
-            yServerRevision
+            yServerRevision,
         };
     });
 }
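
applyYServerMessages now wraps every message in its own try/catch, so a single malformed or unexpected message is logged and skipped instead of aborting the whole batch. A minimal sketch of that isolation pattern (the message and handler types are placeholders, not the addon's):

```ts
type YServerMessage = { type: string } & Record<string, unknown>;

async function applyAllIsolated(
  messages: YServerMessage[],
  applyOne: (m: YServerMessage) => Promise<void>
): Promise<{ applied: number; failed: number }> {
  let applied = 0;
  let failed = 0;
  for (const m of messages) {
    try {
      await applyOne(m);
      applied++;
    } catch (e) {
      // Mirrors the diff: log and keep going with the remaining messages.
      console.error('Failed to apply YMessage', m, e);
      failed++;
    }
  }
  return { applied, failed };
}
```
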
@@ -5287,7 +5315,9 @@ function downloadYDocsFromServer(db_1, databaseUrl_1, _a) {
                 throw new Error(`Protocol error from ${databaseUrl}/y/download`);
             }
             const yTable = getUpdatesTable(db, currentTable, currentProp);
-
+            if (yTable) {
+                yield yTable.bulkAdd(docsToInsert);
+            }
             docsToInsert = [];
         }
         if (currentRealmId &&
@@ -5623,12 +5653,12 @@ function _sync(db_1, options_1, schema_1) {
         return false; // Not needed anymore
     });
 }
-function deleteObjectsFromRemovedRealms(db, res,
+function deleteObjectsFromRemovedRealms(db, res, syncState) {
     return __awaiter(this, void 0, void 0, function* () {
         const deletedRealms = new Set();
         const rejectedRealms = new Set();
-        const previousRealmSet =
-        const previousInviteRealmSet =
+        const previousRealmSet = syncState ? syncState.realms : [];
+        const previousInviteRealmSet = syncState ? syncState.inviteRealms : [];
         const updatedRealmSet = new Set(res.realms);
         const updatedTotalRealmSet = new Set(res.realms.concat(res.inviteRealms));
         for (const realmId of previousRealmSet) {
@@ -5670,17 +5700,10 @@ function deleteObjectsFromRemovedRealms(db, res, prevState) {
                 }
             }
         }
-        if (rejectedRealms.size > 0) {
-            // Remove rejected/deleted realms from yDownloadedRealms because of the following use case:
-            // 1. User becomes added to the realm
-            // 2. User syncs and all documents of the realm is downloaded (downloadYDocsFromServer.ts)
-            // 3. User leaves the realm and all docs are deleted locally (built-in-trigger of deleting their rows in this file)
-            // 4. User is yet again added to the realm. At this point, we must make sure the docs are not considered already downloaded.
-            const updateSpec = {};
+        if (rejectedRealms.size > 0 && (syncState === null || syncState === void 0 ? void 0 : syncState.yDownloadedRealms)) {
             for (const realmId of rejectedRealms) {
-
+                delete syncState.yDownloadedRealms[realmId];
             }
-            yield db.$syncState.update('syncState', updateSpec);
         }
     });
 }
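
The rewritten branch drops rejected realms from yDownloadedRealms directly on the passed-in syncState, so that rejoining a realm later re-downloads its Y documents instead of treating them as already fetched. A small sketch of that bookkeeping, with an assumed shape for the sync state object:

```ts
interface PersistedSyncStateSketch {
  realms: string[];
  inviteRealms: string[];
  yDownloadedRealms?: Record<string, unknown>; // realmId -> download marker
}

function forgetDownloadedRealms(
  syncState: PersistedSyncStateSketch | undefined,
  rejectedRealms: Set<string>
): void {
  if (rejectedRealms.size === 0 || !syncState?.yDownloadedRealms) return;
  for (const realmId of rejectedRealms) {
    // Rejoining the realm later must re-trigger the Y document download.
    delete syncState.yDownloadedRealms[realmId];
  }
}
```
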
@@ -6899,13 +6922,13 @@ const SERVER_PING_TIMEOUT = 20000;
 const CLIENT_PING_INTERVAL = 30000;
 const FAIL_RETRY_WAIT_TIME = 60000;
 class WSObservable extends Observable$1 {
-    constructor(db, rev, realmSetHash, clientIdentity, messageProducer, webSocketStatus, user) {
-        super((subscriber) => new WSConnection(db, rev, realmSetHash, clientIdentity, user, subscriber, messageProducer, webSocketStatus));
+    constructor(db, rev, yrev, realmSetHash, clientIdentity, messageProducer, webSocketStatus, user) {
+        super((subscriber) => new WSConnection(db, rev, yrev, realmSetHash, clientIdentity, user, subscriber, messageProducer, webSocketStatus));
     }
 }
 let counter = 0;
 class WSConnection extends Subscription$1 {
-    constructor(db, rev, realmSetHash, clientIdentity, user, subscriber, messageProducer, webSocketStatus) {
+    constructor(db, rev, yrev, realmSetHash, clientIdentity, user, subscriber, messageProducer, webSocketStatus) {
         super(() => this.teardown());
         this.id = ++counter;
         this.subscriptions = new Set();
@@ -6914,6 +6937,7 @@ class WSConnection extends Subscription$1 {
         this.db = db;
         this.databaseUrl = db.cloud.options.databaseUrl;
         this.rev = rev;
+        this.yrev = yrev;
         this.realmSetHash = realmSetHash;
         this.clientIdentity = clientIdentity;
         this.user = user;
@@ -6983,6 +7007,11 @@ class WSConnection extends Subscription$1 {
         }
         this.webSocketStatus.next('connecting');
         this.pinger = setInterval(() => __awaiter(this, void 0, void 0, function* () {
+            // setInterval here causes unnecessary pings when server is proved active anyway.
+            // TODO: Use setTimout() here instead. When triggered, check if we really need to ping.
+            // In case we've had server activity, we don't need to ping. Then schedule then next ping
+            // to the time when we should ping next time (based on lastServerActivity + CLIENT_PING_INTERVAL).
+            // Else, ping now and schedule next ping to CLIENT_PING_INTERVAL from now.
             if (this.closed) {
                 console.debug('pinger check', this.id, 'CLOSED.');
                 this.teardown();
@@ -7029,9 +7058,13 @@ class WSConnection extends Subscription$1 {
         if (this.subscriber.closed)
             return;
         searchParams.set('v', '2');
-
+        if (this.rev)
+            searchParams.set('rev', this.rev);
+        if (this.yrev)
+            searchParams.set('yrev', this.yrev);
         searchParams.set('realmsHash', this.realmSetHash);
         searchParams.set('clientId', this.clientIdentity);
+        searchParams.set('dxcv', this.db.cloud.version);
         if (this.user.accessToken) {
             searchParams.set('token', this.user.accessToken);
         }
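
The connect URL now carries the revision parameters only when they are known, plus the client library version as dxcv. A sketch of the same construction (the parameter names follow the diff; the helper itself is illustrative):

```ts
function buildSocketSearchParams(opts: {
  rev?: string;
  yrev?: string;
  realmsHash: string;
  clientId: string;
  version: string;
  token?: string;
}): URLSearchParams {
  const params = new URLSearchParams();
  params.set('v', '2');
  if (opts.rev) params.set('rev', opts.rev);    // omitted when syncing from scratch
  if (opts.yrev) params.set('yrev', opts.yrev); // Yjs server revision, new in this release
  params.set('realmsHash', opts.realmsHash);
  params.set('clientId', opts.clientId);
  params.set('dxcv', opts.version);             // lets the server know the client version
  if (opts.token) params.set('token', opts.token);
  return params;
}
```
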
@@ -7068,8 +7101,8 @@ class WSConnection extends Subscription$1 {
                     }
                 }
             }
-            else if (msg.type === '
-
+            else if (msg.type === 'pong') {
+                // Do nothing
             }
             else if (msg.type === 'doc-open') {
                 const docCache = DexieYProvider.getDocCache(this.db.dx);
@@ -7078,11 +7111,26 @@ class WSConnection extends Subscription$1 {
                     getOpenDocSignal(doc).next(); // Make yHandler reopen the document on server.
                 }
             }
-            else if (msg.type === 'outdated-server-rev' || msg.type === 'y-complete-sync-done') {
-
-
+            else if (msg.type === 'u-ack' || msg.type === 'u-reject' || msg.type === 'u-s' || msg.type === 'in-sync' || msg.type === 'outdated-server-rev' || msg.type === 'y-complete-sync-done') {
+                applyYServerMessages([msg], this.db).then((_a) => __awaiter(this, [_a], void 0, function* ({ resyncNeeded, yServerRevision, receivedUntils }) {
+                    if (yServerRevision) {
+                        yield this.db.$syncState.update('syncState', { yServerRevision: yServerRevision });
+                    }
+                    if (msg.type === 'u-s' && receivedUntils) {
+                        const utbl = getUpdatesTable(this.db, msg.table, msg.prop);
+                        if (utbl) {
+                            const receivedUntil = receivedUntils[utbl.name];
+                            if (receivedUntil) {
+                                yield utbl.update(DEXIE_CLOUD_SYNCER_ID, { receivedUntil });
+                            }
+                        }
+                    }
+                    if (resyncNeeded) {
+                        yield this.db.cloud.sync({ purpose: 'pull', wait: true });
+                    }
+                }));
             }
-            else
+            else {
                 // Forward the request to our subscriber, wich is in messageFromServerQueue.ts (via connectWebSocket's subscribe() at the end!)
                 this.subscriber.next(msg);
             }
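
The onmessage handler now recognizes the whole family of Yjs message types and applies them locally (persisting the per-table receive watermark and triggering a pull when the server reports an outdated revision), while everything else is still forwarded to the subscriber. A condensed sketch of that routing decision (types and callbacks here are stand-ins):

```ts
type ServerMessage = { type: string } & Record<string, unknown>;

const Y_MESSAGE_TYPES = new Set([
  'u-ack', 'u-reject', 'u-s', 'in-sync', 'outdated-server-rev', 'y-complete-sync-done',
]);

function routeServerMessage(
  msg: ServerMessage,
  applyYMessage: (m: ServerMessage) => Promise<void>,
  forward: (m: ServerMessage) => void
): void {
  if (msg.type === 'pong') {
    return; // keep-alive reply only; nothing to do
  }
  if (Y_MESSAGE_TYPES.has(msg.type)) {
    // Yjs traffic is applied locally rather than pushed through the regular sync queue.
    applyYMessage(msg).catch((e) => console.error('Failed to apply Y message', msg, e));
    return;
  }
  forward(msg); // other messages go to the messageFromServerQueue path
}
```
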
@@ -7215,7 +7263,7 @@ function connectWebSocket(db) {
     // If no new entries, server won't bother the client. If new entries, server sends only those
     // and the baseRev of the last from same client-ID.
     if (userLogin) {
-        return new WSObservable(db, db.cloud.persistedSyncState.value.serverRevision, realmSetHash, db.cloud.persistedSyncState.value.clientIdentity, messageProducer, db.cloud.webSocketStatus, userLogin);
+        return new WSObservable(db, db.cloud.persistedSyncState.value.serverRevision, db.cloud.persistedSyncState.value.yServerRevision, realmSetHash, db.cloud.persistedSyncState.value.clientIdentity, messageProducer, db.cloud.webSocketStatus, userLogin);
     }
     else {
         return from$1([]);
@@ -7345,61 +7393,95 @@ function syncIfPossible(db, cloudOptions, cloudSchema, options) {
 }
 
 const SECONDS = 1000;
-const MINUTES = 60 * SECONDS;
 
 function LocalSyncWorker(db, cloudOptions, cloudSchema) {
     let localSyncEventSubscription = null;
-    //let syncHandler: ((event: Event) => void) | null = null;
-    //let periodicSyncHandler: ((event: Event) => void) | null = null;
     let cancelToken = { cancelled: false };
-    let
-    let
-    function syncAndRetry(
+    let nextRetryTime = 0;
+    let syncStartTime = 0;
+    function syncAndRetry(retryNum = 1) {
         // Use setTimeout() to get onto a clean stack and
         // break free from possible active transaction:
         setTimeout(() => {
-
-
-            const combPurpose = retryPurpose === 'pull' ? 'pull' : purpose;
-            retryHandle = null;
-            retryPurpose = null;
+            const purpose = pullSignalled ? 'pull' : 'push';
+            syncStartTime = Date.now();
             syncIfPossible(db, cloudOptions, cloudSchema, {
                 cancelToken,
                 retryImmediatelyOnFetchError: true, // workaround for "net::ERR_NETWORK_CHANGED" in chrome.
-                purpose
-            }).
-
+                purpose,
+            }).then(() => {
+                if (cancelToken.cancelled) {
+                    stop();
+                }
+                else {
+                    if (pullSignalled || pushSignalled) {
+                        // If we have signalled for more sync, do it now.
+                        pullSignalled = false;
+                        pushSignalled = false;
+                        return syncAndRetry();
+                    }
+                }
+                ongoingSync = false;
+                nextRetryTime = 0;
+                syncStartTime = 0;
+            }).catch((error) => {
+                console.error('error in syncIfPossible()', error);
                 if (cancelToken.cancelled) {
                     stop();
+                    ongoingSync = false;
+                    nextRetryTime = 0;
+                    syncStartTime = 0;
+                }
+                else if (retryNum < 5) {
+                    // Mimic service worker sync event but a bit more eager: retry 4 times
+                    // * first retry after 20 seconds
+                    // * second retry 40 seconds later
+                    // * third retry 5 minutes later
+                    // * last retry 15 minutes later
+                    const retryIn = [0, 20, 40, 300, 900][retryNum] * SECONDS;
+                    nextRetryTime = Date.now() + retryIn;
+                    syncStartTime = 0;
+                    setTimeout(() => syncAndRetry(retryNum + 1), retryIn);
                 }
-                else
-
-
-
-                const combinedPurpose = retryPurpose && retryPurpose === 'pull' ? 'pull' : purpose;
-                const handle = setTimeout(() => syncAndRetry(combinedPurpose, retryNum + 1), [0, 5, 15][retryNum] * MINUTES);
-                // Cancel the previous retryHandle if it exists to avoid scheduling loads of retries.
-                if (retryHandle)
-                    clearTimeout(retryHandle);
-                retryHandle = handle;
-                retryPurpose = combinedPurpose;
+                else {
+                    ongoingSync = false;
+                    nextRetryTime = 0;
+                    syncStartTime = 0;
                 }
             });
         }, 0);
     }
+    let pullSignalled = false;
+    let pushSignalled = false;
+    let ongoingSync = false;
+    const consumer = (purpose) => {
+        if (cancelToken.cancelled)
+            return;
+        if (purpose === 'pull') {
+            pullSignalled = true;
+        }
+        if (purpose === 'push') {
+            pushSignalled = true;
+        }
+        if (ongoingSync) {
+            if (nextRetryTime) {
+                console.debug(`Sync is paused until ${new Date(nextRetryTime).toISOString()} due to error in last sync attempt`);
+            }
+            else if (syncStartTime > 0 && Date.now() - syncStartTime > 20 * SECONDS) {
+                console.debug(`An existing sync operation is taking more than 20 seconds. Will resync when done.`);
+            }
+            return;
+        }
+        ongoingSync = true;
+        syncAndRetry();
+    };
     const start = () => {
         // Sync eagerly whenever a change has happened (+ initially when there's no syncState yet)
         // This initial subscribe will also trigger an sync also now.
         console.debug('Starting LocalSyncWorker', db.localSyncEvent['id']);
         localSyncEventSubscription = db.localSyncEvent.subscribe(({ purpose }) => {
-
-            syncAndRetry(purpose || 'pull');
-            }
-            catch (err) {
-                console.error('What-the....', err);
-            }
+            consumer(purpose || 'pull');
         });
-        //setTimeout(()=>db.localSyncEvent.next({}), 5000);
     };
     const stop = () => {
         console.debug('Stopping LocalSyncWorker');
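
The new LocalSyncWorker replaces the old minute-scale retry timers with a coalescing consumer: sync requests that arrive while a sync is already running only set pull/push flags, a finished sync re-runs once if anything was flagged, and failures back off on a 20 s / 40 s / 5 min / 15 min schedule. A simplified sketch of that pattern (the flag handling is tidied slightly, and doSync stands in for syncIfPossible):

```ts
const SECOND = 1000;
const RETRY_SCHEDULE_SECONDS = [0, 20, 40, 300, 900]; // indexed by retry attempt

function createSyncConsumer(doSync: (purpose: 'pull' | 'push') => Promise<void>) {
  let ongoing = false;
  let pullSignalled = false;
  let pushSignalled = false;

  function run(retryNum = 1): void {
    const purpose: 'pull' | 'push' = pullSignalled ? 'pull' : 'push';
    pullSignalled = pushSignalled = false;
    doSync(purpose)
      .then(() => {
        if (pullSignalled || pushSignalled) {
          // More work was signalled while syncing: run one more round.
          run();
        } else {
          ongoing = false;
        }
      })
      .catch((error) => {
        console.error('sync failed', error);
        if (retryNum < RETRY_SCHEDULE_SECONDS.length) {
          setTimeout(() => run(retryNum + 1), RETRY_SCHEDULE_SECONDS[retryNum] * SECOND);
        } else {
          ongoing = false; // give up until the next local change signals again
        }
      });
  }

  return (purpose: 'pull' | 'push'): void => {
    if (purpose === 'pull') pullSignalled = true;
    else pushSignalled = true;
    if (ongoing) return; // coalesce into the sync that is already running
    ongoing = true;
    run();
  };
}
```
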
@@ -8160,8 +8242,8 @@ function getTiedObjectId(realmId) {
 
 const ydocTriggers = {};
 const middlewares = new WeakMap();
-const txRunner = TriggerRunner("tx");
-const unloadRunner = TriggerRunner("unload");
+const txRunner = TriggerRunner("tx"); // Trigger registry for transaction completion. Avoids open docs.
+const unloadRunner = TriggerRunner("unload"); // Trigger registry for unload. Runs when a document is closed.
 function TriggerRunner(name) {
     let triggerExecPromise = null;
     let triggerScheduled = false;
@@ -8171,14 +8253,17 @@ function TriggerRunner(name) {
         for (const { db, parentId, triggers, parentTable, prop, } of registryCopy.values()) {
             const yDoc = DexieYProvider.getOrCreateDocument(db, parentTable, prop, parentId);
             try {
-                DexieYProvider.load(yDoc); // If doc is open, this would just be a ++refount
-                yield
+                const provider = DexieYProvider.load(yDoc); // If doc is open, this would just be a ++refount
+                yield provider.whenLoaded; // If doc is loaded, this would resolve immediately
                 for (const trigger of triggers) {
                     yield trigger(yDoc, parentId);
                 }
             }
             catch (error) {
-
+                if ((error === null || error === void 0 ? void 0 : error.name) === 'AbortError') ;
+                else {
+                    console.error(`Error in YDocTrigger ${error}`);
+                }
             }
             finally {
                 DexieYProvider.release(yDoc);
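
The trigger runner now awaits the provider's whenLoaded promise before running triggers, and still releases the document in the finally block. A sketch of that acquire, await-loaded, run-triggers, release flow, with a generic async resource standing in for DexieYProvider and Y.Doc (the point is that release always balances the acquire, and AbortErrors from a document closing mid-run are swallowed, as in the diff):

```ts
interface LoadedResource<T> {
  value: T;
  whenLoaded: Promise<void>; // resolves once the underlying doc is loaded
}

async function runTriggersWithResource<T>(
  acquire: () => LoadedResource<T>,
  release: (r: LoadedResource<T>) => void,
  triggers: Array<(value: T) => Promise<void>>
): Promise<void> {
  const res = acquire();        // ref-counted acquire; cheap if already open
  try {
    await res.whenLoaded;       // resolves immediately when already loaded
    for (const trigger of triggers) {
      await trigger(res.value);
    }
  } catch (error) {
    // An AbortError means the document went away mid-run; stay silent in that case.
    if ((error as { name?: string })?.name !== 'AbortError') {
      console.error(`Error in trigger: ${error}`);
    }
  } finally {
    release(res);               // always balance the acquire, even on failure
  }
}
```
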
@@ -8190,16 +8275,20 @@ function TriggerRunner(name) {
         name,
         run() {
             return __awaiter(this, void 0, void 0, function* () {
+                console.log(`Running trigger (${name})?`, triggerScheduled, registry.size, !!triggerExecPromise);
                 if (!triggerScheduled && registry.size > 0) {
                     triggerScheduled = true;
                     if (triggerExecPromise)
                         yield triggerExecPromise.catch(() => { });
                     setTimeout(() => {
                         // setTimeout() is to escape from Promise.PSD zones and never run within liveQueries or transaction scopes
+                        console.log("Running trigger really!", name);
                         triggerScheduled = false;
                         const registryCopy = registry;
                         registry = new Map();
-                        triggerExecPromise = execute(registryCopy).finally(() =>
+                        triggerExecPromise = execute(registryCopy).finally(() => {
+                            triggerExecPromise = null;
+                        });
                     }, 0);
                 }
             });
@@ -8215,6 +8304,7 @@ function TriggerRunner(name) {
                 prop,
                 triggers: new Set(),
             };
+            console.log(`Adding trigger ${key}`);
             registry.set(key, entry);
         }
         entry.triggers.add(trigger);
@@ -8354,7 +8444,7 @@ function dexieCloud(dexie) {
     const syncComplete = new Subject();
     dexie.cloud = {
         // @ts-ignore
-        version: "4.1.0-beta.
+        version: "4.1.0-beta.45",
         options: Object.assign({}, DEFAULT_OPTIONS),
         schema: null,
         get currentUserId() {
@@ -8672,7 +8762,7 @@ function dexieCloud(dexie) {
     }
 }
 // @ts-ignore
-dexieCloud.version = "4.1.0-beta.
+dexieCloud.version = "4.1.0-beta.45";
 Dexie.Cloud = dexieCloud;
 
 export { dexieCloud as default, defineYDocTrigger, dexieCloud, getTiedObjectId, getTiedRealmId, resolveText };