dexie-cloud-addon 4.0.0-beta.17 → 4.0.0-beta.18
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/modern/dexie-cloud-addon.js +1622 -1570
- package/dist/modern/dexie-cloud-addon.js.map +1 -1
- package/dist/modern/dexie-cloud-addon.min.js +1 -1
- package/dist/modern/dexie-cloud-addon.min.js.map +1 -1
- package/dist/modern/service-worker.js +1685 -1630
- package/dist/modern/service-worker.js.map +1 -1
- package/dist/modern/service-worker.min.js +1 -1
- package/dist/modern/service-worker.min.js.map +1 -1
- package/dist/module-es5/dexie-cloud-addon.js +669 -637
- package/dist/module-es5/dexie-cloud-addon.js.map +1 -1
- package/dist/module-es5/dexie-cloud-addon.min.js +1 -1
- package/dist/module-es5/dexie-cloud-addon.min.js.map +1 -1
- package/dist/umd/dexie-cloud-addon.js +669 -637
- package/dist/umd/dexie-cloud-addon.js.map +1 -1
- package/dist/umd/dexie-cloud-addon.min.js +1 -1
- package/dist/umd/dexie-cloud-addon.min.js.map +1 -1
- package/dist/umd/service-worker.js +1685 -1630
- package/dist/umd/service-worker.js.map +1 -1
- package/dist/umd/service-worker.min.js +1 -1
- package/dist/umd/service-worker.min.js.map +1 -1
- package/dist/umd-modern/dexie-cloud-addon.js +1619 -1567
- package/dist/umd-modern/dexie-cloud-addon.js.map +1 -1
- package/package.json +2 -2
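
Most of the changed lines in the bundles below follow one mechanical pattern: native async/await and optional chaining in the previous build are replaced by the tslib __awaiter helper (added at the top of the bundle in this version) together with explicit null/void 0 checks. As a rough, hypothetical illustration of that pattern (readyState below is not a function from the package), authored code like this:

// Authored source (modern JS/TypeScript):
async function readyState() {
  const sw = await navigator.serviceWorker.ready;
  return sw.active?.state;
}

corresponds to the down-leveled shape seen throughout this diff:

// Down-leveled output using the __awaiter$1 helper added in beta.18:
function readyState() {
  return __awaiter$1(this, void 0, void 0, function* () {
    var _a;
    const sw = yield navigator.serviceWorker.ready;
    return (_a = sw.active) === null || _a === void 0 ? void 0 : _a.state;
  });
}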
@@ -1,6 +1,31 @@
 import Dexie, { cmp, liveQuery } from 'dexie';
 import { Observable as Observable$1, BehaviorSubject, Subject, fromEvent, of, merge, Subscription as Subscription$1, from as from$1, throwError, combineLatest, map as map$1, share, timer as timer$1, switchMap as switchMap$1 } from 'rxjs';

+/*! *****************************************************************************
+Copyright (c) Microsoft Corporation.
+
+Permission to use, copy, modify, and/or distribute this software for any
+purpose with or without fee is hereby granted.
+
+THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
+REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
+INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
+OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+PERFORMANCE OF THIS SOFTWARE.
+***************************************************************************** */
+
+function __awaiter$1(thisArg, _arguments, P, generator) {
+function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
+return new (P || (P = Promise))(function (resolve, reject) {
+function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
+function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
+function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
+step((generator = generator.apply(thisArg, _arguments || [])).next());
+});
+}
+
 const UNAUTHORIZED_USER = {
 userId: "unauthorized",
 name: "Unauthorized",
@@ -13,7 +38,7 @@ try {
 Object.freeze(UNAUTHORIZED_USER);
 Object.freeze(UNAUTHORIZED_USER.claims);
 }
-catch { }
+catch (_a) { }

 const swHolder = {};
 const swContainer = self.document && navigator.serviceWorker; // self.document is to verify we're not the SW ourself
@@ -22,8 +47,9 @@ if (swContainer)
 if (typeof self !== 'undefined' && 'clients' in self && !self.document) {
 // We are the service worker. Propagate messages to all our clients.
 addEventListener('message', (ev) => {
-
-
+var _a, _b;
+if ((_b = (_a = ev.data) === null || _a === void 0 ? void 0 : _a.type) === null || _b === void 0 ? void 0 : _b.startsWith('sw-broadcast-')) {
+[...self['clients'].matchAll({ includeUncontrolled: true })].forEach((client) => { var _a; return client.id !== ((_a = ev.source) === null || _a === void 0 ? void 0 : _a.id) && client.postMessage(ev.data); });
 }
 });
 }
@@ -35,7 +61,8 @@ class SWBroadcastChannel {
 if (!swContainer)
 return () => { };
 const forwarder = (ev) => {
-
+var _a;
+if (((_a = ev.data) === null || _a === void 0 ? void 0 : _a.type) === `sw-broadcast-${this.name}`) {
 listener(ev.data.message);
 }
 };
@@ -43,6 +70,7 @@ class SWBroadcastChannel {
 return () => swContainer.removeEventListener('message', forwarder);
 }
 postMessage(message) {
+var _a;
 if (typeof self['clients'] === 'object') {
 // We're a service worker. Propagate to our browser clients.
 [...self['clients'].matchAll({ includeUncontrolled: true })].forEach((client) => client.postMessage({
@@ -53,7 +81,7 @@ class SWBroadcastChannel {
 else if (swHolder.registration) {
 // We're a client (browser window or other worker)
 // Post to SW so it can repost to all its clients and to itself
-swHolder.registration.active
+(_a = swHolder.registration.active) === null || _a === void 0 ? void 0 : _a.postMessage({
 type: `sw-broadcast-${this.name}`,
 message
 });
@@ -96,7 +124,7 @@ class BroadcastedAndLocalEvent extends Observable$1 {
 this.bc = bc;
 }
 next(message) {
-console.debug("BroadcastedAndLocalEvent: bc.postMessage()", {
+console.debug("BroadcastedAndLocalEvent: bc.postMessage()", Object.assign({}, message), "bc is a", this.bc);
 this.bc.postMessage(message);
 const ev = new CustomEvent(`lbc-${this.name}`, { detail: message });
 self.dispatchEvent(ev);
@@ -1689,55 +1717,60 @@ function timeoutErrorFactory(info) {

 //const hasSW = 'serviceWorker' in navigator;
 let hasComplainedAboutSyncEvent = false;
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+function registerSyncEvent(db, purpose) {
+return __awaiter$1(this, void 0, void 0, function* () {
+try {
+// Send sync event to SW:
+const sw = yield navigator.serviceWorker.ready;
+if (purpose === "push" && sw.sync) {
+yield sw.sync.register(`dexie-cloud:${db.name}`);
+}
+if (sw.active) {
+// Use postMessage for pull syncs and for browsers not supporting sync event (Firefox, Safari).
+// Also chromium based browsers with sw.sync as a fallback for sleepy sync events not taking action for a while.
+sw.active.postMessage({
+type: 'dexie-cloud-sync',
+dbName: db.name,
+purpose
+});
+}
+else {
+throw new Error(`Failed to trigger sync - there's no active service worker`);
+}
+return;
 }
-
-
-
-
-
-hasComplainedAboutSyncEvent = true;
+catch (e) {
+if (!hasComplainedAboutSyncEvent) {
+console.debug(`Dexie Cloud: Could not register sync event`, e);
+hasComplainedAboutSyncEvent = true;
+}
 }
-}
+});
 }
-
-
-
-
-
-
-
-
-
+function registerPeriodicSyncEvent(db) {
+var _a;
+return __awaiter$1(this, void 0, void 0, function* () {
+try {
+// Register periodicSync event to SW:
+// @ts-ignore
+const { periodicSync } = yield navigator.serviceWorker.ready;
+if (periodicSync) {
+try {
+yield periodicSync.register(`dexie-cloud:${db.name}`, (_a = db.cloud.options) === null || _a === void 0 ? void 0 : _a.periodicSync);
+console.debug(`Dexie Cloud: Successfully registered periodicsync event for ${db.name}`);
+}
+catch (e) {
+console.debug(`Dexie Cloud: Failed to register periodic sync. Your PWA must be installed to allow background sync.`, e);
+}
 }
-
-console.debug(`Dexie Cloud:
+else {
+console.debug(`Dexie Cloud: periodicSync not supported.`);
 }
 }
-
-console.debug(`Dexie Cloud:
+catch (e) {
+console.debug(`Dexie Cloud: Could not register periodicSync for ${db.name}`, e);
 }
-}
-catch (e) {
-console.debug(`Dexie Cloud: Could not register periodicSync for ${db.name}`, e);
-}
+});
 }

 function triggerSync(db, purpose) {
@@ -1766,15 +1799,17 @@ const b64encode = typeof Buffer !== "undefined"
 : Buffer.from(b).toString("base64")
 : (b) => btoa(String.fromCharCode.apply(null, b));

-
-
-
-
-
-
-
-
-
+function computeRealmSetHash({ realms, inviteRealms, }) {
+return __awaiter$1(this, void 0, void 0, function* () {
+const data = JSON.stringify([
+...realms.map((realmId) => ({ realmId, accepted: true })),
+...inviteRealms.map((realmId) => ({ realmId, accepted: false })),
+].sort((a, b) => a.realmId < b.realmId ? -1 : a.realmId > b.realmId ? 1 : 0));
+const byteArray = new TextEncoder().encode(data);
+const digestBytes = yield crypto.subtle.digest('SHA-1', byteArray);
+const base64 = b64encode(digestBytes);
+return base64;
+});
 }

 function getSyncableTables(db) {
@@ -1789,7 +1824,8 @@ function getMutationTable(tableName) {
 }

 function getTableFromMutationTable(mutationTable) {
-
+var _a;
+const tableName = (_a = /^\$(.*)_mutations$/.exec(mutationTable)) === null || _a === void 0 ? void 0 : _a[1];
 if (!tableName)
 throw new Error(`Given mutationTable ${mutationTable} is not correct`);
 return tableName;
@@ -1800,49 +1836,51 @@ function flatten(a) {
 return concat.apply([], a);
 }

-
-
-const
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-currentEntry
-
-
-
-
-
-
-
-
-
-
+function listClientChanges(mutationTables, db, { since = {}, limit = Infinity } = {}) {
+return __awaiter$1(this, void 0, void 0, function* () {
+const allMutsOnTables = yield Promise.all(mutationTables.map((mutationTable) => __awaiter$1(this, void 0, void 0, function* () {
+const tableName = getTableFromMutationTable(mutationTable.name);
+const lastRevision = since[tableName];
+let query = lastRevision
+? mutationTable.where('rev').above(lastRevision)
+: mutationTable;
+if (limit < Infinity)
+query = query.limit(limit);
+const muts = yield query.toArray();
+//const objTable = db.table(tableName);
+/*for (const mut of muts) {
+if (mut.type === "insert" || mut.type === "upsert") {
+mut.values = await objTable.bulkGet(mut.keys);
+}
+}*/
+return muts.map((mut) => ({
+table: tableName,
+mut,
+}));
+})));
+// Sort by time to get a true order of the operations (between tables)
+const sorted = flatten(allMutsOnTables).sort((a, b) => a.mut.ts - b.mut.ts);
+const result = [];
+let currentEntry = null;
+let currentTxid = null;
+for (const { table, mut } of sorted) {
+if (currentEntry &&
+currentEntry.table === table &&
+currentTxid === mut.txid) {
+currentEntry.muts.push(mut);
+}
+else {
+currentEntry = {
+table,
+muts: [mut],
+};
+currentTxid = mut.txid;
+result.push(currentEntry);
+}
 }
-
-
-
+// Filter out those tables that doesn't have any mutations:
+return result;
+});
 }

 function randomString$1(bytes) {
@@ -2148,58 +2186,60 @@ function getDbNameFromDbUrl(dbUrl) {
 : url.pathname.split('/')[1];
 }

-
-
-
-if (
-
-
-const
-
-
-
-
-?
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+function listSyncifiedChanges(tablesToSyncify, currentUser, schema, alreadySyncedRealms) {
+return __awaiter$1(this, void 0, void 0, function* () {
+const txid = `upload-${randomString$1(8)}`;
+if (currentUser.isLoggedIn) {
+if (tablesToSyncify.length > 0) {
+const ignoredRealms = new Set(alreadySyncedRealms || []);
+const upserts = yield Promise.all(tablesToSyncify.map((table) => __awaiter$1(this, void 0, void 0, function* () {
+const { extractKey } = table.core.schema.primaryKey;
+if (!extractKey)
+return { table: table.name, muts: [] }; // Outbound tables are not synced.
+const dexieCloudTableSchema = schema[table.name];
+const query = (dexieCloudTableSchema === null || dexieCloudTableSchema === void 0 ? void 0 : dexieCloudTableSchema.generatedGlobalId)
+? table.filter((item) => {
+const id = extractKey(item);
+return (!ignoredRealms.has(item.realmId || '') &&
+//(id[0] !== '#' || !!item.$ts) && // Private obj need no sync if not changed
+isValidSyncableID(id));
+})
+: table.filter((item) => {
+extractKey(item);
+return (!ignoredRealms.has(item.realmId || '') &&
+//(id[0] !== '#' || !!item.$ts) && // Private obj need no sync if not changed
+isValidAtID(extractKey(item), dexieCloudTableSchema === null || dexieCloudTableSchema === void 0 ? void 0 : dexieCloudTableSchema.idPrefix));
+});
+const unsyncedObjects = yield query.toArray();
+if (unsyncedObjects.length > 0) {
+const mut = {
+type: 'upsert',
+values: unsyncedObjects,
+keys: unsyncedObjects.map(extractKey),
+userId: currentUser.userId,
+txid,
+};
+return {
+table: table.name,
+muts: [mut],
+};
+}
+else {
+return {
+table: table.name,
+muts: [],
+};
+}
+})));
+return upserts.filter((op) => op.muts.length > 0);
+}
 }
-
-
+return [];
+});
 }

 function getTablesToSyncify(db, syncState) {
-const syncedTables = syncState
+const syncedTables = (syncState === null || syncState === void 0 ? void 0 : syncState.syncedTables) || [];
 const syncableTables = getSyncableTables(db);
 const tablesToSyncify = syncableTables.filter((tbl) => !syncedTables.includes(tbl.name));
 return tablesToSyncify;
@@ -2208,19 +2248,15 @@ function getTablesToSyncify(db, syncState) {
 function interactWithUser(userInteraction, req) {
 let done = false;
 return new Promise((resolve, reject) => {
-const interactionProps = {
-...req,
-onSubmit: (res) => {
+const interactionProps = Object.assign(Object.assign({}, req), { onSubmit: (res) => {
 userInteraction.next(undefined);
 done = true;
 resolve(res);
-},
-onCancel: () => {
+}, onCancel: () => {
 userInteraction.next(undefined);
 done = true;
 reject(new Dexie.AbortError("User cancelled"));
-}
-};
+} });
 userInteraction.next(interactionProps);
 // Start subscribing for external updates to db.cloud.userInteraction, and if so, cancel this request.
 /*const subscription = userInteraction.subscribe((currentInteractionProps) => {
@@ -2241,180 +2277,193 @@ function alertUser(userInteraction, title, ...alerts) {
 fields: {}
 });
 }
-
-
-
-email
-
-
-
-
-
-
-
-
-
+function promptForEmail(userInteraction, title, emailHint) {
+return __awaiter$1(this, void 0, void 0, function* () {
+let email = emailHint || '';
+while (!email || !/^[\w-\.]+@([\w-]+\.)+[\w-]{2,10}$/.test(email)) {
+email = (yield interactWithUser(userInteraction, {
+type: 'email',
+title,
+alerts: email
+? [
+{
+type: 'error',
+messageCode: 'INVALID_EMAIL',
+message: 'Please enter a valid email address',
+messageParams: {},
+},
+]
+: [],
+fields: {
+email: {
+type: 'email',
+placeholder: 'you@somedomain.com',
 },
-]
-: [],
-fields: {
-email: {
-type: 'email',
-placeholder: 'you@somedomain.com',
 },
-}
-}
-
-
+})).email;
+}
+return email;
+});
 }
-
-
-
-
-
-
-
-
-];
-if (alert) {
-alerts.push(alert);
-}
-const { otp } = await interactWithUser(userInteraction, {
-type: 'otp',
-title: 'Enter OTP',
-alerts,
-fields: {
-otp: {
-type: 'otp',
-label: 'OTP',
-placeholder: 'Paste OTP here',
+function promptForOTP(userInteraction, email, alert) {
+return __awaiter$1(this, void 0, void 0, function* () {
+const alerts = [
+{
+type: 'info',
+messageCode: 'OTP_SENT',
+message: `A One-Time password has been sent to {email}`,
+messageParams: { email },
 },
-
+];
+if (alert) {
+alerts.push(alert);
+}
+const { otp } = yield interactWithUser(userInteraction, {
+type: 'otp',
+title: 'Enter OTP',
+alerts,
+fields: {
+otp: {
+type: 'otp',
+label: 'OTP',
+placeholder: 'Paste OTP here',
+},
+},
+});
+return otp;
 });
-return otp;
 }

-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+function loadAccessToken(db) {
+var _a, _b;
+return __awaiter$1(this, void 0, void 0, function* () {
+const currentUser = yield db.getCurrentUser();
+const { accessToken, accessTokenExpiration, refreshToken, refreshTokenExpiration, claims, } = currentUser;
+if (!accessToken)
+return;
+const expTime = (_a = accessTokenExpiration === null || accessTokenExpiration === void 0 ? void 0 : accessTokenExpiration.getTime()) !== null && _a !== void 0 ? _a : Infinity;
+if (expTime > Date.now()) {
+return accessToken;
+}
+if (!refreshToken) {
+throw new Error(`Refresh token missing`);
+}
+const refreshExpTime = (_b = refreshTokenExpiration === null || refreshTokenExpiration === void 0 ? void 0 : refreshTokenExpiration.getTime()) !== null && _b !== void 0 ? _b : Infinity;
+if (refreshExpTime <= Date.now()) {
+throw new Error(`Refresh token has expired`);
+}
+const refreshedLogin = yield refreshAccessToken(db.cloud.options.databaseUrl, currentUser);
+yield db.table('$logins').update(claims.sub, {
+accessToken: refreshedLogin.accessToken,
+accessTokenExpiration: refreshedLogin.accessTokenExpiration,
+});
+return refreshedLogin.accessToken;
 });
-return refreshedLogin.accessToken;
-}
-async function authenticate(url, context, fetchToken, userInteraction, hints) {
-if (context.accessToken &&
-context.accessTokenExpiration.getTime() > Date.now()) {
-return context;
-}
-else if (context.refreshToken &&
-(!context.refreshTokenExpiration ||
-context.refreshTokenExpiration.getTime() > Date.now())) {
-return await refreshAccessToken(url, context);
-}
-else {
-return await userAuthenticate(context, fetchToken, userInteraction, hints);
-}
 }
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-scopes: ['ACCESS_DB'],
-signature,
-signing_algorithm,
-time_stamp,
-};
-const res = await fetch(`${url}/token`, {
-body: JSON.stringify(tokenRequest),
-method: 'post',
-headers: { 'Content-Type': 'application/json' },
-mode: 'cors',
+function authenticate(url, context, fetchToken, userInteraction, hints) {
+return __awaiter$1(this, void 0, void 0, function* () {
+if (context.accessToken &&
+context.accessTokenExpiration.getTime() > Date.now()) {
+return context;
+}
+else if (context.refreshToken &&
+(!context.refreshTokenExpiration ||
+context.refreshTokenExpiration.getTime() > Date.now())) {
+return yield refreshAccessToken(url, context);
+}
+else {
+return yield userAuthenticate(context, fetchToken, userInteraction, hints);
+}
 });
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-hints,
+}
+function refreshAccessToken(url, login) {
+return __awaiter$1(this, void 0, void 0, function* () {
+if (!login.refreshToken)
+throw new Error(`Cannot refresh token - refresh token is missing.`);
+if (!login.nonExportablePrivateKey)
+throw new Error(`login.nonExportablePrivateKey is missing - cannot sign refresh token without a private key.`);
+const time_stamp = Date.now();
+const signing_algorithm = 'RSASSA-PKCS1-v1_5';
+const textEncoder = new TextEncoder();
+const data = textEncoder.encode(login.refreshToken + time_stamp);
+const binarySignature = yield crypto.subtle.sign(signing_algorithm, login.nonExportablePrivateKey, data);
+const signature = b64encode(binarySignature);
+const tokenRequest = {
+grant_type: 'refresh_token',
+refresh_token: login.refreshToken,
+scopes: ['ACCESS_DB'],
+signature,
+signing_algorithm,
+time_stamp,
+};
+const res = yield fetch(`${url}/token`, {
+body: JSON.stringify(tokenRequest),
+method: 'post',
+headers: { 'Content-Type': 'application/json' },
+mode: 'cors',
 });
-if (
-throw new Error(`
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+if (res.status !== 200)
+throw new Error(`RefreshToken: Status ${res.status} from ${url}/token`);
+const response = yield res.json();
+login.accessToken = response.accessToken;
+login.accessTokenExpiration = response.accessTokenExpiration
+? new Date(response.accessTokenExpiration)
+: undefined;
+return login;
+});
+}
+function userAuthenticate(context, fetchToken, userInteraction, hints) {
+return __awaiter$1(this, void 0, void 0, function* () {
+const { privateKey, publicKey } = yield crypto.subtle.generateKey({
+name: 'RSASSA-PKCS1-v1_5',
+modulusLength: 2048,
+publicExponent: new Uint8Array([0x01, 0x00, 0x01]),
+hash: { name: 'SHA-256' },
+}, false, // Non-exportable...
+['sign', 'verify']);
+if (!privateKey || !publicKey)
+throw new Error(`Could not generate RSA keypair`); // Typings suggest these can be undefined...
+context.nonExportablePrivateKey = privateKey; //...but storable!
+const publicKeySPKI = yield crypto.subtle.exportKey('spki', publicKey);
+const publicKeyPEM = spkiToPEM(publicKeySPKI);
+context.publicKey = publicKey;
+try {
+const response2 = yield fetchToken({
+public_key: publicKeyPEM,
+hints,
 });
+if (response2.type !== 'tokens')
+throw new Error(`Unexpected response type from token endpoint: ${response2.type}`);
+context.accessToken = response2.accessToken;
+context.accessTokenExpiration = new Date(response2.accessTokenExpiration);
+context.refreshToken = response2.refreshToken;
+if (response2.refreshTokenExpiration) {
+context.refreshTokenExpiration = new Date(response2.refreshTokenExpiration);
+}
+context.userId = response2.claims.sub;
+context.email = response2.claims.email;
+context.name = response2.claims.name;
+context.claims = response2.claims;
+if (response2.alerts && response2.alerts.length > 0) {
+yield interactWithUser(userInteraction, {
+type: 'message-alert',
+title: 'Authentication Alert',
+fields: {},
+alerts: response2.alerts,
+});
+}
+return context;
 }
-
-
-
-
-
-
-
-
-}
-
-}
+catch (error) {
+yield alertUser(userInteraction, 'Authentication Failed', {
+type: 'error',
+messageCode: 'GENERIC_ERROR',
+message: `We're having a problem authenticating right now.`,
+messageParams: {}
+}).catch(() => { });
+throw error;
+}
+});
 }
 function spkiToPEM(keydata) {
 const keydataB64 = b64encode(keydata);
@@ -2947,23 +2996,17 @@ class FakeBigInt {
 return this.v;
 }
 }
-const defs = {
-
-
-
-
-
-
-
-
-
-
-};
-},
-revive: ({ v, }) => new FakeBigInt(v)
-}
-})
-};
+const defs = Object.assign(Object.assign({}, undefinedDef), (hasBigIntSupport
+? {}
+: {
+bigint: {
+test: (val) => val instanceof FakeBigInt,
+replace: (fakeBigInt) => {
+return Object.assign({ $t: 'bigint' }, fakeBigInt);
+},
+revive: ({ v, }) => new FakeBigInt(v)
+}
+}));
 const TSON = TypesonSimplified(builtin, defs);
 const BISON = Bison(defs);

@@ -3022,110 +3065,107 @@ function encodeIdsForServer(schema, currentUser, changes) {
 }
 function cloneChange(change, rewriteValues) {
 // clone on demand:
-return {
-
-
-? change.muts.map((m) => ({
-...m,
-keys: m.keys.slice(),
-values: m.values.slice(),
-}))
-: change.muts.map((m) => ({ ...m, keys: m.keys.slice() })),
-};
+return Object.assign(Object.assign({}, change), { muts: rewriteValues
+? change.muts.map((m) => (Object.assign(Object.assign({}, m), { keys: m.keys.slice(), values: m.values.slice() })))
+: change.muts.map((m) => (Object.assign(Object.assign({}, m), { keys: m.keys.slice() }))) });
 }

 //import {BisonWebStreamReader} from "dreambase-library/dist/typeson-simplified/BisonWebStreamReader";
-
-
-
-
-
-
-
-
-
-
-
-
-const syncRequest = {
-v: 2,
-dbID: syncState?.remoteDbId,
-clientIdentity,
-schema: schema || {},
-lastPull: syncState ? {
-serverRevision: syncState.serverRevision,
-realms: syncState.realms,
-inviteRealms: syncState.inviteRealms
-} : undefined,
-baseRevs,
-changes: encodeIdsForServer(db.dx.core.schema, currentUser, changes)
-};
-console.debug("Sync request", syncRequest);
-db.syncStateChangedEvent.next({
-phase: 'pushing',
-});
-const res = await fetch(`${databaseUrl}/sync`, {
-method: 'post',
-headers,
-body: TSON.stringify(syncRequest)
-});
-//const contentLength = Number(res.headers.get('content-length'));
-db.syncStateChangedEvent.next({
-phase: 'pulling'
-});
-if (!res.ok) {
-throw new HttpError(res);
-}
-switch (res.headers.get('content-type')) {
-case 'application/x-bison':
-return BISON.fromBinary(await res.blob());
-case 'application/x-bison-stream': //return BisonWebStreamReader(BISON, res);
-default:
-case 'application/json': {
-const text = await res.text();
-const syncRes = TSON.parse(text);
-return syncRes;
+function syncWithServer(changes, syncState, baseRevs, db, databaseUrl, schema, clientIdentity, currentUser) {
+return __awaiter$1(this, void 0, void 0, function* () {
+//
+// Push changes to server using fetch
+//
+const headers = {
+Accept: 'application/json, application/x-bison, application/x-bison-stream',
+'Content-Type': 'application/tson'
+};
+const accessToken = yield loadAccessToken(db);
+if (accessToken) {
+headers.Authorization = `Bearer ${accessToken}`;
 }
-
-
-
-
-
-
-
-
-
-
-
-
-
+const syncRequest = {
+v: 2,
+dbID: syncState === null || syncState === void 0 ? void 0 : syncState.remoteDbId,
+clientIdentity,
+schema: schema || {},
+lastPull: syncState ? {
+serverRevision: syncState.serverRevision,
+realms: syncState.realms,
+inviteRealms: syncState.inviteRealms
+} : undefined,
+baseRevs,
+changes: encodeIdsForServer(db.dx.core.schema, currentUser, changes)
+};
+console.debug("Sync request", syncRequest);
+db.syncStateChangedEvent.next({
+phase: 'pushing',
+});
+const res = yield fetch(`${databaseUrl}/sync`, {
+method: 'post',
+headers,
+body: TSON.stringify(syncRequest)
+});
+//const contentLength = Number(res.headers.get('content-length'));
+db.syncStateChangedEvent.next({
+phase: 'pulling'
+});
+if (!res.ok) {
+throw new HttpError(res);
 }
-
-
-
-
-
-
-
-
+switch (res.headers.get('content-type')) {
+case 'application/x-bison':
+return BISON.fromBinary(yield res.blob());
+case 'application/x-bison-stream': //return BisonWebStreamReader(BISON, res);
+default:
+case 'application/json': {
+const text = yield res.text();
+const syncRes = TSON.parse(text);
+return syncRes;
+}
 }
-
-
-
-
-
-
-
-
+});
+}
+
+function modifyLocalObjectsWithNewUserId(syncifiedTables, currentUser, alreadySyncedRealms) {
+return __awaiter$1(this, void 0, void 0, function* () {
+const ignoredRealms = new Set(alreadySyncedRealms || []);
+for (const table of syncifiedTables) {
+if (table.name === "members") {
+// members
+yield table.toCollection().modify((member) => {
+if (!ignoredRealms.has(member.realmId) && (!member.userId || member.userId === UNAUTHORIZED_USER.userId)) {
+member.userId = currentUser.userId;
 }
-}
-}
+});
+}
+else if (table.name === "roles") ;
+else if (table.name === "realms") {
+// realms
+yield table.toCollection().modify((realm) => {
+if (!ignoredRealms.has(realm.realmId) && (realm.owner === undefined || realm.owner === UNAUTHORIZED_USER.userId)) {
+realm.owner = currentUser.userId;
+}
+});
+}
+else {
+// application entities
+yield table.toCollection().modify((obj) => {
+if (!obj.realmId || !ignoredRealms.has(obj.realmId)) {
+if (!obj.owner || obj.owner === UNAUTHORIZED_USER.userId)
+obj.owner = currentUser.userId;
+if (!obj.realmId || obj.realmId === UNAUTHORIZED_USER.userId) {
+obj.realmId = currentUser.userId;
+}
+}
+});
+}
 }
-}
+});
 }

 function throwIfCancelled(cancelToken) {
-if (cancelToken
+if (cancelToken === null || cancelToken === void 0 ? void 0 : cancelToken.cancelled)
 throw new Dexie.AbortError(`Operation was cancelled`);
 }

@@ -3137,17 +3177,19 @@ let isOnline = navigator.onLine;
 self.addEventListener('online', () => isOnline = true);
 self.addEventListener('offline', () => isOnline = false);

-
-
-.
-
-
-
-
-
-
-
-
+function updateBaseRevs(db, schema, latestRevisions, serverRev) {
+return __awaiter$1(this, void 0, void 0, function* () {
+yield db.$baseRevs.bulkPut(Object.keys(schema)
+.filter((table) => schema[table].markedForSync)
+.map((tableName) => {
+const lastClientRevOnPreviousServerRev = latestRevisions[tableName] || 0;
+return {
+tableName,
+clientRev: lastClientRevOnPreviousServerRev + 1,
+serverRev,
+};
+}));
+});
 }

 function getLatestRevisionsPerTable(clientChangeSet, lastRevisions = {}) {
@@ -3158,119 +3200,123 @@ function getLatestRevisionsPerTable(clientChangeSet, lastRevisions = {}) {
 return lastRevisions;
 }

-
-
-
-
-
-
-
-
-
-if (
-
-
-}
-else {
-Dexie.setByKeyPath(obj, keyPath, value);
-}
-}
-resultKeys.push(key);
-resultObjs.push(obj);
-}
-});
-await (table.schema.primKey.keyPath == null
-? table.bulkPut(resultObjs, resultKeys)
-: table.bulkPut(resultObjs));
-}
-
-async function applyServerChanges(changes, db) {
-console.debug('Applying server changes', changes, Dexie.currentTransaction);
-for (const { table: tableName, muts } of changes) {
-const table = db.table(tableName);
-if (!table)
-continue; // If server sends changes on a table we don't have, ignore it.
-const { primaryKey } = table.core.schema;
-const keyDecoder = (key) => {
-switch (key[0]) {
-case '[':
-// Decode JSON array
-if (key.endsWith(']'))
-try {
-// On server, array keys are transformed to JSON string representation
-return JSON.parse(key);
+function bulkUpdate(table, keys, changeSpecs) {
+return __awaiter$1(this, void 0, void 0, function* () {
+const objs = yield table.bulkGet(keys);
+const resultKeys = [];
+const resultObjs = [];
+keys.forEach((key, idx) => {
+const obj = objs[idx];
+if (obj) {
+for (const [keyPath, value] of Object.entries(changeSpecs[idx])) {
+if (keyPath === table.schema.primKey.keyPath) {
+if (cmp(value, key) !== 0) {
+throw new Error(`Cannot change primary key`);
 }
-catch { }
-return key;
-case '#':
-// Decode private ID (do the opposite from what's done in encodeIdsForServer())
-if (key.endsWith(':' + db.cloud.currentUserId)) {
-return key.substr(0, key.length - db.cloud.currentUserId.length - 1);
-}
-return key;
-default:
-return key;
-}
-};
-for (const mut of muts) {
-const keys = mut.keys.map(keyDecoder);
-switch (mut.type) {
-case 'insert':
-if (primaryKey.outbound) {
-await table.bulkAdd(mut.values, keys);
-}
-else {
-keys.forEach((key, i) => {
-// Make sure inbound keys are consistent
-Dexie.setByKeyPath(mut.values[i], primaryKey.keyPath, key);
-});
-await table.bulkAdd(mut.values);
-}
-break;
-case 'upsert':
-if (primaryKey.outbound) {
-await table.bulkPut(mut.values, keys);
-}
-else {
-keys.forEach((key, i) => {
-// Make sure inbound keys are consistent
-Dexie.setByKeyPath(mut.values[i], primaryKey.keyPath, key);
-});
-await table.bulkPut(mut.values);
-}
-break;
-case 'modify':
-if (keys.length === 1) {
-await table.update(keys[0], mut.changeSpec);
 }
 else {
-
+Dexie.setByKeyPath(obj, keyPath, value);
 }
-
-
-
-break;
-case 'delete':
-await table.bulkDelete(keys);
-break;
+}
+resultKeys.push(key);
+resultObjs.push(obj);
 }
-}
-
+});
+yield (table.schema.primKey.keyPath == null
+? table.bulkPut(resultObjs, resultKeys)
+: table.bulkPut(resultObjs));
+});
 }

-
-
-
-
-
-
-
+function applyServerChanges(changes, db) {
+return __awaiter$1(this, void 0, void 0, function* () {
+console.debug('Applying server changes', changes, Dexie.currentTransaction);
+for (const { table: tableName, muts } of changes) {
+const table = db.table(tableName);
+if (!table)
+continue; // If server sends changes on a table we don't have, ignore it.
+const { primaryKey } = table.core.schema;
+const keyDecoder = (key) => {
+switch (key[0]) {
+case '[':
+// Decode JSON array
+if (key.endsWith(']'))
+try {
+// On server, array keys are transformed to JSON string representation
+return JSON.parse(key);
+}
+catch (_a) { }
+return key;
+case '#':
+// Decode private ID (do the opposite from what's done in encodeIdsForServer())
+if (key.endsWith(':' + db.cloud.currentUserId)) {
+return key.substr(0, key.length - db.cloud.currentUserId.length - 1);
+}
+return key;
+default:
+return key;
+}
+};
+for (const mut of muts) {
+const keys = mut.keys.map(keyDecoder);
+switch (mut.type) {
+case 'insert':
+if (primaryKey.outbound) {
+yield table.bulkAdd(mut.values, keys);
+}
+else {
+keys.forEach((key, i) => {
+// Make sure inbound keys are consistent
+Dexie.setByKeyPath(mut.values[i], primaryKey.keyPath, key);
+});
+yield table.bulkAdd(mut.values);
+}
+break;
+case 'upsert':
+if (primaryKey.outbound) {
+yield table.bulkPut(mut.values, keys);
+}
+else {
+keys.forEach((key, i) => {
+// Make sure inbound keys are consistent
+Dexie.setByKeyPath(mut.values[i], primaryKey.keyPath, key);
+});
+yield table.bulkPut(mut.values);
+}
+break;
+case 'modify':
+if (keys.length === 1) {
+yield table.update(keys[0], mut.changeSpec);
+}
+else {
+yield table.where(':id').anyOf(keys).modify(mut.changeSpec);
+}
+break;
+case 'update':
+yield bulkUpdate(table, keys, mut.changeSpecs);
+break;
+case 'delete':
+yield table.bulkDelete(keys);
+break;
+}
+}
+}
+});
+}
+
+const CURRENT_SYNC_WORKER = 'currentSyncWorker';
+function sync(db, options, schema, syncOptions) {
+return _sync
+.apply(this, arguments)
+.then(() => {
+if (!(syncOptions === null || syncOptions === void 0 ? void 0 : syncOptions.justCheckIfNeeded)) {
+db.syncStateChangedEvent.next({
 phase: 'in-sync',
 });
 }
 })
-.catch(
-if (syncOptions
+.catch((error) => __awaiter$1(this, void 0, void 0, function* () {
+if (syncOptions === null || syncOptions === void 0 ? void 0 : syncOptions.justCheckIfNeeded)
 return Promise.reject(error); // Just rethrow.
 console.debug('Error from _sync', {
 isOnline,
@@ -3278,23 +3324,20 @@ function sync(db, options, schema, syncOptions) {
 error,
 });
 if (isOnline &&
-syncOptions
-error
-/fetch/.test(error
+(syncOptions === null || syncOptions === void 0 ? void 0 : syncOptions.retryImmediatelyOnFetchError) &&
+(error === null || error === void 0 ? void 0 : error.name) === 'TypeError' &&
+/fetch/.test(error === null || error === void 0 ? void 0 : error.message)) {
 db.syncStateChangedEvent.next({
 phase: 'error',
 error,
 });
 // Retry again in 500 ms but if it fails again, don't retry.
-
-return
-...syncOptions,
-retryImmediatelyOnFetchError: false,
-});
+yield new Promise((resolve) => setTimeout(resolve, 500));
+return yield sync(db, options, schema, Object.assign(Object.assign({}, syncOptions), { retryImmediatelyOnFetchError: false }));
 }
 // Make sure that no matter whether sync() explodes or not,
 // always update the timestamp. Also store the error.
-
+yield db.$syncState.update('syncState', {
 timestamp: new Date(),
 error: '' + error,
 });
@@ -3303,234 +3346,239 @@ function sync(db, options, schema, syncOptions) {
 error,
 });
 return Promise.reject(error);
-});
+}));
 }
-
+function _sync(db, options, schema, { isInitialSync, cancelToken, justCheckIfNeeded, purpose } = {
 isInitialSync: false,
 }) {
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-if (doSyncify) {
-if (justCheckIfNeeded)
-return true;
-//console.debug('sync doSyncify is true');
-await db.transaction('rw', tablesToSyncify, async (tx) => {
-// @ts-ignore
-tx.idbtrans.disableChangeTracking = true;
-// @ts-ignore
-tx.idbtrans.disableAccessControl = true; // TODO: Take care of this flag in access control middleware!
-await modifyLocalObjectsWithNewUserId(tablesToSyncify, currentUser, persistedSyncState?.realms);
-});
-throwIfCancelled(cancelToken);
-}
-//
-// List changes to sync
-//
-const [clientChangeSet, syncState, baseRevs] = await db.transaction('r', db.tables, async () => {
-const syncState = await db.getPersistedSyncState();
-const baseRevs = await db.$baseRevs.toArray();
-let clientChanges = await listClientChanges(mutationTables);
+var _a;
+return __awaiter$1(this, void 0, void 0, function* () {
+if (!justCheckIfNeeded) {
+console.debug('SYNC STARTED', { isInitialSync, purpose });
+}
+if (!((_a = db.cloud.options) === null || _a === void 0 ? void 0 : _a.databaseUrl))
+throw new Error(`Internal error: sync must not be called when no databaseUrl is configured`);
+const { databaseUrl } = options;
+const currentUser = yield db.getCurrentUser(); // Keep same value across entire sync flow:
+const tablesToSync = currentUser.isLoggedIn ? getSyncableTables(db) : [];
+const mutationTables = tablesToSync.map((tbl) => db.table(getMutationTable(tbl.name)));
+// If this is not the initial sync,
+// go through tables that were previously not synced but should now be according to
+// logged in state and the sync table whitelist in db.cloud.options.
+//
+// Prepare for syncification by modifying locally unauthorized objects:
+//
+const persistedSyncState = yield db.getPersistedSyncState();
+const tablesToSyncify = !isInitialSync && currentUser.isLoggedIn
+? getTablesToSyncify(db, persistedSyncState)
+: [];
 throwIfCancelled(cancelToken);
+const doSyncify = tablesToSyncify.length > 0;
 if (doSyncify) {
-
-
-
-
-
+if (justCheckIfNeeded)
+return true;
+//console.debug('sync doSyncify is true');
+yield db.transaction('rw', tablesToSyncify, (tx) => __awaiter$1(this, void 0, void 0, function* () {
+// @ts-ignore
+tx.idbtrans.disableChangeTracking = true;
+// @ts-ignore
+tx.idbtrans.disableAccessControl = true; // TODO: Take care of this flag in access control middleware!
+yield modifyLocalObjectsWithNewUserId(tablesToSyncify, currentUser, persistedSyncState === null || persistedSyncState === void 0 ? void 0 : persistedSyncState.realms);
+}));
 throwIfCancelled(cancelToken);
-clientChanges = clientChanges.concat(syncificationInserts);
-return [clientChanges, syncState, baseRevs];
 }
-return [clientChanges, syncState, baseRevs];
-});
-const syncIsNeeded = clientChangeSet.some((set) => set.muts.some((mut) => mut.keys.length > 0));
-if (justCheckIfNeeded) {
-console.debug('Sync is needed:', syncIsNeeded);
-return syncIsNeeded;
-}
-if (purpose === 'push' && !syncIsNeeded) {
-// The purpose of this request was to push changes
-return false;
-}
-const latestRevisions = getLatestRevisionsPerTable(clientChangeSet, syncState?.latestRevisions);
-const clientIdentity = syncState?.clientIdentity || randomString(16);
-//
-// Push changes to server
-//
-throwIfCancelled(cancelToken);
-const res = await syncWithServer(clientChangeSet, syncState, baseRevs, db, databaseUrl, schema, clientIdentity, currentUser);
-console.debug('Sync response', res);
-//
-// Apply changes locally and clear old change entries:
-//
-const done = await db.transaction('rw', db.tables, async (tx) => {
-// @ts-ignore
-tx.idbtrans.disableChangeTracking = true;
-// @ts-ignore
-tx.idbtrans.disableAccessControl = true; // TODO: Take care of this flag in access control middleware!
-// Update db.cloud.schema from server response.
-// Local schema MAY include a subset of tables, so do not force all tables into local schema.
-for (const tableName of Object.keys(schema)) {
-if (res.schema[tableName]) {
-// Write directly into configured schema. This code can only be executed alone.
-schema[tableName] = res.schema[tableName];
-}
-}
-await db.$syncState.put(schema, 'schema');
-// List mutations that happened during our exchange with the server:
-const addedClientChanges = await listClientChanges(mutationTables, db, {
-since: latestRevisions,
-});
 //
-//
-// (but keep changes that haven't reached server yet)
+// List changes to sync
 //
-
-const
-
-
-
-
-
-
-
-
-
-
-
-await Promise.all([
-mutTable.where('rev').belowOrEqual(latestRev).delete(),
-db.$baseRevs
-.where(':id')
-.between([tableName, -Infinity], [tableName, latestRev + 1], true, true)
-.reverse()
-.offset(1) // Keep one entry (the one mapping muts that came during fetch --> previous server revision)
-.delete(),
-]);
+const [clientChangeSet, syncState, baseRevs] = yield db.transaction('r', db.tables, () => __awaiter$1(this, void 0, void 0, function* () {
+const syncState = yield db.getPersistedSyncState();
+const baseRevs = yield db.$baseRevs.toArray();
+let clientChanges = yield listClientChanges(mutationTables);
+throwIfCancelled(cancelToken);
+if (doSyncify) {
+const alreadySyncedRealms = [
+...((persistedSyncState === null || persistedSyncState === void 0 ? void 0 : persistedSyncState.realms) || []),
+...((persistedSyncState === null || persistedSyncState === void 0 ? void 0 : persistedSyncState.inviteRealms) || []),
+];
+const syncificationInserts = yield listSyncifiedChanges(tablesToSyncify, currentUser, schema, alreadySyncedRealms);
+throwIfCancelled(cancelToken);
+clientChanges = clientChanges.concat(syncificationInserts);
+return [clientChanges, syncState, baseRevs];
 }
-
-}
-
-
-
-
-
-
-
-
-
-const syncState = await db.getPersistedSyncState();
-//
-// Delete objects from removed realms
-//
-await deleteObjectsFromRemovedRealms(db, res, syncState);
-//
-// Update syncState
-//
-const newSyncState = syncState || {
-syncedTables: [],
-latestRevisions: {},
-realms: [],
-inviteRealms: [],
-clientIdentity,
-};
-newSyncState.syncedTables = tablesToSync
-.map((tbl) => tbl.name)
-.concat(tablesToSyncify.map((tbl) => tbl.name));
-newSyncState.latestRevisions = latestRevisions;
-newSyncState.remoteDbId = res.dbId;
-newSyncState.initiallySynced = true;
-newSyncState.realms = res.realms;
-newSyncState.inviteRealms = res.inviteRealms;
-newSyncState.serverRevision = res.serverRevision;
-newSyncState.timestamp = new Date();
-delete newSyncState.error;
-const filteredChanges = filterServerChangesThroughAddedClientChanges(res.changes, addedClientChanges);
+return [clientChanges, syncState, baseRevs];
+}));
+const syncIsNeeded = clientChangeSet.some((set) => set.muts.some((mut) => mut.keys.length > 0));
+if (justCheckIfNeeded) {
+console.debug('Sync is needed:', syncIsNeeded);
+return syncIsNeeded;
+}
+if (purpose === 'push' && !syncIsNeeded) {
+// The purpose of this request was to push changes
+return false;
+}
+const latestRevisions = getLatestRevisionsPerTable(clientChangeSet, syncState === null || syncState === void 0 ? void 0 : syncState.latestRevisions);
+const clientIdentity = (syncState === null || syncState === void 0 ? void 0 : syncState.clientIdentity) || randomString(16);
 //
-//
+// Push changes to server
 //
-
+throwIfCancelled(cancelToken);
+const res = yield syncWithServer(clientChangeSet, syncState, baseRevs, db, databaseUrl, schema, clientIdentity, currentUser);
+console.debug('Sync response', res);
 //
-//
+// Apply changes locally and clear old change entries:
 //
-db
-
+const done = yield db.transaction('rw', db.tables, (tx) => __awaiter$1(this, void 0, void 0, function* () {
+// @ts-ignore
+tx.idbtrans.disableChangeTracking = true;
+// @ts-ignore
+tx.idbtrans.disableAccessControl = true; // TODO: Take care of this flag in access control middleware!
+// Update db.cloud.schema from server response.
+// Local schema MAY include a subset of tables, so do not force all tables into local schema.
+for (const tableName of Object.keys(schema)) {
+if (res.schema[tableName]) {
+// Write directly into configured schema. This code can only be executed alone.
+schema[tableName] = res.schema[tableName];
|
+
}
|
|
3442
|
+
}
|
|
3443
|
+
yield db.$syncState.put(schema, 'schema');
|
|
3444
|
+
// List mutations that happened during our exchange with the server:
|
|
3445
|
+
const addedClientChanges = yield listClientChanges(mutationTables, db, {
|
|
3446
|
+
since: latestRevisions,
|
|
3447
|
+
});
|
|
3448
|
+
//
|
|
3449
|
+
// Delete changes now as server has return success
|
|
3450
|
+
// (but keep changes that haven't reached server yet)
|
|
3451
|
+
//
|
|
3452
|
+
for (const mutTable of mutationTables) {
|
|
3453
|
+
const tableName = getTableFromMutationTable(mutTable.name);
|
|
3454
|
+
if (!addedClientChanges.some((ch) => ch.table === tableName && ch.muts.length > 0)) {
|
|
3455
|
+
// No added mutations for this table during the time we sent changes
|
|
3456
|
+
// to the server.
|
|
3457
|
+
// It is therefore safe to clear all changes (which is faster than
|
|
3458
|
+
// deleting a range)
|
|
3459
|
+
yield Promise.all([
|
|
3460
|
+
mutTable.clear(),
|
|
3461
|
+
db.$baseRevs.where({ tableName }).delete(),
|
|
3462
|
+
]);
|
|
3463
|
+
}
|
|
3464
|
+
else if (latestRevisions[tableName]) {
|
|
3465
|
+
const latestRev = latestRevisions[tableName] || 0;
|
|
3466
|
+
yield Promise.all([
|
|
3467
|
+
mutTable.where('rev').belowOrEqual(latestRev).delete(),
|
|
3468
|
+
db.$baseRevs
|
|
3469
|
+
.where(':id')
|
|
3470
|
+
.between([tableName, -Infinity], [tableName, latestRev + 1], true, true)
|
|
3471
|
+
.reverse()
|
|
3472
|
+
.offset(1) // Keep one entry (the one mapping muts that came during fetch --> previous server revision)
|
|
3473
|
+
.delete(),
|
|
3474
|
+
]);
|
|
3475
|
+
}
|
|
3476
|
+
else ;
|
|
3477
|
+
}
|
|
3478
|
+
// Update latestRevisions object according to additional changes:
|
|
3479
|
+
getLatestRevisionsPerTable(addedClientChanges, latestRevisions);
|
|
3480
|
+
// Update/add new entries into baseRevs map.
|
|
3481
|
+
// * On tables without mutations since last serverRevision,
|
|
3482
|
+
// this will update existing entry.
|
|
3483
|
+
// * On tables where mutations have been recorded since last
|
|
3484
|
+
// serverRevision, this will create a new entry.
|
|
3485
|
+
// The purpose of this operation is to mark a start revision (per table)
|
|
3486
|
+
// so that all client-mutations that come after this, will be mapped to current
|
|
3487
|
+
// server revision.
|
|
3488
|
+
yield updateBaseRevs(db, schema, latestRevisions, res.serverRevision);
|
|
3489
|
+
const syncState = yield db.getPersistedSyncState();
|
|
3490
|
+
//
|
|
3491
|
+
// Delete objects from removed realms
|
|
3492
|
+
//
|
|
3493
|
+
yield deleteObjectsFromRemovedRealms(db, res, syncState);
|
|
3494
|
+
//
|
|
3495
|
+
// Update syncState
|
|
3496
|
+
//
|
|
3497
|
+
const newSyncState = syncState || {
|
|
3498
|
+
syncedTables: [],
|
|
3499
|
+
latestRevisions: {},
|
|
3500
|
+
realms: [],
|
|
3501
|
+
inviteRealms: [],
|
|
3502
|
+
clientIdentity,
|
|
3503
|
+
};
|
|
3504
|
+
newSyncState.syncedTables = tablesToSync
|
|
3505
|
+
.map((tbl) => tbl.name)
|
|
3506
|
+
.concat(tablesToSyncify.map((tbl) => tbl.name));
|
|
3507
|
+
newSyncState.latestRevisions = latestRevisions;
|
|
3508
|
+
newSyncState.remoteDbId = res.dbId;
|
|
3509
|
+
newSyncState.initiallySynced = true;
|
|
3510
|
+
newSyncState.realms = res.realms;
|
|
3511
|
+
newSyncState.inviteRealms = res.inviteRealms;
|
|
3512
|
+
newSyncState.serverRevision = res.serverRevision;
|
|
3513
|
+
newSyncState.timestamp = new Date();
|
|
3514
|
+
delete newSyncState.error;
|
|
3515
|
+
const filteredChanges = filterServerChangesThroughAddedClientChanges(res.changes, addedClientChanges);
|
|
3516
|
+
//
|
|
3517
|
+
// apply server changes
|
|
3518
|
+
//
|
|
3519
|
+
yield applyServerChanges(filteredChanges, db);
|
|
3520
|
+
//
|
|
3521
|
+
// Update syncState
|
|
3522
|
+
//
|
|
3523
|
+
db.$syncState.put(newSyncState, 'syncState');
|
|
3524
|
+
return addedClientChanges.length === 0;
|
|
3525
|
+
}));
|
|
3526
|
+
if (!done) {
|
|
3527
|
+
console.debug('MORE SYNC NEEDED. Go for it again!');
|
|
3528
|
+
return yield _sync(db, options, schema, { isInitialSync, cancelToken });
|
|
3529
|
+
}
|
|
3530
|
+
console.debug('SYNC DONE', { isInitialSync });
|
|
3531
|
+
return false; // Not needed anymore
|
|
3480
3532
|
});
|
|
3481
|
-
|
|
3482
|
-
|
|
3483
|
-
|
|
3484
|
-
|
|
3485
|
-
|
|
3486
|
-
|
|
3487
|
-
|
|
3488
|
-
|
|
3489
|
-
|
|
3490
|
-
|
|
3491
|
-
|
|
3492
|
-
|
|
3493
|
-
|
|
3494
|
-
|
|
3495
|
-
|
|
3496
|
-
|
|
3497
|
-
|
|
3533
|
+
}
|
|
3534
|
+
function deleteObjectsFromRemovedRealms(db, res, prevState) {
|
|
3535
|
+
return __awaiter$1(this, void 0, void 0, function* () {
|
|
3536
|
+
const deletedRealms = new Set();
|
|
3537
|
+
const rejectedRealms = new Set();
|
|
3538
|
+
const previousRealmSet = prevState ? prevState.realms : [];
|
|
3539
|
+
const previousInviteRealmSet = prevState ? prevState.inviteRealms : [];
|
|
3540
|
+
const updatedRealmSet = new Set(res.realms);
|
|
3541
|
+
const updatedTotalRealmSet = new Set(res.realms.concat(res.inviteRealms));
|
|
3542
|
+
for (const realmId of previousRealmSet) {
|
|
3543
|
+
if (!updatedRealmSet.has(realmId)) {
|
|
3544
|
+
rejectedRealms.add(realmId);
|
|
3545
|
+
if (!updatedTotalRealmSet.has(realmId)) {
|
|
3546
|
+
deletedRealms.add(realmId);
|
|
3547
|
+
}
|
|
3548
|
+
}
|
|
3549
|
+
}
|
|
3550
|
+
for (const realmId of previousInviteRealmSet.concat(previousRealmSet)) {
|
|
3498
3551
|
if (!updatedTotalRealmSet.has(realmId)) {
|
|
3499
3552
|
deletedRealms.add(realmId);
|
|
3500
3553
|
}
|
|
3501
3554
|
}
|
|
3502
|
-
|
|
3503
|
-
|
|
3504
|
-
|
|
3505
|
-
|
|
3506
|
-
|
|
3507
|
-
|
|
3508
|
-
|
|
3509
|
-
|
|
3510
|
-
|
|
3511
|
-
|
|
3512
|
-
|
|
3513
|
-
|
|
3514
|
-
|
|
3515
|
-
|
|
3516
|
-
|
|
3517
|
-
|
|
3518
|
-
|
|
3519
|
-
|
|
3520
|
-
|
|
3521
|
-
.where(
|
|
3522
|
-
|
|
3523
|
-
|
|
3524
|
-
|
|
3525
|
-
|
|
3526
|
-
// No index to use:
|
|
3527
|
-
//console.debug(`REMOVAL: deleting all ${table.name} where realmId is any of `, JSON.stringify([...realmsToDelete]), realmsToDelete.size);
|
|
3528
|
-
await table
|
|
3529
|
-
.filter((obj) => !!obj?.realmId && realmsToDelete.has(obj.realmId))
|
|
3530
|
-
.delete();
|
|
3555
|
+
if (deletedRealms.size > 0 || rejectedRealms.size > 0) {
|
|
3556
|
+
const tables = getSyncableTables(db);
|
|
3557
|
+
for (const table of tables) {
|
|
3558
|
+
let realmsToDelete = ['realms', 'members', 'roles'].includes(table.name)
|
|
3559
|
+
? deletedRealms // These tables should spare rejected ones.
|
|
3560
|
+
: rejectedRealms; // All other tables shoudl delete rejected+deleted ones
|
|
3561
|
+
if (realmsToDelete.size === 0)
|
|
3562
|
+
continue;
|
|
3563
|
+
if (table.schema.indexes.some((idx) => idx.keyPath === 'realmId' ||
|
|
3564
|
+
(Array.isArray(idx.keyPath) && idx.keyPath[0] === 'realmId'))) {
|
|
3565
|
+
// There's an index to use:
|
|
3566
|
+
//console.debug(`REMOVAL: deleting all ${table.name} where realmId anyOf `, JSON.stringify([...realmsToDelete]));
|
|
3567
|
+
yield table
|
|
3568
|
+
.where('realmId')
|
|
3569
|
+
.anyOf([...realmsToDelete])
|
|
3570
|
+
.delete();
|
|
3571
|
+
}
|
|
3572
|
+
else {
|
|
3573
|
+
// No index to use:
|
|
3574
|
+
//console.debug(`REMOVAL: deleting all ${table.name} where realmId is any of `, JSON.stringify([...realmsToDelete]), realmsToDelete.size);
|
|
3575
|
+
yield table
|
|
3576
|
+
.filter((obj) => !!(obj === null || obj === void 0 ? void 0 : obj.realmId) && realmsToDelete.has(obj.realmId))
|
|
3577
|
+
.delete();
|
|
3578
|
+
}
|
|
3531
3579
|
}
|
|
3532
3580
|
}
|
|
3533
|
-
}
|
|
3581
|
+
});
|
|
3534
3582
|
}
|
|
3535
3583
|
function filterServerChangesThroughAddedClientChanges(serverChanges, addedClientChanges) {
|
|
3536
3584
|
const changes = {};
|
|
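
Most of what this hunk shows is not a behavioral change but the result of compiling the bundle for a lower JavaScript target: native async/await from beta.17 is replaced by the __awaiter$1 helper with generator functions, and optional chaining becomes explicit null checks. A minimal sketch of that correspondence, using a hypothetical function name rather than code from the bundle:

// beta.17-style output: native async/await
async function loadState(db) {
  const state = await db.getPersistedSyncState();
  return state || null;
}

// beta.18-style output: the same logic downleveled to the helper pattern
function loadState(db) {
  return __awaiter$1(this, void 0, void 0, function* () {
    const state = yield db.getPersistedSyncState();
    return state || null;
  });
}
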
@@ -3548,7 +3596,7 @@ function MessagesFromServerConsumer(db) {
  let isWorking = false;
  let loopWarning = 0;
  let loopDetection = [0, 0, 0, 0, 0, 0, 0, 0, 0, Date.now()];
- event.subscribe( […]
+ event.subscribe(() => __awaiter$1(this, void 0, void 0, function* () {
  if (isWorking)
  return;
  if (queue.length > 0) {
@@ -3557,7 +3605,7 @@ function MessagesFromServerConsumer(db) {
  loopDetection.push(Date.now());
  readyToServe.next(false);
  try {
- […]
+ yield consumeQueue();
  }
  finally {
  if (loopDetection[loopDetection.length - 1] - loopDetection[0] <
@@ -3567,170 +3615,173 @@ function MessagesFromServerConsumer(db) {
  // Last time we did this, we ended up here too. Wait for a minute.
  console.warn(`Slowing down websocket loop for one minute`);
  loopWarning = Date.now() + 60000;
- […]
+ yield new Promise((resolve) => setTimeout(resolve, 60000));
  }
  else {
  // This is a one-time event. Just pause 10 seconds.
  console.warn(`Slowing down websocket loop for 10 seconds`);
  loopWarning = Date.now() + 10000;
- […]
+ yield new Promise((resolve) => setTimeout(resolve, 10000));
  }
  }
  isWorking = false;
  readyToServe.next(true);
  }
  }
- });
+ }));
  function enqueue(msg) {
  queue.push(msg);
  event.next(null);
  }
- […]
- . […]
- […]
- break;
- case 'realm-accepted':
- //if (!persistedSyncState?.realms?.includes(msg.realm)) {
- triggerSync(db, 'pull');
- //}
- break;
- case 'realm-removed':
- //if (
- persistedSyncState?.realms?.includes(msg.realm) ||
- persistedSyncState?.inviteRealms?.includes(msg.realm);
- //) {
- triggerSync(db, 'pull');
- //}
- break;
- case 'realms-changed':
- triggerSync(db, 'pull');
- break;
- case 'changes':
- console.debug('changes');
- if (db.cloud.syncState.value?.phase === 'error') {
+ function consumeQueue() {
+ var _a, _b, _c;
+ return __awaiter$1(this, void 0, void 0, function* () {
+ while (queue.length > 0) {
+ const msg = queue.shift();
+ try {
+ // If the sync worker or service worker is syncing, wait 'til thei're done.
+ // It's no need to have two channels at the same time - even though it wouldnt
+ // be a problem - this is an optimization.
+ yield db.cloud.syncState
+ .pipe(filter(({ phase }) => phase === 'in-sync' || phase === 'error'), take(1))
+ .toPromise();
+ console.debug('processing msg', msg);
+ const persistedSyncState = db.cloud.persistedSyncState.value;
+ //syncState.
+ if (!msg)
+ continue;
+ switch (msg.type) {
+ case 'token-expired':
+ console.debug('WebSocket observable: Token expired. Refreshing token...');
+ const user = db.cloud.currentUser.value;
+ // Refresh access token
+ const refreshedLogin = yield refreshAccessToken(db.cloud.options.databaseUrl, user);
+ // Persist updated access token
+ yield db.table('$logins').update(user.userId, {
+ accessToken: refreshedLogin.accessToken,
+ accessTokenExpiration: refreshedLogin.accessTokenExpiration,
+ });
+ // Updating $logins will trigger emission of db.cloud.currentUser observable, which
+ // in turn will lead to that connectWebSocket.ts will reconnect the socket with the
+ // new token. So we don't need to do anything more here.
+ break;
+ case 'realm-added':
+ //if (!persistedSyncState?.realms?.includes(msg.realm) && !persistedSyncState?.inviteRealms?.includes(msg.realm)) {
  triggerSync(db, 'pull');
+ //}
  break;
- […]
+ case 'realm-accepted':
+ //if (!persistedSyncState?.realms?.includes(msg.realm)) {
+ triggerSync(db, 'pull');
+ //}
+ break;
+ case 'realm-removed':
+ //if (
+ ((_a = persistedSyncState === null || persistedSyncState === void 0 ? void 0 : persistedSyncState.realms) === null || _a === void 0 ? void 0 : _a.includes(msg.realm)) ||
+ ((_b = persistedSyncState === null || persistedSyncState === void 0 ? void 0 : persistedSyncState.inviteRealms) === null || _b === void 0 ? void 0 : _b.includes(msg.realm));
+ //) {
+ triggerSync(db, 'pull');
+ //}
+ break;
+ case 'realms-changed':
+ triggerSync(db, 'pull');
+ break;
+ case 'changes':
+ console.debug('changes');
+ if (((_c = db.cloud.syncState.value) === null || _c === void 0 ? void 0 : _c.phase) === 'error') {
+ triggerSync(db, 'pull');
+ break;
  }
- […]
- //
- […]
+ yield db.transaction('rw', db.dx.tables, (tx) => __awaiter$1(this, void 0, void 0, function* () {
+ // @ts-ignore
+ tx.idbtrans.disableChangeTracking = true;
+ // @ts-ignore
+ tx.idbtrans.disableAccessControl = true;
+ const [schema, syncState, currentUser] = yield Promise.all([
+ db.getSchema(),
+ db.getPersistedSyncState(),
+ db.getCurrentUser(),
+ ]);
+ console.debug('ws message queue: in transaction');
+ if (!syncState || !schema || !currentUser) {
+ console.debug('required vars not present', {
+ syncState,
+ schema,
+ currentUser,
+ });
+ return; // Initial sync must have taken place - otherwise, ignore this.
+ }
+ // Verify again in ACID tx that we're on same server revision.
+ if (msg.baseRev !== syncState.serverRevision) {
+ console.debug(`baseRev (${msg.baseRev}) differs from our serverRevision in syncState (${syncState.serverRevision})`);
+ // Should we trigger a sync now? No. This is a normal case
+ // when another local peer (such as the SW or a websocket channel on other tab) has
+ // updated syncState from new server information but we are not aware yet. It would
+ // be unnescessary to do a sync in that case. Instead, the caller of this consumeQueue()
+ // function will do readyToServe.next(true) right after this return, which will lead
+ // to a "ready" message being sent to server with the new accurate serverRev we have,
+ // so that the next message indeed will be correct.
+ if (typeof msg.baseRev === 'string' && // v2 format
+ (typeof syncState.serverRevision === 'bigint' || // v1 format
+ typeof syncState.serverRevision === 'object') // v1 format old browser
+ ) {
+ // The reason for the diff seems to be that server has migrated the revision format.
+ // Do a full sync to update revision format.
+ // If we don't do a sync request now, we could stuck in an endless loop.
+ triggerSync(db, 'pull');
+ }
+ return; // Ignore message
+ }
+ // Verify also that the message is based on the exact same set of realms
+ const ourRealmSetHash = yield Dexie.waitFor(
+ // Keep TX in non-IDB work
+ computeRealmSetHash(syncState));
+ console.debug('ourRealmSetHash', ourRealmSetHash);
+ if (ourRealmSetHash !== msg.realmSetHash) {
+ console.debug('not same realmSetHash', msg.realmSetHash);
  triggerSync(db, 'pull');
+ // The message isn't based on the same realms.
+ // Trigger a sync instead to resolve all things up.
+ return;
  }
- […]
+ // Get clientChanges
+ let clientChanges = [];
+ if (currentUser.isLoggedIn) {
+ const mutationTables = getSyncableTables(db).map((tbl) => db.table(getMutationTable(tbl.name)));
+ clientChanges = yield listClientChanges(mutationTables, db);
+ console.debug('msg queue: client changes', clientChanges);
+ }
+ if (msg.changes.length > 0) {
+ const filteredChanges = filterServerChangesThroughAddedClientChanges(msg.changes, clientChanges);
+ //
+ // apply server changes
+ //
+ console.debug('applying filtered server changes', filteredChanges);
+ yield applyServerChanges(filteredChanges, db);
+ }
+ // Update latest revisions per table in case there are unsynced changes
+ // This can be a real case in future when we allow non-eagery sync.
+ // And it can actually be realistic now also, but very rare.
+ syncState.latestRevisions = getLatestRevisionsPerTable(clientChanges, syncState.latestRevisions);
+ syncState.serverRevision = msg.newRev;
+ // Update base revs
+ console.debug('Updating baseRefs', syncState.latestRevisions);
+ yield updateBaseRevs(db, schema, syncState.latestRevisions, msg.newRev);
  //
- //
+ // Update syncState
  //
- console.debug(' […]
- […]
- }
- […]
- console.debug('Updating baseRefs', syncState.latestRevisions);
- await updateBaseRevs(db, schema, syncState.latestRevisions, msg.newRev);
- //
- // Update syncState
- //
- console.debug('Updating syncState', syncState);
- await db.$syncState.put(syncState, 'syncState');
- });
- console.debug('msg queue: done with rw transaction');
- break;
+ console.debug('Updating syncState', syncState);
+ yield db.$syncState.put(syncState, 'syncState');
+ }));
+ console.debug('msg queue: done with rw transaction');
+ break;
+ }
+ }
+ catch (error) {
+ console.error(`Error in msg queue`, error);
  }
  }
- […]
- console.error(`Error in msg queue`, error);
- }
- }
+ });
  }
  return {
  enqueue,
@@ -3849,100 +3900,105 @@ class AuthPersistedContext {
  lastLogin: new Date(0)
  }));
  }
- […]
+ save() {
+ return __awaiter$1(this, void 0, void 0, function* () {
+ const db = wm.get(this);
+ db.table("$logins").put(this);
+ });
  }
  }

  function otpFetchTokenCallback(db) {
  const { userInteraction } = db.cloud;
- return […]
- […]
- demo_user,
- […]
- email,
- […]
- headers: { 'Content-Type': 'application/json', mode: 'cors' },
- });
- if (res1.status !== 200) {
- const errMsg = await res1.text();
- await alertUser(userInteraction, "Token request failed", {
- type: 'error',
- messageCode: 'GENERIC_ERROR',
- message: errMsg,
- messageParams: {}
- }).catch(() => { });
- throw new HttpError(res1, errMsg);
- }
- const response = await res1.json();
- if (response.type === 'tokens') {
- // Demo user request can get a "tokens" response right away
- return response;
- }
- else if (tokenRequest.grant_type === 'otp') {
- if (response.type !== 'otp-sent')
- throw new Error(`Unexpected response from ${url}/token`);
- const otp = await promptForOTP(userInteraction, tokenRequest.email);
- tokenRequest.otp = otp || '';
- tokenRequest.otp_id = response.otp_id;
- let res2 = await fetch(`${url}/token`, {
+ return function otpAuthenticate({ public_key, hints }) {
+ var _a;
+ return __awaiter$1(this, void 0, void 0, function* () {
+ let tokenRequest;
+ const url = (_a = db.cloud.options) === null || _a === void 0 ? void 0 : _a.databaseUrl;
+ if (!url)
+ throw new Error(`No database URL given.`);
+ if ((hints === null || hints === void 0 ? void 0 : hints.grant_type) === 'demo') {
+ const demo_user = yield promptForEmail(userInteraction, 'Enter a demo user email', (hints === null || hints === void 0 ? void 0 : hints.email) || (hints === null || hints === void 0 ? void 0 : hints.userId));
+ tokenRequest = {
+ demo_user,
+ grant_type: 'demo',
+ scopes: ['ACCESS_DB'],
+ public_key,
+ };
+ }
+ else {
+ const email = yield promptForEmail(userInteraction, 'Enter email address', hints === null || hints === void 0 ? void 0 : hints.email);
+ tokenRequest = {
+ email,
+ grant_type: 'otp',
+ scopes: ['ACCESS_DB'],
+ public_key,
+ };
+ }
+ const res1 = yield fetch(`${url}/token`, {
  body: JSON.stringify(tokenRequest),
  method: 'post',
- headers: { 'Content-Type': 'application/json' },
- mode: 'cors',
+ headers: { 'Content-Type': 'application/json', mode: 'cors' },
  });
- […]
- const […]
+ if (res1.status !== 200) {
+ const errMsg = yield res1.text();
+ yield alertUser(userInteraction, "Token request failed", {
  type: 'error',
- messageCode: ' […]
- message: […]
+ messageCode: 'GENERIC_ERROR',
+ message: errMsg,
  messageParams: {}
- });
- […]
+ }).catch(() => { });
+ throw new HttpError(res1, errMsg);
+ }
+ const response = yield res1.json();
+ if (response.type === 'tokens') {
+ // Demo user request can get a "tokens" response right away
+ return response;
+ }
+ else if (tokenRequest.grant_type === 'otp') {
+ if (response.type !== 'otp-sent')
+ throw new Error(`Unexpected response from ${url}/token`);
+ const otp = yield promptForOTP(userInteraction, tokenRequest.email);
+ tokenRequest.otp = otp || '';
+ tokenRequest.otp_id = response.otp_id;
+ let res2 = yield fetch(`${url}/token`, {
  body: JSON.stringify(tokenRequest),
  method: 'post',
  headers: { 'Content-Type': 'application/json' },
  mode: 'cors',
  });
+ while (res2.status === 401) {
+ const errorText = yield res2.text();
+ tokenRequest.otp = yield promptForOTP(userInteraction, tokenRequest.email, {
+ type: 'error',
+ messageCode: 'INVALID_OTP',
+ message: errorText,
+ messageParams: {}
+ });
+ res2 = yield fetch(`${url}/token`, {
+ body: JSON.stringify(tokenRequest),
+ method: 'post',
+ headers: { 'Content-Type': 'application/json' },
+ mode: 'cors',
+ });
+ }
+ if (res2.status !== 200) {
+ const errMsg = yield res2.text();
+ yield alertUser(userInteraction, "OTP Authentication Failed", {
+ type: 'error',
+ messageCode: 'GENERIC_ERROR',
+ message: errMsg,
+ messageParams: {}
+ }).catch(() => { });
+ throw new HttpError(res2, errMsg);
+ }
+ const response2 = yield res2.json();
+ return response2;
  }
- […]
- await alertUser(userInteraction, "OTP Authentication Failed", {
- type: 'error',
- messageCode: 'GENERIC_ERROR',
- message: errMsg,
- messageParams: {}
- }).catch(() => { });
- throw new HttpError(res2, errMsg);
+ else {
+ throw new Error(`Unexpected response from ${url}/token`);
  }
- […]
- return response2;
- }
- else {
- throw new Error(`Unexpected response from ${url}/token`);
- }
+ });
  };
  }
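
Beyond the helper rewrite, the new `otpAuthenticate` callback in this hunk adds a retry path: when the second `${url}/token` request answers with status 401, the user is re-prompted for the one-time password (with an `INVALID_OTP` message) and the request is retried, rather than failing immediately. A simplified sketch of that control flow in async/await form; `postToken` is a hypothetical stand-in for the repeated `fetch(`${url}/token`, …)` call in the bundle:

// Retry the OTP submission while the server rejects it with 401
let res = await postToken(tokenRequest);
while (res.status === 401) {
  tokenRequest.otp = await promptForOTP(userInteraction, tokenRequest.email, {
    type: 'error',
    messageCode: 'INVALID_OTP',
    message: await res.text(),
    messageParams: {}
  });
  res = await postToken(tokenRequest);
}
if (res.status !== 200) throw new HttpError(res, await res.text());
return await res.json();
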
@@ -3957,83 +4013,87 @@ function otpFetchTokenCallback(db) {
  * @param db
  * @param newUser
  */
- […]
+ function setCurrentUser(db, user) {
+ return __awaiter$1(this, void 0, void 0, function* () {
+ if (user.userId === db.cloud.currentUserId)
+ return; // Already this user.
+ const $logins = db.table('$logins');
+ yield db.transaction('rw', $logins, (tx) => __awaiter$1(this, void 0, void 0, function* () {
+ const existingLogins = yield $logins.toArray();
+ yield Promise.all(existingLogins
+ .filter((login) => login.userId !== user.userId && login.isLoggedIn)
+ .map((login) => {
+ login.isLoggedIn = false;
+ return $logins.put(login);
+ }));
+ user.isLoggedIn = true;
+ user.lastLogin = new Date();
+ yield user.save();
+ console.debug('Saved new user', user.email);
  }));
- […]
+ yield new Promise((resolve) => {
+ if (db.cloud.currentUserId === user.userId) {
+ resolve(null);
+ }
+ else {
+ const subscription = db.cloud.currentUser.subscribe((currentUser) => {
+ if (currentUser.userId === user.userId) {
+ subscription.unsubscribe();
+ resolve(null);
+ }
+ });
+ }
+ });
+ // TANKAR!!!!
+ // V: Service workern kommer inte ha tillgång till currentUserObservable om den inte istället härrör från ett liveQuery.
+ // V: Samma med andra windows.
+ // V: Så kanske göra om den till att häröra från liveQuery som läser $logins.orderBy('lastLogin').last().
+ // V: Då bara vara medveten om:
+ // V: En sån observable börjar hämta data vid första subscribe
+ // V: Vi har inget "inital value" men kan emulera det till att vara ANONYMOUS_USER
+ // V: Om requireAuth är true, så borde db.on(ready) hålla databasen stängd för alla utom denna observable.
+ // V: Om inte så behöver den inte blocka.
+ // Andra tankar:
+ // * Man kan inte byta användare när man är offline. Skulle gå att flytta realms till undanstuff-tabell vid user-change.
+ //   men troligen inte värt det.
+ // * Istället: sälj inte inte switch-user funktionalitet utan tala enbart om inloggat vs icke inloggat läge.
+ // * populate $logins med ANONYMOUS så att en påbörjad inloggning inte räknas, alternativt ha en boolean prop!
+ //   Kanske bäst ha en boolean prop!
+ // * Alternativ switch-user funktionalitet:
+ //   * DBCore gömmer data från realms man inte har tillgång till.
+ //   * Cursor impl behövs också då.
+ //   * Då blir det snabba user switch.
+ //   * claims-settet som skickas till servern blir summan av alla claims. Då måste servern stödja multipla tokens eller
+ //     att ens token är ett samlad.
  });
- […]
+ }
+
+ function login(db, hints) {
+ return __awaiter$1(this, void 0, void 0, function* () {
+ const currentUser = yield db.getCurrentUser();
+ if (currentUser.isLoggedIn) {
+ if (hints) {
+ if (hints.email && db.cloud.currentUser.value.email !== hints.email) {
+ throw new Error(`Must logout before changing user`);
  }
- […]
+ if (hints.userId && db.cloud.currentUserId !== hints.userId) {
+ throw new Error(`Must logout before changing user`);
+ }
+ }
+ // Already authenticated according to given hints.
+ return;
  }
+ const context = new AuthPersistedContext(db, {
+ claims: {},
+ lastLogin: new Date(0),
+ });
+ yield authenticate(db.cloud.options.databaseUrl, context, db.cloud.options.fetchTokens || otpFetchTokenCallback(db), db.cloud.userInteraction, hints);
+ yield context.save();
+ yield setCurrentUser(db, context);
+ // Make sure to resync as the new login will be authorized
+ // for new realms.
+ triggerSync(db, "pull");
  });
- // TANKAR!!!!
- // V: Service workern kommer inte ha tillgång till currentUserObservable om den inte istället härrör från ett liveQuery.
- // V: Samma med andra windows.
- // V: Så kanske göra om den till att häröra från liveQuery som läser $logins.orderBy('lastLogin').last().
- // V: Då bara vara medveten om:
- // V: En sån observable börjar hämta data vid första subscribe
- // V: Vi har inget "inital value" men kan emulera det till att vara ANONYMOUS_USER
- // V: Om requireAuth är true, så borde db.on(ready) hålla databasen stängd för alla utom denna observable.
- // V: Om inte så behöver den inte blocka.
- // Andra tankar:
- // * Man kan inte byta användare när man är offline. Skulle gå att flytta realms till undanstuff-tabell vid user-change.
- //   men troligen inte värt det.
- // * Istället: sälj inte inte switch-user funktionalitet utan tala enbart om inloggat vs icke inloggat läge.
- // * populate $logins med ANONYMOUS så att en påbörjad inloggning inte räknas, alternativt ha en boolean prop!
- //   Kanske bäst ha en boolean prop!
- // * Alternativ switch-user funktionalitet:
- //   * DBCore gömmer data från realms man inte har tillgång till.
- //   * Cursor impl behövs också då.
- //   * Då blir det snabba user switch.
- //   * claims-settet som skickas till servern blir summan av alla claims. Då måste servern stödja multipla tokens eller
- //     att ens token är ett samlad.
- }
-
- async function login(db, hints) {
- const currentUser = await db.getCurrentUser();
- if (currentUser.isLoggedIn) {
- if (hints) {
- if (hints.email && db.cloud.currentUser.value.email !== hints.email) {
- throw new Error(`Must logout before changing user`);
- }
- if (hints.userId && db.cloud.currentUserId !== hints.userId) {
- throw new Error(`Must logout before changing user`);
- }
- }
- // Already authenticated according to given hints.
- return;
- }
- const context = new AuthPersistedContext(db, {
- claims: {},
- lastLogin: new Date(0),
- });
- await authenticate(db.cloud.options.databaseUrl, context, db.cloud.options.fetchTokens || otpFetchTokenCallback(db), db.cloud.userInteraction, hints);
- await context.save();
- await setCurrentUser(db, context);
- // Make sure to resync as the new login will be authorized
- // for new realms.
- triggerSync(db, "pull");
  }

  // @ts-ignore
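
Apart from the helper rewrite, `setCurrentUser` in this hunk resolves only after the `db.cloud.currentUser` observable has actually emitted the newly persisted user, by subscribing and unsubscribing once a matching `userId` arrives. A compact sketch of that wait-for-emission pattern; the helper name `waitForUser` is illustrative, in the bundle the promise is constructed inline:

// Resolve once the currentUser observable reflects the given userId
function waitForUser(db, userId) {
  return new Promise((resolve) => {
    if (db.cloud.currentUserId === userId) return resolve(null);
    const subscription = db.cloud.currentUser.subscribe((u) => {
      if (u.userId === userId) {
        subscription.unsubscribe();
        resolve(null);
      }
    });
  });
}
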
@@ -4081,9 +4141,10 @@ function toStringTag(o) {
  return toString.call(o).slice(8, -1);
  }
  function getEffectiveKeys(primaryKey, req) {
+ var _a;
  if (req.type === 'delete')
  return req.keys;
- return req.keys […]
+ return ((_a = req.keys) === null || _a === void 0 ? void 0 : _a.slice()) || req.values.map(primaryKey.extractKey);
  }
  function applyToUpperBitFix(orig, bits) {
  return ((bits & 1 ? orig[0].toUpperCase() : orig[0].toLowerCase()) +
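
The one-line change in `getEffectiveKeys` is again a syntax downleveling: an optional-chaining call such as `req.keys?.slice()` becomes the explicit temporary-variable form emitted for targets without optional chaining. The two spellings are equivalent; roughly:

// beta.17-style output
const keys = req.keys?.slice();

// beta.18-style output (what the _a temporary expands to)
var _a;
const keys = (_a = req.keys) === null || _a === void 0 ? void 0 : _a.slice();
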
@@ -4174,9 +4235,7 @@ function createIdGenerationMiddleware(db) {
  name: 'idGenerationMiddleware',
  level: 1,
  create: (core) => {
- return {
- ...core,
- table: (tableName) => {
+ return Object.assign(Object.assign({}, core), { table: (tableName) => {
  const table = core.table(tableName);
  function generateOrVerifyAtKeys(req, idPrefix) {
  let valueClones = null;
@@ -4202,24 +4261,19 @@ function createIdGenerationMiddleware(db) {
  `If you want to generate IDs programmatically, remove '@' from the schema to get rid of this constraint. Dexie Cloud supports custom IDs as long as they are random and globally unique.`);
  }
  });
- return table.mutate({
- ...req,
- keys,
- values: valueClones || req.values,
- });
+ return table.mutate(Object.assign(Object.assign({}, req), { keys, values: valueClones || req.values }));
  }
- return {
- […]
- mutate: (req) => {
+ return Object.assign(Object.assign({}, table), { mutate: (req) => {
+ var _a, _b;
  // @ts-ignore
  if (req.trans.disableChangeTracking) {
  // Disable ID policy checks and ID generation
  return table.mutate(req);
  }
  if (req.type === 'add' || req.type === 'put') {
- const cloudTableSchema = db.cloud.schema […]
- if (!cloudTableSchema […]
- if (cloudTableSchema […]
+ const cloudTableSchema = (_a = db.cloud.schema) === null || _a === void 0 ? void 0 : _a[tableName];
+ if (!(cloudTableSchema === null || cloudTableSchema === void 0 ? void 0 : cloudTableSchema.generatedGlobalId)) {
+ if (cloudTableSchema === null || cloudTableSchema === void 0 ? void 0 : cloudTableSchema.markedForSync) {
  // Just make sure primary key is of a supported type:
  const keys = getEffectiveKeys(table.schema.primaryKey, req);
  keys.forEach((key, idx) => {
@@ -4233,7 +4287,7 @@ function createIdGenerationMiddleware(db) {
  }
  }
  else {
- if (db.cloud.options […]
+ if (((_b = db.cloud.options) === null || _b === void 0 ? void 0 : _b.databaseUrl) && !db.initiallySynced) {
  // A database URL is configured but no initial sync has been performed.
  const keys = getEffectiveKeys(table.schema.primaryKey, req);
  // Check if the operation would yield any INSERT. If so, complain! We never want wrong ID prefixes stored.
|
  }
  }
  return table.mutate(req);
- }
- […]
- },
- };
+ } });
+ } });
  },
  };
  }
@@ -4268,19 +4320,16 @@ function createImplicitPropSetterMiddleware(db) {
  name: 'implicitPropSetterMiddleware',
  level: 1,
  create: (core) => {
- return {
- ...core,
- table: (tableName) => {
+ return Object.assign(Object.assign({}, core), { table: (tableName) => {
  const table = core.table(tableName);
- return {
- […]
- mutate: (req) => {
+ return Object.assign(Object.assign({}, table), { mutate: (req) => {
+ var _a, _b, _c, _d;
  // @ts-ignore
  if (req.trans.disableChangeTracking) {
  return table.mutate(req);
  }
  const trans = req.trans;
- if (db.cloud.schema […]
+ if ((_b = (_a = db.cloud.schema) === null || _a === void 0 ? void 0 : _a[tableName]) === null || _b === void 0 ? void 0 : _b.markedForSync) {
  if (req.type === 'add' || req.type === 'put') {
  // No matter if user is logged in or not, make sure "owner" and "realmId" props are set properly.
  // If not logged in, this will be changed upon syncification of the tables (next sync after login),
@@ -4294,7 +4343,7 @@ function createImplicitPropSetterMiddleware(db) {
  if (!obj.realmId) {
  obj.realmId = trans.currentUser.userId;
  }
- const key = table.schema.primaryKey.extractKey […]
+ const key = (_d = (_c = table.schema.primaryKey).extractKey) === null || _d === void 0 ? void 0 : _d.call(_c, obj);
  if (typeof key === 'string' && key[0] === '#') {
  // Add $ts prop for put operations and
  // disable update operations as well as consistent
@@ -4321,10 +4370,8 @@ function createImplicitPropSetterMiddleware(db) {
  }
  }
  return table.mutate(req);
- }
- […]
- },
- };
+ } });
+ } });
  },
  };
  }
@@ -4343,15 +4390,7 @@ function allSettled(possiblePromises) {
  let counter$1 = 0;
  function guardedTable(table) {
  const prop = "$lock" + (++counter$1);
- return {
- ...table,
- count: readLock(table.count, prop),
- get: readLock(table.get, prop),
- getMany: readLock(table.getMany, prop),
- openCursor: readLock(table.openCursor, prop),
- query: readLock(table.query, prop),
- mutate: writeLock(table.mutate, prop),
- };
+ return Object.assign(Object.assign({}, table), { count: readLock(table.count, prop), get: readLock(table.get, prop), getMany: readLock(table.getMany, prop), openCursor: readLock(table.openCursor, prop), query: readLock(table.query, prop), mutate: writeLock(table.mutate, prop) });
  }
  function readLock(fn, prop) {
  return function readLocker(req) {
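
The `guardedTable` change follows the same pattern as the surrounding middleware hunks: an object-spread literal is replaced by the nested `Object.assign` calls that TypeScript emits for pre-ES2018 targets. The two forms build the same object; schematically, with a trimmed-down property list:

// beta.17-style output: object spread
const guarded = { ...table, mutate: writeLock(table.mutate, prop) };

// beta.18-style output: the downleveled equivalent
const guarded = Object.assign(Object.assign({}, table), { mutate: writeLock(table.mutate, prop) });
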
@@ -4401,16 +4440,14 @@ function createMutationTrackingMiddleware({ currentUserObservable, db }) {
  core.table(`$${tbl.name}_mutations`)
  ]));
  }
- catch {
+ catch (_a) {
  throwVersionIncrementNeeded();
  }
- return {
- ...core,
- transaction: (tables, mode) => {
+ return Object.assign(Object.assign({}, core), { transaction: (tables, mode) => {
  let tx;
  if (mode === 'readwrite') {
  const mutationTables = tables
- .filter((tbl) => db.cloud.schema […]
+ .filter((tbl) => { var _a, _b; return (_b = (_a = db.cloud.schema) === null || _a === void 0 ? void 0 : _a[tbl]) === null || _b === void 0 ? void 0 : _b.markedForSync; })
  .map((tbl) => getMutationTable(tbl));
  tx = core.transaction([...tables, ...mutationTables], mode);
  }
@@ -4433,7 +4470,8 @@ function createMutationTrackingMiddleware({ currentUserObservable, db }) {
  outstandingTransactions.next(outstandingTransactions.value);
  };
  const txComplete = () => {
- […]
+ var _a;
+ if (tx.mutationsAdded && ((_a = db.cloud.options) === null || _a === void 0 ? void 0 : _a.databaseUrl)) {
  if (db.cloud.usingServiceWorker) {
  console.debug('registering sync event');
  registerSyncEvent(db, "push");
@@ -4449,8 +4487,7 @@ function createMutationTrackingMiddleware({ currentUserObservable, db }) {
  tx.addEventListener('abort', removeTransaction);
  }
  return tx;
- },
- table: (tableName) => {
+ }, table: (tableName) => {
  const table = core.table(tableName);
  if (/^\$/.test(tableName)) {
  if (tableName.endsWith('_mutations')) {
@@ -4458,20 +4495,15 @@ function createMutationTrackingMiddleware({ currentUserObservable, db }) {
  // make sure to set the mutationsAdded flag on transaction.
  // This is also done in mutateAndLog() as that function talks to a
  // lower level DBCore and wouldn't be catched by this code.
- return {
- ...table,
- mutate: (req) => {
+ return Object.assign(Object.assign({}, table), { mutate: (req) => {
  if (req.type === 'add' || req.type === 'put') {
  req.trans.mutationsAdded = true;
  }
  return table.mutate(req);
- }
- };
+ } });
  }
  else if (tableName === '$logins') {
- return {
- ...table,
- mutate: (req) => {
+ return Object.assign(Object.assign({}, table), { mutate: (req) => {
  //console.debug('Mutating $logins table', req);
  return table
  .mutate(req)
@@ -4485,8 +4517,7 @@ function createMutationTrackingMiddleware({ currentUserObservable, db }) {
  console.debug('Failed mutation $logins', err);
  return Promise.reject(err);
  });
- }
- };
+ } });
  }
  else {
  return table;
@@ -4494,17 +4525,16 @@ function createMutationTrackingMiddleware({ currentUserObservable, db }) {
  }
  const { schema } = table;
  const mutsTable = mutTableMap.get(tableName);
- return guardedTable({
- […]
- mutate: (req) => {
+ return guardedTable(Object.assign(Object.assign({}, table), { mutate: (req) => {
+ var _a, _b, _c;
  const trans = req.trans;
  if (!trans.txid)
  return table.mutate(req); // Upgrade transactions not guarded by us.
  if (trans.disableChangeTracking)
  return table.mutate(req);
- if (!db.cloud.schema […]
+ if (!((_b = (_a = db.cloud.schema) === null || _a === void 0 ? void 0 : _a[tableName]) === null || _b === void 0 ? void 0 : _b.markedForSync))
  return table.mutate(req);
- if (!trans.currentUser […]
+ if (!((_c = trans.currentUser) === null || _c === void 0 ? void 0 : _c.isLoggedIn)) {
  // Unauthorized user should not log mutations.
  // Instead, after login all local data should be logged at once.
  return table.mutate(req);
@@ -4527,8 +4557,7 @@ function createMutationTrackingMiddleware({ currentUserObservable, db }) {
  });
  })
  : mutateAndLog(req);
- }
- });
+ } }));
  function mutateAndLog(req) {
  const trans = req.trans;
  trans.mutationsAdded = true;
@@ -4599,18 +4628,14 @@ function createMutationTrackingMiddleware({ currentUserObservable, db }) {
  : res;
  });
  }
- }
- };
+ } });
  }
  };
  }

  function overrideParseStoresSpec(origFunc, dexie) {
  return function (stores, dbSchema) {
- const storesClone = {
- ...DEXIE_CLOUD_SCHEMA,
- ...stores,
- };
+ const storesClone = Object.assign(Object.assign({}, DEXIE_CLOUD_SCHEMA), stores);
  const cloudSchema = dexie.cloud.schema || (dexie.cloud.schema = {});
  const allPrefixes = new Set();
  Object.keys(storesClone).forEach(tableName => {
@@ -4642,10 +4667,12 @@ function overrideParseStoresSpec(origFunc, dexie) {
  };
  }

- […]
+ function performInitialSync(db, cloudOptions, cloudSchema) {
+ return __awaiter$1(this, void 0, void 0, function* () {
+ console.debug('Performing initial sync');
+ yield sync(db, cloudOptions, cloudSchema, { isInitialSync: true });
+ console.debug('Done initial sync');
+ });
  }

  const USER_INACTIVITY_TIMEOUT = 180000; // 3 minutes
@@ -4763,7 +4790,7 @@ class WSConnection extends Subscription$1 {
  try {
  this.ws.close();
  }
- catch { }
+ catch (_a) { }
  }
  this.ws = null;
  if (this.messageProducerSubscription) {
@@ -4778,168 +4805,174 @@ class WSConnection extends Subscription$1 {
  try {
  this.disconnect();
  }
- catch { }
+ catch (_a) { }
  this.connect()
  .catch(() => { })
  .then(() => (this.reconnecting = false)); // finally()
  }
- […]
- this […]
- […]
- }
- if (this.ws) {
- throw new Error(`Called connect() when a connection is already open`);
- }
- if (!this.databaseUrl)
- throw new Error(`Cannot connect without a database URL`);
- if (this.closed) {
- //console.debug('SyncStatus: DUBB: Ooops it was closed!');
- return;
- }
- if (this.tokenExpiration && this.tokenExpiration < new Date()) {
- this.subscriber.error(new TokenExpiredError()); // Will be handled in connectWebSocket.ts.
- return;
- }
- this.webSocketStatus.next('connecting');
- this.pinger = setInterval(async () => {
- if (this.closed) {
- console.debug('pinger check', this.id, 'CLOSED.');
- this.teardown();
+ connect() {
+ return __awaiter$1(this, void 0, void 0, function* () {
+ this.lastServerActivity = new Date();
+ if (this.pauseUntil && this.pauseUntil > new Date()) {
+ console.debug('WS not reconnecting just yet', {
+ id: this.id,
+ pauseUntil: this.pauseUntil,
+ });
  return;
  }
  if (this.ws) {
- […]
- this.ws.send(JSON.stringify({ type: 'ping' }));
- setTimeout(() => {
- console.debug('pinger setTimeout', this.id, this.pinger ? `alive` : 'dead');
- if (!this.pinger)
- return;
- if (this.closed) {
- console.debug('pinger setTimeout', this.id, 'subscription is closed');
- this.teardown();
- return;
- }
- if (this.lastServerActivity <
- new Date(Date.now() - SERVER_PING_TIMEOUT)) {
- // Server inactive. Reconnect if user is active.
- console.debug('pinger: server is inactive');
- console.debug('pinger reconnecting');
- this.reconnect();
- }
- else {
- console.debug('pinger: server still active');
- }
- }, SERVER_PING_TIMEOUT);
- }
- catch {
- console.debug('pinger catch error', this.id, 'reconnecting');
- this.reconnect();
- }
+ throw new Error(`Called connect() when a connection is already open`);
  }
- […]
- }, CLIENT_PING_INTERVAL);
- // The following vars are needed because we must know which callback to ack when server sends it's ack to us.
- const wsUrl = new URL(this.databaseUrl);
- wsUrl.protocol = wsUrl.protocol === 'http:' ? 'ws' : 'wss';
- const searchParams = new URLSearchParams();
- if (this.subscriber.closed)
- return;
- searchParams.set('v', '2');
- searchParams.set('rev', this.rev);
- searchParams.set('realmsHash', this.realmSetHash);
- searchParams.set('clientId', this.clientIdentity);
- if (this.token) {
- searchParams.set('token', this.token);
- }
- // Connect the WebSocket to given url:
- console.debug('dexie-cloud WebSocket create');
- const ws = (this.ws = new WebSocket(`${wsUrl}/changes?${searchParams}`));
- //ws.binaryType = "arraybuffer"; // For future when subscribing to actual changes.
- ws.onclose = (event) => {
- if (!this.pinger)
+ if (!this.databaseUrl)
+ throw new Error(`Cannot connect without a database URL`);
+ if (this.closed) {
+ //console.debug('SyncStatus: DUBB: Ooops it was closed!');
  return;
- […]
- this. […]
- […]
- ws.onmessage = (event) => {
- if (!this.pinger)
+ }
+ if (this.tokenExpiration && this.tokenExpiration < new Date()) {
+ this.subscriber.error(new TokenExpiredError()); // Will be handled in connectWebSocket.ts.
  return;
- […]
- this. […]
- […]
+ }
+ this.webSocketStatus.next('connecting');
+ this.pinger = setInterval(() => __awaiter$1(this, void 0, void 0, function* () {
+ if (this.closed) {
+ console.debug('pinger check', this.id, 'CLOSED.');
+ this.teardown();
+ return;
  }
- if ( […]
- […]
+ if (this.ws) {
+ try {
+ this.ws.send(JSON.stringify({ type: 'ping' }));
+ setTimeout(() => {
+ console.debug('pinger setTimeout', this.id, this.pinger ? `alive` : 'dead');
+ if (!this.pinger)
+ return;
+ if (this.closed) {
+ console.debug('pinger setTimeout', this.id, 'subscription is closed');
+ this.teardown();
+ return;
+ }
+ if (this.lastServerActivity <
+ new Date(Date.now() - SERVER_PING_TIMEOUT)) {
+ // Server inactive. Reconnect if user is active.
+ console.debug('pinger: server is inactive');
+ console.debug('pinger reconnecting');
+ this.reconnect();
+ }
+ else {
+ console.debug('pinger: server still active');
+ }
+ }, SERVER_PING_TIMEOUT);
+ }
+ catch (_a) {
+ console.debug('pinger catch error', this.id, 'reconnecting');
+ this.reconnect();
+ }
  }
- […]
- this. […]
+ else {
+ console.debug('pinger', this.id, 'reconnecting');
+ this.reconnect();
  }
+ }), CLIENT_PING_INTERVAL);
+ // The following vars are needed because we must know which callback to ack when server sends it's ack to us.
+ const wsUrl = new URL(this.databaseUrl);
+ wsUrl.protocol = wsUrl.protocol === 'http:' ? 'ws' : 'wss';
+ const searchParams = new URLSearchParams();
+ if (this.subscriber.closed)
+ return;
+ searchParams.set('v', '2');
+ searchParams.set('rev', this.rev);
+ searchParams.set('realmsHash', this.realmSetHash);
+ searchParams.set('clientId', this.clientIdentity);
+ if (this.token) {
+ searchParams.set('token', this.token);
  }
- […]
- }
- […]
+ // Connect the WebSocket to given url:
+ console.debug('dexie-cloud WebSocket create');
+ const ws = (this.ws = new WebSocket(`${wsUrl}/changes?${searchParams}`));
+ //ws.binaryType = "arraybuffer"; // For future when subscribing to actual changes.
+ ws.onclose = (event) => {
+ if (!this.pinger)
+ return;
+ console.debug('dexie-cloud WebSocket onclosed', this.id);
+ this.reconnect();
+ };
+ ws.onmessage = (event) => {
+ if (!this.pinger)
+ return;
+ console.debug('dexie-cloud WebSocket onmessage', event.data);
+ this.lastServerActivity = new Date();
+ try {
+ const msg = TSON.parse(event.data);
+ if (msg.type === 'error') {
+ throw new Error(`Error message from dexie-cloud: ${msg.error}`);
  }
- […]
- this. […]
+ if (msg.type === 'rev') {
+ this.rev = msg.rev; // No meaning but seems reasonable.
  }
- […]
- this.messageProducerSubscription = this.messageProducer.subscribe((msg) => {
- if (!this.closed) {
- if (msg.type === 'ready' &&
- this.webSocketStatus.value !== 'connected') {
- this.webSocketStatus.next('connected');
+ if (msg.type !== 'pong') {
+ this.subscriber.next(msg);
  }
- this.ws?.send(TSON.stringify(msg));
  }
- […]
+ catch (e) {
+ this.subscriber.error(e);
+ }
+ };
+ try {
+ let everConnected = false;
+ yield new Promise((resolve, reject) => {
+ ws.onopen = (event) => {
+ console.debug('dexie-cloud WebSocket onopen');
|
|
4926
|
+
everConnected = true;
|
|
4927
|
+
resolve(null);
|
|
4928
|
+
};
|
|
4929
|
+
ws.onerror = (event) => {
|
|
4930
|
+
if (!everConnected) {
|
|
4931
|
+
const error = event.error || new Error('WebSocket Error');
|
|
4932
|
+
this.subscriber.error(error);
|
|
4933
|
+
this.webSocketStatus.next('error');
|
|
4934
|
+
reject(error);
|
|
4935
|
+
}
|
|
4936
|
+
else {
|
|
4937
|
+
this.reconnect();
|
|
4938
|
+
}
|
|
4939
|
+
};
|
|
4940
|
+
});
|
|
4941
|
+
this.messageProducerSubscription = this.messageProducer.subscribe((msg) => {
|
|
4942
|
+
var _a;
|
|
4943
|
+
if (!this.closed) {
|
|
4944
|
+
if (msg.type === 'ready' &&
|
|
4945
|
+
this.webSocketStatus.value !== 'connected') {
|
|
4946
|
+
this.webSocketStatus.next('connected');
|
|
4947
|
+
}
|
|
4948
|
+
(_a = this.ws) === null || _a === void 0 ? void 0 : _a.send(TSON.stringify(msg));
|
|
4949
|
+
}
|
|
4950
|
+
});
|
|
4951
|
+
}
|
|
4952
|
+
catch (error) {
|
|
4953
|
+
this.pauseUntil = new Date(Date.now() + FAIL_RETRY_WAIT_TIME);
|
|
4954
|
+
}
|
|
4955
|
+
});
|
|
4926
4956
|
}
|
|
4927
4957
|
}
|
|
4928
4958
|
|
|
4929
4959
|
function sleep$1(ms) {
|
|
4930
4960
|
return new Promise((resolve) => setTimeout(resolve, ms));
|
|
4931
4961
|
}
|
|
4932–4939
|
-
(content of removed lines 4932–4939 is not rendered in this diff view)
|
|
4962
|
+
function waitAndReconnectWhenUserDoesSomething(error) {
|
|
4963
|
+
return __awaiter$1(this, void 0, void 0, function* () {
|
|
4964
|
+
console.error(`WebSocket observable: error but revive when user does some active thing...`, error);
|
|
4965
|
+
// Sleep some seconds...
|
|
4966
|
+
yield sleep$1(3000);
|
|
4967
|
+
// Wait til user does something (move mouse, tap, scroll, click etc)
|
|
4968
|
+
console.debug('waiting for someone to do something');
|
|
4969
|
+
yield userDoesSomething.pipe(take(1)).toPromise();
|
|
4970
|
+
console.debug('someone did something!');
|
|
4971
|
+
});
|
|
4940
4972
|
}
|
|
4941
4973
|
function connectWebSocket(db) {
|
|
4942
|
-
|
|
4974
|
+
var _a;
|
|
4975
|
+
if (!((_a = db.cloud.options) === null || _a === void 0 ? void 0 : _a.databaseUrl)) {
|
|
4943
4976
|
throw new Error(`No database URL to connect WebSocket to`);
|
|
4944
4977
|
}
|
|
4945
4978
|
const messageProducer = db.messageConsumer.readyToServe.pipe(filter((isReady) => isReady), // When consumer is ready for new messages, produce such a message to inform server about it
|
|
@@ -4951,27 +4984,27 @@ function connectWebSocket(db) {
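The hunk below recreates the changes observable when the server reports an expired access token: catch the TokenExpiredError, refresh the token, persist it and resubscribe. A minimal standalone sketch of that catch-refresh-retry shape with RxJS; openSocket and refreshToken are hypothetical stand-ins, not the dexie-cloud internals:

```ts
import { Observable, defer, from, throwError } from 'rxjs';
import { catchError, switchMap } from 'rxjs/operators';

// Hypothetical stand-ins for the real dexie-cloud pieces.
function openSocket(token: string): Observable<string> {
  // A real implementation would wrap a WebSocket; here we just emit one message.
  return from([`connected with ${token}`]);
}
async function refreshToken(): Promise<string> {
  return 'fresh-token';
}

// Recreate the stream when the server rejects an expired token, mirroring the
// catchError(...) -> refresh access token -> createObservable() flow in the hunk.
function createObservable(token: string): Observable<string> {
  return defer(() => openSocket(token)).pipe(
    catchError((error) =>
      error?.name === 'TokenExpiredError'
        ? from(refreshToken()).pipe(switchMap((fresh) => createObservable(fresh)))
        : throwError(() => error)
    )
  );
}

createObservable('initial-token').subscribe((msg) => console.log(msg));
```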
|
|
|
4951
4984
|
rev: syncState.serverRevision,
|
|
4952
4985
|
})));
|
|
4953
4986
|
function createObservable() {
|
|
4954
|
-
return db.cloud.persistedSyncState.pipe(filter((syncState) => syncState
|
|
4987
|
+
return db.cloud.persistedSyncState.pipe(filter((syncState) => syncState === null || syncState === void 0 ? void 0 : syncState.serverRevision), // Don't connect before there's no initial sync performed.
|
|
4955
4988
|
take(1), // Don't continue waking up whenever syncState change
|
|
4956
|
-
switchMap((syncState) => db.cloud.currentUser.pipe(map((userLogin) => [userLogin, syncState]))), switchMap(([userLogin, syncState]) => userIsReallyActive.pipe(map((isActive) => [isActive ? userLogin : null, syncState]))), switchMap(
|
|
4989
|
+
switchMap((syncState) => db.cloud.currentUser.pipe(map((userLogin) => [userLogin, syncState]))), switchMap(([userLogin, syncState]) => userIsReallyActive.pipe(map((isActive) => [isActive ? userLogin : null, syncState]))), switchMap(([userLogin, syncState]) => __awaiter$1(this, void 0, void 0, function* () { return [userLogin, yield computeRealmSetHash(syncState)]; })), switchMap(([userLogin, realmSetHash]) =>
|
|
4957
4990
|
// Let server end query changes from last entry of same client-ID and forward.
|
|
4958
4991
|
// If no new entries, server won't bother the client. If new entries, server sends only those
|
|
4959
4992
|
// and the baseRev of the last from same client-ID.
|
|
4960
4993
|
userLogin
|
|
4961
4994
|
? new WSObservable(db.cloud.options.databaseUrl, db.cloud.persistedSyncState.value.serverRevision, realmSetHash, db.cloud.persistedSyncState.value.clientIdentity, messageProducer, db.cloud.webSocketStatus, userLogin.accessToken, userLogin.accessTokenExpiration)
|
|
4962
4995
|
: from$1([])), catchError((error) => {
|
|
4963
|
-
if (error
|
|
4996
|
+
if ((error === null || error === void 0 ? void 0 : error.name) === 'TokenExpiredError') {
|
|
4964
4997
|
console.debug('WebSocket observable: Token expired. Refreshing token...');
|
|
4965
|
-
return of(true).pipe(switchMap(
|
|
4998
|
+
return of(true).pipe(switchMap(() => __awaiter$1(this, void 0, void 0, function* () {
|
|
4966
4999
|
// Refresh access token
|
|
4967
|
-
const user =
|
|
4968
|
-
const refreshedLogin =
|
|
5000
|
+
const user = yield db.getCurrentUser();
|
|
5001
|
+
const refreshedLogin = yield refreshAccessToken(db.cloud.options.databaseUrl, user);
|
|
4969
5002
|
// Persist updated access token
|
|
4970
|
-
|
|
5003
|
+
yield db.table('$logins').update(user.userId, {
|
|
4971
5004
|
accessToken: refreshedLogin.accessToken,
|
|
4972
5005
|
accessTokenExpiration: refreshedLogin.accessTokenExpiration,
|
|
4973
5006
|
});
|
|
4974
|
-
}), switchMap(() => createObservable()));
|
|
5007
|
+
})), switchMap(() => createObservable()));
|
|
4975
5008
|
}
|
|
4976
5009
|
else {
|
|
4977
5010
|
return throwError(error);
|
|
@@ -4993,10 +5026,13 @@ function connectWebSocket(db) {
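The hunk below adds isSyncNeeded, which calls sync() with { justCheckIfNeeded: true } and resolves to a boolean instead of performing a sync. A rough sketch of that check-only flag, with made-up names in place of the real sync internals:

```ts
// Illustrative shapes only - not the addon's real signatures.
interface CloudOptions { databaseUrl?: string; }

async function sync(
  pendingMutations: number,
  options: { justCheckIfNeeded?: boolean } = {}
): Promise<boolean> {
  if (options.justCheckIfNeeded) {
    // Only report whether a sync would do anything; don't touch the network.
    return pendingMutations > 0;
  }
  // ... a real implementation would push and pull changes here ...
  return false;
}

async function isSyncNeeded(opts: CloudOptions, pendingMutations: number): Promise<boolean> {
  // Mirrors the hunk: only meaningful when a databaseUrl is configured.
  return opts.databaseUrl ? sync(pendingMutations, { justCheckIfNeeded: true }) : false;
}

isSyncNeeded({ databaseUrl: 'https://example.dexie.cloud' }, 2).then((needed) => console.log(needed));
```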
|
|
|
4993
5026
|
});
|
|
4994
5027
|
}
|
|
4995
5028
|
|
|
4996
|
-
|
|
4997
|
-
|
|
4998
|
-
|
|
4999
|
-
:
|
|
5029
|
+
function isSyncNeeded(db) {
|
|
5030
|
+
var _a;
|
|
5031
|
+
return __awaiter$1(this, void 0, void 0, function* () {
|
|
5032
|
+
return ((_a = db.cloud.options) === null || _a === void 0 ? void 0 : _a.databaseUrl) && db.cloud.schema
|
|
5033
|
+
? yield sync(db, db.cloud.options, db.cloud.schema, { justCheckIfNeeded: true })
|
|
5034
|
+
: false;
|
|
5035
|
+
});
|
|
5000
5036
|
}
|
|
5001
5037
|
|
|
5002
5038
|
const SECONDS = 1000;
|
|
@@ -5006,93 +5042,97 @@ const myId = randomString$1(16);
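The hunk below rewrites performGuardedJob in down-levelled form; the idea is unchanged: persist a lock row per job name, keep a heartbeat while working, and let another node take over once the heartbeat is older than GUARDED_JOB_TIMEOUT. A condensed sketch of that locking pattern with Dexie (table and constant names are illustrative, not the addon's $jobs table):

```ts
import Dexie, { Table } from 'dexie';

interface JobLock { nodeId: string; started: Date; heartbeat: Date; }

class JobsDB extends Dexie {
  jobs!: Table<JobLock, string>; // primary key = job name (outbound key)
  constructor() {
    super('guardedJobsDemo');
    this.version(1).stores({ jobs: '' }); // '' = outbound primary key
  }
}

const db = new JobsDB();
const myId = Math.random().toString(36).slice(2);
const TIMEOUT = 60_000;  // consider the previous holder dead after a minute of silence
const HEARTBEAT = 1_000; // refresh the lock every second while working

async function runGuarded(jobName: string, job: () => Promise<void>): Promise<void> {
  const gotLock = await db.transaction('rw', db.jobs, async () => {
    const current = await db.jobs.get(jobName);
    if (current && current.heartbeat.getTime() > Date.now() - TIMEOUT) return false;
    await db.jobs.put({ nodeId: myId, started: new Date(), heartbeat: new Date() }, jobName);
    return true; // we own the lock (it was free, or the previous holder timed out)
  });
  if (!gotLock) return; // someone else is doing the job and is still alive

  const beat = setInterval(() => {
    db.jobs.update(jobName, { heartbeat: new Date() }).catch(() => {});
  }, HEARTBEAT);
  try {
    await job();
  } finally {
    clearInterval(beat);
    await db.jobs.delete(jobName); // release the lock
  }
}

runGuarded('sync', async () => console.log('doing the guarded work')).catch(console.error);
```

The real implementation in the hunk additionally lets a waiting client observe the lock row with liveQuery and a timeout, and re-acquire it when the owner disappears.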
|
|
|
5006
5042
|
|
|
5007
5043
|
const GUARDED_JOB_HEARTBEAT = 1 * SECONDS;
|
|
5008
5044
|
const GUARDED_JOB_TIMEOUT = 1 * MINUTES;
|
|
5009–5059
|
-
(content of removed lines 5009–5059 is not rendered in this diff view)
|
|
5060
|
-
}
|
|
5061
|
-
if (await aquireLock()) {
|
|
5062
|
-
// We own the lock entry and can do our job undisturbed.
|
|
5063
|
-
// We're not within a transaction, but these type of locks
|
|
5064
|
-
// spans over transactions.
|
|
5065
|
-
// Start our heart beat during the job.
|
|
5066
|
-
// Use setInterval to make sure we are updating heartbeat even during long-lived fetch calls.
|
|
5067
|
-
const heartbeat = setInterval(() => {
|
|
5068
|
-
jobsTable.update(jobName, (job) => {
|
|
5069
|
-
if (job.nodeId === myId) {
|
|
5070
|
-
job.heartbeat = new Date();
|
|
5045
|
+
function performGuardedJob(db, jobName, jobsTableName, job, { awaitRemoteJob } = {}) {
|
|
5046
|
+
return __awaiter$1(this, void 0, void 0, function* () {
|
|
5047
|
+
// Start working.
|
|
5048
|
+
//
|
|
5049
|
+
// Check if someone else is working on this already.
|
|
5050
|
+
//
|
|
5051
|
+
const jobsTable = db.table(jobsTableName);
|
|
5052
|
+
function aquireLock() {
|
|
5053
|
+
return __awaiter$1(this, void 0, void 0, function* () {
|
|
5054
|
+
const gotTheLock = yield db.transaction('rw!', jobsTableName, () => __awaiter$1(this, void 0, void 0, function* () {
|
|
5055
|
+
const currentWork = yield jobsTable.get(jobName);
|
|
5056
|
+
if (!currentWork) {
|
|
5057
|
+
// No one else is working. Let's record that we are.
|
|
5058
|
+
yield jobsTable.add({
|
|
5059
|
+
nodeId: myId,
|
|
5060
|
+
started: new Date(),
|
|
5061
|
+
heartbeat: new Date()
|
|
5062
|
+
}, jobName);
|
|
5063
|
+
return true;
|
|
5064
|
+
}
|
|
5065
|
+
else if (currentWork.heartbeat.getTime() <
|
|
5066
|
+
Date.now() - GUARDED_JOB_TIMEOUT) {
|
|
5067
|
+
console.warn(`Latest ${jobName} worker seem to have died.\n`, `The dead job started:`, currentWork.started, `\n`, `Last heart beat was:`, currentWork.heartbeat, '\n', `We're now taking over!`);
|
|
5068
|
+
// Now, take over!
|
|
5069
|
+
yield jobsTable.put({
|
|
5070
|
+
nodeId: myId,
|
|
5071
|
+
started: new Date(),
|
|
5072
|
+
heartbeat: new Date()
|
|
5073
|
+
}, jobName);
|
|
5074
|
+
return true;
|
|
5075
|
+
}
|
|
5076
|
+
return false;
|
|
5077
|
+
}));
|
|
5078
|
+
if (gotTheLock)
|
|
5079
|
+
return true;
|
|
5080
|
+
// Someone else took the job.
|
|
5081
|
+
if (awaitRemoteJob) {
|
|
5082
|
+
try {
|
|
5083
|
+
const jobDoneObservable = from$1(liveQuery(() => jobsTable.get(jobName))).pipe(timeout(GUARDED_JOB_TIMEOUT), filter((job) => !job)); // Wait til job is not there anymore.
|
|
5084
|
+
yield jobDoneObservable.toPromise();
|
|
5085
|
+
return false;
|
|
5086
|
+
}
|
|
5087
|
+
catch (err) {
|
|
5088
|
+
if (err.name !== 'TimeoutError') {
|
|
5089
|
+
throw err;
|
|
5090
|
+
}
|
|
5091
|
+
// Timeout stopped us! Try aquire the lock now.
|
|
5092
|
+
// It will likely succeed this time unless
|
|
5093
|
+
// another client took it.
|
|
5094
|
+
return yield aquireLock();
|
|
5095
|
+
}
|
|
5071
5096
|
}
|
|
5097
|
+
return false;
|
|
5072
5098
|
});
|
|
5073
|
-
}, GUARDED_JOB_HEARTBEAT);
|
|
5074
|
-
try {
|
|
5075
|
-
return await job();
|
|
5076
5099
|
}
|
|
5077
|
-
|
|
5078
|
-
//
|
|
5079
|
-
|
|
5080
|
-
//
|
|
5081
|
-
|
|
5082
|
-
|
|
5083
|
-
|
|
5084
|
-
|
|
5085
|
-
|
|
5086
|
-
|
|
5100
|
+
if (yield aquireLock()) {
|
|
5101
|
+
// We own the lock entry and can do our job undisturbed.
|
|
5102
|
+
// We're not within a transaction, but these type of locks
|
|
5103
|
+
// spans over transactions.
|
|
5104
|
+
// Start our heart beat during the job.
|
|
5105
|
+
// Use setInterval to make sure we are updating heartbeat even during long-lived fetch calls.
|
|
5106
|
+
const heartbeat = setInterval(() => {
|
|
5107
|
+
jobsTable.update(jobName, (job) => {
|
|
5108
|
+
if (job.nodeId === myId) {
|
|
5109
|
+
job.heartbeat = new Date();
|
|
5110
|
+
}
|
|
5111
|
+
});
|
|
5112
|
+
}, GUARDED_JOB_HEARTBEAT);
|
|
5113
|
+
try {
|
|
5114
|
+
return yield job();
|
|
5115
|
+
}
|
|
5116
|
+
finally {
|
|
5117
|
+
// Stop heartbeat
|
|
5118
|
+
clearInterval(heartbeat);
|
|
5119
|
+
// Remove the persisted job state:
|
|
5120
|
+
yield db.transaction('rw!', jobsTableName, () => __awaiter$1(this, void 0, void 0, function* () {
|
|
5121
|
+
const currentWork = yield jobsTable.get(jobName);
|
|
5122
|
+
if (currentWork && currentWork.nodeId === myId) {
|
|
5123
|
+
yield jobsTable.delete(jobName);
|
|
5124
|
+
}
|
|
5125
|
+
}));
|
|
5126
|
+
}
|
|
5087
5127
|
}
|
|
5088
|
-
}
|
|
5128
|
+
});
|
|
5089
5129
|
}
|
|
5090
5130
|
|
|
5091
5131
|
const ongoingSyncs = new WeakMap();
|
|
5092
5132
|
function syncIfPossible(db, cloudOptions, cloudSchema, options) {
|
|
5093
5133
|
const ongoing = ongoingSyncs.get(db);
|
|
5094
5134
|
if (ongoing) {
|
|
5095
|
-
if (ongoing.pull || options
|
|
5135
|
+
if (ongoing.pull || (options === null || options === void 0 ? void 0 : options.purpose) === 'push') {
|
|
5096
5136
|
console.debug('syncIfPossible(): returning the ongoing sync promise.');
|
|
5097
5137
|
return ongoing.promise;
|
|
5098
5138
|
}
|
|
@@ -5134,32 +5174,34 @@ function syncIfPossible(db, cloudOptions, cloudSchema, options) {
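The hunk below keeps syncIfPossible's existing dedup strategy: the ongoing sync promise is cached per database in the ongoingSyncs WeakMap and handed back to concurrent callers. A stripped-down sketch of that promise-reuse pattern (names are illustrative):

```ts
// Reuse an in-flight operation per object instead of starting it twice.
const ongoing = new WeakMap<object, Promise<void>>();

async function doSync(db: object): Promise<void> {
  // Stand-in for the real push/pull work.
  await new Promise((resolve) => setTimeout(resolve, 100));
}

function syncOnce(db: object): Promise<void> {
  const existing = ongoing.get(db);
  if (existing) return existing; // a sync is already running for this db
  const promise = doSync(db).finally(() => ongoing.delete(db));
  ongoing.set(db, promise);
  return promise;
}
```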
|
|
|
5134
5174
|
}
|
|
5135
5175
|
}
|
|
5136
5176
|
const promise = _syncIfPossible();
|
|
5137
|
-
ongoingSyncs.set(db, { promise, pull: options
|
|
5177
|
+
ongoingSyncs.set(db, { promise, pull: (options === null || options === void 0 ? void 0 : options.purpose) !== 'push' });
|
|
5138
5178
|
return promise;
|
|
5139
|
-
|
|
5140
|
-
|
|
5141
|
-
|
|
5142
|
-
if (
|
|
5143
|
-
|
|
5179
|
+
function _syncIfPossible() {
|
|
5180
|
+
return __awaiter$1(this, void 0, void 0, function* () {
|
|
5181
|
+
try {
|
|
5182
|
+
if (db.cloud.usingServiceWorker) {
|
|
5183
|
+
if (IS_SERVICE_WORKER) {
|
|
5184
|
+
yield sync(db, cloudOptions, cloudSchema, options);
|
|
5185
|
+
}
|
|
5186
|
+
}
|
|
5187
|
+
else {
|
|
5188
|
+
// We use a flow that is better suited for the case when multiple workers want to
|
|
5189
|
+
// do the same thing.
|
|
5190
|
+
yield performGuardedJob(db, CURRENT_SYNC_WORKER, '$jobs', () => sync(db, cloudOptions, cloudSchema, options));
|
|
5144
5191
|
}
|
|
5192
|
+
ongoingSyncs.delete(db);
|
|
5193
|
+
console.debug('Done sync');
|
|
5145
5194
|
}
|
|
5146
|
-
|
|
5147
|
-
|
|
5148
|
-
|
|
5149
|
-
|
|
5195
|
+
catch (error) {
|
|
5196
|
+
ongoingSyncs.delete(db);
|
|
5197
|
+
console.error(`Failed to sync client changes`, error);
|
|
5198
|
+
throw error; // Make sure we rethrow error so that sync event is retried.
|
|
5199
|
+
// I don't think we should setTimout or so here.
|
|
5200
|
+
// Unless server tells us to in some response.
|
|
5201
|
+
// Then we could follow that advice but not by waiting here but by registering
|
|
5202
|
+
// Something that triggers an event listened to in startPushWorker()
|
|
5150
5203
|
}
|
|
5151
|
-
|
|
5152
|
-
console.debug('Done sync');
|
|
5153
|
-
}
|
|
5154
|
-
catch (error) {
|
|
5155
|
-
ongoingSyncs.delete(db);
|
|
5156
|
-
console.error(`Failed to sync client changes`, error);
|
|
5157
|
-
throw error; // Make sure we rethrow error so that sync event is retried.
|
|
5158
|
-
// I don't think we should setTimout or so here.
|
|
5159
|
-
// Unless server tells us to in some response.
|
|
5160
|
-
// Then we could follow that advice but not by waiting here but by registering
|
|
5161
|
-
// Something that triggers an event listened to in startPushWorker()
|
|
5162
|
-
}
|
|
5204
|
+
});
|
|
5163
5205
|
}
|
|
5164
5206
|
}
|
|
5165
5207
|
|
|
@@ -5229,8 +5271,9 @@ function updateSchemaFromOptions(schema, options) {
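The hunk below tightens verifySchema: a table that is both autoIncremented and marked for sync raises a SchemaError pointing at the unsyncedTables option. A usage sketch based on that error text; the database name, URL and the '@id' key syntax are illustrative:

```ts
import Dexie from 'dexie';
import dexieCloud from 'dexie-cloud-addon';

const db = new Dexie('mydb', { addons: [dexieCloud] });

// 'logs' uses ++id (autoIncrement), so it is excluded from sync to avoid the
// SchemaError raised in verifySchema for autoIncremented, synced tables.
db.version(1).stores({
  todos: '@id, title',   // '@' asks the addon to generate globally unique ids
  logs: '++id, message', // local-only, autoIncremented table
});

db.cloud.configure({
  databaseUrl: 'https://example.dexie.cloud', // hypothetical URL
  unsyncedTables: ['logs'],
});
```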
|
|
|
5229
5271
|
}
|
|
5230
5272
|
|
|
5231
5273
|
function verifySchema(db) {
|
|
5274
|
+
var _a, _b;
|
|
5232
5275
|
for (const table of db.tables) {
|
|
5233
|
-
if (db.cloud.schema
|
|
5276
|
+
if ((_b = (_a = db.cloud.schema) === null || _a === void 0 ? void 0 : _a[table.name]) === null || _b === void 0 ? void 0 : _b.markedForSync) {
|
|
5234
5277
|
if (table.schema.primKey.auto) {
|
|
5235
5278
|
throw new Dexie.SchemaError(`Table ${table.name} is both autoIncremented and synced. ` +
|
|
5236
5279
|
`Use db.cloud.configure({unsyncedTables: [${JSON.stringify(table.name)}]}) to blacklist it from sync`);
|
|
@@ -5323,7 +5366,7 @@ function resolveText({ message, messageCode, messageParams }) {
|
|
|
5323
5366
|
function LoginDialog({ title, alerts, fields, onCancel, onSubmit, }) {
|
|
5324
5367
|
const [params, setParams] = l({});
|
|
5325
5368
|
const firstFieldRef = s();
|
|
5326
|
-
h(() => firstFieldRef.current
|
|
5369
|
+
h(() => { var _a; return (_a = firstFieldRef.current) === null || _a === void 0 ? void 0 : _a.focus(); }, []);
|
|
5327
5370
|
return (a$1(Dialog, null,
|
|
5328
5371
|
a$1(y, null,
|
|
5329
5372
|
a$1("h3", { style: Styles.WindowHeader }, title),
|
|
@@ -5333,7 +5376,7 @@ function LoginDialog({ title, alerts, fields, onCancel, onSubmit, }) {
|
|
|
5333
5376
|
onSubmit(params);
|
|
5334
5377
|
} }, Object.entries(fields).map(([fieldName, { type, label, placeholder }], idx) => (a$1("label", { style: Styles.Label },
|
|
5335
5378
|
label ? `${label}: ` : '',
|
|
5336
|
-
a$1("input", { ref: idx === 0 ? firstFieldRef : undefined, type: type, name: fieldName, autoComplete: "on", style: Styles.Input, autoFocus: true, placeholder: placeholder, value: params[fieldName] || '', onInput: (ev) => setParams({
|
|
5379
|
+
a$1("input", { ref: idx === 0 ? firstFieldRef : undefined, type: type, name: fieldName, autoComplete: "on", style: Styles.Input, autoFocus: true, placeholder: placeholder, value: params[fieldName] || '', onInput: (ev) => { var _a; return setParams(Object.assign(Object.assign({}, params), { [fieldName]: valueTransformer(type, (_a = ev.target) === null || _a === void 0 ? void 0 : _a['value']) })); } })))))),
|
|
5337
5380
|
a$1("div", { style: Styles.ButtonsDiv },
|
|
5338
5381
|
a$1("button", { type: "submit", style: Styles.Button, onClick: () => onSubmit(params) }, "Submit"),
|
|
5339
5382
|
a$1("button", { style: Styles.Button, onClick: onCancel }, "Cancel"))));
|
|
@@ -5365,7 +5408,7 @@ class LoginGui extends p$1 {
|
|
|
5365
5408
|
if (!userInteraction)
|
|
5366
5409
|
return null;
|
|
5367
5410
|
//if (props.db.cloud.userInteraction.observers.length > 1) return null; // Someone else subscribes.
|
|
5368
|
-
return a$1(LoginDialog, {
|
|
5411
|
+
return a$1(LoginDialog, Object.assign({}, userInteraction));
|
|
5369
5412
|
}
|
|
5370
5413
|
}
|
|
5371
5414
|
function setupDefaultGUI(db) {
|
|
@@ -5551,7 +5594,7 @@ function mergePermissions(...permissions) {
|
|
|
5551
5594
|
if (permissions.length === 0)
|
|
5552
5595
|
return {};
|
|
5553
5596
|
const reduced = permissions.reduce((result, next) => {
|
|
5554
|
-
const ret = {
|
|
5597
|
+
const ret = Object.assign({}, result);
|
|
5555
5598
|
for (const [verb, rights] of Object.entries(next)) {
|
|
5556
5599
|
if (verb in ret && ret[verb]) {
|
|
5557
5600
|
if (ret[verb] === '*')
|
|
@@ -5633,14 +5676,11 @@ const getPermissionsLookupObservable = associate((db) => {
|
|
|
5633
5676
|
.map((role) => globalRoles[role])
|
|
5634
5677
|
.filter((role) => role)
|
|
5635
5678
|
.map((role) => role.permissions);
|
|
5636
|
-
return {
|
|
5637
|
-
...realm,
|
|
5638
|
-
permissions: realm.owner === userId
|
|
5679
|
+
return Object.assign(Object.assign({}, realm), { permissions: realm.owner === userId
|
|
5639
5680
|
? { manage: '*' }
|
|
5640
|
-
: mergePermissions(...directPermissionSets, ...rolePermissionSets)
|
|
5641
|
-
};
|
|
5681
|
+
: mergePermissions(...directPermissionSets, ...rolePermissionSets) });
|
|
5642
5682
|
})
|
|
5643
|
-
.reduce((p, c) => ({
|
|
5683
|
+
.reduce((p, c) => (Object.assign(Object.assign({}, p), { [c.realmId]: c })), {
|
|
5644
5684
|
[userId]: {
|
|
5645
5685
|
realmId: userId,
|
|
5646
5686
|
owner: userId,
|
|
@@ -5659,47 +5699,50 @@ class PermissionChecker {
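The hunk below only rewrites PermissionChecker's optional chaining into explicit null checks; the rules themselves are unchanged: '*' or a table list under manage/add/update decides what the user may do, and ownership changes always require manage rights. A tiny sketch of those rules with simplified types (not the addon's real classes):

```ts
type Access = '*' | string[];
interface Permissions {
  manage?: Access;
  add?: Access;
  update?: '*' | { [tableName: string]: '*' | string[] };
}

function canAdd(perms: Permissions, table: string): boolean {
  if (perms.manage === '*') return true;
  if (Array.isArray(perms.manage) && perms.manage.includes(table)) return true;
  if (perms.add === '*') return true;
  return Array.isArray(perms.add) && perms.add.includes(table);
}

function canUpdate(perms: Permissions, table: string, props: string[]): boolean {
  if (perms.manage === '*') return true;
  if (Array.isArray(perms.manage) && perms.manage.includes(table)) return true;
  if (perms.update === '*') return props.every((p) => p !== 'owner');
  const tablePerms = perms.update?.[table];
  if (tablePerms === '*') return props.every((p) => p !== 'owner');
  if (!tablePerms) return false;
  return props.every((p) =>
    tablePerms.some((allowed) => allowed === p || (allowed === '*' && p !== 'owner'))
  );
}

console.log(canAdd({ add: ['todoItems'] }, 'todoItems'));        // true
console.log(canUpdate({ update: '*' }, 'todoItems', ['owner'])); // false: ownership needs manage
```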
|
|
|
5659
5699
|
this.isOwner = isOwner;
|
|
5660
5700
|
}
|
|
5661
5701
|
add(...tableNames) {
|
|
5702
|
+
var _a;
|
|
5662
5703
|
// If user can manage the whole realm, return true.
|
|
5663
5704
|
if (this.permissions.manage === '*')
|
|
5664
5705
|
return true;
|
|
5665
5706
|
// If user can manage given table in realm, return true
|
|
5666
|
-
if (this.permissions.manage
|
|
5707
|
+
if ((_a = this.permissions.manage) === null || _a === void 0 ? void 0 : _a.includes(this.tableName))
|
|
5667
5708
|
return true;
|
|
5668
5709
|
// If user can add any type, return true
|
|
5669
5710
|
if (this.permissions.add === '*')
|
|
5670
5711
|
return true;
|
|
5671
5712
|
// If user can add objects into given table names in the realm, return true
|
|
5672
|
-
if (tableNames.every((tableName) => this.permissions.add
|
|
5713
|
+
if (tableNames.every((tableName) => { var _a; return (_a = this.permissions.add) === null || _a === void 0 ? void 0 : _a.includes(tableName); })) {
|
|
5673
5714
|
return true;
|
|
5674
5715
|
}
|
|
5675
5716
|
return false;
|
|
5676
5717
|
}
|
|
5677
5718
|
update(...props) {
|
|
5719
|
+
var _a, _b;
|
|
5678
5720
|
// If user is owner of this object, or if user can manage the whole realm, return true.
|
|
5679
5721
|
if (this.isOwner || this.permissions.manage === '*')
|
|
5680
5722
|
return true;
|
|
5681
5723
|
// If user can manage given table in realm, return true
|
|
5682
|
-
if (this.permissions.manage
|
|
5724
|
+
if ((_a = this.permissions.manage) === null || _a === void 0 ? void 0 : _a.includes(this.tableName))
|
|
5683
5725
|
return true;
|
|
5684
5726
|
// If user can update any prop in any table in this realm, return true unless
|
|
5685
5727
|
// it regards to ownership change:
|
|
5686
5728
|
if (this.permissions.update === '*') {
|
|
5687
5729
|
return props.every((prop) => prop !== 'owner');
|
|
5688
5730
|
}
|
|
5689
|
-
const tablePermissions = this.permissions.update
|
|
5731
|
+
const tablePermissions = (_b = this.permissions.update) === null || _b === void 0 ? void 0 : _b[this.tableName];
|
|
5690
5732
|
// If user can update any prop in table and realm, return true unless
|
|
5691
5733
|
// accessing special props owner or realmId
|
|
5692
5734
|
if (tablePermissions === '*')
|
|
5693
5735
|
return props.every((prop) => prop !== 'owner');
|
|
5694
5736
|
// Explicitely listed properties to allow updates on:
|
|
5695
|
-
return props.every((prop) => tablePermissions
|
|
5737
|
+
return props.every((prop) => tablePermissions === null || tablePermissions === void 0 ? void 0 : tablePermissions.some((permittedProp) => permittedProp === prop || (permittedProp === '*' && prop !== 'owner')));
|
|
5696
5738
|
}
|
|
5697
5739
|
delete() {
|
|
5740
|
+
var _a;
|
|
5698
5741
|
// If user is owner of this object, or if user can manage the whole realm, return true.
|
|
5699
5742
|
if (this.isOwner || this.permissions.manage === '*')
|
|
5700
5743
|
return true;
|
|
5701
5744
|
// If user can manage given table in realm, return true
|
|
5702
|
-
if (this.permissions.manage
|
|
5745
|
+
if ((_a = this.permissions.manage) === null || _a === void 0 ? void 0 : _a.includes(this.tableName))
|
|
5703
5746
|
return true;
|
|
5704
5747
|
return false;
|
|
5705
5748
|
}
|
|
@@ -5735,7 +5778,7 @@ const getInvitesObservable = associate((db) => {
|
|
|
5735
5778
|
const permissions = getPermissionsLookupObservable(db._novip);
|
|
5736
5779
|
const accessControl = getInternalAccessControlObservable(db._novip);
|
|
5737
5780
|
return createSharedValueObservable(combineLatest([membersByEmail, accessControl, permissions]).pipe(map$1(([membersByEmail, accessControl, realmLookup]) => {
|
|
5738
|
-
const reducer = (result, m) => ({
|
|
5781
|
+
const reducer = (result, m) => (Object.assign(Object.assign({}, result), { [m.id]: Object.assign(Object.assign({}, m), { realm: realmLookup[m.realmId] }) }));
|
|
5739
5782
|
const emailMembersById = membersByEmail.reduce(reducer, {});
|
|
5740
5783
|
const membersById = accessControl.selfMembers.reduce(reducer, emailMembersById);
|
|
5741
5784
|
return Object.values(membersById).filter(m => !m.accepted);
|
|
@@ -5755,15 +5798,15 @@ function dexieCloud(dexie) {
|
|
|
5755
5798
|
let configuredProgramatically = false;
|
|
5756
5799
|
// local sync worker - used when there's no service worker.
|
|
5757
5800
|
let localSyncWorker = null;
|
|
5758
|
-
dexie.on('ready',
|
|
5801
|
+
dexie.on('ready', (dexie) => __awaiter$1(this, void 0, void 0, function* () {
|
|
5759
5802
|
try {
|
|
5760
|
-
|
|
5803
|
+
yield onDbReady(dexie);
|
|
5761
5804
|
}
|
|
5762
5805
|
catch (error) {
|
|
5763
5806
|
console.error(error);
|
|
5764
5807
|
// Make sure to succeed with database open even if network is down.
|
|
5765
5808
|
}
|
|
5766
|
-
}, true // true = sticky
|
|
5809
|
+
}), true // true = sticky
|
|
5767
5810
|
);
|
|
5768
5811
|
/** Void starting subscribers after a close has happened. */
|
|
5769
5812
|
let closed = false;
|
|
@@ -5779,8 +5822,8 @@ function dexieCloud(dexie) {
|
|
|
5779
5822
|
currentUserEmitter.next(UNAUTHORIZED_USER);
|
|
5780
5823
|
});
|
|
5781
5824
|
dexie.cloud = {
|
|
5782
|
-
version: '4.0.0-beta.
|
|
5783
|
-
options: {
|
|
5825
|
+
version: '4.0.0-beta.18',
|
|
5826
|
+
options: Object.assign({}, DEFAULT_OPTIONS),
|
|
5784
5827
|
schema: null,
|
|
5785
5828
|
serverState: null,
|
|
5786
5829
|
get currentUserId() {
|
|
@@ -5794,15 +5837,17 @@ function dexieCloud(dexie) {
|
|
|
5794
5837
|
persistedSyncState: new BehaviorSubject(undefined),
|
|
5795
5838
|
userInteraction: new BehaviorSubject(undefined),
|
|
5796
5839
|
webSocketStatus: new BehaviorSubject('not-started'),
|
|
5797
|
-
|
|
5798
|
-
|
|
5799
|
-
|
|
5800
|
-
|
|
5840
|
+
login(hint) {
|
|
5841
|
+
return __awaiter$1(this, void 0, void 0, function* () {
|
|
5842
|
+
const db = DexieCloudDB(dexie);
|
|
5843
|
+
yield db.cloud.sync();
|
|
5844
|
+
yield login(db, hint);
|
|
5845
|
+
});
|
|
5801
5846
|
},
|
|
5802
5847
|
invites: getInvitesObservable(dexie),
|
|
5803
5848
|
roles: getGlobalRolesObservable(dexie),
|
|
5804
5849
|
configure(options) {
|
|
5805
|
-
options = dexie.cloud.options = {
|
|
5850
|
+
options = dexie.cloud.options = Object.assign(Object.assign({}, dexie.cloud.options), options);
|
|
5806
5851
|
configuredProgramatically = true;
|
|
5807
5852
|
if (options.databaseUrl && options.nameSuffix) {
|
|
5808
5853
|
// @ts-ignore
|
|
@@ -5811,41 +5856,43 @@ function dexieCloud(dexie) {
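The hunk below replaces the old sync method with sync({ wait, purpose }): 'pull' triggers a sync and optionally waits for a newer persisted sync state, while the default 'push' waits until isSyncNeeded() reports nothing left to push. A usage sketch; the schema and URL are illustrative, the option names come from this hunk:

```ts
import Dexie from 'dexie';
import dexieCloud from 'dexie-cloud-addon';

const db = new Dexie('mydb', { addons: [dexieCloud] });
db.version(1).stores({ todos: '@id, title' });
db.cloud.configure({ databaseUrl: 'https://example.dexie.cloud' }); // hypothetical URL

async function refreshFromServer() {
  // Ask the server for changes and wait until the pull has completed.
  await db.cloud.sync({ purpose: 'pull', wait: true });
}

async function flushLocalChanges() {
  // Push local mutations; resolves once nothing is left to push.
  await db.cloud.sync({ purpose: 'push', wait: true });
}
```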
|
|
|
5811
5856
|
}
|
|
5812
5857
|
updateSchemaFromOptions(dexie.cloud.schema, dexie.cloud.options);
|
|
5813
5858
|
},
|
|
5814–5827
|
-
(content of removed lines 5814–5827 is not rendered in this diff view apart from the fragment "wait")
|
|
5859
|
+
sync({ wait, purpose } = { wait: true, purpose: 'push' }) {
|
|
5860
|
+
return __awaiter$1(this, void 0, void 0, function* () {
|
|
5861
|
+
if (wait === undefined)
|
|
5862
|
+
wait = true;
|
|
5863
|
+
const db = DexieCloudDB(dexie);
|
|
5864
|
+
if (purpose === 'pull') {
|
|
5865
|
+
const syncState = db.cloud.persistedSyncState.value;
|
|
5866
|
+
triggerSync(db, purpose);
|
|
5867
|
+
if (wait) {
|
|
5868
|
+
const newSyncState = yield db.cloud.persistedSyncState
|
|
5869
|
+
.pipe(filter((newSyncState) => (newSyncState === null || newSyncState === void 0 ? void 0 : newSyncState.timestamp) != null &&
|
|
5870
|
+
(!syncState || newSyncState.timestamp > syncState.timestamp)), take(1))
|
|
5871
|
+
.toPromise();
|
|
5872
|
+
if (newSyncState === null || newSyncState === void 0 ? void 0 : newSyncState.error) {
|
|
5873
|
+
throw new Error(`Sync error: ` + newSyncState.error);
|
|
5874
|
+
}
|
|
5828
5875
|
}
|
|
5829
5876
|
}
|
|
5830–5846
|
-
(content of removed lines 5830–5846 is not rendered in this diff view apart from a stray ".")
|
|
5877
|
+
else if (yield isSyncNeeded(db)) {
|
|
5878
|
+
const syncState = db.cloud.persistedSyncState.value;
|
|
5879
|
+
triggerSync(db, purpose);
|
|
5880
|
+
if (wait) {
|
|
5881
|
+
console.debug('db.cloud.login() is waiting for sync completion...');
|
|
5882
|
+
yield from$1(liveQuery(() => __awaiter$1(this, void 0, void 0, function* () {
|
|
5883
|
+
const syncNeeded = yield isSyncNeeded(db);
|
|
5884
|
+
const newSyncState = yield db.getPersistedSyncState();
|
|
5885
|
+
if ((newSyncState === null || newSyncState === void 0 ? void 0 : newSyncState.timestamp) !== (syncState === null || syncState === void 0 ? void 0 : syncState.timestamp) &&
|
|
5886
|
+
(newSyncState === null || newSyncState === void 0 ? void 0 : newSyncState.error))
|
|
5887
|
+
throw new Error(`Sync error: ` + newSyncState.error);
|
|
5888
|
+
return syncNeeded;
|
|
5889
|
+
})))
|
|
5890
|
+
.pipe(filter((isNeeded) => !isNeeded), take(1))
|
|
5891
|
+
.toPromise();
|
|
5892
|
+
console.debug('Done waiting for sync completion because we have nothing to push anymore');
|
|
5893
|
+
}
|
|
5847
5894
|
}
|
|
5848
|
-
}
|
|
5895
|
+
});
|
|
5849
5896
|
},
|
|
5850
5897
|
permissions(obj, tableName) {
|
|
5851
5898
|
return permissions(dexie._novip, obj, tableName);
|
|
@@ -5857,7 +5904,8 @@ function dexieCloud(dexie) {
|
|
|
5857
5904
|
return generateKey(dexie.cloud.schema[this.name].idPrefix || '', shardKey);
|
|
5858
5905
|
};
|
|
5859
5906
|
dexie.Table.prototype.idPrefix = function () {
|
|
5860
|
-
|
|
5907
|
+
var _a, _b;
|
|
5908
|
+
return ((_b = (_a = this.db.cloud.schema) === null || _a === void 0 ? void 0 : _a[this.name]) === null || _b === void 0 ? void 0 : _b.idPrefix) || '';
|
|
5861
5909
|
};
|
|
5862
5910
|
dexie.use(createMutationTrackingMiddleware({
|
|
5863
5911
|
currentUserObservable: dexie.cloud.currentUser,
|
|
@@ -5865,163 +5913,167 @@ function dexieCloud(dexie) {
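Most of the churn in the hunk below, as in the rest of this file, is mechanical: the beta.18 bundles appear to be emitted for a lower language target, so await becomes yield inside an __awaiter-wrapped generator, a?.b becomes an (_a = a) === null || _a === void 0 check, and object spread becomes Object.assign. A hand-written approximation of the last two lowerings (illustrative, not taken from the package):

```ts
interface Options { databaseUrl?: string; }
interface Db { options?: Options; }

// Modern syntax, roughly as the library source is written:
function modern(db: Db) {
  const url = db.options?.databaseUrl ?? 'none';
  return { ...db.options, url };
}

// Roughly the shape the down-levelled bundle emits for the same function
// (the real emit uses untyped temporaries, hence `any` here):
function lowered(db: Db) {
  let _a: any, _b: any;
  const url =
    (_b = (_a = db.options) === null || _a === void 0 ? void 0 : _a.databaseUrl) !== null &&
    _b !== void 0
      ? _b
      : 'none';
  return Object.assign(Object.assign({}, db.options), { url });
}

console.log(modern({ options: { databaseUrl: 'https://example' } }));
console.log(lowered({})); // { url: 'none' }
```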
|
|
|
5865
5913
|
}));
|
|
5866
5914
|
dexie.use(createImplicitPropSetterMiddleware(DexieCloudDB(dexie)));
|
|
5867
5915
|
dexie.use(createIdGenerationMiddleware(DexieCloudDB(dexie)));
|
|
5868
|
-
|
|
5869
|
-
|
|
5870
|
-
|
|
5871
|
-
|
|
5872
|
-
|
|
5873
|
-
|
|
5874
|
-
|
|
5875
|
-
|
|
5876
|
-
|
|
5877
|
-
|
|
5878
|
-
|
|
5879
|
-
// Verify the user has allowed version increment.
|
|
5880
|
-
if (!db.tables.every((table) => table.core)) {
|
|
5881
|
-
throwVersionIncrementNeeded();
|
|
5882
|
-
}
|
|
5883
|
-
const swRegistrations = 'serviceWorker' in navigator
|
|
5884
|
-
? await navigator.serviceWorker.getRegistrations()
|
|
5885
|
-
: [];
|
|
5886
|
-
const initiallySynced = await db.transaction('rw', db.$syncState, async () => {
|
|
5887
|
-
const { options, schema } = db.cloud;
|
|
5888
|
-
const [persistedOptions, persistedSchema, persistedSyncState] = await Promise.all([
|
|
5889
|
-
db.getOptions(),
|
|
5890
|
-
db.getSchema(),
|
|
5891
|
-
db.getPersistedSyncState(),
|
|
5892
|
-
]);
|
|
5893
|
-
if (!configuredProgramatically) {
|
|
5894
|
-
// Options not specified programatically (use case for SW!)
|
|
5895
|
-
// Take persisted options:
|
|
5896
|
-
db.cloud.options = persistedOptions || null;
|
|
5897
|
-
}
|
|
5898
|
-
else if (!persistedOptions ||
|
|
5899
|
-
JSON.stringify(persistedOptions) !== JSON.stringify(options)) {
|
|
5900
|
-
// Update persisted options:
|
|
5901
|
-
if (!options)
|
|
5902
|
-
throw new Error(`Internal error`); // options cannot be null if configuredProgramatically is set.
|
|
5903
|
-
await db.$syncState.put(options, 'options');
|
|
5904
|
-
}
|
|
5905
|
-
if (db.cloud.options?.tryUseServiceWorker &&
|
|
5906
|
-
'serviceWorker' in navigator &&
|
|
5907
|
-
swRegistrations.length > 0 &&
|
|
5908
|
-
!DISABLE_SERVICEWORKER_STRATEGY) {
|
|
5909
|
-
// * Configured for using service worker if available.
|
|
5910
|
-
// * Browser supports service workers
|
|
5911
|
-
// * There are at least one service worker registration
|
|
5912
|
-
console.debug('Dexie Cloud Addon: Using service worker');
|
|
5913
|
-
db.cloud.usingServiceWorker = true;
|
|
5916
|
+
function onDbReady(dexie) {
|
|
5917
|
+
var _a, _b, _c, _d, _e, _f;
|
|
5918
|
+
return __awaiter$1(this, void 0, void 0, function* () {
|
|
5919
|
+
closed = false; // As Dexie calls us, we are not closed anymore. Maybe reopened? Remember db.ready event is registered with sticky flag!
|
|
5920
|
+
const db = DexieCloudDB(dexie);
|
|
5921
|
+
// Setup default GUI:
|
|
5922
|
+
if (!IS_SERVICE_WORKER) {
|
|
5923
|
+
if (!((_a = db.cloud.options) === null || _a === void 0 ? void 0 : _a.customLoginGui)) {
|
|
5924
|
+
subscriptions.push(setupDefaultGUI(dexie));
|
|
5925
|
+
}
|
|
5926
|
+
subscriptions.push(computeSyncState(db).subscribe(dexie.cloud.syncState));
|
|
5914
5927
|
}
|
|
5915
|
-
|
|
5916
|
-
|
|
5917
|
-
|
|
5918
|
-
|
|
5919
|
-
|
|
5920
|
-
|
|
5921
|
-
|
|
5922
|
-
|
|
5923
|
-
|
|
5924
|
-
|
|
5928
|
+
//verifyConfig(db.cloud.options); Not needed (yet at least!)
|
|
5929
|
+
// Verify the user has allowed version increment.
|
|
5930
|
+
if (!db.tables.every((table) => table.core)) {
|
|
5931
|
+
throwVersionIncrementNeeded();
|
|
5932
|
+
}
|
|
5933
|
+
const swRegistrations = 'serviceWorker' in navigator
|
|
5934
|
+
? yield navigator.serviceWorker.getRegistrations()
|
|
5935
|
+
: [];
|
|
5936
|
+
const initiallySynced = yield db.transaction('rw', db.$syncState, () => __awaiter$1(this, void 0, void 0, function* () {
|
|
5937
|
+
var _g, _h;
|
|
5938
|
+
const { options, schema } = db.cloud;
|
|
5939
|
+
const [persistedOptions, persistedSchema, persistedSyncState] = yield Promise.all([
|
|
5940
|
+
db.getOptions(),
|
|
5941
|
+
db.getSchema(),
|
|
5942
|
+
db.getPersistedSyncState(),
|
|
5943
|
+
]);
|
|
5944
|
+
if (!configuredProgramatically) {
|
|
5945
|
+
// Options not specified programatically (use case for SW!)
|
|
5946
|
+
// Take persisted options:
|
|
5947
|
+
db.cloud.options = persistedOptions || null;
|
|
5948
|
+
}
|
|
5949
|
+
else if (!persistedOptions ||
|
|
5950
|
+
JSON.stringify(persistedOptions) !== JSON.stringify(options)) {
|
|
5951
|
+
// Update persisted options:
|
|
5952
|
+
if (!options)
|
|
5953
|
+
throw new Error(`Internal error`); // options cannot be null if configuredProgramatically is set.
|
|
5954
|
+
yield db.$syncState.put(options, 'options');
|
|
5955
|
+
}
|
|
5956
|
+
if (((_g = db.cloud.options) === null || _g === void 0 ? void 0 : _g.tryUseServiceWorker) &&
|
|
5957
|
+
'serviceWorker' in navigator &&
|
|
5958
|
+
swRegistrations.length > 0 &&
|
|
5959
|
+
!DISABLE_SERVICEWORKER_STRATEGY) {
|
|
5960
|
+
// * Configured for using service worker if available.
|
|
5961
|
+
// * Browser supports service workers
|
|
5962
|
+
// * There are at least one service worker registration
|
|
5963
|
+
console.debug('Dexie Cloud Addon: Using service worker');
|
|
5964
|
+
db.cloud.usingServiceWorker = true;
|
|
5925
5965
|
}
|
|
5926
|
-
|
|
5927
|
-
|
|
5928
|
-
|
|
5929
|
-
|
|
5930
|
-
|
|
5931
|
-
|
|
5932
|
-
|
|
5933
|
-
|
|
5934
|
-
|
|
5935
|
-
|
|
5936
|
-
JSON.stringify(persistedSchema) !== JSON.stringify(schema)) {
|
|
5937
|
-
// Update persisted schema (but don't overwrite table prefixes)
|
|
5938
|
-
const newPersistedSchema = persistedSchema || {};
|
|
5939
|
-
for (const [table, tblSchema] of Object.entries(schema)) {
|
|
5940
|
-
const newTblSchema = newPersistedSchema[table];
|
|
5941
|
-
if (!newTblSchema) {
|
|
5942
|
-
newPersistedSchema[table] = { ...tblSchema };
|
|
5966
|
+
else {
|
|
5967
|
+
// Not configured for using service worker or no service worker
|
|
5968
|
+
// registration exists. Don't rely on service worker to do any job.
|
|
5969
|
+
// Use LocalSyncWorker instead.
|
|
5970
|
+
if (((_h = db.cloud.options) === null || _h === void 0 ? void 0 : _h.tryUseServiceWorker) && !IS_SERVICE_WORKER) {
|
|
5971
|
+
console.debug('dexie-cloud-addon: Not using service worker.', swRegistrations.length === 0
|
|
5972
|
+
? 'No SW registrations found.'
|
|
5973
|
+
: 'serviceWorker' in navigator && DISABLE_SERVICEWORKER_STRATEGY
|
|
5974
|
+
? 'Avoiding SW background sync and SW periodic bg sync for this browser due to browser bugs.'
|
|
5975
|
+
: 'navigator.serviceWorker not present');
|
|
5943
5976
|
}
|
|
5944
|
-
|
|
5945
|
-
|
|
5946
|
-
|
|
5947
|
-
|
|
5977
|
+
db.cloud.usingServiceWorker = false;
|
|
5978
|
+
}
|
|
5979
|
+
updateSchemaFromOptions(schema, db.cloud.options);
|
|
5980
|
+
updateSchemaFromOptions(persistedSchema, db.cloud.options);
|
|
5981
|
+
if (!schema) {
|
|
5982
|
+
// Database opened dynamically (use case for SW!)
|
|
5983
|
+
// Take persisted schema:
|
|
5984
|
+
db.cloud.schema = persistedSchema || null;
|
|
5985
|
+
}
|
|
5986
|
+
else if (!persistedSchema ||
|
|
5987
|
+
JSON.stringify(persistedSchema) !== JSON.stringify(schema)) {
|
|
5988
|
+
// Update persisted schema (but don't overwrite table prefixes)
|
|
5989
|
+
const newPersistedSchema = persistedSchema || {};
|
|
5990
|
+
for (const [table, tblSchema] of Object.entries(schema)) {
|
|
5991
|
+
const newTblSchema = newPersistedSchema[table];
|
|
5992
|
+
if (!newTblSchema) {
|
|
5993
|
+
newPersistedSchema[table] = Object.assign({}, tblSchema);
|
|
5994
|
+
}
|
|
5995
|
+
else {
|
|
5996
|
+
newTblSchema.markedForSync = tblSchema.markedForSync;
|
|
5997
|
+
tblSchema.deleted = newTblSchema.deleted;
|
|
5998
|
+
newTblSchema.generatedGlobalId = tblSchema.generatedGlobalId;
|
|
5999
|
+
}
|
|
5948
6000
|
}
|
|
6001
|
+
yield db.$syncState.put(newPersistedSchema, 'schema');
|
|
6002
|
+
// Make sure persisted table prefixes are being used instead of computed ones:
|
|
6003
|
+
// Let's assign all props as the newPersistedSchems should be what we should be working with.
|
|
6004
|
+
Object.assign(schema, newPersistedSchema);
|
|
5949
6005
|
}
|
|
5950
|
-
|
|
5951
|
-
|
|
5952
|
-
|
|
5953
|
-
|
|
6006
|
+
return persistedSyncState === null || persistedSyncState === void 0 ? void 0 : persistedSyncState.initiallySynced;
|
|
6007
|
+
}));
|
|
6008
|
+
if (initiallySynced) {
|
|
6009
|
+
db.setInitiallySynced(true);
|
|
5954
6010
|
}
|
|
5955–5992
|
-
(content of removed lines 5955–5992 is not rendered in this diff view)
|
|
5993
|
-
db.cloud.schema &&
|
|
5994
|
-
!IS_SERVICE_WORKER) {
|
|
5995
|
-
// There's no SW. Start SyncWorker instead.
|
|
5996
|
-
localSyncWorker = LocalSyncWorker(db, db.cloud.options, db.cloud.schema);
|
|
5997
|
-
localSyncWorker.start();
|
|
5998
|
-
triggerSync(db, 'push');
|
|
5999
|
-
}
|
|
6000
|
-
// Listen to online event and do sync.
|
|
6001
|
-
throwIfClosed();
|
|
6002
|
-
if (!IS_SERVICE_WORKER) {
|
|
6003
|
-
subscriptions.push(fromEvent(self, 'online').subscribe(() => {
|
|
6004
|
-
console.debug('online!');
|
|
6005
|
-
db.syncStateChangedEvent.next({
|
|
6006
|
-
phase: 'not-in-sync',
|
|
6007
|
-
});
|
|
6011
|
+
verifySchema(db);
|
|
6012
|
+
if (((_b = db.cloud.options) === null || _b === void 0 ? void 0 : _b.databaseUrl) && !initiallySynced) {
|
|
6013
|
+
yield performInitialSync(db, db.cloud.options, db.cloud.schema);
|
|
6014
|
+
db.setInitiallySynced(true);
|
|
6015
|
+
}
|
|
6016
|
+
// Manage CurrentUser observable:
|
|
6017
|
+
throwIfClosed();
|
|
6018
|
+
if (!IS_SERVICE_WORKER) {
|
|
6019
|
+
subscriptions.push(liveQuery(() => db.getCurrentUser()).subscribe(currentUserEmitter));
|
|
6020
|
+
// Manage PersistendSyncState observable:
|
|
6021
|
+
subscriptions.push(liveQuery(() => db.getPersistedSyncState()).subscribe(db.cloud.persistedSyncState));
|
|
6022
|
+
// Wait till currentUser and persistedSyncState gets populated
|
|
6023
|
+
// with things from the database and not just the default values.
|
|
6024
|
+
// This is so that when db.open() completes, user should be safe
|
|
6025
|
+
// to subscribe to these observables and get actual data.
|
|
6026
|
+
yield combineLatest([
|
|
6027
|
+
currentUserEmitter.pipe(skip(1), take(1)),
|
|
6028
|
+
db.cloud.persistedSyncState.pipe(skip(1), take(1)),
|
|
6029
|
+
]).toPromise();
|
|
6030
|
+
}
|
|
6031
|
+
// HERE: If requireAuth, do athentication now.
|
|
6032
|
+
if ((_c = db.cloud.options) === null || _c === void 0 ? void 0 : _c.requireAuth) {
|
|
6033
|
+
yield login(db);
|
|
6034
|
+
}
|
|
6035
|
+
if (localSyncWorker)
|
|
6036
|
+
localSyncWorker.stop();
|
|
6037
|
+
localSyncWorker = null;
|
|
6038
|
+
throwIfClosed();
|
|
6039
|
+
if (db.cloud.usingServiceWorker && ((_d = db.cloud.options) === null || _d === void 0 ? void 0 : _d.databaseUrl)) {
|
|
6040
|
+
registerSyncEvent(db, 'push').catch(() => { });
|
|
6041
|
+
registerPeriodicSyncEvent(db).catch(() => { });
|
|
6042
|
+
}
|
|
6043
|
+
else if (((_e = db.cloud.options) === null || _e === void 0 ? void 0 : _e.databaseUrl) &&
|
|
6044
|
+
db.cloud.schema &&
|
|
6045
|
+
!IS_SERVICE_WORKER) {
|
|
6046
|
+
// There's no SW. Start SyncWorker instead.
|
|
6047
|
+
localSyncWorker = LocalSyncWorker(db, db.cloud.options, db.cloud.schema);
|
|
6048
|
+
localSyncWorker.start();
|
|
6008
6049
|
triggerSync(db, 'push');
|
|
6009
|
-
}
|
|
6010
|
-
|
|
6011
|
-
|
|
6012
|
-
|
|
6013
|
-
|
|
6014
|
-
|
|
6015
|
-
|
|
6016
|
-
|
|
6017
|
-
|
|
6018
|
-
|
|
6019
|
-
|
|
6020
|
-
|
|
6021
|
-
|
|
6050
|
+
}
|
|
6051
|
+
// Listen to online event and do sync.
|
|
6052
|
+
throwIfClosed();
|
|
6053
|
+
if (!IS_SERVICE_WORKER) {
|
|
6054
|
+
subscriptions.push(fromEvent(self, 'online').subscribe(() => {
|
|
6055
|
+
console.debug('online!');
|
|
6056
|
+
db.syncStateChangedEvent.next({
|
|
6057
|
+
phase: 'not-in-sync',
|
|
6058
|
+
});
|
|
6059
|
+
triggerSync(db, 'push');
|
|
6060
|
+
}), fromEvent(self, 'offline').subscribe(() => {
|
|
6061
|
+
console.debug('offline!');
|
|
6062
|
+
db.syncStateChangedEvent.next({
|
|
6063
|
+
phase: 'offline',
|
|
6064
|
+
});
|
|
6065
|
+
}));
|
|
6066
|
+
}
|
|
6067
|
+
// Connect WebSocket only if we're a browser window
|
|
6068
|
+
if (typeof window !== 'undefined' &&
|
|
6069
|
+
!IS_SERVICE_WORKER &&
|
|
6070
|
+
((_f = db.cloud.options) === null || _f === void 0 ? void 0 : _f.databaseUrl)) {
|
|
6071
|
+
subscriptions.push(connectWebSocket(db));
|
|
6072
|
+
}
|
|
6073
|
+
});
|
|
6022
6074
|
}
|
|
6023
6075
|
}
|
|
6024
|
-
dexieCloud.version = '4.0.0-beta.
|
|
6076
|
+
dexieCloud.version = '4.0.0-beta.18';
|
|
6025
6077
|
Dexie.Cloud = dexieCloud;
|
|
6026
6078
|
|
|
6027
6079
|
// In case the SW lives for a while, let it reuse already opened connections:
|
|
@@ -6050,55 +6102,58 @@ function syncDB(dbName, purpose) {
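The hunk below moves _syncDB into down-levelled form; it still keeps one open Dexie instance per database name in managedDBs and stops managing it when a versionchange event arrives. A small sketch of that bookkeeping (illustrative, not the addon's code):

```ts
import Dexie from 'dexie';

// Keep one open Dexie instance per database name, and drop it again when some
// other context upgrades the database (versionchange).
const openDbs = new Map<string, Dexie>();

function getDb(dbName: string): Dexie {
  const existing = openDbs.get(dbName);
  if (existing) return existing;
  const instance = new Dexie(dbName);
  instance.on('versionchange', () => {
    if (openDbs.get(dbName) === instance) openDbs.delete(dbName); // avoid races
    instance.close();
    return false; // we handled the event ourselves
  });
  openDbs.set(dbName, instance);
  return instance;
}
```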
|
|
|
6050
6102
|
syncDBSemaphore.set(dbName + '/' + purpose, promise);
|
|
6051
6103
|
}
|
|
6052
6104
|
return promise;
|
|
6053
|
-
|
|
6054
|
-
|
|
6055
|
-
|
|
6056
|
-
|
|
6057
|
-
|
|
6058
|
-
|
|
6059
|
-
|
|
6060
|
-
|
|
6061
|
-
|
|
6062
|
-
//
|
|
6063
|
-
managedDBs.
|
|
6064
|
-
|
|
6065
|
-
|
|
6066
|
-
|
|
6067
|
-
console.error(`Dexie Cloud: No databaseUrl configured`);
|
|
6068
|
-
return; // Nothing to sync.
|
|
6069
|
-
}
|
|
6070
|
-
if (!db.cloud.schema) {
|
|
6071
|
-
console.error(`Dexie Cloud: No schema persisted`);
|
|
6072
|
-
return; // Nothing to sync.
|
|
6073
|
-
}
|
|
6074
|
-
function stopManagingDB() {
|
|
6075
|
-
db.dx.on.versionchange.unsubscribe(stopManagingDB);
|
|
6076
|
-
if (managedDBs.get(db.name) === db) {
|
|
6077
|
-
// Avoid race conditions.
|
|
6078
|
-
managedDBs.delete(db.name);
|
|
6079
|
-
}
|
|
6080
|
-
console.debug(`Dexie Cloud SW: Closing Dexie instance for ${dbName}`);
|
|
6081
|
-
db.dx.close();
|
|
6082
|
-
return false;
|
|
6083
|
-
}
|
|
6084
|
-
try {
|
|
6085
|
-
console.debug('Dexie Cloud SW: Syncing');
|
|
6086
|
-
await syncIfPossible(db, db.cloud.options, db.cloud.schema, {
|
|
6087
|
-
retryImmediatelyOnFetchError: true,
|
|
6088
|
-
purpose,
|
|
6089
|
-
});
|
|
6090
|
-
console.debug('Dexie Cloud SW: Done Syncing');
|
|
6091
|
-
}
|
|
6092
|
-
catch (e) {
|
|
6093
|
-
console.error(`Dexie Cloud SW Error`, e);
|
|
6094
|
-
// Error occured. Stop managing this DB until we wake up again by a sync event,
|
|
6095
|
-
// which will open a new Dexie and start trying to sync it.
|
|
6096
|
-
stopManagingDB();
|
|
6097
|
-
if (e.name !== Dexie.errnames.NoSuchDatabase) {
|
|
6098
|
-
// Unless the error was that DB doesn't exist, rethrow to trigger sync retry.
|
|
6099
|
-
throw e; // Throw e to make syncEvent.waitUntil() receive a rejected promis, so it will retry.
|
|
6105
|
+
function _syncDB(dbName, purpose) {
|
|
6106
|
+
var _a;
|
|
6107
|
+
return __awaiter$1(this, void 0, void 0, function* () {
|
|
6108
|
+
let db = managedDBs.get(dbName);
|
|
6109
|
+
if (!db) {
|
|
6110
|
+
console.debug('Dexie Cloud SW: Creating new Dexie instance for', dbName);
|
|
6111
|
+
const dexie = new Dexie(dbName, { addons: [dexieCloud] });
|
|
6112
|
+
db = DexieCloudDB(dexie);
|
|
6113
|
+
dexie.on('versionchange', stopManagingDB);
|
|
6114
|
+
yield db.dx.open(); // Makes sure db.cloud.options and db.cloud.schema are read from db,
|
|
6115
|
+
if (!managedDBs.get(dbName)) {
|
|
6116
|
+
// Avoid race conditions.
|
|
6117
|
+
managedDBs.set(dbName, db);
|
|
6118
|
+
}
|
|
6100
6119
|
}
|
|
6101
|
-
|
|
6120
|
+
if (!((_a = db.cloud.options) === null || _a === void 0 ? void 0 : _a.databaseUrl)) {
|
|
6121
|
+
console.error(`Dexie Cloud: No databaseUrl configured`);
|
|
6122
|
+
return; // Nothing to sync.
|
|
6123
|
+
}
|
|
6124
|
+
if (!db.cloud.schema) {
|
|
6125
|
+
console.error(`Dexie Cloud: No schema persisted`);
|
|
6126
|
+
return; // Nothing to sync.
|
|
6127
|
+
}
|
|
6128
|
+
function stopManagingDB() {
|
|
6129
|
+
db.dx.on.versionchange.unsubscribe(stopManagingDB);
|
|
6130
|
+
if (managedDBs.get(db.name) === db) {
|
|
6131
|
+
// Avoid race conditions.
|
|
6132
|
+
managedDBs.delete(db.name);
|
|
6133
|
+
}
|
|
6134
|
+
console.debug(`Dexie Cloud SW: Closing Dexie instance for ${dbName}`);
|
|
6135
|
+
db.dx.close();
|
|
6136
|
+
return false;
|
|
6137
|
+
}
|
|
6138
|
+
try {
|
|
6139
|
+
console.debug('Dexie Cloud SW: Syncing');
|
|
6140
|
+
yield syncIfPossible(db, db.cloud.options, db.cloud.schema, {
|
|
6141
|
+
retryImmediatelyOnFetchError: true,
|
|
6142
|
+
purpose,
|
|
6143
|
+
});
|
|
6144
|
+
console.debug('Dexie Cloud SW: Done Syncing');
|
|
6145
|
+
}
|
|
6146
|
+
catch (e) {
|
|
6147
|
+
console.error(`Dexie Cloud SW Error`, e);
|
|
6148
|
+
// Error occured. Stop managing this DB until we wake up again by a sync event,
|
|
6149
|
+
// which will open a new Dexie and start trying to sync it.
|
|
6150
|
+
stopManagingDB();
|
|
6151
|
+
if (e.name !== Dexie.errnames.NoSuchDatabase) {
|
|
6152
|
+
// Unless the error was that DB doesn't exist, rethrow to trigger sync retry.
|
|
6153
|
+
throw e; // Throw e to make syncEvent.waitUntil() receive a rejected promis, so it will retry.
|
|
6154
|
+
}
|
|
6155
|
+
}
|
|
6156
|
+
});
|
|
6102
6157
|
}
|
|
6103
6158
|
}
|
|
6104
6159
|
// Avoid taking care of events if browser bugs out by using dexie cloud from a service worker.
|
|
@@ -6124,12 +6179,12 @@ if (!DISABLE_SERVICEWORKER_STRATEGY) {
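The hunk below keeps the syncAndRetry behaviour: on failure, sleep a minute and retry, giving up after three attempts. The same shape as a standalone helper (hypothetical names):

```ts
const sleep = (ms: number) => new Promise<void>((resolve) => setTimeout(resolve, ms));

async function retryWithDelay<T>(
  work: () => Promise<T>,
  attempts = 3,
  delayMs = 60_000
): Promise<T> {
  for (let attempt = 1; ; attempt++) {
    try {
      return await work();
    } catch (error) {
      if (attempt >= attempts) throw error; // give up after the last attempt
      await sleep(delayMs);
    }
  }
}
```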
|
|
|
6124
6179
|
// Mimic background sync behavior - retry in X minutes on failure.
|
|
6125
6180
|
// But lesser timeout and more number of times.
|
|
6126
6181
|
const syncAndRetry = (num = 1) => {
|
|
6127
|
-
return syncDB(dbName, event.data.purpose || "pull").catch(
|
|
6182
|
+
return syncDB(dbName, event.data.purpose || "pull").catch((e) => __awaiter$1(void 0, void 0, void 0, function* () {
|
|
6128
6183
|
if (num === 3)
|
|
6129
6184
|
throw e;
|
|
6130
|
-
|
|
6185
|
+
yield sleep(60000); // 1 minute
|
|
6131
6186
|
syncAndRetry(num + 1);
|
|
6132
|
-
});
|
|
6187
|
+
}));
|
|
6133
6188
|
};
|
|
6134
6189
|
if ('waitUntil' in event) {
|
|
6135
6190
|
event.waitUntil(syncAndRetry().catch(error => console.error(error)));
|