dexie-cloud-addon 4.3.9 → 4.4.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/modern/DexieCloudAPI.d.ts +17 -0
- package/dist/modern/DexieCloudOptions.d.ts +19 -0
- package/dist/modern/TSON.d.ts +0 -6
- package/dist/modern/db/DexieCloudDB.d.ts +2 -0
- package/dist/modern/db/entities/EntityCommon.d.ts +1 -0
- package/dist/modern/dexie-cloud-addon.js +3776 -2354
- package/dist/modern/dexie-cloud-addon.js.map +1 -1
- package/dist/modern/dexie-cloud-addon.min.js +1 -1
- package/dist/modern/dexie-cloud-addon.min.js.map +1 -1
- package/dist/modern/middlewares/blobResolveMiddleware.d.ts +21 -0
- package/dist/modern/service-worker.js +2195 -773
- package/dist/modern/service-worker.js.map +1 -1
- package/dist/modern/service-worker.min.js +1 -1
- package/dist/modern/service-worker.min.js.map +1 -1
- package/dist/modern/sync/BlobDownloadTracker.d.ts +33 -0
- package/dist/modern/sync/BlobSavingQueue.d.ts +35 -0
- package/dist/modern/sync/blobOffloading.d.ts +38 -0
- package/dist/modern/sync/blobProgress.d.ts +25 -0
- package/dist/modern/sync/blobResolve.d.ts +85 -0
- package/dist/modern/sync/eagerBlobDownloader.d.ts +20 -0
- package/dist/modern/sync/loadCachedAccessToken.d.ts +2 -0
- package/dist/modern/types/DXCAlert.d.ts +6 -0
- package/dist/modern/types/TXExpandos.d.ts +1 -0
- package/dist/umd/dexie-cloud-addon.js +3867 -2445
- package/dist/umd/dexie-cloud-addon.js.map +1 -1
- package/dist/umd/dexie-cloud-addon.min.js +1 -1
- package/dist/umd/dexie-cloud-addon.min.js.map +1 -1
- package/dist/umd/service-worker.js +2330 -908
- package/dist/umd/service-worker.js.map +1 -1
- package/dist/umd/service-worker.min.js +1 -1
- package/dist/umd/service-worker.min.js.map +1 -1
- package/package.json +5 -6
- package/dist/modern/default-ui/AuthProviderButton.d.ts +0 -21
- package/dist/modern/default-ui/ProviderSelectionDialog.d.ts +0 -7
- package/dist/modern/default-ui/SelectDialog.d.ts +0 -10
- package/dist/modern/dexie-cloud-addon.min.js.gz +0 -0
- package/dist/umd/DISABLE_SERVICEWORKER_STRATEGY.d.ts +0 -1
- package/dist/umd/DXCWebSocketStatus.d.ts +0 -1
- package/dist/umd/DexieCloudAPI.d.ts +0 -75
- package/dist/umd/DexieCloudOptions.d.ts +0 -27
- package/dist/umd/DexieCloudSyncOptions.d.ts +0 -4
- package/dist/umd/DexieCloudTable.d.ts +0 -18
- package/dist/umd/InvalidLicenseError.d.ts +0 -5
- package/dist/umd/Invite.d.ts +0 -8
- package/dist/umd/PermissionChecker.d.ts +0 -15
- package/dist/umd/TSON.d.ts +0 -17
- package/dist/umd/WSObservable.d.ts +0 -72
- package/dist/umd/associate.d.ts +0 -1
- package/dist/umd/authentication/AuthPersistedContext.d.ts +0 -9
- package/dist/umd/authentication/TokenErrorResponseError.d.ts +0 -10
- package/dist/umd/authentication/TokenExpiredError.d.ts +0 -3
- package/dist/umd/authentication/UNAUTHORIZED_USER.d.ts +0 -2
- package/dist/umd/authentication/authenticate.d.ts +0 -13
- package/dist/umd/authentication/interactWithUser.d.ts +0 -21
- package/dist/umd/authentication/login.d.ts +0 -3
- package/dist/umd/authentication/logout.d.ts +0 -5
- package/dist/umd/authentication/otpFetchTokenCallback.d.ts +0 -3
- package/dist/umd/authentication/setCurrentUser.d.ts +0 -14
- package/dist/umd/authentication/waitUntil.d.ts +0 -3
- package/dist/umd/computeSyncState.d.ts +0 -4
- package/dist/umd/createSharedValueObservable.d.ts +0 -3
- package/dist/umd/currentUserEmitter.d.ts +0 -3
- package/dist/umd/db/DexieCloudDB.d.ts +0 -61
- package/dist/umd/db/entities/BaseRevisionMapEntry.d.ts +0 -5
- package/dist/umd/db/entities/EntityCommon.d.ts +0 -5
- package/dist/umd/db/entities/GuardedJob.d.ts +0 -5
- package/dist/umd/db/entities/Member.d.ts +0 -19
- package/dist/umd/db/entities/PersistedSyncState.d.ts +0 -22
- package/dist/umd/db/entities/Realm.d.ts +0 -14
- package/dist/umd/db/entities/Role.d.ts +0 -11
- package/dist/umd/db/entities/UserLogin.d.ts +0 -23
- package/dist/umd/default-ui/Dialog.d.ts +0 -5
- package/dist/umd/default-ui/LoginDialog.d.ts +0 -3
- package/dist/umd/default-ui/Styles.d.ts +0 -3
- package/dist/umd/default-ui/index.d.ts +0 -24
- package/dist/umd/define-ydoc-trigger.d.ts +0 -3
- package/dist/umd/dexie-cloud-addon.d.ts +0 -3
- package/dist/umd/dexie-cloud-addon.js.gz +0 -0
- package/dist/umd/dexie-cloud-addon.min.js.gz +0 -0
- package/dist/umd/dexie-cloud-client.d.ts +0 -23
- package/dist/umd/errors/HttpError.d.ts +0 -5
- package/dist/umd/extend-dexie-interface.d.ts +0 -23
- package/dist/umd/getGlobalRolesObservable.d.ts +0 -5
- package/dist/umd/getInternalAccessControlObservable.d.ts +0 -12
- package/dist/umd/getInvitesObservable.d.ts +0 -23
- package/dist/umd/getPermissionsLookupObservable.d.ts +0 -16
- package/dist/umd/getTiedRealmId.d.ts +0 -2
- package/dist/umd/helpers/BroadcastedAndLocalEvent.d.ts +0 -8
- package/dist/umd/helpers/CancelToken.d.ts +0 -4
- package/dist/umd/helpers/IS_SERVICE_WORKER.d.ts +0 -1
- package/dist/umd/helpers/SWBroadcastChannel.d.ts +0 -12
- package/dist/umd/helpers/allSettled.d.ts +0 -1
- package/dist/umd/helpers/bulkUpdate.d.ts +0 -4
- package/dist/umd/helpers/computeRealmSetHash.d.ts +0 -2
- package/dist/umd/helpers/date-constants.d.ts +0 -5
- package/dist/umd/helpers/flatten.d.ts +0 -1
- package/dist/umd/helpers/getMutationTable.d.ts +0 -1
- package/dist/umd/helpers/getSyncableTables.d.ts +0 -4
- package/dist/umd/helpers/getTableFromMutationTable.d.ts +0 -1
- package/dist/umd/helpers/makeArray.d.ts +0 -1
- package/dist/umd/helpers/randomString.d.ts +0 -1
- package/dist/umd/helpers/resolveText.d.ts +0 -16
- package/dist/umd/helpers/throwVersionIncrementNeeded.d.ts +0 -1
- package/dist/umd/helpers/visibilityState.d.ts +0 -1
- package/dist/umd/isEagerSyncDisabled.d.ts +0 -2
- package/dist/umd/isFirefox.d.ts +0 -1
- package/dist/umd/isSafari.d.ts +0 -2
- package/dist/umd/mapValueObservable.d.ts +0 -5
- package/dist/umd/mergePermissions.d.ts +0 -2
- package/dist/umd/middleware-helpers/guardedTable.d.ts +0 -11
- package/dist/umd/middleware-helpers/idGenerationHelpers.d.ts +0 -18
- package/dist/umd/middlewares/createIdGenerationMiddleware.d.ts +0 -3
- package/dist/umd/middlewares/createImplicitPropSetterMiddleware.d.ts +0 -3
- package/dist/umd/middlewares/createMutationTrackingMiddleware.d.ts +0 -17
- package/dist/umd/middlewares/outstandingTransaction.d.ts +0 -4
- package/dist/umd/overrideParseStoresSpec.d.ts +0 -4
- package/dist/umd/performInitialSync.d.ts +0 -4
- package/dist/umd/permissions.d.ts +0 -9
- package/dist/umd/prodLog.d.ts +0 -9
- package/dist/umd/service-worker.d.ts +0 -1
- package/dist/umd/sync/DEXIE_CLOUD_SYNCER_ID.d.ts +0 -1
- package/dist/umd/sync/LocalSyncWorker.d.ts +0 -7
- package/dist/umd/sync/SyncRequiredError.d.ts +0 -3
- package/dist/umd/sync/applyServerChanges.d.ts +0 -3
- package/dist/umd/sync/connectWebSocket.d.ts +0 -2
- package/dist/umd/sync/encodeIdsForServer.d.ts +0 -4
- package/dist/umd/sync/extractRealm.d.ts +0 -2
- package/dist/umd/sync/getLatestRevisionsPerTable.d.ts +0 -6
- package/dist/umd/sync/getTablesToSyncify.d.ts +0 -3
- package/dist/umd/sync/isOnline.d.ts +0 -1
- package/dist/umd/sync/isSyncNeeded.d.ts +0 -2
- package/dist/umd/sync/listClientChanges.d.ts +0 -9
- package/dist/umd/sync/listSyncifiedChanges.d.ts +0 -5
- package/dist/umd/sync/messageConsumerIsReady.d.ts +0 -2
- package/dist/umd/sync/messagesFromServerQueue.d.ts +0 -8
- package/dist/umd/sync/modifyLocalObjectsWithNewUserId.d.ts +0 -4
- package/dist/umd/sync/myId.d.ts +0 -1
- package/dist/umd/sync/numUnsyncedMutations.d.ts +0 -2
- package/dist/umd/sync/old_startSyncingClientChanges.d.ts +0 -39
- package/dist/umd/sync/performGuardedJob.d.ts +0 -2
- package/dist/umd/sync/ratelimit.d.ts +0 -3
- package/dist/umd/sync/registerSyncEvent.d.ts +0 -3
- package/dist/umd/sync/sync.d.ts +0 -15
- package/dist/umd/sync/syncIfPossible.d.ts +0 -5
- package/dist/umd/sync/syncWithServer.d.ts +0 -6
- package/dist/umd/sync/triggerSync.d.ts +0 -2
- package/dist/umd/sync/updateBaseRevs.d.ts +0 -5
- package/dist/umd/types/DXCAlert.d.ts +0 -25
- package/dist/umd/types/DXCInputField.d.ts +0 -11
- package/dist/umd/types/DXCUserInteraction.d.ts +0 -93
- package/dist/umd/types/NewIdOptions.d.ts +0 -3
- package/dist/umd/types/SWMessageEvent.d.ts +0 -3
- package/dist/umd/types/SWSyncEvent.d.ts +0 -4
- package/dist/umd/types/SyncState.d.ts +0 -9
- package/dist/umd/types/TXExpandos.d.ts +0 -11
- package/dist/umd/updateSchemaFromOptions.d.ts +0 -3
- package/dist/umd/userIsActive.d.ts +0 -7
- package/dist/umd/verifyConfig.d.ts +0 -2
- package/dist/umd/verifySchema.d.ts +0 -2
- package/dist/umd/yjs/YDexieCloudSyncState.d.ts +0 -3
- package/dist/umd/yjs/YTable.d.ts +0 -3
- package/dist/umd/yjs/applyYMessages.d.ts +0 -9
- package/dist/umd/yjs/awareness.d.ts +0 -3
- package/dist/umd/yjs/createYClientUpdateObservable.d.ts +0 -4
- package/dist/umd/yjs/createYHandler.d.ts +0 -2
- package/dist/umd/yjs/downloadYDocsFromServer.d.ts +0 -3
- package/dist/umd/yjs/getUpdatesTable.d.ts +0 -3
- package/dist/umd/yjs/listUpdatesSince.d.ts +0 -3
- package/dist/umd/yjs/listYClientMessagesAndStateVector.d.ts +0 -26
- package/dist/umd/yjs/reopenDocSignal.d.ts +0 -10
- package/dist/umd/yjs/updateYSyncStates.d.ts +0 -6
- /package/dist/{umd/authentication/currentUserObservable.d.ts → modern/sync/blobOffloading.test.d.ts} +0 -0
|
@@ -8,7 +8,7 @@
|
|
|
8
8
|
*
|
|
9
9
|
* ==========================================================================
|
|
10
10
|
*
|
|
11
|
-
* Version 4.
|
|
11
|
+
* Version 4.4.1, Thu Mar 19 2026
|
|
12
12
|
*
|
|
13
13
|
* https://dexie.org
|
|
14
14
|
*
|
|
@@ -16,9 +16,9 @@
|
|
|
16
16
|
*
|
|
17
17
|
*/
|
|
18
18
|
|
|
19
|
-
import Dexie, { PropModification, cmp,
|
|
20
|
-
import { Observable, BehaviorSubject, firstValueFrom, Subject, from, filter as filter$1, of, fromEvent, merge, switchMap as switchMap$1, tap as tap$1, mergeMap, Subscription, throwError,
|
|
21
|
-
import { filter, switchMap, delay, distinctUntilChanged,
|
|
19
|
+
import Dexie, { PropModification, cmp, liveQuery, RangeSet } from 'dexie';
|
|
20
|
+
import { Observable, BehaviorSubject, firstValueFrom, Subject, from, combineLatest, timer, filter as filter$1, of, fromEvent, merge, switchMap as switchMap$1, tap as tap$1, mergeMap, Subscription, throwError, map as map$1, share as share$1, startWith as startWith$1 } from 'rxjs';
|
|
21
|
+
import { filter, map, share, switchMap, delay, distinctUntilChanged, tap, take, catchError, debounceTime, startWith, skip } from 'rxjs/operators';
|
|
22
22
|
import { Encoder, writeVarString, writeAny, writeVarUint8Array, writeBigUint64, toUint8Array } from 'lib0/encoding';
|
|
23
23
|
import { Decoder, readVarString, readAny, readVarUint8Array, readBigUint64, hasContent, readUint8 } from 'lib0/decoding';
|
|
24
24
|
import * as Y from 'yjs';
|
|
@@ -325,6 +325,409 @@ function triggerSync(db, purpose) {
|
|
|
325
325
|
}
|
|
326
326
|
}
|
|
327
327
|
|
|
328
|
+
const { toString: toStr } = {};
|
|
329
|
+
function getToStringTag(val) {
|
|
330
|
+
return toStr.call(val).slice(8, -1);
|
|
331
|
+
}
|
|
332
|
+
function escapeDollarProps(value) {
|
|
333
|
+
const keys = Object.keys(value);
|
|
334
|
+
let dollarKeys = null;
|
|
335
|
+
for (let i = 0, l = keys.length; i < l; ++i) {
|
|
336
|
+
if (keys[i][0] === "$") {
|
|
337
|
+
dollarKeys = dollarKeys || [];
|
|
338
|
+
dollarKeys.push(keys[i]);
|
|
339
|
+
}
|
|
340
|
+
}
|
|
341
|
+
if (!dollarKeys)
|
|
342
|
+
return value;
|
|
343
|
+
const clone = { ...value };
|
|
344
|
+
for (const k of dollarKeys) {
|
|
345
|
+
delete clone[k];
|
|
346
|
+
}
|
|
347
|
+
for (const k of dollarKeys) {
|
|
348
|
+
clone["$" + k] = value[k];
|
|
349
|
+
}
|
|
350
|
+
return clone;
|
|
351
|
+
}
|
|
352
|
+
const ObjectDef = {
|
|
353
|
+
replace: escapeDollarProps,
|
|
354
|
+
};
|
|
355
|
+
function TypesonSimplified(...typeDefsInputs) {
|
|
356
|
+
const typeDefs = typeDefsInputs.reduce((p, c) => ({ ...p, ...c }), typeDefsInputs.reduce((p, c) => ({ ...c, ...p }), {}));
|
|
357
|
+
const protoMap = new WeakMap();
|
|
358
|
+
return {
|
|
359
|
+
stringify(value, alternateChannel, space) {
|
|
360
|
+
const json = JSON.stringify(value, function (key) {
|
|
361
|
+
const realVal = this[key];
|
|
362
|
+
const typeDef = getTypeDef(realVal);
|
|
363
|
+
return typeDef
|
|
364
|
+
? typeDef.replace(realVal, alternateChannel, typeDefs)
|
|
365
|
+
: realVal;
|
|
366
|
+
}, space);
|
|
367
|
+
return json;
|
|
368
|
+
},
|
|
369
|
+
parse(tson, alternateChannel) {
|
|
370
|
+
const stack = [];
|
|
371
|
+
return JSON.parse(tson, function (key, value) {
|
|
372
|
+
//
|
|
373
|
+
// Parent Part
|
|
374
|
+
//
|
|
375
|
+
const type = value?.$t;
|
|
376
|
+
if (type) {
|
|
377
|
+
const typeDef = typeDefs[type];
|
|
378
|
+
value = typeDef
|
|
379
|
+
? typeDef.revive(value, alternateChannel, typeDefs)
|
|
380
|
+
: value;
|
|
381
|
+
}
|
|
382
|
+
let top = stack[stack.length - 1];
|
|
383
|
+
if (top && top[0] === value) {
|
|
384
|
+
// Do what the kid told us to
|
|
385
|
+
// Unescape dollar props
|
|
386
|
+
value = { ...value };
|
|
387
|
+
// Delete keys that children wanted us to delete
|
|
388
|
+
for (const k of top[1])
|
|
389
|
+
delete value[k];
|
|
390
|
+
// Set keys that children wanted us to set
|
|
391
|
+
for (const [k, v] of Object.entries(top[2])) {
|
|
392
|
+
value[k] = v;
|
|
393
|
+
}
|
|
394
|
+
stack.pop();
|
|
395
|
+
}
|
|
396
|
+
//
|
|
397
|
+
// Child part
|
|
398
|
+
//
|
|
399
|
+
if (value === undefined || (key[0] === "$" && key !== "$t")) {
|
|
400
|
+
top = stack[stack.length - 1];
|
|
401
|
+
let deletes;
|
|
402
|
+
let mods;
|
|
403
|
+
if (top && top[0] === this) {
|
|
404
|
+
deletes = top[1];
|
|
405
|
+
mods = top[2];
|
|
406
|
+
}
|
|
407
|
+
else {
|
|
408
|
+
stack.push([this, (deletes = []), (mods = {})]);
|
|
409
|
+
}
|
|
410
|
+
if (key[0] === "$" && key !== "$t") {
|
|
411
|
+
// Unescape props (also preserves undefined if this is a combo)
|
|
412
|
+
deletes.push(key);
|
|
413
|
+
mods[key.substr(1)] = value;
|
|
414
|
+
}
|
|
415
|
+
else {
|
|
416
|
+
// Preserve undefined
|
|
417
|
+
mods[key] = undefined;
|
|
418
|
+
}
|
|
419
|
+
}
|
|
420
|
+
return value;
|
|
421
|
+
});
|
|
422
|
+
},
|
|
423
|
+
};
|
|
424
|
+
function getTypeDef(realVal) {
|
|
425
|
+
const type = typeof realVal;
|
|
426
|
+
switch (typeof realVal) {
|
|
427
|
+
case "object":
|
|
428
|
+
case "function": {
|
|
429
|
+
// "object", "function", null
|
|
430
|
+
if (realVal === null)
|
|
431
|
+
return null;
|
|
432
|
+
const proto = Object.getPrototypeOf(realVal);
|
|
433
|
+
if (!proto)
|
|
434
|
+
return ObjectDef;
|
|
435
|
+
let typeDef = protoMap.get(proto);
|
|
436
|
+
if (typeDef !== undefined)
|
|
437
|
+
return typeDef; // Null counts to! So the caching of Array.prototype also counts.
|
|
438
|
+
const toStringTag = getToStringTag(realVal);
|
|
439
|
+
const entry = Object.entries(typeDefs).find(([typeName, typeDef]) => typeDef?.test?.(realVal, toStringTag) ?? typeName === toStringTag);
|
|
440
|
+
typeDef = entry?.[1];
|
|
441
|
+
if (!typeDef) {
|
|
442
|
+
typeDef = Array.isArray(realVal)
|
|
443
|
+
? null
|
|
444
|
+
: typeof realVal === "function"
|
|
445
|
+
? typeDefs.function || null
|
|
446
|
+
: ObjectDef;
|
|
447
|
+
}
|
|
448
|
+
protoMap.set(proto, typeDef);
|
|
449
|
+
return typeDef;
|
|
450
|
+
}
|
|
451
|
+
default:
|
|
452
|
+
return typeDefs[type];
|
|
453
|
+
}
|
|
454
|
+
}
|
|
455
|
+
}
|
|
456
|
+
|
|
457
|
+
class FakeBlob {
|
|
458
|
+
constructor(buf, type) {
|
|
459
|
+
this.buf = buf;
|
|
460
|
+
this.type = type;
|
|
461
|
+
}
|
|
462
|
+
}
|
|
463
|
+
|
|
464
|
+
/**
|
|
465
|
+
* TSONRef - Reference to a blob stored separately from the main data.
|
|
466
|
+
*
|
|
467
|
+
* When TSON parses data containing blob references, it creates TSONRef
|
|
468
|
+
* instances instead of the actual binary data. The client can then
|
|
469
|
+
* resolve these refs asynchronously.
|
|
470
|
+
*
|
|
471
|
+
* @example
|
|
472
|
+
* ```typescript
|
|
473
|
+
* // Configure resolver
|
|
474
|
+
* TSONRef.resolver = async (ref) => {
|
|
475
|
+
* const response = await fetch(`/blob/${ref.ref}`);
|
|
476
|
+
* return response.arrayBuffer();
|
|
477
|
+
* };
|
|
478
|
+
*
|
|
479
|
+
* // After parsing, resolve all refs in an object
|
|
480
|
+
* await resolveAllRefs(data);
|
|
481
|
+
* ```
|
|
482
|
+
*/
|
|
483
|
+
var _a;
|
|
484
|
+
/** Symbol for type checking TSONRef instances */
|
|
485
|
+
const TSON_REF_SYMBOL = Symbol.for('TSONRef');
|
|
486
|
+
/**
|
|
487
|
+
* TSONRef represents a reference to binary data stored as a blob.
|
|
488
|
+
*/
|
|
489
|
+
class TSONRef {
|
|
490
|
+
constructor(
|
|
491
|
+
/** Original TSON type: 'ArrayBuffer', 'Blob', 'Uint8Array', etc */
|
|
492
|
+
type,
|
|
493
|
+
/** Blob reference ID (UUID) */
|
|
494
|
+
ref,
|
|
495
|
+
/** Size in bytes */
|
|
496
|
+
size,
|
|
497
|
+
/** Content-Type (for Blob type) */
|
|
498
|
+
contentType) {
|
|
499
|
+
this.type = type;
|
|
500
|
+
this.ref = ref;
|
|
501
|
+
this.size = size;
|
|
502
|
+
this.contentType = contentType;
|
|
503
|
+
/** Type brand for runtime identification */
|
|
504
|
+
this[_a] = true;
|
|
505
|
+
Object.freeze(this);
|
|
506
|
+
}
|
|
507
|
+
/**
|
|
508
|
+
* Resolve this reference to actual data.
|
|
509
|
+
* Requires TSONRef.resolver to be configured.
|
|
510
|
+
*/
|
|
511
|
+
async resolve() {
|
|
512
|
+
if (!TSONRef.resolver) {
|
|
513
|
+
throw new Error('TSONRef.resolver not configured. ' +
|
|
514
|
+
'Set TSONRef.resolver to a function that fetches blobs.');
|
|
515
|
+
}
|
|
516
|
+
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
|
517
|
+
const data = await TSONRef.resolver(this);
|
|
518
|
+
return this.reconstruct(data);
|
|
519
|
+
}
|
|
520
|
+
/**
|
|
521
|
+
* Reconstruct the original type from ArrayBuffer.
|
|
522
|
+
* Validates byte alignment for TypedArrays that require it.
|
|
523
|
+
*/
|
|
524
|
+
reconstruct(data) {
|
|
525
|
+
// Helper to validate alignment for multi-byte TypedArrays
|
|
526
|
+
const validateAlignment = (bytesPerElement, typeName) => {
|
|
527
|
+
if (data.byteLength % bytesPerElement !== 0) {
|
|
528
|
+
throw new RangeError(`Buffer length ${data.byteLength} is not aligned to ${bytesPerElement} bytes for ${typeName}`);
|
|
529
|
+
}
|
|
530
|
+
};
|
|
531
|
+
switch (this.type) {
|
|
532
|
+
case 'ArrayBuffer':
|
|
533
|
+
return data;
|
|
534
|
+
case 'Uint8Array':
|
|
535
|
+
return new Uint8Array(data);
|
|
536
|
+
case 'Blob':
|
|
537
|
+
return new Blob([data], { type: this.contentType });
|
|
538
|
+
// Handle other TypedArrays with alignment validation
|
|
539
|
+
case 'Int8Array':
|
|
540
|
+
return new Int8Array(data);
|
|
541
|
+
case 'Uint8ClampedArray':
|
|
542
|
+
return new Uint8ClampedArray(data);
|
|
543
|
+
case 'Int16Array':
|
|
544
|
+
validateAlignment(2, 'Int16Array');
|
|
545
|
+
return new Int16Array(data);
|
|
546
|
+
case 'Uint16Array':
|
|
547
|
+
validateAlignment(2, 'Uint16Array');
|
|
548
|
+
return new Uint16Array(data);
|
|
549
|
+
case 'Int32Array':
|
|
550
|
+
validateAlignment(4, 'Int32Array');
|
|
551
|
+
return new Int32Array(data);
|
|
552
|
+
case 'Uint32Array':
|
|
553
|
+
validateAlignment(4, 'Uint32Array');
|
|
554
|
+
return new Uint32Array(data);
|
|
555
|
+
case 'Float32Array':
|
|
556
|
+
validateAlignment(4, 'Float32Array');
|
|
557
|
+
return new Float32Array(data);
|
|
558
|
+
case 'Float64Array':
|
|
559
|
+
validateAlignment(8, 'Float64Array');
|
|
560
|
+
return new Float64Array(data);
|
|
561
|
+
case 'BigInt64Array':
|
|
562
|
+
validateAlignment(8, 'BigInt64Array');
|
|
563
|
+
return new BigInt64Array(data);
|
|
564
|
+
case 'BigUint64Array':
|
|
565
|
+
validateAlignment(8, 'BigUint64Array');
|
|
566
|
+
return new BigUint64Array(data);
|
|
567
|
+
default:
|
|
568
|
+
console.warn(`Unknown TSONRef type: ${this.type}, returning ArrayBuffer`);
|
|
569
|
+
return data;
|
|
570
|
+
}
|
|
571
|
+
}
|
|
572
|
+
/**
|
|
573
|
+
* Check if a value is a TSONRef instance.
|
|
574
|
+
*/
|
|
575
|
+
static isTSONRef(value) {
|
|
576
|
+
return (value !== null &&
|
|
577
|
+
typeof value === 'object' &&
|
|
578
|
+
TSON_REF_SYMBOL in value &&
|
|
579
|
+
value[TSON_REF_SYMBOL] === true);
|
|
580
|
+
}
|
|
581
|
+
/**
|
|
582
|
+
* Check if a value is TSONRef serialized data (has $ref).
|
|
583
|
+
*/
|
|
584
|
+
static isTSONRefData(value) {
|
|
585
|
+
return (value !== null &&
|
|
586
|
+
typeof value === 'object' &&
|
|
587
|
+
'$ref' in value &&
|
|
588
|
+
'$t' in value &&
|
|
589
|
+
'$size' in value);
|
|
590
|
+
}
|
|
591
|
+
/**
|
|
592
|
+
* Create TSONRef from serialized data.
|
|
593
|
+
*/
|
|
594
|
+
static fromData(data) {
|
|
595
|
+
return new TSONRef(data.$t, data.$ref, data.$size, data.$ct);
|
|
596
|
+
}
|
|
597
|
+
/**
|
|
598
|
+
* Serialize to JSON-compatible format.
|
|
599
|
+
*/
|
|
600
|
+
toJSON() {
|
|
601
|
+
const result = {
|
|
602
|
+
$t: this.type,
|
|
603
|
+
$ref: this.ref,
|
|
604
|
+
$size: this.size,
|
|
605
|
+
};
|
|
606
|
+
if (this.contentType) {
|
|
607
|
+
result.$ct = this.contentType;
|
|
608
|
+
}
|
|
609
|
+
return result;
|
|
610
|
+
}
|
|
611
|
+
}
|
|
612
|
+
_a = TSON_REF_SYMBOL;
|
|
613
|
+
/** Symbol for type checking */
|
|
614
|
+
TSONRef.TYPE_SYMBOL = TSON_REF_SYMBOL;
|
|
615
|
+
/** Global resolver function - must be configured before resolving */
|
|
616
|
+
TSONRef.resolver = null;
|
|
617
|
+
|
|
618
|
+
function readBlobSync(b) {
|
|
619
|
+
const req = new XMLHttpRequest();
|
|
620
|
+
req.overrideMimeType("text/plain; charset=x-user-defined");
|
|
621
|
+
const url = URL.createObjectURL(b);
|
|
622
|
+
try {
|
|
623
|
+
req.open("GET", url, false); // Sync
|
|
624
|
+
req.send();
|
|
625
|
+
if (req.status !== 200 && req.status !== 0) {
|
|
626
|
+
throw new Error("Bad Blob access: " + req.status);
|
|
627
|
+
}
|
|
628
|
+
return req.responseText;
|
|
629
|
+
}
|
|
630
|
+
finally {
|
|
631
|
+
URL.revokeObjectURL(url);
|
|
632
|
+
}
|
|
633
|
+
}
|
|
634
|
+
|
|
635
|
+
const numberTypeDef = {
|
|
636
|
+
number: {
|
|
637
|
+
replace: (num) => {
|
|
638
|
+
switch (true) {
|
|
639
|
+
case isNaN(num):
|
|
640
|
+
return { $t: "number", v: "NaN" };
|
|
641
|
+
case num === Infinity:
|
|
642
|
+
return { $t: "number", v: "Infinity" };
|
|
643
|
+
case num === -Infinity:
|
|
644
|
+
return { $t: "number", v: "-Infinity" };
|
|
645
|
+
default:
|
|
646
|
+
return num;
|
|
647
|
+
}
|
|
648
|
+
},
|
|
649
|
+
revive: ({ v }) => Number(v),
|
|
650
|
+
},
|
|
651
|
+
};
|
|
652
|
+
|
|
653
|
+
const dateTypeDef = {
|
|
654
|
+
Date: {
|
|
655
|
+
replace: (date) => ({
|
|
656
|
+
$t: "Date",
|
|
657
|
+
v: isNaN(date.getTime()) ? "NaN" : date.toISOString(),
|
|
658
|
+
}),
|
|
659
|
+
revive: ({ v }) => new Date(v === "NaN" ? NaN : Date.parse(v)),
|
|
660
|
+
},
|
|
661
|
+
};
|
|
662
|
+
|
|
663
|
+
const setTypeDef = {
|
|
664
|
+
Set: {
|
|
665
|
+
replace: (set) => ({
|
|
666
|
+
$t: "Set",
|
|
667
|
+
v: Array.from(set),
|
|
668
|
+
}),
|
|
669
|
+
revive: ({ v }) => new Set(v),
|
|
670
|
+
},
|
|
671
|
+
};
|
|
672
|
+
|
|
673
|
+
const mapTypeDef = {
|
|
674
|
+
Map: {
|
|
675
|
+
replace: (map) => ({
|
|
676
|
+
$t: "Map",
|
|
677
|
+
v: Array.from(map.entries()),
|
|
678
|
+
}),
|
|
679
|
+
revive: ({ v }) => new Map(v),
|
|
680
|
+
},
|
|
681
|
+
};
|
|
682
|
+
|
|
683
|
+
const _global = typeof globalThis !== "undefined" // All modern environments (node, bun, deno, browser, workers, webview etc)
|
|
684
|
+
? globalThis
|
|
685
|
+
: typeof self !== "undefined" // Older browsers, workers, webview, window etc
|
|
686
|
+
? self
|
|
687
|
+
: typeof global !== "undefined" // Older versions of node
|
|
688
|
+
? global
|
|
689
|
+
: undefined; // Unsupported environment. No idea to return 'this' since we are in a module or a function scope anyway.
|
|
690
|
+
|
|
691
|
+
const typedArrayTypeDefs = [
|
|
692
|
+
"Int8Array",
|
|
693
|
+
"Uint8Array",
|
|
694
|
+
"Uint8ClampedArray",
|
|
695
|
+
"Int16Array",
|
|
696
|
+
"Uint16Array",
|
|
697
|
+
"Int32Array",
|
|
698
|
+
"Uint32Array",
|
|
699
|
+
"Float32Array",
|
|
700
|
+
"Float64Array",
|
|
701
|
+
"DataView",
|
|
702
|
+
"BigInt64Array",
|
|
703
|
+
"BigUint64Array",
|
|
704
|
+
].reduce((specs, typeName) => ({
|
|
705
|
+
...specs,
|
|
706
|
+
[typeName]: {
|
|
707
|
+
// Replace passes the typed array into $t, buffer so that
|
|
708
|
+
// the ArrayBuffer typedef takes care of further handling of the buffer:
|
|
709
|
+
// {$t:"Uint8Array",buffer:{$t:"ArrayBuffer",idx:0}}
|
|
710
|
+
// CHANGED ABOVE! Now shortcutting that for more sparse format of the typed arrays
|
|
711
|
+
// to contain the b64 property directly.
|
|
712
|
+
replace: (a, _, typeDefs) => {
|
|
713
|
+
const buffer = a.buffer;
|
|
714
|
+
const slicedBuffer = a.byteOffset === 0 && a.byteLength === buffer.byteLength
|
|
715
|
+
? buffer
|
|
716
|
+
: buffer.slice(a.byteOffset, a.byteOffset + a.byteLength);
|
|
717
|
+
const result = {
|
|
718
|
+
$t: typeName,
|
|
719
|
+
v: typeDefs.ArrayBuffer.replace(slicedBuffer, _, typeDefs).v,
|
|
720
|
+
};
|
|
721
|
+
return result;
|
|
722
|
+
},
|
|
723
|
+
revive: ({ v }, _, typeDefs) => {
|
|
724
|
+
const TypedArray = _global[typeName];
|
|
725
|
+
return (TypedArray &&
|
|
726
|
+
new TypedArray(typeDefs.ArrayBuffer.revive({ v }, _, typeDefs)));
|
|
727
|
+
},
|
|
728
|
+
},
|
|
729
|
+
}), {});
|
|
730
|
+
|
|
328
731
|
const hasArrayBufferFromBase64 = "fromBase64" in Uint8Array; // https://github.com/tc39/proposal-arraybuffer-base64;
|
|
329
732
|
const hasArrayBufferToBase64 = "toBase64" in Uint8Array.prototype; // https://github.com/tc39/proposal-arraybuffer-base64;
|
|
330
733
|
const b64decode = typeof Buffer !== "undefined"
|
|
@@ -366,183 +769,261 @@ const b64encode = typeof Buffer !== "undefined"
|
|
|
366
769
|
const strs = [];
|
|
367
770
|
for (let i = 0, l = u8a.length; i < l; i += CHUNK_SIZE) {
|
|
368
771
|
const chunk = u8a.subarray(i, i + CHUNK_SIZE);
|
|
369
|
-
strs.push(String.fromCharCode.apply(null, chunk));
|
|
772
|
+
strs.push(String.fromCharCode.apply(null, Array.from(chunk)));
|
|
370
773
|
}
|
|
371
774
|
return btoa(strs.join(""));
|
|
372
775
|
};
|
|
373
776
|
|
|
374
|
-
function
|
|
375
|
-
return
|
|
376
|
-
const data = JSON.stringify([
|
|
377
|
-
...realms.map((realmId) => ({ realmId, accepted: true })),
|
|
378
|
-
...inviteRealms.map((realmId) => ({ realmId, accepted: false })),
|
|
379
|
-
].sort((a, b) => a.realmId < b.realmId ? -1 : a.realmId > b.realmId ? 1 : 0));
|
|
380
|
-
const byteArray = new TextEncoder().encode(data);
|
|
381
|
-
const digestBytes = yield crypto.subtle.digest('SHA-1', byteArray);
|
|
382
|
-
const base64 = b64encode(digestBytes);
|
|
383
|
-
return base64;
|
|
384
|
-
});
|
|
777
|
+
function b64LexEncode(b) {
|
|
778
|
+
return b64ToLex(b64encode(b));
|
|
385
779
|
}
|
|
386
|
-
|
|
387
|
-
|
|
388
|
-
return Object.entries(db.cloud.schema || {})
|
|
389
|
-
.filter(([, { markedForSync }]) => markedForSync)
|
|
390
|
-
.map(([tbl]) => db.tables.filter(({ name }) => name === tbl)[0])
|
|
391
|
-
.filter(cloudTableSchema => cloudTableSchema);
|
|
780
|
+
function b64LexDecode(b64Lex) {
|
|
781
|
+
return b64decode(lexToB64(b64Lex));
|
|
392
782
|
}
|
|
393
|
-
|
|
394
|
-
|
|
395
|
-
|
|
783
|
+
function b64ToLex(base64) {
|
|
784
|
+
var encoded = "";
|
|
785
|
+
for (var i = 0, length = base64.length; i < length; i++) {
|
|
786
|
+
encoded += ENCODE_TABLE[base64[i]];
|
|
787
|
+
}
|
|
788
|
+
return encoded;
|
|
396
789
|
}
|
|
397
|
-
|
|
398
|
-
|
|
399
|
-
|
|
400
|
-
|
|
401
|
-
|
|
402
|
-
|
|
403
|
-
|
|
790
|
+
function lexToB64(base64lex) {
|
|
791
|
+
// only accept string input
|
|
792
|
+
if (typeof base64lex !== "string") {
|
|
793
|
+
throw new Error("invalid decoder input: " + base64lex);
|
|
794
|
+
}
|
|
795
|
+
var base64 = "";
|
|
796
|
+
for (var i = 0, length = base64lex.length; i < length; i++) {
|
|
797
|
+
base64 += DECODE_TABLE[base64lex[i]];
|
|
798
|
+
}
|
|
799
|
+
return base64;
|
|
404
800
|
}
|
|
405
|
-
|
|
406
|
-
|
|
407
|
-
|
|
408
|
-
|
|
801
|
+
const DECODE_TABLE = {
|
|
802
|
+
"-": "=",
|
|
803
|
+
"0": "A",
|
|
804
|
+
"1": "B",
|
|
805
|
+
"2": "C",
|
|
806
|
+
"3": "D",
|
|
807
|
+
"4": "E",
|
|
808
|
+
"5": "F",
|
|
809
|
+
"6": "G",
|
|
810
|
+
"7": "H",
|
|
811
|
+
"8": "I",
|
|
812
|
+
"9": "J",
|
|
813
|
+
A: "K",
|
|
814
|
+
B: "L",
|
|
815
|
+
C: "M",
|
|
816
|
+
D: "N",
|
|
817
|
+
E: "O",
|
|
818
|
+
F: "P",
|
|
819
|
+
G: "Q",
|
|
820
|
+
H: "R",
|
|
821
|
+
I: "S",
|
|
822
|
+
J: "T",
|
|
823
|
+
K: "U",
|
|
824
|
+
L: "V",
|
|
825
|
+
M: "W",
|
|
826
|
+
N: "X",
|
|
827
|
+
O: "Y",
|
|
828
|
+
P: "Z",
|
|
829
|
+
Q: "a",
|
|
830
|
+
R: "b",
|
|
831
|
+
S: "c",
|
|
832
|
+
T: "d",
|
|
833
|
+
U: "e",
|
|
834
|
+
V: "f",
|
|
835
|
+
W: "g",
|
|
836
|
+
X: "h",
|
|
837
|
+
Y: "i",
|
|
838
|
+
Z: "j",
|
|
839
|
+
_: "k",
|
|
840
|
+
a: "l",
|
|
841
|
+
b: "m",
|
|
842
|
+
c: "n",
|
|
843
|
+
d: "o",
|
|
844
|
+
e: "p",
|
|
845
|
+
f: "q",
|
|
846
|
+
g: "r",
|
|
847
|
+
h: "s",
|
|
848
|
+
i: "t",
|
|
849
|
+
j: "u",
|
|
850
|
+
k: "v",
|
|
851
|
+
l: "w",
|
|
852
|
+
m: "x",
|
|
853
|
+
n: "y",
|
|
854
|
+
o: "z",
|
|
855
|
+
p: "0",
|
|
856
|
+
q: "1",
|
|
857
|
+
r: "2",
|
|
858
|
+
s: "3",
|
|
859
|
+
t: "4",
|
|
860
|
+
u: "5",
|
|
861
|
+
v: "6",
|
|
862
|
+
w: "7",
|
|
863
|
+
x: "8",
|
|
864
|
+
y: "9",
|
|
865
|
+
z: "+",
|
|
866
|
+
"|": "/",
|
|
867
|
+
};
|
|
868
|
+
const ENCODE_TABLE = {};
|
|
869
|
+
for (const c of Object.keys(DECODE_TABLE)) {
|
|
870
|
+
ENCODE_TABLE[DECODE_TABLE[c]] = c;
|
|
409
871
|
}
|
|
410
872
|
|
|
411
|
-
|
|
412
|
-
|
|
413
|
-
|
|
414
|
-
|
|
415
|
-
|
|
416
|
-
|
|
417
|
-
|
|
418
|
-
|
|
419
|
-
|
|
420
|
-
|
|
421
|
-
|
|
422
|
-
|
|
423
|
-
|
|
424
|
-
|
|
425
|
-
|
|
426
|
-
|
|
427
|
-
|
|
428
|
-
|
|
429
|
-
|
|
430
|
-
|
|
431
|
-
const sorted = flatten(allMutsOnTables).sort((a, b) => a.mut.txid === b.mut.txid
|
|
432
|
-
? a.mut.opNo - b.mut.opNo // Within same transaction, sort by opNo
|
|
433
|
-
: a.mut.ts - b.mut.ts // Different transactions - sort by timestamp when mutation resolved
|
|
434
|
-
);
|
|
435
|
-
const result = [];
|
|
436
|
-
let currentEntry = null;
|
|
437
|
-
let currentTxid = null;
|
|
438
|
-
for (const { table, mut } of sorted) {
|
|
439
|
-
if (currentEntry &&
|
|
440
|
-
currentEntry.table === table &&
|
|
441
|
-
currentTxid === mut.txid) {
|
|
442
|
-
currentEntry.muts.push(mut);
|
|
443
|
-
}
|
|
444
|
-
else {
|
|
445
|
-
currentEntry = {
|
|
446
|
-
table,
|
|
447
|
-
muts: [mut],
|
|
448
|
-
};
|
|
449
|
-
currentTxid = mut.txid;
|
|
450
|
-
result.push(currentEntry);
|
|
451
|
-
}
|
|
452
|
-
}
|
|
453
|
-
// Filter out those tables that doesn't have any mutations:
|
|
454
|
-
return result;
|
|
455
|
-
});
|
|
456
|
-
}
|
|
457
|
-
function removeRedundantUpdateOps(muts) {
|
|
458
|
-
const updateCoverage = new Map();
|
|
459
|
-
for (const mut of muts) {
|
|
460
|
-
if (mut.type === 'update') {
|
|
461
|
-
if (mut.keys.length !== 1 || mut.changeSpecs.length !== 1) {
|
|
462
|
-
continue; // Don't optimize multi-key updates
|
|
463
|
-
}
|
|
464
|
-
const strKey = '' + mut.keys[0];
|
|
465
|
-
const changeSpecs = mut.changeSpecs[0];
|
|
466
|
-
if (Object.values(changeSpecs).some(v => typeof v === "object" && v && "@@propmod" in v)) {
|
|
467
|
-
continue; // Cannot optimize if any PropModification is present
|
|
468
|
-
}
|
|
469
|
-
let keyCoverage = updateCoverage.get(strKey);
|
|
470
|
-
if (keyCoverage) {
|
|
471
|
-
keyCoverage.push({ txid: mut.txid, updateSpec: changeSpecs });
|
|
472
|
-
}
|
|
473
|
-
else {
|
|
474
|
-
updateCoverage.set(strKey, [{ txid: mut.txid, updateSpec: changeSpecs }]);
|
|
475
|
-
}
|
|
476
|
-
}
|
|
873
|
+
const arrayBufferTypeDef = {
|
|
874
|
+
ArrayBuffer: {
|
|
875
|
+
replace: (ab) => ({
|
|
876
|
+
$t: "ArrayBuffer",
|
|
877
|
+
v: b64LexEncode(ab),
|
|
878
|
+
}),
|
|
879
|
+
revive: ({ v }) => {
|
|
880
|
+
const ba = b64LexDecode(v);
|
|
881
|
+
const buf = ba.buffer.byteLength === ba.byteLength
|
|
882
|
+
? ba.buffer
|
|
883
|
+
: ba.buffer.slice(ba.byteOffset, ba.byteOffset + ba.byteLength);
|
|
884
|
+
return buf;
|
|
885
|
+
},
|
|
886
|
+
},
|
|
887
|
+
};
|
|
888
|
+
|
|
889
|
+
function string2ArrayBuffer(str) {
|
|
890
|
+
const array = new Uint8Array(str.length);
|
|
891
|
+
for (let i = 0; i < str.length; ++i) {
|
|
892
|
+
array[i] = str.charCodeAt(i); // & 0xff;
|
|
477
893
|
}
|
|
478
|
-
|
|
479
|
-
// Only apply optimization to update mutations that are single-key
|
|
480
|
-
if (mut.type !== 'update')
|
|
481
|
-
return true;
|
|
482
|
-
if (mut.keys.length !== 1 || mut.changeSpecs.length !== 1)
|
|
483
|
-
return true;
|
|
484
|
-
// Check if this has PropModifications - if so, skip optimization
|
|
485
|
-
const changeSpecs = mut.changeSpecs[0];
|
|
486
|
-
if (Object.values(changeSpecs).some(v => typeof v === "object" && v && "@@propmod" in v)) {
|
|
487
|
-
return true; // Cannot optimize if any PropModification is present
|
|
488
|
-
}
|
|
489
|
-
// Keep track of properties that aren't overlapped by later transactions
|
|
490
|
-
const unoverlappedProps = new Set(Object.keys(mut.changeSpecs[0]));
|
|
491
|
-
const strKey = '' + mut.keys[0];
|
|
492
|
-
const keyCoverage = updateCoverage.get(strKey);
|
|
493
|
-
if (!keyCoverage)
|
|
494
|
-
return true; // No coverage info - cannot optimize
|
|
495
|
-
for (let i = keyCoverage.length - 1; i >= 0; --i) {
|
|
496
|
-
const { txid, updateSpec } = keyCoverage[i];
|
|
497
|
-
if (txid === mut.txid)
|
|
498
|
-
break; // Stop when reaching own txid
|
|
499
|
-
// If all changes in updateSpec are covered by all props on all mut.changeSpecs then
|
|
500
|
-
// txid is redundant and can be removed.
|
|
501
|
-
for (const keyPath of Object.keys(updateSpec)) {
|
|
502
|
-
unoverlappedProps.delete(keyPath);
|
|
503
|
-
}
|
|
504
|
-
}
|
|
505
|
-
if (unoverlappedProps.size === 0) {
|
|
506
|
-
// This operation is completely overlapped by later operations. It can be removed.
|
|
507
|
-
return false;
|
|
508
|
-
}
|
|
509
|
-
return true;
|
|
510
|
-
});
|
|
511
|
-
return muts;
|
|
512
|
-
}
|
|
513
|
-
function canonicalizeToUpdateOps(muts) {
|
|
514
|
-
muts = muts.map(mut => {
|
|
515
|
-
if (mut.type === 'modify' && mut.criteria.index === null) {
|
|
516
|
-
// The criteria is on primary key. Convert to an update operation instead.
|
|
517
|
-
// It is simpler for the server to handle and also more efficient.
|
|
518
|
-
const updateMut = Object.assign(Object.assign({}, mut), { criteria: undefined, changeSpec: undefined, type: 'update', keys: mut.keys, changeSpecs: [mut.changeSpec] });
|
|
519
|
-
delete updateMut.criteria;
|
|
520
|
-
delete updateMut.changeSpec;
|
|
521
|
-
return updateMut;
|
|
522
|
-
}
|
|
523
|
-
return mut;
|
|
524
|
-
});
|
|
525
|
-
return muts;
|
|
894
|
+
return array.buffer;
|
|
526
895
|
}
|
|
527
896
|
|
|
528
|
-
|
|
529
|
-
|
|
530
|
-
|
|
531
|
-
|
|
532
|
-
|
|
533
|
-
|
|
534
|
-
|
|
535
|
-
|
|
536
|
-
|
|
537
|
-
|
|
538
|
-
|
|
539
|
-
|
|
540
|
-
|
|
541
|
-
|
|
897
|
+
const blobTypeDef = {
|
|
898
|
+
Blob: {
|
|
899
|
+
test: (blob, toStringTag) => toStringTag === "Blob" || blob instanceof FakeBlob,
|
|
900
|
+
replace: (blob) => ({
|
|
901
|
+
$t: "Blob",
|
|
902
|
+
v: blob instanceof FakeBlob
|
|
903
|
+
? b64encode(blob.buf)
|
|
904
|
+
: b64encode(string2ArrayBuffer(readBlobSync(blob))),
|
|
905
|
+
type: blob.type,
|
|
906
|
+
}),
|
|
907
|
+
revive: ({ type, v }) => {
|
|
908
|
+
const ab = b64decode(v);
|
|
909
|
+
const buf = ab.buffer.byteLength === ab.byteLength
|
|
910
|
+
? ab.buffer
|
|
911
|
+
: ab.buffer.slice(ab.byteOffset, ab.byteOffset + ab.byteLength);
|
|
912
|
+
return typeof Blob !== "undefined"
|
|
913
|
+
? new Blob([new Uint8Array(buf)], { type })
|
|
914
|
+
: new FakeBlob(buf, type);
|
|
915
|
+
},
|
|
916
|
+
},
|
|
917
|
+
};
|
|
918
|
+
|
|
919
|
+
({
|
|
920
|
+
...numberTypeDef,
|
|
921
|
+
...dateTypeDef,
|
|
922
|
+
...setTypeDef,
|
|
923
|
+
...mapTypeDef,
|
|
924
|
+
...typedArrayTypeDefs,
|
|
925
|
+
...arrayBufferTypeDef,
|
|
926
|
+
...blobTypeDef, // Should be moved to another preset for DOM types (or universal? since it supports node as well with FakeBlob)
|
|
927
|
+
});
|
|
928
|
+
|
|
929
|
+
const fileTypeDef = {
|
|
930
|
+
File: {
|
|
931
|
+
test: (file, toStringTag) => toStringTag === "File",
|
|
932
|
+
replace: (file) => ({
|
|
933
|
+
$t: "File",
|
|
934
|
+
v: b64encode(string2ArrayBuffer(readBlobSync(file))),
|
|
935
|
+
type: file.type,
|
|
936
|
+
name: file.name,
|
|
937
|
+
lastModified: new Date(file.lastModified).toISOString(),
|
|
938
|
+
}),
|
|
939
|
+
revive: ({ type, v, name, lastModified }) => {
|
|
940
|
+
const ab = b64decode(v);
|
|
941
|
+
const buf = ab.buffer.byteLength === ab.byteLength
|
|
942
|
+
? ab.buffer
|
|
943
|
+
: ab.buffer.slice(ab.byteOffset, ab.byteOffset + ab.byteLength);
|
|
944
|
+
return new File([new Uint8Array(buf)], name, {
|
|
945
|
+
type,
|
|
946
|
+
lastModified: new Date(lastModified).getTime(),
|
|
947
|
+
});
|
|
948
|
+
},
|
|
949
|
+
},
|
|
950
|
+
};
|
|
951
|
+
|
|
952
|
+
/** The undefined type is not part of builtin but can be manually added.
|
|
953
|
+
* The reason for supporting undefined is if the following object should be revived correctly:
|
|
954
|
+
*
|
|
955
|
+
* {foo: undefined}
|
|
956
|
+
*
|
|
957
|
+
* Without including this typedef, the revived object would just be {}.
|
|
958
|
+
* If including this typedef, the revived object would be {foo: undefined}.
|
|
959
|
+
*/
|
|
960
|
+
const undefinedTypeDef = {
|
|
961
|
+
undefined: {
|
|
962
|
+
replace: () => ({
|
|
963
|
+
$t: "undefined",
|
|
964
|
+
}),
|
|
965
|
+
revive: () => undefined,
|
|
966
|
+
},
|
|
967
|
+
};
|
|
968
|
+
|
|
969
|
+
const getRandomValues = typeof crypto !== "undefined"
|
|
970
|
+
? crypto.getRandomValues.bind(crypto)
|
|
971
|
+
: (buf) => {
|
|
972
|
+
for (let i = 0; i < buf.length; ++i) {
|
|
973
|
+
buf[i] = Math.floor(Math.random() * 256);
|
|
974
|
+
}
|
|
975
|
+
};
|
|
976
|
+
let time$1 = 0;
|
|
977
|
+
/**
|
|
978
|
+
* Generates unique ID where bytes 0-6 represents a timestampish value
|
|
979
|
+
* instead of random, similary to UUID version 1 but with random istead of MAC address.
|
|
980
|
+
*
|
|
981
|
+
* With "timestampish" we mean milliseconds from 1970 approximately, as in bulk-creation
|
|
982
|
+
* scenarios, milliseconds in future will be used (while creating more than 1 id per
|
|
983
|
+
* millisecond)
|
|
984
|
+
*
|
|
985
|
+
* This is similary UUID version 1 but with random instead of Mac, and with
|
|
986
|
+
* support for generating unique IDs the same millisecond.
|
|
987
|
+
*
|
|
988
|
+
* It's even more similar to the "version 6" proposal at
|
|
989
|
+
* https://bradleypeabody.github.io/uuidv6/.
|
|
990
|
+
*
|
|
991
|
+
* Difference from "version 6" proposal is that we keep the clock-sequence within
|
|
992
|
+
* the timestamp part to allow 9 more bits for randomness. This is at the cost of
|
|
993
|
+
* knwoing how exact the time-stamp is. But since we anyway don't expect a perfect
|
|
994
|
+
* time stamps as many clients may have wrong time settings, what we want is just
|
|
995
|
+
* a sorted ID, still universially unique.
|
|
996
|
+
*
|
|
997
|
+
* Random part is totally 73 bits entropy, which basically means that a collisions would
|
|
998
|
+
* be likely if 9 444 732 965 739 290 427 392 devices was generating ids during the exact same
|
|
999
|
+
* millisecond.
|
|
1000
|
+
*
|
|
1001
|
+
*/
|
|
1002
|
+
function newId() {
|
|
1003
|
+
const a = new Uint8Array(18);
|
|
1004
|
+
const timePart = new Uint8Array(a.buffer, 0, 6);
|
|
1005
|
+
const now = Date.now(); // Will fit into 6 bytes until year 10 895.
|
|
1006
|
+
if (time$1 >= now) {
|
|
1007
|
+
// User is bulk-creating objects the same millisecond.
|
|
1008
|
+
// Increment the time part by one millisecond for each item.
|
|
1009
|
+
// If bulk-creating 1,000,000 rows client-side in 0 seconds,
|
|
1010
|
+
// the last time-stamp will be 1,000 seconds in future, which is no biggie at all.
|
|
1011
|
+
// The point is to create a nice order of the generated IDs instead of
|
|
1012
|
+
// using random ids.
|
|
1013
|
+
++time$1;
|
|
542
1014
|
}
|
|
543
1015
|
else {
|
|
544
|
-
|
|
1016
|
+
time$1 = now;
|
|
545
1017
|
}
|
|
1018
|
+
timePart[0] = time$1 / 0x10000000000;
|
|
1019
|
+
timePart[1] = time$1 / 0x100000000;
|
|
1020
|
+
timePart[2] = time$1 / 0x1000000;
|
|
1021
|
+
timePart[3] = time$1 / 0x10000;
|
|
1022
|
+
timePart[4] = time$1 / 0x100;
|
|
1023
|
+
timePart[5] = time$1;
|
|
1024
|
+
const randomPart = new Uint8Array(a.buffer, 6);
|
|
1025
|
+
getRandomValues(randomPart);
|
|
1026
|
+
return b64LexEncode(a);
|
|
546
1027
|
}
|
|
547
1028
|
|
|
548
1029
|
function assert(b) {
|
|
@@ -605,7 +1086,7 @@ function setByKeyPath(obj, keyPath, value) {
|
|
|
605
1086
|
}
|
|
606
1087
|
}
|
|
607
1088
|
}
|
|
608
|
-
const randomString = typeof self !== 'undefined' && typeof crypto !== 'undefined' ? (bytes, randomFill = crypto.getRandomValues.bind(crypto)) => {
|
|
1089
|
+
const randomString$1 = typeof self !== 'undefined' && typeof crypto !== 'undefined' ? (bytes, randomFill = crypto.getRandomValues.bind(crypto)) => {
|
|
609
1090
|
// Web
|
|
610
1091
|
const buf = new Uint8Array(bytes);
|
|
611
1092
|
randomFill(buf);
|
|
@@ -1076,9 +1557,183 @@ function getFetchResponseBodyGenerator(res) {
|
|
|
1076
1557
|
};
|
|
1077
1558
|
}
|
|
1078
1559
|
|
|
1560
|
+
function computeRealmSetHash(_a) {
|
|
1561
|
+
return __awaiter(this, arguments, void 0, function* ({ realms, inviteRealms, }) {
|
|
1562
|
+
const data = JSON.stringify([
|
|
1563
|
+
...realms.map((realmId) => ({ realmId, accepted: true })),
|
|
1564
|
+
...inviteRealms.map((realmId) => ({ realmId, accepted: false })),
|
|
1565
|
+
].sort((a, b) => a.realmId < b.realmId ? -1 : a.realmId > b.realmId ? 1 : 0));
|
|
1566
|
+
const byteArray = new TextEncoder().encode(data);
|
|
1567
|
+
const digestBytes = yield crypto.subtle.digest('SHA-1', byteArray);
|
|
1568
|
+
const base64 = b64encode(digestBytes);
|
|
1569
|
+
return base64;
|
|
1570
|
+
});
|
|
1571
|
+
}
|
|
1572
|
+
|
|
1573
|
+
function getSyncableTables(db) {
|
|
1574
|
+
return Object.entries(db.cloud.schema || {})
|
|
1575
|
+
.filter(([, { markedForSync }]) => markedForSync)
|
|
1576
|
+
.map(([tbl]) => db.tables.find(({ name }) => name === tbl))
|
|
1577
|
+
.filter((syncableTable) => !!syncableTable);
|
|
1578
|
+
}
|
|
1579
|
+
|
|
1580
|
+
function getMutationTable(tableName) {
|
|
1581
|
+
return `$${tableName}_mutations`;
|
|
1582
|
+
}
|
|
1583
|
+
|
|
1584
|
+
function getTableFromMutationTable(mutationTable) {
|
|
1585
|
+
var _a;
|
|
1586
|
+
const tableName = (_a = /^\$(.*)_mutations$/.exec(mutationTable)) === null || _a === void 0 ? void 0 : _a[1];
|
|
1587
|
+
if (!tableName)
|
|
1588
|
+
throw new Error(`Given mutationTable ${mutationTable} is not correct`);
|
|
1589
|
+
return tableName;
|
|
1590
|
+
}
|
|
1591
|
+
|
|
1592
|
+
const concat = [].concat;
|
|
1593
|
+
function flatten(a) {
|
|
1594
|
+
return concat.apply([], a);
|
|
1595
|
+
}
|
|
1596
|
+
|
|
1597
|
+
function listClientChanges(mutationTables_1, db_1) {
|
|
1598
|
+
return __awaiter(this, arguments, void 0, function* (mutationTables, db, { since = {}, limit = Infinity } = {}) {
|
|
1599
|
+
const allMutsOnTables = yield Promise.all(mutationTables.map((mutationTable) => __awaiter(this, void 0, void 0, function* () {
|
|
1600
|
+
const tableName = getTableFromMutationTable(mutationTable.name);
|
|
1601
|
+
const lastRevision = since[tableName];
|
|
1602
|
+
let query = lastRevision
|
|
1603
|
+
? mutationTable.where('rev').above(lastRevision)
|
|
1604
|
+
: mutationTable;
|
|
1605
|
+
if (limit < Infinity)
|
|
1606
|
+
query = query.limit(limit);
|
|
1607
|
+
let muts = yield query.toArray();
|
|
1608
|
+
muts = canonicalizeToUpdateOps(muts);
|
|
1609
|
+
muts = removeRedundantUpdateOps(muts);
|
|
1610
|
+
const rv = muts.map((mut) => ({
|
|
1611
|
+
table: tableName,
|
|
1612
|
+
mut,
|
|
1613
|
+
}));
|
|
1614
|
+
return rv;
|
|
1615
|
+
})));
|
|
1616
|
+
// Sort by time to get a true order of the operations (between tables)
|
|
1617
|
+
const sorted = flatten(allMutsOnTables).sort((a, b) => a.mut.txid === b.mut.txid
|
|
1618
|
+
? a.mut.opNo - b.mut.opNo // Within same transaction, sort by opNo
|
|
1619
|
+
: a.mut.ts - b.mut.ts // Different transactions - sort by timestamp when mutation resolved
|
|
1620
|
+
);
|
|
1621
|
+
const result = [];
|
|
1622
|
+
let currentEntry = null;
|
|
1623
|
+
let currentTxid = null;
|
|
1624
|
+
for (const { table, mut } of sorted) {
|
|
1625
|
+
if (currentEntry &&
|
|
1626
|
+
currentEntry.table === table &&
|
|
1627
|
+
currentTxid === mut.txid) {
|
|
1628
|
+
currentEntry.muts.push(mut);
|
|
1629
|
+
}
|
|
1630
|
+
else {
|
|
1631
|
+
currentEntry = {
|
|
1632
|
+
table,
|
|
1633
|
+
muts: [mut],
|
|
1634
|
+
};
|
|
1635
|
+
currentTxid = mut.txid;
|
|
1636
|
+
result.push(currentEntry);
|
|
1637
|
+
}
|
|
1638
|
+
}
|
|
1639
|
+
// Filter out those tables that doesn't have any mutations:
|
|
1640
|
+
return result;
|
|
1641
|
+
});
|
|
1642
|
+
}
|
|
1643
|
+
function removeRedundantUpdateOps(muts) {
|
|
1644
|
+
const updateCoverage = new Map();
|
|
1645
|
+
for (const mut of muts) {
|
|
1646
|
+
if (mut.type === 'update') {
|
|
1647
|
+
if (mut.keys.length !== 1 || mut.changeSpecs.length !== 1) {
|
|
1648
|
+
continue; // Don't optimize multi-key updates
|
|
1649
|
+
}
|
|
1650
|
+
const strKey = '' + mut.keys[0];
|
|
1651
|
+
const changeSpecs = mut.changeSpecs[0];
|
|
1652
|
+
if (Object.values(changeSpecs).some(v => typeof v === "object" && v && "@@propmod" in v)) {
|
|
1653
|
+
continue; // Cannot optimize if any PropModification is present
|
|
1654
|
+
}
|
|
1655
|
+
let keyCoverage = updateCoverage.get(strKey);
|
|
1656
|
+
if (keyCoverage) {
|
|
1657
|
+
keyCoverage.push({ txid: mut.txid, updateSpec: changeSpecs });
|
|
1658
|
+
}
|
|
1659
|
+
else {
|
|
1660
|
+
updateCoverage.set(strKey, [{ txid: mut.txid, updateSpec: changeSpecs }]);
|
|
1661
|
+
}
|
|
1662
|
+
}
|
|
1663
|
+
}
|
|
1664
|
+
muts = muts.filter(mut => {
|
|
1665
|
+
// Only apply optimization to update mutations that are single-key
|
|
1666
|
+
if (mut.type !== 'update')
|
|
1667
|
+
return true;
|
|
1668
|
+
if (mut.keys.length !== 1 || mut.changeSpecs.length !== 1)
|
|
1669
|
+
return true;
|
|
1670
|
+
// Check if this has PropModifications - if so, skip optimization
|
|
1671
|
+
const changeSpecs = mut.changeSpecs[0];
|
|
1672
|
+
if (Object.values(changeSpecs).some(v => typeof v === "object" && v && "@@propmod" in v)) {
|
|
1673
|
+
return true; // Cannot optimize if any PropModification is present
|
|
1674
|
+
}
|
|
1675
|
+
// Keep track of properties that aren't overlapped by later transactions
|
|
1676
|
+
const unoverlappedProps = new Set(Object.keys(mut.changeSpecs[0]));
|
|
1677
|
+
const strKey = '' + mut.keys[0];
|
|
1678
|
+
const keyCoverage = updateCoverage.get(strKey);
|
|
1679
|
+
if (!keyCoverage)
|
|
1680
|
+
return true; // No coverage info - cannot optimize
|
|
1681
|
+
for (let i = keyCoverage.length - 1; i >= 0; --i) {
|
|
1682
|
+
const { txid, updateSpec } = keyCoverage[i];
|
|
1683
|
+
if (txid === mut.txid)
|
|
1684
|
+
break; // Stop when reaching own txid
|
|
1685
|
+
// If all changes in updateSpec are covered by all props on all mut.changeSpecs then
|
|
1686
|
+
// txid is redundant and can be removed.
|
|
1687
|
+
for (const keyPath of Object.keys(updateSpec)) {
|
|
1688
|
+
unoverlappedProps.delete(keyPath);
|
|
1689
|
+
}
|
|
1690
|
+
}
|
|
1691
|
+
if (unoverlappedProps.size === 0) {
|
|
1692
|
+
// This operation is completely overlapped by later operations. It can be removed.
|
|
1693
|
+
return false;
|
|
1694
|
+
}
|
|
1695
|
+
return true;
|
|
1696
|
+
});
|
|
1697
|
+
return muts;
|
|
1698
|
+
}
|
|
1699
|
+
function canonicalizeToUpdateOps(muts) {
|
|
1700
|
+
muts = muts.map(mut => {
|
|
1701
|
+
if (mut.type === 'modify' && mut.criteria.index === null) {
|
|
1702
|
+
// The criteria is on primary key. Convert to an update operation instead.
|
|
1703
|
+
// It is simpler for the server to handle and also more efficient.
|
|
1704
|
+
const updateMut = Object.assign(Object.assign({}, mut), { criteria: undefined, changeSpec: undefined, type: 'update', keys: mut.keys, changeSpecs: [mut.changeSpec] });
|
|
1705
|
+
delete updateMut.criteria;
|
|
1706
|
+
delete updateMut.changeSpec;
|
|
1707
|
+
return updateMut;
|
|
1708
|
+
}
|
|
1709
|
+
return mut;
|
|
1710
|
+
});
|
|
1711
|
+
return muts;
|
|
1712
|
+
}
|
|
1713
|
+
|
|
1714
|
+
function randomString(bytes) {
|
|
1715
|
+
const buf = new Uint8Array(bytes);
|
|
1716
|
+
if (typeof crypto !== 'undefined') {
|
|
1717
|
+
crypto.getRandomValues(buf);
|
|
1718
|
+
}
|
|
1719
|
+
else {
|
|
1720
|
+
for (let i = 0; i < bytes; i++)
|
|
1721
|
+
buf[i] = Math.floor(Math.random() * 256);
|
|
1722
|
+
}
|
|
1723
|
+
if (typeof Buffer !== 'undefined' && Buffer.from) {
|
|
1724
|
+
return Buffer.from(buf).toString('base64');
|
|
1725
|
+
}
|
|
1726
|
+
else if (typeof btoa !== 'undefined') {
|
|
1727
|
+
return btoa(String.fromCharCode.apply(null, buf));
|
|
1728
|
+
}
|
|
1729
|
+
else {
|
|
1730
|
+
throw new Error('No btoa or Buffer available');
|
|
1731
|
+
}
|
|
1732
|
+
}
|
|
1733
|
+
|
|
1079
1734
|
function listSyncifiedChanges(tablesToSyncify, currentUser, schema, alreadySyncedRealms) {
|
|
1080
1735
|
return __awaiter(this, void 0, void 0, function* () {
|
|
1081
|
-
const txid = `upload-${randomString
|
|
1736
|
+
const txid = `upload-${randomString(8)}`;
|
|
1082
1737
|
if (currentUser.isLoggedIn) {
|
|
1083
1738
|
if (tablesToSyncify.length > 0) {
|
|
1084
1739
|
const ignoredRealms = new Set(alreadySyncedRealms || []);
|
|
@@ -1370,6 +2025,9 @@ class OAuthRedirectError extends Error {
|
|
|
1370
2025
|
}
|
|
1371
2026
|
}
|
|
1372
2027
|
|
|
2028
|
+
const SECONDS = 1000;
|
|
2029
|
+
const MINUTES = 60 * SECONDS;
|
|
2030
|
+
|
|
1373
2031
|
function loadAccessToken(db) {
|
|
1374
2032
|
return __awaiter(this, void 0, void 0, function* () {
|
|
1375
2033
|
var _a, _b, _c;
|
|
@@ -1378,7 +2036,7 @@ function loadAccessToken(db) {
|
|
|
1378
2036
|
if (!accessToken)
|
|
1379
2037
|
return null;
|
|
1380
2038
|
const expTime = (_a = accessTokenExpiration === null || accessTokenExpiration === void 0 ? void 0 : accessTokenExpiration.getTime()) !== null && _a !== void 0 ? _a : Infinity;
|
|
1381
|
-
if (expTime > Date.now() && (((_b = currentUser.license) === null || _b === void 0 ? void 0 : _b.status) || 'ok') === 'ok') {
|
|
2039
|
+
if (expTime > (Date.now() + 5 * MINUTES) && (((_b = currentUser.license) === null || _b === void 0 ? void 0 : _b.status) || 'ok') === 'ok') {
|
|
1382
2040
|
return currentUser;
|
|
1383
2041
|
}
|
|
1384
2042
|
if (!refreshToken) {
|
|
@@ -1526,582 +2184,78 @@ function userAuthenticate(context, fetchToken, userInteraction, hints) {
|
|
|
1526
2184
|
if (response2.userValidUntil != null) {
|
|
1527
2185
|
context.license.validUntil = new Date(response2.userValidUntil);
|
|
1528
2186
|
}
|
|
1529
|
-
if (response2.alerts && response2.alerts.length > 0) {
|
|
1530
|
-
yield interactWithUser(userInteraction, {
|
|
1531
|
-
type: 'message-alert',
|
|
1532
|
-
title: 'Authentication Alert',
|
|
1533
|
-
fields: {},
|
|
1534
|
-
alerts: response2.alerts,
|
|
1535
|
-
});
|
|
1536
|
-
}
|
|
1537
|
-
return context;
|
|
1538
|
-
}
|
|
1539
|
-
catch (error) {
|
|
1540
|
-
// OAuth redirect is not an error - page is navigating away
|
|
1541
|
-
if (error instanceof OAuthRedirectError || (error === null || error === void 0 ? void 0 : error.name) === 'OAuthRedirectError') {
|
|
1542
|
-
throw error; // Re-throw without logging
|
|
1543
|
-
}
|
|
1544
|
-
if (error instanceof TokenErrorResponseError) {
|
|
1545
|
-
yield alertUser(userInteraction, error.title, {
|
|
1546
|
-
type: 'error',
|
|
1547
|
-
messageCode: error.messageCode,
|
|
1548
|
-
message: error.message,
|
|
1549
|
-
messageParams: {},
|
|
1550
|
-
});
|
|
1551
|
-
throw error;
|
|
1552
|
-
}
|
|
1553
|
-
let message = `We're having a problem authenticating right now.`;
|
|
1554
|
-
console.error(`Error authenticating`, error);
|
|
1555
|
-
if (error instanceof TypeError) {
|
|
1556
|
-
const isOffline = typeof navigator !== undefined && !navigator.onLine;
|
|
1557
|
-
if (isOffline) {
|
|
1558
|
-
message = `You seem to be offline. Please connect to the internet and try again.`;
|
|
1559
|
-
}
|
|
1560
|
-
else if (
|
|
1561
|
-
// The audience is most likely the developer. Suggest to whitelist the localhost origin:
|
|
1562
|
-
|
|
1563
|
-
|
|
1564
|
-
|
|
1565
|
-
|
|
1566
|
-
|
|
1567
|
-
|
|
1568
|
-
|
|
1569
|
-
|
|
1570
|
-
|
|
1571
|
-
|
|
1572
|
-
|
|
1573
|
-
|
|
1574
|
-
|
|
1575
|
-
|
|
1576
|
-
|
|
1577
|
-
|
|
1578
|
-
|
|
1579
|
-
|
|
1580
|
-
|
|
1581
|
-
|
|
1582
|
-
|
|
1583
|
-
|
|
1584
|
-
|
|
1585
|
-
while (str.length > 0) {
|
|
1586
|
-
finalString += str.substring(0, 64) + '\n';
|
|
1587
|
-
str = str.substring(64);
|
|
1588
|
-
}
|
|
1589
|
-
finalString = finalString + '-----END PUBLIC KEY-----';
|
|
1590
|
-
return finalString;
|
|
1591
|
-
}
|
|
1592
|
-
|
|
1593
|
-
const { toString: toStr } = {};
|
|
1594
|
-
function getToStringTag(val) {
|
|
1595
|
-
return toStr.call(val).slice(8, -1);
|
|
1596
|
-
}
|
|
1597
|
-
function escapeDollarProps(value) {
|
|
1598
|
-
const keys = Object.keys(value);
|
|
1599
|
-
let dollarKeys = null;
|
|
1600
|
-
for (let i = 0, l = keys.length; i < l; ++i) {
|
|
1601
|
-
if (keys[i][0] === "$") {
|
|
1602
|
-
dollarKeys = dollarKeys || [];
|
|
1603
|
-
dollarKeys.push(keys[i]);
|
|
1604
|
-
}
|
|
1605
|
-
}
|
|
1606
|
-
if (!dollarKeys)
|
|
1607
|
-
return value;
|
|
1608
|
-
const clone = { ...value };
|
|
1609
|
-
for (const k of dollarKeys) {
|
|
1610
|
-
delete clone[k];
|
|
1611
|
-
}
|
|
1612
|
-
for (const k of dollarKeys) {
|
|
1613
|
-
clone["$" + k] = value[k];
|
|
1614
|
-
}
|
|
1615
|
-
return clone;
|
|
1616
|
-
}
|
|
1617
|
-
const ObjectDef = {
|
|
1618
|
-
replace: escapeDollarProps,
|
|
1619
|
-
};
|
|
1620
|
-
function TypesonSimplified(...typeDefsInputs) {
|
|
1621
|
-
const typeDefs = typeDefsInputs.reduce((p, c) => ({ ...p, ...c }), typeDefsInputs.reduce((p, c) => ({ ...c, ...p }), {}));
|
|
1622
|
-
const protoMap = new WeakMap();
|
|
1623
|
-
return {
|
|
1624
|
-
stringify(value, alternateChannel, space) {
|
|
1625
|
-
const json = JSON.stringify(value, function (key) {
|
|
1626
|
-
const realVal = this[key];
|
|
1627
|
-
const typeDef = getTypeDef(realVal);
|
|
1628
|
-
return typeDef
|
|
1629
|
-
? typeDef.replace(realVal, alternateChannel, typeDefs)
|
|
1630
|
-
: realVal;
|
|
1631
|
-
}, space);
|
|
1632
|
-
return json;
|
|
1633
|
-
},
|
|
1634
|
-
parse(tson, alternateChannel) {
|
|
1635
|
-
const stack = [];
|
|
1636
|
-
return JSON.parse(tson, function (key, value) {
|
|
1637
|
-
//
|
|
1638
|
-
// Parent Part
|
|
1639
|
-
//
|
|
1640
|
-
const type = value === null || value === void 0 ? void 0 : value.$t;
|
|
1641
|
-
if (type) {
|
|
1642
|
-
const typeDef = typeDefs[type];
|
|
1643
|
-
value = typeDef
|
|
1644
|
-
? typeDef.revive(value, alternateChannel, typeDefs)
|
|
1645
|
-
: value;
|
|
1646
|
-
}
|
|
1647
|
-
let top = stack[stack.length - 1];
|
|
1648
|
-
if (top && top[0] === value) {
|
|
1649
|
-
// Do what the kid told us to
|
|
1650
|
-
// Unescape dollar props
|
|
1651
|
-
value = { ...value };
|
|
1652
|
-
// Delete keys that children wanted us to delete
|
|
1653
|
-
for (const k of top[1])
|
|
1654
|
-
delete value[k];
|
|
1655
|
-
// Set keys that children wanted us to set
|
|
1656
|
-
for (const [k, v] of Object.entries(top[2])) {
|
|
1657
|
-
value[k] = v;
|
|
1658
|
-
}
|
|
1659
|
-
stack.pop();
|
|
1660
|
-
}
|
|
1661
|
-
//
|
|
1662
|
-
// Child part
|
|
1663
|
-
//
|
|
1664
|
-
if (value === undefined || (key[0] === "$" && key !== "$t")) {
|
|
1665
|
-
top = stack[stack.length - 1];
|
|
1666
|
-
let deletes;
|
|
1667
|
-
let mods;
|
|
1668
|
-
if (top && top[0] === this) {
|
|
1669
|
-
deletes = top[1];
|
|
1670
|
-
mods = top[2];
|
|
1671
|
-
}
|
|
1672
|
-
else {
|
|
1673
|
-
stack.push([this, (deletes = []), (mods = {})]);
|
|
1674
|
-
}
|
|
1675
|
-
if (key[0] === "$" && key !== "$t") {
|
|
1676
|
-
// Unescape props (also preserves undefined if this is a combo)
|
|
1677
|
-
deletes.push(key);
|
|
1678
|
-
mods[key.substr(1)] = value;
|
|
1679
|
-
}
|
|
1680
|
-
else {
|
|
1681
|
-
// Preserve undefined
|
|
1682
|
-
mods[key] = undefined;
|
|
1683
|
-
}
|
|
1684
|
-
}
|
|
1685
|
-
return value;
|
|
1686
|
-
});
|
|
1687
|
-
},
|
|
1688
|
-
};
|
|
1689
|
-
function getTypeDef(realVal) {
|
|
1690
|
-
const type = typeof realVal;
|
|
1691
|
-
switch (typeof realVal) {
|
|
1692
|
-
case "object":
|
|
1693
|
-
case "function": {
|
|
1694
|
-
// "object", "function", null
|
|
1695
|
-
if (realVal === null)
|
|
1696
|
-
return null;
|
|
1697
|
-
const proto = Object.getPrototypeOf(realVal);
|
|
1698
|
-
if (!proto)
|
|
1699
|
-
return ObjectDef;
|
|
1700
|
-
let typeDef = protoMap.get(proto);
|
|
1701
|
-
if (typeDef !== undefined)
|
|
1702
|
-
return typeDef; // Null counts to! So the caching of Array.prototype also counts.
|
|
1703
|
-
const toStringTag = getToStringTag(realVal);
|
|
1704
|
-
const entry = Object.entries(typeDefs).find(([typeName, typeDef]) => { var _a, _b; return (_b = (_a = typeDef === null || typeDef === void 0 ? void 0 : typeDef.test) === null || _a === void 0 ? void 0 : _a.call(typeDef, realVal, toStringTag)) !== null && _b !== void 0 ? _b : typeName === toStringTag; });
|
|
1705
|
-
typeDef = entry === null || entry === void 0 ? void 0 : entry[1];
|
|
1706
|
-
if (!typeDef) {
|
|
1707
|
-
typeDef = Array.isArray(realVal)
|
|
1708
|
-
? null
|
|
1709
|
-
: typeof realVal === "function"
|
|
1710
|
-
? typeDefs.function || null
|
|
1711
|
-
: ObjectDef;
|
|
1712
|
-
}
|
|
1713
|
-
protoMap.set(proto, typeDef);
|
|
1714
|
-
return typeDef;
|
|
1715
|
-
}
|
|
1716
|
-
default:
|
|
1717
|
-
return typeDefs[type];
|
|
1718
|
-
}
|
|
1719
|
-
}
|
|
1720
|
-
}
|
|
1721
|
-
|
|
1722
|
-
const BisonBinaryTypes = {
|
|
1723
|
-
Blob: {
|
|
1724
|
-
test: (blob, toStringTag) => toStringTag === "Blob",
|
|
1725
|
-
replace: (blob, altChannel) => {
|
|
1726
|
-
const i = altChannel.length;
|
|
1727
|
-
altChannel.push(blob);
|
|
1728
|
-
return {
|
|
1729
|
-
$t: "Blob",
|
|
1730
|
-
mimeType: blob.type,
|
|
1731
|
-
i,
|
|
1732
|
-
};
|
|
1733
|
-
},
|
|
1734
|
-
revive: ({ i, mimeType }, altChannel) => new Blob([altChannel[i]], { type: mimeType }),
|
|
1735
|
-
},
|
|
1736
|
-
};
|
|
1737
|
-
|
|
1738
|
-
var numberDef = {
|
|
1739
|
-
number: {
|
|
1740
|
-
replace: (num) => {
|
|
1741
|
-
switch (true) {
|
|
1742
|
-
case isNaN(num):
|
|
1743
|
-
return { $t: "number", v: "NaN" };
|
|
1744
|
-
case num === Infinity:
|
|
1745
|
-
return { $t: "number", v: "Infinity" };
|
|
1746
|
-
case num === -Infinity:
|
|
1747
|
-
return { $t: "number", v: "-Infinity" };
|
|
1748
|
-
default:
|
|
1749
|
-
return num;
|
|
1750
|
-
}
|
|
1751
|
-
},
|
|
1752
|
-
revive: ({ v }) => Number(v),
|
|
1753
|
-
},
|
|
1754
|
-
};
|
|
1755
|
-
|
|
1756
|
-
const bigIntDef$1 = {
|
|
1757
|
-
bigint: {
|
|
1758
|
-
replace: (realVal) => {
|
|
1759
|
-
return { $t: "bigint", v: "" + realVal };
|
|
1760
|
-
},
|
|
1761
|
-
revive: (obj) => BigInt(obj.v),
|
|
1762
|
-
},
|
|
1763
|
-
};
|
|
1764
|
-
|
|
1765
|
-
var DateDef = {
|
|
1766
|
-
Date: {
|
|
1767
|
-
replace: (date) => ({
|
|
1768
|
-
$t: "Date",
|
|
1769
|
-
v: isNaN(date.getTime()) ? "NaN" : date.toISOString(),
|
|
1770
|
-
}),
|
|
1771
|
-
revive: ({ v }) => new Date(v === "NaN" ? NaN : Date.parse(v)),
|
|
1772
|
-
},
|
|
1773
|
-
};
|
|
1774
|
-
|
|
1775
|
-
var SetDef = {
|
|
1776
|
-
Set: {
|
|
1777
|
-
replace: (set) => ({
|
|
1778
|
-
$t: "Set",
|
|
1779
|
-
v: Array.from(set.entries()),
|
|
1780
|
-
}),
|
|
1781
|
-
revive: ({ v }) => new Set(v),
|
|
1782
|
-
},
|
|
1783
|
-
};
|
|
1784
|
-
|
|
1785
|
-
var MapDef = {
|
|
1786
|
-
Map: {
|
|
1787
|
-
replace: (map) => ({
|
|
1788
|
-
$t: "Map",
|
|
1789
|
-
v: Array.from(map.entries()),
|
|
1790
|
-
}),
|
|
1791
|
-
revive: ({ v }) => new Map(v),
|
|
1792
|
-
},
|
|
1793
|
-
};
|
|
1794
|
-
|
|
1795
|
-
const _global = typeof globalThis !== "undefined" // All modern environments (node, bun, deno, browser, workers, webview etc)
|
|
1796
|
-
? globalThis
|
|
1797
|
-
: typeof self !== "undefined" // Older browsers, workers, webview, window etc
|
|
1798
|
-
? self
|
|
1799
|
-
: typeof global !== "undefined" // Older versions of node
|
|
1800
|
-
? global
|
|
1801
|
-
: undefined; // Unsupported environment. No idea to return 'this' since we are in a module or a function scope anyway.
|
|
1802
|
-
|
|
1803
|
-
var TypedArraysDefs = [
|
|
1804
|
-
"Int8Array",
|
|
1805
|
-
"Uint8Array",
|
|
1806
|
-
"Uint8ClampedArray",
|
|
1807
|
-
"Int16Array",
|
|
1808
|
-
"Uint16Array",
|
|
1809
|
-
"Int32Array",
|
|
1810
|
-
"Uint32Array",
|
|
1811
|
-
"Float32Array",
|
|
1812
|
-
"Float64Array",
|
|
1813
|
-
"DataView",
|
|
1814
|
-
"BigInt64Array",
|
|
1815
|
-
"BigUint64Array",
|
|
1816
|
-
].reduce((specs, typeName) => ({
|
|
1817
|
-
...specs,
|
|
1818
|
-
[typeName]: {
|
|
1819
|
-
// Replace passes the the typed array into $t, buffer so that
|
|
1820
|
-
// the ArrayBuffer typedef takes care of further handling of the buffer:
|
|
1821
|
-
// {$t:"Uint8Array",buffer:{$t:"ArrayBuffer",idx:0}}
|
|
1822
|
-
// CHANGED ABOVE! Now shortcutting that for more sparse format of the typed arrays
|
|
1823
|
-
// to contain the b64 property directly.
|
|
1824
|
-
replace: (a, _, typeDefs) => {
|
|
1825
|
-
const result = {
|
|
1826
|
-
$t: typeName,
|
|
1827
|
-
v: typeDefs.ArrayBuffer.replace(a.byteOffset === 0 && a.byteLength === a.buffer.byteLength
|
|
1828
|
-
? a.buffer
|
|
1829
|
-
: a.buffer.slice(a.byteOffset, a.byteOffset + a.byteLength), _, typeDefs).v,
|
|
1830
|
-
};
|
|
1831
|
-
return result;
|
|
1832
|
-
},
|
|
1833
|
-
revive: ({ v }, _, typeDefs) => {
|
|
1834
|
-
const TypedArray = _global[typeName];
|
|
1835
|
-
return (TypedArray &&
|
|
1836
|
-
new TypedArray(typeDefs.ArrayBuffer.revive({ v }, _, typeDefs)));
|
|
1837
|
-
},
|
|
1838
|
-
},
|
|
1839
|
-
}), {});
|
|
1840
|
-
|
|
1841
|
-
function b64LexEncode(b) {
|
|
1842
|
-
return b64ToLex(b64encode(b));
|
|
1843
|
-
}
|
|
1844
|
-
function b64LexDecode(b64Lex) {
|
|
1845
|
-
return b64decode(lexToB64(b64Lex));
|
|
1846
|
-
}
|
|
1847
|
-
function b64ToLex(base64) {
|
|
1848
|
-
var encoded = "";
|
|
1849
|
-
for (var i = 0, length = base64.length; i < length; i++) {
|
|
1850
|
-
encoded += ENCODE_TABLE[base64[i]];
|
|
1851
|
-
}
|
|
1852
|
-
return encoded;
|
|
1853
|
-
}
|
|
1854
|
-
function lexToB64(base64lex) {
|
|
1855
|
-
// only accept string input
|
|
1856
|
-
if (typeof base64lex !== "string") {
|
|
1857
|
-
throw new Error("invalid decoder input: " + base64lex);
|
|
1858
|
-
}
|
|
1859
|
-
var base64 = "";
|
|
1860
|
-
for (var i = 0, length = base64lex.length; i < length; i++) {
|
|
1861
|
-
base64 += DECODE_TABLE[base64lex[i]];
|
|
1862
|
-
}
|
|
1863
|
-
return base64;
|
|
1864
|
-
}
|
|
1865
|
-
const DECODE_TABLE = {
|
|
1866
|
-
"-": "=",
|
|
1867
|
-
"0": "A",
|
|
1868
|
-
"1": "B",
|
|
1869
|
-
"2": "C",
|
|
1870
|
-
"3": "D",
|
|
1871
|
-
"4": "E",
|
|
1872
|
-
"5": "F",
|
|
1873
|
-
"6": "G",
|
|
1874
|
-
"7": "H",
|
|
1875
|
-
"8": "I",
|
|
1876
|
-
"9": "J",
|
|
1877
|
-
A: "K",
|
|
1878
|
-
B: "L",
|
|
1879
|
-
C: "M",
|
|
1880
|
-
D: "N",
|
|
1881
|
-
E: "O",
|
|
1882
|
-
F: "P",
|
|
1883
|
-
G: "Q",
|
|
1884
|
-
H: "R",
|
|
1885
|
-
I: "S",
|
|
1886
|
-
J: "T",
|
|
1887
|
-
K: "U",
|
|
1888
|
-
L: "V",
|
|
1889
|
-
M: "W",
|
|
1890
|
-
N: "X",
|
|
1891
|
-
O: "Y",
|
|
1892
|
-
P: "Z",
|
|
1893
|
-
Q: "a",
|
|
1894
|
-
R: "b",
|
|
1895
|
-
S: "c",
|
|
1896
|
-
T: "d",
|
|
1897
|
-
U: "e",
|
|
1898
|
-
V: "f",
|
|
1899
|
-
W: "g",
|
|
1900
|
-
X: "h",
|
|
1901
|
-
Y: "i",
|
|
1902
|
-
Z: "j",
|
|
1903
|
-
_: "k",
|
|
1904
|
-
a: "l",
|
|
1905
|
-
b: "m",
|
|
1906
|
-
c: "n",
|
|
1907
|
-
d: "o",
|
|
1908
|
-
e: "p",
|
|
1909
|
-
f: "q",
|
|
1910
|
-
g: "r",
|
|
1911
|
-
h: "s",
|
|
1912
|
-
i: "t",
|
|
1913
|
-
j: "u",
|
|
1914
|
-
k: "v",
|
|
1915
|
-
l: "w",
|
|
1916
|
-
m: "x",
|
|
1917
|
-
n: "y",
|
|
1918
|
-
o: "z",
|
|
1919
|
-
p: "0",
|
|
1920
|
-
q: "1",
|
|
1921
|
-
r: "2",
|
|
1922
|
-
s: "3",
|
|
1923
|
-
t: "4",
|
|
1924
|
-
u: "5",
|
|
1925
|
-
v: "6",
|
|
1926
|
-
w: "7",
|
|
1927
|
-
x: "8",
|
|
1928
|
-
y: "9",
|
|
1929
|
-
z: "+",
|
|
1930
|
-
"|": "/",
|
|
1931
|
-
};
|
|
1932
|
-
const ENCODE_TABLE = {};
|
|
1933
|
-
for (const c of Object.keys(DECODE_TABLE)) {
|
|
1934
|
-
ENCODE_TABLE[DECODE_TABLE[c]] = c;
|
|
1935
|
-
}
|
|
1936
|
-
|
|
1937
|
-
var ArrayBufferDef = {
|
|
1938
|
-
ArrayBuffer: {
|
|
1939
|
-
replace: (ab) => ({
|
|
1940
|
-
$t: "ArrayBuffer",
|
|
1941
|
-
v: b64LexEncode(ab),
|
|
1942
|
-
}),
|
|
1943
|
-
revive: ({ v }) => {
|
|
1944
|
-
const ba = b64LexDecode(v);
|
|
1945
|
-
return ba.buffer.byteLength === ba.byteLength
|
|
1946
|
-
? ba.buffer
|
|
1947
|
-
: ba.buffer.slice(ba.byteOffset, ba.byteOffset + ba.byteLength);
|
|
1948
|
-
},
|
|
1949
|
-
},
|
|
1950
|
-
};
|
|
1951
|
-
|
|
1952
|
-
class FakeBlob {
|
|
1953
|
-
constructor(buf, type) {
|
|
1954
|
-
this.buf = buf;
|
|
1955
|
-
this.type = type;
|
|
1956
|
-
}
|
|
2187
|
+
if (response2.alerts && response2.alerts.length > 0) {
|
|
2188
|
+
yield interactWithUser(userInteraction, {
|
|
2189
|
+
type: 'message-alert',
|
|
2190
|
+
title: 'Authentication Alert',
|
|
2191
|
+
fields: {},
|
|
2192
|
+
alerts: response2.alerts,
|
|
2193
|
+
});
|
|
2194
|
+
}
|
|
2195
|
+
return context;
|
|
2196
|
+
}
|
|
2197
|
+
catch (error) {
|
|
2198
|
+
// OAuth redirect is not an error - page is navigating away
|
|
2199
|
+
if (error instanceof OAuthRedirectError || (error === null || error === void 0 ? void 0 : error.name) === 'OAuthRedirectError') {
|
|
2200
|
+
throw error; // Re-throw without logging
|
|
2201
|
+
}
|
|
2202
|
+
if (error instanceof TokenErrorResponseError) {
|
|
2203
|
+
yield alertUser(userInteraction, error.title, {
|
|
2204
|
+
type: 'error',
|
|
2205
|
+
messageCode: error.messageCode,
|
|
2206
|
+
message: error.message,
|
|
2207
|
+
messageParams: {},
|
|
2208
|
+
});
|
|
2209
|
+
throw error;
|
|
2210
|
+
}
|
|
2211
|
+
let message = `We're having a problem authenticating right now.`;
|
|
2212
|
+
console.error(`Error authenticating`, error);
|
|
2213
|
+
if (error instanceof TypeError) {
|
|
2214
|
+
const isOffline = typeof navigator !== 'undefined' && !navigator.onLine;
|
|
2215
|
+
if (isOffline) {
|
|
2216
|
+
message = `You seem to be offline. Please connect to the internet and try again.`;
|
|
2217
|
+
}
|
|
2218
|
+
else if (typeof location !== 'undefined' && (Dexie.debug || location.hostname === 'localhost' || location.hostname === '127.0.0.1')) {
|
|
2219
|
+
// The audience is most likely the developer. Suggest to whitelist the localhost origin:
|
|
2220
|
+
const whitelistCommand = `npx dexie-cloud whitelist ${location.origin}`;
|
|
2221
|
+
message = `Could not connect to server. Please verify that your origin '${location.origin}' is whitelisted using \`npx dexie-cloud whitelist\``;
|
|
2222
|
+
yield alertUser(userInteraction, 'Authentication Failed', {
|
|
2223
|
+
type: 'error',
|
|
2224
|
+
messageCode: 'GENERIC_ERROR',
|
|
2225
|
+
message,
|
|
2226
|
+
messageParams: {},
|
|
2227
|
+
copyText: whitelistCommand,
|
|
2228
|
+
}).catch(() => { });
|
|
2229
|
+
}
|
|
2230
|
+
else {
|
|
2231
|
+
message = `Could not connect to server. Please verify the connection.`;
|
|
2232
|
+
yield alertUser(userInteraction, 'Authentication Failed', {
|
|
2233
|
+
type: 'error',
|
|
2234
|
+
messageCode: 'GENERIC_ERROR',
|
|
2235
|
+
message,
|
|
2236
|
+
messageParams: {},
|
|
2237
|
+
}).catch(() => { });
|
|
2238
|
+
}
|
|
2239
|
+
}
|
|
2240
|
+
throw error;
|
|
2241
|
+
}
|
|
2242
|
+
});
|
|
1957
2243
|
}
|
|
1958
|
-
|
|
1959
|
-
|
|
1960
|
-
const
|
|
1961
|
-
|
|
1962
|
-
req.open("GET", URL.createObjectURL(b), false); // Sync
|
|
1963
|
-
req.send();
|
|
1964
|
-
if (req.status !== 200 && req.status !== 0) {
|
|
1965
|
-
throw new Error("Bad Blob access: " + req.status);
|
|
1966
|
-
}
|
|
1967
|
-
return req.responseText;
|
|
2244
|
+
function spkiToPEM(keydata) {
|
|
2245
|
+
const keydataB64 = b64encode(keydata);
|
|
2246
|
+
const keydataB64Pem = formatAsPem(keydataB64);
|
|
2247
|
+
return keydataB64Pem;
|
|
1968
2248
|
}
|
|
1969
|
-
|
|
1970
|
-
|
|
1971
|
-
|
|
1972
|
-
|
|
1973
|
-
|
|
2249
|
+
function formatAsPem(str) {
|
|
2250
|
+
let finalString = '-----BEGIN PUBLIC KEY-----\n';
|
|
2251
|
+
while (str.length > 0) {
|
|
2252
|
+
finalString += str.substring(0, 64) + '\n';
|
|
2253
|
+
str = str.substring(64);
|
|
1974
2254
|
}
|
|
1975
|
-
|
|
1976
|
-
|
|
1977
|
-
|
|
1978
|
-
var BlobDef = {
|
|
1979
|
-
Blob: {
|
|
1980
|
-
test: (blob, toStringTag) => toStringTag === "Blob" || blob instanceof FakeBlob,
|
|
1981
|
-
replace: (blob) => ({
|
|
1982
|
-
$t: "Blob",
|
|
1983
|
-
v: blob instanceof FakeBlob
|
|
1984
|
-
? b64encode(blob.buf)
|
|
1985
|
-
: b64encode(string2ArrayBuffer(readBlobSync(blob))),
|
|
1986
|
-
type: blob.type,
|
|
1987
|
-
}),
|
|
1988
|
-
revive: ({ type, v }) => {
|
|
1989
|
-
const ab = b64decode(v);
|
|
1990
|
-
return typeof Blob !== undefined
|
|
1991
|
-
? new Blob([ab])
|
|
1992
|
-
: new FakeBlob(ab.buffer, type);
|
|
1993
|
-
},
|
|
1994
|
-
},
|
|
1995
|
-
};
|
|
1996
|
-
|
|
1997
|
-
const builtin = {
|
|
1998
|
-
...numberDef,
|
|
1999
|
-
...bigIntDef$1,
|
|
2000
|
-
...DateDef,
|
|
2001
|
-
...SetDef,
|
|
2002
|
-
...MapDef,
|
|
2003
|
-
...TypedArraysDefs,
|
|
2004
|
-
...ArrayBufferDef,
|
|
2005
|
-
...BlobDef, // Should be moved to another preset for DOM types (or universal? since it supports node as well with FakeBlob)
|
|
2006
|
-
};
|
|
2007
|
-
|
|
2008
|
-
function Bison(...typeDefsInputs) {
|
|
2009
|
-
const tson = TypesonSimplified(builtin, BisonBinaryTypes, ...typeDefsInputs);
|
|
2010
|
-
return {
|
|
2011
|
-
toBinary(value) {
|
|
2012
|
-
const [blob, json] = this.stringify(value);
|
|
2013
|
-
const lenBuf = new ArrayBuffer(4);
|
|
2014
|
-
new DataView(lenBuf).setUint32(0, blob.size);
|
|
2015
|
-
return new Blob([lenBuf, blob, json]);
|
|
2016
|
-
},
|
|
2017
|
-
stringify(value) {
|
|
2018
|
-
const binaries = [];
|
|
2019
|
-
const json = tson.stringify(value, binaries);
|
|
2020
|
-
const blob = new Blob(binaries.map((b) => {
|
|
2021
|
-
const lenBuf = new ArrayBuffer(4);
|
|
2022
|
-
new DataView(lenBuf).setUint32(0, "byteLength" in b ? b.byteLength : b.size);
|
|
2023
|
-
return new Blob([lenBuf, b]);
|
|
2024
|
-
}));
|
|
2025
|
-
return [blob, json];
|
|
2026
|
-
},
|
|
2027
|
-
async parse(json, binData) {
|
|
2028
|
-
let pos = 0;
|
|
2029
|
-
const arrayBuffers = [];
|
|
2030
|
-
const buf = await readBlobBinary(binData);
|
|
2031
|
-
const view = new DataView(buf);
|
|
2032
|
-
while (pos < buf.byteLength) {
|
|
2033
|
-
const len = view.getUint32(pos);
|
|
2034
|
-
pos += 4;
|
|
2035
|
-
const ab = buf.slice(pos, pos + len);
|
|
2036
|
-
pos += len;
|
|
2037
|
-
arrayBuffers.push(ab);
|
|
2038
|
-
}
|
|
2039
|
-
return tson.parse(json, arrayBuffers);
|
|
2040
|
-
},
|
|
2041
|
-
async fromBinary(blob) {
|
|
2042
|
-
const len = new DataView(await readBlobBinary(blob.slice(0, 4))).getUint32(0);
|
|
2043
|
-
const binData = blob.slice(4, len + 4);
|
|
2044
|
-
const json = await readBlob(blob.slice(len + 4));
|
|
2045
|
-
return await this.parse(json, binData);
|
|
2046
|
-
},
|
|
2047
|
-
};
|
|
2048
|
-
}
|
|
2049
|
-
function readBlob(blob) {
|
|
2050
|
-
return new Promise((resolve, reject) => {
|
|
2051
|
-
const reader = new FileReader();
|
|
2052
|
-
reader.onabort = (ev) => reject(new Error("file read aborted"));
|
|
2053
|
-
reader.onerror = (ev) => reject(ev.target.error);
|
|
2054
|
-
reader.onload = (ev) => resolve(ev.target.result);
|
|
2055
|
-
reader.readAsText(blob);
|
|
2056
|
-
});
|
|
2057
|
-
}
|
|
2058
|
-
function readBlobBinary(blob) {
|
|
2059
|
-
return new Promise((resolve, reject) => {
|
|
2060
|
-
const reader = new FileReader();
|
|
2061
|
-
reader.onabort = (ev) => reject(new Error("file read aborted"));
|
|
2062
|
-
reader.onerror = (ev) => reject(ev.target.error);
|
|
2063
|
-
reader.onload = (ev) => resolve(ev.target.result);
|
|
2064
|
-
reader.readAsArrayBuffer(blob);
|
|
2065
|
-
});
|
|
2255
|
+
finalString = finalString + '-----END PUBLIC KEY-----';
|
|
2256
|
+
return finalString;
|
|
2066
2257
|
}
|
|
2067
2258
|
|
|
2068
|
-
/** The undefined type is not part of builtin but can be manually added.
|
|
2069
|
-
* The reason for supporting undefined is if the following object should be revived correctly:
|
|
2070
|
-
*
|
|
2071
|
-
* {foo: undefined}
|
|
2072
|
-
*
|
|
2073
|
-
* Without including this typedef, the revived object would just be {}.
|
|
2074
|
-
* If including this typedef, the revived object would be {foo: undefined}.
|
|
2075
|
-
*/
|
|
2076
|
-
var undefinedDef = {
|
|
2077
|
-
undefined: {
|
|
2078
|
-
replace: () => ({
|
|
2079
|
-
$t: "undefined"
|
|
2080
|
-
}),
|
|
2081
|
-
revive: () => undefined,
|
|
2082
|
-
},
|
|
2083
|
-
};
|
|
2084
|
-
|
|
2085
|
-
var FileDef = {
|
|
2086
|
-
File: {
|
|
2087
|
-
test: (file, toStringTag) => toStringTag === "File",
|
|
2088
|
-
replace: (file) => ({
|
|
2089
|
-
$t: "File",
|
|
2090
|
-
v: b64encode(string2ArrayBuffer(readBlobSync(file))),
|
|
2091
|
-
type: file.type,
|
|
2092
|
-
name: file.name,
|
|
2093
|
-
lastModified: new Date(file.lastModified).toISOString(),
|
|
2094
|
-
}),
|
|
2095
|
-
revive: ({ type, v, name, lastModified }) => {
|
|
2096
|
-
const ab = b64decode(v);
|
|
2097
|
-
return new File([ab], name, {
|
|
2098
|
-
type,
|
|
2099
|
-
lastModified: new Date(lastModified).getTime(),
|
|
2100
|
-
});
|
|
2101
|
-
},
|
|
2102
|
-
},
|
|
2103
|
-
};
|
|
2104
|
-
|
|
2105
2259
|
// Since server revisions are stored in bigints, we need to handle clients without
|
|
2106
2260
|
// bigint support to not fail when serverRevision is passed over to client.
|
|
2107
2261
|
// We need to not fail when reviving it and we need to somehow store the information.
|
|
@@ -2137,7 +2291,7 @@ const bigIntDef = hasBigIntSupport
|
|
|
2137
2291
|
revive: ({ v }) => new FakeBigInt(v),
|
|
2138
2292
|
},
|
|
2139
2293
|
};
|
|
2140
|
-
const defs = Object.assign(Object.assign(Object.assign(Object.assign({},
|
|
2294
|
+
const defs = Object.assign(Object.assign(Object.assign(Object.assign({}, undefinedTypeDef), bigIntDef), fileTypeDef), { PropModification: {
|
|
2141
2295
|
test: (val) => val instanceof PropModification,
|
|
2142
2296
|
replace: (propModification) => {
|
|
2143
2297
|
return Object.assign({ $t: 'PropModification' }, propModification['@@propmod']);
|
|
@@ -2149,8 +2303,14 @@ const defs = Object.assign(Object.assign(Object.assign(Object.assign({}, undefin
|
|
|
2149
2303
|
return new PropModification(propModSpec);
|
|
2150
2304
|
},
|
|
2151
2305
|
} });
|
|
2152
|
-
const TSON = TypesonSimplified(
|
|
2153
|
-
|
|
2306
|
+
const TSON = TypesonSimplified(
|
|
2307
|
+
// Standard type definitions - TSON is transparent to BlobRefs
|
|
2308
|
+
// BlobRefs use _bt convention and are handled by blobResolveMiddleware, not TSON
|
|
2309
|
+
typedArrayTypeDefs, arrayBufferTypeDef, blobTypeDef,
|
|
2310
|
+
// Non-binary built-in types
|
|
2311
|
+
numberTypeDef, dateTypeDef, setTypeDef, mapTypeDef,
|
|
2312
|
+
// Custom type definitions
|
|
2313
|
+
defs);
|
|
2154
2314
|
|
|
2155
2315
|
class HttpError extends Error {
|
|
2156
2316
|
constructor(res, message) {
|
|
@@ -2266,7 +2426,7 @@ function syncWithServer(changes, y, syncState, baseRevs, db, databaseUrl, schema
|
|
|
2266
2426
|
// Push changes to server using fetch
|
|
2267
2427
|
//
|
|
2268
2428
|
const headers = {
|
|
2269
|
-
Accept: 'application/json
|
|
2429
|
+
Accept: 'application/json',
|
|
2270
2430
|
'Content-Type': 'application/tson',
|
|
2271
2431
|
};
|
|
2272
2432
|
const updatedUser = yield loadAccessToken(db);
|
|
@@ -2285,7 +2445,7 @@ function syncWithServer(changes, y, syncState, baseRevs, db, databaseUrl, schema
|
|
|
2285
2445
|
headers.Authorization = `Bearer ${accessToken}`;
|
|
2286
2446
|
}
|
|
2287
2447
|
const syncRequest = {
|
|
2288
|
-
v:
|
|
2448
|
+
v: 3, // v3 = supports BlobRef
|
|
2289
2449
|
dbID: syncState === null || syncState === void 0 ? void 0 : syncState.remoteDbId,
|
|
2290
2450
|
clientIdentity,
|
|
2291
2451
|
schema: schema || {},
|
|
@@ -2323,8 +2483,9 @@ function syncWithServer(changes, y, syncState, baseRevs, db, databaseUrl, schema
|
|
|
2323
2483
|
}
|
|
2324
2484
|
switch (res.headers.get('content-type')) {
|
|
2325
2485
|
case 'application/x-bison':
|
|
2326
|
-
|
|
2327
|
-
|
|
2486
|
+
case 'application/x-bison-stream':
|
|
2487
|
+
// BISON format deprecated - throw error if server sends it
|
|
2488
|
+
throw new Error('BISON format no longer supported. Server should send application/json.');
|
|
2328
2489
|
default:
|
|
2329
2490
|
case 'application/json': {
|
|
2330
2491
|
const text = yield res.text();
|
|
@@ -2442,6 +2603,194 @@ function bulkUpdate(table, keys, changeSpecs) {
|
|
|
2442
2603
|
});
|
|
2443
2604
|
}
|
|
2444
2605
|
|
|
2606
|
+
/**
|
|
2607
|
+
* Check if a value is a BlobRef (offloaded binary data)
|
|
2608
|
+
* A BlobRef has _bt (type), ref (blob ID), but no v (inline data)
|
|
2609
|
+
*/
|
|
2610
|
+
function isBlobRef(value) {
|
|
2611
|
+
if (typeof value !== 'object' || value === null)
|
|
2612
|
+
return false;
|
|
2613
|
+
const obj = value;
|
|
2614
|
+
return (typeof obj._bt === 'string' &&
|
|
2615
|
+
typeof obj.ref === 'string' &&
|
|
2616
|
+
obj.v === undefined // No inline data = it's a reference
|
|
2617
|
+
);
|
|
2618
|
+
}
|
|
2619
|
+
/**
|
|
2620
|
+
* Check if a value is a serialized TSONRef (after IndexedDB storage)
|
|
2621
|
+
* Has 'type' instead of '$t', and no Symbol marker
|
|
2622
|
+
*/
|
|
2623
|
+
function isSerializedTSONRef(value) {
|
|
2624
|
+
if (typeof value !== 'object' || value === null)
|
|
2625
|
+
return false;
|
|
2626
|
+
const obj = value;
|
|
2627
|
+
return (typeof obj.type === 'string' &&
|
|
2628
|
+
typeof obj.ref === 'string' &&
|
|
2629
|
+
typeof obj.size === 'number' &&
|
|
2630
|
+
obj._bt === undefined // Not a raw BlobRef
|
|
2631
|
+
);
|
|
2632
|
+
}
|
|
2633
|
+
/**
|
|
2634
|
+
* Recursively check if an object contains any BlobRefs
|
|
2635
|
+
*/
|
|
2636
|
+
function hasBlobRefs(obj, visited = new WeakSet()) {
|
|
2637
|
+
if (obj === null || obj === undefined) {
|
|
2638
|
+
return false;
|
|
2639
|
+
}
|
|
2640
|
+
if (isBlobRef(obj)) {
|
|
2641
|
+
return true;
|
|
2642
|
+
}
|
|
2643
|
+
if (typeof obj !== 'object') {
|
|
2644
|
+
return false;
|
|
2645
|
+
}
|
|
2646
|
+
// Avoid circular references - check BEFORE processing
|
|
2647
|
+
if (visited.has(obj)) {
|
|
2648
|
+
return false;
|
|
2649
|
+
}
|
|
2650
|
+
visited.add(obj);
|
|
2651
|
+
// Skip special objects that can't contain BlobRefs
|
|
2652
|
+
if (obj instanceof Date || obj instanceof RegExp || obj instanceof Blob) {
|
|
2653
|
+
return false;
|
|
2654
|
+
}
|
|
2655
|
+
if (obj instanceof ArrayBuffer || ArrayBuffer.isView(obj)) {
|
|
2656
|
+
return false;
|
|
2657
|
+
}
|
|
2658
|
+
if (Array.isArray(obj)) {
|
|
2659
|
+
return obj.some(item => hasBlobRefs(item, visited));
|
|
2660
|
+
}
|
|
2661
|
+
// Only traverse POJOs
|
|
2662
|
+
if (obj.constructor === Object) {
|
|
2663
|
+
return Object.values(obj).some(value => hasBlobRefs(value, visited));
|
|
2664
|
+
}
|
|
2665
|
+
return false;
|
|
2666
|
+
}
|
|
2667
|
+
/**
|
|
2668
|
+
* Convert downloaded Uint8Array to the original type specified in BlobRef
|
|
2669
|
+
*/
|
|
2670
|
+
function convertToOriginalType(data, ref) {
|
|
2671
|
+
// String type: decode UTF-8 back to string
|
|
2672
|
+
if (ref._bt === 'string') {
|
|
2673
|
+
return new TextDecoder().decode(data);
|
|
2674
|
+
}
|
|
2675
|
+
// Get the underlying ArrayBuffer (handle shared buffer case)
|
|
2676
|
+
const buffer = data.buffer.byteLength === data.byteLength
|
|
2677
|
+
? data.buffer
|
|
2678
|
+
: data.buffer.slice(data.byteOffset, data.byteOffset + data.byteLength);
|
|
2679
|
+
switch (ref._bt) {
|
|
2680
|
+
case 'Blob':
|
|
2681
|
+
return new Blob([new Uint8Array(buffer)], { type: ref.ct || '' });
|
|
2682
|
+
case 'ArrayBuffer':
|
|
2683
|
+
return buffer;
|
|
2684
|
+
case 'Uint8Array':
|
|
2685
|
+
return data;
|
|
2686
|
+
case 'Int8Array':
|
|
2687
|
+
return new Int8Array(buffer);
|
|
2688
|
+
case 'Uint8ClampedArray':
|
|
2689
|
+
return new Uint8ClampedArray(buffer);
|
|
2690
|
+
case 'Int16Array':
|
|
2691
|
+
return new Int16Array(buffer);
|
|
2692
|
+
case 'Uint16Array':
|
|
2693
|
+
return new Uint16Array(buffer);
|
|
2694
|
+
case 'Int32Array':
|
|
2695
|
+
return new Int32Array(buffer);
|
|
2696
|
+
case 'Uint32Array':
|
|
2697
|
+
return new Uint32Array(buffer);
|
|
2698
|
+
case 'Float32Array':
|
|
2699
|
+
return new Float32Array(buffer);
|
|
2700
|
+
case 'Float64Array':
|
|
2701
|
+
return new Float64Array(buffer);
|
|
2702
|
+
case 'BigInt64Array':
|
|
2703
|
+
return new BigInt64Array(buffer);
|
|
2704
|
+
case 'BigUint64Array':
|
|
2705
|
+
return new BigUint64Array(buffer);
|
|
2706
|
+
case 'DataView':
|
|
2707
|
+
return new DataView(buffer);
|
|
2708
|
+
default:
|
|
2709
|
+
// Fallback to Uint8Array for unknown types
|
|
2710
|
+
return data;
|
|
2711
|
+
}
|
|
2712
|
+
}
|
|
2713
|
+
/**
|
|
2714
|
+
* Recursively resolve all BlobRefs in an object and collect them for queueing.
|
|
2715
|
+
* Returns a new object with BlobRefs replaced by their original type data,
|
|
2716
|
+
* and populates the resolvedBlobs array with keyPath info for each blob.
|
|
2717
|
+
*
|
|
2718
|
+
* @param obj - Object to resolve
|
|
2719
|
+
* @param dbUrl - Base URL for the database
|
|
2720
|
+
* @param accessToken - Access token for blob downloads
|
|
2721
|
+
* @param resolvedBlobs - Array to collect resolved blob info
|
|
2722
|
+
* @param currentPath - Current property path (for tracking)
|
|
2723
|
+
* @param visited - WeakMap for circular reference detection
|
|
2724
|
+
*/
|
|
2725
|
+
function resolveAllBlobRefs(obj_1, dbUrl_1) {
|
|
2726
|
+
return __awaiter(this, arguments, void 0, function* (obj, dbUrl, resolvedBlobs = [], currentPath = '', visited = new WeakMap(), tracker) {
|
|
2727
|
+
if (obj == null) { // null or undefined
|
|
2728
|
+
return obj;
|
|
2729
|
+
}
|
|
2730
|
+
// Check if this is a BlobRef - resolve it and track it
|
|
2731
|
+
if (isBlobRef(obj)) {
|
|
2732
|
+
const rawData = yield tracker.download(obj, dbUrl);
|
|
2733
|
+
const data = convertToOriginalType(rawData, obj);
|
|
2734
|
+
resolvedBlobs.push({ keyPath: currentPath, data, ref: obj.ref });
|
|
2735
|
+
return data;
|
|
2736
|
+
}
|
|
2737
|
+
// Handle arrays
|
|
2738
|
+
if (Array.isArray(obj)) {
|
|
2739
|
+
// Avoid circular references - check and set BEFORE iterating
|
|
2740
|
+
if (visited.has(obj)) {
|
|
2741
|
+
return visited.get(obj);
|
|
2742
|
+
}
|
|
2743
|
+
const result = [];
|
|
2744
|
+
visited.set(obj, result); // Set before iterating to handle self-references
|
|
2745
|
+
for (let i = 0; i < obj.length; i++) {
|
|
2746
|
+
const itemPath = currentPath ? `${currentPath}.${i}` : `${i}`;
|
|
2747
|
+
result.push(yield resolveAllBlobRefs(obj[i], dbUrl, resolvedBlobs, itemPath, visited, tracker));
|
|
2748
|
+
}
|
|
2749
|
+
return result;
|
|
2750
|
+
}
|
|
2751
|
+
// Handle POJO objects only (not Date, RegExp, Blob, ArrayBuffer, etc.)
|
|
2752
|
+
if (typeof obj === 'object' && obj.constructor === Object) {
|
|
2753
|
+
// Avoid circular references
|
|
2754
|
+
if (visited.has(obj)) {
|
|
2755
|
+
return visited.get(obj);
|
|
2756
|
+
}
|
|
2757
|
+
const result = {};
|
|
2758
|
+
visited.set(obj, result);
|
|
2759
|
+
for (const [propName, value] of Object.entries(obj)) {
|
|
2760
|
+
// Skip the _hasBlobRefs marker itself
|
|
2761
|
+
if (propName === '_hasBlobRefs') {
|
|
2762
|
+
continue;
|
|
2763
|
+
}
|
|
2764
|
+
const propPath = currentPath ? `${currentPath}.${propName}` : propName;
|
|
2765
|
+
result[propName] = yield resolveAllBlobRefs(value, dbUrl, resolvedBlobs, propPath, visited, tracker);
|
|
2766
|
+
}
|
|
2767
|
+
return result;
|
|
2768
|
+
}
|
|
2769
|
+
return obj;
|
|
2770
|
+
});
|
|
2771
|
+
}
|
|
2772
|
+
/**
|
|
2773
|
+
* Check if an object has unresolved BlobRefs
|
|
2774
|
+
*/
|
|
2775
|
+
function hasUnresolvedBlobRefs(obj) {
|
|
2776
|
+
return (typeof obj === 'object' &&
|
|
2777
|
+
obj !== null &&
|
|
2778
|
+
obj._hasBlobRefs === 1);
|
|
2779
|
+
}
|
|
2780
|
+
|
|
2781
|
+
/**
|
|
2782
|
+
* If the incoming value contains BlobRefs (e.g. offloaded strings or binaries),
|
|
2783
|
+
* mark it with _hasBlobRefs = 1 so the blobResolveMiddleware will resolve them
|
|
2784
|
+
* on the next read.
|
|
2785
|
+
*/
|
|
2786
|
+
function markIfHasBlobRefs(obj) {
|
|
2787
|
+
if (obj !== null &&
|
|
2788
|
+
typeof obj === 'object' &&
|
|
2789
|
+
obj.constructor === Object &&
|
|
2790
|
+
hasBlobRefs(obj)) {
|
|
2791
|
+
obj._hasBlobRefs = 1;
|
|
2792
|
+
}
|
|
2793
|
+
}
|
|
2445
2794
|
function applyServerChanges(changes, db) {
|
|
2446
2795
|
return __awaiter(this, void 0, void 0, function* () {
|
|
2447
2796
|
console.debug('Applying server changes', changes, Dexie.currentTransaction);
|
|
@@ -2477,6 +2826,7 @@ function applyServerChanges(changes, db) {
|
|
|
2477
2826
|
const keys = mut.keys.map(keyDecoder);
|
|
2478
2827
|
switch (mut.type) {
|
|
2479
2828
|
case 'insert':
|
|
2829
|
+
mut.values.forEach(markIfHasBlobRefs);
|
|
2480
2830
|
if (primaryKey.outbound) {
|
|
2481
2831
|
yield table.bulkAdd(mut.values, keys);
|
|
2482
2832
|
}
|
|
@@ -2489,6 +2839,7 @@ function applyServerChanges(changes, db) {
|
|
|
2489
2839
|
}
|
|
2490
2840
|
break;
|
|
2491
2841
|
case 'upsert':
|
|
2842
|
+
mut.values.forEach(markIfHasBlobRefs);
|
|
2492
2843
|
if (primaryKey.outbound) {
|
|
2493
2844
|
yield table.bulkPut(mut.values, keys);
|
|
2494
2845
|
}
|
|
@@ -2728,12 +3079,346 @@ function applyYServerMessages(yMessages, db) {
|
|
|
2728
3079
|
console.error(`Failed to apply YMessage`, m, e);
|
|
2729
3080
|
}
|
|
2730
3081
|
}
|
|
2731
|
-
return {
|
|
2732
|
-
receivedUntils,
|
|
2733
|
-
resyncNeeded,
|
|
2734
|
-
yServerRevision,
|
|
2735
|
-
};
|
|
2736
|
-
});
|
|
3082
|
+
return {
|
|
3083
|
+
receivedUntils,
|
|
3084
|
+
resyncNeeded,
|
|
3085
|
+
yServerRevision,
|
|
3086
|
+
};
|
|
3087
|
+
});
|
|
3088
|
+
}
|
|
3089
|
+
|
|
3090
|
+
/**
|
|
3091
|
+
* Blob Offloading for Dexie Cloud
|
|
3092
|
+
*
|
|
3093
|
+
* Handles uploading large blobs to blob storage before sync,
|
|
3094
|
+
* and resolving BlobRefs when reading from the database.
|
|
3095
|
+
*/
|
|
3096
|
+
// Blobs >= 4KB are offloaded to blob storage
|
|
3097
|
+
const BLOB_OFFLOAD_THRESHOLD = 4096;
|
|
3098
|
+
// Default max string length before offloading (32KB characters)
|
|
3099
|
+
const DEFAULT_MAX_STRING_LENGTH = 32768;
|
|
3100
|
+
// Cache: once we know the server doesn't support blob storage, skip future uploads.
|
|
3101
|
+
// Maps databaseUrl → boolean (true = supported, false = not supported).
|
|
3102
|
+
const blobEndpointSupported = new Map();
|
|
3103
|
+
/**
|
|
3104
|
+
* Cross-realm type detection helpers (performance-optimized)
|
|
3105
|
+
*
|
|
3106
|
+
* When code runs in different JavaScript realms (e.g., Service Worker context),
|
|
3107
|
+
* `instanceof` checks can fail because each realm has its own global constructors.
|
|
3108
|
+
* We use Object.prototype.toString which works reliably across realms.
|
|
3109
|
+
*
|
|
3110
|
+
* Performance considerations (this is a hot path - every property is checked):
|
|
3111
|
+
* - Early return for primitives via typeof
|
|
3112
|
+
* - Static Set for O(1) TypedArray tag lookup
|
|
3113
|
+
* - Single typeTag call per check
|
|
3114
|
+
*/
|
|
3115
|
+
// TypedArray/DataView tags for size check
|
|
3116
|
+
const ARRAYBUFFER_VIEW_TAGS = new Set([
|
|
3117
|
+
'Int8Array', 'Uint8Array', 'Uint8ClampedArray',
|
|
3118
|
+
'Int16Array', 'Uint16Array', 'Int32Array', 'Uint32Array',
|
|
3119
|
+
'Float32Array', 'Float64Array', 'BigInt64Array', 'BigUint64Array',
|
|
3120
|
+
'DataView'
|
|
3121
|
+
]);
|
|
3122
|
+
// Static Set for O(1) lookup of binary type tags
|
|
3123
|
+
const BINARY_TYPE_TAGS = new Set([
|
|
3124
|
+
'Blob',
|
|
3125
|
+
'File',
|
|
3126
|
+
'ArrayBuffer',
|
|
3127
|
+
...ARRAYBUFFER_VIEW_TAGS,
|
|
3128
|
+
]);
|
|
3129
|
+
/**
|
|
3130
|
+
* Get the [[Class]] internal property via Object.prototype.toString
|
|
3131
|
+
*/
|
|
3132
|
+
function getTypeTag(value) {
|
|
3133
|
+
return Object.prototype.toString.call(value).slice(8, -1);
|
|
3134
|
+
}
|
|
3135
|
+
/**
|
|
3136
|
+
* Get the original type name for a value
|
|
3137
|
+
*/
|
|
3138
|
+
function getOrigType(value) {
|
|
3139
|
+
const tag = getTypeTag(value);
|
|
3140
|
+
if (tag === 'Blob' || tag === 'File')
|
|
3141
|
+
return 'Blob';
|
|
3142
|
+
if (tag === 'ArrayBuffer')
|
|
3143
|
+
return 'ArrayBuffer';
|
|
3144
|
+
return tag;
|
|
3145
|
+
}
|
|
3146
|
+
/**
|
|
3147
|
+
* Check if a value should be offloaded to blob storage
|
|
3148
|
+
* Performance-optimized for hot path traversal.
|
|
3149
|
+
*/
|
|
3150
|
+
function shouldOffloadBlob(value) {
|
|
3151
|
+
// Fast path: primitives (most common case)
|
|
3152
|
+
// typeof returns: "string", "number", "boolean", "undefined", "symbol", "bigint", "function", "object"
|
|
3153
|
+
const t = typeof value;
|
|
3154
|
+
if (t !== 'object' || value === null)
|
|
3155
|
+
return false;
|
|
3156
|
+
// Get type tag once (cross-realm safe)
|
|
3157
|
+
const tag = getTypeTag(value);
|
|
3158
|
+
// Quick check: is this even a binary type?
|
|
3159
|
+
if (!BINARY_TYPE_TAGS.has(tag))
|
|
3160
|
+
return false;
|
|
3161
|
+
// Blob/File: always offload regardless of size.
|
|
3162
|
+
// This ensures blobs are never stored inline in IndexedDB, which avoids
|
|
3163
|
+
// issues with synchronous blob reading (e.g. in service workers where
|
|
3164
|
+
// XMLHttpRequest is unavailable — see #2182).
|
|
3165
|
+
if (tag === 'Blob' || tag === 'File') {
|
|
3166
|
+
return true;
|
|
3167
|
+
}
|
|
3168
|
+
// ArrayBuffer/TypedArray/DataView: only offload above threshold
|
|
3169
|
+
if (tag === 'ArrayBuffer') {
|
|
3170
|
+
return value.byteLength >= BLOB_OFFLOAD_THRESHOLD;
|
|
3171
|
+
}
|
|
3172
|
+
// TypedArray or DataView
|
|
3173
|
+
return value.byteLength >= BLOB_OFFLOAD_THRESHOLD;
|
|
3174
|
+
}
|
|
3175
|
+
/**
|
|
3176
|
+
* Upload a blob to the blob storage endpoint
|
|
3177
|
+
*/
|
|
3178
|
+
function uploadBlob(databaseUrl, getCachedAccessToken, blob) {
|
|
3179
|
+
return __awaiter(this, void 0, void 0, function* () {
|
|
3180
|
+
const accessToken = yield getCachedAccessToken();
|
|
3181
|
+
if (!accessToken) {
|
|
3182
|
+
throw new Error('Failed to load access token for blob upload');
|
|
3183
|
+
}
|
|
3184
|
+
const blobId = newId();
|
|
3185
|
+
// URL format: {databaseUrl}/blob/{blobId}
|
|
3186
|
+
const url = `${databaseUrl}/blob/${blobId}`;
|
|
3187
|
+
let body;
|
|
3188
|
+
let contentType;
|
|
3189
|
+
let size;
|
|
3190
|
+
const origType = getOrigType(blob);
|
|
3191
|
+
// Use type tag for cross-realm compatible checks
|
|
3192
|
+
const tag = getTypeTag(blob);
|
|
3193
|
+
if (tag === 'Blob' || tag === 'File') {
|
|
3194
|
+
body = blob;
|
|
3195
|
+
contentType = blob.type || 'application/octet-stream';
|
|
3196
|
+
size = blob.size;
|
|
3197
|
+
}
|
|
3198
|
+
else if (tag === 'ArrayBuffer') {
|
|
3199
|
+
body = blob;
|
|
3200
|
+
contentType = 'application/octet-stream';
|
|
3201
|
+
size = blob.byteLength;
|
|
3202
|
+
}
|
|
3203
|
+
else if (ARRAYBUFFER_VIEW_TAGS.has(tag)) {
|
|
3204
|
+
// ArrayBufferView (TypedArray or DataView) - create a proper ArrayBuffer copy
|
|
3205
|
+
const view = blob;
|
|
3206
|
+
const arrayBuffer = new ArrayBuffer(view.byteLength);
|
|
3207
|
+
new Uint8Array(arrayBuffer).set(new Uint8Array(view.buffer, view.byteOffset, view.byteLength));
|
|
3208
|
+
body = arrayBuffer;
|
|
3209
|
+
contentType = 'application/octet-stream';
|
|
3210
|
+
size = view.byteLength;
|
|
3211
|
+
}
|
|
3212
|
+
else {
|
|
3213
|
+
throw new Error(`Unsupported blob type: ${tag}`);
|
|
3214
|
+
}
|
|
3215
|
+
// Add content type as query param for the server to store
|
|
3216
|
+
const uploadUrl = `${url}?ct=${encodeURIComponent(contentType)}`;
|
|
3217
|
+
const response = yield fetch(uploadUrl, {
|
|
3218
|
+
method: 'PUT',
|
|
3219
|
+
headers: {
|
|
3220
|
+
'Authorization': `Bearer ${accessToken}`,
|
|
3221
|
+
'Content-Type': contentType,
|
|
3222
|
+
},
|
|
3223
|
+
body,
|
|
3224
|
+
});
|
|
3225
|
+
if (!response.ok) {
|
|
3226
|
+
if (response.status === 404 || response.status === 405) {
|
|
3227
|
+
// Server doesn't support blob storage endpoint — fall back to inline storage.
|
|
3228
|
+
// This happens when a new client connects to an older server (pre-3.0).
|
|
3229
|
+
return null;
|
|
3230
|
+
}
|
|
3231
|
+
throw new Error(`Failed to upload blob: ${response.status} ${response.statusText}`);
|
|
3232
|
+
}
|
|
3233
|
+
// The server returns the ref with version prefix (e.g., "1:blobId")
|
|
3234
|
+
const result = yield response.json();
|
|
3235
|
+
// Return BlobRef with server's ref (includes version) and original type preserved in _bt
|
|
3236
|
+
return Object.assign({ _bt: origType, ref: result.ref, size: size }, (origType === 'Blob' ? { ct: contentType } : {}) // Only include content type for Blobs
|
|
3237
|
+
);
|
|
3238
|
+
});
|
|
3239
|
+
}
|
|
3240
|
+
function offloadBlobsAndMarkDirty(obj_1, databaseUrl_1, getCachedAccessToken_1) {
|
|
3241
|
+
return __awaiter(this, arguments, void 0, function* (obj, databaseUrl, getCachedAccessToken, maxStringLength = DEFAULT_MAX_STRING_LENGTH) {
|
|
3242
|
+
const dirtyFlag = { dirty: false };
|
|
3243
|
+
const result = yield offloadBlobs(obj, databaseUrl, getCachedAccessToken, maxStringLength, dirtyFlag);
|
|
3244
|
+
// Mark the object as dirty for sync if any blobs were offloaded
|
|
3245
|
+
if (dirtyFlag.dirty && typeof result === 'object' && result !== null && result.constructor === Object) {
|
|
3246
|
+
result._hasBlobRefs = 1;
|
|
3247
|
+
}
|
|
3248
|
+
return result;
|
|
3249
|
+
});
|
|
3250
|
+
}
|
|
3251
|
+
/**
|
|
3252
|
+
* Recursively scan an object for large blobs and upload them
|
|
3253
|
+
* Returns a new object with blobs replaced by BlobRefs
|
|
3254
|
+
*/
|
|
3255
|
+
function offloadBlobs(obj_1, databaseUrl_1, getCachedAccessToken_1) {
|
|
3256
|
+
return __awaiter(this, arguments, void 0, function* (obj, databaseUrl, getCachedAccessToken, maxStringLength = DEFAULT_MAX_STRING_LENGTH, dirtyFlag = { dirty: false }, visited = new WeakSet()) {
|
|
3257
|
+
if (obj === null || obj === undefined) {
|
|
3258
|
+
return obj;
|
|
3259
|
+
}
|
|
3260
|
+
// Check if this is a long string that should be offloaded
|
|
3261
|
+
if (typeof obj === 'string' && obj.length > maxStringLength && maxStringLength !== Infinity) {
|
|
3262
|
+
if (blobEndpointSupported.get(databaseUrl) === false) {
|
|
3263
|
+
return obj;
|
|
3264
|
+
}
|
|
3265
|
+
const blob = new Blob([obj], { type: 'text/plain;charset=utf-8' });
|
|
3266
|
+
const blobRef = yield uploadBlob(databaseUrl, getCachedAccessToken, blob);
|
|
3267
|
+
if (blobRef === null) {
|
|
3268
|
+
blobEndpointSupported.set(databaseUrl, false);
|
|
3269
|
+
return obj;
|
|
3270
|
+
}
|
|
3271
|
+
blobEndpointSupported.set(databaseUrl, true);
|
|
3272
|
+
dirtyFlag.dirty = true;
|
|
3273
|
+
// Mark as string type so it's resolved back to string, not Blob
|
|
3274
|
+
return Object.assign(Object.assign({}, blobRef), { _bt: 'string' });
|
|
3275
|
+
}
|
|
3276
|
+
// Check if this is a blob that should be offloaded
|
|
3277
|
+
if (shouldOffloadBlob(obj)) {
|
|
3278
|
+
if (blobEndpointSupported.get(databaseUrl) === false) {
|
|
3279
|
+
// Server known to not support blob storage — keep inline
|
|
3280
|
+
return obj;
|
|
3281
|
+
}
|
|
3282
|
+
const blobRef = yield uploadBlob(databaseUrl, getCachedAccessToken, obj);
|
|
3283
|
+
if (blobRef === null) {
|
|
3284
|
+
// Server doesn't support blob storage — keep original inline
|
|
3285
|
+
blobEndpointSupported.set(databaseUrl, false);
|
|
3286
|
+
return obj;
|
|
3287
|
+
}
|
|
3288
|
+
blobEndpointSupported.set(databaseUrl, true);
|
|
3289
|
+
dirtyFlag.dirty = true;
|
|
3290
|
+
return blobRef;
|
|
3291
|
+
}
|
|
3292
|
+
if (typeof obj !== 'object') {
|
|
3293
|
+
return obj;
|
|
3294
|
+
}
|
|
3295
|
+
// Avoid circular references - check BEFORE processing
|
|
3296
|
+
if (visited.has(obj)) {
|
|
3297
|
+
return obj;
|
|
3298
|
+
}
|
|
3299
|
+
visited.add(obj);
|
|
3300
|
+
// Handle arrays
|
|
3301
|
+
if (Array.isArray(obj)) {
|
|
3302
|
+
const result = [];
|
|
3303
|
+
for (const item of obj) {
|
|
3304
|
+
result.push(yield offloadBlobs(item, databaseUrl, getCachedAccessToken, maxStringLength, dirtyFlag, visited));
|
|
3305
|
+
}
|
|
3306
|
+
return result;
|
|
3307
|
+
}
|
|
3308
|
+
// Traverse plain objects (POJO-like) - use prototype check since IndexedDB
|
|
3309
|
+
// may return objects where constructor !== Object
|
|
3310
|
+
const proto = Object.getPrototypeOf(obj);
|
|
3311
|
+
if (proto !== Object.prototype && proto !== null) {
|
|
3312
|
+
return obj;
|
|
3313
|
+
}
|
|
3314
|
+
const result = {};
|
|
3315
|
+
for (const [key, value] of Object.entries(obj)) {
|
|
3316
|
+
result[key] = yield offloadBlobs(value, databaseUrl, getCachedAccessToken, maxStringLength, dirtyFlag, visited);
|
|
3317
|
+
}
|
|
3318
|
+
return result;
|
|
3319
|
+
});
|
|
3320
|
+
}
|
|
3321
|
+
/**
|
|
3322
|
+
* Process a DBOperationsSet and offload any large blobs
|
|
3323
|
+
* Returns a new DBOperationsSet with blobs replaced by BlobRefs
|
|
3324
|
+
*/
|
|
3325
|
+
function offloadBlobsInOperations(operations_1, databaseUrl_1, getCachedAccessToken_1) {
|
|
3326
|
+
return __awaiter(this, arguments, void 0, function* (operations, databaseUrl, getCachedAccessToken, maxStringLength = DEFAULT_MAX_STRING_LENGTH) {
|
|
3327
|
+
const result = [];
|
|
3328
|
+
for (const tableOps of operations) {
|
|
3329
|
+
const processedMuts = [];
|
|
3330
|
+
for (const mut of tableOps.muts) {
|
|
3331
|
+
const processedMut = yield offloadBlobsInOperation(mut, databaseUrl, getCachedAccessToken, maxStringLength);
|
|
3332
|
+
processedMuts.push(processedMut);
|
|
3333
|
+
}
|
|
3334
|
+
result.push({
|
|
3335
|
+
table: tableOps.table,
|
|
3336
|
+
muts: processedMuts,
|
|
3337
|
+
});
|
|
3338
|
+
}
|
|
3339
|
+
return result;
|
|
3340
|
+
});
|
|
3341
|
+
}
|
|
3342
|
+
function offloadBlobsInOperation(op_1, databaseUrl_1, getCachedAccessToken_1) {
|
|
3343
|
+
return __awaiter(this, arguments, void 0, function* (op, databaseUrl, getCachedAccessToken, maxStringLength = DEFAULT_MAX_STRING_LENGTH) {
|
|
3344
|
+
switch (op.type) {
|
|
3345
|
+
case 'insert':
|
|
3346
|
+
case 'upsert': {
|
|
3347
|
+
const processedValues = yield Promise.all(op.values.map(value => offloadBlobsAndMarkDirty(value, databaseUrl, getCachedAccessToken, maxStringLength)));
|
|
3348
|
+
return Object.assign(Object.assign({}, op), { values: processedValues });
|
|
3349
|
+
}
|
|
3350
|
+
case 'update': {
|
|
3351
|
+
const processedChangeSpecs = yield Promise.all(op.changeSpecs.map(spec => offloadBlobsAndMarkDirty(spec, databaseUrl, getCachedAccessToken, maxStringLength)));
|
|
3352
|
+
return Object.assign(Object.assign({}, op), { changeSpecs: processedChangeSpecs });
|
|
3353
|
+
}
|
|
3354
|
+
case 'modify': {
|
|
3355
|
+
const processedChangeSpec = yield offloadBlobsAndMarkDirty(op.changeSpec, databaseUrl, getCachedAccessToken, maxStringLength);
|
|
3356
|
+
return Object.assign(Object.assign({}, op), { changeSpec: processedChangeSpec });
|
|
3357
|
+
}
|
|
3358
|
+
case 'delete':
|
|
3359
|
+
// No blobs in delete operations
|
|
3360
|
+
return op;
|
|
3361
|
+
default:
|
|
3362
|
+
return op;
|
|
3363
|
+
}
|
|
3364
|
+
});
|
|
3365
|
+
}
|
|
3366
|
+
/**
|
|
3367
|
+
* Check if there are any large blobs in the operations that need offloading
|
|
3368
|
+
* This is a quick check to avoid unnecessary processing
|
|
3369
|
+
*/
|
|
3370
|
+
function hasLargeBlobsInOperations(operations, maxStringLength = DEFAULT_MAX_STRING_LENGTH) {
|
|
3371
|
+
for (const tableOps of operations) {
|
|
3372
|
+
for (const mut of tableOps.muts) {
|
|
3373
|
+
if (hasLargeBlobsInOperation(mut, maxStringLength)) {
|
|
3374
|
+
return true;
|
|
3375
|
+
}
|
|
3376
|
+
}
|
|
3377
|
+
}
|
|
3378
|
+
return false;
|
|
3379
|
+
}
|
|
3380
|
+
function hasLargeBlobsInOperation(op, maxStringLength) {
|
|
3381
|
+
switch (op.type) {
|
|
3382
|
+
case 'insert':
|
|
3383
|
+
case 'upsert':
|
|
3384
|
+
return op.values.some(value => hasLargeBlobs(value, maxStringLength));
|
|
3385
|
+
case 'update':
|
|
3386
|
+
return op.changeSpecs.some(spec => hasLargeBlobs(spec, maxStringLength));
|
|
3387
|
+
case 'modify':
|
|
3388
|
+
return hasLargeBlobs(op.changeSpec, maxStringLength);
|
|
3389
|
+
default:
|
|
3390
|
+
return false;
|
|
3391
|
+
}
|
|
3392
|
+
}
|
|
3393
|
+
function hasLargeBlobs(obj, maxStringLength, visited = new WeakSet()) {
|
|
3394
|
+
if (obj === null || obj === undefined) {
|
|
3395
|
+
return false;
|
|
3396
|
+
}
|
|
3397
|
+
// Check long strings
|
|
3398
|
+
if (typeof obj === 'string' && obj.length > maxStringLength && maxStringLength !== Infinity) {
|
|
3399
|
+
return true;
|
|
3400
|
+
}
|
|
3401
|
+
if (shouldOffloadBlob(obj)) {
|
|
3402
|
+
return true;
|
|
3403
|
+
}
|
|
3404
|
+
if (typeof obj !== 'object') {
|
|
3405
|
+
return false;
|
|
3406
|
+
}
|
|
3407
|
+
// Avoid circular references - check BEFORE processing
|
|
3408
|
+
if (visited.has(obj)) {
|
|
3409
|
+
return false;
|
|
3410
|
+
}
|
|
3411
|
+
visited.add(obj);
|
|
3412
|
+
if (Array.isArray(obj)) {
|
|
3413
|
+
return obj.some(item => hasLargeBlobs(item, maxStringLength, visited));
|
|
3414
|
+
}
|
|
3415
|
+
// Traverse plain objects (POJO-like) - use duck typing since IndexedDB
|
|
3416
|
+
// may return objects where constructor !== Object
|
|
3417
|
+
const proto = Object.getPrototypeOf(obj);
|
|
3418
|
+
if (proto === Object.prototype || proto === null) {
|
|
3419
|
+
return Object.values(obj).some(value => hasLargeBlobs(value, maxStringLength, visited));
|
|
3420
|
+
}
|
|
3421
|
+
return false;
|
|
2737
3422
|
}
|
|
2738
3423
|
|
|
2739
3424
|
function updateYSyncStates(lastUpdateIdsBeforeSync, receivedUntilsAfterSync, db) {
|
|
@@ -2917,6 +3602,33 @@ function downloadYDocsFromServer(db_1, databaseUrl_1, _a) {
|
|
|
2917
3602
|
});
|
|
2918
3603
|
}
|
|
2919
3604
|
|
|
3605
|
+
const wm$3 = new WeakMap();
|
|
3606
|
+
function loadCachedAccessToken(db) {
|
|
3607
|
+
var _a, _b, _c, _d;
|
|
3608
|
+
let cached = wm$3.get(db);
|
|
3609
|
+
if (cached && cached.expiration > Date.now() + 5 * MINUTES) {
|
|
3610
|
+
return Promise.resolve(cached.accessToken);
|
|
3611
|
+
}
|
|
3612
|
+
const currentUser = db.cloud.currentUser.value;
|
|
3613
|
+
if (currentUser && currentUser.accessToken && ((_b = (_a = currentUser.accessTokenExpiration) === null || _a === void 0 ? void 0 : _a.getTime()) !== null && _b !== void 0 ? _b : Infinity) > Date.now() + 5 * MINUTES) {
|
|
3614
|
+
wm$3.set(db, {
|
|
3615
|
+
accessToken: currentUser.accessToken,
|
|
3616
|
+
expiration: (_d = (_c = currentUser.accessTokenExpiration) === null || _c === void 0 ? void 0 : _c.getTime()) !== null && _d !== void 0 ? _d : Infinity
|
|
3617
|
+
});
|
|
3618
|
+
return Promise.resolve(currentUser.accessToken);
|
|
3619
|
+
}
|
|
3620
|
+
return Dexie.ignoreTransaction(() => loadAccessToken(db).then(user => {
|
|
3621
|
+
var _a, _b;
|
|
3622
|
+
if (user === null || user === void 0 ? void 0 : user.accessToken) {
|
|
3623
|
+
wm$3.set(db, {
|
|
3624
|
+
accessToken: user.accessToken,
|
|
3625
|
+
expiration: (_b = (_a = user.accessTokenExpiration) === null || _a === void 0 ? void 0 : _a.getTime()) !== null && _b !== void 0 ? _b : Infinity
|
|
3626
|
+
});
|
|
3627
|
+
}
|
|
3628
|
+
return (user === null || user === void 0 ? void 0 : user.accessToken) || null;
|
|
3629
|
+
}));
|
|
3630
|
+
}
|
|
3631
|
+
|
|
2920
3632
|
const CURRENT_SYNC_WORKER = 'currentSyncWorker';
|
|
2921
3633
|
function sync(db, options, schema, syncOptions) {
|
|
2922
3634
|
return _sync(db, options, schema, syncOptions)
|
|
@@ -2965,7 +3677,7 @@ function _sync(db_1, options_1, schema_1) {
|
|
|
2965
3677
|
return __awaiter(this, arguments, void 0, function* (db, options, schema, { isInitialSync, cancelToken, justCheckIfNeeded, purpose } = {
|
|
2966
3678
|
isInitialSync: false,
|
|
2967
3679
|
}) {
|
|
2968
|
-
var _a;
|
|
3680
|
+
var _a, _b, _c;
|
|
2969
3681
|
if (!justCheckIfNeeded) {
|
|
2970
3682
|
console.debug('SYNC STARTED', { isInitialSync, purpose });
|
|
2971
3683
|
}
|
|
@@ -3034,12 +3746,21 @@ function _sync(db_1, options_1, schema_1) {
|
|
|
3034
3746
|
return false;
|
|
3035
3747
|
}
|
|
3036
3748
|
const latestRevisions = getLatestRevisionsPerTable(clientChangeSet, syncState === null || syncState === void 0 ? void 0 : syncState.latestRevisions);
|
|
3037
|
-
const clientIdentity = (syncState === null || syncState === void 0 ? void 0 : syncState.clientIdentity) || randomString(16);
|
|
3749
|
+
const clientIdentity = (syncState === null || syncState === void 0 ? void 0 : syncState.clientIdentity) || randomString$1(16);
|
|
3750
|
+
//
|
|
3751
|
+
// Offload large blobs to blob storage before sync
|
|
3752
|
+
//
|
|
3753
|
+
let processedChangeSet = clientChangeSet;
|
|
3754
|
+
const maxStringLength = (_c = (_b = db.cloud.options) === null || _b === void 0 ? void 0 : _b.maxStringLength) !== null && _c !== void 0 ? _c : 32768;
|
|
3755
|
+
const hasLargeBlobs = hasLargeBlobsInOperations(clientChangeSet, maxStringLength);
|
|
3756
|
+
if (hasLargeBlobs) {
|
|
3757
|
+
processedChangeSet = yield offloadBlobsInOperations(clientChangeSet, databaseUrl, () => loadCachedAccessToken(db), maxStringLength);
|
|
3758
|
+
}
|
|
3038
3759
|
//
|
|
3039
3760
|
// Push changes to server
|
|
3040
3761
|
//
|
|
3041
3762
|
throwIfCancelled(cancelToken);
|
|
3042
|
-
const res = yield syncWithServer(
|
|
3763
|
+
const res = yield syncWithServer(processedChangeSet, yMessages, syncState, baseRevs, db, databaseUrl, schema, clientIdentity, currentUser);
|
|
3043
3764
|
console.debug('Sync response', res);
|
|
3044
3765
|
//
|
|
3045
3766
|
// Apply changes locally and clear old change entries:
|
|
@@ -3442,6 +4163,65 @@ function MessagesFromServerConsumer(db) {
|
|
|
3442
4163
|
};
|
|
3443
4164
|
}
|
|
3444
4165
|
|
|
4166
|
+
/**
|
|
4167
|
+
* Deduplicates in-flight blob downloads.
|
|
4168
|
+
*
|
|
4169
|
+
* Both the blob-resolve middleware and the eager blob downloader may
|
|
4170
|
+
* try to fetch the same blob concurrently. This tracker ensures each
|
|
4171
|
+
* unique blob ref is only downloaded once — subsequent requests for
|
|
4172
|
+
* the same ref piggyback on the existing promise.
|
|
4173
|
+
*
|
|
4174
|
+
* Instantiate once per DexieCloudDB.
|
|
4175
|
+
*/
|
|
4176
|
+
class BlobDownloadTracker {
|
|
4177
|
+
constructor(db) {
|
|
4178
|
+
this.inFlight = new Map();
|
|
4179
|
+
this.db = db;
|
|
4180
|
+
}
|
|
4181
|
+
/**
|
|
4182
|
+
* Download a blob, deduplicating concurrent requests for the same ref.
|
|
4183
|
+
*
|
|
4184
|
+
* @param blobRef - The BlobRef to download
|
|
4185
|
+
* @param dbUrl - Base URL for the database (e.g., 'https://mydb.dexie.cloud')
|
|
4186
|
+
*/
|
|
4187
|
+
download(blobRef, dbUrl) {
|
|
4188
|
+
let promise = this.inFlight.get(blobRef.ref);
|
|
4189
|
+
if (!promise) {
|
|
4190
|
+
promise = loadCachedAccessToken(this.db).then(accessToken => {
|
|
4191
|
+
if (!accessToken)
|
|
4192
|
+
throw new Error("No access token available for blob download");
|
|
4193
|
+
return downloadBlob(blobRef, dbUrl, accessToken);
|
|
4194
|
+
}).finally(() => this.inFlight.delete(blobRef.ref));
|
|
4195
|
+
// When the promise settles (either fulfilled or rejected), remove it from the in-flight map
|
|
4196
|
+
this.inFlight.set(blobRef.ref, promise);
|
|
4197
|
+
}
|
|
4198
|
+
return promise;
|
|
4199
|
+
}
|
|
4200
|
+
}
|
|
4201
|
+
/**
|
|
4202
|
+
* Download blob data from server via proxy endpoint.
|
|
4203
|
+
* Uses auth header for authentication (same as sync).
|
|
4204
|
+
*
|
|
4205
|
+
* @param blobRef - The BlobRef to download
|
|
4206
|
+
* @param dbUrl - Base URL for the database (e.g., 'https://mydb.dexie.cloud')
|
|
4207
|
+
* @param accessToken - Access token for authentication
|
|
4208
|
+
*/
|
|
4209
|
+
function downloadBlob(blobRef, dbUrl, accessToken) {
|
|
4210
|
+
return __awaiter(this, void 0, void 0, function* () {
|
|
4211
|
+
const downloadUrl = `${dbUrl}/blob/${blobRef.ref}`;
|
|
4212
|
+
const response = yield fetch(downloadUrl, {
|
|
4213
|
+
headers: {
|
|
4214
|
+
'Authorization': `Bearer ${accessToken}`
|
|
4215
|
+
}
|
|
4216
|
+
});
|
|
4217
|
+
if (!response.ok) {
|
|
4218
|
+
throw new Error(`Failed to download blob ${blobRef.ref}: ${response.status} ${response.statusText}`);
|
|
4219
|
+
}
|
|
4220
|
+
const arrayBuffer = yield response.arrayBuffer();
|
|
4221
|
+
return new Uint8Array(arrayBuffer);
|
|
4222
|
+
});
|
|
4223
|
+
}
|
|
4224
|
+
|
|
3445
4225
|
const wm$2 = new WeakMap();
|
|
3446
4226
|
const DEXIE_CLOUD_SCHEMA = {
|
|
3447
4227
|
members: '@id, [userId+realmId], [email+realmId], realmId',
|
|
@@ -3456,7 +4236,7 @@ let static_counter = 0;
|
|
|
3456
4236
|
function DexieCloudDB(dx) {
|
|
3457
4237
|
if ('vip' in dx)
|
|
3458
4238
|
dx = dx['vip']; // Avoid race condition. Always map to a vipped dexie that don't block during db.on.ready().
|
|
3459
|
-
let db = wm$2.get(dx
|
|
4239
|
+
let db = wm$2.get(dx);
|
|
3460
4240
|
if (!db) {
|
|
3461
4241
|
const localSyncEvent = new Subject();
|
|
3462
4242
|
let syncStateChangedEvent = new BroadcastedAndLocalEvent(`syncstatechanged-${dx.name}`);
|
|
@@ -3475,7 +4255,9 @@ function DexieCloudDB(dx) {
|
|
|
3475
4255
|
get tables() {
|
|
3476
4256
|
return dx.tables;
|
|
3477
4257
|
},
|
|
3478
|
-
cloud
|
|
4258
|
+
get cloud() {
|
|
4259
|
+
return dx.cloud;
|
|
4260
|
+
},
|
|
3479
4261
|
get $jobs() {
|
|
3480
4262
|
return dx.table('$jobs');
|
|
3481
4263
|
},
|
|
@@ -3544,7 +4326,8 @@ function DexieCloudDB(dx) {
|
|
|
3544
4326
|
Object.assign(db, helperMethods);
|
|
3545
4327
|
db.messageConsumer = MessagesFromServerConsumer(db);
|
|
3546
4328
|
db.messageProducer = new Subject();
|
|
3547
|
-
|
|
4329
|
+
db.blobDownloadTracker = new BlobDownloadTracker(db);
|
|
4330
|
+
wm$2.set(dx, db);
|
|
3548
4331
|
}
|
|
3549
4332
|
return db;
|
|
3550
4333
|
}
|
|
@@ -3554,6 +4337,221 @@ function nameFromKeyPath(keyPath) {
|
|
|
3554
4337
|
keyPath ? ('[' + [].join.call(keyPath, '+') + ']') : "";
|
|
3555
4338
|
}
|
|
3556
4339
|
|
|
4340
|
+
/**
|
|
4341
|
+
* Blob Progress Tracking
|
|
4342
|
+
*
|
|
4343
|
+
* Uses liveQuery to reactively track unresolved blob refs.
|
|
4344
|
+
* Any change to _hasBlobRefs in any syncable table automatically
|
|
4345
|
+
* triggers a re-scan — no manual updateBlobProgress() needed.
|
|
4346
|
+
*/
|
|
4347
|
+
/**
|
|
4348
|
+
* BehaviorSubject for the isDownloading flag, controlled by eagerBlobDownloader.
|
|
4349
|
+
*/
|
|
4350
|
+
function createDownloadingState() {
|
|
4351
|
+
return new BehaviorSubject(false);
|
|
4352
|
+
}
|
|
4353
|
+
/**
|
|
4354
|
+
* Set downloading state.
|
|
4355
|
+
*/
|
|
4356
|
+
function setDownloadingState(downloading$, isDownloading) {
|
|
4357
|
+
if (downloading$.value !== isDownloading) {
|
|
4358
|
+
downloading$.next(isDownloading);
|
|
4359
|
+
}
|
|
4360
|
+
}
|
|
4361
|
+
/**
|
|
4362
|
+
* Create a liveQuery-based Observable<BlobProgress>.
|
|
4363
|
+
*
|
|
4364
|
+
* Combines a liveQuery (blobsRemaining, bytesRemaining) with an external
|
|
4365
|
+
* isDownloading flag controlled by the eager downloader.
|
|
4366
|
+
*/
|
|
4367
|
+
function observeBlobProgress(db, downloading$) {
|
|
4368
|
+
const blobStats$ = from(liveQuery(() => __awaiter(this, void 0, void 0, function* () {
|
|
4369
|
+
let blobsRemaining = 0;
|
|
4370
|
+
let bytesRemaining = 0;
|
|
4371
|
+
const syncedTables = getSyncableTables(db);
|
|
4372
|
+
yield db.dx.transaction('r', syncedTables, (tx) => __awaiter(this, void 0, void 0, function* () {
|
|
4373
|
+
tx.idbtrans.disableBlobResolve = true;
|
|
4374
|
+
for (const table of syncedTables) {
|
|
4375
|
+
const hasIndex = !!table.schema.idxByName['_hasBlobRefs'];
|
|
4376
|
+
if (!hasIndex)
|
|
4377
|
+
continue;
|
|
4378
|
+
const unresolvedObjects = yield table
|
|
4379
|
+
.where('_hasBlobRefs')
|
|
4380
|
+
.equals(1)
|
|
4381
|
+
.toArray();
|
|
4382
|
+
for (const obj of unresolvedObjects) {
|
|
4383
|
+
const blobs = findBlobRefs(obj);
|
|
4384
|
+
blobsRemaining += blobs.length;
|
|
4385
|
+
bytesRemaining += blobs.reduce((sum, blob) => sum + (blob.size || 0), 0);
|
|
4386
|
+
}
|
|
4387
|
+
}
|
|
4388
|
+
}));
|
|
4389
|
+
return { blobsRemaining, bytesRemaining };
|
|
4390
|
+
})));
|
|
4391
|
+
return combineLatest([blobStats$, downloading$]).pipe(map(([stats, isDownloading]) => ({
|
|
4392
|
+
isDownloading: isDownloading && stats.blobsRemaining > 0,
|
|
4393
|
+
blobsRemaining: stats.blobsRemaining,
|
|
4394
|
+
bytesRemaining: stats.bytesRemaining,
|
|
4395
|
+
})), share({ resetOnRefCountZero: () => timer(2000) }) // Keep alive for 2s after last unsubscription to avoid rapid re-subscriptions during UI updates
|
|
4396
|
+
);
|
|
4397
|
+
}
|
|
4398
|
+
/**
|
|
4399
|
+
* Find all unresolved refs (BlobRef or TSONRef) in an object (recursive).
|
|
4400
|
+
* Handles both live TSONRef instances and serialized TSONRefs (after IndexedDB).
|
|
4401
|
+
*/
|
|
4402
|
+
function findBlobRefs(obj) {
|
|
4403
|
+
const refs = [];
|
|
4404
|
+
function scan(value) {
|
|
4405
|
+
if (value === null || value === undefined)
|
|
4406
|
+
return;
|
|
4407
|
+
if (typeof value !== 'object')
|
|
4408
|
+
return;
|
|
4409
|
+
if (TSONRef.isTSONRef(value)) {
|
|
4410
|
+
refs.push({ ref: value.ref, size: value.size });
|
|
4411
|
+
return;
|
|
4412
|
+
}
|
|
4413
|
+
if (isSerializedTSONRef(value)) {
|
|
4414
|
+
const obj = value;
|
|
4415
|
+
refs.push({ ref: obj.ref, size: obj.size });
|
|
4416
|
+
return;
|
|
4417
|
+
}
|
|
4418
|
+
if (isBlobRef(value)) {
|
|
4419
|
+
refs.push({ ref: value.ref, size: value.size || 0 });
|
|
4420
|
+
return;
|
|
4421
|
+
}
|
|
4422
|
+
if (Array.isArray(value)) {
|
|
4423
|
+
value.forEach(scan);
|
|
4424
|
+
}
|
|
4425
|
+
else if (value.constructor === Object) {
|
|
4426
|
+
Object.values(value).forEach(scan);
|
|
4427
|
+
}
|
|
4428
|
+
}
|
|
4429
|
+
scan(obj);
|
|
4430
|
+
return refs;
|
|
4431
|
+
}
|
|
4432
|
+
|
|
4433
|
+
/**
|
|
4434
|
+
* Eager Blob Downloader
|
|
4435
|
+
*
|
|
4436
|
+
* Downloads unresolved blobs in the background when blobMode='eager'.
|
|
4437
|
+
* Called after sync completes to prefetch blobs for offline access.
|
|
4438
|
+
*
|
|
4439
|
+
* Progress is tracked automatically via liveQuery in blobProgress.ts —
|
|
4440
|
+
* no manual progress reporting needed here.
|
|
4441
|
+
*/
|
|
4442
|
+
/**
|
|
4443
|
+
* Download all unresolved blobs in the background.
|
|
4444
|
+
*
|
|
4445
|
+
* This is called when blobMode='eager' (default) after sync completes.
|
|
4446
|
+
* BlobRef URLs are signed (SAS tokens) so no auth header needed.
|
|
4447
|
+
*
|
|
4448
|
+
* Each blob is saved atomically using Table.update() to avoid race conditions.
|
|
4449
|
+
*/
|
|
4450
|
+
function downloadUnresolvedBlobs(db, downloading$, signal) {
|
|
4451
|
+
return __awaiter(this, void 0, void 0, function* () {
|
|
4452
|
+
var _a;
|
|
4453
|
+
const debugLog = (msg) => console.debug(`[dexie-cloud] ${msg}`);
|
|
4454
|
+
debugLog('Eager download: Starting...');
|
|
4455
|
+
// Scan for unresolved blobs
|
|
4456
|
+
const syncedTables = getSyncableTables(db);
|
|
4457
|
+
let hasWork = false;
|
|
4458
|
+
for (const table of syncedTables) {
|
|
4459
|
+
try {
|
|
4460
|
+
const hasIndex = !!table.schema.idxByName['_hasBlobRefs'];
|
|
4461
|
+
if (!hasIndex)
|
|
4462
|
+
continue;
|
|
4463
|
+
const count = yield table.where('_hasBlobRefs').equals(1).count();
|
|
4464
|
+
if (count > 0) {
|
|
4465
|
+
hasWork = true;
|
|
4466
|
+
break;
|
|
4467
|
+
}
|
|
4468
|
+
}
|
|
4469
|
+
catch (_b) {
|
|
4470
|
+
// skip
|
|
4471
|
+
}
|
|
4472
|
+
}
|
|
4473
|
+
if (!hasWork) {
|
|
4474
|
+
debugLog('Eager download: No blobs remaining, exiting');
|
|
4475
|
+
return;
|
|
4476
|
+
}
|
|
4477
|
+
setDownloadingState(downloading$, true);
|
|
4478
|
+
try {
|
|
4479
|
+
debugLog(`Eager download: Found ${syncedTables.length} syncable tables: ${syncedTables.map(t => t.name).join(', ')}`);
|
|
4480
|
+
for (const table of syncedTables) {
|
|
4481
|
+
if (signal === null || signal === void 0 ? void 0 : signal.aborted)
|
|
4482
|
+
;
|
|
4483
|
+
try {
|
|
4484
|
+
// Check if table has _hasBlobRefs index
|
|
4485
|
+
const hasIndex = table.schema.indexes.some(idx => idx.name === '_hasBlobRefs');
|
|
4486
|
+
if (!hasIndex)
|
|
4487
|
+
continue;
|
|
4488
|
+
// Query objects with _hasBlobRefs marker
|
|
4489
|
+
const unresolvedObjects = yield table
|
|
4490
|
+
.where('_hasBlobRefs')
|
|
4491
|
+
.equals(1)
|
|
4492
|
+
.toArray();
|
|
4493
|
+
debugLog(`Eager download: Table ${table.name} has ${unresolvedObjects.length} unresolved objects`);
|
|
4494
|
+
const databaseUrl = (_a = db.cloud.options) === null || _a === void 0 ? void 0 : _a.databaseUrl;
|
|
4495
|
+
if (!databaseUrl)
|
|
4496
|
+
throw new Error('Database URL is required to download blobs');
|
|
4497
|
+
// Download up to MAX_CONCURRENT blobs in parallel
|
|
4498
|
+
const MAX_CONCURRENT = 6;
|
|
4499
|
+
const primaryKey = table.schema.primKey;
|
|
4500
|
+
// Filter to actionable objects first
|
|
4501
|
+
const pending = unresolvedObjects.filter(obj => {
|
|
4502
|
+
if (!hasUnresolvedBlobRefs(obj))
|
|
4503
|
+
return false;
|
|
4504
|
+
const key = primaryKey.keyPath
|
|
4505
|
+
? Dexie.getByKeyPath(obj, primaryKey.keyPath)
|
|
4506
|
+
: undefined;
|
|
4507
|
+
return key !== undefined;
|
|
4508
|
+
});
|
|
4509
|
+
// Process in parallel with concurrency limit
|
|
4510
|
+
let i = 0;
|
|
4511
|
+
const runNext = () => __awaiter(this, void 0, void 0, function* () {
|
|
4512
|
+
while (i < pending.length) {
|
|
4513
|
+
if (signal === null || signal === void 0 ? void 0 : signal.aborted)
|
|
4514
|
+
;
|
|
4515
|
+
const obj = pending[i++];
|
|
4516
|
+
const key = Dexie.getByKeyPath(obj, primaryKey.keyPath);
|
|
4517
|
+
try {
|
|
4518
|
+
// Refresh token per object — cheap (returns cached) but ensures
|
|
4519
|
+
// we pick up renewed tokens during long download sessions.
|
|
4520
|
+
const resolvedBlobs = [];
|
|
4521
|
+
yield resolveAllBlobRefs(obj, databaseUrl, resolvedBlobs, '', new WeakMap(), db.blobDownloadTracker);
|
|
4522
|
+
const updateSpec = {
|
|
4523
|
+
_hasBlobRefs: undefined,
|
|
4524
|
+
};
|
|
4525
|
+
for (const blob of resolvedBlobs) {
|
|
4526
|
+
updateSpec[blob.keyPath] = blob.data;
|
|
4527
|
+
}
|
|
4528
|
+
debugLog(`Eager download: Updating ${table.name}:${key} with ${resolvedBlobs.length} blobs`);
|
|
4529
|
+
yield table.update(key, updateSpec);
|
|
4530
|
+
// liveQuery in blobProgress.ts auto-detects this change
|
|
4531
|
+
}
|
|
4532
|
+
catch (err) {
|
|
4533
|
+
console.error(`Failed to download blobs for ${table.name}:${key}:`, err);
|
|
4534
|
+
}
|
|
4535
|
+
}
|
|
4536
|
+
});
|
|
4537
|
+
// Launch up to MAX_CONCURRENT workers
|
|
4538
|
+
const workers = [];
|
|
4539
|
+
for (let w = 0; w < Math.min(MAX_CONCURRENT, pending.length); w++) {
|
|
4540
|
+
workers.push(runNext());
|
|
4541
|
+
}
|
|
4542
|
+
yield Promise.all(workers);
|
|
4543
|
+
}
|
|
4544
|
+
catch (err) {
|
|
4545
|
+
// Table might not have _hasBlobRefs index or other issues - skip silently
|
|
4546
|
+
}
|
|
4547
|
+
}
|
|
4548
|
+
}
|
|
4549
|
+
finally {
|
|
4550
|
+
setDownloadingState(downloading$, false);
|
|
4551
|
+
}
|
|
4552
|
+
});
|
|
4553
|
+
}
|
|
4554
|
+
|
|
3557
4555
|
// Emulate true-private property db. Why? So it's not stored in DB.
|
|
3558
4556
|
const wm$1 = new WeakMap();
|
|
3559
4557
|
class AuthPersistedContext {
|
|
@@ -4512,7 +5510,7 @@ function createMutationTrackingMiddleware({ currentUserObservable, db, }) {
|
|
|
4512
5510
|
}
|
|
4513
5511
|
if (mode === 'readwrite') {
|
|
4514
5512
|
// Give each transaction a globally unique id.
|
|
4515
|
-
tx.txid = randomString
|
|
5513
|
+
tx.txid = randomString(16);
|
|
4516
5514
|
tx.opCount = 0;
|
|
4517
5515
|
// Introduce the concept of current user that lasts through the entire transaction.
|
|
4518
5516
|
// This is important because the tracked mutations must be connected to the user.
|
|
@@ -4810,6 +5808,318 @@ function createMutationTrackingMiddleware({ currentUserObservable, db, }) {
|
|
|
4810
5808
|
};
|
|
4811
5809
|
}
|
|
4812
5810
|
|
|
5811
|
+
/**
|
|
5812
|
+
* BlobSavingQueue - Queues resolved blobs for saving back to IndexedDB
|
|
5813
|
+
*
|
|
5814
|
+
* Uses setTimeout(fn, 0) instead of queueMicrotask to completely isolate
|
|
5815
|
+
* from Dexie's Promise.PSD context. This prevents the save operation
|
|
5816
|
+
* from inheriting any ongoing transaction.
|
|
5817
|
+
*
|
|
5818
|
+
* Each blob is saved atomically using downCore transaction with the specific
|
|
5819
|
+
* keyPath to avoid race conditions with other property changes.
|
|
5820
|
+
*/
|
|
5821
|
+
class BlobSavingQueue {
|
|
5822
|
+
constructor(db) {
|
|
5823
|
+
this.queue = [];
|
|
5824
|
+
this.isProcessing = false;
|
|
5825
|
+
this.db = db;
|
|
5826
|
+
}
|
|
5827
|
+
/**
|
|
5828
|
+
* Queue a resolved blob for saving.
|
|
5829
|
+
* Only the specific blob property will be updated atomically.
|
|
5830
|
+
*/
|
|
5831
|
+
saveBlobs(tableName, primaryKey, resolvedBlobs) {
|
|
5832
|
+
this.queue.push({ tableName, primaryKey, resolvedBlobs });
|
|
5833
|
+
this.startConsumer();
|
|
5834
|
+
}
|
|
5835
|
+
/**
|
|
5836
|
+
* Start the consumer if not already processing.
|
|
5837
|
+
* Uses setTimeout(fn, 0) to completely break out of any
|
|
5838
|
+
* Dexie transaction context (Promise.PSD).
|
|
5839
|
+
*/
|
|
5840
|
+
startConsumer() {
|
|
5841
|
+
if (this.isProcessing)
|
|
5842
|
+
return;
|
|
5843
|
+
this.isProcessing = true;
|
|
5844
|
+
// Use setTimeout to completely isolate from Dexie's PSD context
|
|
5845
|
+
// queueMicrotask would risk inheriting the current transaction
|
|
5846
|
+
setTimeout(() => {
|
|
5847
|
+
this.processQueue();
|
|
5848
|
+
}, 0);
|
|
5849
|
+
}
|
|
5850
|
+
/**
|
|
5851
|
+
* Process all queued blobs.
|
|
5852
|
+
* Runs in a completely isolated context (no inherited transaction).
|
|
5853
|
+
* Uses atomic updates to avoid race conditions.
|
|
5854
|
+
*/
|
|
5855
|
+
processQueue() {
|
|
5856
|
+
const item = this.queue.shift();
|
|
5857
|
+
if (!item) {
|
|
5858
|
+
this.isProcessing = false;
|
|
5859
|
+
return;
|
|
5860
|
+
}
|
|
5861
|
+
// Atomic update of just the blob property
|
|
5862
|
+
this.db.transaction('rw', item.tableName, (tx) => {
|
|
5863
|
+
const trans = tx.idbtrans;
|
|
5864
|
+
trans.disableChangeTracking = true; // Don't regard this as a change for sync purposes
|
|
5865
|
+
trans.disableAccessControl = true; // Bypass any access control checks since this is an internal operation
|
|
5866
|
+
trans.disableBlobResolve = true; // Custom flag to skip blob resolve middleware during this transaction
|
|
5867
|
+
const updateSpec = {};
|
|
5868
|
+
for (const blob of item.resolvedBlobs) {
|
|
5869
|
+
updateSpec[blob.keyPath] = blob.data;
|
|
5870
|
+
}
|
|
5871
|
+
tx.table(item.tableName).update(item.primaryKey, obj => {
|
|
5872
|
+
// Check that object still has the same unresolved blob refs before applying update (i.e. it hasn't been modified since we read it)
|
|
5873
|
+
for (const blob of item.resolvedBlobs) {
|
|
5874
|
+
// Verify atomicity - none of the blob properties has been modified since we read it. If any of them was modified, skip updating this item to avoid overwriting user changes.
|
|
5875
|
+
const currentValue = Dexie.getByKeyPath(obj, blob.keyPath);
|
|
5876
|
+
if (currentValue === undefined) {
|
|
5877
|
+
// Blob property was removed - skip updating this blob
|
|
5878
|
+
continue;
|
|
5879
|
+
}
|
|
5880
|
+
if (!isBlobRef(currentValue)) {
|
|
5881
|
+
// Blob property was modified to a non-blob-ref value - skip updating this blob
|
|
5882
|
+
continue;
|
|
5883
|
+
}
|
|
5884
|
+
if (currentValue.ref !== blob.ref) {
|
|
5885
|
+
// Blob property was modified - skip updating this blob
|
|
5886
|
+
return; // Stop. Another items has been queued to fully fix the object.
|
|
5887
|
+
}
|
|
5888
|
+
Dexie.setByKeyPath(obj, blob.keyPath, blob.data);
|
|
5889
|
+
}
|
|
5890
|
+
delete obj._hasBlobRefs; // Clear the _hasBlobRefs marker if all refs was resolved.
|
|
5891
|
+
});
|
|
5892
|
+
}).catch((error) => {
|
|
5893
|
+
console.error(`Error saving resolved blobs on ${item.tableName}:${item.primaryKey}:`, error);
|
|
5894
|
+
}).finally(() => {
|
|
5895
|
+
// Process next item in the queue
|
|
5896
|
+
return this.processQueue();
|
|
5897
|
+
});
|
|
5898
|
+
}
|
|
5899
|
+
}
|
|
5900
|
+
|
|
5901
|
+
/**
 * DBCore Middleware ("blobResolve") that resolves BlobRefs on read.
 *
 * Intercepts get/getMany/query/openCursor and resolves any BlobRefs found in
 * objects marked with _hasBlobRefs.
 *
 * Important: Avoids async/await to preserve Dexie's Promise.PSD context.
 * Uses Dexie.waitFor() only for explicit rw transactions to keep them alive.
 * For readonly or implicit transactions, resolves directly (no waitFor needed).
 *
 * Resolved blobs are queued for saving via BlobSavingQueue, which uses
 * setTimeout(fn, 0) to completely isolate from Dexie's transaction context.
 * Each blob is saved atomically with its keyPath to avoid race conditions
 * with other property changes.
 *
 * Blob downloads use the Authorization header (same as sync) via the server
 * proxy endpoint: GET /blob/{ref}
 */
function createBlobResolveMiddleware(db) {
    return {
        stack: 'dbcore',
        name: 'blobResolve',
        level: -2, // Run below other middlewares and after sync and caching middlewares
        create(downlevelDatabase) {
            // One saving queue per database instance.
            const blobSavingQueue = new BlobSavingQueue(db);
            return {
                ...downlevelDatabase,
                table(tableName) {
                    if (!db.cloud) {
                        // db.cloud not yet initialized - skip blob resolution and
                        // fall through to the downlevel table to avoid a crash.
                        return downlevelDatabase.table(tableName);
                    }
                    const dbUrl = db.cloud.options?.databaseUrl;
                    const downlevelTable = downlevelDatabase.table(tableName);
                    // Internal ($-prefixed) tables never carry BlobRefs.
                    if (tableName.startsWith('$')) {
                        return downlevelTable;
                    }
                    return {
                        ...downlevelTable,
                        get(req) {
                            if (req.trans?.disableBlobResolve) {
                                return downlevelTable.get(req);
                            }
                            return downlevelTable.get(req).then((result) => result && hasUnresolvedBlobRefs(result)
                                ? resolveAndSave(downlevelTable, req.trans, req.key, result, blobSavingQueue, db)
                                : result);
                        },
                        getMany(req) {
                            if (req.trans?.disableBlobResolve) {
                                return downlevelTable.getMany(req);
                            }
                            return downlevelTable.getMany(req).then((results) => {
                                // Fast path: nothing in this batch needs resolution.
                                if (!results.some((r) => r && hasUnresolvedBlobRefs(r))) {
                                    return results;
                                }
                                return Dexie.Promise.all(results.map((result, index) => result && hasUnresolvedBlobRefs(result)
                                    ? resolveAndSave(downlevelTable, req.trans, req.keys[index], result, blobSavingQueue, db)
                                    : result));
                            });
                        },
                        query(req) {
                            if (req.trans?.disableBlobResolve) {
                                return downlevelTable.query(req);
                            }
                            return downlevelTable.query(req).then((res) => {
                                if (!res.result || !Array.isArray(res.result)) {
                                    return res;
                                }
                                // Fast path: no row needs resolution.
                                if (!res.result.some((r) => r && hasUnresolvedBlobRefs(r))) {
                                    return res;
                                }
                                return Dexie.Promise.all(res.result.map((item) => item && hasUnresolvedBlobRefs(item)
                                    ? resolveAndSave(downlevelTable, req.trans, undefined, item, blobSavingQueue, db)
                                    : item)).then((resolved) => ({ ...res, result: resolved }));
                            });
                        },
                        openCursor(req) {
                            if (req.trans?.disableBlobResolve) {
                                return downlevelTable.openCursor(req);
                            }
                            return downlevelTable.openCursor(req).then((cursor) => {
                                if (!cursor)
                                    return cursor; // No results, so no resolution needed
                                if (!req.values)
                                    return cursor; // No values requested, so no resolution needed
                                if (!dbUrl)
                                    return cursor; // No database URL configured, can't resolve blobs
                                return createBlobResolvingCursor(cursor, downlevelTable, blobSavingQueue, db);
                            });
                        }
                    };
                }
            };
        },
    };
}
|
|
6009
|
+
/**
 * Wrap a DBCore cursor so BlobRefs in its values are resolved transparently.
 *
 * Object.create() makes the wrapper inherit every cursor member; only two are
 * overridden:
 * - start(): resolves BlobRefs before invoking the consumer's callback
 * - value: an own, writable property holding the (possibly resolved) value
 *
 * The wrapper is returned synchronously; resolution happens inside start()
 * before each onNext call, so cursor.value is always ready when observed.
 */
function createBlobResolvingCursor(cursor, table, blobSavingQueue, db) {
    const wrapped = Object.create(cursor, {
        value: {
            value: cursor.value,
            enumerable: true,
            writable: true
        },
        start: {
            value(onNext) {
                // Intercept each iteration step to resolve BlobRefs first.
                return cursor.start(() => {
                    const raw = cursor.value;
                    if (!raw || !hasUnresolvedBlobRefs(raw)) {
                        onNext();
                        return;
                    }
                    resolveAndSave(table, cursor.trans, cursor.primaryKey, raw, blobSavingQueue, db, true).then((resolved) => {
                        wrapped.value = resolved;
                        onNext();
                    }, (err) => {
                        // Never stall iteration - log and continue with the raw value.
                        console.error('Failed to resolve BlobRefs for cursor value:', err);
                        onNext();
                    });
                });
            }
        }
    });
    return wrapped;
}
|
|
6049
|
+
/**
 * Resolve BlobRefs in an object and persist each resolved blob.
 *
 * Dexie.waitFor() is applied only when needed:
 * - Skipped for implicit readonly transactions (the common liveQuery case)
 * - Applied for explicit rw transactions and cursor values that must keep
 *   the transaction alive across the async fetch
 *
 * In readonly contexts the resolved blobs are handed to BlobSavingQueue
 * (setTimeout-isolated from the current transaction/PSD). In rw contexts
 * the object is saved directly within the same transaction.
 *
 * Returns a Dexie.Promise to preserve PSD context; on any error the original
 * object is returned so the read pipeline is never blocked.
 */
function resolveAndSave(table, trans, pKey, // optional. If missing, tries to extract from object using primary key path
obj, blobSavingQueue, db, isCursorValue = false // Flag to indicate if we're resolving a cursor value (which may not have a primary key)
) {
    try {
        // Transaction.explicit is true when the user called db.transaction()
        // explicitly. Implicit transactions (auto-created for single ops) are
        // handled by Dexie automatically, so no waitFor is needed for them
        // when they are also readonly.
        const tx = Dexie.currentTransaction;
        const isReadonly = tx?.mode === 'readonly';
        const skipWaitFor = isReadonly && tx?.explicit !== true && !isCursorValue;
        const needsWaitFor = tx && !skipWaitFor;
        const dbUrl = db.cloud.options?.databaseUrl || '';
        // Resolved blobs are collected here together with their keyPaths.
        const resolvedBlobs = [];
        const resolution = resolveAllBlobRefs(obj, dbUrl, resolvedBlobs, '', new WeakMap(), db.blobDownloadTracker);
        // waitFor keeps an explicit transaction alive during the fetch.
        const guarded = needsWaitFor
            ? Dexie.waitFor(resolution)
            : Dexie.Promise.resolve(resolution);
        return guarded
            .then((resolved) => {
            // Derive the primary key: prefer the caller-supplied key, otherwise
            // extract it from the object via the table's primary keyPath.
            const { keyPath } = table.schema.primaryKey;
            const key = pKey !== undefined
                ? pKey
                : keyPath
                    ? Dexie.getByKeyPath(obj, keyPath)
                    : undefined;
            if (key !== undefined) {
                if (isReadonly) {
                    // Queue each resolved blob for atomic update, fully isolated
                    // from Dexie's transaction context (avoids inheriting PSD).
                    blobSavingQueue.saveBlobs(table.name, key, resolvedBlobs);
                }
                else {
                    // rw transaction: save directly - we are still inside the
                    // same transaction context.
                    table
                        .mutate({ type: 'put', keys: [key], values: [resolved], trans })
                        .catch((err) => {
                        console.error(`Failed to save resolved blob on ${table.name}:${key}:`, err);
                    });
                }
            }
            return resolved;
        })
            .catch((err) => {
            console.error(`[dexie-cloud:blobResolve] Failed to resolve BlobRefs on ${table.name}:`, err);
            return obj; // Return original object on error - never block the read pipeline
        });
    }
    catch (err) {
        console.error(`[dexie-cloud:blobResolve] Sync error in resolveAndSave on ${table.name}:`, err);
        return Dexie.Promise.resolve(obj); // Never block reads
    }
}
|
|
6122
|
+
|
|
4813
6123
|
function overrideParseStoresSpec(origFunc, dexie) {
|
|
4814
6124
|
return function (stores, dbSchema) {
|
|
4815
6125
|
var _a;
|
|
@@ -4862,6 +6172,11 @@ function overrideParseStoresSpec(origFunc, dexie) {
|
|
|
4862
6172
|
if (!/^\$/.test(tableName)) {
|
|
4863
6173
|
storesClone[`$${tableName}_mutations`] = '++rev';
|
|
4864
6174
|
cloudTableSchema.markedForSync = true;
|
|
6175
|
+
// Add sparse index for _hasBlobRefs (for BlobRef resolution tracking)
|
|
6176
|
+
// IndexedDB sparse indexes have zero overhead when the property doesn't exist
|
|
6177
|
+
if (!storesClone[tableName].includes('_hasBlobRefs')) {
|
|
6178
|
+
storesClone[tableName] += ',_hasBlobRefs';
|
|
6179
|
+
}
|
|
4865
6180
|
}
|
|
4866
6181
|
if (cloudTableSchema.deleted) {
|
|
4867
6182
|
cloudTableSchema.deleted = false;
|
|
@@ -5501,7 +6816,6 @@ function syncIfPossible(db, cloudOptions, cloudSchema, options) {
|
|
|
5501
6816
|
yield checkSyncRateLimitDelay(db);
|
|
5502
6817
|
yield performGuardedJob(db, CURRENT_SYNC_WORKER, () => sync(db, cloudOptions, cloudSchema, options));
|
|
5503
6818
|
ongoingSyncs.delete(db);
|
|
5504
|
-
console.debug('Done sync');
|
|
5505
6819
|
}
|
|
5506
6820
|
catch (error) {
|
|
5507
6821
|
ongoingSyncs.delete(db);
|
|
@@ -5516,8 +6830,6 @@ function syncIfPossible(db, cloudOptions, cloudSchema, options) {
|
|
|
5516
6830
|
}
|
|
5517
6831
|
}
|
|
5518
6832
|
|
|
5519
|
-
const SECONDS = 1000;
|
|
5520
|
-
|
|
5521
6833
|
function LocalSyncWorker(db, cloudOptions, cloudSchema) {
|
|
5522
6834
|
let localSyncEventSubscription = null;
|
|
5523
6835
|
let cancelToken = { cancelled: false };
|
|
@@ -5854,6 +7166,38 @@ const Styles = {
|
|
|
5854
7166
|
color: "#374151",
|
|
5855
7167
|
transition: "all 0.2s ease",
|
|
5856
7168
|
gap: "12px"
|
|
7169
|
+
},
|
|
7170
|
+
// Copy button for alerts with copyText
|
|
7171
|
+
CopyButton: {
|
|
7172
|
+
display: "inline-flex",
|
|
7173
|
+
alignItems: "center",
|
|
7174
|
+
gap: "4px",
|
|
7175
|
+
padding: "4px 10px",
|
|
7176
|
+
marginTop: "8px",
|
|
7177
|
+
border: "1px solid #d1d5db",
|
|
7178
|
+
borderRadius: "4px",
|
|
7179
|
+
backgroundColor: "#f9fafb",
|
|
7180
|
+
cursor: "pointer",
|
|
7181
|
+
fontSize: "12px",
|
|
7182
|
+
fontWeight: "500",
|
|
7183
|
+
color: "#374151",
|
|
7184
|
+
transition: "all 0.15s ease",
|
|
7185
|
+
fontFamily: "monospace"
|
|
7186
|
+
},
|
|
7187
|
+
CopyButtonCopied: {
|
|
7188
|
+
display: "inline-flex",
|
|
7189
|
+
alignItems: "center",
|
|
7190
|
+
gap: "4px",
|
|
7191
|
+
padding: "4px 10px",
|
|
7192
|
+
marginTop: "8px",
|
|
7193
|
+
border: "1px solid #22c55e",
|
|
7194
|
+
borderRadius: "4px",
|
|
7195
|
+
backgroundColor: "#f0fdf4",
|
|
7196
|
+
cursor: "default",
|
|
7197
|
+
fontSize: "12px",
|
|
7198
|
+
fontWeight: "500",
|
|
7199
|
+
color: "#16a34a",
|
|
7200
|
+
fontFamily: "monospace"
|
|
5857
7201
|
}};
|
|
5858
7202
|
|
|
5859
7203
|
function Dialog({ children, className }) {
|
|
@@ -5964,7 +7308,9 @@ function LoginDialog({ title, alerts, fields, options, submitLabel, cancelLabel,
|
|
|
5964
7308
|
return (_$1(Dialog, { className: "dxc-login-dlg" },
|
|
5965
7309
|
_$1(k$1, null,
|
|
5966
7310
|
_$1("h3", { style: Styles.WindowHeader }, title),
|
|
5967
|
-
alerts.map((alert, idx) => (_$1("
|
|
7311
|
+
alerts.map((alert, idx) => (_$1("div", { key: idx },
|
|
7312
|
+
_$1("p", { style: Styles.Alert[alert.type] }, resolveText(alert)),
|
|
7313
|
+
alert.copyText && _$1(CopyButton, { text: alert.copyText })))),
|
|
5968
7314
|
hasOptions && (_$1("div", { class: "dxc-options" }, hasMultipleGroups ? (
|
|
5969
7315
|
// Render with dividers between groups
|
|
5970
7316
|
Array.from(optionGroups.entries()).map(([groupName, groupOptions], groupIdx) => (_$1(k$1, { key: groupName },
|
|
@@ -6003,6 +7349,50 @@ function valueTransformer(type, value) {
|
|
|
6003
7349
|
return value;
|
|
6004
7350
|
}
|
|
6005
7351
|
}
|
|
7352
|
+
// Small copy-to-clipboard button used by login-dialog alerts that carry a
// copyText payload. Shows "Copied!" feedback for 2 seconds after a copy.
// NOTE: d/A/_ are the bundled preact hook aliases (useState/useRef/useEffect);
// their call order must stay stable across renders.
function CopyButton({ text }) {
    const [copied, setCopied] = d(false);
    const pendingReset = A(null);
    // Clear any pending feedback-reset timer when the component unmounts.
    _(() => () => {
        if (pendingReset.current !== null)
            clearTimeout(pendingReset.current);
    }, []);
    const showCopiedFeedback = () => {
        if (pendingReset.current !== null)
            clearTimeout(pendingReset.current);
        setCopied(true);
        pendingReset.current = setTimeout(() => {
            pendingReset.current = null;
            setCopied(false);
        }, 2000);
    };
    const onClick = () => {
        // Prefer the async Clipboard API; fall back to execCommand-based copy
        // when it is unavailable or rejects (e.g. insecure context).
        if (typeof navigator !== 'undefined' && navigator.clipboard?.writeText) {
            navigator.clipboard.writeText(text).then(showCopiedFeedback).catch(() => {
                fallbackCopy(text, showCopiedFeedback);
            });
        }
        else {
            fallbackCopy(text, showCopiedFeedback);
        }
    };
    return (_$1("button", { type: "button", style: copied ? Styles.CopyButtonCopied : Styles.CopyButton, onClick: onClick, title: "Copy to clipboard" }, copied ? '✓ Copied!' : `📋 ${text}`));
}
|
|
7384
|
+
/**
 * Legacy clipboard fallback using a hidden textarea + document.execCommand('copy').
 * Used when the async Clipboard API is unavailable or rejects.
 *
 * @param {string} text - Text to place on the clipboard.
 * @param {() => void} onSuccess - Invoked only when execCommand reports success.
 *
 * Fix: the temporary textarea is now removed in a finally block, so it can no
 * longer leak into document.body if select()/execCommand throws.
 * NOTE: execCommand is deprecated but intentionally kept here as the fallback
 * path for non-secure contexts / older browsers.
 */
function fallbackCopy(text, onSuccess) {
    const textarea = document.createElement('textarea');
    textarea.value = text;
    // Keep the helper element invisible and out of layout flow.
    textarea.style.position = 'fixed';
    textarea.style.opacity = '0';
    document.body.appendChild(textarea);
    let success = false;
    try {
        textarea.select();
        success = document.execCommand('copy');
    }
    finally {
        // Always clean up the temporary element, even if copy threw.
        document.body.removeChild(textarea);
    }
    if (success)
        onSuccess();
}
|
|
6006
7396
|
|
|
6007
7397
|
class LoginGui extends x {
|
|
6008
7398
|
constructor(props) {
|
|
@@ -6147,7 +7537,7 @@ function computeSyncState(db) {
|
|
|
6147
7537
|
|
|
6148
7538
|
function createSharedValueObservable(o, defaultValue) {
|
|
6149
7539
|
let currentValue = defaultValue;
|
|
6150
|
-
let shared = from(o).pipe(map$1((x) => (currentValue = x)), share({ resetOnRefCountZero: () => timer(1000) }));
|
|
7540
|
+
let shared = from(o).pipe(map$1((x) => (currentValue = x)), share$1({ resetOnRefCountZero: () => timer(1000) }));
|
|
6151
7541
|
const rv = new Observable((observer) => {
|
|
6152
7542
|
let didEmit = false;
|
|
6153
7543
|
const subscription = shared.subscribe({
|
|
@@ -6701,9 +8091,10 @@ function dexieCloud(dexie) {
|
|
|
6701
8091
|
currentUserEmitter.next(UNAUTHORIZED_USER);
|
|
6702
8092
|
});
|
|
6703
8093
|
const syncComplete = new Subject();
|
|
8094
|
+
const downloading$ = createDownloadingState();
|
|
6704
8095
|
dexie.cloud = {
|
|
6705
8096
|
// @ts-ignore
|
|
6706
|
-
version: "4.
|
|
8097
|
+
version: "4.4.1",
|
|
6707
8098
|
options: Object.assign({}, DEFAULT_OPTIONS),
|
|
6708
8099
|
schema: null,
|
|
6709
8100
|
get currentUserId() {
|
|
@@ -6718,6 +8109,7 @@ function dexieCloud(dexie) {
|
|
|
6718
8109
|
syncComplete,
|
|
6719
8110
|
},
|
|
6720
8111
|
persistedSyncState: new BehaviorSubject(undefined),
|
|
8112
|
+
blobProgress: observeBlobProgress(DexieCloudDB(dexie), downloading$),
|
|
6721
8113
|
userInteraction: new BehaviorSubject(undefined),
|
|
6722
8114
|
webSocketStatus: new BehaviorSubject('not-started'),
|
|
6723
8115
|
login(hint) {
|
|
@@ -6730,6 +8122,16 @@ function dexieCloud(dexie) {
|
|
|
6730
8122
|
invites: getInvitesObservable(dexie),
|
|
6731
8123
|
roles: getGlobalRolesObservable(dexie),
|
|
6732
8124
|
configure(options) {
|
|
8125
|
+
// Validate maxStringLength — Infinity disables offloading, otherwise must be
|
|
8126
|
+
// a finite positive number not exceeding the server limit (32768).
|
|
8127
|
+
const MAX_SERVER_STRING_LENGTH = 32768;
|
|
8128
|
+
if (options.maxStringLength !== undefined &&
|
|
8129
|
+
options.maxStringLength !== Infinity &&
|
|
8130
|
+
(!Number.isFinite(options.maxStringLength) ||
|
|
8131
|
+
options.maxStringLength < 0 ||
|
|
8132
|
+
options.maxStringLength > MAX_SERVER_STRING_LENGTH)) {
|
|
8133
|
+
throw new Error(`maxStringLength must be Infinity or a finite number in [0, ${MAX_SERVER_STRING_LENGTH}]. Got: ${options.maxStringLength}`);
|
|
8134
|
+
}
|
|
6733
8135
|
options = dexie.cloud.options = Object.assign(Object.assign({}, dexie.cloud.options), options);
|
|
6734
8136
|
configuredProgramatically = true;
|
|
6735
8137
|
if (options.databaseUrl && options.nameSuffix) {
|
|
@@ -6831,6 +8233,7 @@ function dexieCloud(dexie) {
|
|
|
6831
8233
|
var _a, _b;
|
|
6832
8234
|
return ((_b = (_a = this.db.cloud.schema) === null || _a === void 0 ? void 0 : _a[this.name]) === null || _b === void 0 ? void 0 : _b.idPrefix) || '';
|
|
6833
8235
|
};
|
|
8236
|
+
dexie.use(createBlobResolveMiddleware(DexieCloudDB(dexie)));
|
|
6834
8237
|
dexie.use(createMutationTrackingMiddleware({
|
|
6835
8238
|
currentUserObservable: dexie.cloud.currentUser,
|
|
6836
8239
|
db: DexieCloudDB(dexie),
|
|
@@ -6839,7 +8242,7 @@ function dexieCloud(dexie) {
|
|
|
6839
8242
|
dexie.use(createIdGenerationMiddleware(DexieCloudDB(dexie)));
|
|
6840
8243
|
function onDbReady(dexie) {
|
|
6841
8244
|
return __awaiter(this, void 0, void 0, function* () {
|
|
6842
|
-
var _a, _b, _c, _d, _e, _f, _g;
|
|
8245
|
+
var _a, _b, _c, _d, _e, _f, _g, _h, _j;
|
|
6843
8246
|
closed = false; // As Dexie calls us, we are not closed anymore. Maybe reopened? Remember db.ready event is registered with sticky flag!
|
|
6844
8247
|
const db = DexieCloudDB(dexie);
|
|
6845
8248
|
// Setup default GUI:
|
|
@@ -6853,6 +8256,25 @@ function dexieCloud(dexie) {
|
|
|
6853
8256
|
}
|
|
6854
8257
|
// Forward db.syncCompleteEvent to be publicly consumable via db.cloud.events.syncComplete:
|
|
6855
8258
|
subscriptions.push(db.syncCompleteEvent.subscribe(syncComplete));
|
|
8259
|
+
// Eager blob download: When blobMode='eager' (default), download unresolved blobs after sync
|
|
8260
|
+
const blobMode = (_c = (_b = db.cloud.options) === null || _b === void 0 ? void 0 : _b.blobMode) !== null && _c !== void 0 ? _c : 'eager';
|
|
8261
|
+
if (blobMode === 'eager') {
|
|
8262
|
+
let eagerBlobDownloadInFlight = null;
|
|
8263
|
+
const downloadBlobs = () => {
|
|
8264
|
+
if (eagerBlobDownloadInFlight)
|
|
8265
|
+
return;
|
|
8266
|
+
eagerBlobDownloadInFlight = Dexie.ignoreTransaction(() => downloadUnresolvedBlobs(db, downloading$))
|
|
8267
|
+
.catch(err => {
|
|
8268
|
+
console.error('[dexie-cloud] Eager blob download failed:', err);
|
|
8269
|
+
})
|
|
8270
|
+
.finally(() => {
|
|
8271
|
+
eagerBlobDownloadInFlight = null;
|
|
8272
|
+
});
|
|
8273
|
+
};
|
|
8274
|
+
setTimeout(downloadBlobs, 0); // Don't block ready event. Start downloading blobs in the background right after.
|
|
8275
|
+
// And also after every sync completes:
|
|
8276
|
+
subscriptions.push(db.syncCompleteEvent.subscribe(downloadBlobs));
|
|
8277
|
+
}
|
|
6856
8278
|
//verifyConfig(db.cloud.options); Not needed (yet at least!)
|
|
6857
8279
|
// Verify the user has allowed version increment.
|
|
6858
8280
|
if (!db.tables.every((table) => table.core)) {
|
|
@@ -7008,7 +8430,7 @@ function dexieCloud(dexie) {
|
|
|
7008
8430
|
// Continue with normal flow - user can try again
|
|
7009
8431
|
}
|
|
7010
8432
|
}
|
|
7011
|
-
const requireAuth = (
|
|
8433
|
+
const requireAuth = (_d = db.cloud.options) === null || _d === void 0 ? void 0 : _d.requireAuth;
|
|
7012
8434
|
if (requireAuth) {
|
|
7013
8435
|
if (db.cloud.isServiceWorkerDB) {
|
|
7014
8436
|
// If this is a service worker DB, we can't do authentication here,
|
|
@@ -7045,20 +8467,20 @@ function dexieCloud(dexie) {
|
|
|
7045
8467
|
localSyncWorker.stop();
|
|
7046
8468
|
localSyncWorker = null;
|
|
7047
8469
|
throwIfClosed();
|
|
7048
|
-
const doInitialSync = ((
|
|
8470
|
+
const doInitialSync = ((_e = db.cloud.options) === null || _e === void 0 ? void 0 : _e.databaseUrl) && (!initiallySynced || changedUser);
|
|
7049
8471
|
if (doInitialSync) {
|
|
7050
8472
|
// Do the initial sync directly in the browser thread no matter if we are using service worker or not.
|
|
7051
8473
|
yield performInitialSync(db, db.cloud.options, db.cloud.schema);
|
|
7052
8474
|
db.setInitiallySynced(true);
|
|
7053
8475
|
}
|
|
7054
8476
|
throwIfClosed();
|
|
7055
|
-
if (db.cloud.usingServiceWorker && ((
|
|
8477
|
+
if (db.cloud.usingServiceWorker && ((_f = db.cloud.options) === null || _f === void 0 ? void 0 : _f.databaseUrl)) {
|
|
7056
8478
|
if (!doInitialSync) {
|
|
7057
8479
|
registerSyncEvent(db, 'push').catch(() => { });
|
|
7058
8480
|
}
|
|
7059
8481
|
registerPeriodicSyncEvent(db).catch(() => { });
|
|
7060
8482
|
}
|
|
7061
|
-
else if (((
|
|
8483
|
+
else if (((_g = db.cloud.options) === null || _g === void 0 ? void 0 : _g.databaseUrl) &&
|
|
7062
8484
|
db.cloud.schema &&
|
|
7063
8485
|
!db.cloud.isServiceWorkerDB) {
|
|
7064
8486
|
// There's no SW. Start SyncWorker instead.
|
|
@@ -7087,8 +8509,8 @@ function dexieCloud(dexie) {
|
|
|
7087
8509
|
}));
|
|
7088
8510
|
}
|
|
7089
8511
|
// Connect WebSocket unless we are in a service worker or websocket is disabled.
|
|
7090
|
-
if (((
|
|
7091
|
-
!((
|
|
8512
|
+
if (((_h = db.cloud.options) === null || _h === void 0 ? void 0 : _h.databaseUrl) &&
|
|
8513
|
+
!((_j = db.cloud.options) === null || _j === void 0 ? void 0 : _j.disableWebSocket) &&
|
|
7092
8514
|
!IS_SERVICE_WORKER) {
|
|
7093
8515
|
subscriptions.push(connectWebSocket(db));
|
|
7094
8516
|
}
|
|
@@ -7096,7 +8518,7 @@ function dexieCloud(dexie) {
|
|
|
7096
8518
|
}
|
|
7097
8519
|
}
|
|
7098
8520
|
// @ts-ignore
|
|
7099
|
-
dexieCloud.version = "4.
|
|
8521
|
+
dexieCloud.version = "4.4.1";
|
|
7100
8522
|
Dexie.Cloud = dexieCloud;
|
|
7101
8523
|
|
|
7102
8524
|
// In case the SW lives for a while, let it reuse already opened connections:
|