cry-synced-db-client 0.1.145 → 0.1.147
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +101 -0
- package/dist/index.js +384 -61
- package/dist/src/db/RestProxy.d.ts +7 -3
- package/dist/src/db/SyncedDb.d.ts +73 -7
- package/dist/src/db/sync/SyncEngine.d.ts +7 -2
- package/dist/src/db/types/managers.d.ts +21 -1
- package/dist/src/types/I_RestInterface.d.ts +18 -2
- package/dist/src/types/I_SyncedDb.d.ts +38 -3
- package/package.json +2 -2
package/CHANGELOG.md
CHANGED
|
@@ -2,6 +2,77 @@
|
|
|
2
2
|
|
|
3
3
|
## Unreleased
|
|
4
4
|
|
|
5
|
+
### Auto-eviction co-located with sync — one round-trip total
|
|
6
|
+
|
|
7
|
+
When `evictStaleRecordsEveryHrs > 0` and the interval has elapsed, the
|
|
8
|
+
scope-exit eviction queries now ride along on sync's existing
|
|
9
|
+
`findNewerManyStream` call instead of issuing a separate `findNewerMany`
|
|
10
|
+
afterwards. Saves one server round-trip per "due" sync.
|
|
11
|
+
|
|
12
|
+
- `SyncedDb.sync()` pre-computes the eviction plan (Dexie scan + spec
|
|
13
|
+
build) before invoking `SyncEngine.sync()`, hands the spec list as
|
|
14
|
+
`extras`, and applies the deletes in `finally`. The old post-sync
|
|
15
|
+
`maybeAutoEvict()` call is gone.
|
|
16
|
+
- New `SyncExtras` type on `I_SyncEngine.sync(calledFrom?, extras?)`:
|
|
17
|
+
appends caller-supplied `findNewerMany` specs (each tagged with a
|
|
18
|
+
unique `specId`) to the same streamed call. Chunks for those specs
|
|
19
|
+
are routed via `extras.onChunk(specId, items)` and never enter
|
|
20
|
+
positive-sync conflict resolution.
|
|
21
|
+
- Sync-lifecycle callbacks (`onSyncStart`/`onSyncEnd`/`onServerSyncStart`/
|
|
22
|
+
`onServerSyncEnd`/`onSyncProgress`/`onFindNewerManyCall`/
|
|
23
|
+
`onFindNewerManyResult`) are deliberately scoped to positive sync —
|
|
24
|
+
they do **not** count or report scope-exit specs. Eviction lifecycle
|
|
25
|
+
stays observable via `onEvictionStart` / `onEviction`.
|
|
26
|
+
- Manual `evictOutOfScopeRecordsAll("manual")` keeps the standalone
|
|
27
|
+
`findNewerMany` round-trip behavior unchanged.
|
|
28
|
+
- Internal refactor: `evictOutOfScopeRecordsAll` split into
|
|
29
|
+
`_collectScopeExitPlan` (phase 1), `_applyScopeExitChunkToPlan`
|
|
30
|
+
(server result handling, shared with sync's onChunk dispatch), and
|
|
31
|
+
`_applyScopeExitPlan` (phase 3 — deletes + EvictionInfo + onEviction).
|
|
32
|
+
|
|
33
|
+
### `GetNewerSpec.specId` — disambiguate duplicate-collection specs
|
|
34
|
+
|
|
35
|
+
New optional field on `GetNewerSpec` mirrors the cry-db ≥ 2.4.32 API
|
|
36
|
+
addition: when set, results for that spec are keyed by `specId` in
|
|
37
|
+
`findNewerMany` responses (and emitted as the third `onChunk` arg in
|
|
38
|
+
`findNewerManyStream`) instead of `collection`. Lets a single batched
|
|
39
|
+
request carry multiple specs against the same collection without
|
|
40
|
+
`Record<key, items[]>` collisions.
|
|
41
|
+
|
|
42
|
+
- Backwards compatible — specs without `specId` keep collection-keyed
|
|
43
|
+
behavior.
|
|
44
|
+
- `RestProxy.parseStreamingResponse` learned a new wire frame type
|
|
45
|
+
`0x02`: `[type:1=0x02][nameLen:2][name:N][specIdLen:2][specId:M][dataLen:4][payload]`,
|
|
46
|
+
emitted by rdb2 when any spec sets `specId`. Type `0x01` unchanged
|
|
47
|
+
for back-compat.
|
|
48
|
+
- All mocks (`MockRestInterfaceFake`, `MockRestInterfaceReal`,
|
|
49
|
+
`MockRestInterface`) honor `specId` in both the non-streaming and
|
|
50
|
+
streaming variants.
|
|
51
|
+
|
|
52
|
+
### Eviction batched via `findNewerMany` + `onEvictionStart` lifecycle
|
|
53
|
+
|
|
54
|
+
- `evictOutOfScopeRecordsAll`'s server-assisted pass collapses N
|
|
55
|
+
per-collection round-trips into **one** `findNewerMany` call, with
|
|
56
|
+
per-(collection × CHUNK_SIZE-id-batch) specs disambiguated via
|
|
57
|
+
`specId = "<collection>#<chunkIndex>"`. `findServerSideScopeExits`
|
|
58
|
+
(single-collection helper) similarly bundles all chunks into one call.
|
|
59
|
+
- New `onEvictionStart(info: EvictionStartInfo)` callback fires before
|
|
60
|
+
phase 1 with `{ trigger, collectionCount, collections[] }` so consumers
|
|
61
|
+
see a debug breadcrumb of which collections will be scanned.
|
|
62
|
+
- Extended `EvictionCollectionInfo` with `localEvictedCount`,
|
|
63
|
+
`serverEvictedCount`, `serverCandidateCount` — split by source so
|
|
64
|
+
debug output distinguishes "cached state failed" from "server
|
|
65
|
+
reported out of scope".
|
|
66
|
+
- Extended `EvictionInfo` with `totalLocalEvicted`, `totalServerEvicted`,
|
|
67
|
+
`totalServerCandidates`, `serverRounds`, `serverFailed` — aggregate
|
|
68
|
+
counters for the batched pass.
|
|
69
|
+
|
|
70
|
+
**Type addition (non-breaking for producers, possibly breaking for
|
|
71
|
+
consumers constructing literals):** the new fields are required on
|
|
72
|
+
the public `EvictionCollectionInfo` / `EvictionInfo` types since the
|
|
73
|
+
library always populates them. Downstream code that constructs mock
|
|
74
|
+
literals (e.g. tests) needs the new fields.
|
|
75
|
+
|
|
5
76
|
### Fix: filtered-sync tombstone (scope-exit from other writers)
|
|
6
77
|
|
|
7
78
|
When a collection has `syncConfig.query` (e.g. `{ status: { $ne: "obsolete" } }`)
|
|
@@ -108,6 +179,36 @@ Signature is identical. No other callback changes.
|
|
|
108
179
|
- Noop when offline or on writeOnly collections.
|
|
109
180
|
- Ignored on `find` / `findOne` (use `referToServer` there).
|
|
110
181
|
|
|
182
|
+
## 0.1.146 (2026-04-25)
|
|
183
|
+
|
|
184
|
+
### Fix: INITIAL SYNC re-fetched whole dataset on every reload (cursor race)
|
|
185
|
+
|
|
186
|
+
`syncMetaCache` was populated lazily inside `loadCollectionToInMem` — one
|
|
187
|
+
collection at a time, only after that collection's records finished hydrating
|
|
188
|
+
into in-memory state. When `ConnectionManager.tryGoOnline` fired
|
|
189
|
+
`sync("INITIAL SYNC")` on WS connect (or `setSyncOnlyTheseCollections`
|
|
190
|
+
expanded the allowed set under a fresh login), the sync engine read
|
|
191
|
+
`syncMetaCache.get(collection)?.lastSyncTs` for each collection — and for
|
|
192
|
+
collections whose hydration hadn't completed yet, it found nothing and fell
|
|
193
|
+
back to `timestamp: 0` (`SyncEngine.ts:94`). The server then returned every
|
|
194
|
+
matching row since epoch, not a delta.
|
|
195
|
+
|
|
196
|
+
In one observed reproducer with 62 sync'd collections, 58 of them arrived at
|
|
197
|
+
the server with `timestamp: 0` and the server replied with 60 131 rows on a
|
|
198
|
+
session that already had 58 745 rows cached locally — i.e. ~full re-fetch
|
|
199
|
+
every reload, regardless of how recently the client had synced.
|
|
200
|
+
|
|
201
|
+
`init()` now eagerly preloads sync cursors for every registered collection
|
|
202
|
+
into `syncMetaCache` before `connectionManager.startTimers()` runs, via a
|
|
203
|
+
parallel fan-out of `getSyncMeta` reads. Cursor cache availability is
|
|
204
|
+
decoupled from in-mem record hydration: a sync triggered the moment WS comes
|
|
205
|
+
up reads a fully populated cache, regardless of where hydration is. One Dexie
|
|
206
|
+
point-lookup per registered collection — cheap.
|
|
207
|
+
|
|
208
|
+
The lazy populate inside `loadCollectionToInMem` is retained as a defensive
|
|
209
|
+
overwrite for collections registered after init (e.g. via dynamic
|
|
210
|
+
`addCollection`).
|
|
211
|
+
|
|
111
212
|
## 0.1.145 (2026-04-25)
|
|
112
213
|
|
|
113
214
|
### Fix: `onSyncProgress` back-track during initial sync
|
package/dist/index.js
CHANGED
|
@@ -2435,8 +2435,13 @@ var _SyncEngine = class _SyncEngine {
|
|
|
2435
2435
|
/**
|
|
2436
2436
|
* Execute full sync cycle.
|
|
2437
2437
|
* Called by SyncedDb which handles locking.
|
|
2438
|
+
*
|
|
2439
|
+
* `extras` lets the caller append additional `findNewerMany` specs
|
|
2440
|
+
* (each with a unique `specId`) onto the same streamed call. Chunks
|
|
2441
|
+
* for those specs are routed via `extras.onChunk(specId, items)` and
|
|
2442
|
+
* never enter the positive-sync processing path.
|
|
2438
2443
|
*/
|
|
2439
|
-
async sync(calledFrom) {
|
|
2444
|
+
async sync(calledFrom, extras) {
|
|
2440
2445
|
var _a, _b;
|
|
2441
2446
|
const startTime = Date.now();
|
|
2442
2447
|
let receivedCount = 0;
|
|
@@ -2481,10 +2486,15 @@ var _SyncEngine = class _SyncEngine {
|
|
|
2481
2486
|
}
|
|
2482
2487
|
try {
|
|
2483
2488
|
const completedCollections = /* @__PURE__ */ new Set();
|
|
2489
|
+
const allSpecs = extras && extras.specs.length > 0 ? [...syncSpecs, ...extras.specs] : syncSpecs;
|
|
2484
2490
|
await this.deps.withSyncTimeout(
|
|
2485
2491
|
this.restInterface.findNewerManyStream(
|
|
2486
|
-
|
|
2487
|
-
async (collection, items) => {
|
|
2492
|
+
allSpecs,
|
|
2493
|
+
async (collection, items, specId) => {
|
|
2494
|
+
if (specId !== void 0) {
|
|
2495
|
+
if (extras) extras.onChunk(specId, items);
|
|
2496
|
+
return;
|
|
2497
|
+
}
|
|
2488
2498
|
const config = configMap.get(collection);
|
|
2489
2499
|
if (!config) return;
|
|
2490
2500
|
const state = collectionState.get(collection);
|
|
@@ -3521,6 +3531,7 @@ var _SyncedDb = class _SyncedDb {
|
|
|
3521
3531
|
this.onWsNotification = config.onWsNotification;
|
|
3522
3532
|
this.onCrossTabSync = config.onCrossTabSync;
|
|
3523
3533
|
this.onWakeSync = config.onWakeSync;
|
|
3534
|
+
this.onEvictionStart = config.onEvictionStart;
|
|
3524
3535
|
this.onEviction = config.onEviction;
|
|
3525
3536
|
this.evictStaleRecordsEveryHrs = (_e = config.evictStaleRecordsEveryHrs) != null ? _e : 0;
|
|
3526
3537
|
for (const col of config.collections) {
|
|
@@ -3801,6 +3812,7 @@ var _SyncedDb = class _SyncedDb {
|
|
|
3801
3812
|
}
|
|
3802
3813
|
}
|
|
3803
3814
|
await this.pendingChanges.recoverPendingWrites();
|
|
3815
|
+
await this.preloadAllSyncMetas();
|
|
3804
3816
|
const allowedColls = [...this.collections.keys()].filter((n) => this.isSyncAllowed(n));
|
|
3805
3817
|
await this.loadCollectionsToInMem(allowedColls, "init");
|
|
3806
3818
|
this.leaderElection.init();
|
|
@@ -4476,8 +4488,26 @@ var _SyncedDb = class _SyncedDb {
|
|
|
4476
4488
|
}
|
|
4477
4489
|
this.syncing = true;
|
|
4478
4490
|
this.crossTabSync.startServerSync();
|
|
4491
|
+
let evictionPlan;
|
|
4492
|
+
let evictionServerFailed = false;
|
|
4493
|
+
if (await this._isAutoEvictionDue()) {
|
|
4494
|
+
try {
|
|
4495
|
+
evictionPlan = await this._collectScopeExitPlan("auto");
|
|
4496
|
+
} catch (err) {
|
|
4497
|
+
console.error(
|
|
4498
|
+
"[evict] phase 1 failed (skipping bundled eviction):",
|
|
4499
|
+
err
|
|
4500
|
+
);
|
|
4501
|
+
}
|
|
4502
|
+
}
|
|
4503
|
+
const evictionExtras = evictionPlan && evictionPlan.specs.length > 0 ? {
|
|
4504
|
+
specs: evictionPlan.specs,
|
|
4505
|
+
onChunk: (specId, items) => {
|
|
4506
|
+
this._applyScopeExitChunkToPlan(evictionPlan, specId, items);
|
|
4507
|
+
}
|
|
4508
|
+
} : void 0;
|
|
4479
4509
|
try {
|
|
4480
|
-
await this.syncEngine.sync(calledFrom);
|
|
4510
|
+
await this.syncEngine.sync(calledFrom, evictionExtras);
|
|
4481
4511
|
if (!this.syncOnlyCollections) {
|
|
4482
4512
|
const now = /* @__PURE__ */ new Date();
|
|
4483
4513
|
if (!this._lastFullSyncDate) {
|
|
@@ -4489,14 +4519,24 @@ var _SyncedDb = class _SyncedDb {
|
|
|
4489
4519
|
console.error("Failed to persist lastFullSync:", err);
|
|
4490
4520
|
});
|
|
4491
4521
|
}
|
|
4522
|
+
} catch (err) {
|
|
4523
|
+
if (evictionExtras) evictionServerFailed = true;
|
|
4524
|
+
throw err;
|
|
4492
4525
|
} finally {
|
|
4493
4526
|
this.syncing = false;
|
|
4494
4527
|
this.crossTabSync.endServerSync();
|
|
4495
4528
|
await this.processQueuedWsUpdates();
|
|
4496
|
-
|
|
4497
|
-
|
|
4498
|
-
|
|
4499
|
-
|
|
4529
|
+
if (evictionPlan) {
|
|
4530
|
+
try {
|
|
4531
|
+
await this._applyScopeExitPlan(
|
|
4532
|
+
evictionPlan,
|
|
4533
|
+
evictionServerFailed,
|
|
4534
|
+
evictionExtras ? 1 : 0
|
|
4535
|
+
);
|
|
4536
|
+
await this._persistEvictionTimestamp();
|
|
4537
|
+
} catch (err) {
|
|
4538
|
+
console.error("[evict] phase 3 failed:", err);
|
|
4539
|
+
}
|
|
4500
4540
|
}
|
|
4501
4541
|
}
|
|
4502
4542
|
} finally {
|
|
@@ -4725,7 +4765,15 @@ var _SyncedDb = class _SyncedDb {
|
|
|
4725
4765
|
const syncQuery = (_a = config.syncConfig) == null ? void 0 : _a.query;
|
|
4726
4766
|
const query = typeof syncQuery === "function" ? syncQuery() : syncQuery;
|
|
4727
4767
|
if (!query) {
|
|
4728
|
-
return {
|
|
4768
|
+
return {
|
|
4769
|
+
collection,
|
|
4770
|
+
evictedCount: 0,
|
|
4771
|
+
localEvictedCount: 0,
|
|
4772
|
+
serverEvictedCount: 0,
|
|
4773
|
+
dirtySkipped: 0,
|
|
4774
|
+
scannedCount: 0,
|
|
4775
|
+
serverCandidateCount: 0
|
|
4776
|
+
};
|
|
4729
4777
|
}
|
|
4730
4778
|
await this.pendingChanges.flushForCollection(collection);
|
|
4731
4779
|
const dirtyItems = await this.dexieDb.getDirty(collection);
|
|
@@ -4754,6 +4802,8 @@ var _SyncedDb = class _SyncedDb {
|
|
|
4754
4802
|
}
|
|
4755
4803
|
}
|
|
4756
4804
|
);
|
|
4805
|
+
const localEvictedCount = evictIds.length;
|
|
4806
|
+
let serverEvictedCount = 0;
|
|
4757
4807
|
if (serverAssisted && serverCandidateIds.length > 0) {
|
|
4758
4808
|
let scopeExitTimestamp;
|
|
4759
4809
|
const lookbehindMs = opts == null ? void 0 : opts.outOfWindowLookbehindMs;
|
|
@@ -4773,6 +4823,7 @@ var _SyncedDb = class _SyncedDb {
|
|
|
4773
4823
|
scopeExitTimestamp
|
|
4774
4824
|
);
|
|
4775
4825
|
for (const id of serverExits) evictIds.push(id);
|
|
4826
|
+
serverEvictedCount = serverExits.length;
|
|
4776
4827
|
} catch (err) {
|
|
4777
4828
|
console.error(
|
|
4778
4829
|
`[evict] server-assisted pass failed for ${collection} (proceeding with local-only):`,
|
|
@@ -4791,7 +4842,15 @@ var _SyncedDb = class _SyncedDb {
|
|
|
4791
4842
|
}
|
|
4792
4843
|
this.crossTabSync.broadcastReload([collection]);
|
|
4793
4844
|
}
|
|
4794
|
-
return {
|
|
4845
|
+
return {
|
|
4846
|
+
collection,
|
|
4847
|
+
evictedCount: evictIds.length,
|
|
4848
|
+
localEvictedCount,
|
|
4849
|
+
serverEvictedCount,
|
|
4850
|
+
dirtySkipped,
|
|
4851
|
+
scannedCount,
|
|
4852
|
+
serverCandidateCount: serverCandidateIds.length
|
|
4853
|
+
};
|
|
4795
4854
|
}
|
|
4796
4855
|
/**
|
|
4797
4856
|
* Ask the server which of the given IDs no longer match the positive
|
|
@@ -4800,9 +4859,10 @@ var _SyncedDb = class _SyncedDb {
|
|
|
4800
4859
|
* out of scope by other writers that the filtered delta feed would
|
|
4801
4860
|
* never report.
|
|
4802
4861
|
*
|
|
4803
|
-
*
|
|
4804
|
-
*
|
|
4805
|
-
*
|
|
4862
|
+
* Bundles all id chunks into a single `findNewerMany` call, keyed by
|
|
4863
|
+
* `specId = "<collection>#<chunkIndex>"` so multiple specs against the same
|
|
4864
|
+
* collection don't collide on the response. One round-trip regardless
|
|
4865
|
+
* of `candidateIds.length`.
|
|
4806
4866
|
*
|
|
4807
4867
|
* @param timestamp - `findNewer` timestamp cursor. Only records with
|
|
4808
4868
|
* `_ts > timestamp` are examined. Caller decides the window (usually
|
|
@@ -4812,24 +4872,31 @@ var _SyncedDb = class _SyncedDb {
|
|
|
4812
4872
|
* `NOT query`). These are candidates for local-only eviction.
|
|
4813
4873
|
*/
|
|
4814
4874
|
async findServerSideScopeExits(collection, positiveQuery, candidateIds, timestamp) {
|
|
4875
|
+
var _a;
|
|
4876
|
+
if (candidateIds.length === 0) return [];
|
|
4815
4877
|
const CHUNK_SIZE = 500;
|
|
4816
|
-
const scopeExits = [];
|
|
4817
4878
|
const negated = { $nor: [positiveQuery] };
|
|
4879
|
+
const specs = [];
|
|
4818
4880
|
for (let i = 0; i < candidateIds.length; i += CHUNK_SIZE) {
|
|
4819
4881
|
const chunk = candidateIds.slice(i, i + CHUNK_SIZE);
|
|
4820
|
-
|
|
4821
|
-
|
|
4822
|
-
|
|
4823
|
-
|
|
4824
|
-
|
|
4825
|
-
|
|
4826
|
-
|
|
4827
|
-
|
|
4828
|
-
|
|
4829
|
-
|
|
4830
|
-
|
|
4831
|
-
)
|
|
4832
|
-
|
|
4882
|
+
specs.push({
|
|
4883
|
+
collection,
|
|
4884
|
+
timestamp,
|
|
4885
|
+
query: {
|
|
4886
|
+
$and: [negated, { _id: { $in: chunk } }]
|
|
4887
|
+
},
|
|
4888
|
+
opts: { project: { _id: 1 } },
|
|
4889
|
+
specId: `${collection}#${i / CHUNK_SIZE}`
|
|
4890
|
+
});
|
|
4891
|
+
}
|
|
4892
|
+
const results = await this.connectionManager.withRestTimeout(
|
|
4893
|
+
this.restInterface.findNewerMany(specs),
|
|
4894
|
+
"evictOutOfScopeRecords.serverAssisted"
|
|
4895
|
+
);
|
|
4896
|
+
const scopeExits = [];
|
|
4897
|
+
for (const spec of specs) {
|
|
4898
|
+
const items = (_a = results[spec.specId]) != null ? _a : [];
|
|
4899
|
+
for (const item of items) {
|
|
4833
4900
|
const id = item._id;
|
|
4834
4901
|
if (id !== void 0) scopeExits.push(String(id));
|
|
4835
4902
|
}
|
|
@@ -4839,37 +4906,231 @@ var _SyncedDb = class _SyncedDb {
|
|
|
4839
4906
|
/**
|
|
4840
4907
|
* Evict out-of-scope records for all collections.
|
|
4841
4908
|
* Skips writeOnly and collections without syncConfig.query.
|
|
4842
|
-
* Fires onEviction
|
|
4909
|
+
* Fires onEvictionStart before phase 1 and onEviction at the end.
|
|
4910
|
+
*
|
|
4911
|
+
* Three phases:
|
|
4912
|
+
* 1. Local pass per eligible collection — Dexie scan, partition into
|
|
4913
|
+
* locally-failing (immediate evict candidates) vs locally-matching
|
|
4914
|
+
* (server-check candidates). Dirty records are skipped.
|
|
4915
|
+
* 2. Server-assisted pass — a single `findNewerMany` call carrying
|
|
4916
|
+
* one spec per (collection × CHUNK_SIZE-id-batch), disambiguated
|
|
4917
|
+
* via `specId`. Collapses the prior N-per-collection round-trips
|
|
4918
|
+
* into one server round-trip total.
|
|
4919
|
+
* 3. Apply evictions per collection (Dexie + in-mem deletes,
|
|
4920
|
+
* cross-tab reload broadcast).
|
|
4921
|
+
*
|
|
4922
|
+
* Server-assisted is automatic when the collection is online and not
|
|
4923
|
+
* writeOnly — same default as `evictOutOfScopeRecords`. On batch
|
|
4924
|
+
* failure, local evictions are still applied; remaining server rounds
|
|
4925
|
+
* are skipped (logged, not thrown), and `serverFailed=true` is reported
|
|
4926
|
+
* on the EvictionInfo.
|
|
4843
4927
|
*/
|
|
4844
4928
|
async evictOutOfScopeRecordsAll(trigger = "manual") {
|
|
4845
|
-
|
|
4929
|
+
const plan = await this._collectScopeExitPlan(trigger);
|
|
4930
|
+
let serverFailed = false;
|
|
4931
|
+
let serverRounds = 0;
|
|
4932
|
+
if (plan.specs.length > 0) {
|
|
4933
|
+
try {
|
|
4934
|
+
const results = await this.connectionManager.withRestTimeout(
|
|
4935
|
+
this.restInterface.findNewerMany(plan.specs),
|
|
4936
|
+
"evictOutOfScopeRecordsAll.serverAssisted"
|
|
4937
|
+
);
|
|
4938
|
+
serverRounds = 1;
|
|
4939
|
+
for (const [specId, items] of Object.entries(results)) {
|
|
4940
|
+
this._applyScopeExitChunkToPlan(plan, specId, items);
|
|
4941
|
+
}
|
|
4942
|
+
} catch (err) {
|
|
4943
|
+
serverFailed = true;
|
|
4944
|
+
console.error(
|
|
4945
|
+
"[evict] server-assisted batch failed (proceeding with local-only):",
|
|
4946
|
+
err
|
|
4947
|
+
);
|
|
4948
|
+
}
|
|
4949
|
+
}
|
|
4950
|
+
return this._applyScopeExitPlan(plan, serverFailed, serverRounds);
|
|
4951
|
+
}
|
|
4952
|
+
/**
|
|
4953
|
+
* Phase 1 of eviction: pre-filter eligible collections, fire
|
|
4954
|
+
* `onEvictionStart`, scan Dexie per collection to partition records
|
|
4955
|
+
* into local-fail (immediate evict) vs server-check (candidates), and
|
|
4956
|
+
* build the `findNewerMany` spec list (each spec tagged with a unique
|
|
4957
|
+
* `specId`).
|
|
4958
|
+
*
|
|
4959
|
+
* Returns a plan object that can be:
|
|
4960
|
+
* - Sent to the server as a standalone `findNewerMany` call (manual eviction),
|
|
4961
|
+
* - Bundled into sync's `findNewerManyStream` via `SyncExtras` (auto eviction),
|
|
4962
|
+
* - Or applied with empty server results when offline/writeOnly.
|
|
4963
|
+
*
|
|
4964
|
+
* Always fires `onEvictionStart` (even with collectionCount=0) so the
|
|
4965
|
+
* callback is a reliable lifecycle marker.
|
|
4966
|
+
*/
|
|
4967
|
+
async _collectScopeExitPlan(trigger) {
|
|
4968
|
+
var _a, _b, _c;
|
|
4846
4969
|
const startTime = Date.now();
|
|
4847
|
-
const
|
|
4848
|
-
let totalEvicted = 0;
|
|
4970
|
+
const eligible = [];
|
|
4849
4971
|
for (const [name, config] of this.collections) {
|
|
4850
4972
|
if (config.writeOnly) continue;
|
|
4851
4973
|
const syncQuery = (_a = config.syncConfig) == null ? void 0 : _a.query;
|
|
4852
4974
|
const query = typeof syncQuery === "function" ? syncQuery() : syncQuery;
|
|
4853
4975
|
if (!query) continue;
|
|
4854
|
-
|
|
4855
|
-
|
|
4856
|
-
|
|
4976
|
+
eligible.push({ name, config, query });
|
|
4977
|
+
}
|
|
4978
|
+
this.safeCallback(this.onEvictionStart, {
|
|
4979
|
+
trigger,
|
|
4980
|
+
collectionCount: eligible.length,
|
|
4981
|
+
collections: eligible.map((e) => e.name)
|
|
4982
|
+
});
|
|
4983
|
+
const pending = [];
|
|
4984
|
+
let totalServerCandidates = 0;
|
|
4985
|
+
for (const { name, config, query } of eligible) {
|
|
4986
|
+
await this.pendingChanges.flushForCollection(name);
|
|
4987
|
+
const dirtyItems = await this.dexieDb.getDirty(name);
|
|
4988
|
+
const dirtyIds = new Set(dirtyItems.map((d) => String(d._id)));
|
|
4989
|
+
const serverAssisted = !config.writeOnly && this.connectionManager.isOnline();
|
|
4990
|
+
let scannedCount = 0;
|
|
4991
|
+
let dirtySkipped = 0;
|
|
4992
|
+
const evictIds = [];
|
|
4993
|
+
const serverCandidateIds = [];
|
|
4994
|
+
await this.dexieDb.forEachBatch(
|
|
4995
|
+
name,
|
|
4996
|
+
2e3,
|
|
4997
|
+
async (items) => {
|
|
4998
|
+
for (const item of items) {
|
|
4999
|
+
scannedCount++;
|
|
5000
|
+
const id = String(item._id);
|
|
5001
|
+
if (dirtyIds.has(id)) {
|
|
5002
|
+
dirtySkipped++;
|
|
5003
|
+
continue;
|
|
5004
|
+
}
|
|
5005
|
+
if (!matchesQuery(item, query)) {
|
|
5006
|
+
evictIds.push(id);
|
|
5007
|
+
} else if (serverAssisted) {
|
|
5008
|
+
serverCandidateIds.push(id);
|
|
5009
|
+
}
|
|
5010
|
+
}
|
|
5011
|
+
}
|
|
5012
|
+
);
|
|
5013
|
+
totalServerCandidates += serverCandidateIds.length;
|
|
5014
|
+
pending.push({
|
|
5015
|
+
collection: name,
|
|
5016
|
+
config,
|
|
5017
|
+
query,
|
|
5018
|
+
timestamp: (_c = (_b = this.syncMetaCache.get(name)) == null ? void 0 : _b.lastSyncTs) != null ? _c : 0,
|
|
5019
|
+
evictIds,
|
|
5020
|
+
localEvictedCount: evictIds.length,
|
|
5021
|
+
serverEvictedCount: 0,
|
|
5022
|
+
serverCandidateIds,
|
|
5023
|
+
dirtySkipped,
|
|
5024
|
+
scannedCount,
|
|
5025
|
+
serverAssisted
|
|
5026
|
+
});
|
|
5027
|
+
}
|
|
5028
|
+
const CHUNK_SIZE = 500;
|
|
5029
|
+
const specs = [];
|
|
5030
|
+
const ownersBySpecId = /* @__PURE__ */ new Map();
|
|
5031
|
+
for (const p of pending) {
|
|
5032
|
+
if (!p.serverAssisted) continue;
|
|
5033
|
+
for (let i = 0; i < p.serverCandidateIds.length; i += CHUNK_SIZE) {
|
|
5034
|
+
const chunk = p.serverCandidateIds.slice(i, i + CHUNK_SIZE);
|
|
5035
|
+
const specId = `${p.collection}#${i / CHUNK_SIZE}`;
|
|
5036
|
+
specs.push({
|
|
5037
|
+
collection: p.collection,
|
|
5038
|
+
timestamp: p.timestamp,
|
|
5039
|
+
query: {
|
|
5040
|
+
$and: [{ $nor: [p.query] }, { _id: { $in: chunk } }]
|
|
5041
|
+
},
|
|
5042
|
+
opts: { project: { _id: 1 } },
|
|
5043
|
+
specId
|
|
5044
|
+
});
|
|
5045
|
+
ownersBySpecId.set(specId, p);
|
|
5046
|
+
}
|
|
5047
|
+
}
|
|
5048
|
+
return {
|
|
5049
|
+
startTime,
|
|
5050
|
+
trigger,
|
|
5051
|
+
pending,
|
|
5052
|
+
specs,
|
|
5053
|
+
ownersBySpecId,
|
|
5054
|
+
totalServerCandidates
|
|
5055
|
+
};
|
|
5056
|
+
}
|
|
5057
|
+
/**
|
|
5058
|
+
* Append server-confirmed scope-exit IDs to a plan's pending entries.
|
|
5059
|
+
* Called either with the response of a standalone `findNewerMany`
|
|
5060
|
+
* (manual eviction) or per-chunk during sync's `findNewerManyStream`
|
|
5061
|
+
* (auto eviction bundled with sync).
|
|
5062
|
+
*/
|
|
5063
|
+
_applyScopeExitChunkToPlan(plan, specId, items) {
|
|
5064
|
+
const owner = plan.ownersBySpecId.get(specId);
|
|
5065
|
+
if (!owner) return;
|
|
5066
|
+
for (const item of items) {
|
|
5067
|
+
const id = item._id;
|
|
5068
|
+
if (id !== void 0) {
|
|
5069
|
+
owner.evictIds.push(String(id));
|
|
5070
|
+
owner.serverEvictedCount++;
|
|
5071
|
+
}
|
|
5072
|
+
}
|
|
5073
|
+
}
|
|
5074
|
+
/**
|
|
5075
|
+
* Phase 3 of eviction: apply accumulated evict IDs (Dexie + in-mem
|
|
5076
|
+
* deletes), broadcast cross-tab reload, build EvictionInfo, fire
|
|
5077
|
+
* `onEviction`. Idempotent on empty plans — produces a zero-eviction
|
|
5078
|
+
* info and still fires the callback so consumers see a lifecycle event.
|
|
5079
|
+
*/
|
|
5080
|
+
async _applyScopeExitPlan(plan, serverFailed, serverRounds) {
|
|
5081
|
+
const collectionResults = [];
|
|
5082
|
+
let totalEvicted = 0;
|
|
5083
|
+
let totalLocalEvicted = 0;
|
|
5084
|
+
let totalServerEvicted = 0;
|
|
5085
|
+
for (const p of plan.pending) {
|
|
5086
|
+
const uniqueEvictIds = p.evictIds.length > 0 ? Array.from(new Set(p.evictIds)) : p.evictIds;
|
|
5087
|
+
if (uniqueEvictIds.length > 0) {
|
|
5088
|
+
await this.dexieDb.deleteMany(p.collection, uniqueEvictIds);
|
|
5089
|
+
if (!p.config.writeOnly) {
|
|
5090
|
+
this.inMemManager.writeBatch(
|
|
5091
|
+
p.collection,
|
|
5092
|
+
uniqueEvictIds.map((id) => ({ _id: id })),
|
|
5093
|
+
"delete"
|
|
5094
|
+
);
|
|
5095
|
+
}
|
|
5096
|
+
this.crossTabSync.broadcastReload([p.collection]);
|
|
5097
|
+
}
|
|
5098
|
+
collectionResults.push({
|
|
5099
|
+
collection: p.collection,
|
|
5100
|
+
evictedCount: uniqueEvictIds.length,
|
|
5101
|
+
localEvictedCount: p.localEvictedCount,
|
|
5102
|
+
serverEvictedCount: p.serverEvictedCount,
|
|
5103
|
+
dirtySkipped: p.dirtySkipped,
|
|
5104
|
+
scannedCount: p.scannedCount,
|
|
5105
|
+
serverCandidateCount: p.serverCandidateIds.length
|
|
5106
|
+
});
|
|
5107
|
+
totalEvicted += uniqueEvictIds.length;
|
|
5108
|
+
totalLocalEvicted += p.localEvictedCount;
|
|
5109
|
+
totalServerEvicted += p.serverEvictedCount;
|
|
4857
5110
|
}
|
|
4858
5111
|
const info = {
|
|
4859
5112
|
totalEvicted,
|
|
4860
|
-
|
|
4861
|
-
|
|
5113
|
+
totalLocalEvicted,
|
|
5114
|
+
totalServerEvicted,
|
|
5115
|
+
totalServerCandidates: plan.totalServerCandidates,
|
|
5116
|
+
serverRounds,
|
|
5117
|
+
serverFailed,
|
|
5118
|
+
durationMs: Date.now() - plan.startTime,
|
|
5119
|
+
trigger: plan.trigger,
|
|
4862
5120
|
collections: collectionResults
|
|
4863
5121
|
};
|
|
4864
5122
|
this.safeCallback(this.onEviction, info);
|
|
4865
5123
|
return info;
|
|
4866
5124
|
}
|
|
4867
5125
|
/**
|
|
4868
|
-
*
|
|
4869
|
-
*
|
|
5126
|
+
* Whether auto-eviction is due to run on the next sync. Mirrors the
|
|
5127
|
+
* gating logic of the old `maybeAutoEvict` (interval check + persisted
|
|
5128
|
+
* `__lastEviction` cursor) but split out so `sync()` can pre-compute
|
|
5129
|
+
* the eviction plan BEFORE issuing the streamed `findNewerMany` —
|
|
5130
|
+
* letting scope-exit specs ride along on the same call.
|
|
4870
5131
|
*/
|
|
4871
|
-
async
|
|
4872
|
-
if (this.evictStaleRecordsEveryHrs <= 0) return;
|
|
5132
|
+
async _isAutoEvictionDue() {
|
|
5133
|
+
if (this.evictStaleRecordsEveryHrs <= 0) return false;
|
|
4873
5134
|
const intervalMs = this.evictStaleRecordsEveryHrs * 36e5;
|
|
4874
5135
|
if (!this._lastEvictionDate) {
|
|
4875
5136
|
const meta = await this.dexieDb.getSyncMeta("__lastEviction");
|
|
@@ -4878,9 +5139,15 @@ var _SyncedDb = class _SyncedDb {
|
|
|
4878
5139
|
}
|
|
4879
5140
|
}
|
|
4880
5141
|
if (this._lastEvictionDate && Date.now() - this._lastEvictionDate.getTime() < intervalMs) {
|
|
4881
|
-
return;
|
|
5142
|
+
return false;
|
|
4882
5143
|
}
|
|
4883
|
-
|
|
5144
|
+
return true;
|
|
5145
|
+
}
|
|
5146
|
+
/**
|
|
5147
|
+
* Persist the current time as the last successful eviction.
|
|
5148
|
+
* Called from sync() after phase 3 completes.
|
|
5149
|
+
*/
|
|
5150
|
+
async _persistEvictionTimestamp() {
|
|
4884
5151
|
this._lastEvictionDate = /* @__PURE__ */ new Date();
|
|
4885
5152
|
await this.dexieDb.setSyncMeta(
|
|
4886
5153
|
"__lastEviction",
|
|
@@ -5019,6 +5286,25 @@ var _SyncedDb = class _SyncedDb {
|
|
|
5019
5286
|
}
|
|
5020
5287
|
return allItems.length;
|
|
5021
5288
|
}
|
|
5289
|
+
/**
|
|
5290
|
+
* Bulk-read sync cursors for every registered collection into syncMetaCache.
|
|
5291
|
+
* Called once during init() before sync can fire. Decouples cursor cache
|
|
5292
|
+
* availability from in-mem record hydration, eliminating the race where a
|
|
5293
|
+
* sync triggered by ConnectionManager.tryGoOnline (or by setSyncOnlyTheseCollections
|
|
5294
|
+
* expanding the allowed set) reads an unpopulated cache and sends timestamp:0
|
|
5295
|
+
* for un-hydrated collections.
|
|
5296
|
+
*/
|
|
5297
|
+
async preloadAllSyncMetas() {
|
|
5298
|
+
const names = [...this.collections.keys()];
|
|
5299
|
+
const results = await Promise.all(
|
|
5300
|
+
names.map(
|
|
5301
|
+
(name) => this.dexieDb.getSyncMeta(name).then((meta) => ({ name, meta }))
|
|
5302
|
+
)
|
|
5303
|
+
);
|
|
5304
|
+
for (const { name, meta } of results) {
|
|
5305
|
+
if (meta) this.syncMetaCache.set(name, meta);
|
|
5306
|
+
}
|
|
5307
|
+
}
|
|
5022
5308
|
assertCollection(name) {
|
|
5023
5309
|
if (!this.collections.has(name)) {
|
|
5024
5310
|
throw new Error(`SyncedDb: Collection "${(name == null ? void 0 : name.toString()) || "?"}" not configured`);
|
|
@@ -7884,10 +8170,14 @@ var RestProxy = class {
|
|
|
7884
8170
|
}
|
|
7885
8171
|
/**
|
|
7886
8172
|
* Parse streaming response. Auto-detects format:
|
|
7887
|
-
* - Streaming: first byte is 0x00 (end) or
|
|
8173
|
+
* - Streaming: first byte is 0x00 (end), 0x01 (data chunk), or 0x02 (data chunk with specId)
|
|
7888
8174
|
* - Legacy msgpack: first byte is msgpack type marker (0x80+ for map, etc.)
|
|
7889
8175
|
*
|
|
7890
|
-
*
|
|
8176
|
+
* Frame variants (rdb2 ≥ specId support):
|
|
8177
|
+
* 0x01: [type:1][nameLen:2][name:N][dataLen:4][msgpack(items[]):M]
|
|
8178
|
+
* 0x02: [type:1][nameLen:2][name:N][specIdLen:2][specId:M][dataLen:4][msgpack(items[]):K]
|
|
8179
|
+
*
|
|
8180
|
+
* `onChunk` receives `specId` as the third arg for type-0x02 frames; `undefined` otherwise.
|
|
7891
8181
|
*/
|
|
7892
8182
|
async parseStreamingResponse(response, onChunk, onActivity) {
|
|
7893
8183
|
const reader = response.body.getReader();
|
|
@@ -7904,7 +8194,7 @@ var RestProxy = class {
|
|
|
7904
8194
|
if (!await readMore()) return;
|
|
7905
8195
|
}
|
|
7906
8196
|
const firstByte = buffer.at(0);
|
|
7907
|
-
if (firstByte !== 0 && firstByte !== 1) {
|
|
8197
|
+
if (firstByte !== 0 && firstByte !== 1 && firstByte !== 2) {
|
|
7908
8198
|
while (await readMore()) {
|
|
7909
8199
|
}
|
|
7910
8200
|
const result = unpack2(buffer.subarray(0, buffer.length));
|
|
@@ -7919,28 +8209,61 @@ var RestProxy = class {
|
|
|
7919
8209
|
while (buffer.length < 1) {
|
|
7920
8210
|
if (!await readMore()) return;
|
|
7921
8211
|
}
|
|
7922
|
-
|
|
8212
|
+
const type = buffer.at(0);
|
|
8213
|
+
if (type === 0) return;
|
|
7923
8214
|
while (buffer.length < 3) {
|
|
7924
8215
|
if (!await readMore())
|
|
7925
8216
|
throw new Error("Unexpected end of stream in chunk header");
|
|
7926
8217
|
}
|
|
7927
8218
|
const nameLen = buffer.at(1) << 8 | buffer.at(2);
|
|
7928
|
-
|
|
7929
|
-
|
|
7930
|
-
|
|
7931
|
-
|
|
7932
|
-
|
|
7933
|
-
|
|
7934
|
-
|
|
7935
|
-
|
|
7936
|
-
|
|
7937
|
-
|
|
7938
|
-
|
|
7939
|
-
|
|
8219
|
+
if (type === 1) {
|
|
8220
|
+
const headerSize = 1 + 2 + nameLen + 4;
|
|
8221
|
+
while (buffer.length < headerSize) {
|
|
8222
|
+
if (!await readMore())
|
|
8223
|
+
throw new Error("Unexpected end of stream in chunk header");
|
|
8224
|
+
}
|
|
8225
|
+
const collection = decoder2.decode(buffer.subarray(3, 3 + nameLen));
|
|
8226
|
+
const dataOffset = 3 + nameLen;
|
|
8227
|
+
const dataLen = buffer.at(dataOffset) << 24 | buffer.at(dataOffset + 1) << 16 | buffer.at(dataOffset + 2) << 8 | buffer.at(dataOffset + 3);
|
|
8228
|
+
const totalChunkSize = headerSize + dataLen;
|
|
8229
|
+
while (buffer.length < totalChunkSize) {
|
|
8230
|
+
if (!await readMore())
|
|
8231
|
+
throw new Error("Unexpected end of stream in chunk data");
|
|
8232
|
+
}
|
|
8233
|
+
const items = unpack2(buffer.subarray(headerSize, totalChunkSize));
|
|
8234
|
+
buffer.consume(totalChunkSize);
|
|
8235
|
+
await onChunk(collection, items);
|
|
8236
|
+
} else if (type === 2) {
|
|
8237
|
+
const minSize = 1 + 2 + nameLen + 2;
|
|
8238
|
+
while (buffer.length < minSize) {
|
|
8239
|
+
if (!await readMore())
|
|
8240
|
+
throw new Error("Unexpected end of stream in specId chunk header");
|
|
8241
|
+
}
|
|
8242
|
+
const collection = decoder2.decode(buffer.subarray(3, 3 + nameLen));
|
|
8243
|
+
const specIdLenOffset = 3 + nameLen;
|
|
8244
|
+
const specIdLen = buffer.at(specIdLenOffset) << 8 | buffer.at(specIdLenOffset + 1);
|
|
8245
|
+
const headerSize = minSize + specIdLen + 4;
|
|
8246
|
+
while (buffer.length < headerSize) {
|
|
8247
|
+
if (!await readMore())
|
|
8248
|
+
throw new Error("Unexpected end of stream in specId chunk header");
|
|
8249
|
+
}
|
|
8250
|
+
const specIdOffset = specIdLenOffset + 2;
|
|
8251
|
+
const specId = decoder2.decode(
|
|
8252
|
+
buffer.subarray(specIdOffset, specIdOffset + specIdLen)
|
|
8253
|
+
);
|
|
8254
|
+
const dataOffset = specIdOffset + specIdLen;
|
|
8255
|
+
const dataLen = buffer.at(dataOffset) << 24 | buffer.at(dataOffset + 1) << 16 | buffer.at(dataOffset + 2) << 8 | buffer.at(dataOffset + 3);
|
|
8256
|
+
const totalChunkSize = headerSize + dataLen;
|
|
8257
|
+
while (buffer.length < totalChunkSize) {
|
|
8258
|
+
if (!await readMore())
|
|
8259
|
+
throw new Error("Unexpected end of stream in specId chunk data");
|
|
8260
|
+
}
|
|
8261
|
+
const items = unpack2(buffer.subarray(headerSize, totalChunkSize));
|
|
8262
|
+
buffer.consume(totalChunkSize);
|
|
8263
|
+
await onChunk(collection, items, specId);
|
|
8264
|
+
} else {
|
|
8265
|
+
throw new Error(`Unknown stream chunk type: 0x${type.toString(16)}`);
|
|
7940
8266
|
}
|
|
7941
|
-
const items = unpack2(buffer.subarray(headerSize, totalChunkSize));
|
|
7942
|
-
buffer.consume(totalChunkSize);
|
|
7943
|
-
await onChunk(collection, items);
|
|
7944
8267
|
}
|
|
7945
8268
|
}
|
|
7946
8269
|
// Sync - pošiljanje lokalnih sprememb na server
|
|
@@ -116,17 +116,21 @@ export declare class RestProxy implements I_RestInterface {
|
|
|
116
116
|
* [type:1][nameLen:2][name:N][dataLen:4][msgpack(items[]):M]
|
|
117
117
|
* type=0x01 for data, type=0x00 for end-of-stream.
|
|
118
118
|
*/
|
|
119
|
-
findNewerManyStream<T>(spec: GetNewerSpec<T>[], onChunk: (collection: string, items: T[]) => Promise<void>, options?: {
|
|
119
|
+
findNewerManyStream<T>(spec: GetNewerSpec<T>[], onChunk: (collection: string, items: T[], specId?: string) => Promise<void>, options?: {
|
|
120
120
|
timeoutMs?: number;
|
|
121
121
|
signal?: AbortSignal;
|
|
122
122
|
activityTimeoutMs?: number;
|
|
123
123
|
}): Promise<void>;
|
|
124
124
|
/**
|
|
125
125
|
* Parse streaming response. Auto-detects format:
|
|
126
|
-
* - Streaming: first byte is 0x00 (end) or
|
|
126
|
+
* - Streaming: first byte is 0x00 (end), 0x01 (data chunk), or 0x02 (data chunk with specId)
|
|
127
127
|
* - Legacy msgpack: first byte is msgpack type marker (0x80+ for map, etc.)
|
|
128
128
|
*
|
|
129
|
-
*
|
|
129
|
+
* Frame variants (rdb2 ≥ specId support):
|
|
130
|
+
* 0x01: [type:1][nameLen:2][name:N][dataLen:4][msgpack(items[]):M]
|
|
131
|
+
* 0x02: [type:1][nameLen:2][name:N][specIdLen:2][specId:M][dataLen:4][msgpack(items[]):K]
|
|
132
|
+
*
|
|
133
|
+
* `onChunk` receives `specId` as the third arg for type-0x02 frames; `undefined` otherwise.
|
|
130
134
|
*/
|
|
131
135
|
private parseStreamingResponse;
|
|
132
136
|
deleteOne<T>(collection: string, query: QuerySpec<T>): Promise<T>;
|
|
@@ -50,6 +50,7 @@ export declare class SyncedDb implements I_SyncedDb {
|
|
|
50
50
|
private readonly onWsNotification?;
|
|
51
51
|
private readonly onCrossTabSync?;
|
|
52
52
|
private readonly onWakeSync?;
|
|
53
|
+
private readonly onEvictionStart?;
|
|
53
54
|
private readonly onEviction?;
|
|
54
55
|
private readonly evictStaleRecordsEveryHrs;
|
|
55
56
|
private _lastEvictionDate?;
|
|
@@ -228,9 +229,10 @@ export declare class SyncedDb implements I_SyncedDb {
|
|
|
228
229
|
* out of scope by other writers that the filtered delta feed would
|
|
229
230
|
* never report.
|
|
230
231
|
*
|
|
231
|
-
*
|
|
232
|
-
*
|
|
233
|
-
*
|
|
232
|
+
* Bundles all id chunks into a single `findNewerMany` call, keyed by
|
|
233
|
+
* `specId = "<chunkIndex>"` so multiple specs against the same
|
|
234
|
+
* collection don't collide on the response. One round-trip regardless
|
|
235
|
+
* of `candidateIds.length`.
|
|
234
236
|
*
|
|
235
237
|
* @param timestamp - `findNewer` timestamp cursor. Only records with
|
|
236
238
|
* `_ts > timestamp` are examined. Caller decides the window (usually
|
|
@@ -243,14 +245,69 @@ export declare class SyncedDb implements I_SyncedDb {
|
|
|
243
245
|
/**
|
|
244
246
|
* Evict out-of-scope records for all collections.
|
|
245
247
|
* Skips writeOnly and collections without syncConfig.query.
|
|
246
|
-
* Fires onEviction
|
|
248
|
+
* Fires onEvictionStart before phase 1 and onEviction at the end.
|
|
249
|
+
*
|
|
250
|
+
* Three phases:
|
|
251
|
+
* 1. Local pass per eligible collection — Dexie scan, partition into
|
|
252
|
+
* locally-failing (immediate evict candidates) vs locally-matching
|
|
253
|
+
* (server-check candidates). Dirty records are skipped.
|
|
254
|
+
* 2. Server-assisted pass — a single `findNewerMany` call carrying
|
|
255
|
+
* one spec per (collection × CHUNK_SIZE-id-batch), disambiguated
|
|
256
|
+
* via `specId`. Collapses the prior N-per-collection round-trips
|
|
257
|
+
* into one server round-trip total.
|
|
258
|
+
* 3. Apply evictions per collection (Dexie + in-mem deletes,
|
|
259
|
+
* cross-tab reload broadcast).
|
|
260
|
+
*
|
|
261
|
+
* Server-assisted is automatic when the collection is online and not
|
|
262
|
+
* writeOnly — same default as `evictOutOfScopeRecords`. On batch
|
|
263
|
+
* failure, local evictions are still applied; remaining server rounds
|
|
264
|
+
* are skipped (logged, not thrown), and `serverFailed=true` is reported
|
|
265
|
+
* on the EvictionInfo.
|
|
247
266
|
*/
|
|
248
267
|
evictOutOfScopeRecordsAll(trigger?: "auto" | "manual"): Promise<EvictionInfo>;
|
|
249
268
|
/**
|
|
250
|
-
*
|
|
251
|
-
*
|
|
269
|
+
* Phase 1 of eviction: pre-filter eligible collections, fire
|
|
270
|
+
* `onEvictionStart`, scan Dexie per collection to partition records
|
|
271
|
+
* into local-fail (immediate evict) vs server-check (candidates), and
|
|
272
|
+
* build the `findNewerMany` spec list (each spec tagged with a unique
|
|
273
|
+
* `specId`).
|
|
274
|
+
*
|
|
275
|
+
* Returns a plan object that can be:
|
|
276
|
+
* - Sent to the server as a standalone `findNewerMany` call (manual eviction),
|
|
277
|
+
* - Bundled into sync's `findNewerManyStream` via `SyncExtras` (auto eviction),
|
|
278
|
+
* - Or applied with empty server results when offline/writeOnly.
|
|
279
|
+
*
|
|
280
|
+
* Always fires `onEvictionStart` (even with collectionCount=0) so the
|
|
281
|
+
* callback is a reliable lifecycle marker.
|
|
282
|
+
*/
|
|
283
|
+
private _collectScopeExitPlan;
|
|
284
|
+
/**
|
|
285
|
+
* Append server-confirmed scope-exit IDs to a plan's pending entries.
|
|
286
|
+
* Called either with the response of a standalone `findNewerMany`
|
|
287
|
+
* (manual eviction) or per-chunk during sync's `findNewerManyStream`
|
|
288
|
+
* (auto eviction bundled with sync).
|
|
289
|
+
*/
|
|
290
|
+
private _applyScopeExitChunkToPlan;
|
|
291
|
+
/**
|
|
292
|
+
* Phase 3 of eviction: apply accumulated evict IDs (Dexie + in-mem
|
|
293
|
+
* deletes), broadcast cross-tab reload, build EvictionInfo, fire
|
|
294
|
+
* `onEviction`. Idempotent on empty plans — produces a zero-eviction
|
|
295
|
+
* info and still fires the callback so consumers see a lifecycle event.
|
|
296
|
+
*/
|
|
297
|
+
private _applyScopeExitPlan;
|
|
298
|
+
/**
|
|
299
|
+
* Whether auto-eviction is due to run on the next sync. Mirrors the
|
|
300
|
+
* gating logic of the old `maybeAutoEvict` (interval check + persisted
|
|
301
|
+
* `__lastEviction` cursor) but split out so `sync()` can pre-compute
|
|
302
|
+
* the eviction plan BEFORE issuing the streamed `findNewerMany` —
|
|
303
|
+
* letting scope-exit specs ride along on the same call.
|
|
252
304
|
*/
|
|
253
|
-
private
|
|
305
|
+
private _isAutoEvictionDue;
|
|
306
|
+
/**
|
|
307
|
+
* Persist the current time as the last successful eviction.
|
|
308
|
+
* Called from sync() after phase 3 completes.
|
|
309
|
+
*/
|
|
310
|
+
private _persistEvictionTimestamp;
|
|
254
311
|
getObjectMetadata<M>(collection: string, _id: Id): M | undefined;
|
|
255
312
|
getObjectsMetadata<M>(collection: string, _ids: Id[]): (M | undefined)[];
|
|
256
313
|
setObjectMetadata<M>(collection: string, _id: Id, metadata: M): void;
|
|
@@ -291,6 +348,15 @@ export declare class SyncedDb implements I_SyncedDb {
|
|
|
291
348
|
*/
|
|
292
349
|
private loadCollectionsToInMem;
|
|
293
350
|
private loadCollectionToInMem;
|
|
351
|
+
/**
|
|
352
|
+
* Bulk-read sync cursors for every registered collection into syncMetaCache.
|
|
353
|
+
* Called once during init() before sync can fire. Decouples cursor cache
|
|
354
|
+
* availability from in-mem record hydration, eliminating the race where a
|
|
355
|
+
* sync triggered by ConnectionManager.tryGoOnline (or by setSyncOnlyTheseCollections
|
|
356
|
+
* expanding the allowed set) reads an unpopulated cache and sends timestamp:0
|
|
357
|
+
* for un-hydrated collections.
|
|
358
|
+
*/
|
|
359
|
+
private preloadAllSyncMetas;
|
|
294
360
|
private assertCollection;
|
|
295
361
|
private static readonly STRINGIFIED_FALSY;
|
|
296
362
|
/** Stringify an Id parameter (ObjectId → hex string). */
|
|
@@ -7,7 +7,7 @@
|
|
|
7
7
|
* - Uploading dirty items to server
|
|
8
8
|
*/
|
|
9
9
|
import type { LocalDbEntity } from "../../types/DbEntity";
|
|
10
|
-
import type { I_SyncEngine, SyncEngineConfig } from "../types/managers";
|
|
10
|
+
import type { I_SyncEngine, SyncEngineConfig, SyncExtras } from "../types/managers";
|
|
11
11
|
import type { UploadResult } from "../types/internal";
|
|
12
12
|
export declare class SyncEngine implements I_SyncEngine {
|
|
13
13
|
private readonly tenant;
|
|
@@ -21,8 +21,13 @@ export declare class SyncEngine implements I_SyncEngine {
|
|
|
21
21
|
/**
|
|
22
22
|
* Execute full sync cycle.
|
|
23
23
|
* Called by SyncedDb which handles locking.
|
|
24
|
+
*
|
|
25
|
+
* `extras` lets the caller append additional `findNewerMany` specs
|
|
26
|
+
* (each with a unique `specId`) onto the same streamed call. Chunks
|
|
27
|
+
* for those specs are routed via `extras.onChunk(specId, items)` and
|
|
28
|
+
* never enter the positive-sync processing path.
|
|
24
29
|
*/
|
|
25
|
-
sync(calledFrom?: string): Promise<void>;
|
|
30
|
+
sync(calledFrom?: string, extras?: SyncExtras): Promise<void>;
|
|
26
31
|
/**
|
|
27
32
|
* Upload dirty items for all collections.
|
|
28
33
|
*/
|
|
@@ -283,9 +283,29 @@ export interface SyncEngineConfig {
|
|
|
283
283
|
callbacks: SyncEngineCallbacks;
|
|
284
284
|
deps: SyncEngineDeps;
|
|
285
285
|
}
|
|
286
|
+
/**
|
|
287
|
+
* Optional extras passed to `SyncEngine.sync()`. Lets a caller piggyback
|
|
288
|
+
* additional `findNewerMany` specs (each tagged with a unique `specId`)
|
|
289
|
+
* onto the same streamed server call that fetches positive sync deltas —
|
|
290
|
+
* saving the round-trip that those specs would otherwise cost.
|
|
291
|
+
*
|
|
292
|
+
* Used today by SyncedDb to bundle scope-exit eviction queries into the
|
|
293
|
+
* positive sync stream when auto-eviction is due.
|
|
294
|
+
*
|
|
295
|
+
* Contract:
|
|
296
|
+
* - Each spec MUST set `specId` to a string unique within the call.
|
|
297
|
+
* - Positive sync specs do NOT set `specId`; chunks for them arrive
|
|
298
|
+
* with `specId === undefined` and are processed normally.
|
|
299
|
+
* - Extra spec chunks are routed to `onChunk(specId, items)` and never
|
|
300
|
+
* touch the conflict-resolution / Dexie write path.
|
|
301
|
+
*/
|
|
302
|
+
export interface SyncExtras {
|
|
303
|
+
specs: import("../../types/I_RestInterface").GetNewerSpec<any>[];
|
|
304
|
+
onChunk: (specId: string, items: any[]) => void;
|
|
305
|
+
}
|
|
286
306
|
export interface I_SyncEngine {
|
|
287
307
|
/** Execute full sync cycle. */
|
|
288
|
-
sync(calledFrom?: string): Promise<void>;
|
|
308
|
+
sync(calledFrom?: string, extras?: SyncExtras): Promise<void>;
|
|
289
309
|
/** Upload dirty items for all collections. */
|
|
290
310
|
uploadDirtyItems(calledFrom?: string): Promise<UploadResult>;
|
|
291
311
|
/** Upload dirty items for a specific collection. */
|
|
@@ -59,6 +59,18 @@ export interface GetNewerSpec<T> {
|
|
|
59
59
|
timestamp: number | Date | string | Timestamp;
|
|
60
60
|
query?: QuerySpec<T>;
|
|
61
61
|
opts?: QueryOpts;
|
|
62
|
+
/**
|
|
63
|
+
* Optional disambiguator. When set, results for this spec are keyed
|
|
64
|
+
* by `specId` in the `findNewerMany` response (and emitted as the
|
|
65
|
+
* `specId` arg to `onChunk` in the streaming variant) instead of
|
|
66
|
+
* `collection`. Lets a single batched request carry multiple specs
|
|
67
|
+
* against the same `collection` without `Record<key, items[]>`
|
|
68
|
+
* collisions. Backwards-compatible: specs without `specId` are keyed
|
|
69
|
+
* by `collection` exactly as before.
|
|
70
|
+
*
|
|
71
|
+
* Server support: cry-db ≥ 2.4.32 (specId honored).
|
|
72
|
+
*/
|
|
73
|
+
specId?: string;
|
|
62
74
|
}
|
|
63
75
|
/**
|
|
64
76
|
* Request to batch update a collection, used to sync data
|
|
@@ -90,8 +102,12 @@ export interface I_RestInterface {
|
|
|
90
102
|
findByIds<T>(collection: string, ids: Id[]): Promise<T[]>;
|
|
91
103
|
findNewer<T>(collection: string, timestamp: Timestamp | number | string | Date, query?: QuerySpec<T>, opts?: QueryOpts): Promise<T[]>;
|
|
92
104
|
findNewerMany<T>(spec?: GetNewerSpec<T>[]): Promise<Record<string, any[]>>;
|
|
93
|
-
/**
|
|
94
|
-
|
|
105
|
+
/**
|
|
106
|
+
* Streaming variant of findNewerMany. Calls onChunk for each batch of items as they arrive.
|
|
107
|
+
* `specId` is forwarded as the third arg when the originating spec set one — `undefined` otherwise.
|
|
108
|
+
* Old two-arg `onChunk` callbacks keep working unchanged (the third arg is ignored).
|
|
109
|
+
*/
|
|
110
|
+
findNewerManyStream<T>(spec: GetNewerSpec<T>[], onChunk: (collection: string, items: T[], specId?: string) => Promise<void>, options?: {
|
|
95
111
|
timeoutMs?: number;
|
|
96
112
|
signal?: AbortSignal;
|
|
97
113
|
}): Promise<void>;
|
|
@@ -232,17 +232,42 @@ export interface SyncInfo {
|
|
|
232
232
|
export interface EvictionCollectionInfo {
|
|
233
233
|
/** Collection name. */
|
|
234
234
|
collection: string;
|
|
235
|
-
/** Records removed from Dexie + in-mem. */
|
|
235
|
+
/** Records removed from Dexie + in-mem (local + server-confirmed). */
|
|
236
236
|
evictedCount: number;
|
|
237
|
+
/** Records evicted by the local pass (cached state failed the query). */
|
|
238
|
+
localEvictedCount: number;
|
|
239
|
+
/** Records evicted by the server-assisted pass (server reports out-of-scope). */
|
|
240
|
+
serverEvictedCount: number;
|
|
237
241
|
/** Records skipped because they have pending dirty changes. */
|
|
238
242
|
dirtySkipped: number;
|
|
239
243
|
/** Total records scanned in this collection. */
|
|
240
244
|
scannedCount: number;
|
|
245
|
+
/** IDs sent to the server for scope-exit confirmation (0 if not server-assisted). */
|
|
246
|
+
serverCandidateCount: number;
|
|
247
|
+
}
|
|
248
|
+
/** Reported to onEvictionStart before the eviction passes begin. */
|
|
249
|
+
export interface EvictionStartInfo {
|
|
250
|
+
/** Whether this run is an auto or manual trigger. */
|
|
251
|
+
trigger: "auto" | "manual";
|
|
252
|
+
/** Number of eligible collections (writeOnly and queryless skipped). */
|
|
253
|
+
collectionCount: number;
|
|
254
|
+
/** Names of the eligible collections, in iteration order. */
|
|
255
|
+
collections: string[];
|
|
241
256
|
}
|
|
242
257
|
/** Eviction result reported to the onEviction callback. */
|
|
243
258
|
export interface EvictionInfo {
|
|
244
|
-
/** Total records evicted across all collections. */
|
|
259
|
+
/** Total records evicted across all collections (local + server). */
|
|
245
260
|
totalEvicted: number;
|
|
261
|
+
/** Sum of records evicted by local-pass alone across all collections. */
|
|
262
|
+
totalLocalEvicted: number;
|
|
263
|
+
/** Sum of records evicted by server-assisted pass alone across all collections. */
|
|
264
|
+
totalServerEvicted: number;
|
|
265
|
+
/** Sum of `serverCandidateCount` across all collections — IDs we asked the server about. */
|
|
266
|
+
totalServerCandidates: number;
|
|
267
|
+
/** Number of `findNewerMany` round-trips issued during the server-assisted pass. */
|
|
268
|
+
serverRounds: number;
|
|
269
|
+
/** Whether the server-assisted pass aborted mid-flight (still applies local evictions). */
|
|
270
|
+
serverFailed: boolean;
|
|
246
271
|
/** Wall-clock duration of the eviction operation in ms. */
|
|
247
272
|
durationMs: number;
|
|
248
273
|
/** Whether this was triggered automatically or by manual call. */
|
|
@@ -525,7 +550,17 @@ export interface SyncedDbConfig {
|
|
|
525
550
|
* Default: 0 (disabled).
|
|
526
551
|
*/
|
|
527
552
|
evictStaleRecordsEveryHrs?: number;
|
|
528
|
-
/**
|
|
553
|
+
/**
|
|
554
|
+
* Callback fired before each eviction run begins, after the eligible
|
|
555
|
+
* collection list has been resolved. Useful as a debug breadcrumb to
|
|
556
|
+
* confirm a tick fired and which collections will be scanned.
|
|
557
|
+
* Errors are swallowed.
|
|
558
|
+
*/
|
|
559
|
+
onEvictionStart?: (info: EvictionStartInfo) => void;
|
|
560
|
+
/**
|
|
561
|
+
* Callback fired after each eviction run completes (manual or auto).
|
|
562
|
+
* Receives full per-collection and aggregate stats. Errors are swallowed.
|
|
563
|
+
*/
|
|
529
564
|
onEviction?: (info: EvictionInfo) => void;
|
|
530
565
|
}
|
|
531
566
|
/**
|
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "cry-synced-db-client",
|
|
3
|
-
"version": "0.1.
|
|
3
|
+
"version": "0.1.147",
|
|
4
4
|
"type": "module",
|
|
5
5
|
"main": "./dist/index.js",
|
|
6
6
|
"module": "./dist/index.js",
|
|
@@ -36,7 +36,7 @@
|
|
|
36
36
|
"vitest": "^4.1.2"
|
|
37
37
|
},
|
|
38
38
|
"dependencies": {
|
|
39
|
-
"cry-db": "^2.4.
|
|
39
|
+
"cry-db": "^2.4.32",
|
|
40
40
|
"cry-helpers": "^2.1.193",
|
|
41
41
|
"msgpackr": "^1.11.9",
|
|
42
42
|
"notepack": "^0.0.2",
|