cry-synced-db-client 0.1.146 → 0.1.148

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -2,6 +2,107 @@
2
2
 
3
3
  ## Unreleased
4
4
 
5
+ ### `uploadDirtyItems` follow-up pass — drain in-sync writes immediately
6
+
7
+ Writes that land **during** a sync iteration had their
8
+ `scheduleRestUpload()` guarded out by `isSyncing()` (silent drop, no
9
+ re-schedule), so they sat in `_dirty_changes` until the next 60s
10
+ auto-sync tick. Particularly visible for high-frequency writeOnly
11
+ collections (e.g. `prehodi`, written on every route change) on tablets
12
+ with intensive navigation: dirty items piled up until the next tick.
13
+
14
+ `SyncEngine.sync()` now performs a single follow-up pass right after the
15
+ primary `uploadDirtyItems()` call:
16
+
17
+ 1. `flushAllPendingChanges()` — forces any in-flight 500ms Dexie
18
+ debounces to land in `_dirty_changes` before the second snapshot.
19
+ 2. `uploadDirtyItems(calledFrom + ":followUp")` — drains entries that
20
+ accumulated during the first pass's server roundtrip.
21
+
22
+ Single pass (not a loop) — bounded work; later writes after sync
23
+ completes will trigger their own `scheduleRestUpload()` once the
24
+ `isSyncing` flag clears. Sequential `await` ordering means no concurrent
25
+ server roundtrips and no new race conditions vs. the existing
26
+ snapshot-then-clear pattern in `_dirty_changes`. Stats from the
27
+ follow-up pass are merged into `uploadStats` so `onSyncEnd` /
28
+ `collectionStats.sentCount` reflect both passes.
29
+
30
+ Errors in the follow-up are caught by the same outer `try/catch` as the
31
+ first pass — a follow-up failure does not roll back the first pass's
32
+ already-cleared dirty entries; affected items are caught at the next
33
+ sync tick (same retry semantics as before).
34
+
35
+ ### Auto-eviction co-located with sync — one round-trip total
36
+
37
+ When `evictStaleRecordsEveryHrs > 0` and the interval has elapsed, the
38
+ scope-exit eviction queries now ride along on sync's existing
39
+ `findNewerManyStream` call instead of issuing a separate `findNewerMany`
40
+ afterwards. Saves one server round-trip per "due" sync.
41
+
42
+ - `SyncedDb.sync()` pre-computes the eviction plan (Dexie scan + spec
43
+ build) before invoking `SyncEngine.sync()`, hands the spec list as
44
+ `extras`, and applies the deletes in `finally`. The old post-sync
45
+ `maybeAutoEvict()` call is gone.
46
+ - New `SyncExtras` type on `I_SyncEngine.sync(calledFrom?, extras?)`:
47
+ appends caller-supplied `findNewerMany` specs (each tagged with a
48
+ unique `specId`) to the same streamed call. Chunks for those specs
49
+ are routed via `extras.onChunk(specId, items)` and never enter
50
+ positive-sync conflict resolution.
51
+ - Sync-lifecycle callbacks (`onSyncStart`/`onSyncEnd`/`onServerSyncStart`/
52
+ `onServerSyncEnd`/`onSyncProgress`/`onFindNewerManyCall`/
53
+ `onFindNewerManyResult`) are deliberately scoped to positive sync —
54
+ they do **not** count or report scope-exit specs. Eviction lifecycle
55
+ stays observable via `onEvictionStart` / `onEviction`.
56
+ - Manual `evictOutOfScopeRecordsAll("manual")` keeps the standalone
57
+ `findNewerMany` round-trip behavior unchanged.
58
+ - Internal refactor: `evictOutOfScopeRecordsAll` split into
59
+ `_collectScopeExitPlan` (phase 1), `_applyScopeExitChunkToPlan`
60
+ (server result handling, shared with sync's onChunk dispatch), and
61
+ `_applyScopeExitPlan` (phase 3 — deletes + EvictionInfo + onEviction).
62
+
63
+ ### `GetNewerSpec.specId` — disambiguate duplicate-collection specs
64
+
65
+ New optional field on `GetNewerSpec` mirrors the cry-db ≥ 2.4.32 API
66
+ addition: when set, results for that spec are keyed by `specId` in
67
+ `findNewerMany` responses (and emitted as the third `onChunk` arg in
68
+ `findNewerManyStream`) instead of `collection`. Lets a single batched
69
+ request carry multiple specs against the same collection without
70
+ `Record<key, items[]>` collisions.
71
+
72
+ - Backwards compatible — specs without `specId` keep collection-keyed
73
+ behavior.
74
+ - `RestProxy.parseStreamingResponse` learned a new wire frame type
75
+ `0x02`: `[type:1=0x02][nameLen:2][name:N][specIdLen:2][specId:M][dataLen:4][payload]`,
76
+ emitted by rdb2 when any spec sets `specId`. Type `0x01` unchanged
77
+ for back-compat.
78
+ - All mocks (`MockRestInterfaceFake`, `MockRestInterfaceReal`,
79
+ `MockRestInterface`) honor `specId` in both the non-streaming and
80
+ streaming variants.
81
+
82
+ ### Eviction batched via `findNewerMany` + `onEvictionStart` lifecycle
83
+
84
+ - `evictOutOfScopeRecordsAll`'s server-assisted pass collapses N
85
+ per-collection round-trips into **one** `findNewerMany` call, with
86
+ per-(collection × CHUNK_SIZE-id-batch) specs disambiguated via
87
+ `specId = "<collection>#<chunkIndex>"`. `findServerSideScopeExits`
88
+ (single-collection helper) similarly bundles all chunks into one call.
89
+ - New `onEvictionStart(info: EvictionStartInfo)` callback fires before
90
+ phase 1 with `{ trigger, collectionCount, collections[] }` so consumers
91
+ see a debug breadcrumb of which collections will be scanned.
92
+ - Extended `EvictionCollectionInfo` with `localEvictedCount`,
93
+ `serverEvictedCount`, `serverCandidateCount` — split by source so
94
+ debug output distinguishes "cached state failed" from "server
95
+ reported out of scope".
96
+ - Extended `EvictionInfo` with `totalLocalEvicted`, `totalServerEvicted`,
97
+ `totalServerCandidates`, `serverRounds`, `serverFailed` — aggregate
98
+ counters for the batched pass.
99
+
100
+ **Type addition (non-breaking for producers, possibly breaking for
101
+ consumers constructing literals):** the new fields are required on
102
+ the public `EvictionCollectionInfo` / `EvictionInfo` types since the
103
+ library always populates them. Downstream code that constructs mock
104
+ literals (e.g. tests) needs the new fields.
105
+
5
106
  ### Fix: filtered-sync tombstone (scope-exit from other writers)
6
107
 
7
108
  When a collection has `syncConfig.query` (e.g. `{ status: { $ne: "obsolete" } }`)
package/dist/index.js CHANGED
@@ -2435,8 +2435,13 @@ var _SyncEngine = class _SyncEngine {
2435
2435
  /**
2436
2436
  * Execute full sync cycle.
2437
2437
  * Called by SyncedDb which handles locking.
2438
+ *
2439
+ * `extras` lets the caller append additional `findNewerMany` specs
2440
+ * (each with a unique `specId`) onto the same streamed call. Chunks
2441
+ * for those specs are routed via `extras.onChunk(specId, items)` and
2442
+ * never enter the positive-sync processing path.
2438
2443
  */
2439
- async sync(calledFrom) {
2444
+ async sync(calledFrom, extras) {
2440
2445
  var _a, _b;
2441
2446
  const startTime = Date.now();
2442
2447
  let receivedCount = 0;
@@ -2481,10 +2486,15 @@ var _SyncEngine = class _SyncEngine {
2481
2486
  }
2482
2487
  try {
2483
2488
  const completedCollections = /* @__PURE__ */ new Set();
2489
+ const allSpecs = extras && extras.specs.length > 0 ? [...syncSpecs, ...extras.specs] : syncSpecs;
2484
2490
  await this.deps.withSyncTimeout(
2485
2491
  this.restInterface.findNewerManyStream(
2486
- syncSpecs,
2487
- async (collection, items) => {
2492
+ allSpecs,
2493
+ async (collection, items, specId) => {
2494
+ if (specId !== void 0) {
2495
+ if (extras) extras.onChunk(specId, items);
2496
+ return;
2497
+ }
2488
2498
  const config = configMap.get(collection);
2489
2499
  if (!config) return;
2490
2500
  const state = collectionState.get(collection);
@@ -2543,6 +2553,17 @@ var _SyncEngine = class _SyncEngine {
2543
2553
  let uploadStats = { sentCount: 0 };
2544
2554
  try {
2545
2555
  uploadStats = await this.uploadDirtyItems(calledFrom);
2556
+ await this.deps.flushAllPendingChanges();
2557
+ const followUp = await this.uploadDirtyItems(`${calledFrom != null ? calledFrom : "sync"}:followUp`);
2558
+ if (followUp.sentCount > 0) {
2559
+ uploadStats.sentCount += followUp.sentCount;
2560
+ if (followUp.collectionSentCounts) {
2561
+ uploadStats.collectionSentCounts = uploadStats.collectionSentCounts || {};
2562
+ for (const [c, n] of Object.entries(followUp.collectionSentCounts)) {
2563
+ uploadStats.collectionSentCounts[c] = (uploadStats.collectionSentCounts[c] || 0) + n;
2564
+ }
2565
+ }
2566
+ }
2546
2567
  } catch (err) {
2547
2568
  console.error(
2548
2569
  "uploadDirtyItems failed (download succeeded, staying online):",
@@ -3521,6 +3542,7 @@ var _SyncedDb = class _SyncedDb {
3521
3542
  this.onWsNotification = config.onWsNotification;
3522
3543
  this.onCrossTabSync = config.onCrossTabSync;
3523
3544
  this.onWakeSync = config.onWakeSync;
3545
+ this.onEvictionStart = config.onEvictionStart;
3524
3546
  this.onEviction = config.onEviction;
3525
3547
  this.evictStaleRecordsEveryHrs = (_e = config.evictStaleRecordsEveryHrs) != null ? _e : 0;
3526
3548
  for (const col of config.collections) {
@@ -4477,8 +4499,26 @@ var _SyncedDb = class _SyncedDb {
4477
4499
  }
4478
4500
  this.syncing = true;
4479
4501
  this.crossTabSync.startServerSync();
4502
+ let evictionPlan;
4503
+ let evictionServerFailed = false;
4504
+ if (await this._isAutoEvictionDue()) {
4505
+ try {
4506
+ evictionPlan = await this._collectScopeExitPlan("auto");
4507
+ } catch (err) {
4508
+ console.error(
4509
+ "[evict] phase 1 failed (skipping bundled eviction):",
4510
+ err
4511
+ );
4512
+ }
4513
+ }
4514
+ const evictionExtras = evictionPlan && evictionPlan.specs.length > 0 ? {
4515
+ specs: evictionPlan.specs,
4516
+ onChunk: (specId, items) => {
4517
+ this._applyScopeExitChunkToPlan(evictionPlan, specId, items);
4518
+ }
4519
+ } : void 0;
4480
4520
  try {
4481
- await this.syncEngine.sync(calledFrom);
4521
+ await this.syncEngine.sync(calledFrom, evictionExtras);
4482
4522
  if (!this.syncOnlyCollections) {
4483
4523
  const now = /* @__PURE__ */ new Date();
4484
4524
  if (!this._lastFullSyncDate) {
@@ -4490,14 +4530,24 @@ var _SyncedDb = class _SyncedDb {
4490
4530
  console.error("Failed to persist lastFullSync:", err);
4491
4531
  });
4492
4532
  }
4533
+ } catch (err) {
4534
+ if (evictionExtras) evictionServerFailed = true;
4535
+ throw err;
4493
4536
  } finally {
4494
4537
  this.syncing = false;
4495
4538
  this.crossTabSync.endServerSync();
4496
4539
  await this.processQueuedWsUpdates();
4497
- try {
4498
- await this.maybeAutoEvict();
4499
- } catch (err) {
4500
- console.error("Auto-eviction failed:", err);
4540
+ if (evictionPlan) {
4541
+ try {
4542
+ await this._applyScopeExitPlan(
4543
+ evictionPlan,
4544
+ evictionServerFailed,
4545
+ evictionExtras ? 1 : 0
4546
+ );
4547
+ await this._persistEvictionTimestamp();
4548
+ } catch (err) {
4549
+ console.error("[evict] phase 3 failed:", err);
4550
+ }
4501
4551
  }
4502
4552
  }
4503
4553
  } finally {
@@ -4726,7 +4776,15 @@ var _SyncedDb = class _SyncedDb {
4726
4776
  const syncQuery = (_a = config.syncConfig) == null ? void 0 : _a.query;
4727
4777
  const query = typeof syncQuery === "function" ? syncQuery() : syncQuery;
4728
4778
  if (!query) {
4729
- return { collection, evictedCount: 0, dirtySkipped: 0, scannedCount: 0 };
4779
+ return {
4780
+ collection,
4781
+ evictedCount: 0,
4782
+ localEvictedCount: 0,
4783
+ serverEvictedCount: 0,
4784
+ dirtySkipped: 0,
4785
+ scannedCount: 0,
4786
+ serverCandidateCount: 0
4787
+ };
4730
4788
  }
4731
4789
  await this.pendingChanges.flushForCollection(collection);
4732
4790
  const dirtyItems = await this.dexieDb.getDirty(collection);
@@ -4755,6 +4813,8 @@ var _SyncedDb = class _SyncedDb {
4755
4813
  }
4756
4814
  }
4757
4815
  );
4816
+ const localEvictedCount = evictIds.length;
4817
+ let serverEvictedCount = 0;
4758
4818
  if (serverAssisted && serverCandidateIds.length > 0) {
4759
4819
  let scopeExitTimestamp;
4760
4820
  const lookbehindMs = opts == null ? void 0 : opts.outOfWindowLookbehindMs;
@@ -4774,6 +4834,7 @@ var _SyncedDb = class _SyncedDb {
4774
4834
  scopeExitTimestamp
4775
4835
  );
4776
4836
  for (const id of serverExits) evictIds.push(id);
4837
+ serverEvictedCount = serverExits.length;
4777
4838
  } catch (err) {
4778
4839
  console.error(
4779
4840
  `[evict] server-assisted pass failed for ${collection} (proceeding with local-only):`,
@@ -4792,7 +4853,15 @@ var _SyncedDb = class _SyncedDb {
4792
4853
  }
4793
4854
  this.crossTabSync.broadcastReload([collection]);
4794
4855
  }
4795
- return { collection, evictedCount: evictIds.length, dirtySkipped, scannedCount };
4856
+ return {
4857
+ collection,
4858
+ evictedCount: evictIds.length,
4859
+ localEvictedCount,
4860
+ serverEvictedCount,
4861
+ dirtySkipped,
4862
+ scannedCount,
4863
+ serverCandidateCount: serverCandidateIds.length
4864
+ };
4796
4865
  }
4797
4866
  /**
4798
4867
  * Ask the server which of the given IDs no longer match the positive
@@ -4801,9 +4870,10 @@ var _SyncedDb = class _SyncedDb {
4801
4870
  * out of scope by other writers that the filtered delta feed would
4802
4871
  * never report.
4803
4872
  *
4804
- * Chunks `candidateIds` into `$in`-sized batches to keep the request
4805
- * payload bounded. Uses `project: { _id: 1 }` to keep the response
4806
- * payload minimal.
4873
+ * Bundles all id chunks into a single `findNewerMany` call, keyed by
4874
+ * `specId = "<chunkIndex>"` so multiple specs against the same
4875
+ * collection don't collide on the response. One round-trip regardless
4876
+ * of `candidateIds.length`.
4807
4877
  *
4808
4878
  * @param timestamp - `findNewer` timestamp cursor. Only records with
4809
4879
  * `_ts > timestamp` are examined. Caller decides the window (usually
@@ -4813,24 +4883,31 @@ var _SyncedDb = class _SyncedDb {
4813
4883
  * `NOT query`). These are candidates for local-only eviction.
4814
4884
  */
4815
4885
  async findServerSideScopeExits(collection, positiveQuery, candidateIds, timestamp) {
4886
+ var _a;
4887
+ if (candidateIds.length === 0) return [];
4816
4888
  const CHUNK_SIZE = 500;
4817
- const scopeExits = [];
4818
4889
  const negated = { $nor: [positiveQuery] };
4890
+ const specs = [];
4819
4891
  for (let i = 0; i < candidateIds.length; i += CHUNK_SIZE) {
4820
4892
  const chunk = candidateIds.slice(i, i + CHUNK_SIZE);
4821
- const scopedNegation = {
4822
- $and: [negated, { _id: { $in: chunk } }]
4823
- };
4824
- const results = await this.connectionManager.withRestTimeout(
4825
- this.restInterface.findNewer(
4826
- collection,
4827
- timestamp,
4828
- scopedNegation,
4829
- { project: { _id: 1 } }
4830
- ),
4831
- "evictOutOfScopeRecords.serverAssisted"
4832
- );
4833
- for (const item of results) {
4893
+ specs.push({
4894
+ collection,
4895
+ timestamp,
4896
+ query: {
4897
+ $and: [negated, { _id: { $in: chunk } }]
4898
+ },
4899
+ opts: { project: { _id: 1 } },
4900
+ specId: `${collection}#${i / CHUNK_SIZE}`
4901
+ });
4902
+ }
4903
+ const results = await this.connectionManager.withRestTimeout(
4904
+ this.restInterface.findNewerMany(specs),
4905
+ "evictOutOfScopeRecords.serverAssisted"
4906
+ );
4907
+ const scopeExits = [];
4908
+ for (const spec of specs) {
4909
+ const items = (_a = results[spec.specId]) != null ? _a : [];
4910
+ for (const item of items) {
4834
4911
  const id = item._id;
4835
4912
  if (id !== void 0) scopeExits.push(String(id));
4836
4913
  }
@@ -4840,37 +4917,231 @@ var _SyncedDb = class _SyncedDb {
4840
4917
  /**
4841
4918
  * Evict out-of-scope records for all collections.
4842
4919
  * Skips writeOnly and collections without syncConfig.query.
4843
- * Fires onEviction callback.
4920
+ * Fires onEvictionStart before phase 1 and onEviction at the end.
4921
+ *
4922
+ * Three phases:
4923
+ * 1. Local pass per eligible collection — Dexie scan, partition into
4924
+ * locally-failing (immediate evict candidates) vs locally-matching
4925
+ * (server-check candidates). Dirty records are skipped.
4926
+ * 2. Server-assisted pass — a single `findNewerMany` call carrying
4927
+ * one spec per (collection × CHUNK_SIZE-id-batch), disambiguated
4928
+ * via `specId`. Collapses the prior N-per-collection round-trips
4929
+ * into one server round-trip total.
4930
+ * 3. Apply evictions per collection (Dexie + in-mem deletes,
4931
+ * cross-tab reload broadcast).
4932
+ *
4933
+ * Server-assisted is automatic when the collection is online and not
4934
+ * writeOnly — same default as `evictOutOfScopeRecords`. On batch
4935
+ * failure, local evictions are still applied; remaining server rounds
4936
+ * are skipped (logged, not thrown), and `serverFailed=true` is reported
4937
+ * on the EvictionInfo.
4844
4938
  */
4845
4939
  async evictOutOfScopeRecordsAll(trigger = "manual") {
4846
- var _a;
4940
+ const plan = await this._collectScopeExitPlan(trigger);
4941
+ let serverFailed = false;
4942
+ let serverRounds = 0;
4943
+ if (plan.specs.length > 0) {
4944
+ try {
4945
+ const results = await this.connectionManager.withRestTimeout(
4946
+ this.restInterface.findNewerMany(plan.specs),
4947
+ "evictOutOfScopeRecordsAll.serverAssisted"
4948
+ );
4949
+ serverRounds = 1;
4950
+ for (const [specId, items] of Object.entries(results)) {
4951
+ this._applyScopeExitChunkToPlan(plan, specId, items);
4952
+ }
4953
+ } catch (err) {
4954
+ serverFailed = true;
4955
+ console.error(
4956
+ "[evict] server-assisted batch failed (proceeding with local-only):",
4957
+ err
4958
+ );
4959
+ }
4960
+ }
4961
+ return this._applyScopeExitPlan(plan, serverFailed, serverRounds);
4962
+ }
4963
+ /**
4964
+ * Phase 1 of eviction: pre-filter eligible collections, fire
4965
+ * `onEvictionStart`, scan Dexie per collection to partition records
4966
+ * into local-fail (immediate evict) vs server-check (candidates), and
4967
+ * build the `findNewerMany` spec list (each spec tagged with a unique
4968
+ * `specId`).
4969
+ *
4970
+ * Returns a plan object that can be:
4971
+ * - Sent to the server as a standalone `findNewerMany` call (manual eviction),
4972
+ * - Bundled into sync's `findNewerManyStream` via `SyncExtras` (auto eviction),
4973
+ * - Or applied with empty server results when offline/writeOnly.
4974
+ *
4975
+ * Always fires `onEvictionStart` (even with collectionCount=0) so the
4976
+ * callback is a reliable lifecycle marker.
4977
+ */
4978
+ async _collectScopeExitPlan(trigger) {
4979
+ var _a, _b, _c;
4847
4980
  const startTime = Date.now();
4848
- const collectionResults = [];
4849
- let totalEvicted = 0;
4981
+ const eligible = [];
4850
4982
  for (const [name, config] of this.collections) {
4851
4983
  if (config.writeOnly) continue;
4852
4984
  const syncQuery = (_a = config.syncConfig) == null ? void 0 : _a.query;
4853
4985
  const query = typeof syncQuery === "function" ? syncQuery() : syncQuery;
4854
4986
  if (!query) continue;
4855
- const result = await this.evictOutOfScopeRecords(name);
4856
- collectionResults.push(result);
4857
- totalEvicted += result.evictedCount;
4987
+ eligible.push({ name, config, query });
4988
+ }
4989
+ this.safeCallback(this.onEvictionStart, {
4990
+ trigger,
4991
+ collectionCount: eligible.length,
4992
+ collections: eligible.map((e) => e.name)
4993
+ });
4994
+ const pending = [];
4995
+ let totalServerCandidates = 0;
4996
+ for (const { name, config, query } of eligible) {
4997
+ await this.pendingChanges.flushForCollection(name);
4998
+ const dirtyItems = await this.dexieDb.getDirty(name);
4999
+ const dirtyIds = new Set(dirtyItems.map((d) => String(d._id)));
5000
+ const serverAssisted = !config.writeOnly && this.connectionManager.isOnline();
5001
+ let scannedCount = 0;
5002
+ let dirtySkipped = 0;
5003
+ const evictIds = [];
5004
+ const serverCandidateIds = [];
5005
+ await this.dexieDb.forEachBatch(
5006
+ name,
5007
+ 2e3,
5008
+ async (items) => {
5009
+ for (const item of items) {
5010
+ scannedCount++;
5011
+ const id = String(item._id);
5012
+ if (dirtyIds.has(id)) {
5013
+ dirtySkipped++;
5014
+ continue;
5015
+ }
5016
+ if (!matchesQuery(item, query)) {
5017
+ evictIds.push(id);
5018
+ } else if (serverAssisted) {
5019
+ serverCandidateIds.push(id);
5020
+ }
5021
+ }
5022
+ }
5023
+ );
5024
+ totalServerCandidates += serverCandidateIds.length;
5025
+ pending.push({
5026
+ collection: name,
5027
+ config,
5028
+ query,
5029
+ timestamp: (_c = (_b = this.syncMetaCache.get(name)) == null ? void 0 : _b.lastSyncTs) != null ? _c : 0,
5030
+ evictIds,
5031
+ localEvictedCount: evictIds.length,
5032
+ serverEvictedCount: 0,
5033
+ serverCandidateIds,
5034
+ dirtySkipped,
5035
+ scannedCount,
5036
+ serverAssisted
5037
+ });
5038
+ }
5039
+ const CHUNK_SIZE = 500;
5040
+ const specs = [];
5041
+ const ownersBySpecId = /* @__PURE__ */ new Map();
5042
+ for (const p of pending) {
5043
+ if (!p.serverAssisted) continue;
5044
+ for (let i = 0; i < p.serverCandidateIds.length; i += CHUNK_SIZE) {
5045
+ const chunk = p.serverCandidateIds.slice(i, i + CHUNK_SIZE);
5046
+ const specId = `${p.collection}#${i / CHUNK_SIZE}`;
5047
+ specs.push({
5048
+ collection: p.collection,
5049
+ timestamp: p.timestamp,
5050
+ query: {
5051
+ $and: [{ $nor: [p.query] }, { _id: { $in: chunk } }]
5052
+ },
5053
+ opts: { project: { _id: 1 } },
5054
+ specId
5055
+ });
5056
+ ownersBySpecId.set(specId, p);
5057
+ }
5058
+ }
5059
+ return {
5060
+ startTime,
5061
+ trigger,
5062
+ pending,
5063
+ specs,
5064
+ ownersBySpecId,
5065
+ totalServerCandidates
5066
+ };
5067
+ }
5068
+ /**
5069
+ * Append server-confirmed scope-exit IDs to a plan's pending entries.
5070
+ * Called either with the response of a standalone `findNewerMany`
5071
+ * (manual eviction) or per-chunk during sync's `findNewerManyStream`
5072
+ * (auto eviction bundled with sync).
5073
+ */
5074
+ _applyScopeExitChunkToPlan(plan, specId, items) {
5075
+ const owner = plan.ownersBySpecId.get(specId);
5076
+ if (!owner) return;
5077
+ for (const item of items) {
5078
+ const id = item._id;
5079
+ if (id !== void 0) {
5080
+ owner.evictIds.push(String(id));
5081
+ owner.serverEvictedCount++;
5082
+ }
5083
+ }
5084
+ }
5085
+ /**
5086
+ * Phase 3 of eviction: apply accumulated evict IDs (Dexie + in-mem
5087
+ * deletes), broadcast cross-tab reload, build EvictionInfo, fire
5088
+ * `onEviction`. Idempotent on empty plans — produces a zero-eviction
5089
+ * info and still fires the callback so consumers see a lifecycle event.
5090
+ */
5091
+ async _applyScopeExitPlan(plan, serverFailed, serverRounds) {
5092
+ const collectionResults = [];
5093
+ let totalEvicted = 0;
5094
+ let totalLocalEvicted = 0;
5095
+ let totalServerEvicted = 0;
5096
+ for (const p of plan.pending) {
5097
+ const uniqueEvictIds = p.evictIds.length > 0 ? Array.from(new Set(p.evictIds)) : p.evictIds;
5098
+ if (uniqueEvictIds.length > 0) {
5099
+ await this.dexieDb.deleteMany(p.collection, uniqueEvictIds);
5100
+ if (!p.config.writeOnly) {
5101
+ this.inMemManager.writeBatch(
5102
+ p.collection,
5103
+ uniqueEvictIds.map((id) => ({ _id: id })),
5104
+ "delete"
5105
+ );
5106
+ }
5107
+ this.crossTabSync.broadcastReload([p.collection]);
5108
+ }
5109
+ collectionResults.push({
5110
+ collection: p.collection,
5111
+ evictedCount: uniqueEvictIds.length,
5112
+ localEvictedCount: p.localEvictedCount,
5113
+ serverEvictedCount: p.serverEvictedCount,
5114
+ dirtySkipped: p.dirtySkipped,
5115
+ scannedCount: p.scannedCount,
5116
+ serverCandidateCount: p.serverCandidateIds.length
5117
+ });
5118
+ totalEvicted += uniqueEvictIds.length;
5119
+ totalLocalEvicted += p.localEvictedCount;
5120
+ totalServerEvicted += p.serverEvictedCount;
4858
5121
  }
4859
5122
  const info = {
4860
5123
  totalEvicted,
4861
- durationMs: Date.now() - startTime,
4862
- trigger,
5124
+ totalLocalEvicted,
5125
+ totalServerEvicted,
5126
+ totalServerCandidates: plan.totalServerCandidates,
5127
+ serverRounds,
5128
+ serverFailed,
5129
+ durationMs: Date.now() - plan.startTime,
5130
+ trigger: plan.trigger,
4863
5131
  collections: collectionResults
4864
5132
  };
4865
5133
  this.safeCallback(this.onEviction, info);
4866
5134
  return info;
4867
5135
  }
4868
5136
  /**
4869
- * Check if auto-eviction should run and execute if interval has elapsed.
4870
- * Called after sync() and at end of init().
5137
+ * Whether auto-eviction is due to run on the next sync. Mirrors the
5138
+ * gating logic of the old `maybeAutoEvict` (interval check + persisted
5139
+ * `__lastEviction` cursor) but split out so `sync()` can pre-compute
5140
+ * the eviction plan BEFORE issuing the streamed `findNewerMany` —
5141
+ * letting scope-exit specs ride along on the same call.
4871
5142
  */
4872
- async maybeAutoEvict() {
4873
- if (this.evictStaleRecordsEveryHrs <= 0) return;
5143
+ async _isAutoEvictionDue() {
5144
+ if (this.evictStaleRecordsEveryHrs <= 0) return false;
4874
5145
  const intervalMs = this.evictStaleRecordsEveryHrs * 36e5;
4875
5146
  if (!this._lastEvictionDate) {
4876
5147
  const meta = await this.dexieDb.getSyncMeta("__lastEviction");
@@ -4879,9 +5150,15 @@ var _SyncedDb = class _SyncedDb {
4879
5150
  }
4880
5151
  }
4881
5152
  if (this._lastEvictionDate && Date.now() - this._lastEvictionDate.getTime() < intervalMs) {
4882
- return;
5153
+ return false;
4883
5154
  }
4884
- await this.evictOutOfScopeRecordsAll("auto");
5155
+ return true;
5156
+ }
5157
+ /**
5158
+ * Persist the current time as the last successful eviction.
5159
+ * Called from sync() after phase 3 completes.
5160
+ */
5161
+ async _persistEvictionTimestamp() {
4885
5162
  this._lastEvictionDate = /* @__PURE__ */ new Date();
4886
5163
  await this.dexieDb.setSyncMeta(
4887
5164
  "__lastEviction",
@@ -7904,10 +8181,14 @@ var RestProxy = class {
7904
8181
  }
7905
8182
  /**
7906
8183
  * Parse streaming response. Auto-detects format:
7907
- * - Streaming: first byte is 0x00 (end) or 0x01 (data chunk)
8184
+ * - Streaming: first byte is 0x00 (end), 0x01 (data chunk), or 0x02 (data chunk with specId)
7908
8185
  * - Legacy msgpack: first byte is msgpack type marker (0x80+ for map, etc.)
7909
8186
  *
7910
- * Streaming chunk format: [type:1][nameLen:2][name:N][dataLen:4][msgpack(items[]):M]
8187
+ * Frame variants (rdb2 ≥ specId support):
8188
+ * 0x01: [type:1][nameLen:2][name:N][dataLen:4][msgpack(items[]):M]
8189
+ * 0x02: [type:1][nameLen:2][name:N][specIdLen:2][specId:M][dataLen:4][msgpack(items[]):K]
8190
+ *
8191
+ * `onChunk` receives `specId` as the third arg for type-0x02 frames; `undefined` otherwise.
7911
8192
  */
7912
8193
  async parseStreamingResponse(response, onChunk, onActivity) {
7913
8194
  const reader = response.body.getReader();
@@ -7924,7 +8205,7 @@ var RestProxy = class {
7924
8205
  if (!await readMore()) return;
7925
8206
  }
7926
8207
  const firstByte = buffer.at(0);
7927
- if (firstByte !== 0 && firstByte !== 1) {
8208
+ if (firstByte !== 0 && firstByte !== 1 && firstByte !== 2) {
7928
8209
  while (await readMore()) {
7929
8210
  }
7930
8211
  const result = unpack2(buffer.subarray(0, buffer.length));
@@ -7939,28 +8220,61 @@ var RestProxy = class {
7939
8220
  while (buffer.length < 1) {
7940
8221
  if (!await readMore()) return;
7941
8222
  }
7942
- if (buffer.at(0) === 0) return;
8223
+ const type = buffer.at(0);
8224
+ if (type === 0) return;
7943
8225
  while (buffer.length < 3) {
7944
8226
  if (!await readMore())
7945
8227
  throw new Error("Unexpected end of stream in chunk header");
7946
8228
  }
7947
8229
  const nameLen = buffer.at(1) << 8 | buffer.at(2);
7948
- const headerSize = 1 + 2 + nameLen + 4;
7949
- while (buffer.length < headerSize) {
7950
- if (!await readMore())
7951
- throw new Error("Unexpected end of stream in chunk header");
7952
- }
7953
- const collection = decoder2.decode(buffer.subarray(3, 3 + nameLen));
7954
- const dataOffset = 3 + nameLen;
7955
- const dataLen = buffer.at(dataOffset) << 24 | buffer.at(dataOffset + 1) << 16 | buffer.at(dataOffset + 2) << 8 | buffer.at(dataOffset + 3);
7956
- const totalChunkSize = headerSize + dataLen;
7957
- while (buffer.length < totalChunkSize) {
7958
- if (!await readMore())
7959
- throw new Error("Unexpected end of stream in chunk data");
8230
+ if (type === 1) {
8231
+ const headerSize = 1 + 2 + nameLen + 4;
8232
+ while (buffer.length < headerSize) {
8233
+ if (!await readMore())
8234
+ throw new Error("Unexpected end of stream in chunk header");
8235
+ }
8236
+ const collection = decoder2.decode(buffer.subarray(3, 3 + nameLen));
8237
+ const dataOffset = 3 + nameLen;
8238
+ const dataLen = buffer.at(dataOffset) << 24 | buffer.at(dataOffset + 1) << 16 | buffer.at(dataOffset + 2) << 8 | buffer.at(dataOffset + 3);
8239
+ const totalChunkSize = headerSize + dataLen;
8240
+ while (buffer.length < totalChunkSize) {
8241
+ if (!await readMore())
8242
+ throw new Error("Unexpected end of stream in chunk data");
8243
+ }
8244
+ const items = unpack2(buffer.subarray(headerSize, totalChunkSize));
8245
+ buffer.consume(totalChunkSize);
8246
+ await onChunk(collection, items);
8247
+ } else if (type === 2) {
8248
+ const minSize = 1 + 2 + nameLen + 2;
8249
+ while (buffer.length < minSize) {
8250
+ if (!await readMore())
8251
+ throw new Error("Unexpected end of stream in specId chunk header");
8252
+ }
8253
+ const collection = decoder2.decode(buffer.subarray(3, 3 + nameLen));
8254
+ const specIdLenOffset = 3 + nameLen;
8255
+ const specIdLen = buffer.at(specIdLenOffset) << 8 | buffer.at(specIdLenOffset + 1);
8256
+ const headerSize = minSize + specIdLen + 4;
8257
+ while (buffer.length < headerSize) {
8258
+ if (!await readMore())
8259
+ throw new Error("Unexpected end of stream in specId chunk header");
8260
+ }
8261
+ const specIdOffset = specIdLenOffset + 2;
8262
+ const specId = decoder2.decode(
8263
+ buffer.subarray(specIdOffset, specIdOffset + specIdLen)
8264
+ );
8265
+ const dataOffset = specIdOffset + specIdLen;
8266
+ const dataLen = buffer.at(dataOffset) << 24 | buffer.at(dataOffset + 1) << 16 | buffer.at(dataOffset + 2) << 8 | buffer.at(dataOffset + 3);
8267
+ const totalChunkSize = headerSize + dataLen;
8268
+ while (buffer.length < totalChunkSize) {
8269
+ if (!await readMore())
8270
+ throw new Error("Unexpected end of stream in specId chunk data");
8271
+ }
8272
+ const items = unpack2(buffer.subarray(headerSize, totalChunkSize));
8273
+ buffer.consume(totalChunkSize);
8274
+ await onChunk(collection, items, specId);
8275
+ } else {
8276
+ throw new Error(`Unknown stream chunk type: 0x${type.toString(16)}`);
7960
8277
  }
7961
- const items = unpack2(buffer.subarray(headerSize, totalChunkSize));
7962
- buffer.consume(totalChunkSize);
7963
- await onChunk(collection, items);
7964
8278
  }
7965
8279
  }
7966
8280
  // Sync - pošiljanje lokalnih sprememb na server
@@ -116,17 +116,21 @@ export declare class RestProxy implements I_RestInterface {
116
116
  * [type:1][nameLen:2][name:N][dataLen:4][msgpack(items[]):M]
117
117
  * type=0x01 for data, type=0x00 for end-of-stream.
118
118
  */
119
- findNewerManyStream<T>(spec: GetNewerSpec<T>[], onChunk: (collection: string, items: T[]) => Promise<void>, options?: {
119
+ findNewerManyStream<T>(spec: GetNewerSpec<T>[], onChunk: (collection: string, items: T[], specId?: string) => Promise<void>, options?: {
120
120
  timeoutMs?: number;
121
121
  signal?: AbortSignal;
122
122
  activityTimeoutMs?: number;
123
123
  }): Promise<void>;
124
124
  /**
125
125
  * Parse streaming response. Auto-detects format:
126
- * - Streaming: first byte is 0x00 (end) or 0x01 (data chunk)
126
+ * - Streaming: first byte is 0x00 (end), 0x01 (data chunk), or 0x02 (data chunk with specId)
127
127
  * - Legacy msgpack: first byte is msgpack type marker (0x80+ for map, etc.)
128
128
  *
129
- * Streaming chunk format: [type:1][nameLen:2][name:N][dataLen:4][msgpack(items[]):M]
129
+ * Frame variants (rdb2 ≥ specId support):
130
+ * 0x01: [type:1][nameLen:2][name:N][dataLen:4][msgpack(items[]):M]
131
+ * 0x02: [type:1][nameLen:2][name:N][specIdLen:2][specId:M][dataLen:4][msgpack(items[]):K]
132
+ *
133
+ * `onChunk` receives `specId` as the third arg for type-0x02 frames; `undefined` otherwise.
130
134
  */
131
135
  private parseStreamingResponse;
132
136
  deleteOne<T>(collection: string, query: QuerySpec<T>): Promise<T>;
@@ -50,6 +50,7 @@ export declare class SyncedDb implements I_SyncedDb {
50
50
  private readonly onWsNotification?;
51
51
  private readonly onCrossTabSync?;
52
52
  private readonly onWakeSync?;
53
+ private readonly onEvictionStart?;
53
54
  private readonly onEviction?;
54
55
  private readonly evictStaleRecordsEveryHrs;
55
56
  private _lastEvictionDate?;
@@ -228,9 +229,10 @@ export declare class SyncedDb implements I_SyncedDb {
228
229
  * out of scope by other writers that the filtered delta feed would
229
230
  * never report.
230
231
  *
231
- * Chunks `candidateIds` into `$in`-sized batches to keep the request
232
- * payload bounded. Uses `project: { _id: 1 }` to keep the response
233
- * payload minimal.
232
+ * Bundles all id chunks into a single `findNewerMany` call, keyed by
233
+ * `specId = "<chunkIndex>"` so multiple specs against the same
234
+ * collection don't collide on the response. One round-trip regardless
235
+ * of `candidateIds.length`.
234
236
  *
235
237
  * @param timestamp - `findNewer` timestamp cursor. Only records with
236
238
  * `_ts > timestamp` are examined. Caller decides the window (usually
@@ -243,14 +245,69 @@ export declare class SyncedDb implements I_SyncedDb {
243
245
  /**
244
246
  * Evict out-of-scope records for all collections.
245
247
  * Skips writeOnly and collections without syncConfig.query.
246
- * Fires onEviction callback.
248
+ * Fires onEvictionStart before phase 1 and onEviction at the end.
249
+ *
250
+ * Three phases:
251
+ * 1. Local pass per eligible collection — Dexie scan, partition into
252
+ * locally-failing (immediate evict candidates) vs locally-matching
253
+ * (server-check candidates). Dirty records are skipped.
254
+ * 2. Server-assisted pass — a single `findNewerMany` call carrying
255
+ * one spec per (collection × CHUNK_SIZE-id-batch), disambiguated
256
+ * via `specId`. Collapses the prior N-per-collection round-trips
257
+ * into one server round-trip total.
258
+ * 3. Apply evictions per collection (Dexie + in-mem deletes,
259
+ * cross-tab reload broadcast).
260
+ *
261
+ * The server-assisted pass is automatic when the collection is online and not
262
+ * writeOnly — same default as `evictOutOfScopeRecords`. On batch
263
+ * failure, local evictions are still applied; remaining server rounds
264
+ * are skipped (logged, not thrown), and `serverFailed=true` is reported
265
+ * on the EvictionInfo.
247
266
  */
248
267
  evictOutOfScopeRecordsAll(trigger?: "auto" | "manual"): Promise<EvictionInfo>;
249
268
  /**
250
- * Check if auto-eviction should run and execute if interval has elapsed.
251
- * Called after sync() and at end of init().
269
+ * Phase 1 of eviction: pre-filter eligible collections, fire
270
+ * `onEvictionStart`, scan Dexie per collection to partition records
271
+ * into local-fail (immediate evict) vs server-check (candidates), and
272
+ * build the `findNewerMany` spec list (each spec tagged with a unique
273
+ * `specId`).
274
+ *
275
+ * Returns a plan object that can be:
276
+ * - Sent to the server as a standalone `findNewerMany` call (manual eviction),
277
+ * - Bundled into sync's `findNewerManyStream` via `SyncExtras` (auto eviction),
278
+ * - Or applied with empty server results when offline/writeOnly.
279
+ *
280
+ * Always fires `onEvictionStart` (even with collectionCount=0) so the
281
+ * callback is a reliable lifecycle marker.
282
+ */
283
+ private _collectScopeExitPlan;
284
+ /**
285
+ * Append server-confirmed scope-exit IDs to a plan's pending entries.
286
+ * Called either with the response of a standalone `findNewerMany`
287
+ * (manual eviction) or per-chunk during sync's `findNewerManyStream`
288
+ * (auto eviction bundled with sync).
289
+ */
290
+ private _applyScopeExitChunkToPlan;
291
+ /**
292
+ * Phase 3 of eviction: apply accumulated evict IDs (Dexie + in-mem
293
+ * deletes), broadcast cross-tab reload, build EvictionInfo, fire
294
+ * `onEviction`. Idempotent on empty plans — produces a zero-eviction
295
+ * info and still fires the callback so consumers see a lifecycle event.
296
+ */
297
+ private _applyScopeExitPlan;
298
+ /**
299
+ * Whether auto-eviction is due to run on the next sync. Mirrors the
300
+ * gating logic of the old `maybeAutoEvict` (interval check + persisted
301
+ * `__lastEviction` cursor) but split out so `sync()` can pre-compute
302
+ * the eviction plan BEFORE issuing the streamed `findNewerMany` —
303
+ * letting scope-exit specs ride along on the same call.
304
+ */
305
+ private _isAutoEvictionDue;
306
+ /**
307
+ * Persist the current time as the last successful eviction.
308
+ * Called from sync() after phase 3 completes.
252
309
  */
253
- private maybeAutoEvict;
310
+ private _persistEvictionTimestamp;
254
311
  getObjectMetadata<M>(collection: string, _id: Id): M | undefined;
255
312
  getObjectsMetadata<M>(collection: string, _ids: Id[]): (M | undefined)[];
256
313
  setObjectMetadata<M>(collection: string, _id: Id, metadata: M): void;
@@ -7,7 +7,7 @@
7
7
  * - Uploading dirty items to server
8
8
  */
9
9
  import type { LocalDbEntity } from "../../types/DbEntity";
10
- import type { I_SyncEngine, SyncEngineConfig } from "../types/managers";
10
+ import type { I_SyncEngine, SyncEngineConfig, SyncExtras } from "../types/managers";
11
11
  import type { UploadResult } from "../types/internal";
12
12
  export declare class SyncEngine implements I_SyncEngine {
13
13
  private readonly tenant;
@@ -21,8 +21,13 @@ export declare class SyncEngine implements I_SyncEngine {
21
21
  /**
22
22
  * Execute full sync cycle.
23
23
  * Called by SyncedDb which handles locking.
24
+ *
25
+ * `extras` lets the caller append additional `findNewerMany` specs
26
+ * (each with a unique `specId`) onto the same streamed call. Chunks
27
+ * for those specs are routed via `extras.onChunk(specId, items)` and
28
+ * never enter the positive-sync processing path.
24
29
  */
25
- sync(calledFrom?: string): Promise<void>;
30
+ sync(calledFrom?: string, extras?: SyncExtras): Promise<void>;
26
31
  /**
27
32
  * Upload dirty items for all collections.
28
33
  */
@@ -283,9 +283,29 @@ export interface SyncEngineConfig {
283
283
  callbacks: SyncEngineCallbacks;
284
284
  deps: SyncEngineDeps;
285
285
  }
286
+ /**
287
+ * Optional extras passed to `SyncEngine.sync()`. Lets a caller piggyback
288
+ * additional `findNewerMany` specs (each tagged with a unique `specId`)
289
+ * onto the same streamed server call that fetches positive sync deltas —
290
+ * saving the round-trip that those specs would otherwise cost.
291
+ *
292
+ * Used today by SyncedDb to bundle scope-exit eviction queries into the
293
+ * positive sync stream when auto-eviction is due.
294
+ *
295
+ * Contract:
296
+ * - Each spec MUST set `specId` to a string unique within the call.
297
+ * - Positive sync specs do NOT set `specId`; chunks for them arrive
298
+ * with `specId === undefined` and are processed normally.
299
+ * - Extra spec chunks are routed to `onChunk(specId, items)` and never
300
+ * touch the conflict-resolution / Dexie write path.
301
+ */
302
+ export interface SyncExtras {
303
+ specs: import("../../types/I_RestInterface").GetNewerSpec<any>[];
304
+ onChunk: (specId: string, items: any[]) => void;
305
+ }
286
306
  export interface I_SyncEngine {
287
307
  /** Execute full sync cycle. */
288
- sync(calledFrom?: string): Promise<void>;
308
+ sync(calledFrom?: string, extras?: SyncExtras): Promise<void>;
289
309
  /** Upload dirty items for all collections. */
290
310
  uploadDirtyItems(calledFrom?: string): Promise<UploadResult>;
291
311
  /** Upload dirty items for a specific collection. */
@@ -59,6 +59,18 @@ export interface GetNewerSpec<T> {
59
59
  timestamp: number | Date | string | Timestamp;
60
60
  query?: QuerySpec<T>;
61
61
  opts?: QueryOpts;
62
+ /**
63
+ * Optional disambiguator. When set, results for this spec are keyed
64
+ * by `specId` in the `findNewerMany` response (and emitted as the
65
+ * `specId` arg to `onChunk` in the streaming variant) instead of
66
+ * `collection`. Lets a single batched request carry multiple specs
67
+ * against the same `collection` without `Record<key, items[]>`
68
+ * collisions. Backwards-compatible: specs without `specId` are keyed
69
+ * by `collection` exactly as before.
70
+ *
71
+ * Server support: cry-db ≥ 2.4.32 (specId honored).
72
+ */
73
+ specId?: string;
62
74
  }
63
75
  /**
64
76
  * Request to batch update a collection, used to sync data
@@ -90,8 +102,12 @@ export interface I_RestInterface {
90
102
  findByIds<T>(collection: string, ids: Id[]): Promise<T[]>;
91
103
  findNewer<T>(collection: string, timestamp: Timestamp | number | string | Date, query?: QuerySpec<T>, opts?: QueryOpts): Promise<T[]>;
92
104
  findNewerMany<T>(spec?: GetNewerSpec<T>[]): Promise<Record<string, any[]>>;
93
- /** Streaming variant of findNewerMany. Calls onChunk for each batch of items as they arrive. */
94
- findNewerManyStream<T>(spec: GetNewerSpec<T>[], onChunk: (collection: string, items: T[]) => Promise<void>, options?: {
105
+ /**
106
+ * Streaming variant of findNewerMany. Calls onChunk for each batch of items as they arrive.
107
+ * `specId` is forwarded as the third arg when the originating spec set one — `undefined` otherwise.
108
+ * Old two-arg `onChunk` callbacks keep working unchanged (the third arg is ignored).
109
+ */
110
+ findNewerManyStream<T>(spec: GetNewerSpec<T>[], onChunk: (collection: string, items: T[], specId?: string) => Promise<void>, options?: {
95
111
  timeoutMs?: number;
96
112
  signal?: AbortSignal;
97
113
  }): Promise<void>;
@@ -232,17 +232,42 @@ export interface SyncInfo {
232
232
  export interface EvictionCollectionInfo {
233
233
  /** Collection name. */
234
234
  collection: string;
235
- /** Records removed from Dexie + in-mem. */
235
+ /** Records removed from Dexie + in-mem (local + server-confirmed). */
236
236
  evictedCount: number;
237
+ /** Records evicted by the local pass (cached state failed the query). */
238
+ localEvictedCount: number;
239
+ /** Records evicted by the server-assisted pass (server reports out-of-scope). */
240
+ serverEvictedCount: number;
237
241
  /** Records skipped because they have pending dirty changes. */
238
242
  dirtySkipped: number;
239
243
  /** Total records scanned in this collection. */
240
244
  scannedCount: number;
245
+ /** IDs sent to the server for scope-exit confirmation (0 if not server-assisted). */
246
+ serverCandidateCount: number;
247
+ }
248
+ /** Reported to onEvictionStart before the eviction passes begin. */
249
+ export interface EvictionStartInfo {
250
+ /** Whether this run is an auto or manual trigger. */
251
+ trigger: "auto" | "manual";
252
+ /** Number of eligible collections (writeOnly and queryless skipped). */
253
+ collectionCount: number;
254
+ /** Names of the eligible collections, in iteration order. */
255
+ collections: string[];
241
256
  }
242
257
  /** Eviction result reported to the onEviction callback. */
243
258
  export interface EvictionInfo {
244
- /** Total records evicted across all collections. */
259
+ /** Total records evicted across all collections (local + server). */
245
260
  totalEvicted: number;
261
+ /** Sum of records evicted by the local pass alone, across all collections. */
262
+ totalLocalEvicted: number;
263
+ /** Sum of records evicted by the server-assisted pass alone, across all collections. */
264
+ totalServerEvicted: number;
265
+ /** Sum of `serverCandidateCount` across all collections — IDs we asked the server about. */
266
+ totalServerCandidates: number;
267
+ /** Number of `findNewerMany` round-trips issued during the server-assisted pass. */
268
+ serverRounds: number;
269
+ /** Whether the server-assisted pass aborted mid-flight (still applies local evictions). */
270
+ serverFailed: boolean;
246
271
  /** Wall-clock duration of the eviction operation in ms. */
247
272
  durationMs: number;
248
273
  /** Whether this was triggered automatically or by manual call. */
@@ -525,7 +550,17 @@ export interface SyncedDbConfig {
525
550
  * Default: 0 (disabled).
526
551
  */
527
552
  evictStaleRecordsEveryHrs?: number;
528
- /** Callback fired after each eviction run (manual or auto). Errors are swallowed. */
553
+ /**
554
+ * Callback fired before each eviction run begins, after the eligible
555
+ * collection list has been resolved. Useful as a debug breadcrumb to
556
+ * confirm a tick fired and which collections will be scanned.
557
+ * Errors are swallowed.
558
+ */
559
+ onEvictionStart?: (info: EvictionStartInfo) => void;
560
+ /**
561
+ * Callback fired after each eviction run completes (manual or auto).
562
+ * Receives full per-collection and aggregate stats. Errors are swallowed.
563
+ */
529
564
  onEviction?: (info: EvictionInfo) => void;
530
565
  }
531
566
  /**
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "cry-synced-db-client",
3
- "version": "0.1.146",
3
+ "version": "0.1.148",
4
4
  "type": "module",
5
5
  "main": "./dist/index.js",
6
6
  "module": "./dist/index.js",
@@ -36,7 +36,7 @@
36
36
  "vitest": "^4.1.2"
37
37
  },
38
38
  "dependencies": {
39
- "cry-db": "^2.4.31",
39
+ "cry-db": "^2.4.32",
40
40
  "cry-helpers": "^2.1.193",
41
41
  "msgpackr": "^1.11.9",
42
42
  "notepack": "^0.0.2",