memory-lancedb-pro 1.0.25 → 1.0.26

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,770 @@
1
+ import { describe, it, beforeEach, afterEach } from "node:test";
2
+ import assert from "node:assert/strict";
3
+ import jitiFactory from "jiti";
4
+
5
+ const jiti = jitiFactory(import.meta.url, { interopDefault: true });
6
+
7
+ const {
8
+ parseAccessMetadata,
9
+ buildUpdatedMetadata,
10
+ computeEffectiveHalfLife,
11
+ AccessTracker,
12
+ } = jiti("../src/access-tracker.ts");
13
+
14
+ // ============================================================================
15
+ // Test helpers
16
+ // ============================================================================
17
+
18
/**
 * Builds an in-memory store double backed by the given `entries` Map.
 * Every `getById` / `update` invocation is recorded so tests can assert on
 * call counts and arguments. Returned entries are shallow copies so callers
 * cannot mutate the backing Map through them.
 *
 * @param {Map<string, {id: string, metadata?: string}>} [entries] seed data
 */
function createMockStore(entries = new Map()) {
  // Call logs are captured in the closure and exposed as properties,
  // so `this` binding inside the async methods is irrelevant.
  const getByIdCalls = [];
  const updateCalls = [];

  return {
    /** @type {Array<{id: string}>} */
    getByIdCalls,
    /** @type {Array<{id: string, updates: object}>} */
    updateCalls,

    async getById(id) {
      getByIdCalls.push({ id });
      const found = entries.get(id);
      return found ? { ...found } : null;
    },

    async update(id, updates) {
      updateCalls.push({ id, updates });
      const found = entries.get(id);
      if (!found) return null;
      // Simulate store.update: only the metadata field is written back.
      if (updates.metadata) {
        found.metadata = updates.metadata;
      }
      return { ...found };
    },
  };
}
42
+
43
/**
 * Builds a minimal logger double. Each `warn(...)` call's argument list is
 * appended to the exposed `warnings` array; `info` is a no-op.
 */
function createMockLogger() {
  /** @type {unknown[][]} */
  const warnings = [];
  return {
    warnings,
    warn(...args) {
      warnings.push(args);
    },
    info() {},
  };
}
53
+
54
/**
 * Convenience factory: wires an AccessTracker to (optionally supplied) mock
 * collaborators and hands all three back so tests can inspect them.
 * NOTE(review): not referenced by the tests visible in this file — presumably
 * kept for use by other suites; confirm before removing.
 *
 * @param {{store?: object, logger?: object, debounceMs?: number}} [overrides]
 * @returns {{tracker: object, store: object, logger: object}}
 */
function createTracker(overrides = {}) {
  // `||` (not `??`) for store/logger mirrors the original: any falsy value
  // falls back to a fresh mock. debounceMs uses `??` so 0 is respected.
  const resolvedStore = overrides.store || createMockStore();
  const resolvedLogger = overrides.logger || createMockLogger();
  const resolvedDebounce = overrides.debounceMs ?? 60_000;
  const tracker = new AccessTracker({
    store: resolvedStore,
    logger: resolvedLogger,
    debounceMs: resolvedDebounce,
  });
  return { tracker, store: resolvedStore, logger: resolvedLogger };
}
64
+
65
+ // ============================================================================
66
+ // parseAccessMetadata
67
+ // ============================================================================
68
+
69
describe("parseAccessMetadata", () => {
  /** Asserts that parsing `input` yields the zeroed-out default record. */
  function expectDefaults(input) {
    const parsed = parseAccessMetadata(input);
    assert.equal(parsed.accessCount, 0);
    assert.equal(parsed.lastAccessedAt, 0);
  }

  it("returns defaults for undefined", () => {
    expectDefaults(undefined);
  });

  it("returns defaults for empty string", () => {
    expectDefaults("");
  });

  it("returns defaults for malformed JSON", () => {
    expectDefaults("{not valid json");
  });

  it("returns defaults for JSON array (non-object)", () => {
    expectDefaults("[1, 2, 3]");
  });

  it("returns defaults for JSON null", () => {
    expectDefaults("null");
  });

  it("returns defaults for JSON string", () => {
    expectDefaults('"hello"');
  });

  it("parses valid metadata with both fields", () => {
    const parsed = parseAccessMetadata(
      JSON.stringify({ accessCount: 5, lastAccessedAt: 1700000000000 }),
    );
    assert.equal(parsed.accessCount, 5);
    assert.equal(parsed.lastAccessedAt, 1700000000000);
  });

  it("defaults missing accessCount to 0", () => {
    const parsed = parseAccessMetadata(
      JSON.stringify({ lastAccessedAt: 1700000000000 }),
    );
    assert.equal(parsed.accessCount, 0);
    assert.equal(parsed.lastAccessedAt, 1700000000000);
  });

  it("defaults missing lastAccessedAt to 0", () => {
    const parsed = parseAccessMetadata(JSON.stringify({ accessCount: 3 }));
    assert.equal(parsed.accessCount, 3);
    assert.equal(parsed.lastAccessedAt, 0);
  });

  it("clamps negative accessCount to 0", () => {
    const parsed = parseAccessMetadata(
      JSON.stringify({ accessCount: -10, lastAccessedAt: 100 }),
    );
    assert.equal(parsed.accessCount, 0);
  });

  it("clamps accessCount above 10000 to 10000", () => {
    const parsed = parseAccessMetadata(
      JSON.stringify({ accessCount: 99999, lastAccessedAt: 100 }),
    );
    assert.equal(parsed.accessCount, 10000);
  });

  it("floors fractional accessCount", () => {
    const parsed = parseAccessMetadata(JSON.stringify({ accessCount: 3.7 }));
    assert.equal(parsed.accessCount, 3);
  });

  it("handles NaN accessCount", () => {
    const parsed = parseAccessMetadata(
      JSON.stringify({ accessCount: "not a number" }),
    );
    assert.equal(parsed.accessCount, 0);
  });

  it("handles Infinity accessCount", () => {
    // JSON.stringify converts Infinity to null, so hand-craft the payload;
    // 1e309 overflows to Infinity when parsed, which is not finite.
    const parsed = parseAccessMetadata('{"accessCount": 1e309}');
    assert.equal(parsed.accessCount, 0);
  });

  it("handles negative lastAccessedAt", () => {
    const parsed = parseAccessMetadata(
      JSON.stringify({ accessCount: 1, lastAccessedAt: -500 }),
    );
    assert.equal(parsed.lastAccessedAt, 0);
  });

  it("preserves valid lastAccessedAt", () => {
    const now = Date.now();
    const parsed = parseAccessMetadata(JSON.stringify({ lastAccessedAt: now }));
    assert.equal(parsed.lastAccessedAt, now);
  });

  it("handles empty JSON object", () => {
    expectDefaults("{}");
  });
});
178
+
179
+ // ============================================================================
180
+ // buildUpdatedMetadata
181
+ // ============================================================================
182
+
183
describe("buildUpdatedMetadata", () => {
  /** Runs buildUpdatedMetadata and parses its JSON-string result. */
  const build = (existing, delta) => JSON.parse(buildUpdatedMetadata(existing, delta));

  it("creates metadata from undefined with delta=1", () => {
    const meta = build(undefined, 1);
    assert.equal(meta.accessCount, 1);
    assert.equal(typeof meta.lastAccessedAt, "number");
    assert.ok(meta.lastAccessedAt > 0);
  });

  it("creates metadata from empty string with delta=1", () => {
    assert.equal(build("", 1).accessCount, 1);
  });

  it("increments existing accessCount", () => {
    const existing = JSON.stringify({ accessCount: 3, lastAccessedAt: 100 });
    assert.equal(build(existing, 2).accessCount, 5);
  });

  it("preserves all existing fields", () => {
    const existing = JSON.stringify({
      accessCount: 1,
      lastAccessedAt: 100,
      customField: "hello",
      nested: { a: 1 },
    });
    const meta = build(existing, 1);
    assert.equal(meta.accessCount, 2);
    assert.equal(meta.customField, "hello");
    assert.deepEqual(meta.nested, { a: 1 });
  });

  it("clamps result to max 10000", () => {
    const existing = JSON.stringify({ accessCount: 9999 });
    assert.equal(build(existing, 100).accessCount, 10000);
  });

  it("clamps negative result to 0", () => {
    const existing = JSON.stringify({ accessCount: 2 });
    assert.equal(build(existing, -10).accessCount, 0);
  });

  it("handles malformed existing JSON gracefully", () => {
    const meta = build("{bad json", 3);
    assert.equal(meta.accessCount, 3);
    assert.equal(typeof meta.lastAccessedAt, "number");
  });

  it("updates lastAccessedAt to a recent timestamp", () => {
    const before = Date.now();
    const meta = build(undefined, 1);
    const after = Date.now();
    assert.ok(meta.lastAccessedAt >= before);
    assert.ok(meta.lastAccessedAt <= after);
  });

  it("returns valid JSON string", () => {
    const output = buildUpdatedMetadata(undefined, 1);
    assert.doesNotThrow(() => JSON.parse(output));
  });

  it("delta of 0 keeps count unchanged", () => {
    const existing = JSON.stringify({ accessCount: 5 });
    assert.equal(build(existing, 0).accessCount, 5);
  });
});
252
+
253
+ // ============================================================================
254
+ // computeEffectiveHalfLife
255
+ // ============================================================================
256
+
257
describe("computeEffectiveHalfLife", () => {
  // Milliseconds in one day, used to age access timestamps in these tests.
  const DAY_MS = 24 * 60 * 60 * 1000;

  it("returns baseHalfLife when reinforcementFactor is 0", () => {
    assert.equal(computeEffectiveHalfLife(30, 100, Date.now(), 0, 5), 30);
  });

  it("returns baseHalfLife when accessCount is 0", () => {
    assert.equal(computeEffectiveHalfLife(30, 0, Date.now(), 0.5, 5), 30);
  });

  it("returns baseHalfLife when accessCount is negative", () => {
    assert.equal(computeEffectiveHalfLife(30, -5, Date.now(), 0.5, 5), 30);
  });

  it("extends half-life for recent accesses", () => {
    const result = computeEffectiveHalfLife(30, 10, Date.now(), 0.5, 5);
    assert.ok(result > 30, `Expected > 30, got ${result}`);
  });

  it("uses logarithmic scaling (diminishing returns)", () => {
    const now = Date.now();
    const at10 = computeEffectiveHalfLife(30, 10, now, 0.5, 100);
    const at100 = computeEffectiveHalfLife(30, 100, now, 0.5, 100);
    const at1000 = computeEffectiveHalfLife(30, 1000, now, 0.5, 100);

    // Each 10x increase in access count should yield less additional extension
    const delta1 = at100 - at10;
    const delta2 = at1000 - at100;
    assert.ok(delta2 < delta1 * 2, "Logarithmic scaling should show diminishing returns");
  });

  it("caps result at baseHalfLife * maxMultiplier", () => {
    const result = computeEffectiveHalfLife(30, 10000, Date.now(), 10, 3);
    assert.equal(result, 90); // 30 * 3 = 90
  });

  it("decays access freshness for old accesses", () => {
    const now = Date.now();
    const recentResult = computeEffectiveHalfLife(30, 10, now, 0.5, 10);
    // 60 days ago
    const oldResult = computeEffectiveHalfLife(30, 10, now - 60 * DAY_MS, 0.5, 10);
    assert.ok(
      recentResult > oldResult,
      `Recent (${recentResult}) should be > old (${oldResult})`,
    );
  });

  it("access 30 days ago has roughly half the effect", () => {
    const now = Date.now();

    // Fresh access
    const freshExtension = computeEffectiveHalfLife(30, 10, now, 0.5, 100) - 30;

    // 30-day-old access (should be approximately half freshness)
    const oldExtension =
      computeEffectiveHalfLife(30, 10, now - 30 * DAY_MS, 0.5, 100) - 30;

    // The extension should be roughly halved (within tolerance)
    // Due to log1p, the ratio won't be exactly 0.5, but the old extension should be smaller
    assert.ok(oldExtension < freshExtension, "30-day-old access should have less extension");
    assert.ok(oldExtension > 0, "30-day-old access should still have some extension");
  });

  it("very old accesses contribute almost no extension", () => {
    const now = Date.now();
    const result = computeEffectiveHalfLife(30, 10, now - 365 * DAY_MS, 0.5, 10);
    // After 365 days with 30-day decay half-life, freshness is very low
    const extension = result - 30;
    assert.ok(extension < 1, `Year-old access extension (${extension}) should be < 1`);
  });

  it("handles maxMultiplier of 1 (no extension allowed)", () => {
    assert.equal(computeEffectiveHalfLife(30, 100, Date.now(), 1, 1), 30);
  });

  it("handles baseHalfLife of 0", () => {
    // 0 + 0 * 0.5 * log1p(x) = 0
    assert.equal(computeEffectiveHalfLife(0, 10, Date.now(), 0.5, 5), 0);
  });
});
348
+
349
+ // ============================================================================
350
+ // AccessTracker class
351
+ // ============================================================================
352
+
353
describe("AccessTracker", () => {
  /** Promise-based delay, used by the debounce-timing tests below. */
  const sleep = (ms) => new Promise((resolve) => setTimeout(resolve, ms));

  /** @type {InstanceType<typeof AccessTracker>} */
  let tracker;
  let mockStore;
  let mockLogger;

  beforeEach(() => {
    mockStore = createMockStore();
    mockLogger = createMockLogger();
    tracker = new AccessTracker({
      store: mockStore,
      logger: mockLogger,
      debounceMs: 60_000, // long debounce to avoid auto-flush during tests
    });
  });

  afterEach(() => {
    tracker.destroy();
  });

  it("starts with empty pending map", () => {
    assert.equal(tracker.getPendingUpdates().size, 0);
  });

  it("recordAccess increments delta for a single ID", () => {
    tracker.recordAccess(["id-1"]);
    assert.equal(tracker.getPendingUpdates().get("id-1"), 1);
  });

  it("recordAccess accumulates multiple calls for same ID", () => {
    for (let i = 0; i < 3; i++) {
      tracker.recordAccess(["id-1"]);
    }
    assert.equal(tracker.getPendingUpdates().get("id-1"), 3);
  });

  it("recordAccess handles multiple IDs in one call", () => {
    tracker.recordAccess(["id-1", "id-2", "id-3"]);
    const pending = tracker.getPendingUpdates();
    for (const id of ["id-1", "id-2", "id-3"]) {
      assert.equal(pending.get(id), 1);
    }
  });

  it("recordAccess handles duplicate IDs in one call", () => {
    tracker.recordAccess(["id-1", "id-1"]);
    assert.equal(tracker.getPendingUpdates().get("id-1"), 2);
  });

  it("recordAccess handles empty array", () => {
    tracker.recordAccess([]);
    assert.equal(tracker.getPendingUpdates().size, 0);
  });

  it("getPendingUpdates returns a copy (not the internal map)", () => {
    tracker.recordAccess(["id-1"]);
    const snapshot = tracker.getPendingUpdates();
    snapshot.set("id-99", 42);
    // Mutating the snapshot must not leak into the tracker's internal map
    assert.equal(tracker.getPendingUpdates().has("id-99"), false);
  });

  it("flush clears all pending updates", async () => {
    tracker.recordAccess(["id-1", "id-2"]);
    assert.equal(tracker.getPendingUpdates().size, 2);
    await tracker.flush();
    assert.equal(tracker.getPendingUpdates().size, 0);
  });

  it("destroy clears all pending updates", () => {
    tracker.recordAccess(["id-1"]);
    tracker.destroy();
    assert.equal(tracker.getPendingUpdates().size, 0);
  });

  it("can record new accesses after flush", async () => {
    tracker.recordAccess(["id-1"]);
    await tracker.flush();
    tracker.recordAccess(["id-2"]);
    const pending = tracker.getPendingUpdates();
    assert.equal(pending.has("id-1"), false);
    assert.equal(pending.get("id-2"), 1);
  });

  it("recordAccess is synchronous (no promise returned)", () => {
    assert.equal(tracker.recordAccess(["id-1"]), undefined);
  });

  it("tracks independent IDs independently", () => {
    for (const id of ["a", "b", "a", "c", "b", "a"]) {
      tracker.recordAccess([id]);
    }
    const pending = tracker.getPendingUpdates();
    assert.equal(pending.get("a"), 3);
    assert.equal(pending.get("b"), 2);
    assert.equal(pending.get("c"), 1);
  });

  it("debounce auto-flush fires after configured delay", async () => {
    const quickTracker = new AccessTracker({
      store: createMockStore(),
      logger: createMockLogger(),
      debounceMs: 50, // 50ms debounce
    });
    try {
      quickTracker.recordAccess(["id-1"]);
      assert.equal(quickTracker.getPendingUpdates().size, 1);

      // Wait for debounce to fire
      await sleep(120);

      assert.equal(
        quickTracker.getPendingUpdates().size,
        0,
        "Pending should be empty after debounce",
      );
    } finally {
      quickTracker.destroy();
    }
  });

  it("debounce timer resets on each recordAccess", async () => {
    const quickTracker = new AccessTracker({
      store: createMockStore(),
      logger: createMockLogger(),
      debounceMs: 80,
    });
    try {
      quickTracker.recordAccess(["id-1"]);

      // Wait 50ms (less than debounce)
      await sleep(50);

      // Record again — should reset the 80ms timer
      quickTracker.recordAccess(["id-2"]);

      // Wait 50ms more — total 100ms from first, but only 50ms from last
      await sleep(50);

      // Should still have pending items (timer was reset)
      assert.equal(
        quickTracker.getPendingUpdates().size,
        2,
        "Should still be pending (timer reset)",
      );

      // Wait for full debounce from last recordAccess
      await sleep(80);

      assert.equal(
        quickTracker.getPendingUpdates().size,
        0,
        "Should be flushed after debounce",
      );
    } finally {
      quickTracker.destroy();
    }
  });
});
527
+
528
+ // ============================================================================
529
+ // AccessTracker flush integration
530
+ // ============================================================================
531
+
532
// These integration tests drive AccessTracker.flush() against hand-built store
// doubles: a recording mock, a throwing store, a promise-gated "slow" store,
// and a once-failing "flakey" store. The concurrency test relies on the exact
// order in which promises are created and resolved — do not reorder statements.
describe("AccessTracker flush integration", () => {
  it("flush calls store.update with merged metadata for each pending ID", async () => {
    const id1 = "aaaaaaaa-1111-2222-3333-444444444444";
    const id2 = "bbbbbbbb-1111-2222-3333-444444444444";

    // Seed the mock store with two entries carrying existing metadata.
    const entries = new Map([
      [id1, { id: id1, metadata: JSON.stringify({ accessCount: 2, customTag: "keep" }) }],
      [id2, { id: id2, metadata: JSON.stringify({ accessCount: 0 }) }],
    ]);

    const store = createMockStore(entries);
    const logger = createMockLogger();

    const tracker = new AccessTracker({ store, logger, debounceMs: 60_000 });
    try {
      tracker.recordAccess([id1, id1, id1]); // delta=3 for id1
      tracker.recordAccess([id2]); // delta=1 for id2

      await tracker.flush();

      // Pending should be empty after flush
      assert.equal(tracker.getPendingUpdates().size, 0);

      // getById should be called once per entry (pure read, no delete+add)
      assert.equal(store.getByIdCalls.length, 2);

      // store.update should only have write calls (with metadata), no empty reads
      assert.equal(store.updateCalls.length, 2);

      // All update calls should have metadata (no empty {} reads)
      const writeCalls = store.updateCalls.filter((c) => c.updates.metadata);
      assert.equal(writeCalls.length, 2);

      // Verify id1 metadata merge: accessCount 2 + 3 = 5, customTag preserved
      const id1Write = writeCalls.find((c) => c.id === id1);
      assert.ok(id1Write, "Should have a write call for id1");
      const id1Meta = JSON.parse(id1Write.updates.metadata);
      assert.equal(id1Meta.accessCount, 5);
      assert.equal(id1Meta.customTag, "keep");
      assert.equal(typeof id1Meta.lastAccessedAt, "number");

      // Verify id2 metadata merge: accessCount 0 + 1 = 1
      const id2Write = writeCalls.find((c) => c.id === id2);
      assert.ok(id2Write, "Should have a write call for id2");
      const id2Meta = JSON.parse(id2Write.updates.metadata);
      assert.equal(id2Meta.accessCount, 1);
    } finally {
      tracker.destroy();
    }
  });

  it("flush skips entries not found in store (returns null)", async () => {
    const missingId = "cccccccc-1111-2222-3333-444444444444";

    // Empty store — all lookups return null
    const store = createMockStore(new Map());
    const logger = createMockLogger();

    const tracker = new AccessTracker({ store, logger, debounceMs: 60_000 });
    try {
      tracker.recordAccess([missingId]);
      await tracker.flush();

      // Should have tried getById, but no write-back via update
      assert.equal(store.getByIdCalls.length, 1);
      assert.equal(store.updateCalls.length, 0);

      // No warnings (null return is expected, not an error)
      assert.equal(logger.warnings.length, 0);
    } finally {
      tracker.destroy();
    }
  });

  it("flush logs warning on store error and continues", async () => {
    const id1 = "dddddddd-1111-2222-3333-444444444444";
    const id2 = "eeeeeeee-1111-2222-3333-444444444444";

    // Store double whose getById throws for id1 only, proving one failed
    // entry does not abort processing of the rest of the batch.
    let getByIdCallCount = 0;
    const failingStore = {
      async getById(id) {
        getByIdCallCount++;
        if (id === id1) {
          throw new Error("simulated store failure");
        }
        // id2 succeeds
        return { id, metadata: JSON.stringify({ accessCount: 0 }) };
      },
      async update(id, updates) {
        return { id, metadata: updates.metadata || "{}" };
      },
    };

    const logger = createMockLogger();
    const tracker = new AccessTracker({ store: failingStore, logger, debounceMs: 60_000 });
    try {
      tracker.recordAccess([id1, id2]);
      await tracker.flush();

      // Should have warned about id1 failure
      assert.ok(logger.warnings.length >= 1, "Should log at least one warning");
      const warningMsg = String(logger.warnings[0][0]);
      assert.ok(
        warningMsg.includes("access-tracker"),
        `Warning should mention access-tracker, got: ${warningMsg}`,
      );

      // id2 should have been processed (getById was called for it)
      assert.equal(getByIdCallCount, 2, "getById should have been called for both IDs");
    } finally {
      tracker.destroy();
    }
  });

  it("concurrent flush: second flush awaits first then processes accumulated data", async () => {
    const id1 = "ffffffff-1111-2222-3333-444444444444";

    // The first getById parks on a promise whose resolver is captured in
    // `resolveFirst`, letting the test hold flush #1 open while flush #2 starts.
    let resolveFirst;
    let getByIdCallCount = 0;
    const slowStore = {
      async getById(id) {
        getByIdCallCount++;
        if (getByIdCallCount === 1) {
          // First getById blocks until we resolve
          await new Promise((resolve) => { resolveFirst = resolve; });
        }
        return { id, metadata: JSON.stringify({ accessCount: 0 }) };
      },
      updateCalls: [],
      async update(id, updates) {
        this.updateCalls.push({ id, updates });
        return { id, metadata: updates.metadata || "{}" };
      },
    };

    const logger = createMockLogger();
    const tracker = new AccessTracker({ store: slowStore, logger, debounceMs: 60_000 });
    try {
      tracker.recordAccess([id1]);

      // Start first flush (will block on first store.getById)
      const flush1 = tracker.flush();

      // Record more while flush is in progress
      tracker.recordAccess([id1]);

      // Second flush should await the first, then process accumulated data
      const flush2 = tracker.flush();

      // Unblock the first flush
      resolveFirst();
      await flush1;
      await flush2;

      // Both flushes should have completed — no pending data left
      assert.equal(tracker.getPendingUpdates().size, 0, "All data should be flushed");

      // store.update should have been called twice (once per flush cycle)
      assert.equal(slowStore.updateCalls.length, 2, "Two write-back cycles should have occurred");
    } finally {
      tracker.destroy();
    }
  });

  it("flush requeues failed write-backs for retry on next flush", async () => {
    const id1 = "gggggggg-1111-2222-3333-444444444444";

    // Store double that fails exactly once (first getById), then succeeds —
    // simulating a transient outage to exercise the requeue path.
    let failCount = 0;
    const flakeyStore = {
      getByIdCalls: [],
      updateCalls: [],
      async getById(id) {
        this.getByIdCalls.push({ id });
        failCount++;
        if (failCount === 1) {
          throw new Error("simulated transient failure");
        }
        return { id, metadata: JSON.stringify({ accessCount: 0 }) };
      },
      async update(id, updates) {
        this.updateCalls.push({ id, updates });
        return { id, metadata: updates.metadata || "{}" };
      },
    };

    const logger = createMockLogger();
    const tracker = new AccessTracker({ store: flakeyStore, logger, debounceMs: 60_000 });
    try {
      tracker.recordAccess([id1]); // delta=1

      // First flush — getById fails, delta should be requeued
      await tracker.flush();
      assert.equal(tracker.getPendingUpdates().size, 1, "Failed delta should be requeued");
      assert.equal(tracker.getPendingUpdates().get(id1), 1, "Requeued delta should be 1");
      assert.ok(logger.warnings.length >= 1, "Should log a warning on failure");

      // Second flush — getById succeeds this time
      await tracker.flush();
      assert.equal(tracker.getPendingUpdates().size, 0, "Requeued data should be flushed");
      assert.equal(flakeyStore.updateCalls.length, 1, "Should have one successful write-back");
    } finally {
      tracker.destroy();
    }
  });

  it("destroy warns when pending writes exist", () => {
    const store = createMockStore();
    const logger = createMockLogger();
    const tracker = new AccessTracker({ store, logger, debounceMs: 60_000 });

    tracker.recordAccess(["id-1", "id-2"]);
    assert.equal(logger.warnings.length, 0);

    tracker.destroy();

    // Should have logged a warning about pending writes
    assert.equal(logger.warnings.length, 1, "Should log one warning");
    const warningMsg = String(logger.warnings[0][0]);
    assert.ok(
      warningMsg.includes("2 pending writes"),
      `Warning should mention pending count, got: ${warningMsg}`,
    );

    // Pending should be cleared after destroy
    assert.equal(tracker.getPendingUpdates().size, 0);
  });

  it("flush is a no-op when pending map is empty", async () => {
    const store = createMockStore();
    const logger = createMockLogger();
    const tracker = new AccessTracker({ store, logger, debounceMs: 60_000 });
    try {
      await tracker.flush();
      assert.equal(store.updateCalls.length, 0);
    } finally {
      tracker.destroy();
    }
  });
});