resplite 1.2.4 → 1.2.8

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (35)
  1. package/README.md +179 -274
  2. package/package.json +1 -6
  3. package/scripts/create-interface-smoke.js +32 -0
  4. package/skills/README.md +22 -0
  5. package/skills/resplite-command-vertical-slice/SKILL.md +134 -0
  6. package/skills/resplite-ft-search-workbench/SKILL.md +138 -0
  7. package/skills/resplite-migration-cutover-assistant/SKILL.md +138 -0
  8. package/spec/00-INDEX.md +37 -0
  9. package/spec/01-overview-and-goals.md +125 -0
  10. package/spec/02-protocol-and-commands.md +174 -0
  11. package/spec/03-data-model-ttl-transactions.md +157 -0
  12. package/spec/04-cache-architecture.md +171 -0
  13. package/spec/05-scan-admin-implementation.md +379 -0
  14. package/spec/06-migration-strategy-core.md +79 -0
  15. package/spec/07-type-lists.md +202 -0
  16. package/spec/08-type-sorted-sets.md +220 -0
  17. package/spec/{SPEC_D.md → 09-search-ft-commands.md} +3 -1
  18. package/spec/{SPEC_E.md → 10-blocking-commands.md} +3 -1
  19. package/spec/{SPEC_F.md → 11-migration-dirty-registry.md} +61 -147
  20. package/src/commands/object.js +17 -0
  21. package/src/commands/registry.js +2 -0
  22. package/src/engine/engine.js +11 -0
  23. package/src/migration/apply-dirty.js +8 -1
  24. package/src/migration/index.js +48 -4
  25. package/src/migration/migrate-search.js +25 -6
  26. package/src/migration/tracker.js +23 -0
  27. package/test/integration/migration-dirty-tracker.test.js +9 -4
  28. package/test/integration/object-idletime.test.js +51 -0
  29. package/test/unit/migrate-search.test.js +50 -2
  30. package/spec/SPEC_A.md +0 -1171
  31. package/spec/SPEC_B.md +0 -426
  32. package/src/cli/import-from-redis.js +0 -194
  33. package/src/cli/resplite-dirty-tracker.js +0 -92
  34. package/src/cli/resplite-import.js +0 -296
  35. package/test/contract/import-from-redis.test.js +0 -83
@@ -0,0 +1,220 @@
1
+ # RESPLite — Type: Sorted Sets / ZSET (Appendix C)
2
+
3
+ ## C.1 Goals
4
+
5
+ * Provide a Redis-compatible subset of ZSET commands with efficient range and score queries.
6
+ * Persist in SQLite with appropriate indexing.
7
+ * Keep semantics close to Redis for ordering, score ties, and missing elements.
8
+
9
+ ## C.2 Supported Commands (vNext)
10
+
11
+ Recommended minimal set:
12
+
13
+ * `ZADD key [NX|XX] [CH] [INCR] score member [score member ...]` (start with a reduced subset)
14
+ * `ZREM key member [member ...]`
15
+ * `ZCARD key`
16
+ * `ZSCORE key member`
17
+ * `ZRANGE key start stop [WITHSCORES]`
18
+ * `ZREVRANGE key start stop [WITHSCORES]` (optional but useful)
19
+ * `ZRANGEBYSCORE key min max [WITHSCORES] [LIMIT offset count]`
20
+ * `ZREMRANGEBYSCORE key min max` (optional, later)
21
+ * `ZSCAN key cursor [MATCH pattern] [COUNT n]` (later)
22
+
23
+ **Initial simplification for v1 of ZSET:**
24
+
25
+ * Support `ZADD key score member [score member ...]` (no flags) returning number of new elements.
26
+ * Add flags later.
27
+
28
+ ## C.3 Redis Semantics
29
+
30
+ * Sorted set is ordered by:
31
+
32
+ 1. score ascending
33
+ 2. member lexicographically ascending as tie-breaker (Redis behavior)
34
+ * Wrong type errors same pattern as other types.
35
+ * Non-existent key:
36
+
37
+ * `ZCARD` => `0`
38
+ * `ZRANGE` => empty array
39
+ * `ZSCORE` => `nil`
40
+
41
+ ## C.4 Data Model (SQLite)
42
+
43
+ ```sql
44
+ CREATE TABLE redis_zsets (
45
+ key BLOB NOT NULL,
46
+ member BLOB NOT NULL,
47
+ score REAL NOT NULL,
48
+ PRIMARY KEY (key, member),
49
+ FOREIGN KEY(key) REFERENCES redis_keys(key) ON DELETE CASCADE
50
+ );
51
+
52
+ CREATE INDEX redis_zsets_key_score_member_idx
53
+ ON redis_zsets(key, score, member);
54
+ ```
55
+
56
+ Notes:
57
+
58
+ * `PRIMARY KEY (key, member)` allows upsert of member score.
59
+ * Secondary index supports score range scans and stable ordering by `(score, member)`.
60
+
61
+ ## C.5 Command Behavior
62
+
63
+ ### C.5.1 ZADD
64
+
65
+ **Minimal v1 behavior:**
66
+
67
+ * `ZADD key score member [score member ...]`
68
+ * Response: integer count of **new** members added (not updated).
69
+ * If key does not exist: create metadata type zset.
70
+ * For existing member: update score (does not increment return count).
71
+ * Use one transaction:
72
+
73
+ * ensure type
74
+ * upsert all pairs
75
+ * bump key version
76
+
77
+ **Later flags (optional):**
78
+
79
+ * `NX`: only add new
80
+ * `XX`: only update existing
81
+ * `CH`: count changed elements
82
+ * `INCR`: single member increment
83
+
84
+ ### C.5.2 ZREM
85
+
86
+ * Remove one or more members.
87
+ * Response: integer number removed.
88
+ * If zset becomes empty: delete key metadata.
89
+
90
+ ### C.5.3 ZCARD
91
+
92
+ * Return cardinality:
93
+
94
+ * Prefer `SELECT COUNT(*)` (acceptable).
95
+ * If performance requires it: maintain a count in a meta table (not needed initially).
96
+
97
+ ### C.5.4 ZSCORE
98
+
99
+ * Return bulk string representing score (Redis returns string form), or `nil`.
100
+ * Store as `REAL`, but serialize consistently:
101
+
102
+ * Use a stable conversion (avoid scientific notation surprises if possible).
103
+ * Accept that exact formatting may differ from Redis; document if needed.
104
+
105
+ ### C.5.5 ZRANGE (by rank)
106
+
107
+ **Request:** `ZRANGE key start stop [WITHSCORES]`
108
+
109
+ Rank rules like LRANGE:
110
+
111
+ * start/stop inclusive
112
+ * negative indexes from end
113
+ * clamp
114
+
115
+ Implementation:
116
+
117
+ * let `len = ZCARD`
118
+ * normalize range
119
+ * SQL for ordering:
120
+
121
+ ```sql
122
+ SELECT member, score
123
+ FROM redis_zsets
124
+ WHERE key=?
125
+ ORDER BY score ASC, member ASC
126
+ LIMIT ? OFFSET ?;
127
+ ```
128
+ * Response:
129
+
130
+ * without WITHSCORES: array of members
131
+ * with WITHSCORES: array `[member1, score1, member2, score2, ...]`
132
+
133
+ ### C.5.6 ZRANGEBYSCORE
134
+
135
+ **Request:** `ZRANGEBYSCORE key min max [WITHSCORES] [LIMIT offset count]`
136
+
137
+ Score bounds rules:
138
+
139
+ * Support numeric `min/max`.
140
+ * Optional later: `(` exclusive bounds, `-inf`, `+inf`.
141
+
142
+ Implementation:
143
+
144
+ ```sql
145
+ SELECT member, score
146
+ FROM redis_zsets
147
+ WHERE key=? AND score >= ? AND score <= ?
148
+ ORDER BY score ASC, member ASC
149
+ LIMIT ? OFFSET ?;
150
+ ```
151
+
152
+ Return format same as `ZRANGE`.
153
+
154
+ ## C.6 Expiration and Cache
155
+
156
+ * TTL is in `redis_keys.expires_at`.
157
+ * Lazy expiration removes zset rows via cascade.
158
+ * Cache:
159
+
160
+ * Cache `ZSCORE` lookups optionally (key+member) if beneficial.
161
+ * Avoid caching full zsets early.
162
+ * Always invalidate on `ZADD/ZREM`.
163
+
164
+ ## C.7 Complexity Targets
165
+
166
+ * `ZADD`: effectively O(k log n) via index maintenance; practical for SQLite
167
+ * `ZRANGE`: O(m) over returned slice with index support
168
+ * `ZRANGEBYSCORE`: O(m) over match range with index support
169
+ * `ZSCORE`: O(log n) via PK on (key, member)
170
+
171
+ ## C.8 Tests (Required)
172
+
173
+ * Correct ordering:
174
+
175
+ * by score, then by member for ties
176
+ * Rank normalization:
177
+
178
+ * negative indices
179
+ * out of range
180
+ * start > stop -> empty
181
+ * Score range:
182
+
183
+ * boundaries inclusive
184
+ * LIMIT behavior
185
+ * Wrong type behavior
186
+ * TTL interaction and lazy deletion
187
+ * Persistence across restart
188
+ * Binary member support
189
+ * Concurrency sanity:
190
+
191
+ * multiple clients doing `ZADD` on same key does not corrupt ordering or counts
192
+
193
+ ---
194
+
195
+ ## Integration notes (LIST and ZSET)
196
+
197
+ ### Type constants
198
+
199
+ Extend `redis_keys.type` enum:
200
+
201
+ * `4 = list`
202
+ * `5 = zset`
203
+
204
+ ### Wrong-type enforcement
205
+
206
+ Any command on a key must:
207
+
208
+ 1. run lazy-expire check
209
+ 2. read `redis_keys.type`
210
+ 3. if mismatch, return WRONGTYPE
211
+
212
+ ### Key deletion when empty
213
+
214
+ For list and zset:
215
+
216
+ * if becomes empty, delete metadata key row (and meta/items rows if any)
217
+
218
+ ### SCAN behavior
219
+
220
+ * SCAN should include list/zset keys automatically (it reads from `redis_keys`).
@@ -1,4 +1,6 @@
1
- # Appendix D: Search (FT.*) Specification
1
+ # RESPLite Search (FT.*) Specification
2
+
3
+ Originally Appendix D. Goals, data model, FT.CREATE/ADD/DEL/SEARCH/SUG* behavior, SQL templates, parser grammar.
2
4
 
3
5
  ## D.1 Goals
4
6
 
@@ -1,4 +1,6 @@
1
- # Appendix E: Blocking Commands Specification (vNext)
1
+ # RESPLite Blocking Commands Specification (vNext)
2
+
3
+ Originally Appendix E. BLPOP/BRPOP semantics, wait model, wakeup, tests.
2
4
 
3
5
  ## E.1 Goals
4
6
 
@@ -1,4 +1,6 @@
1
- # Appendix F: Migration with Dirty Key Registry (Keyspace Notifications)
1
+ # RESPLite Migration with Dirty Key Registry (Keyspace Notifications)
2
+
3
+ Originally Appendix F. Bulk import, dirty key tracker, delta apply, search index migration.
2
4
 
3
5
  ## F.1 Goals
4
6
 
@@ -13,7 +15,7 @@
13
15
  * Perfect change-data-capture guarantees equivalent to replication logs.
14
16
  * Distributed migration across multiple import workers with strict ordering semantics.
15
17
  * Full fidelity for unsupported Redis data types (streams, modules, Lua scripts, etc.).
16
- * **Search indices (FT.\*):** Keyspace migration (`bulk` / `apply-dirty`) copies only the Redis KV data (strings, hashes, sets, lists, zsets). RediSearch index schemas and documents are migrated separately via the `migrate-search` step (§F.10).
18
+ * **Search indices (FT.\*):** Keyspace migration (`bulk` / `apply-dirty`) copies only the Redis KV data (strings, hashes, sets, lists, zsets). RediSearch index schemas and documents are migrated separately via the `migrate-search` step (§F.12).
17
19
 
18
20
  ---
19
21
 
@@ -71,7 +73,7 @@ A practical baseline is:
71
73
  * `z` (zset)
72
74
  * `t` (set)
73
75
 
74
- If you need the broadest coverage, use all (often `AKE`-style in some docs), but configuration specifics vary by Redis version and operational policy. The migration tool should:
76
+ If you need the broadest coverage, use "all" (often `AKE`-style in some docs), but configuration specifics vary by Redis version and operational policy. The migration tool should:
75
77
 
76
78
  * detect whether notifications are enabled
77
79
  * refuse or warn if they are not enabled
@@ -198,7 +200,7 @@ If only keyspace notifications are available, you will receive:
198
200
  * channel includes the key, payload includes the event
199
201
  You must support both, but keyevent is simpler.
200
202
 
201
- ## F.6.2 Events to treat as dirty
203
+ ## F.6.2 Events to treat as "dirty"
202
204
 
203
205
  Mark key as dirty when you see any of:
204
206
 
@@ -209,7 +211,7 @@ Mark key as dirty when you see any of:
209
211
  * `zadd`, `zrem`, etc.
210
212
  * `expire`, `pexpire`, `persist` (TTL changes)
211
213
 
212
- ## F.6.3 Events to treat as deleted
214
+ ## F.6.3 Events to treat as "deleted"
213
215
 
214
216
  Mark key as deleted when you see:
215
217
 
@@ -225,7 +227,7 @@ Keyspace notifications are not a guaranteed durable log:
225
227
  * if the tracker disconnects, events can be missed
226
228
  Mitigation:
227
229
  * treat the final cutover delta as authoritative with the application frozen
228
- * optionally run one short SCAN after freeze as a safety sweep if you want extra assurance
230
+ * optionally run one short SCAN after freeze as a "safety sweep" if you want extra assurance
229
231
 
230
232
  ---
231
233
 
@@ -286,7 +288,7 @@ The importer must support:
286
288
  ## F.8.1 When to run delta
287
289
 
288
290
  * During cutover window, with application writes frozen.
289
- * Optional: run a pre-delta while still live to reduce final delta size.
291
+ * Optional: run a "pre-delta" while still live to reduce final delta size.
290
292
 
291
293
  ## F.8.2 Delta algorithm
292
294
 
@@ -331,7 +333,7 @@ After freeze begins and delta completes:
331
333
 
332
334
  ---
333
335
 
334
- # F.9 Suggested End-to-End Migration Process (Example)
336
+ # F.9 Suggested End-to-End Migration Process (Programmatic Example)
335
337
 
336
338
  Assume:
337
339
 
@@ -341,119 +343,55 @@ Assume:
341
343
  * Supported types: string/hash/set/list/zset
342
344
  * Goal: minimal downtime
343
345
 
344
- ## Step 0: Preflight
345
-
346
- ```bash id="4bct0i"
347
- resplite-import preflight \
348
- --from redis://10.0.0.10:6379 \
349
- --to ./resplite.db
350
- ```
351
-
352
- Outputs:
353
-
354
- * estimated key count
355
- * type distribution sample
356
- * recommended concurrency and scan count
357
- * detection of unsupported types
358
-
359
- ## Step 1: Start Dirty Key Tracker
360
-
361
- Start the tracker first, so it captures changes during the entire bulk run.
362
-
363
- ```bash id="6km4l7"
364
- resplite-dirty-tracker start \
365
- --run-id run_2026_03_03 \
366
- --from redis://10.0.0.10:6379 \
367
- --to ./resplite.db \
368
- --channels keyevent
369
- ```
370
-
371
- ## Step 2: Run Bulk Import Online
372
-
373
- ```bash id="a9g2aa"
374
- resplite-import bulk \
375
- --run-id run_2026_03_03 \
376
- --from redis://10.0.0.10:6379 \
377
- --to ./resplite.db \
378
- --scan-count 1000 \
379
- --max-concurrency 32 \
380
- --max-rps 2000 \
381
- --batch-keys 200 \
382
- --batch-bytes 64MB \
383
- --ttl-mode preserve \
384
- --resume
385
- ```
386
-
387
- Monitor progress:
388
-
389
- ```bash id="rkf6uv"
390
- resplite-import status --run-id run_2026_03_03 --to ./resplite.db
391
- ```
392
-
393
- ## Step 3 (Optional): Pre-Delta While Still Live
394
-
395
- Apply dirty keys while Redis is still live to reduce final delta size:
396
-
397
- ```bash id="v5xc6f"
398
- resplite-import apply-dirty \
399
- --run-id run_2026_03_03 \
400
- --from redis://10.0.0.10:6379 \
401
- --to ./resplite.db \
402
- --max-concurrency 32 \
403
- --max-rps 2000 \
404
- --batch-keys 200 \
405
- --ttl-mode preserve
406
- ```
407
-
408
- You can run this repeatedly (or continuously) while bulk is still running.
409
-
410
- ## Step 4: Cutover Window (Freeze Writes)
411
-
412
- * Put the application into maintenance mode (freeze writes to Redis).
413
- * Keep dirty tracker running for a moment to capture any last writes.
414
-
415
- ## Step 5: Final Delta Apply
416
-
417
- With writes frozen, apply all remaining dirty keys:
346
+ ```javascript id="f9programmatic"
347
+ import { stdin, stdout } from 'node:process';
348
+ import { createInterface } from 'node:readline/promises';
349
+ import { createMigration } from 'resplite/migration';
350
+
351
+ const m = createMigration({
352
+ from: 'redis://10.0.0.10:6379',
353
+ to: './resplite.db',
354
+ runId: 'run_2026_03_03',
355
+ scanCount: 1000,
356
+ batchKeys: 200,
357
+ batchBytes: 64 * 1024 * 1024,
358
+ maxRps: 2000,
359
+ });
418
360
 
419
- ```bash id="v0x8xo"
420
- resplite-import apply-dirty \
421
- --run-id run_2026_03_03 \
422
- --from redis://10.0.0.10:6379 \
423
- --to ./resplite.db \
424
- --max-concurrency 64 \
425
- --max-rps 5000 \
426
- --batch-keys 500 \
427
- --ttl-mode preserve
428
- ```
361
+ const info = await m.preflight();
362
+ await m.enableKeyspaceNotifications();
363
+ await m.startDirtyTracker();
364
+
365
+ const total = info.keyCountEstimate || 1;
366
+ await m.bulk({
367
+ resume: true,
368
+ onProgress: (r) => {
369
+ const pct = ((r.scanned_keys / total) * 100).toFixed(1);
370
+ console.log(`bulk ${pct}% scanned=${r.scanned_keys} migrated=${r.migrated_keys}`);
371
+ },
372
+ });
429
373
 
430
- Verify no remaining dirty keys:
374
+ console.log(m.status());
431
375
 
432
- ```bash id="0ca1y7"
433
- resplite-import status --run-id run_2026_03_03 --to ./resplite.db
434
- ```
376
+ const rl = createInterface({ input: stdin, output: stdout });
377
+ await rl.question('Freeze writes to Redis, then press Enter to apply the final dirty set...');
378
+ rl.close();
435
379
 
436
- ## Step 6: Stop Dirty Tracker and Switch Clients
380
+ await m.applyDirty();
381
+ await m.stopDirtyTracker();
437
382
 
438
- Stop tracker:
383
+ const verify = await m.verify({ samplePct: 0.5, maxSample: 10000 });
384
+ console.log(verify);
439
385
 
440
- ```bash id="1v1u1j"
441
- resplite-dirty-tracker stop --run-id run_2026_03_03 --to ./resplite.db
386
+ await m.close();
442
387
  ```
443
388
 
444
- Switch application Redis endpoint to RespLite server (RESP port).
445
-
446
- ## Step 7: Verification (Post-Cutover)
389
+ Notes:
447
390
 
448
- Run a sampling verification:
449
-
450
- ```bash id="p6w5q6"
451
- resplite-import verify \
452
- --run-id run_2026_03_03 \
453
- --from redis://10.0.0.10:6379 \
454
- --to ./resplite.db \
455
- --sample 0.5%
456
- ```
391
+ * Start dirty tracking before bulk so it captures writes during the whole import.
392
+ * Keep the tracker running until after the final `applyDirty()`.
393
+ * The cutover window is: freeze writes to Redis, apply the remaining dirty set, stop the tracker, then switch clients to RespLite.
394
+ * `status()` is synchronous and can be polled at any point from the destination DB.
457
395
 
458
396
  ---
459
397
 
@@ -482,7 +420,7 @@ Implementation may use:
482
420
 
483
421
  * updating `migration_runs.status`
484
422
  * a simple control file
485
- * or a CLI that updates the SQLite run row
423
+ * or another control surface that updates the SQLite run row
486
424
 
487
425
  ---
488
426
 
@@ -498,7 +436,7 @@ If dirty tracker disconnects:
498
436
 
499
437
  ## F.11.2 Importer crash/restart
500
438
 
501
- * On restart with `--resume`, continue from stored cursor.
439
+ * On restart with resume enabled, continue from stored cursor.
502
440
  * Already migrated keys may be overwritten idempotently.
503
441
 
504
442
  ## F.11.3 Idempotency requirements
@@ -510,24 +448,24 @@ If dirty tracker disconnects:
510
448
 
511
449
  ---
512
450
 
513
- # F.10 Search Index Migration (FT.* / RediSearch)
451
+ # F.12 Search Index Migration (FT.* / RediSearch)
514
452
 
515
- ## F.10.1 Overview
453
+ ## F.12.1 Overview
516
454
 
517
455
  When the source is a Redis instance with **RediSearch** (Redis Stack or the `redis/search` module), search indices can be migrated with the `migrate-search` step. This step is independent of the KV bulk import and can be run at any time (before or after `bulk`).
518
456
 
519
- ## F.10.2 Algorithm
457
+ ## F.12.2 Algorithm
520
458
 
521
459
  For each index in the source:
522
460
 
523
461
  1. **`FT._LIST`** → enumerate all index names.
524
462
  2. **`FT.INFO <name>`** → read `index_definition` (key type, prefix patterns) and `attributes` (field names and types).
525
- 3. **Schema mapping** (see §F.10.3).
463
+ 3. **Schema mapping** (see §F.12.3).
526
464
  4. **`FT.CREATE`** in RespLite with the mapped schema. Skip if already exists (controlled by `skipExisting`).
527
465
  5. **SCAN** keys matching each index prefix → **HGETALL** → `addDocument` in SQLite batches.
528
466
  6. **`FT.SUGGET "" MAX n WITHSCORES`** → import suggestions into RespLite.
529
467
 
530
- ## F.10.3 Field type mapping
468
+ ## F.12.3 Field type mapping
531
469
 
532
470
  | RediSearch type | RespLite type | Notes |
533
471
  |-----------------|---------------|-------|
@@ -538,42 +476,18 @@ For each index in the source:
538
476
 
539
477
  RespLite requires a `payload` TEXT field. If none of the source fields maps to `payload`, a `payload` field is added automatically and synthesised at import time by concatenating all other text values.
540
478
 
541
- ## F.10.4 Constraints
479
+ ## F.12.4 Constraints
542
480
 
543
481
  * Only **HASH**-based indices are supported (`key_type = HASH`). JSON indices (RedisJSON) are skipped with an error.
544
482
  * Index names must match `[A-Za-z][A-Za-z0-9:_-]{0,63}`. Indices with invalid names are skipped with an error.
545
483
  * `FT.SUGGET` has no cursor; suggestions are imported up to `maxSuggestions` (default 10 000).
546
484
  * Document score is read from the `__score` or `score` hash field if present; defaults to `1.0`.
547
485
 
548
- ## F.10.5 Graceful shutdown
486
+ ## F.12.5 Graceful shutdown
549
487
 
550
488
  Same pattern as `bulk` (§F.7.2.1): SIGINT/SIGTERM finishes the current document, closes the SQLite DB cleanly, and exits with a non-zero code.
551
489
 
552
- ## F.10.6 CLI
553
-
554
- ```bash
555
- # Migrate all RediSearch indices
556
- resplite-import migrate-search \
557
- --from redis://10.0.0.10:6379 \
558
- --to ./resplite.db
559
-
560
- # Migrate specific indices only
561
- resplite-import migrate-search \
562
- --from redis://10.0.0.10:6379 \
563
- --to ./resplite.db \
564
- --index products \
565
- --index articles
566
-
567
- # Options
568
- # --scan-count N SCAN COUNT hint (default 500)
569
- # --max-rps N throttle (default unlimited)
570
- # --batch-docs N docs per SQLite transaction (default 200)
571
- # --max-suggestions N cap for FT.SUGGET (default 10000)
572
- # --no-skip overwrite if index already exists
573
- # --no-suggestions skip suggestion import
574
- ```
575
-
576
- ## F.10.7 Programmatic API
490
+ ## F.12.6 Programmatic API
577
491
 
578
492
  ```javascript
579
493
  const m = createMigration({ from, to, runId });
@@ -591,7 +505,7 @@ const result = await m.migrateSearch({
591
505
 
592
506
  ---
593
507
 
594
- # F.12 Operational Guidance (Large datasets)
508
+ # F.13 Operational Guidance (Large datasets)
595
509
 
596
510
  * Use a dedicated Redis replica for reads if possible to reduce load on primary.
597
511
  * Keep `max_concurrency` conservative at first; increase only if Redis latency remains stable.
@@ -0,0 +1,17 @@
1
+ /**
2
+ * OBJECT subcommand key - introspection (IDLETIME: seconds since last write).
3
+ * Only OBJECT IDLETIME key is supported; uses redis_keys.updated_at.
4
+ */
5
+
6
+ export function handleObject(engine, args) {
7
+ if (!args || args.length < 2) {
8
+ return { error: 'ERR wrong number of arguments for \'OBJECT\' command' };
9
+ }
10
+ const sub = (Buffer.isBuffer(args[0]) ? args[0].toString('utf8') : String(args[0])).toUpperCase();
11
+ if (sub !== 'IDLETIME') {
12
+ return { error: 'ERR unknown subcommand or wrong number of arguments for \'OBJECT\'. Try OBJECT HELP.' };
13
+ }
14
+ const key = args[1];
15
+ const seconds = engine.objectIdletime(key);
16
+ return seconds;
17
+ }
@@ -11,6 +11,7 @@ import * as set from './set.js';
11
11
  import * as del from './del.js';
12
12
  import * as exists from './exists.js';
13
13
  import * as type from './type.js';
14
+ import * as object from './object.js';
14
15
  import * as mget from './mget.js';
15
16
  import * as mset from './mset.js';
16
17
  import * as expire from './expire.js';
@@ -75,6 +76,7 @@ const HANDLERS = new Map([
75
76
  ['DEL', (e, a) => del.handleDel(e, a)],
76
77
  ['EXISTS', (e, a) => exists.handleExists(e, a)],
77
78
  ['TYPE', (e, a) => type.handleType(e, a)],
79
+ ['OBJECT', (e, a) => object.handleObject(e, a)],
78
80
  ['MGET', (e, a) => mget.handleMget(e, a)],
79
81
  ['MSET', (e, a) => mset.handleMset(e, a)],
80
82
  ['EXPIRE', (e, a) => expire.handleExpire(e, a)],
@@ -404,6 +404,17 @@ export function createEngine(opts = {}) {
404
404
  return typeName(meta);
405
405
  },
406
406
 
407
+ /**
408
+ * OBJECT IDLETIME: whole seconds since the key's last write, i.e. floor((clock() - meta.updatedAt) / 1000), where updatedAt mirrors redis_keys.updated_at.
409
+ * Returns null when the key is missing or has no updatedAt. NOTE(review): real Redis replies with an error ("no such key"), not nil, for a missing key — confirm this divergence is intentional.
410
+ */
411
+ objectIdletime(key) {
412
+ const meta = getKeyMeta(key);
413
+ if (!meta || meta.updatedAt == null) return null;
414
+ const elapsedMs = clock() - meta.updatedAt;
415
+ return Math.floor(elapsedMs / 1000);
416
+ },
417
+
407
418
  scan(cursor, options = {}) {
408
419
  const count = options.count ?? 10;
409
420
  const offset = parseInt(String(cursor), 10) || 0;
@@ -25,9 +25,10 @@ function sleep(ms) {
25
25
  * @param {string} [options.pragmaTemplate='default']
26
26
  * @param {number} [options.batch_keys=200]
27
27
  * @param {number} [options.max_rps=0]
28
+ * @param {(run: object) => void | Promise<void>} [options.onProgress] - Called after each batch with the current run row.
28
29
  */
29
30
  export async function runApplyDirty(redisClient, dbPath, runId, options = {}) {
30
- const { pragmaTemplate = 'default', batch_keys = 200, max_rps = 0 } = options;
31
+ const { pragmaTemplate = 'default', batch_keys = 200, max_rps = 0, onProgress } = options;
31
32
 
32
33
  const db = openDb(dbPath, { pragmaTemplate });
33
34
  const run = getRun(db, runId);
@@ -56,6 +57,8 @@ export async function runApplyDirty(redisClient, dbPath, runId, options = {}) {
56
57
  const deletedBatch = getDirtyBatch(db, runId, 'deleted', batch_keys);
57
58
  if (dirtyBatch.length === 0 && deletedBatch.length === 0) break;
58
59
 
60
+ const batchSize = dirtyBatch.length + deletedBatch.length;
61
+
59
62
  // ── Re-import (or remove) keys that changed while bulk was running ──
60
63
  for (const { key: keyBuf } of dirtyBatch) {
61
64
  r = getRun(db, runId);
@@ -122,6 +125,10 @@ export async function runApplyDirty(redisClient, dbPath, runId, options = {}) {
122
125
  markDirtyState(db, runId, keyBuf, 'error');
123
126
  }
124
127
  }
128
+ if (batchSize > 0 && onProgress) {
129
+ const run = getRun(db, runId);
130
+ if (run) Promise.resolve(onProgress(run)).catch(() => {});
131
+ }
125
132
  }
126
133
 
127
134
  return getRun(db, runId);