resplite 1.0.0 → 1.0.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +84 -21
- package/package.json +5 -1
- package/spec/SPEC_F.md +505 -0
- package/src/blocking/manager.js +183 -0
- package/src/cli/resplite-dirty-tracker.js +124 -0
- package/src/cli/resplite-import.js +237 -0
- package/src/commands/blpop.js +50 -0
- package/src/commands/brpop.js +50 -0
- package/src/commands/registry.js +11 -5
- package/src/engine/engine.js +11 -3
- package/src/migration/apply-dirty.js +97 -0
- package/src/migration/bulk.js +181 -0
- package/src/migration/import-one.js +106 -0
- package/src/migration/preflight.js +62 -0
- package/src/migration/registry.js +222 -0
- package/src/migration/verify.js +191 -0
- package/src/server/connection.js +55 -13
- package/src/storage/sqlite/db.js +2 -0
- package/src/storage/sqlite/migration-schema.js +66 -0
- package/tasks/todo.md +19 -0
- package/test/integration/blocking.test.js +107 -0
- package/test/unit/migration-registry.test.js +127 -0
package/README.md
CHANGED
|
@@ -79,14 +79,14 @@ await srv.close();
|
|
|
79
79
|
```javascript
|
|
80
80
|
// SET with expiration
|
|
81
81
|
await client.set('session:abc', JSON.stringify({ user: 'alice' }));
|
|
82
|
-
await client.expire('session:abc', 3600);
|
|
83
|
-
console.log(await client.ttl('session:abc'));
|
|
82
|
+
await client.expire('session:abc', 3600); // expire in 1 hour
|
|
83
|
+
console.log(await client.ttl('session:abc')); // → 3600 (approx)
|
|
84
84
|
|
|
85
85
|
// Atomic counters
|
|
86
86
|
await client.set('visits', '0');
|
|
87
87
|
await client.incr('visits');
|
|
88
88
|
await client.incrBy('visits', 10);
|
|
89
|
-
console.log(await client.get('visits'));
|
|
89
|
+
console.log(await client.get('visits')); // → "11"
|
|
90
90
|
|
|
91
91
|
// Multi-key operations
|
|
92
92
|
await client.mSet(['k1', 'v1', 'k2', 'v2']);
|
|
@@ -94,9 +94,9 @@ const values = await client.mGet(['k1', 'k2', 'missing']);
|
|
|
94
94
|
console.log(values); // → ["v1", "v2", null]
|
|
95
95
|
|
|
96
96
|
// Key existence and deletion
|
|
97
|
-
console.log(await client.exists('k1'));
|
|
97
|
+
console.log(await client.exists('k1')); // → 1
|
|
98
98
|
await client.del('k1');
|
|
99
|
-
console.log(await client.exists('k1'));
|
|
99
|
+
console.log(await client.exists('k1')); // → 0
|
|
100
100
|
```
|
|
101
101
|
|
|
102
102
|
### Hashes
|
|
@@ -104,13 +104,13 @@ console.log(await client.exists('k1')); // → 0
|
|
|
104
104
|
```javascript
|
|
105
105
|
await client.hSet('user:1', { name: 'Martin', age: '42', city: 'BCN' });
|
|
106
106
|
|
|
107
|
-
console.log(await client.hGet('user:1', 'name'));
|
|
107
|
+
console.log(await client.hGet('user:1', 'name')); // → "Martin"
|
|
108
108
|
|
|
109
109
|
const user = await client.hGetAll('user:1');
|
|
110
110
|
console.log(user); // → { name: "Martin", age: "42", city: "BCN" }
|
|
111
111
|
|
|
112
112
|
await client.hIncrBy('user:1', 'age', 1);
|
|
113
|
-
console.log(await client.hGet('user:1', 'age'));
|
|
113
|
+
console.log(await client.hGet('user:1', 'age')); // → "43"
|
|
114
114
|
|
|
115
115
|
console.log(await client.hExists('user:1', 'email')); // → false
|
|
116
116
|
```
|
|
@@ -119,8 +119,8 @@ console.log(await client.hExists('user:1', 'email')); // → false
|
|
|
119
119
|
|
|
120
120
|
```javascript
|
|
121
121
|
await client.sAdd('tags', ['node', 'sqlite', 'redis']);
|
|
122
|
-
console.log(await client.sMembers('tags'));
|
|
123
|
-
console.log(await client.sIsMember('tags', 'node'));
|
|
122
|
+
console.log(await client.sMembers('tags')); // → ["node", "sqlite", "redis"]
|
|
123
|
+
console.log(await client.sIsMember('tags', 'node')); // → true
|
|
124
124
|
console.log(await client.sCard('tags')); // → 3
|
|
125
125
|
|
|
126
126
|
await client.sRem('tags', 'redis');
|
|
@@ -130,17 +130,34 @@ console.log(await client.sCard('tags')); // → 2
|
|
|
130
130
|
### Lists
|
|
131
131
|
|
|
132
132
|
```javascript
|
|
133
|
-
await client.lPush('queue', ['c', 'b', 'a']);
|
|
134
|
-
await client.rPush('queue', ['d', 'e']);
|
|
133
|
+
await client.lPush('queue', ['c', 'b', 'a']); // push left: a, b, c
|
|
134
|
+
await client.rPush('queue', ['d', 'e']); // push right: d, e
|
|
135
135
|
|
|
136
|
-
console.log(await client.lLen('queue'));
|
|
137
|
-
console.log(await client.lRange('queue', 0, -1));
|
|
138
|
-
console.log(await client.lIndex('queue', 0));
|
|
136
|
+
console.log(await client.lLen('queue')); // → 5
|
|
137
|
+
console.log(await client.lRange('queue', 0, -1)); // → ["a", "b", "c", "d", "e"]
|
|
138
|
+
console.log(await client.lIndex('queue', 0)); // → "a"
|
|
139
139
|
|
|
140
|
-
console.log(await client.lPop('queue'));
|
|
141
|
-
console.log(await client.rPop('queue'));
|
|
140
|
+
console.log(await client.lPop('queue')); // → "a"
|
|
141
|
+
console.log(await client.rPop('queue')); // → "e"
|
|
142
142
|
```
|
|
143
143
|
|
|
144
|
+
### Blocking list commands (BLPOP / BRPOP)
|
|
145
|
+
|
|
146
|
+
`BLPOP` and `BRPOP` block until an element is available or a timeout (seconds) is reached. Use them for simple queues or coordination between producers and consumers.
|
|
147
|
+
|
|
148
|
+
```javascript
|
|
149
|
+
// Consumer: block up to 10 seconds for an element from "tasks" or "fallback"
|
|
150
|
+
const result = await client.blPop(['tasks', 'fallback'], 10);
|
|
151
|
+
// result is { key: 'tasks', element: 'item1' } or null on timeout
|
|
152
|
+
|
|
153
|
+
// Producer (e.g. another client or process)
|
|
154
|
+
await client.rPush('tasks', 'item1');
|
|
155
|
+
```
|
|
156
|
+
|
|
157
|
+
- **Timeout**: `0` = block indefinitely; `> 0` = block up to that many seconds.
|
|
158
|
+
- **Return**: `{ key, element }` on success, or `null` on timeout.
|
|
159
|
+
- **Multi-key**: Keys are checked in order; the first key that has an element wins. One push wakes at most one blocked client (FIFO per key).
|
|
160
|
+
|
|
144
161
|
### Sorted sets
|
|
145
162
|
|
|
146
163
|
```javascript
|
|
@@ -239,7 +256,7 @@ await srv2.close();
|
|
|
239
256
|
| **TTL** | EXPIRE, PEXPIRE, TTL, PTTL, PERSIST |
|
|
240
257
|
| **Hashes** | HSET, HGET, HMGET, HGETALL, HDEL, HEXISTS, HINCRBY |
|
|
241
258
|
| **Sets** | SADD, SREM, SMEMBERS, SISMEMBER, SCARD |
|
|
242
|
-
| **Lists** | LPUSH, RPUSH, LLEN, LRANGE, LINDEX, LPOP, RPOP |
|
|
259
|
+
| **Lists** | LPUSH, RPUSH, LLEN, LRANGE, LINDEX, LPOP, RPOP, BLPOP, BRPOP |
|
|
243
260
|
| **Sorted sets** | ZADD, ZREM, ZCARD, ZSCORE, ZRANGE, ZRANGEBYSCORE |
|
|
244
261
|
| **Search (FT.\*)** | FT.CREATE, FT.INFO, FT.ADD, FT.DEL, FT.SEARCH, FT.SUGADD, FT.SUGGET, FT.SUGDEL |
|
|
245
262
|
| **Introspection** | TYPE, SCAN |
|
|
@@ -252,14 +269,18 @@ await srv2.close();
|
|
|
252
269
|
- Streams (XADD, XRANGE, etc.)
|
|
253
270
|
- Lua (EVAL, EVALSHA)
|
|
254
271
|
- Transactions (MULTI, EXEC, WATCH)
|
|
255
|
-
-
|
|
272
|
+
- BRPOPLPUSH, BLMOVE (blocking list moves)
|
|
256
273
|
- SELECT (multiple logical DBs)
|
|
257
274
|
|
|
258
275
|
Unsupported commands return: `ERR command not supported yet`.
|
|
259
276
|
|
|
260
277
|
## Migration from Redis
|
|
261
278
|
|
|
262
|
-
|
|
279
|
+
Migration supports two modes:
|
|
280
|
+
|
|
281
|
+
### Simple one-shot import (legacy)
|
|
282
|
+
|
|
283
|
+
For small datasets or when downtime is acceptable:
|
|
263
284
|
|
|
264
285
|
```bash
|
|
265
286
|
# Default: redis://127.0.0.1:6379 → ./data.db
|
|
@@ -275,7 +296,47 @@ npm run import-from-redis -- --db ./migrated.db --host 127.0.0.1 --port 6379
|
|
|
275
296
|
npm run import-from-redis -- --db ./migrated.db --pragma-template performance
|
|
276
297
|
```
|
|
277
298
|
|
|
278
|
-
|
|
299
|
+
### Minimal-downtime migration (SPEC_F)
|
|
300
|
+
|
|
301
|
+
For large datasets (~30 GB), use the Dirty Key Registry flow so the bulk of the migration runs online and only a short cutover is needed:
|
|
302
|
+
|
|
303
|
+
1. **Preflight** – Check Redis, key count, type distribution, and that keyspace notifications are enabled:
|
|
304
|
+
```bash
|
|
305
|
+
npx resplite-import preflight --from redis://10.0.0.10:6379 --to ./resplite.db
|
|
306
|
+
```
|
|
307
|
+
|
|
308
|
+
2. **Start dirty-key tracker** – Captures keys modified during bulk (requires `notify-keyspace-events` in Redis):
|
|
309
|
+
```bash
|
|
310
|
+
npx resplite-dirty-tracker start --run-id run_001 --from redis://10.0.0.10:6379 --to ./resplite.db
|
|
311
|
+
```
|
|
312
|
+
|
|
313
|
+
3. **Bulk import** – SCAN and copy all keys; progress is checkpointed and resumable:
|
|
314
|
+
```bash
|
|
315
|
+
npx resplite-import bulk --run-id run_001 --from redis://10.0.0.10:6379 --to ./resplite.db \
|
|
316
|
+
--scan-count 1000 --max-rps 2000 --batch-keys 200 --batch-bytes 64MB --resume
|
|
317
|
+
```
|
|
318
|
+
|
|
319
|
+
4. **Monitor** – Check run and dirty-key counts:
|
|
320
|
+
```bash
|
|
321
|
+
npx resplite-import status --run-id run_001 --to ./resplite.db
|
|
322
|
+
```
|
|
323
|
+
|
|
324
|
+
5. **Cutover** – Freeze app writes to Redis, then apply remaining dirty keys:
|
|
325
|
+
```bash
|
|
326
|
+
npx resplite-import apply-dirty --run-id run_001 --from redis://10.0.0.10:6379 --to ./resplite.db
|
|
327
|
+
```
|
|
328
|
+
|
|
329
|
+
6. **Stop tracker and switch** – Stop the tracker and point clients to RespLite:
|
|
330
|
+
```bash
|
|
331
|
+
npx resplite-dirty-tracker stop --run-id run_001 --to ./resplite.db
|
|
332
|
+
```
|
|
333
|
+
|
|
334
|
+
7. **Verify** – Optional sampling check between Redis and destination:
|
|
335
|
+
```bash
|
|
336
|
+
npx resplite-import verify --run-id run_001 --from redis://10.0.0.10:6379 --to ./resplite.db --sample 0.5%
|
|
337
|
+
```
|
|
338
|
+
|
|
339
|
+
Then start RespLite with the migrated DB: `RESPLITE_DB=./resplite.db npm start`.
|
|
279
340
|
|
|
280
341
|
## Benchmark (Redis vs RESPLite)
|
|
281
342
|
|
|
@@ -303,7 +364,9 @@ npm run benchmark -- --iterations 10000 --redis-port 6379 --resplite-port 6380
|
|
|
303
364
|
| `npm run test:contract` | Contract tests (redis client) |
|
|
304
365
|
| `npm run test:stress` | Stress tests |
|
|
305
366
|
| `npm run benchmark` | Comparative benchmark Redis vs RESPLite |
|
|
306
|
-
| `npm run import-from-redis` |
|
|
367
|
+
| `npm run import-from-redis` | One-shot import from Redis into a SQLite DB |
|
|
368
|
+
| `npx resplite-import` (preflight, bulk, status, apply-dirty, verify) | Migration CLI (SPEC_F minimal-downtime flow) |
|
|
369
|
+
| `npx resplite-dirty-tracker <start\|stop>` | Dirty-key tracker for migration cutover |
|
|
307
370
|
|
|
308
371
|
## Specification
|
|
309
372
|
|
package/package.json
CHANGED
|
@@ -1,9 +1,13 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "resplite",
|
|
3
|
-
"version": "1.0.0",
|
|
3
|
+
"version": "1.0.4",
|
|
4
4
|
"description": "A RESP2 server with practical Redis compatibility, backed by SQLite",
|
|
5
5
|
"type": "module",
|
|
6
6
|
"main": "src/index.js",
|
|
7
|
+
"bin": {
|
|
8
|
+
"resplite-import": "src/cli/resplite-import.js",
|
|
9
|
+
"resplite-dirty-tracker": "src/cli/resplite-dirty-tracker.js"
|
|
10
|
+
},
|
|
7
11
|
"exports": {
|
|
8
12
|
".": "./src/index.js",
|
|
9
13
|
"./embed": "./src/embed.js"
|
package/spec/SPEC_F.md
ADDED
|
@@ -0,0 +1,505 @@
|
|
|
1
|
+
# Appendix F: Migration with Dirty Key Registry (Keyspace Notifications)
|
|
2
|
+
|
|
3
|
+
## F.1 Goals
|
|
4
|
+
|
|
5
|
+
* Migrate a large Redis dataset (example: ~30 GB) into RespLite with minimal downtime.
|
|
6
|
+
* Perform the bulk of the migration online while the application continues using Redis.
|
|
7
|
+
* Capture keys modified during the bulk copy into a **persistent Dirty Key Registry**.
|
|
8
|
+
* During a short cutover window, apply a **delta migration** from the Dirty Key Registry to reach consistency.
|
|
9
|
+
* Provide progress reporting, resumability, throttling controls, and verification.
|
|
10
|
+
|
|
11
|
+
## F.2 Non-Goals (v1)
|
|
12
|
+
|
|
13
|
+
* Perfect change-data-capture guarantees equivalent to replication logs.
|
|
14
|
+
* Distributed migration across multiple import workers with strict ordering semantics.
|
|
15
|
+
* Full fidelity for unsupported Redis data types (streams, modules, Lua scripts, etc.).
|
|
16
|
+
|
|
17
|
+
---
|
|
18
|
+
|
|
19
|
+
# F.3 Overview
|
|
20
|
+
|
|
21
|
+
This migration strategy uses two cooperating processes:
|
|
22
|
+
|
|
23
|
+
1. **Bulk Importer**
|
|
24
|
+
|
|
25
|
+
* Scans the entire keyspace with `SCAN`.
|
|
26
|
+
* Copies supported key types and TTLs into the RespLite SQLite database.
|
|
27
|
+
* Checkpoints progress frequently.
|
|
28
|
+
|
|
29
|
+
2. **Dirty Key Tracker**
|
|
30
|
+
|
|
31
|
+
* Subscribes to Redis Keyspace Notifications.
|
|
32
|
+
* Records keys that are modified (and keys that are deleted or expire) into a persistent registry in SQLite.
|
|
33
|
+
* Enables the delta migration to focus only on changed keys.
|
|
34
|
+
|
|
35
|
+
After bulk completes, you perform a controlled **cutover**:
|
|
36
|
+
|
|
37
|
+
* Temporarily freeze writes to Redis (application maintenance window).
|
|
38
|
+
* Apply the delta migration by reimporting dirty keys (and deleting keys that were removed in Redis).
|
|
39
|
+
* Switch clients to RespLite.
|
|
40
|
+
|
|
41
|
+
---
|
|
42
|
+
|
|
43
|
+
# F.4 Redis Requirements
|
|
44
|
+
|
|
45
|
+
## F.4.1 Keyspace Notifications
|
|
46
|
+
|
|
47
|
+
Redis must be configured to emit keyspace and/or keyevent notifications. The exact flags depend on your required coverage.
|
|
48
|
+
|
|
49
|
+
### Recommended minimal event coverage for delta migration
|
|
50
|
+
|
|
51
|
+
You must capture:
|
|
52
|
+
|
|
53
|
+
* Key modifications (writes) for all supported types
|
|
54
|
+
* TTL changes (EXPIRE/PEXPIRE/PERSIST)
|
|
55
|
+
* Deletions
|
|
56
|
+
* Expiration events
|
|
57
|
+
|
|
58
|
+
### Recommended `notify-keyspace-events` flags (pragmatic v1)
|
|
59
|
+
|
|
60
|
+
A practical baseline is:
|
|
61
|
+
|
|
62
|
+
* `K` (Keyspace events) or `E` (Keyevent events)
|
|
63
|
+
* `g` (generic commands like DEL, EXPIRE)
|
|
64
|
+
* `x` (expired events)
|
|
65
|
+
* plus type-specific sets as needed:
|
|
66
|
+
|
|
67
|
+
* `$` (string)
|
|
68
|
+
* `h` (hash)
|
|
69
|
+
* `l` (list)
|
|
70
|
+
* `z` (zset)
|
|
71
|
+
* `s` (set)
|
|
72
|
+
|
|
73
|
+
If you need the broadest coverage, set `notify-keyspace-events "AKE"` (all event classes), but operational policy and Redis version may constrain what you enable. The migration tool should:
|
|
74
|
+
|
|
75
|
+
* detect whether notifications are enabled
|
|
76
|
+
* refuse or warn if they are not enabled
|
|
77
|
+
|
|
78
|
+
## F.4.2 Permissions
|
|
79
|
+
|
|
80
|
+
The tracking client needs:
|
|
81
|
+
|
|
82
|
+
* `PSUBSCRIBE` capability to the keyevent/keyspace channels
|
|
83
|
+
* Ability to read keys during delta verification (optional)
|
|
84
|
+
The bulk importer needs:
|
|
85
|
+
* `SCAN`, `TYPE`, read commands per type, and `PTTL`
|
|
86
|
+
|
|
87
|
+
---
|
|
88
|
+
|
|
89
|
+
# F.5 Dirty Key Registry (SQLite)
|
|
90
|
+
|
|
91
|
+
The registry lives in the destination SQLite database so it is persistent and resumable.
|
|
92
|
+
|
|
93
|
+
## F.5.1 Schema
|
|
94
|
+
|
|
95
|
+
### Migration run registry
|
|
96
|
+
|
|
97
|
+
```sql id="e1f4j9"
|
|
98
|
+
CREATE TABLE migration_runs (
|
|
99
|
+
run_id TEXT PRIMARY KEY,
|
|
100
|
+
source_uri TEXT NOT NULL,
|
|
101
|
+
started_at INTEGER NOT NULL,
|
|
102
|
+
updated_at INTEGER NOT NULL,
|
|
103
|
+
status TEXT NOT NULL, -- running|paused|completed|failed|aborted
|
|
104
|
+
|
|
105
|
+
scan_cursor TEXT NOT NULL DEFAULT '0',
|
|
106
|
+
scan_count_hint INTEGER NOT NULL DEFAULT 1000,
|
|
107
|
+
|
|
108
|
+
scanned_keys INTEGER NOT NULL DEFAULT 0,
|
|
109
|
+
migrated_keys INTEGER NOT NULL DEFAULT 0,
|
|
110
|
+
skipped_keys INTEGER NOT NULL DEFAULT 0,
|
|
111
|
+
error_keys INTEGER NOT NULL DEFAULT 0,
|
|
112
|
+
migrated_bytes INTEGER NOT NULL DEFAULT 0,
|
|
113
|
+
|
|
114
|
+
dirty_keys_seen INTEGER NOT NULL DEFAULT 0,
|
|
115
|
+
dirty_keys_applied INTEGER NOT NULL DEFAULT 0,
|
|
116
|
+
dirty_keys_deleted INTEGER NOT NULL DEFAULT 0,
|
|
117
|
+
|
|
118
|
+
last_error TEXT
|
|
119
|
+
);
|
|
120
|
+
```
|
|
121
|
+
|
|
122
|
+
### Dirty keys table
|
|
123
|
+
|
|
124
|
+
```sql id="4x3p9c"
|
|
125
|
+
CREATE TABLE migration_dirty_keys (
|
|
126
|
+
run_id TEXT NOT NULL,
|
|
127
|
+
key BLOB NOT NULL,
|
|
128
|
+
|
|
129
|
+
first_seen_at INTEGER NOT NULL,
|
|
130
|
+
last_seen_at INTEGER NOT NULL,
|
|
131
|
+
events_count INTEGER NOT NULL DEFAULT 1,
|
|
132
|
+
|
|
133
|
+
last_event TEXT, -- e.g. "set","hset","del","expire","expired"
|
|
134
|
+
state TEXT NOT NULL DEFAULT 'dirty', -- dirty|applied|deleted|skipped|error
|
|
135
|
+
|
|
136
|
+
PRIMARY KEY (run_id, key)
|
|
137
|
+
);
|
|
138
|
+
|
|
139
|
+
CREATE INDEX migration_dirty_keys_state_idx
|
|
140
|
+
ON migration_dirty_keys(run_id, state);
|
|
141
|
+
|
|
142
|
+
CREATE INDEX migration_dirty_keys_last_seen_idx
|
|
143
|
+
ON migration_dirty_keys(run_id, last_seen_at);
|
|
144
|
+
```
|
|
145
|
+
|
|
146
|
+
### Optional: error log (bounded)
|
|
147
|
+
|
|
148
|
+
To avoid exploding database size, log only errors and a small bounded sample:
|
|
149
|
+
|
|
150
|
+
```sql id="3o3xga"
|
|
151
|
+
CREATE TABLE migration_errors (
|
|
152
|
+
run_id TEXT NOT NULL,
|
|
153
|
+
at INTEGER NOT NULL,
|
|
154
|
+
key BLOB,
|
|
155
|
+
stage TEXT NOT NULL, -- bulk|dirty_apply|verify
|
|
156
|
+
message TEXT NOT NULL
|
|
157
|
+
);
|
|
158
|
+
CREATE INDEX migration_errors_at_idx ON migration_errors(run_id, at);
|
|
159
|
+
```
|
|
160
|
+
|
|
161
|
+
## F.5.2 Registry update semantics
|
|
162
|
+
|
|
163
|
+
When the tracker sees an event for key `K`:
|
|
164
|
+
|
|
165
|
+
* Insert if not present:
|
|
166
|
+
|
|
167
|
+
* `first_seen_at = now`, `last_seen_at = now`, `events_count = 1`, `last_event = event`
|
|
168
|
+
* If present:
|
|
169
|
+
|
|
170
|
+
* `last_seen_at = now`
|
|
171
|
+
* `events_count += 1`
|
|
172
|
+
* `last_event = event`
|
|
173
|
+
* `state = "dirty"` unless state is terminal (`deleted` can be reverted to dirty if a new write arrives)
|
|
174
|
+
|
|
175
|
+
This is a **set-like deduplicated registry** with useful metadata.
|
|
176
|
+
|
|
177
|
+
---
|
|
178
|
+
|
|
179
|
+
# F.6 Event Capture: Mapping Notifications to Dirty Keys
|
|
180
|
+
|
|
181
|
+
## F.6.1 Channel subscription strategy
|
|
182
|
+
|
|
183
|
+
Prefer subscribing to **keyevent** channels because they give you the event name, not only the keyspace operation.
|
|
184
|
+
|
|
185
|
+
Examples (conceptual):
|
|
186
|
+
|
|
187
|
+
* `__keyevent@0__:set`
|
|
188
|
+
* `__keyevent@0__:hset`
|
|
189
|
+
* `__keyevent@0__:del`
|
|
190
|
+
* `__keyevent@0__:expire`
|
|
191
|
+
* `__keyevent@0__:expired`
|
|
192
|
+
|
|
193
|
+
The payload is the key name.
|
|
194
|
+
|
|
195
|
+
If only keyspace notifications are available, you will receive:
|
|
196
|
+
|
|
197
|
+
* channel includes the key, payload includes the event
|
|
198
|
+
You must support both, but keyevent is simpler.
|
|
199
|
+
|
|
200
|
+
## F.6.2 Events to treat as “dirty”
|
|
201
|
+
|
|
202
|
+
Mark key as dirty when you see any of:
|
|
203
|
+
|
|
204
|
+
* `set`, `mset`, `incrby`, etc. (string writes)
|
|
205
|
+
* `hset`, `hdel`, `hincrby`, etc.
|
|
206
|
+
* `sadd`, `srem`, etc.
|
|
207
|
+
* `lpush`, `rpush`, `lpop`, `rpop`, etc.
|
|
208
|
+
* `zadd`, `zrem`, etc.
|
|
209
|
+
* `expire`, `pexpire`, `persist` (TTL changes)
|
|
210
|
+
|
|
211
|
+
## F.6.3 Events to treat as “deleted”
|
|
212
|
+
|
|
213
|
+
Mark key as deleted when you see:
|
|
214
|
+
|
|
215
|
+
* `del` / `unlink` event
|
|
216
|
+
* `expired` event
|
|
217
|
+
|
|
218
|
+
**Important:** A key can be deleted and later recreated. If a write event arrives after a deleted mark, you must set state back to `dirty`.
|
|
219
|
+
|
|
220
|
+
## F.6.4 Limitations and mitigation
|
|
221
|
+
|
|
222
|
+
Keyspace notifications are not a guaranteed durable log:
|
|
223
|
+
|
|
224
|
+
* if the tracker disconnects, events can be missed
|
|
225
|
+
Mitigation:
|
|
226
|
+
* treat the final cutover delta as authoritative with the application frozen
|
|
227
|
+
* optionally run one short SCAN after freeze as a “safety sweep” if you want extra assurance
|
|
228
|
+
|
|
229
|
+
---
|
|
230
|
+
|
|
231
|
+
# F.7 Bulk Importer Behavior (No Patterns)
|
|
232
|
+
|
|
233
|
+
## F.7.1 Bulk scan loop
|
|
234
|
+
|
|
235
|
+
* Use `SCAN cursor COUNT scan_count_hint`
|
|
236
|
+
* For each returned key:
|
|
237
|
+
|
|
238
|
+
1. `TYPE key`
|
|
239
|
+
2. If type unsupported: `skipped_keys++`
|
|
240
|
+
3. Else fetch full value depending on type:
|
|
241
|
+
|
|
242
|
+
* string: `GET`
|
|
243
|
+
* hash: `HGETALL`
|
|
244
|
+
* set: `SMEMBERS`
|
|
245
|
+
* list: `LRANGE 0 -1` (if lists supported)
|
|
246
|
+
* zset: `ZRANGE 0 -1 WITHSCORES` (if zsets supported)
|
|
247
|
+
4. `PTTL key` (preserve TTL)
|
|
248
|
+
5. Write to destination in one batch transaction
|
|
249
|
+
|
|
250
|
+
## F.7.2 Checkpointing
|
|
251
|
+
|
|
252
|
+
Persist:
|
|
253
|
+
|
|
254
|
+
* cursor
|
|
255
|
+
* counters
|
|
256
|
+
* last update time
|
|
257
|
+
every N seconds or every M keys.
|
|
258
|
+
|
|
259
|
+
If interrupted, `--resume` restarts from the stored cursor.
|
|
260
|
+
|
|
261
|
+
## F.7.3 Throughput controls
|
|
262
|
+
|
|
263
|
+
The importer must support:
|
|
264
|
+
|
|
265
|
+
* `max_concurrency`: number of inflight fetches
|
|
266
|
+
* `max_rps`: throttle reads against Redis
|
|
267
|
+
* `batch_keys` / `batch_bytes`: commit grouping
|
|
268
|
+
|
|
269
|
+
---
|
|
270
|
+
|
|
271
|
+
# F.8 Delta Apply (Using Dirty Key Registry)
|
|
272
|
+
|
|
273
|
+
## F.8.1 When to run delta
|
|
274
|
+
|
|
275
|
+
* During cutover window, with application writes frozen.
|
|
276
|
+
* Optional: run a “pre-delta” while still live to reduce final delta size.
|
|
277
|
+
|
|
278
|
+
## F.8.2 Delta algorithm
|
|
279
|
+
|
|
280
|
+
Repeat until no dirty keys remain:
|
|
281
|
+
|
|
282
|
+
1. Select dirty keys in batches:
|
|
283
|
+
|
|
284
|
+
```sql
|
|
285
|
+
SELECT key
|
|
286
|
+
FROM migration_dirty_keys
|
|
287
|
+
WHERE run_id=? AND state='dirty'
|
|
288
|
+
ORDER BY last_seen_at ASC
|
|
289
|
+
LIMIT ?;
|
|
290
|
+
```
|
|
291
|
+
2. For each key:
|
|
292
|
+
|
|
293
|
+
* Check existence in Redis:
|
|
294
|
+
|
|
295
|
+
* Option A: attempt `TYPE`. If `none`, treat as deleted.
|
|
296
|
+
* If deleted:
|
|
297
|
+
|
|
298
|
+
* `DEL key` on RespLite destination
|
|
299
|
+
* mark state = `deleted`
|
|
300
|
+
* increment `dirty_keys_deleted`
|
|
301
|
+
* Else:
|
|
302
|
+
|
|
303
|
+
* fetch by type (same as bulk)
|
|
304
|
+
* fetch `PTTL`
|
|
305
|
+
* write into RespLite
|
|
306
|
+
* mark state = `applied`
|
|
307
|
+
* increment `dirty_keys_applied`
|
|
308
|
+
|
|
309
|
+
All writes should update progress counters in `migration_runs`.
|
|
310
|
+
|
|
311
|
+
## F.8.3 Safety sweep (optional but recommended for large/high-write systems)
|
|
312
|
+
|
|
313
|
+
After freeze begins and delta completes:
|
|
314
|
+
|
|
315
|
+
* run a quick SCAN pass limited by time (or a full pass if feasible)
|
|
316
|
+
* compare to destination by spot checks or reimport a final time
|
|
317
|
+
This is a belt-and-suspenders option.
|
|
318
|
+
|
|
319
|
+
---
|
|
320
|
+
|
|
321
|
+
# F.9 Suggested End-to-End Migration Process (Example)
|
|
322
|
+
|
|
323
|
+
Assume:
|
|
324
|
+
|
|
325
|
+
* Redis source: `redis://10.0.0.10:6379`
|
|
326
|
+
* RespLite destination DB: `./resplite.db`
|
|
327
|
+
* Full migration without patterns
|
|
328
|
+
* Supported types: string/hash/set/list/zset
|
|
329
|
+
* Goal: minimal downtime
|
|
330
|
+
|
|
331
|
+
## Step 0: Preflight
|
|
332
|
+
|
|
333
|
+
```bash id="4bct0i"
|
|
334
|
+
resplite-import preflight \
|
|
335
|
+
--from redis://10.0.0.10:6379 \
|
|
336
|
+
--to ./resplite.db
|
|
337
|
+
```
|
|
338
|
+
|
|
339
|
+
Outputs:
|
|
340
|
+
|
|
341
|
+
* estimated key count
|
|
342
|
+
* type distribution sample
|
|
343
|
+
* recommended concurrency and scan count
|
|
344
|
+
* detection of unsupported types
|
|
345
|
+
|
|
346
|
+
## Step 1: Start Dirty Key Tracker
|
|
347
|
+
|
|
348
|
+
Start the tracker first, so it captures changes during the entire bulk run.
|
|
349
|
+
|
|
350
|
+
```bash id="6km4l7"
|
|
351
|
+
resplite-dirty-tracker start \
|
|
352
|
+
--run-id run_2026_03_03 \
|
|
353
|
+
--from redis://10.0.0.10:6379 \
|
|
354
|
+
--to ./resplite.db \
|
|
355
|
+
--channels keyevent
|
|
356
|
+
```
|
|
357
|
+
|
|
358
|
+
## Step 2: Run Bulk Import Online
|
|
359
|
+
|
|
360
|
+
```bash id="a9g2aa"
|
|
361
|
+
resplite-import bulk \
|
|
362
|
+
--run-id run_2026_03_03 \
|
|
363
|
+
--from redis://10.0.0.10:6379 \
|
|
364
|
+
--to ./resplite.db \
|
|
365
|
+
--scan-count 1000 \
|
|
366
|
+
--max-concurrency 32 \
|
|
367
|
+
--max-rps 2000 \
|
|
368
|
+
--batch-keys 200 \
|
|
369
|
+
--batch-bytes 64MB \
|
|
370
|
+
--ttl-mode preserve \
|
|
371
|
+
--resume
|
|
372
|
+
```
|
|
373
|
+
|
|
374
|
+
Monitor progress:
|
|
375
|
+
|
|
376
|
+
```bash id="rkf6uv"
|
|
377
|
+
resplite-import status --run-id run_2026_03_03 --to ./resplite.db
|
|
378
|
+
```
|
|
379
|
+
|
|
380
|
+
## Step 3 (Optional): Pre-Delta While Still Live
|
|
381
|
+
|
|
382
|
+
Apply dirty keys while Redis is still live to reduce final delta size:
|
|
383
|
+
|
|
384
|
+
```bash id="v5xc6f"
|
|
385
|
+
resplite-import apply-dirty \
|
|
386
|
+
--run-id run_2026_03_03 \
|
|
387
|
+
--from redis://10.0.0.10:6379 \
|
|
388
|
+
--to ./resplite.db \
|
|
389
|
+
--max-concurrency 32 \
|
|
390
|
+
--max-rps 2000 \
|
|
391
|
+
--batch-keys 200 \
|
|
392
|
+
--ttl-mode preserve
|
|
393
|
+
```
|
|
394
|
+
|
|
395
|
+
You can run this repeatedly (or continuously) while bulk is still running.
|
|
396
|
+
|
|
397
|
+
## Step 4: Cutover Window (Freeze Writes)
|
|
398
|
+
|
|
399
|
+
* Put the application into maintenance mode (freeze writes to Redis).
|
|
400
|
+
* Keep dirty tracker running for a moment to capture any last writes.
|
|
401
|
+
|
|
402
|
+
## Step 5: Final Delta Apply
|
|
403
|
+
|
|
404
|
+
With writes frozen, apply all remaining dirty keys:
|
|
405
|
+
|
|
406
|
+
```bash id="v0x8xo"
|
|
407
|
+
resplite-import apply-dirty \
|
|
408
|
+
--run-id run_2026_03_03 \
|
|
409
|
+
--from redis://10.0.0.10:6379 \
|
|
410
|
+
--to ./resplite.db \
|
|
411
|
+
--max-concurrency 64 \
|
|
412
|
+
--max-rps 5000 \
|
|
413
|
+
--batch-keys 500 \
|
|
414
|
+
--ttl-mode preserve
|
|
415
|
+
```
|
|
416
|
+
|
|
417
|
+
Verify no remaining dirty keys:
|
|
418
|
+
|
|
419
|
+
```bash id="0ca1y7"
|
|
420
|
+
resplite-import status --run-id run_2026_03_03 --to ./resplite.db
|
|
421
|
+
```
|
|
422
|
+
|
|
423
|
+
## Step 6: Stop Dirty Tracker and Switch Clients
|
|
424
|
+
|
|
425
|
+
Stop tracker:
|
|
426
|
+
|
|
427
|
+
```bash id="1v1u1j"
|
|
428
|
+
resplite-dirty-tracker stop --run-id run_2026_03_03 --to ./resplite.db
|
|
429
|
+
```
|
|
430
|
+
|
|
431
|
+
Switch application Redis endpoint to RespLite server (RESP port).
|
|
432
|
+
|
|
433
|
+
## Step 7: Verification (Post-Cutover)
|
|
434
|
+
|
|
435
|
+
Run a sampling verification:
|
|
436
|
+
|
|
437
|
+
```bash id="p6w5q6"
|
|
438
|
+
resplite-import verify \
|
|
439
|
+
--run-id run_2026_03_03 \
|
|
440
|
+
--from redis://10.0.0.10:6379 \
|
|
441
|
+
--to ./resplite.db \
|
|
442
|
+
--sample 0.5%
|
|
443
|
+
```
|
|
444
|
+
|
|
445
|
+
---
|
|
446
|
+
|
|
447
|
+
# F.10 Progress Reporting and Controls
|
|
448
|
+
|
|
449
|
+
## F.10.1 Progress output requirements
|
|
450
|
+
|
|
451
|
+
Both bulk importer and dirty applier must print and persist:
|
|
452
|
+
|
|
453
|
+
* scanned_keys, migrated_keys, migrated_bytes
|
|
454
|
+
* dirty_keys_seen, dirty_keys_applied, dirty_keys_deleted
|
|
455
|
+
* current cursor
|
|
456
|
+
* rates (keys/s, MB/s)
|
|
457
|
+
* recent errors summary
|
|
458
|
+
* checkpoint time
|
|
459
|
+
|
|
460
|
+
## F.10.2 Runtime controls
|
|
461
|
+
|
|
462
|
+
Provide:
|
|
463
|
+
|
|
464
|
+
* `pause`, `resume`, `abort`
|
|
465
|
+
* adjust `max_concurrency`, `max_rps`
|
|
466
|
+
* adjust `batch_keys`, `scan_count`
|
|
467
|
+
|
|
468
|
+
Implementation may use:
|
|
469
|
+
|
|
470
|
+
* updating `migration_runs.status`
|
|
471
|
+
* a simple control file
|
|
472
|
+
* or a CLI that updates the SQLite run row
|
|
473
|
+
|
|
474
|
+
---
|
|
475
|
+
|
|
476
|
+
# F.11 Failure and Recovery Rules
|
|
477
|
+
|
|
478
|
+
## F.11.1 Tracker disconnect
|
|
479
|
+
|
|
480
|
+
If dirty tracker disconnects:
|
|
481
|
+
|
|
482
|
+
* it must attempt reconnect with backoff
|
|
483
|
+
* record a warning in `migration_errors`
|
|
484
|
+
* migration can proceed, but final delta should be done after freeze (which provides correctness)
|
|
485
|
+
|
|
486
|
+
## F.11.2 Importer crash/restart
|
|
487
|
+
|
|
488
|
+
* On restart with `--resume`, continue from stored cursor.
|
|
489
|
+
* Already migrated keys may be overwritten idempotently.
|
|
490
|
+
|
|
491
|
+
## F.11.3 Idempotency requirements
|
|
492
|
+
|
|
493
|
+
* Bulk and dirty apply must be safe to rerun:
|
|
494
|
+
|
|
495
|
+
* writes should upsert
|
|
496
|
+
* deletions should be no-op if missing
|
|
497
|
+
|
|
498
|
+
---
|
|
499
|
+
|
|
500
|
+
# F.12 Operational Guidance (Large datasets)
|
|
501
|
+
|
|
502
|
+
* Use a dedicated Redis replica for reads if possible to reduce load on primary.
|
|
503
|
+
* Keep `max_concurrency` conservative at first; increase only if Redis latency remains stable.
|
|
504
|
+
* Keep dirty tracker running from before bulk starts until just before cutover switch.
|
|
505
|
+
* Prefer application-level maintenance mode for freeze.
|