resplite 1.3.2 → 1.3.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -107,6 +107,7 @@ const m = createMigration({
107
107
  batchKeys: 1000,
108
108
  batchBytes: 64 * 1024 * 1024, // 64 MB
109
109
  maxRps: 0, // 0 = unlimited
110
+ redisCommandTimeoutMs: 30000, // fail a stuck Redis command instead of waiting forever
110
111
 
111
112
  // If your Redis deployment renamed CONFIG for security:
112
113
  // configCommand: 'MYCONFIG',
@@ -189,10 +190,10 @@ await m.close();
189
190
  `resume` defaults to `true`. It doesn't matter whether it's the first run or a resume: the same script works for both starting and continuing. The first run starts from cursor 0; if the process is interrupted (Ctrl+C, crash, etc.), running the script again continues from the last checkpoint. You don't need to pass `resume: false` on the first run or change anything to resume.
190
191
 
191
192
  **Graceful shutdown**
192
- On SIGINT (Ctrl+C) or SIGTERM, the bulk importer checkpoints progress, sets the run status to `aborted`, closes the SQLite database cleanly (so WAL is checkpointed and the file is not left open), then exits. You can safely interrupt a long-running bulk and resume later. The same applies to **apply-dirty**: Ctrl+C stops the delta apply, sets the run to `aborted`, and exits so you can inspect and retry.
193
+ On SIGINT (Ctrl+C) or SIGTERM, the bulk importer checkpoints progress, sets the run status to `aborted`, closes the SQLite database cleanly (so WAL is checkpointed and the file is not left open), and tears down the Redis client so a stuck command does not trap the process forever. You can safely interrupt a long-running bulk and resume later. The same applies to **apply-dirty**: Ctrl+C stops the delta apply, sets the run to `aborted`, and exits so you can inspect and retry.
193
194
 
194
195
  **Errors and stalls**
195
- Use `onProgress` to see progress and detect problems. The callback receives the run row (e.g. `scanned_keys`, `migrated_keys`, `dirty_keys_applied`, `last_error`). If progress stops for a long time (e.g. Redis hang or network issue), you may see `_stallWarning: true` and `_stallMessage` in the progress object every 15 seconds. When a key fails to import, the error is logged to `migration_errors` and the run’s `last_error` is set; the real error message is included so you can diagnose. After any failure, check `m.status()` and query `migration_errors` in the DB if needed.
196
+ Use `onProgress` to see progress and detect problems. The callback receives the run row (e.g. `scanned_keys`, `migrated_keys`, `dirty_keys_applied`, `last_error`) plus transient fields like `_activeStage`, `_activeKey`, `_activeForMs`, `_stallWarning`, and `_stallMessage` when the importer is waiting on Redis. Each Redis command is also protected by `redisCommandTimeoutMs` (30s by default), so a dead connection or pathological key fails with the active stage/key in the error instead of silently hanging forever. When a key fails to import, the error is logged to `migration_errors` and the run’s `last_error` is set; the real error message is included so you can diagnose. After any failure, check `m.status()` and query `migration_errors` in the DB if needed.
196
197
 
197
198
  The JS API can run the dirty-key tracker in-process via `m.startDirtyTracker()` / `m.stopDirtyTracker()`, so the full flow stays inside a single script.
198
199
 
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "resplite",
3
- "version": "1.3.2",
3
+ "version": "1.3.4",
4
4
  "description": "A RESP2 server with practical Redis compatibility, backed by SQLite",
5
5
  "type": "module",
6
6
  "main": "src/index.js",
@@ -11,6 +11,7 @@ import { createListsStorage } from '../storage/sqlite/lists.js';
11
11
  import { createZsetsStorage } from '../storage/sqlite/zsets.js';
12
12
  import { getRun, getDirtyBatch, markDirtyState, logError, setRunStatus, updateBulkProgress, RUN_STATUS } from './registry.js';
13
13
  import { importKeyFromRedis } from './import-one.js';
14
+ import { createRedisCommandGuard, isFatalRedisCommandError } from './redis-guard.js';
14
15
 
15
16
  function sleep(ms) {
16
17
  return new Promise((resolve) => setTimeout(resolve, ms));
@@ -30,28 +31,69 @@ const STALL_WARNING_MS = 60000;
30
31
  * @param {string} [options.pragmaTemplate='default']
31
32
  * @param {number} [options.batch_keys=200]
32
33
  * @param {number} [options.max_rps=0]
34
+ * @param {number} [options.redis_command_timeout_ms=30000]
33
35
  * @param {(run: object) => void | Promise<void>} [options.onProgress] - Called after each batch and on heartbeat; may receive _stallWarning if no progress for 60s.
34
36
  */
35
37
  export async function runApplyDirty(redisClient, dbPath, runId, options = {}) {
36
- const { pragmaTemplate = 'default', batch_keys = 200, max_rps = 0, onProgress } = options;
38
+ const { pragmaTemplate = 'default', batch_keys = 200, max_rps = 0, redis_command_timeout_ms = 30000, onProgress } = options;
37
39
 
38
40
  const db = openDb(dbPath, { pragmaTemplate });
41
+ const redisGuard = createRedisCommandGuard({
42
+ redisClient,
43
+ timeoutMs: redis_command_timeout_ms,
44
+ });
39
45
  let abortRequested = false;
40
46
  const onSignal = () => {
41
47
  abortRequested = true;
48
+ redisGuard.signalAbort('Apply dirty interrupted by signal (SIGINT/SIGTERM)', 'APPLY_DIRTY_ABORTED');
42
49
  };
43
50
  process.on('SIGINT', onSignal);
44
51
  process.on('SIGTERM', onSignal);
45
52
 
53
+ function enrichProgress(run) {
54
+ const active = redisGuard.snapshot();
55
+ if (!active) return run;
56
+
57
+ return {
58
+ ...run,
59
+ _activeStage: active.stage,
60
+ _activeKey: active.key,
61
+ _activeSince: active.startedAt,
62
+ _activeForMs: active.activeForMs,
63
+ };
64
+ }
65
+
66
+ function withStallContext(run) {
67
+ const active = redisGuard.snapshot();
68
+ let payload = enrichProgress(run);
69
+ if (run.updated_at && Date.now() - run.updated_at > STALL_WARNING_MS) {
70
+ const detail = active
71
+ ? ` Active operation: ${active.stage}${active.key ? ` key=${active.key}` : ''}.`
72
+ : '';
73
+ payload = {
74
+ ...payload,
75
+ _stallWarning: true,
76
+ _stallMessage: `No progress for 60s — possible hang or Redis timeout.${detail}`,
77
+ };
78
+ }
79
+ return payload;
80
+ }
81
+
82
+ let abortHandled = false;
83
+ function markAborted() {
84
+ if (abortHandled) return getRun(db, runId);
85
+ abortHandled = true;
86
+ setRunStatus(db, runId, RUN_STATUS.ABORTED);
87
+ updateBulkProgress(db, runId, { last_error: 'Interrupted by SIGINT/SIGTERM' });
88
+ return getRun(db, runId);
89
+ }
90
+
46
91
  let heartbeatTimer = null;
47
92
  if (onProgress) {
48
93
  heartbeatTimer = setInterval(() => {
49
94
  const run = getRun(db, runId);
50
95
  if (!run) return;
51
- let payload = run;
52
- if (run.updated_at && Date.now() - run.updated_at > STALL_WARNING_MS) {
53
- payload = { ...run, _stallWarning: true, _stallMessage: 'No progress for 60s — possible hang or Redis timeout' };
54
- }
96
+ const payload = withStallContext(run);
55
97
  Promise.resolve(onProgress(payload)).catch(() => {});
56
98
  }, HEARTBEAT_INTERVAL_MS);
57
99
  }
@@ -104,12 +146,18 @@ export async function runApplyDirty(redisClient, dbPath, runId, options = {}) {
104
146
 
105
147
  const keyName = keyBuf.toString('utf8');
106
148
  try {
107
- const type = (await redisClient.type(keyName)).toLowerCase();
149
+ const type = (await redisGuard.run(
150
+ () => redisClient.type(keyName),
151
+ { stage: 'apply-dirty.type', key: keyName }
152
+ )).toLowerCase();
108
153
  if (type === 'none' || !type) {
109
154
  keys.delete(keyBuf);
110
155
  markDirtyState(db, runId, keyBuf, 'deleted');
111
156
  } else {
112
- const outcome = await importKeyFromRedis(redisClient, keyName, storages, {});
157
+ const outcome = await importKeyFromRedis(redisClient, keyName, storages, {
158
+ redisGuard,
159
+ knownType: type,
160
+ });
113
161
  if (outcome.ok) {
114
162
  markDirtyState(db, runId, keyBuf, 'applied');
115
163
  } else if (outcome.skipped) {
@@ -120,6 +168,7 @@ export async function runApplyDirty(redisClient, dbPath, runId, options = {}) {
120
168
  }
121
169
  }
122
170
  } catch (err) {
171
+ if (isFatalRedisCommandError(err)) throw err;
123
172
  logError(db, runId, 'dirty_apply', err.message, keyBuf);
124
173
  markDirtyState(db, runId, keyBuf, 'error');
125
174
  }
@@ -156,15 +205,13 @@ export async function runApplyDirty(redisClient, dbPath, runId, options = {}) {
156
205
  }
157
206
  if (batchSize > 0 && onProgress) {
158
207
  const run = getRun(db, runId);
159
- if (run) Promise.resolve(onProgress(run)).catch(() => {});
208
+ if (run) Promise.resolve(onProgress(enrichProgress(run))).catch(() => {});
160
209
  }
161
210
  }
162
211
 
163
212
  if (abortRequested) {
164
- setRunStatus(db, runId, RUN_STATUS.ABORTED);
165
- updateBulkProgress(db, runId, { last_error: 'Interrupted by SIGINT/SIGTERM' });
166
- const run = getRun(db, runId);
167
- if (onProgress && run) Promise.resolve(onProgress(run)).catch(() => {});
213
+ const run = markAborted();
214
+ if (onProgress && run) Promise.resolve(onProgress(enrichProgress(run))).catch(() => {});
168
215
  const err = new Error('Apply dirty interrupted by signal (SIGINT/SIGTERM)');
169
216
  err.code = 'APPLY_DIRTY_ABORTED';
170
217
  throw err;
@@ -172,7 +219,10 @@ export async function runApplyDirty(redisClient, dbPath, runId, options = {}) {
172
219
 
173
220
  return getRun(db, runId);
174
221
  } catch (err) {
175
- if (err.code !== 'APPLY_DIRTY_ABORTED') {
222
+ if (err.code === 'APPLY_DIRTY_ABORTED') {
223
+ const run = markAborted();
224
+ if (onProgress && run) Promise.resolve(onProgress(enrichProgress(run))).catch(() => {});
225
+ } else {
176
226
  setRunStatus(db, runId, RUN_STATUS.FAILED);
177
227
  updateBulkProgress(db, runId, { last_error: err.message });
178
228
  logError(db, runId, 'dirty_apply', err.message, null);
@@ -18,6 +18,7 @@ import {
18
18
  RUN_STATUS,
19
19
  } from './registry.js';
20
20
  import { importKeyFromRedis } from './import-one.js';
21
+ import { createRedisCommandGuard } from './redis-guard.js';
21
22
 
22
23
  function parseScanResult(result) {
23
24
  if (Array.isArray(result)) {
@@ -54,6 +55,7 @@ const STALL_WARNING_MS = 60000;
54
55
  * @param {number} [options.batch_keys=200]
55
56
  * @param {number} [options.batch_bytes=64*1024*1024] - 64MB
56
57
  * @param {number} [options.checkpoint_interval_sec=30]
58
+ * @param {number} [options.redis_command_timeout_ms=30000]
57
59
  * @param {boolean} [options.resume=true] - true: start from 0 or continue from checkpoint; false: always start from 0
58
60
  * @param {function(run): void} [options.onProgress] - called after checkpoint and on heartbeat; may receive _stallWarning if no progress for 60s
59
61
  */
@@ -66,31 +68,86 @@ export async function runBulkImport(redisClient, dbPath, runId, options = {}) {
66
68
  batch_keys = 200,
67
69
  batch_bytes = 64 * 1024 * 1024,
68
70
  checkpoint_interval_sec = 30,
71
+ redis_command_timeout_ms = 30000,
69
72
  resume = true,
70
73
  onProgress,
71
74
  } = options;
72
75
 
73
76
  const db = openDb(dbPath, { pragmaTemplate });
77
+ const redisGuard = createRedisCommandGuard({
78
+ redisClient,
79
+ timeoutMs: redis_command_timeout_ms,
80
+ });
74
81
  let abortRequested = false;
75
82
  const onSignal = () => {
76
83
  abortRequested = true;
84
+ redisGuard.signalAbort('Bulk import interrupted by signal (SIGINT/SIGTERM)', 'BULK_ABORTED');
77
85
  };
78
86
  process.on('SIGINT', onSignal);
79
87
  process.on('SIGTERM', onSignal);
80
88
 
89
+ function enrichProgress(run) {
90
+ const active = redisGuard.snapshot();
91
+ if (!active) return run;
92
+
93
+ return {
94
+ ...run,
95
+ _activeStage: active.stage,
96
+ _activeKey: active.key,
97
+ _activeSince: active.startedAt,
98
+ _activeForMs: active.activeForMs,
99
+ };
100
+ }
101
+
102
+ function withStallContext(run) {
103
+ const active = redisGuard.snapshot();
104
+ let payload = enrichProgress(run);
105
+ if (run.updated_at && Date.now() - run.updated_at > STALL_WARNING_MS) {
106
+ const detail = active
107
+ ? ` Active operation: ${active.stage}${active.key ? ` key=${active.key}` : ''}.`
108
+ : '';
109
+ payload = {
110
+ ...payload,
111
+ _stallWarning: true,
112
+ _stallMessage: `No progress for 60s — possible hang or Redis timeout.${detail}`,
113
+ };
114
+ }
115
+ return payload;
116
+ }
117
+
81
118
  let heartbeatTimer = null;
82
119
  if (onProgress) {
83
120
  heartbeatTimer = setInterval(() => {
84
121
  const run = getRun(db, runId);
85
122
  if (!run) return;
86
- let payload = run;
87
- if (run.updated_at && Date.now() - run.updated_at > STALL_WARNING_MS) {
88
- payload = { ...run, _stallWarning: true, _stallMessage: 'No progress for 60s — possible hang or Redis timeout' };
89
- }
123
+ const payload = withStallContext(run);
90
124
  Promise.resolve(onProgress(payload)).catch(() => {});
91
125
  }, HEARTBEAT_INTERVAL_MS);
92
126
  }
93
127
 
128
+ let cursor = 0;
129
+ let scanned_keys = 0;
130
+ let migrated_keys = 0;
131
+ let skipped_keys = 0;
132
+ let error_keys = 0;
133
+ let migrated_bytes = 0;
134
+ let abortHandled = false;
135
+ function markAborted() {
136
+ if (abortHandled) return getRun(db, runId);
137
+ abortHandled = true;
138
+ updateBulkProgress(db, runId, {
139
+ scan_cursor: String(cursor),
140
+ scanned_keys,
141
+ migrated_keys,
142
+ skipped_keys,
143
+ error_keys,
144
+ migrated_bytes,
145
+ last_error: 'Interrupted by SIGINT/SIGTERM',
146
+ });
147
+ setRunStatus(db, runId, RUN_STATUS.ABORTED);
148
+ return getRun(db, runId);
149
+ }
150
+
94
151
  try {
95
152
  const keys = createKeysStorage(db);
96
153
  const strings = createStringsStorage(db, keys);
@@ -104,12 +161,12 @@ export async function runBulkImport(redisClient, dbPath, runId, options = {}) {
104
161
  let run = getRun(db, runId);
105
162
  if (!run) throw new Error(`Run ${runId} not found`);
106
163
 
107
- let cursor = resume && run.scan_cursor !== undefined ? parseInt(String(run.scan_cursor), 10) : 0;
108
- let scanned_keys = resume ? (run.scanned_keys || 0) : 0;
109
- let migrated_keys = resume ? (run.migrated_keys || 0) : 0;
110
- let skipped_keys = resume ? (run.skipped_keys || 0) : 0;
111
- let error_keys = resume ? (run.error_keys || 0) : 0;
112
- let migrated_bytes = resume ? (run.migrated_bytes || 0) : 0;
164
+ cursor = resume && run.scan_cursor !== undefined ? parseInt(String(run.scan_cursor), 10) : 0;
165
+ scanned_keys = resume ? (run.scanned_keys || 0) : 0;
166
+ migrated_keys = resume ? (run.migrated_keys || 0) : 0;
167
+ skipped_keys = resume ? (run.skipped_keys || 0) : 0;
168
+ error_keys = resume ? (run.error_keys || 0) : 0;
169
+ migrated_bytes = resume ? (run.migrated_bytes || 0) : 0;
113
170
 
114
171
  if (!resume) {
115
172
  updateBulkProgress(db, runId, { scan_cursor: String(cursor), scanned_keys, migrated_keys, skipped_keys, error_keys, migrated_bytes });
@@ -130,7 +187,10 @@ export async function runBulkImport(redisClient, dbPath, runId, options = {}) {
130
187
  run = getRun(db, runId);
131
188
  }
132
189
 
133
- const result = await redisClient.scan(cursor, { COUNT: scan_count });
190
+ const result = await redisGuard.run(
191
+ () => redisClient.scan(cursor, { COUNT: scan_count }),
192
+ { stage: 'bulk.scan' }
193
+ );
134
194
  const parsed = parseScanResult(result);
135
195
  cursor = parsed.cursor;
136
196
  const keyList = parsed.keys || [];
@@ -152,7 +212,10 @@ export async function runBulkImport(redisClient, dbPath, runId, options = {}) {
152
212
  }
153
213
 
154
214
  const now = Date.now();
155
- const outcome = await importKeyFromRedis(redisClient, keyName, storages, { now });
215
+ const outcome = await importKeyFromRedis(redisClient, keyName, storages, {
216
+ now,
217
+ redisGuard,
218
+ });
156
219
  if (outcome.ok) {
157
220
  migrated_keys++;
158
221
  migrated_bytes += outcome.bytes || 0;
@@ -183,7 +246,7 @@ export async function runBulkImport(redisClient, dbPath, runId, options = {}) {
183
246
  batchScanned = 0;
184
247
  batchBytes = 0;
185
248
  run = getRun(db, runId);
186
- if (onProgress && run) onProgress(run);
249
+ if (onProgress && run) onProgress(enrichProgress(run));
187
250
  }
188
251
  }
189
252
  } while (cursor !== 0);
@@ -197,9 +260,8 @@ export async function runBulkImport(redisClient, dbPath, runId, options = {}) {
197
260
  error_keys,
198
261
  migrated_bytes,
199
262
  });
200
- setRunStatus(db, runId, RUN_STATUS.ABORTED);
201
- run = getRun(db, runId);
202
- if (onProgress && run) onProgress(run);
263
+ run = markAborted();
264
+ if (onProgress && run) onProgress(enrichProgress(run));
203
265
  const err = new Error('Bulk import interrupted by signal (SIGINT/SIGTERM)');
204
266
  err.code = 'BULK_ABORTED';
205
267
  throw err;
@@ -216,7 +278,10 @@ export async function runBulkImport(redisClient, dbPath, runId, options = {}) {
216
278
  setRunStatus(db, runId, RUN_STATUS.COMPLETED);
217
279
  return getRun(db, runId);
218
280
  } catch (err) {
219
- if (err.code !== 'BULK_ABORTED') {
281
+ if (err.code === 'BULK_ABORTED') {
282
+ const run = markAborted();
283
+ if (onProgress && run) onProgress(enrichProgress(run));
284
+ } else {
220
285
  setRunStatus(db, runId, RUN_STATUS.FAILED);
221
286
  updateBulkProgress(db, runId, { last_error: err.message });
222
287
  logError(db, runId, 'bulk', err.message, null);
@@ -4,6 +4,7 @@
4
4
  */
5
5
 
6
6
  import { asKey, asValue } from '../util/buffers.js';
7
+ import { isFatalRedisCommandError } from './redis-guard.js';
7
8
 
8
9
  const SUPPORTED_TYPES = new Set(['string', 'hash', 'set', 'list', 'zset']);
9
10
 
@@ -19,20 +20,27 @@ function toBuffer(value) {
19
20
  * @param {import('redis').RedisClientType} redisClient
20
21
  * @param {string} keyName
21
22
  * @param {{ keys: import('../storage/sqlite/keys.js').ReturnType<import('../storage/sqlite/keys.js').createKeysStorage>; strings: ReturnType<import('../storage/sqlite/strings.js').createStringsStorage>; hashes: ReturnType<import('../storage/sqlite/hashes.js').createHashesStorage>; sets: ReturnType<import('../storage/sqlite/sets.js').createSetsStorage>; lists: ReturnType<import('../storage/sqlite/lists.js').createListsStorage>; zsets: ReturnType<import('../storage/sqlite/zsets.js').createZsetsStorage> }} storages
22
- * @param {{ now?: number }} options
23
+ * @param {{ now?: number; knownType?: string; redisGuard?: { run(fn: Function, options: { stage: string, key?: string | null }): Promise<any> } }} options
23
24
  * @returns {Promise<{ ok: boolean; skipped?: boolean; error?: boolean; errorMessage?: string; bytes?: number }>}
24
25
  */
25
26
  export async function importKeyFromRedis(redisClient, keyName, storages, options = {}) {
26
27
  const now = options.now ?? Date.now();
28
+ const knownType = options.knownType ? String(options.knownType).toLowerCase() : null;
29
+ const redisGuard = options.redisGuard ?? null;
27
30
  const { keys, strings, hashes, sets, lists, zsets } = storages;
31
+ const runRedis = (stage, fn) => (
32
+ redisGuard
33
+ ? redisGuard.run(fn, { stage, key: keyName })
34
+ : Promise.resolve().then(fn)
35
+ );
28
36
 
29
37
  try {
30
- const type = (await redisClient.type(keyName)).toLowerCase();
38
+ const type = knownType || (await runRedis('import.type', () => redisClient.type(keyName))).toLowerCase();
31
39
  if (!SUPPORTED_TYPES.has(type)) {
32
40
  return { ok: false, skipped: true };
33
41
  }
34
42
 
35
- let pttl = await redisClient.pTTL(keyName);
43
+ let pttl = await runRedis('import.pttl', () => redisClient.pTTL(keyName));
36
44
  if (pttl === -2) pttl = -1;
37
45
  const expiresAt = pttl > 0 ? now + pttl : null;
38
46
  const keyBuf = asKey(keyName);
@@ -40,7 +48,7 @@ export async function importKeyFromRedis(redisClient, keyName, storages, options
40
48
  let bytes = keyBuf.length;
41
49
 
42
50
  if (type === 'string') {
43
- const value = await redisClient.get(keyName);
51
+ const value = await runRedis('import.string.get', () => redisClient.get(keyName));
44
52
  if (value === undefined || value === null) return { ok: false, skipped: true };
45
53
  const valBuf = asValue(value);
46
54
  bytes += valBuf.length;
@@ -49,7 +57,7 @@ export async function importKeyFromRedis(redisClient, keyName, storages, options
49
57
  }
50
58
 
51
59
  if (type === 'hash') {
52
- const obj = await redisClient.hGetAll(keyName);
60
+ const obj = await runRedis('import.hash.hGetAll', () => redisClient.hGetAll(keyName));
53
61
  if (!obj || typeof obj !== 'object') return { ok: false, skipped: true };
54
62
  const pairs = [];
55
63
  for (const [f, v] of Object.entries(obj)) {
@@ -65,7 +73,7 @@ export async function importKeyFromRedis(redisClient, keyName, storages, options
65
73
  }
66
74
 
67
75
  if (type === 'set') {
68
- const members = await redisClient.sMembers(keyName);
76
+ const members = await runRedis('import.set.sMembers', () => redisClient.sMembers(keyName));
69
77
  if (!members || !members.length) return { ok: false, skipped: true };
70
78
  const memberBuffers = members.map((m) => toBuffer(m));
71
79
  for (const b of memberBuffers) bytes += b.length;
@@ -75,7 +83,7 @@ export async function importKeyFromRedis(redisClient, keyName, storages, options
75
83
  }
76
84
 
77
85
  if (type === 'list') {
78
- const elements = await redisClient.lRange(keyName, 0, -1);
86
+ const elements = await runRedis('import.list.lRange', () => redisClient.lRange(keyName, 0, -1));
79
87
  if (!elements || !elements.length) return { ok: false, skipped: true };
80
88
  const valueBuffers = elements.map((e) => toBuffer(e));
81
89
  for (const b of valueBuffers) bytes += b.length;
@@ -85,7 +93,7 @@ export async function importKeyFromRedis(redisClient, keyName, storages, options
85
93
  }
86
94
 
87
95
  if (type === 'zset') {
88
- const withScores = await redisClient.zRangeWithScores(keyName, 0, -1);
96
+ const withScores = await runRedis('import.zset.zRangeWithScores', () => redisClient.zRangeWithScores(keyName, 0, -1));
89
97
  if (!withScores || !withScores.length) return { ok: false, skipped: true };
90
98
  const pairs = withScores.map((item) => ({
91
99
  member: toBuffer(item.value),
@@ -99,6 +107,7 @@ export async function importKeyFromRedis(redisClient, keyName, storages, options
99
107
 
100
108
  return { ok: false, skipped: true };
101
109
  } catch (err) {
110
+ if (isFatalRedisCommandError(err)) throw err;
102
111
  return { ok: false, error: true, errorMessage: err.message };
103
112
  }
104
113
  }
@@ -38,6 +38,7 @@ import { startDirtyTracker as startDirtyTrackerProcess } from './tracker.js';
38
38
  * @property {number} [maxRps=0] - Max requests/s (0 = unlimited).
39
39
  * @property {number} [batchKeys=200]
40
40
  * @property {number} [batchBytes=67108864] - 64 MB default.
41
+ * @property {number} [redisCommandTimeoutMs=30000] - Fail a stuck Redis command instead of waiting forever.
41
42
  * @property {string} [configCommand='CONFIG'] - Redis CONFIG command name. Override if renamed for security.
42
43
  */
43
44
 
@@ -67,6 +68,7 @@ export function createMigration({
67
68
  maxRps = 0,
68
69
  batchKeys = 200,
69
70
  batchBytes = 64 * 1024 * 1024,
71
+ redisCommandTimeoutMs = 30000,
70
72
  configCommand = 'CONFIG',
71
73
  } = {}) {
72
74
  if (!to) throw new Error('createMigration: "to" (db path) is required');
@@ -168,6 +170,7 @@ export function createMigration({
168
170
  max_rps: maxRps,
169
171
  batch_keys: batchKeys,
170
172
  batch_bytes: batchBytes,
173
+ redis_command_timeout_ms: redisCommandTimeoutMs,
171
174
  resume,
172
175
  onProgress,
173
176
  });
@@ -200,6 +203,7 @@ export function createMigration({
200
203
  pragmaTemplate,
201
204
  batch_keys: bk,
202
205
  max_rps: rps,
206
+ redis_command_timeout_ms: redisCommandTimeoutMs,
203
207
  onProgress,
204
208
  });
205
209
  },
@@ -0,0 +1,174 @@
1
/**
 * Guard Redis migration commands with timeouts and abort-aware progress context.
 *
 * Shared by the bulk importer and the dirty-key applier: each Redis command is
 * wrapped so a hung connection fails fast with stage/key context instead of
 * stalling the whole migration.
 */

// Error codes treated as run-fatal by isFatalRedisCommandError(): callers
// rethrow these past their per-key error logging so the whole run stops.
const FATAL_CODES = new Set([
  'REDIS_COMMAND_TIMEOUT',
  'BULK_ABORTED',
  'APPLY_DIRTY_ABORTED',
  'MIGRATION_ABORTED',
]);
11
+
12
/**
 * Coerce a Redis key into a printable string for progress/error context.
 * Accepts a Buffer, a string, or nothing; null/undefined map to null.
 */
function normalizeKey(key) {
  if (key === null || key === undefined) return null;
  return Buffer.isBuffer(key) ? key.toString('utf8') : String(key);
}
17
+
18
/**
 * Annotate an Error with migration context (stage/key/start time) without
 * clobbering any of those fields a caller has already set. Non-object errors
 * and missing snapshots pass through untouched.
 */
function attachContext(err, snapshot) {
  const canAnnotate = snapshot && err && typeof err === 'object';
  if (!canAnnotate) return err;
  const defaults = {
    migrationStage: snapshot.stage,
    migrationKey: snapshot.key,
    migrationActiveSince: snapshot.startedAt,
  };
  for (const [prop, value] of Object.entries(defaults)) {
    if (!(prop in err)) err[prop] = value;
  }
  return err;
}
25
+
26
/**
 * Append "during <stage>" and, when present, `for key "<key>"` to a
 * human-readable message. With no snapshot, the message is returned as-is.
 */
function formatContext(message, snapshot) {
  if (!snapshot) return message;
  let text = `${message} during ${snapshot.stage}`;
  if (snapshot.key) text += ` for key "${snapshot.key}"`;
  return text;
}
32
+
33
/**
 * Best-effort teardown of the Redis client's transport. Calls the first of
 * `disconnect`/`destroy` the client exposes and swallows every failure —
 * we are aborting precisely because the client is presumed unhealthy.
 */
function abortRedisTransport(redisClient) {
  if (!redisClient) return;
  const closer = ['disconnect', 'destroy'].find(
    (name) => typeof redisClient[name] === 'function'
  );
  if (!closer) return;
  try {
    const outcome = redisClient[closer]();
    // Promise-returning closers get a no-op rejection handler so teardown
    // never produces an unhandled rejection.
    if (outcome && typeof outcome.catch === 'function') {
      outcome.catch(() => {});
    }
  } catch (_) {
    // Ignore synchronous failures: teardown is best effort.
  }
}
49
+
50
/**
 * True when an error's code marks it run-fatal (timeout/abort): such errors
 * must be rethrown past per-key error logging so the whole run stops.
 */
export function isFatalRedisCommandError(err) {
  if (!err || typeof err !== 'object') return false;
  return FATAL_CODES.has(err.code);
}
53
+
54
/**
 * Create a guard that runs individual Redis commands with a per-command
 * timeout and abort support for the migration pipeline.
 *
 * @param {object} [options]
 * @param {object} [options.redisClient] - Client whose transport is torn down
 *   (via `disconnect()`/`destroy()`) when a command times out or the run aborts.
 * @param {number} [options.timeoutMs=30000] - Default per-command timeout.
 * @returns {{ run: Function, snapshot: Function, signalAbort: Function, abortTransport: Function }}
 */
export function createRedisCommandGuard({ redisClient, timeoutMs = 30000 } = {}) {
  // Rejection callbacks for commands currently in flight; signalAbort() and
  // the timeout path fan out through these so pending awaits fail at once.
  const abortListeners = new Set();
  // Descriptor of the in-flight command ({ stage, key, startedAt }), or null when idle.
  let active = null;
  // Once set, the guard is permanently dead: every later run() rethrows this.
  let terminalError = null;
  let transportAborted = false;

  // Describe what the guard is waiting on right now (for progress/stall output).
  function snapshot() {
    if (!active) return null;
    return {
      stage: active.stage,
      key: active.key,
      startedAt: active.startedAt,
      activeForMs: Date.now() - active.startedAt,
    };
  }

  // Reject every pending run() with the terminal error; listener failures are ignored.
  function notifyAbort(err) {
    for (const listener of abortListeners) {
      try {
        listener(err);
      } catch (_) {
        // Ignore listener failures while aborting.
      }
    }
  }

  // Tear down the Redis transport at most once, even if timeout and signal race.
  function abortTransport() {
    if (transportAborted) return;
    transportAborted = true;
    abortRedisTransport(redisClient);
  }

  // Mark the guard as aborted: build a context-rich error, kill the transport,
  // and reject any command currently awaiting Redis. Idempotent — the first
  // terminal error wins and is returned on repeat calls.
  function signalAbort(message, code = 'MIGRATION_ABORTED') {
    if (terminalError) return terminalError;
    const current = snapshot();
    const err = attachContext(new Error(formatContext(message, current)), current);
    err.code = code;
    terminalError = err;
    abortTransport();
    notifyAbort(err);
    return err;
  }

  /**
   * Run one Redis command under the guard.
   *
   * @param {() => Promise<any>} fn - Thunk issuing the Redis command.
   * @param {{ stage: string, key?: string|Buffer|null, timeoutMs?: number }} [options]
   * @returns {Promise<any>} Command result; rejects with a REDIS_COMMAND_TIMEOUT
   *   (or abort) error carrying stage/key context via attachContext().
   */
  async function run(fn, { stage, key = null, timeoutMs: overrideTimeoutMs } = {}) {
    if (terminalError) throw terminalError;

    const opTimeoutMs = overrideTimeoutMs ?? timeoutMs;
    const opKey = normalizeKey(key);
    const opState = {
      stage,
      key: opKey,
      startedAt: Date.now(),
    };
    // NOTE(review): commands appear to run one at a time in the callers; a
    // second concurrent run() would overwrite `active` for snapshot purposes.
    active = opState;

    return await new Promise((resolve, reject) => {
      let settled = false;
      let timeoutId = null;

      const cleanup = () => {
        abortListeners.delete(onAbort);
        if (timeoutId) clearTimeout(timeoutId);
        // Only clear `active` if it still points at this operation.
        if (active === opState) active = null;
      };

      // First settle wins; any later resolve/reject is ignored.
      const finishResolve = (value) => {
        if (settled) return;
        settled = true;
        cleanup();
        resolve(value);
      };

      const finishReject = (err) => {
        if (settled) return;
        settled = true;
        cleanup();
        reject(err);
      };

      const onAbort = (err) => {
        finishReject(err);
      };

      abortListeners.add(onAbort);

      // A non-finite or <= 0 timeout disables the per-command timer entirely.
      if (Number.isFinite(opTimeoutMs) && opTimeoutMs > 0) {
        timeoutId = setTimeout(() => {
          const current = snapshot() ?? {
            stage,
            key: opKey,
            startedAt: opState.startedAt,
            activeForMs: Date.now() - opState.startedAt,
          };
          const err = attachContext(
            new Error(formatContext(`Redis command timed out after ${opTimeoutMs}ms`, current)),
            current
          );
          err.code = 'REDIS_COMMAND_TIMEOUT';
          // A timeout is terminal: the connection is treated as dead, the
          // transport is torn down, and all pending commands are failed.
          terminalError = err;
          abortTransport();
          notifyAbort(err);
          finishReject(err);
        }, opTimeoutMs);
      }

      // Start the command; the Promise.resolve() prefix routes even a
      // synchronous throw from fn into the rejection handler below.
      Promise.resolve()
        .then(fn)
        .then(
          (value) => finishResolve(value),
          (err) => finishReject(attachContext(err, snapshot() ?? opState))
        );
    });
  }

  return {
    run,
    snapshot,
    signalAbort,
    abortTransport,
  };
}
@@ -0,0 +1,122 @@
1
+ import { describe, it } from 'node:test';
2
+ import assert from 'node:assert/strict';
3
+ import { runBulkImport } from '../../src/migration/bulk.js';
4
+ import { runApplyDirty } from '../../src/migration/apply-dirty.js';
5
+ import { createRedisCommandGuard } from '../../src/migration/redis-guard.js';
6
+ import { openDb } from '../../src/storage/sqlite/db.js';
7
+ import { createRun, getRun, upsertDirtyKey } from '../../src/migration/registry.js';
8
+ import { tmpDbPath } from '../helpers/tmp.js';
9
+
10
/**
 * A promise that never settles — stands in for a Redis command that hangs,
 * so the timeout/abort paths can be exercised deterministically.
 */
function never() {
  const pending = new Promise(() => {
    // Intentionally empty: no resolve/reject path exists.
  });
  return pending;
}
13
+
14
describe('migration Redis timeouts', () => {
  // A SCAN that never returns must be killed by redis_command_timeout_ms,
  // fail the run with stage context, and tear the client down exactly once.
  it('runBulkImport fails a stuck scan with timeout context', async () => {
    const dbPath = tmpDbPath();
    let disconnectCalls = 0;
    // Minimal fake client: scan hangs forever; disconnect() is counted to
    // verify the guard tears the transport down.
    const redisClient = {
      scan: () => never(),
      disconnect() {
        disconnectCalls += 1;
      },
    };

    // NOTE(review): no createRun() here — this appears to rely on
    // runBulkImport creating the run row from sourceUri; confirm against
    // bulk.js/registry.js.
    await assert.rejects(
      () => runBulkImport(redisClient, dbPath, 'bulk-timeout', {
        sourceUri: 'redis://example.test:6379',
        pragmaTemplate: 'minimal',
        redis_command_timeout_ms: 20,
      }),
      (err) => {
        assert.equal(err.code, 'REDIS_COMMAND_TIMEOUT');
        // The stage name must survive into the error message.
        assert.match(err.message, /bulk\.scan/);
        return true;
      }
    );

    // The failure must be persisted: status 'failed' with the stage in last_error.
    const db = openDb(dbPath, { pragmaTemplate: 'minimal' });
    try {
      const run = getRun(db, 'bulk-timeout');
      assert.equal(run.status, 'failed');
      assert.match(run.last_error, /bulk\.scan/);
    } finally {
      db.close();
    }

    assert.equal(disconnectCalls, 1);
  });

  // A hung TYPE lookup during apply-dirty must time out with both the stage
  // and the offending key in the error, and record the key in migration_errors.
  it('runApplyDirty fails a stuck key lookup with key context', async () => {
    const dbPath = tmpDbPath();
    // Seed a run with one dirty key so the applier has something to process.
    const db = openDb(dbPath, { pragmaTemplate: 'minimal' });
    try {
      createRun(db, 'apply-timeout', 'redis://example.test:6379');
      upsertDirtyKey(db, 'apply-timeout', 'stuck:key', 'set');
    } finally {
      db.close();
    }

    let disconnectCalls = 0;
    // type() hangs forever, forcing the guard's timeout path.
    const redisClient = {
      type: () => never(),
      disconnect() {
        disconnectCalls += 1;
      },
    };

    await assert.rejects(
      () => runApplyDirty(redisClient, dbPath, 'apply-timeout', {
        pragmaTemplate: 'minimal',
        redis_command_timeout_ms: 20,
      }),
      (err) => {
        assert.equal(err.code, 'REDIS_COMMAND_TIMEOUT');
        assert.match(err.message, /apply-dirty\.type/);
        assert.match(err.message, /stuck:key/);
        return true;
      }
    );

    // Persistence checks: run failed with stage context, and the key made it
    // into the most recent migration_errors row.
    const verifyDb = openDb(dbPath, { pragmaTemplate: 'minimal' });
    try {
      const run = getRun(verifyDb, 'apply-timeout');
      assert.equal(run.status, 'failed');
      assert.match(run.last_error, /apply-dirty\.type/);
      const row = verifyDb.prepare('SELECT message FROM migration_errors WHERE run_id = ? ORDER BY rowid DESC LIMIT 1').get('apply-timeout');
      assert.match(row.message, /stuck:key/);
    } finally {
      verifyDb.close();
    }

    assert.equal(disconnectCalls, 1);
  });

  // signalAbort() must reject an in-flight command right away (long before the
  // 1000ms timeout) with the abort code plus stage context, and disconnect once.
  it('redis guard aborts a pending command immediately', async () => {
    let disconnectCalls = 0;
    const guard = createRedisCommandGuard({
      redisClient: {
        disconnect() {
          disconnectCalls += 1;
        },
      },
      timeoutMs: 1000,
    });

    const pending = guard.run(() => never(), { stage: 'bulk.scan' });
    // Abort on the microtask queue, after assert.rejects has attached its
    // handlers to `pending`.
    queueMicrotask(() => {
      guard.signalAbort('Bulk import interrupted by signal (SIGINT/SIGTERM)', 'BULK_ABORTED');
    });

    await assert.rejects(
      () => pending,
      (err) => {
        assert.equal(err.code, 'BULK_ABORTED');
        assert.match(err.message, /bulk\.scan/);
        return true;
      }
    );

    assert.equal(disconnectCalls, 1);
  });
});