resplite 1.3.0 → 1.3.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +5 -1
- package/package.json +1 -1
- package/src/migration/apply-dirty.js +109 -7
- package/src/migration/bulk.js +97 -15
- package/src/migration/import-one.js +19 -10
- package/src/migration/index.js +4 -0
- package/src/migration/redis-guard.js +174 -0
- package/test/unit/migration-timeouts.test.js +122 -0
package/README.md
CHANGED
|
@@ -107,6 +107,7 @@ const m = createMigration({
|
|
|
107
107
|
batchKeys: 1000,
|
|
108
108
|
batchBytes: 64 * 1024 * 1024, // 64 MB
|
|
109
109
|
maxRps: 0, // 0 = unlimited
|
|
110
|
+
redisCommandTimeoutMs: 30000, // fail a stuck Redis command instead of waiting forever
|
|
110
111
|
|
|
111
112
|
// If your Redis deployment renamed CONFIG for security:
|
|
112
113
|
// configCommand: 'MYCONFIG',
|
|
@@ -189,7 +190,10 @@ await m.close();
|
|
|
189
190
|
`resume` defaults to `true`. It doesn't matter whether it's the first run or a resume: the same script works for both starting and continuing. The first run starts from cursor 0; if the process is interrupted (Ctrl+C, crash, etc.), running the script again continues from the last checkpoint. You don't need to pass `resume: false` on the first run or change anything to resume.
|
|
190
191
|
|
|
191
192
|
**Graceful shutdown**
|
|
192
|
-
On SIGINT (Ctrl+C) or SIGTERM, the bulk importer checkpoints progress, sets the run status to `aborted`, closes the SQLite database cleanly (so WAL is checkpointed and the file is not left open),
|
|
193
|
+
On SIGINT (Ctrl+C) or SIGTERM, the bulk importer checkpoints progress, sets the run status to `aborted`, closes the SQLite database cleanly (so WAL is checkpointed and the file is not left open), and tears down the Redis client so a stuck command does not trap the process forever. You can safely interrupt a long-running bulk and resume later. The same applies to **apply-dirty**: Ctrl+C stops the delta apply, sets the run to `aborted`, and exits so you can inspect and retry.
|
|
194
|
+
|
|
195
|
+
**Errors and stalls**
|
|
196
|
+
Use `onProgress` to see progress and detect problems. The callback receives the run row (e.g. `scanned_keys`, `migrated_keys`, `dirty_keys_applied`, `last_error`) plus transient fields like `_activeStage`, `_activeKey`, `_activeForMs`, `_stallWarning`, and `_stallMessage` when the importer is waiting on Redis. Each Redis command is also protected by `redisCommandTimeoutMs` (30s by default), so a dead connection or pathological key fails with the active stage/key in the error instead of silently hanging forever. When a key fails to import, the error is logged to `migration_errors` and the run’s `last_error` is set; the real error message is included so you can diagnose. After any failure, check `m.status()` and query `migration_errors` in the DB if needed.
|
|
193
197
|
|
|
194
198
|
The JS API can run the dirty-key tracker in-process via `m.startDirtyTracker()` / `m.stopDirtyTracker()`, so the full flow stays inside a single script.
|
|
195
199
|
|
package/package.json
CHANGED
|
@@ -9,15 +9,21 @@ import { createHashesStorage } from '../storage/sqlite/hashes.js';
|
|
|
9
9
|
import { createSetsStorage } from '../storage/sqlite/sets.js';
|
|
10
10
|
import { createListsStorage } from '../storage/sqlite/lists.js';
|
|
11
11
|
import { createZsetsStorage } from '../storage/sqlite/zsets.js';
|
|
12
|
-
import { getRun, getDirtyBatch, markDirtyState, logError, RUN_STATUS } from './registry.js';
|
|
12
|
+
import { getRun, getDirtyBatch, markDirtyState, logError, setRunStatus, updateBulkProgress, RUN_STATUS } from './registry.js';
|
|
13
13
|
import { importKeyFromRedis } from './import-one.js';
|
|
14
|
+
import { createRedisCommandGuard, isFatalRedisCommandError } from './redis-guard.js';
|
|
14
15
|
|
|
15
16
|
function sleep(ms) {
|
|
16
17
|
return new Promise((resolve) => setTimeout(resolve, ms));
|
|
17
18
|
}
|
|
18
19
|
|
|
20
|
+
const HEARTBEAT_INTERVAL_MS = 15000;
|
|
21
|
+
const STALL_WARNING_MS = 60000;
|
|
22
|
+
|
|
19
23
|
/**
|
|
20
24
|
* Apply dirty keys: for each key in registry with state=dirty, reimport from Redis or delete in destination.
|
|
25
|
+
* On SIGINT/SIGTERM, sets run status to ABORTED, closes DB and rethrows so the process can exit and Ctrl+C works.
|
|
26
|
+
*
|
|
21
27
|
* @param {import('redis').RedisClientType} redisClient
|
|
22
28
|
* @param {string} dbPath
|
|
23
29
|
* @param {string} runId
|
|
@@ -25,12 +31,73 @@ function sleep(ms) {
|
|
|
25
31
|
* @param {string} [options.pragmaTemplate='default']
|
|
26
32
|
* @param {number} [options.batch_keys=200]
|
|
27
33
|
* @param {number} [options.max_rps=0]
|
|
28
|
-
* @param {
|
|
34
|
+
* @param {number} [options.redis_command_timeout_ms=30000]
|
|
35
|
+
* @param {(run: object) => void | Promise<void>} [options.onProgress] - Called after each batch and on heartbeat; may receive _stallWarning if no progress for 60s.
|
|
29
36
|
*/
|
|
30
37
|
export async function runApplyDirty(redisClient, dbPath, runId, options = {}) {
|
|
31
|
-
const { pragmaTemplate = 'default', batch_keys = 200, max_rps = 0, onProgress } = options;
|
|
38
|
+
const { pragmaTemplate = 'default', batch_keys = 200, max_rps = 0, redis_command_timeout_ms = 30000, onProgress } = options;
|
|
32
39
|
|
|
33
40
|
const db = openDb(dbPath, { pragmaTemplate });
|
|
41
|
+
const redisGuard = createRedisCommandGuard({
|
|
42
|
+
redisClient,
|
|
43
|
+
timeoutMs: redis_command_timeout_ms,
|
|
44
|
+
});
|
|
45
|
+
let abortRequested = false;
|
|
46
|
+
const onSignal = () => {
|
|
47
|
+
abortRequested = true;
|
|
48
|
+
redisGuard.signalAbort('Apply dirty interrupted by signal (SIGINT/SIGTERM)', 'APPLY_DIRTY_ABORTED');
|
|
49
|
+
};
|
|
50
|
+
process.on('SIGINT', onSignal);
|
|
51
|
+
process.on('SIGTERM', onSignal);
|
|
52
|
+
|
|
53
|
+
function enrichProgress(run) {
|
|
54
|
+
const active = redisGuard.snapshot();
|
|
55
|
+
if (!active) return run;
|
|
56
|
+
|
|
57
|
+
return {
|
|
58
|
+
...run,
|
|
59
|
+
_activeStage: active.stage,
|
|
60
|
+
_activeKey: active.key,
|
|
61
|
+
_activeSince: active.startedAt,
|
|
62
|
+
_activeForMs: active.activeForMs,
|
|
63
|
+
};
|
|
64
|
+
}
|
|
65
|
+
|
|
66
|
+
function withStallContext(run) {
|
|
67
|
+
const active = redisGuard.snapshot();
|
|
68
|
+
let payload = enrichProgress(run);
|
|
69
|
+
if (run.updated_at && Date.now() - run.updated_at > STALL_WARNING_MS) {
|
|
70
|
+
const detail = active
|
|
71
|
+
? ` Active operation: ${active.stage}${active.key ? ` key=${active.key}` : ''}.`
|
|
72
|
+
: '';
|
|
73
|
+
payload = {
|
|
74
|
+
...payload,
|
|
75
|
+
_stallWarning: true,
|
|
76
|
+
_stallMessage: `No progress for 60s — possible hang or Redis timeout.${detail}`,
|
|
77
|
+
};
|
|
78
|
+
}
|
|
79
|
+
return payload;
|
|
80
|
+
}
|
|
81
|
+
|
|
82
|
+
let abortHandled = false;
|
|
83
|
+
function markAborted() {
|
|
84
|
+
if (abortHandled) return getRun(db, runId);
|
|
85
|
+
abortHandled = true;
|
|
86
|
+
setRunStatus(db, runId, RUN_STATUS.ABORTED);
|
|
87
|
+
updateBulkProgress(db, runId, { last_error: 'Interrupted by SIGINT/SIGTERM' });
|
|
88
|
+
return getRun(db, runId);
|
|
89
|
+
}
|
|
90
|
+
|
|
91
|
+
let heartbeatTimer = null;
|
|
92
|
+
if (onProgress) {
|
|
93
|
+
heartbeatTimer = setInterval(() => {
|
|
94
|
+
const run = getRun(db, runId);
|
|
95
|
+
if (!run) return;
|
|
96
|
+
const payload = withStallContext(run);
|
|
97
|
+
Promise.resolve(onProgress(payload)).catch(() => {});
|
|
98
|
+
}, HEARTBEAT_INTERVAL_MS);
|
|
99
|
+
}
|
|
100
|
+
|
|
34
101
|
const run = getRun(db, runId);
|
|
35
102
|
if (!run) throw new Error(`Run ${runId} not found`);
|
|
36
103
|
|
|
@@ -45,7 +112,9 @@ export async function runApplyDirty(redisClient, dbPath, runId, options = {}) {
|
|
|
45
112
|
const minIntervalMs = max_rps > 0 ? 1000 / max_rps : 0;
|
|
46
113
|
let lastKeyTime = 0;
|
|
47
114
|
|
|
115
|
+
try {
|
|
48
116
|
for (;;) {
|
|
117
|
+
if (abortRequested) break;
|
|
49
118
|
let r = getRun(db, runId);
|
|
50
119
|
if (r && r.status === RUN_STATUS.ABORTED) break;
|
|
51
120
|
while (r && r.status === RUN_STATUS.PAUSED) {
|
|
@@ -61,6 +130,7 @@ export async function runApplyDirty(redisClient, dbPath, runId, options = {}) {
|
|
|
61
130
|
|
|
62
131
|
// ── Re-import (or remove) keys that changed while bulk was running ──
|
|
63
132
|
for (const { key: keyBuf } of dirtyBatch) {
|
|
133
|
+
if (abortRequested) break;
|
|
64
134
|
r = getRun(db, runId);
|
|
65
135
|
if (r && r.status === RUN_STATUS.ABORTED) break;
|
|
66
136
|
while (r && r.status === RUN_STATUS.PAUSED) {
|
|
@@ -76,22 +146,29 @@ export async function runApplyDirty(redisClient, dbPath, runId, options = {}) {
|
|
|
76
146
|
|
|
77
147
|
const keyName = keyBuf.toString('utf8');
|
|
78
148
|
try {
|
|
79
|
-
const type = (await
|
|
149
|
+
const type = (await redisGuard.run(
|
|
150
|
+
() => redisClient.type(keyName),
|
|
151
|
+
{ stage: 'apply-dirty.type', key: keyName }
|
|
152
|
+
)).toLowerCase();
|
|
80
153
|
if (type === 'none' || !type) {
|
|
81
154
|
keys.delete(keyBuf);
|
|
82
155
|
markDirtyState(db, runId, keyBuf, 'deleted');
|
|
83
156
|
} else {
|
|
84
|
-
const outcome = await importKeyFromRedis(redisClient, keyName, storages, {
|
|
157
|
+
const outcome = await importKeyFromRedis(redisClient, keyName, storages, {
|
|
158
|
+
redisGuard,
|
|
159
|
+
knownType: type,
|
|
160
|
+
});
|
|
85
161
|
if (outcome.ok) {
|
|
86
162
|
markDirtyState(db, runId, keyBuf, 'applied');
|
|
87
163
|
} else if (outcome.skipped) {
|
|
88
164
|
markDirtyState(db, runId, keyBuf, 'skipped');
|
|
89
165
|
} else {
|
|
90
|
-
logError(db, runId, 'dirty_apply', outcome.error ? 'Import failed' : 'Skipped', keyName);
|
|
166
|
+
logError(db, runId, 'dirty_apply', outcome.errorMessage || (outcome.error ? 'Import failed' : 'Skipped'), keyName);
|
|
91
167
|
markDirtyState(db, runId, keyBuf, 'error');
|
|
92
168
|
}
|
|
93
169
|
}
|
|
94
170
|
} catch (err) {
|
|
171
|
+
if (isFatalRedisCommandError(err)) throw err;
|
|
95
172
|
logError(db, runId, 'dirty_apply', err.message, keyBuf);
|
|
96
173
|
markDirtyState(db, runId, keyBuf, 'error');
|
|
97
174
|
}
|
|
@@ -102,6 +179,7 @@ export async function runApplyDirty(redisClient, dbPath, runId, options = {}) {
|
|
|
102
179
|
// Marked as 'deleted' in the run counter; state changed away from 'deleted'
|
|
103
180
|
// so the next getDirtyBatch call won't return them again (avoiding infinite loop).
|
|
104
181
|
for (const { key: keyBuf } of deletedBatch) {
|
|
182
|
+
if (abortRequested) break;
|
|
105
183
|
r = getRun(db, runId);
|
|
106
184
|
if (r && r.status === RUN_STATUS.ABORTED) break;
|
|
107
185
|
while (r && r.status === RUN_STATUS.PAUSED) {
|
|
@@ -127,9 +205,33 @@ export async function runApplyDirty(redisClient, dbPath, runId, options = {}) {
|
|
|
127
205
|
}
|
|
128
206
|
if (batchSize > 0 && onProgress) {
|
|
129
207
|
const run = getRun(db, runId);
|
|
130
|
-
if (run) Promise.resolve(onProgress(run)).catch(() => {});
|
|
208
|
+
if (run) Promise.resolve(onProgress(enrichProgress(run))).catch(() => {});
|
|
131
209
|
}
|
|
132
210
|
}
|
|
133
211
|
|
|
212
|
+
if (abortRequested) {
|
|
213
|
+
const run = markAborted();
|
|
214
|
+
if (onProgress && run) Promise.resolve(onProgress(enrichProgress(run))).catch(() => {});
|
|
215
|
+
const err = new Error('Apply dirty interrupted by signal (SIGINT/SIGTERM)');
|
|
216
|
+
err.code = 'APPLY_DIRTY_ABORTED';
|
|
217
|
+
throw err;
|
|
218
|
+
}
|
|
219
|
+
|
|
134
220
|
return getRun(db, runId);
|
|
221
|
+
} catch (err) {
|
|
222
|
+
if (err.code === 'APPLY_DIRTY_ABORTED') {
|
|
223
|
+
const run = markAborted();
|
|
224
|
+
if (onProgress && run) Promise.resolve(onProgress(enrichProgress(run))).catch(() => {});
|
|
225
|
+
} else {
|
|
226
|
+
setRunStatus(db, runId, RUN_STATUS.FAILED);
|
|
227
|
+
updateBulkProgress(db, runId, { last_error: err.message });
|
|
228
|
+
logError(db, runId, 'dirty_apply', err.message, null);
|
|
229
|
+
}
|
|
230
|
+
throw err;
|
|
231
|
+
} finally {
|
|
232
|
+
if (heartbeatTimer) clearInterval(heartbeatTimer);
|
|
233
|
+
process.off('SIGINT', onSignal);
|
|
234
|
+
process.off('SIGTERM', onSignal);
|
|
235
|
+
db.close();
|
|
236
|
+
}
|
|
135
237
|
}
|
package/src/migration/bulk.js
CHANGED
|
@@ -18,6 +18,7 @@ import {
|
|
|
18
18
|
RUN_STATUS,
|
|
19
19
|
} from './registry.js';
|
|
20
20
|
import { importKeyFromRedis } from './import-one.js';
|
|
21
|
+
import { createRedisCommandGuard } from './redis-guard.js';
|
|
21
22
|
|
|
22
23
|
function parseScanResult(result) {
|
|
23
24
|
if (Array.isArray(result)) {
|
|
@@ -35,6 +36,9 @@ function sleep(ms) {
|
|
|
35
36
|
return new Promise((resolve) => setTimeout(resolve, ms));
|
|
36
37
|
}
|
|
37
38
|
|
|
39
|
+
const HEARTBEAT_INTERVAL_MS = 15000;
|
|
40
|
+
const STALL_WARNING_MS = 60000;
|
|
41
|
+
|
|
38
42
|
/**
|
|
39
43
|
* Run bulk import: SCAN keys from Redis, import into RespLite DB with checkpointing.
|
|
40
44
|
* On SIGINT/SIGTERM, checkpoint progress, set run status to ABORTED, close DB and rethrow.
|
|
@@ -51,8 +55,9 @@ function sleep(ms) {
|
|
|
51
55
|
* @param {number} [options.batch_keys=200]
|
|
52
56
|
* @param {number} [options.batch_bytes=64*1024*1024] - 64MB
|
|
53
57
|
* @param {number} [options.checkpoint_interval_sec=30]
|
|
58
|
+
* @param {number} [options.redis_command_timeout_ms=30000]
|
|
54
59
|
* @param {boolean} [options.resume=true] - true: start from 0 or continue from checkpoint; false: always start from 0
|
|
55
|
-
* @param {function(run): void} [options.onProgress] - called after checkpoint
|
|
60
|
+
* @param {function(run): void} [options.onProgress] - called after checkpoint and on heartbeat; may receive _stallWarning if no progress for 60s
|
|
56
61
|
*/
|
|
57
62
|
export async function runBulkImport(redisClient, dbPath, runId, options = {}) {
|
|
58
63
|
const {
|
|
@@ -63,18 +68,86 @@ export async function runBulkImport(redisClient, dbPath, runId, options = {}) {
|
|
|
63
68
|
batch_keys = 200,
|
|
64
69
|
batch_bytes = 64 * 1024 * 1024,
|
|
65
70
|
checkpoint_interval_sec = 30,
|
|
71
|
+
redis_command_timeout_ms = 30000,
|
|
66
72
|
resume = true,
|
|
67
73
|
onProgress,
|
|
68
74
|
} = options;
|
|
69
75
|
|
|
70
76
|
const db = openDb(dbPath, { pragmaTemplate });
|
|
77
|
+
const redisGuard = createRedisCommandGuard({
|
|
78
|
+
redisClient,
|
|
79
|
+
timeoutMs: redis_command_timeout_ms,
|
|
80
|
+
});
|
|
71
81
|
let abortRequested = false;
|
|
72
82
|
const onSignal = () => {
|
|
73
83
|
abortRequested = true;
|
|
84
|
+
redisGuard.signalAbort('Bulk import interrupted by signal (SIGINT/SIGTERM)', 'BULK_ABORTED');
|
|
74
85
|
};
|
|
75
86
|
process.on('SIGINT', onSignal);
|
|
76
87
|
process.on('SIGTERM', onSignal);
|
|
77
88
|
|
|
89
|
+
function enrichProgress(run) {
|
|
90
|
+
const active = redisGuard.snapshot();
|
|
91
|
+
if (!active) return run;
|
|
92
|
+
|
|
93
|
+
return {
|
|
94
|
+
...run,
|
|
95
|
+
_activeStage: active.stage,
|
|
96
|
+
_activeKey: active.key,
|
|
97
|
+
_activeSince: active.startedAt,
|
|
98
|
+
_activeForMs: active.activeForMs,
|
|
99
|
+
};
|
|
100
|
+
}
|
|
101
|
+
|
|
102
|
+
function withStallContext(run) {
|
|
103
|
+
const active = redisGuard.snapshot();
|
|
104
|
+
let payload = enrichProgress(run);
|
|
105
|
+
if (run.updated_at && Date.now() - run.updated_at > STALL_WARNING_MS) {
|
|
106
|
+
const detail = active
|
|
107
|
+
? ` Active operation: ${active.stage}${active.key ? ` key=${active.key}` : ''}.`
|
|
108
|
+
: '';
|
|
109
|
+
payload = {
|
|
110
|
+
...payload,
|
|
111
|
+
_stallWarning: true,
|
|
112
|
+
_stallMessage: `No progress for 60s — possible hang or Redis timeout.${detail}`,
|
|
113
|
+
};
|
|
114
|
+
}
|
|
115
|
+
return payload;
|
|
116
|
+
}
|
|
117
|
+
|
|
118
|
+
let heartbeatTimer = null;
|
|
119
|
+
if (onProgress) {
|
|
120
|
+
heartbeatTimer = setInterval(() => {
|
|
121
|
+
const run = getRun(db, runId);
|
|
122
|
+
if (!run) return;
|
|
123
|
+
const payload = withStallContext(run);
|
|
124
|
+
Promise.resolve(onProgress(payload)).catch(() => {});
|
|
125
|
+
}, HEARTBEAT_INTERVAL_MS);
|
|
126
|
+
}
|
|
127
|
+
|
|
128
|
+
let cursor = 0;
|
|
129
|
+
let scanned_keys = 0;
|
|
130
|
+
let migrated_keys = 0;
|
|
131
|
+
let skipped_keys = 0;
|
|
132
|
+
let error_keys = 0;
|
|
133
|
+
let migrated_bytes = 0;
|
|
134
|
+
let abortHandled = false;
|
|
135
|
+
function markAborted() {
|
|
136
|
+
if (abortHandled) return getRun(db, runId);
|
|
137
|
+
abortHandled = true;
|
|
138
|
+
updateBulkProgress(db, runId, {
|
|
139
|
+
scan_cursor: String(cursor),
|
|
140
|
+
scanned_keys,
|
|
141
|
+
migrated_keys,
|
|
142
|
+
skipped_keys,
|
|
143
|
+
error_keys,
|
|
144
|
+
migrated_bytes,
|
|
145
|
+
last_error: 'Interrupted by SIGINT/SIGTERM',
|
|
146
|
+
});
|
|
147
|
+
setRunStatus(db, runId, RUN_STATUS.ABORTED);
|
|
148
|
+
return getRun(db, runId);
|
|
149
|
+
}
|
|
150
|
+
|
|
78
151
|
try {
|
|
79
152
|
const keys = createKeysStorage(db);
|
|
80
153
|
const strings = createStringsStorage(db, keys);
|
|
@@ -88,12 +161,12 @@ export async function runBulkImport(redisClient, dbPath, runId, options = {}) {
|
|
|
88
161
|
let run = getRun(db, runId);
|
|
89
162
|
if (!run) throw new Error(`Run ${runId} not found`);
|
|
90
163
|
|
|
91
|
-
|
|
92
|
-
|
|
93
|
-
|
|
94
|
-
|
|
95
|
-
|
|
96
|
-
|
|
164
|
+
cursor = resume && run.scan_cursor !== undefined ? parseInt(String(run.scan_cursor), 10) : 0;
|
|
165
|
+
scanned_keys = resume ? (run.scanned_keys || 0) : 0;
|
|
166
|
+
migrated_keys = resume ? (run.migrated_keys || 0) : 0;
|
|
167
|
+
skipped_keys = resume ? (run.skipped_keys || 0) : 0;
|
|
168
|
+
error_keys = resume ? (run.error_keys || 0) : 0;
|
|
169
|
+
migrated_bytes = resume ? (run.migrated_bytes || 0) : 0;
|
|
97
170
|
|
|
98
171
|
if (!resume) {
|
|
99
172
|
updateBulkProgress(db, runId, { scan_cursor: String(cursor), scanned_keys, migrated_keys, skipped_keys, error_keys, migrated_bytes });
|
|
@@ -114,7 +187,10 @@ export async function runBulkImport(redisClient, dbPath, runId, options = {}) {
|
|
|
114
187
|
run = getRun(db, runId);
|
|
115
188
|
}
|
|
116
189
|
|
|
117
|
-
const result = await
|
|
190
|
+
const result = await redisGuard.run(
|
|
191
|
+
() => redisClient.scan(cursor, { COUNT: scan_count }),
|
|
192
|
+
{ stage: 'bulk.scan' }
|
|
193
|
+
);
|
|
118
194
|
const parsed = parseScanResult(result);
|
|
119
195
|
cursor = parsed.cursor;
|
|
120
196
|
const keyList = parsed.keys || [];
|
|
@@ -136,7 +212,10 @@ export async function runBulkImport(redisClient, dbPath, runId, options = {}) {
|
|
|
136
212
|
}
|
|
137
213
|
|
|
138
214
|
const now = Date.now();
|
|
139
|
-
const outcome = await importKeyFromRedis(redisClient, keyName, storages, {
|
|
215
|
+
const outcome = await importKeyFromRedis(redisClient, keyName, storages, {
|
|
216
|
+
now,
|
|
217
|
+
redisGuard,
|
|
218
|
+
});
|
|
140
219
|
if (outcome.ok) {
|
|
141
220
|
migrated_keys++;
|
|
142
221
|
migrated_bytes += outcome.bytes || 0;
|
|
@@ -146,7 +225,7 @@ export async function runBulkImport(redisClient, dbPath, runId, options = {}) {
|
|
|
146
225
|
skipped_keys++;
|
|
147
226
|
} else {
|
|
148
227
|
error_keys++;
|
|
149
|
-
logError(db, runId, 'bulk', outcome.error ? 'Import failed' : 'Skipped', keyName);
|
|
228
|
+
logError(db, runId, 'bulk', outcome.errorMessage || (outcome.error ? 'Import failed' : 'Skipped'), keyName);
|
|
150
229
|
}
|
|
151
230
|
|
|
152
231
|
const now2 = Date.now();
|
|
@@ -167,7 +246,7 @@ export async function runBulkImport(redisClient, dbPath, runId, options = {}) {
|
|
|
167
246
|
batchScanned = 0;
|
|
168
247
|
batchBytes = 0;
|
|
169
248
|
run = getRun(db, runId);
|
|
170
|
-
if (onProgress && run) onProgress(run);
|
|
249
|
+
if (onProgress && run) onProgress(enrichProgress(run));
|
|
171
250
|
}
|
|
172
251
|
}
|
|
173
252
|
} while (cursor !== 0);
|
|
@@ -181,9 +260,8 @@ export async function runBulkImport(redisClient, dbPath, runId, options = {}) {
|
|
|
181
260
|
error_keys,
|
|
182
261
|
migrated_bytes,
|
|
183
262
|
});
|
|
184
|
-
|
|
185
|
-
|
|
186
|
-
if (onProgress && run) onProgress(run);
|
|
263
|
+
run = markAborted();
|
|
264
|
+
if (onProgress && run) onProgress(enrichProgress(run));
|
|
187
265
|
const err = new Error('Bulk import interrupted by signal (SIGINT/SIGTERM)');
|
|
188
266
|
err.code = 'BULK_ABORTED';
|
|
189
267
|
throw err;
|
|
@@ -200,13 +278,17 @@ export async function runBulkImport(redisClient, dbPath, runId, options = {}) {
|
|
|
200
278
|
setRunStatus(db, runId, RUN_STATUS.COMPLETED);
|
|
201
279
|
return getRun(db, runId);
|
|
202
280
|
} catch (err) {
|
|
203
|
-
if (err.code
|
|
281
|
+
if (err.code === 'BULK_ABORTED') {
|
|
282
|
+
const run = markAborted();
|
|
283
|
+
if (onProgress && run) onProgress(enrichProgress(run));
|
|
284
|
+
} else {
|
|
204
285
|
setRunStatus(db, runId, RUN_STATUS.FAILED);
|
|
205
286
|
updateBulkProgress(db, runId, { last_error: err.message });
|
|
206
287
|
logError(db, runId, 'bulk', err.message, null);
|
|
207
288
|
}
|
|
208
289
|
throw err;
|
|
209
290
|
} finally {
|
|
291
|
+
if (heartbeatTimer) clearInterval(heartbeatTimer);
|
|
210
292
|
process.off('SIGINT', onSignal);
|
|
211
293
|
process.off('SIGTERM', onSignal);
|
|
212
294
|
db.close();
|
|
@@ -4,6 +4,7 @@
|
|
|
4
4
|
*/
|
|
5
5
|
|
|
6
6
|
import { asKey, asValue } from '../util/buffers.js';
|
|
7
|
+
import { isFatalRedisCommandError } from './redis-guard.js';
|
|
7
8
|
|
|
8
9
|
const SUPPORTED_TYPES = new Set(['string', 'hash', 'set', 'list', 'zset']);
|
|
9
10
|
|
|
@@ -19,20 +20,27 @@ function toBuffer(value) {
|
|
|
19
20
|
* @param {import('redis').RedisClientType} redisClient
|
|
20
21
|
* @param {string} keyName
|
|
21
22
|
* @param {{ keys: import('../storage/sqlite/keys.js').ReturnType<import('../storage/sqlite/keys.js').createKeysStorage>; strings: ReturnType<import('../storage/sqlite/strings.js').createStringsStorage>; hashes: ReturnType<import('../storage/sqlite/hashes.js').createHashesStorage>; sets: ReturnType<import('../storage/sqlite/sets.js').createSetsStorage>; lists: ReturnType<import('../storage/sqlite/lists.js').createListsStorage>; zsets: ReturnType<import('../storage/sqlite/zsets.js').createZsetsStorage> }} storages
|
|
22
|
-
* @param {{ now?: number }} options
|
|
23
|
-
* @returns {Promise<{ ok: boolean; skipped?: boolean; error?: boolean; bytes?: number }>}
|
|
23
|
+
* @param {{ now?: number; knownType?: string; redisGuard?: { run(fn: Function, options: { stage: string, key?: string | null }): Promise<any> } }} options
|
|
24
|
+
* @returns {Promise<{ ok: boolean; skipped?: boolean; error?: boolean; errorMessage?: string; bytes?: number }>}
|
|
24
25
|
*/
|
|
25
26
|
export async function importKeyFromRedis(redisClient, keyName, storages, options = {}) {
|
|
26
27
|
const now = options.now ?? Date.now();
|
|
28
|
+
const knownType = options.knownType ? String(options.knownType).toLowerCase() : null;
|
|
29
|
+
const redisGuard = options.redisGuard ?? null;
|
|
27
30
|
const { keys, strings, hashes, sets, lists, zsets } = storages;
|
|
31
|
+
const runRedis = (stage, fn) => (
|
|
32
|
+
redisGuard
|
|
33
|
+
? redisGuard.run(fn, { stage, key: keyName })
|
|
34
|
+
: Promise.resolve().then(fn)
|
|
35
|
+
);
|
|
28
36
|
|
|
29
37
|
try {
|
|
30
|
-
const type = (await redisClient.type(keyName)).toLowerCase();
|
|
38
|
+
const type = knownType || (await runRedis('import.type', () => redisClient.type(keyName))).toLowerCase();
|
|
31
39
|
if (!SUPPORTED_TYPES.has(type)) {
|
|
32
40
|
return { ok: false, skipped: true };
|
|
33
41
|
}
|
|
34
42
|
|
|
35
|
-
let pttl = await redisClient.pTTL(keyName);
|
|
43
|
+
let pttl = await runRedis('import.pttl', () => redisClient.pTTL(keyName));
|
|
36
44
|
if (pttl === -2) pttl = -1;
|
|
37
45
|
const expiresAt = pttl > 0 ? now + pttl : null;
|
|
38
46
|
const keyBuf = asKey(keyName);
|
|
@@ -40,7 +48,7 @@ export async function importKeyFromRedis(redisClient, keyName, storages, options
|
|
|
40
48
|
let bytes = keyBuf.length;
|
|
41
49
|
|
|
42
50
|
if (type === 'string') {
|
|
43
|
-
const value = await redisClient.get(keyName);
|
|
51
|
+
const value = await runRedis('import.string.get', () => redisClient.get(keyName));
|
|
44
52
|
if (value === undefined || value === null) return { ok: false, skipped: true };
|
|
45
53
|
const valBuf = asValue(value);
|
|
46
54
|
bytes += valBuf.length;
|
|
@@ -49,7 +57,7 @@ export async function importKeyFromRedis(redisClient, keyName, storages, options
|
|
|
49
57
|
}
|
|
50
58
|
|
|
51
59
|
if (type === 'hash') {
|
|
52
|
-
const obj = await redisClient.hGetAll(keyName);
|
|
60
|
+
const obj = await runRedis('import.hash.hGetAll', () => redisClient.hGetAll(keyName));
|
|
53
61
|
if (!obj || typeof obj !== 'object') return { ok: false, skipped: true };
|
|
54
62
|
const pairs = [];
|
|
55
63
|
for (const [f, v] of Object.entries(obj)) {
|
|
@@ -65,7 +73,7 @@ export async function importKeyFromRedis(redisClient, keyName, storages, options
|
|
|
65
73
|
}
|
|
66
74
|
|
|
67
75
|
if (type === 'set') {
|
|
68
|
-
const members = await redisClient.sMembers(keyName);
|
|
76
|
+
const members = await runRedis('import.set.sMembers', () => redisClient.sMembers(keyName));
|
|
69
77
|
if (!members || !members.length) return { ok: false, skipped: true };
|
|
70
78
|
const memberBuffers = members.map((m) => toBuffer(m));
|
|
71
79
|
for (const b of memberBuffers) bytes += b.length;
|
|
@@ -75,7 +83,7 @@ export async function importKeyFromRedis(redisClient, keyName, storages, options
|
|
|
75
83
|
}
|
|
76
84
|
|
|
77
85
|
if (type === 'list') {
|
|
78
|
-
const elements = await redisClient.lRange(keyName, 0, -1);
|
|
86
|
+
const elements = await runRedis('import.list.lRange', () => redisClient.lRange(keyName, 0, -1));
|
|
79
87
|
if (!elements || !elements.length) return { ok: false, skipped: true };
|
|
80
88
|
const valueBuffers = elements.map((e) => toBuffer(e));
|
|
81
89
|
for (const b of valueBuffers) bytes += b.length;
|
|
@@ -85,7 +93,7 @@ export async function importKeyFromRedis(redisClient, keyName, storages, options
|
|
|
85
93
|
}
|
|
86
94
|
|
|
87
95
|
if (type === 'zset') {
|
|
88
|
-
const withScores = await redisClient.zRangeWithScores(keyName, 0, -1);
|
|
96
|
+
const withScores = await runRedis('import.zset.zRangeWithScores', () => redisClient.zRangeWithScores(keyName, 0, -1));
|
|
89
97
|
if (!withScores || !withScores.length) return { ok: false, skipped: true };
|
|
90
98
|
const pairs = withScores.map((item) => ({
|
|
91
99
|
member: toBuffer(item.value),
|
|
@@ -99,7 +107,8 @@ export async function importKeyFromRedis(redisClient, keyName, storages, options
|
|
|
99
107
|
|
|
100
108
|
return { ok: false, skipped: true };
|
|
101
109
|
} catch (err) {
|
|
102
|
-
|
|
110
|
+
if (isFatalRedisCommandError(err)) throw err;
|
|
111
|
+
return { ok: false, error: true, errorMessage: err.message };
|
|
103
112
|
}
|
|
104
113
|
}
|
|
105
114
|
|
package/src/migration/index.js
CHANGED
|
@@ -38,6 +38,7 @@ import { startDirtyTracker as startDirtyTrackerProcess } from './tracker.js';
|
|
|
38
38
|
* @property {number} [maxRps=0] - Max requests/s (0 = unlimited).
|
|
39
39
|
* @property {number} [batchKeys=200]
|
|
40
40
|
* @property {number} [batchBytes=67108864] - 64 MB default.
|
|
41
|
+
* @property {number} [redisCommandTimeoutMs=30000] - Fail a stuck Redis command instead of waiting forever.
|
|
41
42
|
* @property {string} [configCommand='CONFIG'] - Redis CONFIG command name. Override if renamed for security.
|
|
42
43
|
*/
|
|
43
44
|
|
|
@@ -67,6 +68,7 @@ export function createMigration({
|
|
|
67
68
|
maxRps = 0,
|
|
68
69
|
batchKeys = 200,
|
|
69
70
|
batchBytes = 64 * 1024 * 1024,
|
|
71
|
+
redisCommandTimeoutMs = 30000,
|
|
70
72
|
configCommand = 'CONFIG',
|
|
71
73
|
} = {}) {
|
|
72
74
|
if (!to) throw new Error('createMigration: "to" (db path) is required');
|
|
@@ -168,6 +170,7 @@ export function createMigration({
|
|
|
168
170
|
max_rps: maxRps,
|
|
169
171
|
batch_keys: batchKeys,
|
|
170
172
|
batch_bytes: batchBytes,
|
|
173
|
+
redis_command_timeout_ms: redisCommandTimeoutMs,
|
|
171
174
|
resume,
|
|
172
175
|
onProgress,
|
|
173
176
|
});
|
|
@@ -200,6 +203,7 @@ export function createMigration({
|
|
|
200
203
|
pragmaTemplate,
|
|
201
204
|
batch_keys: bk,
|
|
202
205
|
max_rps: rps,
|
|
206
|
+
redis_command_timeout_ms: redisCommandTimeoutMs,
|
|
203
207
|
onProgress,
|
|
204
208
|
});
|
|
205
209
|
},
|
|
@@ -0,0 +1,174 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Guard Redis migration commands with timeouts and abort-aware progress context.
|
|
3
|
+
*/
|
|
4
|
+
|
|
5
|
+
const FATAL_CODES = new Set([
|
|
6
|
+
'REDIS_COMMAND_TIMEOUT',
|
|
7
|
+
'BULK_ABORTED',
|
|
8
|
+
'APPLY_DIRTY_ABORTED',
|
|
9
|
+
'MIGRATION_ABORTED',
|
|
10
|
+
]);
|
|
11
|
+
|
|
12
|
+
function normalizeKey(key) {
|
|
13
|
+
if (key == null) return null;
|
|
14
|
+
if (Buffer.isBuffer(key)) return key.toString('utf8');
|
|
15
|
+
return String(key);
|
|
16
|
+
}
|
|
17
|
+
|
|
18
|
+
function attachContext(err, snapshot) {
|
|
19
|
+
if (!snapshot || !err || typeof err !== 'object') return err;
|
|
20
|
+
if (!('migrationStage' in err)) err.migrationStage = snapshot.stage;
|
|
21
|
+
if (!('migrationKey' in err)) err.migrationKey = snapshot.key;
|
|
22
|
+
if (!('migrationActiveSince' in err)) err.migrationActiveSince = snapshot.startedAt;
|
|
23
|
+
return err;
|
|
24
|
+
}
|
|
25
|
+
|
|
26
|
+
function formatContext(message, snapshot) {
|
|
27
|
+
if (!snapshot) return message;
|
|
28
|
+
const parts = [message, `during ${snapshot.stage}`];
|
|
29
|
+
if (snapshot.key) parts.push(`for key "${snapshot.key}"`);
|
|
30
|
+
return parts.join(' ');
|
|
31
|
+
}
|
|
32
|
+
|
|
33
|
+
function abortRedisTransport(redisClient) {
|
|
34
|
+
if (!redisClient) return;
|
|
35
|
+
|
|
36
|
+
for (const methodName of ['disconnect', 'destroy']) {
|
|
37
|
+
if (typeof redisClient[methodName] !== 'function') continue;
|
|
38
|
+
try {
|
|
39
|
+
const result = redisClient[methodName]();
|
|
40
|
+
if (result && typeof result.catch === 'function') {
|
|
41
|
+
result.catch(() => {});
|
|
42
|
+
}
|
|
43
|
+
} catch (_) {
|
|
44
|
+
// Best effort: we're already aborting because the client is unhealthy.
|
|
45
|
+
}
|
|
46
|
+
return;
|
|
47
|
+
}
|
|
48
|
+
}
|
|
49
|
+
|
|
50
|
+
export function isFatalRedisCommandError(err) {
|
|
51
|
+
return !!(err && typeof err === 'object' && FATAL_CODES.has(err.code));
|
|
52
|
+
}
|
|
53
|
+
|
|
54
|
+
/**
 * Create a guard that wraps individual Redis commands with a per-command
 * timeout and a shared abort channel.
 *
 * Once any wrapped command times out, or `signalAbort` is called, the guard
 * becomes terminal: the error is stored, the client transport is torn down
 * (best effort, via `abortRedisTransport`), every in-flight command is
 * rejected, and all future `run` calls rethrow the same terminal error.
 *
 * @param {object} [options]
 * @param {object} [options.redisClient] - Client whose transport is destroyed on abort.
 * @param {number} [options.timeoutMs=30000] - Default per-command timeout; a
 *   non-finite or non-positive value disables the timer for that command.
 * @returns {{run: Function, snapshot: Function, signalAbort: Function, abortTransport: Function}}
 */
export function createRedisCommandGuard({ redisClient, timeoutMs = 30000 } = {}) {
  // One listener per in-flight `run` call; each listener rejects that call.
  const abortListeners = new Set();
  // State of the command currently in flight (the guard tracks one at a time;
  // a newer `run` overwrites `active`, so callers are expected to serialize).
  let active = null;
  // First timeout/abort error; once set, the guard permanently fails fast.
  let terminalError = null;
  // Ensures the transport teardown runs at most once.
  let transportAborted = false;

  // Describe the in-flight command (stage, key, elapsed time) for error
  // context; null when nothing is running.
  function snapshot() {
    if (!active) return null;
    return {
      stage: active.stage,
      key: active.key,
      startedAt: active.startedAt,
      activeForMs: Date.now() - active.startedAt,
    };
  }

  // Reject every pending `run` with the terminal error. Listener failures are
  // swallowed — we are already aborting.
  function notifyAbort(err) {
    for (const listener of abortListeners) {
      try {
        listener(err);
      } catch (_) {
        // Ignore listener failures while aborting.
      }
    }
  }

  // Destroy the client transport exactly once so stuck commands cannot keep
  // the process alive.
  function abortTransport() {
    if (transportAborted) return;
    transportAborted = true;
    abortRedisTransport(redisClient);
  }

  // External abort (e.g. SIGINT/SIGTERM handlers). Idempotent: the first
  // terminal error wins and is returned on later calls.
  // NOTE: ordering matters — record the error, then kill the transport, then
  // reject pending runs, so listeners observe a fully-aborted guard.
  function signalAbort(message, code = 'MIGRATION_ABORTED') {
    if (terminalError) return terminalError;
    const current = snapshot();
    const err = attachContext(new Error(formatContext(message, current)), current);
    err.code = code;
    terminalError = err;
    abortTransport();
    notifyAbort(err);
    return err;
  }

  /**
   * Execute `fn` (a function returning the Redis command promise) under the
   * guard. Rejects with the terminal error if the guard is already aborted,
   * on per-command timeout, or on external abort; otherwise settles with
   * `fn`'s own result (failures get command context attached).
   *
   * @param {Function} fn - Invoked with no arguments; may return a promise.
   * @param {object} [options]
   * @param {string} options.stage - Label for error context (e.g. 'bulk.scan').
   * @param {*} [options.key=null] - Key being processed, normalized via `normalizeKey`.
   * @param {number} [options.timeoutMs] - Per-call override of the default timeout.
   */
  async function run(fn, { stage, key = null, timeoutMs: overrideTimeoutMs } = {}) {
    if (terminalError) throw terminalError;

    const opTimeoutMs = overrideTimeoutMs ?? timeoutMs;
    const opKey = normalizeKey(key);
    const opState = {
      stage,
      key: opKey,
      startedAt: Date.now(),
    };
    active = opState;

    return await new Promise((resolve, reject) => {
      let settled = false;
      let timeoutId = null;

      // Runs exactly once, on whichever settlement path wins: detaches the
      // abort listener, cancels the timer, and clears `active` only if this
      // op still owns it (a newer run may have replaced it).
      const cleanup = () => {
        abortListeners.delete(onAbort);
        if (timeoutId) clearTimeout(timeoutId);
        if (active === opState) active = null;
      };

      const finishResolve = (value) => {
        if (settled) return;
        settled = true;
        cleanup();
        resolve(value);
      };

      const finishReject = (err) => {
        if (settled) return;
        settled = true;
        cleanup();
        reject(err);
      };

      const onAbort = (err) => {
        finishReject(err);
      };

      abortListeners.add(onAbort);

      // Arm the timeout only for finite positive values; 0 / Infinity / NaN
      // means "no per-command timer".
      if (Number.isFinite(opTimeoutMs) && opTimeoutMs > 0) {
        timeoutId = setTimeout(() => {
          // `active` may already belong to another op; fall back to this
          // op's own recorded state for the error context.
          const current = snapshot() ?? {
            stage,
            key: opKey,
            startedAt: opState.startedAt,
            activeForMs: Date.now() - opState.startedAt,
          };
          const err = attachContext(
            new Error(formatContext(`Redis command timed out after ${opTimeoutMs}ms`, current)),
            current
          );
          err.code = 'REDIS_COMMAND_TIMEOUT';
          // Same terminal sequence as signalAbort: record, kill transport,
          // reject all pending runs, then this one.
          terminalError = err;
          abortTransport();
          notifyAbort(err);
          finishReject(err);
        }, opTimeoutMs);
      }

      // Promise.resolve().then(fn) tolerates `fn` throwing synchronously or
      // returning a non-promise; failures get the op's context attached.
      Promise.resolve()
        .then(fn)
        .then(
          (value) => finishResolve(value),
          (err) => finishReject(attachContext(err, snapshot() ?? opState))
        );
    });
  }

  return {
    run,
    snapshot,
    signalAbort,
    abortTransport,
  };
}
|
|
@@ -0,0 +1,122 @@
|
|
|
1
|
+
import { describe, it } from 'node:test';
|
|
2
|
+
import assert from 'node:assert/strict';
|
|
3
|
+
import { runBulkImport } from '../../src/migration/bulk.js';
|
|
4
|
+
import { runApplyDirty } from '../../src/migration/apply-dirty.js';
|
|
5
|
+
import { createRedisCommandGuard } from '../../src/migration/redis-guard.js';
|
|
6
|
+
import { openDb } from '../../src/storage/sqlite/db.js';
|
|
7
|
+
import { createRun, getRun, upsertDirtyKey } from '../../src/migration/registry.js';
|
|
8
|
+
import { tmpDbPath } from '../helpers/tmp.js';
|
|
9
|
+
|
|
10
|
+
// A promise that never settles — simulates a Redis command that hangs forever.
function never() {
  const hangForever = () => {
    // Intentionally empty: neither resolve nor reject is ever called.
  };
  return new Promise(hangForever);
}
|
|
13
|
+
|
|
14
|
+
// End-to-end coverage for the Redis command timeout/abort machinery: stuck
// commands must fail with REDIS_COMMAND_TIMEOUT, record context (stage/key)
// in the run registry, and tear down the client transport exactly once.
describe('migration Redis timeouts', () => {
  // A SCAN that never resolves must trip the per-command timeout, mark the
  // run as failed with the 'bulk.scan' stage in the error, and disconnect
  // the client exactly once.
  it('runBulkImport fails a stuck scan with timeout context', async () => {
    const dbPath = tmpDbPath();
    let disconnectCalls = 0;
    // Minimal fake client: scan hangs forever; disconnect is instrumented.
    const redisClient = {
      scan: () => never(),
      disconnect() {
        disconnectCalls += 1;
      },
    };

    await assert.rejects(
      () => runBulkImport(redisClient, dbPath, 'bulk-timeout', {
        sourceUri: 'redis://example.test:6379',
        pragmaTemplate: 'minimal',
        redis_command_timeout_ms: 20,
      }),
      (err) => {
        assert.equal(err.code, 'REDIS_COMMAND_TIMEOUT');
        assert.match(err.message, /bulk\.scan/);
        return true;
      }
    );

    // The failure must be persisted: run status 'failed' with the stage
    // recorded in last_error.
    const db = openDb(dbPath, { pragmaTemplate: 'minimal' });
    try {
      const run = getRun(db, 'bulk-timeout');
      assert.equal(run.status, 'failed');
      assert.match(run.last_error, /bulk\.scan/);
    } finally {
      db.close();
    }

    // Transport teardown is expected to run exactly once.
    assert.equal(disconnectCalls, 1);
  });

  // A hung TYPE lookup during delta apply must fail with both the stage and
  // the specific dirty key in the error context, persisted to the registry.
  it('runApplyDirty fails a stuck key lookup with key context', async () => {
    const dbPath = tmpDbPath();
    // Seed a run with one dirty key so apply-dirty has work to do.
    const db = openDb(dbPath, { pragmaTemplate: 'minimal' });
    try {
      createRun(db, 'apply-timeout', 'redis://example.test:6379');
      upsertDirtyKey(db, 'apply-timeout', 'stuck:key', 'set');
    } finally {
      db.close();
    }

    let disconnectCalls = 0;
    // Fake client: the TYPE command (first per-key lookup) hangs forever.
    const redisClient = {
      type: () => never(),
      disconnect() {
        disconnectCalls += 1;
      },
    };

    await assert.rejects(
      () => runApplyDirty(redisClient, dbPath, 'apply-timeout', {
        pragmaTemplate: 'minimal',
        redis_command_timeout_ms: 20,
      }),
      (err) => {
        assert.equal(err.code, 'REDIS_COMMAND_TIMEOUT');
        assert.match(err.message, /apply-dirty\.type/);
        assert.match(err.message, /stuck:key/);
        return true;
      }
    );

    // Verify persistence: failed status, stage in last_error, and the dirty
    // key recorded in the most recent migration_errors row.
    const verifyDb = openDb(dbPath, { pragmaTemplate: 'minimal' });
    try {
      const run = getRun(verifyDb, 'apply-timeout');
      assert.equal(run.status, 'failed');
      assert.match(run.last_error, /apply-dirty\.type/);
      const row = verifyDb.prepare('SELECT message FROM migration_errors WHERE run_id = ? ORDER BY rowid DESC LIMIT 1').get('apply-timeout');
      assert.match(row.message, /stuck:key/);
    } finally {
      verifyDb.close();
    }

    assert.equal(disconnectCalls, 1);
  });

  // Direct guard test: an external signalAbort must reject a pending command
  // immediately (well before the 1000ms timeout) with the caller's code and
  // the in-flight stage in the message, and disconnect exactly once.
  it('redis guard aborts a pending command immediately', async () => {
    let disconnectCalls = 0;
    const guard = createRedisCommandGuard({
      redisClient: {
        disconnect() {
          disconnectCalls += 1;
        },
      },
      timeoutMs: 1000,
    });

    const pending = guard.run(() => never(), { stage: 'bulk.scan' });
    // Abort on the microtask queue, i.e. after `run` has registered its
    // abort listener but long before the timeout could fire.
    queueMicrotask(() => {
      guard.signalAbort('Bulk import interrupted by signal (SIGINT/SIGTERM)', 'BULK_ABORTED');
    });

    await assert.rejects(
      () => pending,
      (err) => {
        assert.equal(err.code, 'BULK_ABORTED');
        assert.match(err.message, /bulk\.scan/);
        return true;
      }
    );

    assert.equal(disconnectCalls, 1);
  });
});
|