resplite 1.3.4 → 1.3.5
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +1 -5
- package/package.json +1 -1
- package/src/migration/apply-dirty.js +7 -109
- package/src/migration/bulk.js +15 -97
- package/src/migration/import-one.js +10 -19
- package/src/migration/index.js +0 -4
- package/src/migration/redis-guard.js +0 -174
- package/test/unit/migration-timeouts.test.js +0 -122
package/README.md
CHANGED
|
@@ -107,7 +107,6 @@ const m = createMigration({
|
|
|
107
107
|
batchKeys: 1000,
|
|
108
108
|
batchBytes: 64 * 1024 * 1024, // 64 MB
|
|
109
109
|
maxRps: 0, // 0 = unlimited
|
|
110
|
-
redisCommandTimeoutMs: 30000, // fail a stuck Redis command instead of waiting forever
|
|
111
110
|
|
|
112
111
|
// If your Redis deployment renamed CONFIG for security:
|
|
113
112
|
// configCommand: 'MYCONFIG',
|
|
@@ -190,10 +189,7 @@ await m.close();
|
|
|
190
189
|
`resume` defaults to `true`. It doesn't matter whether it's the first run or a resume: the same script works for both starting and continuing. The first run starts from cursor 0; if the process is interrupted (Ctrl+C, crash, etc.), running the script again continues from the last checkpoint. You don't need to pass `resume: false` on the first run or change anything to resume.
|
|
191
190
|
|
|
192
191
|
**Graceful shutdown**
|
|
193
|
-
On SIGINT (Ctrl+C) or SIGTERM, the bulk importer checkpoints progress, sets the run status to `aborted`, closes the SQLite database cleanly (so WAL is checkpointed and the file is not left open),
|
|
194
|
-
|
|
195
|
-
**Errors and stalls**
|
|
196
|
-
Use `onProgress` to see progress and detect problems. The callback receives the run row (e.g. `scanned_keys`, `migrated_keys`, `dirty_keys_applied`, `last_error`) plus transient fields like `_activeStage`, `_activeKey`, `_activeForMs`, `_stallWarning`, and `_stallMessage` when the importer is waiting on Redis. Each Redis command is also protected by `redisCommandTimeoutMs` (30s by default), so a dead connection or pathological key fails with the active stage/key in the error instead of silently hanging forever. When a key fails to import, the error is logged to `migration_errors` and the run’s `last_error` is set; the real error message is included so you can diagnose. After any failure, check `m.status()` and query `migration_errors` in the DB if needed.
|
|
192
|
+
On SIGINT (Ctrl+C) or SIGTERM, the bulk importer checkpoints progress, sets the run status to `aborted`, closes the SQLite database cleanly (so WAL is checkpointed and the file is not left open), then exits. You can safely interrupt a long-running bulk and resume later.
|
|
197
193
|
|
|
198
194
|
The JS API can run the dirty-key tracker in-process via `m.startDirtyTracker()` / `m.stopDirtyTracker()`, so the full flow stays inside a single script.
|
|
199
195
|
|
package/package.json
CHANGED
|
@@ -9,21 +9,15 @@ import { createHashesStorage } from '../storage/sqlite/hashes.js';
|
|
|
9
9
|
import { createSetsStorage } from '../storage/sqlite/sets.js';
|
|
10
10
|
import { createListsStorage } from '../storage/sqlite/lists.js';
|
|
11
11
|
import { createZsetsStorage } from '../storage/sqlite/zsets.js';
|
|
12
|
-
import { getRun, getDirtyBatch, markDirtyState, logError,
|
|
12
|
+
import { getRun, getDirtyBatch, markDirtyState, logError, RUN_STATUS } from './registry.js';
|
|
13
13
|
import { importKeyFromRedis } from './import-one.js';
|
|
14
|
-
import { createRedisCommandGuard, isFatalRedisCommandError } from './redis-guard.js';
|
|
15
14
|
|
|
16
15
|
function sleep(ms) {
|
|
17
16
|
return new Promise((resolve) => setTimeout(resolve, ms));
|
|
18
17
|
}
|
|
19
18
|
|
|
20
|
-
const HEARTBEAT_INTERVAL_MS = 15000;
|
|
21
|
-
const STALL_WARNING_MS = 60000;
|
|
22
|
-
|
|
23
19
|
/**
|
|
24
20
|
* Apply dirty keys: for each key in registry with state=dirty, reimport from Redis or delete in destination.
|
|
25
|
-
* On SIGINT/SIGTERM, sets run status to ABORTED, closes DB and rethrows so the process can exit and Ctrl+C works.
|
|
26
|
-
*
|
|
27
21
|
* @param {import('redis').RedisClientType} redisClient
|
|
28
22
|
* @param {string} dbPath
|
|
29
23
|
* @param {string} runId
|
|
@@ -31,73 +25,12 @@ const STALL_WARNING_MS = 60000;
|
|
|
31
25
|
* @param {string} [options.pragmaTemplate='default']
|
|
32
26
|
* @param {number} [options.batch_keys=200]
|
|
33
27
|
* @param {number} [options.max_rps=0]
|
|
34
|
-
* @param {
|
|
35
|
-
* @param {(run: object) => void | Promise<void>} [options.onProgress] - Called after each batch and on heartbeat; may receive _stallWarning if no progress for 60s.
|
|
28
|
+
* @param {(run: object) => void | Promise<void>} [options.onProgress] - Called after each batch with the current run row.
|
|
36
29
|
*/
|
|
37
30
|
export async function runApplyDirty(redisClient, dbPath, runId, options = {}) {
|
|
38
|
-
const { pragmaTemplate = 'default', batch_keys = 200, max_rps = 0,
|
|
31
|
+
const { pragmaTemplate = 'default', batch_keys = 200, max_rps = 0, onProgress } = options;
|
|
39
32
|
|
|
40
33
|
const db = openDb(dbPath, { pragmaTemplate });
|
|
41
|
-
const redisGuard = createRedisCommandGuard({
|
|
42
|
-
redisClient,
|
|
43
|
-
timeoutMs: redis_command_timeout_ms,
|
|
44
|
-
});
|
|
45
|
-
let abortRequested = false;
|
|
46
|
-
const onSignal = () => {
|
|
47
|
-
abortRequested = true;
|
|
48
|
-
redisGuard.signalAbort('Apply dirty interrupted by signal (SIGINT/SIGTERM)', 'APPLY_DIRTY_ABORTED');
|
|
49
|
-
};
|
|
50
|
-
process.on('SIGINT', onSignal);
|
|
51
|
-
process.on('SIGTERM', onSignal);
|
|
52
|
-
|
|
53
|
-
function enrichProgress(run) {
|
|
54
|
-
const active = redisGuard.snapshot();
|
|
55
|
-
if (!active) return run;
|
|
56
|
-
|
|
57
|
-
return {
|
|
58
|
-
...run,
|
|
59
|
-
_activeStage: active.stage,
|
|
60
|
-
_activeKey: active.key,
|
|
61
|
-
_activeSince: active.startedAt,
|
|
62
|
-
_activeForMs: active.activeForMs,
|
|
63
|
-
};
|
|
64
|
-
}
|
|
65
|
-
|
|
66
|
-
function withStallContext(run) {
|
|
67
|
-
const active = redisGuard.snapshot();
|
|
68
|
-
let payload = enrichProgress(run);
|
|
69
|
-
if (run.updated_at && Date.now() - run.updated_at > STALL_WARNING_MS) {
|
|
70
|
-
const detail = active
|
|
71
|
-
? ` Active operation: ${active.stage}${active.key ? ` key=${active.key}` : ''}.`
|
|
72
|
-
: '';
|
|
73
|
-
payload = {
|
|
74
|
-
...payload,
|
|
75
|
-
_stallWarning: true,
|
|
76
|
-
_stallMessage: `No progress for 60s — possible hang or Redis timeout.${detail}`,
|
|
77
|
-
};
|
|
78
|
-
}
|
|
79
|
-
return payload;
|
|
80
|
-
}
|
|
81
|
-
|
|
82
|
-
let abortHandled = false;
|
|
83
|
-
function markAborted() {
|
|
84
|
-
if (abortHandled) return getRun(db, runId);
|
|
85
|
-
abortHandled = true;
|
|
86
|
-
setRunStatus(db, runId, RUN_STATUS.ABORTED);
|
|
87
|
-
updateBulkProgress(db, runId, { last_error: 'Interrupted by SIGINT/SIGTERM' });
|
|
88
|
-
return getRun(db, runId);
|
|
89
|
-
}
|
|
90
|
-
|
|
91
|
-
let heartbeatTimer = null;
|
|
92
|
-
if (onProgress) {
|
|
93
|
-
heartbeatTimer = setInterval(() => {
|
|
94
|
-
const run = getRun(db, runId);
|
|
95
|
-
if (!run) return;
|
|
96
|
-
const payload = withStallContext(run);
|
|
97
|
-
Promise.resolve(onProgress(payload)).catch(() => {});
|
|
98
|
-
}, HEARTBEAT_INTERVAL_MS);
|
|
99
|
-
}
|
|
100
|
-
|
|
101
34
|
const run = getRun(db, runId);
|
|
102
35
|
if (!run) throw new Error(`Run ${runId} not found`);
|
|
103
36
|
|
|
@@ -112,9 +45,7 @@ export async function runApplyDirty(redisClient, dbPath, runId, options = {}) {
|
|
|
112
45
|
const minIntervalMs = max_rps > 0 ? 1000 / max_rps : 0;
|
|
113
46
|
let lastKeyTime = 0;
|
|
114
47
|
|
|
115
|
-
try {
|
|
116
48
|
for (;;) {
|
|
117
|
-
if (abortRequested) break;
|
|
118
49
|
let r = getRun(db, runId);
|
|
119
50
|
if (r && r.status === RUN_STATUS.ABORTED) break;
|
|
120
51
|
while (r && r.status === RUN_STATUS.PAUSED) {
|
|
@@ -130,7 +61,6 @@ export async function runApplyDirty(redisClient, dbPath, runId, options = {}) {
|
|
|
130
61
|
|
|
131
62
|
// ── Re-import (or remove) keys that changed while bulk was running ──
|
|
132
63
|
for (const { key: keyBuf } of dirtyBatch) {
|
|
133
|
-
if (abortRequested) break;
|
|
134
64
|
r = getRun(db, runId);
|
|
135
65
|
if (r && r.status === RUN_STATUS.ABORTED) break;
|
|
136
66
|
while (r && r.status === RUN_STATUS.PAUSED) {
|
|
@@ -146,29 +76,22 @@ export async function runApplyDirty(redisClient, dbPath, runId, options = {}) {
|
|
|
146
76
|
|
|
147
77
|
const keyName = keyBuf.toString('utf8');
|
|
148
78
|
try {
|
|
149
|
-
const type = (await
|
|
150
|
-
() => redisClient.type(keyName),
|
|
151
|
-
{ stage: 'apply-dirty.type', key: keyName }
|
|
152
|
-
)).toLowerCase();
|
|
79
|
+
const type = (await redisClient.type(keyName)).toLowerCase();
|
|
153
80
|
if (type === 'none' || !type) {
|
|
154
81
|
keys.delete(keyBuf);
|
|
155
82
|
markDirtyState(db, runId, keyBuf, 'deleted');
|
|
156
83
|
} else {
|
|
157
|
-
const outcome = await importKeyFromRedis(redisClient, keyName, storages, {
|
|
158
|
-
redisGuard,
|
|
159
|
-
knownType: type,
|
|
160
|
-
});
|
|
84
|
+
const outcome = await importKeyFromRedis(redisClient, keyName, storages, {});
|
|
161
85
|
if (outcome.ok) {
|
|
162
86
|
markDirtyState(db, runId, keyBuf, 'applied');
|
|
163
87
|
} else if (outcome.skipped) {
|
|
164
88
|
markDirtyState(db, runId, keyBuf, 'skipped');
|
|
165
89
|
} else {
|
|
166
|
-
logError(db, runId, 'dirty_apply', outcome.
|
|
90
|
+
logError(db, runId, 'dirty_apply', outcome.error ? 'Import failed' : 'Skipped', keyName);
|
|
167
91
|
markDirtyState(db, runId, keyBuf, 'error');
|
|
168
92
|
}
|
|
169
93
|
}
|
|
170
94
|
} catch (err) {
|
|
171
|
-
if (isFatalRedisCommandError(err)) throw err;
|
|
172
95
|
logError(db, runId, 'dirty_apply', err.message, keyBuf);
|
|
173
96
|
markDirtyState(db, runId, keyBuf, 'error');
|
|
174
97
|
}
|
|
@@ -179,7 +102,6 @@ export async function runApplyDirty(redisClient, dbPath, runId, options = {}) {
|
|
|
179
102
|
// Marked as 'deleted' in the run counter; state changed away from 'deleted'
|
|
180
103
|
// so the next getDirtyBatch call won't return them again (avoiding infinite loop).
|
|
181
104
|
for (const { key: keyBuf } of deletedBatch) {
|
|
182
|
-
if (abortRequested) break;
|
|
183
105
|
r = getRun(db, runId);
|
|
184
106
|
if (r && r.status === RUN_STATUS.ABORTED) break;
|
|
185
107
|
while (r && r.status === RUN_STATUS.PAUSED) {
|
|
@@ -205,33 +127,9 @@ export async function runApplyDirty(redisClient, dbPath, runId, options = {}) {
|
|
|
205
127
|
}
|
|
206
128
|
if (batchSize > 0 && onProgress) {
|
|
207
129
|
const run = getRun(db, runId);
|
|
208
|
-
if (run) Promise.resolve(onProgress(
|
|
130
|
+
if (run) Promise.resolve(onProgress(run)).catch(() => {});
|
|
209
131
|
}
|
|
210
132
|
}
|
|
211
133
|
|
|
212
|
-
if (abortRequested) {
|
|
213
|
-
const run = markAborted();
|
|
214
|
-
if (onProgress && run) Promise.resolve(onProgress(enrichProgress(run))).catch(() => {});
|
|
215
|
-
const err = new Error('Apply dirty interrupted by signal (SIGINT/SIGTERM)');
|
|
216
|
-
err.code = 'APPLY_DIRTY_ABORTED';
|
|
217
|
-
throw err;
|
|
218
|
-
}
|
|
219
|
-
|
|
220
134
|
return getRun(db, runId);
|
|
221
|
-
} catch (err) {
|
|
222
|
-
if (err.code === 'APPLY_DIRTY_ABORTED') {
|
|
223
|
-
const run = markAborted();
|
|
224
|
-
if (onProgress && run) Promise.resolve(onProgress(enrichProgress(run))).catch(() => {});
|
|
225
|
-
} else {
|
|
226
|
-
setRunStatus(db, runId, RUN_STATUS.FAILED);
|
|
227
|
-
updateBulkProgress(db, runId, { last_error: err.message });
|
|
228
|
-
logError(db, runId, 'dirty_apply', err.message, null);
|
|
229
|
-
}
|
|
230
|
-
throw err;
|
|
231
|
-
} finally {
|
|
232
|
-
if (heartbeatTimer) clearInterval(heartbeatTimer);
|
|
233
|
-
process.off('SIGINT', onSignal);
|
|
234
|
-
process.off('SIGTERM', onSignal);
|
|
235
|
-
db.close();
|
|
236
|
-
}
|
|
237
135
|
}
|
package/src/migration/bulk.js
CHANGED
|
@@ -18,7 +18,6 @@ import {
|
|
|
18
18
|
RUN_STATUS,
|
|
19
19
|
} from './registry.js';
|
|
20
20
|
import { importKeyFromRedis } from './import-one.js';
|
|
21
|
-
import { createRedisCommandGuard } from './redis-guard.js';
|
|
22
21
|
|
|
23
22
|
function parseScanResult(result) {
|
|
24
23
|
if (Array.isArray(result)) {
|
|
@@ -36,9 +35,6 @@ function sleep(ms) {
|
|
|
36
35
|
return new Promise((resolve) => setTimeout(resolve, ms));
|
|
37
36
|
}
|
|
38
37
|
|
|
39
|
-
const HEARTBEAT_INTERVAL_MS = 15000;
|
|
40
|
-
const STALL_WARNING_MS = 60000;
|
|
41
|
-
|
|
42
38
|
/**
|
|
43
39
|
* Run bulk import: SCAN keys from Redis, import into RespLite DB with checkpointing.
|
|
44
40
|
* On SIGINT/SIGTERM, checkpoint progress, set run status to ABORTED, close DB and rethrow.
|
|
@@ -55,9 +51,8 @@ const STALL_WARNING_MS = 60000;
|
|
|
55
51
|
* @param {number} [options.batch_keys=200]
|
|
56
52
|
* @param {number} [options.batch_bytes=64*1024*1024] - 64MB
|
|
57
53
|
* @param {number} [options.checkpoint_interval_sec=30]
|
|
58
|
-
* @param {number} [options.redis_command_timeout_ms=30000]
|
|
59
54
|
* @param {boolean} [options.resume=true] - true: start from 0 or continue from checkpoint; false: always start from 0
|
|
60
|
-
* @param {function(run): void} [options.onProgress] - called after checkpoint
|
|
55
|
+
* @param {function(run): void} [options.onProgress] - called after checkpoint with run row
|
|
61
56
|
*/
|
|
62
57
|
export async function runBulkImport(redisClient, dbPath, runId, options = {}) {
|
|
63
58
|
const {
|
|
@@ -68,86 +63,18 @@ export async function runBulkImport(redisClient, dbPath, runId, options = {}) {
|
|
|
68
63
|
batch_keys = 200,
|
|
69
64
|
batch_bytes = 64 * 1024 * 1024,
|
|
70
65
|
checkpoint_interval_sec = 30,
|
|
71
|
-
redis_command_timeout_ms = 30000,
|
|
72
66
|
resume = true,
|
|
73
67
|
onProgress,
|
|
74
68
|
} = options;
|
|
75
69
|
|
|
76
70
|
const db = openDb(dbPath, { pragmaTemplate });
|
|
77
|
-
const redisGuard = createRedisCommandGuard({
|
|
78
|
-
redisClient,
|
|
79
|
-
timeoutMs: redis_command_timeout_ms,
|
|
80
|
-
});
|
|
81
71
|
let abortRequested = false;
|
|
82
72
|
const onSignal = () => {
|
|
83
73
|
abortRequested = true;
|
|
84
|
-
redisGuard.signalAbort('Bulk import interrupted by signal (SIGINT/SIGTERM)', 'BULK_ABORTED');
|
|
85
74
|
};
|
|
86
75
|
process.on('SIGINT', onSignal);
|
|
87
76
|
process.on('SIGTERM', onSignal);
|
|
88
77
|
|
|
89
|
-
function enrichProgress(run) {
|
|
90
|
-
const active = redisGuard.snapshot();
|
|
91
|
-
if (!active) return run;
|
|
92
|
-
|
|
93
|
-
return {
|
|
94
|
-
...run,
|
|
95
|
-
_activeStage: active.stage,
|
|
96
|
-
_activeKey: active.key,
|
|
97
|
-
_activeSince: active.startedAt,
|
|
98
|
-
_activeForMs: active.activeForMs,
|
|
99
|
-
};
|
|
100
|
-
}
|
|
101
|
-
|
|
102
|
-
function withStallContext(run) {
|
|
103
|
-
const active = redisGuard.snapshot();
|
|
104
|
-
let payload = enrichProgress(run);
|
|
105
|
-
if (run.updated_at && Date.now() - run.updated_at > STALL_WARNING_MS) {
|
|
106
|
-
const detail = active
|
|
107
|
-
? ` Active operation: ${active.stage}${active.key ? ` key=${active.key}` : ''}.`
|
|
108
|
-
: '';
|
|
109
|
-
payload = {
|
|
110
|
-
...payload,
|
|
111
|
-
_stallWarning: true,
|
|
112
|
-
_stallMessage: `No progress for 60s — possible hang or Redis timeout.${detail}`,
|
|
113
|
-
};
|
|
114
|
-
}
|
|
115
|
-
return payload;
|
|
116
|
-
}
|
|
117
|
-
|
|
118
|
-
let heartbeatTimer = null;
|
|
119
|
-
if (onProgress) {
|
|
120
|
-
heartbeatTimer = setInterval(() => {
|
|
121
|
-
const run = getRun(db, runId);
|
|
122
|
-
if (!run) return;
|
|
123
|
-
const payload = withStallContext(run);
|
|
124
|
-
Promise.resolve(onProgress(payload)).catch(() => {});
|
|
125
|
-
}, HEARTBEAT_INTERVAL_MS);
|
|
126
|
-
}
|
|
127
|
-
|
|
128
|
-
let cursor = 0;
|
|
129
|
-
let scanned_keys = 0;
|
|
130
|
-
let migrated_keys = 0;
|
|
131
|
-
let skipped_keys = 0;
|
|
132
|
-
let error_keys = 0;
|
|
133
|
-
let migrated_bytes = 0;
|
|
134
|
-
let abortHandled = false;
|
|
135
|
-
function markAborted() {
|
|
136
|
-
if (abortHandled) return getRun(db, runId);
|
|
137
|
-
abortHandled = true;
|
|
138
|
-
updateBulkProgress(db, runId, {
|
|
139
|
-
scan_cursor: String(cursor),
|
|
140
|
-
scanned_keys,
|
|
141
|
-
migrated_keys,
|
|
142
|
-
skipped_keys,
|
|
143
|
-
error_keys,
|
|
144
|
-
migrated_bytes,
|
|
145
|
-
last_error: 'Interrupted by SIGINT/SIGTERM',
|
|
146
|
-
});
|
|
147
|
-
setRunStatus(db, runId, RUN_STATUS.ABORTED);
|
|
148
|
-
return getRun(db, runId);
|
|
149
|
-
}
|
|
150
|
-
|
|
151
78
|
try {
|
|
152
79
|
const keys = createKeysStorage(db);
|
|
153
80
|
const strings = createStringsStorage(db, keys);
|
|
@@ -161,12 +88,12 @@ export async function runBulkImport(redisClient, dbPath, runId, options = {}) {
|
|
|
161
88
|
let run = getRun(db, runId);
|
|
162
89
|
if (!run) throw new Error(`Run ${runId} not found`);
|
|
163
90
|
|
|
164
|
-
cursor = resume && run.scan_cursor !== undefined ? parseInt(String(run.scan_cursor), 10) : 0;
|
|
165
|
-
scanned_keys = resume ? (run.scanned_keys || 0) : 0;
|
|
166
|
-
migrated_keys = resume ? (run.migrated_keys || 0) : 0;
|
|
167
|
-
skipped_keys = resume ? (run.skipped_keys || 0) : 0;
|
|
168
|
-
error_keys = resume ? (run.error_keys || 0) : 0;
|
|
169
|
-
migrated_bytes = resume ? (run.migrated_bytes || 0) : 0;
|
|
91
|
+
let cursor = resume && run.scan_cursor !== undefined ? parseInt(String(run.scan_cursor), 10) : 0;
|
|
92
|
+
let scanned_keys = resume ? (run.scanned_keys || 0) : 0;
|
|
93
|
+
let migrated_keys = resume ? (run.migrated_keys || 0) : 0;
|
|
94
|
+
let skipped_keys = resume ? (run.skipped_keys || 0) : 0;
|
|
95
|
+
let error_keys = resume ? (run.error_keys || 0) : 0;
|
|
96
|
+
let migrated_bytes = resume ? (run.migrated_bytes || 0) : 0;
|
|
170
97
|
|
|
171
98
|
if (!resume) {
|
|
172
99
|
updateBulkProgress(db, runId, { scan_cursor: String(cursor), scanned_keys, migrated_keys, skipped_keys, error_keys, migrated_bytes });
|
|
@@ -187,10 +114,7 @@ export async function runBulkImport(redisClient, dbPath, runId, options = {}) {
|
|
|
187
114
|
run = getRun(db, runId);
|
|
188
115
|
}
|
|
189
116
|
|
|
190
|
-
const result = await
|
|
191
|
-
() => redisClient.scan(cursor, { COUNT: scan_count }),
|
|
192
|
-
{ stage: 'bulk.scan' }
|
|
193
|
-
);
|
|
117
|
+
const result = await redisClient.scan(cursor, { COUNT: scan_count });
|
|
194
118
|
const parsed = parseScanResult(result);
|
|
195
119
|
cursor = parsed.cursor;
|
|
196
120
|
const keyList = parsed.keys || [];
|
|
@@ -212,10 +136,7 @@ export async function runBulkImport(redisClient, dbPath, runId, options = {}) {
|
|
|
212
136
|
}
|
|
213
137
|
|
|
214
138
|
const now = Date.now();
|
|
215
|
-
const outcome = await importKeyFromRedis(redisClient, keyName, storages, {
|
|
216
|
-
now,
|
|
217
|
-
redisGuard,
|
|
218
|
-
});
|
|
139
|
+
const outcome = await importKeyFromRedis(redisClient, keyName, storages, { now });
|
|
219
140
|
if (outcome.ok) {
|
|
220
141
|
migrated_keys++;
|
|
221
142
|
migrated_bytes += outcome.bytes || 0;
|
|
@@ -225,7 +146,7 @@ export async function runBulkImport(redisClient, dbPath, runId, options = {}) {
|
|
|
225
146
|
skipped_keys++;
|
|
226
147
|
} else {
|
|
227
148
|
error_keys++;
|
|
228
|
-
logError(db, runId, 'bulk', outcome.
|
|
149
|
+
logError(db, runId, 'bulk', outcome.error ? 'Import failed' : 'Skipped', keyName);
|
|
229
150
|
}
|
|
230
151
|
|
|
231
152
|
const now2 = Date.now();
|
|
@@ -246,7 +167,7 @@ export async function runBulkImport(redisClient, dbPath, runId, options = {}) {
|
|
|
246
167
|
batchScanned = 0;
|
|
247
168
|
batchBytes = 0;
|
|
248
169
|
run = getRun(db, runId);
|
|
249
|
-
if (onProgress && run) onProgress(
|
|
170
|
+
if (onProgress && run) onProgress(run);
|
|
250
171
|
}
|
|
251
172
|
}
|
|
252
173
|
} while (cursor !== 0);
|
|
@@ -260,8 +181,9 @@ export async function runBulkImport(redisClient, dbPath, runId, options = {}) {
|
|
|
260
181
|
error_keys,
|
|
261
182
|
migrated_bytes,
|
|
262
183
|
});
|
|
263
|
-
|
|
264
|
-
|
|
184
|
+
setRunStatus(db, runId, RUN_STATUS.ABORTED);
|
|
185
|
+
run = getRun(db, runId);
|
|
186
|
+
if (onProgress && run) onProgress(run);
|
|
265
187
|
const err = new Error('Bulk import interrupted by signal (SIGINT/SIGTERM)');
|
|
266
188
|
err.code = 'BULK_ABORTED';
|
|
267
189
|
throw err;
|
|
@@ -278,17 +200,13 @@ export async function runBulkImport(redisClient, dbPath, runId, options = {}) {
|
|
|
278
200
|
setRunStatus(db, runId, RUN_STATUS.COMPLETED);
|
|
279
201
|
return getRun(db, runId);
|
|
280
202
|
} catch (err) {
|
|
281
|
-
if (err.code
|
|
282
|
-
const run = markAborted();
|
|
283
|
-
if (onProgress && run) onProgress(enrichProgress(run));
|
|
284
|
-
} else {
|
|
203
|
+
if (err.code !== 'BULK_ABORTED') {
|
|
285
204
|
setRunStatus(db, runId, RUN_STATUS.FAILED);
|
|
286
205
|
updateBulkProgress(db, runId, { last_error: err.message });
|
|
287
206
|
logError(db, runId, 'bulk', err.message, null);
|
|
288
207
|
}
|
|
289
208
|
throw err;
|
|
290
209
|
} finally {
|
|
291
|
-
if (heartbeatTimer) clearInterval(heartbeatTimer);
|
|
292
210
|
process.off('SIGINT', onSignal);
|
|
293
211
|
process.off('SIGTERM', onSignal);
|
|
294
212
|
db.close();
|
|
@@ -4,7 +4,6 @@
|
|
|
4
4
|
*/
|
|
5
5
|
|
|
6
6
|
import { asKey, asValue } from '../util/buffers.js';
|
|
7
|
-
import { isFatalRedisCommandError } from './redis-guard.js';
|
|
8
7
|
|
|
9
8
|
const SUPPORTED_TYPES = new Set(['string', 'hash', 'set', 'list', 'zset']);
|
|
10
9
|
|
|
@@ -20,27 +19,20 @@ function toBuffer(value) {
|
|
|
20
19
|
* @param {import('redis').RedisClientType} redisClient
|
|
21
20
|
* @param {string} keyName
|
|
22
21
|
* @param {{ keys: import('../storage/sqlite/keys.js').ReturnType<import('../storage/sqlite/keys.js').createKeysStorage>; strings: ReturnType<import('../storage/sqlite/strings.js').createStringsStorage>; hashes: ReturnType<import('../storage/sqlite/hashes.js').createHashesStorage>; sets: ReturnType<import('../storage/sqlite/sets.js').createSetsStorage>; lists: ReturnType<import('../storage/sqlite/lists.js').createListsStorage>; zsets: ReturnType<import('../storage/sqlite/zsets.js').createZsetsStorage> }} storages
|
|
23
|
-
* @param {{ now?: number
|
|
24
|
-
* @returns {Promise<{ ok: boolean; skipped?: boolean; error?: boolean;
|
|
22
|
+
* @param {{ now?: number }} options
|
|
23
|
+
* @returns {Promise<{ ok: boolean; skipped?: boolean; error?: boolean; bytes?: number }>}
|
|
25
24
|
*/
|
|
26
25
|
export async function importKeyFromRedis(redisClient, keyName, storages, options = {}) {
|
|
27
26
|
const now = options.now ?? Date.now();
|
|
28
|
-
const knownType = options.knownType ? String(options.knownType).toLowerCase() : null;
|
|
29
|
-
const redisGuard = options.redisGuard ?? null;
|
|
30
27
|
const { keys, strings, hashes, sets, lists, zsets } = storages;
|
|
31
|
-
const runRedis = (stage, fn) => (
|
|
32
|
-
redisGuard
|
|
33
|
-
? redisGuard.run(fn, { stage, key: keyName })
|
|
34
|
-
: Promise.resolve().then(fn)
|
|
35
|
-
);
|
|
36
28
|
|
|
37
29
|
try {
|
|
38
|
-
const type =
|
|
30
|
+
const type = (await redisClient.type(keyName)).toLowerCase();
|
|
39
31
|
if (!SUPPORTED_TYPES.has(type)) {
|
|
40
32
|
return { ok: false, skipped: true };
|
|
41
33
|
}
|
|
42
34
|
|
|
43
|
-
let pttl = await
|
|
35
|
+
let pttl = await redisClient.pTTL(keyName);
|
|
44
36
|
if (pttl === -2) pttl = -1;
|
|
45
37
|
const expiresAt = pttl > 0 ? now + pttl : null;
|
|
46
38
|
const keyBuf = asKey(keyName);
|
|
@@ -48,7 +40,7 @@ export async function importKeyFromRedis(redisClient, keyName, storages, options
|
|
|
48
40
|
let bytes = keyBuf.length;
|
|
49
41
|
|
|
50
42
|
if (type === 'string') {
|
|
51
|
-
const value = await
|
|
43
|
+
const value = await redisClient.get(keyName);
|
|
52
44
|
if (value === undefined || value === null) return { ok: false, skipped: true };
|
|
53
45
|
const valBuf = asValue(value);
|
|
54
46
|
bytes += valBuf.length;
|
|
@@ -57,7 +49,7 @@ export async function importKeyFromRedis(redisClient, keyName, storages, options
|
|
|
57
49
|
}
|
|
58
50
|
|
|
59
51
|
if (type === 'hash') {
|
|
60
|
-
const obj = await
|
|
52
|
+
const obj = await redisClient.hGetAll(keyName);
|
|
61
53
|
if (!obj || typeof obj !== 'object') return { ok: false, skipped: true };
|
|
62
54
|
const pairs = [];
|
|
63
55
|
for (const [f, v] of Object.entries(obj)) {
|
|
@@ -73,7 +65,7 @@ export async function importKeyFromRedis(redisClient, keyName, storages, options
|
|
|
73
65
|
}
|
|
74
66
|
|
|
75
67
|
if (type === 'set') {
|
|
76
|
-
const members = await
|
|
68
|
+
const members = await redisClient.sMembers(keyName);
|
|
77
69
|
if (!members || !members.length) return { ok: false, skipped: true };
|
|
78
70
|
const memberBuffers = members.map((m) => toBuffer(m));
|
|
79
71
|
for (const b of memberBuffers) bytes += b.length;
|
|
@@ -83,7 +75,7 @@ export async function importKeyFromRedis(redisClient, keyName, storages, options
|
|
|
83
75
|
}
|
|
84
76
|
|
|
85
77
|
if (type === 'list') {
|
|
86
|
-
const elements = await
|
|
78
|
+
const elements = await redisClient.lRange(keyName, 0, -1);
|
|
87
79
|
if (!elements || !elements.length) return { ok: false, skipped: true };
|
|
88
80
|
const valueBuffers = elements.map((e) => toBuffer(e));
|
|
89
81
|
for (const b of valueBuffers) bytes += b.length;
|
|
@@ -93,7 +85,7 @@ export async function importKeyFromRedis(redisClient, keyName, storages, options
|
|
|
93
85
|
}
|
|
94
86
|
|
|
95
87
|
if (type === 'zset') {
|
|
96
|
-
const withScores = await
|
|
88
|
+
const withScores = await redisClient.zRangeWithScores(keyName, 0, -1);
|
|
97
89
|
if (!withScores || !withScores.length) return { ok: false, skipped: true };
|
|
98
90
|
const pairs = withScores.map((item) => ({
|
|
99
91
|
member: toBuffer(item.value),
|
|
@@ -107,8 +99,7 @@ export async function importKeyFromRedis(redisClient, keyName, storages, options
|
|
|
107
99
|
|
|
108
100
|
return { ok: false, skipped: true };
|
|
109
101
|
} catch (err) {
|
|
110
|
-
|
|
111
|
-
return { ok: false, error: true, errorMessage: err.message };
|
|
102
|
+
return { ok: false, error: true };
|
|
112
103
|
}
|
|
113
104
|
}
|
|
114
105
|
|
package/src/migration/index.js
CHANGED
|
@@ -38,7 +38,6 @@ import { startDirtyTracker as startDirtyTrackerProcess } from './tracker.js';
|
|
|
38
38
|
* @property {number} [maxRps=0] - Max requests/s (0 = unlimited).
|
|
39
39
|
* @property {number} [batchKeys=200]
|
|
40
40
|
* @property {number} [batchBytes=67108864] - 64 MB default.
|
|
41
|
-
* @property {number} [redisCommandTimeoutMs=30000] - Fail a stuck Redis command instead of waiting forever.
|
|
42
41
|
* @property {string} [configCommand='CONFIG'] - Redis CONFIG command name. Override if renamed for security.
|
|
43
42
|
*/
|
|
44
43
|
|
|
@@ -68,7 +67,6 @@ export function createMigration({
|
|
|
68
67
|
maxRps = 0,
|
|
69
68
|
batchKeys = 200,
|
|
70
69
|
batchBytes = 64 * 1024 * 1024,
|
|
71
|
-
redisCommandTimeoutMs = 30000,
|
|
72
70
|
configCommand = 'CONFIG',
|
|
73
71
|
} = {}) {
|
|
74
72
|
if (!to) throw new Error('createMigration: "to" (db path) is required');
|
|
@@ -170,7 +168,6 @@ export function createMigration({
|
|
|
170
168
|
max_rps: maxRps,
|
|
171
169
|
batch_keys: batchKeys,
|
|
172
170
|
batch_bytes: batchBytes,
|
|
173
|
-
redis_command_timeout_ms: redisCommandTimeoutMs,
|
|
174
171
|
resume,
|
|
175
172
|
onProgress,
|
|
176
173
|
});
|
|
@@ -203,7 +200,6 @@ export function createMigration({
|
|
|
203
200
|
pragmaTemplate,
|
|
204
201
|
batch_keys: bk,
|
|
205
202
|
max_rps: rps,
|
|
206
|
-
redis_command_timeout_ms: redisCommandTimeoutMs,
|
|
207
203
|
onProgress,
|
|
208
204
|
});
|
|
209
205
|
},
|
|
@@ -1,174 +0,0 @@
|
|
|
1
|
-
/**
|
|
2
|
-
* Guard Redis migration commands with timeouts and abort-aware progress context.
|
|
3
|
-
*/
|
|
4
|
-
|
|
5
|
-
const FATAL_CODES = new Set([
|
|
6
|
-
'REDIS_COMMAND_TIMEOUT',
|
|
7
|
-
'BULK_ABORTED',
|
|
8
|
-
'APPLY_DIRTY_ABORTED',
|
|
9
|
-
'MIGRATION_ABORTED',
|
|
10
|
-
]);
|
|
11
|
-
|
|
12
|
-
function normalizeKey(key) {
|
|
13
|
-
if (key == null) return null;
|
|
14
|
-
if (Buffer.isBuffer(key)) return key.toString('utf8');
|
|
15
|
-
return String(key);
|
|
16
|
-
}
|
|
17
|
-
|
|
18
|
-
function attachContext(err, snapshot) {
|
|
19
|
-
if (!snapshot || !err || typeof err !== 'object') return err;
|
|
20
|
-
if (!('migrationStage' in err)) err.migrationStage = snapshot.stage;
|
|
21
|
-
if (!('migrationKey' in err)) err.migrationKey = snapshot.key;
|
|
22
|
-
if (!('migrationActiveSince' in err)) err.migrationActiveSince = snapshot.startedAt;
|
|
23
|
-
return err;
|
|
24
|
-
}
|
|
25
|
-
|
|
26
|
-
function formatContext(message, snapshot) {
|
|
27
|
-
if (!snapshot) return message;
|
|
28
|
-
const parts = [message, `during ${snapshot.stage}`];
|
|
29
|
-
if (snapshot.key) parts.push(`for key "${snapshot.key}"`);
|
|
30
|
-
return parts.join(' ');
|
|
31
|
-
}
|
|
32
|
-
|
|
33
|
-
function abortRedisTransport(redisClient) {
|
|
34
|
-
if (!redisClient) return;
|
|
35
|
-
|
|
36
|
-
for (const methodName of ['disconnect', 'destroy']) {
|
|
37
|
-
if (typeof redisClient[methodName] !== 'function') continue;
|
|
38
|
-
try {
|
|
39
|
-
const result = redisClient[methodName]();
|
|
40
|
-
if (result && typeof result.catch === 'function') {
|
|
41
|
-
result.catch(() => {});
|
|
42
|
-
}
|
|
43
|
-
} catch (_) {
|
|
44
|
-
// Best effort: we're already aborting because the client is unhealthy.
|
|
45
|
-
}
|
|
46
|
-
return;
|
|
47
|
-
}
|
|
48
|
-
}
|
|
49
|
-
|
|
50
|
-
export function isFatalRedisCommandError(err) {
|
|
51
|
-
return !!(err && typeof err === 'object' && FATAL_CODES.has(err.code));
|
|
52
|
-
}
|
|
53
|
-
|
|
54
|
-
export function createRedisCommandGuard({ redisClient, timeoutMs = 30000 } = {}) {
|
|
55
|
-
const abortListeners = new Set();
|
|
56
|
-
let active = null;
|
|
57
|
-
let terminalError = null;
|
|
58
|
-
let transportAborted = false;
|
|
59
|
-
|
|
60
|
-
function snapshot() {
|
|
61
|
-
if (!active) return null;
|
|
62
|
-
return {
|
|
63
|
-
stage: active.stage,
|
|
64
|
-
key: active.key,
|
|
65
|
-
startedAt: active.startedAt,
|
|
66
|
-
activeForMs: Date.now() - active.startedAt,
|
|
67
|
-
};
|
|
68
|
-
}
|
|
69
|
-
|
|
70
|
-
function notifyAbort(err) {
|
|
71
|
-
for (const listener of abortListeners) {
|
|
72
|
-
try {
|
|
73
|
-
listener(err);
|
|
74
|
-
} catch (_) {
|
|
75
|
-
// Ignore listener failures while aborting.
|
|
76
|
-
}
|
|
77
|
-
}
|
|
78
|
-
}
|
|
79
|
-
|
|
80
|
-
function abortTransport() {
|
|
81
|
-
if (transportAborted) return;
|
|
82
|
-
transportAborted = true;
|
|
83
|
-
abortRedisTransport(redisClient);
|
|
84
|
-
}
|
|
85
|
-
|
|
86
|
-
function signalAbort(message, code = 'MIGRATION_ABORTED') {
|
|
87
|
-
if (terminalError) return terminalError;
|
|
88
|
-
const current = snapshot();
|
|
89
|
-
const err = attachContext(new Error(formatContext(message, current)), current);
|
|
90
|
-
err.code = code;
|
|
91
|
-
terminalError = err;
|
|
92
|
-
abortTransport();
|
|
93
|
-
notifyAbort(err);
|
|
94
|
-
return err;
|
|
95
|
-
}
|
|
96
|
-
|
|
97
|
-
async function run(fn, { stage, key = null, timeoutMs: overrideTimeoutMs } = {}) {
|
|
98
|
-
if (terminalError) throw terminalError;
|
|
99
|
-
|
|
100
|
-
const opTimeoutMs = overrideTimeoutMs ?? timeoutMs;
|
|
101
|
-
const opKey = normalizeKey(key);
|
|
102
|
-
const opState = {
|
|
103
|
-
stage,
|
|
104
|
-
key: opKey,
|
|
105
|
-
startedAt: Date.now(),
|
|
106
|
-
};
|
|
107
|
-
active = opState;
|
|
108
|
-
|
|
109
|
-
return await new Promise((resolve, reject) => {
|
|
110
|
-
let settled = false;
|
|
111
|
-
let timeoutId = null;
|
|
112
|
-
|
|
113
|
-
const cleanup = () => {
|
|
114
|
-
abortListeners.delete(onAbort);
|
|
115
|
-
if (timeoutId) clearTimeout(timeoutId);
|
|
116
|
-
if (active === opState) active = null;
|
|
117
|
-
};
|
|
118
|
-
|
|
119
|
-
const finishResolve = (value) => {
|
|
120
|
-
if (settled) return;
|
|
121
|
-
settled = true;
|
|
122
|
-
cleanup();
|
|
123
|
-
resolve(value);
|
|
124
|
-
};
|
|
125
|
-
|
|
126
|
-
const finishReject = (err) => {
|
|
127
|
-
if (settled) return;
|
|
128
|
-
settled = true;
|
|
129
|
-
cleanup();
|
|
130
|
-
reject(err);
|
|
131
|
-
};
|
|
132
|
-
|
|
133
|
-
const onAbort = (err) => {
|
|
134
|
-
finishReject(err);
|
|
135
|
-
};
|
|
136
|
-
|
|
137
|
-
abortListeners.add(onAbort);
|
|
138
|
-
|
|
139
|
-
if (Number.isFinite(opTimeoutMs) && opTimeoutMs > 0) {
|
|
140
|
-
timeoutId = setTimeout(() => {
|
|
141
|
-
const current = snapshot() ?? {
|
|
142
|
-
stage,
|
|
143
|
-
key: opKey,
|
|
144
|
-
startedAt: opState.startedAt,
|
|
145
|
-
activeForMs: Date.now() - opState.startedAt,
|
|
146
|
-
};
|
|
147
|
-
const err = attachContext(
|
|
148
|
-
new Error(formatContext(`Redis command timed out after ${opTimeoutMs}ms`, current)),
|
|
149
|
-
current
|
|
150
|
-
);
|
|
151
|
-
err.code = 'REDIS_COMMAND_TIMEOUT';
|
|
152
|
-
terminalError = err;
|
|
153
|
-
abortTransport();
|
|
154
|
-
notifyAbort(err);
|
|
155
|
-
finishReject(err);
|
|
156
|
-
}, opTimeoutMs);
|
|
157
|
-
}
|
|
158
|
-
|
|
159
|
-
Promise.resolve()
|
|
160
|
-
.then(fn)
|
|
161
|
-
.then(
|
|
162
|
-
(value) => finishResolve(value),
|
|
163
|
-
(err) => finishReject(attachContext(err, snapshot() ?? opState))
|
|
164
|
-
);
|
|
165
|
-
});
|
|
166
|
-
}
|
|
167
|
-
|
|
168
|
-
return {
|
|
169
|
-
run,
|
|
170
|
-
snapshot,
|
|
171
|
-
signalAbort,
|
|
172
|
-
abortTransport,
|
|
173
|
-
};
|
|
174
|
-
}
|
|
@@ -1,122 +0,0 @@
|
|
|
1
|
-
import { describe, it } from 'node:test';
|
|
2
|
-
import assert from 'node:assert/strict';
|
|
3
|
-
import { runBulkImport } from '../../src/migration/bulk.js';
|
|
4
|
-
import { runApplyDirty } from '../../src/migration/apply-dirty.js';
|
|
5
|
-
import { createRedisCommandGuard } from '../../src/migration/redis-guard.js';
|
|
6
|
-
import { openDb } from '../../src/storage/sqlite/db.js';
|
|
7
|
-
import { createRun, getRun, upsertDirtyKey } from '../../src/migration/registry.js';
|
|
8
|
-
import { tmpDbPath } from '../helpers/tmp.js';
|
|
9
|
-
|
|
10
|
-
/**
 * Returns a promise that never settles, simulating a Redis command that
 * hangs forever (used to drive the guard's timeout and abort paths).
 * @returns {Promise<never>} a forever-pending promise
 */
function never() {
  const hangForever = () => {
    // Intentionally empty: neither resolve nor reject is ever invoked.
  };
  return new Promise(hangForever);
}
|
|
13
|
-
|
|
14
|
-
describe('migration Redis timeouts', () => {
  it('runBulkImport fails a stuck scan with timeout context', async () => {
    const dbPath = tmpDbPath();
    let disconnectCalls = 0;
    // Fake client whose SCAN never answers; disconnect() is counted so we
    // can assert the transport was torn down exactly once.
    const redisClient = {
      scan: () => never(),
      disconnect() {
        disconnectCalls += 1;
      },
    };

    // With a 20ms command timeout, the hung scan must reject with the
    // timeout code and the 'bulk.scan' stage in the message.
    await assert.rejects(
      () => runBulkImport(redisClient, dbPath, 'bulk-timeout', {
        sourceUri: 'redis://example.test:6379',
        pragmaTemplate: 'minimal',
        redis_command_timeout_ms: 20,
      }),
      (err) => {
        assert.equal(err.code, 'REDIS_COMMAND_TIMEOUT');
        assert.match(err.message, /bulk\.scan/);
        return true;
      }
    );

    // The run row must be persisted as failed with the stage in last_error.
    const db = openDb(dbPath, { pragmaTemplate: 'minimal' });
    try {
      const run = getRun(db, 'bulk-timeout');
      assert.equal(run.status, 'failed');
      assert.match(run.last_error, /bulk\.scan/);
    } finally {
      db.close();
    }

    assert.equal(disconnectCalls, 1);
  });

  it('runApplyDirty fails a stuck key lookup with key context', async () => {
    const dbPath = tmpDbPath();
    // Seed a run with one dirty key so apply-dirty has work to do.
    const db = openDb(dbPath, { pragmaTemplate: 'minimal' });
    try {
      createRun(db, 'apply-timeout', 'redis://example.test:6379');
      upsertDirtyKey(db, 'apply-timeout', 'stuck:key', 'set');
    } finally {
      db.close();
    }

    let disconnectCalls = 0;
    // Fake client whose TYPE lookup hangs forever.
    const redisClient = {
      type: () => never(),
      disconnect() {
        disconnectCalls += 1;
      },
    };

    // The timeout error must carry both the stage and the offending key.
    await assert.rejects(
      () => runApplyDirty(redisClient, dbPath, 'apply-timeout', {
        pragmaTemplate: 'minimal',
        redis_command_timeout_ms: 20,
      }),
      (err) => {
        assert.equal(err.code, 'REDIS_COMMAND_TIMEOUT');
        assert.match(err.message, /apply-dirty\.type/);
        assert.match(err.message, /stuck:key/);
        return true;
      }
    );

    // Failure must be recorded on the run row AND in migration_errors,
    // with the key name preserved in the latest error message.
    const verifyDb = openDb(dbPath, { pragmaTemplate: 'minimal' });
    try {
      const run = getRun(verifyDb, 'apply-timeout');
      assert.equal(run.status, 'failed');
      assert.match(run.last_error, /apply-dirty\.type/);
      const row = verifyDb.prepare('SELECT message FROM migration_errors WHERE run_id = ? ORDER BY rowid DESC LIMIT 1').get('apply-timeout');
      assert.match(row.message, /stuck:key/);
    } finally {
      verifyDb.close();
    }

    assert.equal(disconnectCalls, 1);
  });

  it('redis guard aborts a pending command immediately', async () => {
    let disconnectCalls = 0;
    const guard = createRedisCommandGuard({
      redisClient: {
        disconnect() {
          disconnectCalls += 1;
        },
      },
      // Long timeout on purpose: the abort, not the timer, must reject.
      timeoutMs: 1000,
    });

    const pending = guard.run(() => never(), { stage: 'bulk.scan' });
    // Abort on the microtask queue, i.e. before any timer could fire.
    queueMicrotask(() => {
      guard.signalAbort('Bulk import interrupted by signal (SIGINT/SIGTERM)', 'BULK_ABORTED');
    });

    // The pending command must reject with the abort code and stage context.
    await assert.rejects(
      () => pending,
      (err) => {
        assert.equal(err.code, 'BULK_ABORTED');
        assert.match(err.message, /bulk\.scan/);
        return true;
      }
    );

    assert.equal(disconnectCalls, 1);
  });
});
|