resplite 1.3.0 → 1.3.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -189,7 +189,10 @@ await m.close();
189
189
  `resume` defaults to `true`. It doesn't matter whether it's the first run or a resume: the same script works for both starting and continuing. The first run starts from cursor 0; if the process is interrupted (Ctrl+C, crash, etc.), running the script again continues from the last checkpoint. You don't need to pass `resume: false` on the first run or change anything to resume.
190
190
 
191
191
  **Graceful shutdown**
192
- On SIGINT (Ctrl+C) or SIGTERM, the bulk importer checkpoints progress, sets the run status to `aborted`, closes the SQLite database cleanly (so WAL is checkpointed and the file is not left open), then exits. You can safely interrupt a long-running bulk and resume later.
192
+ On SIGINT (Ctrl+C) or SIGTERM, the bulk importer checkpoints progress, sets the run status to `aborted`, closes the SQLite database cleanly (so WAL is checkpointed and the file is not left open), then exits. You can safely interrupt a long-running bulk and resume later. The same applies to **apply-dirty**: Ctrl+C stops the delta apply, sets the run to `aborted`, and exits so you can inspect and retry.
193
+
194
+ **Errors and stalls**
195
+ Use `onProgress` to see progress and detect problems. The callback receives the run row (e.g. `scanned_keys`, `migrated_keys`, `dirty_keys_applied`, `last_error`). If progress stalls for more than 60 seconds (e.g. Redis hang or network issue), the heartbeat — which fires every 15 seconds — adds `_stallWarning: true` and `_stallMessage` to the progress object. When a key fails to import, the error is logged to `migration_errors` and the run’s `last_error` is set; the real error message is included so you can diagnose. After any failure, check `m.status()` and query `migration_errors` in the DB if needed.
193
196
 
194
197
  The JS API can run the dirty-key tracker in-process via `m.startDirtyTracker()` / `m.stopDirtyTracker()`, so the full flow stays inside a single script.
195
198
 
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "resplite",
3
- "version": "1.3.0",
3
+ "version": "1.3.2",
4
4
  "description": "A RESP2 server with practical Redis compatibility, backed by SQLite",
5
5
  "type": "module",
6
6
  "main": "src/index.js",
@@ -9,15 +9,20 @@ import { createHashesStorage } from '../storage/sqlite/hashes.js';
9
9
  import { createSetsStorage } from '../storage/sqlite/sets.js';
10
10
  import { createListsStorage } from '../storage/sqlite/lists.js';
11
11
  import { createZsetsStorage } from '../storage/sqlite/zsets.js';
12
- import { getRun, getDirtyBatch, markDirtyState, logError, RUN_STATUS } from './registry.js';
12
+ import { getRun, getDirtyBatch, markDirtyState, logError, setRunStatus, updateBulkProgress, RUN_STATUS } from './registry.js';
13
13
  import { importKeyFromRedis } from './import-one.js';
14
14
 
15
15
  function sleep(ms) {
16
16
  return new Promise((resolve) => setTimeout(resolve, ms));
17
17
  }
18
18
 
19
+ const HEARTBEAT_INTERVAL_MS = 15000;
20
+ const STALL_WARNING_MS = 60000;
21
+
19
22
  /**
20
23
  * Apply dirty keys: for each key in registry with state=dirty, reimport from Redis or delete in destination.
24
+ * On SIGINT/SIGTERM, sets run status to ABORTED, closes DB and rethrows so the process can exit and Ctrl+C works.
25
+ *
21
26
  * @param {import('redis').RedisClientType} redisClient
22
27
  * @param {string} dbPath
23
28
  * @param {string} runId
@@ -25,12 +30,32 @@ function sleep(ms) {
25
30
  * @param {string} [options.pragmaTemplate='default']
26
31
  * @param {number} [options.batch_keys=200]
27
32
  * @param {number} [options.max_rps=0]
28
- * @param {(run: object) => void | Promise<void>} [options.onProgress] - Called after each batch with the current run row.
33
+ * @param {(run: object) => void | Promise<void>} [options.onProgress] - Called after each batch and on heartbeat; may receive _stallWarning if no progress for 60s.
29
34
  */
30
35
  export async function runApplyDirty(redisClient, dbPath, runId, options = {}) {
31
36
  const { pragmaTemplate = 'default', batch_keys = 200, max_rps = 0, onProgress } = options;
32
37
 
33
38
  const db = openDb(dbPath, { pragmaTemplate });
39
+ let abortRequested = false;
40
+ const onSignal = () => {
41
+ abortRequested = true;
42
+ };
43
+ process.on('SIGINT', onSignal);
44
+ process.on('SIGTERM', onSignal);
45
+
46
+ let heartbeatTimer = null;
47
+ if (onProgress) {
48
+ heartbeatTimer = setInterval(() => {
49
+ const run = getRun(db, runId);
50
+ if (!run) return;
51
+ let payload = run;
52
+ if (run.updated_at && Date.now() - run.updated_at > STALL_WARNING_MS) {
53
+ payload = { ...run, _stallWarning: true, _stallMessage: 'No progress for 60s — possible hang or Redis timeout' };
54
+ }
55
+ Promise.resolve(onProgress(payload)).catch(() => {});
56
+ }, HEARTBEAT_INTERVAL_MS);
57
+ }
58
+
34
59
  const run = getRun(db, runId);
35
60
  if (!run) throw new Error(`Run ${runId} not found`);
36
61
 
@@ -45,7 +70,9 @@ export async function runApplyDirty(redisClient, dbPath, runId, options = {}) {
45
70
  const minIntervalMs = max_rps > 0 ? 1000 / max_rps : 0;
46
71
  let lastKeyTime = 0;
47
72
 
73
+ try {
48
74
  for (;;) {
75
+ if (abortRequested) break;
49
76
  let r = getRun(db, runId);
50
77
  if (r && r.status === RUN_STATUS.ABORTED) break;
51
78
  while (r && r.status === RUN_STATUS.PAUSED) {
@@ -61,6 +88,7 @@ export async function runApplyDirty(redisClient, dbPath, runId, options = {}) {
61
88
 
62
89
  // ── Re-import (or remove) keys that changed while bulk was running ──
63
90
  for (const { key: keyBuf } of dirtyBatch) {
91
+ if (abortRequested) break;
64
92
  r = getRun(db, runId);
65
93
  if (r && r.status === RUN_STATUS.ABORTED) break;
66
94
  while (r && r.status === RUN_STATUS.PAUSED) {
@@ -87,7 +115,7 @@ export async function runApplyDirty(redisClient, dbPath, runId, options = {}) {
87
115
  } else if (outcome.skipped) {
88
116
  markDirtyState(db, runId, keyBuf, 'skipped');
89
117
  } else {
90
- logError(db, runId, 'dirty_apply', outcome.error ? 'Import failed' : 'Skipped', keyName);
118
+ logError(db, runId, 'dirty_apply', outcome.errorMessage || (outcome.error ? 'Import failed' : 'Skipped'), keyName);
91
119
  markDirtyState(db, runId, keyBuf, 'error');
92
120
  }
93
121
  }
@@ -102,6 +130,7 @@ export async function runApplyDirty(redisClient, dbPath, runId, options = {}) {
102
130
  // Marked as 'deleted' in the run counter; state changed away from 'deleted'
103
131
  // so the next getDirtyBatch call won't return them again (avoiding infinite loop).
104
132
  for (const { key: keyBuf } of deletedBatch) {
133
+ if (abortRequested) break;
105
134
  r = getRun(db, runId);
106
135
  if (r && r.status === RUN_STATUS.ABORTED) break;
107
136
  while (r && r.status === RUN_STATUS.PAUSED) {
@@ -131,5 +160,28 @@ export async function runApplyDirty(redisClient, dbPath, runId, options = {}) {
131
160
  }
132
161
  }
133
162
 
163
+ if (abortRequested) {
164
+ setRunStatus(db, runId, RUN_STATUS.ABORTED);
165
+ updateBulkProgress(db, runId, { last_error: 'Interrupted by SIGINT/SIGTERM' });
166
+ const run = getRun(db, runId);
167
+ if (onProgress && run) Promise.resolve(onProgress(run)).catch(() => {});
168
+ const err = new Error('Apply dirty interrupted by signal (SIGINT/SIGTERM)');
169
+ err.code = 'APPLY_DIRTY_ABORTED';
170
+ throw err;
171
+ }
172
+
134
173
  return getRun(db, runId);
174
+ } catch (err) {
175
+ if (err.code !== 'APPLY_DIRTY_ABORTED') {
176
+ setRunStatus(db, runId, RUN_STATUS.FAILED);
177
+ updateBulkProgress(db, runId, { last_error: err.message });
178
+ logError(db, runId, 'dirty_apply', err.message, null);
179
+ }
180
+ throw err;
181
+ } finally {
182
+ if (heartbeatTimer) clearInterval(heartbeatTimer);
183
+ process.off('SIGINT', onSignal);
184
+ process.off('SIGTERM', onSignal);
185
+ db.close();
186
+ }
135
187
  }
@@ -35,6 +35,9 @@ function sleep(ms) {
35
35
  return new Promise((resolve) => setTimeout(resolve, ms));
36
36
  }
37
37
 
38
+ const HEARTBEAT_INTERVAL_MS = 15000;
39
+ const STALL_WARNING_MS = 60000;
40
+
38
41
  /**
39
42
  * Run bulk import: SCAN keys from Redis, import into RespLite DB with checkpointing.
40
43
  * On SIGINT/SIGTERM, checkpoint progress, set run status to ABORTED, close DB and rethrow.
@@ -52,7 +55,7 @@ function sleep(ms) {
52
55
  * @param {number} [options.batch_bytes=64*1024*1024] - 64MB
53
56
  * @param {number} [options.checkpoint_interval_sec=30]
54
57
  * @param {boolean} [options.resume=true] - true: start from 0 or continue from checkpoint; false: always start from 0
55
- * @param {function(run): void} [options.onProgress] - called after checkpoint with run row
58
+ * @param {function(run): void} [options.onProgress] - called after checkpoint and on heartbeat; may receive _stallWarning if no progress for 60s
56
59
  */
57
60
  export async function runBulkImport(redisClient, dbPath, runId, options = {}) {
58
61
  const {
@@ -75,6 +78,19 @@ export async function runBulkImport(redisClient, dbPath, runId, options = {}) {
75
78
  process.on('SIGINT', onSignal);
76
79
  process.on('SIGTERM', onSignal);
77
80
 
81
+ let heartbeatTimer = null;
82
+ if (onProgress) {
83
+ heartbeatTimer = setInterval(() => {
84
+ const run = getRun(db, runId);
85
+ if (!run) return;
86
+ let payload = run;
87
+ if (run.updated_at && Date.now() - run.updated_at > STALL_WARNING_MS) {
88
+ payload = { ...run, _stallWarning: true, _stallMessage: 'No progress for 60s — possible hang or Redis timeout' };
89
+ }
90
+ Promise.resolve(onProgress(payload)).catch(() => {});
91
+ }, HEARTBEAT_INTERVAL_MS);
92
+ }
93
+
78
94
  try {
79
95
  const keys = createKeysStorage(db);
80
96
  const strings = createStringsStorage(db, keys);
@@ -146,7 +162,7 @@ export async function runBulkImport(redisClient, dbPath, runId, options = {}) {
146
162
  skipped_keys++;
147
163
  } else {
148
164
  error_keys++;
149
- logError(db, runId, 'bulk', outcome.error ? 'Import failed' : 'Skipped', keyName);
165
+ logError(db, runId, 'bulk', outcome.errorMessage || (outcome.error ? 'Import failed' : 'Skipped'), keyName);
150
166
  }
151
167
 
152
168
  const now2 = Date.now();
@@ -207,6 +223,7 @@ export async function runBulkImport(redisClient, dbPath, runId, options = {}) {
207
223
  }
208
224
  throw err;
209
225
  } finally {
226
+ if (heartbeatTimer) clearInterval(heartbeatTimer);
210
227
  process.off('SIGINT', onSignal);
211
228
  process.off('SIGTERM', onSignal);
212
229
  db.close();
@@ -20,7 +20,7 @@ function toBuffer(value) {
20
20
  * @param {string} keyName
21
21
  * @param {{ keys: import('../storage/sqlite/keys.js').ReturnType<import('../storage/sqlite/keys.js').createKeysStorage>; strings: ReturnType<import('../storage/sqlite/strings.js').createStringsStorage>; hashes: ReturnType<import('../storage/sqlite/hashes.js').createHashesStorage>; sets: ReturnType<import('../storage/sqlite/sets.js').createSetsStorage>; lists: ReturnType<import('../storage/sqlite/lists.js').createListsStorage>; zsets: ReturnType<import('../storage/sqlite/zsets.js').createZsetsStorage> }} storages
22
22
  * @param {{ now?: number }} options
23
- * @returns {Promise<{ ok: boolean; skipped?: boolean; error?: boolean; bytes?: number }>}
23
+ * @returns {Promise<{ ok: boolean; skipped?: boolean; error?: boolean; errorMessage?: string; bytes?: number }>}
24
24
  */
25
25
  export async function importKeyFromRedis(redisClient, keyName, storages, options = {}) {
26
26
  const now = options.now ?? Date.now();
@@ -99,7 +99,7 @@ export async function importKeyFromRedis(redisClient, keyName, storages, options
99
99
 
100
100
  return { ok: false, skipped: true };
101
101
  } catch (err) {
102
- return { ok: false, error: true };
102
+ return { ok: false, error: true, errorMessage: err.message };
103
103
  }
104
104
  }
105
105