resplite 1.2.2 → 1.2.6

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -37,6 +37,9 @@ function sleep(ms) {
 
 /**
  * Run bulk import: SCAN keys from Redis, import into RespLite DB with checkpointing.
+ * On SIGINT/SIGTERM, checkpoint progress, set run status to ABORTED, close DB and rethrow.
+ * DB is always closed in a finally block (graceful shutdown when process is interrupted).
+ *
  * @param {import('redis').RedisClientType} redisClient
  * @param {string} dbPath
  * @param {string} runId
@@ -65,41 +68,47 @@ export async function runBulkImport(redisClient, dbPath, runId, options = {}) {
   } = options;
 
   const db = openDb(dbPath, { pragmaTemplate });
-  const keys = createKeysStorage(db);
-  const strings = createStringsStorage(db, keys);
-  const hashes = createHashesStorage(db, keys);
-  const sets = createSetsStorage(db, keys);
-  const lists = createListsStorage(db, keys);
-  const zsets = createZsetsStorage(db, keys);
-  const storages = { keys, strings, hashes, sets, lists, zsets };
-
-  createRun(db, runId, sourceUri, { scan_count_hint: scan_count });
-  let run = getRun(db, runId);
-  if (!run) throw new Error(`Run ${runId} not found`);
-
-  let cursor = resume && run.scan_cursor !== undefined ? parseInt(String(run.scan_cursor), 10) : 0;
-  let scanned_keys = resume ? (run.scanned_keys || 0) : 0;
-  let migrated_keys = resume ? (run.migrated_keys || 0) : 0;
-  let skipped_keys = resume ? (run.skipped_keys || 0) : 0;
-  let error_keys = resume ? (run.error_keys || 0) : 0;
-  let migrated_bytes = resume ? (run.migrated_bytes || 0) : 0;
-
-  if (!resume) {
-    updateBulkProgress(db, runId, { scan_cursor: String(cursor), scanned_keys, migrated_keys, skipped_keys, error_keys, migrated_bytes });
-  }
-
-  let lastCheckpointTime = Date.now();
-  let batchScanned = 0;
-  let batchBytes = 0;
-  const minIntervalMs = max_rps > 0 ? 1000 / max_rps : 0;
-  let lastKeyTime = 0;
+  let abortRequested = false;
+  const onSignal = () => {
+    abortRequested = true;
+  };
+  process.on('SIGINT', onSignal);
+  process.on('SIGTERM', onSignal);
 
   try {
-    do {
+    const keys = createKeysStorage(db);
+    const strings = createStringsStorage(db, keys);
+    const hashes = createHashesStorage(db, keys);
+    const sets = createSetsStorage(db, keys);
+    const lists = createListsStorage(db, keys);
+    const zsets = createZsetsStorage(db, keys);
+    const storages = { keys, strings, hashes, sets, lists, zsets };
+
+    createRun(db, runId, sourceUri, { scan_count_hint: scan_count });
+    let run = getRun(db, runId);
+    if (!run) throw new Error(`Run ${runId} not found`);
+
+    let cursor = resume && run.scan_cursor !== undefined ? parseInt(String(run.scan_cursor), 10) : 0;
+    let scanned_keys = resume ? (run.scanned_keys || 0) : 0;
+    let migrated_keys = resume ? (run.migrated_keys || 0) : 0;
+    let skipped_keys = resume ? (run.skipped_keys || 0) : 0;
+    let error_keys = resume ? (run.error_keys || 0) : 0;
+    let migrated_bytes = resume ? (run.migrated_bytes || 0) : 0;
+
+    if (!resume) {
+      updateBulkProgress(db, runId, { scan_cursor: String(cursor), scanned_keys, migrated_keys, skipped_keys, error_keys, migrated_bytes });
+    }
+
+    let lastCheckpointTime = Date.now();
+    let batchScanned = 0;
+    let batchBytes = 0;
+    const minIntervalMs = max_rps > 0 ? 1000 / max_rps : 0;
+    let lastKeyTime = 0;
+
+    outer: do {
      run = getRun(db, runId);
-      if (run && run.status === RUN_STATUS.ABORTED) {
-        break;
-      }
+      if (run && run.status === RUN_STATUS.ABORTED) break;
+      if (abortRequested) break;
      while (run && run.status === RUN_STATUS.PAUSED) {
        await sleep(2000);
        run = getRun(db, runId);
@@ -111,8 +120,9 @@ export async function runBulkImport(redisClient, dbPath, runId, options = {}) {
      const keyList = parsed.keys || [];
 
      for (const keyName of keyList) {
+        if (abortRequested) break outer;
        run = getRun(db, runId);
-        if (run && run.status === RUN_STATUS.ABORTED) break;
+        if (run && run.status === RUN_STATUS.ABORTED) break outer;
        while (run && run.status === RUN_STATUS.PAUSED) {
          await sleep(2000);
          run = getRun(db, runId);
@@ -162,6 +172,23 @@ export async function runBulkImport(redisClient, dbPath, runId, options = {}) {
      }
    } while (cursor !== 0);
 
+    if (abortRequested) {
+      updateBulkProgress(db, runId, {
+        scan_cursor: String(cursor),
+        scanned_keys,
+        migrated_keys,
+        skipped_keys,
+        error_keys,
+        migrated_bytes,
+      });
+      setRunStatus(db, runId, RUN_STATUS.ABORTED);
+      run = getRun(db, runId);
+      if (onProgress && run) onProgress(run);
+      const err = new Error('Bulk import interrupted by signal (SIGINT/SIGTERM)');
+      err.code = 'BULK_ABORTED';
+      throw err;
+    }
+
    updateBulkProgress(db, runId, {
      scan_cursor: '0',
      scanned_keys,
@@ -173,9 +200,15 @@ export async function runBulkImport(redisClient, dbPath, runId, options = {}) {
    setRunStatus(db, runId, RUN_STATUS.COMPLETED);
    return getRun(db, runId);
  } catch (err) {
-    setRunStatus(db, runId, RUN_STATUS.FAILED);
-    updateBulkProgress(db, runId, { last_error: err.message });
-    logError(db, runId, 'bulk', err.message, null);
+    if (err.code !== 'BULK_ABORTED') {
+      setRunStatus(db, runId, RUN_STATUS.FAILED);
+      updateBulkProgress(db, runId, { last_error: err.message });
+      logError(db, runId, 'bulk', err.message, null);
+    }
    throw err;
+  } finally {
+    process.off('SIGINT', onSignal);
+    process.off('SIGTERM', onSignal);
+    db.close();
  }
 }
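For orientation, a minimal usage sketch (not taken from the package's documentation) of how a caller might drive runBulkImport directly and tell a signal-driven abort apart from a real failure. The import path mirrors the module name used later in this diff, and the Redis URL, DB path, and run id are placeholders.

import { createClient } from 'redis';
import { runBulkImport } from './bulk.js'; // placeholder path; use the package's public entry point

const redisClient = createClient({ url: 'redis://localhost:6379' });
await redisClient.connect();

try {
  // Reusing the same runId resumes from the checkpointed SCAN cursor.
  const run = await runBulkImport(redisClient, './data.resplite', 'run-2024-01', {
    resume: true,
    onProgress: (r) => console.log(`scanned=${r.scanned_keys} migrated=${r.migrated_keys}`),
  });
  console.log('bulk import finished with status', run.status);
} catch (err) {
  if (err.code === 'BULK_ABORTED') {
    // SIGINT/SIGTERM arrived: progress was checkpointed, the run is marked ABORTED,
    // and the DB was closed in the finally block; rerun with the same runId to continue.
    console.log('bulk import interrupted; progress was checkpointed');
  } else {
    throw err;
  }
} finally {
  await redisClient.quit();
}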
@@ -24,7 +24,9 @@ import { runPreflight, readKeyspaceEvents, setKeyspaceEvents } from './preflight
 import { runBulkImport } from './bulk.js';
 import { runApplyDirty } from './apply-dirty.js';
 import { runVerify } from './verify.js';
+import { runMigrateSearch } from './migrate-search.js';
 import { getRun, getDirtyCounts } from './registry.js';
+import { startDirtyTracker as startDirtyTrackerProcess } from './tracker.js';
 
 /**
  * @typedef {object} MigrationOptions
@@ -46,10 +48,13 @@ import { getRun, getDirtyCounts } from './registry.js';
 * @returns {{
 *   preflight(): Promise<object>,
 *   enableKeyspaceNotifications(opts?: { value?: string, merge?: boolean }): Promise<{ ok: boolean, previous: string|null, applied: string, error?: string }>,
+*   startDirtyTracker(opts?: { pragmaTemplate?: string, onProgress?: function }): Promise<{ running: true }>,
+*   stopDirtyTracker(): Promise<{ running: false }>,
 *   bulk(opts?: { resume?: boolean, onProgress?: function }): Promise<object>,
 *   status(): { run: object, dirty: object } | null,
 *   applyDirty(opts?: { batchKeys?: number, maxRps?: number }): Promise<object>,
 *   verify(opts?: { samplePct?: number, maxSample?: number }): Promise<object>,
+*   migrateSearch(opts?: { onlyIndices?: string[], scanCount?: number, maxRps?: number, batchDocs?: number, maxSuggestions?: number, skipExisting?: boolean, withSuggestions?: boolean, onProgress?: function }): Promise<object>,
 *   close(): Promise<void>,
 * }}
 */
@@ -67,6 +72,7 @@ export function createMigration({
   if (!to) throw new Error('createMigration: "to" (db path) is required');
 
   let _client = null;
+  let _tracker = null;
 
   async function getClient() {
     if (_client) return _client;
@@ -111,6 +117,41 @@ export function createMigration({
      return setKeyspaceEvents(client, value, { configCommand, merge });
    },
 
+    /**
+     * Start dirty-key tracking in-process for this migration controller.
+     * Use this to run the full minimal-downtime flow in one Node script.
+     *
+     * @param {{
+     *   pragmaTemplate?: string,
+     *   onProgress?: (progress: { runId: string, key: string, event: string, totalEvents: number, at: string }) => void | Promise<void>
+     * }} [opts]
+     */
+    async startDirtyTracker({ pragmaTemplate: pt = pragmaTemplate, onProgress } = {}) {
+      if (_tracker) return { running: true };
+      const id = requireRunId();
+      _tracker = await startDirtyTrackerProcess({
+        from,
+        to,
+        runId: id,
+        pragmaTemplate: pt,
+        configCommand,
+        onProgress,
+      });
+      return { running: true };
+    },
+
+    /**
+     * Stop in-process dirty-key tracking started by `startDirtyTracker`.
+     * Safe to call even if tracking is not running.
+     */
+    async stopDirtyTracker() {
+      if (_tracker) {
+        await _tracker.stop();
+        _tracker = null;
+      }
+      return { running: false };
+    },
+
    /**
     * Step 1 — Bulk import: SCAN all keys from Redis into the destination DB.
     * Resume is on by default: first run starts from 0, later runs continue from checkpoint.
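Together with bulk, applyDirty, and verify, the new tracker methods let the whole minimal-downtime flow run in a single script. The sketch below is an illustration, not the package's documented recipe: the import path is a placeholder, and the createMigration options (from, to, runId) are inferred from identifiers and error messages visible in this diff.

import { createMigration } from 'resplite'; // placeholder path; use the package's actual entry point

const migration = createMigration({
  from: 'redis://localhost:6379',  // source Redis
  to: './data.resplite',           // destination RespLite DB path
  runId: 'cutover-1',
});

try {
  await migration.preflight();
  await migration.enableKeyspaceNotifications(); // so writes can be observed as dirty keys
  await migration.startDirtyTracker();           // record keys modified while the bulk copy runs
  await migration.bulk({ onProgress: (r) => console.log('migrated', r.migrated_keys, 'keys') });
  await migration.applyDirty();                  // re-copy keys that changed during the bulk phase
  await migration.stopDirtyTracker();
  const report = await migration.verify({ samplePct: 5 });
  console.log('verify:', report);
} finally {
  await migration.close();                       // also stops the tracker if it is still running
}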
@@ -173,10 +214,43 @@ export function createMigration({
      return runVerify(client, to, { pragmaTemplate, samplePct, maxSample });
    },
 
+    /**
+     * Step 5 — Migrate search indices: copy RediSearch index schemas and documents
+     * into RespLite FT.* tables.
+     *
+     * Requires RediSearch (Redis Stack or redis/search module) on the source.
+     * Only HASH-based indices with TEXT/TAG/NUMERIC fields are supported.
+     * TAG and NUMERIC fields are mapped to TEXT.
+     *
+     * @param {{
+     *   onlyIndices?: string[],
+     *   scanCount?: number,
+     *   maxRps?: number,
+     *   batchDocs?: number,
+     *   maxSuggestions?: number,
+     *   skipExisting?: boolean,
+     *   withSuggestions?: boolean,
+     *   onProgress?: (result: object) => void
+     * }} [opts]
+     * @returns {Promise<{ indices: object[], aborted: boolean }>}
+     */
+    async migrateSearch(opts = {}) {
+      const client = await getClient();
+      return runMigrateSearch(client, to, {
+        pragmaTemplate,
+        maxRps,
+        ...opts,
+      });
+    },
+
    /**
     * Disconnect from Redis. Call when done with all migration operations.
     */
    async close() {
+      if (_tracker) {
+        await _tracker.stop().catch(() => {});
+        _tracker = null;
+      }
      if (_client) {
        await _client.quit().catch(() => {});
        _client = null;
@@ -185,6 +259,6 @@
   };
 }
 
-export { runPreflight, readKeyspaceEvents, setKeyspaceEvents, runBulkImport, runApplyDirty, runVerify };
+export { runPreflight, readKeyspaceEvents, setKeyspaceEvents, runBulkImport, runApplyDirty, runVerify, runMigrateSearch };
 export { startDirtyTracker } from './tracker.js';
 export { getRun, getDirtyCounts, createRun, setRunStatus, logError } from './registry.js';
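Finally, a short sketch of the new Step 5, reusing the `migration` controller from the previous example. Option names come from the migrateSearch JSDoc above; the index name is hypothetical.

const result = await migration.migrateSearch({
  onlyIndices: ['idx:products'],  // hypothetical index; omit to migrate every supported HASH-based index
  withSuggestions: true,
  skipExisting: true,
  onProgress: (progress) => console.log('search migration:', progress),
});
console.log(`migrated ${result.indices.length} indices, aborted=${result.aborted}`);

The same step is also available standalone through the newly exported runMigrateSearch(client, dbPath, opts), as shown in the method body above.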