resplite 1.4.0 → 1.4.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -166,7 +166,17 @@ await rl.question('Stop app traffic to Redis, then press Enter to apply the fina
166
166
  rl.close();
167
167
 
168
168
  // Step 3 — Apply dirty keys that changed in Redis during bulk
169
- await m.applyDirty({ onProgress: console.log });
169
+ await m.applyDirty({
170
+ concurrency: 32,
171
+ batchKeys: 5000,
172
+ onProgress: (r) => {
173
+ console.log(
174
+ `dirty processed=${r.dirty_keys_processed} pending=${r.dirty_pending} ` +
175
+ `applied=${r.dirty_keys_applied} deleted=${r.dirty_keys_deleted} ` +
176
+ `rate=${r.dirty_keys_per_second.toFixed(1)} keys/s eta=${r.dirty_eta_seconds ?? '—'}s`
177
+ );
178
+ },
179
+ });
170
180
 
171
181
  // Step 3b — Stop tracker after cutover
172
182
  await m.stopDirtyTracker();
@@ -507,16 +517,27 @@ redis-cli -p 6380 PING
507
517
  | `RESPLITE_DB` | `./data.db` | SQLite database file |
508
518
  | `RESPLITE_PRAGMA_TEMPLATE` | `default` | SQLite PRAGMA preset (see below) |
509
519
 
510
- ### PRAGMA templates
520
+ ### PRAGMA (convention over configuration)
521
+
522
+ A **template** is applied by default (`default`); you usually don't pass anything. Only pass **overrides** when you need to change specific pragmas.
511
523
 
512
524
  | Template | Description | Key settings |
513
- |---|---|---|
525
+ |----------|-------------|--------------|
514
526
  | `default` | Balanced durability and speed (recommended) | WAL, synchronous=NORMAL, 20 MB cache |
515
527
  | `performance` | Maximum throughput, reduced crash safety | WAL, synchronous=OFF, 64 MB cache, 512 MB mmap, exclusive locking |
516
528
  | `safety` | Crash-safe writes at the cost of speed | WAL, synchronous=FULL, 20 MB cache |
517
529
  | `minimal` | Only WAL + foreign keys | WAL, foreign_keys=ON |
518
530
  | `none` | No pragmas applied, pure SQLite defaults | - |
519
531
 
532
+ Override specific pragmas only when needed. Overrides are applied after the template. Example — 1 GiB cache:
533
+
534
+ ```javascript
535
+ const srv = await createRESPlite({
536
+ db: './data.db',
537
+ pragma: { cache_size: -1024 * 1024 }, // negative = KiB, so 1 GiB
538
+ });
539
+ ```
540
+
520
541
  ## Benchmark (Redis vs RESPLite)
521
542
 
522
543
  A typical comparison is **Redis (for example, in Docker)** on one side and **RESPLite locally** on the other. In that setup, RESPLite often shows **better latency** because it avoids Docker networking and runs in the same process or host. The benchmark below uses RESPLite with the **default** PRAGMA template only.
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "resplite",
3
- "version": "1.4.0",
3
+ "version": "1.4.4",
4
4
  "description": "A RESP2 server with practical Redis compatibility, backed by SQLite",
5
5
  "type": "module",
6
6
  "main": "src/index.js",
package/src/embed.js CHANGED
@@ -33,7 +33,8 @@ export { handleConnection, createEngine, openDb };
33
33
  * @param {string} [options.db=':memory:'] SQLite file path, or ':memory:' for in-memory.
34
34
  * @param {string} [options.host='127.0.0.1'] Host to listen on.
35
35
  * @param {number} [options.port=0] Port to listen on (0 = OS-assigned).
36
- * @param {string} [options.pragmaTemplate='default'] PRAGMA preset (default|performance|safety|minimal|none).
36
+ * @param {string} [options.pragmaTemplate='default'] PRAGMA preset (default|performance|safety|minimal|none). Convention: this template is applied by default; no config needed.
37
+ * @param {Record<string, string|number>} [options.pragma] Override specific pragmas only when needed (e.g. { synchronous: 'FULL' }). Applied after the template.
37
38
  * @param {RESPliteHooks} [options.hooks] Optional event hooks for observability (onUnknownCommand, onCommandError, onSocketError).
38
39
  * @param {boolean} [options.gracefulShutdown=true] If true, register SIGTERM/SIGINT to call close(). Set false if you handle shutdown yourself to avoid double handlers.
39
40
  * @returns {Promise<{ port: number, host: string, close: () => Promise<void> }>}
@@ -43,10 +44,11 @@ export async function createRESPlite({
43
44
  host = '127.0.0.1',
44
45
  port = 0,
45
46
  pragmaTemplate = 'default',
47
+ pragma,
46
48
  hooks = {},
47
49
  gracefulShutdown = true,
48
50
  } = {}) {
49
- const db = openDb(dbPath, { pragmaTemplate });
51
+ const db = openDb(dbPath, { pragmaTemplate, pragma });
50
52
  const engine = createEngine({ db });
51
53
  const connections = new Set();
52
54
 
package/src/index.js CHANGED
@@ -24,6 +24,7 @@ const DEFAULT_PORT = 6379;
24
24
  * @param {number} [options.port]
25
25
  * @param {string} [options.dbPath]
26
26
  * @param {string} [options.pragmaTemplate]
27
+ * @param {Record<string, string|number>} [options.pragma] Override specific pragmas when needed (e.g. { synchronous: 'FULL' }). Convention: template is applied by default.
27
28
  * @param {boolean} [options.gracefulShutdown=true] If true, register SIGTERM/SIGINT to close server and DB. Set false if you handle shutdown yourself.
28
29
  */
29
30
  export function startServer(options = {}) {
@@ -32,7 +33,7 @@ export function startServer(options = {}) {
32
33
  const pragmaTemplate = options.pragmaTemplate ?? process.env.RESPLITE_PRAGMA_TEMPLATE ?? 'default';
33
34
  const gracefulShutdown = options.gracefulShutdown !== false;
34
35
 
35
- const db = openDb(dbPath, { pragmaTemplate });
36
+ const db = openDb(dbPath, { pragmaTemplate, pragma: options.pragma });
36
37
  const cache = createCache({ enabled: true });
37
38
  const engine = createEngine({ db, cache });
38
39
  const sweeper = createExpirationSweeper({
@@ -16,6 +16,30 @@ function sleep(ms) {
16
16
  return new Promise((resolve) => setTimeout(resolve, ms));
17
17
  }
18
18
 
19
+ function buildDirtyProgressPayload(run, startedAtMs, totalProcessed, totalFetched, pendingDirty, pendingDeleted) {
20
+ if (!run) return null;
21
+ const elapsedSec = Math.max(0.001, (Date.now() - startedAtMs) / 1000);
22
+ const keysPerSec = totalProcessed / elapsedSec;
23
+ const pendingTotal = pendingDirty + pendingDeleted;
24
+ const etaSeconds = keysPerSec > 0 ? Math.ceil(pendingTotal / keysPerSec) : null;
25
+ const applied = Number(run.dirty_keys_applied || 0);
26
+ const deleted = Number(run.dirty_keys_deleted || 0);
27
+ const reconciled = applied + deleted;
28
+
29
+ return {
30
+ ...run,
31
+ dirty_elapsed_seconds: elapsedSec,
32
+ dirty_keys_per_second: keysPerSec,
33
+ dirty_keys_processed: totalProcessed,
34
+ dirty_keys_fetched: totalFetched,
35
+ dirty_reconciled_total: reconciled,
36
+ dirty_pending: pendingTotal,
37
+ dirty_pending_dirty: pendingDirty,
38
+ dirty_pending_deleted: pendingDeleted,
39
+ dirty_eta_seconds: etaSeconds,
40
+ };
41
+ }
42
+
19
43
  /**
20
44
  * Apply dirty keys: for each key in registry with state=dirty, reimport from Redis or delete in destination.
21
45
  * @param {import('redis').RedisClientType} redisClient
@@ -25,10 +49,19 @@ function sleep(ms) {
25
49
  * @param {string} [options.pragmaTemplate='default']
26
50
  * @param {number} [options.batch_keys=200]
27
51
  * @param {number} [options.max_rps=0]
52
+ * @param {number} [options.concurrency=1]
53
+ * @param {number} [options.progress_interval_ms=2000]
28
54
  * @param {(run: object) => void | Promise<void>} [options.onProgress] - Called after each batch with the current run row.
29
55
  */
30
56
  export async function runApplyDirty(redisClient, dbPath, runId, options = {}) {
31
- const { pragmaTemplate = 'default', batch_keys = 200, max_rps = 0, onProgress } = options;
57
+ const {
58
+ pragmaTemplate = 'default',
59
+ batch_keys = 200,
60
+ max_rps = 0,
61
+ concurrency = 1,
62
+ progress_interval_ms = 2000,
63
+ onProgress,
64
+ } = options;
32
65
 
33
66
  const db = openDb(dbPath, { pragmaTemplate });
34
67
  const run = getRun(db, runId);
@@ -43,7 +76,41 @@ export async function runApplyDirty(redisClient, dbPath, runId, options = {}) {
43
76
  const storages = { keys, strings, hashes, sets, lists, zsets };
44
77
 
45
78
  const minIntervalMs = max_rps > 0 ? 1000 / max_rps : 0;
46
- let lastKeyTime = 0;
79
+ const workerCount = Number.isFinite(concurrency) ? Math.max(1, Math.floor(concurrency)) : 1;
80
+ let nextAllowedAt = 0;
81
+ const startedAtMs = Date.now();
82
+ let totalProcessed = 0;
83
+ let totalFetched = 0;
84
+ let lastProgressAt = 0;
85
+
86
+ async function awaitRateLimit() {
87
+ if (minIntervalMs <= 0) return;
88
+ const now = Date.now();
89
+ const scheduled = Math.max(now, nextAllowedAt);
90
+ nextAllowedAt = scheduled + minIntervalMs;
91
+ if (scheduled > now) {
92
+ await sleep(scheduled - now);
93
+ }
94
+ }
95
+
96
+ function emitProgress(force = false, pendingDirty = 0, pendingDeleted = 0) {
97
+ if (!onProgress) return;
98
+ const now = Date.now();
99
+ if (!force && now - lastProgressAt < progress_interval_ms) return;
100
+ const current = getRun(db, runId);
101
+ lastProgressAt = now;
102
+ if (current) {
103
+ const payload = buildDirtyProgressPayload(
104
+ current,
105
+ startedAtMs,
106
+ totalProcessed,
107
+ totalFetched,
108
+ pendingDirty,
109
+ pendingDeleted
110
+ );
111
+ Promise.resolve(onProgress(payload)).catch(() => {});
112
+ }
113
+ }
47
114
 
48
115
  for (;;) {
49
116
  let r = getRun(db, runId);
@@ -57,79 +124,146 @@ export async function runApplyDirty(redisClient, dbPath, runId, options = {}) {
57
124
  const deletedBatch = getDirtyBatch(db, runId, 'deleted', batch_keys);
58
125
  if (dirtyBatch.length === 0 && deletedBatch.length === 0) break;
59
126
 
60
- const batchSize = dirtyBatch.length + deletedBatch.length;
127
+ totalFetched += dirtyBatch.length + deletedBatch.length;
128
+ let aborted = false;
61
129
 
62
130
  // ── Re-import (or remove) keys that changed while bulk was running ──
63
- for (const { key: keyBuf } of dirtyBatch) {
131
+ for (let i = 0; i < dirtyBatch.length; i += workerCount) {
64
132
  r = getRun(db, runId);
65
- if (r && r.status === RUN_STATUS.ABORTED) break;
133
+ if (r && r.status === RUN_STATUS.ABORTED) {
134
+ aborted = true;
135
+ break;
136
+ }
66
137
  while (r && r.status === RUN_STATUS.PAUSED) {
67
138
  await sleep(2000);
68
139
  r = getRun(db, runId);
69
140
  }
70
-
71
- if (minIntervalMs > 0) {
72
- const elapsed = Date.now() - lastKeyTime;
73
- if (elapsed < minIntervalMs) await sleep(minIntervalMs - elapsed);
74
- lastKeyTime = Date.now();
141
+ if (r && r.status === RUN_STATUS.ABORTED) {
142
+ aborted = true;
143
+ break;
75
144
  }
76
145
 
77
- const keyName = keyBuf.toString('utf8');
78
- try {
79
- const type = (await redisClient.type(keyName)).toLowerCase();
80
- if (type === 'none' || !type) {
81
- keys.delete(keyBuf);
82
- markDirtyState(db, runId, keyBuf, 'deleted');
83
- } else {
84
- const outcome = await importKeyFromRedis(redisClient, keyName, storages, {});
85
- if (outcome.ok) {
86
- markDirtyState(db, runId, keyBuf, 'applied');
87
- } else if (outcome.skipped) {
88
- markDirtyState(db, runId, keyBuf, 'skipped');
146
+ const chunk = dirtyBatch.slice(i, i + workerCount);
147
+ const results = await Promise.all(
148
+ chunk.map(async ({ key: keyBuf }) => {
149
+ const keyName = keyBuf.toString('utf8');
150
+ try {
151
+ await awaitRateLimit();
152
+ const type = (await redisClient.type(keyName)).toLowerCase();
153
+ if (type === 'none' || !type) {
154
+ return { keyBuf, keyName, state: 'deleted' };
155
+ }
156
+ const outcome = await importKeyFromRedis(redisClient, keyName, storages, {});
157
+ return { keyBuf, keyName, state: 'imported', outcome };
158
+ } catch (err) {
159
+ return { keyBuf, keyName, state: 'exception', error: err };
160
+ }
161
+ })
162
+ );
163
+
164
+ for (const result of results) {
165
+ try {
166
+ if (result.state === 'deleted') {
167
+ keys.delete(result.keyBuf);
168
+ markDirtyState(db, runId, result.keyBuf, 'deleted');
169
+ } else if (result.state === 'imported') {
170
+ if (result.outcome.ok) {
171
+ markDirtyState(db, runId, result.keyBuf, 'applied');
172
+ } else if (result.outcome.skipped) {
173
+ markDirtyState(db, runId, result.keyBuf, 'skipped');
174
+ } else {
175
+ logError(
176
+ db,
177
+ runId,
178
+ 'dirty_apply',
179
+ result.outcome.error ? 'Import failed' : 'Skipped',
180
+ result.keyName
181
+ );
182
+ markDirtyState(db, runId, result.keyBuf, 'error');
183
+ }
89
184
  } else {
90
- logError(db, runId, 'dirty_apply', outcome.error ? 'Import failed' : 'Skipped', keyName);
91
- markDirtyState(db, runId, keyBuf, 'error');
185
+ logError(db, runId, 'dirty_apply', result.error.message, result.keyBuf);
186
+ markDirtyState(db, runId, result.keyBuf, 'error');
92
187
  }
188
+ } catch (err) {
189
+ logError(db, runId, 'dirty_apply', err.message, result.keyBuf);
190
+ markDirtyState(db, runId, result.keyBuf, 'error');
191
+ } finally {
192
+ totalProcessed++;
93
193
  }
94
- } catch (err) {
95
- logError(db, runId, 'dirty_apply', err.message, keyBuf);
96
- markDirtyState(db, runId, keyBuf, 'error');
97
194
  }
195
+
196
+ emitProgress(false, Math.max(0, dirtyBatch.length - (i + chunk.length)), deletedBatch.length);
197
+ }
198
+
199
+ if (aborted) {
200
+ emitProgress(true, dirtyBatch.length, deletedBatch.length);
201
+ break;
98
202
  }
99
203
 
100
204
  // ── Apply deletions recorded by the tracker (del / expired events) ──
101
205
  // The tracker already determined these keys are gone; delete from destination.
102
206
  // Marked as 'deleted' in the run counter; state changed away from 'deleted'
103
207
  // so the next getDirtyBatch call won't return them again (avoiding infinite loop).
104
- for (const { key: keyBuf } of deletedBatch) {
208
+ for (let i = 0; i < deletedBatch.length; i += workerCount) {
105
209
  r = getRun(db, runId);
106
- if (r && r.status === RUN_STATUS.ABORTED) break;
210
+ if (r && r.status === RUN_STATUS.ABORTED) {
211
+ aborted = true;
212
+ break;
213
+ }
107
214
  while (r && r.status === RUN_STATUS.PAUSED) {
108
215
  await sleep(2000);
109
216
  r = getRun(db, runId);
110
217
  }
218
+ if (r && r.status === RUN_STATUS.ABORTED) {
219
+ aborted = true;
220
+ break;
221
+ }
111
222
 
112
- try {
113
- keys.delete(keyBuf);
114
- // Increment dirty_keys_deleted counter and transition state out of 'deleted'
115
- // so this key is not re-processed in the next batch iteration.
116
- const now = Date.now();
117
- db.prepare(
118
- `UPDATE migration_dirty_keys SET state = 'applied', last_seen_at = ? WHERE run_id = ? AND key = ?`
119
- ).run(now, runId, keyBuf);
120
- db.prepare(
121
- `UPDATE migration_runs SET dirty_keys_deleted = dirty_keys_deleted + 1, updated_at = ? WHERE run_id = ?`
122
- ).run(now, runId);
123
- } catch (err) {
124
- logError(db, runId, 'dirty_apply', err.message, keyBuf);
125
- markDirtyState(db, runId, keyBuf, 'error');
223
+ const chunk = deletedBatch.slice(i, i + workerCount);
224
+ for (const { key: keyBuf } of chunk) {
225
+ try {
226
+ keys.delete(keyBuf);
227
+ // Increment dirty_keys_deleted counter and transition state out of 'deleted'
228
+ // so this key is not re-processed in the next batch iteration.
229
+ const now = Date.now();
230
+ db.prepare(
231
+ `UPDATE migration_dirty_keys SET state = 'applied', last_seen_at = ? WHERE run_id = ? AND key = ?`
232
+ ).run(now, runId, keyBuf);
233
+ db.prepare(
234
+ `UPDATE migration_runs SET dirty_keys_deleted = dirty_keys_deleted + 1, updated_at = ? WHERE run_id = ?`
235
+ ).run(now, runId);
236
+ } catch (err) {
237
+ logError(db, runId, 'dirty_apply', err.message, keyBuf);
238
+ markDirtyState(db, runId, keyBuf, 'error');
239
+ } finally {
240
+ totalProcessed++;
241
+ }
126
242
  }
243
+ emitProgress(false, 0, Math.max(0, deletedBatch.length - (i + chunk.length)));
244
+ if (aborted) break;
127
245
  }
128
- if (batchSize > 0 && onProgress) {
129
- const run = getRun(db, runId);
130
- if (run) Promise.resolve(onProgress(run)).catch(() => {});
246
+
247
+ if (aborted) {
248
+ emitProgress(true, dirtyBatch.length, deletedBatch.length);
249
+ break;
131
250
  }
251
+
252
+ const pendingDirty = db.prepare(
253
+ `SELECT COUNT(*) as n FROM migration_dirty_keys WHERE run_id = ? AND state = 'dirty'`
254
+ ).get(runId).n;
255
+ const pendingDeleted = db.prepare(
256
+ `SELECT COUNT(*) as n FROM migration_dirty_keys WHERE run_id = ? AND state = 'deleted'`
257
+ ).get(runId).n;
258
+ emitProgress(true, pendingDirty, pendingDeleted);
132
259
  }
133
260
 
261
+ const finalPendingDirty = db.prepare(
262
+ `SELECT COUNT(*) as n FROM migration_dirty_keys WHERE run_id = ? AND state = 'dirty'`
263
+ ).get(runId).n;
264
+ const finalPendingDeleted = db.prepare(
265
+ `SELECT COUNT(*) as n FROM migration_dirty_keys WHERE run_id = ? AND state = 'deleted'`
266
+ ).get(runId).n;
267
+ emitProgress(true, finalPendingDirty, finalPendingDeleted);
134
268
  return getRun(db, runId);
135
269
  }
@@ -36,7 +36,7 @@ import { startDirtyTracker as startDirtyTrackerProcess } from './tracker.js';
36
36
  * @property {string} [pragmaTemplate='default'] - PRAGMA preset.
37
37
  * @property {number} [scanCount=1000]
38
38
  * @property {number} [maxRps=0] - Max requests/s (0 = unlimited).
39
- * @property {number} [concurrency=1] - Concurrent imports during bulk migration.
39
+ * @property {number} [concurrency=1] - Concurrent imports during bulk/apply-dirty migration.
40
40
  * @property {number} [estimatedTotalKeys=0] - Optional total-keys estimate for ETA/progress in onProgress.
41
41
  * @property {number} [batchKeys=200]
42
42
  * @property {number} [batchBytes=67108864] - 64 MB default.
@@ -54,7 +54,7 @@ import { startDirtyTracker as startDirtyTrackerProcess } from './tracker.js';
54
54
  * stopDirtyTracker(): Promise<{ running: false }>,
55
55
  * bulk(opts?: { resume?: boolean, onProgress?: function }): Promise<object>,
56
56
  * status(): { run: object, dirty: object } | null,
57
- * applyDirty(opts?: { batchKeys?: number, maxRps?: number, onProgress?: function }): Promise<object>,
57
+ * applyDirty(opts?: { batchKeys?: number, maxRps?: number, concurrency?: number, progressIntervalMs?: number, onProgress?: function }): Promise<object>,
58
58
  * verify(opts?: { samplePct?: number, maxSample?: number }): Promise<object>,
59
59
  * migrateSearch(opts?: { onlyIndices?: string[], scanCount?: number, maxRps?: number, batchDocs?: number, maxSuggestions?: number, skipExisting?: boolean, withSuggestions?: boolean, onProgress?: function }): Promise<object>,
60
60
  * close(): Promise<void>,
@@ -205,15 +205,23 @@ export function createMigration({
205
205
  /**
206
206
  * Step 3 — Apply dirty: reconcile keys that changed in Redis during bulk import.
207
207
  *
208
- * @param {{ batchKeys?: number, maxRps?: number, onProgress?: (run: object) => void }} [opts]
208
+ * @param {{ batchKeys?: number, maxRps?: number, concurrency?: number, progressIntervalMs?: number, onProgress?: (run: object) => void }} [opts]
209
209
  */
210
- async applyDirty({ batchKeys: bk = batchKeys, maxRps: rps = maxRps, onProgress } = {}) {
210
+ async applyDirty({
211
+ batchKeys: bk = batchKeys,
212
+ maxRps: rps = maxRps,
213
+ concurrency: c = concurrency,
214
+ progressIntervalMs: pim = 2000,
215
+ onProgress,
216
+ } = {}) {
211
217
  const id = requireRunId();
212
218
  const client = await getClient();
213
219
  return runApplyDirty(client, to, id, {
214
220
  pragmaTemplate,
215
221
  batch_keys: bk,
216
222
  max_rps: rps,
223
+ concurrency: c,
224
+ progress_interval_ms: pim,
217
225
  onProgress,
218
226
  });
219
227
  },
@@ -12,7 +12,7 @@ import { applyMigrationSchema } from './migration-schema.js';
12
12
 
13
13
  /**
14
14
  * @param {string} dbPath - Database file path (or ':memory:')
15
- * @param {object} [options] - Options: pragmaTemplate (default|performance|safety|minimal), plus any better-sqlite3 options
15
+ * @param {object} [options] - Options: pragmaTemplate (default|performance|safety|minimal), pragma (custom key-value overrides), plus any better-sqlite3 options
16
16
  * @returns {import('better-sqlite3').Database}
17
17
  */
18
18
  export function openDb(dbPath, options = {}) {
@@ -20,9 +20,9 @@ export function openDb(dbPath, options = {}) {
20
20
  const dir = path.dirname(dbPath);
21
21
  if (dir) fs.mkdirSync(dir, { recursive: true });
22
22
  }
23
- const { pragmaTemplate = 'default', ...dbOptions } = options;
23
+ const { pragmaTemplate = 'default', pragma: customPragma, ...dbOptions } = options;
24
24
  const db = new Database(dbPath, dbOptions);
25
- applyPragmas(db, pragmaTemplate);
25
+ applyPragmas(db, pragmaTemplate, customPragma);
26
26
  applySchema(db);
27
27
  applyMigrationSchema(db);
28
28
  return db;
@@ -69,13 +69,29 @@ export function getPragmasForTemplate(name) {
69
69
  }
70
70
 
71
71
  /**
72
- * Apply pragmas from a named template to an open database.
72
+ * Apply custom pragma key-value object to an open database.
73
+ * @param {import('better-sqlite3').Database} db
74
+ * @param {Record<string, string|number>} obj - e.g. { journal_mode: 'WAL', cache_size: -64000 }
75
+ */
76
+ function applyPragmaObject(db, obj) {
77
+ for (const [key, val] of Object.entries(obj)) {
78
+ if (val === undefined) continue;
79
+ db.exec(`PRAGMA ${key}=${val};`);
80
+ }
81
+ }
82
+
83
+ /**
84
+ * Apply pragmas from a named template and optional overrides to an open database.
73
85
  * @param {import('better-sqlite3').Database} db
74
86
  * @param {string} [templateName='default'] - One of: default, performance, safety, minimal, none
87
+ * @param {Record<string, string|number>} [customPragma] - Optional overrides, e.g. { synchronous: 'FULL', cache_size: -10000 }
75
88
  */
76
- export function applyPragmas(db, templateName = 'default') {
89
+ export function applyPragmas(db, templateName = 'default', customPragma = undefined) {
77
90
  const pragmas = getPragmasForTemplate(templateName);
78
91
  for (const sql of pragmas) {
79
92
  db.exec(sql);
80
93
  }
94
+ if (customPragma && typeof customPragma === 'object' && Object.keys(customPragma).length > 0) {
95
+ applyPragmaObject(db, customPragma);
96
+ }
81
97
  }
@@ -89,6 +89,17 @@ describe('createRESPlite', () => {
89
89
  await srv.close();
90
90
  });
91
91
 
92
+ it('accepts pragma overrides (convention: template first, overrides only when needed)', async () => {
93
+ const srv = await createRESPlite({
94
+ pragma: { synchronous: 'FULL', cache_size: -10_000 },
95
+ });
96
+ const client = await redisClient(srv.port);
97
+ await client.set('k', 'v');
98
+ assert.equal(await client.get('k'), 'v');
99
+ await client.quit();
100
+ await srv.close();
101
+ });
102
+
92
103
  it('unsupported command still returns ERR command not supported yet to client', async () => {
93
104
  const srv = await createRESPlite();
94
105
  const client = await redisClient(srv.port);
@@ -0,0 +1,101 @@
1
+ /**
2
+ * Unit tests for dirty apply concurrency/progress behavior.
3
+ */
4
+
5
+ import { describe, it } from 'node:test';
6
+ import assert from 'node:assert/strict';
7
+ import { openDb } from '../../src/storage/sqlite/db.js';
8
+ import { runApplyDirty } from '../../src/migration/apply-dirty.js';
9
+ import { createRun, upsertDirtyKey, getDirtyCounts } from '../../src/migration/registry.js';
10
+ import { tmpDbPath } from '../helpers/tmp.js';
11
+
12
+ function sleep(ms) {
13
+ return new Promise((resolve) => setTimeout(resolve, ms));
14
+ }
15
+
16
+ class FakeRedisStringClient {
17
+ constructor(initialValues, delayMs = 8) {
18
+ this.values = new Map(Object.entries(initialValues));
19
+ this.delayMs = delayMs;
20
+ this.inFlight = 0;
21
+ this.maxInFlight = 0;
22
+ }
23
+
24
+ async type(key) {
25
+ this.inFlight++;
26
+ this.maxInFlight = Math.max(this.maxInFlight, this.inFlight);
27
+ try {
28
+ await sleep(this.delayMs);
29
+ return this.values.has(key) ? 'string' : 'none';
30
+ } finally {
31
+ this.inFlight--;
32
+ }
33
+ }
34
+
35
+ async pTTL() {
36
+ this.inFlight++;
37
+ this.maxInFlight = Math.max(this.maxInFlight, this.inFlight);
38
+ try {
39
+ await sleep(this.delayMs);
40
+ return -1;
41
+ } finally {
42
+ this.inFlight--;
43
+ }
44
+ }
45
+
46
+ async get(key) {
47
+ this.inFlight++;
48
+ this.maxInFlight = Math.max(this.maxInFlight, this.inFlight);
49
+ try {
50
+ await sleep(this.delayMs);
51
+ return this.values.get(key) ?? null;
52
+ } finally {
53
+ this.inFlight--;
54
+ }
55
+ }
56
+ }
57
+
58
+ describe('migration apply-dirty', () => {
59
+ it('processes dirty keys with configured concurrency and emits progress payloads', async () => {
60
+ const dbPath = tmpDbPath();
61
+ const runId = `apply-dirty-concurrency-${Date.now()}`;
62
+ const totalKeys = 30;
63
+
64
+ const db = openDb(dbPath, { pragmaTemplate: 'minimal' });
65
+ createRun(db, runId, 'redis://x:6379');
66
+ const initialValues = {};
67
+ for (let i = 0; i < totalKeys; i++) {
68
+ const key = `k:${i}`;
69
+ initialValues[key] = `v:${i}`;
70
+ upsertDirtyKey(db, runId, key, 'set');
71
+ }
72
+ db.close();
73
+
74
+ const fakeRedis = new FakeRedisStringClient(initialValues);
75
+ const progress = [];
76
+
77
+ const run = await runApplyDirty(fakeRedis, dbPath, runId, {
78
+ pragmaTemplate: 'minimal',
79
+ batch_keys: totalKeys,
80
+ concurrency: 8,
81
+ progress_interval_ms: 0,
82
+ onProgress: (r) => progress.push(r),
83
+ });
84
+
85
+ assert.equal(run.dirty_keys_applied, totalKeys);
86
+ assert.equal(run.dirty_keys_deleted, 0);
87
+ assert.ok(fakeRedis.maxInFlight > 1, `Expected concurrent calls, maxInFlight=${fakeRedis.maxInFlight}`);
88
+ assert.ok(progress.length >= 1, 'Expected at least one onProgress callback');
89
+ const last = progress[progress.length - 1];
90
+ assert.equal(last.dirty_pending, 0);
91
+ assert.equal(last.dirty_reconciled_total, totalKeys);
92
+ assert.ok(Number.isFinite(last.dirty_keys_per_second));
93
+
94
+ const verifyDb = openDb(dbPath, { pragmaTemplate: 'minimal' });
95
+ const counts = getDirtyCounts(verifyDb, runId);
96
+ verifyDb.close();
97
+ assert.equal(counts.dirty, 0);
98
+ assert.equal(counts.deleted, 0);
99
+ assert.equal(counts.applied, totalKeys);
100
+ });
101
+ });
@@ -72,4 +72,28 @@ describe('Pragma templates', () => {
72
72
  db.close();
73
73
  }
74
74
  });
75
+
76
+ it('openDb with pragma overrides applies them after the template', () => {
77
+ const path = tmpDbPath();
78
+ const db = openDb(path, { pragmaTemplate: 'default', pragma: { synchronous: 'FULL' } });
79
+ try {
80
+ const row = db.prepare('PRAGMA synchronous').get();
81
+ assert.equal(row.synchronous, 2); // FULL = 2
82
+ } finally {
83
+ db.close();
84
+ }
85
+ });
86
+
87
+ it('openDb with pragma cache_size override (e.g. 1 GiB)', () => {
88
+ const path = tmpDbPath();
89
+ const oneGibKib = 1024 * 1024;
90
+ const db = openDb(path, { pragmaTemplate: 'default', pragma: { cache_size: -oneGibKib } });
91
+ try {
92
+ const row = db.prepare('PRAGMA cache_size').get();
93
+ // SQLite returns cache size in KiB when it was set negative
94
+ assert.equal(Math.abs(row.cache_size), oneGibKib);
95
+ } finally {
96
+ db.close();
97
+ }
98
+ });
75
99
  });
@@ -1,7 +0,0 @@
1
- ---
2
- id: tucj9i5nh5
3
- type: implementation
4
- title: Bulk migration concurrency added
5
- created: '2026-03-11 11:09:20'
6
- ---
7
- Added configurable concurrency to runBulkImport and createMigration.bulk with default 1. Implemented chunked parallel import with shared global max_rps limiter. Added unit tests proving default sequential behavior and concurrent behavior with cap.
@@ -1,7 +0,0 @@
1
- ---
2
- id: 105jsp012x
3
- type: implementation
4
- title: Bulk onProgress ETA support
5
- created: '2026-03-11 11:10:48'
6
- ---
7
- Added ETA/progress metrics to bulk migration onProgress payload. New optional options: estimated_total_keys in runBulkImport, estimatedTotalKeys in createMigration/bulk(). onProgress payload now includes elapsed_seconds, keys_per_second, estimated_total_keys, remaining_keys_estimate, eta_seconds, progress_pct. README migration example updated to print ETA/rate. Added unit test validating ETA fields and final 100%/eta=0 behavior.