resplite 1.3.6 → 1.4.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -107,6 +107,8 @@ const m = createMigration({
  batchKeys: 1000,
  batchBytes: 64 * 1024 * 1024, // 64 MB
  maxRps: 0, // 0 = unlimited
+ concurrency: 8, // parallel key imports during bulk
+ // estimatedTotalKeys: info.keyCountEstimate, // optional ETA baseline (can also be set per bulk call)

  // If your Redis deployment renamed CONFIG for security:
  // configCommand: 'MYCONFIG',
@@ -137,14 +139,14 @@ await m.startDirtyTracker({
  });

  // Step 1 — Bulk import (checkpointed, resumable). Same script to start or continue.
- // Use keyCountEstimate from preflight to show progress % (estimate; actual count may change).
- const total = info.keyCountEstimate || 1;
+ // Use keyCountEstimate from preflight to compute ETA/progress during bulk.
  await m.bulk({
-   resume: true,
+   estimatedTotalKeys: info.keyCountEstimate,
    onProgress: (r) => {
-     const pct = total ? ((r.scanned_keys / total) * 100).toFixed(1) : '—';
+     const pct = r.progress_pct != null ? r.progress_pct.toFixed(1) : '—';
+     const eta = r.eta_seconds != null ? `${r.eta_seconds}s` : '—';
      console.log(
-       `scanned=${r.scanned_keys} migrated=${r.migrated_keys} errors=${r.error_keys} progress=${pct}%`
+       `scanned=${r.scanned_keys} migrated=${r.migrated_keys} errors=${r.error_keys} progress=${pct}% eta=${eta} rate=${r.keys_per_second.toFixed(1)} keys/s`
      );
    },
  });
@@ -0,0 +1,7 @@
+ ---
+ id: tucj9i5nh5
+ type: implementation
+ title: Bulk migration concurrency added
+ created: '2026-03-11 11:09:20'
+ ---
+ Added configurable concurrency to runBulkImport and createMigration.bulk, defaulting to 1. Implemented chunked parallel import with a shared global max_rps limiter. Added unit tests proving both the default sequential behavior and capped concurrent behavior.
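A minimal usage sketch of the new option, assuming the `createMigration` setup from the README diff above (connection and path options elided):

```js
const m = createMigration({
  // ...connection and destination options as in the README example...
  maxRps: 500,    // shared limiter: caps key-import starts globally, not per worker
  concurrency: 8, // chunked parallel imports; the default of 1 keeps the old sequential behavior
});

// Per-call override; omitted options fall back to the createMigration values.
await m.bulk({ concurrency: 4 });
```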
@@ -0,0 +1,7 @@
+ ---
+ id: 105jsp012x
+ type: implementation
+ title: Bulk onProgress ETA support
+ created: '2026-03-11 11:10:48'
+ ---
+ Added ETA/progress metrics to the bulk migration onProgress payload. New optional options: estimated_total_keys in runBulkImport and estimatedTotalKeys in createMigration/bulk(). The onProgress payload now includes elapsed_seconds, keys_per_second, estimated_total_keys, remaining_keys_estimate, eta_seconds, and progress_pct. Updated the README migration example to print ETA/rate. Added a unit test validating the ETA fields and the final 100%/eta=0 behavior.
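A sketch of consuming the new payload fields, reusing `m` and `info` from the README example above. The estimate-derived fields (`eta_seconds`, `progress_pct`, `remaining_keys_estimate`) are `null` when no estimate was supplied, so guard them:

```js
await m.bulk({
  estimatedTotalKeys: info.keyCountEstimate, // optional: enables the ETA/progress fields
  onProgress: (r) => {
    const pct = r.progress_pct != null ? `${r.progress_pct.toFixed(1)}%` : 'n/a';
    const eta = r.eta_seconds != null ? `~${r.eta_seconds}s left` : 'n/a';
    console.log(
      `${r.scanned_keys} keys in ${r.elapsed_seconds.toFixed(1)}s ` +
        `(${r.keys_per_second.toFixed(1)} keys/s), ${pct}, ${eta}`
    );
  },
});
```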
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
    "name": "resplite",
-   "version": "1.3.6",
+   "version": "1.4.0",
    "description": "A RESP2 server with practical Redis compatibility, backed by SQLite",
    "type": "module",
    "main": "src/index.js",
package/src/migration/bulk.js CHANGED
@@ -35,6 +35,27 @@ function sleep(ms) {
  return new Promise((resolve) => setTimeout(resolve, ms));
  }

+ function buildProgressPayload(run, startTimeMs, estimatedTotalKeys) {
+   if (!run) return null;
+   const scanned = Number(run.scanned_keys || 0);
+   const elapsedSec = Math.max(0.001, (Date.now() - startTimeMs) / 1000);
+   const keysPerSec = scanned / elapsedSec;
+   const hasEstimate = Number.isFinite(estimatedTotalKeys) && estimatedTotalKeys > 0;
+   const remainingKeys = hasEstimate ? Math.max(0, estimatedTotalKeys - scanned) : null;
+   const etaSeconds = hasEstimate && keysPerSec > 0 ? Math.ceil(remainingKeys / keysPerSec) : null;
+   const progressPct = hasEstimate ? Math.min(100, (scanned / estimatedTotalKeys) * 100) : null;
+
+   return {
+     ...run,
+     elapsed_seconds: elapsedSec,
+     keys_per_second: keysPerSec,
+     estimated_total_keys: hasEstimate ? estimatedTotalKeys : null,
+     remaining_keys_estimate: remainingKeys,
+     eta_seconds: etaSeconds,
+     progress_pct: progressPct,
+   };
+ }
+
  /**
  * Run bulk import: SCAN keys from Redis, import into RespLite DB with checkpointing.
  * On SIGINT/SIGTERM, checkpoint progress, set run status to ABORTED, close DB and rethrow.
@@ -48,6 +69,8 @@ function sleep(ms) {
  * @param {string} [options.pragmaTemplate='default']
  * @param {number} [options.scan_count=1000]
  * @param {number} [options.max_rps=0] - 0 = no limit
+ * @param {number} [options.concurrency=1] - Number of concurrent key imports
+ * @param {number} [options.estimated_total_keys=0] - Optional key count estimate used for ETA/progress
  * @param {number} [options.batch_keys=200]
  * @param {number} [options.batch_bytes=64*1024*1024] - 64MB
  * @param {number} [options.checkpoint_interval_sec=30]
@@ -60,6 +83,8 @@ export async function runBulkImport(redisClient, dbPath, runId, options = {}) {
  pragmaTemplate = 'default',
  scan_count = 1000,
  max_rps = 0,
+ concurrency = 1,
+ estimated_total_keys = 0,
  batch_keys = 200,
  batch_bytes = 64 * 1024 * 1024,
  checkpoint_interval_sec = 30,
@@ -100,10 +125,22 @@ export async function runBulkImport(redisClient, dbPath, runId, options = {}) {
  }

  let lastCheckpointTime = Date.now();
+ const startedAtMs = lastCheckpointTime;
  let batchScanned = 0;
  let batchBytes = 0;
  const minIntervalMs = max_rps > 0 ? 1000 / max_rps : 0;
- let lastKeyTime = 0;
+ const workerCount = Number.isFinite(concurrency) ? Math.max(1, Math.floor(concurrency)) : 1;
+ let nextAllowedAt = 0;
+
+ async function awaitRateLimit() {
+   if (minIntervalMs <= 0) return;
+   const now = Date.now();
+   const scheduled = Math.max(now, nextAllowedAt);
+   nextAllowedAt = scheduled + minIntervalMs;
+   if (scheduled > now) {
+     await sleep(scheduled - now);
+   }
+ }

  outer: do {
  run = getRun(db, runId);
@@ -119,7 +156,7 @@ export async function runBulkImport(redisClient, dbPath, runId, options = {}) {
  cursor = parsed.cursor;
  const keyList = parsed.keys || [];

- for (const keyName of keyList) {
+ for (let i = 0; i < keyList.length; i += workerCount) {
  if (abortRequested) break outer;
  run = getRun(db, runId);
  if (run && run.status === RUN_STATUS.ABORTED) break outer;
@@ -128,46 +165,50 @@ export async function runBulkImport(redisClient, dbPath, runId, options = {}) {
  run = getRun(db, runId);
  }

- scanned_keys++;
- if (minIntervalMs > 0) {
-   const elapsed = Date.now() - lastKeyTime;
-   if (elapsed < minIntervalMs) await sleep(minIntervalMs - elapsed);
-   lastKeyTime = Date.now();
- }
+ const chunk = keyList.slice(i, i + workerCount);
+ const results = await Promise.all(
+   chunk.map(async (keyName) => {
+     await awaitRateLimit();
+     const now = Date.now();
+     const outcome = await importKeyFromRedis(redisClient, keyName, storages, { now });
+     return { keyName, outcome };
+   })
+ );

- const now = Date.now();
- const outcome = await importKeyFromRedis(redisClient, keyName, storages, { now });
- if (outcome.ok) {
-   migrated_keys++;
-   migrated_bytes += outcome.bytes || 0;
-   batchScanned++;
-   batchBytes += outcome.bytes || 0;
- } else if (outcome.skipped) {
-   skipped_keys++;
- } else {
-   error_keys++;
-   logError(db, runId, 'bulk', outcome.error ? 'Import failed' : 'Skipped', keyName);
- }
+ for (const { keyName, outcome } of results) {
+   scanned_keys++;
+   if (outcome.ok) {
+     migrated_keys++;
+     migrated_bytes += outcome.bytes || 0;
+     batchScanned++;
+     batchBytes += outcome.bytes || 0;
+   } else if (outcome.skipped) {
+     skipped_keys++;
+   } else {
+     error_keys++;
+     logError(db, runId, 'bulk', outcome.error ? 'Import failed' : 'Skipped', keyName);
+   }

- const now2 = Date.now();
- const shouldCheckpoint =
-   batchScanned >= batch_keys ||
-   batchBytes >= batch_bytes ||
-   now2 - lastCheckpointTime >= checkpoint_interval_sec * 1000;
- if (shouldCheckpoint) {
-   updateBulkProgress(db, runId, {
-     scan_cursor: String(cursor),
-     scanned_keys,
-     migrated_keys,
-     skipped_keys,
-     error_keys,
-     migrated_bytes,
-   });
-   lastCheckpointTime = now2;
-   batchScanned = 0;
-   batchBytes = 0;
-   run = getRun(db, runId);
-   if (onProgress && run) onProgress(run);
+   const now2 = Date.now();
+   const shouldCheckpoint =
+     batchScanned >= batch_keys ||
+     batchBytes >= batch_bytes ||
+     now2 - lastCheckpointTime >= checkpoint_interval_sec * 1000;
+   if (shouldCheckpoint) {
+     updateBulkProgress(db, runId, {
+       scan_cursor: String(cursor),
+       scanned_keys,
+       migrated_keys,
+       skipped_keys,
+       error_keys,
+       migrated_bytes,
+     });
+     lastCheckpointTime = now2;
+     batchScanned = 0;
+     batchBytes = 0;
+     run = getRun(db, runId);
+     if (onProgress && run) onProgress(buildProgressPayload(run, startedAtMs, estimated_total_keys));
+   }
  }
  }
  } while (cursor !== 0);
@@ -183,7 +224,7 @@ export async function runBulkImport(redisClient, dbPath, runId, options = {}) {
  });
  setRunStatus(db, runId, RUN_STATUS.ABORTED);
  run = getRun(db, runId);
- if (onProgress && run) onProgress(run);
+ if (onProgress && run) onProgress(buildProgressPayload(run, startedAtMs, estimated_total_keys));
  const err = new Error('Bulk import interrupted by signal (SIGINT/SIGTERM)');
  err.code = 'BULK_ABORTED';
  throw err;
@@ -198,7 +239,9 @@ export async function runBulkImport(redisClient, dbPath, runId, options = {}) {
  migrated_bytes,
  });
  setRunStatus(db, runId, RUN_STATUS.COMPLETED);
- return getRun(db, runId);
+ run = getRun(db, runId);
+ if (onProgress && run) onProgress(buildProgressPayload(run, startedAtMs, estimated_total_keys));
+ return run;
  } catch (err) {
  if (err.code !== 'BULK_ABORTED') {
  setRunStatus(db, runId, RUN_STATUS.FAILED);
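The `awaitRateLimit` helper above replaces the old per-key sleep with a shared reservation schedule: each worker claims the next start slot before sleeping, so total import starts per second stay under `max_rps` no matter how many workers run in parallel. A standalone sketch of the same pattern (illustrative only, not an export of the package):

```js
const sleep = (ms) => new Promise((resolve) => setTimeout(resolve, ms));

function makeLimiter(maxRps) {
  const minIntervalMs = maxRps > 0 ? 1000 / maxRps : 0;
  let nextAllowedAt = 0; // shared across all callers
  return async () => {
    if (minIntervalMs <= 0) return;
    const now = Date.now();
    const scheduled = Math.max(now, nextAllowedAt);
    nextAllowedAt = scheduled + minIntervalMs; // reserve the slot before sleeping
    if (scheduled > now) await sleep(scheduled - now);
  };
}

// With maxRps = 100, the Nth call starts no earlier than ~N * 10 ms after the
// first, whether 1 or 8 workers invoke the limiter concurrently.
```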
package/src/migration/import-one.js CHANGED
@@ -14,16 +14,35 @@ function toBuffer(value) {
  return Buffer.from(String(value), 'utf8');
  }

+ function parseZscanResult(raw) {
+   if (!Array.isArray(raw) || raw.length < 2) {
+     return { cursor: 0, entries: [] };
+   }
+   const cursor = parseInt(String(raw[0] ?? '0'), 10) || 0;
+   const flat = Array.isArray(raw[1]) ? raw[1] : [];
+   const entries = [];
+   for (let i = 0; i < flat.length; i += 2) {
+     const member = flat[i];
+     const score = flat[i + 1];
+     if (member == null || score == null) continue;
+     entries.push({ value: member, score: Number(score) });
+   }
+   return { cursor, entries };
+ }
+
  /**
  * Fetch one key from Redis and write to storages. Idempotent (upsert).
  * @param {import('redis').RedisClientType} redisClient
  * @param {string} keyName
  * @param {{ keys: import('../storage/sqlite/keys.js').ReturnType<import('../storage/sqlite/keys.js').createKeysStorage>; strings: ReturnType<import('../storage/sqlite/strings.js').createStringsStorage>; hashes: ReturnType<import('../storage/sqlite/hashes.js').createHashesStorage>; sets: ReturnType<import('../storage/sqlite/sets.js').createSetsStorage>; lists: ReturnType<import('../storage/sqlite/lists.js').createListsStorage>; zsets: ReturnType<import('../storage/sqlite/zsets.js').createZsetsStorage> }} storages
- * @param {{ now?: number }} options
+ * @param {{ now?: number, zsetScanCount?: number }} options
  * @returns {Promise<{ ok: boolean; skipped?: boolean; error?: boolean; bytes?: number }>}
  */
  export async function importKeyFromRedis(redisClient, keyName, storages, options = {}) {
  const now = options.now ?? Date.now();
+ const zsetScanCount = Number.isFinite(options.zsetScanCount)
+   ? Math.max(10, Math.floor(options.zsetScanCount))
+   : 1000;
  const { keys, strings, hashes, sets, lists, zsets } = storages;

  try {
@@ -85,16 +104,46 @@ importKeyFromRedis(redisClient, keyName, storages, options
  }

  if (type === 'zset') {
-   const withScores = await redisClient.zRangeWithScores(keyName, 0, -1);
-   if (!withScores || !withScores.length) return { ok: false, skipped: true };
-   const pairs = withScores.map((item) => ({
-     member: toBuffer(item.value),
-     score: Number(item.score),
-   }));
-   for (const p of pairs) bytes += p.member.length + 8;
-   zsets.add(keyBuf, pairs, { updatedAt: now });
-   keys.setExpires(keyBuf, expiresAt, now);
-   return { ok: true, bytes };
+   try {
+     // Use cursor-based reads to avoid loading very large sorted sets in one call.
+     let cursor = 0;
+     let wroteAny = false;
+     do {
+       const raw = await redisClient.sendCommand([
+         'ZSCAN',
+         keyName,
+         String(cursor),
+         'COUNT',
+         String(zsetScanCount),
+       ]);
+       const parsed = parseZscanResult(raw);
+       cursor = parsed.cursor;
+       if (parsed.entries.length === 0) continue;
+       const pairs = parsed.entries.map((item) => ({
+         member: toBuffer(item.value),
+         score: Number(item.score),
+       }));
+       for (const p of pairs) bytes += p.member.length + 8;
+       zsets.add(keyBuf, pairs, { updatedAt: now });
+       wroteAny = true;
+     } while (cursor !== 0);
+
+     if (!wroteAny) return { ok: false, skipped: true };
+     keys.setExpires(keyBuf, expiresAt, now);
+     return { ok: true, bytes };
+   } catch {
+     // Fallback for clients/backends without command passthrough support.
+     const withScores = await redisClient.zRangeWithScores(keyName, 0, -1);
+     if (!withScores || !withScores.length) return { ok: false, skipped: true };
+     const pairs = withScores.map((item) => ({
+       member: toBuffer(item.value),
+       score: Number(item.score),
+     }));
+     for (const p of pairs) bytes += p.member.length + 8;
+     zsets.add(keyBuf, pairs, { updatedAt: now });
+     keys.setExpires(keyBuf, expiresAt, now);
+     return { ok: true, bytes };
+   }
  }

  return { ok: false, skipped: true };
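For reference, ZSCAN replies arrive as a cursor plus a flat member/score array, which `parseZscanResult` folds into `{ value, score }` entries; scanning is complete only when the server returns cursor `0`. A worked example with illustrative values:

```js
// Raw reply shape from redisClient.sendCommand(['ZSCAN', key, '0', 'COUNT', '1000']):
const raw = ['7', ['alice', '1', 'bob', '2.5']];
const { cursor, entries } = parseZscanResult(raw);
// cursor  -> 7 (non-zero: issue another ZSCAN with this cursor)
// entries -> [{ value: 'alice', score: 1 }, { value: 'bob', score: 2.5 }]
```

Since ZSCAN may return a member more than once across iterations, the upsert semantics of `zsets.add` keep the import idempotent.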
@@ -36,6 +36,8 @@ import { startDirtyTracker as startDirtyTrackerProcess } from './tracker.js';
  * @property {string} [pragmaTemplate='default'] - PRAGMA preset.
  * @property {number} [scanCount=1000]
  * @property {number} [maxRps=0] - Max requests/s (0 = unlimited).
+ * @property {number} [concurrency=1] - Concurrent imports during bulk migration.
+ * @property {number} [estimatedTotalKeys=0] - Optional total-keys estimate for ETA/progress in onProgress.
  * @property {number} [batchKeys=200]
  * @property {number} [batchBytes=67108864] - 64 MB default.
  * @property {string} [configCommand='CONFIG'] - Redis CONFIG command name. Override if renamed for security.
@@ -65,6 +67,8 @@ export function createMigration({
  pragmaTemplate = 'default',
  scanCount = 1000,
  maxRps = 0,
+ concurrency = 1,
+ estimatedTotalKeys = 0,
  batchKeys = 200,
  batchBytes = 64 * 1024 * 1024,
  configCommand = 'CONFIG',
@@ -156,9 +160,17 @@
  * Step 1 — Bulk import: SCAN all keys from Redis into the destination DB.
  * Resume is on by default: first run starts from 0, later runs continue from checkpoint.
  *
- * @param {{ resume?: boolean, onProgress?: (run: object) => void }} [opts] - resume (default true): start or continue automatically
+ * @param {{ resume?: boolean, concurrency?: number, estimatedTotalKeys?: number, onProgress?: (run: object) => void }} [opts]
+ * - `resume` (default true): start or continue automatically
+ * - `concurrency` (default from createMigration options): concurrent key imports
+ * - `estimatedTotalKeys` (optional): used to compute ETA/progress fields in onProgress
  */
- async bulk({ resume = true, onProgress } = {}) {
+ async bulk({
+   resume = true,
+   concurrency: c = concurrency,
+   estimatedTotalKeys: et = estimatedTotalKeys,
+   onProgress,
+ } = {}) {
  const id = requireRunId();
  const client = await getClient();
  return runBulkImport(client, to, id, {
@@ -166,6 +178,8 @@
  pragmaTemplate,
  scan_count: scanCount,
  max_rps: maxRps,
+ concurrency: c,
+ estimated_total_keys: et,
  batch_keys: batchKeys,
  batch_bytes: batchBytes,
  resume,
@@ -0,0 +1,145 @@
+ /**
+  * Unit tests for bulk migration concurrency behavior.
+  */
+
+ import { describe, it } from 'node:test';
+ import assert from 'node:assert/strict';
+ import { runBulkImport } from '../../src/migration/bulk.js';
+ import { tmpDbPath } from '../helpers/tmp.js';
+
+ function sleep(ms) {
+   return new Promise((resolve) => setTimeout(resolve, ms));
+ }
+
+ function makeFakeRedis(valuesByKey, options = {}) {
+   const { getDelayMs = 0 } = options;
+   const keys = Object.keys(valuesByKey);
+
+   let inFlightGets = 0;
+   let maxInFlightGets = 0;
+
+   return {
+     stats: {
+       get maxInFlightGets() {
+         return maxInFlightGets;
+       },
+     },
+
+     async scan(cursor) {
+       if (Number(cursor) !== 0) return { cursor: 0, keys: [] };
+       return { cursor: 0, keys };
+     },
+
+     async type(keyName) {
+       return Object.prototype.hasOwnProperty.call(valuesByKey, keyName) ? 'string' : 'none';
+     },
+
+     async pTTL() {
+       return -1;
+     },
+
+     async get(keyName) {
+       inFlightGets++;
+       maxInFlightGets = Math.max(maxInFlightGets, inFlightGets);
+       try {
+         if (getDelayMs > 0) await sleep(getDelayMs);
+         return valuesByKey[keyName];
+       } finally {
+         inFlightGets--;
+       }
+     },
+   };
+ }
+
+ describe('runBulkImport concurrency', () => {
+   it('uses sequential processing by default (concurrency=1)', async () => {
+     const redis = makeFakeRedis(
+       {
+         k1: 'v1',
+         k2: 'v2',
+         k3: 'v3',
+         k4: 'v4',
+       },
+       { getDelayMs: 10 }
+     );
+
+     const run = await runBulkImport(redis, tmpDbPath(), `bulk-seq-${Date.now()}`, {
+       sourceUri: 'redis://fake',
+       scan_count: 100,
+       batch_keys: 1,
+     });
+
+     assert.equal(run.status, 'completed');
+     assert.equal(run.scanned_keys, 4);
+     assert.equal(run.migrated_keys, 4);
+     assert.equal(redis.stats.maxInFlightGets, 1);
+   });
+
+   it('processes keys concurrently when concurrency is configured', async () => {
+     const redis = makeFakeRedis(
+       {
+         k1: 'v1',
+         k2: 'v2',
+         k3: 'v3',
+         k4: 'v4',
+         k5: 'v5',
+         k6: 'v6',
+         k7: 'v7',
+         k8: 'v8',
+       },
+       { getDelayMs: 20 }
+     );
+
+     const run = await runBulkImport(redis, tmpDbPath(), `bulk-concurrent-${Date.now()}`, {
+       sourceUri: 'redis://fake',
+       scan_count: 100,
+       concurrency: 4,
+       batch_keys: 2,
+     });
+
+     assert.equal(run.status, 'completed');
+     assert.equal(run.scanned_keys, 8);
+     assert.equal(run.migrated_keys, 8);
+     assert.ok(redis.stats.maxInFlightGets > 1, `expected >1 inflight gets, got ${redis.stats.maxInFlightGets}`);
+     assert.ok(redis.stats.maxInFlightGets <= 4, `expected <=4 inflight gets, got ${redis.stats.maxInFlightGets}`);
+   });
+
+   it('includes ETA/progress fields in onProgress when total estimate is provided', async () => {
+     const redis = makeFakeRedis(
+       {
+         k1: 'v1',
+         k2: 'v2',
+         k3: 'v3',
+         k4: 'v4',
+       },
+       { getDelayMs: 8 }
+     );
+
+     const events = [];
+     const run = await runBulkImport(redis, tmpDbPath(), `bulk-eta-${Date.now()}`, {
+       sourceUri: 'redis://fake',
+       scan_count: 100,
+       batch_keys: 1,
+       estimated_total_keys: 4,
+       onProgress: (r) => events.push(r),
+     });
+
+     assert.equal(run.status, 'completed');
+     assert.ok(events.length >= 2, `expected at least 2 progress events, got ${events.length}`);
+
+     const withEta = events.filter((e) => e.eta_seconds !== null);
+     assert.ok(withEta.length >= 1, 'expected at least one progress event with eta_seconds');
+
+     for (const e of withEta) {
+       assert.equal(e.estimated_total_keys, 4);
+       assert.ok(e.progress_pct >= 0 && e.progress_pct <= 100, `invalid progress_pct=${e.progress_pct}`);
+       assert.ok(e.keys_per_second > 0, `invalid keys_per_second=${e.keys_per_second}`);
+       assert.ok(e.elapsed_seconds > 0, `invalid elapsed_seconds=${e.elapsed_seconds}`);
+     }
+
+     const last = events.at(-1);
+     assert.equal(last.progress_pct, 100);
+     assert.equal(last.eta_seconds, 0);
+     assert.equal(last.remaining_keys_estimate, 0);
+   });
+ });
@@ -0,0 +1,94 @@
+ import { describe, it } from 'node:test';
+ import assert from 'node:assert/strict';
+ import { importKeyFromRedis } from '../../src/migration/import-one.js';
+
+ function makeStorages() {
+   const calls = {
+     zsetAdds: [],
+     setExpires: [],
+   };
+   return {
+     calls,
+     storages: {
+       keys: {
+         setExpires(key, expiresAt, updatedAt) {
+           calls.setExpires.push({ key, expiresAt, updatedAt });
+         },
+       },
+       strings: {},
+       hashes: {},
+       sets: {},
+       lists: {},
+       zsets: {
+         add(key, pairs) {
+           calls.zsetAdds.push({ key, pairs });
+         },
+       },
+     },
+   };
+ }
+
+ describe('importKeyFromRedis zset handling', () => {
+   it('imports large zsets with ZSCAN chunks', async () => {
+     const { storages, calls } = makeStorages();
+     let scanCalls = 0;
+     const redis = {
+       async type() {
+         return 'zset';
+       },
+       async pTTL() {
+         return -1;
+       },
+       async sendCommand(argv) {
+         assert.equal(argv[0], 'ZSCAN');
+         scanCalls += 1;
+         if (scanCalls === 1) return ['7', ['a', '1', 'b', '2']];
+         if (scanCalls === 2) return ['0', ['c', '3']];
+         return ['0', []];
+       },
+       async zRangeWithScores() {
+         throw new Error('fallback should not be used when ZSCAN works');
+       },
+     };
+
+     const result = await importKeyFromRedis(redis, 'big:zset', storages, { now: 1000, zsetScanCount: 2 });
+
+     assert.equal(result.ok, true);
+     assert.equal(result.skipped, undefined);
+     assert.equal(scanCalls, 2);
+     assert.equal(calls.zsetAdds.length, 2);
+     assert.equal(calls.zsetAdds[0].pairs.length, 2);
+     assert.equal(calls.zsetAdds[1].pairs.length, 1);
+     assert.equal(calls.setExpires.length, 1);
+     // key bytes + member bytes + score metadata estimate (8 bytes/member)
+     assert.equal(result.bytes, Buffer.byteLength('big:zset') + (1 + 1 + 1) + (3 * 8));
+   });
+
+   it('falls back to zRangeWithScores when ZSCAN passthrough is unavailable', async () => {
+     const { storages, calls } = makeStorages();
+     let fallbackUsed = false;
+     const redis = {
+       async type() {
+         return 'zset';
+       },
+       async pTTL() {
+         return -1;
+       },
+       async sendCommand() {
+         throw new Error('sendCommand not supported');
+       },
+       async zRangeWithScores() {
+         fallbackUsed = true;
+         return [{ value: 'member-1', score: 42 }];
+       },
+     };
+
+     const result = await importKeyFromRedis(redis, 'legacy:zset', storages, { now: 1000 });
+
+     assert.equal(result.ok, true);
+     assert.equal(fallbackUsed, true);
+     assert.equal(calls.zsetAdds.length, 1);
+     assert.equal(calls.zsetAdds[0].pairs.length, 1);
+     assert.equal(calls.setExpires.length, 1);
+   });
+ });