resplite 1.3.6 → 1.3.8
This diff reflects the changes between publicly released package versions as they appear in their respective public registries, and is provided for informational purposes only.
package/README.md
CHANGED
@@ -107,6 +107,8 @@ const m = createMigration({
   batchKeys: 1000,
   batchBytes: 64 * 1024 * 1024, // 64 MB
   maxRps: 0, // 0 = unlimited
+  concurrency: 8, // parallel key imports during bulk
+  // estimatedTotalKeys: info.keyCountEstimate, // optional ETA baseline (can also be set per bulk call)
 
   // If your Redis deployment renamed CONFIG for security:
   // configCommand: 'MYCONFIG',
@@ -137,14 +139,15 @@ await m.startDirtyTracker({
 });
 
 // Step 1 — Bulk import (checkpointed, resumable). Same script to start or continue.
-// Use keyCountEstimate from preflight to
-const total = info.keyCountEstimate || 1;
+// Use keyCountEstimate from preflight to compute ETA/progress during bulk.
 await m.bulk({
   resume: true,
+  estimatedTotalKeys: info.keyCountEstimate,
   onProgress: (r) => {
-    const pct =
+    const pct = r.progress_pct != null ? r.progress_pct.toFixed(1) : '—';
+    const eta = r.eta_seconds != null ? `${r.eta_seconds}s` : '—';
     console.log(
-      `scanned=${r.scanned_keys} migrated=${r.migrated_keys} errors=${r.error_keys} progress=${pct}
+      `scanned=${r.scanned_keys} migrated=${r.migrated_keys} errors=${r.error_keys} progress=${pct}% eta=${eta} rate=${r.keys_per_second.toFixed(1)} keys/s`
     );
   },
 });
@@ -0,0 +1,7 @@
+---
+id: tucj9i5nh5
+type: implementation
+title: Bulk migration concurrency added
+created: '2026-03-11 11:09:20'
+---
+Added configurable concurrency to runBulkImport and createMigration.bulk with default 1. Implemented chunked parallel import with shared global max_rps limiter. Added unit tests proving default sequential behavior and concurrent behavior with cap.
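For context, a minimal usage sketch of the new option (illustrative only, not part of the package diff; the import path and all values are assumptions, and connection options are elided). maxRps is enforced by a limiter shared across workers, so the total request rate stays capped regardless of concurrency:

import { createMigration } from 'resplite'; // assumed entry point

const m = createMigration({
  // ...connection options elided...
  maxRps: 500,    // total request-rate cap, shared by all concurrent workers
  concurrency: 8, // import up to 8 keys in parallel within each SCAN batch
});

// bulk() also accepts a per-call override; it defaults to the createMigration value.
await m.bulk({ resume: true, concurrency: 4 });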
@@ -0,0 +1,7 @@
+---
+id: 105jsp012x
+type: implementation
+title: Bulk onProgress ETA support
+created: '2026-03-11 11:10:48'
+---
+Added ETA/progress metrics to bulk migration onProgress payload. New optional options: estimated_total_keys in runBulkImport, estimatedTotalKeys in createMigration/bulk(). onProgress payload now includes elapsed_seconds, keys_per_second, estimated_total_keys, remaining_keys_estimate, eta_seconds, progress_pct. README migration example updated to print ETA/rate. Added unit test validating ETA fields and final 100%/eta=0 behavior.
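A minimal sketch of consuming the new onProgress fields (illustrative values; assumes the m.bulk() API shown in the README diff above). The estimate-derived fields are null when no estimatedTotalKeys is supplied, so guard before formatting:

await m.bulk({
  resume: true,
  estimatedTotalKeys: 1000000, // hypothetical preflight key-count estimate
  onProgress: (r) => {
    // elapsed_seconds and keys_per_second are always present; progress_pct,
    // eta_seconds and remaining_keys_estimate may be null without an estimate.
    const pct = r.progress_pct != null ? `${r.progress_pct.toFixed(1)}%` : 'n/a';
    const eta = r.eta_seconds != null ? `${r.eta_seconds}s` : 'n/a';
    console.log(`${r.migrated_keys} migrated @ ${r.keys_per_second.toFixed(1)} keys/s, ${pct}, ETA ${eta}`);
  },
});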
package/package.json
CHANGED
package/src/migration/bulk.js
CHANGED
@@ -35,6 +35,27 @@ function sleep(ms) {
   return new Promise((resolve) => setTimeout(resolve, ms));
 }
 
+function buildProgressPayload(run, startTimeMs, estimatedTotalKeys) {
+  if (!run) return null;
+  const scanned = Number(run.scanned_keys || 0);
+  const elapsedSec = Math.max(0.001, (Date.now() - startTimeMs) / 1000);
+  const keysPerSec = scanned / elapsedSec;
+  const hasEstimate = Number.isFinite(estimatedTotalKeys) && estimatedTotalKeys > 0;
+  const remainingKeys = hasEstimate ? Math.max(0, estimatedTotalKeys - scanned) : null;
+  const etaSeconds = hasEstimate && keysPerSec > 0 ? Math.ceil(remainingKeys / keysPerSec) : null;
+  const progressPct = hasEstimate ? Math.min(100, (scanned / estimatedTotalKeys) * 100) : null;
+
+  return {
+    ...run,
+    elapsed_seconds: elapsedSec,
+    keys_per_second: keysPerSec,
+    estimated_total_keys: hasEstimate ? estimatedTotalKeys : null,
+    remaining_keys_estimate: remainingKeys,
+    eta_seconds: etaSeconds,
+    progress_pct: progressPct,
+  };
+}
+
 /**
  * Run bulk import: SCAN keys from Redis, import into RespLite DB with checkpointing.
  * On SIGINT/SIGTERM, checkpoint progress, set run status to ABORTED, close DB and rethrow.
@@ -48,6 +69,8 @@ function sleep(ms) {
  * @param {string} [options.pragmaTemplate='default']
  * @param {number} [options.scan_count=1000]
  * @param {number} [options.max_rps=0] - 0 = no limit
+ * @param {number} [options.concurrency=1] - Number of concurrent key imports
+ * @param {number} [options.estimated_total_keys=0] - Optional key count estimate used for ETA/progress
  * @param {number} [options.batch_keys=200]
  * @param {number} [options.batch_bytes=64*1024*1024] - 64MB
  * @param {number} [options.checkpoint_interval_sec=30]
@@ -60,6 +83,8 @@ export async function runBulkImport(redisClient, dbPath, runId, options = {}) {
     pragmaTemplate = 'default',
     scan_count = 1000,
     max_rps = 0,
+    concurrency = 1,
+    estimated_total_keys = 0,
     batch_keys = 200,
     batch_bytes = 64 * 1024 * 1024,
     checkpoint_interval_sec = 30,
@@ -100,10 +125,22 @@ export async function runBulkImport(redisClient, dbPath, runId, options = {}) {
   }
 
   let lastCheckpointTime = Date.now();
+  const startedAtMs = lastCheckpointTime;
   let batchScanned = 0;
   let batchBytes = 0;
   const minIntervalMs = max_rps > 0 ? 1000 / max_rps : 0;
-
+  const workerCount = Number.isFinite(concurrency) ? Math.max(1, Math.floor(concurrency)) : 1;
+  let nextAllowedAt = 0;
+
+  async function awaitRateLimit() {
+    if (minIntervalMs <= 0) return;
+    const now = Date.now();
+    const scheduled = Math.max(now, nextAllowedAt);
+    nextAllowedAt = scheduled + minIntervalMs;
+    if (scheduled > now) {
+      await sleep(scheduled - now);
+    }
+  }
 
   outer: do {
     run = getRun(db, runId);
@@ -119,7 +156,7 @@ export async function runBulkImport(redisClient, dbPath, runId, options = {}) {
     cursor = parsed.cursor;
     const keyList = parsed.keys || [];
 
-    for (
+    for (let i = 0; i < keyList.length; i += workerCount) {
       if (abortRequested) break outer;
       run = getRun(db, runId);
       if (run && run.status === RUN_STATUS.ABORTED) break outer;
@@ -128,46 +165,50 @@ export async function runBulkImport(redisClient, dbPath, runId, options = {}) {
         run = getRun(db, runId);
       }
 
-      … (removed lines not shown in the diff view)
+      const chunk = keyList.slice(i, i + workerCount);
+      const results = await Promise.all(
+        chunk.map(async (keyName) => {
+          await awaitRateLimit();
+          const now = Date.now();
+          const outcome = await importKeyFromRedis(redisClient, keyName, storages, { now });
+          return { keyName, outcome };
+        })
+      );
 
-      const
-      … (removed lines not shown in the diff view)
+      for (const { keyName, outcome } of results) {
+        scanned_keys++;
+        if (outcome.ok) {
+          migrated_keys++;
+          migrated_bytes += outcome.bytes || 0;
+          batchScanned++;
+          batchBytes += outcome.bytes || 0;
+        } else if (outcome.skipped) {
+          skipped_keys++;
+        } else {
+          error_keys++;
+          logError(db, runId, 'bulk', outcome.error ? 'Import failed' : 'Skipped', keyName);
+        }
 
-        … (removed lines not shown in the diff view)
+        const now2 = Date.now();
+        const shouldCheckpoint =
+          batchScanned >= batch_keys ||
+          batchBytes >= batch_bytes ||
+          now2 - lastCheckpointTime >= checkpoint_interval_sec * 1000;
+        if (shouldCheckpoint) {
+          updateBulkProgress(db, runId, {
+            scan_cursor: String(cursor),
+            scanned_keys,
+            migrated_keys,
+            skipped_keys,
+            error_keys,
+            migrated_bytes,
+          });
+          lastCheckpointTime = now2;
+          batchScanned = 0;
+          batchBytes = 0;
+          run = getRun(db, runId);
+          if (onProgress && run) onProgress(buildProgressPayload(run, startedAtMs, estimated_total_keys));
+        }
       }
     }
   } while (cursor !== 0);
@@ -183,7 +224,7 @@ export async function runBulkImport(redisClient, dbPath, runId, options = {}) {
     });
     setRunStatus(db, runId, RUN_STATUS.ABORTED);
     run = getRun(db, runId);
-    if (onProgress && run) onProgress(run);
+    if (onProgress && run) onProgress(buildProgressPayload(run, startedAtMs, estimated_total_keys));
     const err = new Error('Bulk import interrupted by signal (SIGINT/SIGTERM)');
     err.code = 'BULK_ABORTED';
     throw err;
@@ -198,7 +239,9 @@ export async function runBulkImport(redisClient, dbPath, runId, options = {}) {
       migrated_bytes,
     });
     setRunStatus(db, runId, RUN_STATUS.COMPLETED);
-    … (removed line not shown in the diff view)
+    run = getRun(db, runId);
+    if (onProgress && run) onProgress(buildProgressPayload(run, startedAtMs, estimated_total_keys));
+    return run;
   } catch (err) {
     if (err.code !== 'BULK_ABORTED') {
       setRunStatus(db, runId, RUN_STATUS.FAILED);
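To illustrate the rate-limiting scheme used above, here is a self-contained sketch that mirrors (but does not import) the limiter added in this diff: each concurrent caller reserves the next send slot, so aggregate throughput stays at or below the cap no matter how many workers run.

function makeRateLimiter(maxRps) {
  const minIntervalMs = maxRps > 0 ? 1000 / maxRps : 0;
  let nextAllowedAt = 0;
  return async function awaitRateLimit() {
    if (minIntervalMs <= 0) return; // 0 = unlimited
    const now = Date.now();
    const scheduled = Math.max(now, nextAllowedAt); // claim the next free slot
    nextAllowedAt = scheduled + minIntervalMs;      // push the slot forward for the next caller
    if (scheduled > now) await new Promise((r) => setTimeout(r, scheduled - now));
  };
}

// Example: 8 workers sharing a 100 rps budget; sends end up spaced ~10 ms apart in aggregate.
const limit = makeRateLimiter(100);
await Promise.all(
  Array.from({ length: 8 }, async () => {
    for (let i = 0; i < 5; i++) {
      await limit();
      // one Redis read would go here
    }
  })
);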
package/src/migration/index.js
CHANGED
@@ -36,6 +36,8 @@ import { startDirtyTracker as startDirtyTrackerProcess } from './tracker.js';
  * @property {string} [pragmaTemplate='default'] - PRAGMA preset.
  * @property {number} [scanCount=1000]
  * @property {number} [maxRps=0] - Max requests/s (0 = unlimited).
+ * @property {number} [concurrency=1] - Concurrent imports during bulk migration.
+ * @property {number} [estimatedTotalKeys=0] - Optional total-keys estimate for ETA/progress in onProgress.
  * @property {number} [batchKeys=200]
  * @property {number} [batchBytes=67108864] - 64 MB default.
  * @property {string} [configCommand='CONFIG'] - Redis CONFIG command name. Override if renamed for security.
@@ -65,6 +67,8 @@ export function createMigration({
   pragmaTemplate = 'default',
   scanCount = 1000,
   maxRps = 0,
+  concurrency = 1,
+  estimatedTotalKeys = 0,
   batchKeys = 200,
   batchBytes = 64 * 1024 * 1024,
   configCommand = 'CONFIG',
@@ -156,9 +160,17 @@ export function createMigration({
     * Step 1 — Bulk import: SCAN all keys from Redis into the destination DB.
     * Resume is on by default: first run starts from 0, later runs continue from checkpoint.
     *
-    * @param {{ resume?: boolean, onProgress?: (run: object) => void }} [opts]
+    * @param {{ resume?: boolean, concurrency?: number, estimatedTotalKeys?: number, onProgress?: (run: object) => void }} [opts]
+    *   - `resume` (default true): start or continue automatically
+    *   - `concurrency` (default from createMigration options): concurrent key imports
+    *   - `estimatedTotalKeys` (optional): used to compute ETA/progress fields in onProgress
     */
-    async bulk({
+    async bulk({
+      resume = true,
+      concurrency: c = concurrency,
+      estimatedTotalKeys: et = estimatedTotalKeys,
+      onProgress,
+    } = {}) {
      const id = requireRunId();
      const client = await getClient();
      return runBulkImport(client, to, id, {
@@ -166,6 +178,8 @@ export function createMigration({
        pragmaTemplate,
        scan_count: scanCount,
        max_rps: maxRps,
+       concurrency: c,
+       estimated_total_keys: et,
        batch_keys: batchKeys,
        batch_bytes: batchBytes,
        resume,
@@ -0,0 +1,145 @@
+/**
+ * Unit tests for bulk migration concurrency behavior.
+ */
+
+import { describe, it } from 'node:test';
+import assert from 'node:assert/strict';
+import { runBulkImport } from '../../src/migration/bulk.js';
+import { tmpDbPath } from '../helpers/tmp.js';
+
+function sleep(ms) {
+  return new Promise((resolve) => setTimeout(resolve, ms));
+}
+
+function makeFakeRedis(valuesByKey, options = {}) {
+  const { getDelayMs = 0 } = options;
+  const keys = Object.keys(valuesByKey);
+
+  let inFlightGets = 0;
+  let maxInFlightGets = 0;
+
+  return {
+    stats: {
+      get maxInFlightGets() {
+        return maxInFlightGets;
+      },
+    },
+
+    async scan(cursor) {
+      if (Number(cursor) !== 0) return { cursor: 0, keys: [] };
+      return { cursor: 0, keys };
+    },
+
+    async type(keyName) {
+      return Object.prototype.hasOwnProperty.call(valuesByKey, keyName) ? 'string' : 'none';
+    },
+
+    async pTTL() {
+      return -1;
+    },
+
+    async get(keyName) {
+      inFlightGets++;
+      maxInFlightGets = Math.max(maxInFlightGets, inFlightGets);
+      try {
+        if (getDelayMs > 0) await sleep(getDelayMs);
+        return valuesByKey[keyName];
+      } finally {
+        inFlightGets--;
+      }
+    },
+  };
+}
+
+describe('runBulkImport concurrency', () => {
+  it('uses sequential processing by default (concurrency=1)', async () => {
+    const redis = makeFakeRedis(
+      {
+        k1: 'v1',
+        k2: 'v2',
+        k3: 'v3',
+        k4: 'v4',
+      },
+      { getDelayMs: 10 }
+    );
+
+    const run = await runBulkImport(redis, tmpDbPath(), `bulk-seq-${Date.now()}`, {
+      sourceUri: 'redis://fake',
+      scan_count: 100,
+      batch_keys: 1,
+    });
+
+    assert.equal(run.status, 'completed');
+    assert.equal(run.scanned_keys, 4);
+    assert.equal(run.migrated_keys, 4);
+    assert.equal(redis.stats.maxInFlightGets, 1);
+  });
+
+  it('processes keys concurrently when concurrency is configured', async () => {
+    const redis = makeFakeRedis(
+      {
+        k1: 'v1',
+        k2: 'v2',
+        k3: 'v3',
+        k4: 'v4',
+        k5: 'v5',
+        k6: 'v6',
+        k7: 'v7',
+        k8: 'v8',
+      },
+      { getDelayMs: 20 }
+    );
+
+    const run = await runBulkImport(redis, tmpDbPath(), `bulk-concurrent-${Date.now()}`, {
+      sourceUri: 'redis://fake',
+      scan_count: 100,
+      concurrency: 4,
+      batch_keys: 2,
+    });
+
+    assert.equal(run.status, 'completed');
+    assert.equal(run.scanned_keys, 8);
+    assert.equal(run.migrated_keys, 8);
+    assert.ok(redis.stats.maxInFlightGets > 1, `expected >1 inflight gets, got ${redis.stats.maxInFlightGets}`);
+    assert.ok(redis.stats.maxInFlightGets <= 4, `expected <=4 inflight gets, got ${redis.stats.maxInFlightGets}`);
+  });
+
+  it('includes ETA/progress fields in onProgress when total estimate is provided', async () => {
+    const redis = makeFakeRedis(
+      {
+        k1: 'v1',
+        k2: 'v2',
+        k3: 'v3',
+        k4: 'v4',
+      },
+      { getDelayMs: 8 }
+    );
+
+    const events = [];
+    const run = await runBulkImport(redis, tmpDbPath(), `bulk-eta-${Date.now()}`, {
+      sourceUri: 'redis://fake',
+      scan_count: 100,
+      batch_keys: 1,
+      estimated_total_keys: 4,
+      onProgress: (r) => events.push(r),
+    });
+
+    assert.equal(run.status, 'completed');
+    assert.ok(events.length >= 2, `expected at least 2 progress events, got ${events.length}`);
+
+    const withEta = events.filter((e) => e.eta_seconds !== null);
+    assert.ok(withEta.length >= 1, 'expected at least one progress event with eta_seconds');
+
+    for (const e of withEta) {
+      assert.equal(e.estimated_total_keys, 4);
+      assert.ok(e.progress_pct >= 0 && e.progress_pct <= 100, `invalid progress_pct=${e.progress_pct}`);
+      assert.ok(e.keys_per_second > 0, `invalid keys_per_second=${e.keys_per_second}`);
+      assert.ok(e.elapsed_seconds > 0, `invalid elapsed_seconds=${e.elapsed_seconds}`);
+    }
+
+    const last = events.at(-1);
+    assert.equal(last.progress_pct, 100);
+    assert.equal(last.eta_seconds, 0);
+    assert.equal(last.remaining_keys_estimate, 0);
+  });
+});