@coopenomics/parser 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,951 @@
1
+ #!/usr/bin/env node
2
+
3
+ // src/cli/index.ts
4
+ import { Command } from "commander";
5
+
6
+ // src/config/index.ts
7
+ import { readFileSync } from "fs";
8
+ import { parse as parseYaml } from "yaml";
9
+
10
+ // src/errors.ts
11
// Thrown when the parsed config does not match the expected schema.
var ConfigValidationError = class extends Error {
  /** Optional underlying error that triggered the validation failure. */
  cause;
  constructor(message, cause) {
    super(message);
    this.cause = cause;
    this.name = "ConfigValidationError";
  }
};
19
// Thrown when a secret is found hardcoded in the config file
// instead of being injected via environment variables.
var ConfigSecurityError = class extends Error {
  constructor(message) {
    super(message);
    this.name = "ConfigSecurityError";
  }
};
25
+
26
// src/config/index.ts
// Matches redis:// URLs that embed a literal password (user:pass@host).
// The `$` exclusion in the password class means `${VAR}` placeholders
// do NOT match — only genuinely hardcoded secrets are flagged.
var PLAIN_SECRET_RE = /redis:\/\/[^$\s]*:[^@$\s]+@/i;
28
/**
 * Expands `${VAR}` placeholders in a string from process.env.
 * Unknown variables are left in place as their literal `${VAR}` text.
 */
function interpolateEnv(value) {
  return value.replace(/\$\{([^}]+)\}/g, (placeholder, name) => {
    const resolved = process.env[name];
    return resolved ?? placeholder;
  });
}
33
/**
 * Recursively applies interpolateEnv to every string in a config tree.
 * Arrays and plain objects are rebuilt (the input is not mutated);
 * all other values pass through unchanged.
 */
function interpolateDeep(obj) {
  if (typeof obj === "string") return interpolateEnv(obj);
  if (Array.isArray(obj)) return obj.map(interpolateDeep);
  if (obj !== null && typeof obj === "object") {
    return Object.fromEntries(
      Object.entries(obj).map(([key, val]) => [key, interpolateDeep(val)])
    );
  }
  return obj;
}
45
/** True for plain-object-like values: non-null objects that are not arrays. */
function isObject(v) {
  if (v === null || Array.isArray(v)) return false;
  return typeof v === "object";
}
48
/**
 * Validates the (already env-interpolated) config object.
 * Collects every schema violation and throws a single
 * ConfigValidationError listing them all; returns true when valid.
 * A non-object root aborts immediately — nothing else can be checked.
 */
function validate(raw) {
  const errors = [];
  if (!isObject(raw)) {
    errors.push("(root) must be an object");
    throw new ConfigValidationError(`Config validation failed: ${errors.join("; ")}`);
  }
  const ship = raw["ship"];
  if (!isObject(ship) || typeof ship["url"] !== "string") {
    errors.push("ship.url is required and must be a string");
  }
  const redis = raw["redis"];
  if (!isObject(redis) || typeof redis["url"] !== "string") {
    errors.push("redis.url is required and must be a string");
  }
  const abiFallback = raw["abiFallback"];
  if (abiFallback !== void 0 && abiFallback !== "rpc-current" && abiFallback !== "fail") {
    errors.push('abiFallback must be "rpc-current" or "fail"');
  }
  const deserializer = raw["deserializer"];
  if (deserializer !== void 0 && deserializer !== "wharfkit") {
    errors.push('deserializer must be "wharfkit"');
  }
  if (errors.length > 0) {
    throw new ConfigValidationError(`Config validation failed: ${errors.join("; ")}`);
  }
  return true;
}
73
/**
 * Rejects configs whose redis.url embeds a literal password.
 * @throws ConfigSecurityError when PLAIN_SECRET_RE matches.
 */
function checkPlainSecrets(opts) {
  const hasInlineSecret = PLAIN_SECRET_RE.test(opts.redis.url);
  if (!hasInlineSecret) return;
  throw new ConfigSecurityError(
    "Secrets must be injected via env variables, not hardcoded in config"
  );
}
80
/**
 * Full config pipeline: env interpolation → schema validation →
 * plaintext-secret check. Returns the validated options object.
 */
function parseConfig(raw) {
  const opts = interpolateDeep(raw);
  validate(opts);
  checkPlainSecrets(opts);
  return opts;
}
87
/**
 * Loads a YAML config file from disk and runs it through parseConfig
 * (interpolation + validation + secret check).
 * @param filePath Path to the YAML config file.
 */
function fromConfigFile(filePath) {
  const yamlText = readFileSync(filePath, "utf8");
  return parseConfig(parseYaml(yamlText));
}
92
+
93
+ // src/adapters/IoRedisStore.ts
94
+ var { default: RedisClass } = await import("ioredis");
95
// Compare-and-expire script: refreshes the lock TTL (ARGV[1] ms) only when
// the key's current value equals ARGV[2] — i.e. we still own the lock.
// Returns 1 on success, 0 when the lock is gone or owned by someone else.
var PEXPIRE_LUA = `
local current = redis.call('GET', KEYS[1])
if current == ARGV[2] then
redis.call('PEXPIRE', KEYS[1], ARGV[1])
return 1
end
return 0
`;
103
// Compare-and-delete script: releases the lock only when the key's current
// value equals ARGV[1] (we are still the owner). Returns 1 when the key
// was deleted, 0 otherwise.
var DEL_LUA = `
local current = redis.call('GET', KEYS[1])
if current == ARGV[1] then
redis.call('DEL', KEYS[1])
return 1
end
return 0
`;
111
/**
 * Converts raw XRANGE/XREADGROUP entries ([id, [k1, v1, k2, v2, …]])
 * into { id, fields } objects. A trailing unpaired field name is dropped;
 * null field names/values become empty strings.
 */
function parseStreamEntries(raw) {
  return raw.map(([id, flat]) => {
    const fields = {};
    let i = 0;
    while (i + 1 < flat.length) {
      fields[flat[i] ?? ""] = flat[i + 1] ?? "";
      i += 2;
    }
    return { id, fields };
  });
}
122
/**
 * Normalizes a single XINFO GROUPS reply entry into a typed record.
 * Accepts either the flat [key, value, …] array form or an object.
 * Field names follow Redis conventions: 'last-delivered-id' uses dashes,
 * and 'lag' only exists on Redis >= 7.0 — it maps to null when absent.
 */
function parseXGroupInfo(raw) {
  let info;
  if (!Array.isArray(raw)) {
    info = raw;
  } else {
    info = {};
    for (let i = 0; i + 1 < raw.length; i += 2) {
      info[raw[i]] = raw[i + 1];
    }
  }
  const lagValue = info["lag"];
  return {
    name: String(info["name"] ?? ""),
    pending: Number(info["pending"] ?? 0),
    lastDeliveredId: String(info["last-delivered-id"] ?? "0-0"),
    lag: lagValue != null ? Number(lagValue) : null,
    consumers: Number(info["consumers"] ?? 0)
  };
}
142
/**
 * Thin async wrapper around an ioredis client exposing only the commands
 * the parser needs (streams, sorted sets, hashes, locks, SCAN).
 */
var IoRedisStore = class {
  /** Direct access to the underlying ioredis client (for tests and extension). */
  client;
  /**
   * @param opts.url       Redis connection URL.
   * @param opts.password  Optional password, merged into client options.
   * @param opts.keyPrefix Optional prefix ioredis applies to every key.
   */
  constructor(opts) {
    const redisOpts = {
      lazyConnect: true,
      // do not connect in the constructor — connection happens via explicit connect()
      enableReadyCheck: true
      // verify the server is ready before issuing commands
    };
    if (opts.password !== void 0) redisOpts.password = opts.password;
    if (opts.keyPrefix !== void 0) redisOpts.keyPrefix = opts.keyPrefix;
    this.client = new RedisClass(opts.url, redisOpts);
  }
  /** Explicit connect — called once when Parser/ParserClient starts. */
  async connect() {
    await this.client.connect();
  }
  /** XADD stream * field1 val1 … — returns the assigned entry ID ("" when null). */
  async xadd(stream, fields) {
    const args = [];
    for (const [k, v] of Object.entries(fields)) args.push(k, v);
    const id = await this.client.xadd(stream, "*", ...args);
    return id ?? "";
  }
  /** XTRIM stream MINID minId — removes entries with ID < minId. */
  async xtrim(stream, minId) {
    return this.client.xtrim(stream, "MINID", minId);
  }
  /**
   * XGROUP CREATE stream group startId MKSTREAM
   * MKSTREAM: creates the stream if it does not exist yet.
   * BUSYGROUP: the group already exists — that is fine, swallow the error.
   */
  async xgroupCreate(stream, group, startId) {
    try {
      await this.client.xgroup("CREATE", stream, group, startId, "MKSTREAM");
    } catch (err) {
      if (err instanceof Error && err.message.includes("BUSYGROUP")) return;
      throw err;
    }
  }
  /** XGROUP SETID stream group id — repositions the group within the stream. */
  async xgroupSetId(stream, group, id) {
    await this.client.xgroup("SETID", stream, group, id);
  }
  /** XINFO GROUPS stream → list of consumer groups with their metrics. */
  async xinfoGroups(stream) {
    const raw = await this.client.xinfo("GROUPS", stream);
    return (raw ?? []).map(parseXGroupInfo);
  }
  /**
   * XREADGROUP GROUP group consumer COUNT count BLOCK blockMs STREAMS stream id
   * id='>' — only new, never-delivered messages.
   * id='0' — PEL (pending): delivered but unacknowledged messages (recovery).
   */
  async xreadGroup(stream, group, consumer, count, blockMs, id) {
    const result = await this.client.xreadgroup(
      "GROUP",
      group,
      consumer,
      "COUNT",
      count,
      "BLOCK",
      blockMs,
      "STREAMS",
      stream,
      id
    );
    if (!result) return [];
    const messages = [];
    for (const [, entries] of result) {
      messages.push(...parseStreamEntries(entries));
    }
    return messages;
  }
  /** XRANGE stream start end COUNT count. */
  async xrange(stream, start, end, count) {
    const raw = await this.client.xrange(stream, start, end, "COUNT", count);
    return parseStreamEntries(raw);
  }
  /** XREVRANGE stream end start COUNT count. */
  async xrevrange(stream, end, start, count) {
    const raw = await this.client.xrevrange(stream, end, start, "COUNT", count);
    return parseStreamEntries(raw);
  }
  /** XLEN stream. */
  async xlen(stream) {
    return this.client.xlen(stream);
  }
  /** XDEL stream id — deletes an entry by ID. */
  async xdel(stream, id) {
    return this.client.xdel(stream, id);
  }
  /** XACK stream group id — removes the entry from the PEL. */
  async xack(stream, group, id) {
    await this.client.xack(stream, group, id);
  }
  /** ZADD key score member. */
  async zadd(key, score, member) {
    await this.client.zadd(key, score, member);
  }
  /**
   * ZREVRANGEBYSCORE key max min LIMIT 0 1
   * Returns at most one member with score ≤ max.
   * Used for ABI lookup: "latest version no later than block N".
   */
  async zrangeByscoreRev(key, max, min) {
    return this.client.zrevrangebyscore(key, max, min, "LIMIT", 0, 1);
  }
  /** ZRANGEBYSCORE key min max LIMIT 0 9999999 — all members in the range. */
  async zrangeByScore(key, min, max) {
    return this.client.zrangebyscore(key, min, max, "LIMIT", 0, 9999999);
  }
  /** ZCOUNT key min max. */
  async zcount(key, min, max) {
    return this.client.zcount(key, min, max);
  }
  /** ZREMRANGEBYSCORE key min max → number of removed members. */
  async zremRangeByScore(key, min, max) {
    return this.client.zremrangebyscore(key, min, max);
  }
  /** ZCARD key. */
  async zcard(key) {
    return this.client.zcard(key);
  }
  /** HSET key field1 val1 field2 val2 … (no-op for an empty field map). */
  async hset(key, fields) {
    const args = [];
    for (const [k, v] of Object.entries(fields)) args.push(k, v);
    if (args.length > 0) await this.client.hset(key, ...args);
  }
  /** HGET key field. */
  async hget(key, field) {
    return this.client.hget(key, field);
  }
  /** HGETALL key → empty object when the key does not exist (ioredis returns null). */
  async hgetAll(key) {
    const result = await this.client.hgetall(key);
    return result ?? {};
  }
  /** HINCRBY key field increment → the new counter value. */
  async hincrby(key, field, increment) {
    return this.client.hincrby(key, field, increment);
  }
  /** HDEL key field. */
  async hdel(key, field) {
    await this.client.hdel(key, field);
  }
  /**
   * SET key value NX PX pxMs
   * NX: only if the key does not exist. PX: TTL in milliseconds.
   * Used to acquire the distributed lock.
   */
  async setNx(key, value, pxMs) {
    const result = await this.client.set(key, value, "NX", "PX", pxMs);
    return result === "OK";
  }
  /**
   * Runs PEXPIRE_LUA: extends the lock TTL only if we are still the owner.
   * Returns true when the extension succeeded.
   */
  async pexpire(key, ms, value) {
    const result = await this.client.eval(PEXPIRE_LUA, 1, key, String(ms), value);
    return result === 1;
  }
  /**
   * Runs DEL_LUA: deletes the lock only if we are still the owner.
   * Returns true when the deletion succeeded.
   */
  async luaDel(key, value) {
    const result = await this.client.eval(DEL_LUA, 1, key, value);
    return result === 1;
  }
  /** EXPIRE key seconds. */
  async expire(key, seconds) {
    await this.client.expire(key, seconds);
  }
  /**
   * Full SCAN over a pattern: iterates the cursor until it returns '0'.
   * @param count — hint to Redis for how many keys to return per iteration.
   * @returns The complete key list (may be large for broad patterns).
   */
  async scan(pattern, count = 100) {
    const keys = [];
    let cursor = "0";
    do {
      const [nextCursor, batch] = await this.client.scan(cursor, "MATCH", pattern, "COUNT", count);
      keys.push(...batch);
      cursor = nextCursor;
    } while (cursor !== "0");
    return keys;
  }
  /** Closes the Redis connection. */
  async quit() {
    await this.client.quit();
  }
};
340
+
341
// src/redis/keys.ts
/** Central registry of every Redis key/stream name the parser uses. */
var RedisKeys = {
  /**
   * Main parser event stream (unified event stream).
   * Redis type: Stream. Trimmed by the XtrimSupervisor.
   * Example: ce:parser:eos-mainnet:events
   */
  eventsStream: (chainId) => `ce:parser:${chainId}:events`,
  /**
   * Dead-letter stream for a specific subscription.
   * Holds messages a consumer failed to process after N attempts.
   * Redis type: Stream.
   * Example: ce:parser:eos-mainnet:dead:verifier
   */
  deadLetterStream: (chainId, subId) => `ce:parser:${chainId}:dead:${subId}`,
  /**
   * Stream for on-demand reparse jobs (reserved for future use).
   * Redis type: Stream.
   */
  reparseStream: (chainId, jobId) => `ce:parser:${chainId}:reparse:${jobId}`,
  /**
   * ABI version history of a specific contract.
   * Redis type: Sorted Set. Score = block_num, member = base64(rawAbiBytes).
   * ABI lookup for block N uses ZREVRANGEBYSCORE … N -inf LIMIT 0 1.
   * Example: parser:abi:eosio.token
   */
  abiZset: (contract) => `parser:abi:${contract}`,
  /**
   * Parser sync checkpoint (crash recovery).
   * Redis type: Hash. Fields: block_num, block_id, last_updated.
   * On restart the parser reads its position here and resumes from it.
   */
  syncHash: (chainId) => `parser:sync:${chainId}`,
  /**
   * Registry of all registered subscriptions.
   * Redis type: Hash. Field key = subId, value = subscription JSON metadata.
   */
  subsHash: () => `parser:subs`,
  /**
   * Per-event failure counters for a specific subscription.
   * Redis type: Hash. Field key = event_id, value = failure count.
   * TTL: 24 hours (refreshed on every new failure).
   * Used by FailureTracker to decide when to dead-letter a message.
   */
  subFailuresHash: (subId) => `parser:sub:${subId}:failures`,
  /**
   * Single-active-consumer lock for a subscription.
   * Redis type: String (instanceId of the lock holder). TTL: 10 s (auto-renewed).
   * Only one consumer instance may be active; the others are standby.
   */
  subLock: (subId) => `parser:sub:${subId}:lock`,
  /**
   * Reparse job metadata (reserved for future use).
   * Redis type: Hash.
   */
  reparseJobHash: (jobId) => `parser:reparse:${jobId}`
};
398
+
399
+ // src/cli/commands/listSubscriptions.ts
400
/**
 * Renders a subscription's filter list as a short display string.
 * Shows at most the first two filters, comma-separated; no filters → "*".
 */
function formatFilters(filters) {
  if (!filters || filters.length === 0) return "*";
  const parts = [];
  for (const f of filters.slice(0, 2)) {
    const kind = f["kind"] ?? "*";
    switch (kind) {
      case "action":
        parts.push(`action:${f["account"] ?? "*"}/${f["name"] ?? "*"}`);
        break;
      case "delta":
        parts.push(`delta:${f["code"] ?? "*"}`);
        break;
      case "native-delta":
        parts.push(`native-delta:${f["table"] ?? "*"}`);
        break;
      default:
        parts.push(kind);
    }
  }
  return parts.join(",");
}
410
/**
 * CLI `list-subscriptions`: prints every registered subscription together
 * with its consumer-group stats (pending, lag, last-delivered entry).
 * @param redis   IoRedisStore-compatible client.
 * @param chainId Chain identifier used to build the events-stream key.
 * @param json    When true, prints machine-readable JSON instead of a table.
 */
async function listSubscriptions(redis, chainId, json) {
  const allSubs = await redis.hgetAll(RedisKeys.subsHash());
  if (Object.keys(allSubs).length === 0) {
    console.log("No subscriptions registered.");
    return;
  }
  // Group stats are best-effort: the stream may not exist yet.
  let groups = [];
  try {
    groups = await redis.xinfoGroups(RedisKeys.eventsStream(chainId));
  } catch {
  }
  const results = [];
  for (const rawJson of Object.values(allSubs)) {
    let meta;
    try {
      meta = JSON.parse(rawJson);
    } catch {
      continue; // skip corrupt registry entries
    }
    const group = groups.find((g) => g.name === meta.subId);
    results.push({
      subId: meta.subId,
      filters: meta.filters,
      startFrom: String(meta.startFrom),
      registeredAt: meta.registeredAt ?? "",
      pending: group === void 0 ? null : group.pending,
      lag: group === void 0 ? null : group.lag,
      lastDeliveredId: group?.lastDeliveredId ?? "not started"
    });
  }
  if (json) {
    console.log(JSON.stringify(results, null, 2));
    return;
  }
  // Fixed column widths for the table output.
  const cols = {
    subId: 16,
    filters: 25,
    pending: 9,
    lag: 6,
    lastDelivered: 21,
    startFrom: 10
  };
  const header = [
    "SUB ID".padEnd(cols.subId),
    "FILTERS".padEnd(cols.filters),
    "PENDING".padEnd(cols.pending),
    "LAG".padEnd(cols.lag),
    "LAST DELIVERED".padEnd(cols.lastDelivered),
    "START FROM"
  ].join("");
  console.log(header);
  console.log("-".repeat(header.length));
  for (const sub of results) {
    const row = [
      sub.subId.slice(0, cols.subId - 1).padEnd(cols.subId),
      formatFilters(sub.filters).slice(0, cols.filters - 1).padEnd(cols.filters),
      String(sub.pending ?? "-").padEnd(cols.pending),
      String(sub.lag ?? "-").padEnd(cols.lag),
      sub.lastDeliveredId.slice(0, cols.lastDelivered - 1).padEnd(cols.lastDelivered),
      sub.startFrom
    ].join("");
    console.log(row);
  }
}
461
+
462
+ // src/cli/commands/resetSubscription.ts
463
/**
 * Scans the events stream forward to find the last entry ID whose event
 * block_num is strictly below targetBlock. XGROUP SETID to that entry makes
 * the group resume at the first event of targetBlock (or later).
 * Falls back to "0-0" (stream start) when nothing earlier exists.
 */
async function findSetidForBlock(redis, stream, targetBlock) {
  let lastBeforeTarget = "0-0";
  let cursor = "-";
  for (;;) {
    const page = await redis.xrange(stream, cursor, "+", 100);
    if (page.length === 0) break;
    for (const entry of page) {
      let blockNum;
      try {
        const event = JSON.parse(entry.fields["data"] ?? "{}");
        if (event.block_num !== void 0) blockNum = Number(event.block_num);
      } catch {
      }
      if (blockNum === void 0) continue; // unparsable entries are skipped
      if (blockNum >= targetBlock) return lastBeforeTarget;
      lastBeforeTarget = entry.id;
    }
    if (page.length < 100) break;
    // "(" makes the next XRANGE exclusive of the last entry already seen.
    cursor = "(" + page[page.length - 1].id;
  }
  return lastBeforeTarget;
}
490
/**
 * CLI `reset-subscription`: rewinds a subscription's consumer group to a
 * given block. `toBlock` of "0"/"latest"/"$" jumps to the stream end; a
 * numeric block is translated to an entry ID via findSetidForBlock.
 * Refuses to target blocks older than the earliest (un-trimmed) entry.
 */
async function resetSubscription(redis, chainId, subId, toBlock, dryRun) {
  const stream = RedisKeys.eventsStream(chainId);
  const groupName = subId;
  const noGroupMsg = `Subscription ${subId} has no active consumer group. Start the consumer first.`;
  let groups = [];
  try {
    groups = await redis.xinfoGroups(stream);
  } catch {
    throw new Error(noGroupMsg);
  }
  const group = groups.find((g) => g.name === groupName);
  if (!group) {
    throw new Error(noGroupMsg);
  }
  const pelCount = group.pending;
  let targetId;
  if (toBlock === "0" || toBlock === "latest" || toBlock === "$") {
    // "$" = deliver only events added after the reset.
    targetId = "$";
  } else {
    const blockNum = Number(toBlock);
    if (isNaN(blockNum) || blockNum < 0) {
      throw new Error(`Invalid --to-block value: ${toBlock}. Use a block number, 0, or "latest".`);
    }
    // Guard against resetting into a range that XTRIM already removed.
    const firstEntries = await redis.xrange(stream, "-", "+", 1);
    if (firstEntries.length > 0) {
      let earliestBlock;
      try {
        const event = JSON.parse(firstEntries[0].fields["data"] ?? "{}");
        if (event.block_num !== void 0) earliestBlock = Number(event.block_num);
      } catch {
      }
      if (earliestBlock !== void 0 && blockNum < earliestBlock) {
        throw new Error(
          `Block ${blockNum} is before earliest available block ${earliestBlock} (stream trimmed). Cannot reset to trimmed range.`
        );
      }
    }
    targetId = await findSetidForBlock(redis, stream, blockNum);
  }
  const pelWarning = `Warning: PEL has ${pelCount} pending messages. They will be re-delivered on next consumer start.`;
  if (dryRun) {
    console.log(`[dry-run] Would execute: XGROUP SETID ${stream} ${groupName} ${targetId}`);
    if (pelCount > 0) console.log(pelWarning);
    return;
  }
  await redis.xgroupSetId(stream, groupName, targetId);
  console.log(`Reset subscription ${subId} to entry ${targetId}.`);
  if (pelCount > 0) console.log(pelWarning);
}
541
+
542
+ // src/cli/commands/abiPrune.ts
543
/**
 * Removes ABI versions of one contract with score (block number) < olderThan.
 * Refuses to delete the final remaining version.
 * In dry-run mode nothing is deleted and `pruned` is reported as 0.
 * @returns {{pruned:number, remaining:number, oldestScore:null, newestScore:null}}
 */
async function pruneContract(redis, contract, olderThan, dryRun) {
  const key = RedisKeys.abiZset(contract);
  const total = await redis.zcard(key);
  if (total === 0) {
    return { pruned: 0, remaining: 0, oldestScore: null, newestScore: null };
  }
  // "(" makes the bound exclusive: only scores strictly below olderThan match.
  const cutoff = `(${olderThan}`;
  const candidateCount = await redis.zcount(key, "-inf", cutoff);
  const remaining = total - candidateCount;
  if (candidateCount > 0 && remaining < 1) {
    throw new Error(`Cannot prune all ABI versions for ${contract} \u2014 at least one must remain`);
  }
  if (candidateCount > 0 && !dryRun) {
    await redis.zremRangeByScore(key, "-inf", cutoff);
  }
  return { pruned: dryRun ? 0 : candidateCount, remaining, oldestScore: null, newestScore: null };
}
559
/**
 * CLI `abi-prune`: removes ABI versions older than a block threshold.
 * Single-contract mode prints a summary; --all-contracts iterates every
 * `parser:abi:*` key and, in dry-run mode, prints a per-contract table.
 *
 * Fix: the all-contracts dry-run table previously printed the REMAINING
 * count in the "WOULD PRUNE" column (pruneContract reports pruned=0 on
 * dry runs, so the caller substituted `remaining`). Counts are now
 * computed directly per key so both columns are correct; non-dry-run
 * deletion behavior is unchanged.
 */
async function abiPrune(redis, contract, olderThan, dryRun, allContracts) {
  if (!allContracts && !contract) {
    throw new Error("Specify --contract or --all-contracts");
  }
  // "(" makes the bound exclusive: only scores strictly below olderThan match.
  const cutoff = `(${olderThan}`;
  if (allContracts) {
    const keys = await redis.scan("parser:abi:*");
    if (keys.length === 0) {
      console.log("No ABI history found.");
      return;
    }
    const rows = [];
    for (const key2 of keys) {
      const name = key2.replace(/^parser:abi:/, "");
      try {
        const total2 = await redis.zcard(key2);
        if (total2 === 0) {
          rows.push({ contract: name, pruned: 0, remaining: 0 });
          continue;
        }
        const candidateCount2 = await redis.zcount(key2, "-inf", cutoff);
        const remaining2 = total2 - candidateCount2;
        if (remaining2 < 1 && candidateCount2 > 0) {
          throw new Error(`Cannot prune all ABI versions for ${name} \u2014 at least one must remain`);
        }
        if (!dryRun && candidateCount2 > 0) {
          await redis.zremRangeByScore(key2, "-inf", cutoff);
        }
        rows.push({ contract: name, pruned: candidateCount2, remaining: remaining2 });
      } catch (err) {
        // A failure on one contract must not abort the whole sweep.
        const msg = err instanceof Error ? err.message : String(err);
        console.error(` ${name}: ${msg}`);
      }
    }
    if (dryRun) {
      console.log("CONTRACT".padEnd(30) + "WOULD PRUNE".padEnd(14) + "REMAINING");
      console.log("-".repeat(54));
      for (const row of rows) {
        console.log(row.contract.padEnd(30) + String(row.pruned).padEnd(14) + row.remaining);
      }
    }
    return;
  }
  const key = RedisKeys.abiZset(contract);
  const total = await redis.zcard(key);
  if (total === 0) {
    console.log(`No ABI history found for contract ${contract}.`);
    return;
  }
  const candidateCount = await redis.zcount(key, "-inf", cutoff);
  const remaining = total - candidateCount;
  if (remaining < 1 && candidateCount > 0) {
    console.error(`Cannot prune all ABI versions \u2014 at least one must remain`);
    process.exitCode = 1;
    return;
  }
  if (dryRun) {
    console.log(`[dry-run] Would prune ${candidateCount} ABI version(s) for ${contract}. ${remaining} version(s) would remain.`);
    return;
  }
  const pruned = candidateCount > 0 ? await redis.zremRangeByScore(key, "-inf", cutoff) : 0;
  const newTotal = await redis.zcard(key);
  if (pruned === 0) {
    console.log(`Pruned 0 ABI versions for ${contract}. ${newTotal} version(s) remain.`);
    return;
  }
  console.log(`Pruned ${pruned} ABI version(s) for ${contract}. ${newTotal} version(s) remain.`);
}
615
+
616
+ // src/cli/commands/listDeadLetters.ts
617
/**
 * Derives an ISO-8601 timestamp from a Redis stream entry ID
 * ("<ms>-<seq>"): the part before the dash is the entry's creation time
 * in milliseconds.
 * Fix: a malformed or empty ID produced NaN, which made
 * Date#toISOString throw RangeError; such IDs now map to the epoch.
 */
function entryIdToTimestamp(entryId) {
  const ms = parseInt(entryId.split("-")[0] ?? "0", 10);
  return new Date(Number.isNaN(ms) ? 0 : ms).toISOString();
}
621
/** Clips s to max characters, replacing the overflow with a single ellipsis. */
function truncate(s, max) {
  if (s.length <= max) return s;
  return s.slice(0, max - 1) + "\u2026";
}
624
/**
 * Reads up to `limit` dead-letter entries starting at `fromEntry` and
 * normalizes them: event_id/kind come from the JSON "data" field (blank
 * when missing or unparsable), failure metadata from the flat fields,
 * and the dead-letter timestamp is derived from the entry ID.
 */
async function readDeadLetters(redis, stream, limit, fromEntry) {
  const entries = await redis.xrange(stream, fromEntry, "+", limit);
  return entries.map((msg) => {
    let eventId = "";
    let kind = "";
    let originalPayload = null;
    const dataStr = msg.fields["data"];
    if (dataStr) {
      try {
        const parsed = JSON.parse(dataStr);
        if (typeof parsed["event_id"] === "string") eventId = parsed["event_id"];
        if (typeof parsed["kind"] === "string") kind = parsed["kind"];
        originalPayload = parsed;
      } catch {
      }
    }
    return {
      entryId: msg.id,
      eventId,
      kind,
      failureCount: parseInt(msg.fields["failureCount"] ?? "0", 10),
      lastError: msg.fields["lastError"] ?? "",
      deadLetteredAt: entryIdToTimestamp(msg.id),
      originalPayload
    };
  });
}
651
/**
 * CLI `list-dead-letters`: prints dead-letter entries for one subscription
 * (--sub-id) or for every dead-letter stream of the chain (--all), either
 * as JSON or as a fixed-width table.
 * @param redis     IoRedisStore-compatible client.
 * @param chainId   Chain identifier (part of the stream key).
 * @param subId     Subscription ID; required unless `all` is true.
 * @param json      Emit JSON instead of a table.
 * @param limit     Maximum entries to read per stream.
 * @param fromEntry XRANGE start entry ID.
 * @param all       Scan every dead-letter stream of the chain.
 */
async function listDeadLetters(redis, chainId, subId, json, limit, fromEntry, all) {
  let streams = [];
  if (all) {
    // deadLetterStream(chainId, "*") doubles as a SCAN MATCH pattern.
    const pattern = RedisKeys.deadLetterStream(chainId, "*");
    const keys = await redis.scan(pattern);
    const prefix = `ce:parser:${chainId}:dead:`;
    streams = keys.map((k) => ({ key: k, subId: k.slice(prefix.length) }));
    if (streams.length === 0) {
      console.log("No dead-letter streams found.");
      return;
    }
  } else {
    if (!subId) throw new Error("--sub-id is required unless --all is specified");
    streams = [{ key: RedisKeys.deadLetterStream(chainId, subId), subId }];
  }
  if (json) {
    // JSON mode: flatten all streams into one array, tagging entries with their subId.
    const allEntries = [];
    for (const { key, subId: sid } of streams) {
      const entries = await readDeadLetters(redis, key, limit, fromEntry);
      allEntries.push(...entries.map((e) => ({ ...e, subId: sid })));
    }
    console.log(JSON.stringify(allEntries, null, 2));
    return;
  }
  for (const { key, subId: sid } of streams) {
    const total = await redis.xlen(key);
    console.log(`Dead letters for ${sid}: ${total} total`);
    if (total === 0) {
      console.log(`No dead letters for subscription ${sid}.`);
      continue;
    }
    const entries = await readDeadLetters(redis, key, limit, fromEntry);
    // xlen counts the whole stream; the limited/from-entry read may still be empty.
    if (entries.length === 0) {
      console.log(`No dead letters for subscription ${sid}.`);
      continue;
    }
    // Fixed column widths for the table output.
    const cols = {
      entryId: 22,
      eventId: 50,
      kind: 14,
      failCount: 6,
      lastError: 40
    };
    if (all) {
      // --all adds a leading SUB ID column; otherwise the table is identical
      // to the single-subscription branch below.
      const subCol = 12;
      console.log(
        "SUB ID".padEnd(subCol) + "ENTRY ID".padEnd(cols.entryId) + "EVENT ID".padEnd(cols.eventId) + "KIND".padEnd(cols.kind) + "FAIL#".padEnd(cols.failCount) + "LAST ERROR"
      );
      for (const e of entries) {
        console.log(
          truncate(sid, subCol).padEnd(subCol) + truncate(e.entryId, cols.entryId).padEnd(cols.entryId) + truncate(e.eventId, cols.eventId).padEnd(cols.eventId) + truncate(e.kind, cols.kind).padEnd(cols.kind) + String(e.failureCount).padEnd(cols.failCount) + truncate(e.lastError, cols.lastError)
        );
      }
    } else {
      console.log(
        "ENTRY ID".padEnd(cols.entryId) + "EVENT ID".padEnd(cols.eventId) + "KIND".padEnd(cols.kind) + "FAIL#".padEnd(cols.failCount) + "LAST ERROR"
      );
      for (const e of entries) {
        console.log(
          truncate(e.entryId, cols.entryId).padEnd(cols.entryId) + truncate(e.eventId, cols.eventId).padEnd(cols.eventId) + truncate(e.kind, cols.kind).padEnd(cols.kind) + String(e.failureCount).padEnd(cols.failCount) + truncate(e.lastError, cols.lastError)
        );
      }
    }
  }
}
716
+
717
+ // src/cli/commands/replayDeadLetter.ts
718
/**
 * Linearly scans a dead-letter stream for the entry whose JSON "data"
 * payload carries the given event_id. Pages through XRANGE 100 at a time.
 * @returns the matching message, or null when not found.
 */
async function findByEventId(redis, stream, eventId) {
  let cursor = "-";
  for (;;) {
    const batch = await redis.xrange(stream, cursor, "+", 100);
    if (batch.length === 0) return null;
    for (const msg of batch) {
      const dataStr = msg.fields["data"];
      if (!dataStr) continue;
      try {
        if (JSON.parse(dataStr)["event_id"] === eventId) return msg;
      } catch {
      }
    }
    const last = batch[batch.length - 1];
    if (!last || batch.length < 100) return null;
    // "(" makes the next XRANGE exclusive of the last entry already scanned.
    cursor = "(" + last.id;
  }
}
738
/**
 * Re-publishes a single dead-lettered message to the live stream, removes
 * it from the dead-letter stream, and clears its failure counter.
 * Dry-run mode only logs the intent. Messages without a "data" field are
 * skipped entirely.
 * @returns the new live-stream entry ID, or null when nothing was replayed.
 */
async function replaySingle(redis, liveStream, deadStream, subId, msg, dryRun) {
  const dataStr = msg.fields["data"];
  if (!dataStr) return null;
  let eventId = "";
  try {
    const parsed = JSON.parse(dataStr);
    if (typeof parsed["event_id"] === "string") eventId = parsed["event_id"];
  } catch {
  }
  if (dryRun) {
    console.log(`[dry-run] Would replay event ${eventId || msg.id} \u2192 live stream, delete from dead-letter.`);
    return null;
  }
  const newId = await redis.xadd(liveStream, { data: dataStr });
  await redis.xdel(deadStream, msg.id);
  if (eventId) {
    await redis.hdel(RedisKeys.subFailuresHash(subId), eventId);
  }
  return newId;
}
758
/**
 * CLI `replay-dead-letter`: moves messages from a subscription's dead-letter
 * stream back onto the live events stream — a single event by --event-id,
 * or every message with --all.
 *
 * Fixes in the --all loop:
 *  - it stopped after the first page whenever anything was replayed
 *    (`if (replayed > 0) break`), leaving the rest of the stream untouched;
 *  - a full page containing only non-replayable (data-less) messages
 *    re-fetched the same page forever because the cursor was reset to "-";
 *  - dry-run only reported the first 100 entries.
 * All three are solved by always advancing the cursor past the last
 * processed entry — replayed entries are XDEL'd, so nothing is skipped.
 */
async function replayDeadLetter(redis, chainId, subId, eventId, all, dryRun) {
  const liveStream = RedisKeys.eventsStream(chainId);
  const deadStream = RedisKeys.deadLetterStream(chainId, subId);
  if (all) {
    let cursor = "-";
    let replayed = 0;
    for (;;) {
      const batch = await redis.xrange(deadStream, cursor, "+", 100);
      if (batch.length === 0) break;
      if (dryRun) {
        // Dry-run: log intent per message, count the whole page.
        for (const msg2 of batch) {
          await replaySingle(redis, liveStream, deadStream, subId, msg2, true);
        }
        replayed += batch.length;
      } else {
        for (const msg2 of batch) {
          const newId2 = await replaySingle(redis, liveStream, deadStream, subId, msg2, false);
          if (newId2 !== null) replayed++;
        }
      }
      if (batch.length < 100) break;
      // Advance past everything processed so far ("(" = exclusive bound).
      cursor = "(" + batch[batch.length - 1].id;
    }
    if (dryRun) {
      console.log(`[dry-run] Would replay ${replayed} events from dead-letter stream for ${subId}.`);
    } else {
      console.log(`Replayed ${replayed} events, removed ${replayed} from dead-letter.`);
    }
    return;
  }
  if (!eventId) throw new Error("--event-id is required unless --all is specified");
  const msg = await findByEventId(redis, deadStream, eventId);
  if (!msg) {
    console.error(`Event not found in dead-letter stream for subscription ${subId}.`);
    process.exit(1);
  }
  const newId = await replaySingle(redis, liveStream, deadStream, subId, msg, dryRun);
  if (newId !== null) {
    console.log(`Replayed event ${eventId} \u2192 new entry id ${newId}. Removed from dead-letter.`);
  }
}
797
+
798
// src/cli/index.ts
// Root commander program; subcommands are registered below.
var program = new Command();
// NOTE(review): the CLI reports version "0.1.0" while the package header says
// 1.0.0 — confirm whether this should track the published package version.
program.name("parser").description("@coopenomics/parser \u2014 universal EOSIO/Antelope blockchain indexer").version("0.1.0");
801
// `validate` command: parses and validates a config file, then prints a
// redacted copy. Exit codes: 0 = valid, 1 = schema/parse error, 2 = security
// violation (hardcoded secret).
// NOTE(review): redactUrl is not defined in this part of the bundle —
// presumably declared later in the file; confirm it strips credentials.
program.command("validate <config-file>").description("Validate config file without starting the parser").option("--json", "Output result as JSON").action((configFile, opts) => {
  try {
    const config = fromConfigFile(configFile);
    // Never echo raw URLs: they may contain credentials.
    const redacted = {
      ship: { url: redactUrl(config.ship.url) },
      chain: config.chain ? { url: config.chain.url ? redactUrl(config.chain.url) : void 0, id: config.chain.id } : void 0,
      redis: { url: redactUrl(config.redis.url) }
    };
    if (opts.json) {
      console.log(JSON.stringify({ valid: true, config: redacted }));
    } else {
      console.log("\u2713 Config valid");
      console.log(JSON.stringify(redacted, null, 2));
    }
    process.exit(0);
  } catch (err) {
    // Exit code 2: a hardcoded secret was found in the config.
    if (err instanceof ConfigSecurityError) {
      if (opts.json) {
        console.error(JSON.stringify({ valid: false, errors: [`SECURITY: ${err.message}`] }));
      } else {
        console.error(`SECURITY: secrets must not be hardcoded`);
        console.error(err.message);
      }
      process.exit(2);
    }
    // Exit code 1: schema violation.
    if (err instanceof ConfigValidationError) {
      const message = err.message;
      if (opts.json) {
        console.error(JSON.stringify({ valid: false, errors: [message] }));
      } else {
        console.error("\u2717 Config invalid:");
        console.error(message);
      }
      process.exit(1);
    }
    // Any other failure (unreadable file, YAML parse error, …) → exit 1.
    if (opts.json) {
      console.error(JSON.stringify({ valid: false, errors: [String(err)] }));
    } else {
      console.error("\u2717 Config invalid:");
      console.error(String(err));
    }
    process.exit(1);
  }
});
845
// `list-subscriptions` command.
// Fix: the original called process.exit() inside the try/catch, which
// terminates the process immediately — the `finally { await redis?.quit() }`
// cleanup never ran. Setting process.exitCode preserves the exit statuses
// (0 on success, 1 on error) while letting the finally block close the
// Redis connection so the process can exit naturally.
program.command("list-subscriptions").description("List registered subscriptions with consumer group stats").requiredOption("--config <file>", "Config file path").option("--chain-id <id>", "Override chain ID from config").option("--json", "Output as JSON").action(async (opts) => {
  let redis = null;
  try {
    const config = fromConfigFile(opts.config);
    const chainId = opts.chainId ?? config.chain?.id ?? "default";
    redis = new IoRedisStore(config.redis);
    await redis.connect();
    await listSubscriptions(redis, chainId, opts.json ?? false);
  } catch (err) {
    console.error(err instanceof Error ? err.message : String(err));
    process.exitCode = 1;
  } finally {
    await redis?.quit();
  }
});
861
// `reset-subscription`: rewind a subscription's consumer group to a block.
program
  .command("reset-subscription")
  .description("Rewind a subscription consumer group to a specific block")
  .requiredOption("--config <file>", "Config file path")
  .requiredOption("--sub-id <id>", "Subscription ID to reset")
  .requiredOption("--to-block <n>", 'Target block number (0 or "latest" = skip to end)')
  .option("--chain-id <id>", "Override chain ID from config")
  .option("--dry-run", "Show what would be done without executing")
  .action(async (opts) => {
    let redis = null;
    // BUG FIX: defer the exit code — process.exit() inside `try`/`catch`
    // skips `finally`, so the Redis connection would never be closed.
    let exitCode = 0;
    try {
      const config = fromConfigFile(opts.config);
      const chainId = opts.chainId ?? config.chain?.id ?? "default";
      redis = new IoRedisStore(config.redis);
      await redis.connect();
      await resetSubscription(redis, chainId, opts.subId, opts.toBlock, opts.dryRun ?? false);
    } catch (err) {
      console.error(err instanceof Error ? err.message : String(err));
      exitCode = 1;
    } finally {
      await redis?.quit();
    }
    process.exit(exitCode);
  });
// `abi-prune`: drop stale ABI versions from a contract's ZSET history.
program
  .command("abi-prune")
  .description("Prune old ABI versions from a contract ZSET")
  .requiredOption("--config <file>", "Config file path")
  .option("--contract <name>", "Contract name to prune")
  .option("--older-than <block>", "Remove versions older than this block number")
  .option("--dry-run", "Show what would be done without executing")
  .option("--all-contracts", "Apply prune to all contracts with ABI history")
  .action(async (opts) => {
    let redis = null;
    // BUG FIX: defer the exit code — process.exit() inside `try`/`catch`
    // skips `finally`, so the Redis connection would never be closed.
    let exitCode = 0;
    try {
      const config = fromConfigFile(opts.config);
      redis = new IoRedisStore(config.redis);
      await redis.connect();
      // Default to 0 (prune nothing by age) when --older-than is omitted.
      const olderThan = opts.olderThan !== void 0 ? Number(opts.olderThan) : 0;
      // Number.isNaN instead of the coercing global isNaN.
      if (Number.isNaN(olderThan)) throw new Error(`Invalid --older-than value: ${opts.olderThan}`);
      await abiPrune(redis, opts.contract ?? null, olderThan, opts.dryRun ?? false, opts.allContracts ?? false);
    } catch (err) {
      console.error(err instanceof Error ? err.message : String(err));
      exitCode = 1;
    } finally {
      await redis?.quit();
    }
    process.exit(exitCode);
  });
// `list-dead-letters`: inspect dead-letter stream(s), with pagination.
program
  .command("list-dead-letters")
  .description("Inspect dead-letter stream for a subscription")
  .requiredOption("--config <file>", "Config file path")
  .option("--sub-id <id>", "Subscription ID to inspect")
  .option("--chain-id <id>", "Override chain ID from config")
  .option("--all", "Inspect all dead-letter streams")
  .option("--json", "Output as JSON")
  .option("--limit <n>", "Max entries to show", "100")
  .option("--from <entryId>", "Start from this entry ID (pagination)", "-")
  .action(async (opts) => {
    let redis = null;
    // BUG FIX: defer the exit code — process.exit() inside `try`/`catch`
    // skips `finally`, so the Redis connection would never be closed.
    let exitCode = 0;
    try {
      const config = fromConfigFile(opts.config);
      const chainId = opts.chainId ?? config.chain?.id ?? "default";
      redis = new IoRedisStore(config.redis);
      await redis.connect();
      await listDeadLetters(
        redis,
        chainId,
        opts.subId ?? null,
        opts.json ?? false,
        Number(opts.limit ?? 100),
        opts.from ?? "-",
        opts.all ?? false
      );
    } catch (err) {
      console.error(err instanceof Error ? err.message : String(err));
      exitCode = 1;
    } finally {
      await redis?.quit();
    }
    process.exit(exitCode);
  });
// `replay-dead-letter`: push dead-letter event(s) back into the live stream.
program
  .command("replay-dead-letter")
  .description("Replay a dead-letter event back into the live stream")
  .requiredOption("--config <file>", "Config file path")
  .requiredOption("--sub-id <id>", "Subscription ID")
  .option("--event-id <id>", "Event ID to replay")
  .option("--all", "Replay all dead-letter events for the subscription")
  .option("--chain-id <id>", "Override chain ID from config")
  .option("--dry-run", "Show what would be done without executing")
  .action(async (opts) => {
    let redis = null;
    // BUG FIX: defer the exit code — process.exit() inside `try`/`catch`
    // skips `finally`, so the Redis connection would never be closed.
    let exitCode = 0;
    try {
      const config = fromConfigFile(opts.config);
      const chainId = opts.chainId ?? config.chain?.id ?? "default";
      redis = new IoRedisStore(config.redis);
      await redis.connect();
      await replayDeadLetter(
        redis,
        chainId,
        opts.subId,
        opts.eventId ?? null,
        opts.all ?? false,
        opts.dryRun ?? false
      );
    } catch (err) {
      console.error(err instanceof Error ? err.message : String(err));
      exitCode = 1;
    } finally {
      await redis?.quit();
    }
    process.exit(exitCode);
  });
/**
 * Redact the password component of a URL so it can be printed safely.
 *
 * @param {string} url - URL that may contain `user:password@` credentials.
 * @returns {string} The URL with any password replaced by `***`.
 */
function redactUrl(url) {
  try {
    const u = new URL(url);
    if (u.password) u.password = "***";
    return u.toString();
  } catch {
    // BUG FIX: previously an unparseable URL was returned verbatim, which
    // could leak an embedded password into CLI output. Best-effort mask of
    // any `//user:secret@` userinfo before giving up.
    return url.replace(/\/\/([^/@:\s]*):[^@\s]+@/, "//$1:***@");
  }
}
// Parse argv and dispatch to the matching subcommand's action handler.
program.parse(process.argv);
//# sourceMappingURL=index.js.map