effective-indexer 0.2.1 → 0.2.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -2,6 +2,10 @@
2
2
 
3
3
  Lightweight EVM smart contract event indexer built with [Effect](https://effect.website).
4
4
 
5
+ Index EVM events to your own database in minutes — no hosted lock-in, no PhD required.
6
+
7
+ Repository: [github.com/cybervoid0/effective-indexer](https://github.com/cybervoid0/effective-indexer)
8
+
5
9
  Indexes smart contract events into SQLite with:
6
10
  - Historical backfill (`eth_getLogs` in chunks)
7
11
  - Live polling for new blocks
@@ -93,6 +97,7 @@ Returns `IndexerHandle`:
93
97
  | `dbPath` | `string` | `"./indexer.db"` | SQLite database path |
94
98
  | `contracts` | `ContractConfig[]` | — | Contracts to index |
95
99
  | `network` | `NetworkConfig` | see below | Network tuning |
100
+ | `telemetry` | `TelemetryConfig` | see below | Backfill progress settings |
96
101
  | `logLevel` | `string` | `"info"` | Minimum log level |
97
102
  | `logFormat` | `string` | `"pretty"` | Log output format |
98
103
  | `enableTelemetry` | `boolean` | `true` | Set `false` for errors-only |
@@ -102,25 +107,196 @@ Returns `IndexerHandle`:
102
107
  ```ts
103
108
  {
104
109
  polling: {
105
- intervalMs: 12000, // block polling interval
106
- confirmations: 1, // blocks behind head to consider confirmed
110
+ intervalMs: 12000, // block polling interval
111
+ confirmations: 1, // blocks behind head to consider confirmed
107
112
  },
108
113
  logs: {
109
- chunkSize: 5000, // blocks per eth_getLogs request
110
- maxRetries: 5, // retry count on RPC failure
114
+ chunkSize: 5000, // blocks per eth_getLogs request
115
+ maxRetries: 5, // retry count on RPC failure
116
+ parallelRequests: 1, // concurrent eth_getLogs requests during backfill
111
117
  retry: {
112
- baseDelayMs: 1000, // initial retry delay
113
- maxDelayMs: 30000, // cap for exponential backoff
118
+ baseDelayMs: 1000, // initial retry delay
119
+ maxDelayMs: 30000, // cap for exponential backoff
114
120
  },
115
121
  },
116
122
  reorg: {
117
- depth: 20, // block hash buffer depth for reorg detection
123
+ depth: 20, // block hash buffer depth for reorg detection
118
124
  },
119
125
  }
120
126
  ```
121
127
 
122
128
  All fields are optional — defaults are shown above.
123
129
 
130
+ ### `TelemetryConfig`
131
+
132
+ ```ts
133
+ {
134
+ telemetry: {
135
+ progress: {
136
+ enabled: true, // show backfill progress in terminal
137
+ intervalMs: 3000, // progress update frequency (ms, minimum 500)
138
+ },
139
+ },
140
+ }
141
+ ```
142
+
143
+ `enableTelemetry: false` disables progress rendering and leaves only error-level logging enabled.
144
+
145
+ When enabled, the indexer displays a live progress line during backfill:
146
+
147
+ ```
148
+ [Backfill] Token 42.8% | 1,234,000/2,880,000 blocks | 3,450 blk/s | 12.4 ev/s | ETA 00:07:43 | p=3 | chunk=5000
149
+ ```
150
+
151
+ In non-TTY environments, periodic info logs are emitted instead. A final summary is logged when backfill completes:
152
+
153
+ ```
154
+ [Backfill complete] Token: 2,880,000 blocks | 45,230 events | 312 chunks | 00:13:54 (3,453 blk/s, 54.2 ev/s) | p=3 | chunkSize=5000
155
+ ```
156
+
157
+ ## Worker Setup (Recommended)
158
+
159
+ Run the indexer as a dedicated long-lived worker process (not in request handlers).
160
+
161
+ Create `scripts/indexer.ts`:
162
+
163
+ ```ts
164
+ import { Indexer } from "effective-indexer"
165
+ import type { Abi } from "viem"
166
+
167
+ const transferAbi: Abi = [
168
+ {
169
+ type: "event",
170
+ name: "Transfer",
171
+ inputs: [
172
+ { indexed: true, name: "from", type: "address" },
173
+ { indexed: true, name: "to", type: "address" },
174
+ { indexed: false, name: "value", type: "uint256" },
175
+ ],
176
+ },
177
+ ]
178
+
179
+ const indexer = Indexer.create({
180
+ rpcUrl: process.env.EVM_RPC_URL!,
181
+ dbPath: process.env.INDEXER_DB_PATH ?? "./data/events.db",
182
+ contracts: [
183
+ {
184
+ name: process.env.INDEXER_CONTRACT_NAME ?? "Token",
185
+ address: process.env.INDEXER_CONTRACT_ADDRESS!,
186
+ abi: transferAbi,
187
+ events: ["Transfer"],
188
+ startBlock: BigInt(process.env.INDEXER_START_BLOCK ?? "0"),
189
+ },
190
+ ],
191
+ network: {
192
+ polling: {
193
+ intervalMs: Number(process.env.INDEXER_POLL_INTERVAL_MS ?? "12000"),
194
+ confirmations: Number(process.env.INDEXER_CONFIRMATIONS ?? "1"),
195
+ },
196
+ logs: {
197
+ chunkSize: Number(process.env.INDEXER_CHUNK_SIZE ?? "2000"),
198
+ parallelRequests: Number(process.env.INDEXER_PARALLEL_REQUESTS ?? "1"),
199
+ maxRetries: Number(process.env.INDEXER_MAX_RETRIES ?? "5"),
200
+ retry: {
201
+ baseDelayMs: Number(process.env.INDEXER_RETRY_BASE_MS ?? "1000"),
202
+ maxDelayMs: Number(process.env.INDEXER_RETRY_MAX_MS ?? "30000"),
203
+ },
204
+ },
205
+ reorg: {
206
+ depth: Number(process.env.INDEXER_REORG_DEPTH ?? "20"),
207
+ },
208
+ },
209
+ telemetry: {
210
+ progress: {
211
+ enabled: process.env.INDEXER_PROGRESS_ENABLED !== "false",
212
+ intervalMs: Number(process.env.INDEXER_PROGRESS_INTERVAL_MS ?? "3000"),
213
+ },
214
+ },
215
+ logLevel: (process.env.INDEXER_LOG_LEVEL ?? "info") as
216
+ | "trace"
217
+ | "debug"
218
+ | "info"
219
+ | "warning"
220
+ | "error"
221
+ | "none",
222
+ logFormat: (process.env.INDEXER_LOG_FORMAT ?? "pretty") as
223
+ | "pretty"
224
+ | "json"
225
+ | "structured",
226
+ enableTelemetry: process.env.INDEXER_TELEMETRY !== "false",
227
+ })
228
+
229
+ const start = async (): Promise<void> => {
230
+ await indexer.start()
231
+ console.log("Indexer worker started")
232
+
233
+ // Keep the process alive while indexing in background.
234
+ const keepAlive = setInterval(() => undefined, 60_000)
235
+
236
+ const stop = async (): Promise<void> => {
237
+ clearInterval(keepAlive)
238
+ await indexer.stop()
239
+ process.exit(0)
240
+ }
241
+
242
+ process.on("SIGINT", () => {
243
+ void stop()
244
+ })
245
+ process.on("SIGTERM", () => {
246
+ void stop()
247
+ })
248
+ }
249
+
250
+ start().catch(error => {
251
+ console.error("Indexer worker failed:", error)
252
+ process.exit(1)
253
+ })
254
+ ```
255
+
256
+ Create `.env`:
257
+
258
+ ```bash
259
+ EVM_RPC_URL=https://your-rpc-url
260
+ INDEXER_DB_PATH=./data/events.db
261
+ INDEXER_CONTRACT_NAME=Token
262
+ INDEXER_CONTRACT_ADDRESS=0xYourContractAddress
263
+ INDEXER_START_BLOCK=0
264
+ INDEXER_POLL_INTERVAL_MS=12000
265
+ INDEXER_CONFIRMATIONS=1
266
+ INDEXER_CHUNK_SIZE=2000
267
+ INDEXER_PARALLEL_REQUESTS=1
268
+ INDEXER_MAX_RETRIES=5
269
+ INDEXER_RETRY_BASE_MS=1000
270
+ INDEXER_RETRY_MAX_MS=30000
271
+ INDEXER_REORG_DEPTH=20
272
+ INDEXER_PROGRESS_ENABLED=true
273
+ INDEXER_PROGRESS_INTERVAL_MS=3000
274
+ INDEXER_LOG_LEVEL=info
275
+ INDEXER_LOG_FORMAT=pretty
276
+ INDEXER_TELEMETRY=true
277
+ ```
278
+
279
+ Add scripts (with `tsx` installed):
280
+
281
+ ```bash
282
+ npm install -D tsx
283
+ ```
284
+
285
+ ```json
286
+ {
287
+ "scripts": {
288
+ "indexer:start": "node --import tsx ./scripts/indexer.ts",
289
+ "indexer:debug": "INDEXER_LOG_LEVEL=debug node --import tsx ./scripts/indexer.ts"
290
+ }
291
+ }
292
+ ```
293
+
294
+ Run:
295
+
296
+ ```bash
297
+ npm run indexer:start
298
+ ```
299
+
124
300
  ### Network Tuning Profiles
125
301
 
126
302
  | Chain | `polling.intervalMs` | `polling.confirmations` | `logs.chunkSize` | `reorg.depth` |
@@ -167,6 +343,35 @@ The indexer uses Effect's native logging system. All log output is controlled vi
167
343
  - On restart, the indexer resumes from checkpoint and backfills missed blocks.
168
344
  - If RPC does not support `eth_getLogs`, indexing cannot work.
169
345
 
346
+ ## Parallel Backfill
347
+
348
+ Set `network.logs.parallelRequests` to speed up historical backfill by issuing multiple `eth_getLogs` requests concurrently. Chunk ordering is preserved regardless of concurrency.
349
+
350
+ ```ts
351
+ const indexer = Indexer.create({
352
+ rpcUrl: "https://eth.llamarpc.com",
353
+ contracts: [/* ... */],
354
+ network: {
355
+ logs: {
356
+ chunkSize: 2000,
357
+ parallelRequests: 4,
358
+ },
359
+ },
360
+ })
361
+ ```
362
+
363
+ **Recommended values**: Start with `parallelRequests: 3` and increase if the RPC allows. Public endpoints may begin rate-limiting above 5–10 concurrent requests.
364
+
365
+ ### Benchmarking
366
+
367
+ To measure the effect of parallelism:
368
+
369
+ 1. Use a fixed RPC endpoint and contract/block range
370
+ 2. Start with an empty database each run
371
+ 3. Compare `parallelRequests` values 1, 2, 3, 4
372
+ 4. Run 3 times each and take the median
373
+ 5. Use the progress summary line for timing: `[Backfill complete] ... blk/s`
374
+
170
375
  ## Development
171
376
 
172
377
  ```bash
package/dist/index.cjs CHANGED
@@ -6,6 +6,20 @@ var viem = require('viem');
6
6
  var sql = require('@effect/sql');
7
7
 
8
8
  // src/index.ts
9
+ var RpcError = class extends effect.Data.TaggedError("RpcError") {
10
+ };
11
+ var DecodeError = class extends effect.Data.TaggedError("DecodeError") {
12
+ };
13
+ var StorageError = class extends effect.Data.TaggedError("StorageError") {
14
+ };
15
+ var ReorgDetected = class extends effect.Data.TaggedError("ReorgDetected") {
16
+ };
17
+ var CheckpointError = class extends effect.Data.TaggedError("CheckpointError") {
18
+ };
19
+ var ConfigError = class extends effect.Data.TaggedError("ConfigError") {
20
+ };
21
+
22
+ // src/config.ts
9
23
  var resolveNetwork = (config) => {
10
24
  const n = config.network;
11
25
  return {
@@ -19,20 +33,46 @@ var resolveNetwork = (config) => {
19
33
  retry: {
20
34
  baseDelayMs: n?.logs?.retry?.baseDelayMs ?? 1e3,
21
35
  maxDelayMs: n?.logs?.retry?.maxDelayMs ?? 3e4
22
- }
36
+ },
37
+ parallelRequests: n?.logs?.parallelRequests ?? 1
23
38
  },
24
39
  reorg: {
25
40
  depth: n?.reorg?.depth ?? 20
26
41
  }
27
42
  };
28
43
  };
44
+ var resolveTelemetry = (config) => {
45
+ const t = config.telemetry;
46
+ return {
47
+ progress: {
48
+ enabled: t?.progress?.enabled ?? true,
49
+ intervalMs: t?.progress?.intervalMs ?? 3e3
50
+ }
51
+ };
52
+ };
29
53
  var resolveConfig = (config) => {
30
54
  const network = resolveNetwork(config);
55
+ const telemetry = resolveTelemetry(config);
56
+ const pr = network.logs.parallelRequests;
57
+ if (!Number.isInteger(pr) || pr < 1) {
58
+ throw new ConfigError({
59
+ reason: "parallelRequests must be an integer >= 1",
60
+ field: "network.logs.parallelRequests"
61
+ });
62
+ }
63
+ const pi = telemetry.progress.intervalMs;
64
+ if (!Number.isInteger(pi) || !Number.isFinite(pi) || pi < 500) {
65
+ throw new ConfigError({
66
+ reason: "telemetry.progress.intervalMs must be an integer >= 500",
67
+ field: "telemetry.progress.intervalMs"
68
+ });
69
+ }
31
70
  return {
32
71
  rpcUrl: config.rpcUrl,
33
72
  dbPath: config.dbPath ?? "./indexer.db",
34
73
  contracts: config.contracts,
35
74
  network,
75
+ telemetry,
36
76
  logLevel: config.logLevel ?? "info",
37
77
  logFormat: config.logFormat ?? "pretty",
38
78
  enableTelemetry: config.enableTelemetry ?? true
@@ -76,20 +116,6 @@ var LoggerLive = (config) => {
76
116
  effect.Logger.minimumLogLevel(level)
77
117
  );
78
118
  };
79
- var RpcError = class extends effect.Data.TaggedError("RpcError") {
80
- };
81
- var DecodeError = class extends effect.Data.TaggedError("DecodeError") {
82
- };
83
- var StorageError = class extends effect.Data.TaggedError("StorageError") {
84
- };
85
- var ReorgDetected = class extends effect.Data.TaggedError("ReorgDetected") {
86
- };
87
- var CheckpointError = class extends effect.Data.TaggedError("CheckpointError") {
88
- };
89
- (class extends effect.Data.TaggedError("ConfigError") {
90
- });
91
-
92
- // src/services/RpcProvider.ts
93
119
  var toHexQuantity = (value) => `0x${value.toString(16)}`;
94
120
  var RpcProvider = class extends effect.Context.Tag("effective-indexer/RpcProvider")() {
95
121
  };
@@ -453,6 +479,195 @@ var EventDecoderLive = effect.Layer.succeed(EventDecoder, {
453
479
  logs.map((log) => decodeLog(contractName, abi, log)).filter((e) => e !== null)
454
480
  )
455
481
  });
482
+ var computeSnapshot = (p) => {
483
+ const now = Date.now();
484
+ const elapsedMs = Math.max(now - p.startedAt, 1);
485
+ const elapsedSec = elapsedMs / 1e3;
486
+ const processed = Number(p.processedBlocks);
487
+ const total = Number(p.totalBlocks);
488
+ const percentage = total > 0 ? processed / total * 100 : 0;
489
+ const blocksPerSecond = elapsedSec > 0 ? processed / elapsedSec : 0;
490
+ const eventsPerSecond = elapsedSec > 0 ? p.totalEvents / elapsedSec : 0;
491
+ const remaining = total - processed;
492
+ const etaMs = blocksPerSecond > 0 ? remaining / blocksPerSecond * 1e3 : null;
493
+ return {
494
+ contractName: p.contractName,
495
+ totalBlocks: p.totalBlocks,
496
+ processedBlocks: p.processedBlocks,
497
+ totalEvents: p.totalEvents,
498
+ chunkCount: p.chunkCount,
499
+ elapsedMs,
500
+ blocksPerSecond,
501
+ eventsPerSecond,
502
+ percentage,
503
+ etaMs
504
+ };
505
+ };
506
+ var ProgressReporter = class extends effect.Context.Tag(
507
+ "effective-indexer/ProgressReporter"
508
+ )() {
509
+ };
510
+ var ProgressReporterLive = effect.Layer.effect(
511
+ ProgressReporter,
512
+ effect.Effect.gen(function* () {
513
+ const state = yield* effect.Ref.make(/* @__PURE__ */ new Map());
514
+ return {
515
+ start: (contractName, totalBlocks) => effect.Ref.update(state, (map) => {
516
+ const next = new Map(map);
517
+ next.set(contractName, {
518
+ contractName,
519
+ totalBlocks,
520
+ processedBlocks: 0n,
521
+ totalEvents: 0,
522
+ chunkCount: 0,
523
+ startedAt: Date.now()
524
+ });
525
+ return next;
526
+ }),
527
+ update: (contractName, processedBlocks, eventsInChunk) => effect.Ref.update(state, (map) => {
528
+ const entry = map.get(contractName);
529
+ if (!entry) return map;
530
+ const boundedProcessed = processedBlocks > entry.totalBlocks ? entry.totalBlocks : processedBlocks;
531
+ const nextProcessed = boundedProcessed > entry.processedBlocks ? boundedProcessed : entry.processedBlocks;
532
+ const next = new Map(map);
533
+ next.set(contractName, {
534
+ ...entry,
535
+ processedBlocks: nextProcessed,
536
+ totalEvents: entry.totalEvents + eventsInChunk
537
+ });
538
+ return next;
539
+ }),
540
+ incrementChunks: (contractName) => effect.Ref.update(state, (map) => {
541
+ const entry = map.get(contractName);
542
+ if (!entry) return map;
543
+ const next = new Map(map);
544
+ next.set(contractName, {
545
+ ...entry,
546
+ chunkCount: entry.chunkCount + 1
547
+ });
548
+ return next;
549
+ }),
550
+ finish: (contractName) => effect.Ref.update(state, (map) => {
551
+ const next = new Map(map);
552
+ next.delete(contractName);
553
+ return next;
554
+ }),
555
+ getSnapshot: (contractName) => effect.Ref.get(state).pipe(
556
+ effect.Effect.map((map) => {
557
+ const entry = map.get(contractName);
558
+ return entry ? computeSnapshot(entry) : null;
559
+ })
560
+ ),
561
+ getAllSnapshots: () => effect.Ref.get(state).pipe(
562
+ effect.Effect.map((map) => Array.from(map.values()).map(computeSnapshot))
563
+ )
564
+ };
565
+ })
566
+ );
567
+
568
+ // src/services/ProgressRenderer.ts
569
+ var formatDuration = (ms) => {
570
+ const totalSec = Math.floor(ms / 1e3);
571
+ const h = Math.floor(totalSec / 3600);
572
+ const m = Math.floor(totalSec % 3600 / 60);
573
+ const s = totalSec % 60;
574
+ const pad = (n) => String(n).padStart(2, "0");
575
+ return h > 0 ? `${pad(h)}:${pad(m)}:${pad(s)}` : `${pad(m)}:${pad(s)}`;
576
+ };
577
+ var formatNumber = (n) => n.toLocaleString("en-US", { maximumFractionDigits: 0 });
578
+ var formatRate = (n) => n >= 100 ? formatNumber(Math.round(n)) : n.toLocaleString("en-US", { maximumFractionDigits: 1 });
579
+ var buildLine = (snap, config) => {
580
+ const pct = snap.percentage.toFixed(1);
581
+ const processed = formatNumber(Number(snap.processedBlocks));
582
+ const total = formatNumber(Number(snap.totalBlocks));
583
+ const bps = formatRate(snap.blocksPerSecond);
584
+ const eps = formatRate(snap.eventsPerSecond);
585
+ const eta = snap.etaMs !== null ? formatDuration(snap.etaMs) : "--:--";
586
+ const p = config.network.logs.parallelRequests;
587
+ const chunk = config.network.logs.chunkSize;
588
+ return `[Backfill] ${snap.contractName} ${pct}% | ${processed}/${total} blocks | ${bps} blk/s | ${eps} ev/s | ETA ${eta} | p=${p} | chunk=${chunk}`;
589
+ };
590
+ var buildSummaryLine = (snap, config) => {
591
+ const total = formatNumber(Number(snap.totalBlocks));
592
+ const events = formatNumber(snap.totalEvents);
593
+ const chunks = snap.chunkCount;
594
+ const dur = formatDuration(snap.elapsedMs);
595
+ const bps = formatRate(snap.blocksPerSecond);
596
+ const eps = formatRate(snap.eventsPerSecond);
597
+ const p = config.network.logs.parallelRequests;
598
+ const chunk = config.network.logs.chunkSize;
599
+ return `[Backfill complete] ${snap.contractName}: ${total} blocks | ${events} events | ${chunks} chunks | ${dur} (${bps} blk/s, ${eps} ev/s) | p=${p} | chunkSize=${chunk}`;
600
+ };
601
+ var ProgressRenderer = class extends effect.Context.Tag(
602
+ "effective-indexer/ProgressRenderer"
603
+ )() {
604
+ };
605
+ var ProgressRendererLive = effect.Layer.effect(
606
+ ProgressRenderer,
607
+ effect.Effect.gen(function* () {
608
+ const config = yield* Config;
609
+ const reporter = yield* ProgressReporter;
610
+ const fiberRef = yield* effect.Ref.make(
611
+ effect.Option.none()
612
+ );
613
+ const enabled = config.enableTelemetry && config.telemetry.progress.enabled;
614
+ const intervalMs = config.telemetry.progress.intervalMs;
615
+ const isTTY = typeof process !== "undefined" && !!process.stdout?.isTTY;
616
+ const renderOnce = effect.Effect.gen(function* () {
617
+ const snapshots = yield* reporter.getAllSnapshots();
618
+ if (snapshots.length === 0) return;
619
+ if (isTTY) {
620
+ const { default: logUpdate } = yield* effect.Effect.promise(
621
+ () => import('log-update')
622
+ );
623
+ const pc = yield* effect.Effect.promise(() => import('picocolors'));
624
+ const lines = snapshots.map((s) => {
625
+ const raw = buildLine(s, config);
626
+ return pc.default.cyan(raw);
627
+ });
628
+ logUpdate(lines.join("\n"));
629
+ } else {
630
+ for (const s of snapshots) {
631
+ yield* effect.Effect.log(buildLine(s, config));
632
+ }
633
+ }
634
+ });
635
+ return {
636
+ startRendering: () => {
637
+ if (!enabled) return effect.Effect.void;
638
+ return effect.Effect.gen(function* () {
639
+ const fiber = yield* renderOnce.pipe(
640
+ effect.Effect.repeat(effect.Schedule.spaced(intervalMs)),
641
+ effect.Effect.asVoid,
642
+ effect.Effect.catchAll(() => effect.Effect.void),
643
+ effect.Effect.fork
644
+ );
645
+ yield* effect.Ref.set(fiberRef, effect.Option.some(fiber));
646
+ });
647
+ },
648
+ stopRendering: () => {
649
+ if (!enabled) return effect.Effect.void;
650
+ return effect.Effect.gen(function* () {
651
+ const maybeFiber = yield* effect.Ref.get(fiberRef);
652
+ if (effect.Option.isSome(maybeFiber)) {
653
+ yield* effect.Fiber.interrupt(maybeFiber.value);
654
+ yield* effect.Ref.set(fiberRef, effect.Option.none());
655
+ if (isTTY) {
656
+ const { default: logUpdate } = yield* effect.Effect.promise(
657
+ () => import('log-update')
658
+ );
659
+ logUpdate.clear();
660
+ }
661
+ }
662
+ });
663
+ },
664
+ renderFinalSummary: (snapshot, cfg) => {
665
+ if (!enabled) return effect.Effect.void;
666
+ return effect.Effect.log(buildSummaryLine(snapshot, cfg));
667
+ }
668
+ };
669
+ })
670
+ );
456
671
  var buildTopicFilter = (abi, eventNames) => {
457
672
  const topics = [];
458
673
  for (const name of eventNames) {
@@ -484,6 +699,7 @@ var fetchLogs = (params) => effect.Stream.unwrap(
484
699
  if (chunks.length === 0) {
485
700
  return effect.Stream.empty;
486
701
  }
702
+ const concurrency = config.network.logs.parallelRequests;
487
703
  return effect.Stream.fromIterable(chunks).pipe(
488
704
  effect.Stream.mapEffect(
489
705
  (chunk) => effect.Effect.gen(function* () {
@@ -501,7 +717,9 @@ var fetchLogs = (params) => effect.Stream.unwrap(
501
717
  const n = yield* effect.Ref.getAndUpdate(attempt, (a) => a + 1);
502
718
  const rawDelay = 2 ** n * baseDelayMs;
503
719
  const delayMs = Math.min(rawDelay, maxDelayMs);
504
- yield* effect.Effect.logDebug("RPC getLogs failed, retrying").pipe(
720
+ yield* effect.Effect.logDebug(
721
+ "RPC getLogs failed, retrying"
722
+ ).pipe(
505
723
  effect.Effect.annotateLogs({
506
724
  method: "eth_getLogs",
507
725
  from: chunk.from.toString(),
@@ -534,7 +752,8 @@ var fetchLogs = (params) => effect.Stream.unwrap(
534
752
  )
535
753
  )
536
754
  );
537
- })
755
+ }),
756
+ { concurrency }
538
757
  )
539
758
  );
540
759
  })
@@ -614,12 +833,15 @@ var ReorgDetectorLive = effect.Layer.effect(
614
833
  // src/pipeline/Indexer.ts
615
834
  var indexContract = (contract) => effect.Stream.unwrap(
616
835
  effect.Effect.gen(function* () {
836
+ const config = yield* Config;
617
837
  const rpc = yield* RpcProvider;
618
838
  const decoder = yield* EventDecoder;
619
839
  const checkpoint = yield* CheckpointManager;
620
840
  const reorgDetector = yield* ReorgDetector;
621
841
  const storage = yield* Storage;
622
842
  const blockCursor = yield* BlockCursor;
843
+ const progress = yield* ProgressReporter;
844
+ const renderer = yield* ProgressRenderer;
623
845
  const startBlock = yield* checkpoint.getStartBlock(
624
846
  contract.name,
625
847
  contract.startBlock ?? 0n
@@ -632,7 +854,13 @@ var indexContract = (contract) => effect.Stream.unwrap(
632
854
  })
633
855
  );
634
856
  const topics = buildTopicFilter(contract.abi, contract.events);
635
- const backfillStream = startBlock <= currentHead ? fetchLogs({
857
+ const needsBackfill = startBlock <= currentHead;
858
+ const totalBackfillBlocks = currentHead - startBlock + 1n;
859
+ if (needsBackfill) {
860
+ yield* progress.start(contract.name, totalBackfillBlocks);
861
+ }
862
+ const processedBlocksRef = yield* effect.Ref.make(0n);
863
+ const backfillStream = needsBackfill ? fetchLogs({
636
864
  address: contract.address,
637
865
  topics,
638
866
  fromBlock: startBlock,
@@ -681,6 +909,18 @@ var indexContract = (contract) => effect.Stream.unwrap(
681
909
  lastBlock.blockHash
682
910
  );
683
911
  }
912
+ const chunkSize = BigInt(config.network.logs.chunkSize);
913
+ const lastProcessed = yield* effect.Ref.modify(processedBlocksRef, (current) => {
914
+ const advanced = current + chunkSize;
915
+ const next = advanced > totalBackfillBlocks ? totalBackfillBlocks : advanced;
916
+ return [next, next];
917
+ });
918
+ yield* progress.update(
919
+ contract.name,
920
+ lastProcessed,
921
+ withTimestamp.length
922
+ );
923
+ yield* progress.incrementChunks(contract.name);
684
924
  yield* effect.Effect.logDebug("Chunk indexed").pipe(
685
925
  effect.Effect.annotateLogs({
686
926
  events: withTimestamp.length.toString(),
@@ -760,27 +1000,37 @@ var indexContract = (contract) => effect.Stream.unwrap(
760
1000
  ),
761
1001
  effect.Stream.flatMap(effect.Stream.fromIterable)
762
1002
  );
1003
+ const backfillTransition = needsBackfill ? effect.Stream.execute(
1004
+ effect.Effect.gen(function* () {
1005
+ const snapshot = yield* progress.getSnapshot(contract.name);
1006
+ if (snapshot) {
1007
+ yield* renderer.renderFinalSummary(snapshot, config);
1008
+ }
1009
+ yield* progress.finish(contract.name);
1010
+ yield* effect.Effect.log("Backfill complete, switching to live");
1011
+ })
1012
+ ) : effect.Stream.execute(effect.Effect.log("Backfill complete, switching to live"));
763
1013
  return effect.Stream.concat(
764
1014
  backfillStream,
765
- effect.Stream.concat(
766
- effect.Stream.execute(effect.Effect.log("Backfill complete, switching to live")),
767
- liveStream
768
- )
1015
+ effect.Stream.concat(backfillTransition, liveStream)
769
1016
  );
770
1017
  }).pipe(effect.Effect.annotateLogs("contract", contract.name))
771
1018
  );
772
1019
  var runIndexer = effect.Effect.gen(function* () {
773
1020
  const config = yield* Config;
774
1021
  const storage = yield* Storage;
1022
+ const renderer = yield* ProgressRenderer;
775
1023
  yield* storage.initialize.pipe(effect.Effect.withLogSpan("storage_init"));
776
1024
  yield* effect.Effect.log("Indexer starting").pipe(
777
1025
  effect.Effect.annotateLogs({
778
1026
  contracts: config.contracts.length.toString()
779
1027
  })
780
1028
  );
1029
+ yield* renderer.startRendering();
781
1030
  const streams = config.contracts.map((c) => indexContract(c));
782
1031
  yield* effect.Stream.mergeAll(streams, { concurrency: streams.length }).pipe(
783
1032
  effect.Stream.runDrain,
1033
+ effect.Effect.ensuring(renderer.stopRendering()),
784
1034
  effect.Effect.tapError(
785
1035
  (err) => effect.Effect.logError("Indexer error").pipe(
786
1036
  effect.Effect.annotateLogs({
@@ -870,6 +1120,10 @@ var buildLayers = (config) => {
870
1120
  effect.Layer.provide(effect.Layer.merge(StorageLayer, ConfigLayer))
871
1121
  );
872
1122
  const QueryLayer = QueryApiLive.pipe(effect.Layer.provide(StorageLayer));
1123
+ const ProgressReporterLayer = ProgressReporterLive;
1124
+ const ProgressRendererLayer = ProgressRendererLive.pipe(
1125
+ effect.Layer.provide(effect.Layer.merge(ConfigLayer, ProgressReporterLayer))
1126
+ );
873
1127
  return effect.Layer.mergeAll(
874
1128
  ConfigLayer,
875
1129
  StorageLayer,
@@ -879,6 +1133,8 @@ var buildLayers = (config) => {
879
1133
  CursorLayer,
880
1134
  ReorgLayer,
881
1135
  QueryLayer,
1136
+ ProgressReporterLayer,
1137
+ ProgressRendererLayer,
882
1138
  LoggerLayer
883
1139
  );
884
1140
  };
@@ -952,6 +1208,10 @@ exports.EventDecoder = EventDecoder;
952
1208
  exports.EventDecoderLive = EventDecoderLive;
953
1209
  exports.Indexer = Indexer;
954
1210
  exports.LoggerLive = LoggerLive;
1211
+ exports.ProgressRenderer = ProgressRenderer;
1212
+ exports.ProgressRendererLive = ProgressRendererLive;
1213
+ exports.ProgressReporter = ProgressReporter;
1214
+ exports.ProgressReporterLive = ProgressReporterLive;
955
1215
  exports.QueryApi = QueryApi;
956
1216
  exports.QueryApiLive = QueryApiLive;
957
1217
  exports.ReorgDetector = ReorgDetector;
@@ -960,6 +1220,7 @@ exports.RpcProvider = RpcProvider;
960
1220
  exports.RpcProviderLive = RpcProviderLive;
961
1221
  exports.Storage = Storage;
962
1222
  exports.StorageLive = StorageLive;
1223
+ exports.computeSnapshot = computeSnapshot;
963
1224
  exports.createIndexer = createIndexer;
964
1225
  exports.resolveConfig = resolveConfig;
965
1226
  //# sourceMappingURL=index.cjs.map