effective-indexer 0.2.0 → 0.2.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -2,6 +2,8 @@
2
2
 
3
3
  Lightweight EVM smart contract event indexer built with [Effect](https://effect.website).
4
4
 
5
+ Repository: [github.com/cybervoid0/effective-indexer](https://github.com/cybervoid0/effective-indexer)
6
+
5
7
  Indexes smart contract events into SQLite with:
6
8
  - Historical backfill (`eth_getLogs` in chunks)
7
9
  - Live polling for new blocks
@@ -93,6 +95,7 @@ Returns `IndexerHandle`:
93
95
  | `dbPath` | `string` | `"./indexer.db"` | SQLite database path |
94
96
  | `contracts` | `ContractConfig[]` | — | Contracts to index |
95
97
  | `network` | `NetworkConfig` | see below | Network tuning |
98
+ | `telemetry` | `TelemetryConfig` | see below | Backfill progress settings |
96
99
  | `logLevel` | `string` | `"info"` | Minimum log level |
97
100
  | `logFormat` | `string` | `"pretty"` | Log output format |
98
101
  | `enableTelemetry` | `boolean` | `true` | Set `false` for errors-only |
@@ -102,25 +105,196 @@ Returns `IndexerHandle`:
102
105
  ```ts
103
106
  {
104
107
  polling: {
105
- intervalMs: 12000, // block polling interval
106
- confirmations: 1, // blocks behind head to consider confirmed
108
+ intervalMs: 12000, // block polling interval
109
+ confirmations: 1, // blocks behind head to consider confirmed
107
110
  },
108
111
  logs: {
109
- chunkSize: 5000, // blocks per eth_getLogs request
110
- maxRetries: 5, // retry count on RPC failure
112
+ chunkSize: 5000, // blocks per eth_getLogs request
113
+ maxRetries: 5, // retry count on RPC failure
114
+ parallelRequests: 1, // concurrent eth_getLogs requests during backfill
111
115
  retry: {
112
- baseDelayMs: 1000, // initial retry delay
113
- maxDelayMs: 30000, // cap for exponential backoff
116
+ baseDelayMs: 1000, // initial retry delay
117
+ maxDelayMs: 30000, // cap for exponential backoff
114
118
  },
115
119
  },
116
120
  reorg: {
117
- depth: 20, // block hash buffer depth for reorg detection
121
+ depth: 20, // block hash buffer depth for reorg detection
118
122
  },
119
123
  }
120
124
  ```
121
125
 
122
126
  All fields are optional — defaults are shown above.
123
127
 
128
+ ### `TelemetryConfig`
129
+
130
+ ```ts
131
+ {
132
+ telemetry: {
133
+ progress: {
134
+ enabled: true, // show backfill progress in terminal
135
+ intervalMs: 3000, // progress update frequency (ms, minimum 500)
136
+ },
137
+ },
138
+ }
139
+ ```
140
+
141
+ `enableTelemetry: false` disables progress rendering and keeps error-level logs only.
142
+
143
+ When enabled, the indexer displays a live progress line during backfill:
144
+
145
+ ```
146
+ [Backfill] Token 42.8% | 1,234,000/2,880,000 blocks | 3,450 blk/s | 12.4 ev/s | ETA 00:07:43 | p=3 | chunk=5000
147
+ ```
148
+
149
+ In non-TTY environments, periodic info logs are emitted instead. A final summary is logged when backfill completes:
150
+
151
+ ```
152
+ [Backfill complete] Token: 2,880,000 blocks | 45,230 events | 312 chunks | 00:13:54 (3,453 blk/s, 54.2 ev/s) | p=3 | chunkSize=5000
153
+ ```
154
+
155
+ ## Worker Setup (Recommended)
156
+
157
+ Run the indexer as a dedicated long-lived worker process (not in request handlers).
158
+
159
+ Create `scripts/indexer.ts`:
160
+
161
+ ```ts
162
+ import { Indexer } from "effective-indexer"
163
+ import type { Abi } from "viem"
164
+
165
+ const transferAbi: Abi = [
166
+ {
167
+ type: "event",
168
+ name: "Transfer",
169
+ inputs: [
170
+ { indexed: true, name: "from", type: "address" },
171
+ { indexed: true, name: "to", type: "address" },
172
+ { indexed: false, name: "value", type: "uint256" },
173
+ ],
174
+ },
175
+ ]
176
+
177
+ const indexer = Indexer.create({
178
+ rpcUrl: process.env.EVM_RPC_URL!,
179
+ dbPath: process.env.INDEXER_DB_PATH ?? "./data/events.db",
180
+ contracts: [
181
+ {
182
+ name: process.env.INDEXER_CONTRACT_NAME ?? "Token",
183
+ address: process.env.INDEXER_CONTRACT_ADDRESS!,
184
+ abi: transferAbi,
185
+ events: ["Transfer"],
186
+ startBlock: BigInt(process.env.INDEXER_START_BLOCK ?? "0"),
187
+ },
188
+ ],
189
+ network: {
190
+ polling: {
191
+ intervalMs: Number(process.env.INDEXER_POLL_INTERVAL_MS ?? "12000"),
192
+ confirmations: Number(process.env.INDEXER_CONFIRMATIONS ?? "1"),
193
+ },
194
+ logs: {
195
+ chunkSize: Number(process.env.INDEXER_CHUNK_SIZE ?? "2000"),
196
+ parallelRequests: Number(process.env.INDEXER_PARALLEL_REQUESTS ?? "1"),
197
+ maxRetries: Number(process.env.INDEXER_MAX_RETRIES ?? "5"),
198
+ retry: {
199
+ baseDelayMs: Number(process.env.INDEXER_RETRY_BASE_MS ?? "1000"),
200
+ maxDelayMs: Number(process.env.INDEXER_RETRY_MAX_MS ?? "30000"),
201
+ },
202
+ },
203
+ reorg: {
204
+ depth: Number(process.env.INDEXER_REORG_DEPTH ?? "20"),
205
+ },
206
+ },
207
+ telemetry: {
208
+ progress: {
209
+ enabled: process.env.INDEXER_PROGRESS_ENABLED !== "false",
210
+ intervalMs: Number(process.env.INDEXER_PROGRESS_INTERVAL_MS ?? "3000"),
211
+ },
212
+ },
213
+ logLevel: (process.env.INDEXER_LOG_LEVEL ?? "info") as
214
+ | "trace"
215
+ | "debug"
216
+ | "info"
217
+ | "warning"
218
+ | "error"
219
+ | "none",
220
+ logFormat: (process.env.INDEXER_LOG_FORMAT ?? "pretty") as
221
+ | "pretty"
222
+ | "json"
223
+ | "structured",
224
+ enableTelemetry: process.env.INDEXER_TELEMETRY !== "false",
225
+ })
226
+
227
+ const start = async (): Promise<void> => {
228
+ await indexer.start()
229
+ console.log("Indexer worker started")
230
+
231
+ // Keep the process alive while indexing continues in the background.
232
+ const keepAlive = setInterval(() => undefined, 60_000)
233
+
234
+ const stop = async (): Promise<void> => {
235
+ clearInterval(keepAlive)
236
+ await indexer.stop()
237
+ process.exit(0)
238
+ }
239
+
240
+ process.on("SIGINT", () => {
241
+ void stop()
242
+ })
243
+ process.on("SIGTERM", () => {
244
+ void stop()
245
+ })
246
+ }
247
+
248
+ start().catch(error => {
249
+ console.error("Indexer worker failed:", error)
250
+ process.exit(1)
251
+ })
252
+ ```
253
+
254
+ Create `.env`:
255
+
256
+ ```bash
257
+ EVM_RPC_URL=https://your-rpc-url
258
+ INDEXER_DB_PATH=./data/events.db
259
+ INDEXER_CONTRACT_NAME=Token
260
+ INDEXER_CONTRACT_ADDRESS=0xYourContractAddress
261
+ INDEXER_START_BLOCK=0
262
+ INDEXER_POLL_INTERVAL_MS=12000
263
+ INDEXER_CONFIRMATIONS=1
264
+ INDEXER_CHUNK_SIZE=2000
265
+ INDEXER_PARALLEL_REQUESTS=1
266
+ INDEXER_MAX_RETRIES=5
267
+ INDEXER_RETRY_BASE_MS=1000
268
+ INDEXER_RETRY_MAX_MS=30000
269
+ INDEXER_REORG_DEPTH=20
270
+ INDEXER_PROGRESS_ENABLED=true
271
+ INDEXER_PROGRESS_INTERVAL_MS=3000
272
+ INDEXER_LOG_LEVEL=info
273
+ INDEXER_LOG_FORMAT=pretty
274
+ INDEXER_TELEMETRY=true
275
+ ```
276
+
277
+ Add scripts (with `tsx` installed):
278
+
279
+ ```bash
280
+ npm install -D tsx
281
+ ```
282
+
283
+ ```json
284
+ {
285
+ "scripts": {
286
+ "indexer:start": "node --import tsx ./scripts/indexer.ts",
287
+ "indexer:debug": "INDEXER_LOG_LEVEL=debug node --import tsx ./scripts/indexer.ts"
288
+ }
289
+ }
290
+ ```
291
+
292
+ Run:
293
+
294
+ ```bash
295
+ npm run indexer:start
296
+ ```
297
+
124
298
  ### Network Tuning Profiles
125
299
 
126
300
  | Chain | `polling.intervalMs` | `polling.confirmations` | `logs.chunkSize` | `reorg.depth` |
@@ -167,6 +341,35 @@ The indexer uses Effect's native logging system. All log output is controlled vi
167
341
  - On restart, the indexer resumes from checkpoint and backfills missed blocks.
168
342
  - If RPC does not support `eth_getLogs`, indexing cannot work.
169
343
 
344
+ ## Parallel Backfill
345
+
346
+ Set `network.logs.parallelRequests` to speed up historical backfill by issuing multiple `eth_getLogs` requests concurrently. Chunk ordering is preserved regardless of concurrency.
347
+
348
+ ```ts
349
+ const indexer = Indexer.create({
350
+ rpcUrl: "https://eth.llamarpc.com",
351
+ contracts: [/* ... */],
352
+ network: {
353
+ logs: {
354
+ chunkSize: 2000,
355
+ parallelRequests: 4,
356
+ },
357
+ },
358
+ })
359
+ ```
360
+
361
+ **Recommended values**: Start with `parallelRequests: 3` and increase if the RPC allows. Public endpoints may rate-limit you beyond 5–10 concurrent requests.
362
+
363
+ ### Benchmarking
364
+
365
+ To measure the effect of parallelism:
366
+
367
+ 1. Use a fixed RPC endpoint and contract/block range
368
+ 2. Start with an empty database each run
369
+ 3. Compare `parallelRequests` values 1, 2, 3, 4
370
+ 4. Run 3 times each and take the median
371
+ 5. Use the progress summary line for timing: `[Backfill complete] ... blk/s`
372
+
170
373
  ## Development
171
374
 
172
375
  ```bash