@thru/indexer 0.1.38

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,1064 @@
1
+ import * as drizzle_orm_pg_core from 'drizzle-orm/pg-core';
2
+ import { PgTableWithColumns, PgColumn } from 'drizzle-orm/pg-core';
3
+ import { PostgresJsDatabase } from 'drizzle-orm/postgres-js';
4
+ import { z } from 'zod';
5
+ import { Filter, Event, AccountState, ChainClientFactory } from '@thru/replay';
6
+ import { OpenAPIHono, z as z$1 } from '@hono/zod-openapi';
7
+
8
+ /**
9
+ * Schema type definitions with improved type safety.
10
+ * Uses branded types and conditional inference for better DX.
11
+ */
12
+
13
+ /** Supported column data types */
14
+ type ColumnType = "text" | "bigint" | "integer" | "boolean" | "timestamp";
15
+ /** Internal modifiers for a column definition */
16
+ interface ColumnModifiers<T, TNull extends boolean = boolean> {
17
+ /** Phantom type for inference */
18
+ readonly _type: T;
19
+ /** Whether the column allows null - literal type for proper inference */
20
+ readonly _nullable: TNull;
21
+ /** Whether the column is indexed */
22
+ readonly _indexed: boolean;
23
+ /** Whether the column has a unique constraint */
24
+ readonly _unique: boolean;
25
+ /** Whether the column is the primary key */
26
+ readonly _primary: boolean;
27
+ /** Default value for the column */
28
+ readonly _default?: T;
29
+ /** Whether to use current timestamp as default */
30
+ readonly _defaultNow?: boolean;
31
+ /** Foreign key reference */
32
+ readonly _references?: {
33
+ table: PgTableWithColumns<any>;
34
+ column: string;
35
+ };
36
+ /** The underlying column type */
37
+ readonly _columnType: ColumnType;
38
+ }
39
+ /** A column definition with fluent modifiers */
40
+ interface ColumnDef<T, TNull extends boolean = true> extends ColumnModifiers<T, TNull> {
41
+ /** Mark column as NOT NULL - changes the type to non-nullable */
42
+ notNull(): ColumnDef<T, false>;
43
+ /** Add an index on this column */
44
+ index(): ColumnDef<T, TNull>;
45
+ /** Add a unique constraint */
46
+ unique(): ColumnDef<T, TNull>;
47
+ /** Mark as primary key (implies NOT NULL) */
48
+ primaryKey(): ColumnDef<T, false>;
49
+ /** Set a default value */
50
+ default(value: T): ColumnDef<T, TNull>;
51
+ /** Use current timestamp as default (only for timestamp columns) */
52
+ defaultNow(): ColumnDef<T, TNull>;
53
+ /** Add a foreign key reference */
54
+ references<TTable extends PgTableWithColumns<any>>(table: TTable | (() => TTable), column: keyof TTable["_"]["columns"]): ColumnDef<T, TNull>;
55
+ }
56
+ /** Any column definition (for generic constraints) */
57
+ type AnyColumnDef = ColumnDef<unknown, boolean>;
58
+ /** A schema is a record of column definitions */
59
+ type SchemaDefinition = Record<string, AnyColumnDef>;
60
+ /**
61
+ * Infer the row type from a schema definition.
62
+ * Handles nullable columns and preserves exact types.
63
+ * Uses conditional type to check the literal `_nullable` type parameter.
64
+ */
65
+ type InferRow<TSchema extends SchemaDefinition> = {
66
+ [K in keyof TSchema]: TSchema[K] extends ColumnDef<infer T, infer TNull> ? TNull extends false ? T : T | null : never;
67
+ };
68
+ /**
69
+ * Infer the insert type from a schema definition.
70
+ * Makes fields optional if they have defaults or are nullable.
71
+ */
72
+ type InferInsert<TSchema extends SchemaDefinition> = {
73
+ [K in keyof TSchema as TSchema[K] extends ColumnDef<unknown, false> ? TSchema[K]["_default"] extends undefined ? TSchema[K]["_defaultNow"] extends true ? never : K : never : never]: TSchema[K] extends ColumnDef<infer T, infer _TNull> ? T : never;
74
+ } & {
75
+ [K in keyof TSchema as TSchema[K] extends ColumnDef<unknown, true> ? K : TSchema[K]["_default"] extends undefined ? TSchema[K]["_defaultNow"] extends true ? K : never : K]?: TSchema[K] extends ColumnDef<infer T, infer _TNull> ? T | null : never;
76
+ };
77
+ /**
78
+ * Column accessors for use with Drizzle operators (eq, inArray, etc.).
79
+ * Maps field names to Drizzle column references.
80
+ *
81
+ * Note: We use `any` here because the full PgColumn type is complex
82
+ * and varies based on many internal Drizzle factors. The column
83
+ * references work correctly at runtime for use with eq(), inArray(), etc.
84
+ */
85
+ type Columns<TSchema extends SchemaDefinition> = {
86
+ [K in keyof TSchema]: PgColumn<any>;
87
+ };
88
+
89
+ /** Database client type - generic to work with any schema */
90
+ type DatabaseClient = PostgresJsDatabase<Record<string, unknown>>;
91
+
92
+ /**
93
+ * Fluent column builder API for defining schemas.
94
+ * Exports a global `t` object for concise schema definitions.
95
+ *
96
+ * @example
97
+ * ```ts
98
+ * import { t } from "@thru/indexer";
99
+ *
100
+ * const schema = {
101
+ * id: t.text().primaryKey(),
102
+ * slot: t.bigint().notNull().index(),
103
+ * amount: t.bigint().notNull(),
104
+ * timestamp: t.timestamp(),
105
+ * };
106
+ * ```
107
+ */
108
+
109
+ /**
110
+ * Interface for the column builder.
111
+ * Each method creates a new column definition of the appropriate type.
112
+ */
113
+ interface ColumnBuilder {
114
+ /** Text column (varchar) */
115
+ text(): ColumnDef<string, true>;
116
+ /** BigInt column (64-bit integer) - use for slots, amounts */
117
+ bigint(): ColumnDef<bigint, true>;
118
+ /** Integer column (32-bit) */
119
+ integer(): ColumnDef<number, true>;
120
+ /** Boolean column */
121
+ boolean(): ColumnDef<boolean, true>;
122
+ /** Timestamp column with timezone */
123
+ timestamp(): ColumnDef<Date, true>;
124
+ }
125
+ /**
126
+ * Global column builder for defining schemas.
127
+ *
128
+ * @example
129
+ * ```ts
130
+ * const schema = {
131
+ * id: t.text().primaryKey(),
132
+ * slot: t.bigint().notNull().index(),
133
+ * name: t.text(),
134
+ * active: t.boolean().notNull().default(true),
135
+ * createdAt: t.timestamp().notNull().defaultNow(),
136
+ * };
137
+ * ```
138
+ */
139
+ declare const t: ColumnBuilder;
140
+ /**
141
+ * Alternative export for the column builder.
142
+ * Same as `t` but with a more descriptive name.
143
+ */
144
+ declare const columnBuilder: ColumnBuilder;
145
+
146
+ /**
147
+ * Runtime validation for parsed data.
148
+ * Generates Zod schemas from column definitions for validation.
149
+ */
150
+
151
+ /**
152
+ * Generate a Zod schema from column definitions.
153
+ * Used for runtime validation of parse output.
154
+ */
155
+ declare function generateZodSchema<TSchema extends SchemaDefinition>(schema: TSchema): z.ZodObject<any>;
156
+ /**
157
+ * Validate parsed data against schema.
158
+ * Returns validation result with detailed errors.
159
+ */
160
+ declare function validateParsedData<TSchema extends SchemaDefinition>(schema: TSchema, data: unknown, streamName: string): {
161
+ success: true;
162
+ data: unknown;
163
+ } | {
164
+ success: false;
165
+ error: string;
166
+ };
167
+
168
+ /**
169
+ * Shared types for the indexer library.
170
+ */
171
+
172
+ /**
173
+ * API configuration for route generation.
174
+ * Used by event streams, account streams, and tables.
175
+ */
176
+ interface ApiConfig {
177
+ /** Enable API routes (default: true) */
178
+ enabled?: boolean;
179
+ /** Fields that can be filtered via query params */
180
+ filters?: string[];
181
+ /** Primary key field name (default: "id" for event streams, "address" for account streams) */
182
+ idField?: string;
183
+ }
184
+ /**
185
+ * A batch of parsed events ready to commit.
186
+ * Used by the stream processor for batching before database writes.
187
+ */
188
+ interface StreamBatch<T = unknown> {
189
+ /** The slot these events belong to */
190
+ slot: bigint;
191
+ /** The parsed events in this batch */
192
+ events: T[];
193
+ }
194
+ /**
195
+ * Context passed to stream hooks (filterBatch, onCommit).
196
+ */
197
+ interface HookContext {
198
+ /** Database client for queries */
199
+ db: DatabaseClient;
200
+ }
201
+
202
+ /**
203
+ * Event stream type definitions.
204
+ */
205
+
206
+ /**
207
+ * Definition input for creating an event stream.
208
+ * This is what users pass to defineEventStream().
209
+ */
210
+ interface EventStreamDefinition<TSchema extends SchemaDefinition> {
211
+ /** Unique stream name (used for table name, checkpointing) */
212
+ name: string;
213
+ /** Human-readable description */
214
+ description?: string;
215
+ /**
216
+ * Schema definition using the column builder.
217
+ * Defines the structure of the indexed event data.
218
+ *
219
+ * @example
220
+ * ```ts
221
+ * schema: {
222
+ * id: t.text().primaryKey(),
223
+ * slot: t.bigint().notNull().index(),
224
+ * amount: t.bigint().notNull(),
225
+ * }
226
+ * ```
227
+ */
228
+ schema: TSchema;
229
+ /**
230
+ * CEL filter for @thru/replay.
231
+ * Determines which events this stream receives.
232
+ *
233
+ * Use `filter` for direct filter values, or `filterFactory` for lazy loading
234
+ * (required for drizzle-kit compatibility when config isn't available at import time).
235
+ *
236
+ * @example
237
+ * ```ts
238
+ * // Direct filter (simpler, but requires config at import time)
239
+ * filter: create(FilterSchema, {
240
+ * expression: "event.program.value == params.address",
241
+ * params: { address: ... }
242
+ * })
243
+ *
244
+ * // Factory pattern (drizzle-kit compatible)
245
+ * filterFactory: () => create(FilterSchema, { ... loadConfig() ... })
246
+ * ```
247
+ */
248
+ filter?: Filter;
249
+ /**
250
+ * Lazy filter factory for drizzle-kit compatibility.
251
+ * Called once when the indexer starts.
252
+ */
253
+ filterFactory?: () => Filter;
254
+ /**
255
+ * Parse a raw event into a table row.
256
+ * Return null to skip the event.
257
+ *
258
+ * @param event - Raw event from the chain
259
+ * @returns Parsed row data or null to skip
260
+ */
261
+ parse: (event: Event) => InferRow<TSchema> | null;
262
+ /** API configuration (used for route generation) */
263
+ api?: ApiConfig;
264
+ /**
265
+ * Filter batch before committing to database.
266
+ * Use to filter events based on external state (e.g., registered accounts).
267
+ *
268
+ * @param events - Parsed events about to be committed
269
+ * @param ctx - Hook context with database access
270
+ * @returns Filtered array of events to commit
271
+ */
272
+ filterBatch?: (events: InferRow<TSchema>[], ctx: HookContext) => Promise<InferRow<TSchema>[]>;
273
+ /**
274
+ * Called after a batch of events is committed to the database.
275
+ * Use for side effects like notifications, webhooks, analytics.
276
+ * Errors are logged but do not block indexing.
277
+ *
278
+ * @param batch - The committed batch with slot and events
279
+ * @param ctx - Hook context with database access
280
+ */
281
+ onCommit?: (batch: StreamBatch<InferRow<TSchema>>, ctx: HookContext) => Promise<void>;
282
+ }
283
+ /**
284
+ * A compiled event stream with table and methods.
285
+ * This is what defineEventStream() returns.
286
+ */
287
+ interface EventStream<TSchema extends SchemaDefinition = SchemaDefinition> {
288
+ /** Stream name */
289
+ readonly name: string;
290
+ /** Human-readable description */
291
+ readonly description: string;
292
+ /** The schema definition (for validation) */
293
+ readonly schema: TSchema;
294
+ /** The Drizzle table for this stream */
295
+ readonly table: PgTableWithColumns<any>;
296
+ /**
297
+ * Typed column accessors for use with Drizzle operators.
298
+ * Use with eq(), inArray(), etc.
299
+ *
300
+ * @example
301
+ * ```ts
302
+ * db.select().from(stream.table).where(eq(stream.c.source, address))
303
+ * ```
304
+ */
305
+ readonly c: Columns<TSchema>;
306
+ /** Get the CEL filter for @thru/replay (resolves lazy factory if used) */
307
+ getFilter(): Filter;
308
+ /** Parse function */
309
+ readonly parse: (event: Event) => InferRow<TSchema> | null;
310
+ /** API configuration */
311
+ readonly api?: ApiConfig;
312
+ /** Filter batch hook */
313
+ readonly filterBatch?: (events: InferRow<TSchema>[], ctx: HookContext) => Promise<InferRow<TSchema>[]>;
314
+ /** Post-commit hook */
315
+ readonly onCommit?: (batch: StreamBatch<InferRow<TSchema>>, ctx: HookContext) => Promise<void>;
316
+ }
317
+
318
+ /**
319
+ * Define an event stream for indexing blockchain events.
320
+ *
321
+ * Event streams index historical, immutable event data from the chain.
322
+ * Each event is stored once and never updated.
323
+ *
324
+ * @example
325
+ * ```ts
326
+ * import { defineEventStream, t } from "@thru/indexer";
327
+ * import { create } from "@bufbuild/protobuf";
328
+ * import { FilterSchema, type Event } from "@thru/replay";
329
+ *
330
+ * export const transfers = defineEventStream({
331
+ * name: "transfers",
332
+ *
333
+ * schema: {
334
+ * id: t.text().primaryKey(),
335
+ * slot: t.bigint().notNull().index(),
336
+ * source: t.text().notNull().index(),
337
+ * dest: t.text().notNull().index(),
338
+ * amount: t.bigint().notNull(),
339
+ * },
340
+ *
341
+ * filter: create(FilterSchema, {
342
+ * expression: "event.program.value == params.address",
343
+ * params: { address: ... },
344
+ * }),
345
+ *
346
+ * parse: (event: Event) => {
347
+ * if (!event.payload) return null;
348
+ * return {
349
+ * id: event.eventId,
350
+ * slot: event.slot!,
351
+ * source: decodeAddress(event.payload, 0),
352
+ * dest: decodeAddress(event.payload, 32),
353
+ * amount: decodeBigint(event.payload, 64),
354
+ * };
355
+ * },
356
+ *
357
+ * api: { filters: ["source", "dest"] },
358
+ * });
359
+ *
360
+ * // Export the table for Drizzle migrations
361
+ * export const transfersTable = transfers.table;
362
+ * ```
363
+ */
364
+
365
+ /**
366
+ * Define an event stream for indexing blockchain events.
367
+ *
368
+ * @param definition - Stream definition with schema, filter, and parse function
369
+ * @returns A compiled event stream ready for use with the indexer
370
+ */
371
+ declare function defineEventStream<TSchema extends SchemaDefinition>(definition: EventStreamDefinition<TSchema>): EventStream<TSchema>;
372
+
373
+ /**
374
+ * Event stream processor.
375
+ *
376
+ * Processes events from @thru/replay and commits them to the database
377
+ * with batching and checkpointing.
378
+ */
379
+
380
+ interface ProcessorStats {
381
+ /** Total events processed */
382
+ eventsProcessed: number;
383
+ /** Total batches committed */
384
+ batchesCommitted: number;
385
+ /** Last slot processed */
386
+ lastSlot: bigint | null;
387
+ }
388
+
389
+ /**
390
+ * Account stream type definitions.
391
+ */
392
+
393
+ /**
394
+ * Definition input for creating an account stream.
395
+ * This is what users pass to defineAccountStream().
396
+ */
397
+ interface AccountStreamDefinition<TSchema extends SchemaDefinition> {
398
+ /** Unique stream name (used for table name, checkpointing) */
399
+ name: string;
400
+ /** Human-readable description */
401
+ description?: string;
402
+ /**
403
+ * Owner program address bytes.
404
+ * Filters accounts by their owner program.
405
+ *
406
+ * Use `ownerProgram` for direct values, or `ownerProgramFactory` for lazy loading
407
+ * (required for drizzle-kit compatibility when config isn't available at import time).
408
+ */
409
+ ownerProgram?: Uint8Array;
410
+ /**
411
+ * Lazy factory for owner program address.
412
+ * Called once when the indexer starts.
413
+ */
414
+ ownerProgramFactory?: () => Uint8Array;
415
+ /** Expected account data size (for filtering by account type) */
416
+ expectedSize?: number;
417
+ /**
418
+ * Additional data sizes to accept.
419
+ * Use when an account type can have multiple valid sizes.
420
+ */
421
+ dataSizes?: number[];
422
+ /**
423
+ * Schema definition using the column builder.
424
+ * Defines the structure of the indexed account data.
425
+ *
426
+ * @example
427
+ * ```ts
428
+ * schema: {
429
+ * address: t.text().primaryKey(),
430
+ * mint: t.text().notNull().index(),
431
+ * owner: t.text().notNull().index(),
432
+ * amount: t.bigint().notNull(),
433
+ * slot: t.bigint().notNull(),
434
+ * seq: t.bigint().notNull(),
435
+ * updatedAt: t.timestamp().notNull().defaultNow(),
436
+ * }
437
+ * ```
438
+ */
439
+ schema: TSchema;
440
+ /**
441
+ * Parse account state into a table row.
442
+ * Return null to skip the account.
443
+ *
444
+ * @param account - Account state from the chain
445
+ * @returns Parsed row data or null to skip
446
+ */
447
+ parse: (account: AccountState) => InferRow<TSchema> | null;
448
+ /** API configuration (used for route generation) */
449
+ api?: ApiConfig;
450
+ }
451
+ /**
452
+ * A compiled account stream with table and methods.
453
+ * This is what defineAccountStream() returns.
454
+ */
455
+ interface AccountStream<TSchema extends SchemaDefinition = SchemaDefinition> {
456
+ /** Stream name */
457
+ readonly name: string;
458
+ /** Human-readable description */
459
+ readonly description: string;
460
+ /** The schema definition (for validation) */
461
+ readonly schema: TSchema;
462
+ /** Get owner program address bytes (resolves lazy factory if used) */
463
+ getOwnerProgram(): Uint8Array;
464
+ /** Expected account data size */
465
+ readonly expectedSize?: number;
466
+ /** Additional data sizes to accept */
467
+ readonly dataSizes?: number[];
468
+ /** The Drizzle table for this stream */
469
+ readonly table: PgTableWithColumns<any>;
470
+ /**
471
+ * Typed column accessors for use with Drizzle operators.
472
+ * Use with eq(), inArray(), etc.
473
+ *
474
+ * @example
475
+ * ```ts
476
+ * db.select().from(stream.table).where(eq(stream.c.owner, address))
477
+ * ```
478
+ */
479
+ readonly c: Columns<TSchema>;
480
+ /** Parse function */
481
+ readonly parse: (account: AccountState) => InferRow<TSchema> | null;
482
+ /** API configuration */
483
+ readonly api?: ApiConfig;
484
+ }
485
+
486
+ /**
487
+ * Define an account stream for indexing on-chain account state.
488
+ *
489
+ * Account streams track current state of on-chain accounts.
490
+ * Unlike event streams (historical log), account streams are mutable -
491
+ * rows are updated when account state changes.
492
+ *
493
+ * @example
494
+ * ```ts
495
+ * import { defineAccountStream, t } from "@thru/indexer";
496
+ * import { decodeAddress } from "@thru/helpers";
497
+ * import type { AccountState } from "@thru/replay";
498
+ *
499
+ * export const tokenAccounts = defineAccountStream({
500
+ * name: "token-accounts",
501
+ *
502
+ * ownerProgram: decodeAddress(TOKEN_PROGRAM_PUBKEY),
503
+ * expectedSize: 73, // TokenAccount size in bytes
504
+ *
505
+ * schema: {
506
+ * address: t.text().primaryKey(),
507
+ * mint: t.text().notNull().index(),
508
+ * owner: t.text().notNull().index(),
509
+ * amount: t.bigint().notNull(),
510
+ * isFrozen: t.boolean().notNull(),
511
+ * slot: t.bigint().notNull(),
512
+ * seq: t.bigint().notNull(),
513
+ * updatedAt: t.timestamp().notNull().defaultNow(),
514
+ * },
515
+ *
516
+ * parse: (account: AccountState) => {
517
+ * if (account.data.length !== 73) return null;
518
+ * return {
519
+ * address: encodeAddress(account.address),
520
+ * mint: parseAddress(account.data, 0),
521
+ * owner: parseAddress(account.data, 32),
522
+ * amount: parseBigint(account.data, 64),
523
+ * isFrozen: account.data[72] !== 0,
524
+ * slot: account.slot,
525
+ * seq: account.seq,
526
+ * updatedAt: new Date(),
527
+ * };
528
+ * },
529
+ *
530
+ * api: { filters: ["mint", "owner"], idField: "address" },
531
+ * });
532
+ *
533
+ * // Export the table for Drizzle migrations
534
+ * export const tokenAccountsTable = tokenAccounts.table;
535
+ * ```
536
+ */
537
+
538
+ /**
539
+ * Define an account stream for indexing on-chain account state.
540
+ *
541
+ * @param definition - Stream definition with schema, ownerProgram, and parse function
542
+ * @returns A compiled account stream ready for use with the indexer
543
+ */
544
+ declare function defineAccountStream<TSchema extends SchemaDefinition>(definition: AccountStreamDefinition<TSchema>): AccountStream<TSchema>;
545
+
546
+ /**
547
+ * Account stream processor.
548
+ *
549
+ * Processes account state from @thru/replay and commits to the database
550
+ * with slot-aware upserts and checkpointing.
551
+ */
552
+
553
+ interface AccountProcessorStats {
554
+ /** Total accounts processed */
555
+ accountsProcessed: number;
556
+ /** Total accounts inserted/updated */
557
+ accountsUpdated: number;
558
+ /** Total accounts deleted */
559
+ accountsDeleted: number;
560
+ }
561
+
562
+ /**
563
+ * Checkpoint table schema.
564
+ *
565
+ * Users should export this from their Drizzle schema for migrations.
566
+ *
567
+ * @example
568
+ * ```ts
569
+ * // db/schema.ts
570
+ * import { checkpointTable } from "@thru/indexer";
571
+ * export { checkpointTable };
572
+ * ```
573
+ */
574
+ /**
575
+ * Per-stream checkpoint table.
576
+ * Each stream has its own checkpoint keyed by stream name.
577
+ *
578
+ * This table should be included in your Drizzle migrations.
579
+ */
580
+ declare const checkpointTable: drizzle_orm_pg_core.PgTableWithColumns<{
581
+ name: "indexer_checkpoints";
582
+ schema: undefined;
583
+ columns: {
584
+ streamName: drizzle_orm_pg_core.PgColumn<{
585
+ name: "stream_name";
586
+ tableName: "indexer_checkpoints";
587
+ dataType: "string";
588
+ columnType: "PgText";
589
+ data: string;
590
+ driverParam: string;
591
+ notNull: true;
592
+ hasDefault: false;
593
+ isPrimaryKey: true;
594
+ isAutoincrement: false;
595
+ hasRuntimeDefault: false;
596
+ enumValues: [string, ...string[]];
597
+ baseColumn: never;
598
+ identity: undefined;
599
+ generated: undefined;
600
+ }, {}, {}>;
601
+ lastIndexedSlot: drizzle_orm_pg_core.PgColumn<{
602
+ name: "last_indexed_slot";
603
+ tableName: "indexer_checkpoints";
604
+ dataType: "bigint";
605
+ columnType: "PgBigInt64";
606
+ data: bigint;
607
+ driverParam: string;
608
+ notNull: true;
609
+ hasDefault: false;
610
+ isPrimaryKey: false;
611
+ isAutoincrement: false;
612
+ hasRuntimeDefault: false;
613
+ enumValues: undefined;
614
+ baseColumn: never;
615
+ identity: undefined;
616
+ generated: undefined;
617
+ }, {}, {}>;
618
+ lastEventId: drizzle_orm_pg_core.PgColumn<{
619
+ name: "last_event_id";
620
+ tableName: "indexer_checkpoints";
621
+ dataType: "string";
622
+ columnType: "PgText";
623
+ data: string;
624
+ driverParam: string;
625
+ notNull: false;
626
+ hasDefault: false;
627
+ isPrimaryKey: false;
628
+ isAutoincrement: false;
629
+ hasRuntimeDefault: false;
630
+ enumValues: [string, ...string[]];
631
+ baseColumn: never;
632
+ identity: undefined;
633
+ generated: undefined;
634
+ }, {}, {}>;
635
+ updatedAt: drizzle_orm_pg_core.PgColumn<{
636
+ name: "updated_at";
637
+ tableName: "indexer_checkpoints";
638
+ dataType: "date";
639
+ columnType: "PgTimestamp";
640
+ data: Date;
641
+ driverParam: string;
642
+ notNull: true;
643
+ hasDefault: true;
644
+ isPrimaryKey: false;
645
+ isAutoincrement: false;
646
+ hasRuntimeDefault: false;
647
+ enumValues: undefined;
648
+ baseColumn: never;
649
+ identity: undefined;
650
+ generated: undefined;
651
+ }, {}, {}>;
652
+ };
653
+ dialect: "pg";
654
+ }>;
655
+ /**
656
+ * Checkpoint data structure.
657
+ */
658
+ interface Checkpoint {
659
+ /** Last indexed slot number */
660
+ slot: bigint;
661
+ /** Last event ID (for cursor-based resume) */
662
+ eventId: string | null;
663
+ }
664
+
665
+ /**
666
+ * Checkpoint repository for reading and updating stream checkpoints.
667
+ */
668
+
669
+ /**
670
+ * Get the checkpoint for a specific stream.
671
+ * Returns null if no checkpoint exists yet.
672
+ *
673
+ * @param db - Database client
674
+ * @param streamName - Name of the stream
675
+ * @returns Checkpoint or null if not found
676
+ */
677
+ declare function getCheckpoint(db: DatabaseClient, streamName: string): Promise<Checkpoint | null>;
678
+ /**
679
+ * Update the checkpoint for a specific stream.
680
+ * Uses upsert to handle both insert and update cases.
681
+ *
682
+ * @param db - Database client (can be a transaction)
683
+ * @param streamName - Name of the stream
684
+ * @param slot - Last indexed slot number
685
+ * @param eventId - Last event ID (optional)
686
+ */
687
+ declare function updateCheckpoint(db: DatabaseClient, streamName: string, slot: bigint, eventId?: string | null): Promise<void>;
688
+ /**
689
+ * Delete the checkpoint for a specific stream.
690
+ * Use with caution - this will cause the stream to re-index from the start.
691
+ *
692
+ * @param db - Database client
693
+ * @param streamName - Name of the stream
694
+ */
695
+ declare function deleteCheckpoint(db: DatabaseClient, streamName: string): Promise<void>;
696
+ /**
697
+ * Get all checkpoints.
698
+ * Useful for monitoring and debugging.
699
+ *
700
+ * @param db - Database client
701
+ * @returns Array of checkpoints with stream names
702
+ */
703
+ declare function getAllCheckpoints(db: DatabaseClient): Promise<Array<{
704
+ streamName: string;
705
+ checkpoint: Checkpoint;
706
+ }>>;
707
+
708
+ /**
709
+ * Checkpoint module exports.
710
+ */
711
+
712
+ /**
713
+ * Get all tables that need to be exported for Drizzle migrations.
714
+ *
715
+ * This helper collects the checkpoint table and all stream tables
716
+ * into a single object for easy export in your schema file.
717
+ *
718
+ * @example
719
+ * ```ts
720
+ * // db/schema.ts
721
+ * import { getSchemaExports } from "@thru/indexer";
722
+ * import transfers from "../streams/transfers";
723
+ * import tokenAccounts from "../account-streams/token-accounts";
724
+ *
725
+ * export const {
726
+ * checkpointTable,
727
+ * transfersTable,
728
+ * tokenAccountsTable,
729
+ * } = getSchemaExports({
730
+ * eventStreams: [transfers],
731
+ * accountStreams: [tokenAccounts],
732
+ * tableNames: {
733
+ * transfers: "transfersTable",
734
+ * "token-accounts": "tokenAccountsTable",
735
+ * },
736
+ * });
737
+ * ```
738
+ */
739
+ declare function getSchemaExports(config: {
740
+ eventStreams?: EventStream<any>[];
741
+ accountStreams?: AccountStream<any>[];
742
+ tableNames?: Record<string, string>;
743
+ }): Record<string, PgTableWithColumns<any>>;
744
+
745
+ /**
746
+ * Route generation for event and account streams.
747
+ *
748
+ * Auto-generates list and get routes with filtering and pagination.
749
+ */
750
+
751
+ interface MountRoutesOptions {
752
+ /** Database client */
753
+ db: DatabaseClient;
754
+ /** Event streams to create routes for */
755
+ eventStreams?: EventStream[];
756
+ /** Account streams to create routes for */
757
+ accountStreams?: AccountStream[];
758
+ /** Path prefix (default: "/api/v1") */
759
+ pathPrefix?: string;
760
+ }
761
+ /**
762
+ * Mount auto-generated routes for event and account streams.
763
+ *
764
+ * @param app - Hono app to mount routes on
765
+ * @param options - Configuration options
766
+ *
767
+ * @example
768
+ * ```ts
769
+ * import { OpenAPIHono } from "@hono/zod-openapi";
770
+ * import { mountStreamRoutes } from "@thru/indexer";
771
+ * import { transfers } from "./streams/transfers";
772
+ * import { tokenAccounts } from "./account-streams/token-accounts";
773
+ *
774
+ * const app = new OpenAPIHono();
775
+ *
776
+ * mountStreamRoutes(app, {
777
+ * db,
778
+ * eventStreams: [transfers],
779
+ * accountStreams: [tokenAccounts],
780
+ * });
781
+ *
782
+ * // Routes generated:
783
+ * // GET /api/v1/transfers
784
+ * // GET /api/v1/transfers/:id
785
+ * // GET /api/v1/token-accounts
786
+ * // GET /api/v1/token-accounts/:address
787
+ * ```
788
+ */
789
+ declare function mountStreamRoutes(app: OpenAPIHono, options: MountRoutesOptions): void;
790
+
791
+ /**
792
+ * Schema generation utilities.
793
+ *
794
+ * Generates Zod schemas and serializers from Drizzle tables.
795
+ */
796
+
797
+ interface GeneratedSchemas {
798
+ /** Row schema (database types) */
799
+ row: z$1.ZodTypeAny;
800
+ /** Insert schema (validation for inserts) */
801
+ insert: z$1.ZodTypeAny;
802
+ /** API output schema (serialized for JSON) */
803
+ api: z$1.ZodTypeAny;
804
+ /** Serialize a database row for API output */
805
+ serialize: (row: Record<string, unknown>) => Record<string, unknown>;
806
+ }
807
+ /**
808
+ * Generate Zod schemas from a Drizzle table.
809
+ *
810
+ * Handles bigint → string and Date → ISO string serialization
811
+ * for JSON-safe API responses.
812
+ *
813
+ * @param table - Drizzle table
814
+ * @param name - Schema name for OpenAPI
815
+ * @param suffix - Suffix for OpenAPI schema name (e.g., "Event", "Account")
816
+ * @returns Generated schemas and serializer
817
+ */
818
+ declare function generateSchemas(table: PgTableWithColumns<any>, name: string, suffix?: string): GeneratedSchemas;
819
+
820
+ /**
821
+ * Pagination utilities for API routes.
822
+ */
823
+
824
+ /**
825
+ * Query parameters for paginated list endpoints.
826
+ */
827
+ declare const paginationQuerySchema: z$1.ZodObject<{
828
+ limit: z$1.ZodDefault<z$1.ZodNumber>;
829
+ offset: z$1.ZodDefault<z$1.ZodNumber>;
830
+ cursor: z$1.ZodOptional<z$1.ZodString>;
831
+ }, "strip", z$1.ZodTypeAny, {
832
+ limit: number;
833
+ offset: number;
834
+ cursor?: string | undefined;
835
+ }, {
836
+ limit?: number | undefined;
837
+ offset?: number | undefined;
838
+ cursor?: string | undefined;
839
+ }>;
840
+ type PaginationQuery = z$1.infer<typeof paginationQuerySchema>;
841
+ /**
842
+ * Pagination metadata in responses.
843
+ */
844
+ declare const paginationResponseSchema: z$1.ZodObject<{
845
+ limit: z$1.ZodNumber;
846
+ offset: z$1.ZodNumber;
847
+ hasMore: z$1.ZodBoolean;
848
+ nextCursor: z$1.ZodNullable<z$1.ZodString>;
849
+ }, "strip", z$1.ZodTypeAny, {
850
+ limit: number;
851
+ offset: number;
852
+ hasMore: boolean;
853
+ nextCursor: string | null;
854
+ }, {
855
+ limit: number;
856
+ offset: number;
857
+ hasMore: boolean;
858
+ nextCursor: string | null;
859
+ }>;
860
+ type PaginationResponse = z$1.infer<typeof paginationResponseSchema>;
861
+ /**
862
+ * Wrap a schema in a data response.
863
+ */
864
+ declare function dataResponse<T extends z$1.ZodTypeAny>(schema: T): z$1.ZodObject<{
865
+ data: T;
866
+ }, "strip", z$1.ZodTypeAny, z$1.objectUtil.addQuestionMarks<z$1.baseObjectOutputType<{
867
+ data: T;
868
+ }>, any> extends infer T_1 ? { [k in keyof T_1]: T_1[k]; } : never, z$1.baseObjectInputType<{
869
+ data: T;
870
+ }> extends infer T_2 ? { [k_1 in keyof T_2]: T_2[k_1]; } : never>;
871
+ /**
872
+ * Wrap a schema in a list response with pagination.
873
+ */
874
+ declare function listResponse<T extends z$1.ZodTypeAny>(schema: T): z$1.ZodObject<{
875
+ data: z$1.ZodArray<T, "many">;
876
+ pagination: z$1.ZodObject<{
877
+ limit: z$1.ZodNumber;
878
+ offset: z$1.ZodNumber;
879
+ hasMore: z$1.ZodBoolean;
880
+ nextCursor: z$1.ZodNullable<z$1.ZodString>;
881
+ }, "strip", z$1.ZodTypeAny, {
882
+ limit: number;
883
+ offset: number;
884
+ hasMore: boolean;
885
+ nextCursor: string | null;
886
+ }, {
887
+ limit: number;
888
+ offset: number;
889
+ hasMore: boolean;
890
+ nextCursor: string | null;
891
+ }>;
892
+ }, "strip", z$1.ZodTypeAny, {
893
+ data: T["_output"][];
894
+ pagination: {
895
+ limit: number;
896
+ offset: number;
897
+ hasMore: boolean;
898
+ nextCursor: string | null;
899
+ };
900
+ }, {
901
+ data: T["_input"][];
902
+ pagination: {
903
+ limit: number;
904
+ offset: number;
905
+ hasMore: boolean;
906
+ nextCursor: string | null;
907
+ };
908
+ }>;
909
+ /**
910
+ * Standard error response schema.
911
+ */
912
+ declare const errorSchema: z$1.ZodObject<{
913
+ error: z$1.ZodString;
914
+ code: z$1.ZodOptional<z$1.ZodString>;
915
+ }, "strip", z$1.ZodTypeAny, {
916
+ error: string;
917
+ code?: string | undefined;
918
+ }, {
919
+ error: string;
920
+ code?: string | undefined;
921
+ }>;
922
+ type ErrorResponse = z$1.infer<typeof errorSchema>;
923
/** One page of query results together with its pagination metadata. */
interface PaginationResult<T> {
    /** The rows for this page. */
    data: T[];
    /** Pagination metadata (`limit`, `offset`, `hasMore`, `nextCursor`). */
    pagination: PaginationResponse;
}
927
/**
 * Process raw results into paginated response.
 * Expects `limit + 1` rows to determine hasMore (the extra row signals
 * that more data exists beyond this page).
 *
 * @param rows - Query results (should have limit + 1 rows)
 * @param query - The pagination query parameters
 * @param getCursor - Optional function to get cursor from last item
 * @returns Paginated result with data and pagination metadata
 */
declare function paginate<T>(rows: T[], query: PaginationQuery, getCursor?: (item: T) => string | null): PaginationResult<T>;
937
/**
 * Parse a cursor string into slot and id components.
 *
 * @param cursor - Cursor string in format "slot:id"
 * @returns Parsed cursor (`slot` as bigint, `id` as string), or null if the
 *   string does not match the expected format
 */
declare function parseCursor(cursor: string): {
    slot: bigint;
    id: string;
} | null;
947
+
948
/**
 * Indexer configuration types.
 */

/**
 * Configuration for the Indexer runtime.
 *
 * Passed to the {@link Indexer} constructor; optional fields fall back to
 * the defaults noted on each property.
 */
interface IndexerConfig {
    /** Database client */
    db: DatabaseClient;
    /** Factory to create fresh chain clients for reconnection */
    clientFactory: ChainClientFactory;
    /** Event streams to run */
    eventStreams?: EventStream[];
    /** Account streams to run */
    accountStreams?: AccountStream[];
    /** Default start slot if no checkpoint exists (default: 0n) */
    defaultStartSlot?: bigint;
    /** Safety margin for finality in slots (default: 64) */
    safetyMargin?: number;
    /** Page size for fetching events (default: 512) */
    pageSize?: number;
    /** Log level (default: "info") */
    logLevel?: "debug" | "info" | "warn" | "error";
    /**
     * Validate parse output at runtime using Zod schemas.
     * Useful for development to catch type mismatches early.
     * Disabled by default for performance in production.
     * @default false
     */
    validateParse?: boolean;
}
980
+
981
/**
 * Indexer runtime class.
 *
 * Orchestrates running multiple event and account streams concurrently
 * with graceful shutdown support.
 *
 * @example
 * ```ts
 * import { Indexer } from "@thru/indexer";
 * import { ChainClient } from "@thru/replay";
 * import { transfers } from "./streams/transfers";
 * import { tokenAccounts } from "./account-streams/token-accounts";
 *
 * const indexer = new Indexer({
 *   db,
 *   clientFactory: () => new ChainClient({ baseUrl: RPC_URL }),
 *   eventStreams: [transfers],
 *   accountStreams: [tokenAccounts],
 *   defaultStartSlot: 0n,
 *   safetyMargin: 64,
 * });
 *
 * // Handle graceful shutdown
 * process.on("SIGINT", () => indexer.stop());
 * process.on("SIGTERM", () => indexer.stop());
 *
 * // Start indexing
 * await indexer.start();
 * ```
 */

// NOTE(review): the block comment above describes the Indexer class, not the
// interface below — it appears to have been detached by the d.ts bundler.
/** Aggregated per-stream outcomes, returned by {@link Indexer.start}. */
interface IndexerResult {
    /** Results from event stream processors */
    eventStreams: Array<{
        /** Stream name */
        name: string;
        /** Whether the processor completed or threw */
        status: "fulfilled" | "rejected";
        /** Processor stats when status is "fulfilled" */
        result?: ProcessorStats;
        /** Failure cause when status is "rejected" */
        error?: Error;
    }>;
    /** Results from account stream processors */
    accountStreams: Array<{
        /** Stream name */
        name: string;
        /** Whether the processor completed or threw */
        status: "fulfilled" | "rejected";
        /** Processor stats when status is "fulfilled" */
        result?: AccountProcessorStats;
        /** Failure cause when status is "rejected" */
        error?: Error;
    }>;
}
1028
/**
 * Indexer runtime that orchestrates event and account streams.
 *
 * Construct with an {@link IndexerConfig}, call {@link Indexer.start} to run
 * all configured streams, and {@link Indexer.stop} to request a graceful
 * shutdown (see the usage example earlier in this file).
 */
declare class Indexer {
    private config;
    private abortController;
    private running;
    private shutdownRequested;
    constructor(config: IndexerConfig);
    /**
     * Check if the checkpoint table exists in the database.
     * Logs a warning if it doesn't exist (user may have forgotten to export it in schema).
     */
    private checkCheckpointTable;
    /**
     * Start the indexer.
     *
     * Runs all configured event and account streams concurrently.
     * Returns when all streams complete or when stop() is called.
     *
     * @returns Results from all stream processors
     */
    start(): Promise<IndexerResult>;
    /**
     * Stop the indexer gracefully.
     *
     * Signals all streams to finish their current batch and stop.
     * The start() promise will resolve once all streams have stopped.
     */
    stop(): void;
    /**
     * Check if the indexer is currently running.
     */
    isRunning(): boolean;
}
1063
+
1064
// Public API surface of @thru/indexer: runtime class, config/result types,
// schema builders, checkpoint helpers, and pagination/response utilities.
export { type AccountStream, type AccountStreamDefinition, type AnyColumnDef, type ApiConfig, type Checkpoint, type ColumnBuilder, type ColumnDef, type Columns, type DatabaseClient, type ErrorResponse, type EventStream, type EventStreamDefinition, type GeneratedSchemas, type HookContext, Indexer, type IndexerConfig, type IndexerResult, type InferInsert, type InferRow, type MountRoutesOptions, type PaginationQuery, type PaginationResponse, type PaginationResult, type SchemaDefinition, type StreamBatch, checkpointTable, columnBuilder, dataResponse, defineAccountStream, defineEventStream, deleteCheckpoint, errorSchema, generateSchemas, generateZodSchema, getAllCheckpoints, getCheckpoint, getSchemaExports, listResponse, mountStreamRoutes, paginate, paginationQuerySchema, paginationResponseSchema, parseCursor, t, updateCheckpoint, validateParsedData };