@apibara/plugin-drizzle 2.1.0-beta.5 → 2.1.0-beta.50

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.mjs CHANGED
@@ -1,10 +1,11 @@
1
1
  import { useIndexerContext } from '@apibara/indexer';
2
- import { defineIndexerPlugin } from '@apibara/indexer/plugins';
2
+ import { defineIndexerPlugin, useLogger } from '@apibara/indexer/plugins';
3
3
  import { generateIndexerId } from '@apibara/indexer/internal';
4
4
  import { useInternalContext } from '@apibara/indexer/internal/plugins';
5
+ import { S as SCHEMA_NAME, D as DRIZZLE_PROPERTY, a as DRIZZLE_STORAGE_DB_PROPERTY } from './shared/plugin-drizzle.2d226351.mjs';
6
+ import { entityKind, sql, eq, and, isNull, gt, lt } from 'drizzle-orm';
5
7
  import { normalizeCursor } from '@apibara/protocol';
6
- import { eq, and, isNull, gt, lt, sql } from 'drizzle-orm';
7
- import { pgTable, text, integer, primaryKey, serial, char, jsonb } from 'drizzle-orm/pg-core';
8
+ import { pgSchema, text, integer, primaryKey, serial, timestamp, char, jsonb } from 'drizzle-orm/pg-core';
8
9
 
9
10
  class DrizzleStorageError extends Error {
10
11
  constructor(message, options) {
@@ -33,16 +34,89 @@ function serialize(obj) {
33
34
  function sleep(ms) {
34
35
  return new Promise((resolve) => setTimeout(resolve, ms));
35
36
  }
37
// Resolve the id column to use for `tableName`.
// A per-table entry in the mapping wins; otherwise fall back to the
// wildcard ("*") entry that applies to every table.
const getIdColumnForTable = (tableName, idColumn) => {
  const override = idColumn[tableName];
  return override ? override : idColumn["*"];
};
36
43
 
37
- const CHECKPOINTS_TABLE_NAME = "__indexer_checkpoints";
38
- const FILTERS_TABLE_NAME = "__indexer_filters";
39
- const SCHEMA_VERSION_TABLE_NAME = "__indexer_schema_version";
40
- const checkpoints = pgTable(CHECKPOINTS_TABLE_NAME, {
44
/**
 * Create a drizzle database client.
 *
 * Picks the PGLite driver when `type` is "pglite" and the connection string
 * uses a pglite scheme (memory://, file://, idb://); otherwise connects to
 * Postgres through node-postgres.
 *
 * @param {object} [options]
 * @param {string} [options.connectionString] - defaults to
 *   POSTGRES_CONNECTION_STRING or "memory://".
 * @param {object} [options.schema] - drizzle schema passed to the driver.
 * @param {string} [options.type] - "pglite" (default) or any other value to
 *   force node-postgres.
 * @param {object} [options.config] - extra driver config, spread last.
 * @param {object} [options.poolConfig] - extra pg Pool config (node-postgres only).
 * @returns a drizzle database instance.
 */
function drizzle(options) {
  const {
    connectionString = process.env["POSTGRES_CONNECTION_STRING"] ?? "memory://",
    schema,
    type = "pglite",
    config,
    poolConfig
  } = options ?? {};
  const usePglite = type === "pglite" && isPgliteConnectionString(connectionString);
  if (usePglite) {
    const { drizzle: drizzlePGLite } = require("drizzle-orm/pglite");
    const pgliteConfig = {
      schema,
      connection: {
        dataDir: connectionString || "memory://pglite"
      },
      ...(config || {})
    };
    return drizzlePGLite(pgliteConfig);
  }
  const { drizzle: drizzleNode } = require("drizzle-orm/node-postgres");
  const { Pool } = require("pg");
  const pool = new Pool({
    connectionString,
    ...(poolConfig || {})
  });
  return drizzleNode(pool, { schema, ...(config || {}) });
}
70
/**
 * Apply drizzle-kit migrations to `db`.
 *
 * Dispatches to the pglite or node-postgres migrator based on the database
 * kind, wrapping any failure in a DrizzleStorageError.
 *
 * @param db - drizzle database instance (pglite or node-postgres).
 * @param options - migrator options (e.g. migrationsFolder).
 * @throws {DrizzleStorageError} when the migrator throws.
 */
async function migrate(db, options) {
  const usePglite = isDrizzleKind(db, "PgliteDatabase");
  try {
    const migratorModule = usePglite
      ? require("drizzle-orm/pglite/migrator")
      : require("drizzle-orm/node-postgres/migrator");
    await migratorModule.migrate(db, options);
  } catch (error) {
    throw new DrizzleStorageError(
      "Failed to apply migrations! Please check if you have generated migrations using drizzle:generate",
      {
        cause: error
      }
    );
  }
}
91
// True when the connection string targets PGLite rather than a real
// Postgres server: in-memory, file-backed, or IndexedDB-backed stores.
function isPgliteConnectionString(conn) {
  const pglitePrefixes = ["memory://", "file://", "idb://"];
  return pglitePrefixes.some((prefix) => conn.startsWith(prefix));
}
94
/**
 * Check whether `value` is a drizzle entity of the given kind by walking its
 * constructor chain looking for a matching `entityKind` marker.
 *
 * @param value - candidate object (anything is accepted).
 * @param entityKindValue - expected entity-kind string, e.g. "PgliteDatabase".
 * @returns {boolean} true when a constructor in the chain carries the marker.
 */
function isDrizzleKind(value, entityKindValue) {
  if (!value || typeof value !== "object") {
    return false;
  }
  // Null-prototype objects (Object.create(null)) have no constructor;
  // a type guard should report false instead of throwing a TypeError.
  const proto = Object.getPrototypeOf(value);
  if (!proto) {
    return false;
  }
  // Walk up the constructor inheritance chain; `while` already handles the
  // null terminator, so no extra guard is needed.
  let cls = proto.constructor;
  while (cls) {
    if (entityKind in cls && cls[entityKind] === entityKindValue) {
      return true;
    }
    cls = Object.getPrototypeOf(cls);
  }
  return false;
}
109
+
110
+ const CHECKPOINTS_TABLE_NAME = "checkpoints";
111
+ const FILTERS_TABLE_NAME = "filters";
112
+ const SCHEMA_VERSION_TABLE_NAME = "schema_version";
113
+ const schema$1 = pgSchema(SCHEMA_NAME);
114
+ const checkpoints = schema$1.table(CHECKPOINTS_TABLE_NAME, {
41
115
  id: text("id").notNull().primaryKey(),
42
116
  orderKey: integer("order_key").notNull(),
43
117
  uniqueKey: text("unique_key")
44
118
  });
45
- const filters = pgTable(
119
+ const filters = schema$1.table(
46
120
  FILTERS_TABLE_NAME,
47
121
  {
48
122
  id: text("id").notNull(),
@@ -56,7 +130,16 @@ const filters = pgTable(
56
130
  }
57
131
  ]
58
132
  );
59
- const schemaVersion = pgTable(SCHEMA_VERSION_TABLE_NAME, {
133
+ const chainReorganizations = schema$1.table("chain_reorganizations", {
134
+ id: serial("id").primaryKey(),
135
+ indexerId: text("indexer_id").notNull(),
136
+ oldHeadOrderKey: integer("old_head_order_key"),
137
+ oldHeadUniqueKey: text("old_head_unique_key").$type().default(null),
138
+ newHeadOrderKey: integer("new_head_order_key").notNull(),
139
+ newHeadUniqueKey: text("new_head_unique_key").$type().default(null),
140
+ recordedAt: timestamp("recorded_at").defaultNow().notNull()
141
+ });
142
+ const schemaVersion = schema$1.table(SCHEMA_VERSION_TABLE_NAME, {
60
143
  k: integer("k").notNull().primaryKey(),
61
144
  version: integer("version").notNull()
62
145
  });
@@ -67,12 +150,38 @@ const MIGRATIONS = [
67
150
  // Add more migration arrays for future versions
68
151
  ];
69
152
  async function initializePersistentState(tx) {
70
- await tx.execute(`
71
- CREATE TABLE IF NOT EXISTS ${SCHEMA_VERSION_TABLE_NAME} (
153
+ await tx.execute(
154
+ sql.raw(`
155
+ CREATE SCHEMA IF NOT EXISTS ${SCHEMA_NAME};
156
+ `)
157
+ );
158
+ await tx.execute(
159
+ sql.raw(`
160
+ CREATE TABLE IF NOT EXISTS ${SCHEMA_NAME}.${SCHEMA_VERSION_TABLE_NAME} (
72
161
  k INTEGER PRIMARY KEY,
73
162
  version INTEGER NOT NULL
74
163
  );
75
- `);
164
+ `)
165
+ );
166
+ await tx.execute(
167
+ sql.raw(`
168
+ CREATE TABLE IF NOT EXISTS ${SCHEMA_NAME}.chain_reorganizations (
169
+ id SERIAL PRIMARY KEY,
170
+ indexer_id TEXT NOT NULL,
171
+ old_head_order_key INTEGER,
172
+ old_head_unique_key TEXT DEFAULT NULL,
173
+ new_head_order_key INTEGER NOT NULL,
174
+ new_head_unique_key TEXT DEFAULT NULL,
175
+ recorded_at TIMESTAMP NOT NULL DEFAULT NOW()
176
+ );
177
+ `)
178
+ );
179
+ await tx.execute(
180
+ sql.raw(`
181
+ CREATE INDEX IF NOT EXISTS idx_chain_reorgs_indexer_id
182
+ ON ${SCHEMA_NAME}.chain_reorganizations(indexer_id);
183
+ `)
184
+ );
76
185
  const versionRows = await tx.select().from(schemaVersion).where(eq(schemaVersion.k, 0));
77
186
  const storedVersion = versionRows[0]?.version ?? -1;
78
187
  if (storedVersion > CURRENT_SCHEMA_VERSION) {
@@ -82,22 +191,26 @@ async function initializePersistentState(tx) {
82
191
  }
83
192
  try {
84
193
  if (storedVersion === -1) {
85
- await tx.execute(`
86
- CREATE TABLE IF NOT EXISTS ${CHECKPOINTS_TABLE_NAME} (
194
+ await tx.execute(
195
+ sql.raw(`
196
+ CREATE TABLE IF NOT EXISTS ${SCHEMA_NAME}.${CHECKPOINTS_TABLE_NAME} (
87
197
  id TEXT PRIMARY KEY,
88
198
  order_key INTEGER NOT NULL,
89
199
  unique_key TEXT
90
200
  );
91
- `);
92
- await tx.execute(`
93
- CREATE TABLE IF NOT EXISTS ${FILTERS_TABLE_NAME} (
201
+ `)
202
+ );
203
+ await tx.execute(
204
+ sql.raw(`
205
+ CREATE TABLE IF NOT EXISTS ${SCHEMA_NAME}.${FILTERS_TABLE_NAME} (
94
206
  id TEXT NOT NULL,
95
207
  filter TEXT NOT NULL,
96
208
  from_block INTEGER NOT NULL,
97
209
  to_block INTEGER DEFAULT NULL,
98
210
  PRIMARY KEY (id, from_block)
99
211
  );
100
- `);
212
+ `)
213
+ );
101
214
  await tx.insert(schemaVersion).values({
102
215
  k: 0,
103
216
  version: CURRENT_SCHEMA_VERSION
@@ -120,6 +233,22 @@ async function initializePersistentState(tx) {
120
233
  );
121
234
  }
122
235
  }
236
/**
 * Insert an audit row describing a chain reorganization for `indexerId`.
 *
 * @param props.tx - transaction to insert with.
 * @param props.indexerId - id of the affected indexer.
 * @param props.oldHead - cursor before the reorg; may be undefined.
 * @param props.newHead - cursor the chain reorganized to.
 * @throws {DrizzleStorageError} when the insert fails.
 */
async function recordChainReorganization(props) {
  const { tx, indexerId, oldHead, newHead } = props;
  // Normalize optional unique keys to explicit nulls for the insert.
  const row = {
    indexerId,
    oldHeadOrderKey: oldHead ? Number(oldHead.orderKey) : null,
    oldHeadUniqueKey: oldHead?.uniqueKey ? oldHead.uniqueKey : null,
    newHeadOrderKey: Number(newHead.orderKey),
    newHeadUniqueKey: newHead.uniqueKey ? newHead.uniqueKey : null
  };
  try {
    await tx.insert(chainReorganizations).values(row);
  } catch (error) {
    throw new DrizzleStorageError("Failed to record chain reorganization", {
      cause: error
    });
  }
}
123
252
  async function persistState(props) {
124
253
  const { tx, endCursor, filter, indexerId } = props;
125
254
  try {
@@ -132,7 +261,9 @@ async function persistState(props) {
132
261
  target: checkpoints.id,
133
262
  set: {
134
263
  orderKey: Number(endCursor.orderKey),
135
- uniqueKey: endCursor.uniqueKey
264
+ // Explicitly set the unique key to `null` to indicate that it has been deleted
265
+ // Otherwise drizzle will not update its value.
266
+ uniqueKey: endCursor.uniqueKey ? endCursor.uniqueKey : null
136
267
  }
137
268
  });
138
269
  if (filter) {
@@ -178,6 +309,10 @@ async function getState(props) {
178
309
  async function invalidateState(props) {
179
310
  const { tx, cursor, indexerId } = props;
180
311
  try {
312
+ await tx.update(checkpoints).set({
313
+ orderKey: Number(cursor.orderKey),
314
+ uniqueKey: cursor.uniqueKey ? cursor.uniqueKey : null
315
+ }).where(eq(checkpoints.id, indexerId));
181
316
  await tx.delete(filters).where(
182
317
  and(
183
318
  eq(filters.id, indexerId),
@@ -211,11 +346,24 @@ async function finalizeState(props) {
211
346
  });
212
347
  }
213
348
  }
349
/**
 * Delete all persisted state (checkpoint and filter rows) for `indexerId`.
 *
 * @param props.tx - transaction to delete with.
 * @param props.indexerId - id whose rows are removed.
 * @throws {DrizzleStorageError} when either delete fails.
 */
async function resetPersistence(props) {
  const { tx, indexerId } = props;
  try {
    // Both tables key their rows by the same `id` column.
    for (const table of [checkpoints, filters]) {
      await tx.delete(table).where(eq(table.id, indexerId));
    }
  } catch (error) {
    throw new DrizzleStorageError("Failed to reset persistence state", {
      cause: error
    });
  }
}
214
360
 
361
+ const ROLLBACK_TABLE_NAME = "reorg_rollback";
362
+ const schema = pgSchema(SCHEMA_NAME);
215
363
  function getReorgTriggerName(table, indexerId) {
216
364
  return `${table}_reorg_${indexerId}`;
217
365
  }
218
- pgTable("__reorg_rollback", {
366
+ schema.table(ROLLBACK_TABLE_NAME, {
219
367
  n: serial("n").primaryKey(),
220
368
  op: char("op", { length: 1 }).$type().notNull(),
221
369
  table_name: text("table_name").notNull(),
@@ -226,9 +374,12 @@ pgTable("__reorg_rollback", {
226
374
  });
227
375
  async function initializeReorgRollbackTable(tx, indexerId) {
228
376
  try {
377
+ await tx.execute(`
378
+ CREATE SCHEMA IF NOT EXISTS ${SCHEMA_NAME};
379
+ `);
229
380
  await tx.execute(
230
381
  sql.raw(`
231
- CREATE TABLE IF NOT EXISTS __reorg_rollback(
382
+ CREATE TABLE IF NOT EXISTS ${SCHEMA_NAME}.${ROLLBACK_TABLE_NAME}(
232
383
  n SERIAL PRIMARY KEY,
233
384
  op CHAR(1) NOT NULL,
234
385
  table_name TEXT NOT NULL,
@@ -241,7 +392,7 @@ async function initializeReorgRollbackTable(tx, indexerId) {
241
392
  );
242
393
  await tx.execute(
243
394
  sql.raw(`
244
- CREATE INDEX IF NOT EXISTS idx_reorg_rollback_indexer_id_cursor ON __reorg_rollback(indexer_id, cursor);
395
+ CREATE INDEX IF NOT EXISTS idx_reorg_rollback_indexer_id_cursor ON ${SCHEMA_NAME}.${ROLLBACK_TABLE_NAME}(indexer_id, cursor);
245
396
  `)
246
397
  );
247
398
  } catch (error) {
@@ -252,24 +403,25 @@ async function initializeReorgRollbackTable(tx, indexerId) {
252
403
  try {
253
404
  await tx.execute(
254
405
  sql.raw(`
255
- CREATE OR REPLACE FUNCTION reorg_checkpoint()
406
+ CREATE OR REPLACE FUNCTION ${SCHEMA_NAME}.reorg_checkpoint()
256
407
  RETURNS TRIGGER AS $$
257
408
  DECLARE
258
- id_col TEXT := TG_ARGV[0]::TEXT;
259
- order_key INTEGER := TG_ARGV[1]::INTEGER;
260
- indexer_id TEXT := TG_ARGV[2]::TEXT;
409
+ table_name TEXT := TG_ARGV[0]::TEXT;
410
+ id_col TEXT := TG_ARGV[1]::TEXT;
411
+ order_key INTEGER := TG_ARGV[2]::INTEGER;
412
+ indexer_id TEXT := TG_ARGV[3]::TEXT;
261
413
  new_id_value TEXT := row_to_json(NEW.*)->>id_col;
262
414
  old_id_value TEXT := row_to_json(OLD.*)->>id_col;
263
415
  BEGIN
264
416
  IF (TG_OP = 'DELETE') THEN
265
- INSERT INTO __reorg_rollback(op, table_name, cursor, row_id, row_value, indexer_id)
266
- SELECT 'D', TG_TABLE_NAME, order_key, old_id_value, row_to_json(OLD.*), indexer_id;
417
+ INSERT INTO ${SCHEMA_NAME}.${ROLLBACK_TABLE_NAME}(op, table_name, cursor, row_id, row_value, indexer_id)
418
+ SELECT 'D', table_name, order_key, old_id_value, row_to_json(OLD.*), indexer_id;
267
419
  ELSIF (TG_OP = 'UPDATE') THEN
268
- INSERT INTO __reorg_rollback(op, table_name, cursor, row_id, row_value, indexer_id)
269
- SELECT 'U', TG_TABLE_NAME, order_key, new_id_value, row_to_json(OLD.*), indexer_id;
420
+ INSERT INTO ${SCHEMA_NAME}.${ROLLBACK_TABLE_NAME}(op, table_name, cursor, row_id, row_value, indexer_id)
421
+ SELECT 'U', table_name, order_key, new_id_value, row_to_json(OLD.*), indexer_id;
270
422
  ELSIF (TG_OP = 'INSERT') THEN
271
- INSERT INTO __reorg_rollback(op, table_name, cursor, row_id, row_value, indexer_id)
272
- SELECT 'I', TG_TABLE_NAME, order_key, new_id_value, null, indexer_id;
423
+ INSERT INTO ${SCHEMA_NAME}.${ROLLBACK_TABLE_NAME}(op, table_name, cursor, row_id, row_value, indexer_id)
424
+ SELECT 'I', table_name, order_key, new_id_value, null, indexer_id;
273
425
  END IF;
274
426
  RETURN NULL;
275
427
  END;
@@ -285,9 +437,10 @@ async function initializeReorgRollbackTable(tx, indexerId) {
285
437
  );
286
438
  }
287
439
  }
288
- async function registerTriggers(tx, tables, endCursor, idColumn, indexerId) {
440
+ async function registerTriggers(tx, tables, endCursor, idColumnMap, indexerId) {
289
441
  try {
290
442
  for (const table of tables) {
443
+ const tableIdColumn = getIdColumnForTable(table, idColumnMap);
291
444
  await tx.execute(
292
445
  sql.raw(
293
446
  `DROP TRIGGER IF EXISTS ${getReorgTriggerName(table, indexerId)} ON ${table};`
@@ -298,7 +451,7 @@ async function registerTriggers(tx, tables, endCursor, idColumn, indexerId) {
298
451
  CREATE CONSTRAINT TRIGGER ${getReorgTriggerName(table, indexerId)}
299
452
  AFTER INSERT OR UPDATE OR DELETE ON ${table}
300
453
  DEFERRABLE INITIALLY DEFERRED
301
- FOR EACH ROW EXECUTE FUNCTION reorg_checkpoint('${idColumn}', ${`${Number(endCursor.orderKey)}`}, '${indexerId}');
454
+ FOR EACH ROW EXECUTE FUNCTION ${SCHEMA_NAME}.reorg_checkpoint('${table}', '${tableIdColumn}', ${Number(endCursor.orderKey)}, '${indexerId}');
302
455
  `)
303
456
  );
304
457
  }
@@ -323,11 +476,11 @@ async function removeTriggers(db, tables, indexerId) {
323
476
  });
324
477
  }
325
478
  }
326
- async function invalidate(tx, cursor, idColumn, indexerId) {
479
+ async function invalidate(tx, cursor, idColumnMap, indexerId) {
327
480
  const { rows: result } = await tx.execute(
328
481
  sql.raw(`
329
482
  WITH deleted AS (
330
- DELETE FROM __reorg_rollback
483
+ DELETE FROM ${SCHEMA_NAME}.${ROLLBACK_TABLE_NAME}
331
484
  WHERE cursor > ${Number(cursor.orderKey)}
332
485
  AND indexer_id = '${indexerId}'
333
486
  RETURNING *
@@ -341,6 +494,7 @@ async function invalidate(tx, cursor, idColumn, indexerId) {
341
494
  );
342
495
  }
343
496
  for (const op of result) {
497
+ const tableIdColumn = getIdColumnForTable(op.table_name, idColumnMap);
344
498
  switch (op.op) {
345
499
  case "I":
346
500
  try {
@@ -350,7 +504,7 @@ async function invalidate(tx, cursor, idColumn, indexerId) {
350
504
  await tx.execute(
351
505
  sql.raw(`
352
506
  DELETE FROM ${op.table_name}
353
- WHERE ${idColumn} = '${op.row_id}'
507
+ WHERE ${tableIdColumn} = '${op.row_id}'
354
508
  `)
355
509
  );
356
510
  } catch (error) {
@@ -390,7 +544,9 @@ async function invalidate(tx, cursor, idColumn, indexerId) {
390
544
  );
391
545
  }
392
546
  const rowValue = typeof op.row_value === "string" ? JSON.parse(op.row_value) : op.row_value;
393
- const nonIdKeys = Object.keys(rowValue).filter((k) => k !== idColumn);
547
+ const nonIdKeys = Object.keys(rowValue).filter(
548
+ (k) => k !== tableIdColumn
549
+ );
394
550
  const fields = nonIdKeys.map((c) => `${c} = prev.${c}`).join(", ");
395
551
  const query = sql.raw(`
396
552
  UPDATE ${op.table_name}
@@ -398,7 +554,7 @@ async function invalidate(tx, cursor, idColumn, indexerId) {
398
554
  FROM (
399
555
  SELECT * FROM json_populate_record(null::${op.table_name}, '${JSON.stringify(op.row_value)}'::json)
400
556
  ) as prev
401
- WHERE ${op.table_name}.${idColumn} = '${op.row_id}'
557
+ WHERE ${op.table_name}.${tableIdColumn} = '${op.row_id}'
402
558
  `);
403
559
  await tx.execute(query);
404
560
  } catch (error) {
@@ -420,7 +576,7 @@ async function finalize(tx, cursor, indexerId) {
420
576
  try {
421
577
  await tx.execute(
422
578
  sql.raw(`
423
- DELETE FROM __reorg_rollback
579
+ DELETE FROM ${SCHEMA_NAME}.${ROLLBACK_TABLE_NAME}
424
580
  WHERE cursor <= ${Number(cursor.orderKey)}
425
581
  AND indexer_id = '${indexerId}'
426
582
  `)
@@ -431,8 +587,37 @@ async function finalize(tx, cursor, indexerId) {
431
587
  });
432
588
  }
433
589
  }
590
/**
 * Wipe all indexer data so indexing can restart from scratch: drop the reorg
 * triggers on every user table, delete this indexer's rollback log rows, and
 * truncate every user table.
 *
 * NOTE(review): `indexerId` and table names are interpolated directly into
 * SQL; presumably they come from trusted configuration, not user input —
 * confirm before exposing them to external callers.
 *
 * @param tx - transaction to run the cleanup in.
 * @param tables - db table names to de-trigger and truncate.
 * @param indexerId - id whose rollback rows are deleted.
 * @throws {DrizzleStorageError} on any failure; the specific
 *   "Failed to truncate table" error is propagated unchanged.
 */
async function cleanupStorage(tx, tables, indexerId) {
  try {
    for (const table of tables) {
      await tx.execute(
        sql.raw(
          `DROP TRIGGER IF EXISTS ${getReorgTriggerName(table, indexerId)} ON ${table};`
        )
      );
    }
    await tx.execute(
      sql.raw(`
        DELETE FROM ${SCHEMA_NAME}.${ROLLBACK_TABLE_NAME}
        WHERE indexer_id = '${indexerId}'
      `)
    );
    for (const table of tables) {
      try {
        await tx.execute(sql.raw(`TRUNCATE TABLE ${table} CASCADE;`));
      } catch (error) {
        throw new DrizzleStorageError(`Failed to truncate table ${table}`, {
          cause: error
        });
      }
    }
  } catch (error) {
    // Keep the specific per-table error instead of double-wrapping it,
    // matching how the init hook rethrows DrizzleStorageError instances.
    if (error instanceof DrizzleStorageError) {
      throw error;
    }
    throw new DrizzleStorageError("Failed to clean up storage", {
      cause: error
    });
  }
}
434
620
 
435
- const DRIZZLE_PROPERTY = "_drizzle";
436
621
  const MAX_RETRIES = 5;
437
622
  function useDrizzleStorage(_db) {
438
623
  const context = useIndexerContext();
@@ -443,40 +628,93 @@ function useDrizzleStorage(_db) {
443
628
  }
444
629
  return context[DRIZZLE_PROPERTY];
445
630
  }
631
+ function useTestDrizzleStorage() {
632
+ const context = useIndexerContext();
633
+ if (!context[DRIZZLE_STORAGE_DB_PROPERTY]) {
634
+ throw new DrizzleStorageError(
635
+ "drizzle storage db is not available. Did you register the plugin?"
636
+ );
637
+ }
638
+ return context[DRIZZLE_STORAGE_DB_PROPERTY];
639
+ }
446
640
  function drizzleStorage({
447
641
  db,
448
642
  persistState: enablePersistence = true,
449
643
  indexerName: identifier = "default",
450
- schema,
451
- idColumn = "id"
644
+ schema: _schema,
645
+ idColumn,
646
+ migrate: migrateOptions,
647
+ recordChainReorganizations = false
452
648
  }) {
453
649
  return defineIndexerPlugin((indexer) => {
454
650
  let tableNames = [];
455
651
  let indexerId = "";
652
+ const alwaysReindex = process.env["APIBARA_ALWAYS_REINDEX"] === "true";
653
+ let prevFinality;
654
+ const schema = _schema ?? db._.schema ?? {};
655
+ const idColumnMap = {
656
+ "*": typeof idColumn === "string" ? idColumn : "id",
657
+ ...typeof idColumn === "object" ? idColumn : {}
658
+ };
456
659
  try {
457
- tableNames = Object.values(schema ?? db._.schema ?? {}).map(
458
- (table) => table.dbName
459
- );
660
+ tableNames = Object.values(schema).map((table) => table.dbName);
460
661
  } catch (error) {
461
662
  throw new DrizzleStorageError("Failed to get table names from schema", {
462
663
  cause: error
463
664
  });
464
665
  }
465
- indexer.hooks.hook("run:before", async () => {
466
- const { indexerName: indexerFileName, availableIndexers } = useInternalContext();
666
+ for (const table of Object.values(schema)) {
667
+ const columns = table.columns;
668
+ const tableIdColumn = getIdColumnForTable(table.dbName, idColumnMap);
669
+ const columnExists = Object.values(columns).some(
670
+ (column) => column.name === tableIdColumn
671
+ );
672
+ if (!columnExists) {
673
+ throw new DrizzleStorageError(
674
+ `Column \`"${tableIdColumn}"\` does not exist in table \`"${table.dbName}"\`. Make sure the table has the specified column or provide a valid \`idColumn\` mapping to \`drizzleStorage\`.`
675
+ );
676
+ }
677
+ }
678
+ indexer.hooks.hook("plugins:init", async () => {
679
+ const internalContext = useInternalContext();
680
+ const context = useIndexerContext();
681
+ const logger = useLogger();
682
+ context[DRIZZLE_STORAGE_DB_PROPERTY] = db;
683
+ const { indexerName: indexerFileName, availableIndexers } = internalContext;
467
684
  indexerId = generateIndexerId(indexerFileName, identifier);
468
685
  let retries = 0;
686
+ let migrationsApplied = false;
687
+ let cleanupApplied = false;
469
688
  while (retries <= MAX_RETRIES) {
470
689
  try {
690
+ if (migrateOptions && !migrationsApplied) {
691
+ await migrate(db, migrateOptions);
692
+ migrationsApplied = true;
693
+ logger.success("Migrations applied");
694
+ }
471
695
  await withTransaction(db, async (tx) => {
472
696
  await initializeReorgRollbackTable(tx, indexerId);
473
697
  if (enablePersistence) {
474
698
  await initializePersistentState(tx);
475
699
  }
700
+ if (alwaysReindex && !cleanupApplied) {
701
+ logger.warn(
702
+ `Reindexing: Deleting all data from tables - ${tableNames.join(", ")}`
703
+ );
704
+ await cleanupStorage(tx, tableNames, indexerId);
705
+ if (enablePersistence) {
706
+ await resetPersistence({ tx, indexerId });
707
+ }
708
+ cleanupApplied = true;
709
+ logger.success("Tables have been cleaned up for reindexing");
710
+ }
476
711
  });
477
712
  break;
478
713
  } catch (error) {
479
714
  if (retries === MAX_RETRIES) {
715
+ if (error instanceof DrizzleStorageError) {
716
+ throw error;
717
+ }
480
718
  throw new DrizzleStorageError(
481
719
  "Initialization failed after 5 retries",
482
720
  {
@@ -512,7 +750,7 @@ function drizzleStorage({
512
750
  return;
513
751
  }
514
752
  await withTransaction(db, async (tx) => {
515
- await invalidate(tx, cursor, idColumn, indexerId);
753
+ await invalidate(tx, cursor, idColumnMap, indexerId);
516
754
  if (enablePersistence) {
517
755
  await invalidateState({ tx, cursor, indexerId });
518
756
  }
@@ -533,7 +771,7 @@ function drizzleStorage({
533
771
  }
534
772
  });
535
773
  indexer.hooks.hook("message:finalize", async ({ message }) => {
536
- const { cursor } = message.finalize;
774
+ const { cursor } = message;
537
775
  if (!cursor) {
538
776
  throw new DrizzleStorageError("Finalized Cursor is undefined");
539
777
  }
@@ -545,12 +783,26 @@ function drizzleStorage({
545
783
  });
546
784
  });
547
785
  indexer.hooks.hook("message:invalidate", async ({ message }) => {
548
- const { cursor } = message.invalidate;
786
+ const { cursor } = message;
549
787
  if (!cursor) {
550
788
  throw new DrizzleStorageError("Invalidate Cursor is undefined");
551
789
  }
552
790
  await withTransaction(db, async (tx) => {
553
- await invalidate(tx, cursor, idColumn, indexerId);
791
+ let oldHead;
792
+ if (recordChainReorganizations) {
793
+ const { cursor: currentCursor } = await getState({
794
+ tx,
795
+ indexerId
796
+ });
797
+ oldHead = currentCursor;
798
+ await recordChainReorganization({
799
+ tx,
800
+ indexerId,
801
+ oldHead,
802
+ newHead: cursor
803
+ });
804
+ }
805
+ await invalidate(tx, cursor, idColumnMap, indexerId);
554
806
  if (enablePersistence) {
555
807
  await invalidateState({ tx, cursor, indexerId });
556
808
  }
@@ -559,43 +811,46 @@ function drizzleStorage({
559
811
  indexer.hooks.hook("handler:middleware", async ({ use }) => {
560
812
  use(async (context, next) => {
561
813
  try {
562
- const { endCursor, finality } = context;
814
+ const { endCursor, finality, cursor } = context;
563
815
  if (!endCursor) {
564
816
  throw new DrizzleStorageError("End Cursor is undefined");
565
817
  }
566
818
  await withTransaction(db, async (tx) => {
567
819
  context[DRIZZLE_PROPERTY] = { db: tx };
820
+ if (prevFinality === "pending") {
821
+ await invalidate(tx, cursor, idColumnMap, indexerId);
822
+ }
568
823
  if (finality !== "finalized") {
569
824
  await registerTriggers(
570
825
  tx,
571
826
  tableNames,
572
827
  endCursor,
573
- idColumn,
828
+ idColumnMap,
574
829
  indexerId
575
830
  );
576
831
  }
577
832
  await next();
578
833
  delete context[DRIZZLE_PROPERTY];
579
- if (enablePersistence) {
834
+ if (enablePersistence && finality !== "pending") {
580
835
  await persistState({
581
836
  tx,
582
837
  endCursor,
583
838
  indexerId
584
839
  });
585
840
  }
841
+ prevFinality = finality;
586
842
  });
587
843
  if (finality !== "finalized") {
588
844
  await removeTriggers(db, tableNames, indexerId);
589
845
  }
590
846
  } catch (error) {
591
847
  await removeTriggers(db, tableNames, indexerId);
592
- throw new DrizzleStorageError("Failed to run handler:middleware", {
593
- cause: error
594
- });
848
+ throw error;
595
849
  }
596
850
  });
597
851
  });
598
852
  });
599
853
  }
600
854
 
601
- export { drizzleStorage, useDrizzleStorage };
855
+ export { drizzle, drizzleStorage, migrate, useDrizzleStorage, useTestDrizzleStorage };
856
+ //# sourceMappingURL=index.mjs.map