@apibara/plugin-drizzle 2.1.0-beta.2 → 2.1.0-beta.20
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.cjs +194 -43
- package/dist/index.d.cts +134 -3
- package/dist/index.d.mts +134 -3
- package/dist/index.d.ts +134 -3
- package/dist/index.mjs +180 -35
- package/dist/shared/plugin-drizzle.2d226351.mjs +5 -0
- package/dist/shared/plugin-drizzle.cae20704.cjs +9 -0
- package/dist/testing.cjs +13 -0
- package/dist/testing.d.cts +6 -0
- package/dist/testing.d.mts +6 -0
- package/dist/testing.d.ts +6 -0
- package/dist/testing.mjs +11 -0
- package/package.json +12 -6
- package/src/constants.ts +3 -0
- package/src/helper.ts +202 -0
- package/src/index.ts +75 -5
- package/src/persistence.ts +60 -18
- package/src/storage.ts +63 -11
- package/src/testing.ts +13 -0
package/dist/index.mjs
CHANGED
@@ -1,10 +1,17 @@
 import { useIndexerContext } from '@apibara/indexer';
-import { defineIndexerPlugin } from '@apibara/indexer/plugins';
+import { defineIndexerPlugin, useLogger } from '@apibara/indexer/plugins';
 import { generateIndexerId } from '@apibara/indexer/internal';
 import { useInternalContext } from '@apibara/indexer/internal/plugins';
+import { S as SCHEMA_NAME, D as DRIZZLE_PROPERTY, a as DRIZZLE_STORAGE_DB_PROPERTY } from './shared/plugin-drizzle.2d226351.mjs';
+import { PGlite } from '@electric-sql/pglite';
+import { drizzle as drizzle$1 } from 'drizzle-orm/node-postgres';
+import { migrate as migrate$2 } from 'drizzle-orm/node-postgres/migrator';
+import { drizzle as drizzle$2 } from 'drizzle-orm/pglite';
+import { migrate as migrate$1 } from 'drizzle-orm/pglite/migrator';
+import pg from 'pg';
 import { normalizeCursor } from '@apibara/protocol';
-import { eq, and, isNull, gt, lt
-import {
+import { sql, eq, and, isNull, gt, lt } from 'drizzle-orm';
+import { pgSchema, text, integer, primaryKey, serial, char, jsonb } from 'drizzle-orm/pg-core';
 
 class DrizzleStorageError extends Error {
   constructor(message, options) {
@@ -34,15 +41,63 @@ function sleep(ms) {
   return new Promise((resolve) => setTimeout(resolve, ms));
 }
 
-
-const
-
-
+function drizzle(options) {
+  const {
+    connectionString = process.env["POSTGRES_CONNECTION_STRING"] ?? "memory://",
+    schema,
+    type = "pglite",
+    config,
+    poolConfig
+  } = options ?? {};
+  if (isPostgresConnectionString(connectionString) || type === "node-postgres") {
+    const pool = new pg.Pool({
+      connectionString,
+      ...poolConfig || {}
+    });
+    return drizzle$1(pool, { schema, ...config || {} });
+  }
+  if (type === "pglite") {
+    return drizzle$2({
+      schema,
+      connection: {
+        dataDir: connectionString || "memory://pglite"
+      },
+      ...config || {}
+    });
+  }
+  throw new Error("Invalid database type");
+}
+async function migrate(db, options) {
+  const isPglite = !!("$client" in db && db.$client instanceof PGlite);
+  try {
+    if (isPglite) {
+      await migrate$1(db, options);
+    } else {
+      await migrate$2(db, options);
+    }
+  } catch (error) {
+    throw new DrizzleStorageError(
+      "Failed to apply migrations! Please check if you have generated migrations using drizzle:generate",
+      {
+        cause: error
+      }
+    );
+  }
+}
+function isPostgresConnectionString(conn) {
+  return conn.startsWith("postgres://") || conn.startsWith("postgresql://");
+}
+
+const CHECKPOINTS_TABLE_NAME = "checkpoints";
+const FILTERS_TABLE_NAME = "filters";
+const SCHEMA_VERSION_TABLE_NAME = "schema_version";
+const schema$1 = pgSchema(SCHEMA_NAME);
+const checkpoints = schema$1.table(CHECKPOINTS_TABLE_NAME, {
   id: text("id").notNull().primaryKey(),
   orderKey: integer("order_key").notNull(),
   uniqueKey: text("unique_key")
 });
-const filters =
+const filters = schema$1.table(
   FILTERS_TABLE_NAME,
   {
     id: text("id").notNull(),
@@ -56,7 +111,7 @@ const filters = pgTable(
     }
   ]
 );
-const schemaVersion =
+const schemaVersion = schema$1.table(SCHEMA_VERSION_TABLE_NAME, {
   k: integer("k").notNull().primaryKey(),
   version: integer("version").notNull()
 });
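The new `drizzle()` factory picks the driver from the connection string: a `postgres://` or `postgresql://` URL (or `type: "node-postgres"`) yields a `pg.Pool`-backed client, and anything else falls back to PGlite, in memory by default. The exported `migrate()` then dispatches to the matching drizzle-orm migrator. A minimal usage sketch; the `transfers` table and the `./drizzle` migrations folder are illustrative, not part of this diff:

import { drizzle, migrate } from "@apibara/plugin-drizzle";
import { pgTable, text, integer } from "drizzle-orm/pg-core";

// Hypothetical application table, for illustration only.
const transfers = pgTable("transfers", {
  id: text("id").primaryKey(),
  orderKey: integer("order_key").notNull(),
});

// Defaults to an in-memory PGlite database; set POSTGRES_CONNECTION_STRING
// (or pass a postgres:// connectionString) to use node-postgres instead.
const db = drizzle({ schema: { transfers } });

// Apply migrations generated with drizzle-kit; the folder path is an assumption.
await migrate(db, { migrationsFolder: "./drizzle" });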
@@ -67,12 +122,19 @@ const MIGRATIONS = [
   // Add more migration arrays for future versions
 ];
 async function initializePersistentState(tx) {
-  await tx.execute(
-
+  await tx.execute(
+    sql.raw(`
+      CREATE SCHEMA IF NOT EXISTS ${SCHEMA_NAME};
+    `)
+  );
+  await tx.execute(
+    sql.raw(`
+      CREATE TABLE IF NOT EXISTS ${SCHEMA_NAME}.${SCHEMA_VERSION_TABLE_NAME} (
         k INTEGER PRIMARY KEY,
         version INTEGER NOT NULL
       );
-  `)
+    `)
+  );
   const versionRows = await tx.select().from(schemaVersion).where(eq(schemaVersion.k, 0));
   const storedVersion = versionRows[0]?.version ?? -1;
   if (storedVersion > CURRENT_SCHEMA_VERSION) {
@@ -82,22 +144,26 @@ async function initializePersistentState(tx) {
   }
   try {
     if (storedVersion === -1) {
-      await tx.execute(
-
+      await tx.execute(
+        sql.raw(`
+          CREATE TABLE IF NOT EXISTS ${SCHEMA_NAME}.${CHECKPOINTS_TABLE_NAME} (
             id TEXT PRIMARY KEY,
             order_key INTEGER NOT NULL,
             unique_key TEXT
           );
-      `)
-
-
+        `)
+      );
+      await tx.execute(
+        sql.raw(`
+          CREATE TABLE IF NOT EXISTS ${SCHEMA_NAME}.${FILTERS_TABLE_NAME} (
             id TEXT NOT NULL,
             filter TEXT NOT NULL,
             from_block INTEGER NOT NULL,
             to_block INTEGER DEFAULT NULL,
             PRIMARY KEY (id, from_block)
           );
-      `)
+        `)
+      );
       await tx.insert(schemaVersion).values({
         k: 0,
         version: CURRENT_SCHEMA_VERSION
@@ -132,7 +198,9 @@ async function persistState(props) {
       target: checkpoints.id,
       set: {
         orderKey: Number(endCursor.orderKey),
-
+        // Explicitly set the unique key to `null` to indicate that it has been deleted
+        // Otherwise drizzle will not update its value.
+        uniqueKey: endCursor.uniqueKey ? endCursor.uniqueKey : null
       }
     });
   if (filter) {
@@ -211,11 +279,24 @@ async function finalizeState(props) {
   });
 }
 }
+async function resetPersistence(props) {
+  const { tx, indexerId } = props;
+  try {
+    await tx.delete(checkpoints).where(eq(checkpoints.id, indexerId));
+    await tx.delete(filters).where(eq(filters.id, indexerId));
+  } catch (error) {
+    throw new DrizzleStorageError("Failed to reset persistence state", {
+      cause: error
+    });
+  }
+}
 
+const ROLLBACK_TABLE_NAME = "reorg_rollback";
+const schema = pgSchema(SCHEMA_NAME);
 function getReorgTriggerName(table, indexerId) {
   return `${table}_reorg_${indexerId}`;
 }
-
+schema.table(ROLLBACK_TABLE_NAME, {
   n: serial("n").primaryKey(),
   op: char("op", { length: 1 }).$type().notNull(),
   table_name: text("table_name").notNull(),
@@ -226,9 +307,12 @@ pgTable("__reorg_rollback", {
 });
 async function initializeReorgRollbackTable(tx, indexerId) {
   try {
+    await tx.execute(`
+      CREATE SCHEMA IF NOT EXISTS ${SCHEMA_NAME};
+    `);
     await tx.execute(
       sql.raw(`
-        CREATE TABLE IF NOT EXISTS
+        CREATE TABLE IF NOT EXISTS ${SCHEMA_NAME}.${ROLLBACK_TABLE_NAME}(
           n SERIAL PRIMARY KEY,
           op CHAR(1) NOT NULL,
           table_name TEXT NOT NULL,
@@ -241,7 +325,7 @@ async function initializeReorgRollbackTable(tx, indexerId) {
     );
     await tx.execute(
       sql.raw(`
-        CREATE INDEX IF NOT EXISTS idx_reorg_rollback_indexer_id_cursor ON
+        CREATE INDEX IF NOT EXISTS idx_reorg_rollback_indexer_id_cursor ON ${SCHEMA_NAME}.${ROLLBACK_TABLE_NAME}(indexer_id, cursor);
       `)
     );
   } catch (error) {
@@ -252,7 +336,7 @@ async function initializeReorgRollbackTable(tx, indexerId) {
   try {
     await tx.execute(
       sql.raw(`
-        CREATE OR REPLACE FUNCTION reorg_checkpoint()
+        CREATE OR REPLACE FUNCTION ${SCHEMA_NAME}.reorg_checkpoint()
         RETURNS TRIGGER AS $$
         DECLARE
           id_col TEXT := TG_ARGV[0]::TEXT;
@@ -262,13 +346,13 @@ async function initializeReorgRollbackTable(tx, indexerId) {
           old_id_value TEXT := row_to_json(OLD.*)->>id_col;
         BEGIN
           IF (TG_OP = 'DELETE') THEN
-            INSERT INTO
+            INSERT INTO ${SCHEMA_NAME}.${ROLLBACK_TABLE_NAME}(op, table_name, cursor, row_id, row_value, indexer_id)
             SELECT 'D', TG_TABLE_NAME, order_key, old_id_value, row_to_json(OLD.*), indexer_id;
           ELSIF (TG_OP = 'UPDATE') THEN
-            INSERT INTO
+            INSERT INTO ${SCHEMA_NAME}.${ROLLBACK_TABLE_NAME}(op, table_name, cursor, row_id, row_value, indexer_id)
             SELECT 'U', TG_TABLE_NAME, order_key, new_id_value, row_to_json(OLD.*), indexer_id;
           ELSIF (TG_OP = 'INSERT') THEN
-            INSERT INTO
+            INSERT INTO ${SCHEMA_NAME}.${ROLLBACK_TABLE_NAME}(op, table_name, cursor, row_id, row_value, indexer_id)
             SELECT 'I', TG_TABLE_NAME, order_key, new_id_value, null, indexer_id;
           END IF;
           RETURN NULL;
@@ -298,7 +382,7 @@ async function registerTriggers(tx, tables, endCursor, idColumn, indexerId) {
         CREATE CONSTRAINT TRIGGER ${getReorgTriggerName(table, indexerId)}
         AFTER INSERT OR UPDATE OR DELETE ON ${table}
         DEFERRABLE INITIALLY DEFERRED
-        FOR EACH ROW EXECUTE FUNCTION reorg_checkpoint('${idColumn}', ${`${Number(endCursor.orderKey)}`}, '${indexerId}');
+        FOR EACH ROW EXECUTE FUNCTION ${SCHEMA_NAME}.reorg_checkpoint('${idColumn}', ${`${Number(endCursor.orderKey)}`}, '${indexerId}');
       `)
     );
   }
@@ -327,7 +411,7 @@ async function invalidate(tx, cursor, idColumn, indexerId) {
   const { rows: result } = await tx.execute(
     sql.raw(`
       WITH deleted AS (
-        DELETE FROM
+        DELETE FROM ${SCHEMA_NAME}.${ROLLBACK_TABLE_NAME}
         WHERE cursor > ${Number(cursor.orderKey)}
         AND indexer_id = '${indexerId}'
         RETURNING *
@@ -420,7 +504,7 @@ async function finalize(tx, cursor, indexerId) {
   try {
     await tx.execute(
       sql.raw(`
-        DELETE FROM
+        DELETE FROM ${SCHEMA_NAME}.${ROLLBACK_TABLE_NAME}
        WHERE cursor <= ${Number(cursor.orderKey)}
         AND indexer_id = '${indexerId}'
       `)
@@ -431,8 +515,37 @@ async function finalize(tx, cursor, indexerId) {
     });
   }
 }
+async function cleanupStorage(tx, tables, indexerId) {
+  try {
+    for (const table of tables) {
+      await tx.execute(
+        sql.raw(
+          `DROP TRIGGER IF EXISTS ${getReorgTriggerName(table, indexerId)} ON ${table};`
+        )
+      );
+    }
+    await tx.execute(
+      sql.raw(`
+        DELETE FROM ${SCHEMA_NAME}.${ROLLBACK_TABLE_NAME}
+        WHERE indexer_id = '${indexerId}'
+      `)
+    );
+    for (const table of tables) {
+      try {
+        await tx.execute(sql.raw(`TRUNCATE TABLE ${table} CASCADE;`));
+      } catch (error) {
+        throw new DrizzleStorageError(`Failed to truncate table ${table}`, {
+          cause: error
+        });
+      }
+    }
+  } catch (error) {
+    throw new DrizzleStorageError("Failed to clean up storage", {
+      cause: error
+    });
+  }
+}
 
-const DRIZZLE_PROPERTY = "_drizzle";
 const MAX_RETRIES = 5;
 function useDrizzleStorage(_db) {
   const context = useIndexerContext();
@@ -448,11 +561,14 @@ function drizzleStorage({
   persistState: enablePersistence = true,
   indexerName: identifier = "default",
   schema,
-  idColumn = "id"
+  idColumn = "id",
+  migrate: migrateOptions
 }) {
   return defineIndexerPlugin((indexer) => {
     let tableNames = [];
     let indexerId = "";
+    const alwaysReindex = process.env["APIBARA_ALWAYS_REINDEX"] === "true";
+    let prevFinality;
     try {
       tableNames = Object.values(schema ?? db._.schema ?? {}).map(
         (table) => table.dbName
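`drizzleStorage` gains a `migrate` option: when set, the options are forwarded to `migrate()` once, inside the retried initialization loop, before the rollback and persistence tables are set up. A hedged configuration sketch; the surrounding indexer definition is omitted and the migrations folder is an assumption:

import { drizzle, drizzleStorage } from "@apibara/plugin-drizzle";

const db = drizzle(); // in-memory PGlite by default

// Plugin configuration only; pass the result to your indexer's plugins.
const storage = drizzleStorage({
  db,
  persistState: true,
  indexerName: "default",
  idColumn: "id",
  // Applied once before initialization, via the pglite or node-postgres migrator.
  migrate: { migrationsFolder: "./drizzle" },
});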
@@ -463,11 +579,33 @@ function drizzleStorage({
       });
     }
     indexer.hooks.hook("run:before", async () => {
-      const
+      const internalContext = useInternalContext();
+      const context = useIndexerContext();
+      const logger = useLogger();
+      context[DRIZZLE_STORAGE_DB_PROPERTY] = db;
+      const { indexerName: indexerFileName, availableIndexers } = internalContext;
       indexerId = generateIndexerId(indexerFileName, identifier);
+      if (alwaysReindex) {
+        logger.warn(
+          `Reindexing: Deleting all data from tables - ${tableNames.join(", ")}`
+        );
+        await withTransaction(db, async (tx) => {
+          await cleanupStorage(tx, tableNames, indexerId);
+          if (enablePersistence) {
+            await resetPersistence({ tx, indexerId });
+          }
+          logger.success("Tables have been cleaned up for reindexing");
+        });
+      }
       let retries = 0;
+      let migrationsApplied = false;
       while (retries <= MAX_RETRIES) {
         try {
+          if (migrateOptions && !migrationsApplied) {
+            await migrate(db, migrateOptions);
+            migrationsApplied = true;
+            logger.success("Migrations applied");
+          }
           await withTransaction(db, async (tx) => {
             await initializeReorgRollbackTable(tx, indexerId);
             if (enablePersistence) {
@@ -477,6 +615,9 @@ function drizzleStorage({
           break;
         } catch (error) {
           if (retries === MAX_RETRIES) {
+            if (error instanceof DrizzleStorageError) {
+              throw error;
+            }
             throw new DrizzleStorageError(
               "Initialization failed after 5 retries",
               {
@@ -559,12 +700,15 @@ function drizzleStorage({
     indexer.hooks.hook("handler:middleware", async ({ use }) => {
       use(async (context, next) => {
         try {
-          const { endCursor, finality } = context;
+          const { endCursor, finality, cursor } = context;
           if (!endCursor) {
             throw new DrizzleStorageError("End Cursor is undefined");
           }
           await withTransaction(db, async (tx) => {
             context[DRIZZLE_PROPERTY] = { db: tx };
+            if (prevFinality === "pending") {
+              await invalidate(tx, cursor, idColumn, indexerId);
+            }
             if (finality !== "finalized") {
               await registerTriggers(
                 tx,
@@ -576,13 +720,14 @@ function drizzleStorage({
             }
             await next();
             delete context[DRIZZLE_PROPERTY];
-            if (enablePersistence) {
+            if (enablePersistence && finality !== "pending") {
               await persistState({
                 tx,
                 endCursor,
                 indexerId
               });
             }
+            prevFinality = finality;
           });
           if (finality !== "finalized") {
             await removeTriggers(db, tableNames, indexerId);
@@ -598,4 +743,4 @@ function drizzleStorage({
   });
 }
 
-export { drizzleStorage, useDrizzleStorage };
+export { drizzle, drizzleStorage, migrate, useDrizzleStorage };
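Two behavioral changes are visible in the middleware hunk: state is no longer persisted for `pending` blocks, and when the previous block was `pending` the plugin calls `invalidate()` before the next block runs, so provisional rows are rolled back first. Inside a handler, the transaction-scoped database is still reached through `useDrizzleStorage`; a sketch, reusing the hypothetical `transfers` table from above:

import { useDrizzleStorage } from "@apibara/plugin-drizzle";

// Inside an indexer's transform handler: the plugin wraps each block in a
// transaction and exposes it as `db`, so writes roll back with the block.
// `transfers` is the illustrative table defined in the earlier sketch.
export async function transform() {
  const { db } = useDrizzleStorage();
  await db.insert(transfers).values({ id: "0x1", orderKey: 100 });
}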
package/dist/shared/plugin-drizzle.cae20704.cjs
ADDED
@@ -0,0 +1,9 @@
+'use strict';
+
+const DRIZZLE_PROPERTY = "_drizzle";
+const DRIZZLE_STORAGE_DB_PROPERTY = "_drizzleStorageDB";
+const SCHEMA_NAME = "airfoil";
+
+exports.DRIZZLE_PROPERTY = DRIZZLE_PROPERTY;
+exports.DRIZZLE_STORAGE_DB_PROPERTY = DRIZZLE_STORAGE_DB_PROPERTY;
+exports.SCHEMA_NAME = SCHEMA_NAME;
package/dist/testing.cjs
ADDED
@@ -0,0 +1,13 @@
+'use strict';
+
+const constants = require('./shared/plugin-drizzle.cae20704.cjs');
+
+function getTestDatabase(context) {
+  const db = context[constants.DRIZZLE_STORAGE_DB_PROPERTY];
+  if (!db) {
+    throw new Error("Drizzle database not found in context");
+  }
+  return db;
+}
+
+exports.getTestDatabase = getTestDatabase;
package/dist/testing.mjs
ADDED
@@ -0,0 +1,11 @@
+import { a as DRIZZLE_STORAGE_DB_PROPERTY } from './shared/plugin-drizzle.2d226351.mjs';
+
+function getTestDatabase(context) {
+  const db = context[DRIZZLE_STORAGE_DB_PROPERTY];
+  if (!db) {
+    throw new Error("Drizzle database not found in context");
+  }
+  return db;
+}
+
+export { getTestDatabase };
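`getTestDatabase` reads back the database instance that `drizzleStorage` stores on the indexer context during `run:before`, under the internal `_drizzleStorageDB` key. A hedged test sketch; how you obtain the context depends on your test harness, so it is only declared here:

import { getTestDatabase } from "@apibara/plugin-drizzle/testing";

// The indexer context produced by a test run with drizzleStorage installed;
// obtaining it is harness-specific, so it is declared rather than constructed.
declare const context: Record<string, unknown>;

const db = getTestDatabase(context);
// Assert against indexed rows, e.g. await db.select().from(transfers);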
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@apibara/plugin-drizzle",
-  "version": "2.1.0-beta.2",
+  "version": "2.1.0-beta.20",
   "type": "module",
   "files": [
     "dist",
@@ -15,6 +15,12 @@
       "import": "./dist/index.mjs",
       "require": "./dist/index.cjs",
       "default": "./dist/index.mjs"
+    },
+    "./testing": {
+      "types": "./dist/testing.d.ts",
+      "import": "./dist/testing.mjs",
+      "require": "./dist/testing.cjs",
+      "default": "./dist/testing.mjs"
     }
   },
   "scripts": {
@@ -26,21 +32,21 @@
     "test:ci": "vitest run"
   },
   "peerDependencies": {
-    "drizzle-orm": "
-    "pg": "
+    "drizzle-orm": "<1",
+    "pg": ">=8"
   },
   "devDependencies": {
     "@electric-sql/pglite": "^0.2.17",
     "@types/node": "^20.14.0",
     "@types/pg": "^8.11.10",
-    "drizzle-orm": "^0.
+    "drizzle-orm": "^0.40.1",
     "pg": "^8.13.1",
     "unbuild": "^2.0.0",
     "vitest": "^1.6.0"
   },
   "dependencies": {
-    "@apibara/indexer": "2.1.0-beta.
-    "@apibara/protocol": "2.1.0-beta.
+    "@apibara/indexer": "2.1.0-beta.20",
+    "@apibara/protocol": "2.1.0-beta.20",
     "postgres-range": "^1.1.4"
   }
 }
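With the added `exports` entry, both module systems resolve the new subpath; a brief resolution sketch (a single module can only use one form):

// ESM consumers resolve ./dist/testing.mjs:
import { getTestDatabase } from "@apibara/plugin-drizzle/testing";

// CommonJS consumers resolve ./dist/testing.cjs, shown as a comment:
// const { getTestDatabase } = require("@apibara/plugin-drizzle/testing");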
package/src/constants.ts
ADDED