@apibara/plugin-drizzle 2.1.0-beta.3 → 2.1.0-beta.31
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.cjs +251 -64
- package/dist/index.cjs.map +1 -0
- package/dist/index.d.cts +165 -4
- package/dist/index.d.mts +165 -4
- package/dist/index.d.ts +165 -4
- package/dist/index.mjs +240 -55
- package/dist/index.mjs.map +1 -0
- package/dist/shared/plugin-drizzle.2d226351.mjs +6 -0
- package/dist/shared/plugin-drizzle.2d226351.mjs.map +1 -0
- package/dist/shared/plugin-drizzle.cae20704.cjs +10 -0
- package/dist/shared/plugin-drizzle.cae20704.cjs.map +1 -0
- package/dist/testing.cjs +14 -0
- package/dist/testing.cjs.map +1 -0
- package/dist/testing.d.cts +6 -0
- package/dist/testing.d.mts +6 -0
- package/dist/testing.d.ts +6 -0
- package/dist/testing.mjs +12 -0
- package/dist/testing.mjs.map +1 -0
- package/package.json +21 -6
- package/src/constants.ts +3 -0
- package/src/helper.ts +219 -0
- package/src/index.ts +142 -17
- package/src/persistence.ts +60 -18
- package/src/storage.ts +88 -23
- package/src/testing.ts +13 -0
- package/src/utils.ts +19 -0
package/src/index.ts
CHANGED
@@ -1,5 +1,5 @@
 import { useIndexerContext } from "@apibara/indexer";
-import { defineIndexerPlugin } from "@apibara/indexer/plugins";
+import { defineIndexerPlugin, useLogger } from "@apibara/indexer/plugins";
 
 import type {
   ExtractTablesWithRelations,
@@ -14,23 +14,36 @@ import type {
   PgQueryResultHKT,
   PgTransaction,
 } from "drizzle-orm/pg-core";
+import { DRIZZLE_PROPERTY, DRIZZLE_STORAGE_DB_PROPERTY } from "./constants";
+import { type MigrateOptions, migrate } from "./helper";
 import {
   finalizeState,
   getState,
   initializePersistentState,
   invalidateState,
   persistState,
+  resetPersistence,
 } from "./persistence";
 import {
+  cleanupStorage,
   finalize,
   initializeReorgRollbackTable,
   invalidate,
   registerTriggers,
   removeTriggers,
 } from "./storage";
-import {
+import {
+  DrizzleStorageError,
+  type IdColumnMap,
+  getIdColumnForTable,
+  sleep,
+  withTransaction,
+} from "./utils";
+
+export * from "./helper";
+
+export type { IdColumnMap };
 
-const DRIZZLE_PROPERTY = "_drizzle";
 const MAX_RETRIES = 5;
 
 export type DrizzleStorage<
@@ -67,11 +80,53 @@ export interface DrizzleStorageOptions<
   TSchema extends
     TablesRelationalConfig = ExtractTablesWithRelations<TFullSchema>,
 > {
+  /**
+   * The Drizzle database instance.
+   */
   db: PgDatabase<TQueryResult, TFullSchema, TSchema>;
+  /**
+   * Whether to persist the indexer's state. Defaults to true.
+   */
   persistState?: boolean;
+  /**
+   * The name of the indexer. Default value is 'default'.
+   */
   indexerName?: string;
+  /**
+   * The schema of the database.
+   */
   schema?: Record<string, unknown>;
-
+  /**
+   * The column to use as the primary identifier for each table.
+   *
+   * This identifier is used for tracking changes during reorgs and rollbacks.
+   *
+   * Can be specified in two ways:
+   *
+   * 1. As a single string that applies to all tables:
+   * ```ts
+   * idColumn: "_id" // Uses "_id" column for all tables
+   * ```
+   *
+   * 2. As an object mapping table names to their ID columns:
+   * ```ts
+   * idColumn: {
+   *   transfers: "transaction_hash", // Use "transaction_hash" for transfers table
+   *   blocks: "block_number", // Use "block_number" for blocks table
+   *   "*": "_id" // Use "_id" for all other tables | defaults to "id"
+   * }
+   * ```
+   *
+   * The special "*" key acts as a fallback for any tables not explicitly mapped.
+   *
+   * @default "id"
+   * @type {string | Partial<IdColumnMap>}
+   */
+  idColumn?: string | Partial<IdColumnMap>;
+  /**
+   * The options for the database migration. When provided, the database will automatically run migrations before the indexer runs.
+   */
+  migrate?: MigrateOptions;
 }
 
 /**
@@ -83,6 +138,7 @@ export interface DrizzleStorageOptions<
  * @param options.indexerName - The name of the indexer. Defaults value is 'default'.
  * @param options.schema - The schema of the database.
  * @param options.idColumn - The column to use as the id. Defaults to 'id'.
+ * @param options.migrate - The options for the database migration. when provided, the database will automatically run migrations before the indexer runs.
  */
 export function drizzleStorage<
   TFilter,
@@ -95,42 +151,101 @@ export function drizzleStorage<
   db,
   persistState: enablePersistence = true,
   indexerName: identifier = "default",
-  schema,
-  idColumn
+  schema: _schema,
+  idColumn,
+  migrate: migrateOptions,
 }: DrizzleStorageOptions<TQueryResult, TFullSchema, TSchema>) {
   return defineIndexerPlugin<TFilter, TBlock>((indexer) => {
     let tableNames: string[] = [];
     let indexerId = "";
+    const alwaysReindex = process.env["APIBARA_ALWAYS_REINDEX"] === "true";
+    let prevFinality: DataFinality | undefined;
+    const schema: TSchema = (_schema as TSchema) ?? db._.schema ?? {};
+    const idColumnMap: IdColumnMap = {
+      "*": typeof idColumn === "string" ? idColumn : "id",
+      ...(typeof idColumn === "object" ? idColumn : {}),
+    };
 
     try {
-      tableNames = Object.values(
-        (table) => table.dbName,
-      );
+      tableNames = Object.values(schema).map((table) => table.dbName);
     } catch (error) {
       throw new DrizzleStorageError("Failed to get table names from schema", {
         cause: error,
       });
     }
 
+    // Check if specified idColumn exists in all the tables in schema
+    for (const table of Object.values(schema)) {
+      const columns = table.columns;
+      const tableIdColumn = getIdColumnForTable(table.dbName, idColumnMap);
+
+      const columnExists = Object.values(columns).some(
+        (column) => column.name === tableIdColumn,
+      );
+
+      if (!columnExists) {
+        throw new DrizzleStorageError(
+          `Column \`"${tableIdColumn}"\` does not exist in table \`"${table.dbName}"\`. ` +
+            "Make sure the table has the specified column or provide a valid `idColumn` mapping to `drizzleStorage`.",
+        );
+      }
+    }
+
     indexer.hooks.hook("run:before", async () => {
+      const internalContext = useInternalContext();
+      const context = useIndexerContext();
+      const logger = useLogger();
+
+      // For testing purposes using vcr.
+      context[DRIZZLE_STORAGE_DB_PROPERTY] = db;
+
       const { indexerName: indexerFileName, availableIndexers } =
-
+        internalContext;
 
       indexerId = generateIndexerId(indexerFileName, identifier);
 
       let retries = 0;
 
+      // incase the migrations are already applied, we don't want to run them again
+      let migrationsApplied = false;
+      let cleanupApplied = false;
+
       while (retries <= MAX_RETRIES) {
         try {
+          if (migrateOptions && !migrationsApplied) {
+            // @ts-ignore type mismatch for db
+            await migrate(db, migrateOptions);
+            migrationsApplied = true;
+            logger.success("Migrations applied");
+          }
           await withTransaction(db, async (tx) => {
             await initializeReorgRollbackTable(tx, indexerId);
             if (enablePersistence) {
               await initializePersistentState(tx);
             }
+
+            if (alwaysReindex && !cleanupApplied) {
+              logger.warn(
+                `Reindexing: Deleting all data from tables - ${tableNames.join(", ")}`,
+              );
+
+              await cleanupStorage(tx, tableNames, indexerId);
+
+              if (enablePersistence) {
+                await resetPersistence({ tx, indexerId });
+              }
+
+              cleanupApplied = true;
+
+              logger.success("Tables have been cleaned up for reindexing");
+            }
           });
           break;
         } catch (error) {
           if (retries === MAX_RETRIES) {
+            if (error instanceof DrizzleStorageError) {
+              throw error;
+            }
             throw new DrizzleStorageError(
               "Initialization failed after 5 retries",
               {
@@ -177,7 +292,8 @@ export function drizzleStorage<
       }
 
       await withTransaction(db, async (tx) => {
-
+        // Use the appropriate idColumn for each table when calling invalidate
+        await invalidate(tx, cursor, idColumnMap, indexerId);
 
         if (enablePersistence) {
           await invalidateState({ tx, cursor, indexerId });
@@ -204,7 +320,7 @@ export function drizzleStorage<
     });
 
     indexer.hooks.hook("message:finalize", async ({ message }) => {
-      const { cursor } = message
+      const { cursor } = message;
 
       if (!cursor) {
         throw new DrizzleStorageError("Finalized Cursor is undefined");
@@ -220,14 +336,15 @@ export function drizzleStorage<
     });
 
     indexer.hooks.hook("message:invalidate", async ({ message }) => {
-      const { cursor } = message
+      const { cursor } = message;
 
       if (!cursor) {
         throw new DrizzleStorageError("Invalidate Cursor is undefined");
      }
 
       await withTransaction(db, async (tx) => {
-
+        // Use the appropriate idColumn for each table when calling invalidate
+        await invalidate(tx, cursor, idColumnMap, indexerId);
 
         if (enablePersistence) {
           await invalidateState({ tx, cursor, indexerId });
@@ -238,7 +355,8 @@ export function drizzleStorage<
     indexer.hooks.hook("handler:middleware", async ({ use }) => {
       use(async (context, next) => {
         try {
-          const { endCursor, finality } = context as {
+          const { endCursor, finality, cursor } = context as {
+            cursor: Cursor;
             endCursor: Cursor;
             finality: DataFinality;
           };
@@ -254,12 +372,17 @@ export function drizzleStorage<
             TSchema
           >;
 
+          if (prevFinality === "pending") {
+            // invalidate if previous block's finality was "pending"
+            await invalidate(tx, cursor, idColumnMap, indexerId);
+          }
+
           if (finality !== "finalized") {
             await registerTriggers(
               tx,
               tableNames,
               endCursor,
-
+              idColumnMap,
               indexerId,
             );
           }
@@ -267,13 +390,15 @@ export function drizzleStorage<
           await next();
           delete context[DRIZZLE_PROPERTY];
 
-          if (enablePersistence) {
+          if (enablePersistence && finality !== "pending") {
            await persistState({
              tx,
              endCursor,
              indexerId,
            });
          }
+
+          prevFinality = finality;
        });
 
        if (finality !== "finalized") {
package/src/persistence.ts
CHANGED
@@ -1,24 +1,29 @@
 import { type Cursor, normalizeCursor } from "@apibara/protocol";
-import { and, eq, gt, isNull, lt } from "drizzle-orm";
+import { and, eq, gt, isNull, lt, sql } from "drizzle-orm";
 import type {
   ExtractTablesWithRelations,
   TablesRelationalConfig,
 } from "drizzle-orm";
 import type { PgQueryResultHKT, PgTransaction } from "drizzle-orm/pg-core";
-import { integer,
+import { integer, pgSchema, primaryKey, text } from "drizzle-orm/pg-core";
+import { SCHEMA_NAME } from "./constants";
 import { DrizzleStorageError, deserialize, serialize } from "./utils";
 
-const CHECKPOINTS_TABLE_NAME = "
-const FILTERS_TABLE_NAME = "
-const SCHEMA_VERSION_TABLE_NAME = "
+const CHECKPOINTS_TABLE_NAME = "checkpoints";
+const FILTERS_TABLE_NAME = "filters";
+const SCHEMA_VERSION_TABLE_NAME = "schema_version";
 
-
+const schema = pgSchema(SCHEMA_NAME);
+
+/** This table is not used for migrations, its only used for ease of internal operations with drizzle. */
+export const checkpoints = schema.table(CHECKPOINTS_TABLE_NAME, {
   id: text("id").notNull().primaryKey(),
   orderKey: integer("order_key").notNull(),
   uniqueKey: text("unique_key"),
 });
 
-
+/** This table is not used for migrations, its only used for ease of internal operations with drizzle. */
+export const filters = schema.table(
   FILTERS_TABLE_NAME,
   {
     id: text("id").notNull(),
@@ -33,7 +38,8 @@ export const filters = pgTable(
   ],
 );
 
-
+/** This table is not used for migrations, its only used for ease of internal operations with drizzle. */
+export const schemaVersion = schema.table(SCHEMA_VERSION_TABLE_NAME, {
   k: integer("k").notNull().primaryKey(),
   version: integer("version").notNull(),
 });
@@ -53,13 +59,22 @@ export async function initializePersistentState<
   TSchema extends
     TablesRelationalConfig = ExtractTablesWithRelations<TFullSchema>,
 >(tx: PgTransaction<TQueryResult, TFullSchema, TSchema>) {
+  // Create schema if it doesn't exist
+  await tx.execute(
+    sql.raw(`
+    CREATE SCHEMA IF NOT EXISTS ${SCHEMA_NAME};
+  `),
+  );
+
   // Create schema version table
-  await tx.execute(
-
+  await tx.execute(
+    sql.raw(`
+    CREATE TABLE IF NOT EXISTS ${SCHEMA_NAME}.${SCHEMA_VERSION_TABLE_NAME} (
       k INTEGER PRIMARY KEY,
       version INTEGER NOT NULL
     );
-  `)
+  `),
+  );
 
   // Get current schema version
   const versionRows = await tx
@@ -80,23 +95,27 @@ export async function initializePersistentState<
   try {
     if (storedVersion === -1) {
       // First time initialization
-      await tx.execute(
-
+      await tx.execute(
+        sql.raw(`
+        CREATE TABLE IF NOT EXISTS ${SCHEMA_NAME}.${CHECKPOINTS_TABLE_NAME} (
          id TEXT PRIMARY KEY,
          order_key INTEGER NOT NULL,
          unique_key TEXT
        );
-      `)
+      `),
+      );
 
-      await tx.execute(
-
+      await tx.execute(
+        sql.raw(`
+        CREATE TABLE IF NOT EXISTS ${SCHEMA_NAME}.${FILTERS_TABLE_NAME} (
          id TEXT NOT NULL,
          filter TEXT NOT NULL,
          from_block INTEGER NOT NULL,
          to_block INTEGER DEFAULT NULL,
          PRIMARY KEY (id, from_block)
        );
-      `)
+      `),
+      );
 
       // Set initial schema version
       await tx.insert(schemaVersion).values({
@@ -155,7 +174,9 @@ export async function persistState<
       target: checkpoints.id,
       set: {
         orderKey: Number(endCursor.orderKey),
-
+        // Explicitly set the unique key to `null` to indicate that it has been deleted
+        // Otherwise drizzle will not update its value.
+        uniqueKey: endCursor.uniqueKey ? endCursor.uniqueKey : null,
       },
     });
 
@@ -297,3 +318,24 @@ export async function finalizeState<
     });
   }
 }
+
+export async function resetPersistence<
+  TQueryResult extends PgQueryResultHKT,
+  TFullSchema extends Record<string, unknown> = Record<string, never>,
+  TSchema extends
+    TablesRelationalConfig = ExtractTablesWithRelations<TFullSchema>,
+>(props: {
+  tx: PgTransaction<TQueryResult, TFullSchema, TSchema>;
+  indexerId: string;
+}) {
+  const { tx, indexerId } = props;
+
+  try {
+    await tx.delete(checkpoints).where(eq(checkpoints.id, indexerId));
+    await tx.delete(filters).where(eq(filters.id, indexerId));
+  } catch (error) {
+    throw new DrizzleStorageError("Failed to reset persistence state", {
+      cause: error,
+    });
+  }
+}
package/src/storage.ts
CHANGED
@@ -11,11 +11,20 @@ import {
   char,
   integer,
   jsonb,
-
+  pgSchema,
   serial,
   text,
 } from "drizzle-orm/pg-core";
-import {
+import { SCHEMA_NAME } from "./constants";
+import {
+  DrizzleStorageError,
+  type IdColumnMap,
+  getIdColumnForTable,
+} from "./utils";
+
+const ROLLBACK_TABLE_NAME = "reorg_rollback";
+
+const schema = pgSchema(SCHEMA_NAME);
 
 function getReorgTriggerName(table: string, indexerId: string) {
   return `${table}_reorg_${indexerId}`;
@@ -23,7 +32,8 @@ function getReorgTriggerName(table: string, indexerId: string) {
 
 export type ReorgOperation = "I" | "U" | "D";
 
-
+/** This table is not used for migrations, its only used for ease of internal operations with drizzle. */
+export const reorgRollbackTable = schema.table(ROLLBACK_TABLE_NAME, {
   n: serial("n").primaryKey(),
   op: char("op", { length: 1 }).$type<ReorgOperation>().notNull(),
   table_name: text("table_name").notNull(),
@@ -42,10 +52,14 @@ export async function initializeReorgRollbackTable<
     TablesRelationalConfig = ExtractTablesWithRelations<TFullSchema>,
 >(tx: PgTransaction<TQueryResult, TFullSchema, TSchema>, indexerId: string) {
   try {
+    // Create schema if it doesn't exist
+    await tx.execute(`
+      CREATE SCHEMA IF NOT EXISTS ${SCHEMA_NAME};
+    `);
     // Create the audit log table
     await tx.execute(
       sql.raw(`
-      CREATE TABLE IF NOT EXISTS
+      CREATE TABLE IF NOT EXISTS ${SCHEMA_NAME}.${ROLLBACK_TABLE_NAME}(
        n SERIAL PRIMARY KEY,
        op CHAR(1) NOT NULL,
        table_name TEXT NOT NULL,
@@ -59,7 +73,7 @@ export async function initializeReorgRollbackTable<
 
     await tx.execute(
       sql.raw(`
-      CREATE INDEX IF NOT EXISTS idx_reorg_rollback_indexer_id_cursor ON
+      CREATE INDEX IF NOT EXISTS idx_reorg_rollback_indexer_id_cursor ON ${SCHEMA_NAME}.${ROLLBACK_TABLE_NAME}(indexer_id, cursor);
     `),
     );
   } catch (error) {
@@ -72,24 +86,25 @@ export async function initializeReorgRollbackTable<
   // Create the trigger function
   await tx.execute(
     sql.raw(`
-    CREATE OR REPLACE FUNCTION reorg_checkpoint()
+    CREATE OR REPLACE FUNCTION ${SCHEMA_NAME}.reorg_checkpoint()
     RETURNS TRIGGER AS $$
     DECLARE
-
-
-
+      table_name TEXT := TG_ARGV[0]::TEXT;
+      id_col TEXT := TG_ARGV[1]::TEXT;
+      order_key INTEGER := TG_ARGV[2]::INTEGER;
+      indexer_id TEXT := TG_ARGV[3]::TEXT;
      new_id_value TEXT := row_to_json(NEW.*)->>id_col;
      old_id_value TEXT := row_to_json(OLD.*)->>id_col;
    BEGIN
      IF (TG_OP = 'DELETE') THEN
-       INSERT INTO
-       SELECT 'D',
+       INSERT INTO ${SCHEMA_NAME}.${ROLLBACK_TABLE_NAME}(op, table_name, cursor, row_id, row_value, indexer_id)
+       SELECT 'D', table_name, order_key, old_id_value, row_to_json(OLD.*), indexer_id;
      ELSIF (TG_OP = 'UPDATE') THEN
-       INSERT INTO
-       SELECT 'U',
+       INSERT INTO ${SCHEMA_NAME}.${ROLLBACK_TABLE_NAME}(op, table_name, cursor, row_id, row_value, indexer_id)
+       SELECT 'U', table_name, order_key, new_id_value, row_to_json(OLD.*), indexer_id;
      ELSIF (TG_OP = 'INSERT') THEN
-       INSERT INTO
-       SELECT 'I',
+       INSERT INTO ${SCHEMA_NAME}.${ROLLBACK_TABLE_NAME}(op, table_name, cursor, row_id, row_value, indexer_id)
+       SELECT 'I', table_name, order_key, new_id_value, null, indexer_id;
      END IF;
      RETURN NULL;
    END;
@@ -115,11 +130,14 @@ export async function registerTriggers<
   tx: PgTransaction<TQueryResult, TFullSchema, TSchema>,
   tables: string[],
   endCursor: Cursor,
-
+  idColumnMap: IdColumnMap,
   indexerId: string,
 ) {
   try {
     for (const table of tables) {
+      // Determine the column ID for this specific table
+      const tableIdColumn = getIdColumnForTable(table, idColumnMap);
+
       await tx.execute(
         sql.raw(
           `DROP TRIGGER IF EXISTS ${getReorgTriggerName(table, indexerId)} ON ${table};`,
@@ -130,7 +148,7 @@ export async function registerTriggers<
          CREATE CONSTRAINT TRIGGER ${getReorgTriggerName(table, indexerId)}
          AFTER INSERT OR UPDATE OR DELETE ON ${table}
          DEFERRABLE INITIALLY DEFERRED
-         FOR EACH ROW EXECUTE FUNCTION reorg_checkpoint('${
+         FOR EACH ROW EXECUTE FUNCTION ${SCHEMA_NAME}.reorg_checkpoint('${table}', '${tableIdColumn}', ${Number(endCursor.orderKey)}, '${indexerId}');
       `),
       );
     }
@@ -174,14 +192,14 @@ export async function invalidate<
 >(
   tx: PgTransaction<TQueryResult, TFullSchema, TSchema>,
   cursor: Cursor,
-
+  idColumnMap: IdColumnMap,
   indexerId: string,
 ) {
   // Get and delete operations after cursor in one query, ordered by newest first
   const { rows: result } = (await tx.execute(
     sql.raw(`
     WITH deleted AS (
-      DELETE FROM
+      DELETE FROM ${SCHEMA_NAME}.${ROLLBACK_TABLE_NAME}
       WHERE cursor > ${Number(cursor.orderKey)}
       AND indexer_id = '${indexerId}'
       RETURNING *
@@ -198,6 +216,9 @@ export async function invalidate<
 
   // Process each operation in reverse order
   for (const op of result) {
+    // Determine the column ID for this specific table
+    const tableIdColumn = getIdColumnForTable(op.table_name, idColumnMap);
+
     switch (op.op) {
       case "I":
         try {
@@ -208,7 +229,7 @@ export async function invalidate<
           await tx.execute(
             sql.raw(`
             DELETE FROM ${op.table_name}
-            WHERE ${
+            WHERE ${tableIdColumn} = '${op.row_id}'
           `),
           );
         } catch (error) {
@@ -261,7 +282,9 @@ export async function invalidate<
             ? JSON.parse(op.row_value)
             : op.row_value;
 
-          const nonIdKeys = Object.keys(rowValue).filter(
+          const nonIdKeys = Object.keys(rowValue).filter(
+            (k) => k !== tableIdColumn,
+          );
 
           const fields = nonIdKeys.map((c) => `${c} = prev.${c}`).join(", ");
 
@@ -271,7 +294,7 @@ export async function invalidate<
            FROM (
              SELECT * FROM json_populate_record(null::${op.table_name}, '${JSON.stringify(op.row_value)}'::json)
            ) as prev
-           WHERE ${op.table_name}.${
+           WHERE ${op.table_name}.${tableIdColumn} = '${op.row_id}'
          `);
 
          await tx.execute(query);
@@ -305,7 +328,7 @@ export async function finalize<
   try {
     await tx.execute(
       sql.raw(`
-      DELETE FROM
+      DELETE FROM ${SCHEMA_NAME}.${ROLLBACK_TABLE_NAME}
       WHERE cursor <= ${Number(cursor.orderKey)}
       AND indexer_id = '${indexerId}'
     `),
@@ -316,3 +339,45 @@ export async function finalize<
     });
   }
 }
+
+export async function cleanupStorage<
+  TQueryResult extends PgQueryResultHKT,
+  TFullSchema extends Record<string, unknown> = Record<string, never>,
+  TSchema extends
+    TablesRelationalConfig = ExtractTablesWithRelations<TFullSchema>,
+>(
+  tx: PgTransaction<TQueryResult, TFullSchema, TSchema>,
+  tables: string[],
+  indexerId: string,
+) {
+  try {
+    for (const table of tables) {
+      await tx.execute(
+        sql.raw(
+          `DROP TRIGGER IF EXISTS ${getReorgTriggerName(table, indexerId)} ON ${table};`,
+        ),
+      );
+    }
+
+    await tx.execute(
+      sql.raw(`
+      DELETE FROM ${SCHEMA_NAME}.${ROLLBACK_TABLE_NAME}
+      WHERE indexer_id = '${indexerId}'
+    `),
+    );
+
+    for (const table of tables) {
+      try {
+        await tx.execute(sql.raw(`TRUNCATE TABLE ${table} CASCADE;`));
+      } catch (error) {
+        throw new DrizzleStorageError(`Failed to truncate table ${table}`, {
+          cause: error,
+        });
+      }
+    }
+  } catch (error) {
+    throw new DrizzleStorageError("Failed to clean up storage", {
+      cause: error,
+    });
+  }
+}
package/src/testing.ts
ADDED
@@ -0,0 +1,13 @@
+import type { VcrResult } from "@apibara/indexer/testing";
+import type { PgDatabase, PgQueryResultHKT } from "drizzle-orm/pg-core";
+import { DRIZZLE_STORAGE_DB_PROPERTY } from "./constants";
+
+export function getTestDatabase(context: VcrResult) {
+  const db = context[DRIZZLE_STORAGE_DB_PROPERTY];
+
+  if (!db) {
+    throw new Error("Drizzle database not found in context");
+  }
+
+  return db as PgDatabase<PgQueryResultHKT>;
+}
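`getTestDatabase` retrieves the database instance that `drizzleStorage` stores on the context under `DRIZZLE_STORAGE_DB_PROPERTY` in its `run:before` hook. A hedged sketch of how a test might consume it, assuming the package exposes a `./testing` entry point (the new `dist/testing.*` files suggest it does) and assuming a `VcrResult` already produced by the indexer testing harness; how that result is obtained is outside this diff, and `transfers` is a hypothetical application table.

```ts
import type { VcrResult } from "@apibara/indexer/testing";
import { getTestDatabase } from "@apibara/plugin-drizzle/testing";
import { transfers } from "./schema"; // hypothetical application table

// Assumes `result` came from running the indexer through the testing harness
// with drizzleStorage configured, so the db was attached to the context.
export async function assertTransfersIndexed(result: VcrResult) {
  const db = getTestDatabase(result);
  const rows = await db.select().from(transfers);

  if (rows.length === 0) {
    throw new Error("expected the indexer run to write transfer rows");
  }
}
```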