@apibara/plugin-drizzle 2.1.0-beta.2 → 2.1.0-beta.20
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.cjs +194 -43
- package/dist/index.d.cts +134 -3
- package/dist/index.d.mts +134 -3
- package/dist/index.d.ts +134 -3
- package/dist/index.mjs +180 -35
- package/dist/shared/plugin-drizzle.2d226351.mjs +5 -0
- package/dist/shared/plugin-drizzle.cae20704.cjs +9 -0
- package/dist/testing.cjs +13 -0
- package/dist/testing.d.cts +6 -0
- package/dist/testing.d.mts +6 -0
- package/dist/testing.d.ts +6 -0
- package/dist/testing.mjs +11 -0
- package/package.json +12 -6
- package/src/constants.ts +3 -0
- package/src/helper.ts +202 -0
- package/src/index.ts +75 -5
- package/src/persistence.ts +60 -18
- package/src/storage.ts +63 -11
- package/src/testing.ts +13 -0
package/src/helper.ts
ADDED
@@ -0,0 +1,202 @@
+import { PGlite, type PGliteOptions } from "@electric-sql/pglite";
+import type { DrizzleConfig } from "drizzle-orm";
+import type { MigrationConfig } from "drizzle-orm/migrator";
+import {
+  type NodePgDatabase as OriginalNodePgDatabase,
+  drizzle as drizzleNode,
+} from "drizzle-orm/node-postgres";
+import { migrate as migrateNode } from "drizzle-orm/node-postgres/migrator";
+import {} from "drizzle-orm/pg-core";
+import {
+  type PgliteDatabase as OriginalPgliteDatabase,
+  drizzle as drizzlePGLite,
+} from "drizzle-orm/pglite";
+import { migrate as migratePGLite } from "drizzle-orm/pglite/migrator";
+import pg from "pg";
+import { DrizzleStorageError } from "./utils";
+
+/**
+ * Union type of all possible drizzle database options
+ */
+export type DrizzleOptions = PgliteDrizzleOptions | NodePgDrizzleOptions;
+
+/**
+ * Configuration options for Node-Postgres database connection
+ */
+export type NodePgDrizzleOptions = {
+  /**
+   * Type of database to use -
+   * - "pglite" - PGLite database
+   * - "node-postgres" - Node-Postgres database
+   * @default "pglite"
+   */
+  type: "node-postgres";
+  /**
+   * Connection string to use for the database
+   * @default ""
+   */
+  connectionString?: string;
+  /**
+   * Pool configuration options for Node-Postgres
+   */
+  poolConfig?: pg.PoolConfig;
+  /**
+   * Additional drizzle configuration options
+   */
+  config?: Omit<DrizzleConfig, "schema">;
+};
+
+/**
+ * Configuration options for PGLite database connection
+ */
+export type PgliteDrizzleOptions = {
+  /**
+   * Type of database to use -
+   * - "pglite" - PGLite database
+   * - "node-postgres" - Node-Postgres database
+   */
+  type?: "pglite";
+  /**
+   * Connection string to use for the database
+   * @default process.env["POSTGRES_CONNECTION_STRING"] ?? "memory://pglite"
+   */
+  connectionString?: string;
+  /**
+   * Pool configuration is not supported for PGLite
+   */
+  poolConfig?: never;
+  /**
+   * Additional drizzle configuration options with PGLite specific connection options
+   */
+  config?: Omit<DrizzleConfig, "schema"> & {
+    connection?:
+      | (PGliteOptions & {
+          dataDir?: string;
+        })
+      | string;
+  };
+};
+
+/**
+ * Extended PGLite database type with client information
+ */
+export type PgliteDatabase<TSchema extends Record<string, unknown>> =
+  OriginalPgliteDatabase<TSchema> & {
+    $client: PGlite;
+  };
+
+/**
+ * Extended Node-Postgres database type with client information
+ */
+export type NodePgDatabase<TSchema extends Record<string, unknown>> =
+  OriginalNodePgDatabase<TSchema> & {
+    $client: pg.Pool;
+  };
+
+export type Database<
+  TOptions extends DrizzleOptions,
+  TSchema extends Record<string, unknown>,
+> = TOptions extends PgliteDrizzleOptions
+  ? PgliteDatabase<TSchema>
+  : NodePgDatabase<TSchema>;
+
+/**
+ * Creates a new Drizzle database instance based on the provided options
+ *
+ * @important connectionString defaults to process.env["POSTGRES_CONNECTION_STRING"], if not set, it defaults to "memory://" (in-memory pglite)
+ *
+ * @param options - Configuration options for the database connection
+ * @returns A configured Drizzle database instance
+ * @throws {Error} If an invalid database type is specified
+ */
+export function drizzle<
+  TSchema extends Record<string, unknown>,
+  TOptions extends DrizzleOptions,
+>(
+  options?: TOptions & {
+    /**
+     * Schema to use for the database
+     * @default {}
+     */
+    schema?: TSchema;
+  },
+): Database<TOptions, TSchema> {
+  const {
+    connectionString = process.env["POSTGRES_CONNECTION_STRING"] ?? "memory://",
+    schema,
+    type = "pglite",
+    config,
+    poolConfig,
+  } = options ?? {};
+
+  if (
+    isPostgresConnectionString(connectionString) ||
+    type === "node-postgres"
+  ) {
+    const pool = new pg.Pool({
+      connectionString,
+      ...(poolConfig || {}),
+    });
+    return drizzleNode(pool, { schema, ...(config || {}) }) as Database<
+      TOptions,
+      TSchema
+    >;
+  }
+
+  if (type === "pglite") {
+    return drizzlePGLite({
+      schema: schema as TSchema,
+      connection: {
+        dataDir: connectionString || "memory://pglite",
+      },
+      ...(config || {}),
+    }) as Database<TOptions, TSchema>;
+  }
+
+  throw new Error("Invalid database type");
+}
+
+/**
+ * Options for database migration
+ */
+export type MigrateOptions = MigrationConfig;
+
+/**
+ * Performs database migration based on the provided configuration
+ * @param db - The database instance to migrate
+ * @param options - Migration configuration options
+ *
+ * @important This function runs migrations on the database instance provided to the `drizzleStorage` plugin.
+ * It automatically detects the type of database and runs the appropriate migrate function
+ * (PGLite or Node-Postgres).
+ *
+ * @example
+ * ```ts
+ * await migrate(db, { migrationsFolder: "./drizzle" });
+ * ```
+ */
+export async function migrate<TSchema extends Record<string, unknown>>(
+  db: PgliteDatabase<TSchema> | NodePgDatabase<TSchema>,
+  options: MigrateOptions,
+) {
+  const isPglite = !!("$client" in db && db.$client instanceof PGlite);
+
+  try {
+    if (isPglite) {
+      await migratePGLite(db as PgliteDatabase<TSchema>, options);
+    } else {
+      await migrateNode(db as NodePgDatabase<TSchema>, options);
+    }
+  } catch (error) {
+    throw new DrizzleStorageError(
+      "Failed to apply migrations! Please check if you have generated migrations using drizzle:generate",
+      {
+        cause: error,
+      },
+    );
+  }
+}
+
+function isPostgresConnectionString(conn: string) {
+  return conn.startsWith("postgres://") || conn.startsWith("postgresql://");
+}
package/src/index.ts
CHANGED
@@ -1,5 +1,5 @@
 import { useIndexerContext } from "@apibara/indexer";
-import { defineIndexerPlugin } from "@apibara/indexer/plugins";
+import { defineIndexerPlugin, useLogger } from "@apibara/indexer/plugins";
 
 import type {
   ExtractTablesWithRelations,
@@ -14,14 +14,18 @@ import type {
   PgQueryResultHKT,
   PgTransaction,
 } from "drizzle-orm/pg-core";
+import { DRIZZLE_PROPERTY, DRIZZLE_STORAGE_DB_PROPERTY } from "./constants";
+import { type MigrateOptions, migrate } from "./helper";
 import {
   finalizeState,
   getState,
   initializePersistentState,
   invalidateState,
   persistState,
+  resetPersistence,
 } from "./persistence";
 import {
+  cleanupStorage,
   finalize,
   initializeReorgRollbackTable,
   invalidate,
@@ -30,7 +34,8 @@ import {
 } from "./storage";
 import { DrizzleStorageError, sleep, withTransaction } from "./utils";
 
-
+export * from "./helper";
+
 const MAX_RETRIES = 5;
 
 export type DrizzleStorage<
@@ -67,11 +72,30 @@ export interface DrizzleStorageOptions<
   TSchema extends
     TablesRelationalConfig = ExtractTablesWithRelations<TFullSchema>,
 > {
+  /**
+   * The Drizzle database instance.
+   */
   db: PgDatabase<TQueryResult, TFullSchema, TSchema>;
+  /**
+   * Whether to persist the indexer's state. Defaults to true.
+   */
   persistState?: boolean;
+  /**
+   * The name of the indexer. Default value is 'default'.
+   */
   indexerName?: string;
+  /**
+   * The schema of the database.
+   */
   schema?: Record<string, unknown>;
+  /**
+   * The column to use as the id. Defaults to 'id'.
+   */
   idColumn?: string;
+  /**
+   * The options for the database migration. When provided, the database will automatically run migrations before the indexer runs.
+   */
+  migrate?: MigrateOptions;
 }
 
 /**
@@ -83,6 +107,7 @@ export interface DrizzleStorageOptions<
  * @param options.indexerName - The name of the indexer. Defaults value is 'default'.
  * @param options.schema - The schema of the database.
  * @param options.idColumn - The column to use as the id. Defaults to 'id'.
+ * @param options.migrate - The options for the database migration. when provided, the database will automatically run migrations before the indexer runs.
  */
 export function drizzleStorage<
   TFilter,
@@ -97,10 +122,13 @@ export function drizzleStorage<
   indexerName: identifier = "default",
   schema,
   idColumn = "id",
+  migrate: migrateOptions,
 }: DrizzleStorageOptions<TQueryResult, TFullSchema, TSchema>) {
   return defineIndexerPlugin<TFilter, TBlock>((indexer) => {
     let tableNames: string[] = [];
     let indexerId = "";
+    const alwaysReindex = process.env["APIBARA_ALWAYS_REINDEX"] === "true";
+    let prevFinality: DataFinality | undefined;
 
     try {
       tableNames = Object.values((schema as TSchema) ?? db._.schema ?? {}).map(
@@ -113,15 +141,46 @@ export function drizzleStorage<
     }
 
     indexer.hooks.hook("run:before", async () => {
+      const internalContext = useInternalContext();
+      const context = useIndexerContext();
+      const logger = useLogger();
+
+      // For testing purposes using vcr.
+      context[DRIZZLE_STORAGE_DB_PROPERTY] = db;
+
       const { indexerName: indexerFileName, availableIndexers } =
-
+        internalContext;
 
       indexerId = generateIndexerId(indexerFileName, identifier);
 
+      if (alwaysReindex) {
+        logger.warn(
+          `Reindexing: Deleting all data from tables - ${tableNames.join(", ")}`,
+        );
+        await withTransaction(db, async (tx) => {
+          await cleanupStorage(tx, tableNames, indexerId);
+
+          if (enablePersistence) {
+            await resetPersistence({ tx, indexerId });
+          }
+
+          logger.success("Tables have been cleaned up for reindexing");
+        });
+      }
+
       let retries = 0;
 
+      // incase the migrations are already applied, we don't want to run them again
+      let migrationsApplied = false;
+
      while (retries <= MAX_RETRIES) {
        try {
+          if (migrateOptions && !migrationsApplied) {
+            // @ts-ignore type mismatch for db
+            await migrate(db, migrateOptions);
+            migrationsApplied = true;
+            logger.success("Migrations applied");
+          }
          await withTransaction(db, async (tx) => {
            await initializeReorgRollbackTable(tx, indexerId);
            if (enablePersistence) {
@@ -131,6 +190,9 @@ export function drizzleStorage<
          break;
        } catch (error) {
          if (retries === MAX_RETRIES) {
+            if (error instanceof DrizzleStorageError) {
+              throw error;
+            }
            throw new DrizzleStorageError(
              "Initialization failed after 5 retries",
              {
@@ -238,7 +300,8 @@ export function drizzleStorage<
    indexer.hooks.hook("handler:middleware", async ({ use }) => {
      use(async (context, next) => {
        try {
-          const { endCursor, finality } = context as {
+          const { endCursor, finality, cursor } = context as {
+            cursor: Cursor;
            endCursor: Cursor;
            finality: DataFinality;
          };
@@ -254,6 +317,11 @@ export function drizzleStorage<
            TSchema
          >;
 
+          if (prevFinality === "pending") {
+            // invalidate if previous block's finality was "pending"
+            await invalidate(tx, cursor, idColumn, indexerId);
+          }
+
          if (finality !== "finalized") {
            await registerTriggers(
              tx,
@@ -267,13 +335,15 @@ export function drizzleStorage<
          await next();
          delete context[DRIZZLE_PROPERTY];
 
-          if (enablePersistence) {
+          if (enablePersistence && finality !== "pending") {
            await persistState({
              tx,
              endCursor,
              indexerId,
            });
          }
+
+          prevFinality = finality;
        });
 
        if (finality !== "finalized") {
package/src/persistence.ts
CHANGED
@@ -1,24 +1,29 @@
 import { type Cursor, normalizeCursor } from "@apibara/protocol";
-import { and, eq, gt, isNull, lt } from "drizzle-orm";
+import { and, eq, gt, isNull, lt, sql } from "drizzle-orm";
 import type {
   ExtractTablesWithRelations,
   TablesRelationalConfig,
 } from "drizzle-orm";
 import type { PgQueryResultHKT, PgTransaction } from "drizzle-orm/pg-core";
-import { integer,
+import { integer, pgSchema, primaryKey, text } from "drizzle-orm/pg-core";
+import { SCHEMA_NAME } from "./constants";
 import { DrizzleStorageError, deserialize, serialize } from "./utils";
 
-const CHECKPOINTS_TABLE_NAME = "
-const FILTERS_TABLE_NAME = "
-const SCHEMA_VERSION_TABLE_NAME = "
+const CHECKPOINTS_TABLE_NAME = "checkpoints";
+const FILTERS_TABLE_NAME = "filters";
+const SCHEMA_VERSION_TABLE_NAME = "schema_version";
 
-
+const schema = pgSchema(SCHEMA_NAME);
+
+/** This table is not used for migrations, its only used for ease of internal operations with drizzle. */
+export const checkpoints = schema.table(CHECKPOINTS_TABLE_NAME, {
   id: text("id").notNull().primaryKey(),
   orderKey: integer("order_key").notNull(),
   uniqueKey: text("unique_key"),
 });
 
-
+/** This table is not used for migrations, its only used for ease of internal operations with drizzle. */
+export const filters = schema.table(
   FILTERS_TABLE_NAME,
   {
     id: text("id").notNull(),
@@ -33,7 +38,8 @@ export const filters = pgTable(
   ],
 );
 
-
+/** This table is not used for migrations, its only used for ease of internal operations with drizzle. */
+export const schemaVersion = schema.table(SCHEMA_VERSION_TABLE_NAME, {
   k: integer("k").notNull().primaryKey(),
   version: integer("version").notNull(),
 });
@@ -53,13 +59,22 @@ export async function initializePersistentState<
   TSchema extends
     TablesRelationalConfig = ExtractTablesWithRelations<TFullSchema>,
 >(tx: PgTransaction<TQueryResult, TFullSchema, TSchema>) {
+  // Create schema if it doesn't exist
+  await tx.execute(
+    sql.raw(`
+      CREATE SCHEMA IF NOT EXISTS ${SCHEMA_NAME};
+    `),
+  );
+
   // Create schema version table
-  await tx.execute(
-
+  await tx.execute(
+    sql.raw(`
+      CREATE TABLE IF NOT EXISTS ${SCHEMA_NAME}.${SCHEMA_VERSION_TABLE_NAME} (
         k INTEGER PRIMARY KEY,
         version INTEGER NOT NULL
       );
-    `)
+    `),
+  );
 
   // Get current schema version
   const versionRows = await tx
@@ -80,23 +95,27 @@ export async function initializePersistentState<
   try {
     if (storedVersion === -1) {
       // First time initialization
-      await tx.execute(
-
+      await tx.execute(
+        sql.raw(`
+          CREATE TABLE IF NOT EXISTS ${SCHEMA_NAME}.${CHECKPOINTS_TABLE_NAME} (
            id TEXT PRIMARY KEY,
            order_key INTEGER NOT NULL,
            unique_key TEXT
          );
-        `)
+        `),
+      );
 
-      await tx.execute(
-
+      await tx.execute(
+        sql.raw(`
+          CREATE TABLE IF NOT EXISTS ${SCHEMA_NAME}.${FILTERS_TABLE_NAME} (
            id TEXT NOT NULL,
            filter TEXT NOT NULL,
            from_block INTEGER NOT NULL,
            to_block INTEGER DEFAULT NULL,
            PRIMARY KEY (id, from_block)
          );
-        `)
+        `),
+      );
 
       // Set initial schema version
       await tx.insert(schemaVersion).values({
@@ -155,7 +174,9 @@ export async function persistState<
       target: checkpoints.id,
       set: {
         orderKey: Number(endCursor.orderKey),
-
+        // Explicitly set the unique key to `null` to indicate that it has been deleted
+        // Otherwise drizzle will not update its value.
+        uniqueKey: endCursor.uniqueKey ? endCursor.uniqueKey : null,
       },
     });
 
@@ -297,3 +318,24 @@ export async function finalizeState<
   });
 }
 }
+
+export async function resetPersistence<
+  TQueryResult extends PgQueryResultHKT,
+  TFullSchema extends Record<string, unknown> = Record<string, never>,
+  TSchema extends
+    TablesRelationalConfig = ExtractTablesWithRelations<TFullSchema>,
+>(props: {
+  tx: PgTransaction<TQueryResult, TFullSchema, TSchema>;
+  indexerId: string;
+}) {
+  const { tx, indexerId } = props;
+
+  try {
+    await tx.delete(checkpoints).where(eq(checkpoints.id, indexerId));
+    await tx.delete(filters).where(eq(filters.id, indexerId));
+  } catch (error) {
+    throw new DrizzleStorageError("Failed to reset persistence state", {
+      cause: error,
+    });
+  }
+}
package/src/storage.ts
CHANGED
@@ -11,19 +11,25 @@ import {
   char,
   integer,
   jsonb,
-
+  pgSchema,
   serial,
   text,
 } from "drizzle-orm/pg-core";
+import { SCHEMA_NAME } from "./constants";
 import { DrizzleStorageError } from "./utils";
 
+const ROLLBACK_TABLE_NAME = "reorg_rollback";
+
+const schema = pgSchema(SCHEMA_NAME);
+
 function getReorgTriggerName(table: string, indexerId: string) {
   return `${table}_reorg_${indexerId}`;
 }
 
 export type ReorgOperation = "I" | "U" | "D";
 
-
+/** This table is not used for migrations, its only used for ease of internal operations with drizzle. */
+export const reorgRollbackTable = schema.table(ROLLBACK_TABLE_NAME, {
   n: serial("n").primaryKey(),
   op: char("op", { length: 1 }).$type<ReorgOperation>().notNull(),
   table_name: text("table_name").notNull(),
@@ -42,10 +48,14 @@ export async function initializeReorgRollbackTable<
     TablesRelationalConfig = ExtractTablesWithRelations<TFullSchema>,
 >(tx: PgTransaction<TQueryResult, TFullSchema, TSchema>, indexerId: string) {
   try {
+    // Create schema if it doesn't exist
+    await tx.execute(`
+      CREATE SCHEMA IF NOT EXISTS ${SCHEMA_NAME};
+    `);
     // Create the audit log table
     await tx.execute(
       sql.raw(`
-        CREATE TABLE IF NOT EXISTS
+        CREATE TABLE IF NOT EXISTS ${SCHEMA_NAME}.${ROLLBACK_TABLE_NAME}(
         n SERIAL PRIMARY KEY,
         op CHAR(1) NOT NULL,
         table_name TEXT NOT NULL,
@@ -59,7 +69,7 @@ export async function initializeReorgRollbackTable<
 
     await tx.execute(
       sql.raw(`
-        CREATE INDEX IF NOT EXISTS idx_reorg_rollback_indexer_id_cursor ON
+        CREATE INDEX IF NOT EXISTS idx_reorg_rollback_indexer_id_cursor ON ${SCHEMA_NAME}.${ROLLBACK_TABLE_NAME}(indexer_id, cursor);
       `),
     );
   } catch (error) {
@@ -72,7 +82,7 @@ export async function initializeReorgRollbackTable<
   // Create the trigger function
   await tx.execute(
     sql.raw(`
-      CREATE OR REPLACE FUNCTION reorg_checkpoint()
+      CREATE OR REPLACE FUNCTION ${SCHEMA_NAME}.reorg_checkpoint()
       RETURNS TRIGGER AS $$
       DECLARE
         id_col TEXT := TG_ARGV[0]::TEXT;
@@ -82,13 +92,13 @@ export async function initializeReorgRollbackTable<
        old_id_value TEXT := row_to_json(OLD.*)->>id_col;
      BEGIN
        IF (TG_OP = 'DELETE') THEN
-          INSERT INTO
+          INSERT INTO ${SCHEMA_NAME}.${ROLLBACK_TABLE_NAME}(op, table_name, cursor, row_id, row_value, indexer_id)
          SELECT 'D', TG_TABLE_NAME, order_key, old_id_value, row_to_json(OLD.*), indexer_id;
        ELSIF (TG_OP = 'UPDATE') THEN
-          INSERT INTO
+          INSERT INTO ${SCHEMA_NAME}.${ROLLBACK_TABLE_NAME}(op, table_name, cursor, row_id, row_value, indexer_id)
          SELECT 'U', TG_TABLE_NAME, order_key, new_id_value, row_to_json(OLD.*), indexer_id;
        ELSIF (TG_OP = 'INSERT') THEN
-          INSERT INTO
+          INSERT INTO ${SCHEMA_NAME}.${ROLLBACK_TABLE_NAME}(op, table_name, cursor, row_id, row_value, indexer_id)
          SELECT 'I', TG_TABLE_NAME, order_key, new_id_value, null, indexer_id;
        END IF;
        RETURN NULL;
@@ -130,7 +140,7 @@ export async function registerTriggers<
        CREATE CONSTRAINT TRIGGER ${getReorgTriggerName(table, indexerId)}
        AFTER INSERT OR UPDATE OR DELETE ON ${table}
        DEFERRABLE INITIALLY DEFERRED
-        FOR EACH ROW EXECUTE FUNCTION reorg_checkpoint('${idColumn}', ${`${Number(endCursor.orderKey)}`}, '${indexerId}');
+        FOR EACH ROW EXECUTE FUNCTION ${SCHEMA_NAME}.reorg_checkpoint('${idColumn}', ${`${Number(endCursor.orderKey)}`}, '${indexerId}');
      `),
    );
 }
@@ -181,7 +191,7 @@ export async function invalidate<
    const { rows: result } = (await tx.execute(
      sql.raw(`
        WITH deleted AS (
-          DELETE FROM
+          DELETE FROM ${SCHEMA_NAME}.${ROLLBACK_TABLE_NAME}
          WHERE cursor > ${Number(cursor.orderKey)}
          AND indexer_id = '${indexerId}'
          RETURNING *
@@ -305,7 +315,7 @@ export async function finalize<
  try {
    await tx.execute(
      sql.raw(`
-        DELETE FROM
+        DELETE FROM ${SCHEMA_NAME}.${ROLLBACK_TABLE_NAME}
        WHERE cursor <= ${Number(cursor.orderKey)}
        AND indexer_id = '${indexerId}'
      `),
@@ -316,3 +326,45 @@ export async function finalize<
    });
  }
 }
+
+export async function cleanupStorage<
+  TQueryResult extends PgQueryResultHKT,
+  TFullSchema extends Record<string, unknown> = Record<string, never>,
+  TSchema extends
+    TablesRelationalConfig = ExtractTablesWithRelations<TFullSchema>,
+>(
+  tx: PgTransaction<TQueryResult, TFullSchema, TSchema>,
+  tables: string[],
+  indexerId: string,
+) {
+  try {
+    for (const table of tables) {
+      await tx.execute(
+        sql.raw(
+          `DROP TRIGGER IF EXISTS ${getReorgTriggerName(table, indexerId)} ON ${table};`,
+        ),
+      );
+    }
+
+    await tx.execute(
+      sql.raw(`
+        DELETE FROM ${SCHEMA_NAME}.${ROLLBACK_TABLE_NAME}
+        WHERE indexer_id = '${indexerId}'
+      `),
+    );
+
+    for (const table of tables) {
+      try {
+        await tx.execute(sql.raw(`TRUNCATE TABLE ${table} CASCADE;`));
+      } catch (error) {
+        throw new DrizzleStorageError(`Failed to truncate table ${table}`, {
+          cause: error,
+        });
+      }
+    }
+  } catch (error) {
+    throw new DrizzleStorageError("Failed to clean up storage", {
+      cause: error,
+    });
+  }
+}
package/src/testing.ts
ADDED
@@ -0,0 +1,13 @@
+import type { VcrResult } from "@apibara/indexer/testing";
+import type { PgDatabase, PgQueryResultHKT } from "drizzle-orm/pg-core";
+import { DRIZZLE_STORAGE_DB_PROPERTY } from "./constants";
+
+export function getTestDatabase(context: VcrResult) {
+  const db = context[DRIZZLE_STORAGE_DB_PROPERTY];
+
+  if (!db) {
+    throw new Error("Drizzle database not found in context");
+  }
+
+  return db as PgDatabase<PgQueryResultHKT>;
+}