@apibara/plugin-drizzle 2.1.0-beta.14 → 2.1.0-beta.16
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.cjs +45 -28
- package/dist/index.d.cts +4 -1
- package/dist/index.d.mts +4 -1
- package/dist/index.d.ts +4 -1
- package/dist/index.mjs +47 -30
- package/dist/shared/plugin-drizzle.2d226351.mjs +5 -0
- package/dist/shared/{plugin-drizzle.e884ca32.cjs → plugin-drizzle.cae20704.cjs} +2 -0
- package/dist/testing.cjs +1 -1
- package/dist/testing.mjs +1 -1
- package/package.json +3 -3
- package/src/constants.ts +1 -0
- package/src/helper.ts +5 -2
- package/src/persistence.ts +36 -17
- package/src/storage.ts +22 -12
- package/dist/shared/plugin-drizzle.f8d1b186.mjs +0 -4
package/dist/index.cjs
CHANGED
@@ -4,7 +4,7 @@ const indexer = require('@apibara/indexer');
 const plugins = require('@apibara/indexer/plugins');
 const internal = require('@apibara/indexer/internal');
 const plugins$1 = require('@apibara/indexer/internal/plugins');
-const constants = require('./shared/plugin-drizzle.e884ca32.cjs');
+const constants = require('./shared/plugin-drizzle.cae20704.cjs');
 const pglite$1 = require('@electric-sql/pglite');
 const nodePostgres = require('drizzle-orm/node-postgres');
 const migrator$1 = require('drizzle-orm/node-postgres/migrator');
@@ -49,7 +49,7 @@ function sleep(ms) {
 
 function drizzle(options) {
   const {
-    connectionString = "memory://",
+    connectionString = process.env["POSTGRES_CONNECTION_STRING"] ?? "memory://",
     schema,
     type = "pglite",
     config,
@@ -91,15 +91,16 @@ async function migrate(db, options) {
   }
 }
 
-const CHECKPOINTS_TABLE_NAME = "
-const FILTERS_TABLE_NAME = "
-const SCHEMA_VERSION_TABLE_NAME = "
-const
+const CHECKPOINTS_TABLE_NAME = "checkpoints";
+const FILTERS_TABLE_NAME = "filters";
+const SCHEMA_VERSION_TABLE_NAME = "schema_version";
+const schema$1 = pgCore.pgSchema(constants.SCHEMA_NAME);
+const checkpoints = schema$1.table(CHECKPOINTS_TABLE_NAME, {
   id: pgCore.text("id").notNull().primaryKey(),
   orderKey: pgCore.integer("order_key").notNull(),
   uniqueKey: pgCore.text("unique_key")
 });
-const filters =
+const filters = schema$1.table(
   FILTERS_TABLE_NAME,
   {
     id: pgCore.text("id").notNull(),
@@ -113,7 +114,7 @@ const filters = pgCore.pgTable(
     }
   ]
 );
-const schemaVersion =
+const schemaVersion = schema$1.table(SCHEMA_VERSION_TABLE_NAME, {
   k: pgCore.integer("k").notNull().primaryKey(),
   version: pgCore.integer("version").notNull()
 });
@@ -124,12 +125,19 @@ const MIGRATIONS = [
   // Add more migration arrays for future versions
 ];
 async function initializePersistentState(tx) {
-  await tx.execute(
-
+  await tx.execute(
+    drizzleOrm.sql.raw(`
+      CREATE SCHEMA IF NOT EXISTS ${constants.SCHEMA_NAME};
+    `)
+  );
+  await tx.execute(
+    drizzleOrm.sql.raw(`
+      CREATE TABLE IF NOT EXISTS ${constants.SCHEMA_NAME}.${SCHEMA_VERSION_TABLE_NAME} (
        k INTEGER PRIMARY KEY,
        version INTEGER NOT NULL
      );
-  `)
+    `)
+  );
   const versionRows = await tx.select().from(schemaVersion).where(drizzleOrm.eq(schemaVersion.k, 0));
   const storedVersion = versionRows[0]?.version ?? -1;
   if (storedVersion > CURRENT_SCHEMA_VERSION) {
@@ -139,22 +147,26 @@ async function initializePersistentState(tx) {
   }
   try {
     if (storedVersion === -1) {
-      await tx.execute(
-
+      await tx.execute(
+        drizzleOrm.sql.raw(`
+          CREATE TABLE IF NOT EXISTS ${constants.SCHEMA_NAME}.${CHECKPOINTS_TABLE_NAME} (
            id TEXT PRIMARY KEY,
            order_key INTEGER NOT NULL,
            unique_key TEXT
          );
-      `)
-
-
+        `)
+      );
+      await tx.execute(
+        drizzleOrm.sql.raw(`
+          CREATE TABLE IF NOT EXISTS ${constants.SCHEMA_NAME}.${FILTERS_TABLE_NAME} (
            id TEXT NOT NULL,
            filter TEXT NOT NULL,
            from_block INTEGER NOT NULL,
            to_block INTEGER DEFAULT NULL,
            PRIMARY KEY (id, from_block)
          );
-      `)
+        `)
+      );
       await tx.insert(schemaVersion).values({
         k: 0,
         version: CURRENT_SCHEMA_VERSION
@@ -280,10 +292,12 @@ async function resetPersistence(props) {
   }
 }
 
+const ROLLBACK_TABLE_NAME = "reorg_rollback";
+const schema = pgCore.pgSchema(constants.SCHEMA_NAME);
 function getReorgTriggerName(table, indexerId) {
   return `${table}_reorg_${indexerId}`;
 }
-
+schema.table(ROLLBACK_TABLE_NAME, {
   n: pgCore.serial("n").primaryKey(),
   op: pgCore.char("op", { length: 1 }).$type().notNull(),
   table_name: pgCore.text("table_name").notNull(),
@@ -294,9 +308,12 @@ pgCore.pgTable("__reorg_rollback", {
 });
 async function initializeReorgRollbackTable(tx, indexerId) {
   try {
+    await tx.execute(`
+      CREATE SCHEMA IF NOT EXISTS ${constants.SCHEMA_NAME};
+    `);
     await tx.execute(
       drizzleOrm.sql.raw(`
-        CREATE TABLE IF NOT EXISTS
+        CREATE TABLE IF NOT EXISTS ${constants.SCHEMA_NAME}.${ROLLBACK_TABLE_NAME}(
          n SERIAL PRIMARY KEY,
          op CHAR(1) NOT NULL,
          table_name TEXT NOT NULL,
@@ -309,7 +326,7 @@ async function initializeReorgRollbackTable(tx, indexerId) {
     );
     await tx.execute(
       drizzleOrm.sql.raw(`
-        CREATE INDEX IF NOT EXISTS idx_reorg_rollback_indexer_id_cursor ON
+        CREATE INDEX IF NOT EXISTS idx_reorg_rollback_indexer_id_cursor ON ${constants.SCHEMA_NAME}.${ROLLBACK_TABLE_NAME}(indexer_id, cursor);
       `)
     );
   } catch (error) {
@@ -320,7 +337,7 @@ async function initializeReorgRollbackTable(tx, indexerId) {
   try {
     await tx.execute(
       drizzleOrm.sql.raw(`
-        CREATE OR REPLACE FUNCTION reorg_checkpoint()
+        CREATE OR REPLACE FUNCTION ${constants.SCHEMA_NAME}.reorg_checkpoint()
         RETURNS TRIGGER AS $$
         DECLARE
           id_col TEXT := TG_ARGV[0]::TEXT;
@@ -330,13 +347,13 @@ async function initializeReorgRollbackTable(tx, indexerId) {
           old_id_value TEXT := row_to_json(OLD.*)->>id_col;
         BEGIN
           IF (TG_OP = 'DELETE') THEN
-            INSERT INTO
+            INSERT INTO ${constants.SCHEMA_NAME}.${ROLLBACK_TABLE_NAME}(op, table_name, cursor, row_id, row_value, indexer_id)
             SELECT 'D', TG_TABLE_NAME, order_key, old_id_value, row_to_json(OLD.*), indexer_id;
           ELSIF (TG_OP = 'UPDATE') THEN
-            INSERT INTO
+            INSERT INTO ${constants.SCHEMA_NAME}.${ROLLBACK_TABLE_NAME}(op, table_name, cursor, row_id, row_value, indexer_id)
             SELECT 'U', TG_TABLE_NAME, order_key, new_id_value, row_to_json(OLD.*), indexer_id;
           ELSIF (TG_OP = 'INSERT') THEN
-            INSERT INTO
+            INSERT INTO ${constants.SCHEMA_NAME}.${ROLLBACK_TABLE_NAME}(op, table_name, cursor, row_id, row_value, indexer_id)
             SELECT 'I', TG_TABLE_NAME, order_key, new_id_value, null, indexer_id;
           END IF;
           RETURN NULL;
@@ -366,7 +383,7 @@ async function registerTriggers(tx, tables, endCursor, idColumn, indexerId) {
         CREATE CONSTRAINT TRIGGER ${getReorgTriggerName(table, indexerId)}
         AFTER INSERT OR UPDATE OR DELETE ON ${table}
         DEFERRABLE INITIALLY DEFERRED
-        FOR EACH ROW EXECUTE FUNCTION reorg_checkpoint('${idColumn}', ${`${Number(endCursor.orderKey)}`}, '${indexerId}');
+        FOR EACH ROW EXECUTE FUNCTION ${constants.SCHEMA_NAME}.reorg_checkpoint('${idColumn}', ${`${Number(endCursor.orderKey)}`}, '${indexerId}');
       `)
     );
   }
@@ -395,7 +412,7 @@ async function invalidate(tx, cursor, idColumn, indexerId) {
   const { rows: result } = await tx.execute(
     drizzleOrm.sql.raw(`
       WITH deleted AS (
-        DELETE FROM
+        DELETE FROM ${constants.SCHEMA_NAME}.${ROLLBACK_TABLE_NAME}
         WHERE cursor > ${Number(cursor.orderKey)}
         AND indexer_id = '${indexerId}'
         RETURNING *
@@ -488,7 +505,7 @@ async function finalize(tx, cursor, indexerId) {
   try {
     await tx.execute(
       drizzleOrm.sql.raw(`
-        DELETE FROM
+        DELETE FROM ${constants.SCHEMA_NAME}.${ROLLBACK_TABLE_NAME}
         WHERE cursor <= ${Number(cursor.orderKey)}
         AND indexer_id = '${indexerId}'
       `)
@@ -510,7 +527,7 @@ async function cleanupStorage(tx, tables, indexerId) {
   }
   await tx.execute(
     drizzleOrm.sql.raw(`
-      DELETE FROM
+      DELETE FROM ${constants.SCHEMA_NAME}.${ROLLBACK_TABLE_NAME}
       WHERE indexer_id = '${indexerId}'
     `)
   );
package/dist/index.d.cts
CHANGED
@@ -48,7 +48,7 @@ type PgliteDrizzleOptions = {
     type?: "pglite";
     /**
      * Connection string to use for the database
-     * @default "memory://pglite"
+     * @default process.env["POSTGRES_CONNECTION_STRING"] ?? "memory://pglite"
      */
     connectionString?: string;
     /**
@@ -79,6 +79,9 @@ type NodePgDatabase<TSchema extends Record<string, unknown>> = NodePgDatabase$1<
 type Database<TOptions extends DrizzleOptions, TSchema extends Record<string, unknown>> = TOptions extends PgliteDrizzleOptions ? PgliteDatabase<TSchema> : NodePgDatabase<TSchema>;
 /**
  * Creates a new Drizzle database instance based on the provided options
+ *
+ * @important connectionString defaults to process.env["POSTGRES_CONNECTION_STRING"], if not set, it defaults to "memory://" (in-memory pglite)
+ *
  * @param options - Configuration options for the database connection
  * @returns A configured Drizzle database instance
  * @throws {Error} If an invalid database type is specified
package/dist/index.d.mts
CHANGED
@@ -48,7 +48,7 @@ type PgliteDrizzleOptions = {
     type?: "pglite";
     /**
      * Connection string to use for the database
-     * @default "memory://pglite"
+     * @default process.env["POSTGRES_CONNECTION_STRING"] ?? "memory://pglite"
      */
     connectionString?: string;
     /**
@@ -79,6 +79,9 @@ type NodePgDatabase<TSchema extends Record<string, unknown>> = NodePgDatabase$1<
 type Database<TOptions extends DrizzleOptions, TSchema extends Record<string, unknown>> = TOptions extends PgliteDrizzleOptions ? PgliteDatabase<TSchema> : NodePgDatabase<TSchema>;
 /**
  * Creates a new Drizzle database instance based on the provided options
+ *
+ * @important connectionString defaults to process.env["POSTGRES_CONNECTION_STRING"], if not set, it defaults to "memory://" (in-memory pglite)
+ *
  * @param options - Configuration options for the database connection
  * @returns A configured Drizzle database instance
  * @throws {Error} If an invalid database type is specified
package/dist/index.d.ts
CHANGED
@@ -48,7 +48,7 @@ type PgliteDrizzleOptions = {
     type?: "pglite";
     /**
      * Connection string to use for the database
-     * @default "memory://pglite"
+     * @default process.env["POSTGRES_CONNECTION_STRING"] ?? "memory://pglite"
      */
     connectionString?: string;
     /**
@@ -79,6 +79,9 @@ type NodePgDatabase<TSchema extends Record<string, unknown>> = NodePgDatabase$1<
 type Database<TOptions extends DrizzleOptions, TSchema extends Record<string, unknown>> = TOptions extends PgliteDrizzleOptions ? PgliteDatabase<TSchema> : NodePgDatabase<TSchema>;
 /**
  * Creates a new Drizzle database instance based on the provided options
+ *
+ * @important connectionString defaults to process.env["POSTGRES_CONNECTION_STRING"], if not set, it defaults to "memory://" (in-memory pglite)
+ *
  * @param options - Configuration options for the database connection
  * @returns A configured Drizzle database instance
  * @throws {Error} If an invalid database type is specified
package/dist/index.mjs
CHANGED
@@ -2,7 +2,7 @@ import { useIndexerContext } from '@apibara/indexer';
 import { defineIndexerPlugin, useLogger } from '@apibara/indexer/plugins';
 import { generateIndexerId } from '@apibara/indexer/internal';
 import { useInternalContext } from '@apibara/indexer/internal/plugins';
-import { D as DRIZZLE_PROPERTY, a as DRIZZLE_STORAGE_DB_PROPERTY } from './shared/plugin-drizzle.f8d1b186.mjs';
+import { S as SCHEMA_NAME, D as DRIZZLE_PROPERTY, a as DRIZZLE_STORAGE_DB_PROPERTY } from './shared/plugin-drizzle.2d226351.mjs';
 import { PGlite } from '@electric-sql/pglite';
 import { drizzle as drizzle$1 } from 'drizzle-orm/node-postgres';
 import { migrate as migrate$2 } from 'drizzle-orm/node-postgres/migrator';
@@ -10,8 +10,8 @@ import { drizzle as drizzle$2 } from 'drizzle-orm/pglite';
 import { migrate as migrate$1 } from 'drizzle-orm/pglite/migrator';
 import pg from 'pg';
 import { normalizeCursor } from '@apibara/protocol';
-import { eq, and, isNull, gt, lt
-import {
+import { sql, eq, and, isNull, gt, lt } from 'drizzle-orm';
+import { pgSchema, text, integer, primaryKey, serial, char, jsonb } from 'drizzle-orm/pg-core';
 
 class DrizzleStorageError extends Error {
   constructor(message, options) {
@@ -43,7 +43,7 @@ function sleep(ms) {
 
 function drizzle(options) {
   const {
-    connectionString = "memory://",
+    connectionString = process.env["POSTGRES_CONNECTION_STRING"] ?? "memory://",
     schema,
     type = "pglite",
     config,
@@ -85,15 +85,16 @@ async function migrate(db, options) {
   }
 }
 
-const CHECKPOINTS_TABLE_NAME = "
-const FILTERS_TABLE_NAME = "
-const SCHEMA_VERSION_TABLE_NAME = "
-const
+const CHECKPOINTS_TABLE_NAME = "checkpoints";
+const FILTERS_TABLE_NAME = "filters";
+const SCHEMA_VERSION_TABLE_NAME = "schema_version";
+const schema$1 = pgSchema(SCHEMA_NAME);
+const checkpoints = schema$1.table(CHECKPOINTS_TABLE_NAME, {
   id: text("id").notNull().primaryKey(),
   orderKey: integer("order_key").notNull(),
   uniqueKey: text("unique_key")
 });
-const filters =
+const filters = schema$1.table(
   FILTERS_TABLE_NAME,
   {
     id: text("id").notNull(),
@@ -107,7 +108,7 @@ const filters = pgTable(
     }
   ]
 );
-const schemaVersion =
+const schemaVersion = schema$1.table(SCHEMA_VERSION_TABLE_NAME, {
   k: integer("k").notNull().primaryKey(),
   version: integer("version").notNull()
 });
@@ -118,12 +119,19 @@ const MIGRATIONS = [
   // Add more migration arrays for future versions
 ];
 async function initializePersistentState(tx) {
-  await tx.execute(
-
+  await tx.execute(
+    sql.raw(`
+      CREATE SCHEMA IF NOT EXISTS ${SCHEMA_NAME};
+    `)
+  );
+  await tx.execute(
+    sql.raw(`
+      CREATE TABLE IF NOT EXISTS ${SCHEMA_NAME}.${SCHEMA_VERSION_TABLE_NAME} (
        k INTEGER PRIMARY KEY,
        version INTEGER NOT NULL
      );
-  `)
+    `)
+  );
   const versionRows = await tx.select().from(schemaVersion).where(eq(schemaVersion.k, 0));
   const storedVersion = versionRows[0]?.version ?? -1;
   if (storedVersion > CURRENT_SCHEMA_VERSION) {
@@ -133,22 +141,26 @@ async function initializePersistentState(tx) {
   }
   try {
     if (storedVersion === -1) {
-      await tx.execute(
-
+      await tx.execute(
+        sql.raw(`
+          CREATE TABLE IF NOT EXISTS ${SCHEMA_NAME}.${CHECKPOINTS_TABLE_NAME} (
            id TEXT PRIMARY KEY,
            order_key INTEGER NOT NULL,
            unique_key TEXT
          );
-      `)
-
-
+        `)
+      );
+      await tx.execute(
+        sql.raw(`
+          CREATE TABLE IF NOT EXISTS ${SCHEMA_NAME}.${FILTERS_TABLE_NAME} (
            id TEXT NOT NULL,
            filter TEXT NOT NULL,
            from_block INTEGER NOT NULL,
            to_block INTEGER DEFAULT NULL,
            PRIMARY KEY (id, from_block)
          );
-      `)
+        `)
+      );
       await tx.insert(schemaVersion).values({
         k: 0,
         version: CURRENT_SCHEMA_VERSION
@@ -274,10 +286,12 @@ async function resetPersistence(props) {
   }
 }
 
+const ROLLBACK_TABLE_NAME = "reorg_rollback";
+const schema = pgSchema(SCHEMA_NAME);
 function getReorgTriggerName(table, indexerId) {
   return `${table}_reorg_${indexerId}`;
 }
-
+schema.table(ROLLBACK_TABLE_NAME, {
   n: serial("n").primaryKey(),
   op: char("op", { length: 1 }).$type().notNull(),
   table_name: text("table_name").notNull(),
@@ -288,9 +302,12 @@ pgTable("__reorg_rollback", {
 });
 async function initializeReorgRollbackTable(tx, indexerId) {
   try {
+    await tx.execute(`
+      CREATE SCHEMA IF NOT EXISTS ${SCHEMA_NAME};
+    `);
     await tx.execute(
       sql.raw(`
-        CREATE TABLE IF NOT EXISTS
+        CREATE TABLE IF NOT EXISTS ${SCHEMA_NAME}.${ROLLBACK_TABLE_NAME}(
          n SERIAL PRIMARY KEY,
          op CHAR(1) NOT NULL,
          table_name TEXT NOT NULL,
@@ -303,7 +320,7 @@ async function initializeReorgRollbackTable(tx, indexerId) {
     );
     await tx.execute(
       sql.raw(`
-        CREATE INDEX IF NOT EXISTS idx_reorg_rollback_indexer_id_cursor ON
+        CREATE INDEX IF NOT EXISTS idx_reorg_rollback_indexer_id_cursor ON ${SCHEMA_NAME}.${ROLLBACK_TABLE_NAME}(indexer_id, cursor);
       `)
     );
   } catch (error) {
@@ -314,7 +331,7 @@ async function initializeReorgRollbackTable(tx, indexerId) {
   try {
     await tx.execute(
       sql.raw(`
-        CREATE OR REPLACE FUNCTION reorg_checkpoint()
+        CREATE OR REPLACE FUNCTION ${SCHEMA_NAME}.reorg_checkpoint()
         RETURNS TRIGGER AS $$
         DECLARE
           id_col TEXT := TG_ARGV[0]::TEXT;
@@ -324,13 +341,13 @@ async function initializeReorgRollbackTable(tx, indexerId) {
           old_id_value TEXT := row_to_json(OLD.*)->>id_col;
         BEGIN
           IF (TG_OP = 'DELETE') THEN
-            INSERT INTO
+            INSERT INTO ${SCHEMA_NAME}.${ROLLBACK_TABLE_NAME}(op, table_name, cursor, row_id, row_value, indexer_id)
             SELECT 'D', TG_TABLE_NAME, order_key, old_id_value, row_to_json(OLD.*), indexer_id;
           ELSIF (TG_OP = 'UPDATE') THEN
-            INSERT INTO
+            INSERT INTO ${SCHEMA_NAME}.${ROLLBACK_TABLE_NAME}(op, table_name, cursor, row_id, row_value, indexer_id)
             SELECT 'U', TG_TABLE_NAME, order_key, new_id_value, row_to_json(OLD.*), indexer_id;
           ELSIF (TG_OP = 'INSERT') THEN
-            INSERT INTO
+            INSERT INTO ${SCHEMA_NAME}.${ROLLBACK_TABLE_NAME}(op, table_name, cursor, row_id, row_value, indexer_id)
             SELECT 'I', TG_TABLE_NAME, order_key, new_id_value, null, indexer_id;
           END IF;
           RETURN NULL;
@@ -360,7 +377,7 @@ async function registerTriggers(tx, tables, endCursor, idColumn, indexerId) {
         CREATE CONSTRAINT TRIGGER ${getReorgTriggerName(table, indexerId)}
         AFTER INSERT OR UPDATE OR DELETE ON ${table}
         DEFERRABLE INITIALLY DEFERRED
-        FOR EACH ROW EXECUTE FUNCTION reorg_checkpoint('${idColumn}', ${`${Number(endCursor.orderKey)}`}, '${indexerId}');
+        FOR EACH ROW EXECUTE FUNCTION ${SCHEMA_NAME}.reorg_checkpoint('${idColumn}', ${`${Number(endCursor.orderKey)}`}, '${indexerId}');
       `)
     );
   }
@@ -389,7 +406,7 @@ async function invalidate(tx, cursor, idColumn, indexerId) {
   const { rows: result } = await tx.execute(
     sql.raw(`
       WITH deleted AS (
-        DELETE FROM
+        DELETE FROM ${SCHEMA_NAME}.${ROLLBACK_TABLE_NAME}
        WHERE cursor > ${Number(cursor.orderKey)}
        AND indexer_id = '${indexerId}'
        RETURNING *
@@ -482,7 +499,7 @@ async function finalize(tx, cursor, indexerId) {
   try {
     await tx.execute(
       sql.raw(`
-        DELETE FROM
+        DELETE FROM ${SCHEMA_NAME}.${ROLLBACK_TABLE_NAME}
         WHERE cursor <= ${Number(cursor.orderKey)}
         AND indexer_id = '${indexerId}'
       `)
@@ -504,7 +521,7 @@ async function cleanupStorage(tx, tables, indexerId) {
   }
   await tx.execute(
     sql.raw(`
-      DELETE FROM
+      DELETE FROM ${SCHEMA_NAME}.${ROLLBACK_TABLE_NAME}
       WHERE indexer_id = '${indexerId}'
     `)
   );
package/dist/shared/{plugin-drizzle.e884ca32.cjs → plugin-drizzle.cae20704.cjs}
CHANGED
@@ -2,6 +2,8 @@
 
 const DRIZZLE_PROPERTY = "_drizzle";
 const DRIZZLE_STORAGE_DB_PROPERTY = "_drizzleStorageDB";
+const SCHEMA_NAME = "airfoil";
 
 exports.DRIZZLE_PROPERTY = DRIZZLE_PROPERTY;
 exports.DRIZZLE_STORAGE_DB_PROPERTY = DRIZZLE_STORAGE_DB_PROPERTY;
+exports.SCHEMA_NAME = SCHEMA_NAME;
package/dist/testing.cjs
CHANGED
package/dist/testing.mjs
CHANGED
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@apibara/plugin-drizzle",
-  "version": "2.1.0-beta.14",
+  "version": "2.1.0-beta.16",
   "type": "module",
   "files": [
     "dist",
@@ -45,8 +45,8 @@
     "vitest": "^1.6.0"
   },
   "dependencies": {
-    "@apibara/indexer": "2.1.0-beta.
-    "@apibara/protocol": "2.1.0-beta.
+    "@apibara/indexer": "2.1.0-beta.16",
+    "@apibara/protocol": "2.1.0-beta.16",
     "postgres-range": "^1.1.4"
   }
 }
package/src/constants.ts
CHANGED
package/src/helper.ts
CHANGED
@@ -58,7 +58,7 @@ export type PgliteDrizzleOptions = {
   type?: "pglite";
   /**
    * Connection string to use for the database
-   * @default "memory://pglite"
+   * @default process.env["POSTGRES_CONNECTION_STRING"] ?? "memory://pglite"
    */
   connectionString?: string;
   /**
@@ -102,6 +102,9 @@ export type Database<
 
 /**
  * Creates a new Drizzle database instance based on the provided options
+ *
+ * @important connectionString defaults to process.env["POSTGRES_CONNECTION_STRING"], if not set, it defaults to "memory://" (in-memory pglite)
+ *
  * @param options - Configuration options for the database connection
  * @returns A configured Drizzle database instance
  * @throws {Error} If an invalid database type is specified
@@ -119,7 +122,7 @@ export function drizzle<
   },
 ): Database<TOptions, TSchema> {
   const {
-    connectionString = "memory://",
+    connectionString = process.env["POSTGRES_CONNECTION_STRING"] ?? "memory://",
     schema,
     type = "pglite",
     config,
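
The net effect of the helper.ts change is that the connection string now resolves from an explicit option first, then from POSTGRES_CONNECTION_STRING, then from the in-memory pglite fallback. A minimal sketch of how a consumer might rely on this; the schema import path is hypothetical and not part of the package.

import { drizzle } from "@apibara/plugin-drizzle";
import * as schema from "./lib/schema"; // hypothetical user schema module

// Resolution order after this release:
//   1. an explicit `connectionString` option,
//   2. process.env["POSTGRES_CONNECTION_STRING"],
//   3. "memory://" (in-memory pglite), the previous default.
const db = drizzle({ schema });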
package/src/persistence.ts
CHANGED
@@ -1,24 +1,29 @@
 import { type Cursor, normalizeCursor } from "@apibara/protocol";
-import { and, eq, gt, isNull, lt } from "drizzle-orm";
+import { and, eq, gt, isNull, lt, sql } from "drizzle-orm";
 import type {
   ExtractTablesWithRelations,
   TablesRelationalConfig,
 } from "drizzle-orm";
 import type { PgQueryResultHKT, PgTransaction } from "drizzle-orm/pg-core";
-import { integer,
+import { integer, pgSchema, primaryKey, text } from "drizzle-orm/pg-core";
+import { SCHEMA_NAME } from "./constants";
 import { DrizzleStorageError, deserialize, serialize } from "./utils";
 
-const CHECKPOINTS_TABLE_NAME = "
-const FILTERS_TABLE_NAME = "
-const SCHEMA_VERSION_TABLE_NAME = "
+const CHECKPOINTS_TABLE_NAME = "checkpoints";
+const FILTERS_TABLE_NAME = "filters";
+const SCHEMA_VERSION_TABLE_NAME = "schema_version";
 
-
+const schema = pgSchema(SCHEMA_NAME);
+
+/** This table is not used for migrations, its only used for ease of internal operations with drizzle. */
+export const checkpoints = schema.table(CHECKPOINTS_TABLE_NAME, {
   id: text("id").notNull().primaryKey(),
   orderKey: integer("order_key").notNull(),
   uniqueKey: text("unique_key"),
 });
 
-
+/** This table is not used for migrations, its only used for ease of internal operations with drizzle. */
+export const filters = schema.table(
   FILTERS_TABLE_NAME,
   {
     id: text("id").notNull(),
@@ -33,7 +38,8 @@ export const filters = pgTable(
   ],
 );
 
-
+/** This table is not used for migrations, its only used for ease of internal operations with drizzle. */
+export const schemaVersion = schema.table(SCHEMA_VERSION_TABLE_NAME, {
   k: integer("k").notNull().primaryKey(),
   version: integer("version").notNull(),
 });
@@ -53,13 +59,22 @@ export async function initializePersistentState<
   TSchema extends
     TablesRelationalConfig = ExtractTablesWithRelations<TFullSchema>,
 >(tx: PgTransaction<TQueryResult, TFullSchema, TSchema>) {
+  // Create schema if it doesn't exist
+  await tx.execute(
+    sql.raw(`
+      CREATE SCHEMA IF NOT EXISTS ${SCHEMA_NAME};
+    `),
+  );
+
   // Create schema version table
-  await tx.execute(
-
+  await tx.execute(
+    sql.raw(`
+      CREATE TABLE IF NOT EXISTS ${SCHEMA_NAME}.${SCHEMA_VERSION_TABLE_NAME} (
        k INTEGER PRIMARY KEY,
        version INTEGER NOT NULL
      );
-  `)
+    `),
+  );
 
   // Get current schema version
   const versionRows = await tx
@@ -80,23 +95,27 @@ export async function initializePersistentState<
   try {
     if (storedVersion === -1) {
       // First time initialization
-      await tx.execute(
-
+      await tx.execute(
+        sql.raw(`
+          CREATE TABLE IF NOT EXISTS ${SCHEMA_NAME}.${CHECKPOINTS_TABLE_NAME} (
            id TEXT PRIMARY KEY,
            order_key INTEGER NOT NULL,
            unique_key TEXT
          );
-      `)
+        `),
+      );
 
-      await tx.execute(
-
+      await tx.execute(
+        sql.raw(`
+          CREATE TABLE IF NOT EXISTS ${SCHEMA_NAME}.${FILTERS_TABLE_NAME} (
            id TEXT NOT NULL,
            filter TEXT NOT NULL,
            from_block INTEGER NOT NULL,
            to_block INTEGER DEFAULT NULL,
            PRIMARY KEY (id, from_block)
          );
-      `)
+        `),
+      );
 
       // Set initial schema version
       await tx.insert(schemaVersion).values({
package/src/storage.ts
CHANGED
@@ -11,19 +11,25 @@ import {
   char,
   integer,
   jsonb,
-
+  pgSchema,
   serial,
   text,
 } from "drizzle-orm/pg-core";
+import { SCHEMA_NAME } from "./constants";
 import { DrizzleStorageError } from "./utils";
 
+const ROLLBACK_TABLE_NAME = "reorg_rollback";
+
+const schema = pgSchema(SCHEMA_NAME);
+
 function getReorgTriggerName(table: string, indexerId: string) {
   return `${table}_reorg_${indexerId}`;
 }
 
 export type ReorgOperation = "I" | "U" | "D";
 
-
+/** This table is not used for migrations, its only used for ease of internal operations with drizzle. */
+export const reorgRollbackTable = schema.table(ROLLBACK_TABLE_NAME, {
   n: serial("n").primaryKey(),
   op: char("op", { length: 1 }).$type<ReorgOperation>().notNull(),
   table_name: text("table_name").notNull(),
@@ -42,10 +48,14 @@ export async function initializeReorgRollbackTable<
     TablesRelationalConfig = ExtractTablesWithRelations<TFullSchema>,
 >(tx: PgTransaction<TQueryResult, TFullSchema, TSchema>, indexerId: string) {
   try {
+    // Create schema if it doesn't exist
+    await tx.execute(`
+      CREATE SCHEMA IF NOT EXISTS ${SCHEMA_NAME};
+    `);
     // Create the audit log table
     await tx.execute(
       sql.raw(`
-        CREATE TABLE IF NOT EXISTS
+        CREATE TABLE IF NOT EXISTS ${SCHEMA_NAME}.${ROLLBACK_TABLE_NAME}(
         n SERIAL PRIMARY KEY,
         op CHAR(1) NOT NULL,
         table_name TEXT NOT NULL,
@@ -59,7 +69,7 @@ export async function initializeReorgRollbackTable<
 
     await tx.execute(
       sql.raw(`
-        CREATE INDEX IF NOT EXISTS idx_reorg_rollback_indexer_id_cursor ON
+        CREATE INDEX IF NOT EXISTS idx_reorg_rollback_indexer_id_cursor ON ${SCHEMA_NAME}.${ROLLBACK_TABLE_NAME}(indexer_id, cursor);
       `),
     );
   } catch (error) {
@@ -72,7 +82,7 @@ export async function initializeReorgRollbackTable<
   // Create the trigger function
   await tx.execute(
     sql.raw(`
-      CREATE OR REPLACE FUNCTION reorg_checkpoint()
+      CREATE OR REPLACE FUNCTION ${SCHEMA_NAME}.reorg_checkpoint()
       RETURNS TRIGGER AS $$
       DECLARE
         id_col TEXT := TG_ARGV[0]::TEXT;
@@ -82,13 +92,13 @@ export async function initializeReorgRollbackTable<
        old_id_value TEXT := row_to_json(OLD.*)->>id_col;
       BEGIN
         IF (TG_OP = 'DELETE') THEN
-          INSERT INTO
+          INSERT INTO ${SCHEMA_NAME}.${ROLLBACK_TABLE_NAME}(op, table_name, cursor, row_id, row_value, indexer_id)
           SELECT 'D', TG_TABLE_NAME, order_key, old_id_value, row_to_json(OLD.*), indexer_id;
         ELSIF (TG_OP = 'UPDATE') THEN
-          INSERT INTO
+          INSERT INTO ${SCHEMA_NAME}.${ROLLBACK_TABLE_NAME}(op, table_name, cursor, row_id, row_value, indexer_id)
           SELECT 'U', TG_TABLE_NAME, order_key, new_id_value, row_to_json(OLD.*), indexer_id;
         ELSIF (TG_OP = 'INSERT') THEN
-          INSERT INTO
+          INSERT INTO ${SCHEMA_NAME}.${ROLLBACK_TABLE_NAME}(op, table_name, cursor, row_id, row_value, indexer_id)
          SELECT 'I', TG_TABLE_NAME, order_key, new_id_value, null, indexer_id;
        END IF;
        RETURN NULL;
@@ -130,7 +140,7 @@ export async function registerTriggers<
       CREATE CONSTRAINT TRIGGER ${getReorgTriggerName(table, indexerId)}
       AFTER INSERT OR UPDATE OR DELETE ON ${table}
       DEFERRABLE INITIALLY DEFERRED
-      FOR EACH ROW EXECUTE FUNCTION reorg_checkpoint('${idColumn}', ${`${Number(endCursor.orderKey)}`}, '${indexerId}');
+      FOR EACH ROW EXECUTE FUNCTION ${SCHEMA_NAME}.reorg_checkpoint('${idColumn}', ${`${Number(endCursor.orderKey)}`}, '${indexerId}');
      `),
    );
  }
@@ -181,7 +191,7 @@ export async function invalidate<
   const { rows: result } = (await tx.execute(
     sql.raw(`
       WITH deleted AS (
-        DELETE FROM
+        DELETE FROM ${SCHEMA_NAME}.${ROLLBACK_TABLE_NAME}
         WHERE cursor > ${Number(cursor.orderKey)}
         AND indexer_id = '${indexerId}'
         RETURNING *
@@ -305,7 +315,7 @@ export async function finalize<
   try {
     await tx.execute(
       sql.raw(`
-        DELETE FROM
+        DELETE FROM ${SCHEMA_NAME}.${ROLLBACK_TABLE_NAME}
         WHERE cursor <= ${Number(cursor.orderKey)}
         AND indexer_id = '${indexerId}'
       `),
@@ -338,7 +348,7 @@ export async function cleanupStorage<
 
   await tx.execute(
     sql.raw(`
-      DELETE FROM
+      DELETE FROM ${SCHEMA_NAME}.${ROLLBACK_TABLE_NAME}
       WHERE indexer_id = '${indexerId}'
     `),
   );
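
One practical consequence of the storage.ts changes: the reorg audit rows now live in airfoil.reorg_rollback rather than in __reorg_rollback in the public schema, so any ad-hoc query has to qualify the table name. A sketch only, assuming the pg driver is installed, a plain node-postgres client connected to the indexer database, and a hypothetical indexer id:

import pg from "pg";

const client = new pg.Client({
  connectionString: process.env.POSTGRES_CONNECTION_STRING,
});
await client.connect();

// n, op, table_name, cursor and indexer_id are the columns created by
// initializeReorgRollbackTable above.
const { rows } = await client.query(
  `SELECT n, op, table_name, cursor
     FROM airfoil.reorg_rollback
    WHERE indexer_id = $1
    ORDER BY n DESC
    LIMIT 10`,
  ["my-indexer"], // hypothetical indexer id
);
console.log(rows);
await client.end();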