@latticexyz/store-indexer 2.0.0-snapshot-test-32d38619 → 2.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +23 -15
- package/dist/bin/postgres-decoded-indexer.js +3 -0
- package/dist/bin/postgres-decoded-indexer.js.map +1 -0
- package/dist/bin/postgres-frontend.js +31 -0
- package/dist/bin/postgres-frontend.js.map +1 -0
- package/dist/bin/postgres-indexer.js +1 -1
- package/dist/bin/postgres-indexer.js.map +1 -1
- package/dist/bin/sqlite-indexer.js +1 -1
- package/dist/bin/sqlite-indexer.js.map +1 -1
- package/dist/chunk-KDDXIBYJ.js +2 -0
- package/dist/chunk-KDDXIBYJ.js.map +1 -0
- package/dist/chunk-NHP2EYE7.js +7 -0
- package/dist/chunk-NHP2EYE7.js.map +1 -0
- package/dist/chunk-OUZYPRYF.js +2 -0
- package/dist/chunk-OUZYPRYF.js.map +1 -0
- package/dist/chunk-VCBWGHIO.js +2 -0
- package/dist/chunk-VCBWGHIO.js.map +1 -0
- package/dist/chunk-ZS3IQEZ4.js +2 -0
- package/dist/chunk-ZS3IQEZ4.js.map +1 -0
- package/dist/healthcheck-7XXWJH5U.js +2 -0
- package/dist/healthcheck-7XXWJH5U.js.map +1 -0
- package/dist/helloWorld-BMBNVEA7.js +2 -0
- package/dist/helloWorld-BMBNVEA7.js.map +1 -0
- package/package.json +34 -20
- package/src/debug.ts +7 -0
- package/src/koa-middleware/compress.ts +48 -0
- package/src/koa-middleware/healthcheck.ts +37 -0
- package/src/koa-middleware/helloWorld.ts +12 -0
- package/src/koa-middleware/sentry.ts +101 -0
- package/src/postgres/apiRoutes.ts +58 -0
- package/src/postgres/common.ts +21 -0
- package/src/postgres/deprecated/createQueryAdapter.ts +57 -0
- package/src/postgres/deprecated/getLogs.ts +82 -0
- package/src/postgres/queryLogs.ts +71 -0
- package/src/postgres/recordToLog.ts +19 -0
- package/src/sqlite/apiRoutes.ts +48 -0
- package/src/sqlite/createQueryAdapter.ts +10 -29
- package/src/sqlite/getTablesWithRecords.ts +76 -0
- package/dist/chunk-X3OEYQLT.js +0 -7
- package/dist/chunk-X3OEYQLT.js.map +0 -1
- package/src/postgres/createQueryAdapter.ts +0 -54
@@ -0,0 +1,48 @@
|
|
1
|
+
import { Middleware } from "koa";
|
2
|
+
import { Readable, Stream } from "node:stream";
|
3
|
+
import accepts from "accepts";
|
4
|
+
import { Zlib, createBrotliCompress, createDeflate, createGzip } from "node:zlib";
|
5
|
+
import { includes } from "@latticexyz/common/utils";
|
6
|
+
|
7
|
+
// Loosely based on https://github.com/holic/koa-compress/blob/master/lib/index.js
|
8
|
+
// with better handling of streams better with occasional flushing
|
9
|
+
|
10
|
+
const encodings = {
|
11
|
+
br: createBrotliCompress,
|
12
|
+
gzip: createGzip,
|
13
|
+
deflate: createDeflate,
|
14
|
+
} as const;
|
15
|
+
|
16
|
+
const encodingNames = Object.keys(encodings) as (keyof typeof encodings)[];
|
17
|
+
|
18
|
+
function flushEvery<stream extends Zlib & Readable>(stream: stream, bytesThreshold: number): stream {
|
19
|
+
let bytesSinceFlush = 0;
|
20
|
+
stream.on("data", (data) => {
|
21
|
+
bytesSinceFlush += data.length;
|
22
|
+
if (bytesSinceFlush > bytesThreshold) {
|
23
|
+
bytesSinceFlush = 0;
|
24
|
+
stream.flush();
|
25
|
+
}
|
26
|
+
});
|
27
|
+
return stream;
|
28
|
+
}
|
29
|
+
|
30
|
+
type CompressOptions = {
|
31
|
+
flushThreshold?: number;
|
32
|
+
};
|
33
|
+
|
34
|
+
export function compress({ flushThreshold = 1024 * 4 }: CompressOptions = {}): Middleware {
|
35
|
+
return async function compressMiddleware(ctx, next) {
|
36
|
+
ctx.vary("Accept-Encoding");
|
37
|
+
|
38
|
+
await next();
|
39
|
+
|
40
|
+
const encoding = accepts(ctx.req).encoding(encodingNames);
|
41
|
+
if (!includes(encodingNames, encoding)) return;
|
42
|
+
|
43
|
+
const compressed = flushEvery(encodings[encoding](), flushThreshold);
|
44
|
+
|
45
|
+
ctx.set("Content-Encoding", encoding);
|
46
|
+
ctx.body = ctx.body instanceof Stream ? ctx.body.pipe(compressed) : compressed.end(ctx.body);
|
47
|
+
};
|
48
|
+
}
|
@@ -0,0 +1,37 @@
|
|
1
|
+
import { Middleware } from "koa";
|
2
|
+
|
3
|
+
type HealthcheckOptions = {
|
4
|
+
isHealthy?: () => boolean;
|
5
|
+
isReady?: () => boolean;
|
6
|
+
};
|
7
|
+
|
8
|
+
/**
|
9
|
+
* Middleware to add Kubernetes healthcheck endpoints
|
10
|
+
*/
|
11
|
+
export function healthcheck({ isHealthy, isReady }: HealthcheckOptions = {}): Middleware {
|
12
|
+
return async function healthcheckMiddleware(ctx, next): Promise<void> {
|
13
|
+
if (ctx.path === "/healthz") {
|
14
|
+
if (isHealthy == null || isHealthy()) {
|
15
|
+
ctx.status = 200;
|
16
|
+
ctx.body = "healthy";
|
17
|
+
} else {
|
18
|
+
ctx.status = 503;
|
19
|
+
ctx.body = "not healthy";
|
20
|
+
}
|
21
|
+
return;
|
22
|
+
}
|
23
|
+
|
24
|
+
if (ctx.path === "/readyz") {
|
25
|
+
if (isReady == null || isReady()) {
|
26
|
+
ctx.status = 200;
|
27
|
+
ctx.body = "ready";
|
28
|
+
} else {
|
29
|
+
ctx.status = 503;
|
30
|
+
ctx.body = "not ready";
|
31
|
+
}
|
32
|
+
return;
|
33
|
+
}
|
34
|
+
|
35
|
+
await next();
|
36
|
+
};
|
37
|
+
}
|
@@ -0,0 +1,12 @@
|
|
1
|
+
import { Middleware } from "koa";
|
2
|
+
|
3
|
+
export function helloWorld(): Middleware {
|
4
|
+
return async function helloWorldMiddleware(ctx, next): Promise<void> {
|
5
|
+
if (ctx.path === "/") {
|
6
|
+
ctx.status = 200;
|
7
|
+
ctx.body = "emit HelloWorld();";
|
8
|
+
return;
|
9
|
+
}
|
10
|
+
await next();
|
11
|
+
};
|
12
|
+
}
|
@@ -0,0 +1,101 @@
|
|
1
|
+
import * as Sentry from "@sentry/node";
import { ProfilingIntegration } from "@sentry/profiling-node";
import { stripUrlQueryAndFragment } from "@sentry/utils";
import debug from "debug";
import Koa from "koa";
import compose from "koa-compose";

/**
 * Koa middleware that reports any error thrown by downstream middleware to
 * Sentry (with request data attached to the event) and then rethrows it.
 */
export function errorHandler(): Koa.Middleware {
  return async function errorHandlerMiddleware(ctx, next) {
    try {
      await next();
    } catch (err) {
      Sentry.withScope((scope) => {
        scope.addEventProcessor((event) => {
          return Sentry.addRequestDataToEvent(event, ctx.request);
        });
        Sentry.captureException(err);
      });
      // Rethrow so Koa's default error handling still runs after reporting.
      throw err;
    }
  };
}

/**
 * Koa middleware that runs each request in its own Sentry async context and
 * attaches request data (excluding user info) to events captured during it.
 */
export function requestHandler(): Koa.Middleware {
  return async function requestHandlerMiddleware(ctx, next) {
    await Sentry.runWithAsyncContext(async () => {
      const hub = Sentry.getCurrentHub();
      hub.configureScope((scope) =>
        scope.addEventProcessor((event) =>
          Sentry.addRequestDataToEvent(event, ctx.request, {
            include: {
              user: false,
            },
          }),
        ),
      );
      await next();
    });
  };
}

/**
 * Koa middleware that creates a Sentry performance transaction per request,
 * continuing an upstream trace when a `sentry-trace` header is present.
 */
export function tracing(): Koa.Middleware {
  // creates a Sentry transaction per request
  return async function tracingMiddleware(ctx, next) {
    const reqMethod = (ctx.method || "").toUpperCase();
    const reqUrl = ctx.url && stripUrlQueryAndFragment(ctx.url);

    // Connect to trace of upstream app
    let traceparentData;
    if (ctx.request.get("sentry-trace")) {
      traceparentData = Sentry.extractTraceparentData(ctx.request.get("sentry-trace"));
    }

    const transaction = Sentry.startTransaction({
      name: `${reqMethod} ${reqUrl}`,
      op: "http.server",
      ...traceparentData,
    });

    // Stash the transaction on the Koa context so other middleware can find it.
    ctx.__sentry_transaction = transaction;

    // We put the transaction on the scope so users can attach children to it
    Sentry.getCurrentHub().configureScope((scope) => {
      scope.setSpan(transaction);
    });

    ctx.res.on("finish", () => {
      // Push `transaction.finish` to the next event loop so open spans have a chance to finish before the transaction closes
      setImmediate(() => {
        // If you're using koa router, set the matched route as transaction name
        if (ctx._matchedRoute) {
          const mountPath = ctx.mountPath || "";
          transaction.setName(`${reqMethod} ${mountPath}${ctx._matchedRoute}`);
        }

        transaction.setHttpStatus(ctx.status);
        transaction.finish();
      });
    });

    await next();
  };
}

/**
 * Initializes the Sentry SDK for this process (tracing + profiling at 100%
 * sample rate) and returns the composed error/request/tracing middleware.
 *
 * @param dsn Sentry DSN to report events to.
 */
export function sentry(dsn: string): Koa.Middleware {
  // NOTE(review): `debug` here is the factory default-exported by the "debug"
  // package, so this call creates (and discards) a logger named
  // "Initializing Sentry" rather than logging a message — confirm intent.
  debug("Initializing Sentry");
  Sentry.init({
    dsn,
    integrations: [
      // Automatically instrument Node.js libraries and frameworks
      ...Sentry.autoDiscoverNodePerformanceMonitoringIntegrations(),
      new ProfilingIntegration(),
    ],
    // Performance Monitoring
    tracesSampleRate: 1.0,
    // Set sampling rate for profiling - this is relative to tracesSampleRate
    profilesSampleRate: 1.0,
  });

  return compose([errorHandler(), requestHandler(), tracing()]);
}
|
@@ -0,0 +1,58 @@
|
|
1
|
+
import { Sql } from "postgres";
import { Middleware } from "koa";
import Router from "@koa/router";
import compose from "koa-compose";
import { input } from "@latticexyz/store-sync/indexer-client";
import { storeTables } from "@latticexyz/store-sync";
import { queryLogs } from "./queryLogs";
import { recordToLog } from "./recordToLog";
import { debug, error } from "../debug";
import { createBenchmark } from "@latticexyz/common";
import { compress } from "../koa-middleware/compress";

/**
 * Returns Koa middleware serving the `/api/logs` endpoint, which reads store
 * records from the given Postgres connection and returns them as compressed
 * JSON logs alongside the indexer's block number.
 */
export function apiRoutes(database: Sql): Middleware {
  const router = new Router();

  router.get("/api/logs", compress(), async (ctx) => {
    const benchmark = createBenchmark("postgres:logs");
    let options: ReturnType<typeof input.parse>;

    try {
      // `input` is a schema parser; a malformed `input` query param is a client error (400).
      options = input.parse(typeof ctx.query.input === "string" ? JSON.parse(ctx.query.input) : {});
    } catch (e) {
      ctx.status = 400;
      ctx.body = JSON.stringify(e);
      debug(e);
      return;
    }

    try {
      // When filtering, always include the store's `Tables` table so clients can
      // decode the values of the other tables; no filters means "everything".
      options.filters = options.filters.length > 0 ? [...options.filters, { tableId: storeTables.Tables.tableId }] : [];
      const records = await queryLogs(database, options ?? {}).execute();
      benchmark("query records");
      const logs = records.map(recordToLog);
      benchmark("map records to logs");

      if (records.length === 0) {
        ctx.status = 404;
        ctx.body = "no logs found";
        error(
          `no logs found for chainId ${options.chainId}, address ${options.address}, filters ${JSON.stringify(
            options.filters,
          )}`,
        );
        return;
      }

      // Every row carries the same `chainBlockNumber` (it comes from the single
      // `config` row in queryLogs), so the first row's value suffices.
      const blockNumber = records[0].chainBlockNumber;
      ctx.body = JSON.stringify({ blockNumber, logs });
      ctx.status = 200;
    } catch (e) {
      ctx.status = 500;
      ctx.body = JSON.stringify(e);
      error(e);
    }
  });

  return compose([router.routes(), router.allowedMethods()]) as Middleware;
}
|
@@ -0,0 +1,21 @@
|
|
1
|
+
import { Hex } from "viem";

/**
 * Raw per-record columns returned by the indexer's Postgres queries,
 * hex-encoded for transport (see `queryLogs`).
 */
export type RecordData = {
  address: Hex;
  tableId: Hex;
  keyBytes: Hex;
  staticData: Hex | null;
  encodedLengths: Hex | null;
  dynamicData: Hex | null;
  // Block number at which this record was written, serialized as a string.
  recordBlockNumber: string;
  logIndex: number;
};

/**
 * Indexer-wide metadata attached to every queried row (sourced from the
 * single `config` row plus a result-set count).
 */
export type RecordMetadata = {
  indexerVersion: string;
  chainId: string;
  chainBlockNumber: string;
  // Total number of record rows in the overall result set.
  totalRows: number;
};

/** A fully joined query row: record data plus indexer metadata. */
export type Record = RecordData & RecordMetadata;
|
@@ -0,0 +1,57 @@
|
|
1
|
+
import { getAddress } from "viem";
import { PgDatabase } from "drizzle-orm/pg-core";
import { TableWithRecords, isTableRegistrationLog, logToTable, storeTables } from "@latticexyz/store-sync";
import { decodeKey, decodeValueArgs } from "@latticexyz/protocol-parser/internal";
import { QueryAdapter } from "@latticexyz/store-sync/trpc-indexer";
import { debug } from "../../debug";
import { getLogs } from "./getLogs";
import { groupBy } from "@latticexyz/common/utils";

/**
 * Creates a query adapter for the tRPC server/client to query data from Postgres.
 *
 * @param {PgDatabase<any>} database Postgres database object from Drizzle
 * @returns {Promise<QueryAdapter>} A set of methods used by tRPC endpoints.
 * @deprecated
 */
// eslint-disable-next-line @typescript-eslint/no-explicit-any
export async function createQueryAdapter(database: PgDatabase<any>): Promise<QueryAdapter> {
  const adapter: QueryAdapter = {
    async getLogs(opts) {
      return getLogs(database, opts);
    },
    async findAll(opts) {
      const filters = opts.filters ?? [];
      const { blockNumber, logs } = await getLogs(database, {
        ...opts,
        // make sure we're always retrieving `store.Tables` table, so we can decode table values
        filters: filters.length > 0 ? [...filters, { tableId: storeTables.Tables.tableId }] : [],
      });

      // Table registration logs carry the schemas needed to decode each table's records.
      const tables = logs.filter(isTableRegistrationLog).map(logToTable);

      // Bucket logs per (checksummed address, tableId) so each table's records decode together.
      const logsByTable = groupBy(logs, (log) => `${getAddress(log.address)}:${log.args.tableId}`);

      const tablesWithRecords: TableWithRecords[] = tables.map((table) => {
        const tableLogs = logsByTable.get(`${getAddress(table.address)}:${table.tableId}`) ?? [];
        const records = tableLogs.map((log) => ({
          key: decodeKey(table.keySchema, log.args.keyTuple),
          value: decodeValueArgs(table.valueSchema, log.args),
        }));

        return {
          ...table,
          records,
        };
      });

      debug("findAll: decoded %d logs across %d tables", logs.length, tables.length);

      return {
        blockNumber,
        tables: tablesWithRecords,
      };
    },
  };
  return adapter;
}
|
@@ -0,0 +1,82 @@
|
|
1
|
+
import { PgDatabase } from "drizzle-orm/pg-core";
import { Hex } from "viem";
import { StorageAdapterLog, SyncFilter } from "@latticexyz/store-sync";
import { tables } from "@latticexyz/store-sync/postgres";
import { and, asc, eq, or } from "drizzle-orm";
import { bigIntMax } from "@latticexyz/common/utils";
import { recordToLog } from "../recordToLog";
import { createBenchmark } from "@latticexyz/common";

/**
 * Queries all matching, non-deleted records via Drizzle and converts them to
 * `Store_SetRecord` logs, together with the newest block number known to
 * either the indexer's chain state or the returned records.
 *
 * @deprecated
 */
export async function getLogs(
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  database: PgDatabase<any>,
  {
    chainId,
    address,
    filters = [],
  }: {
    readonly chainId: number;
    readonly address?: Hex;
    readonly filters?: readonly SyncFilter[];
  },
): Promise<{ blockNumber: bigint; logs: (StorageAdapterLog & { eventName: "Store_SetRecord" })[] }> {
  const benchmark = createBenchmark("drizzleGetLogs");

  // One AND-group per filter (optional address + tableId + optional key
  // columns), OR'ed together in the query below. With no filters, optionally
  // restrict by address only; otherwise match everything.
  const conditions = filters.length
    ? filters.map((filter) =>
        and(
          address != null ? eq(tables.recordsTable.address, address) : undefined,
          eq(tables.recordsTable.tableId, filter.tableId),
          filter.key0 != null ? eq(tables.recordsTable.key0, filter.key0) : undefined,
          filter.key1 != null ? eq(tables.recordsTable.key1, filter.key1) : undefined,
        ),
      )
    : address != null
      ? [eq(tables.recordsTable.address, address)]
      : [];
  benchmark("parse config");

  // Query for the block number that the indexer (i.e. chain) is at, in case the
  // indexer is further along in the chain than a given store/table's last updated
  // block number. We'll then take the highest block number between the indexer's
  // chain state and all the records in the query (in case the records updated
  // between these queries). Using just the highest block number from the queries
  // could potentially signal to the client an older-than-necessary block number,
  // for stores/tables that haven't seen recent activity.
  // TODO: move the block number query into the records query for atomicity so we don't have to merge them here
  const chainState = await database
    .select()
    .from(tables.configTable)
    .where(eq(tables.configTable.chainId, chainId))
    .limit(1)
    .execute()
    // Get the first record in a way that returns a possible `undefined`
    // TODO: move this to `.findFirst` after upgrading drizzle or `rows[0]` after enabling `noUncheckedIndexedAccess: true`
    .then((rows) => rows.find(() => true));
  const indexerBlockNumber = chainState?.blockNumber ?? 0n;
  benchmark("query chainState");

  const records = await database
    .select()
    .from(tables.recordsTable)
    .where(or(...conditions))
    .orderBy(
      asc(tables.recordsTable.blockNumber),
      // TODO: add logIndex (https://github.com/latticexyz/mud/issues/1979)
    );
  benchmark("query records");

  // Highest of: indexer chain state vs. any record's block number (see note above).
  const blockNumber = records.reduce((max, record) => bigIntMax(max, record.blockNumber ?? 0n), indexerBlockNumber);
  benchmark("find block number");

  const logs = records
    // TODO: add this to the query, assuming we can optimize with an index
    .filter((record) => !record.isDeleted)
    .map(recordToLog);
  benchmark("map records to logs");

  return { blockNumber, logs };
}
|
@@ -0,0 +1,71 @@
|
|
1
|
+
import { isNotNull } from "@latticexyz/common/utils";
import { PendingQuery, Row, Sql } from "postgres";
import { hexToBytes } from "viem";
import { z } from "zod";
import { input } from "@latticexyz/store-sync/indexer-client";
import { transformSchemaName } from "@latticexyz/store-sync/postgres";
import { Record } from "./common";

// Schema the indexer writes to; transformSchemaName may adjust "mud" per
// environment — see @latticexyz/store-sync/postgres for the exact rules.
const schemaName = transformSchemaName("mud");

/** Joins the given (non-empty) conditions with `AND`, wrapped in parentheses. */
function and(sql: Sql, conditions: PendingQuery<Row[]>[]): PendingQuery<Row[]> {
  return sql`(${conditions.reduce((query, condition) => sql`${query} AND ${condition}`)})`;
}

/** Joins the given (non-empty) conditions with `OR`, wrapped in parentheses. */
function or(sql: Sql, conditions: PendingQuery<Row[]>[]): PendingQuery<Row[]> {
  return sql`(${conditions.reduce((query, condition) => sql`${query} OR ${condition}`)})`;
}

/**
 * Builds the (unexecuted) query for store records matching the given filter
 * options: hex-encoded record columns cross-joined with the single indexer
 * `config` row plus a total row count, ordered by block number and log index.
 */
export function queryLogs(sql: Sql, opts: z.infer<typeof input>): PendingQuery<Record[]> {
  // One AND-group per filter (optional address + tableId + optional key
  // columns), OR'ed together below. With no filters, optionally restrict by
  // address only; otherwise match everything.
  const conditions = opts.filters.length
    ? opts.filters.map((filter) =>
        and(
          sql,
          [
            opts.address != null ? sql`address = ${hexToBytes(opts.address)}` : null,
            sql`table_id = ${hexToBytes(filter.tableId)}`,
            filter.key0 != null ? sql`key0 = ${hexToBytes(filter.key0)}` : null,
            filter.key1 != null ? sql`key1 = ${hexToBytes(filter.key1)}` : null,
          ].filter(isNotNull),
        ),
      )
    : opts.address != null
      ? [sql`address = ${hexToBytes(opts.address)}`]
      : [];

  // Deleted records are always excluded, whatever the filters.
  const where = sql`WHERE ${and(
    sql,
    [sql`is_deleted != true`, conditions.length ? or(sql, conditions) : null].filter(isNotNull),
  )}`;

  // TODO: implement bytea <> hex columns via custom types: https://github.com/porsager/postgres#custom-types
  return sql<Record[]>`
    WITH
      config AS (
        SELECT
          version AS "indexerVersion",
          chain_id AS "chainId",
          block_number AS "chainBlockNumber"
        FROM ${sql(`${schemaName}.config`)}
        LIMIT 1
      ),
      records AS (
        SELECT
          '0x' || encode(address, 'hex') AS address,
          '0x' || encode(table_id, 'hex') AS "tableId",
          '0x' || encode(key_bytes, 'hex') AS "keyBytes",
          '0x' || encode(static_data, 'hex') AS "staticData",
          '0x' || encode(encoded_lengths, 'hex') AS "encodedLengths",
          '0x' || encode(dynamic_data, 'hex') AS "dynamicData",
          block_number AS "recordBlockNumber",
          log_index AS "logIndex"
        FROM ${sql(`${schemaName}.records`)}
        ${where}
        ORDER BY block_number, log_index ASC
      )
    SELECT
      (SELECT COUNT(*) FROM records) AS "totalRows",
      *
    FROM config, records
  `;
}
|
@@ -0,0 +1,19 @@
|
|
1
|
+
import { StorageAdapterLog } from "@latticexyz/store-sync";
|
2
|
+
import { decodeDynamicField } from "@latticexyz/protocol-parser/internal";
|
3
|
+
import { RecordData } from "./common";
|
4
|
+
|
5
|
+
export function recordToLog(
|
6
|
+
record: Omit<RecordData, "recordBlockNumber">,
|
7
|
+
): StorageAdapterLog & { eventName: "Store_SetRecord" } {
|
8
|
+
return {
|
9
|
+
address: record.address,
|
10
|
+
eventName: "Store_SetRecord",
|
11
|
+
args: {
|
12
|
+
tableId: record.tableId,
|
13
|
+
keyTuple: decodeDynamicField("bytes32[]", record.keyBytes),
|
14
|
+
staticData: record.staticData ?? "0x",
|
15
|
+
encodedLengths: record.encodedLengths ?? "0x",
|
16
|
+
dynamicData: record.dynamicData ?? "0x",
|
17
|
+
},
|
18
|
+
} as const;
|
19
|
+
}
|
@@ -0,0 +1,48 @@
|
|
1
|
+
import { Middleware } from "koa";
|
2
|
+
import Router from "@koa/router";
|
3
|
+
import compose from "koa-compose";
|
4
|
+
import { input } from "@latticexyz/store-sync/indexer-client";
|
5
|
+
import { storeTables, tablesWithRecordsToLogs } from "@latticexyz/store-sync";
|
6
|
+
import { debug } from "../debug";
|
7
|
+
import { createBenchmark } from "@latticexyz/common";
|
8
|
+
import { compress } from "../koa-middleware/compress";
|
9
|
+
import { getTablesWithRecords } from "./getTablesWithRecords";
|
10
|
+
import { BaseSQLiteDatabase } from "drizzle-orm/sqlite-core";
|
11
|
+
|
12
|
+
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
13
|
+
export function apiRoutes(database: BaseSQLiteDatabase<"sync", any>): Middleware {
|
14
|
+
const router = new Router();
|
15
|
+
|
16
|
+
router.get("/api/logs", compress(), async (ctx) => {
|
17
|
+
const benchmark = createBenchmark("sqlite:logs");
|
18
|
+
|
19
|
+
let options: ReturnType<typeof input.parse>;
|
20
|
+
|
21
|
+
try {
|
22
|
+
options = input.parse(typeof ctx.query.input === "string" ? JSON.parse(ctx.query.input) : {});
|
23
|
+
} catch (error) {
|
24
|
+
ctx.status = 400;
|
25
|
+
ctx.body = JSON.stringify(error);
|
26
|
+
debug(error);
|
27
|
+
return;
|
28
|
+
}
|
29
|
+
|
30
|
+
try {
|
31
|
+
options.filters = options.filters.length > 0 ? [...options.filters, { tableId: storeTables.Tables.tableId }] : [];
|
32
|
+
benchmark("parse config");
|
33
|
+
const { blockNumber, tables } = getTablesWithRecords(database, options);
|
34
|
+
benchmark("query tables with records");
|
35
|
+
const logs = tablesWithRecordsToLogs(tables);
|
36
|
+
benchmark("convert records to logs");
|
37
|
+
|
38
|
+
ctx.body = JSON.stringify({ blockNumber: blockNumber?.toString() ?? "-1", logs });
|
39
|
+
ctx.status = 200;
|
40
|
+
} catch (error) {
|
41
|
+
ctx.status = 500;
|
42
|
+
ctx.body = JSON.stringify(error);
|
43
|
+
debug(error);
|
44
|
+
}
|
45
|
+
});
|
46
|
+
|
47
|
+
return compose([router.routes(), router.allowedMethods()]) as Middleware;
|
48
|
+
}
|
@@ -1,8 +1,7 @@
|
|
1
|
-
import { eq } from "drizzle-orm";
|
2
1
|
import { BaseSQLiteDatabase } from "drizzle-orm/sqlite-core";
|
3
|
-
import { buildTable, chainState, getTables } from "@latticexyz/store-sync/sqlite";
|
4
2
|
import { QueryAdapter } from "@latticexyz/store-sync/trpc-indexer";
|
5
|
-
import {
|
3
|
+
import { getTablesWithRecords } from "./getTablesWithRecords";
|
4
|
+
import { tablesWithRecordsToLogs } from "@latticexyz/store-sync";
|
6
5
|
|
7
6
|
/**
|
8
7
|
* Creates a storage adapter for the tRPC server/client to query data from SQLite.
|
@@ -10,34 +9,16 @@ import { debug } from "../debug";
|
|
10
9
|
* @param {BaseSQLiteDatabase<"sync", any>} database SQLite database object from Drizzle
|
11
10
|
* @returns {Promise<QueryAdapter>} A set of methods used by tRPC endpoints.
|
12
11
|
*/
|
12
|
+
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
13
13
|
export async function createQueryAdapter(database: BaseSQLiteDatabase<"sync", any>): Promise<QueryAdapter> {
|
14
14
|
const adapter: QueryAdapter = {
|
15
|
-
async
|
16
|
-
const tables =
|
17
|
-
|
18
|
-
|
19
|
-
|
20
|
-
|
21
|
-
|
22
|
-
...table,
|
23
|
-
records: records.map((record) => ({
|
24
|
-
key: Object.fromEntries(Object.entries(table.keySchema).map(([name]) => [name, record[name]])),
|
25
|
-
value: Object.fromEntries(Object.entries(table.valueSchema).map(([name]) => [name, record[name]])),
|
26
|
-
})),
|
27
|
-
};
|
28
|
-
});
|
29
|
-
|
30
|
-
const metadata = database.select().from(chainState).where(eq(chainState.chainId, chainId)).all();
|
31
|
-
const { lastUpdatedBlockNumber } = metadata[0] ?? {};
|
32
|
-
|
33
|
-
const result = {
|
34
|
-
blockNumber: lastUpdatedBlockNumber ?? null,
|
35
|
-
tables: tablesWithRecords,
|
36
|
-
};
|
37
|
-
|
38
|
-
debug("findAll", chainId, address, result);
|
39
|
-
|
40
|
-
return result;
|
15
|
+
async getLogs(opts) {
|
16
|
+
const { blockNumber, tables } = getTablesWithRecords(database, opts);
|
17
|
+
const logs = tablesWithRecordsToLogs(tables);
|
18
|
+
return { blockNumber: blockNumber ?? 0n, logs };
|
19
|
+
},
|
20
|
+
async findAll(opts) {
|
21
|
+
return getTablesWithRecords(database, opts);
|
41
22
|
},
|
42
23
|
};
|
43
24
|
return adapter;
|