@latticexyz/store-indexer 2.0.12-account-kit-27d878b7 → 2.0.12-account-kit-72f3555a8

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@latticexyz/store-indexer",
-  "version": "2.0.12-account-kit-27d878b7",
+  "version": "2.0.12-account-kit-72f3555a8",
   "description": "Minimal Typescript indexer for Store",
   "repository": {
     "type": "git",
@@ -12,13 +12,15 @@
   "exports": {
     ".": "./dist/index.js"
   },
-  "types": "src/index.ts",
   "bin": {
     "postgres-decoded-indexer": "./dist/bin/postgres-decoded-indexer.js",
     "postgres-frontend": "./dist/bin/postgres-frontend.js",
     "postgres-indexer": "./dist/bin/postgres-indexer.js",
     "sqlite-indexer": "./dist/bin/sqlite-indexer.js"
   },
+  "files": [
+    "dist"
+  ],
   "dependencies": {
     "@koa/cors": "^4.0.0",
     "@koa/router": "^12.0.1",
@@ -41,11 +43,11 @@
     "trpc-koa-adapter": "^1.1.3",
     "viem": "2.9.20",
     "zod": "^3.22.2",
-    "@latticexyz/block-logs-stream": "2.0.12-account-kit-27d878b7",
-    "@latticexyz/common": "2.0.12-account-kit-27d878b7",
-    "@latticexyz/protocol-parser": "2.0.12-account-kit-27d878b7",
-    "@latticexyz/store": "2.0.12-account-kit-27d878b7",
-    "@latticexyz/store-sync": "2.0.12-account-kit-27d878b7"
+    "@latticexyz/block-logs-stream": "2.0.12-account-kit-72f3555a8",
+    "@latticexyz/protocol-parser": "2.0.12-account-kit-72f3555a8",
+    "@latticexyz/common": "2.0.12-account-kit-72f3555a8",
+    "@latticexyz/store": "2.0.12-account-kit-72f3555a8",
+    "@latticexyz/store-sync": "2.0.12-account-kit-72f3555a8"
   },
   "devDependencies": {
     "@types/accepts": "^1.3.7",
@@ -79,7 +81,7 @@
     "start:sqlite": "tsx bin/sqlite-indexer",
     "start:sqlite:local": "SQLITE_FILENAME=anvil.db RPC_HTTP_URL=http://127.0.0.1:8545 pnpm start:sqlite",
     "start:sqlite:testnet": "SQLITE_FILENAME=testnet.db RPC_HTTP_URL=https://rpc.holesky.redstone.xyz pnpm start:sqlite",
-    "test": "tsc --noEmit --skipLibCheck",
+    "test": "tsc --noEmit",
     "test:ci": "pnpm run test"
   }
 }
package/src/debug.ts DELETED
@@ -1,10 +0,0 @@
-import createDebug from "debug";
-
-export const debug = createDebug("mud:store-indexer");
-export const error = createDebug("mud:store-indexer");
-
-// Pipe debug output to stdout instead of stderr
-debug.log = console.debug.bind(console);
-
-// Pipe error output to stderr
-error.log = console.error.bind(console);
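
For reference, a minimal sketch of how this module was consumed elsewhere in the package (the messages are illustrative; the debug library only prints when the DEBUG=mud:store-indexer environment variable matches the namespace):

import { debug, error } from "./debug";

debug("starting indexer"); // goes to stdout via console.debug
error("sync failed");      // goes to stderr via console.error
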
package/src/index.ts DELETED
@@ -1 +0,0 @@
-// Nothing to export yet
package/src/koa-middleware/compress.ts DELETED
@@ -1,48 +0,0 @@
-import { Middleware } from "koa";
-import { Readable, Stream } from "node:stream";
-import accepts from "accepts";
-import { Zlib, createBrotliCompress, createDeflate, createGzip } from "node:zlib";
-import { includes } from "@latticexyz/common/utils";
-
-// Loosely based on https://github.com/holic/koa-compress/blob/master/lib/index.js
-// with better handling of streams with occasional flushing
-
-const encodings = {
-  br: createBrotliCompress,
-  gzip: createGzip,
-  deflate: createDeflate,
-} as const;
-
-const encodingNames = Object.keys(encodings) as (keyof typeof encodings)[];
-
-function flushEvery<stream extends Zlib & Readable>(stream: stream, bytesThreshold: number): stream {
-  let bytesSinceFlush = 0;
-  stream.on("data", (data) => {
-    bytesSinceFlush += data.length;
-    if (bytesSinceFlush > bytesThreshold) {
-      bytesSinceFlush = 0;
-      stream.flush();
-    }
-  });
-  return stream;
-}
-
-type CompressOptions = {
-  flushThreshold?: number;
-};
-
-export function compress({ flushThreshold = 1024 * 4 }: CompressOptions = {}): Middleware {
-  return async function compressMiddleware(ctx, next) {
-    ctx.vary("Accept-Encoding");
-
-    await next();
-
-    const encoding = accepts(ctx.req).encoding(encodingNames);
-    if (!includes(encodingNames, encoding)) return;
-
-    const compressed = flushEvery(encodings[encoding](), flushThreshold);
-
-    ctx.set("Content-Encoding", encoding);
-    ctx.body = ctx.body instanceof Stream ? ctx.body.pipe(compressed) : compressed.end(ctx.body);
-  };
-}
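
For context, a minimal usage sketch of this deleted middleware (the Koa app setup, threshold, and port are assumptions for illustration, not part of the diff):

import Koa from "koa";
import { compress } from "./koa-middleware/compress";

const server = new Koa();

// Compress every response; the underlying zlib stream is flushed each time
// ~8 KiB accumulates, so long streaming responses reach clients incrementally.
server.use(compress({ flushThreshold: 1024 * 8 }));

server.listen(3001);
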
package/src/koa-middleware/healthcheck.ts DELETED
@@ -1,37 +0,0 @@
-import { Middleware } from "koa";
-
-type HealthcheckOptions = {
-  isHealthy?: () => boolean;
-  isReady?: () => boolean;
-};
-
-/**
- * Middleware to add Kubernetes healthcheck endpoints
- */
-export function healthcheck({ isHealthy, isReady }: HealthcheckOptions = {}): Middleware {
-  return async function healthcheckMiddleware(ctx, next): Promise<void> {
-    if (ctx.path === "/healthz") {
-      if (isHealthy == null || isHealthy()) {
-        ctx.status = 200;
-        ctx.body = "healthy";
-      } else {
-        ctx.status = 503;
-        ctx.body = "not healthy";
-      }
-      return;
-    }
-
-    if (ctx.path === "/readyz") {
-      if (isReady == null || isReady()) {
-        ctx.status = 200;
-        ctx.body = "ready";
-      } else {
-        ctx.status = 503;
-        ctx.body = "not ready";
-      }
-      return;
-    }
-
-    await next();
-  };
-}
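
A hedged sketch of wiring these endpoints into an indexer process (the isCaughtUp flag is hypothetical, standing in for whatever sync-state bookkeeping the process keeps):

import Koa from "koa";
import { healthcheck } from "./koa-middleware/healthcheck";

const server = new Koa();

// Hypothetical readiness flag an indexer might flip once it has synced to
// the chain head: /healthz returns 200 while the process is alive, and
// /readyz returns 503 until the flag is set.
let isCaughtUp = false;
server.use(healthcheck({ isReady: () => isCaughtUp }));

server.listen(3001);
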
package/src/koa-middleware/helloWorld.ts DELETED
@@ -1,12 +0,0 @@
-import { Middleware } from "koa";
-
-export function helloWorld(): Middleware {
-  return async function helloWorldMiddleware(ctx, next): Promise<void> {
-    if (ctx.path === "/") {
-      ctx.status = 200;
-      ctx.body = "emit HelloWorld();";
-      return;
-    }
-    await next();
-  };
-}
package/src/koa-middleware/metrics.ts DELETED
@@ -1,85 +0,0 @@
-import { Middleware } from "koa";
-import promClient from "prom-client";
-
-type MetricsOptions = {
-  isHealthy?: () => boolean;
-  isReady?: () => boolean;
-  getLatestStoredBlockNumber?: () => Promise<bigint | undefined>;
-  getDistanceFromFollowBlock?: () => Promise<bigint>;
-  followBlockTag?: "latest" | "safe" | "finalized";
-};
-
-/**
- * Middleware to add Prometheus metrics endpoints
- */
-export function metrics({
-  isHealthy,
-  isReady,
-  getLatestStoredBlockNumber,
-  getDistanceFromFollowBlock,
-  followBlockTag,
-}: MetricsOptions = {}): Middleware {
-  promClient.collectDefaultMetrics();
-  if (isHealthy != null) {
-    new promClient.Gauge({
-      name: "health_status",
-      help: "Health status (0 = unhealthy, 1 = healthy)",
-      collect(): void {
-        this.set(Number(isHealthy()));
-      },
-    });
-  }
-
-  if (isReady != null) {
-    new promClient.Gauge({
-      name: "readiness_status",
-      help: "Readiness status (whether the service is ready to receive requests, 0 = not ready, 1 = ready)",
-      collect(): void {
-        this.set(Number(isReady()));
-      },
-    });
-  }
-
-  if (getLatestStoredBlockNumber != null) {
-    new promClient.Gauge({
-      name: "latest_stored_block_number",
-      help: "Latest block number stored in the database",
-      async collect(): Promise<void> {
-        this.set(Number(await getLatestStoredBlockNumber()));
-      },
-    });
-  }
-
-  if (followBlockTag != null) {
-    const blockTagGauge = new promClient.Gauge({
-      name: "follow_block_tag",
-      help: "Block tag the indexer is following (0 = finalized, 1 = safe, 2 = latest)",
-    });
-    const blockTagToValue = {
-      finalized: 0,
-      safe: 1,
-      latest: 2,
-    };
-    blockTagGauge.set(blockTagToValue[followBlockTag]);
-  }
-
-  if (getDistanceFromFollowBlock != null) {
-    new promClient.Gauge({
-      name: "distance_from_follow_block",
-      help: "Block distance from the block tag the indexer is following",
-      async collect(): Promise<void> {
-        this.set(Number(await getDistanceFromFollowBlock()));
-      },
-    });
-  }
-
-  return async function metricsMiddleware(ctx, next): Promise<void> {
-    if (ctx.path === "/metrics") {
-      ctx.status = 200;
-      ctx.body = await promClient.register.metrics();
-      return;
-    }
-
-    await next();
-  };
-}
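
A minimal sketch of mounting this middleware (the latestBlock variable is a hypothetical stand-in for the indexer's own bookkeeping; app and port are illustrative):

import Koa from "koa";
import { metrics } from "./koa-middleware/metrics";

const server = new Koa();

// Each option becomes a Prometheus gauge that is sampled on every /metrics scrape.
let latestBlock = 0n;
server.use(
  metrics({
    isHealthy: () => true,
    getLatestStoredBlockNumber: async () => latestBlock,
    followBlockTag: "latest",
  }),
);

server.listen(3001);
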
package/src/koa-middleware/sentry.ts DELETED
@@ -1,101 +0,0 @@
-import * as Sentry from "@sentry/node";
-import { ProfilingIntegration } from "@sentry/profiling-node";
-import { stripUrlQueryAndFragment } from "@sentry/utils";
-import debug from "debug";
-import Koa from "koa";
-import compose from "koa-compose";
-
-export function errorHandler(): Koa.Middleware {
-  return async function errorHandlerMiddleware(ctx, next) {
-    try {
-      await next();
-    } catch (err) {
-      Sentry.withScope((scope) => {
-        scope.addEventProcessor((event) => {
-          return Sentry.addRequestDataToEvent(event, ctx.request);
-        });
-        Sentry.captureException(err);
-      });
-      throw err;
-    }
-  };
-}
-
-export function requestHandler(): Koa.Middleware {
-  return async function requestHandlerMiddleware(ctx, next) {
-    await Sentry.runWithAsyncContext(async () => {
-      const hub = Sentry.getCurrentHub();
-      hub.configureScope((scope) =>
-        scope.addEventProcessor((event) =>
-          Sentry.addRequestDataToEvent(event, ctx.request, {
-            include: {
-              user: false,
-            },
-          }),
-        ),
-      );
-      await next();
-    });
-  };
-}
-
-export function tracing(): Koa.Middleware {
-  // creates a Sentry transaction per request
-  return async function tracingMiddleware(ctx, next) {
-    const reqMethod = (ctx.method || "").toUpperCase();
-    const reqUrl = ctx.url && stripUrlQueryAndFragment(ctx.url);
-
-    // Connect to trace of upstream app
-    let traceparentData;
-    if (ctx.request.get("sentry-trace")) {
-      traceparentData = Sentry.extractTraceparentData(ctx.request.get("sentry-trace"));
-    }
-
-    const transaction = Sentry.startTransaction({
-      name: `${reqMethod} ${reqUrl}`,
-      op: "http.server",
-      ...traceparentData,
-    });
-
-    ctx.__sentry_transaction = transaction;
-
-    // We put the transaction on the scope so users can attach children to it
-    Sentry.getCurrentHub().configureScope((scope) => {
-      scope.setSpan(transaction);
-    });
-
-    ctx.res.on("finish", () => {
-      // Push `transaction.finish` to the next event loop so open spans have a chance to finish before the transaction closes
-      setImmediate(() => {
-        // If you're using koa router, set the matched route as transaction name
-        if (ctx._matchedRoute) {
-          const mountPath = ctx.mountPath || "";
-          transaction.setName(`${reqMethod} ${mountPath}${ctx._matchedRoute}`);
-        }
-
-        transaction.setHttpStatus(ctx.status);
-        transaction.finish();
-      });
-    });
-
-    await next();
-  };
-}
-
-export function sentry(dsn: string): Koa.Middleware {
-  debug("Initializing Sentry");
-  Sentry.init({
-    dsn,
-    integrations: [
-      // Automatically instrument Node.js libraries and frameworks
-      ...Sentry.autoDiscoverNodePerformanceMonitoringIntegrations(),
-      new ProfilingIntegration(),
-    ],
-    // Performance Monitoring
-    tracesSampleRate: 1.0,
-    // Set sampling rate for profiling - this is relative to tracesSampleRate
-    profilesSampleRate: 1.0,
-  });
-
-  return compose([errorHandler(), requestHandler(), tracing()]);
-}
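
A hedged sketch of how this was typically enabled (reading the DSN from an environment variable is an assumption; sentry() composes errorHandler, requestHandler, and tracing into one middleware):

import Koa from "koa";
import { sentry } from "./koa-middleware/sentry";

const server = new Koa();

// Only instrument when a DSN is configured.
if (process.env.SENTRY_DSN) {
  server.use(sentry(process.env.SENTRY_DSN));
}

server.listen(3001);
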
package/src/postgres/apiRoutes.ts DELETED
@@ -1,81 +0,0 @@
-import { Sql } from "postgres";
-import { Middleware } from "koa";
-import Router from "@koa/router";
-import compose from "koa-compose";
-import { input } from "@latticexyz/store-sync/indexer-client";
-import { storeTables } from "@latticexyz/store-sync";
-import { queryLogs } from "./queryLogs";
-import { recordToLog } from "./recordToLog";
-import { debug, error } from "../debug";
-import { createBenchmark } from "@latticexyz/common";
-import { compress } from "../koa-middleware/compress";
-
-export function apiRoutes(database: Sql): Middleware {
-  const router = new Router();
-
-  router.get("/api/logs", compress(), async (ctx) => {
-    const benchmark = createBenchmark("postgres:logs");
-    let options: ReturnType<typeof input.parse>;
-
-    try {
-      options = input.parse(typeof ctx.query.input === "string" ? JSON.parse(ctx.query.input) : {});
-    } catch (e) {
-      ctx.status = 400;
-      ctx.set("Content-Type", "application/json");
-      ctx.body = JSON.stringify(e);
-      debug(e);
-      return;
-    }
-
-    try {
-      options.filters = options.filters.length > 0 ? [...options.filters, { tableId: storeTables.Tables.tableId }] : [];
-      const records = await queryLogs(database, options ?? {}).execute();
-      benchmark("query records");
-      const logs = records.map(recordToLog);
-      benchmark("map records to logs");
-
-      // Ideally we would immediately return an error if the request is for a Store that the indexer
-      // is not configured to index. Since we don't have easy access to this information here,
-      // we return an error if there are no logs found for a given Store, since that would never
-      // be the case for a Store that is being indexed (since there would at least be records for the
-      // Tables table with tables created during Store initialization).
-      if (records.length === 0) {
-        ctx.status = 404;
-        ctx.body = "no logs found";
-        error(
-          `no logs found for chainId ${options.chainId}, address ${options.address}, filters ${JSON.stringify(
-            options.filters,
-          )}`,
-        );
-        return;
-      }
-
-      const blockNumber = records[0].chainBlockNumber;
-      ctx.status = 200;
-
-      // max age is set to several multiples of the uncached response time (currently ~10s, but using 60s for wiggle room) to ensure only ~one origin request at a time
-      // and stale-while-revalidate below means that the cache is refreshed under the hood while still responding fast (cached)
-      const maxAgeSeconds = 60 * 5;
-      // we set stale-while-revalidate to the time elapsed by the number of blocks we can fetch from the RPC in the same amount of time as an uncached response
-      // meaning it would take ~the same amount of time to get an uncached response from the origin as it would to catch up from the currently cached response
-      // if an uncached response takes ~10 seconds, we have ~10s to catch up, so let's say we can do enough RPC calls to fetch 4000 blocks
-      // with a block per 2 seconds, that means we can serve a stale/cached response for 8000 seconds before we should require the response be returned by the origin
-      const staleWhileRevalidateSeconds = 4000 * 2;
-
-      ctx.set(
-        "Cache-Control",
-        `public, max-age=${maxAgeSeconds}, stale-while-revalidate=${staleWhileRevalidateSeconds}`,
-      );
-
-      ctx.set("Content-Type", "application/json");
-      ctx.body = JSON.stringify({ blockNumber, logs });
-    } catch (e) {
-      ctx.status = 500;
-      ctx.set("Content-Type", "application/json");
-      ctx.body = JSON.stringify(e);
-      error(e);
-    }
-  });
-
-  return compose([router.routes(), router.allowedMethods()]) as Middleware;
-}
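
The Cache-Control arithmetic in the comments above works out to max-age=300 and stale-while-revalidate=8000 (4000 blocks at 2 seconds per block). A minimal sketch of mounting these routes, assuming a postgres.js client and an illustrative connection string:

import Koa from "koa";
import postgres from "postgres";
import { apiRoutes } from "./postgres/apiRoutes";

const server = new Koa();

// postgres() returns the Sql instance that /api/logs uses to run queryLogs.
const database = postgres(process.env.DATABASE_URL ?? "postgres://127.0.0.1/postgres");
server.use(apiRoutes(database));

server.listen(3001);
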
package/src/postgres/common.ts DELETED
@@ -1,21 +0,0 @@
-import { Hex } from "viem";
-
-export type RecordData = {
-  address: Hex;
-  tableId: Hex;
-  keyBytes: Hex;
-  staticData: Hex | null;
-  encodedLengths: Hex | null;
-  dynamicData: Hex | null;
-  recordBlockNumber: string;
-  logIndex: number;
-};
-
-export type RecordMetadata = {
-  indexerVersion: string;
-  chainId: string;
-  chainBlockNumber: string;
-  totalRows: number;
-};
-
-export type Record = RecordData & RecordMetadata;
package/src/postgres/deprecated/createQueryAdapter.ts DELETED
@@ -1,57 +0,0 @@
-import { getAddress } from "viem";
-import { PgDatabase } from "drizzle-orm/pg-core";
-import { TableWithRecords, isTableRegistrationLog, logToTable, storeTables } from "@latticexyz/store-sync";
-import { decodeKey, decodeValueArgs } from "@latticexyz/protocol-parser/internal";
-import { QueryAdapter } from "@latticexyz/store-sync/trpc-indexer";
-import { debug } from "../../debug";
-import { getLogs } from "./getLogs";
-import { groupBy } from "@latticexyz/common/utils";
-
-/**
- * Creates a query adapter for the tRPC server/client to query data from Postgres.
- *
- * @param {PgDatabase<any>} database Postgres database object from Drizzle
- * @returns {Promise<QueryAdapter>} A set of methods used by tRPC endpoints.
- * @deprecated
- */
-// eslint-disable-next-line @typescript-eslint/no-explicit-any
-export async function createQueryAdapter(database: PgDatabase<any>): Promise<QueryAdapter> {
-  const adapter: QueryAdapter = {
-    async getLogs(opts) {
-      return getLogs(database, opts);
-    },
-    async findAll(opts) {
-      const filters = opts.filters ?? [];
-      const { blockNumber, logs } = await getLogs(database, {
-        ...opts,
-        // make sure we're always retrieving `store.Tables` table, so we can decode table values
-        filters: filters.length > 0 ? [...filters, { tableId: storeTables.Tables.tableId }] : [],
-      });
-
-      const tables = logs.filter(isTableRegistrationLog).map(logToTable);
-
-      const logsByTable = groupBy(logs, (log) => `${getAddress(log.address)}:${log.args.tableId}`);
-
-      const tablesWithRecords: TableWithRecords[] = tables.map((table) => {
-        const tableLogs = logsByTable.get(`${getAddress(table.address)}:${table.tableId}`) ?? [];
-        const records = tableLogs.map((log) => ({
-          key: decodeKey(table.keySchema, log.args.keyTuple),
-          value: decodeValueArgs(table.valueSchema, log.args),
-        }));
-
-        return {
-          ...table,
-          records,
-        };
-      });
-
-      debug("findAll: decoded %d logs across %d tables", logs.length, tables.length);
-
-      return {
-        blockNumber,
-        tables: tablesWithRecords,
-      };
-    },
-  };
-  return adapter;
-}
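
A hedged sketch of constructing this deprecated adapter (pairing it with drizzle's postgres-js driver is an assumption; connection string and chain ID are illustrative):

import postgres from "postgres";
import { drizzle } from "drizzle-orm/postgres-js";
import { createQueryAdapter } from "./postgres/deprecated/createQueryAdapter";

// drizzle's postgres-js driver produces the PgDatabase the adapter expects.
const database = drizzle(postgres("postgres://127.0.0.1/postgres"));
const adapter = await createQueryAdapter(database);

const { blockNumber, tables } = await adapter.findAll({ chainId: 31337 });
console.log(blockNumber, tables.length);
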
package/src/postgres/deprecated/getLogs.ts DELETED
@@ -1,82 +0,0 @@
-import { PgDatabase } from "drizzle-orm/pg-core";
-import { Hex } from "viem";
-import { StorageAdapterLog, SyncFilter } from "@latticexyz/store-sync";
-import { tables } from "@latticexyz/store-sync/postgres";
-import { and, asc, eq, or } from "drizzle-orm";
-import { bigIntMax } from "@latticexyz/common/utils";
-import { recordToLog } from "../recordToLog";
-import { createBenchmark } from "@latticexyz/common";
-
-/**
- * @deprecated
- */
-export async function getLogs(
-  // eslint-disable-next-line @typescript-eslint/no-explicit-any
-  database: PgDatabase<any>,
-  {
-    chainId,
-    address,
-    filters = [],
-  }: {
-    readonly chainId: number;
-    readonly address?: Hex;
-    readonly filters?: readonly SyncFilter[];
-  },
-): Promise<{ blockNumber: bigint; logs: (StorageAdapterLog & { eventName: "Store_SetRecord" })[] }> {
-  const benchmark = createBenchmark("drizzleGetLogs");
-
-  const conditions = filters.length
-    ? filters.map((filter) =>
-        and(
-          address != null ? eq(tables.recordsTable.address, address) : undefined,
-          eq(tables.recordsTable.tableId, filter.tableId),
-          filter.key0 != null ? eq(tables.recordsTable.key0, filter.key0) : undefined,
-          filter.key1 != null ? eq(tables.recordsTable.key1, filter.key1) : undefined,
-        ),
-      )
-    : address != null
-      ? [eq(tables.recordsTable.address, address)]
-      : [];
-  benchmark("parse config");
-
-  // Query for the block number that the indexer (i.e. chain) is at, in case the
-  // indexer is further along in the chain than a given store/table's last updated
-  // block number. We'll then take the highest block number between the indexer's
-  // chain state and all the records in the query (in case the records updated
-  // between these queries). Using just the highest block number from the queries
-  // could potentially signal to the client an older-than-necessary block number,
-  // for stores/tables that haven't seen recent activity.
-  // TODO: move the block number query into the records query for atomicity so we don't have to merge them here
-  const chainState = await database
-    .select()
-    .from(tables.configTable)
-    .where(eq(tables.configTable.chainId, chainId))
-    .limit(1)
-    .execute()
-    // Get the first record in a way that returns a possible `undefined`
-    // TODO: move this to `.findFirst` after upgrading drizzle or `rows[0]` after enabling `noUncheckedIndexedAccess: true`
-    .then((rows) => rows.find(() => true));
-  const indexerBlockNumber = chainState?.blockNumber ?? 0n;
-  benchmark("query chainState");
-
-  const records = await database
-    .select()
-    .from(tables.recordsTable)
-    .where(or(...conditions))
-    .orderBy(
-      asc(tables.recordsTable.blockNumber),
-      // TODO: add logIndex (https://github.com/latticexyz/mud/issues/1979)
-    );
-  benchmark("query records");
-
-  const blockNumber = records.reduce((max, record) => bigIntMax(max, record.blockNumber ?? 0n), indexerBlockNumber);
-  benchmark("find block number");
-
-  const logs = records
-    // TODO: add this to the query, assuming we can optimize with an index
-    .filter((record) => !record.isDeleted)
-    .map(recordToLog);
-  benchmark("map records to logs");
-
-  return { blockNumber, logs };
-}
package/src/postgres/queryLogs.ts DELETED
@@ -1,71 +0,0 @@
-import { isNotNull } from "@latticexyz/common/utils";
-import { PendingQuery, Row, Sql } from "postgres";
-import { hexToBytes } from "viem";
-import { z } from "zod";
-import { input } from "@latticexyz/store-sync/indexer-client";
-import { transformSchemaName } from "@latticexyz/store-sync/postgres";
-import { Record } from "./common";
-
-const schemaName = transformSchemaName("mud");
-
-function and(sql: Sql, conditions: PendingQuery<Row[]>[]): PendingQuery<Row[]> {
-  return sql`(${conditions.reduce((query, condition) => sql`${query} AND ${condition}`)})`;
-}
-
-function or(sql: Sql, conditions: PendingQuery<Row[]>[]): PendingQuery<Row[]> {
-  return sql`(${conditions.reduce((query, condition) => sql`${query} OR ${condition}`)})`;
-}
-
-export function queryLogs(sql: Sql, opts: z.infer<typeof input>): PendingQuery<Record[]> {
-  const conditions = opts.filters.length
-    ? opts.filters.map((filter) =>
-        and(
-          sql,
-          [
-            opts.address != null ? sql`address = ${hexToBytes(opts.address)}` : null,
-            sql`table_id = ${hexToBytes(filter.tableId)}`,
-            filter.key0 != null ? sql`key0 = ${hexToBytes(filter.key0)}` : null,
-            filter.key1 != null ? sql`key1 = ${hexToBytes(filter.key1)}` : null,
-          ].filter(isNotNull),
-        ),
-      )
-    : opts.address != null
-      ? [sql`address = ${hexToBytes(opts.address)}`]
-      : [];
-
-  const where = sql`WHERE ${and(
-    sql,
-    [sql`is_deleted != true`, conditions.length ? or(sql, conditions) : null].filter(isNotNull),
-  )}`;
-
-  // TODO: implement bytea <> hex columns via custom types: https://github.com/porsager/postgres#custom-types
-  return sql<Record[]>`
-    WITH
-      config AS (
-        SELECT
-          version AS "indexerVersion",
-          chain_id AS "chainId",
-          block_number AS "chainBlockNumber"
-        FROM ${sql(`${schemaName}.config`)}
-        LIMIT 1
-      ),
-      records AS (
-        SELECT
-          '0x' || encode(address, 'hex') AS address,
-          '0x' || encode(table_id, 'hex') AS "tableId",
-          '0x' || encode(key_bytes, 'hex') AS "keyBytes",
-          '0x' || encode(static_data, 'hex') AS "staticData",
-          '0x' || encode(encoded_lengths, 'hex') AS "encodedLengths",
-          '0x' || encode(dynamic_data, 'hex') AS "dynamicData",
-          block_number AS "recordBlockNumber",
-          log_index AS "logIndex"
-        FROM ${sql(`${schemaName}.records`)}
-        ${where}
-        ORDER BY block_number, log_index ASC
-      )
-    SELECT
-      (SELECT COUNT(*) FROM records) AS "totalRows",
-      *
-    FROM config, records
-  `;
-}
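
A hypothetical invocation (connection string and chain ID are illustrative): each returned row combines the config columns with one record's hex-encoded columns plus a totalRows count, as built by the CTE above.

import postgres from "postgres";
import { queryLogs } from "./postgres/queryLogs";

const sql = postgres("postgres://127.0.0.1/postgres");
// With no filters, this returns every non-deleted record for the chain.
const records = await queryLogs(sql, { chainId: 31337, filters: [] }).execute();
console.log(records.length, records[0]?.chainBlockNumber);
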
package/src/postgres/recordToLog.ts DELETED
@@ -1,19 +0,0 @@
-import { StorageAdapterLog } from "@latticexyz/store-sync";
-import { decodeDynamicField } from "@latticexyz/protocol-parser/internal";
-import { RecordData } from "./common";
-
-export function recordToLog(
-  record: Omit<RecordData, "recordBlockNumber">,
-): StorageAdapterLog & { eventName: "Store_SetRecord" } {
-  return {
-    address: record.address,
-    eventName: "Store_SetRecord",
-    args: {
-      tableId: record.tableId,
-      keyTuple: decodeDynamicField("bytes32[]", record.keyBytes),
-      staticData: record.staticData ?? "0x",
-      encodedLengths: record.encodedLengths ?? "0x",
-      dynamicData: record.dynamicData ?? "0x",
-    },
-  } as const;
-}
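
A small worked example with illustrative values: an empty keyBytes decodes to an empty key tuple, and null data columns are normalized to "0x".

import { recordToLog } from "./postgres/recordToLog";

const log = recordToLog({
  address: "0x0000000000000000000000000000000000000000",
  tableId: "0x7462000000000000000000000000000000000000000000000000000000000000",
  keyBytes: "0x",
  staticData: null,
  encodedLengths: null,
  dynamicData: null,
  logIndex: 0,
});
// log.args -> { tableId, keyTuple: [], staticData: "0x", encodedLengths: "0x", dynamicData: "0x" }
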
package/src/sqlite/apiRoutes.ts DELETED
@@ -1,48 +0,0 @@
-import { Middleware } from "koa";
-import Router from "@koa/router";
-import compose from "koa-compose";
-import { input } from "@latticexyz/store-sync/indexer-client";
-import { storeTables, tablesWithRecordsToLogs } from "@latticexyz/store-sync";
-import { debug } from "../debug";
-import { createBenchmark } from "@latticexyz/common";
-import { compress } from "../koa-middleware/compress";
-import { getTablesWithRecords } from "./getTablesWithRecords";
-import { BaseSQLiteDatabase } from "drizzle-orm/sqlite-core";
-
-// eslint-disable-next-line @typescript-eslint/no-explicit-any
-export function apiRoutes(database: BaseSQLiteDatabase<"sync", any>): Middleware {
-  const router = new Router();
-
-  router.get("/api/logs", compress(), async (ctx) => {
-    const benchmark = createBenchmark("sqlite:logs");
-
-    let options: ReturnType<typeof input.parse>;
-
-    try {
-      options = input.parse(typeof ctx.query.input === "string" ? JSON.parse(ctx.query.input) : {});
-    } catch (error) {
-      ctx.status = 400;
-      ctx.body = JSON.stringify(error);
-      debug(error);
-      return;
-    }
-
-    try {
-      options.filters = options.filters.length > 0 ? [...options.filters, { tableId: storeTables.Tables.tableId }] : [];
-      benchmark("parse config");
-      const { blockNumber, tables } = getTablesWithRecords(database, options);
-      benchmark("query tables with records");
-      const logs = tablesWithRecordsToLogs(tables);
-      benchmark("convert records to logs");
-
-      ctx.body = JSON.stringify({ blockNumber: blockNumber?.toString() ?? "-1", logs });
-      ctx.status = 200;
-    } catch (error) {
-      ctx.status = 500;
-      ctx.body = JSON.stringify(error);
-      debug(error);
-    }
-  });
-
-  return compose([router.routes(), router.allowedMethods()]) as Middleware;
-}
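
A minimal sketch of mounting the SQLite variant (the filename and port are illustrative; pairing with drizzle's better-sqlite3 driver is an assumption consistent with the package's sqlite bins):

import Database from "better-sqlite3";
import { drizzle } from "drizzle-orm/better-sqlite3";
import Koa from "koa";
import { apiRoutes } from "./sqlite/apiRoutes";

const server = new Koa();

// Unlike the postgres variant, this handler reads through drizzle
// synchronously and sets no Cache-Control headers.
server.use(apiRoutes(drizzle(new Database("anvil.db"))));

server.listen(3001);
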
package/src/sqlite/createQueryAdapter.ts DELETED
@@ -1,25 +0,0 @@
-import { BaseSQLiteDatabase } from "drizzle-orm/sqlite-core";
-import { QueryAdapter } from "@latticexyz/store-sync/trpc-indexer";
-import { getTablesWithRecords } from "./getTablesWithRecords";
-import { tablesWithRecordsToLogs } from "@latticexyz/store-sync";
-
-/**
- * Creates a query adapter for the tRPC server/client to query data from SQLite.
- *
- * @param {BaseSQLiteDatabase<"sync", any>} database SQLite database object from Drizzle
- * @returns {Promise<QueryAdapter>} A set of methods used by tRPC endpoints.
- */
-// eslint-disable-next-line @typescript-eslint/no-explicit-any
-export async function createQueryAdapter(database: BaseSQLiteDatabase<"sync", any>): Promise<QueryAdapter> {
-  const adapter: QueryAdapter = {
-    async getLogs(opts) {
-      const { blockNumber, tables } = getTablesWithRecords(database, opts);
-      const logs = tablesWithRecordsToLogs(tables);
-      return { blockNumber: blockNumber ?? 0n, logs };
-    },
-    async findAll(opts) {
-      return getTablesWithRecords(database, opts);
-    },
-  };
-  return adapter;
-}
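
A hedged sketch of constructing and querying the adapter (filename and chain ID are illustrative):

import Database from "better-sqlite3";
import { drizzle } from "drizzle-orm/better-sqlite3";
import { createQueryAdapter } from "./sqlite/createQueryAdapter";

// drizzle's better-sqlite3 driver yields the synchronous
// BaseSQLiteDatabase<"sync", any> the adapter expects.
const adapter = await createQueryAdapter(drizzle(new Database("anvil.db")));

const { blockNumber, logs } = await adapter.getLogs({ chainId: 31337 });
console.log(blockNumber, logs.length);
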
package/src/sqlite/getTablesWithRecords.ts DELETED
@@ -1,76 +0,0 @@
-import { asc, eq } from "drizzle-orm";
-import { BaseSQLiteDatabase } from "drizzle-orm/sqlite-core";
-import { buildTable, chainState, getTables } from "@latticexyz/store-sync/sqlite";
-import { Hex, getAddress } from "viem";
-import { decodeDynamicField } from "@latticexyz/protocol-parser/internal";
-import { SyncFilter, TableWithRecords } from "@latticexyz/store-sync";
-
-// TODO: refactor sqlite and replace this with getLogs to match postgres (https://github.com/latticexyz/mud/issues/1970)
-
-/**
- * @deprecated
- */
-export function getTablesWithRecords(
-  // eslint-disable-next-line @typescript-eslint/no-explicit-any
-  database: BaseSQLiteDatabase<"sync", any>,
-  {
-    chainId,
-    address,
-    filters = [],
-  }: {
-    readonly chainId: number;
-    readonly address?: Hex;
-    readonly filters?: readonly SyncFilter[];
-  },
-): { blockNumber: bigint | null; tables: readonly TableWithRecords[] } {
-  const metadata = database
-    .select()
-    .from(chainState)
-    .where(eq(chainState.chainId, chainId))
-    .limit(1)
-    .all()
-    .find(() => true);
-
-  // If _any_ filter has a table ID, this will filter down all data to just those tables, which means we can't yet mix table filters with key-only filters.
-  // TODO: improve this so we can express this in the query (need to be able to query data across tables more easily)
-  const tableIds = Array.from(new Set(filters.map((filter) => filter.tableId)));
-  const tables = getTables(database)
-    .filter((table) => address == null || getAddress(address) === getAddress(table.address))
-    .filter((table) => !tableIds.length || tableIds.includes(table.tableId));
-
-  const tablesWithRecords = tables.map((table) => {
-    const sqliteTable = buildTable(table);
-    const records = database
-      .select()
-      .from(sqliteTable)
-      .where(eq(sqliteTable.__isDeleted, false))
-      .orderBy(
-        asc(sqliteTable.__lastUpdatedBlockNumber),
-        // TODO: add logIndex (https://github.com/latticexyz/mud/issues/1979)
-      )
-      .all();
-    const filteredRecords = !filters.length
-      ? records
-      : records.filter((record) => {
-          const keyTuple = decodeDynamicField("bytes32[]", record.__key);
-          return filters.some(
-            (filter) =>
-              filter.tableId === table.tableId &&
-              (filter.key0 == null || filter.key0 === keyTuple[0]) &&
-              (filter.key1 == null || filter.key1 === keyTuple[1]),
-          );
-        });
-    return {
-      ...table,
-      records: filteredRecords.map((record) => ({
-        key: Object.fromEntries(Object.entries(table.keySchema).map(([name]) => [name, record[name]])),
-        value: Object.fromEntries(Object.entries(table.valueSchema).map(([name]) => [name, record[name]])),
-      })),
-    };
-  });
-
-  return {
-    blockNumber: metadata?.lastUpdatedBlockNumber ?? null,
-    tables: tablesWithRecords,
-  };
-}
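
A hypothetical call with illustrative filename and filter values; per the caveat in the comment above, supplying a tableId in any filter narrows the result to just those tables:

import Database from "better-sqlite3";
import { drizzle } from "drizzle-orm/better-sqlite3";
import { getTablesWithRecords } from "./sqlite/getTablesWithRecords";

const database = drizzle(new Database("anvil.db"));
const { blockNumber, tables } = getTablesWithRecords(database, {
  chainId: 31337,
  filters: [{ tableId: "0x7462000000000000000000000000000000000000000000000000000000000000" }],
});
console.log(blockNumber, tables.length);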