@latticexyz/store-indexer 2.0.0-next.13 → 2.0.0-next.15

This diff shows the changes between two publicly released versions of the package, as published to one of the supported registries. It is provided for informational purposes only.
@@ -0,0 +1,58 @@
+import { Sql } from "postgres";
+import { Middleware } from "koa";
+import Router from "@koa/router";
+import compose from "koa-compose";
+import { input } from "@latticexyz/store-sync/indexer-client";
+import { storeTables } from "@latticexyz/store-sync";
+import { queryLogs } from "./queryLogs";
+import { recordToLog } from "./recordToLog";
+import { debug, error } from "../debug";
+import { createBenchmark } from "@latticexyz/common";
+import { compress } from "../compress";
+
+export function apiRoutes(database: Sql): Middleware {
+  const router = new Router();
+
+  router.get("/api/logs", compress(), async (ctx) => {
+    const benchmark = createBenchmark("postgres:logs");
+    let options: ReturnType<typeof input.parse>;
+
+    try {
+      options = input.parse(typeof ctx.query.input === "string" ? JSON.parse(ctx.query.input) : {});
+    } catch (e) {
+      ctx.status = 400;
+      ctx.body = JSON.stringify(e);
+      debug(e);
+      return;
+    }
+
+    try {
+      options.filters = options.filters.length > 0 ? [...options.filters, { tableId: storeTables.Tables.tableId }] : [];
+      const records = await queryLogs(database, options ?? {}).execute();
+      benchmark("query records");
+      const logs = records.map(recordToLog);
+      benchmark("map records to logs");
+
+      if (records.length === 0) {
+        ctx.status = 404;
+        ctx.body = "no logs found";
+        error(
+          `no logs found for chainId ${options.chainId}, address ${options.address}, filters ${JSON.stringify(
+            options.filters
+          )}`
+        );
+        return;
+      }
+
+      const blockNumber = records[0].chainBlockNumber;
+      ctx.body = JSON.stringify({ blockNumber, logs });
+      ctx.status = 200;
+    } catch (e) {
+      ctx.status = 500;
+      ctx.body = JSON.stringify(e);
+      error(e);
+    }
+  });
+
+  return compose([router.routes(), router.allowedMethods()]) as Middleware;
+}
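The route above expects its parameters as a JSON-encoded `input` query parameter, validated by the `input` schema from `@latticexyz/store-sync/indexer-client`, and answers 404 when no records match. A minimal client-side sketch, assuming a hypothetical indexer at localhost:3001 and chain ID 31337:

```ts
// Sketch of a call to the /api/logs route; base URL and chainId are assumptions.
const queryInput = JSON.stringify({
  chainId: 31337,
  filters: [], // optionally [{ tableId, key0?, key1? }]
});

const response = await fetch(`http://localhost:3001/api/logs?input=${encodeURIComponent(queryInput)}`);
if (response.status === 200) {
  // Matches the shape written by the route: { blockNumber, logs }
  const { blockNumber, logs } = await response.json();
  console.log(`indexer at block ${blockNumber}, ${logs.length} logs`);
}
```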
@@ -0,0 +1,21 @@
+import { Hex } from "viem";
+
+export type RecordData = {
+  address: Hex;
+  tableId: Hex;
+  keyBytes: Hex;
+  staticData: Hex | null;
+  encodedLengths: Hex | null;
+  dynamicData: Hex | null;
+  recordBlockNumber: string;
+  logIndex: number;
+};
+
+export type RecordMetadata = {
+  indexerVersion: string;
+  chainId: string;
+  chainBlockNumber: string;
+  totalRows: number;
+};
+
+export type Record = RecordData & RecordMetadata;
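To illustrate the conventions these types encode: hex columns come back as `0x`-prefixed strings, while Postgres `bigint` columns arrive as decimal strings. All values below are hypothetical:

```ts
import { Record } from "./common";

// Hypothetical row, purely to show the field conventions.
const example: Record = {
  address: "0x0000000000000000000000000000000000000000",
  tableId: "0x0000000000000000000000000000000000000000000000000000000000000000",
  keyBytes: "0x0000000000000000000000000000000000000000000000000000000000000001",
  staticData: null,
  encodedLengths: null,
  dynamicData: null,
  recordBlockNumber: "123", // bigint column, serialized as a string
  logIndex: 0,
  indexerVersion: "2.0.0-next.15",
  chainId: "31337",
  chainBlockNumber: "123",
  totalRows: 1,
};
```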
@@ -0,0 +1,56 @@
+import { getAddress } from "viem";
+import { PgDatabase } from "drizzle-orm/pg-core";
+import { TableWithRecords, isTableRegistrationLog, logToTable, storeTables } from "@latticexyz/store-sync";
+import { decodeKey, decodeValueArgs } from "@latticexyz/protocol-parser";
+import { QueryAdapter } from "@latticexyz/store-sync/trpc-indexer";
+import { debug } from "../../debug";
+import { getLogs } from "./getLogs";
+import { groupBy } from "@latticexyz/common/utils";
+
+/**
+ * Creates a query adapter for the tRPC server/client to query data from Postgres.
+ *
+ * @param {PgDatabase<any>} database Postgres database object from Drizzle
+ * @returns {Promise<QueryAdapter>} A set of methods used by tRPC endpoints.
+ * @deprecated
+ */
+export async function createQueryAdapter(database: PgDatabase<any>): Promise<QueryAdapter> {
+  const adapter: QueryAdapter = {
+    async getLogs(opts) {
+      return getLogs(database, opts);
+    },
+    async findAll(opts) {
+      const filters = opts.filters ?? [];
+      const { blockNumber, logs } = await getLogs(database, {
+        ...opts,
+        // make sure we're always retrieving `store.Tables` table, so we can decode table values
+        filters: filters.length > 0 ? [...filters, { tableId: storeTables.Tables.tableId }] : [],
+      });
+
+      const tables = logs.filter(isTableRegistrationLog).map(logToTable);
+
+      const logsByTable = groupBy(logs, (log) => `${getAddress(log.address)}:${log.args.tableId}`);
+
+      const tablesWithRecords: TableWithRecords[] = tables.map((table) => {
+        const tableLogs = logsByTable.get(`${getAddress(table.address)}:${table.tableId}`) ?? [];
+        const records = tableLogs.map((log) => ({
+          key: decodeKey(table.keySchema, log.args.keyTuple),
+          value: decodeValueArgs(table.valueSchema, log.args),
+        }));
+
+        return {
+          ...table,
+          records,
+        };
+      });
+
+      debug("findAll: decoded %d logs across %d tables", logs.length, tables.length);
+
+      return {
+        blockNumber,
+        tables: tablesWithRecords,
+      };
+    },
+  };
+  return adapter;
+}
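A minimal wiring sketch for this adapter, assuming the `drizzle-orm/postgres-js` driver and a hypothetical `DATABASE_URL` environment variable (the diff above omits the file header, so the import path is assumed):

```ts
import postgres from "postgres";
import { drizzle } from "drizzle-orm/postgres-js";
import { createQueryAdapter } from "./createQueryAdapter"; // path assumed

// DATABASE_URL is a hypothetical env var for this sketch.
const database = drizzle(postgres(process.env.DATABASE_URL!));
const adapter = await createQueryAdapter(database);

// findAll always merges in store.Tables (see above) so table values can be decoded.
const { blockNumber, tables } = await adapter.findAll({ chainId: 31337, filters: [] });
console.log(`decoded ${tables.length} tables as of block ${blockNumber}`);
```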
@@ -0,0 +1,81 @@
+import { PgDatabase } from "drizzle-orm/pg-core";
+import { Hex } from "viem";
+import { StorageAdapterLog, SyncFilter } from "@latticexyz/store-sync";
+import { tables } from "@latticexyz/store-sync/postgres";
+import { and, asc, eq, or } from "drizzle-orm";
+import { bigIntMax } from "@latticexyz/common/utils";
+import { recordToLog } from "../recordToLog";
+import { createBenchmark } from "@latticexyz/common";
+
+/**
+ * @deprecated
+ */
+export async function getLogs(
+  database: PgDatabase<any>,
+  {
+    chainId,
+    address,
+    filters = [],
+  }: {
+    readonly chainId: number;
+    readonly address?: Hex;
+    readonly filters?: readonly SyncFilter[];
+  }
+): Promise<{ blockNumber: bigint; logs: (StorageAdapterLog & { eventName: "Store_SetRecord" })[] }> {
+  const benchmark = createBenchmark("drizzleGetLogs");
+
+  const conditions = filters.length
+    ? filters.map((filter) =>
+        and(
+          address != null ? eq(tables.recordsTable.address, address) : undefined,
+          eq(tables.recordsTable.tableId, filter.tableId),
+          filter.key0 != null ? eq(tables.recordsTable.key0, filter.key0) : undefined,
+          filter.key1 != null ? eq(tables.recordsTable.key1, filter.key1) : undefined
+        )
+      )
+    : address != null
+    ? [eq(tables.recordsTable.address, address)]
+    : [];
+  benchmark("parse config");
+
+  // Query for the block number that the indexer (i.e. chain) is at, in case the
+  // indexer is further along in the chain than a given store/table's last updated
+  // block number. We'll then take the highest block number between the indexer's
+  // chain state and all the records in the query (in case the records updated
+  // between these queries). Using just the highest block number from the queries
+  // could potentially signal to the client an older-than-necessary block number,
+  // for stores/tables that haven't seen recent activity.
+  // TODO: move the block number query into the records query for atomicity so we don't have to merge them here
+  const chainState = await database
+    .select()
+    .from(tables.configTable)
+    .where(eq(tables.configTable.chainId, chainId))
+    .limit(1)
+    .execute()
+    // Get the first record in a way that returns a possible `undefined`
+    // TODO: move this to `.findFirst` after upgrading drizzle or `rows[0]` after enabling `noUncheckedIndexedAccess: true`
+    .then((rows) => rows.find(() => true));
+  const indexerBlockNumber = chainState?.blockNumber ?? 0n;
+  benchmark("query chainState");
+
+  const records = await database
+    .select()
+    .from(tables.recordsTable)
+    .where(or(...conditions))
+    .orderBy(
+      asc(tables.recordsTable.blockNumber)
+      // TODO: add logIndex (https://github.com/latticexyz/mud/issues/1979)
+    );
+  benchmark("query records");
+
+  const blockNumber = records.reduce((max, record) => bigIntMax(max, record.blockNumber ?? 0n), indexerBlockNumber);
+  benchmark("find block number");
+
+  const logs = records
+    // TODO: add this to the query, assuming we can optimize with an index
+    .filter((record) => !record.isDeleted)
+    .map(recordToLog);
+  benchmark("map records to logs");
+
+  return { blockNumber, logs };
+}
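Note the filter semantics in the `conditions` above: within one filter, `tableId` is AND-ed with the optional `key0`/`key1` (and the address, when given); separate filters are OR-ed together. A usage sketch with a hypothetical table ID (path and env var assumed):

```ts
import postgres from "postgres";
import { drizzle } from "drizzle-orm/postgres-js";
import { getLogs } from "./getLogs"; // path assumed

const database = drizzle(postgres(process.env.DATABASE_URL!)); // hypothetical env var

// One filter: all records of one table; add key0/key1 to narrow further.
const { blockNumber, logs } = await getLogs(database, {
  chainId: 31337, // hypothetical
  filters: [
    { tableId: "0x0000000000000000000000000000000000000000000000000000000000000000" }, // hypothetical tableId
  ],
});
console.log(`got ${logs.length} Store_SetRecord logs at block ${blockNumber}`);
```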
@@ -0,0 +1,72 @@
+import { isNotNull } from "@latticexyz/common/utils";
+import { PendingQuery, Row, Sql } from "postgres";
+import { hexToBytes } from "viem";
+import { z } from "zod";
+import { input } from "@latticexyz/store-sync/indexer-client";
+import { transformSchemaName } from "@latticexyz/store-sync/postgres";
+import { Record } from "./common";
+
+const schemaName = transformSchemaName("mud");
+
+function and(sql: Sql, conditions: PendingQuery<Row[]>[]): PendingQuery<Row[]> {
+  return sql`(${conditions.reduce((query, condition) => sql`${query} AND ${condition}`)})`;
+}
+
+function or(sql: Sql, conditions: PendingQuery<Row[]>[]): PendingQuery<Row[]> {
+  return sql`(${conditions.reduce((query, condition) => sql`${query} OR ${condition}`)})`;
+}
+
+export function queryLogs(sql: Sql, opts: z.infer<typeof input>): PendingQuery<Record[]> {
+  const conditions = opts.filters.length
+    ? opts.filters.map((filter) =>
+        and(
+          sql,
+          [
+            opts.address != null ? sql`address = ${hexToBytes(opts.address)}` : null,
+            sql`table_id = ${hexToBytes(filter.tableId)}`,
+            filter.key0 != null ? sql`key0 = ${hexToBytes(filter.key0)}` : null,
+            filter.key1 != null ? sql`key1 = ${hexToBytes(filter.key1)}` : null,
+          ].filter(isNotNull)
+        )
+      )
+    : opts.address != null
+    ? [sql`address = ${hexToBytes(opts.address)}`]
+    : [];
+
+  const where = sql`WHERE ${and(
+    sql,
+    [sql`is_deleted != true`, conditions.length ? or(sql, conditions) : null].filter(isNotNull)
+  )}`;
+
+  // TODO: implement bytea <> hex columns via custom types: https://github.com/porsager/postgres#custom-types
+  // TODO: sort by logIndex (https://github.com/latticexyz/mud/issues/1979)
+  return sql<Record[]>`
+    WITH
+      config AS (
+        SELECT
+          version AS "indexerVersion",
+          chain_id AS "chainId",
+          block_number AS "chainBlockNumber"
+        FROM ${sql(`${schemaName}.config`)}
+        LIMIT 1
+      ),
+      records AS (
+        SELECT
+          '0x' || encode(address, 'hex') AS address,
+          '0x' || encode(table_id, 'hex') AS "tableId",
+          '0x' || encode(key_bytes, 'hex') AS "keyBytes",
+          '0x' || encode(static_data, 'hex') AS "staticData",
+          '0x' || encode(encoded_lengths, 'hex') AS "encodedLengths",
+          '0x' || encode(dynamic_data, 'hex') AS "dynamicData",
+          block_number AS "recordBlockNumber",
+          log_index AS "logIndex"
+        FROM ${sql(`${schemaName}.records`)}
+        ${where}
+        ORDER BY block_number, log_index ASC
+      )
+    SELECT
+      (SELECT COUNT(*) FROM records) AS "totalRows",
+      *
+    FROM config, records
+  `;
+}
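Because the final SELECT cross-joins the `config` and `records` CTEs, every returned row carries the chain metadata and `totalRows` alongside the record columns, so metadata can be read off any row. A direct-usage sketch against a raw postgres.js connection (connection string and path assumed):

```ts
import postgres from "postgres";
import { queryLogs } from "./queryLogs"; // path assumed
import { recordToLog } from "./recordToLog";

const sql = postgres(process.env.DATABASE_URL!); // hypothetical env var

const records = await queryLogs(sql, { chainId: 31337, filters: [] }).execute();
if (records.length > 0) {
  // Metadata columns are repeated on every row; read them off the first.
  console.log(`block ${records[0].chainBlockNumber}, ${records[0].totalRows} rows`);
  const logs = records.map(recordToLog);
}
```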
@@ -0,0 +1,19 @@
+import { StorageAdapterLog } from "@latticexyz/store-sync";
+import { decodeDynamicField } from "@latticexyz/protocol-parser";
+import { RecordData } from "./common";
+
+export function recordToLog(
+  record: Omit<RecordData, "recordBlockNumber">
+): StorageAdapterLog & { eventName: "Store_SetRecord" } {
+  return {
+    address: record.address,
+    eventName: "Store_SetRecord",
+    args: {
+      tableId: record.tableId,
+      keyTuple: decodeDynamicField("bytes32[]", record.keyBytes),
+      staticData: record.staticData ?? "0x",
+      encodedLengths: record.encodedLengths ?? "0x",
+      dynamicData: record.dynamicData ?? "0x",
+    },
+  } as const;
+}
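A round-trip sketch of how a queried row becomes a `Store_SetRecord` log. `keyBytes` must be a concatenation of 32-byte words so that `decodeDynamicField("bytes32[]", ...)` can split it into the key tuple; all values below are hypothetical:

```ts
import { recordToLog } from "./recordToLog"; // path assumed

// Hypothetical row, shaped like the queryLogs SELECT above.
const log = recordToLog({
  address: "0x0000000000000000000000000000000000000000",
  tableId: "0x0000000000000000000000000000000000000000000000000000000000000000",
  // One 32-byte key word => a keyTuple of length 1
  keyBytes: "0x0000000000000000000000000000000000000000000000000000000000000001",
  staticData: null,
  encodedLengths: null,
  dynamicData: null,
  logIndex: 0,
});

log.args.keyTuple; // ["0x0000...0001"]
log.args.staticData; // "0x" (null columns are normalized to empty hex)
```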
package/src/sentry.ts ADDED
@@ -0,0 +1,105 @@
+import * as Sentry from "@sentry/node";
+import { ProfilingIntegration } from "@sentry/profiling-node";
+import { stripUrlQueryAndFragment } from "@sentry/utils";
+import { debug } from "./debug";
+import Koa from "koa";
+
+Sentry.init({
+  dsn: process.env.SENTRY_DSN,
+  integrations: [
+    // Automatically instrument Node.js libraries and frameworks
+    ...Sentry.autoDiscoverNodePerformanceMonitoringIntegrations(),
+    new ProfilingIntegration(),
+  ],
+  // Performance Monitoring
+  tracesSampleRate: 1.0,
+  // Set sampling rate for profiling - this is relative to tracesSampleRate
+  profilesSampleRate: 1.0,
+});
+
+const requestHandler: Koa.Middleware = (ctx, next) => {
+  return new Promise<void>((resolve, reject) => {
+    Sentry.runWithAsyncContext(async () => {
+      const hub = Sentry.getCurrentHub();
+      hub.configureScope((scope) =>
+        scope.addEventProcessor((event) =>
+          Sentry.addRequestDataToEvent(event, ctx.request, {
+            include: {
+              user: false,
+            },
+          })
+        )
+      );
+
+      try {
+        await next();
+      } catch (err) {
+        reject(err);
+      }
+      resolve();
+    });
+  });
+};
+
+// This tracing middleware creates a transaction per request
+const tracingMiddleWare: Koa.Middleware = async (ctx, next) => {
+  const reqMethod = (ctx.method || "").toUpperCase();
+  const reqUrl = ctx.url && stripUrlQueryAndFragment(ctx.url);
+
+  // Connect to trace of upstream app
+  let traceparentData;
+  if (ctx.request.get("sentry-trace")) {
+    traceparentData = Sentry.extractTraceparentData(ctx.request.get("sentry-trace"));
+  }
+
+  const transaction = Sentry.startTransaction({
+    name: `${reqMethod} ${reqUrl}`,
+    op: "http.server",
+    ...traceparentData,
+  });
+
+  ctx.__sentry_transaction = transaction;
+
+  // We put the transaction on the scope so users can attach children to it
+  Sentry.getCurrentHub().configureScope((scope) => {
+    scope.setSpan(transaction);
+  });
+
+  ctx.res.on("finish", () => {
+    // Push `transaction.finish` to the next event loop so open spans have a chance to finish before the transaction closes
+    setImmediate(() => {
+      // If you're using koa router, set the matched route as transaction name
+      if (ctx._matchedRoute) {
+        const mountPath = ctx.mountPath || "";
+        transaction.setName(`${reqMethod} ${mountPath}${ctx._matchedRoute}`);
+      }
+
+      transaction.setHttpStatus(ctx.status);
+      transaction.finish();
+    });
+  });
+
+  await next();
+};
+
+const errorHandler: Koa.Middleware = async (ctx, next) => {
+  try {
+    await next();
+  } catch (err) {
+    Sentry.withScope((scope) => {
+      scope.addEventProcessor((event) => {
+        return Sentry.addRequestDataToEvent(event, ctx.request);
+      });
+      Sentry.captureException(err);
+    });
+    throw err;
+  }
+};
+
+export const registerSentryMiddlewares = (server: Koa): void => {
+  debug("Registering Sentry middlewares");
+
+  server.use(errorHandler);
+  server.use(requestHandler);
+  server.use(tracingMiddleWare);
+};
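Note that `Sentry.init` runs at module load, so importing this file is itself a side effect that reads `SENTRY_DSN`. A minimal sketch of hooking the three middlewares into a server, assuming the DSN is set in the environment and a hypothetical port:

```ts
import Koa from "koa";
import { registerSentryMiddlewares } from "./sentry";

// Importing ./sentry runs Sentry.init({ dsn: process.env.SENTRY_DSN, ... }).
const server = new Koa();

// Registers error capture, request context, and per-request transactions, in that order.
registerSentryMiddlewares(server);

server.listen(3001); // port is a hypothetical choice for this sketch
```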
@@ -0,0 +1,47 @@
+import { Middleware } from "koa";
+import Router from "@koa/router";
+import compose from "koa-compose";
+import { input } from "@latticexyz/store-sync/indexer-client";
+import { storeTables, tablesWithRecordsToLogs } from "@latticexyz/store-sync";
+import { debug } from "../debug";
+import { createBenchmark } from "@latticexyz/common";
+import { compress } from "../compress";
+import { getTablesWithRecords } from "./getTablesWithRecords";
+import { BaseSQLiteDatabase } from "drizzle-orm/sqlite-core";
+
+export function apiRoutes(database: BaseSQLiteDatabase<"sync", any>): Middleware {
+  const router = new Router();
+
+  router.get("/api/logs", compress(), async (ctx) => {
+    const benchmark = createBenchmark("sqlite:logs");
+
+    let options: ReturnType<typeof input.parse>;
+
+    try {
+      options = input.parse(typeof ctx.query.input === "string" ? JSON.parse(ctx.query.input) : {});
+    } catch (error) {
+      ctx.status = 400;
+      ctx.body = JSON.stringify(error);
+      debug(error);
+      return;
+    }
+
+    try {
+      options.filters = options.filters.length > 0 ? [...options.filters, { tableId: storeTables.Tables.tableId }] : [];
+      benchmark("parse config");
+      const { blockNumber, tables } = getTablesWithRecords(database, options);
+      benchmark("query tables with records");
+      const logs = tablesWithRecordsToLogs(tables);
+      benchmark("convert records to logs");
+
+      ctx.body = JSON.stringify({ blockNumber: blockNumber?.toString() ?? "-1", logs });
+      ctx.status = 200;
+    } catch (error) {
+      ctx.status = 500;
+      ctx.body = JSON.stringify(error);
+      debug(error);
+    }
+  });
+
+  return compose([router.routes(), router.allowedMethods()]) as Middleware;
+}
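A sketch of mounting this SQLite route on a Koa app, assuming the better-sqlite3 driver (which satisfies the `"sync"` database type) and a hypothetical database file:

```ts
import Koa from "koa";
import Database from "better-sqlite3";
import { drizzle } from "drizzle-orm/better-sqlite3";
import { apiRoutes } from "./apiRoutes"; // path assumed

const database = drizzle(new Database("indexer.db")); // hypothetical file name
const server = new Koa();
server.use(apiRoutes(database));
server.listen(3001); // hypothetical port
```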
@@ -1,10 +1,7 @@
-import { eq } from "drizzle-orm";
 import { BaseSQLiteDatabase } from "drizzle-orm/sqlite-core";
-import { buildTable, chainState, getTables } from "@latticexyz/store-sync/sqlite";
 import { QueryAdapter } from "@latticexyz/store-sync/trpc-indexer";
-import { debug } from "../debug";
-import { getAddress } from "viem";
-import { decodeDynamicField } from "@latticexyz/protocol-parser";
+import { getTablesWithRecords } from "./getTablesWithRecords";
+import { tablesWithRecordsToLogs } from "@latticexyz/store-sync";

 /**
  * Creates a storage adapter for the tRPC server/client to query data from SQLite.
@@ -14,48 +11,13 @@ import { decodeDynamicField } from "@latticexyz/protocol-parser";
  */
 export async function createQueryAdapter(database: BaseSQLiteDatabase<"sync", any>): Promise<QueryAdapter> {
   const adapter: QueryAdapter = {
-    async findAll({ chainId, address, filters = [] }) {
-      // If _any_ filter has a table ID, this will filter down all data to just those tables. Which mean we can't yet mix table filters with key-only filters.
-      // TODO: improve this so we can express this in the query (need to be able to query data across tables more easily)
-      const tableIds = Array.from(new Set(filters.map((filter) => filter.tableId)));
-      const tables = getTables(database)
-        .filter((table) => address == null || getAddress(address) === getAddress(table.address))
-        .filter((table) => !tableIds.length || tableIds.includes(table.tableId));
-
-      const tablesWithRecords = tables.map((table) => {
-        const sqliteTable = buildTable(table);
-        const records = database.select().from(sqliteTable).where(eq(sqliteTable.__isDeleted, false)).all();
-        const filteredRecords = !filters.length
-          ? records
-          : records.filter((record) => {
-              const keyTuple = decodeDynamicField("bytes32[]", record.__key);
-              return filters.some(
-                (filter) =>
-                  filter.tableId === table.tableId &&
-                  (filter.key0 == null || filter.key0 === keyTuple[0]) &&
-                  (filter.key1 == null || filter.key1 === keyTuple[1])
-              );
-            });
-        return {
-          ...table,
-          records: filteredRecords.map((record) => ({
-            key: Object.fromEntries(Object.entries(table.keySchema).map(([name]) => [name, record[name]])),
-            value: Object.fromEntries(Object.entries(table.valueSchema).map(([name]) => [name, record[name]])),
-          })),
-        };
-      });
-
-      const metadata = database.select().from(chainState).where(eq(chainState.chainId, chainId)).all();
-      const { lastUpdatedBlockNumber } = metadata[0] ?? {};
-
-      const result = {
-        blockNumber: lastUpdatedBlockNumber ?? null,
-        tables: tablesWithRecords,
-      };
-
-      debug("findAll", chainId, address, result);
-
-      return result;
+    async getLogs(opts) {
+      const { blockNumber, tables } = getTablesWithRecords(database, opts);
+      const logs = tablesWithRecordsToLogs(tables);
+      return { blockNumber: blockNumber ?? 0n, logs };
+    },
+    async findAll(opts) {
+      return getTablesWithRecords(database, opts);
+    },
     },
   };
   return adapter;
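After this change, both adapter methods delegate to `getTablesWithRecords`; the new `getLogs` additionally flattens the decoded tables into logs via `tablesWithRecordsToLogs` and substitutes `0n` when no block number has been recorded yet. A usage sketch, assumptions as in the earlier SQLite example:

```ts
import Database from "better-sqlite3";
import { drizzle } from "drizzle-orm/better-sqlite3";
import { createQueryAdapter } from "./createQueryAdapter"; // path assumed

const adapter = await createQueryAdapter(drizzle(new Database("indexer.db"))); // hypothetical file

// New in this version: a log-based view, not just decoded tables.
const { blockNumber, logs } = await adapter.getLogs({ chainId: 31337, filters: [] });
```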
@@ -0,0 +1,75 @@
+import { asc, eq } from "drizzle-orm";
+import { BaseSQLiteDatabase } from "drizzle-orm/sqlite-core";
+import { buildTable, chainState, getTables } from "@latticexyz/store-sync/sqlite";
+import { Hex, getAddress } from "viem";
+import { decodeDynamicField } from "@latticexyz/protocol-parser";
+import { SyncFilter, TableWithRecords } from "@latticexyz/store-sync";
+
+// TODO: refactor sqlite and replace this with getLogs to match postgres (https://github.com/latticexyz/mud/issues/1970)
+
+/**
+ * @deprecated
+ * */
+export function getTablesWithRecords(
+  database: BaseSQLiteDatabase<"sync", any>,
+  {
+    chainId,
+    address,
+    filters = [],
+  }: {
+    readonly chainId: number;
+    readonly address?: Hex;
+    readonly filters?: readonly SyncFilter[];
+  }
+): { blockNumber: bigint | null; tables: readonly TableWithRecords[] } {
+  const metadata = database
+    .select()
+    .from(chainState)
+    .where(eq(chainState.chainId, chainId))
+    .limit(1)
+    .all()
+    .find(() => true);
+
+  // If _any_ filter has a table ID, this will filter down all data to just those tables. Which means we can't yet mix table filters with key-only filters.
+  // TODO: improve this so we can express this in the query (need to be able to query data across tables more easily)
+  const tableIds = Array.from(new Set(filters.map((filter) => filter.tableId)));
+  const tables = getTables(database)
+    .filter((table) => address == null || getAddress(address) === getAddress(table.address))
+    .filter((table) => !tableIds.length || tableIds.includes(table.tableId));
+
+  const tablesWithRecords = tables.map((table) => {
+    const sqliteTable = buildTable(table);
+    const records = database
+      .select()
+      .from(sqliteTable)
+      .where(eq(sqliteTable.__isDeleted, false))
+      .orderBy(
+        asc(sqliteTable.__lastUpdatedBlockNumber)
+        // TODO: add logIndex (https://github.com/latticexyz/mud/issues/1979)
+      )
+      .all();
+    const filteredRecords = !filters.length
+      ? records
+      : records.filter((record) => {
+          const keyTuple = decodeDynamicField("bytes32[]", record.__key);
+          return filters.some(
+            (filter) =>
+              filter.tableId === table.tableId &&
+              (filter.key0 == null || filter.key0 === keyTuple[0]) &&
+              (filter.key1 == null || filter.key1 === keyTuple[1])
+          );
+        });
+    return {
+      ...table,
+      records: filteredRecords.map((record) => ({
+        key: Object.fromEntries(Object.entries(table.keySchema).map(([name]) => [name, record[name]])),
+        value: Object.fromEntries(Object.entries(table.valueSchema).map(([name]) => [name, record[name]])),
+      })),
+    };
+  });
+
+  return {
+    blockNumber: metadata?.lastUpdatedBlockNumber ?? null,
+    tables: tablesWithRecords,
+  };
+}
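Unlike the Postgres `getLogs`, the key filters here are applied in JavaScript after fetching all non-deleted rows for the matching tables, by decoding each record's `__key` into a tuple. A call sketch with a hypothetical table ID and key:

```ts
import Database from "better-sqlite3";
import { drizzle } from "drizzle-orm/better-sqlite3";
import { getTablesWithRecords } from "./getTablesWithRecords"; // path assumed

const database = drizzle(new Database("indexer.db")); // hypothetical file

// Rows are fetched per matching table, then key filters run in JS
// against the decoded key tuple (hypothetical tableId/key0 below).
const { blockNumber, tables } = getTablesWithRecords(database, {
  chainId: 31337,
  filters: [
    {
      tableId: "0x0000000000000000000000000000000000000000000000000000000000000000",
      key0: "0x0000000000000000000000000000000000000000000000000000000000000001",
    },
  ],
});
```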
@@ -1,7 +0,0 @@
-import{z as e,ZodError as c}from"zod";var t=e.intersection(e.object({HOST:e.string().default("0.0.0.0"),PORT:e.coerce.number().positive().default(3001),START_BLOCK:e.coerce.bigint().nonnegative().default(0n),MAX_BLOCK_RANGE:e.coerce.bigint().positive().default(1000n),POLLING_INTERVAL:e.coerce.number().positive().default(1e3)}),e.union([e.object({RPC_HTTP_URL:e.string(),RPC_WS_URL:e.string().optional()}),e.object({RPC_HTTP_URL:e.string().optional(),RPC_WS_URL:e.string()})]));function f(o){let r=o!==void 0?e.intersection(t,o):t;try{return r.parse(process.env)}catch(n){if(n instanceof c){let{_errors:a,...i}=n.format();console.error(`
-Missing or invalid environment variables:
-
- ${Object.keys(i).join(`
-`)}
-`),process.exit(1)}throw n}}import s from"debug";var _=s("mud:store-indexer");export{_ as a,f as b};
-//# sourceMappingURL=chunk-X3OEYQLT.js.map
@@ -1 +0,0 @@
1
- {"version":3,"sources":["../bin/parseEnv.ts","../src/debug.ts"],"sourcesContent":["import { z, ZodError, ZodIntersection, ZodTypeAny } from \"zod\";\n\nconst commonSchema = z.intersection(\n z.object({\n HOST: z.string().default(\"0.0.0.0\"),\n PORT: z.coerce.number().positive().default(3001),\n START_BLOCK: z.coerce.bigint().nonnegative().default(0n),\n MAX_BLOCK_RANGE: z.coerce.bigint().positive().default(1000n),\n POLLING_INTERVAL: z.coerce.number().positive().default(1000),\n }),\n z.union([\n z.object({\n RPC_HTTP_URL: z.string(),\n RPC_WS_URL: z.string().optional(),\n }),\n z.object({\n RPC_HTTP_URL: z.string().optional(),\n RPC_WS_URL: z.string(),\n }),\n ])\n);\n\nexport function parseEnv<TSchema extends ZodTypeAny | undefined = undefined>(\n schema?: TSchema\n): z.infer<TSchema extends ZodTypeAny ? ZodIntersection<typeof commonSchema, TSchema> : typeof commonSchema> {\n const envSchema = schema !== undefined ? z.intersection(commonSchema, schema) : commonSchema;\n try {\n return envSchema.parse(process.env);\n } catch (error) {\n if (error instanceof ZodError) {\n const { _errors, ...invalidEnvVars } = error.format();\n console.error(`\\nMissing or invalid environment variables:\\n\\n ${Object.keys(invalidEnvVars).join(\"\\n \")}\\n`);\n process.exit(1);\n }\n throw error;\n }\n}\n","import createDebug from \"debug\";\n\nexport const debug = createDebug(\"mud:store-indexer\");\n"],"mappings":"AAAA,OAAS,KAAAA,EAAG,YAAAC,MAA6C,MAEzD,IAAMC,EAAeF,EAAE,aACrBA,EAAE,OAAO,CACP,KAAMA,EAAE,OAAO,EAAE,QAAQ,SAAS,EAClC,KAAMA,EAAE,OAAO,OAAO,EAAE,SAAS,EAAE,QAAQ,IAAI,EAC/C,YAAaA,EAAE,OAAO,OAAO,EAAE,YAAY,EAAE,QAAQ,EAAE,EACvD,gBAAiBA,EAAE,OAAO,OAAO,EAAE,SAAS,EAAE,QAAQ,KAAK,EAC3D,iBAAkBA,EAAE,OAAO,OAAO,EAAE,SAAS,EAAE,QAAQ,GAAI,CAC7D,CAAC,EACDA,EAAE,MAAM,CACNA,EAAE,OAAO,CACP,aAAcA,EAAE,OAAO,EACvB,WAAYA,EAAE,OAAO,EAAE,SAAS,CAClC,CAAC,EACDA,EAAE,OAAO,CACP,aAAcA,EAAE,OAAO,EAAE,SAAS,EAClC,WAAYA,EAAE,OAAO,CACvB,CAAC,CACH,CAAC,CACH,EAEO,SAASG,EACdC,EAC2G,CAC3G,IAAMC,EAAYD,IAAW,OAAYJ,EAAE,aAAaE,EAAcE,CAAM,EAAIF,EAChF,GAAI,CACF,OAAOG,EAAU,MAAM,QAAQ,GAAG,CACpC,OAASC,EAAP,CACA,GAAIA,aAAiBL,EAAU,CAC7B,GAAM,CAAE,QAAAM,EAAS,GAAGC,CAAe,EAAIF,EAAM,OAAO,EACpD,QAAQ,MAAM;AAAA;AAAA;AAAA,IAAoD,OAAO,KAAKE,CAAc,EAAE,KAAK;AAAA,GAAM;AAAA,CAAK,EAC9G,QAAQ,KAAK,CAAC,EAEhB,MAAMF,CACR,CACF,CCpCA,OAAOG,MAAiB,QAEjB,IAAMC,EAAQD,EAAY,mBAAmB","names":["z","ZodError","commonSchema","parseEnv","schema","envSchema","error","_errors","invalidEnvVars","createDebug","debug"]}