ponder 0.8.6 → 0.8.8

This diff shows the contents of publicly released package versions as published to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registries.
@@ -4241,8 +4241,8 @@ var createDatabase = ({
       for (const log of reversed) {
         if (log.operation === 0) {
           await tx.deleteFrom(tableName.sql).$call((qb2) => {
-            for (const { sql: sql6 } of primaryKeyColumns) {
-              qb2 = qb2.where(sql6, "=", log[sql6]);
+            for (const { sql: sql5 } of primaryKeyColumns) {
+              qb2 = qb2.where(sql5, "=", log[sql5]);
             }
             return qb2;
           }).execute();
@@ -4251,8 +4251,8 @@ var createDatabase = ({
           log.checkpoint = void 0;
           log.operation = void 0;
           await tx.updateTable(tableName.sql).set(log).$call((qb2) => {
-            for (const { sql: sql6 } of primaryKeyColumns) {
-              qb2 = qb2.where(sql6, "=", log[sql6]);
+            for (const { sql: sql5 } of primaryKeyColumns) {
+              qb2 = qb2.where(sql5, "=", log[sql5]);
             }
             return qb2;
           }).execute();
@@ -4261,7 +4261,7 @@ var createDatabase = ({
           log.checkpoint = void 0;
           log.operation = void 0;
           await tx.insertInto(tableName.sql).values(log).onConflict(
-            (oc) => oc.columns(primaryKeyColumns.map(({ sql: sql6 }) => sql6)).doNothing()
+            (oc) => oc.columns(primaryKeyColumns.map(({ sql: sql5 }) => sql5)).doNothing()
          ).execute();
         }
       }
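For context, these three hunks touch what appears to be the operation-log replay in `createDatabase` that undoes table writes past a checkpoint (e.g. after a reorg): the log is walked in reverse and each write is inverted. Only the bundler's alias for kysely's `sql` changes (`sql6` to `sql5`); the logic is untouched. Below is a hedged TypeScript paraphrase of the dispatch, not ponder's source. The operation-code meanings are inferred from the revert actions shown (operation `0` is undone by a DELETE, so it is assumed to mean insert), and every name is illustrative.

```ts
// Hedged paraphrase of the revert replay above; all names are illustrative.
type LogRow = { operation: number; checkpoint?: string } & Record<string, unknown>;

async function revertLogs(
  reversed: LogRow[],
  primaryKeyColumns: string[],
  ops: {
    deleteByPk: (pk: Record<string, unknown>) => Promise<void>;
    restoreRow: (row: Record<string, unknown>) => Promise<void>;
    reinsertRow: (row: Record<string, unknown>) => Promise<void>;
  },
) {
  for (const log of reversed) {
    // Strip the log metadata before writing the row back to the table.
    const { operation, checkpoint, ...row } = log;
    if (operation === 0) {
      // Undo an insert: delete the row identified by its primary key.
      await ops.deleteByPk(
        Object.fromEntries(primaryKeyColumns.map((c) => [c, row[c]])),
      );
    } else if (operation === 1) {
      // Undo an update: write the logged pre-image back (assumed code 1).
      await ops.restoreRow(row);
    } else {
      // Undo a delete: re-insert the logged row (ON CONFLICT DO NOTHING).
      await ops.reinsertRow(row);
    }
  }
}
```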
@@ -6563,7 +6563,7 @@ function intervalRange(interval) {
 }
 
 // src/sync-store/index.ts
-import { sql as ksql, sql as sql4 } from "kysely";
+import { sql as ksql } from "kysely";
 import {
   checksumAddress as checksumAddress2,
   hexToBigInt as hexToBigInt2
@@ -6756,20 +6756,32 @@ var createSyncStore = ({
   common,
   db
 }) => ({
-  insertIntervals: async ({ intervals }) => {
+  insertIntervals: async ({ intervals, chainId }) => {
     if (intervals.length === 0)
       return;
     await db.wrap({ method: "insertIntervals" }, async () => {
+      const perFragmentIntervals = /* @__PURE__ */ new Map();
       const values = [];
-      for (const { interval, filter } of intervals) {
+      for (const { filter, interval } of intervals) {
        for (const fragment of getFragmentIds(filter)) {
-          values.push({
-            fragment_id: fragment.id,
-            chain_id: filter.chainId,
-            blocks: ksql`nummultirange(numrange(${interval[0]}, ${interval[1] + 1}, '[]'))`
-          });
+          if (perFragmentIntervals.has(fragment.id) === false) {
+            perFragmentIntervals.set(fragment.id, []);
+          }
+          perFragmentIntervals.get(fragment.id).push(interval);
         }
       }
+      for (const [fragmentId, intervals2] of perFragmentIntervals) {
+        const numranges = intervals2.map((interval) => {
+          const start2 = interval[0];
+          const end = interval[1] + 1;
+          return `numrange(${start2}, ${end}, '[]')`;
+        }).join(", ");
+        values.push({
+          fragment_id: fragmentId,
+          chain_id: chainId,
+          blocks: ksql.raw(`nummultirange(${numranges})`)
+        });
+      }
       await db.insertInto("intervals").values(values).onConflict(
         (oc) => oc.column("fragment_id").doUpdateSet({
           blocks: ksql`intervals.blocks + excluded.blocks`
@@ -6784,10 +6796,10 @@ var createSyncStore = ({
       const fragments = getFragmentIds(filter);
       for (const fragment of fragments) {
         const _query = db.selectFrom(
-          db.selectFrom("intervals").select(sql4`unnest(blocks)`.as("blocks")).where("fragment_id", "in", fragment.adjacent).as("unnested")
+          db.selectFrom("intervals").select(ksql`unnest(blocks)`.as("blocks")).where("fragment_id", "in", fragment.adjacent).as("unnested")
         ).select([
-          sql4`range_agg(unnested.blocks)`.as("merged_blocks"),
-          sql4.raw(`${i}`).as("filter")
+          ksql`range_agg(unnested.blocks)`.as("merged_blocks"),
+          ksql.raw(`'${i}'`).as("filter")
         ]);
         query2 = query2 === void 0 ? _query : query2.unionAll(_query);
       }
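Two substantive changes sit in the sync-store hunks above. First, the duplicate kysely import (`sql as sql4` alongside `sql as ksql`) is dropped, which is what cascades the mechanical alias renames (`sql4` to `ksql` here, `sql6` to `sql5` and `sql5` to `sql4` elsewhere) through the rest of this diff. Second, `insertIntervals` now receives the chain id from its caller and groups intervals per fragment, emitting a single `nummultirange(...)` row per fragment instead of one `numrange` row per interval; because the upsert resolves conflicts on `fragment_id`, this keeps any single INSERT from carrying two rows with the same conflict key, which Postgres rejects with "ON CONFLICT DO UPDATE command cannot affect row a second time". The `filter` selector in the merge query also gains quotes (`'${i}'`), emitting the index as a SQL string literal rather than a bare token. Here is a minimal standalone sketch of the new grouping; the `Interval` type and `buildIntervalRows` name are illustrative, not ponder's API:

```ts
// Sketch only: Interval and buildIntervalRows are illustrative names.
type Interval = [start: number, end: number];

function buildIntervalRows(
  rows: { fragmentId: string; interval: Interval }[],
  chainId: number,
) {
  // 1. Group intervals by fragment id so each fragment yields one row.
  const perFragment = new Map<string, Interval[]>();
  for (const { fragmentId, interval } of rows) {
    if (!perFragment.has(fragmentId)) perFragment.set(fragmentId, []);
    perFragment.get(fragmentId)!.push(interval);
  }
  // 2. Render each group as one Postgres nummultirange literal, keeping the
  //    diff's bound convention of end + 1 with inclusive '[]' bounds.
  return [...perFragment].map(([fragmentId, intervals]) => ({
    fragment_id: fragmentId,
    chain_id: chainId,
    blocks: `nummultirange(${intervals
      .map(([start, end]) => `numrange(${start}, ${end + 1}, '[]')`)
      .join(", ")})`,
  }));
}
```

Note that the `nummultirange` literal is interpolated via `ksql.raw` rather than bound as a parameter; that is workable here because the bounds are internally produced numbers, never user input.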
@@ -7020,7 +7032,7 @@ var createSyncStore = ({
           db2.selectFrom("transactionReceipts").select("status").where(
             "transactionReceipts.transactionHash",
             "=",
-            sql4.ref("transactions.hash")
+            ksql.ref("transactions.hash")
           ),
           "=",
           "0x1"
@@ -7299,7 +7311,7 @@ var createSyncStore = ({
     ).execute();
   }),
   getRpcRequestResult: async ({ request, chainId }) => db.wrap({ method: "getRpcRequestResult" }, async () => {
-    const result = await db.selectFrom("rpc_request_results").select("result").where("request_hash", "=", sql4`MD5(${request})`).where("chain_id", "=", chainId).executeTakeFirst();
+    const result = await db.selectFrom("rpc_request_results").select("result").where("request_hash", "=", ksql`MD5(${request})`).where("chain_id", "=", chainId).executeTakeFirst();
     return result?.result;
   }),
  pruneRpcRequestResult: async ({ blocks, chainId }) => db.wrap({ method: "pruneRpcRequestResult" }, async () => {
@@ -8660,7 +8672,8 @@ var createHistoricalSync = async (args) => {
     ]);
     if (args.network.disableCache === false) {
       await args.syncStore.insertIntervals({
-        intervals: syncedIntervals
+        intervals: syncedIntervals,
+        chainId: args.network.chainId
       });
     }
     blockCache.clear();
@@ -9913,6 +9926,10 @@ var createSync = async (args) => {
         limit: getEventsMaxBatchSize
       });
       consecutiveErrors = 0;
+      args.common.logger.debug({
+        service: "sync",
+        msg: `Fetched ${events.length} events from the database for a ${formatEta(estimateSeconds * 1e3)} range from ${decodeCheckpoint(from).blockTimestamp}`
+      });
       for (const network of args.networks) {
         updateHistoricalStatus({ events, checkpoint: cursor, network });
       }
@@ -9928,22 +9945,12 @@ var createSync = async (args) => {
       });
       yield { events, checkpoint: to };
       from = cursor;
-      const { eta, progress } = await getAppProgress(args.common.metrics);
-      if (events.length > 0) {
-        if (eta === void 0 || progress === void 0) {
-          args.common.logger.info({
-            service: "app",
-            msg: `Indexed ${events.length} events`
-          });
-        } else {
-          args.common.logger.info({
-            service: "app",
-            msg: `Indexed ${events.length} events with ${formatPercentage(progress)} complete and ${formatEta(eta)} remaining`
-          });
-        }
-      }
     } catch (error) {
       estimateSeconds = Math.max(10, Math.round(estimateSeconds / 10));
+      args.common.logger.debug({
+        service: "sync",
+        msg: `Failed to fetch events from the database, retrying with a ${formatEta(estimateSeconds * 1e3)} range`
+      });
       if (++consecutiveErrors > 4)
         throw error;
     }
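The last two hunks swap the per-batch "Indexed N events" info logging out of the sync event generator (it reappears in `run()` below) for debug logs around the adaptive fetch window: successes log the window used, and failures shrink the window to a tenth of its size, floored at 10 seconds, with the error rethrown after five consecutive failures. A condensed restatement of that retry policy follows; `fetchEvents` and the initial window size are hypothetical stand-ins, not ponder's API:

```ts
// Condensed restatement of the error handling in the diff above.
async function fetchWithShrinkingWindow(
  fetchEvents: (windowSeconds: number) => Promise<unknown[]>,
  initialWindowSeconds = 1000, // hypothetical starting estimate
) {
  let estimateSeconds = initialWindowSeconds;
  let consecutiveErrors = 0;
  for (;;) {
    try {
      const events = await fetchEvents(estimateSeconds);
      consecutiveErrors = 0; // any success resets the error budget
      return events;
    } catch (error) {
      // Shrink the requested range 10x, never below 10 seconds.
      estimateSeconds = Math.max(10, Math.round(estimateSeconds / 10));
      if (++consecutiveErrors > 4) throw error; // fail hard on the 5th error
    }
  }
}
```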
@@ -10119,7 +10126,8 @@ var createSync = async (args) => {
       ]);
       if (network.disableCache === false) {
         await args.syncStore.insertIntervals({
-          intervals: args.sources.filter(({ filter }) => filter.chainId === network.chainId).map(({ filter }) => ({ filter, interval }))
+          intervals: args.sources.filter(({ filter }) => filter.chainId === network.chainId).map(({ filter }) => ({ filter, interval })),
+          chainId: network.chainId
         });
       }
       if (isSyncEnd(syncProgress)) {
@@ -10511,11 +10519,34 @@ async function run({
   let lastFlush = Date.now();
   for await (const { events, checkpoint } of sync.getEvents()) {
     end = checkpoint;
-    const result = await handleEvents(
-      decodeEvents(common, indexingBuild.sources, events),
-      checkpoint
-    );
+    const decodedEvents = decodeEvents(common, indexingBuild.sources, events);
+    const result = await handleEvents(decodedEvents, checkpoint);
+    const { eta, progress } = await getAppProgress(common.metrics);
+    if (events.length > 0) {
+      if (eta === void 0 || progress === void 0) {
+        common.logger.info({
+          service: "app",
+          msg: `Indexed ${events.length} events`
+        });
+      } else {
+        common.logger.info({
+          service: "app",
+          msg: `Indexed ${events.length} events with ${formatPercentage(progress)} complete and ${formatEta(eta)} remaining`
+        });
+      }
+    }
     if (historicalIndexingStore.isCacheFull() && events.length > 0 || common.options.command === "dev" && lastFlush + 5e3 < Date.now() && events.length > 0) {
+      if (historicalIndexingStore.isCacheFull()) {
+        common.logger.debug({
+          service: "indexing",
+          msg: `Indexing cache has exceeded ${common.options.indexingCacheMaxBytes} MB limit, starting flush`
+        });
+      } else {
+        common.logger.debug({
+          service: "indexing",
+          msg: "Dev server periodic flush triggered, starting flush"
+        });
+      }
       await database.finalize({
         checkpoint: encodeCheckpoint(zeroCheckpoint)
       });
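In `run()`, event decoding is hoisted into its own statement, the progress logging removed from the sync loop above lands here, and the flush path gains debug logs saying which trigger fired. The flush condition itself is easy to misread because `&&` binds tighter than `||`; restated with explicit grouping in a small sketch (the function and parameter names are illustrative):

```ts
// Explicit-grouping restatement of the flush trigger in the diff above.
// 5e3 is the 5-second dev-mode flush interval, in milliseconds.
function shouldFlush(opts: {
  cacheFull: boolean;  // historicalIndexingStore.isCacheFull()
  eventCount: number;  // events.length
  command: string;     // common.options.command
  lastFlush: number;   // ms timestamp of the previous flush
  now: number;         // current ms timestamp
}): boolean {
  return (
    (opts.cacheFull && opts.eventCount > 0) ||
    (opts.command === "dev" && opts.lastFlush + 5e3 < opts.now && opts.eventCount > 0)
  );
}
```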
@@ -10527,6 +10558,10 @@ async function run({
         checkpoint: events[events.length - 1].checkpoint
       });
       lastFlush = Date.now();
+      common.logger.debug({
+        service: "indexing",
+        msg: "Completed flush"
+      });
     }
     await metadataStore.setStatus(sync.getStatus());
     if (result.status === "killed") {
@@ -10538,6 +10573,10 @@ async function run({
   }
   if (isKilled)
     return;
+  common.logger.debug({
+    service: "indexing",
+    msg: "Completed all historical events, starting final flush"
+  });
  await database.finalize({ checkpoint: encodeCheckpoint(zeroCheckpoint) });
  await historicalIndexingStore.flush();
  await database.complete({ checkpoint: encodeCheckpoint(zeroCheckpoint) });
@@ -11006,7 +11045,7 @@ async function dev({ cliOptions }) {
 }
 
 // src/bin/commands/list.ts
-import { sql as sql5 } from "kysely";
+import { sql as sql4 } from "kysely";
 var emptySchemaBuild = {
   schema: {},
   statements: {
@@ -11053,7 +11092,7 @@ async function list({ cliOptions }) {
   ).execute();
   let union;
   for (const row of ponderSchemas) {
-    const query2 = database.qb.internal.selectFrom(`${row.table_schema}._ponder_meta`).select(["value", sql5`${row.table_schema}`.as("schema")]).where("key", "=", "app");
+    const query2 = database.qb.internal.selectFrom(`${row.table_schema}._ponder_meta`).select(["value", sql4`${row.table_schema}`.as("schema")]).where("key", "=", "app");
    if (union === void 0) {
      union = query2;
    } else {