ponder 0.8.7 → 0.8.9

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -3922,13 +3922,28 @@ var ReadonlyClient = class extends pg.Client {
     }
   }
 };
-function createPool(config) {
-  return new pg.Pool({
+function createErrorHandler(logger) {
+  return (error) => {
+    const client = error.client;
+    const pid = client?.processID ?? "unknown";
+    const applicationName = client?.connectionParameters?.application_name ?? "unknown";
+    logger.error({
+      service: "postgres",
+      msg: `Pool error (application_name: ${applicationName}, pid: ${pid})`,
+      error
+    });
+  };
+}
+function createPool(config, logger) {
+  const pool = new pg.Pool({
     // https://stackoverflow.com/questions/59155572/how-to-set-query-timeout-in-relation-to-statement-timeout
     statement_timeout: 2 * 60 * 1e3,
     // 2 minutes
     ...config
   });
+  const onError2 = createErrorHandler(logger);
+  pool.on("error", onError2);
+  return pool;
 }
 
 // src/utils/pglite.ts
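Note on the hunk above: node-postgres emits an "error" event on a pool when an idle client errors out (for example, when the server closes the connection), and with no listener attached Node.js treats it as an unhandled "error" event and crashes the process. 0.8.9 therefore attaches a logging handler to every pool it creates. A minimal sketch of the same pattern using the documented pool "error" listener; the Logger shape and the createPoolWithLogging name are hypothetical stand-ins, not ponder's API:

    import pg from "pg";

    interface Logger {
      error(log: { service: string; msg: string; error: Error }): void;
    }

    function createPoolWithLogging(config: pg.PoolConfig, logger: Logger): pg.Pool {
      const pool = new pg.Pool(config);
      // Without this listener, an idle-client error would crash the process.
      pool.on("error", (error) => {
        logger.error({
          service: "postgres",
          msg: `Pool error (application_name: ${config.application_name ?? "unknown"})`,
          error,
        });
      });
      return pool;
    }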
@@ -4108,28 +4123,40 @@ var createDatabase = ({
     );
     const [readonlyMax, userMax, syncMax] = common.options.command === "serve" ? [preBuild.databaseConfig.poolConfig.max - internalMax, 0, 0] : [equalMax, equalMax, equalMax];
     driver = {
-      internal: createPool({
-        ...preBuild.databaseConfig.poolConfig,
-        application_name: `${preBuild.namespace}_internal`,
-        max: internalMax,
-        statement_timeout: 10 * 60 * 1e3
-        // 10 minutes to accommodate slow sync store migrations.
-      }),
-      user: createPool({
-        ...preBuild.databaseConfig.poolConfig,
-        application_name: `${preBuild.namespace}_user`,
-        max: userMax
-      }),
-      readonly: createPool({
-        ...preBuild.databaseConfig.poolConfig,
-        application_name: `${preBuild.namespace}_readonly`,
-        max: readonlyMax
-      }),
-      sync: createPool({
-        ...preBuild.databaseConfig.poolConfig,
-        application_name: "ponder_sync",
-        max: syncMax
-      })
+      internal: createPool(
+        {
+          ...preBuild.databaseConfig.poolConfig,
+          application_name: `${preBuild.namespace}_internal`,
+          max: internalMax,
+          statement_timeout: 10 * 60 * 1e3
+          // 10 minutes to accommodate slow sync store migrations.
+        },
+        common.logger
+      ),
+      user: createPool(
+        {
+          ...preBuild.databaseConfig.poolConfig,
+          application_name: `${preBuild.namespace}_user`,
+          max: userMax
+        },
+        common.logger
+      ),
+      readonly: createPool(
+        {
+          ...preBuild.databaseConfig.poolConfig,
+          application_name: `${preBuild.namespace}_readonly`,
+          max: readonlyMax
+        },
+        common.logger
+      ),
+      sync: createPool(
+        {
+          ...preBuild.databaseConfig.poolConfig,
+          application_name: "ponder_sync",
+          max: syncMax
+        },
+        common.logger
+      )
     };
     qb = {
       internal: new HeadlessKysely({
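Note: each of the four pools now receives common.logger, and each keeps a distinct application_name (`<namespace>_internal`, `<namespace>_user`, `<namespace>_readonly`, and `ponder_sync`), so both the new pool-error log line and the database server can tell the pools apart. One way to observe this server-side, using standard pg_stat_activity columns; the listConnections helper is hypothetical:

    import pg from "pg";

    async function listConnections(connectionString: string): Promise<void> {
      const client = new pg.Client({ connectionString });
      await client.connect();
      // Counts live connections grouped by the application_name each pool set.
      const { rows } = await client.query(
        `SELECT application_name, count(*) AS connections
           FROM pg_stat_activity
          GROUP BY application_name
          ORDER BY connections DESC`,
      );
      console.table(rows);
      await client.end();
    }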
@@ -5402,6 +5429,7 @@ ${prettyPrint(key)}`
     await database.createTriggers();
     await indexingStore.flush();
     await database.removeTriggers();
+    isDatabaseEmpty = false;
     const query2 = { sql: _sql, params, typings };
     const res = await database.qb.user.wrap({ method: "sql" }, async () => {
       try {
@@ -9926,6 +9954,10 @@ var createSync = async (args) => {
           limit: getEventsMaxBatchSize
         });
         consecutiveErrors = 0;
+        args.common.logger.debug({
+          service: "sync",
+          msg: `Fetched ${events.length} events from the database for a ${formatEta(estimateSeconds * 1e3)} range from ${decodeCheckpoint(from).blockTimestamp}`
+        });
         for (const network of args.networks) {
           updateHistoricalStatus({ events, checkpoint: cursor, network });
         }
@@ -9941,22 +9973,12 @@ var createSync = async (args) => {
         });
         yield { events, checkpoint: to };
         from = cursor;
-        const { eta, progress } = await getAppProgress(args.common.metrics);
-        if (events.length > 0) {
-          if (eta === void 0 || progress === void 0) {
-            args.common.logger.info({
-              service: "app",
-              msg: `Indexed ${events.length} events`
-            });
-          } else {
-            args.common.logger.info({
-              service: "app",
-              msg: `Indexed ${events.length} events with ${formatPercentage(progress)} complete and ${formatEta(eta)} remaining`
-            });
-          }
-        }
       } catch (error) {
         estimateSeconds = Math.max(10, Math.round(estimateSeconds / 10));
+        args.common.logger.debug({
+          service: "sync",
+          msg: `Failed to fetch events from the database, retrying with a ${formatEta(estimateSeconds * 1e3)} range`
+        });
         if (++consecutiveErrors > 4)
           throw error;
       }
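Note on the two hunks above: the per-batch "Indexed N events" info log moves out of createSync (it reappears in run, below), replaced by debug logs around the database fetch itself. On a failed fetch, the time range shrinks tenfold with a 10-second floor before retrying, and the loop gives up after five consecutive failures. The same retry pattern condensed into a standalone sketch; fetchWithShrinkingWindow and fetchRange are hypothetical names:

    // Shrink the fetch window 10x on each failure (never below 10 seconds),
    // and give up after 5 consecutive errors, mirroring the logic above.
    async function fetchWithShrinkingWindow<T>(
      fetchRange: (seconds: number) => Promise<T>,
      initialSeconds: number,
    ): Promise<T> {
      let estimateSeconds = initialSeconds;
      let consecutiveErrors = 0;
      for (;;) {
        try {
          return await fetchRange(estimateSeconds);
        } catch (error) {
          estimateSeconds = Math.max(10, Math.round(estimateSeconds / 10));
          if (++consecutiveErrors > 4) throw error;
        }
      }
    }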
@@ -10525,11 +10547,34 @@ async function run({
   let lastFlush = Date.now();
   for await (const { events, checkpoint } of sync.getEvents()) {
     end = checkpoint;
-    const result = await handleEvents(
-      decodeEvents(common, indexingBuild.sources, events),
-      checkpoint
-    );
+    const decodedEvents = decodeEvents(common, indexingBuild.sources, events);
+    const result = await handleEvents(decodedEvents, checkpoint);
+    const { eta, progress } = await getAppProgress(common.metrics);
+    if (events.length > 0) {
+      if (eta === void 0 || progress === void 0) {
+        common.logger.info({
+          service: "app",
+          msg: `Indexed ${events.length} events`
+        });
+      } else {
+        common.logger.info({
+          service: "app",
+          msg: `Indexed ${events.length} events with ${formatPercentage(progress)} complete and ${formatEta(eta)} remaining`
+        });
+      }
+    }
     if (historicalIndexingStore.isCacheFull() && events.length > 0 || common.options.command === "dev" && lastFlush + 5e3 < Date.now() && events.length > 0) {
+      if (historicalIndexingStore.isCacheFull()) {
+        common.logger.debug({
+          service: "indexing",
+          msg: `Indexing cache has exceeded ${common.options.indexingCacheMaxBytes} MB limit, starting flush`
+        });
+      } else {
+        common.logger.debug({
+          service: "indexing",
+          msg: "Dev server periodic flush triggered, starting flush"
+        });
+      }
       await database.finalize({
         checkpoint: encodeCheckpoint(zeroCheckpoint)
       });
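Note: the progress log now runs in run's main loop after handleEvents, and the flush path gains debug logs naming which trigger fired. There are two triggers, both gated on events.length > 0: the indexing cache exceeding its configured limit, or (in dev) more than five seconds elapsed since the last flush. The decision condensed into a sketch; flushReason and its option names are hypothetical:

    type FlushReason = "cache-full" | "dev-periodic" | null;

    function flushReason(opts: {
      cacheFull: boolean;
      isDev: boolean;
      lastFlushMs: number;
      nowMs: number;
      eventCount: number;
    }): FlushReason {
      if (opts.eventCount === 0) return null;   // nothing new to persist
      if (opts.cacheFull) return "cache-full";  // cache over its size limit
      if (opts.isDev && opts.lastFlushMs + 5_000 < opts.nowMs) return "dev-periodic";
      return null;
    }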
@@ -10541,6 +10586,10 @@ async function run({
         checkpoint: events[events.length - 1].checkpoint
       });
       lastFlush = Date.now();
+      common.logger.debug({
+        service: "indexing",
+        msg: "Completed flush"
+      });
     }
     await metadataStore.setStatus(sync.getStatus());
     if (result.status === "killed") {
@@ -10552,6 +10601,10 @@ async function run({
   }
   if (isKilled)
     return;
+  common.logger.debug({
+    service: "indexing",
+    msg: "Completed all historical events, starting final flush"
+  });
   await database.finalize({ checkpoint: encodeCheckpoint(zeroCheckpoint) });
   await historicalIndexingStore.flush();
   await database.complete({ checkpoint: encodeCheckpoint(zeroCheckpoint) });
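Note: once the historical event stream is exhausted, run performs one final finalize, flush, and complete pass, now preceded by a debug log. Together with the cache-full and dev-periodic triggers above, that makes three points where buffered writes reach the database. A hypothetical write-behind buffer illustrating those three flush points; this is an illustration of the pattern, not ponder's actual store:

    class WriteBehindBuffer<T> {
      private rows: T[] = [];

      constructor(
        private readonly maxRows: number,
        private readonly write: (rows: T[]) => Promise<void>,
      ) {}

      async add(row: T): Promise<void> {
        this.rows.push(row);
        if (this.rows.length >= this.maxRows) await this.flush(); // cache-full trigger
      }

      // Also called periodically in dev, and once after the stream ends.
      async flush(): Promise<void> {
        if (this.rows.length === 0) return;
        const batch = this.rows;
        this.rows = [];
        await this.write(batch); // persist, then drop the in-memory copy
      }
    }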