envio 2.28.0-alpha.3 → 2.28.0

package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "envio",
- "version": "v2.28.0-alpha.3",
+ "version": "v2.28.0",
  "description": "A latency and sync speed optimized, developer friendly blockchain data indexer.",
  "bin": "./bin.js",
  "main": "./index.js",
@@ -25,13 +25,13 @@
  },
  "homepage": "https://envio.dev",
  "optionalDependencies": {
- "envio-linux-x64": "v2.28.0-alpha.3",
- "envio-linux-arm64": "v2.28.0-alpha.3",
- "envio-darwin-x64": "v2.28.0-alpha.3",
- "envio-darwin-arm64": "v2.28.0-alpha.3"
+ "envio-linux-x64": "v2.28.0",
+ "envio-linux-arm64": "v2.28.0",
+ "envio-darwin-x64": "v2.28.0",
+ "envio-darwin-arm64": "v2.28.0"
  },
  "dependencies": {
- "@envio-dev/hypersync-client": "0.6.5",
+ "@envio-dev/hypersync-client": "0.6.6",
  "rescript": "11.1.3",
  "rescript-schema": "9.3.0",
  "viem": "2.21.0",
package/src/Batch.res ADDED
@@ -0,0 +1,146 @@
+ type progressedChain = {
+ chainId: int,
+ batchSize: int,
+ progressBlockNumber: int,
+ progressNextBlockLogIndex: option<int>,
+ totalEventsProcessed: int,
+ }
+
+ type t = {
+ items: array<Internal.eventItem>,
+ progressedChains: array<progressedChain>,
+ fetchStates: ChainMap.t<FetchState.t>,
+ dcsToStoreByChainId: dict<array<FetchState.indexingContract>>,
+ }
+
+ type multiChainEventComparitor = {
+ chain: ChainMap.Chain.t,
+ earliestEvent: FetchState.queueItem,
+ }
+
+ let getComparitorFromItem = (queueItem: Internal.eventItem) => {
+ let {timestamp, chain, blockNumber, logIndex} = queueItem
+ EventUtils.getEventComparator({
+ timestamp,
+ chainId: chain->ChainMap.Chain.toChainId,
+ blockNumber,
+ logIndex,
+ })
+ }
+
+ let getQueueItemComparitor = (earliestQueueItem: FetchState.queueItem, ~chain) => {
+ switch earliestQueueItem {
+ | Item({item}) => item->getComparitorFromItem
+ | NoItem({latestFetchedBlock: {blockTimestamp, blockNumber}}) => (
+ blockTimestamp,
+ chain->ChainMap.Chain.toChainId,
+ blockNumber,
+ 0,
+ )
+ }
+ }
+
+ let isQueueItemEarlier = (a: multiChainEventComparitor, b: multiChainEventComparitor): bool => {
+ a.earliestEvent->getQueueItemComparitor(~chain=a.chain) <
+ b.earliestEvent->getQueueItemComparitor(~chain=b.chain)
+ }
+
+ /**
+ It either returns the earliest item among all chains, or None if no chains are actively indexing
+ */
+ let getOrderedNextItem = (fetchStates: ChainMap.t<FetchState.t>): option<
+ multiChainEventComparitor,
+ > => {
+ fetchStates
+ ->ChainMap.entries
+ ->Belt.Array.reduce(None, (accum, (chain, fetchState)) => {
+ // If the fetch state has reached the end block we don't need to consider it
+ if fetchState->FetchState.isActivelyIndexing {
+ let earliestEvent = fetchState->FetchState.getEarliestEvent
+ let current: multiChainEventComparitor = {chain, earliestEvent}
+ switch accum {
+ | Some(previous) if isQueueItemEarlier(previous, current) => accum
+ | _ => Some(current)
+ }
+ } else {
+ accum
+ }
+ })
+ }
+
+ let popOrderedBatchItems = (
+ ~maxBatchSize,
+ ~fetchStates: ChainMap.t<FetchState.t>,
+ ~sizePerChain: dict<int>,
+ ) => {
+ let items = []
+
+ let rec loop = () =>
+ if items->Array.length < maxBatchSize {
+ switch fetchStates->getOrderedNextItem {
+ | Some({earliestEvent}) =>
+ switch earliestEvent {
+ | NoItem(_) => ()
+ | Item({item, popItemOffQueue}) => {
+ popItemOffQueue()
+ items->Js.Array2.push(item)->ignore
+ sizePerChain->Utils.Dict.incrementByInt(item.chain->ChainMap.Chain.toChainId)
+ loop()
+ }
+ }
+ | _ => ()
+ }
+ }
+ loop()
+
+ items
+ }
+
+ let popUnorderedBatchItems = (
+ ~maxBatchSize,
+ ~fetchStates: ChainMap.t<FetchState.t>,
+ ~sizePerChain: dict<int>,
+ ) => {
+ let items = []
+
+ let preparedFetchStates =
+ fetchStates
+ ->ChainMap.values
+ ->FetchState.filterAndSortForUnorderedBatch(~maxBatchSize)
+
+ let idx = ref(0)
+ let preparedNumber = preparedFetchStates->Array.length
+ let batchSize = ref(0)
+
+ // Accumulate items for all actively indexing chains,
+ // grouping as many items from a single chain as possible.
+ // This way the loader optimisations will hit more often.
+ while batchSize.contents < maxBatchSize && idx.contents < preparedNumber {
+ let fetchState = preparedFetchStates->Js.Array2.unsafe_get(idx.contents)
+ let batchSizeBeforeTheChain = batchSize.contents
+
+ let rec loop = () =>
+ if batchSize.contents < maxBatchSize {
+ let earliestEvent = fetchState->FetchState.getEarliestEvent
+ switch earliestEvent {
+ | NoItem(_) => ()
+ | Item({item, popItemOffQueue}) => {
+ popItemOffQueue()
+ items->Js.Array2.push(item)->ignore
+ batchSize := batchSize.contents + 1
+ loop()
+ }
+ }
+ }
+ loop()
+
+ let chainBatchSize = batchSize.contents - batchSizeBeforeTheChain
+ if chainBatchSize > 0 {
+ sizePerChain->Utils.Dict.setByInt(fetchState.chainId, chainBatchSize)
+ }
+
+ idx := idx.contents + 1
+ }
+
+ items
+ }
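
A note on the ordered path above: events from all chains are compared by the lexicographic tuple (timestamp, chainId, blockNumber, logIndex), and a chain with nothing queued (NoItem) sorts at its latest fetched block with logIndex 0, so the batch never runs ahead of a chain that is still fetching. Here is a minimal TypeScript sketch of that comparison, with illustrative names rather than the package's API:

```typescript
// Illustrative sketch of the ordered-mode comparison in Batch.res; not the envio API.
type Comparator = [number, number, number, number]; // (timestamp, chainId, blockNumber, logIndex)

interface QueueHead {
  chainId: number;
  // undefined models the NoItem case: nothing queued, only a latest fetched block
  item?: { timestamp: number; blockNumber: number; logIndex: number };
  latestFetchedBlock: { blockTimestamp: number; blockNumber: number };
}

function comparator(head: QueueHead): Comparator {
  return head.item
    ? [head.item.timestamp, head.chainId, head.item.blockNumber, head.item.logIndex]
    : [head.latestFetchedBlock.blockTimestamp, head.chainId, head.latestFetchedBlock.blockNumber, 0];
}

// Lexicographic tuple comparison, mirroring ReScript's polymorphic `<` on tuples
function isEarlier(a: QueueHead, b: QueueHead): boolean {
  const ca = comparator(a), cb = comparator(b);
  for (let i = 0; i < ca.length; i++) {
    if (ca[i] !== cb[i]) return ca[i] < cb[i];
  }
  return false;
}
```

The NoItem placeholder is what keeps multichain ordering safe: until a chain has fetched past a given timestamp, its placeholder compares as earlier and halts item selection there.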
package/src/Batch.res.js ADDED
@@ -0,0 +1,129 @@
+ // Generated by ReScript, PLEASE EDIT WITH CARE
+ 'use strict';
+
+ var Utils = require("./Utils.res.js");
+ var Caml_obj = require("rescript/lib/js/caml_obj.js");
+ var ChainMap = require("./ChainMap.res.js");
+ var Belt_Array = require("rescript/lib/js/belt_Array.js");
+ var EventUtils = require("./EventUtils.res.js");
+ var FetchState = require("./FetchState.res.js");
+
+ function getComparitorFromItem(queueItem) {
+ return EventUtils.getEventComparator({
+ timestamp: queueItem.timestamp,
+ chainId: queueItem.chain,
+ blockNumber: queueItem.blockNumber,
+ logIndex: queueItem.logIndex
+ });
+ }
+
+ function getQueueItemComparitor(earliestQueueItem, chain) {
+ if (earliestQueueItem.TAG === "Item") {
+ return getComparitorFromItem(earliestQueueItem._0.item);
+ }
+ var match = earliestQueueItem.latestFetchedBlock;
+ return [
+ match.blockTimestamp,
+ chain,
+ match.blockNumber,
+ 0
+ ];
+ }
+
+ function isQueueItemEarlier(a, b) {
+ return Caml_obj.lessthan(getQueueItemComparitor(a.earliestEvent, a.chain), getQueueItemComparitor(b.earliestEvent, b.chain));
+ }
+
+ function getOrderedNextItem(fetchStates) {
+ return Belt_Array.reduce(ChainMap.entries(fetchStates), undefined, (function (accum, param) {
+ var fetchState = param[1];
+ if (!FetchState.isActivelyIndexing(fetchState)) {
+ return accum;
+ }
+ var earliestEvent = FetchState.getEarliestEvent(fetchState);
+ var current_chain = param[0];
+ var current = {
+ chain: current_chain,
+ earliestEvent: earliestEvent
+ };
+ if (accum !== undefined && isQueueItemEarlier(accum, current)) {
+ return accum;
+ } else {
+ return current;
+ }
+ }));
+ }
+
+ function popOrderedBatchItems(maxBatchSize, fetchStates, sizePerChain) {
+ var items = [];
+ var loop = function () {
+ while(true) {
+ if (items.length >= maxBatchSize) {
+ return ;
+ }
+ var match = getOrderedNextItem(fetchStates);
+ if (match === undefined) {
+ return ;
+ }
+ var earliestEvent = match.earliestEvent;
+ if (earliestEvent.TAG !== "Item") {
+ return ;
+ }
+ var match$1 = earliestEvent._0;
+ var item = match$1.item;
+ match$1.popItemOffQueue();
+ items.push(item);
+ Utils.Dict.incrementByInt(sizePerChain, item.chain);
+ continue ;
+ };
+ };
+ loop();
+ return items;
+ }
+
+ function popUnorderedBatchItems(maxBatchSize, fetchStates, sizePerChain) {
+ var items = [];
+ var preparedFetchStates = FetchState.filterAndSortForUnorderedBatch(ChainMap.values(fetchStates), maxBatchSize);
+ var idx = 0;
+ var preparedNumber = preparedFetchStates.length;
+ var batchSize = {
+ contents: 0
+ };
+ while(batchSize.contents < maxBatchSize && idx < preparedNumber) {
+ var fetchState = preparedFetchStates[idx];
+ var batchSizeBeforeTheChain = batchSize.contents;
+ var loop = (function(fetchState){
+ return function loop() {
+ while(true) {
+ if (batchSize.contents >= maxBatchSize) {
+ return ;
+ }
+ var earliestEvent = FetchState.getEarliestEvent(fetchState);
+ if (earliestEvent.TAG !== "Item") {
+ return ;
+ }
+ var match = earliestEvent._0;
+ match.popItemOffQueue();
+ items.push(match.item);
+ batchSize.contents = batchSize.contents + 1 | 0;
+ continue ;
+ };
+ }
+ }(fetchState));
+ loop();
+ var chainBatchSize = batchSize.contents - batchSizeBeforeTheChain | 0;
+ if (chainBatchSize > 0) {
+ sizePerChain[fetchState.chainId] = chainBatchSize;
+ }
+ idx = idx + 1 | 0;
+ };
+ return items;
+ }
+
+ exports.getComparitorFromItem = getComparitorFromItem;
+ exports.getQueueItemComparitor = getQueueItemComparitor;
+ exports.isQueueItemEarlier = isQueueItemEarlier;
+ exports.getOrderedNextItem = getOrderedNextItem;
+ exports.popOrderedBatchItems = popOrderedBatchItems;
+ exports.popUnorderedBatchItems = popUnorderedBatchItems;
+ /* Utils Not a pure module */
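
The unordered path trades global ordering for locality: chains are filtered and sorted up front, then each chain's queue is drained for as long as the batch has room before moving to the next, so consecutive items from one chain reach the loaders together. A hedged sketch of that accumulation loop (types and the queue layout are assumptions, not the package's internals):

```typescript
// Sketch of popUnorderedBatchItems' per-chain accumulation; names are illustrative.
interface ChainQueue {
  chainId: number;
  queue: unknown[]; // earliest item last, popped like a stack
}

function popUnordered(
  chains: ChainQueue[],
  maxBatchSize: number,
): { items: unknown[]; sizePerChain: Record<number, number> } {
  const items: unknown[] = [];
  const sizePerChain: Record<number, number> = {};
  for (const chain of chains) {
    if (items.length >= maxBatchSize) break;
    const before = items.length;
    // Drain consecutive items from this chain while the batch has room
    while (items.length < maxBatchSize && chain.queue.length > 0) {
      items.push(chain.queue.pop());
    }
    const taken = items.length - before;
    if (taken > 0) sizePerChain[chain.chainId] = taken;
  }
  return { items, sizePerChain };
}
```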
package/src/FetchState.res CHANGED
@@ -1271,3 +1271,18 @@ let filterAndSortForUnorderedBatch = {
  })
  }
  }
+
+ let getProgressBlockNumber = ({latestFullyFetchedBlock, queue}: t) => {
+ switch queue->Utils.Array.last {
+ | Some(item) if latestFullyFetchedBlock.blockNumber >= item.blockNumber => item.blockNumber - 1
+ | _ => latestFullyFetchedBlock.blockNumber
+ }
+ }
+
+ let getProgressNextBlockLogIndex = ({queue, latestFullyFetchedBlock}: t) => {
+ switch queue->Utils.Array.last {
+ | Some(item) if latestFullyFetchedBlock.blockNumber >= item.blockNumber && item.logIndex > 0 =>
+ Some(item.logIndex - 1)
+ | _ => None
+ }
+ }
package/src/FetchState.res.js CHANGED
@@ -960,6 +960,24 @@ function filterAndSortForUnorderedBatch(fetchStates, maxBatchSize) {
  });
  }
 
+ function getProgressBlockNumber(param) {
+ var latestFullyFetchedBlock = param.latestFullyFetchedBlock;
+ var item = Utils.$$Array.last(param.queue);
+ if (item !== undefined && latestFullyFetchedBlock.blockNumber >= item.blockNumber) {
+ return item.blockNumber - 1 | 0;
+ } else {
+ return latestFullyFetchedBlock.blockNumber;
+ }
+ }
+
+ function getProgressNextBlockLogIndex(param) {
+ var item = Utils.$$Array.last(param.queue);
+ if (item !== undefined && param.latestFullyFetchedBlock.blockNumber >= item.blockNumber && item.logIndex > 0) {
+ return item.logIndex - 1 | 0;
+ }
+
+ }
+
  exports.copy = copy;
  exports.mergeIntoPartition = mergeIntoPartition;
  exports.checkIsWithinSyncRange = checkIsWithinSyncRange;
@@ -990,4 +1008,6 @@ exports.rollback = rollback;
  exports.isActivelyIndexing = isActivelyIndexing;
  exports.isReadyToEnterReorgThreshold = isReadyToEnterReorgThreshold;
  exports.filterAndSortForUnorderedBatch = filterAndSortForUnorderedBatch;
+ exports.getProgressBlockNumber = getProgressBlockNumber;
+ exports.getProgressNextBlockLogIndex = getProgressNextBlockLogIndex;
  /* Utils Not a pure module */
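
These two FetchState helpers derive a resumable checkpoint from the queue: if the earliest unprocessed item lies inside the fully fetched range, progress is capped just before it (the previous block, or the previous log index within its block); otherwise the whole fetched range counts as progressed. Roughly, in TypeScript (field names mirror the compiled output above; the rest is illustrative):

```typescript
// Mirrors getProgressBlockNumber / getProgressNextBlockLogIndex semantics; illustrative only.
interface FetchStateLike {
  latestFullyFetchedBlock: { blockNumber: number };
  queue: { blockNumber: number; logIndex: number }[]; // earliest unprocessed item last
}

function progressBlockNumber(fs: FetchStateLike): number {
  const item = fs.queue[fs.queue.length - 1];
  // An unprocessed item inside the fetched range caps progress just before it
  if (item && fs.latestFullyFetchedBlock.blockNumber >= item.blockNumber) {
    return item.blockNumber - 1;
  }
  return fs.latestFullyFetchedBlock.blockNumber;
}

function progressNextBlockLogIndex(fs: FetchStateLike): number | undefined {
  const item = fs.queue[fs.queue.length - 1];
  if (item && fs.latestFullyFetchedBlock.blockNumber >= item.blockNumber && item.logIndex > 0) {
    return item.logIndex - 1; // last fully processed log index within that block
  }
  return undefined;
}
```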
package/src/Hasura.res CHANGED
@@ -227,38 +227,22 @@ let trackDatabase = async (
  ~responseLimit,
  ~schema,
  ) => {
- let trackOnlyInternalTableNames = [
- InternalTable.Chains.table.tableName,
- InternalTable.EventSyncState.table.tableName,
- InternalTable.PersistedState.table.tableName,
- InternalTable.EndOfBlockRangeScannedData.table.tableName,
- InternalTable.DynamicContractRegistry.table.tableName,
- ]
  let exposedInternalTableNames = [
  InternalTable.RawEvents.table.tableName,
  InternalTable.Views.metaViewName,
  InternalTable.Views.chainMetadataViewName,
  ]
  let userTableNames = userEntities->Js.Array2.map(entity => entity.table.tableName)
+ let tableNames = [exposedInternalTableNames, userTableNames]->Belt.Array.concatMany

  Logging.info("Tracking tables in Hasura")

  let _ = await clearHasuraMetadata(~endpoint, ~auth)

- await trackTables(
- ~endpoint,
- ~auth,
- ~pgSchema,
- ~tableNames=[
- exposedInternalTableNames,
- trackOnlyInternalTableNames,
- userTableNames,
- ]->Belt.Array.concatMany,
- )
+ await trackTables(~endpoint, ~auth, ~pgSchema, ~tableNames)

  let _ =
- await [exposedInternalTableNames, userTableNames]
- ->Belt.Array.concatMany
+ await tableNames
  ->Js.Array2.map(tableName =>
  createSelectPermissions(
  ~endpoint,
package/src/Hasura.res.js CHANGED
@@ -205,13 +205,6 @@ async function createEntityRelationship(pgSchema, endpoint, auth, tableName, rel
  }

  async function trackDatabase(endpoint, auth, pgSchema, userEntities, aggregateEntities, responseLimit, schema) {
- var trackOnlyInternalTableNames = [
- InternalTable.Chains.table.tableName,
- InternalTable.EventSyncState.table.tableName,
- InternalTable.PersistedState.table.tableName,
- InternalTable.EndOfBlockRangeScannedData.table.tableName,
- InternalTable.DynamicContractRegistry.table.tableName
- ];
  var exposedInternalTableNames = [
  InternalTable.RawEvents.table.tableName,
  InternalTable.Views.metaViewName,
@@ -220,17 +213,14 @@ async function trackDatabase(endpoint, auth, pgSchema, userEntities, aggregateEn
  var userTableNames = userEntities.map(function (entity) {
  return entity.table.tableName;
  });
+ var tableNames = Belt_Array.concatMany([
+ exposedInternalTableNames,
+ userTableNames
+ ]);
  Logging.info("Tracking tables in Hasura");
  await clearHasuraMetadata(endpoint, auth);
- await trackTables(endpoint, auth, pgSchema, Belt_Array.concatMany([
- exposedInternalTableNames,
- trackOnlyInternalTableNames,
- userTableNames
- ]));
- await Promise.all(Caml_splice_call.spliceObjApply(Belt_Array.concatMany([
- exposedInternalTableNames,
- userTableNames
- ]).map(function (tableName) {
+ await trackTables(endpoint, auth, pgSchema, tableNames);
+ await Promise.all(Caml_splice_call.spliceObjApply(tableNames.map(function (tableName) {
  return createSelectPermissions(auth, endpoint, tableName, pgSchema, responseLimit, aggregateEntities);
  }), "concat", [userEntities.map(function (entityConfig) {
  var match = entityConfig.table;
@@ -18,3 +18,13 @@ type chain = {
  contracts: array<contract>,
  sources: array<Source.t>,
  }
+
+ type sourceSync = {
+ initialBlockInterval: int,
+ backoffMultiplicative: float,
+ accelerationAdditive: int,
+ intervalCeiling: int,
+ backoffMillis: int,
+ queryTimeoutMillis: int,
+ fallbackStallTimeout: int,
+ }
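
The new sourceSync record groups the per-source block-range tuning parameters in one place. A hypothetical literal, just to make the field meanings concrete (all values and glosses are assumptions inferred from the field names, not documented defaults):

```typescript
// Hypothetical sourceSync values; illustrative assumptions, not envio's defaults.
const sourceSync = {
  initialBlockInterval: 10_000,  // starting block-range size per query
  backoffMultiplicative: 0.8,    // shrink factor applied after a failed or slow query
  accelerationAdditive: 2_000,   // blocks added to the range after each success
  intervalCeiling: 10_000,       // upper bound on the block-range size
  backoffMillis: 5_000,          // wait after an error before retrying
  queryTimeoutMillis: 20_000,    // hard timeout per query
  fallbackStallTimeout: 10_000,  // stall window before a fallback source is tried
};
```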
package/src/Persistence.res CHANGED
@@ -32,7 +32,7 @@ type storage = {
  ~entities: array<Internal.entityConfig>=?,
  ~enums: array<Internal.enumConfig<Internal.enum>>=?,
  ) => promise<initialState>,
- loadInitialState: unit => promise<initialState>,
+ resumeInitialState: unit => promise<initialState>,
  @raises("StorageError")
  loadByIdsOrThrow: 'item. (
  ~ids: array<string>,
@@ -123,14 +123,12 @@ let init = {
  persistence.storageStatus = Initializing(promise)
  if reset || !(await persistence.storage.isInitialized()) {
  Logging.info(`Initializing the indexer storage...`)
-
  let initialState = await persistence.storage.initialize(
  ~entities=persistence.allEntities,
  ~enums=persistence.allEnums,
  ~chainConfigs,
  )
-
- Logging.info(`The indexer storage is ready. Uploading cache...`)
+ Logging.info(`The indexer storage is ready. Starting indexing!`)
  persistence.storageStatus = Ready(initialState)
  } else if (
  // In case of a race condition,
@@ -140,8 +138,29 @@ let init = {
  | _ => false
  }
  ) {
- Logging.info(`The indexer storage is ready.`)
- persistence.storageStatus = Ready(await persistence.storage.loadInitialState())
+ Logging.info(`Found existing indexer storage. Resuming indexing state...`)
+ let initialState = await persistence.storage.resumeInitialState()
+ persistence.storageStatus = Ready(initialState)
+ let checkpoints = Js.Dict.empty()
+ initialState.chains->Js.Array2.forEach(c => {
+ let checkpoint = switch c.progressNextBlockLogIndex {
+ | Value(logIndex) =>
+ // Latest processed log index (not necessarily processed by the indexer)
+ {
+ "blockNumber": c.progressBlockNumber + 1,
+ "logIndex": logIndex,
+ }
+ | Null =>
+ // Or simply the latest processed block number (might be -1 if not set)
+ c.progressBlockNumber->Utils.magic
+ }
+ checkpoints->Utils.Dict.setByInt(c.id, checkpoint)
+ })
+ Logging.info({
+ "msg": `Successfully resumed indexing state! Continuing from the last checkpoint.`,
+ "checkpoints": checkpoints,
+ })
  }
  resolveRef.contents()
  }
package/src/Persistence.res.js CHANGED
@@ -66,7 +66,7 @@ async function init(persistence, chainConfigs, resetOpt) {
  if (reset || !await persistence.storage.isInitialized()) {
  Logging.info("Initializing the indexer storage...");
  var initialState = await persistence.storage.initialize(chainConfigs, persistence.allEntities, persistence.allEnums);
- Logging.info("The indexer storage is ready. Uploading cache...");
+ Logging.info("The indexer storage is ready. Starting indexing!");
  persistence.storageStatus = {
  TAG: "Ready",
  _0: initialState
@@ -76,11 +76,26 @@ async function init(persistence, chainConfigs, resetOpt) {
  var tmp;
  tmp = typeof match !== "object" || match.TAG !== "Initializing" ? false : true;
  if (tmp) {
- Logging.info("The indexer storage is ready.");
+ Logging.info("Found existing indexer storage. Resuming indexing state...");
+ var initialState$1 = await persistence.storage.resumeInitialState();
  persistence.storageStatus = {
  TAG: "Ready",
- _0: await persistence.storage.loadInitialState()
+ _0: initialState$1
  };
+ var checkpoints = {};
+ initialState$1.chains.forEach(function (c) {
+ var logIndex = c._progress_log_index;
+ var checkpoint;
+ checkpoint = logIndex === null ? c.progress_block : ({
+ blockNumber: c.progress_block + 1 | 0,
+ logIndex: logIndex
+ });
+ checkpoints[c.id] = checkpoint;
+ });
+ Logging.info({
+ msg: "Successfully resumed indexing state! Continuing from the last checkpoint.",
+ checkpoints: checkpoints
+ });
  }

  }
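
On restart, each stored chain row is reduced to a checkpoint for logging: a {blockNumber, logIndex} pair when a block was only partially processed, otherwise just the progressed block number (-1 when the chain never ran). A small sketch of that branch, following the field names in the compiled code above (the types are illustrative):

```typescript
// Sketch of the per-chain checkpoint derivation in the resume path; illustrative.
interface ChainRow {
  id: number;
  progress_block: number;              // -1 when the chain never progressed
  _progress_log_index: number | null;  // set when a block was only partially processed
}

type Checkpoint = number | { blockNumber: number; logIndex: number };

function toCheckpoint(c: ChainRow): Checkpoint {
  return c._progress_log_index === null
    ? c.progress_block // resume from the latest fully processed block
    : { blockNumber: c.progress_block + 1, logIndex: c._progress_log_index };
}
```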
package/src/PgStorage.res CHANGED
@@ -63,7 +63,6 @@ let makeInitializeTransaction = (
  ~isEmptyPgSchema=false,
  ) => {
  let generalTables = [
- InternalTable.EventSyncState.table,
  InternalTable.Chains.table,
  InternalTable.PersistedState.table,
  InternalTable.EndOfBlockRangeScannedData.table,
@@ -566,10 +565,10 @@ let make = (
  ])

  let isInitialized = async () => {
- let envioTables =
- await sql->Postgres.unsafe(
- `SELECT table_schema FROM information_schema.tables WHERE table_schema = '${pgSchema}' AND table_name = '${InternalTable.EventSyncState.table.tableName}' OR table_name = '${InternalTable.Chains.table.tableName}';`,
- )
+ let envioTables = await sql->Postgres.unsafe(
+ `SELECT table_schema FROM information_schema.tables WHERE table_schema = '${pgSchema}' AND table_name = '${// This is for indexer before envio@2.28
+ "event_sync_state"}' OR table_name = '${InternalTable.Chains.table.tableName}';`,
+ )
  envioTables->Utils.Array.notEmpty
  }

@@ -671,7 +670,9 @@ let make = (
  // This means that the schema is used for something else than envio.
  !(
  schemaTableNames->Js.Array2.some(table =>
- table.tableName === InternalTable.EventSyncState.table.tableName
+ table.tableName === InternalTable.Chains.table.tableName ||
+ // Case for indexer before envio@2.28
+ table.tableName === "event_sync_state"
  )
  )
  ) {
@@ -893,7 +894,7 @@ let make = (
  }
  }

- let loadInitialState = async (): Persistence.initialState => {
+ let resumeInitialState = async (): Persistence.initialState => {
  let (cache, chains) = await Promise.all2((
  restoreEffectCache(~withUpload=false),
  sql
@@ -903,6 +904,13 @@ let make = (
  ->(Utils.magic: promise<array<unknown>> => promise<array<InternalTable.Chains.t>>),
  ))

+ if chains->Utils.Array.notEmpty {
+ let () =
+ await sql->Postgres.unsafe(
+ InternalTable.DynamicContractRegistry.makeCleanUpOnRestartQuery(~pgSchema, ~chains),
+ )
+ }
+
  {
  cleanRun: false,
  cache,
@@ -913,7 +921,7 @@ let make = (
  {
  isInitialized,
  initialize,
- loadInitialState,
+ resumeInitialState,
  loadByFieldOrThrow,
  loadByIdsOrThrow,
  setOrThrow,
package/src/PgStorage.res.js CHANGED
@@ -75,7 +75,6 @@ function makeInitializeTransaction(pgSchema, pgUser, chainConfigsOpt, entitiesOp
  var enums = enumsOpt !== undefined ? enumsOpt : [];
  var isEmptyPgSchema = isEmptyPgSchemaOpt !== undefined ? isEmptyPgSchemaOpt : false;
  var generalTables = [
- InternalTable.EventSyncState.table,
  InternalTable.Chains.table,
  InternalTable.PersistedState.table,
  InternalTable.EndOfBlockRangeScannedData.table,
@@ -395,7 +394,7 @@ function make(sql, pgHost, pgSchema, pgPort, pgUser, pgDatabase, pgPassword, onI
  };
  var cacheDirPath = Path.resolve("..", ".envio", "cache");
  var isInitialized = async function () {
- var envioTables = await sql.unsafe("SELECT table_schema FROM information_schema.tables WHERE table_schema = '" + pgSchema + "' AND table_name = '" + InternalTable.EventSyncState.table.tableName + "' OR table_name = '" + InternalTable.Chains.table.tableName + "';");
+ var envioTables = await sql.unsafe("SELECT table_schema FROM information_schema.tables WHERE table_schema = '" + pgSchema + "' AND table_name = 'event_sync_state' OR table_name = '" + InternalTable.Chains.table.tableName + "';");
  return Utils.$$Array.notEmpty(envioTables);
  };
  var restoreEffectCache = async function (withUpload) {
@@ -482,7 +481,7 @@ function make(sql, pgHost, pgSchema, pgPort, pgUser, pgDatabase, pgPassword, onI
  var enums = enumsOpt !== undefined ? enumsOpt : [];
  var schemaTableNames = await sql.unsafe(makeSchemaTableNamesQuery(pgSchema));
  if (Utils.$$Array.notEmpty(schemaTableNames) && !schemaTableNames.some(function (table) {
- return table.table_name === InternalTable.EventSyncState.table.tableName;
+ return table.table_name === InternalTable.Chains.table.tableName ? true : table.table_name === "event_sync_state";
  })) {
  Js_exn.raiseError("Cannot run Envio migrations on PostgreSQL schema \"" + pgSchema + "\" because it contains non-Envio tables. Running migrations would delete all data in this schema.\n\nTo resolve this:\n1. If you want to use this schema, first backup any important data, then drop it with: \"pnpm envio local db-migrate down\"\n2. Or specify a different schema name by setting the \"ENVIO_PG_PUBLIC_SCHEMA\" environment variable\n3. Or manually drop the schema in your database if you're certain the data is not needed.");
  }
@@ -634,21 +633,25 @@ function make(sql, pgHost, pgSchema, pgPort, pgUser, pgDatabase, pgPassword, onI
  return Logging.errorWithExn(Utils.prettifyExn(exn$1), "Failed to dump cache.");
  }
  };
- var loadInitialState = async function () {
+ var resumeInitialState = async function () {
  var match = await Promise.all([
  restoreEffectCache(false),
  sql.unsafe(makeLoadAllQuery(pgSchema, InternalTable.Chains.table.tableName))
  ]);
+ var chains = match[1];
+ if (Utils.$$Array.notEmpty(chains)) {
+ await sql.unsafe(InternalTable.DynamicContractRegistry.makeCleanUpOnRestartQuery(pgSchema, chains));
+ }
  return {
  cleanRun: false,
  cache: match[0],
- chains: match[1]
+ chains: chains
  };
  };
  return {
  isInitialized: isInitialized,
  initialize: initialize,
- loadInitialState: loadInitialState,
+ resumeInitialState: resumeInitialState,
  loadByIdsOrThrow: loadByIdsOrThrow,
  loadByFieldOrThrow: loadByFieldOrThrow,
  setOrThrow: setOrThrow$1,
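
Since 2.28 replaces the event_sync_state table with the chains table, both isInitialized and the migration safety check now accept either table name, so schemas created by pre-2.28 indexers are still recognized as Envio schemas. The intent, sketched in TypeScript with illustrative names:

```typescript
// Sketch of the backward-compatible schema detection; illustrative, not the package's API.
function looksLikeEnvioSchema(
  schemaTableNames: { table_name: string }[],
  chainsTableName: string, // InternalTable.Chains.table.tableName in the real code
): boolean {
  // Match either the new chains table or the pre-2.28 event_sync_state table
  return schemaTableNames.some(
    t => t.table_name === chainsTableName || t.table_name === "event_sync_state",
  );
}
```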
package/src/Throttler.res CHANGED
@@ -30,7 +30,10 @@ let rec startInternal = async (throttler: t) => {
  switch await fn() {
  | exception exn =>
  throttler.logger->Pino.errorExn(
- Pino.createPinoMessageWithError("Scheduled action failed in throttler", exn),
+ Pino.createPinoMessageWithError(
+ "Scheduled action failed in throttler",
+ exn->Utils.prettifyExn,
+ ),
  )
  | _ => ()
  }