envio 2.31.0-alpha.3 → 2.31.1-rc.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "envio",
-  "version": "v2.31.0-alpha.3",
+  "version": "v2.31.1-rc.0",
   "description": "A latency and sync speed optimized, developer friendly blockchain data indexer.",
   "bin": "./bin.js",
   "main": "./index.js",
@@ -25,10 +25,10 @@
   },
   "homepage": "https://envio.dev",
   "optionalDependencies": {
-    "envio-linux-x64": "v2.31.0-alpha.3",
-    "envio-linux-arm64": "v2.31.0-alpha.3",
-    "envio-darwin-x64": "v2.31.0-alpha.3",
-    "envio-darwin-arm64": "v2.31.0-alpha.3"
+    "envio-linux-x64": "v2.31.1-rc.0",
+    "envio-linux-arm64": "v2.31.1-rc.0",
+    "envio-darwin-x64": "v2.31.1-rc.0",
+    "envio-darwin-arm64": "v2.31.1-rc.0"
   },
   "dependencies": {
     "@envio-dev/hypersync-client": "0.6.6",
package/src/Envio.res CHANGED
@@ -59,7 +59,6 @@ let experimental_createEffect = (
   options: effectOptions<'input, 'output>,
   handler: effectArgs<'input> => promise<'output>,
 ) => {
-  Prometheus.EffectCallsCount.set(~callsCount=0, ~effectName=options.name)
   let outputSchema =
     S.schema(_ => options.output)->(Utils.magic: S.t<S.t<'output>> => S.t<Internal.effectOutput>)
   {
@@ -86,7 +85,7 @@ let experimental_createEffect = (
     })
     Some({
       table: Internal.makeCacheTable(~effectName=options.name),
-      rowsSchema: S.array(itemSchema),
+      outputSchema,
       itemSchema,
     })
   | None
package/src/Envio.res.js CHANGED
@@ -2,11 +2,9 @@
 'use strict';
 
 var Internal = require("./Internal.res.js");
-var Prometheus = require("./Prometheus.res.js");
 var S$RescriptSchema = require("rescript-schema/src/S.res.js");
 
 function experimental_createEffect(options, handler) {
-  Prometheus.EffectCallsCount.set(0, options.name);
   var outputSchema = S$RescriptSchema.schema(function (param) {
     return options.output;
   });
@@ -21,7 +19,7 @@ function experimental_createEffect(options, handler) {
     });
     tmp = {
       itemSchema: itemSchema,
-      rowsSchema: S$RescriptSchema.array(itemSchema),
+      outputSchema: outputSchema,
       table: Internal.makeCacheTable(options.name)
     };
   } else {
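Two changes in experimental_createEffect: the eager Prometheus.EffectCallsCount reset at effect-creation time is removed (the gauge is now only touched when calls actually happen), and the cache metadata carries outputSchema, the schema of a single effect output, instead of a per-effect rowsSchema built with S.array(itemSchema). The hunks below, from the same release, rework how dynamic contracts are registered while iterating an item's dcs array.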
@@ -351,8 +351,11 @@ let registerDynamicContracts = (
     switch item->Internal.getItemDcs {
     | None => ()
     | Some(dcs) =>
-      for idx in 0 to dcs->Array.length - 1 {
-        let dc = dcs->Js.Array2.unsafe_get(idx)
+      let idx = ref(0)
+      while idx.contents < dcs->Array.length {
+        let dc = dcs->Js.Array2.unsafe_get(idx.contents)
+
+        let shouldRemove = ref(false)
 
         switch fetchState.contractConfigs->Utils.Dict.dangerouslyGetNonOption(dc.contractName) {
         | Some({filterByAddresses}) =>
@@ -378,8 +381,7 @@ let registerDynamicContracts = (
             )
             logger->Logging.childWarn(`Skipping contract registration: Contract address is already registered at a later block number. Currently registration of the same contract address is not supported by Envio. Reach out to us if it's a problem for you.`)
           }
-          // Remove the DC from item to prevent it from saving to the db
-          let _ = dcs->Js.Array2.removeCountInPlace(~count=1, ~pos=idx)
+          shouldRemove := true
         | None =>
           let shouldUpdate = switch registeringContracts->Utils.Dict.dangerouslyGetNonOption(
             dc.address->Address.toString,
@@ -401,8 +403,7 @@ let registerDynamicContracts = (
               Pervasives.min(earliestRegisteringEventBlockNumber.contents, dc.startBlock)
             registeringContracts->Js.Dict.set(dc.address->Address.toString, dc)
           } else {
-            // Remove the DC from item to prevent it from saving to the db
-            let _ = dcs->Js.Array2.removeCountInPlace(~count=1, ~pos=idx)
+            shouldRemove := true
           }
         }
       | None => {
@@ -414,9 +415,17 @@ let registerDynamicContracts = (
             },
           )
           logger->Logging.childWarn(`Skipping contract registration: Contract doesn't have any events to fetch.`)
-          let _ = dcs->Js.Array2.removeCountInPlace(~count=1, ~pos=idx)
+          shouldRemove := true
         }
       }
+
+      if shouldRemove.contents {
+        // Remove the DC from item to prevent it from saving to the db
+        let _ = dcs->Js.Array2.removeCountInPlace(~count=1, ~pos=idx.contents)
+        // Don't increment idx - next element shifted into current position
+      } else {
+        idx := idx.contents + 1
+      }
     }
   }
 }
@@ -237,8 +237,10 @@ function registerDynamicContracts(fetchState, items) {
     var item = items[itemIdx];
     var dcs = item.dcs;
     if (dcs !== undefined) {
-      for(var idx = 0 ,idx_finish = dcs.length; idx < idx_finish; ++idx){
+      var idx = 0;
+      while(idx < dcs.length) {
         var dc = dcs[idx];
+        var shouldRemove = false;
         var match = fetchState.contractConfigs[dc.contractName];
         if (match !== undefined) {
           var existingContract = indexingContracts[dc.address];
@@ -254,7 +256,7 @@ function registerDynamicContracts(fetchState, items) {
             });
             Logging.childWarn(logger, "Skipping contract registration: Contract address is already registered at a later block number. Currently registration of the same contract address is not supported by Envio. Reach out to us if it's a problem for you.");
           }
-          dcs.splice(idx, 1);
+          shouldRemove = true;
         } else {
           var registeringContract = registeringContracts[dc.address];
           var shouldUpdate;
@@ -274,7 +276,7 @@ function registerDynamicContracts(fetchState, items) {
             earliestRegisteringEventBlockNumber = earliestRegisteringEventBlockNumber < dc.startBlock ? earliestRegisteringEventBlockNumber : dc.startBlock;
             registeringContracts[dc.address] = dc;
           } else {
-            dcs.splice(idx, 1);
+            shouldRemove = true;
           }
         }
       } else {
@@ -284,9 +286,14 @@ function registerDynamicContracts(fetchState, items) {
           contractName: dc.contractName
         });
         Logging.childWarn(logger$1, "Skipping contract registration: Contract doesn't have any events to fetch.");
+        shouldRemove = true;
+      }
+      if (shouldRemove) {
         dcs.splice(idx, 1);
+      } else {
+        idx = idx + 1 | 0;
       }
-    }
+    };
   }
 
 }
@@ -331,7 +338,7 @@ function registerDynamicContracts(fetchState, items) {
       addressesByContractName: pendingAddressesByContractName.contents
     });
   };
-  for(var idx$1 = 0 ,idx_finish$1 = Object.keys(addressesByContractName).length; idx$1 < idx_finish$1; ++idx$1){
+  for(var idx$1 = 0 ,idx_finish = Object.keys(addressesByContractName).length; idx$1 < idx_finish; ++idx$1){
    var contractName = Object.keys(addressesByContractName)[idx$1];
    var addresses = addressesByContractName[contractName];
    var contractConfig = fetchState.contractConfigs[contractName];
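The substantive fix in these hunks is the loop rewrite. The old code called dcs.splice(idx, 1) inside a for loop that cached idx_finish = dcs.length and unconditionally did ++idx, so every removal shifted the next element into the current slot and the loop skipped it, while the cached length went stale. The new while loop re-reads dcs.length each iteration and only advances idx when nothing was removed. A minimal standalone sketch of the pattern (illustrative names, not the Envio API):

    // Remove elements matching a predicate while iterating in place.
    // Only advance the index when the current element is kept, so the
    // element that splice() shifts into the current slot isn't skipped.
    function removeInPlace(items, shouldRemove) {
      var idx = 0;
      while (idx < items.length) { // re-read length every iteration
        if (shouldRemove(items[idx])) {
          items.splice(idx, 1);    // successor slides into slot idx
        } else {
          idx = idx + 1;
        }
      }
      return items;
    }

With the old for/splice pattern, two removable elements in a row — e.g. removeInPlace([1, 2, 2, 3], function (x) { return x === 2; }) — would leave the second 2 behind; the index-freezing version correctly returns [1, 3].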
package/src/Internal.res CHANGED
@@ -290,7 +290,7 @@ type effectArgs = {
 type effectCacheItem = {id: string, output: effectOutput}
 type effectCacheMeta = {
   itemSchema: S.t<effectCacheItem>,
-  rowsSchema: S.t<array<effectCacheItem>>,
+  outputSchema: S.t<effectOutput>,
   table: Table.table,
 }
 type effect = {
@@ -302,14 +302,17 @@ type effect = {
   mutable callsCount: int,
 }
 let cacheTablePrefix = "envio_effect_"
+let cacheOutputSchema = S.json(~validate=false)->(Utils.magic: S.t<Js.Json.t> => S.t<effectOutput>)
+let effectCacheItemRowsSchema = S.array(
+  S.schema(s => {id: s.matches(S.string), output: s.matches(cacheOutputSchema)}),
+)
 let makeCacheTable = (~effectName) => {
   Table.mkTable(
     cacheTablePrefix ++ effectName,
     ~fields=[
       Table.mkField("id", Text, ~fieldSchema=S.string, ~isPrimaryKey=true),
-      Table.mkField("output", JsonB, ~fieldSchema=S.json(~validate=false), ~isNullable=true),
+      Table.mkField("output", JsonB, ~fieldSchema=cacheOutputSchema, ~isNullable=true),
     ],
-    ~compositeIndices=[],
   )
 }
 
@@ -36,10 +36,19 @@ function makeEnumConfig(name, variants) {
 
 var cacheTablePrefix = "envio_effect_";
 
+var cacheOutputSchema = S$RescriptSchema.json(false);
+
+var effectCacheItemRowsSchema = S$RescriptSchema.array(S$RescriptSchema.schema(function (s) {
+  return {
+    id: s.m(S$RescriptSchema.string),
+    output: s.m(cacheOutputSchema)
+  };
+}));
+
 function makeCacheTable(effectName) {
-  return Table.mkTable(cacheTablePrefix + effectName, [], [
+  return Table.mkTable(cacheTablePrefix + effectName, undefined, [
     Table.mkField("id", "TEXT", S$RescriptSchema.string, undefined, undefined, undefined, true, undefined, undefined),
-    Table.mkField("output", "JSONB", S$RescriptSchema.json(false), undefined, undefined, true, undefined, undefined, undefined)
+    Table.mkField("output", "JSONB", cacheOutputSchema, undefined, undefined, true, undefined, undefined, undefined)
   ]);
 }
 
@@ -47,5 +56,7 @@ exports.fuelSupplyParamsSchema = fuelSupplyParamsSchema;
 exports.fuelTransferParamsSchema = fuelTransferParamsSchema;
 exports.makeEnumConfig = makeEnumConfig;
 exports.cacheTablePrefix = cacheTablePrefix;
+exports.cacheOutputSchema = cacheOutputSchema;
+exports.effectCacheItemRowsSchema = effectCacheItemRowsSchema;
 exports.makeCacheTable = makeCacheTable;
 /* fuelSupplyParamsSchema Not a pure module */
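The counterpart in Internal: effectCacheMeta swaps rowsSchema for outputSchema, and two schemas are hoisted to module level and exported — cacheOutputSchema (non-validating JSON cast to effectOutput) and effectCacheItemRowsSchema (an array of {id, output} rows shared by every effect cache table) — instead of constructing an array schema per effect. makeCacheTable also drops the explicit ~compositeIndices=[]; the compiled call now passes undefined for that argument, relying on it being optional.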
@@ -192,7 +192,12 @@ let getInitializedState = persistence => {
   }
 }
 
-let setEffectCacheOrThrow = async (persistence, ~effect: Internal.effect, ~items) => {
+let setEffectCacheOrThrow = async (
+  persistence,
+  ~effect: Internal.effect,
+  ~items,
+  ~invalidationsCount,
+) => {
   switch persistence.storageStatus {
   | Unknown
   | Initializing(_) =>
@@ -210,7 +215,8 @@ let setEffectCacheOrThrow = async (persistence, ~effect: Internal.effect, ~items
     }
     let initialize = effectCacheRecord.count === 0
     await storage.setEffectCacheOrThrow(~effect, ~items, ~initialize)
-    effectCacheRecord.count = effectCacheRecord.count + items->Js.Array2.length
+    effectCacheRecord.count =
+      effectCacheRecord.count + items->Js.Array2.length - invalidationsCount
    Prometheus.EffectCacheCount.set(~count=effectCacheRecord.count, ~effectName)
  }
 }
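setEffectCacheOrThrow gains an ~invalidationsCount argument, and the running count becomes count + items.length - invalidationsCount instead of count + items.length. As far as the diff shows, the intent is that writes replacing invalidated cache rows no longer inflate the envio_effect_cache_count gauge: a record of 100 plus a batch of 10 items, 3 of which are invalidations, lands on 107 rather than 110.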
@@ -119,7 +119,7 @@ function getInitializedState(persistence) {
   }
 }
 
-async function setEffectCacheOrThrow(persistence, effect, items) {
+async function setEffectCacheOrThrow(persistence, effect, items, invalidationsCount) {
   var match = persistence.storageStatus;
   if (typeof match !== "object") {
     return Js_exn.raiseError("Failed to access the indexer storage. The Persistence layer is not initialized.");
@@ -144,7 +144,7 @@ async function setEffectCacheOrThrow(persistence, effect, items) {
   }
   var initialize = effectCacheRecord.count === 0;
   await storage.setEffectCacheOrThrow(effect, items, initialize);
-  effectCacheRecord.count = effectCacheRecord.count + items.length | 0;
+  effectCacheRecord.count = (effectCacheRecord.count + items.length | 0) - invalidationsCount | 0;
   return Prometheus.EffectCacheCount.set(effectCacheRecord.count, effectName);
 }
 
package/src/PgStorage.res CHANGED
@@ -22,7 +22,7 @@ let makeCreateTableIndicesQuery = (table: Table.table, ~pgSchema) => {
   compositeIndices->Array.map(createCompositeIndex)->Js.Array2.joinWith("\n")
 }
 
-let makeCreateTableQuery = (table: Table.table, ~pgSchema) => {
+let makeCreateTableQuery = (table: Table.table, ~pgSchema, ~isNumericArrayAsText) => {
   open Belt
   let fieldsMapped =
     table
@@ -34,6 +34,8 @@ let makeCreateTableQuery = (table: Table.table, ~pgSchema) => {
     {
       `"${fieldName}" ${switch fieldType {
       | Custom(name) if !(name->Js.String2.startsWith("NUMERIC(")) => `"${pgSchema}".${name}`
+      // Workaround for Hasura bug https://github.com/enviodev/hyperindex/issues/788
+      | Numeric if isArray && isNumericArrayAsText => (Table.Text :> string)
       | _ => (fieldType :> string)
       }}${isArray ? "[]" : ""}${switch defaultValue {
       | Some(defaultValue) => ` DEFAULT ${defaultValue}`
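makeCreateTableQuery grows an ~isNumericArrayAsText flag: when set, array-valued NUMERIC columns are emitted as TEXT[] instead of NUMERIC[], with the in-code comment pointing at Hasura bug https://github.com/enviodev/hyperindex/issues/788. The flag is threaded from the storage layer's new ~isHasuraEnabled parameter through makeInitializeTransaction below, so for example a BigInt-array entity field that previously produced a column like "myField" NUMERIC[] would be created as "myField" TEXT[] when Hasura is enabled (myField is a hypothetical field name for illustration). Effect-cache tables and the restore-from-cache path pass ~isNumericArrayAsText=false and are unaffected.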
@@ -57,6 +59,7 @@ let makeCreateTableQuery = (table: Table.table, ~pgSchema) => {
 let makeInitializeTransaction = (
   ~pgSchema,
   ~pgUser,
+  ~isHasuraEnabled,
   ~chainConfigs=[],
   ~entities=[],
   ~enums=[],
@@ -105,7 +108,10 @@ GRANT ALL ON SCHEMA "${pgSchema}" TO public;`,
 
   // Batch all table creation first (optimal for PostgreSQL)
   allTables->Js.Array2.forEach((table: Table.table) => {
-    query := query.contents ++ "\n" ++ makeCreateTableQuery(table, ~pgSchema)
+    query :=
+      query.contents ++
+      "\n" ++
+      makeCreateTableQuery(table, ~pgSchema, ~isNumericArrayAsText=isHasuraEnabled)
   })
 
   // Then batch all indices (better performance when tables exist)
@@ -263,7 +269,7 @@ let makeTableBatchSetQuery = (~pgSchema, ~table: Table.table, ~itemSchema: S.t<'
   // Currently history update table uses S.object with transformation for schema,
   // which is being lossed during conversion to dbSchema.
   // So use simple insert values for now.
-  let isHistoryUpdate = table.tableName->Js.String2.startsWith("envio_history_")
+  let isHistoryUpdate = table.tableName->Js.String2.startsWith(EntityHistory.historyTablePrefix)
 
   // Should experiment how much it'll affect performance
   // Although, it should be fine not to perform the validation check,
@@ -329,7 +335,7 @@ let removeInvalidUtf8InPlace = entities =>
   // This is unsafe, but we rely that it'll use
   // the mutated reference on retry.
   // TODO: Test it properly after we start using
-  // in-memory PGLite for indexer test framework.
+  // real pg for indexer test framework.
   dict->Js.Dict.set(
     key,
     value
@@ -507,6 +513,7 @@ let make = (
   ~pgUser,
   ~pgDatabase,
   ~pgPassword,
+  ~isHasuraEnabled,
   ~onInitialize=?,
   ~onNewTables=?,
 ): Persistence.storage => {
@@ -552,7 +559,7 @@ let make = (
     let table = Internal.makeCacheTable(~effectName)
 
     sql
-    ->Postgres.unsafe(makeCreateTableQuery(table, ~pgSchema))
+    ->Postgres.unsafe(makeCreateTableQuery(table, ~pgSchema, ~isNumericArrayAsText=false))
     ->Promise.then(() => {
       let inputFile = NodeJs.Path.join(cacheDirPath, entry)->NodeJs.Path.toString
 
@@ -645,6 +652,7 @@ let make = (
       ~enums,
       ~chainConfigs,
       ~isEmptyPgSchema=schemaTableNames->Utils.Array.isEmpty,
+      ~isHasuraEnabled,
     )
     // Execute all queries within a single transaction for integrity
     let _ = await sql->Postgres.beginSql(sql => {
@@ -790,7 +798,10 @@ let make = (
   }
 
   if initialize {
-    let _ = await sql->Postgres.unsafe(makeCreateTableQuery(table, ~pgSchema))
+    let _ =
+      await sql->Postgres.unsafe(
+        makeCreateTableQuery(table, ~pgSchema, ~isNumericArrayAsText=false),
+      )
     // Integration with other tools like Hasura
     switch onNewTables {
     | Some(onNewTables) => await onNewTables(~tableNames=[table.tableName])
@@ -15,6 +15,7 @@ var Internal = require("./Internal.res.js");
 var Belt_Array = require("rescript/lib/js/belt_Array.js");
 var Caml_option = require("rescript/lib/js/caml_option.js");
 var Persistence = require("./Persistence.res.js");
+var EntityHistory = require("./db/EntityHistory.res.js");
 var InternalTable = require("./db/InternalTable.res.js");
 var Child_process = require("child_process");
 var Caml_exceptions = require("rescript/lib/js/caml_exceptions.js");
@@ -44,15 +45,20 @@ function makeCreateTableIndicesQuery(table, pgSchema) {
   return Belt_Array.map(singleIndices, createIndex).join("\n") + Belt_Array.map(compositeIndices, createCompositeIndex).join("\n");
 }
 
-function makeCreateTableQuery(table, pgSchema) {
+function makeCreateTableQuery(table, pgSchema, isNumericArrayAsText) {
   var fieldsMapped = Belt_Array.map(Table.getFields(table), (function (field) {
     var defaultValue = field.defaultValue;
+    var isArray = field.isArray;
     var fieldType = field.fieldType;
     var fieldName = Table.getDbFieldName(field);
     var tmp;
-    tmp = fieldType === "TIMESTAMP" || fieldType === "TIMESTAMP WITH TIME ZONE" || fieldType === "JSONB" || fieldType === "SERIAL" || fieldType === "TEXT" || fieldType === "DOUBLE PRECISION" || fieldType === "NUMERIC" || fieldType === "BOOLEAN" || fieldType === "BIGINT" || fieldType === "INTEGER" || fieldType === "TIMESTAMP WITH TIME ZONE NULL" || fieldType.startsWith("NUMERIC(") ? fieldType : "\"" + pgSchema + "\"." + fieldType;
+    tmp = fieldType === "TIMESTAMP" || fieldType === "TIMESTAMP WITH TIME ZONE" || fieldType === "JSONB" || fieldType === "SERIAL" || fieldType === "TEXT" || fieldType === "DOUBLE PRECISION" || fieldType === "NUMERIC" || fieldType === "BOOLEAN" || fieldType === "BIGINT" || fieldType === "INTEGER" || fieldType === "TIMESTAMP WITH TIME ZONE NULL" ? (
+        fieldType === "NUMERIC" && isArray && isNumericArrayAsText ? "TEXT" : fieldType
+      ) : (
+        fieldType.startsWith("NUMERIC(") ? fieldType : "\"" + pgSchema + "\"." + fieldType
+      );
     return "\"" + fieldName + "\" " + tmp + (
-      field.isArray ? "[]" : ""
+      isArray ? "[]" : ""
     ) + (
       defaultValue !== undefined ? " DEFAULT " + defaultValue : (
        field.isNullable ? "" : " NOT NULL"
@@ -68,7 +74,7 @@ function makeCreateTableQuery(table, pgSchema) {
   ) + ");";
 }
 
-function makeInitializeTransaction(pgSchema, pgUser, chainConfigsOpt, entitiesOpt, enumsOpt, isEmptyPgSchemaOpt) {
+function makeInitializeTransaction(pgSchema, pgUser, isHasuraEnabled, chainConfigsOpt, entitiesOpt, enumsOpt, isEmptyPgSchemaOpt) {
   var chainConfigs = chainConfigsOpt !== undefined ? chainConfigsOpt : [];
   var entities = entitiesOpt !== undefined ? entitiesOpt : [];
   var enums = enumsOpt !== undefined ? enumsOpt : [];
@@ -99,7 +105,7 @@ function makeInitializeTransaction(pgSchema, pgUser, chainConfigsOpt, entitiesOp
     query.contents = query.contents + "\n" + enumCreateQuery;
   });
   allTables.forEach(function (table) {
-    query.contents = query.contents + "\n" + makeCreateTableQuery(table, pgSchema);
+    query.contents = query.contents + "\n" + makeCreateTableQuery(table, pgSchema, isHasuraEnabled);
   });
   allTables.forEach(function (table) {
     var indices = makeCreateTableIndicesQuery(table, pgSchema);
@@ -194,7 +200,7 @@ function makeInsertValuesSetQuery(pgSchema, table, itemSchema, itemsCount) {
 function makeTableBatchSetQuery(pgSchema, table, itemSchema) {
   var match = Table.toSqlParams(table, itemSchema, pgSchema);
   var isRawEvents = table.tableName === InternalTable.RawEvents.table.tableName;
-  var isHistoryUpdate = table.tableName.startsWith("envio_history_");
+  var isHistoryUpdate = table.tableName.startsWith(EntityHistory.historyTablePrefix);
   if ((isRawEvents || !match.hasArrayField) && !isHistoryUpdate) {
     return {
       query: makeInsertUnnestSetQuery(pgSchema, table, itemSchema, isRawEvents),
@@ -354,7 +360,7 @@ async function getConnectedPsqlExec(pgUser, pgHost, pgDatabase, pgPort) {
   return result;
 }
 
-function make(sql, pgHost, pgSchema, pgPort, pgUser, pgDatabase, pgPassword, onInitialize, onNewTables) {
+function make(sql, pgHost, pgSchema, pgPort, pgUser, pgDatabase, pgPassword, isHasuraEnabled, onInitialize, onNewTables) {
   var psqlExecOptions_env = Js_dict.fromArray([
     [
       "PGPASSWORD",
@@ -403,7 +409,7 @@ function make(sql, pgHost, pgSchema, pgPort, pgUser, pgDatabase, pgPassword, onI
   await Promise.all(cacheFiles.map(function (entry) {
     var effectName = entry.slice(0, -4);
     var table = Internal.makeCacheTable(effectName);
-    return sql.unsafe(makeCreateTableQuery(table, pgSchema)).then(function () {
+    return sql.unsafe(makeCreateTableQuery(table, pgSchema, false)).then(function () {
       var inputFile = Path.join(cacheDirPath, entry);
       var command = psqlExec$1 + " -c 'COPY \"" + pgSchema + "\".\"" + table.tableName + "\" FROM STDIN WITH (FORMAT text, HEADER);' < " + inputFile;
       return new Promise((function (resolve, reject) {
@@ -461,7 +467,7 @@ function make(sql, pgHost, pgSchema, pgPort, pgUser, pgDatabase, pgPassword, onI
   })) {
     Js_exn.raiseError("Cannot run Envio migrations on PostgreSQL schema \"" + pgSchema + "\" because it contains non-Envio tables. Running migrations would delete all data in this schema.\n\nTo resolve this:\n1. If you want to use this schema, first backup any important data, then drop it with: \"pnpm envio local db-migrate down\"\n2. Or specify a different schema name by setting the \"ENVIO_PG_PUBLIC_SCHEMA\" environment variable\n3. Or manually drop the schema in your database if you're certain the data is not needed.");
   }
-  var queries = makeInitializeTransaction(pgSchema, pgUser, chainConfigs, entities, enums, Utils.$$Array.isEmpty(schemaTableNames));
+  var queries = makeInitializeTransaction(pgSchema, pgUser, isHasuraEnabled, chainConfigs, entities, enums, Utils.$$Array.isEmpty(schemaTableNames));
   await sql.begin(function (sql) {
     return Promise.all(queries.map(function (query) {
       return sql.unsafe(query);
@@ -568,7 +574,7 @@ function make(sql, pgHost, pgSchema, pgPort, pgUser, pgDatabase, pgPassword, onI
   var match = cacheMeta !== undefined ? cacheMeta : Js_exn.raiseError("Failed to set effect cache for \"" + effect.name + "\". Effect has no cache enabled.");
   var table = match.table;
   if (initialize) {
-    await sql.unsafe(makeCreateTableQuery(table, pgSchema));
+    await sql.unsafe(makeCreateTableQuery(table, pgSchema, false));
     if (onNewTables !== undefined) {
       await onNewTables([table.tableName]);
     }
@@ -525,30 +525,6 @@ module RollbackTargetBlockNumber = {
   }
 }
 
-module ProcessingBlockNumber = {
-  let gauge = SafeGauge.makeOrThrow(
-    ~name="envio_processing_block_number",
-    ~help="The latest item block number included in the currently processing batch for the chain.",
-    ~labelSchema=chainIdLabelsSchema,
-  )
-
-  let set = (~blockNumber, ~chainId) => {
-    gauge->SafeGauge.handleInt(~labels=chainId, ~value=blockNumber)
-  }
-}
-
-module ProcessingBatchSize = {
-  let gauge = SafeGauge.makeOrThrow(
-    ~name="envio_processing_batch_size",
-    ~help="The number of items included in the currently processing batch for the chain.",
-    ~labelSchema=chainIdLabelsSchema,
-  )
-
-  let set = (~batchSize, ~chainId) => {
-    gauge->SafeGauge.handleInt(~labels=chainId, ~value=batchSize)
-  }
-}
-
 module ProcessingMaxBatchSize = {
   let gauge = PromClient.Gauge.makeGauge({
     "name": "envio_processing_max_batch_size",
@@ -593,6 +569,17 @@ module ProgressEventsCount = {
   }
 }
 
+module ProgressBatchCount = {
+  let counter = PromClient.Counter.makeCounter({
+    "name": "envio_progress_batches_count",
+    "help": "The number of batches processed and reflected in the database.",
+  })
+
+  let increment = () => {
+    counter->PromClient.Counter.inc
+  }
+}
+
 let effectLabelsSchema = S.object(s => {
   s.field("effect", S.string)
 })
@@ -621,6 +608,18 @@ module EffectCacheCount = {
   }
 }
 
+module EffectCacheInvalidationsCount = {
+  let counter = SafeCounter.makeOrThrow(
+    ~name="envio_effect_cache_invalidations_count",
+    ~help="The number of effect cache invalidations.",
+    ~labelSchema=effectLabelsSchema,
+  )
+
+  let increment = (~effectName) => {
+    counter->SafeCounter.increment(~labels=effectName)
+  }
+}
+
 module StorageLoad = {
   let operationLabelsSchema = S.object(s => s.field("operation", S.string))
 
@@ -628,51 +628,29 @@ var RollbackTargetBlockNumber = {
   set: set$15
 };
 
-var gauge$17 = makeOrThrow$1("envio_processing_block_number", "The latest item block number included in the currently processing batch for the chain.", chainIdLabelsSchema);
-
-function set$16(blockNumber, chainId) {
-  handleInt$1(gauge$17, chainId, blockNumber);
-}
-
-var ProcessingBlockNumber = {
-  gauge: gauge$17,
-  set: set$16
-};
-
-var gauge$18 = makeOrThrow$1("envio_processing_batch_size", "The number of items included in the currently processing batch for the chain.", chainIdLabelsSchema);
-
-function set$17(batchSize, chainId) {
-  handleInt$1(gauge$18, chainId, batchSize);
-}
-
-var ProcessingBatchSize = {
-  gauge: gauge$18,
-  set: set$17
-};
-
-var gauge$19 = new PromClient.Gauge({
+var gauge$17 = new PromClient.Gauge({
   name: "envio_processing_max_batch_size",
   help: "The maximum number of items to process in a single batch."
 });
 
-function set$18(maxBatchSize) {
-  gauge$19.set(maxBatchSize);
+function set$16(maxBatchSize) {
+  gauge$17.set(maxBatchSize);
 }
 
 var ProcessingMaxBatchSize = {
-  gauge: gauge$19,
-  set: set$18
+  gauge: gauge$17,
+  set: set$16
 };
 
-var gauge$20 = makeOrThrow$1("envio_progress_block_number", "The block number of the latest block processed and stored in the database.", chainIdLabelsSchema);
+var gauge$18 = makeOrThrow$1("envio_progress_block_number", "The block number of the latest block processed and stored in the database.", chainIdLabelsSchema);
 
-function set$19(blockNumber, chainId) {
-  handleInt$1(gauge$20, chainId, blockNumber);
+function set$17(blockNumber, chainId) {
+  handleInt$1(gauge$18, chainId, blockNumber);
 }
 
 var ProgressBlockNumber = {
-  gauge: gauge$20,
-  set: set$19
+  gauge: gauge$18,
+  set: set$17
 };
 
 var deprecatedGauge$1 = new PromClient.Gauge({
@@ -681,45 +659,70 @@ var deprecatedGauge$1 = new PromClient.Gauge({
   labelNames: ["chainId"]
 });
 
-var gauge$21 = makeOrThrow$1("envio_progress_events_count", "The number of events processed and reflected in the database.", chainIdLabelsSchema);
+var gauge$19 = makeOrThrow$1("envio_progress_events_count", "The number of events processed and reflected in the database.", chainIdLabelsSchema);
 
-function set$20(processedCount, chainId) {
+function set$18(processedCount, chainId) {
   deprecatedGauge$1.labels({
     chainId: chainId
   }).set(processedCount);
-  handleInt$1(gauge$21, chainId, processedCount);
+  handleInt$1(gauge$19, chainId, processedCount);
 }
 
 var ProgressEventsCount = {
   deprecatedGauge: deprecatedGauge$1,
-  gauge: gauge$21,
-  set: set$20
+  gauge: gauge$19,
+  set: set$18
+};
+
+var counter$5 = new PromClient.Counter({
+  name: "envio_progress_batches_count",
+  help: "The number of batches processed and reflected in the database."
+});
+
+function increment$5() {
+  counter$5.inc();
+}
+
+var ProgressBatchCount = {
+  counter: counter$5,
+  increment: increment$5
 };
 
 var effectLabelsSchema = S$RescriptSchema.object(function (s) {
   return s.f("effect", S$RescriptSchema.string);
 });
 
-var gauge$22 = makeOrThrow$1("envio_effect_calls_count", "The number of calls to the effect. Including both handler execution and cache hits.", effectLabelsSchema);
+var gauge$20 = makeOrThrow$1("envio_effect_calls_count", "The number of calls to the effect. Including both handler execution and cache hits.", effectLabelsSchema);
 
-function set$21(callsCount, effectName) {
-  handleInt$1(gauge$22, effectName, callsCount);
+function set$19(callsCount, effectName) {
+  handleInt$1(gauge$20, effectName, callsCount);
 }
 
 var EffectCallsCount = {
-  gauge: gauge$22,
-  set: set$21
+  gauge: gauge$20,
+  set: set$19
 };
 
-var gauge$23 = makeOrThrow$1("envio_effect_cache_count", "The number of items in the effect cache.", effectLabelsSchema);
+var gauge$21 = makeOrThrow$1("envio_effect_cache_count", "The number of items in the effect cache.", effectLabelsSchema);
 
-function set$22(count, effectName) {
-  handleInt$1(gauge$23, effectName, count);
+function set$20(count, effectName) {
+  handleInt$1(gauge$21, effectName, count);
 }
 
 var EffectCacheCount = {
-  gauge: gauge$23,
-  set: set$22
+  gauge: gauge$21,
+  set: set$20
+};
+
+var counter$6 = makeOrThrow("envio_effect_cache_invalidations_count", "The number of effect cache invalidations.", effectLabelsSchema);
+
+function increment$6(effectName) {
+  increment(counter$6, effectName);
+}
+
+var EffectCacheInvalidationsCount = {
+  counter: counter$6,
+  increment: increment$6
 };
 
 var operationLabelsSchema = S$RescriptSchema.object(function (s) {
@@ -730,7 +733,7 @@ var timeCounter$2 = makeOrThrow("envio_storage_load_time", "Processing time take
 
 var totalTimeCounter = makeOrThrow("envio_storage_load_total_time", "Cumulative time spent loading data from storage during the indexing process. (milliseconds)", operationLabelsSchema);
 
-var counter$5 = makeOrThrow("envio_storage_load_count", "Cumulative number of successful storage load operations during the indexing process.", operationLabelsSchema);
+var counter$7 = makeOrThrow("envio_storage_load_count", "Cumulative number of successful storage load operations during the indexing process.", operationLabelsSchema);
 
 var whereSizeCounter = makeOrThrow("envio_storage_load_where_size", "Cumulative number of filter conditions ('where' items) used in storage load operations during the indexing process.", operationLabelsSchema);
 
@@ -759,7 +762,7 @@ function endOperation(timerRef, operation, whereSize, size) {
     Utils.Dict.deleteInPlace(operations, operation);
   }
   handleInt(totalTimeCounter, operation, Hrtime.intFromMillis(Hrtime.toMillis(Hrtime.timeSince(timerRef))));
-  increment(counter$5, operation);
+  increment(counter$7, operation);
   handleInt(whereSizeCounter, operation, whereSize);
   handleInt(sizeCounter, operation, size);
 }
@@ -768,7 +771,7 @@ var StorageLoad = {
   operationLabelsSchema: operationLabelsSchema,
   timeCounter: timeCounter$2,
   totalTimeCounter: totalTimeCounter,
-  counter: counter$5,
+  counter: counter$7,
   whereSizeCounter: whereSizeCounter,
   sizeCounter: sizeCounter,
   operations: operations,
@@ -817,13 +820,13 @@ exports.RollbackEnabled = RollbackEnabled;
 exports.RollbackSuccess = RollbackSuccess;
 exports.RollbackHistoryPrune = RollbackHistoryPrune;
 exports.RollbackTargetBlockNumber = RollbackTargetBlockNumber;
-exports.ProcessingBlockNumber = ProcessingBlockNumber;
-exports.ProcessingBatchSize = ProcessingBatchSize;
 exports.ProcessingMaxBatchSize = ProcessingMaxBatchSize;
 exports.ProgressBlockNumber = ProgressBlockNumber;
 exports.ProgressEventsCount = ProgressEventsCount;
+exports.ProgressBatchCount = ProgressBatchCount;
 exports.effectLabelsSchema = effectLabelsSchema;
 exports.EffectCallsCount = EffectCallsCount;
 exports.EffectCacheCount = EffectCacheCount;
+exports.EffectCacheInvalidationsCount = EffectCacheInvalidationsCount;
 exports.StorageLoad = StorageLoad;
 /* loadEntitiesDurationCounter Not a pure module */
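Metrics summary for this release: the per-chain gauges envio_processing_block_number and envio_processing_batch_size are removed; a global counter envio_progress_batches_count and a per-effect counter envio_effect_cache_invalidations_count are added; the gauge$N/set$N renumbering in the compiled output is purely mechanical fallout of those removals. Dashboards or alerts scraping the two removed gauges need updating; a query along the lines of rate(envio_progress_batches_count[5m]) (hypothetical PromQL, not shipped with the package) gives the batch-throughput view that the removed gauges only approximated.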
@@ -37,8 +37,6 @@ let reorgDetectedToLogParams = (reorgDetected: reorgDetected, ~shouldRollbackOnR
 }
 
 type reorgResult = NoReorg | ReorgDetected(reorgDetected)
-type validBlockError = NotFound | AlreadyReorgedHashes
-type validBlockResult = result<blockDataWithTimestamp, validBlockError>
 
 type t = {
   // Whether to rollback on reorg
@@ -186,14 +184,13 @@ let registerReorgGuard = (
 }
 
 /**
-Returns the latest block data which matches block number and hashes in the provided array
-If it doesn't exist in the reorg threshold it returns None or the latest scanned block outside of the reorg threshold
+Returns the latest block number which matches block number and hashes in the provided array
+If it doesn't exist in the reorg threshold it returns NotFound
 */
 let getLatestValidScannedBlock = (
   self: t,
   ~blockNumbersAndHashes: array<blockDataWithTimestamp>,
   ~currentBlockHeight,
-  ~skipReorgDuplicationCheck=false,
 ) => {
   let verifiedDataByBlockNumber = Js.Dict.empty()
   for idx in 0 to blockNumbersAndHashes->Array.length - 1 {
@@ -201,68 +198,38 @@ let getLatestValidScannedBlock = (
     verifiedDataByBlockNumber->Js.Dict.set(blockData.blockNumber->Int.toString, blockData)
   }
 
-  /*
-  Let's say we indexed block X with hash A.
-  The next query we got the block X with hash B.
-  We assume that the hash A is reorged since we received it earlier than B.
-  So when we try to detect the reorg depth, we consider hash A as already invalid,
-  and retry the block hashes query if we receive one. (since it could come from a different instance and cause a double reorg)
-  But the assumption that A is reorged might be wrong sometimes,
-  for example if we got B from instance which didn't handle a reorg A.
-  Theoretically, it's possible with high partition concurrency.
-  So to handle this and prevent entering an infinite loop,
-  we can skip the reorg duplication check if we're sure that the block hashes query
-  is not coming from a different instance. (let's say we tried several times)
-  */
-  let isAlreadyReorgedResponse = skipReorgDuplicationCheck
-    ? false
-    : switch self.detectedReorgBlock {
-      | Some(detectedReorgBlock) =>
-        switch verifiedDataByBlockNumber->Utils.Dict.dangerouslyGetNonOption(
-          detectedReorgBlock.blockNumber->Int.toString,
-        ) {
-        | Some(verifiedBlockData) => verifiedBlockData.blockHash === detectedReorgBlock.blockHash
-        | None => false
-        }
-      | None => false
-      }
+  let dataByBlockNumber = self->getDataByBlockNumberCopyInThreshold(~currentBlockHeight)
+  // Js engine automatically orders numeric object keys
+  let ascBlockNumberKeys = dataByBlockNumber->Js.Dict.keys
 
-  if isAlreadyReorgedResponse {
-    Error(AlreadyReorgedHashes)
-  } else {
-    let dataByBlockNumber = self->getDataByBlockNumberCopyInThreshold(~currentBlockHeight)
-    // Js engine automatically orders numeric object keys
-    let ascBlockNumberKeys = dataByBlockNumber->Js.Dict.keys
-
-    let getPrevScannedBlock = idx =>
-      switch ascBlockNumberKeys
-      ->Belt.Array.get(idx - 1)
-      ->Option.flatMap(key => {
-        // We should already validate that the block number is verified at the point
-        verifiedDataByBlockNumber->Utils.Dict.dangerouslyGetNonOption(key)
-      }) {
-      | Some(data) => Ok(data)
-      | None => Error(NotFound)
+  let getPrevScannedBlockNumber = idx =>
+    ascBlockNumberKeys
+    ->Belt.Array.get(idx - 1)
+    ->Option.flatMap(key => {
+      // We should already validate that the block number is verified at the point
+      switch verifiedDataByBlockNumber->Utils.Dict.dangerouslyGetNonOption(key) {
+      | Some(v) => Some(v.blockNumber)
+      | None => None
       }
+    })
 
-    let rec loop = idx => {
-      switch ascBlockNumberKeys->Belt.Array.get(idx) {
-      | Some(blockNumberKey) =>
-        let scannedBlock = dataByBlockNumber->Js.Dict.unsafeGet(blockNumberKey)
-        switch verifiedDataByBlockNumber->Utils.Dict.dangerouslyGetNonOption(blockNumberKey) {
-        | None =>
-          Js.Exn.raiseError(
-            `Unexpected case. Couldn't find verified hash for block number ${blockNumberKey}`,
-          )
-        | Some(verifiedBlockData) if verifiedBlockData.blockHash === scannedBlock.blockHash =>
-          loop(idx + 1)
-        | Some(_) => getPrevScannedBlock(idx)
-        }
-      | None => getPrevScannedBlock(idx)
+  let rec loop = idx => {
+    switch ascBlockNumberKeys->Belt.Array.get(idx) {
+    | Some(blockNumberKey) =>
+      let scannedBlock = dataByBlockNumber->Js.Dict.unsafeGet(blockNumberKey)
+      switch verifiedDataByBlockNumber->Utils.Dict.dangerouslyGetNonOption(blockNumberKey) {
+      | None =>
+        Js.Exn.raiseError(
+          `Unexpected case. Couldn't find verified hash for block number ${blockNumberKey}`,
+        )
+      | Some(verifiedBlockData) if verifiedBlockData.blockHash === scannedBlock.blockHash =>
+        loop(idx + 1)
+      | Some(_) => getPrevScannedBlockNumber(idx)
       }
+    | None => getPrevScannedBlockNumber(idx)
     }
-    loop(0)
   }
+  loop(0)
 }
 
 /**
@@ -303,11 +270,22 @@ let rollbackToValidBlockNumber = (
   }
 }
 
-let getThresholdBlockNumbers = (self: t, ~currentBlockHeight) => {
-  let dataByBlockNumberCopyInThreshold =
-    self->getDataByBlockNumberCopyInThreshold(~currentBlockHeight)
+let getThresholdBlockNumbersBelowBlock = (self: t, ~blockNumber: int, ~currentBlockHeight) => {
+  let arr = []
+
+  // Js engine automatically orders numeric object keys
+  let ascBlockNumberKeys = self.dataByBlockNumber->Js.Dict.keys
+  let thresholdBlockNumber = currentBlockHeight - self.maxReorgDepth
 
-  dataByBlockNumberCopyInThreshold->Js.Dict.values->Js.Array2.map(v => v.blockNumber)
+  for idx in 0 to ascBlockNumberKeys->Array.length - 1 {
+    let blockNumberKey = ascBlockNumberKeys->Js.Array2.unsafe_get(idx)
+    let scannedBlock = self.dataByBlockNumber->Js.Dict.unsafeGet(blockNumberKey)
+    let isInReorgThreshold = scannedBlock.blockNumber >= thresholdBlockNumber
+    if isInReorgThreshold && scannedBlock.blockNumber < blockNumber {
+      arr->Array.push(scannedBlock.blockNumber)
+    }
+  }
+  arr
 }
 
 let getHashByBlockNumber = (reorgDetection: t, ~blockNumber) => {
@@ -2,7 +2,6 @@
 'use strict';
 
 var Js_exn = require("rescript/lib/js/js_exn.js");
-var Js_dict = require("rescript/lib/js/js_dict.js");
 var Belt_Array = require("rescript/lib/js/belt_Array.js");
 var Belt_Option = require("rescript/lib/js/belt_Option.js");
 
@@ -109,63 +108,37 @@ function registerReorgGuard(self, reorgGuard, currentBlockHeight) {
   }
 }
 
-function getLatestValidScannedBlock(self, blockNumbersAndHashes, currentBlockHeight, skipReorgDuplicationCheckOpt) {
-  var skipReorgDuplicationCheck = skipReorgDuplicationCheckOpt !== undefined ? skipReorgDuplicationCheckOpt : false;
+function getLatestValidScannedBlock(self, blockNumbersAndHashes, currentBlockHeight) {
   var verifiedDataByBlockNumber = {};
   for(var idx = 0 ,idx_finish = blockNumbersAndHashes.length; idx < idx_finish; ++idx){
     var blockData = blockNumbersAndHashes[idx];
     verifiedDataByBlockNumber[String(blockData.blockNumber)] = blockData;
   }
-  var isAlreadyReorgedResponse;
-  if (skipReorgDuplicationCheck) {
-    isAlreadyReorgedResponse = false;
-  } else {
-    var detectedReorgBlock = self.detectedReorgBlock;
-    if (detectedReorgBlock !== undefined) {
-      var verifiedBlockData = verifiedDataByBlockNumber[String(detectedReorgBlock.blockNumber)];
-      isAlreadyReorgedResponse = verifiedBlockData !== undefined ? verifiedBlockData.blockHash === detectedReorgBlock.blockHash : false;
-    } else {
-      isAlreadyReorgedResponse = false;
-    }
-  }
-  if (isAlreadyReorgedResponse) {
-    return {
-      TAG: "Error",
-      _0: "AlreadyReorgedHashes"
-    };
-  }
   var dataByBlockNumber = getDataByBlockNumberCopyInThreshold(self, currentBlockHeight);
   var ascBlockNumberKeys = Object.keys(dataByBlockNumber);
-  var getPrevScannedBlock = function (idx) {
-    var data = Belt_Option.flatMap(Belt_Array.get(ascBlockNumberKeys, idx - 1 | 0), (function (key) {
-      return verifiedDataByBlockNumber[key];
-    }));
-    if (data !== undefined) {
-      return {
-        TAG: "Ok",
-        _0: data
-      };
-    } else {
-      return {
-        TAG: "Error",
-        _0: "NotFound"
-      };
-    }
+  var getPrevScannedBlockNumber = function (idx) {
+    return Belt_Option.flatMap(Belt_Array.get(ascBlockNumberKeys, idx - 1 | 0), (function (key) {
+      var v = verifiedDataByBlockNumber[key];
+      if (v !== undefined) {
+        return v.blockNumber;
+      }
+
+    }));
   };
   var _idx = 0;
   while(true) {
     var idx$1 = _idx;
     var blockNumberKey = Belt_Array.get(ascBlockNumberKeys, idx$1);
     if (blockNumberKey === undefined) {
-      return getPrevScannedBlock(idx$1);
+      return getPrevScannedBlockNumber(idx$1);
     }
     var scannedBlock = dataByBlockNumber[blockNumberKey];
-    var verifiedBlockData$1 = verifiedDataByBlockNumber[blockNumberKey];
-    if (verifiedBlockData$1 === undefined) {
+    var verifiedBlockData = verifiedDataByBlockNumber[blockNumberKey];
+    if (verifiedBlockData === undefined) {
       return Js_exn.raiseError("Unexpected case. Couldn't find verified hash for block number " + blockNumberKey);
     }
-    if (verifiedBlockData$1.blockHash !== scannedBlock.blockHash) {
-      return getPrevScannedBlock(idx$1);
+    if (verifiedBlockData.blockHash !== scannedBlock.blockHash) {
+      return getPrevScannedBlockNumber(idx$1);
     }
     _idx = idx$1 + 1 | 0;
     continue ;
@@ -202,11 +175,20 @@ function rollbackToValidBlockNumber(param, blockNumber) {
   };
 }
 
-function getThresholdBlockNumbers(self, currentBlockHeight) {
-  var dataByBlockNumberCopyInThreshold = getDataByBlockNumberCopyInThreshold(self, currentBlockHeight);
-  return Js_dict.values(dataByBlockNumberCopyInThreshold).map(function (v) {
-    return v.blockNumber;
-  });
+function getThresholdBlockNumbersBelowBlock(self, blockNumber, currentBlockHeight) {
+  var arr = [];
+  var ascBlockNumberKeys = Object.keys(self.dataByBlockNumber);
+  var thresholdBlockNumber = currentBlockHeight - self.maxReorgDepth | 0;
+  for(var idx = 0 ,idx_finish = ascBlockNumberKeys.length; idx < idx_finish; ++idx){
+    var blockNumberKey = ascBlockNumberKeys[idx];
+    var scannedBlock = self.dataByBlockNumber[blockNumberKey];
+    var isInReorgThreshold = scannedBlock.blockNumber >= thresholdBlockNumber;
+    if (isInReorgThreshold && scannedBlock.blockNumber < blockNumber) {
+      arr.push(scannedBlock.blockNumber);
+    }
+
+  }
+  return arr;
 }
 
 function getHashByBlockNumber(reorgDetection, blockNumber) {
@@ -224,6 +206,6 @@ exports.getDataByBlockNumberCopyInThreshold = getDataByBlockNumberCopyInThreshol
 exports.registerReorgGuard = registerReorgGuard;
 exports.getLatestValidScannedBlock = getLatestValidScannedBlock;
 exports.rollbackToValidBlockNumber = rollbackToValidBlockNumber;
-exports.getThresholdBlockNumbers = getThresholdBlockNumbers;
+exports.getThresholdBlockNumbersBelowBlock = getThresholdBlockNumbersBelowBlock;
 exports.getHashByBlockNumber = getHashByBlockNumber;
 /* No side effect */
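Two behavioral changes land here. getLatestValidScannedBlock loses the skipReorgDuplicationCheck flag along with the validBlockError/validBlockResult types: instead of Ok(blockData) / Error(NotFound | AlreadyReorgedHashes), it walks the ascending block-number keys and returns just the previous verified block number as an option (undefined in the compiled JS when nothing matches). Note the updated doc comment still says it "returns NotFound", a leftover from the old result type. And getThresholdBlockNumbers becomes getThresholdBlockNumbersBelowBlock, which additionally filters to blocks strictly below a given block number. A small sketch of the new selection rule (plain JS, variable names taken from the diff):

    // Keep scanned block numbers that are still inside the reorg
    // threshold AND strictly below the given block number.
    function thresholdBlockNumbersBelowBlock(scanned, blockNumber, currentBlockHeight, maxReorgDepth) {
      var thresholdBlockNumber = currentBlockHeight - maxReorgDepth;
      return scanned.filter(function (n) {
        return n >= thresholdBlockNumber && n < blockNumber;
      });
    }

    // thresholdBlockNumbersBelowBlock([95, 98, 99, 100], 99, 100, 5)
    // -> [95, 98]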
@@ -36,32 +36,31 @@ let make = (
 let getSafeCheckpointId = (safeCheckpointTracking: t, ~sourceBlockNumber: int) => {
   let safeBlockNumber = sourceBlockNumber - safeCheckpointTracking.maxReorgDepth
 
-  if safeCheckpointTracking.checkpointBlockNumbers->Belt.Array.getUnsafe(0) > safeBlockNumber {
-    0
-  } else {
-    let trackingCheckpointsCount = safeCheckpointTracking.checkpointBlockNumbers->Array.length
-    switch trackingCheckpointsCount {
-    | 1 => safeCheckpointTracking.checkpointIds->Belt.Array.getUnsafe(0)
-    | _ => {
-        let result = ref(None)
-        let idx = ref(1)
+  switch safeCheckpointTracking.checkpointIds {
+  | [] => 0
+  | _
+    if safeCheckpointTracking.checkpointBlockNumbers->Belt.Array.getUnsafe(0) > safeBlockNumber => 0
+  | [checkpointId] => checkpointId
+  | _ => {
+      let trackingCheckpointsCount = safeCheckpointTracking.checkpointIds->Array.length
+      let result = ref(None)
+      let idx = ref(1)
 
-        while idx.contents < trackingCheckpointsCount && result.contents === None {
-          if (
-            safeCheckpointTracking.checkpointBlockNumbers->Belt.Array.getUnsafe(idx.contents) >
-            safeBlockNumber
-          ) {
-            result :=
-              Some(safeCheckpointTracking.checkpointIds->Belt.Array.getUnsafe(idx.contents - 1))
-          }
-          idx := idx.contents + 1
+      while idx.contents < trackingCheckpointsCount && result.contents === None {
+        if (
+          safeCheckpointTracking.checkpointBlockNumbers->Belt.Array.getUnsafe(idx.contents) >
+          safeBlockNumber
+        ) {
+          result :=
+            Some(safeCheckpointTracking.checkpointIds->Belt.Array.getUnsafe(idx.contents - 1))
        }
+        idx := idx.contents + 1
+      }
 
-        switch result.contents {
-        | Some(checkpointId) => checkpointId
-        | None =>
-          safeCheckpointTracking.checkpointIds->Belt.Array.getUnsafe(trackingCheckpointsCount - 1)
-        }
+      switch result.contents {
+      | Some(checkpointId) => checkpointId
+      | None =>
+        safeCheckpointTracking.checkpointIds->Belt.Array.getUnsafe(trackingCheckpointsCount - 1)
       }
     }
   }
@@ -21,13 +21,17 @@ function make(maxReorgDepth, shouldRollbackOnReorg, chainReorgCheckpoints) {
 
 function getSafeCheckpointId(safeCheckpointTracking, sourceBlockNumber) {
   var safeBlockNumber = sourceBlockNumber - safeCheckpointTracking.maxReorgDepth | 0;
+  var match = safeCheckpointTracking.checkpointIds;
+  if (match.length === 0) {
+    return 0;
+  }
   if (safeCheckpointTracking.checkpointBlockNumbers[0] > safeBlockNumber) {
     return 0;
   }
-  var trackingCheckpointsCount = safeCheckpointTracking.checkpointBlockNumbers.length;
-  if (trackingCheckpointsCount === 1) {
-    return safeCheckpointTracking.checkpointIds[0];
+  if (match.length === 1) {
+    return match[0];
   }
+  var trackingCheckpointsCount = safeCheckpointTracking.checkpointIds.length;
   var result;
   var idx = 1;
   while(idx < trackingCheckpointsCount && result === undefined) {
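The reshaped getSafeCheckpointId adds one real guard: an empty checkpointIds array now returns 0 immediately. Previously the function began by reading checkpointBlockNumbers[0]; on an empty tracker that read yields undefined, the undefined > safeBlockNumber comparison is false, and execution fell through toward the multi-checkpoint branch and its unsafe indexed reads. The rest is the same while-loop search re-indented into the new switch, with trackingCheckpointsCount now taken from checkpointIds rather than checkpointBlockNumbers (the code treats the two arrays as equal length).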
@@ -43,8 +43,9 @@ type t<'entity> = {
 }
 
 let maxPgTableNameLength = 63
+let historyTablePrefix = "envio_history_"
 let historyTableName = (~entityName, ~entityIndex) => {
-  let fullName = "envio_history_" ++ entityName
+  let fullName = historyTablePrefix ++ entityName
   if fullName->String.length > maxPgTableNameLength {
     let entityIndexStr = entityIndex->Belt.Int.toString
     fullName->Js.String.slice(~from=0, ~to_=maxPgTableNameLength - entityIndexStr->String.length) ++
@@ -86,8 +87,6 @@ let fromTable = (table: table, ~schema: S.t<'entity>, ~entityIndex): t<'entity>
     ~isPrimaryKey=true,
   )
 
-  // let dataFieldNames = dataFields->Belt.Array.map(field => field->getFieldName)
-
   let entityTableName = table.tableName
   let historyTableName = historyTableName(~entityName=entityTableName, ~entityIndex)
   //ignore composite indices
@@ -39,8 +39,10 @@ function makeSetUpdateSchema(entitySchema) {
   });
 }
 
+var historyTablePrefix = "envio_history_";
+
 function historyTableName(entityName, entityIndex) {
-  var fullName = "envio_history_" + entityName;
+  var fullName = historyTablePrefix + entityName;
   if (fullName.length <= 63) {
     return fullName;
   }
@@ -181,6 +183,7 @@ exports.changeFieldName = changeFieldName;
 exports.checkpointIdFieldName = checkpointIdFieldName;
 exports.makeSetUpdateSchema = makeSetUpdateSchema;
 exports.maxPgTableNameLength = maxPgTableNameLength;
+exports.historyTablePrefix = historyTablePrefix;
 exports.historyTableName = historyTableName;
 exports.fromTable = fromTable;
 exports.makePruneStaleEntityHistoryQuery = makePruneStaleEntityHistoryQuery;
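Extracting historyTablePrefix = "envio_history_" into an exported constant lets PgStorage's isHistoryUpdate check, seen earlier in this diff, reference EntityHistory.historyTablePrefix instead of a duplicated string literal, so the prefix and the startsWith checks can no longer drift apart. The commented-out dataFieldNames line in fromTable is simply deleted.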
@@ -522,35 +522,38 @@ let make = (
   },
 )
 
-let blockLoader = LazyLoader.make(
-  ~loaderFn=blockNumber =>
-    getKnownBlockWithBackoff(
-      ~provider,
-      ~sourceName=name,
-      ~chain,
-      ~backoffMsOnFailure=1000,
-      ~blockNumber,
-      ~lowercaseAddresses,
-    ),
-  ~onError=(am, ~exn) => {
-    Logging.error({
-      "err": exn->Utils.prettifyExn,
-      "msg": `EE1100: Top level promise timeout reached. Please review other errors or warnings in the code. This function will retry in ${(am._retryDelayMillis / 1000)
-        ->Belt.Int.toString} seconds. It is highly likely that your indexer isn't syncing on one or more chains currently. Also take a look at the "suggestedFix" in the metadata of this command`,
-      "source": name,
-      "chainId": chain->ChainMap.Chain.toChainId,
-      "metadata": {
-        {
-          "asyncTaskName": "blockLoader: fetching block data - `getBlock` rpc call",
-          "suggestedFix": "This likely means the RPC url you are using is not responding correctly. Please try another RPC endipoint.",
-        }
-      },
-    })
-  },
-)
+let makeBlockLoader = () =>
+  LazyLoader.make(
+    ~loaderFn=blockNumber =>
+      getKnownBlockWithBackoff(
+        ~provider,
+        ~sourceName=name,
+        ~chain,
+        ~backoffMsOnFailure=1000,
+        ~blockNumber,
+        ~lowercaseAddresses,
+      ),
+    ~onError=(am, ~exn) => {
+      Logging.error({
+        "err": exn->Utils.prettifyExn,
+        "msg": `EE1100: Top level promise timeout reached. Please review other errors or warnings in the code. This function will retry in ${(am._retryDelayMillis / 1000)
+          ->Belt.Int.toString} seconds. It is highly likely that your indexer isn't syncing on one or more chains currently. Also take a look at the "suggestedFix" in the metadata of this command`,
+        "source": name,
+        "chainId": chain->ChainMap.Chain.toChainId,
+        "metadata": {
+          {
+            "asyncTaskName": "blockLoader: fetching block data - `getBlock` rpc call",
+            "suggestedFix": "This likely means the RPC url you are using is not responding correctly. Please try another RPC endipoint.",
+          }
+        },
+      })
+    },
+  )
+
+let blockLoader = ref(makeBlockLoader())
 
 let getEventBlockOrThrow = makeThrowingGetEventBlock(~getBlock=blockNumber =>
-  blockLoader->LazyLoader.get(blockNumber)
+  blockLoader.contents->LazyLoader.get(blockNumber)
 )
 let getEventTransactionOrThrow = makeThrowingGetEventTransaction(
   ~getTransactionFields=Ethers.JsonRpcProvider.makeGetTransactionFields(
@@ -625,7 +628,7 @@ let make = (
 
   let firstBlockParentPromise =
     fromBlock > 0
-      ? blockLoader->LazyLoader.get(fromBlock - 1)->Promise.thenResolve(res => res->Some)
+      ? blockLoader.contents->LazyLoader.get(fromBlock - 1)->Promise.thenResolve(res => res->Some)
       : Promise.resolve(None)
 
   let {getLogSelectionOrThrow} = getSelectionConfig(selection)
@@ -636,7 +639,7 @@ let make = (
     ~toBlock=suggestedToBlock,
     ~addresses,
     ~topicQuery,
-    ~loadBlock=blockNumber => blockLoader->LazyLoader.get(blockNumber),
+    ~loadBlock=blockNumber => blockLoader.contents->LazyLoader.get(blockNumber),
     ~syncConfig,
     ~provider,
     ~mutSuggestedBlockIntervals,
@@ -873,8 +876,13 @@ let make = (
   }
 
   let getBlockHashes = (~blockNumbers, ~logger as _currentlyUnusedLogger) => {
+    // Clear cache by creating a fresh LazyLoader
+    // This is important, since we call this
+    // function when a reorg is detected
+    blockLoader := makeBlockLoader()
+
     blockNumbers
-    ->Array.map(blockNum => blockLoader->LazyLoader.get(blockNum))
+    ->Array.map(blockNum => blockLoader.contents->LazyLoader.get(blockNum))
     ->Promise.all
     ->Promise.thenResolve(blocks => {
       blocks
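The RPC source's block loader becomes a ref built by makeBlockLoader(), and getBlockHashes now swaps in a fresh LazyLoader before fetching. As the in-code comment explains, getBlockHashes runs when a reorg is detected, and serving memoized pre-reorg blocks out of the old loader's cache would defeat the hash re-verification; recreating the loader drops that cache. Every other call site changes mechanically from blockLoader to blockLoader.contents, as the compiled output below confirms.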
@@ -533,22 +533,27 @@ function make(param) {
     }
   });
 }), undefined, undefined, undefined, undefined);
-var blockLoader = LazyLoader.make((function (blockNumber) {
-  return getKnownBlockWithBackoff(provider, name, chain, blockNumber, 1000, lowercaseAddresses);
-}), (function (am, exn) {
-  Logging.error({
-    err: Utils.prettifyExn(exn),
-    msg: "EE1100: Top level promise timeout reached. Please review other errors or warnings in the code. This function will retry in " + String(am._retryDelayMillis / 1000 | 0) + " seconds. It is highly likely that your indexer isn't syncing on one or more chains currently. Also take a look at the \"suggestedFix\" in the metadata of this command",
-    source: name,
-    chainId: chain,
-    metadata: {
-      asyncTaskName: "blockLoader: fetching block data - `getBlock` rpc call",
-      suggestedFix: "This likely means the RPC url you are using is not responding correctly. Please try another RPC endipoint."
-    }
-  });
-}), undefined, undefined, undefined, undefined);
+var makeBlockLoader = function () {
+  return LazyLoader.make((function (blockNumber) {
+    return getKnownBlockWithBackoff(provider, name, chain, blockNumber, 1000, lowercaseAddresses);
+  }), (function (am, exn) {
+    Logging.error({
+      err: Utils.prettifyExn(exn),
+      msg: "EE1100: Top level promise timeout reached. Please review other errors or warnings in the code. This function will retry in " + String(am._retryDelayMillis / 1000 | 0) + " seconds. It is highly likely that your indexer isn't syncing on one or more chains currently. Also take a look at the \"suggestedFix\" in the metadata of this command",
+      source: name,
+      chainId: chain,
+      metadata: {
+        asyncTaskName: "blockLoader: fetching block data - `getBlock` rpc call",
+        suggestedFix: "This likely means the RPC url you are using is not responding correctly. Please try another RPC endipoint."
+      }
+    });
+  }), undefined, undefined, undefined, undefined);
+};
+var blockLoader = {
+  contents: makeBlockLoader()
+};
 var getEventBlockOrThrow = makeThrowingGetEventBlock(function (blockNumber) {
-  return LazyLoader.get(blockLoader, blockNumber);
+  return LazyLoader.get(blockLoader.contents, blockNumber);
 });
 var getEventTransactionOrThrow = makeThrowingGetEventTransaction(Ethers.JsonRpcProvider.makeGetTransactionFields((function (__x) {
   return LazyLoader.get(transactionLoader, __x);
@@ -601,13 +606,13 @@ function make(param) {
   var suggestedBlockInterval = maxSuggestedBlockInterval !== undefined ? maxSuggestedBlockInterval : Belt_Option.getWithDefault(mutSuggestedBlockIntervals[partitionId], syncConfig.initialBlockInterval);
   var toBlock$1 = toBlock !== undefined && toBlock < currentBlockHeight ? toBlock : currentBlockHeight;
   var suggestedToBlock = Caml.int_max(Caml.int_min((fromBlock + suggestedBlockInterval | 0) - 1 | 0, toBlock$1), fromBlock);
-  var firstBlockParentPromise = fromBlock > 0 ? LazyLoader.get(blockLoader, fromBlock - 1 | 0).then(function (res) {
+  var firstBlockParentPromise = fromBlock > 0 ? LazyLoader.get(blockLoader.contents, fromBlock - 1 | 0).then(function (res) {
     return res;
   }) : Promise.resolve(undefined);
   var match = getSelectionConfig(selection);
   var match$1 = match.getLogSelectionOrThrow(addressesByContractName);
   var match$2 = await getNextPage(fromBlock, suggestedToBlock, match$1.addresses, match$1.topicQuery, (function (blockNumber) {
-    return LazyLoader.get(blockLoader, blockNumber);
+    return LazyLoader.get(blockLoader.contents, blockNumber);
   }), syncConfig, provider, mutSuggestedBlockIntervals, partitionId);
   var latestFetchedBlock = match$2.latestFetchedBlock;
   var logs = match$2.logs;
@@ -784,8 +789,9 @@ function make(param) {
   };
 };
 var getBlockHashes = function (blockNumbers, _currentlyUnusedLogger) {
+  blockLoader.contents = makeBlockLoader();
  return $$Promise.$$catch(Promise.all(Belt_Array.map(blockNumbers, (function (blockNum) {
-    return LazyLoader.get(blockLoader, blockNum);
+    return LazyLoader.get(blockLoader.contents, blockNum);
  }))).then(function (blocks) {
    return {
      TAG: "Ok",