envio 2.21.5 → 2.22.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/index.js CHANGED
@@ -2,7 +2,7 @@
2
2
  // Some parts like Sury reexport are impossible to implement
3
3
  // on the JS side, so we need to do it here
4
4
 
5
- const envioGen = require("./src/Envio.bs.js");
5
+ const envioGen = require("./src/Envio.res.js");
6
6
  Object.assign(exports, envioGen);
7
7
 
8
8
  const Sury = require("rescript-schema");
@@ -30,7 +30,7 @@ exports.S = {
30
30
  merge: Sury.merge,
31
31
  optional: Sury.optional,
32
32
  nullable: Sury.nullable,
33
- bigDecimal: require("./src/bindings/BigDecimal.bs.js").schema,
33
+ bigDecimal: require("./src/bindings/BigDecimal.res.js").schema,
34
34
  // Nullish type will change in "sury@10"
35
35
  // nullish: Sury.nullish,
36
36
  assertOrThrow: Sury.assertOrThrow,
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "envio",
3
- "version": "v2.21.5",
3
+ "version": "v2.22.0",
4
4
  "description": "A latency and sync speed optimized, developer friendly blockchain data indexer.",
5
5
  "bin": "./bin.js",
6
6
  "main": "./index.js",
@@ -25,16 +25,19 @@
25
25
  },
26
26
  "homepage": "https://envio.dev",
27
27
  "optionalDependencies": {
28
- "envio-linux-x64": "v2.21.5",
29
- "envio-linux-arm64": "v2.21.5",
30
- "envio-darwin-x64": "v2.21.5",
31
- "envio-darwin-arm64": "v2.21.5"
28
+ "envio-linux-x64": "v2.22.0",
29
+ "envio-linux-arm64": "v2.22.0",
30
+ "envio-darwin-x64": "v2.22.0",
31
+ "envio-darwin-arm64": "v2.22.0"
32
32
  },
33
33
  "dependencies": {
34
34
  "@envio-dev/hypersync-client": "0.6.5",
35
35
  "rescript": "11.1.3",
36
36
  "rescript-schema": "9.3.0",
37
- "viem": "2.21.0"
37
+ "viem": "2.21.0",
38
+ "bignumber.js": "9.1.2",
39
+ "pino": "8.16.1",
40
+ "pino-pretty": "10.2.3"
38
41
  },
39
42
  "files": [
40
43
  "bin.js",
@@ -43,7 +46,6 @@
43
46
  "rescript.json",
44
47
  "index.d.ts",
45
48
  "index.js",
46
- "src",
47
- "!src/**/*.bs.js"
49
+ "src"
48
50
  ]
49
51
  }
package/rescript.json CHANGED
@@ -7,7 +7,7 @@
7
7
  "subdirs": true
8
8
  }
9
9
  ],
10
- "suffix": ".bs.js",
10
+ "suffix": ".res.js",
11
11
  "package-specs": {
12
12
  "module": "commonjs",
13
13
  "in-source": true
package/src/Envio.res CHANGED
@@ -39,6 +39,7 @@ let experimental_createEffect = (
39
39
  options: effectOptions<'input, 'output>,
40
40
  handler: effectArgs<'input> => promise<'output>,
41
41
  ) => {
42
+ Prometheus.EffectCallsCount.set(~callsCount=0, ~effectName=options.name)
42
43
  {
43
44
  name: options.name,
44
45
  handler: handler->(
@@ -46,5 +47,6 @@ let experimental_createEffect = (
46
47
  Internal.effectOutput,
47
48
  >
48
49
  ),
50
+ callsCount: 0,
49
51
  }->(Utils.magic: Internal.effect => effect<'input, 'output>)
50
52
  }
@@ -33,29 +33,3 @@ let logAndRaise = self => {
33
33
  self->log
34
34
  self->raiseExn
35
35
  }
36
-
37
- /**
38
- An environment to manage control flow propogating results
39
- with Error that contain ErrorHandling.t in async
40
- contexts and avoid nested switch statements on awaited promises
41
- Similar to rust result propogation
42
- */
43
- module ResultPropogateEnv = {
44
- exception ErrorHandlingEarlyReturn(t)
45
-
46
- type resultWithErrorHandle<'a> = result<'a, t>
47
- type asyncBody<'a> = unit => promise<resultWithErrorHandle<'a>>
48
-
49
- let runAsyncEnv = async (body: asyncBody<'a>) => {
50
- switch await body() {
51
- | exception ErrorHandlingEarlyReturn(e) => Error(e)
52
- | endReturn => endReturn
53
- }
54
- }
55
-
56
- let propogate = (res: resultWithErrorHandle<'a>) =>
57
- switch res {
58
- | Ok(v) => v
59
- | Error(e) => raise(ErrorHandlingEarlyReturn(e))
60
- }
61
- }
package/src/Hasura.res ADDED
@@ -0,0 +1,297 @@
1
+ type auth = {
2
+ role: string,
3
+ secret: string,
4
+ }
5
+
6
+ type validHasuraResponse = QuerySucceeded | AlreadyDone
7
+
8
+ let auth = (s: Rest.s) => {
9
+ role: s.header("X-Hasura-Role", S.string),
10
+ secret: s.header("X-Hasura-Admin-Secret", S.string),
11
+ }
12
+
13
+ let responses = [
14
+ (s: Rest.Response.s) => {
15
+ s.status(200)
16
+ let _ = s.data(S.unknown)
17
+ QuerySucceeded
18
+ },
19
+ s => {
20
+ let _ = s.field("code", S.enum(["already-exists", "already-tracked"]))
21
+ AlreadyDone
22
+ },
23
+ ]
24
+
25
+ let clearMetadataRoute = Rest.route(() => {
26
+ method: Post,
27
+ path: "",
28
+ input: s => {
29
+ let _ = s.field("type", S.literal("clear_metadata"))
30
+ let _ = s.field("args", S.literal(Js.Obj.empty()))
31
+ s->auth
32
+ },
33
+ responses,
34
+ })
35
+
36
+ let trackTablesRoute = Rest.route(() => {
37
+ method: Post,
38
+ path: "",
39
+ input: s => {
40
+ let _ = s.field("type", S.literal("pg_track_tables"))
41
+ {
42
+ "args": s.field("args", S.json(~validate=false)),
43
+ "auth": s->auth,
44
+ }
45
+ },
46
+ responses,
47
+ })
48
+
49
+ let createSelectPermissionRoute = Rest.route(() => {
50
+ method: Post,
51
+ path: "",
52
+ input: s => {
53
+ let _ = s.field("type", S.literal("pg_create_select_permission"))
54
+ {
55
+ "args": s.field("args", S.json(~validate=false)),
56
+ "auth": s->auth,
57
+ }
58
+ },
59
+ responses,
60
+ })
61
+
62
+ let rawBodyRoute = Rest.route(() => {
63
+ method: Post,
64
+ path: "",
65
+ input: s => {
66
+ {
67
+ "bodyString": s.rawBody(S.string),
68
+ "auth": s->auth,
69
+ }
70
+ },
71
+ responses,
72
+ })
73
+
74
+ let clearHasuraMetadata = async (~endpoint, ~auth) => {
75
+ try {
76
+ let result = await clearMetadataRoute->Rest.fetch(auth, ~client=Rest.client(endpoint))
77
+ let msg = switch result {
78
+ | QuerySucceeded => "Metadata Cleared"
79
+ | AlreadyDone => "Metadata Already Cleared"
80
+ }
81
+ Logging.trace(msg)
82
+ } catch {
83
+ | exn =>
84
+ Logging.error({
85
+ "msg": `EE806: There was an issue clearing metadata in hasura - indexing may still work - but you may have issues querying the data in hasura.`,
86
+ "err": exn->Internal.prettifyExn,
87
+ })
88
+ }
89
+ }
90
+
91
+ let trackTables = async (~endpoint, ~auth, ~pgSchema, ~tableNames: array<string>) => {
92
+ try {
93
+ let result = await trackTablesRoute->Rest.fetch(
94
+ {
95
+ "auth": auth,
96
+ "args": {
97
+ // If set to false, any warnings will cause the API call to fail and no new tables to be tracked. Otherwise tables that fail to track will be raised as warnings. (default: true)
98
+ "allow_warnings": false,
99
+ "tables": tableNames->Js.Array2.map(tableName =>
100
+ {
101
+ "table": {
102
+ "name": tableName,
103
+ "schema": pgSchema,
104
+ },
105
+ "configuration": {
106
+ // Otherwise the entity in gql will be prefixed with the schema name (when it's not public)
107
+ "custom_name": tableName,
108
+ },
109
+ }
110
+ ),
111
+ }->(Utils.magic: 'a => Js.Json.t),
112
+ },
113
+ ~client=Rest.client(endpoint),
114
+ )
115
+ let msg = switch result {
116
+ | QuerySucceeded => "Tables Tracked"
117
+ | AlreadyDone => "Table Already Tracked"
118
+ }
119
+ Logging.trace({
120
+ "msg": msg,
121
+ "tableNames": tableNames,
122
+ })
123
+ } catch {
124
+ | exn =>
125
+ Logging.error({
126
+ "msg": `EE807: There was an issue tracking tables in hasura - indexing may still work - but you may have issues querying the data in hasura.`,
127
+ "tableNames": tableNames,
128
+ "err": exn->Internal.prettifyExn,
129
+ })
130
+ }
131
+ }
132
+
133
+ let createSelectPermissions = async (
134
+ ~auth,
135
+ ~endpoint,
136
+ ~tableName: string,
137
+ ~pgSchema,
138
+ ~responseLimit,
139
+ ~aggregateEntities,
140
+ ) => {
141
+ try {
142
+ let result = await createSelectPermissionRoute->Rest.fetch(
143
+ {
144
+ "auth": auth,
145
+ "args": {
146
+ "table": {
147
+ "schema": pgSchema,
148
+ "name": tableName,
149
+ },
150
+ "role": "public",
151
+ "source": "default",
152
+ "permission": {
153
+ "columns": "*",
154
+ "filter": Js.Obj.empty(),
155
+ "limit": responseLimit,
156
+ "allow_aggregations": aggregateEntities->Js.Array2.includes(tableName),
157
+ },
158
+ }->(Utils.magic: 'a => Js.Json.t),
159
+ },
160
+ ~client=Rest.client(endpoint),
161
+ )
162
+ let msg = switch result {
163
+ | QuerySucceeded => "Hasura select permissions created"
164
+ | AlreadyDone => "Hasura select permissions already created"
165
+ }
166
+ Logging.trace({
167
+ "msg": msg,
168
+ "tableName": tableName,
169
+ })
170
+ } catch {
171
+ | exn =>
172
+ Logging.error({
173
+ "msg": `EE808: There was an issue setting up view permissions for the ${tableName} table in hasura - indexing may still work - but you may have issues querying the data in hasura.`,
174
+ "tableName": tableName,
175
+ "err": exn->Internal.prettifyExn,
176
+ })
177
+ }
178
+ }
179
+
180
+ let createEntityRelationship = async (
181
+ ~pgSchema,
182
+ ~endpoint,
183
+ ~auth,
184
+ ~tableName: string,
185
+ ~relationshipType: string,
186
+ ~relationalKey: string,
187
+ ~objectName: string,
188
+ ~mappedEntity: string,
189
+ ~isDerivedFrom: bool,
190
+ ) => {
191
+ let derivedFromTo = isDerivedFrom ? `"id": "${relationalKey}"` : `"${relationalKey}_id" : "id"`
192
+
193
+ let bodyString = `{"type": "pg_create_${relationshipType}_relationship","args": {"table": {"schema": "${pgSchema}", "name": "${tableName}"},"name": "${objectName}","source": "default","using": {"manual_configuration": {"remote_table": {"schema": "${pgSchema}", "name": "${mappedEntity}"},"column_mapping": {${derivedFromTo}}}}}}`
194
+
195
+ try {
196
+ let result = await rawBodyRoute->Rest.fetch(
197
+ {
198
+ "auth": auth,
199
+ "bodyString": bodyString,
200
+ },
201
+ ~client=Rest.client(endpoint),
202
+ )
203
+ let msg = switch result {
204
+ | QuerySucceeded => `Hasura ${relationshipType} relationship created`
205
+ | AlreadyDone => `Hasura ${relationshipType} relationship already created`
206
+ }
207
+ Logging.trace({
208
+ "msg": msg,
209
+ "tableName": tableName,
210
+ })
211
+ } catch {
212
+ | exn =>
213
+ Logging.error({
214
+ "msg": `EE808: There was an issue setting up ${relationshipType} relationship for the ${tableName} table in hasura - indexing may still work - but you may have issues querying the data in hasura.`,
215
+ "tableName": tableName,
216
+ "err": exn->Internal.prettifyExn,
217
+ })
218
+ }
219
+ }
220
+
221
+ let trackDatabase = async (
222
+ ~endpoint,
223
+ ~auth,
224
+ ~pgSchema,
225
+ ~allStaticTables,
226
+ ~allEntityTables,
227
+ ~aggregateEntities,
228
+ ~responseLimit,
229
+ ~schema,
230
+ ) => {
231
+ Logging.info("Tracking tables in Hasura")
232
+
233
+ let _ = await clearHasuraMetadata(~endpoint, ~auth)
234
+ let tableNames =
235
+ [allStaticTables, allEntityTables]
236
+ ->Belt.Array.concatMany
237
+ ->Js.Array2.map(({tableName}: Table.table) => tableName)
238
+
239
+ await trackTables(~endpoint, ~auth, ~pgSchema, ~tableNames)
240
+
241
+ let _ =
242
+ await tableNames
243
+ ->Js.Array2.map(tableName =>
244
+ createSelectPermissions(
245
+ ~endpoint,
246
+ ~auth,
247
+ ~tableName,
248
+ ~pgSchema,
249
+ ~responseLimit,
250
+ ~aggregateEntities,
251
+ )
252
+ )
253
+ ->Js.Array2.concatMany(
254
+ allEntityTables->Js.Array2.map(table => {
255
+ let {tableName} = table
256
+ [
257
+ //Set array relationships
258
+ table
259
+ ->Table.getDerivedFromFields
260
+ ->Js.Array2.map(derivedFromField => {
261
+ //determines the actual name of the underlying relational field (if it's an entity mapping then suffixes _id for eg.)
262
+ let relationalFieldName =
263
+ schema->Schema.getDerivedFromFieldName(derivedFromField)->Utils.unwrapResultExn
264
+
265
+ createEntityRelationship(
266
+ ~endpoint,
267
+ ~auth,
268
+ ~pgSchema,
269
+ ~tableName,
270
+ ~relationshipType="array",
271
+ ~isDerivedFrom=true,
272
+ ~objectName=derivedFromField.fieldName,
273
+ ~relationalKey=relationalFieldName,
274
+ ~mappedEntity=derivedFromField.derivedFromEntity,
275
+ )
276
+ }),
277
+ //Set object relationships
278
+ table
279
+ ->Table.getLinkedEntityFields
280
+ ->Js.Array2.map(((field, linkedEntityName)) => {
281
+ createEntityRelationship(
282
+ ~endpoint,
283
+ ~auth,
284
+ ~pgSchema,
285
+ ~tableName,
286
+ ~relationshipType="object",
287
+ ~isDerivedFrom=false,
288
+ ~objectName=field.fieldName,
289
+ ~relationalKey=field.fieldName,
290
+ ~mappedEntity=linkedEntityName,
291
+ )
292
+ }),
293
+ ]->Utils.Array.flatten
294
+ }),
295
+ )
296
+ ->Promise.all
297
+ }
package/src/Internal.res CHANGED
@@ -160,6 +160,31 @@ let fuelTransferParamsSchema = S.schema(s => {
160
160
  })
161
161
 
162
162
  type entity = private {id: string}
163
+ type entityConfig = {
164
+ name: string,
165
+ schema: S.t<entity>,
166
+ rowsSchema: S.t<array<entity>>,
167
+ table: Table.table,
168
+ entityHistory: EntityHistory.t<entity>,
169
+ }
170
+ type enum
171
+ type enumConfig<'enum> = {
172
+ name: string,
173
+ variants: array<'enum>,
174
+ schema: S.t<'enum>,
175
+ default: 'enum,
176
+ }
177
+ external fromGenericEnumConfig: enumConfig<'enum> => enumConfig<enum> = "%identity"
178
+
179
+ let makeEnumConfig = (~name, ~variants) => {
180
+ name,
181
+ variants,
182
+ schema: S.enum(variants),
183
+ default: switch variants->Belt.Array.get(0) {
184
+ | Some(v) => v
185
+ | None => Js.Exn.raiseError("No variants defined for enum " ++ name)
186
+ },
187
+ }
163
188
 
164
189
  type effectInput
165
190
  type effectOutput
@@ -172,6 +197,7 @@ type effectArgs = {
172
197
  type effect = {
173
198
  name: string,
174
199
  handler: effectArgs => promise<effectOutput>,
200
+ mutable callsCount: int,
175
201
  }
176
202
 
177
203
  @genType.import(("./Types.ts", "Invalid"))
package/src/Logging.res CHANGED
@@ -26,7 +26,7 @@ let logLevels = [
26
26
 
27
27
  %%private(let logger = ref(None))
28
28
 
29
- let setLogger = (~logStrategy, ~logFilePath, ~defaultFileLogLevel, ~userLogLevel) => {
29
+ let makeLogger = (~logStrategy, ~logFilePath, ~defaultFileLogLevel, ~userLogLevel) => {
30
30
  // Currently unused - useful if using multiple transports.
31
31
  // let pinoRaw = {"target": "pino/file", "level": Config.userLogLevel}
32
32
  let pinoFile: Transport.transportTarget = {
@@ -42,38 +42,38 @@ let setLogger = (~logStrategy, ~logFilePath, ~defaultFileLogLevel, ~userLogLevel
42
42
  let makeMultiStreamLogger =
43
43
  MultiStreamLogger.make(~userLogLevel, ~defaultFileLogLevel, ~customLevels=logLevels, ...)
44
44
 
45
- logger :=
46
- Some(
47
- switch logStrategy {
48
- | EcsFile =>
49
- makeWithOptionsAndTransport(
50
- {
51
- ...Pino.ECS.make(),
52
- customLevels: logLevels,
53
- },
54
- Transport.make(pinoFile),
55
- )
56
- | EcsConsoleMultistream =>
57
- makeMultiStreamLogger(~logFile=None, ~options=Some(Pino.ECS.make()))
58
- | EcsConsole =>
59
- make({
60
- ...Pino.ECS.make(),
61
- level: userLogLevel,
62
- customLevels: logLevels,
63
- })
64
- | FileOnly =>
65
- makeWithOptionsAndTransport(
66
- {
67
- customLevels: logLevels,
68
- level: defaultFileLogLevel,
69
- },
70
- Transport.make(pinoFile),
71
- )
72
- | ConsoleRaw => makeMultiStreamLogger(~logFile=None, ~options=None)
73
- | ConsolePretty => makeMultiStreamLogger(~logFile=None, ~options=None)
74
- | Both => makeMultiStreamLogger(~logFile=Some(logFilePath), ~options=None)
45
+ switch logStrategy {
46
+ | EcsFile =>
47
+ makeWithOptionsAndTransport(
48
+ {
49
+ ...Pino.ECS.make(),
50
+ customLevels: logLevels,
75
51
  },
52
+ Transport.make(pinoFile),
76
53
  )
54
+ | EcsConsoleMultistream => makeMultiStreamLogger(~logFile=None, ~options=Some(Pino.ECS.make()))
55
+ | EcsConsole =>
56
+ make({
57
+ ...Pino.ECS.make(),
58
+ level: userLogLevel,
59
+ customLevels: logLevels,
60
+ })
61
+ | FileOnly =>
62
+ makeWithOptionsAndTransport(
63
+ {
64
+ customLevels: logLevels,
65
+ level: defaultFileLogLevel,
66
+ },
67
+ Transport.make(pinoFile),
68
+ )
69
+ | ConsoleRaw => makeMultiStreamLogger(~logFile=None, ~options=None)
70
+ | ConsolePretty => makeMultiStreamLogger(~logFile=None, ~options=None)
71
+ | Both => makeMultiStreamLogger(~logFile=Some(logFilePath), ~options=None)
72
+ }
73
+ }
74
+
75
+ let setLogger = l => {
76
+ logger := Some(l)
77
77
  }
78
78
 
79
79
  let getLogger = () => {
@@ -0,0 +1,111 @@
1
+ // A module for the persistence layer
2
+ // This is currently in a WIP state
3
+ // but in the future we should route all DB and in-memory state
4
+ // interactions through this layer with DI, making it easy to test.
5
+ // Currently there is quite a lot of code spread across
6
+ // DbFunctions, Db, Migrations, InMemoryStore modules which use codegen code directly.
7
+
8
+ type storage = {
9
+ // Should return true if we already have persisted data
10
+ // and we can skip initialization
11
+ isInitialized: unit => promise<bool>,
12
+ // Should initialize the storage so we can start interacting with it
13
+ // Eg create connection, schema, tables, etc.
14
+ initialize: (
15
+ ~entities: array<Internal.entityConfig>,
16
+ ~staticTables: array<Table.table>,
17
+ ~enums: array<Internal.enumConfig<Internal.enum>>,
18
+ // If true, the storage should clear existing data
19
+ ~cleanRun: bool,
20
+ ) => promise<unit>,
21
+ }
22
+
23
+ type storageStatus =
24
+ | Unknown
25
+ | Initializing(promise<unit>)
26
+ | Ready({cleanRun: bool})
27
+
28
+ type t = {
29
+ userEntities: array<Internal.entityConfig>,
30
+ staticTables: array<Table.table>,
31
+ allEntities: array<Internal.entityConfig>,
32
+ allEnums: array<Internal.enumConfig<Internal.enum>>,
33
+ mutable storageStatus: storageStatus,
34
+ storage: storage,
35
+ onStorageInitialize: option<unit => promise<unit>>,
36
+ }
37
+
38
+ let entityHistoryActionEnumConfig: Internal.enumConfig<EntityHistory.RowAction.t> = {
39
+ name: EntityHistory.RowAction.name,
40
+ variants: EntityHistory.RowAction.variants,
41
+ schema: EntityHistory.RowAction.schema,
42
+ default: SET,
43
+ }
44
+
45
+ let make = (
46
+ ~userEntities,
47
+ ~dcRegistryEntityConfig,
48
+ // TODO: Should only pass userEnums and create internal config in runtime
49
+ ~allEnums,
50
+ ~staticTables,
51
+ ~storage,
52
+ ~onStorageInitialize=?,
53
+ ) => {
54
+ let allEntities = userEntities->Js.Array2.concat([dcRegistryEntityConfig])
55
+ let allEnums =
56
+ allEnums->Js.Array2.concat([entityHistoryActionEnumConfig->Internal.fromGenericEnumConfig])
57
+ {
58
+ userEntities,
59
+ staticTables,
60
+ allEntities,
61
+ allEnums,
62
+ storageStatus: Unknown,
63
+ storage,
64
+ onStorageInitialize,
65
+ }
66
+ }
67
+
68
+ let init = async (
69
+ persistence,
70
+ // There is not much sense in the option,
71
+ // but this is how the runUpMigration used to work
72
+ // and we want to keep the upsert behavior without breaking changes.
73
+ ~skipIsInitializedCheck=false,
74
+ ~reset=false,
75
+ ) => {
76
+ try {
77
+ let shouldRun = switch persistence.storageStatus {
78
+ | Unknown => true
79
+ | Initializing(promise) => {
80
+ await promise
81
+ reset
82
+ }
83
+ | Ready(_) => reset
84
+ }
85
+ if shouldRun {
86
+ let resolveRef = ref(%raw(`null`))
87
+ let promise = Promise.make((resolve, _) => {
88
+ resolveRef := resolve
89
+ })
90
+ persistence.storageStatus = Initializing(promise)
91
+ if !(reset || skipIsInitializedCheck) && (await persistence.storage.isInitialized()) {
92
+ persistence.storageStatus = Ready({cleanRun: false})
93
+ } else {
94
+ let _ = await persistence.storage.initialize(
95
+ ~entities=persistence.allEntities,
96
+ ~staticTables=persistence.staticTables,
97
+ ~enums=persistence.allEnums,
98
+ ~cleanRun=reset || !skipIsInitializedCheck,
99
+ )
100
+ persistence.storageStatus = Ready({cleanRun: true})
101
+ switch persistence.onStorageInitialize {
102
+ | Some(onStorageInitialize) => await onStorageInitialize()
103
+ | None => ()
104
+ }
105
+ }
106
+ resolveRef.contents()
107
+ }
108
+ } catch {
109
+ | exn => exn->ErrorHandling.mkLogAndRaise(~msg=`EE800: Failed to initialize the indexer storage.`)
110
+ }
111
+ }
@@ -0,0 +1,165 @@
1
+ let makeCreateIndexSqlUnsafe = (~tableName, ~indexFields, ~pgSchema) => {
2
+ let indexName = tableName ++ "_" ++ indexFields->Js.Array2.joinWith("_")
3
+ let index = indexFields->Belt.Array.map(idx => `"${idx}"`)->Js.Array2.joinWith(", ")
4
+ `CREATE INDEX IF NOT EXISTS "${indexName}" ON "${pgSchema}"."${tableName}"(${index});`
5
+ }
6
+
7
+ let makeCreateTableIndicesSqlUnsafe = (table: Table.table, ~pgSchema) => {
8
+ open Belt
9
+ let tableName = table.tableName
10
+ let createIndex = indexField =>
11
+ makeCreateIndexSqlUnsafe(~tableName, ~indexFields=[indexField], ~pgSchema)
12
+ let createCompositeIndex = indexFields => {
13
+ makeCreateIndexSqlUnsafe(~tableName, ~indexFields, ~pgSchema)
14
+ }
15
+
16
+ let singleIndices = table->Table.getSingleIndices
17
+ let compositeIndices = table->Table.getCompositeIndices
18
+
19
+ singleIndices->Array.map(createIndex)->Js.Array2.joinWith("\n") ++
20
+ compositeIndices->Array.map(createCompositeIndex)->Js.Array2.joinWith("\n")
21
+ }
22
+
23
+ let makeCreateTableSqlUnsafe = (table: Table.table, ~pgSchema) => {
24
+ open Belt
25
+ let fieldsMapped =
26
+ table
27
+ ->Table.getFields
28
+ ->Array.map(field => {
29
+ let {fieldType, isNullable, isArray, defaultValue} = field
30
+ let fieldName = field->Table.getDbFieldName
31
+
32
+ {
33
+ `"${fieldName}" ${switch fieldType {
34
+ | Custom(name) if !(name->Js.String2.startsWith("NUMERIC(")) => `"${pgSchema}".${name}`
35
+ | _ => (fieldType :> string)
36
+ }}${isArray ? "[]" : ""}${switch defaultValue {
37
+ | Some(defaultValue) => ` DEFAULT ${defaultValue}`
38
+ | None => isNullable ? `` : ` NOT NULL`
39
+ }}`
40
+ }
41
+ })
42
+ ->Js.Array2.joinWith(", ")
43
+
44
+ let primaryKeyFieldNames = table->Table.getPrimaryKeyFieldNames
45
+ let primaryKey =
46
+ primaryKeyFieldNames
47
+ ->Array.map(field => `"${field}"`)
48
+ ->Js.Array2.joinWith(", ")
49
+
50
+ `CREATE TABLE IF NOT EXISTS "${pgSchema}"."${table.tableName}"(${fieldsMapped}${primaryKeyFieldNames->Array.length > 0
51
+ ? `, PRIMARY KEY(${primaryKey})`
52
+ : ""});`
53
+ }
54
+
55
+ let makeInitializeTransaction = (~pgSchema, ~staticTables, ~entities, ~enums, ~cleanRun) => {
56
+ let allTables = staticTables->Array.copy
57
+ let allEntityTables = []
58
+ entities->Js.Array2.forEach((entity: Internal.entityConfig) => {
59
+ allEntityTables->Js.Array2.push(entity.table)->ignore
60
+ allTables->Js.Array2.push(entity.table)->ignore
61
+ allTables->Js.Array2.push(entity.entityHistory.table)->ignore
62
+ })
63
+ let derivedSchema = Schema.make(allEntityTables)
64
+
65
+ let query = ref(
66
+ (
67
+ cleanRun
68
+ ? `DROP SCHEMA IF EXISTS "${pgSchema}" CASCADE;
69
+ CREATE SCHEMA "${pgSchema}";`
70
+ : `CREATE SCHEMA IF NOT EXISTS "${pgSchema}";`
71
+ ) ++
72
+ `GRANT ALL ON SCHEMA "${pgSchema}" TO postgres;
73
+ GRANT ALL ON SCHEMA "${pgSchema}" TO public;`,
74
+ )
75
+
76
+ // Optimized enum creation - direct when cleanRun, conditional otherwise
77
+ enums->Js.Array2.forEach((enumConfig: Internal.enumConfig<Internal.enum>) => {
78
+ // Create base enum creation query once
79
+ let enumCreateQuery = `CREATE TYPE "${pgSchema}".${enumConfig.name} AS ENUM(${enumConfig.variants
80
+ ->Js.Array2.map(v => `'${v->(Utils.magic: Internal.enum => string)}'`)
81
+ ->Js.Array2.joinWith(", ")});`
82
+
83
+ query :=
84
+ query.contents ++
85
+ "\n" ++ if cleanRun {
86
+ // Direct creation when doing a clean run (faster)
87
+ enumCreateQuery
88
+ } else {
89
+ // Wrap with conditional check only when not doing a clean run
90
+ `IF NOT EXISTS (
91
+ SELECT 1 FROM pg_type
92
+ WHERE typname = '${enumConfig.name->Js.String2.toLowerCase}'
93
+ AND typnamespace = (SELECT oid FROM pg_namespace WHERE nspname = '${pgSchema}')
94
+ ) THEN
95
+ ${enumCreateQuery}
96
+ END IF;`
97
+ }
98
+ })
99
+
100
+ // Batch all table creation first (optimal for PostgreSQL)
101
+ allTables->Js.Array2.forEach((table: Table.table) => {
102
+ query := query.contents ++ "\n" ++ makeCreateTableSqlUnsafe(table, ~pgSchema)
103
+ })
104
+
105
+ // Then batch all indices (better performance when tables exist)
106
+ allTables->Js.Array2.forEach((table: Table.table) => {
107
+ let indices = makeCreateTableIndicesSqlUnsafe(table, ~pgSchema)
108
+ if indices !== "" {
109
+ query := query.contents ++ "\n" ++ indices
110
+ }
111
+ })
112
+
113
+ let functionsQuery = ref("")
114
+
115
+ // Add derived indices
116
+ entities->Js.Array2.forEach((entity: Internal.entityConfig) => {
117
+ functionsQuery := functionsQuery.contents ++ "\n" ++ entity.entityHistory.createInsertFnQuery
118
+
119
+ entity.table
120
+ ->Table.getDerivedFromFields
121
+ ->Js.Array2.forEach(derivedFromField => {
122
+ let indexField =
123
+ derivedSchema->Schema.getDerivedFromFieldName(derivedFromField)->Utils.unwrapResultExn
124
+ query :=
125
+ query.contents ++
126
+ "\n" ++
127
+ makeCreateIndexSqlUnsafe(
128
+ ~tableName=derivedFromField.derivedFromEntity,
129
+ ~indexFields=[indexField],
130
+ ~pgSchema,
131
+ )
132
+ })
133
+ })
134
+
135
+ [
136
+ // Return optimized queries - main DDL in DO block, functions separate
137
+ // Note: DO $$ BEGIN wrapper is only needed for PL/pgSQL conditionals (IF NOT EXISTS)
138
+ // Clean-run case uses direct DDL (faster); non-clean-run case uses conditionals (safer)
139
+ cleanRun ? query.contents : `DO $$ BEGIN ${query.contents} END $$;`,
140
+ // Functions query (separate as they can't be in DO block)
141
+ ]->Js.Array2.concat(functionsQuery.contents !== "" ? [functionsQuery.contents] : [])
142
+ }
143
+
144
+ let make = (~sql: Postgres.sql, ~pgSchema): Persistence.storage => {
145
+ let isInitialized = async () => {
146
+ let schemas =
147
+ await sql->Postgres.unsafe(
148
+ `SELECT schema_name FROM information_schema.schemata WHERE schema_name = '${pgSchema}';`,
149
+ )
150
+ schemas->Utils.Array.notEmpty
151
+ }
152
+
153
+ let initialize = async (~entities, ~staticTables, ~enums, ~cleanRun) => {
154
+ let queries = makeInitializeTransaction(~pgSchema, ~staticTables, ~entities, ~enums, ~cleanRun)
155
+ // Execute all queries within a single transaction for integrity
156
+ let _ = await sql->Postgres.beginSql(sql => {
157
+ queries->Js.Array2.map(query => sql->Postgres.unsafe(query))
158
+ })
159
+ }
160
+
161
+ {
162
+ isInitialized,
163
+ initialize,
164
+ }
165
+ }
@@ -16,12 +16,6 @@ let executeBatchDurationCounter = PromClient.Counter.makeCounter({
16
16
  "labelNames": [],
17
17
  })
18
18
 
19
- let eventsProcessedCounter = PromClient.Gauge.makeGauge({
20
- "name": "events_processed",
21
- "help": "Total number of events processed",
22
- "labelNames": ["chainId"],
23
- })
24
-
25
19
  let allChainsSyncedToHead = PromClient.Gauge.makeGauge({
26
20
  "name": "hyperindex_synced_to_head",
27
21
  "help": "All chains fully synced",
@@ -207,12 +201,6 @@ module BenchmarkSummaryData = {
207
201
  }
208
202
  }
209
203
 
210
- let processedUntilHeight = PromClient.Gauge.makeGauge({
211
- "name": "chain_block_height_processed",
212
- "help": "Block height processed by indexer",
213
- "labelNames": ["chainId"],
214
- })
215
-
216
204
  let incrementLoadEntityDurationCounter = (~duration) => {
217
205
  loadEntitiesDurationCounter->PromClient.Counter.incMany(duration)
218
206
  }
@@ -225,12 +213,6 @@ let incrementExecuteBatchDurationCounter = (~duration) => {
225
213
  executeBatchDurationCounter->PromClient.Counter.incMany(duration)
226
214
  }
227
215
 
228
- let setEventsProcessedGuage = (~number, ~chainId) => {
229
- eventsProcessedCounter
230
- ->PromClient.Gauge.labels({"chainId": chainId})
231
- ->PromClient.Gauge.set(number)
232
- }
233
-
234
216
  let setSourceChainHeight = (~blockNumber, ~chain) => {
235
217
  sourceChainHeight
236
218
  ->PromClient.Gauge.labels({"chainId": chain->ChainMap.Chain.toString})
@@ -241,12 +223,6 @@ let setAllChainsSyncedToHead = () => {
241
223
  allChainsSyncedToHead->PromClient.Gauge.set(1)
242
224
  }
243
225
 
244
- let setProcessedUntilHeight = (~blockNumber, ~chain) => {
245
- processedUntilHeight
246
- ->PromClient.Gauge.labels({"chainId": chain->ChainMap.Chain.toString})
247
- ->PromClient.Gauge.set(blockNumber)
248
- }
249
-
250
226
  module BenchmarkCounters = {
251
227
  type labels = {label: string}
252
228
  let labelSchema = S.schema(s => {
@@ -304,18 +280,6 @@ module Info = {
304
280
  }
305
281
  }
306
282
 
307
- module ProgressBlockNumber = {
308
- let gauge = SafeGauge.makeOrThrow(
309
- ~name="envio_progress_block_number",
310
- ~help="The block number to track the progress of indexing at. Currently uses the fully fetched block number. In the future will be changed to block number processed and stored in the database.",
311
- ~labelSchema=chainIdLabelsSchema,
312
- )
313
-
314
- let set = (~blockNumber, ~chainId) => {
315
- gauge->SafeGauge.handleInt(~labels=chainId, ~value=blockNumber)
316
- }
317
- }
318
-
319
283
  module IndexingAddresses = {
320
284
  let gauge = SafeGauge.makeOrThrow(
321
285
  ~name="envio_indexing_addresses",
@@ -428,8 +392,6 @@ module IndexingBufferBlockNumber = {
428
392
  deprecatedGauge
429
393
  ->PromClient.Gauge.labels({"chainId": chainId})
430
394
  ->PromClient.Gauge.set(blockNumber)
431
- // TODO: Use the block number stored in the database instead
432
- ProgressBlockNumber.set(~blockNumber, ~chainId)
433
395
  gauge->SafeGauge.handleInt(~labels=chainId, ~value=blockNumber)
434
396
  }
435
397
  }
@@ -484,7 +446,7 @@ module ReorgCount = {
484
446
  "labelNames": ["chainId"],
485
447
  })
486
448
 
487
- let counter = SafeGauge.makeOrThrow(
449
+ let gauge = SafeGauge.makeOrThrow(
488
450
  ~name="envio_reorg_count",
489
451
  ~help="Total number of reorgs detected",
490
452
  ~labelSchema=chainIdLabelsSchema,
@@ -494,7 +456,7 @@ module ReorgCount = {
494
456
  deprecatedCounter
495
457
  ->PromClient.Counter.labels({"chainId": chain->ChainMap.Chain.toString})
496
458
  ->PromClient.Counter.inc
497
- counter->SafeGauge.increment(~labels=chain->ChainMap.Chain.toChainId)
459
+ gauge->SafeGauge.increment(~labels=chain->ChainMap.Chain.toChainId)
498
460
  }
499
461
  }
500
462
 
@@ -545,6 +507,30 @@ module RollbackTargetBlockNumber = {
545
507
  }
546
508
  }
547
509
 
510
+ module ProcessingBlockNumber = {
511
+ let gauge = SafeGauge.makeOrThrow(
512
+ ~name="envio_processing_block_number",
513
+ ~help="The latest item block number included in the currently processing batch for the chain.",
514
+ ~labelSchema=chainIdLabelsSchema,
515
+ )
516
+
517
+ let set = (~blockNumber, ~chainId) => {
518
+ gauge->SafeGauge.handleInt(~labels=chainId, ~value=blockNumber)
519
+ }
520
+ }
521
+
522
+ module ProcessingBatchSize = {
523
+ let gauge = SafeGauge.makeOrThrow(
524
+ ~name="envio_processing_batch_size",
525
+ ~help="The number of items included in the currently processing batch for the chain.",
526
+ ~labelSchema=chainIdLabelsSchema,
527
+ )
528
+
529
+ let set = (~batchSize, ~chainId) => {
530
+ gauge->SafeGauge.handleInt(~labels=chainId, ~value=batchSize)
531
+ }
532
+ }
533
+
548
534
  module ProcessingMaxBatchSize = {
549
535
  let gauge = PromClient.Gauge.makeGauge({
550
536
  "name": "envio_processing_max_batch_size",
@@ -555,3 +541,52 @@ module ProcessingMaxBatchSize = {
555
541
  gauge->PromClient.Gauge.set(maxBatchSize)
556
542
  }
557
543
  }
544
+
545
+ module ProgressBlockNumber = {
546
+ let gauge = SafeGauge.makeOrThrow(
547
+ ~name="envio_progress_block_number",
548
+ ~help="The block number of the latest block processed and stored in the database.",
549
+ ~labelSchema=chainIdLabelsSchema,
550
+ )
551
+
552
+ let set = (~blockNumber, ~chainId) => {
553
+ gauge->SafeGauge.handleInt(~labels=chainId, ~value=blockNumber)
554
+ }
555
+ }
556
+
557
+ module ProgressEventsCount = {
558
+ let deprecatedGauge = PromClient.Gauge.makeGauge({
559
+ "name": "events_processed",
560
+ "help": "Total number of events processed",
561
+ "labelNames": ["chainId"],
562
+ })
563
+
564
+ let gauge = SafeGauge.makeOrThrow(
565
+ ~name="envio_progress_events_count",
566
+ ~help="The number of events processed and reflected in the database.",
567
+ ~labelSchema=chainIdLabelsSchema,
568
+ )
569
+
570
+ let set = (~processedCount, ~chainId) => {
571
+ deprecatedGauge
572
+ ->PromClient.Gauge.labels({"chainId": chainId})
573
+ ->PromClient.Gauge.set(processedCount)
574
+ gauge->SafeGauge.handleInt(~labels=chainId, ~value=processedCount)
575
+ }
576
+ }
577
+
578
+ let effectLabelsSchema = S.object(s => {
579
+ s.field("effect", S.string)
580
+ })
581
+
582
+ module EffectCallsCount = {
583
+ let gauge = SafeGauge.makeOrThrow(
584
+ ~name="envio_effect_calls_count",
585
+ ~help="The number of calls to the effect. Including both handler execution and cache hits.",
586
+ ~labelSchema=effectLabelsSchema,
587
+ )
588
+
589
+ let set = (~callsCount, ~effectName) => {
590
+ gauge->SafeGauge.handleInt(~labels=effectName, ~value=callsCount)
591
+ }
592
+ }
@@ -3,7 +3,7 @@
3
3
  /* eslint-disable */
4
4
  /* tslint:disable */
5
5
 
6
- const BigDecimalJS = require('./BigDecimal.bs.js');
6
+ const BigDecimalJS = require('./BigDecimal.res.js');
7
7
 
8
8
  import type {S_t as RescriptSchema_S_t} from 'rescript-schema/RescriptSchema.gen';
9
9
 
@@ -3,7 +3,7 @@
3
3
  /* eslint-disable */
4
4
  /* tslint:disable */
5
5
 
6
- const EthersJS = require('./Ethers.bs.js');
6
+ const EthersJS = require('./Ethers.res.js');
7
7
 
8
8
  import type {t as Address_t} from '../../src/Address.gen';
9
9
 
@@ -37,6 +37,11 @@ external all6: ((t<'a>, t<'b>, t<'c>, t<'d>, t<'e>, t<'f>)) => t<('a, 'b, 'c, 'd
37
37
  @send
38
38
  external catch: (t<'a>, @uncurry exn => t<'a>) => t<'a> = "catch"
39
39
 
40
+ %%private(let noop = (() => ())->Obj.magic)
41
+ let silentCatch = (promise: promise<'a>): promise<'a> => {
42
+ catch(promise, noop)
43
+ }
44
+
40
45
  let catch = (promise: promise<'a>, callback: exn => promise<'a>): promise<'a> => {
41
46
  catch(promise, err => {
42
47
  callback(Js.Exn.anyToExnInternal(err))
@@ -4,7 +4,7 @@ module RowAction = {
4
4
  type t = SET | DELETE
5
5
  let variants = [SET, DELETE]
6
6
  let name = "ENTITY_HISTORY_ROW_ACTION"
7
- let enum = Enum.make(~name, ~variants)
7
+ let schema = S.enum(variants)
8
8
  }
9
9
 
10
10
  type historyFieldsGeneral<'a> = {
@@ -88,7 +88,7 @@ let makeHistoryRowSchema: S.t<'entity> => S.t<historyRow<'entity>> = entitySchem
88
88
  "current": s.flatten(currentHistoryFieldsSchema),
89
89
  "previous": s.flatten(previousHistoryFieldsSchema),
90
90
  "entityData": s.flatten(nullableEntitySchema),
91
- "action": s.field("action", RowAction.enum.schema),
91
+ "action": s.field("action", RowAction.schema),
92
92
  }
93
93
  })->S.transform(s => {
94
94
  parser: v => {
@@ -228,7 +228,7 @@ let fromTable = (table: table, ~schema: S.t<'entity>): t<'entity> => {
228
228
 
229
229
  let actionFieldName = "action"
230
230
 
231
- let actionField = mkField(actionFieldName, Custom(RowAction.enum.name), ~fieldSchema=S.never)
231
+ let actionField = mkField(actionFieldName, Custom(RowAction.name), ~fieldSchema=S.never)
232
232
 
233
233
  let serialField = mkField("serial", Serial, ~fieldSchema=S.never, ~isNullable=true, ~isIndex=true)
234
234
 
package/src/Enum.res DELETED
@@ -1,22 +0,0 @@
1
- // Graphql Enum Type Variants
2
- type enum<'a> = {
3
- name: string,
4
- variants: array<'a>,
5
- schema: S.t<'a>,
6
- default: 'a,
7
- }
8
-
9
- let make = (~name, ~variants) => {
10
- name,
11
- variants,
12
- schema: S.enum(variants),
13
- default: switch variants->Belt.Array.get(0) {
14
- | Some(v) => v
15
- | None => Js.Exn.raiseError("No variants defined for enum " ++ name)
16
- },
17
- }
18
-
19
- module type S = {
20
- type t
21
- let enum: enum<t>
22
- }