envio 2.29.2 → 2.30.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/evm.schema.json +18 -0
- package/package.json +5 -5
- package/src/Address.res +23 -0
- package/src/Address.res.js +14 -0
- package/src/Batch.res +103 -90
- package/src/Batch.res.js +81 -101
- package/src/FetchState.res +73 -129
- package/src/FetchState.res.js +87 -149
- package/src/Hasura.res +178 -124
- package/src/Hasura.res.js +115 -54
- package/src/Persistence.res +1 -13
- package/src/Persistence.res.js +1 -7
- package/src/PgStorage.res +0 -7
- package/src/PgStorage.res.js +1 -5
- package/src/Utils.res +10 -0
- package/src/Utils.res.js +5 -0
- package/src/bindings/Ethers.res +35 -11
- package/src/bindings/Ethers.res.js +21 -1
- package/src/bindings/PromClient.res +10 -0
- package/src/db/InternalTable.res +1 -59
- package/src/db/InternalTable.res.js +2 -34
- package/src/sources/HyperSyncClient.res +8 -2
- package/src/sources/HyperSyncClient.res.js +3 -2
- package/src/sources/HyperSyncSource.res +8 -1
- package/src/sources/HyperSyncSource.res.js +7 -2
- package/src/sources/RpcSource.res +153 -3
- package/src/sources/RpcSource.res.js +195 -73
package/src/Hasura.res
CHANGED

@@ -71,12 +71,43 @@ let rawBodyRoute = Rest.route(() => {
   responses,
 })
 
+let bulkKeepGoingRoute = Rest.route(() => {
+  method: Post,
+  path: "",
+  input: s => {
+    let _ = s.field("type", S.literal("bulk_keep_going"))
+    {
+      "args": s.field("args", S.json(~validate=false)),
+      "auth": s->auth,
+    }
+  },
+  responses: [
+    (s: Rest.Response.s) => {
+      s.status(200)
+      s.data(S.json(~validate=false))
+    },
+  ],
+})
+let bulkKeepGoingErrorsSchema = S.array(
+  S.union([
+    S.object(s => {
+      s.tag("message", "success")
+      None
+    }),
+    S.object(s => {
+      Some(s.field("error", S.string))
+    }),
+  ]),
+)->S.transform(_ => {
+  parser: a => Belt.Array.keepMapU(a, a => a),
+})
+
 let clearHasuraMetadata = async (~endpoint, ~auth) => {
   try {
     let result = await clearMetadataRoute->Rest.fetch(auth, ~client=Rest.client(endpoint))
     let msg = switch result {
-    | QuerySucceeded => "
-    | AlreadyDone => "
+    | QuerySucceeded => "Hasura metadata cleared"
+    | AlreadyDone => "Hasura metadata already cleared"
     }
     Logging.trace(msg)
   } catch {

@@ -113,8 +144,8 @@ let trackTables = async (~endpoint, ~auth, ~pgSchema, ~tableNames: array<string>
       ~client=Rest.client(endpoint),
     )
     let msg = switch result {
-    | QuerySucceeded => "
-    | AlreadyDone => "
+    | QuerySucceeded => "Hasura finished tracking tables"
+    | AlreadyDone => "Hasura tables already tracked"
     }
     Logging.trace({
       "msg": msg,

@@ -130,91 +161,110 @@ let trackTables = async (~endpoint, ~auth, ~pgSchema, ~tableNames: array<string>
   }
 }
 
-
-
-
+type bulkOperation = {
+  \"type": string,
+  args: Js.Json.t,
+}
+
+let createSelectPermissionOperation = (
   ~tableName: string,
   ~pgSchema,
   ~responseLimit,
   ~aggregateEntities,
-) => {
-
-
-
-
-  "
-
-      "schema": pgSchema,
-      "name": tableName,
-    },
-    "role": "public",
-    "source": "default",
-    "permission": {
-      "columns": "*",
-      "filter": Js.Obj.empty(),
-      "limit": responseLimit,
-      "allow_aggregations": aggregateEntities->Js.Array2.includes(tableName),
-    },
-  }->(Utils.magic: 'a => Js.Json.t),
+): bulkOperation => {
+  {
+    \"type": "pg_create_select_permission",
+    args: {
+      "table": {
+        "schema": pgSchema,
+        "name": tableName,
       },
-
-
-
-
-
-
-
-
-
-    })
-  } catch {
-  | exn =>
-    Logging.error({
-      "msg": `EE808: There was an issue setting up view permissions for the ${tableName} table in hasura - indexing may still work - but you may have issues querying the data in hasura.`,
-      "tableName": tableName,
-      "err": exn->Utils.prettifyExn,
-    })
+      "role": "public",
+      "source": "default",
+      "permission": {
+        "columns": "*",
+        "filter": Js.Obj.empty(),
+        "limit": responseLimit,
+        "allow_aggregations": aggregateEntities->Js.Array2.includes(tableName),
+      },
+    }->(Utils.magic: 'a => Js.Json.t),
   }
 }
 
-let
+let createEntityRelationshipOperation = (
   ~pgSchema,
-  ~endpoint,
-  ~auth,
   ~tableName: string,
   ~relationshipType: string,
   ~relationalKey: string,
   ~objectName: string,
   ~mappedEntity: string,
   ~isDerivedFrom: bool,
-) => {
+): bulkOperation => {
   let derivedFromTo = isDerivedFrom ? `"id": "${relationalKey}"` : `"${relationalKey}_id" : "id"`
 
-
-
-
-
-
-  "
-    "bodyString": bodyString,
+  {
+    \"type": `pg_create_${relationshipType}_relationship`,
+    args: {
+      "table": {
+        "schema": pgSchema,
+        "name": tableName,
      },
-
-
-
-
-
+      "name": objectName,
+      "source": "default",
+      "using": {
+        "manual_configuration": {
+          "remote_table": {
+            "schema": pgSchema,
+            "name": mappedEntity,
+          },
+          "column_mapping": Js.Json.parseExn(`{${derivedFromTo}}`),
+        },
+      },
+    }->(Utils.magic: 'a => Js.Json.t),
+  }
+}
+
+let executeBulkKeepGoing = async (~endpoint, ~auth, ~operations: array<bulkOperation>) => {
+  if operations->Js.Array2.length === 0 {
+    Logging.trace("No hasura bulk configuration operations to execute")
+  } else {
+    try {
+      let result = await bulkKeepGoingRoute->Rest.fetch(
+        {
+          "auth": auth,
+          "args": operations->(Utils.magic: 'a => Js.Json.t),
+        },
+        ~client=Rest.client(endpoint),
+      )
+
+      let errors = try {
+        result->S.parseJsonOrThrow(bulkKeepGoingErrorsSchema)
+      } catch {
+      | S.Raised(error) => [error->S.Error.message]
+      | exn => [exn->Utils.prettifyExn->Utils.magic]
+      }
+
+      switch errors {
+      | [] =>
+        Logging.trace({
+          "msg": "Hasura configuration completed",
+          "operations": operations->Js.Array2.length,
+        })
+      | _ =>
+        Logging.warn({
+          "msg": "Hasura configuration completed with errors. Indexing will still work - but you may have issues querying data via GraphQL.",
+          "errors": errors,
+          "operations": operations->Js.Array2.length,
+        })
+      }
+    } catch {
+    | exn =>
+      Logging.error({
+        "msg": `EE809: There was an issue executing bulk operations in hasura - indexing may still work - but you may have issues querying the data in hasura.`,
+        "operations": operations->Js.Array2.length,
+        "err": exn->Utils.prettifyExn,
+      })
     }
-    Logging.trace({
-      "msg": msg,
-      "tableName": tableName,
-    })
-  } catch {
-  | exn =>
-    Logging.error({
-      "msg": `EE808: There was an issue setting up ${relationshipType} relationship for the ${tableName} table in hasura - indexing may still work - but you may have issues querying the data in hasura.`,
-      "tableName": tableName,
-      "err": exn->Utils.prettifyExn,
-    })
   }
 }
 

@@ -241,60 +291,64 @@ let trackDatabase = async (
 
   await trackTables(~endpoint, ~auth, ~pgSchema, ~tableNames)
 
-
-
-  ->Js.Array2.map(tableName =>
-    createSelectPermissions(
-      ~endpoint,
-      ~auth,
-      ~tableName,
-      ~pgSchema,
-      ~responseLimit,
-      ~aggregateEntities,
-    )
-  )
-  ->Js.Array2.concatMany(
-    userEntities->Js.Array2.map(entityConfig => {
-      let {tableName} = entityConfig.table
-      [
-        //Set array relationships
-        entityConfig.table
-        ->Table.getDerivedFromFields
-        ->Js.Array2.map(derivedFromField => {
-          //determines the actual name of the underlying relational field (if it's an entity mapping then suffixes _id for eg.)
-          let relationalFieldName =
-            schema->Schema.getDerivedFromFieldName(derivedFromField)->Utils.unwrapResultExn
+  // Collect all operations for bulk execution
+  let allOperations = []
 
-
-
-
-
-
-            ~relationshipType="array",
-            ~isDerivedFrom=true,
-            ~objectName=derivedFromField.fieldName,
-            ~relationalKey=relationalFieldName,
-            ~mappedEntity=derivedFromField.derivedFromEntity,
-          )
-        }),
-        //Set object relationships
-        entityConfig.table
-        ->Table.getLinkedEntityFields
-        ->Js.Array2.map(((field, linkedEntityName)) => {
-          createEntityRelationship(
-            ~endpoint,
-            ~auth,
-            ~pgSchema,
-            ~tableName,
-            ~relationshipType="object",
-            ~isDerivedFrom=false,
-            ~objectName=field.fieldName,
-            ~relationalKey=field.fieldName,
-            ~mappedEntity=linkedEntityName,
-          )
-        }),
-      ]->Utils.Array.flatten
-    }),
+  // Add select permission operations
+  tableNames->Js.Array2.forEach(tableName => {
+    allOperations
+    ->Js.Array2.push(
+      createSelectPermissionOperation(~tableName, ~pgSchema, ~responseLimit, ~aggregateEntities),
     )
-    ->
+    ->ignore
+  })
+
+  // Add relationship operations
+  userEntities->Js.Array2.forEach(entityConfig => {
+    let {tableName} = entityConfig.table
+
+    //Set array relationships
+    entityConfig.table
+    ->Table.getDerivedFromFields
+    ->Js.Array2.forEach(derivedFromField => {
+      //determines the actual name of the underlying relational field (if it's an entity mapping then suffixes _id for eg.)
+      let relationalFieldName =
+        schema->Schema.getDerivedFromFieldName(derivedFromField)->Utils.unwrapResultExn
+
+      allOperations
+      ->Js.Array2.push(
+        createEntityRelationshipOperation(
+          ~pgSchema,
+          ~tableName,
+          ~relationshipType="array",
+          ~isDerivedFrom=true,
+          ~objectName=derivedFromField.fieldName,
+          ~relationalKey=relationalFieldName,
+          ~mappedEntity=derivedFromField.derivedFromEntity,
+        ),
+      )
+      ->ignore
+    })
+
+    //Set object relationships
+    entityConfig.table
+    ->Table.getLinkedEntityFields
+    ->Js.Array2.forEach(((field, linkedEntityName)) => {
+      allOperations
+      ->Js.Array2.push(
+        createEntityRelationshipOperation(
+          ~pgSchema,
+          ~tableName,
+          ~relationshipType="object",
+          ~isDerivedFrom=false,
+          ~objectName=field.fieldName,
+          ~relationalKey=field.fieldName,
+          ~mappedEntity=linkedEntityName,
+        ),
+      )
+      ->ignore
+    })
+  })
+
+  await executeBulkKeepGoing(~endpoint, ~auth, ~operations=allOperations)
 }
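
For context on the new route above: Hasura's bulk_keep_going metadata API applies each operation in order and reports a per-operation result instead of aborting on the first failure. A rough, illustrative sketch of the request body that executeBulkKeepGoing assembles is shown below; the table names, limit, and column mapping are invented placeholders, not values from the package.

// Illustrative only: approximate JSON body posted to the Hasura metadata endpoint.
// The operation shapes mirror createSelectPermissionOperation and
// createEntityRelationshipOperation above; concrete values are placeholders.
const exampleBulkKeepGoingBody = {
  type: "bulk_keep_going",
  args: [
    {
      type: "pg_create_select_permission",
      args: {
        table: { schema: "public", name: "User" }, // placeholder table
        role: "public",
        source: "default",
        permission: { columns: "*", filter: {}, limit: 1000, allow_aggregations: false },
      },
    },
    {
      type: "pg_create_object_relationship",
      args: {
        table: { schema: "public", name: "Post" }, // placeholder table
        name: "author",
        source: "default",
        using: {
          manual_configuration: {
            remote_table: { schema: "public", name: "User" },
            column_mapping: { author_id: "id" },
          },
        },
      },
    },
  ],
};

The response is an array with one entry per operation; bulkKeepGoingErrorsSchema keeps only the entries that carry an "error" field, so an empty array after parsing means every operation succeeded.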
package/src/Hasura.res.js
CHANGED

@@ -8,7 +8,6 @@ var Schema = require("./db/Schema.res.js");
 var Logging = require("./Logging.res.js");
 var Belt_Array = require("rescript/lib/js/belt_Array.js");
 var InternalTable = require("./db/InternalTable.res.js");
-var Caml_splice_call = require("rescript/lib/js/caml_splice_call.js");
 var S$RescriptSchema = require("rescript-schema/src/S.res.js");
 var Caml_js_exceptions = require("rescript/lib/js/caml_js_exceptions.js");
 

@@ -91,11 +90,46 @@ function rawBodyRoute() {
   };
 }
 
+function bulkKeepGoingRoute() {
+  return {
+    method: "POST",
+    path: "",
+    input: (function (s) {
+        s.field("type", S$RescriptSchema.literal("bulk_keep_going"));
+        return {
+          args: s.field("args", S$RescriptSchema.json(false)),
+          auth: auth(s)
+        };
+      }),
+    responses: [(function (s) {
+          s.status(200);
+          return s.data(S$RescriptSchema.json(false));
+        })]
+  };
+}
+
+var bulkKeepGoingErrorsSchema = S$RescriptSchema.transform(S$RescriptSchema.array(S$RescriptSchema.union([
+  S$RescriptSchema.object(function (s) {
+    s.tag("message", "success");
+  }),
+  S$RescriptSchema.object(function (s) {
+    return s.f("error", S$RescriptSchema.string);
+  })
+])), (function (param) {
+  return {
+    p: (function (a) {
+      return Belt_Array.keepMapU(a, (function (a) {
+        return a;
+      }));
+    })
+  };
+}));
+
 async function clearHasuraMetadata(endpoint, auth) {
   try {
     var result = await Rest.$$fetch(clearMetadataRoute, auth, Rest.client(endpoint, undefined));
     var msg;
-    msg = result === "QuerySucceeded" ? "
+    msg = result === "QuerySucceeded" ? "Hasura metadata cleared" : "Hasura metadata already cleared";
     return Logging.trace(msg);
   }
   catch (raw_exn){

@@ -127,7 +161,7 @@ async function trackTables(endpoint, auth, pgSchema, tableNames) {
         }
       }, Rest.client(endpoint, undefined));
   var msg;
-  msg = result === "QuerySucceeded" ? "
+  msg = result === "QuerySucceeded" ? "Hasura finished tracking tables" : "Hasura tables already tracked";
   return Logging.trace({
               msg: msg,
               tableNames: tableNames

@@ -143,10 +177,9 @@ async function trackTables(endpoint, auth, pgSchema, tableNames) {
   }
 }
 
-
-
-
-          auth: auth,
+function createSelectPermissionOperation(tableName, pgSchema, responseLimit, aggregateEntities) {
+  return {
+    type: "pg_create_select_permission",
     args: {
       table: {
        schema: pgSchema,

@@ -161,44 +194,68 @@ async function createSelectPermissions(auth, endpoint, tableName, pgSchema, resp
        allow_aggregations: aggregateEntities.includes(tableName)
      }
    }
-  }
-  var msg;
-  msg = result === "QuerySucceeded" ? "Hasura select permissions created" : "Hasura select permissions already created";
-  return Logging.trace({
-              msg: msg,
-              tableName: tableName
-            });
-  }
-  catch (raw_exn){
-    var exn = Caml_js_exceptions.internalToOCamlException(raw_exn);
-    return Logging.error({
-                msg: "EE808: There was an issue setting up view permissions for the " + tableName + " table in hasura - indexing may still work - but you may have issues querying the data in hasura.",
-                tableName: tableName,
-                err: Utils.prettifyExn(exn)
-              });
-  }
+  };
 }
 
-
+function createEntityRelationshipOperation(pgSchema, tableName, relationshipType, relationalKey, objectName, mappedEntity, isDerivedFrom) {
  var derivedFromTo = isDerivedFrom ? "\"id\": \"" + relationalKey + "\"" : "\"" + relationalKey + "_id\" : \"id\"";
-
+  return {
+    type: "pg_create_" + relationshipType + "_relationship",
+    args: {
+      table: {
+        schema: pgSchema,
+        name: tableName
+      },
+      name: objectName,
+      source: "default",
+      using: {
+        manual_configuration: {
+          remote_table: {
+            schema: pgSchema,
+            name: mappedEntity
+          },
+          column_mapping: JSON.parse("{" + derivedFromTo + "}")
+        }
+      }
+    }
+  };
+}
+
+async function executeBulkKeepGoing(endpoint, auth, operations) {
+  if (operations.length === 0) {
+    return Logging.trace("No hasura bulk configuration operations to execute");
+  }
  try {
-    var result = await Rest.$$fetch(
+    var result = await Rest.$$fetch(bulkKeepGoingRoute, {
          auth: auth,
-
+          args: operations
        }, Rest.client(endpoint, undefined));
-    var
-
-
-
-
-
+    var errors;
+    try {
+      errors = S$RescriptSchema.parseJsonOrThrow(result, bulkKeepGoingErrorsSchema);
+    }
+    catch (raw_error){
+      var error = Caml_js_exceptions.internalToOCamlException(raw_error);
+      errors = error.RE_EXN_ID === S$RescriptSchema.Raised ? [S$RescriptSchema.$$Error.message(error._1)] : [Utils.prettifyExn(error)];
+    }
+    if (errors.length !== 0) {
+      return Logging.warn({
+                  msg: "Hasura configuration completed with errors. Indexing will still work - but you may have issues querying data via GraphQL.",
+                  errors: errors,
+                  operations: operations.length
+                });
+    } else {
+      return Logging.trace({
+                  msg: "Hasura configuration completed",
+                  operations: operations.length
+                });
+    }
  }
  catch (raw_exn){
    var exn = Caml_js_exceptions.internalToOCamlException(raw_exn);
    return Logging.error({
-                msg: "
-
+                msg: "EE809: There was an issue executing bulk operations in hasura - indexing may still work - but you may have issues querying the data in hasura.",
+                operations: operations.length,
                err: Utils.prettifyExn(exn)
              });
  }

@@ -220,22 +277,23 @@ async function trackDatabase(endpoint, auth, pgSchema, userEntities, aggregateEn
   Logging.info("Tracking tables in Hasura");
   await clearHasuraMetadata(endpoint, auth);
   await trackTables(endpoint, auth, pgSchema, tableNames);
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+  var allOperations = [];
+  tableNames.forEach(function (tableName) {
+    allOperations.push(createSelectPermissionOperation(tableName, pgSchema, responseLimit, aggregateEntities));
+  });
+  userEntities.forEach(function (entityConfig) {
+    var match = entityConfig.table;
+    var tableName = match.tableName;
+    Table.getDerivedFromFields(entityConfig.table).forEach(function (derivedFromField) {
+      var relationalFieldName = Utils.unwrapResultExn(Schema.getDerivedFromFieldName(schema, derivedFromField));
+      allOperations.push(createEntityRelationshipOperation(pgSchema, tableName, "array", relationalFieldName, derivedFromField.fieldName, derivedFromField.derivedFromEntity, true));
+    });
+    Table.getLinkedEntityFields(entityConfig.table).forEach(function (param) {
+      var field = param[0];
+      allOperations.push(createEntityRelationshipOperation(pgSchema, tableName, "object", field.fieldName, field.fieldName, param[1], false));
+    });
+  });
+  return await executeBulkKeepGoing(endpoint, auth, allOperations);
 }
 
 exports.auth = auth;

@@ -244,9 +302,12 @@ exports.clearMetadataRoute = clearMetadataRoute;
 exports.trackTablesRoute = trackTablesRoute;
 exports.createSelectPermissionRoute = createSelectPermissionRoute;
 exports.rawBodyRoute = rawBodyRoute;
+exports.bulkKeepGoingRoute = bulkKeepGoingRoute;
+exports.bulkKeepGoingErrorsSchema = bulkKeepGoingErrorsSchema;
 exports.clearHasuraMetadata = clearHasuraMetadata;
 exports.trackTables = trackTables;
-exports.
-exports.
+exports.createSelectPermissionOperation = createSelectPermissionOperation;
+exports.createEntityRelationshipOperation = createEntityRelationshipOperation;
+exports.executeBulkKeepGoing = executeBulkKeepGoing;
 exports.trackDatabase = trackDatabase;
-/*
+/* bulkKeepGoingErrorsSchema Not a pure module */
package/src/Persistence.res
CHANGED

@@ -143,19 +143,7 @@ let init = {
       persistence.storageStatus = Ready(initialState)
       let checkpoints = Js.Dict.empty()
       initialState.chains->Js.Array2.forEach(c => {
-
-          | Value(
-            logIndex,
-          ) => // Latest processed log index (not necessarily processed by the indexer)
-            {
-              "blockNumber": c.progressBlockNumber + 1,
-              "logIndex": logIndex,
-            }
-          | Null =>
-            // Or simply the latest processed block number (might be -1 if not set)
-            c.progressBlockNumber->Utils.magic
-          }
-        checkpoints->Utils.Dict.setByInt(c.id, checkpoint)
+        checkpoints->Utils.Dict.setByInt(c.id, c.progressBlockNumber)
       })
       Logging.info({
        "msg": `Successfully resumed indexing state! Continuing from the last checkpoint.`,
package/src/Persistence.res.js
CHANGED

@@ -84,13 +84,7 @@ async function init(persistence, chainConfigs, resetOpt) {
     };
     var checkpoints = {};
     initialState$1.chains.forEach(function (c) {
-
-      var checkpoint;
-      checkpoint = logIndex === null ? c.progress_block : ({
-            blockNumber: c.progress_block + 1 | 0,
-            logIndex: logIndex
-          });
-      checkpoints[c.id] = checkpoint;
+      checkpoints[c.id] = c.progress_block;
     });
     Logging.info({
           msg: "Successfully resumed indexing state! Continuing from the last checkpoint.",
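
The effect of the Persistence change above is that the checkpoints dictionary built during init now maps each chain id to a plain progress block number rather than a {blockNumber, logIndex} object. A minimal illustrative sketch of the new shape; chain ids and block numbers are made up:

// Illustrative only: shape of the checkpoints value assembled on resume.
// The chain ids and progress_block values below are invented examples.
var checkpoints = {};
[{ id: 1, progress_block: 1234567 }, { id: 10, progress_block: 890123 }].forEach(function (c) {
  checkpoints[c.id] = c.progress_block;
});
// checkpoints -> { "1": 1234567, "10": 890123 }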
package/src/PgStorage.res
CHANGED

@@ -904,13 +904,6 @@ let make = (
         ->(Utils.magic: promise<array<unknown>> => promise<array<InternalTable.Chains.t>>),
       ))
 
-      if chains->Utils.Array.notEmpty {
-        let () =
-          await sql->Postgres.unsafe(
-            InternalTable.DynamicContractRegistry.makeCleanUpOnRestartQuery(~pgSchema, ~chains),
-          )
-      }
-
       {
         cleanRun: false,
         cache,
package/src/PgStorage.res.js
CHANGED

@@ -638,14 +638,10 @@ function make(sql, pgHost, pgSchema, pgPort, pgUser, pgDatabase, pgPassword, onI
           restoreEffectCache(false),
           sql.unsafe(makeLoadAllQuery(pgSchema, InternalTable.Chains.table.tableName))
         ]);
-    var chains = match[1];
-    if (Utils.$$Array.notEmpty(chains)) {
-      await sql.unsafe(InternalTable.DynamicContractRegistry.makeCleanUpOnRestartQuery(pgSchema, chains));
-    }
     return {
             cleanRun: false,
             cache: match[0],
-            chains:
+            chains: match[1]
           };
   };
   return {
package/src/Utils.res
CHANGED

@@ -70,6 +70,12 @@ module Dict = {
   */
   external dangerouslyGetNonOption: (dict<'a>, string) => option<'a> = ""
 
+  @get_index
+  /**
+  It's the same as `Js.Dict.get` but it doesn't have runtime overhead to check if the key exists.
+  */
+  external dangerouslyGetByIntNonOption: (dict<'a>, int) => option<'a> = ""
+
   let has: (dict<'a>, string) => bool = %raw(`(dict, key) => key in dict`)
 
   let push = (dict, key, value) => {

@@ -188,6 +194,10 @@ module Array = {
     }
   }
 
+  let clearInPlace: array<'a> => unit = %raw(`(arr) => {
+    arr.length = 0
+  }`)
+
   /**
   Creates a shallow copy of the array and sets the value at the given index
   */