envio 2.17.0 → 2.18.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/package.json +6 -6
- package/src/ContractAddressingMap.res +115 -0
- package/src/ErrorHandling.res +61 -0
- package/src/EventUtils.res +87 -0
- package/src/Internal.res +2 -0
- package/src/LoadManager.res +174 -0
- package/src/Logging.res +179 -0
- package/src/Prometheus.res +316 -0
- package/src/Time.res +41 -0
- package/src/Utils.res +15 -0
- package/src/bindings/BigDecimal.gen.ts +8 -0
- package/src/bindings/BigDecimal.res +60 -0
- package/src/bindings/PromClient.res +58 -0
- package/src/db/EntityHistory.res +4 -4
- package/src/db/Table.res +9 -3
- package/src/sources/Fuel.res +37 -0
- package/src/sources/HyperFuel.res +260 -0
- package/src/sources/HyperFuel.resi +59 -0
- package/src/sources/HyperFuelClient.res +408 -0
- package/src/sources/HyperSync.res +349 -0
- package/src/sources/HyperSync.resi +69 -0
- package/src/sources/vendored-fuel-abi-coder.js +1847 -0
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "envio",
-  "version": "v2.
+  "version": "v2.18.0",
   "description": "A latency and sync speed optimized, developer friendly blockchain data indexer.",
   "bin": "./bin.js",
   "types": "index.d.ts",
@@ -24,15 +24,15 @@
   },
   "homepage": "https://envio.dev",
   "optionalDependencies": {
-    "envio-linux-x64": "v2.
-    "envio-linux-arm64": "v2.
-    "envio-darwin-x64": "v2.
-    "envio-darwin-arm64": "v2.
+    "envio-linux-x64": "v2.18.0",
+    "envio-linux-arm64": "v2.18.0",
+    "envio-darwin-x64": "v2.18.0",
+    "envio-darwin-arm64": "v2.18.0"
   },
   "dependencies": {
     "@envio-dev/hypersync-client": "0.6.3",
     "rescript": "11.1.3",
-    "rescript-schema": "9.
+    "rescript-schema": "9.3.0",
     "viem": "2.21.0"
   },
   "files": [

package/src/ContractAddressingMap.res
ADDED

@@ -0,0 +1,115 @@
type contractName = string

// Currently this mapping append only, so we don't need to worry about
// protecting static addresses from de-registration.

type mapping = {
  nameByAddress: dict<contractName>,
  addressesByName: dict<Belt.Set.String.t>,
}

exception AddressRegisteredForMultipleContracts({address: Address.t, names: array<contractName>})

let addAddress = (map: mapping, ~name: string, ~address: Address.t) => {
  switch map.nameByAddress->Utils.Dict.dangerouslyGetNonOption(address->Address.toString) {
  | Some(currentName) if currentName != name =>
    let logger = Logging.createChild(
      ~params={
        "address": address->Address.toString,
        "existingContract": currentName,
        "newContract": name,
      },
    )
    AddressRegisteredForMultipleContracts({
      address,
      names: [currentName, name],
    })->ErrorHandling.mkLogAndRaise(~msg="Address registered for multiple contracts", ~logger)
  | _ => ()
  }
  map.nameByAddress->Js.Dict.set(address->Address.toString, name)

  let oldAddresses =
    map.addressesByName
    ->Utils.Dict.dangerouslyGetNonOption(name)
    ->Belt.Option.getWithDefault(Belt.Set.String.empty)
  let newAddresses = oldAddresses->Belt.Set.String.add(address->Address.toString)
  map.addressesByName->Js.Dict.set(name, newAddresses)
}

let getAddresses = (map: mapping, name: string) => {
  map.addressesByName->Utils.Dict.dangerouslyGetNonOption(name)
}

let getName = (map: mapping, address: string) => {
  map.nameByAddress->Utils.Dict.dangerouslyGetNonOption(address)
}

let make = () => {
  nameByAddress: Js.Dict.empty(),
  addressesByName: Js.Dict.empty(),
}

let getContractNameFromAddress = (mapping, ~contractAddress: Address.t): option<contractName> => {
  mapping->getName(contractAddress->Address.toString)
}

let stringsToAddresses: array<string> => array<Address.t> = Utils.magic
let keyValStringToAddress: array<(string, string)> => array<(Address.t, string)> = Utils.magic

let getAddressesFromContractName = (mapping, ~contractName) => {
  switch mapping->getAddresses(contractName) {
  | Some(addresses) => addresses
  | None => Belt.Set.String.empty
  }
  ->Belt.Set.String.toArray
  ->stringsToAddresses
}

let getAllAddresses = (mapping: mapping) => {
  mapping.nameByAddress->Js.Dict.keys->stringsToAddresses
}

let copy = (mapping: mapping) => {
  {
    nameByAddress: mapping.nameByAddress->Utils.Dict.shallowCopy,
    // Since Belt.Set.String.t is immutable, we can simply do shallow copy here
    addressesByName: mapping.addressesByName->Utils.Dict.shallowCopy,
  }
}

let mergeInPlace = (map, ~target) => {
  map.nameByAddress
  ->Js.Dict.keys
  ->Belt.Array.forEach(addr => {
    let name = map.nameByAddress->Js.Dict.unsafeGet(addr)
    target->addAddress(~address=addr->Address.unsafeFromString, ~name)
  })
}

let fromArray = (nameAddrTuples: array<(Address.t, string)>) => {
  let m = make()
  nameAddrTuples->Belt.Array.forEach(((address, name)) => m->addAddress(~name, ~address))
  m
}

/**
Creates a new mapping from the previous without the addresses passed in as "addressesToRemove"
*/
let removeAddresses = (mapping: mapping, ~addressesToRemove: array<Address.t>) => {
  switch addressesToRemove {
  | [] => mapping
  | _ =>
    mapping.nameByAddress
    ->Js.Dict.entries
    ->Belt.Array.keep(((addr, _name)) => {
      let shouldRemove = addressesToRemove->Utils.Array.includes(addr->Utils.magic)
      !shouldRemove
    })
    ->keyValStringToAddress
    ->fromArray
  }
}

let addressCount = (mapping: mapping) => mapping.nameByAddress->Js.Dict.keys->Belt.Array.length

let isEmpty = (mapping: mapping) => mapping->addressCount == 0
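
A minimal usage sketch (not part of the package): register an address for a contract and query the mapping in both directions. The contract name and address below are made up; Address.unsafeFromString is the same helper the module itself uses in mergeInPlace.

let mapping = ContractAddressingMap.make()

mapping->ContractAddressingMap.addAddress(
  ~name="Greeter",
  ~address="0x1111111111111111111111111111111111111111"->Address.unsafeFromString,
)

// Lookup by address -> Some("Greeter")
let name =
  mapping->ContractAddressingMap.getContractNameFromAddress(
    ~contractAddress="0x1111111111111111111111111111111111111111"->Address.unsafeFromString,
  )

// Lookup by contract name -> array containing the single registered address
let greeterAddresses =
  mapping->ContractAddressingMap.getAddressesFromContractName(~contractName="Greeter")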

package/src/ErrorHandling.res
ADDED

@@ -0,0 +1,61 @@
type t = {logger: Pino.t, exn: exn, msg: option<string>}

let make = (exn, ~logger=Logging.getLogger(), ~msg=?) => {
  {logger, msg, exn}
}

let log = (self: t) => {
  switch self {
  | {exn, msg: Some(msg), logger} =>
    logger->Logging.childErrorWithExn(exn->Internal.prettifyExn, msg)
  | {exn, msg: None, logger} => logger->Logging.childError(exn->Internal.prettifyExn)
  }
}

let raiseExn = (self: t) => {
  self.exn->Internal.prettifyExn->raise
}

let mkLogAndRaise = (~logger=?, ~msg=?, exn) => {
  let exn = exn->Internal.prettifyExn
  exn->make(~logger?, ~msg?)->log
  exn->raise
}

let unwrapLogAndRaise = (~logger=?, ~msg=?, result) => {
  switch result {
  | Ok(v) => v
  | Error(exn) => exn->mkLogAndRaise(~logger?, ~msg?)
  }
}

let logAndRaise = self => {
  self->log
  self->raiseExn
}

/**
An environment to manage control flow propogating results
with Error that contain ErrorHandling.t in async
contexts and avoid nested switch statements on awaited promises
Similar to rust result propogation
*/
module ResultPropogateEnv = {
  exception ErrorHandlingEarlyReturn(t)

  type resultWithErrorHandle<'a> = result<'a, t>
  type asyncBody<'a> = unit => promise<resultWithErrorHandle<'a>>

  let runAsyncEnv = async (body: asyncBody<'a>) => {
    switch await body() {
    | exception ErrorHandlingEarlyReturn(e) => Error(e)
    | endReturn => endReturn
    }
  }

  let propogate = (res: resultWithErrorHandle<'a>) =>
    switch res {
    | Ok(v) => v
    | Error(e) => raise(ErrorHandlingEarlyReturn(e))
    }
}
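
A hedged sketch (not from the package) of how ResultPropogateEnv can be used for early returns in an async body; loadSomething is a hypothetical stand-in for any call that returns result<'a, ErrorHandling.t>.

open ErrorHandling.ResultPropogateEnv

// Hypothetical stand-in for a call returning result<'a, ErrorHandling.t>
let loadSomething = async () => Ok(41)

let run = async () =>
  await runAsyncEnv(async () => {
    // propogate unwraps Ok, or raises ErrorHandlingEarlyReturn,
    // which runAsyncEnv converts back into Error(ErrorHandling.t)
    let value = (await loadSomething())->propogate
    Ok(value + 1)
  })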

package/src/EventUtils.res
ADDED

@@ -0,0 +1,87 @@
type multiChainEventIndex = {
  timestamp: int,
  chainId: int,
  blockNumber: int,
  logIndex: int,
}

//Comparator used when ordering multichain events
let getEventComparator = (multiChainEventIndex: multiChainEventIndex) => {
  let {timestamp, chainId, blockNumber, logIndex} = multiChainEventIndex
  (timestamp, chainId, blockNumber, logIndex)
}

let getEventComparatorFromQueueItem = (
  {chain, timestamp, blockNumber, logIndex}: Internal.eventItem,
) => {
  let chainId = chain->ChainMap.Chain.toChainId
  (timestamp, chainId, blockNumber, logIndex)
}

//Function used to determine if one event is earlier than another
let isEarlierEvent = (event1: multiChainEventIndex, event2: multiChainEventIndex) => {
  event1->getEventComparator < event2->getEventComparator
}

type eventIndex = {
  blockNumber: int,
  logIndex: int,
}

// takes blockNumber, logIndex and packs them into a number with
//32 bits, 16 bits and 16 bits respectively
let packEventIndex = (~blockNumber, ~logIndex) => {
  let blockNumber = blockNumber->BigInt.fromInt
  let logIndex = logIndex->BigInt.fromInt
  let blockNumber = BigInt.Bitwise.shift_left(blockNumber, 16->BigInt.fromInt)

  blockNumber->BigInt.Bitwise.logor(logIndex)
}

//Currently not used but keeping in utils
//using @live flag for dead code analyser
@live
let packMultiChainEventIndex = (~timestamp, ~chainId, ~blockNumber, ~logIndex) => {
  let timestamp = timestamp->BigInt.fromInt
  let chainId = chainId->BigInt.fromInt
  let blockNumber = blockNumber->BigInt.fromInt
  let logIndex = logIndex->BigInt.fromInt

  let timestamp = BigInt.Bitwise.shift_left(timestamp, 48->BigInt.fromInt)
  let chainId = BigInt.Bitwise.shift_left(chainId, 16->BigInt.fromInt)
  let blockNumber = BigInt.Bitwise.shift_left(blockNumber, 16->BigInt.fromInt)

  timestamp
  ->BigInt.Bitwise.logor(chainId)
  ->BigInt.Bitwise.logor(blockNumber)
  ->BigInt.Bitwise.logor(logIndex)
}

//Currently not used but keeping in utils
//using @live flag for dead code analyser
@live
let unpackEventIndex = (packedEventIndex: bigint) => {
  let blockNumber = packedEventIndex->BigInt.Bitwise.shift_right(16->BigInt.fromInt)
  let logIndexMask = 65535->BigInt.fromInt
  let logIndex = packedEventIndex->BigInt.Bitwise.logand(logIndexMask)
  {
    blockNumber: blockNumber->BigInt.toString->Belt.Int.fromString->Belt.Option.getUnsafe,
    logIndex: logIndex->BigInt.toString->Belt.Int.fromString->Belt.Option.getUnsafe,
  }
}

//takes an eventIndex record and returnts a packed event index
//used in TS tests
@live
let packEventIndexFromRecord = (eventIndex: eventIndex) => {
  packEventIndex(~blockNumber=eventIndex.blockNumber, ~logIndex=eventIndex.logIndex)
}

//Returns unique string id for an event using its chain id combined with event id
//Used in IO for the key in the in mem rawEvents table
let getEventIdKeyString = (~chainId: int, ~eventId: string) => {
  let chainIdStr = chainId->Belt.Int.toString
  let key = chainIdStr ++ "_" ++ eventId

  key
}
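
A short illustrative snippet of the packing arithmetic above: with the 16-bit shift, blockNumber occupies the bits above the low 16 and logIndex the low 16 bits, so blockNumber=5 and logIndex=3 pack to 5 * 65536 + 3 = 327683.

let packed = EventUtils.packEventIndex(~blockNumber=5, ~logIndex=3) // 327683n
let unpacked = EventUtils.unpackEventIndex(packed) // {blockNumber: 5, logIndex: 3}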
package/src/Internal.res
CHANGED
@@ -134,6 +134,8 @@ type eventItem = {
   //be reprocessed after it has loaded dynamic contracts
   //This gets set to true and does not try and reload events
   hasRegisteredDynamicContracts?: bool,
+  // Reuse logger object for event
+  mutable loggerCache?: Pino.t,
 }

 @genType

package/src/LoadManager.res
ADDED

@@ -0,0 +1,174 @@
open Belt

module Call = {
  type input
  type output
  type t = {
    input: input,
    resolve: output => unit,
    reject: exn => unit,
    mutable promise: promise<output>,
    mutable isLoading: bool,
  }
}

module Group = {
  type t = {
    // Unique calls by input as a key
    calls: dict<Call.t>,
    load: array<Call.input> => promise<unit>,
    getUnsafeInMemory: string => Call.output,
    hasInMemory: string => bool,
  }
}

type t = {
  // Batches of different operations by operation key
  // Can be: Load by id, load by index, effect
  groups: dict<Group.t>,
  mutable isCollecting: bool,
}

let make = () => {
  groups: Js.Dict.empty(),
  isCollecting: false,
}

let schedule = async loadManager => {
  // For the first schedule, wait for a microtask first
  // to collect all calls before the next await
  // If the loadManager is already collecting,
  // then we do nothing. The call will be automatically
  // handled when the promise below resolves
  loadManager.isCollecting = true
  await Promise.resolve()
  loadManager.isCollecting = false

  let groups = loadManager.groups
  groups
  ->Js.Dict.keys
  ->Utils.Array.forEachAsync(async key => {
    let group = groups->Js.Dict.unsafeGet(key)
    let calls = group.calls

    let inputsToLoad = []
    let currentInputKeys = []
    calls
    ->Js.Dict.keys
    ->Js.Array2.forEach(inputKey => {
      let call = calls->Js.Dict.unsafeGet(inputKey)
      if !call.isLoading {
        call.isLoading = true
        currentInputKeys->Js.Array2.push(inputKey)->ignore
        if group.hasInMemory(inputKey)->not {
          inputsToLoad->Js.Array2.push(call.input)->ignore
        }
      }
    })

    if inputsToLoad->Utils.Array.isEmpty->not {
      try {
        await group.load(inputsToLoad)
      } catch {
      | exn => {
          let exn = exn->Internal.prettifyExn
          currentInputKeys->Array.forEach(inputKey => {
            let call = calls->Js.Dict.unsafeGet(inputKey)
            call.reject(exn)
          })
        }
      }
    }

    if currentInputKeys->Utils.Array.isEmpty->not {
      currentInputKeys->Js.Array2.forEach(inputKey => {
        let call = calls->Js.Dict.unsafeGet(inputKey)
        calls->Utils.Dict.deleteInPlace(inputKey)
        call.resolve(group.getUnsafeInMemory(inputKey))
      })

      // Clean up executed batch to reset
      // provided load function which
      // might have an outdated function context
      let latestGroup = groups->Js.Dict.unsafeGet(key)
      if latestGroup.calls->Js.Dict.keys->Utils.Array.isEmpty {
        groups->Utils.Dict.deleteInPlace(key)
      }
    }
  })
}

let noopHasher = input => input->(Utils.magic: 'input => string)

let call = (
  loadManager,
  ~input,
  ~key,
  ~load,
  ~hasher,
  ~shouldGroup,
  ~hasInMemory,
  ~getUnsafeInMemory,
) => {
  // This is a micro-optimization to avoid a function call
  let inputKey = hasher === noopHasher ? input->(Utils.magic: 'input => string) : hasher(input)

  // We group external calls by operation to:
  // 1. Reduce the IO by allowing batch requests
  // 2. By allowing parallel processing of events
  // and make awaits run at the same time
  //
  // In the handlers it's not as important to group
  // calls, because usually we run a single handler at a time
  // So have a quick exit when an entity is already in memory
  //
  // But since we're going to parallelize handlers per chain,
  // keep the grouping logic when the data needs to be loaded
  // It has a small additional runtime cost, but might reduce IO time
  if !shouldGroup && hasInMemory(inputKey) {
    getUnsafeInMemory(inputKey)->Promise.resolve
  } else {
    let group = switch loadManager.groups->Utils.Dict.dangerouslyGetNonOption(key) {
    | Some(group) => group
    | None => {
        let g: Group.t = {
          calls: Js.Dict.empty(),
          load: load->(
            Utils.magic: (array<'input> => promise<unit>) => array<Call.input> => promise<unit>
          ),
          getUnsafeInMemory: getUnsafeInMemory->(
            Utils.magic: (string => 'output) => string => Call.output
          ),
          hasInMemory: hasInMemory->(Utils.magic: (string => bool) => string => bool),
        }
        loadManager.groups->Js.Dict.set(key, g)
        g
      }
    }

    switch group.calls->Utils.Dict.dangerouslyGetNonOption(inputKey) {
    | Some(c) => c.promise
    | None => {
        let promise = Promise.make((resolve, reject) => {
          let call: Call.t = {
            input: input->(Utils.magic: 'input => Call.input),
            resolve,
            reject,
            promise: %raw(`null`),
            isLoading: false,
          }
          group.calls->Js.Dict.set(inputKey, call)
        })

        // Don't use ref since it'll allocate an object to store .contents
        (group.calls->Js.Dict.unsafeGet(inputKey)).promise = promise

        if !loadManager.isCollecting {
          let _: promise<unit> = loadManager->schedule
        }

        promise
      }
    }->(Utils.magic: promise<Call.output> => promise<'output>)
  }
}
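
A speculative usage sketch, not taken from the package: it assumes the module compiles as LoadManager and uses a plain Js.Dict as the in-memory store; getGreeting and the "greeting" key are made up. Calls started in the same microtask are deduplicated per input and served by a single ~load invocation.

let cache: Js.Dict.t<string> = Js.Dict.empty()
let loadManager = LoadManager.make()

let getGreeting = (name: string) =>
  loadManager->LoadManager.call(
    ~input=name,
    ~key="greeting",
    ~hasher=LoadManager.noopHasher,
    ~shouldGroup=true,
    ~hasInMemory=inputKey => cache->Js.Dict.get(inputKey)->Belt.Option.isSome,
    ~getUnsafeInMemory=inputKey => cache->Js.Dict.unsafeGet(inputKey),
    // Called once with all unique inputs collected during the current microtask
    ~load=async names => names->Belt.Array.forEach(n => cache->Js.Dict.set(n, "Hello " ++ n)),
  )

// getGreeting("alice") and getGreeting("bob"), started in the same tick,
// resolve from the same batched ~load call.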
package/src/Logging.res
ADDED
@@ -0,0 +1,179 @@
open Pino

type logStrategyType =
  | @as("ecs-file") EcsFile
  | @as("ecs-console") EcsConsole
  | @as("ecs-console-multistream") EcsConsoleMultistream
  | @as("file-only") FileOnly
  | @as("console-raw") ConsoleRaw
  | @as("console-pretty") ConsolePretty
  | @as("both-prettyconsole") Both

let logLevels = [
  // custom levels
  ("udebug", 32),
  ("uinfo", 34),
  ("uwarn", 36),
  ("uerror", 38),
  // Default levels
  ("trace", 10),
  ("debug", 20),
  ("info", 30),
  ("warn", 40),
  ("error", 50),
  ("fatal", 60),
]->Js.Dict.fromArray

%%private(let logger = ref(None))

let setLogger = (~logStrategy, ~logFilePath, ~defaultFileLogLevel, ~userLogLevel) => {
  // Currently unused - useful if using multiple transports.
  // let pinoRaw = {"target": "pino/file", "level": Config.userLogLevel}
  let pinoFile: Transport.transportTarget = {
    target: "pino/file",
    options: {
      "destination": logFilePath,
      "append": true,
      "mkdir": true,
    }->Transport.makeTransportOptions,
    level: defaultFileLogLevel,
  }

  let makeMultiStreamLogger =
    MultiStreamLogger.make(~userLogLevel, ~defaultFileLogLevel, ~customLevels=logLevels, ...)

  logger :=
    Some(
      switch logStrategy {
      | EcsFile =>
        makeWithOptionsAndTransport(
          {
            ...Pino.ECS.make(),
            customLevels: logLevels,
          },
          Transport.make(pinoFile),
        )
      | EcsConsoleMultistream =>
        makeMultiStreamLogger(~logFile=None, ~options=Some(Pino.ECS.make()))
      | EcsConsole =>
        make({
          ...Pino.ECS.make(),
          level: userLogLevel,
          customLevels: logLevels,
        })
      | FileOnly =>
        makeWithOptionsAndTransport(
          {
            customLevels: logLevels,
            level: defaultFileLogLevel,
          },
          Transport.make(pinoFile),
        )
      | ConsoleRaw => makeMultiStreamLogger(~logFile=None, ~options=None)
      | ConsolePretty => makeMultiStreamLogger(~logFile=None, ~options=None)
      | Both => makeMultiStreamLogger(~logFile=Some(logFilePath), ~options=None)
      },
    )
}

let getLogger = () => {
  switch logger.contents {
  | Some(logger) => logger
  | None => Js.Exn.raiseError("Unreachable code. Logger not initialized")
  }
}

let setLogLevel = (level: Pino.logLevel) => {
  getLogger()->setLevel(level)
}

let trace = message => {
  getLogger().trace(message->createPinoMessage)
}

let debug = message => {
  getLogger().debug(message->createPinoMessage)
}

let info = message => {
  getLogger().info(message->createPinoMessage)
}

let warn = message => {
  getLogger().warn(message->createPinoMessage)
}

let error = message => {
  getLogger().error(message->createPinoMessage)
}
let errorWithExn = (error, message) => {
  getLogger()->Pino.errorExn(message->createPinoMessageWithError(error))
}

let fatal = message => {
  getLogger().fatal(message->createPinoMessage)
}

let childTrace = (logger, params: 'a) => {
  logger.trace(params->createPinoMessage)
}
let childDebug = (logger, params: 'a) => {
  logger.debug(params->createPinoMessage)
}
let childInfo = (logger, params: 'a) => {
  logger.info(params->createPinoMessage)
}
let childWarn = (logger, params: 'a) => {
  logger.warn(params->createPinoMessage)
}
let childError = (logger, params: 'a) => {
  logger.error(params->createPinoMessage)
}
let childErrorWithExn = (logger, error, params: 'a) => {
  logger->Pino.errorExn(params->createPinoMessageWithError(error))
}

let childFatal = (logger, params: 'a) => {
  logger.fatal(params->createPinoMessage)
}

let createChild = (~params: 'a) => {
  getLogger()->child(params->createChildParams)
}
let createChildFrom = (~logger: t, ~params: 'a) => {
  logger->child(params->createChildParams)
}

let getEventLogger = (eventItem: Internal.eventItem) => {
  switch eventItem.loggerCache {
  | Some(l) => l
  | None => {
      let l = getLogger()->child(
        {
          "context": `Event '${eventItem.eventConfig.name}' for contract '${eventItem.eventConfig.contractName}'`,
          "chainId": eventItem.chain->ChainMap.Chain.toChainId,
          "block": eventItem.blockNumber,
          "logIndex": eventItem.logIndex,
        }->createChildParams,
      )
      eventItem.loggerCache = Some(l)
      l
    }
  }
}

let getUserLogger = {
  @inline
  let log = (eventItem, level: Pino.logLevelUser, message: string, ~params) => {
    (eventItem->getEventLogger->Utils.magic->Js.Dict.unsafeGet((level :> string)))(params, message)
  }

  (eventItem): Envio.logger => {
    info: (message: string, ~params=?) => eventItem->log(#uinfo, message, ~params),
    debug: (message: string, ~params=?) => eventItem->log(#udebug, message, ~params),
    warn: (message: string, ~params=?) => eventItem->log(#uwarn, message, ~params),
    error: (message: string, ~params=?) => eventItem->log(#uerror, message, ~params),
    errorWithExn: (message: string, exn) =>
      eventItem->log(#uerror, message, ~params={"err": exn->Internal.prettifyExn}),
  }
}