envio 3.0.0-alpha.1 → 3.0.0-alpha.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/evm.schema.json +63 -48
- package/fuel.schema.json +35 -31
- package/index.d.ts +1 -0
- package/package.json +15 -11
- package/rescript.json +1 -1
- package/src/Batch.res.mjs +1 -1
- package/src/Benchmark.res +394 -0
- package/src/Benchmark.res.mjs +398 -0
- package/src/ChainFetcher.res +459 -0
- package/src/ChainFetcher.res.mjs +281 -0
- package/src/ChainManager.res +179 -0
- package/src/ChainManager.res.mjs +139 -0
- package/src/Config.res +18 -7
- package/src/Config.res.mjs +28 -7
- package/src/Ecosystem.res +25 -0
- package/src/Ecosystem.res.mjs +29 -0
- package/src/Env.res +243 -0
- package/src/Env.res.mjs +270 -0
- package/src/Envio.gen.ts +9 -1
- package/src/Envio.res +12 -9
- package/src/EventProcessing.res +476 -0
- package/src/EventProcessing.res.mjs +341 -0
- package/src/EventRegister.res +4 -15
- package/src/EventRegister.res.mjs +3 -9
- package/src/EventRegister.resi +2 -8
- package/src/FetchState.res +54 -29
- package/src/FetchState.res.mjs +62 -35
- package/src/GlobalState.res +1169 -0
- package/src/GlobalState.res.mjs +1196 -0
- package/src/Internal.gen.ts +3 -14
- package/src/Internal.res +4 -12
- package/src/LoadLayer.res +444 -0
- package/src/LoadLayer.res.mjs +296 -0
- package/src/LoadLayer.resi +32 -0
- package/src/Prometheus.res +8 -8
- package/src/Prometheus.res.mjs +10 -10
- package/src/ReorgDetection.res +6 -10
- package/src/ReorgDetection.res.mjs +6 -6
- package/src/UserContext.res +356 -0
- package/src/UserContext.res.mjs +238 -0
- package/src/bindings/DateFns.res +71 -0
- package/src/bindings/DateFns.res.mjs +22 -0
- package/src/bindings/EventSource.res +13 -0
- package/src/bindings/EventSource.res.mjs +2 -0
- package/src/sources/Evm.res +87 -0
- package/src/sources/Evm.res.mjs +105 -0
- package/src/sources/EvmChain.res +95 -0
- package/src/sources/EvmChain.res.mjs +61 -0
- package/src/sources/Fuel.res +19 -34
- package/src/sources/Fuel.res.mjs +34 -16
- package/src/sources/FuelSDK.res +37 -0
- package/src/sources/FuelSDK.res.mjs +29 -0
- package/src/sources/HyperFuel.res +2 -2
- package/src/sources/HyperFuel.resi +1 -1
- package/src/sources/HyperFuelClient.res +2 -2
- package/src/sources/HyperFuelSource.res +8 -8
- package/src/sources/HyperFuelSource.res.mjs +5 -5
- package/src/sources/HyperSyncHeightStream.res +179 -0
- package/src/sources/HyperSyncHeightStream.res.mjs +127 -0
- package/src/sources/HyperSyncSource.res +7 -65
- package/src/sources/HyperSyncSource.res.mjs +10 -66
- package/src/sources/RpcSource.res +4 -4
- package/src/sources/RpcSource.res.mjs +3 -3
- package/src/sources/Solana.res +59 -0
- package/src/sources/Solana.res.mjs +79 -0
- package/src/sources/Source.res +2 -2
- package/src/sources/SourceManager.res +24 -32
- package/src/sources/SourceManager.res.mjs +20 -20
- package/src/sources/SourceManager.resi +4 -5
- package/src/Platform.res +0 -140
- package/src/Platform.res.mjs +0 -170
package/src/Config.res.mjs
CHANGED
@@ -1,23 +1,26 @@
 // Generated by ReScript, PLEASE EDIT WITH CARE
 
+import * as Evm from "./sources/Evm.res.mjs";
+import * as Fuel from "./sources/Fuel.res.mjs";
 import * as Utils from "./Utils.res.mjs";
 import * as Js_exn from "rescript/lib/es6/js_exn.js";
+import * as Solana from "./sources/Solana.res.mjs";
+import * as Js_dict from "rescript/lib/es6/js_dict.js";
 import * as ChainMap from "./ChainMap.res.mjs";
-import * as Platform from "./Platform.res.mjs";
 import * as Belt_Array from "rescript/lib/es6/belt_Array.js";
 
-function make(shouldRollbackOnReorgOpt, shouldSaveFullHistoryOpt, chainsOpt, enableRawEventsOpt,
+function make(shouldRollbackOnReorgOpt, shouldSaveFullHistoryOpt, chainsOpt, enableRawEventsOpt, ecosystemOpt, batchSizeOpt, lowercaseAddressesOpt, multichainOpt, shouldUseHypersyncClientDecoderOpt, maxAddrInPartitionOpt, userEntitiesOpt) {
   var shouldRollbackOnReorg = shouldRollbackOnReorgOpt !== undefined ? shouldRollbackOnReorgOpt : true;
   var shouldSaveFullHistory = shouldSaveFullHistoryOpt !== undefined ? shouldSaveFullHistoryOpt : false;
   var chains = chainsOpt !== undefined ? chainsOpt : [];
   var enableRawEvents = enableRawEventsOpt !== undefined ? enableRawEventsOpt : false;
-  var preloadHandlers = preloadHandlersOpt !== undefined ? preloadHandlersOpt : false;
   var ecosystem = ecosystemOpt !== undefined ? ecosystemOpt : "evm";
   var batchSize = batchSizeOpt !== undefined ? batchSizeOpt : 5000;
   var lowercaseAddresses = lowercaseAddressesOpt !== undefined ? lowercaseAddressesOpt : false;
   var multichain = multichainOpt !== undefined ? multichainOpt : "unordered";
   var shouldUseHypersyncClientDecoder = shouldUseHypersyncClientDecoderOpt !== undefined ? shouldUseHypersyncClientDecoderOpt : true;
   var maxAddrInPartition = maxAddrInPartitionOpt !== undefined ? maxAddrInPartitionOpt : 5000;
+  var userEntities = userEntitiesOpt !== undefined ? userEntitiesOpt : [];
   if (lowercaseAddresses && !shouldUseHypersyncClientDecoder) {
     Js_exn.raiseError("lowercase addresses is not supported when event_decoder is 'viem'. Please set event_decoder to 'hypersync-client' or change address_format to 'checksum'.");
   }
@@ -34,20 +37,38 @@ function make(shouldRollbackOnReorgOpt, shouldSaveFullHistoryOpt, chainsOpt, ena
       addContractNameToContractNameMapping[addKey] = contract.name;
     }));
   }));
-  var 
+  var ecosystem$1;
+  switch (ecosystem) {
+    case "evm" :
+      ecosystem$1 = Evm.ecosystem;
+      break;
+    case "fuel" :
+      ecosystem$1 = Fuel.ecosystem;
+      break;
+    case "solana" :
+      ecosystem$1 = Solana.ecosystem;
+      break;
+
+  }
+  var userEntitiesByName = Js_dict.fromArray(userEntities.map(function (entityConfig) {
+    return [
+      entityConfig.name,
+      entityConfig
+    ];
+  }));
   return {
     shouldRollbackOnReorg: shouldRollbackOnReorg,
     shouldSaveFullHistory: shouldSaveFullHistory,
     multichain: multichain,
     chainMap: chainMap,
     defaultChain: Belt_Array.get(chains, 0),
-
+    ecosystem: ecosystem$1,
     enableRawEvents: enableRawEvents,
-    preloadHandlers: preloadHandlers,
     maxAddrInPartition: maxAddrInPartition,
     batchSize: batchSize,
     lowercaseAddresses: lowercaseAddresses,
-    addContractNameToContractNameMapping: addContractNameToContractNameMapping
+    addContractNameToContractNameMapping: addContractNameToContractNameMapping,
+    userEntitiesByName: userEntitiesByName
   };
 }
 
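The generated `make` above now resolves the `ecosystem` string to an ecosystem implementation and indexes the new `userEntities` argument by name. A rough TypeScript sketch of that shape follows; the `EcosystemImpl`/`EntityConfig` types and the sample values are illustrative, not taken from the package.

```ts
// Hypothetical mirror of the new Config.make behaviour: resolve an ecosystem
// implementation from its name and index user entity configs by entity name.
type EcosystemName = "evm" | "fuel" | "solana";

interface EcosystemImpl {
  name: EcosystemName;
  blockNumberName: string;
}

interface EntityConfig {
  name: string;
}

// Illustrative stand-ins for Evm.ecosystem, Fuel.ecosystem and Solana.ecosystem.
const ecosystems: Record<EcosystemName, EcosystemImpl> = {
  evm: { name: "evm", blockNumberName: "number" },
  fuel: { name: "fuel", blockNumberName: "height" },
  solana: { name: "solana", blockNumberName: "slot" },
};

function makeConfig(ecosystem: EcosystemName = "evm", userEntities: EntityConfig[] = []) {
  // Same idea as the generated switch plus Js_dict.fromArray above.
  const userEntitiesByName = Object.fromEntries(
    userEntities.map((entityConfig) => [entityConfig.name, entityConfig]),
  );
  return { ecosystem: ecosystems[ecosystem], userEntitiesByName };
}

// Example: two user entities indexed by name under the "evm" ecosystem.
console.log(makeConfig("evm", [{ name: "User" }, { name: "Transfer" }]));
```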
package/src/Ecosystem.res
ADDED

@@ -0,0 +1,25 @@
+type name = | @as("evm") Evm | @as("fuel") Fuel | @as("solana") Solana
+
+type t = {
+  name: name,
+  blockFields: array<string>,
+  transactionFields: array<string>,
+  blockNumberName: string,
+  blockTimestampName: string,
+  blockHashName: string,
+  getNumber: Internal.eventBlock => int,
+  getTimestamp: Internal.eventBlock => int,
+  getId: Internal.eventBlock => string,
+  cleanUpRawEventFieldsInPlace: Js.Json.t => unit,
+}
+
+let makeOnBlockArgs = (~blockNumber: int, ~ecosystem: t, ~context): Internal.onBlockArgs => {
+  switch ecosystem.name {
+  | Solana => {slot: blockNumber, context}
+  | _ => {
+      let blockEvent = Js.Dict.empty()
+      blockEvent->Js.Dict.set(ecosystem.blockNumberName, blockNumber->Utils.magic)
+      {block: blockEvent->Utils.magic, context}
+    }
+  }
+}
package/src/Ecosystem.res.mjs
ADDED

@@ -0,0 +1,29 @@
+// Generated by ReScript, PLEASE EDIT WITH CARE
+
+import * as Caml_option from "rescript/lib/es6/caml_option.js";
+
+function makeOnBlockArgs(blockNumber, ecosystem, context) {
+  var match = ecosystem.name;
+  switch (match) {
+    case "evm" :
+    case "fuel" :
+      break;
+    case "solana" :
+      return {
+        slot: blockNumber,
+        context: context
+      };
+
+  }
+  var blockEvent = {};
+  blockEvent[ecosystem.blockNumberName] = blockNumber;
+  return {
+    block: Caml_option.some(blockEvent),
+    context: context
+  };
+}
+
+export {
+  makeOnBlockArgs ,
+}
+/* No side effect */
package/src/Env.res
ADDED
@@ -0,0 +1,243 @@
+
+// Loads the .env from the root working directory
+%%raw(`import 'dotenv/config'`)
+
+%%private(
+  let envSafe = EnvSafe.make()
+
+  let getLogLevelConfig = (name, ~default): Pino.logLevel =>
+    envSafe->EnvSafe.get(
+      name,
+      S.enum([#trace, #debug, #info, #warn, #error, #fatal, #udebug, #uinfo, #uwarn, #uerror]),
+      ~fallback=default,
+    )
+)
+// resets the timestampCaughtUpToHeadOrEndblock after a restart when true
+let updateSyncTimeOnRestart =
+  envSafe->EnvSafe.get("UPDATE_SYNC_TIME_ON_RESTART", S.bool, ~fallback=true)
+let targetBufferSize = envSafe->EnvSafe.get("ENVIO_INDEXING_MAX_BUFFER_SIZE", S.option(S.int))
+let maxAddrInPartition = envSafe->EnvSafe.get("MAX_PARTITION_SIZE", S.int, ~fallback=5_000)
+let maxPartitionConcurrency =
+  envSafe->EnvSafe.get("ENVIO_MAX_PARTITION_CONCURRENCY", S.int, ~fallback=10)
+let indexingBlockLag = envSafe->EnvSafe.get("ENVIO_INDEXING_BLOCK_LAG", S.option(S.int))
+
+// FIXME: This broke HS grafana dashboard. Should investigate it later. Maybe we should use :: as a default value?
+// We want to be able to set it to 0.0.0.0
+// to allow to passthrough the port from a Docker container
+// let serverHost = envSafe->EnvSafe.get("ENVIO_INDEXER_HOST", S.string, ~fallback="localhost")
+let serverPort =
+  envSafe->EnvSafe.get(
+    "ENVIO_INDEXER_PORT",
+    S.int->S.port,
+    ~fallback=envSafe->EnvSafe.get("METRICS_PORT", S.int->S.port, ~fallback=9898),
+  )
+
+let tuiOffEnvVar = envSafe->EnvSafe.get("TUI_OFF", S.bool, ~fallback=false)
+
+let logFilePath = envSafe->EnvSafe.get("LOG_FILE", S.string, ~fallback="logs/envio.log")
+let userLogLevel = getLogLevelConfig("LOG_LEVEL", ~default=#info)
+let defaultFileLogLevel = getLogLevelConfig("FILE_LOG_LEVEL", ~default=#trace)
+
+let prodEnvioAppUrl = "https://envio.dev"
+let envioAppUrl = envSafe->EnvSafe.get("ENVIO_APP", S.string, ~fallback=prodEnvioAppUrl)
+let envioApiToken = envSafe->EnvSafe.get("ENVIO_API_TOKEN", S.option(S.string))
+let hyperSyncClientTimeoutMillis =
+  envSafe->EnvSafe.get("ENVIO_HYPERSYNC_CLIENT_TIMEOUT_MILLIS", S.int, ~fallback=120_000)
+
+/**
+This is the number of retries that the binary client makes before rejecting the promise with an error
+Default is 0 so that the indexer can handle retries internally
+*/
+let hyperSyncClientMaxRetries =
+  envSafe->EnvSafe.get("ENVIO_HYPERSYNC_CLIENT_MAX_RETRIES", S.int, ~fallback=0)
+
+let hypersyncClientSerializationFormat =
+  envSafe->EnvSafe.get(
+    "ENVIO_HYPERSYNC_CLIENT_SERIALIZATION_FORMAT",
+    HyperSyncClient.serializationFormatSchema,
+    ~fallback=CapnProto,
+  )
+
+let hypersyncClientEnableQueryCaching =
+  envSafe->EnvSafe.get("ENVIO_HYPERSYNC_CLIENT_ENABLE_QUERY_CACHING", S.bool, ~fallback=true)
+
+module Benchmark = {
+  module SaveDataStrategy: {
+    type t
+    let schema: S.t<t>
+    let default: t
+    let shouldSaveJsonFile: t => bool
+    let shouldSavePrometheus: t => bool
+    let shouldSaveData: t => bool
+  } = {
+    @unboxed
+    type t = Bool(bool) | @as("json-file") JsonFile | @as("prometheus") Prometheus
+
+    let schema = S.enum([Bool(true), Bool(false), JsonFile, Prometheus])
+    let default = Bool(false)
+
+    let shouldSaveJsonFile = self =>
+      switch self {
+      | JsonFile | Bool(true) => true
+      | _ => false
+      }
+
+    let shouldSavePrometheus = _ => true
+
+    let shouldSaveData = self => self->shouldSavePrometheus || self->shouldSaveJsonFile
+  }
+
+  let saveDataStrategy =
+    envSafe->EnvSafe.get(
+      "ENVIO_SAVE_BENCHMARK_DATA",
+      SaveDataStrategy.schema,
+      ~fallback=SaveDataStrategy.default,
+    )
+
+  let shouldSaveData = saveDataStrategy->SaveDataStrategy.shouldSaveData
+
+  /**
+  StdDev involves saving sum of squares of data points, which could get very large.
+
+  Currently only do this for local runs on json-file and not prometheus.
+  */
+  let shouldSaveStdDev =
+    saveDataStrategy->SaveDataStrategy.shouldSaveJsonFile
+}
+
+let logStrategy =
+  envSafe->EnvSafe.get(
+    "LOG_STRATEGY",
+    S.enum([
+      Logging.EcsFile,
+      EcsConsole,
+      EcsConsoleMultistream,
+      FileOnly,
+      ConsoleRaw,
+      ConsolePretty,
+      Both,
+    ]),
+    ~fallback=ConsolePretty,
+  )
+
+Logging.setLogger(
+  Logging.makeLogger(~logStrategy, ~logFilePath, ~defaultFileLogLevel, ~userLogLevel),
+)
+
+module Db = {
+  let host = envSafe->EnvSafe.get("ENVIO_PG_HOST", S.string, ~devFallback="localhost")
+  let port = envSafe->EnvSafe.get("ENVIO_PG_PORT", S.int->S.port, ~devFallback=5433)
+  let user = envSafe->EnvSafe.get("ENVIO_PG_USER", S.string, ~devFallback="postgres")
+  let password = envSafe->EnvSafe.get(
+    "ENVIO_PG_PASSWORD",
+    S.string,
+    ~fallback={
+      envSafe->EnvSafe.get("ENVIO_POSTGRES_PASSWORD", S.string, ~fallback="testing")
+    },
+  )
+  let database = envSafe->EnvSafe.get("ENVIO_PG_DATABASE", S.string, ~devFallback="envio-dev")
+  let publicSchema = envSafe->EnvSafe.get("ENVIO_PG_PUBLIC_SCHEMA", S.string, ~fallback="public")
+  let ssl = envSafe->EnvSafe.get(
+    "ENVIO_PG_SSL_MODE",
+    Postgres.sslOptionsSchema,
+    //this is a dev fallback option for local deployments, shouldn't run in the prod env
+    //the SSL modes should be provided as string otherwise as 'require' | 'allow' | 'prefer' | 'verify-full'
+    ~devFallback=Bool(false),
+  )
+}
+
+module ClickHouseSink = {
+  let host = envSafe->EnvSafe.get("ENVIO_CLICKHOUSE_SINK_HOST", S.option(S.string))
+  let database = envSafe->EnvSafe.get("ENVIO_CLICKHOUSE_SINK_DATABASE", S.option(S.string))
+  let username = switch host {
+  | None => ""
+  | Some(_) => envSafe->EnvSafe.get("ENVIO_CLICKHOUSE_SINK_USERNAME", S.string)
+  }
+  let password = switch host {
+  | None => ""
+  | Some(_) => envSafe->EnvSafe.get("ENVIO_CLICKHOUSE_SINK_PASSWORD", S.string)
+  }
+}
+
+module Hasura = {
+  // Disable it on HS indexer run, since we don't have Hasura credentials anyways
+  // Also, it might be useful for some users who don't care about Hasura
+  let enabled = envSafe->EnvSafe.get("ENVIO_HASURA", S.bool, ~fallback=true)
+
+  let responseLimit = switch envSafe->EnvSafe.get("ENVIO_HASURA_RESPONSE_LIMIT", S.option(S.int)) {
+  | Some(_) as s => s
+  | None => envSafe->EnvSafe.get("HASURA_RESPONSE_LIMIT", S.option(S.int))
+  }
+
+  let graphqlEndpoint =
+    envSafe->EnvSafe.get(
+      "HASURA_GRAPHQL_ENDPOINT",
+      S.string,
+      ~devFallback="http://localhost:8080/v1/metadata",
+    )
+
+  let url = graphqlEndpoint->Js.String2.slice(~from=0, ~to_=-("/v1/metadata"->Js.String2.length))
+
+  let role = envSafe->EnvSafe.get("HASURA_GRAPHQL_ROLE", S.string, ~devFallback="admin")
+
+  let secret = envSafe->EnvSafe.get("HASURA_GRAPHQL_ADMIN_SECRET", S.string, ~devFallback="testing")
+
+  let aggregateEntities = envSafe->EnvSafe.get(
+    "ENVIO_HASURA_PUBLIC_AGGREGATE",
+    S.union([
+      S.array(S.string),
+      // Temporary workaround: Hosted Service can't use commas in env vars for multiple entities.
+      // Will be removed once comma support is added — don't rely on this.
+      S.string->S.transform(s => {
+        parser: string =>
+          switch string->Js.String2.split("&") {
+          | []
+          | [_] =>
+            s.fail(`Provide an array of entities in the JSON format.`)
+          | entities => entities
+          },
+      }),
+    ]),
+    ~fallback=[],
+  )
+}
+
+module Configurable = {
+  module SyncConfig = {
+    let initialBlockInterval =
+      envSafe->EnvSafe.get("ENVIO_RPC_INITIAL_BLOCK_INTERVAL", S.option(S.int))
+    let backoffMultiplicative =
+      envSafe->EnvSafe.get("ENVIO_RPC_BACKOFF_MULTIPLICATIVE", S.option(S.float))
+    let accelerationAdditive =
+      envSafe->EnvSafe.get("ENVIO_RPC_ACCELERATION_ADDITIVE", S.option(S.int))
+    let intervalCeiling = envSafe->EnvSafe.get("ENVIO_RPC_INTERVAL_CEILING", S.option(S.int))
+  }
+}
+
+module ThrottleWrites = {
+  let chainMetadataIntervalMillis =
+    envSafe->EnvSafe.get("ENVIO_THROTTLE_CHAIN_METADATA_INTERVAL_MILLIS", S.int, ~devFallback=500)
+  let pruneStaleDataIntervalMillis =
+    envSafe->EnvSafe.get(
+      "ENVIO_THROTTLE_PRUNE_STALE_DATA_INTERVAL_MILLIS",
+      S.int,
+      ~devFallback=30_000,
+    )
+
+  let liveMetricsBenchmarkIntervalMillis =
+    envSafe->EnvSafe.get(
+      "ENVIO_THROTTLE_LIVE_METRICS_BENCHMARK_INTERVAL_MILLIS",
+      S.int,
+      ~devFallback=1_000,
+    )
+
+  let jsonFileBenchmarkIntervalMillis =
+    envSafe->EnvSafe.get(
+      "ENVIO_THROTTLE_JSON_FILE_BENCHMARK_INTERVAL_MILLIS",
+      S.int,
+      ~devFallback=500,
+    )
+}
+
+// You need to close the envSafe after you're done with it so that it immediately tells you about your misconfigured environment on startup.
+envSafe->EnvSafe.close
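Env.res centralizes the indexer's environment configuration through rescript-envsafe: each `EnvSafe.get` call reads one variable and falls back to a default. A rough TypeScript equivalent for a handful of the variables above, with names and fallback values taken from the diff; the `readInt`/`readBool` helpers are illustrative and not part of the package.

```ts
// Illustrative reader for a few of the variables defined in Env.res.
// Variable names and defaults come from the diff above; the helpers are not
// part of envio and only sketch the "read with fallback" pattern.
function readInt(name: string, fallback: number): number {
  const raw = process.env[name];
  if (raw === undefined || raw === "") return fallback;
  const parsed = Number.parseInt(raw, 10);
  if (Number.isNaN(parsed)) throw new Error(`${name} must be an integer, got "${raw}"`);
  return parsed;
}

function readBool(name: string, fallback: boolean): boolean {
  const raw = process.env[name];
  if (raw === undefined || raw === "") return fallback;
  return raw === "true" || raw === "1";
}

export const env = {
  maxAddrInPartition: readInt("MAX_PARTITION_SIZE", 5000),
  maxPartitionConcurrency: readInt("ENVIO_MAX_PARTITION_CONCURRENCY", 10),
  // ENVIO_INDEXER_PORT falls back to METRICS_PORT, which falls back to 9898.
  serverPort: readInt("ENVIO_INDEXER_PORT", readInt("METRICS_PORT", 9898)),
  hyperSyncClientTimeoutMillis: readInt("ENVIO_HYPERSYNC_CLIENT_TIMEOUT_MILLIS", 120_000),
  hasuraEnabled: readBool("ENVIO_HASURA", true),
};
```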
package/src/Env.res.mjs
ADDED
@@ -0,0 +1,270 @@
+// Generated by ReScript, PLEASE EDIT WITH CARE
+
+import * as EnvSafe from "rescript-envsafe/src/EnvSafe.res.mjs";
+import * as Logging from "./Logging.res.mjs";
+import * as Postgres from "./bindings/Postgres.res.mjs";
+import * as Caml_option from "rescript/lib/es6/caml_option.js";
+import * as HyperSyncClient from "./sources/HyperSyncClient.res.mjs";
+import * as S$RescriptSchema from "rescript-schema/src/S.res.mjs";
+
+import 'dotenv/config'
+;
+
+var envSafe = EnvSafe.make(undefined);
+
+function getLogLevelConfig(name, $$default) {
+  return EnvSafe.get(envSafe, name, S$RescriptSchema.$$enum([
+    "trace",
+    "debug",
+    "info",
+    "warn",
+    "error",
+    "fatal",
+    "udebug",
+    "uinfo",
+    "uwarn",
+    "uerror"
+  ]), undefined, $$default, undefined, undefined);
+}
+
+var updateSyncTimeOnRestart = EnvSafe.get(envSafe, "UPDATE_SYNC_TIME_ON_RESTART", S$RescriptSchema.bool, undefined, true, undefined, undefined);
+
+var targetBufferSize = EnvSafe.get(envSafe, "ENVIO_INDEXING_MAX_BUFFER_SIZE", S$RescriptSchema.option(S$RescriptSchema.$$int), undefined, undefined, undefined, undefined);
+
+var maxAddrInPartition = EnvSafe.get(envSafe, "MAX_PARTITION_SIZE", S$RescriptSchema.$$int, undefined, 5000, undefined, undefined);
+
+var maxPartitionConcurrency = EnvSafe.get(envSafe, "ENVIO_MAX_PARTITION_CONCURRENCY", S$RescriptSchema.$$int, undefined, 10, undefined, undefined);
+
+var indexingBlockLag = EnvSafe.get(envSafe, "ENVIO_INDEXING_BLOCK_LAG", S$RescriptSchema.option(S$RescriptSchema.$$int), undefined, undefined, undefined, undefined);
+
+var serverPort = EnvSafe.get(envSafe, "ENVIO_INDEXER_PORT", S$RescriptSchema.port(S$RescriptSchema.$$int, undefined), undefined, EnvSafe.get(envSafe, "METRICS_PORT", S$RescriptSchema.port(S$RescriptSchema.$$int, undefined), undefined, 9898, undefined, undefined), undefined, undefined);
+
+var tuiOffEnvVar = EnvSafe.get(envSafe, "TUI_OFF", S$RescriptSchema.bool, undefined, false, undefined, undefined);
+
+var logFilePath = EnvSafe.get(envSafe, "LOG_FILE", S$RescriptSchema.string, undefined, "logs/envio.log", undefined, undefined);
+
+var userLogLevel = getLogLevelConfig("LOG_LEVEL", "info");
+
+var defaultFileLogLevel = getLogLevelConfig("FILE_LOG_LEVEL", "trace");
+
+var prodEnvioAppUrl = "https://envio.dev";
+
+var envioAppUrl = EnvSafe.get(envSafe, "ENVIO_APP", S$RescriptSchema.string, undefined, prodEnvioAppUrl, undefined, undefined);
+
+var envioApiToken = EnvSafe.get(envSafe, "ENVIO_API_TOKEN", S$RescriptSchema.option(S$RescriptSchema.string), undefined, undefined, undefined, undefined);
+
+var hyperSyncClientTimeoutMillis = EnvSafe.get(envSafe, "ENVIO_HYPERSYNC_CLIENT_TIMEOUT_MILLIS", S$RescriptSchema.$$int, undefined, 120000, undefined, undefined);
+
+var hyperSyncClientMaxRetries = EnvSafe.get(envSafe, "ENVIO_HYPERSYNC_CLIENT_MAX_RETRIES", S$RescriptSchema.$$int, undefined, 0, undefined, undefined);
+
+var hypersyncClientSerializationFormat = EnvSafe.get(envSafe, "ENVIO_HYPERSYNC_CLIENT_SERIALIZATION_FORMAT", HyperSyncClient.serializationFormatSchema, undefined, "CapnProto", undefined, undefined);
+
+var hypersyncClientEnableQueryCaching = EnvSafe.get(envSafe, "ENVIO_HYPERSYNC_CLIENT_ENABLE_QUERY_CACHING", S$RescriptSchema.bool, undefined, true, undefined, undefined);
+
+var schema = S$RescriptSchema.$$enum([
+  true,
+  false,
+  "json-file",
+  "prometheus"
+]);
+
+var $$default = false;
+
+function shouldSaveJsonFile(self) {
+  if (typeof self !== "boolean") {
+    if (self === "json-file") {
+      return true;
+    } else {
+      return false;
+    }
+  } else if (self) {
+    return true;
+  } else {
+    return false;
+  }
+}
+
+function shouldSavePrometheus(param) {
+  return true;
+}
+
+function shouldSaveData(self) {
+  return true;
+}
+
+var SaveDataStrategy = {
+  schema: schema,
+  $$default: $$default,
+  shouldSaveJsonFile: shouldSaveJsonFile,
+  shouldSavePrometheus: shouldSavePrometheus,
+  shouldSaveData: shouldSaveData
+};
+
+var saveDataStrategy = EnvSafe.get(envSafe, "ENVIO_SAVE_BENCHMARK_DATA", schema, undefined, Caml_option.some($$default), undefined, undefined);
+
+var shouldSaveData$1 = shouldSaveData(saveDataStrategy);
+
+var shouldSaveStdDev = shouldSaveJsonFile(saveDataStrategy);
+
+var Benchmark = {
+  SaveDataStrategy: SaveDataStrategy,
+  saveDataStrategy: saveDataStrategy,
+  shouldSaveData: shouldSaveData$1,
+  shouldSaveStdDev: shouldSaveStdDev
+};
+
+var logStrategy = EnvSafe.get(envSafe, "LOG_STRATEGY", S$RescriptSchema.$$enum([
+  "ecs-file",
+  "ecs-console",
+  "ecs-console-multistream",
+  "file-only",
+  "console-raw",
+  "console-pretty",
+  "both-prettyconsole"
+]), undefined, "console-pretty", undefined, undefined);
+
+Logging.setLogger(Logging.makeLogger(logStrategy, logFilePath, defaultFileLogLevel, userLogLevel));
+
+var host = EnvSafe.get(envSafe, "ENVIO_PG_HOST", S$RescriptSchema.string, undefined, undefined, "localhost", undefined);
+
+var port = EnvSafe.get(envSafe, "ENVIO_PG_PORT", S$RescriptSchema.port(S$RescriptSchema.$$int, undefined), undefined, undefined, 5433, undefined);
+
+var user = EnvSafe.get(envSafe, "ENVIO_PG_USER", S$RescriptSchema.string, undefined, undefined, "postgres", undefined);
+
+var password = EnvSafe.get(envSafe, "ENVIO_PG_PASSWORD", S$RescriptSchema.string, undefined, EnvSafe.get(envSafe, "ENVIO_POSTGRES_PASSWORD", S$RescriptSchema.string, undefined, "testing", undefined, undefined), undefined, undefined);
+
+var database = EnvSafe.get(envSafe, "ENVIO_PG_DATABASE", S$RescriptSchema.string, undefined, undefined, "envio-dev", undefined);
+
+var publicSchema = EnvSafe.get(envSafe, "ENVIO_PG_PUBLIC_SCHEMA", S$RescriptSchema.string, undefined, "public", undefined, undefined);
+
+var ssl = EnvSafe.get(envSafe, "ENVIO_PG_SSL_MODE", Postgres.sslOptionsSchema, undefined, undefined, false, undefined);
+
+var Db = {
+  host: host,
+  port: port,
+  user: user,
+  password: password,
+  database: database,
+  publicSchema: publicSchema,
+  ssl: ssl
+};
+
+var host$1 = EnvSafe.get(envSafe, "ENVIO_CLICKHOUSE_SINK_HOST", S$RescriptSchema.option(S$RescriptSchema.string), undefined, undefined, undefined, undefined);
+
+var database$1 = EnvSafe.get(envSafe, "ENVIO_CLICKHOUSE_SINK_DATABASE", S$RescriptSchema.option(S$RescriptSchema.string), undefined, undefined, undefined, undefined);
+
+var username = host$1 !== undefined ? EnvSafe.get(envSafe, "ENVIO_CLICKHOUSE_SINK_USERNAME", S$RescriptSchema.string, undefined, undefined, undefined, undefined) : "";
+
+var password$1 = host$1 !== undefined ? EnvSafe.get(envSafe, "ENVIO_CLICKHOUSE_SINK_PASSWORD", S$RescriptSchema.string, undefined, undefined, undefined, undefined) : "";
+
+var ClickHouseSink = {
+  host: host$1,
+  database: database$1,
+  username: username,
+  password: password$1
+};
+
+var enabled = EnvSafe.get(envSafe, "ENVIO_HASURA", S$RescriptSchema.bool, undefined, true, undefined, undefined);
+
+var s = EnvSafe.get(envSafe, "ENVIO_HASURA_RESPONSE_LIMIT", S$RescriptSchema.option(S$RescriptSchema.$$int), undefined, undefined, undefined, undefined);
+
+var responseLimit = s !== undefined ? s : EnvSafe.get(envSafe, "HASURA_RESPONSE_LIMIT", S$RescriptSchema.option(S$RescriptSchema.$$int), undefined, undefined, undefined, undefined);
+
+var graphqlEndpoint = EnvSafe.get(envSafe, "HASURA_GRAPHQL_ENDPOINT", S$RescriptSchema.string, undefined, undefined, "http://localhost:8080/v1/metadata", undefined);
+
+var url = graphqlEndpoint.slice(0, -"/v1/metadata".length | 0);
+
+var role = EnvSafe.get(envSafe, "HASURA_GRAPHQL_ROLE", S$RescriptSchema.string, undefined, undefined, "admin", undefined);
+
+var secret = EnvSafe.get(envSafe, "HASURA_GRAPHQL_ADMIN_SECRET", S$RescriptSchema.string, undefined, undefined, "testing", undefined);
+
+var aggregateEntities = EnvSafe.get(envSafe, "ENVIO_HASURA_PUBLIC_AGGREGATE", S$RescriptSchema.union([
+  S$RescriptSchema.array(S$RescriptSchema.string),
+  S$RescriptSchema.transform(S$RescriptSchema.string, (function (s) {
+    return {
+      p: (function (string) {
+        var entities = string.split("&");
+        var len = entities.length;
+        if (len !== 1 && len !== 0) {
+          return entities;
+        } else {
+          return s.fail("Provide an array of entities in the JSON format.", undefined);
+        }
+      })
+    };
+  }))
+]), undefined, [], undefined, undefined);
+
+var Hasura = {
+  enabled: enabled,
+  responseLimit: responseLimit,
+  graphqlEndpoint: graphqlEndpoint,
+  url: url,
+  role: role,
+  secret: secret,
+  aggregateEntities: aggregateEntities
+};
+
+var initialBlockInterval = EnvSafe.get(envSafe, "ENVIO_RPC_INITIAL_BLOCK_INTERVAL", S$RescriptSchema.option(S$RescriptSchema.$$int), undefined, undefined, undefined, undefined);
+
+var backoffMultiplicative = EnvSafe.get(envSafe, "ENVIO_RPC_BACKOFF_MULTIPLICATIVE", S$RescriptSchema.option(S$RescriptSchema.$$float), undefined, undefined, undefined, undefined);
+
+var accelerationAdditive = EnvSafe.get(envSafe, "ENVIO_RPC_ACCELERATION_ADDITIVE", S$RescriptSchema.option(S$RescriptSchema.$$int), undefined, undefined, undefined, undefined);
+
+var intervalCeiling = EnvSafe.get(envSafe, "ENVIO_RPC_INTERVAL_CEILING", S$RescriptSchema.option(S$RescriptSchema.$$int), undefined, undefined, undefined, undefined);
+
+var SyncConfig = {
+  initialBlockInterval: initialBlockInterval,
+  backoffMultiplicative: backoffMultiplicative,
+  accelerationAdditive: accelerationAdditive,
+  intervalCeiling: intervalCeiling
+};
+
+var Configurable = {
+  SyncConfig: SyncConfig
+};
+
+var chainMetadataIntervalMillis = EnvSafe.get(envSafe, "ENVIO_THROTTLE_CHAIN_METADATA_INTERVAL_MILLIS", S$RescriptSchema.$$int, undefined, undefined, 500, undefined);
+
+var pruneStaleDataIntervalMillis = EnvSafe.get(envSafe, "ENVIO_THROTTLE_PRUNE_STALE_DATA_INTERVAL_MILLIS", S$RescriptSchema.$$int, undefined, undefined, 30000, undefined);
+
+var liveMetricsBenchmarkIntervalMillis = EnvSafe.get(envSafe, "ENVIO_THROTTLE_LIVE_METRICS_BENCHMARK_INTERVAL_MILLIS", S$RescriptSchema.$$int, undefined, undefined, 1000, undefined);
+
+var jsonFileBenchmarkIntervalMillis = EnvSafe.get(envSafe, "ENVIO_THROTTLE_JSON_FILE_BENCHMARK_INTERVAL_MILLIS", S$RescriptSchema.$$int, undefined, undefined, 500, undefined);
+
+var ThrottleWrites = {
+  chainMetadataIntervalMillis: chainMetadataIntervalMillis,
+  pruneStaleDataIntervalMillis: pruneStaleDataIntervalMillis,
+  liveMetricsBenchmarkIntervalMillis: liveMetricsBenchmarkIntervalMillis,
+  jsonFileBenchmarkIntervalMillis: jsonFileBenchmarkIntervalMillis
+};
+
+EnvSafe.close(envSafe);
+
+export {
+  updateSyncTimeOnRestart ,
+  targetBufferSize ,
+  maxAddrInPartition ,
+  maxPartitionConcurrency ,
+  indexingBlockLag ,
+  serverPort ,
+  tuiOffEnvVar ,
+  logFilePath ,
+  userLogLevel ,
+  defaultFileLogLevel ,
+  prodEnvioAppUrl ,
+  envioAppUrl ,
+  envioApiToken ,
+  hyperSyncClientTimeoutMillis ,
+  hyperSyncClientMaxRetries ,
+  hypersyncClientSerializationFormat ,
+  hypersyncClientEnableQueryCaching ,
+  Benchmark ,
+  logStrategy ,
+  Db ,
+  ClickHouseSink ,
+  Hasura ,
+  Configurable ,
+  ThrottleWrites ,
+}
+/* Not a pure module */
package/src/Envio.gen.ts
CHANGED
@@ -13,7 +13,9 @@ import type {S_t as RescriptSchema_S_t} from 'rescript-schema/RescriptSchema.gen
 
 export type blockEvent = { readonly number: number };
 
-export type fuelBlockEvent = { readonly height: number
+export type fuelBlockEvent = { readonly height: number };
+
+export type solanaOnBlockArgs<context> = { readonly slot: number; readonly context: context };
 
 export type onBlockArgs<block,context> = { readonly block: block; readonly context: context };
 
@@ -25,6 +27,12 @@ export type onBlockOptions<chain> = {
   readonly endBlock?: number
 };
 
+export type whereOperations<entity,fieldType> = {
+  readonly eq: (_1:fieldType) => Promise<entity[]>;
+  readonly gt: (_1:fieldType) => Promise<entity[]>;
+  readonly lt: (_1:fieldType) => Promise<entity[]>
+};
+
 export type logger = $$logger;
 
 export type effect<input,output> = $$effect<input,output>;