@sentio/runtime 0.0.0-rc.a
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +55 -0
- package/lib/chunk-DYOBLZD3.js +80341 -0
- package/lib/chunk-DYOBLZD3.js.map +1 -0
- package/lib/index.d.ts +663 -0
- package/lib/index.js +127 -0
- package/lib/index.js.map +1 -0
- package/lib/processor-runner.d.ts +34 -0
- package/lib/processor-runner.js +41155 -0
- package/lib/processor-runner.js.map +1 -0
- package/package.json +63 -0
- package/src/action-server.ts +18 -0
- package/src/chain-config.ts +5 -0
- package/src/db-context.ts +227 -0
- package/src/decode-benchmark.ts +28 -0
- package/src/endpoints.ts +11 -0
- package/src/full-service.ts +339 -0
- package/src/gen/google/protobuf/empty.ts +56 -0
- package/src/gen/google/protobuf/struct.ts +494 -0
- package/src/gen/google/protobuf/timestamp.ts +106 -0
- package/src/gen/processor/protos/processor.ts +13640 -0
- package/src/gen/service/common/protos/common.ts +14452 -0
- package/src/global-config.ts +33 -0
- package/src/index.ts +10 -0
- package/src/logger.ts +59 -0
- package/src/metrics.ts +202 -0
- package/src/multicall.ts +1615 -0
- package/src/otlp.ts +59 -0
- package/src/plugin.ts +120 -0
- package/src/processor-runner.ts +226 -0
- package/src/provider.ts +195 -0
- package/src/service-manager.ts +263 -0
- package/src/service-worker.ts +116 -0
- package/src/service.ts +505 -0
- package/src/state.ts +83 -0
- package/src/tsup.config.ts +16 -0
- package/src/utils.ts +93 -0
package/src/otlp.ts
ADDED
@@ -0,0 +1,59 @@
|
|
1
|
+
import { envDetector } from '@opentelemetry/resources'
|
2
|
+
import { MeterProvider, PeriodicExportingMetricReader } from '@opentelemetry/sdk-metrics'
|
3
|
+
import { OTLPMetricExporter } from '@opentelemetry/exporter-metrics-otlp-grpc'
|
4
|
+
import { PrometheusExporter } from '@opentelemetry/exporter-prometheus'
|
5
|
+
import { NodeTracerProvider } from '@opentelemetry/sdk-trace-node'
|
6
|
+
import { OTLPTraceExporter } from '@opentelemetry/exporter-trace-otlp-grpc'
|
7
|
+
import { BatchSpanProcessor } from '@opentelemetry/sdk-trace-base'
|
8
|
+
import { diag, DiagConsoleLogger, DiagLogLevel, metrics, trace, ProxyTracerProvider } from '@opentelemetry/api'
|
9
|
+
|
10
|
+
export async function setupOTLP(debug?: boolean) {
|
11
|
+
if (debug) {
|
12
|
+
diag.setLogger(new DiagConsoleLogger(), DiagLogLevel.DEBUG)
|
13
|
+
}
|
14
|
+
|
15
|
+
const resource = await envDetector.detect()
|
16
|
+
|
17
|
+
const meterProvider = new MeterProvider({
|
18
|
+
resource,
|
19
|
+
readers: [
|
20
|
+
new PeriodicExportingMetricReader({
|
21
|
+
exporter: new OTLPMetricExporter()
|
22
|
+
}),
|
23
|
+
new PrometheusExporter({
|
24
|
+
host: '0.0.0.0',
|
25
|
+
port: 4041
|
26
|
+
})
|
27
|
+
]
|
28
|
+
})
|
29
|
+
|
30
|
+
const traceProvider = new NodeTracerProvider({
|
31
|
+
resource: resource
|
32
|
+
})
|
33
|
+
const exporter = new OTLPTraceExporter() // new ConsoleSpanExporter();
|
34
|
+
const processor = new BatchSpanProcessor(exporter)
|
35
|
+
traceProvider.addSpanProcessor(processor)
|
36
|
+
|
37
|
+
metrics.setGlobalMeterProvider(meterProvider)
|
38
|
+
trace.setGlobalTracerProvider(traceProvider)
|
39
|
+
;['SIGINT', 'SIGTERM'].forEach((signal) => {
|
40
|
+
process.on(signal as any, () => shutdownProvider())
|
41
|
+
})
|
42
|
+
|
43
|
+
metrics.getMeter('processor').createGauge('up').record(1)
|
44
|
+
}
|
45
|
+
|
46
|
+
export async function shutdownProvider() {
|
47
|
+
const traceProvider = trace.getTracerProvider()
|
48
|
+
|
49
|
+
if (traceProvider instanceof ProxyTracerProvider) {
|
50
|
+
const delegate = traceProvider.getDelegate()
|
51
|
+
if (delegate instanceof NodeTracerProvider) {
|
52
|
+
delegate.shutdown().catch(console.error)
|
53
|
+
}
|
54
|
+
}
|
55
|
+
const meterProvider = metrics.getMeterProvider()
|
56
|
+
if (meterProvider instanceof MeterProvider) {
|
57
|
+
meterProvider.shutdown().catch(console.error)
|
58
|
+
}
|
59
|
+
}
|
package/src/plugin.ts
ADDED
@@ -0,0 +1,120 @@
|
|
1
|
+
import {
|
2
|
+
DataBinding,
|
3
|
+
HandlerType,
|
4
|
+
PreparedData,
|
5
|
+
PreprocessResult,
|
6
|
+
ProcessConfigResponse,
|
7
|
+
ProcessResult,
|
8
|
+
StartRequest
|
9
|
+
} from '@sentio/protos'
|
10
|
+
import { StoreContext } from './db-context.js'
|
11
|
+
import { AsyncLocalStorage } from 'node:async_hooks'
|
12
|
+
|
13
|
+
/**
 * Base class for runtime plugins. A plugin declares which handler types it
 * supports and provides hooks for configuration, startup, and processing of
 * data bindings. Dispatch between plugins is done by PluginManager.
 */
export abstract class Plugin {
  name: string
  // Handler types this plugin claims; PluginManager uses this list to route
  // bindings (each handler type may be claimed by at most one plugin).
  supportedHandlers: HandlerType[] = []

  /** Contributes this plugin's configuration to the process config response. */
  async configure(config: ProcessConfigResponse): Promise<void> {}

  /** Called once with the start request before any bindings are processed. */
  async start(start: StartRequest): Promise<void> {}

  /**
   * @deprecated The method should not be used, use ctx.states instead
   */
  stateDiff(config: ProcessConfigResponse): boolean {
    return false
  }

  /** Processes one data binding; the default implementation returns an empty result. */
  async processBinding(request: DataBinding, preparedData: PreparedData | undefined): Promise<ProcessResult> {
    return ProcessResult.create()
  }

  /**
   * Preprocessing pass over a binding; `preprocessStore` is a shared scratch
   * object. The default implementation returns an empty result.
   */
  async preprocessBinding(request: DataBinding, preprocessStore: { [k: string]: any }): Promise<PreprocessResult> {
    return PreprocessResult.create()
  }

  /**
   * method used by action server only
   * @param port
   */
  async startServer(port?: number): Promise<void> {}

  /**
   * method used by action server only
   */
  shutdownServer() {}
}
|
47
|
+
|
48
|
+
export class PluginManager {
|
49
|
+
static INSTANCE = new PluginManager()
|
50
|
+
|
51
|
+
dbContextLocalStorage = new AsyncLocalStorage<StoreContext | undefined>()
|
52
|
+
plugins: Plugin[] = []
|
53
|
+
typesToPlugin = new Map<HandlerType, Plugin>()
|
54
|
+
|
55
|
+
register(plugin: Plugin) {
|
56
|
+
if (this.plugins.find((p) => p.name === plugin.name)) {
|
57
|
+
return
|
58
|
+
}
|
59
|
+
this.plugins.push(plugin)
|
60
|
+
|
61
|
+
for (const handlerType of plugin.supportedHandlers) {
|
62
|
+
const exsited = this.typesToPlugin.get(handlerType)
|
63
|
+
if (exsited) {
|
64
|
+
throw new Error(`Duplicate plugin for ${handlerType}: ${exsited.name} and ${plugin.name}`)
|
65
|
+
}
|
66
|
+
this.typesToPlugin.set(handlerType, plugin)
|
67
|
+
}
|
68
|
+
}
|
69
|
+
|
70
|
+
configure(config: ProcessConfigResponse) {
|
71
|
+
return Promise.all(this.plugins.map((plugin) => plugin.configure(config)))
|
72
|
+
}
|
73
|
+
|
74
|
+
start(start: StartRequest, actionServerPort?: number) {
|
75
|
+
return Promise.all(this.plugins.map((plugin) => plugin.start(start)))
|
76
|
+
}
|
77
|
+
|
78
|
+
startServer(port?: number) {
|
79
|
+
return Promise.all(this.plugins.map((plugin) => plugin.startServer(port)))
|
80
|
+
}
|
81
|
+
|
82
|
+
shutdown() {
|
83
|
+
this.plugins.forEach((plugin) => plugin.shutdownServer())
|
84
|
+
}
|
85
|
+
|
86
|
+
/**
|
87
|
+
* @deprecated The method should not be used, use ctx.states instead
|
88
|
+
*/
|
89
|
+
stateDiff(config: ProcessConfigResponse): boolean {
|
90
|
+
return this.plugins.some((plugin) => plugin.stateDiff(config))
|
91
|
+
}
|
92
|
+
|
93
|
+
processBinding(
|
94
|
+
request: DataBinding,
|
95
|
+
preparedData: PreparedData | undefined,
|
96
|
+
dbContext?: StoreContext
|
97
|
+
): Promise<ProcessResult> {
|
98
|
+
const plugin = this.typesToPlugin.get(request.handlerType)
|
99
|
+
if (!plugin) {
|
100
|
+
throw new Error(`No plugin for ${request.handlerType}`)
|
101
|
+
}
|
102
|
+
return this.dbContextLocalStorage.run(dbContext, () => {
|
103
|
+
return plugin.processBinding(request, preparedData)
|
104
|
+
})
|
105
|
+
}
|
106
|
+
|
107
|
+
preprocessBinding(
|
108
|
+
request: DataBinding,
|
109
|
+
preprocessStore: { [k: string]: any },
|
110
|
+
dbContext?: StoreContext
|
111
|
+
): Promise<PreprocessResult> {
|
112
|
+
const plugin = this.typesToPlugin.get(request.handlerType)
|
113
|
+
if (!plugin) {
|
114
|
+
throw new Error(`No plugin for ${request.handlerType}`)
|
115
|
+
}
|
116
|
+
return this.dbContextLocalStorage.run(dbContext, () => {
|
117
|
+
return plugin.preprocessBinding(request, preprocessStore)
|
118
|
+
})
|
119
|
+
}
|
120
|
+
}
|
@@ -0,0 +1,226 @@
|
|
1
|
+
#!/usr/bin/env node
|
2
|
+
|
3
|
+
import path from 'path'
|
4
|
+
import fs from 'fs-extra'
|
5
|
+
|
6
|
+
import { compressionAlgorithms } from '@grpc/grpc-js'
|
7
|
+
import commandLineArgs from 'command-line-args'
|
8
|
+
import { createServer } from 'nice-grpc'
|
9
|
+
import { errorDetailsServerMiddleware } from 'nice-grpc-error-details'
|
10
|
+
// import { registry as niceGrpcRegistry } from 'nice-grpc-prometheus'
|
11
|
+
import { openTelemetryServerMiddleware } from 'nice-grpc-opentelemetry'
|
12
|
+
import http from 'http'
|
13
|
+
// @ts-ignore inspector promises is not included in @type/node
|
14
|
+
import { Session } from 'node:inspector/promises'
|
15
|
+
|
16
|
+
import { ProcessorDefinition } from './gen/processor/protos/processor.js'
|
17
|
+
import { ProcessorServiceImpl } from './service.js'
|
18
|
+
import { Endpoints } from './endpoints.js'
|
19
|
+
import { FullProcessorServiceImpl } from './full-service.js'
|
20
|
+
import { ChainConfig } from './chain-config.js'
|
21
|
+
import { setupLogger } from './logger.js'
|
22
|
+
|
23
|
+
import { setupOTLP } from './otlp.js'
|
24
|
+
import { ActionServer } from './action-server.js'
|
25
|
+
import { ServiceManager } from './service-manager.js'
|
26
|
+
|
27
|
+
// const mergedRegistry = Registry.merge([globalRegistry, niceGrpcRegistry])
|
28
|
+
|
29
|
+
// CLI option schema consumed by command-line-args (parsed with partial: true
// below, so unknown flags are tolerated).
export const optionDefinitions = [
  // Positional argument: path of the processor bundle to load.
  { name: 'target', type: String, defaultOption: true },
  { name: 'port', alias: 'p', type: String, defaultValue: '4000' },
  { name: 'concurrency', type: Number, defaultValue: 4 },
  { name: 'batch-count', type: Number, defaultValue: 1 },
  // { name: 'use-chainserver', type: Boolean, defaultValue: false },
  {
    name: 'chains-config',
    alias: 'c',
    type: String,
    defaultValue: 'chains-config.json'
  },
  { name: 'chainquery-server', type: String, defaultValue: '' },
  { name: 'pricefeed-server', type: String, defaultValue: '' },
  { name: 'log-format', type: String, defaultValue: 'console' },
  { name: 'debug', type: Boolean, defaultValue: false },
  { name: 'otlp-debug', type: Boolean, defaultValue: false },
  // When set, run the ActionServer instead of the gRPC processor server.
  { name: 'start-action-server', type: Boolean, defaultValue: false },
  // Worker thread count; values > 1 enable ServiceManager mode.
  { name: 'worker', type: Number, defaultValue: 8 }
]
|
49
|
+
|
50
|
+
const options = commandLineArgs(optionDefinitions, { partial: true })
|
51
|
+
|
52
|
+
const logLevel = process.env['LOG_LEVEL']?.toUpperCase()
|
53
|
+
|
54
|
+
setupLogger(options['log-format'] === 'json', logLevel === 'debug' ? true : options.debug)
|
55
|
+
console.debug('Starting with', options.target)
|
56
|
+
|
57
|
+
await setupOTLP(options['otlp-debug'])
|
58
|
+
|
59
|
+
Error.stackTraceLimit = 20
|
60
|
+
|
61
|
+
const fullPath = path.resolve(options['chains-config'])
|
62
|
+
const chainsConfig = fs.readJsonSync(fullPath)
|
63
|
+
|
64
|
+
const concurrencyOverride = process.env['OVERRIDE_CONCURRENCY']
|
65
|
+
? parseInt(process.env['OVERRIDE_CONCURRENCY'])
|
66
|
+
: undefined
|
67
|
+
const batchCountOverride = process.env['OVERRIDE_BATCH_COUNT']
|
68
|
+
? parseInt(process.env['OVERRIDE_BATCH_COUNT'])
|
69
|
+
: undefined
|
70
|
+
|
71
|
+
Endpoints.INSTANCE.concurrency = concurrencyOverride ?? options.concurrency
|
72
|
+
Endpoints.INSTANCE.batchCount = batchCountOverride ?? options['batch-count']
|
73
|
+
Endpoints.INSTANCE.chainQueryAPI = options['chainquery-server']
|
74
|
+
Endpoints.INSTANCE.priceFeedAPI = options['pricefeed-server']
|
75
|
+
|
76
|
+
for (const [id, config] of Object.entries(chainsConfig)) {
|
77
|
+
const chainConfig = config as ChainConfig
|
78
|
+
if (chainConfig.ChainServer) {
|
79
|
+
Endpoints.INSTANCE.chainServer.set(id, chainConfig.ChainServer)
|
80
|
+
} else {
|
81
|
+
const http = chainConfig.Https?.[0]
|
82
|
+
if (http) {
|
83
|
+
Endpoints.INSTANCE.chainServer.set(id, http)
|
84
|
+
} else {
|
85
|
+
console.error('not valid config for chain', id)
|
86
|
+
}
|
87
|
+
}
|
88
|
+
}
|
89
|
+
|
90
|
+
console.debug('Starting Server', options)

let server: any
let baseService: ProcessorServiceImpl | ServiceManager
// Lazily imports the user's processor bundle; invoked by the service on demand.
const loader = async () => {
  const m = await import(options.target)
  console.debug('Module loaded', m)
  return m
}
if (options['start-action-server']) {
  // Action-server mode: serve actions directly instead of the gRPC processor.
  server = new ActionServer(loader)
  server.listen(options.port)
} else {
  // gRPC processor server with large message limits and gzip compression.
  server = createServer({
    'grpc.max_send_message_length': 768 * 1024 * 1024,
    'grpc.max_receive_message_length': 768 * 1024 * 1024,
    'grpc.default_compression_algorithm': compressionAlgorithms.gzip
  })
    // .use(prometheusServerMiddleware())
    .use(openTelemetryServerMiddleware())
    .use(errorDetailsServerMiddleware)

  // worker > 1 fans bindings out to worker threads via ServiceManager;
  // otherwise a single in-process service handles everything.
  if (options.worker > 1) {
    baseService = new ServiceManager(options, loader, server.shutdown)
  } else {
    baseService = new ProcessorServiceImpl(loader, server.shutdown)
  }

  // FullProcessorServiceImpl wraps the base service with the full API surface.
  const service = new FullProcessorServiceImpl(baseService)

  server.add(ProcessorDefinition, service)

  server.listen('0.0.0.0:' + options.port)

  console.log('Processor Server Started at:', options.port)
}
|
126
|
+
|
127
|
+
const metricsPort = 4040
|
128
|
+
const httpServer = http
|
129
|
+
.createServer(async function (req, res) {
|
130
|
+
if (req.url) {
|
131
|
+
const reqUrl = new URL(req.url, `http://${req.headers.host}`)
|
132
|
+
const queries = reqUrl.searchParams
|
133
|
+
switch (reqUrl.pathname) {
|
134
|
+
// case '/metrics':
|
135
|
+
// const metrics = await mergedRegistry.metrics()
|
136
|
+
// res.write(metrics)
|
137
|
+
// break
|
138
|
+
case '/heap': {
|
139
|
+
try {
|
140
|
+
const file = '/tmp/' + Date.now() + '.heapsnapshot'
|
141
|
+
const session = new Session()
|
142
|
+
|
143
|
+
const fd = fs.openSync(file, 'w')
|
144
|
+
session.connect()
|
145
|
+
session.on('HeapProfiler.addHeapSnapshotChunk', (m) => {
|
146
|
+
fs.writeSync(fd, m.params.chunk)
|
147
|
+
})
|
148
|
+
|
149
|
+
await session.post('HeapProfiler.takeHeapSnapshot')
|
150
|
+
session.disconnect()
|
151
|
+
fs.closeSync(fd)
|
152
|
+
// send the file
|
153
|
+
const readStream = fs.createReadStream(file)
|
154
|
+
res.writeHead(200, { 'Content-Type': 'application/json' })
|
155
|
+
readStream.pipe(res)
|
156
|
+
res.end()
|
157
|
+
} catch {
|
158
|
+
res.writeHead(500)
|
159
|
+
res.end()
|
160
|
+
}
|
161
|
+
break
|
162
|
+
}
|
163
|
+
case '/profile': {
|
164
|
+
try {
|
165
|
+
const profileTime = parseInt(queries.get('t') || '1000', 10) || 1000
|
166
|
+
const session = new Session()
|
167
|
+
session.connect()
|
168
|
+
|
169
|
+
await session.post('Profiler.enable')
|
170
|
+
await session.post('Profiler.start')
|
171
|
+
|
172
|
+
await new Promise((resolve) => setTimeout(resolve, profileTime))
|
173
|
+
const { profile } = await session.post('Profiler.stop')
|
174
|
+
|
175
|
+
res.writeHead(200, { 'Content-Type': 'application/json' })
|
176
|
+
res.write(JSON.stringify(profile))
|
177
|
+
session.disconnect()
|
178
|
+
} catch {
|
179
|
+
res.writeHead(500)
|
180
|
+
}
|
181
|
+
break
|
182
|
+
}
|
183
|
+
default:
|
184
|
+
res.writeHead(404)
|
185
|
+
}
|
186
|
+
} else {
|
187
|
+
res.writeHead(404)
|
188
|
+
}
|
189
|
+
res.end()
|
190
|
+
})
|
191
|
+
.listen(metricsPort)
|
192
|
+
|
193
|
+
console.log('Metric Server Started at:', metricsPort)
|
194
|
+
|
195
|
+
process
  .on('SIGINT', function () {
    // Graceful shutdown on Ctrl-C.
    shutdownServers(0)
  })
  .on('uncaughtException', (err) => {
    console.error('Uncaught Exception, please checking if await is properly used', err)
    // Record the error on the service so it can surface on the next request
    // instead of killing the process immediately.
    if (baseService) {
      baseService.unhandled = err
    }
    // shutdownServers(1)
  })
  .on('unhandledRejection', (reason, p) => {
    // @ts-ignore ignore invalid ens error
    if (reason?.message.startsWith('invalid ENS name (disallowed character: "*"')) {
      return
    }
    console.error('Unhandled Rejection, please checking if await is properly', reason)
    // Same policy as uncaughtException: record, don't exit.
    if (baseService) {
      baseService.unhandled = reason as Error
    }
    // shutdownServers(1)
  })
|
217
|
+
|
218
|
+
function shutdownServers(exitCode: number) {
|
219
|
+
server?.forceShutdown()
|
220
|
+
console.log('RPC server shut down')
|
221
|
+
|
222
|
+
httpServer.close(function () {
|
223
|
+
console.log('Http server shut down')
|
224
|
+
process.exit(exitCode)
|
225
|
+
})
|
226
|
+
}
|
package/src/provider.ts
ADDED
@@ -0,0 +1,195 @@
|
|
1
|
+
import { JsonRpcProvider, Network, Provider } from 'ethers'
|
2
|
+
|
3
|
+
import PQueue from 'p-queue'
|
4
|
+
import { Endpoints } from './endpoints.js'
|
5
|
+
import { EthChainId } from '@sentio/chain'
|
6
|
+
import { LRUCache } from 'lru-cache'
|
7
|
+
import { providerMetrics, processMetrics, metricsStorage } from './metrics.js'
|
8
|
+
import { GLOBAL_CONFIG } from './global-config.js'
|
9
|
+
const { miss_count, hit_count, queue_size } = providerMetrics
|
10
|
+
|
11
|
+
// Placeholder provider (empty URL, chain id 1) for call sites that require a
// Provider instance but never actually send requests through it.
export const DummyProvider = new JsonRpcProvider('', Network.from(1))

// Provider cache keyed by "<chainId>-<endpoint address>" (see getProvider).
const providers = new Map<string, JsonRpcProvider>()
|
14
|
+
|
15
|
+
// export function getEthChainId(networkish?: EthContext | EthChainId): EthChainId {
|
16
|
+
// if (!networkish) {
|
17
|
+
// networkish = EthChainId.ETHEREUM
|
18
|
+
// }
|
19
|
+
// if (networkish instanceof BaseContext) {
|
20
|
+
// networkish = networkish.getChainId()
|
21
|
+
// }
|
22
|
+
// return networkish
|
23
|
+
// }
|
24
|
+
|
25
|
+
export function getProvider(chainId?: EthChainId): Provider {
|
26
|
+
// const network = getNetworkFromCtxOrNetworkish(networkish)
|
27
|
+
if (!chainId) {
|
28
|
+
chainId = EthChainId.ETHEREUM
|
29
|
+
}
|
30
|
+
const network = Network.from(parseInt(chainId))
|
31
|
+
// TODO check if other key needed
|
32
|
+
|
33
|
+
const address = Endpoints.INSTANCE.chainServer.get(chainId)
|
34
|
+
const key = network.chainId.toString() + '-' + address
|
35
|
+
|
36
|
+
// console.debug(`init provider for ${chainId}, address: ${address}`)
|
37
|
+
let provider = providers.get(key)
|
38
|
+
|
39
|
+
if (provider) {
|
40
|
+
return provider
|
41
|
+
}
|
42
|
+
|
43
|
+
if (address === undefined) {
|
44
|
+
throw Error(
|
45
|
+
'Provider not found for chain ' +
|
46
|
+
network.chainId +
|
47
|
+
', configured chains: ' +
|
48
|
+
[...Endpoints.INSTANCE.chainServer.keys()].join(' ')
|
49
|
+
)
|
50
|
+
}
|
51
|
+
// console.log(
|
52
|
+
// `init provider for chain ${network.chainId}, concurrency: ${Endpoints.INSTANCE.concurrency}, batchCount: ${Endpoints.INSTANCE.batchCount}`
|
53
|
+
// )
|
54
|
+
provider = new QueuedStaticJsonRpcProvider(
|
55
|
+
address,
|
56
|
+
network,
|
57
|
+
Endpoints.INSTANCE.concurrency,
|
58
|
+
Endpoints.INSTANCE.batchCount
|
59
|
+
)
|
60
|
+
providers.set(key, provider)
|
61
|
+
return provider
|
62
|
+
}
|
63
|
+
|
64
|
+
function getTag(prefix: string, value: any): string {
|
65
|
+
return (
|
66
|
+
prefix +
|
67
|
+
':' +
|
68
|
+
JSON.stringify(value, (k, v) => {
|
69
|
+
if (v == null) {
|
70
|
+
return 'null'
|
71
|
+
}
|
72
|
+
if (typeof v === 'bigint') {
|
73
|
+
return `bigint:${v.toString()}`
|
74
|
+
}
|
75
|
+
if (typeof v === 'string') {
|
76
|
+
return v.toLowerCase()
|
77
|
+
}
|
78
|
+
|
79
|
+
// Sort object keys
|
80
|
+
if (typeof v === 'object' && !Array.isArray(v)) {
|
81
|
+
const keys = Object.keys(v)
|
82
|
+
keys.sort()
|
83
|
+
return keys.reduce(
|
84
|
+
(accum, key) => {
|
85
|
+
accum[key] = v[key]
|
86
|
+
return accum
|
87
|
+
},
|
88
|
+
<any>{}
|
89
|
+
)
|
90
|
+
}
|
91
|
+
|
92
|
+
return v
|
93
|
+
})
|
94
|
+
)
|
95
|
+
}
|
96
|
+
|
97
|
+
/**
 * JsonRpcProvider that (1) bounds request concurrency with a PQueue and
 * (2) deduplicates/caches eth_call results in an LRU keyed by a canonical
 * serialization of the call (see getTag). Calls against historical blocks
 * are cached until evicted; 'latest'-block calls are dropped after one
 * minute. TIMEOUT failures are retried up to a configurable budget.
 */
export class QueuedStaticJsonRpcProvider extends JsonRpcProvider {
  executor: PQueue
  // In-flight and completed eth_call promises, keyed by the call tag. Storing
  // the promise (not the value) lets concurrent identical calls share one
  // underlying request.
  #performCache = new LRUCache<string, Promise<any>>({
    max: 300000, // 300k items
    maxSize: 500 * 1024 * 1024 // 500mb key size for cache
    // ttl: 1000 * 60 * 60, // 1 hour no ttl for better performance
    // sizeCalculation: (value: any) => {
    //   assume each item is 1kb for simplicity
    //   return 1024
    // }
  })
  // Remaining TIMEOUT-retry budget per call tag.
  #retryCache = new LRUCache<string, number>({
    max: 300000 // 300k items
  })

  constructor(url: string, network: Network, concurrency: number, batchCount = 1) {
    // TODO re-enable match when possible
    super(url, network, { staticNetwork: network, batchMaxCount: batchCount })
    this.executor = new PQueue({ concurrency: concurrency })
  }

  async send(method: string, params: Array<any>): Promise<any> {
    // Non-eth_call traffic is rate-limited through the queue but never cached.
    if (method !== 'eth_call') {
      return await this.executor.add(() => super.send(method, params))
    }
    const tag = getTag(method, params)
    // By eth_call convention the block tag is the last parameter —
    // TODO confirm this holds for every caller.
    const block = params[params.length - 1]
    let perform = this.#performCache.get(tag)
    if (!perform) {
      miss_count.add(1)
      const handler = metricsStorage.getStore()
      const queued: number = Date.now()
      perform = this.executor.add(() => {
        // Record time spent waiting in the queue before the RPC started.
        const started = Date.now()
        processMetrics.processor_rpc_queue_duration.record(started - queued, {
          chain_id: this._network.chainId.toString(),
          handler
        })

        let success = true
        return super
          .send(method, params)
          .catch((e) => {
            success = false
            throw e
          })
          .finally(() => {
            processMetrics.processor_rpc_duration.record(Date.now() - started, {
              chain_id: this._network.chainId.toString(),
              handler,
              success
            })
          })
      })

      queue_size.record(this.executor.size)

      // Charge the cache by key length (maxSize above is in these units).
      this.#performCache.set(tag, perform, {
        size: tag.length
      })
      // For non latest block call, we cache permanently, otherwise we cache for one minute
      if (block === 'latest') {
        setTimeout(() => {
          // Only evict if the entry wasn't already replaced by a newer call.
          if (this.#performCache.get(tag) === perform) {
            this.#performCache.delete(tag)
          }
        }, 60 * 1000)
      }
    } else {
      hit_count.add(1)
    }

    let result
    try {
      result = await perform
    } catch (e) {
      // Drop the failed promise so the next caller re-issues the request.
      this.#performCache.delete(tag)
      if (e.code === 'TIMEOUT') {
        let retryCount = this.#retryCache.get(tag)
        // First timeout for this tag: seed the budget from global config.
        if (GLOBAL_CONFIG.execution.rpcRetryTimes && retryCount === undefined) {
          retryCount = GLOBAL_CONFIG.execution.rpcRetryTimes
        }
        if (retryCount) {
          this.#retryCache.set(tag, retryCount - 1)
          return this.send(method, params)
        }
      }
      throw e
    }
    if (!result) {
      throw Error('Unexpected null response')
    }
    return result
  }

  toString() {
    return 'QueuedStaticJsonRpcProvider'
  }
}
|