@sentio/runtime 3.0.2-rc.2 → 3.1.0-rc.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/lib/{chunk-ROBPWJIE.js → chunk-WTF2W33M.js} +1 -2
- package/lib/{chunk-KVGBPLGJ.js → chunk-Z33FXCEF.js} +10535 -10514
- package/lib/{chunk-KVGBPLGJ.js.map → chunk-Z33FXCEF.js.map} +1 -1
- package/lib/index.js +2 -2
- package/lib/processor-runner.js +11100 -251
- package/lib/processor-runner.js.map +1 -1
- package/package.json +2 -2
- package/src/processor-runner-program.ts +2 -2
- package/src/processor-runner.ts +145 -88
- package/lib/chunk-PCB4OKW7.js +0 -10975
- package/lib/chunk-PCB4OKW7.js.map +0 -1
- package/lib/service-worker.d.ts +0 -10
- package/lib/service-worker.js +0 -134
- package/lib/service-worker.js.map +0 -1
- package/src/service-manager.ts +0 -193
- package/src/service-worker.ts +0 -140
- package/lib/{chunk-ROBPWJIE.js.map → chunk-WTF2W33M.js.map} +0 -0
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@sentio/runtime",
-  "version": "3.0.2-rc.2",
+  "version": "3.1.0-rc.2",
   "license": "Apache-2.0",
   "type": "module",
   "exports": {
@@ -32,7 +32,7 @@
     "run": "tsx src/processor-runner.ts --log-format=json",
     "run-benchmark": "tsx src/decode-benchmark.ts",
     "start_js": "tsx ./lib/processor-runner.js $PWD/../../debug/dist/lib.js",
-    "start_ts": "tsx src/processor-runner.ts --debug --chains-config chains-config.json $PWD/../../examples/x2y2/src/processor.ts",
+    "start_ts": "tsx src/processor-runner.ts --debug --multi-server 3 --chains-config chains-config.json $PWD/../../examples/x2y2/src/processor.ts",
     "test": "glob -c 'tsx --test' '**/*.test.ts'"
   }
 }
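The updated start_ts script passes --multi-server 3 to the runner. Combined with the multi-worker bootstrap later in this diff (src/processor-runner.ts forks one child per extra worker on basePort + i), a run with N workers occupies N consecutive gRPC ports, while the metrics endpoint stays on the parent only. A minimal sketch of that port layout, using a hypothetical helper name (not part of the package):

// Hypothetical helper mirroring the port layout of the multi-server bootstrap:
// the parent keeps the base port, children take basePort + 1 .. basePort + (workers - 1).
function listeningPorts(basePort: number, workers: number): number[] {
  const ports: number[] = []
  for (let i = 0; i < workers; i++) {
    ports.push(basePort + i)
  }
  return ports
}

// With the default `-p 4000` and three workers: parent on 4000, children on
// 4001 and 4002; the metrics server (port 4040) runs only in the parent.
console.log(listeningPorts(4000, 3)) // [4000, 4001, 4002]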
package/src/processor-runner-program.ts
CHANGED
@@ -23,7 +23,7 @@ export const program = new Command('processor-runner')
   .description('Sentio Processor Runtime')
   .argument('<target>', 'Path to the processor module to load')
   .option('-p, --port <port>', 'Port to listen on', '4000')
-  .option('--concurrency <number>', 'Number of concurrent workers', myParseInt, 4)
+  .option('--concurrency <number>', 'Number of concurrent workers(V2 only, deprecated)', myParseInt, 4)
   .option('--batch-count <number>', 'Batch count for processing', myParseInt, 1)
   .option('-c, --chains-config <path>', 'Path to chains configuration file', 'chains-config.json')
   .option('--chainquery-server <url>', 'Chain query server URL')
@@ -32,7 +32,7 @@ export const program = new Command('processor-runner')
   .option('--debug', 'Enable debug mode')
   .option('--otlp-debug', 'Enable OTLP debug mode')
   .option('--start-action-server', 'Start action server instead of processor server')
-  .option('--worker <number>', 'Number of worker threads', myParseInt, workerNum)
+  .option('--worker <number>', 'Number of processor worker threads ', myParseInt, workerNum)
   .option('--process-timeout <seconds>', 'Process timeout in seconds', myParseInt, 60)
   .option(
     '--worker-timeout <seconds>',
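The --worker and --concurrency options use a custom integer parser (myParseInt) defined elsewhere in this file and not shown in these hunks. A minimal sketch of how such a Commander option behaves, assuming a typical parser implementation:

// Sketch only: `myParseInt` below is an assumed implementation; the real
// helper in processor-runner-program.ts may differ.
import { Command, InvalidArgumentError } from 'commander'

function myParseInt(value: string): number {
  const parsed = parseInt(value, 10)
  if (isNaN(parsed)) {
    throw new InvalidArgumentError('Not a number.')
  }
  return parsed
}

const program = new Command('demo')
  .option('-p, --port <port>', 'Port to listen on', '4000')
  .option('--worker <number>', 'Number of processor worker threads', myParseInt, 1)

program.parse(['node', 'demo', '--worker', '3'])
console.log(program.opts()) // { port: '4000', worker: 3 }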
package/src/processor-runner.ts
CHANGED
@@ -9,6 +9,8 @@ import { errorDetailsServerMiddleware } from 'nice-grpc-error-details'
 import http from 'http'
 // @ts-ignore inspector promises is not included in @type/node
 import { Session } from 'node:inspector/promises'
+import { fork, ChildProcess } from 'child_process'
+import { fileURLToPath } from 'url'
 
 import { ProcessorDefinition } from './gen/processor/protos/processor.js'
 import { ProcessorServiceImpl } from './service.js'
@@ -18,7 +20,6 @@ import { setupLogger } from './logger.js'
 
 import { setupOTLP } from './otlp.js'
 import { ActionServer } from './action-server.js'
-import { ServiceManager } from './service-manager.js'
 import { ProcessorV3Definition } from '@sentio/protos'
 import { ProcessorServiceImplV3 } from './service-v3.js'
 import { dirname, join } from 'path'
@@ -44,16 +45,66 @@ configureEndpoints(options)
 
 console.debug('Starting Server', options)
 
+// Check if this is a child process spawned for multi-server mode
+const isChildProcess = process.env['SENTIO_MULTI_SERVER_CHILD'] === 'true'
+const childServerPort = process.env['SENTIO_CHILD_SERVER_PORT']
+
+// Multi-worker mode: spawn child processes for additional servers
+if (options.worker > 1 && !isChildProcess) {
+  const childProcesses: ChildProcess[] = []
+  const basePort = parseInt(options.port)
+
+  // Spawn child processes for ports basePort+1 to basePort+(multiServer-1)
+  for (let i = 1; i < options.worker; i++) {
+    const childPort = basePort + i
+    const child = fork(fileURLToPath(import.meta.url), process.argv.slice(2), {
+      env: {
+        ...process.env,
+        SENTIO_MULTI_SERVER_CHILD: 'true',
+        SENTIO_CHILD_SERVER_PORT: String(childPort)
+      },
+      stdio: 'inherit'
+    })
+
+    child.on('error', (err) => {
+      console.error(`Child process on port ${childPort} error:`, err)
+    })
+
+    child.on('exit', (code) => {
+      console.log(`Child process on port ${childPort} exited with code ${code}`)
+    })
+
+    childProcesses.push(child)
+    console.log(`Spawned child server process for port ${childPort}`)
+  }
+
+  // Handle parent process shutdown - kill all children
+  const shutdownChildren = () => {
+    for (const child of childProcesses) {
+      child.kill('SIGINT')
+    }
+  }
+
+  process.on('SIGINT', shutdownChildren)
+  process.on('SIGTERM', shutdownChildren)
+}
+
+// Determine the actual port for this process
+const actualPort = isChildProcess && childServerPort ? childServerPort : options.port
+
 let server: any
-let baseService: ProcessorServiceImpl
+let baseService: ProcessorServiceImpl
+let httpServer: http.Server | undefined
+
 const loader = async () => {
   const m = await import(options.target)
   console.debug('Module loaded', m)
   return m
 }
+
 if (options.startActionServer) {
   server = new ActionServer(loader)
-  server.listen(
+  server.listen(actualPort)
 } else {
   server = createServer({
     'grpc.max_send_message_length': 768 * 1024 * 1024,
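The hunk above is the core of the new multi-server mode: when more than one worker is requested and the process is not itself a child, the entry file forks itself once per extra worker, marks each child through environment variables, and each process then derives its own listening port. A self-contained sketch of the same pattern (illustrative names and ports, not the package's code):

// multi-server-sketch.ts — runs with tsx; mirrors the fork/env pattern above.
import { fork } from 'child_process'
import { fileURLToPath } from 'url'
import http from 'http'

const isChild = process.env['DEMO_CHILD'] === 'true'
const basePort = 4000
const workers = 3
const port = isChild ? Number(process.env['DEMO_PORT']) : basePort

if (!isChild) {
  // Parent: fork one extra process per additional worker, on consecutive ports.
  for (let i = 1; i < workers; i++) {
    fork(fileURLToPath(import.meta.url), process.argv.slice(2), {
      env: { ...process.env, DEMO_CHILD: 'true', DEMO_PORT: String(basePort + i) },
      stdio: 'inherit'
    })
  }
}

// Every process (parent and children) starts its own server on its own port.
http
  .createServer((_req, res) => res.end(`hello from ${port}\n`))
  .listen(port, () => console.log(`listening on ${port}`))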
@@ -64,82 +115,104 @@ if (options.startActionServer) {
     // .use(openTelemetryServerMiddleware())
     .use(errorDetailsServerMiddleware)
 
-  … (removed lines not shown)
-  } else {
-    baseService = new ProcessorServiceImpl(loader, options, server.shutdown)
-  }
-
+  // for V2
+  baseService = new ProcessorServiceImpl(loader, options, server.shutdown)
   const service = new FullProcessorServiceImpl(baseService)
 
   server.add(ProcessorDefinition, service)
+
+  // for V3
   server.add(
     ProcessorV3Definition,
     new FullProcessorServiceV3Impl(new ProcessorServiceImplV3(loader, options, server.shutdown))
   )
 
-  server.listen('0.0.0.0:' +
-  console.log('Processor Server Started at:',
+  server.listen('0.0.0.0:' + actualPort)
+  console.log('Processor Server Started at:', actualPort)
 }
-… (removed lines not shown)
+
+// Only start metrics server on the main process (not child processes)
+if (!isChildProcess) {
+  const metricsPort = 4040
+
+  httpServer = http
+    .createServer(async function (req, res) {
+      if (req.url) {
+        const reqUrl = new URL(req.url, `http://${req.headers.host}`)
+        const queries = reqUrl.searchParams
+        switch (reqUrl.pathname) {
+          // case '/metrics':
+          //   const metrics = await mergedRegistry.metrics()
+          //   res.write(metrics)
+          //   break
+          case '/heap': {
+            try {
+              const file = '/tmp/' + Date.now() + '.heapsnapshot'
+              await dumpHeap(file)
+              // send the file
+              const readStream = fs.createReadStream(file)
+              res.writeHead(200, { 'Content-Type': 'application/json' })
+              readStream.pipe(res)
+              res.end()
+            } catch {
+              res.writeHead(500)
+              res.end()
+            }
+            break
           }
-… (removed lines not shown)
+          case '/profile': {
+            try {
+              const profileTime = parseInt(queries.get('t') || '1000', 10) || 1000
+              const session = new Session()
+              session.connect()
+
+              await session.post('Profiler.enable')
+              await session.post('Profiler.start')
+
+              await new Promise((resolve) => setTimeout(resolve, profileTime))
+              const { profile } = await session.post('Profiler.stop')
+
+              res.writeHead(200, { 'Content-Type': 'application/json' })
+              res.write(JSON.stringify(profile))
+              session.disconnect()
+            } catch {
+              res.writeHead(500)
+            }
+            break
           }
-… (removed line not shown)
+          default:
+            res.writeHead(404)
         }
-… (removed lines not shown)
+      } else {
+        res.writeHead(404)
       }
-… (removed lines not shown)
-      res.end()
-    })
-    .listen(metricsPort)
+      res.end()
+    })
+    .listen(metricsPort)
 
-  console.log('Metric Server Started at:', metricsPort)
+  console.log('Metric Server Started at:', metricsPort)
 
+  if (process.env['OOM_DUMP_MEMORY_SIZE_GB']) {
+    let dumping = false
+    const memorySize = parseFloat(process.env['OOM_DUMP_MEMORY_SIZE_GB']!)
+    console.log('heap dumping is enabled, limit set to ', memorySize, 'gb')
+    const dir = process.env['OOM_DUMP_DIR'] || '/tmp'
+    setInterval(async () => {
+      const mem = process.memoryUsage()
+      console.log('Current Memory Usage', mem)
+      // if memory usage is greater this size, dump heap and exit
+      if (mem.heapTotal > memorySize * 1024 * 1024 * 1024 && !dumping) {
+        const file = join(dir, `${Date.now()}.heapsnapshot`)
+        dumping = true
+        await dumpHeap(file)
+        // force exit and keep pod running
+        process.exit(11)
+      }
+    }, 1000 * 60)
+  }
+}
+
+// Process event handlers
 process
   .on('SIGINT', function () {
     shutdownServers(0)
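The diagnostic HTTP endpoints above are served from the parent process on the metrics port (4040): /heap streams a heap snapshot, and /profile samples the CPU for t milliseconds before returning a V8 profile. A hedged usage sketch for /profile (file name and duration are arbitrary):

// Grab a 2-second CPU profile from a running processor and save it as a
// .cpuprofile file that can be loaded in Chrome DevTools. Illustrative only.
import { writeFile } from 'fs/promises'

const res = await fetch('http://localhost:4040/profile?t=2000')
if (!res.ok) throw new Error(`profiling failed: ${res.status}`)
const profile = await res.json()
await writeFile('processor.cpuprofile', JSON.stringify(profile))
console.log('saved processor.cpuprofile')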
@@ -151,7 +224,7 @@ process
     }
     // shutdownServers(1)
   })
-  .on('unhandledRejection', (reason,
+  .on('unhandledRejection', (reason, _p) => {
     // @ts-ignore ignore invalid ens error
     if (reason?.message.startsWith('invalid ENS name (disallowed character: "*"')) {
       return
@@ -163,26 +236,6 @@ process
     // shutdownServers(1)
   })
 
-if (process.env['OOM_DUMP_MEMORY_SIZE_GB']) {
-  let dumping = false
-  const memorySize = parseFloat(process.env['OOM_DUMP_MEMORY_SIZE_GB']!)
-  console.log('heap dumping is enabled, limit set to ', memorySize, 'gb')
-  const dir = process.env['OOM_DUMP_DIR'] || '/tmp'
-  setInterval(async () => {
-    const mem = process.memoryUsage()
-    console.log('Current Memory Usage', mem)
-    // if memory usage is greater this size, dump heap and exit
-    if (mem.heapTotal > memorySize * 1024 * 1024 * 1024 && !dumping) {
-      const file = join(dir, `${Date.now()}.heapsnapshot`)
-      dumping = true
-      await dumpHeap(file)
-      // force exit and keep pod running
-      process.exit(11)
-    }
-  }, 1000 * 60)
-}
-// }
-
 async function dumpHeap(file: string): Promise<void> {
   console.log('Heap dumping to', file)
   const session = new Session()
@@ -206,8 +259,12 @@ function shutdownServers(exitCode: number): void {
   server.forceShutdown()
   console.log('RPC server shut down')
 
-  … (removed lines not shown)
+  if (httpServer) {
+    httpServer.close(function () {
+      console.log('Http server shut down')
+      process.exit(exitCode)
+    })
+  } else {
     process.exit(exitCode)
-  }
+  }
 }