@sentio/runtime 3.0.2-rc.1 → 3.1.0-rc.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/lib/index.d.ts +1 -0
- package/lib/processor-runner.js +116 -70
- package/lib/processor-runner.js.map +1 -1
- package/package.json +2 -2
- package/src/processor-runner-program.ts +9 -2
- package/src/processor-runner.ts +143 -80
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@sentio/runtime",
-  "version": "3.0.2-rc.1",
+  "version": "3.1.0-rc.1",
   "license": "Apache-2.0",
   "type": "module",
   "exports": {
@@ -32,7 +32,7 @@
     "run": "tsx src/processor-runner.ts --log-format=json",
     "run-benchmark": "tsx src/decode-benchmark.ts",
     "start_js": "tsx ./lib/processor-runner.js $PWD/../../debug/dist/lib.js",
-    "start_ts": "tsx src/processor-runner.ts --debug --chains-config chains-config.json $PWD/../../examples/x2y2/src/processor.ts",
+    "start_ts": "tsx src/processor-runner.ts --debug --multi-server 3 --chains-config chains-config.json $PWD/../../examples/x2y2/src/processor.ts",
     "test": "glob -c 'tsx --test' '**/*.test.ts'"
   }
 }
package/src/processor-runner-program.ts
CHANGED
@@ -7,6 +7,12 @@ try {
   console.error('Failed to parse worker number', e)
 }
 
+let serverNum = 1
+
+if (process.env['PROCESSOR_SERVER_NUM']) {
+  serverNum = parseInt(process.env['PROCESSOR_SERVER_NUM']!.trim())
+}
+
 function myParseInt(value: string, dummyPrevious: number): number {
   // parseInt takes a string and a radix
   const parsedValue = parseInt(value, 10)
@@ -23,7 +29,7 @@ export const program = new Command('processor-runner')
   .description('Sentio Processor Runtime')
   .argument('<target>', 'Path to the processor module to load')
   .option('-p, --port <port>', 'Port to listen on', '4000')
-  .option('--concurrency <number>', 'Number of concurrent workers', myParseInt, 4)
+  .option('--concurrency <number>', 'Number of concurrent workers(V2 only, deprecated)', myParseInt, 4)
   .option('--batch-count <number>', 'Batch count for processing', myParseInt, 1)
   .option('-c, --chains-config <path>', 'Path to chains configuration file', 'chains-config.json')
   .option('--chainquery-server <url>', 'Chain query server URL')
@@ -32,7 +38,7 @@ export const program = new Command('processor-runner')
   .option('--debug', 'Enable debug mode')
   .option('--otlp-debug', 'Enable OTLP debug mode')
   .option('--start-action-server', 'Start action server instead of processor server')
-  .option('--worker <number>', 'Number of worker threads', myParseInt, workerNum)
+  .option('--worker <number>', 'Number of worker threads(V2 only, deprecated) ', myParseInt, workerNum)
   .option('--process-timeout <seconds>', 'Process timeout in seconds', myParseInt, 60)
   .option(
     '--worker-timeout <seconds>',
@@ -45,6 +51,7 @@ export const program = new Command('processor-runner')
     'Enable binding data partition',
     process.env['SENTIO_ENABLE_BINDING_DATA_PARTITION'] === 'true'
   )
+  .option('--multi-server <number>', 'Enable multi-server mode for processor runtime', myParseInt, serverNum)
 
 export type ProcessorRuntimeOptions = ReturnType<typeof program.opts> & { target: string }
 
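Note on the new option: `--multi-server` defaults to `serverNum`, which is seeded from the `PROCESSOR_SERVER_NUM` environment variable, so the effective server count resolves as CLI flag, then env var, then 1. A minimal sketch of that precedence; the `resolveServerCount` helper below is illustrative only and not an export of this package:

```ts
// Illustrative only: mirrors the precedence implied by the diff
// (CLI flag > PROCESSOR_SERVER_NUM > 1). Not part of @sentio/runtime.
function resolveServerCount(cliValue?: string, env: Record<string, string | undefined> = process.env): number {
  let serverNum = 1
  const fromEnv = env['PROCESSOR_SERVER_NUM']
  if (fromEnv) {
    serverNum = parseInt(fromEnv.trim(), 10)
  }
  return cliValue !== undefined ? parseInt(cliValue, 10) : serverNum
}

console.log(resolveServerCount(undefined, { PROCESSOR_SERVER_NUM: '4' })) // 4 (env default)
console.log(resolveServerCount('3', { PROCESSOR_SERVER_NUM: '4' })) // 3 (flag wins)
console.log(resolveServerCount(undefined, {})) // 1 when neither is set
```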
package/src/processor-runner.ts
CHANGED
@@ -9,6 +9,8 @@ import { errorDetailsServerMiddleware } from 'nice-grpc-error-details'
 import http from 'http'
 // @ts-ignore inspector promises is not included in @type/node
 import { Session } from 'node:inspector/promises'
+import { fork, ChildProcess } from 'child_process'
+import { fileURLToPath } from 'url'
 
 import { ProcessorDefinition } from './gen/processor/protos/processor.js'
 import { ProcessorServiceImpl } from './service.js'
@@ -44,16 +46,66 @@ configureEndpoints(options)
 
 console.debug('Starting Server', options)
 
+// Check if this is a child process spawned for multi-server mode
+const isChildProcess = process.env['SENTIO_MULTI_SERVER_CHILD'] === 'true'
+const childServerPort = process.env['SENTIO_CHILD_SERVER_PORT']
+
+// Multi-server mode: spawn child processes for additional servers
+if (options.multiServer > 1 && !isChildProcess) {
+  const childProcesses: ChildProcess[] = []
+  const basePort = parseInt(options.port)
+
+  // Spawn child processes for ports basePort+1 to basePort+(multiServer-1)
+  for (let i = 1; i < options.multiServer; i++) {
+    const childPort = basePort + i
+    const child = fork(fileURLToPath(import.meta.url), process.argv.slice(2), {
+      env: {
+        ...process.env,
+        SENTIO_MULTI_SERVER_CHILD: 'true',
+        SENTIO_CHILD_SERVER_PORT: String(childPort)
+      },
+      stdio: 'inherit'
+    })
+
+    child.on('error', (err) => {
+      console.error(`Child process on port ${childPort} error:`, err)
+    })
+
+    child.on('exit', (code) => {
+      console.log(`Child process on port ${childPort} exited with code ${code}`)
+    })
+
+    childProcesses.push(child)
+    console.log(`Spawned child server process for port ${childPort}`)
+  }
+
+  // Handle parent process shutdown - kill all children
+  const shutdownChildren = () => {
+    for (const child of childProcesses) {
+      child.kill('SIGINT')
+    }
+  }
+
+  process.on('SIGINT', shutdownChildren)
+  process.on('SIGTERM', shutdownChildren)
+}
+
+// Determine the actual port for this process
+const actualPort = isChildProcess && childServerPort ? childServerPort : options.port
+
 let server: any
 let baseService: ProcessorServiceImpl | ServiceManager
+let httpServer: http.Server | undefined
+
 const loader = async () => {
   const m = await import(options.target)
   console.debug('Module loaded', m)
   return m
 }
+
 if (options.startActionServer) {
   server = new ActionServer(loader)
-  server.listen(
+  server.listen(actualPort)
 } else {
   server = createServer({
     'grpc.max_send_message_length': 768 * 1024 * 1024,
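In this scheme the parent process keeps the base port and each forked child binds `basePort + i`, recognizing itself through the two environment variables passed to `fork`. A standalone sketch of the same pattern, independent of @sentio/runtime and with a hypothetical `startServer` stand-in for the package's gRPC server:

```ts
// Standalone sketch of the fork-per-port pattern used above (ESM / Node).
import { fork } from 'child_process'
import { fileURLToPath } from 'url'

const basePort = 4000
const serverCount = 3

const isChild = process.env['SENTIO_MULTI_SERVER_CHILD'] === 'true'
const myPort = isChild ? Number(process.env['SENTIO_CHILD_SERVER_PORT']) : basePort

if (!isChild) {
  // Parent: fork one child per extra server, covering ports basePort+1 .. basePort+serverCount-1
  for (let i = 1; i < serverCount; i++) {
    fork(fileURLToPath(import.meta.url), process.argv.slice(2), {
      env: { ...process.env, SENTIO_MULTI_SERVER_CHILD: 'true', SENTIO_CHILD_SERVER_PORT: String(basePort + i) },
      stdio: 'inherit'
    })
  }
}

// Every process (parent and children) starts exactly one server on its own port.
startServer(myPort)

function startServer(port: number) {
  console.log(`listening on 0.0.0.0:${port}`) // placeholder for server.listen('0.0.0.0:' + port)
}
```

With the default port of 4000 and `--multi-server 3` (as in the updated start_ts script), this layout yields servers on 4000, 4001, and 4002.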
@@ -64,6 +116,7 @@ if (options.startActionServer) {
     // .use(openTelemetryServerMiddleware())
     .use(errorDetailsServerMiddleware)
 
+  // for V2
   if (options.worker > 1) {
     baseService = new ServiceManager(loader, options, server.shutdown)
   } else {
@@ -73,73 +126,99 @@ if (options.startActionServer) {
   const service = new FullProcessorServiceImpl(baseService)
 
   server.add(ProcessorDefinition, service)
+
+  // for V3
   server.add(
     ProcessorV3Definition,
     new FullProcessorServiceV3Impl(new ProcessorServiceImplV3(loader, options, server.shutdown))
   )
 
-  server.listen('0.0.0.0:' +
-  console.log('Processor Server Started at:',
+  server.listen('0.0.0.0:' + actualPort)
+  console.log('Processor Server Started at:', actualPort)
 }
- [24 removed lines, content not rendered in this diff view]
+
+// Only start metrics server on the main process (not child processes)
+if (!isChildProcess) {
+  const metricsPort = 4040
+
+  httpServer = http
+    .createServer(async function (req, res) {
+      if (req.url) {
+        const reqUrl = new URL(req.url, `http://${req.headers.host}`)
+        const queries = reqUrl.searchParams
+        switch (reqUrl.pathname) {
+          // case '/metrics':
+          //   const metrics = await mergedRegistry.metrics()
+          //   res.write(metrics)
+          //   break
+          case '/heap': {
+            try {
+              const file = '/tmp/' + Date.now() + '.heapsnapshot'
+              await dumpHeap(file)
+              // send the file
+              const readStream = fs.createReadStream(file)
+              res.writeHead(200, { 'Content-Type': 'application/json' })
+              readStream.pipe(res)
+              res.end()
+            } catch {
+              res.writeHead(500)
+              res.end()
+            }
+            break
           }
- [19 removed lines, content not rendered in this diff view]
+          case '/profile': {
+            try {
+              const profileTime = parseInt(queries.get('t') || '1000', 10) || 1000
+              const session = new Session()
+              session.connect()
+
+              await session.post('Profiler.enable')
+              await session.post('Profiler.start')
+
+              await new Promise((resolve) => setTimeout(resolve, profileTime))
+              const { profile } = await session.post('Profiler.stop')
+
+              res.writeHead(200, { 'Content-Type': 'application/json' })
+              res.write(JSON.stringify(profile))
+              session.disconnect()
+            } catch {
+              res.writeHead(500)
+            }
+            break
           }
- [1 removed line, content not rendered in this diff view]
+          default:
+            res.writeHead(404)
         }
- [2 removed lines, content not rendered in this diff view]
+      } else {
+        res.writeHead(404)
       }
- [3 removed lines, content not rendered in this diff view]
-    res.end()
-  })
-  .listen(metricsPort)
+      res.end()
+    })
+    .listen(metricsPort)
 
-console.log('Metric Server Started at:', metricsPort)
+  console.log('Metric Server Started at:', metricsPort)
 
+  if (process.env['OOM_DUMP_MEMORY_SIZE_GB']) {
+    let dumping = false
+    const memorySize = parseFloat(process.env['OOM_DUMP_MEMORY_SIZE_GB']!)
+    console.log('heap dumping is enabled, limit set to ', memorySize, 'gb')
+    const dir = process.env['OOM_DUMP_DIR'] || '/tmp'
+    setInterval(async () => {
+      const mem = process.memoryUsage()
+      console.log('Current Memory Usage', mem)
+      // if memory usage is greater this size, dump heap and exit
+      if (mem.heapTotal > memorySize * 1024 * 1024 * 1024 && !dumping) {
+        const file = join(dir, `${Date.now()}.heapsnapshot`)
+        dumping = true
+        await dumpHeap(file)
+        // force exit and keep pod running
+        process.exit(11)
+      }
+    }, 1000 * 60)
+  }
+}
+
+// Process event handlers
 process
   .on('SIGINT', function () {
     shutdownServers(0)
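The debug HTTP endpoints (`/heap`, `/profile`) and the OOM watchdog now run only in the parent process; the watchdog block removed further down in this diff is the same code relocated into this guard. Assuming the default metrics port 4040 shown above and Node 18+ for global `fetch`, a CPU profile could be pulled with illustrative client code like the following (not part of the package):

```ts
// Illustrative client for the runner's debug HTTP server on its default port 4040.
import { writeFile } from 'fs/promises'

async function captureCpuProfile(ms = 5000): Promise<void> {
  // '/profile?t=<ms>' samples the CPU for <ms> milliseconds and returns a V8 profile as JSON
  const res = await fetch(`http://localhost:4040/profile?t=${ms}`)
  if (!res.ok) throw new Error(`profile request failed: ${res.status}`)
  await writeFile('runner.cpuprofile', JSON.stringify(await res.json()))
  console.log('saved runner.cpuprofile') // a .cpuprofile file can be inspected with V8/DevTools tooling
}

captureCpuProfile().catch(console.error)
```

The `/heap` endpoint works the same way, streaming a `.heapsnapshot` file produced by `dumpHeap`.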
@@ -151,7 +230,7 @@ process
     }
     // shutdownServers(1)
   })
-  .on('unhandledRejection', (reason,
+  .on('unhandledRejection', (reason, _p) => {
     // @ts-ignore ignore invalid ens error
     if (reason?.message.startsWith('invalid ENS name (disallowed character: "*"')) {
       return
@@ -163,26 +242,6 @@ process
     // shutdownServers(1)
   })
 
-if (process.env['OOM_DUMP_MEMORY_SIZE_GB']) {
-  let dumping = false
-  const memorySize = parseFloat(process.env['OOM_DUMP_MEMORY_SIZE_GB']!)
-  console.log('heap dumping is enabled, limit set to ', memorySize, 'gb')
-  const dir = process.env['OOM_DUMP_DIR'] || '/tmp'
-  setInterval(async () => {
-    const mem = process.memoryUsage()
-    console.log('Current Memory Usage', mem)
-    // if memory usage is greater this size, dump heap and exit
-    if (mem.heapTotal > memorySize * 1024 * 1024 * 1024 && !dumping) {
-      const file = join(dir, `${Date.now()}.heapsnapshot`)
-      dumping = true
-      await dumpHeap(file)
-      // force exit and keep pod running
-      process.exit(11)
-    }
-  }, 1000 * 60)
-}
-// }
-
 async function dumpHeap(file: string): Promise<void> {
   console.log('Heap dumping to', file)
   const session = new Session()
@@ -206,8 +265,12 @@ function shutdownServers(exitCode: number): void {
   server.forceShutdown()
   console.log('RPC server shut down')
 
- [2 removed lines, content not rendered in this diff view]
+  if (httpServer) {
+    httpServer.close(function () {
+      console.log('Http server shut down')
+      process.exit(exitCode)
+    })
+  } else {
     process.exit(exitCode)
-}
+  }
 }