@sentio/runtime 2.61.1-rc.1 → 2.62.0-rc.10

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (46)
  1. package/lib/{chunk-NVBA53K5.js → chunk-AHIIUVQL.js} +3885 -4083
  2. package/lib/chunk-AHIIUVQL.js.map +1 -0
  3. package/lib/{chunk-4M76KZSL.js → chunk-I5YHR3CE.js} +125 -725
  4. package/lib/chunk-I5YHR3CE.js.map +1 -0
  5. package/lib/chunk-KVSDPGUI.js +50 -0
  6. package/lib/chunk-KVSDPGUI.js.map +1 -0
  7. package/lib/chunk-L2A6JPIQ.js +21 -0
  8. package/lib/chunk-L2A6JPIQ.js.map +1 -0
  9. package/lib/{chunk-DPHTMNDV.js → chunk-PCB4OKW7.js} +2 -2
  10. package/lib/chunk-W3VN25ER.js +1703 -0
  11. package/lib/chunk-W3VN25ER.js.map +1 -0
  12. package/lib/getMachineId-bsd-WJ6BLVOD.js +41 -0
  13. package/lib/getMachineId-bsd-WJ6BLVOD.js.map +1 -0
  14. package/lib/getMachineId-darwin-TW74DVT5.js +41 -0
  15. package/lib/getMachineId-darwin-TW74DVT5.js.map +1 -0
  16. package/lib/getMachineId-linux-MDJOKOMI.js +33 -0
  17. package/lib/getMachineId-linux-MDJOKOMI.js.map +1 -0
  18. package/lib/getMachineId-unsupported-X2EJZ25K.js +24 -0
  19. package/lib/getMachineId-unsupported-X2EJZ25K.js.map +1 -0
  20. package/lib/getMachineId-win-VE7DMTAF.js +43 -0
  21. package/lib/getMachineId-win-VE7DMTAF.js.map +1 -0
  22. package/lib/index.d.ts +24 -3
  23. package/lib/index.js +4 -2
  24. package/lib/index.js.map +1 -1
  25. package/lib/{processor-DwZlMkFj.d.ts → processor-HNY62jHs.d.ts} +7 -23
  26. package/lib/processor-runner.d.ts +0 -33
  27. package/lib/processor-runner.js +8597 -10812
  28. package/lib/processor-runner.js.map +1 -1
  29. package/lib/service-worker.js +8 -6
  30. package/lib/service-worker.js.map +1 -1
  31. package/lib/test-processor.test.d.ts +1 -1
  32. package/lib/test-processor.test.js.map +1 -1
  33. package/package.json +6 -8
  34. package/src/endpoints.ts +4 -4
  35. package/src/gen/processor/protos/processor.ts +59 -356
  36. package/src/gen/service/common/protos/common.ts +6 -0
  37. package/src/otlp.ts +5 -6
  38. package/src/processor-runner-program.ts +57 -0
  39. package/src/processor-runner.ts +19 -49
  40. package/src/service-manager.ts +5 -3
  41. package/src/service-v3.ts +6 -8
  42. package/src/service-worker.ts +6 -5
  43. package/src/service.ts +3 -2
  44. package/lib/chunk-4M76KZSL.js.map +0 -1
  45. package/lib/chunk-NVBA53K5.js.map +0 -1
  46. package/lib/{chunk-DPHTMNDV.js.map → chunk-PCB4OKW7.js.map} +0 -0
package/src/processor-runner.ts CHANGED
@@ -3,7 +3,7 @@
 import fs from 'fs-extra'
 
 import { compressionAlgorithms } from '@grpc/grpc-js'
-import commandLineArgs from 'command-line-args'
+
 import { createServer } from 'nice-grpc'
 import { errorDetailsServerMiddleware } from 'nice-grpc-error-details'
 // import { registry as niceGrpcRegistry } from 'nice-grpc-prometheus'
@@ -21,54 +21,24 @@ import { setupLogger } from './logger.js'
 import { setupOTLP } from './otlp.js'
 import { ActionServer } from './action-server.js'
 import { ServiceManager } from './service-manager.js'
-import path from 'path'
 import { ProcessorV3Definition } from '@sentio/protos'
 import { ProcessorServiceImplV3 } from './service-v3.js'
+import { dirname, join } from 'path'
+import { program, ProcessorRuntimeOptions } from 'processor-runner-program.js'
 
-// const mergedRegistry = Registry.merge([globalRegistry, niceGrpcRegistry])
+program.parse()
 
-let workerNum = 1
-try {
-  workerNum = parseInt(process.env['PROCESSOR_WORKER']?.trim() ?? '1')
-} catch (e) {
-  console.error('Failed to parse worker number', e)
+const options: ProcessorRuntimeOptions = {
+  ...program.opts(),
+  target: program.args[program.args.length - 1]
 }
-export const optionDefinitions = [
-  { name: 'target', type: String, defaultOption: true },
-  { name: 'port', alias: 'p', type: String, defaultValue: '4000' },
-  { name: 'concurrency', type: Number, defaultValue: 4 },
-  { name: 'batch-count', type: Number, defaultValue: 1 },
-  // { name: 'use-chainserver', type: Boolean, defaultValue: false },
-  {
-    name: 'chains-config',
-    alias: 'c',
-    type: String,
-    defaultValue: 'chains-config.json'
-  },
-  { name: 'chainquery-server', type: String, defaultValue: '' },
-  { name: 'pricefeed-server', type: String, defaultValue: '' },
-  { name: 'log-format', type: String, defaultValue: 'console' },
-  { name: 'debug', type: Boolean, defaultValue: false },
-  { name: 'otlp-debug', type: Boolean, defaultValue: false },
-  { name: 'start-action-server', type: Boolean, defaultValue: false },
-  { name: 'worker', type: Number, defaultValue: workerNum },
-  { name: 'process-timeout', type: Number, defaultValue: 60 },
-  { name: 'worker-timeout', type: Number, defaultValue: parseInt(process.env['WORKER_TIMEOUT_SECONDS'] || '60') },
-  {
-    name: 'enable-partition',
-    type: Boolean,
-    defaultValue: process.env['SENTIO_ENABLE_BINDING_DATA_PARTITION'] === 'true'
-  }
-]
-
-const options = commandLineArgs(optionDefinitions, { partial: true })
 
 const logLevel = process.env['LOG_LEVEL']?.toLowerCase()
 
-setupLogger(options['log-format'] === 'json', logLevel === 'debug' ? true : options.debug)
+setupLogger(options.logFormat === 'json', logLevel === 'debug' ? true : options.debug!)
 console.debug('Starting with', options.target)
 
-await setupOTLP(options['otlp-debug'])
+await setupOTLP(options.otlpDebug)
 
 Error.stackTraceLimit = 20
 
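The new package/src/processor-runner-program.ts (file 38 in the list above, +57 lines) is not expanded in this diff. The call sites visible here (program.parse(), program.opts(), program.args, and camelCased reads such as options.logFormat and options.otlpDebug) match commander's API, so the module presumably exports a commander-style program plus a ProcessorRuntimeOptions type. A minimal sketch under that assumption, with flag names and defaults echoing the removed optionDefinitions block; this is inferred, not the published source:

// SKETCH ONLY — inferred from the call sites in this diff; not the actual file contents.
import { Command } from 'commander'

export interface ProcessorRuntimeOptions {
  target?: string
  port?: string
  concurrency?: number
  batchCount?: number
  chainsConfig?: string
  chainqueryServer?: string
  pricefeedServer?: string
  logFormat?: string
  debug?: boolean
  otlpDebug?: boolean
  startActionServer?: boolean
  worker?: number
  processTimeout?: number
  workerTimeout?: number
  enablePartition?: boolean
}

// small parse helper so numeric flags are not handed to parseInt with a bogus radix
const int = (v: string) => parseInt(v, 10)

export const program = new Command()
  .allowUnknownOption()   // mirrors command-line-args' { partial: true }
  .allowExcessArguments() // the trailing positional is read later via program.args
  .option('-p, --port <port>', 'gRPC listen port', '4000')
  .option('--concurrency <n>', 'processing concurrency', int, 4)
  .option('--batch-count <n>', 'batch count', int, 1)
  .option('-c, --chains-config <path>', 'chains config file', 'chains-config.json')
  .option('--chainquery-server <url>', 'chain query server address', '')
  .option('--pricefeed-server <url>', 'price feed server address', '')
  .option('--log-format <format>', "'console' or 'json'", 'console')
  .option('--debug', 'enable debug logging', false)
  .option('--otlp-debug', 'enable OTLP debug logging', false)
  .option('--start-action-server', 'start the action server instead of the processor', false)
  .option('--worker <n>', 'worker thread count', int, int(process.env['PROCESSOR_WORKER'] ?? '1'))
  .option('--process-timeout <seconds>', 'per-binding processing timeout', int, 60)
  .option('--worker-timeout <seconds>', 'worker timeout', int, int(process.env['WORKER_TIMEOUT_SECONDS'] || '60'))
  .option('--enable-partition', 'enable binding data partition', process.env['SENTIO_ENABLE_BINDING_DATA_PARTITION'] === 'true')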
@@ -83,7 +53,7 @@ const loader = async () => {
   console.debug('Module loaded', m)
   return m
 }
-if (options['start-action-server']) {
+if (options.startActionServer) {
   server = new ActionServer(loader)
   server.listen(options.port)
 } else {
@@ -109,11 +79,10 @@ if (options['start-action-server']) {
     ProcessorV3Definition,
     new FullProcessorServiceV3Impl(new ProcessorServiceImplV3(loader, options, server.shutdown))
   )
-  server.listen('0.0.0.0:' + options.port)
 
+  server.listen('0.0.0.0:' + options.port)
   console.log('Processor Server Started at:', options.port)
 }
-
 const metricsPort = 4040
 
 const httpServer = http
@@ -198,7 +167,7 @@ process
 
 if (process.env['OOM_DUMP_MEMORY_SIZE_GB']) {
   let dumping = false
-  const memorySize = parseFloat(process.env['OOM_DUMP_MEMORY_SIZE_GB'])
+  const memorySize = parseFloat(process.env['OOM_DUMP_MEMORY_SIZE_GB']!)
   console.log('heap dumping is enabled, limit set to ', memorySize, 'gb')
   const dir = process.env['OOM_DUMP_DIR'] || '/tmp'
   setInterval(async () => {
@@ -206,7 +175,7 @@ if (process.env['OOM_DUMP_MEMORY_SIZE_GB']) {
     console.log('Current Memory Usage', mem)
     // if memory usage is greater this size, dump heap and exit
     if (mem.heapTotal > memorySize * 1024 * 1024 * 1024 && !dumping) {
-      const file = path.join(dir, `${Date.now()}.heapsnapshot`)
+      const file = join(dir, `${Date.now()}.heapsnapshot`)
       dumping = true
       await dumpHeap(file)
       // force exit and keep pod running
@@ -214,15 +183,16 @@ if (process.env['OOM_DUMP_MEMORY_SIZE_GB']) {
     }
   }, 1000 * 60)
 }
+// }
 
-async function dumpHeap(file: string) {
+async function dumpHeap(file: string): Promise<void> {
   console.log('Heap dumping to', file)
   const session = new Session()
-  fs.mkdirSync(path.dirname(file), { recursive: true })
+  fs.mkdirSync(dirname(file), { recursive: true })
   const fd = fs.openSync(file, 'w')
   try {
     session.connect()
-    session.on('HeapProfiler.addHeapSnapshotChunk', (m) => {
+    session.on('HeapProfiler.addHeapSnapshotChunk', (m: any) => {
       fs.writeSync(fd, m.params.chunk)
     })
 
@@ -234,8 +204,8 @@ async function dumpHeap(file: string) {
   }
 }
 
-function shutdownServers(exitCode: number) {
-  server?.forceShutdown()
+function shutdownServers(exitCode: number): void {
+  server.forceShutdown()
   console.log('RPC server shut down')
 
   httpServer.close(function () {
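The dumpHeap changes above keep the same inspector-based approach (only the path helpers and type annotations change). For reference, a self-contained sketch of that technique — streaming V8 heap-snapshot chunks from an in-process node:inspector Session to a file; the function name and error handling here are illustrative, not the runner's exact code:

import { Session } from 'node:inspector'
import { openSync, writeSync, closeSync } from 'node:fs'

export function writeHeapSnapshot(file: string): Promise<void> {
  const session = new Session()
  const fd = openSync(file, 'w')
  session.connect()
  // The snapshot arrives as a stream of chunks; append each chunk as it comes in.
  session.on('HeapProfiler.addHeapSnapshotChunk', (m: any) => {
    writeSync(fd, m.params.chunk)
  })
  return new Promise((resolve, reject) => {
    session.post('HeapProfiler.takeHeapSnapshot', {}, (err) => {
      session.disconnect()
      closeSync(fd)
      if (err) reject(err)
      else resolve()
    })
  })
}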
package/src/service-manager.ts CHANGED
@@ -16,6 +16,7 @@ import { Subject } from 'rxjs'
 import { MessageChannel } from 'node:worker_threads'
 import { ProcessorServiceImpl } from './service.js'
 import { TemplateInstanceState } from './state.js'
+import { ProcessorRuntimeOptions } from 'processor-runner-program.js'
 ;(BigInt.prototype as any).toJSON = function () {
   return this.toString()
 }
@@ -26,7 +27,7 @@ export class ServiceManager extends ProcessorServiceImpl {
 
   constructor(
     loader: () => Promise<any>,
-    readonly options: any,
+    readonly options: ProcessorRuntimeOptions,
     shutdownHandler?: () => void
   ) {
     super(loader, options, shutdownHandler)
@@ -123,7 +124,7 @@ export class ServiceManager extends ProcessorServiceImpl {
 
     if (this.enablePartition) {
       const concurrent = parseInt(process.env['PROCESS_CONCURRENCY'] || '0')
-      if (this.options.worker < concurrent) {
+      if (this.options.worker! < concurrent) {
         console.warn(
           `When partition is enabled, the worker count must >= 'PROCESS_CONCURRENCY', will set worker count to ${concurrent})`
         )
@@ -139,7 +140,8 @@ export class ServiceManager extends ProcessorServiceImpl {
       argv: process.argv,
       workerData: this.workerData
     })
-    this.pool.on('message', (msg) => {
+    // @ts-ignore - Piscina message handling for template instance sync
+    this.pool.on('message', (msg: any) => {
       if (msg.event == 'add_template_instance') {
         // sync the template state from worker to the main thread
         TemplateInstanceState.INSTANCE.addValue(msg.value)
package/src/service-v3.ts CHANGED
@@ -26,6 +26,7 @@ import { recordRuntimeInfo } from './service.js'
 import { DataBindingContext } from './db-context.js'
 import { TemplateInstanceState } from './state.js'
 import { freezeGlobalConfig } from './global-config.js'
+import { ProcessorRuntimeOptions } from 'processor-runner-program.js'
 
 const { process_binding_count, process_binding_time, process_binding_error } = processMetrics
 
@@ -35,11 +36,11 @@ export class ProcessorServiceImplV3 implements ProcessorV3ServiceImplementation
   private readonly shutdownHandler?: () => void
   private started = false
 
-  constructor(loader: () => Promise<any>, options?: any, shutdownHandler?: () => void) {
+  constructor(loader: () => Promise<any>, options?: ProcessorRuntimeOptions, shutdownHandler?: () => void) {
     this.loader = loader
     this.shutdownHandler = shutdownHandler
 
-    this.enablePartition = options?.['enable-partition'] == true
+    this.enablePartition = options?.enablePartition == true
   }
 
   async start(request: StartRequest, context: CallContext): Promise<Empty> {
@@ -170,9 +171,9 @@ export class ProcessorServiceImplV3 implements ProcessorV3ServiceImplementation
       .then(async (result) => {
         console.debug(`process binding ${processId} done`)
         await context.awaitPendings()
-
+        const { timeseriesResult, ...otherResults } = result
         console.debug('sending ts data length:', result.timeseriesResult.length)
-        for (const ts of result.timeseriesResult) {
+        for (const ts of timeseriesResult) {
           subject.next({
             processId,
             tsRequest: {
@@ -193,10 +194,7 @@ export class ProcessorServiceImplV3 implements ProcessorV3ServiceImplementation
 
         console.debug('sending binding result', processId)
         subject.next({
-          result: {
-            states: result.states,
-            exports: result.exports
-          },
+          result: otherResults,
           processId: processId
         })
         recordRuntimeInfo(result, binding.handlerType)
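The binding-result change above replaces the hand-built { states, exports } object with a single rest destructure: everything in the result except timeseriesResult is forwarded as the binding result, so new result fields flow through without further edits here. A minimal illustration of the pattern; ProcessResultLike is a stand-in shape, not the actual @sentio/protos type:

interface ProcessResultLike {
  timeseriesResult: unknown[]
  states: unknown
  exports: unknown
  // any additional fields on the real result type are carried along automatically
}

function splitResult(result: ProcessResultLike) {
  // timeseriesResult is streamed element by element; the rest goes out as one message
  const { timeseriesResult, ...otherResults } = result
  return { timeseriesResult, otherResults }
}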
package/src/service-worker.ts CHANGED
@@ -9,6 +9,7 @@ import { Piscina } from 'piscina'
 import { configureEndpoints } from './endpoints.js'
 import { setupLogger } from './logger.js'
 import { Subject } from 'rxjs'
+import { ProcessorRuntimeOptions } from 'processor-runner-program.js'
 
 let started = false
 
@@ -34,7 +35,7 @@ process
 
 let service: ProcessorServiceImpl | undefined
 
-const loader = async (options: any) => {
+const loader = async (options: ProcessorRuntimeOptions) => {
   if (options.target) {
     const m = await import(options.target)
     console.debug('Module loaded, path:', options.target, 'module:', m)
@@ -44,7 +45,7 @@ const loader = async (options: any) => {
 
 const emptyCallContext = <CallContext>{}
 
-async function start(request: StartRequest, options: any): Promise<Empty> {
+async function start(request: StartRequest, options: ProcessorRuntimeOptions): Promise<Empty> {
   if (started) {
     return {}
   }
@@ -73,7 +74,7 @@ export default async function ({
   const { startRequest, configRequest, options } = Piscina.workerData
   if (!started) {
     const logLevel = process.env['LOG_LEVEL']?.toUpperCase()
-    setupLogger(options['log-format'] === 'json', logLevel === 'debug' ? true : options.debug, threadId)
+    setupLogger(options.logFormat === 'json', logLevel === 'debug' ? true : options.debug, threadId)
 
     configureEndpoints(options)
 
@@ -103,8 +104,8 @@ export default async function ({
       ]
     )
   }
-  const timeout = (options['worker-timeout'] || 0) * 1000 // convert to milliseconds
-  const enablePartition = options['enable-partition'] || false
+  const timeout = (options.workerTimeout || 0) * 1000 // convert to milliseconds
+  const enablePartition = options.enablePartition || false
   await new Promise<void>((resolve, reject) => {
     const subject = new Subject<DeepPartial<ProcessStreamResponse>>()
     let timeoutId: NodeJS.Timeout | undefined = undefined
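The service-manager and service-worker diffs hand the parsed ProcessorRuntimeOptions from the main thread to each worker through Piscina's workerData (workerData: this.workerData at pool construction above, Piscina.workerData inside the worker entry). A trimmed-down sketch of that flow; the function name and payload shape are illustrative, not the package's actual wiring:

import { Piscina } from 'piscina'
import type { ProcessorRuntimeOptions } from 'processor-runner-program.js'

// Main-thread side: everything placed in workerData is visible to every worker
// thread spawned by the pool. The real pool also carries startRequest/configRequest;
// those are omitted here for brevity.
export function createPool(options: ProcessorRuntimeOptions): Piscina {
  return new Piscina({
    filename: new URL('./service-worker.js', import.meta.url).href,
    argv: process.argv,
    workerData: { options }
  })
}

// Worker side (inside the worker entry module), as in the diff above:
//   const { options } = Piscina.workerData
//   const timeout = (options.workerTimeout || 0) * 1000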
package/src/service.ts CHANGED
@@ -36,6 +36,7 @@ import { Provider } from 'ethers'
 import { decodeMulticallResult, encodeMulticallData, getMulticallAddress, Multicall3Call } from './multicall.js'
 
 import { processMetrics } from './metrics.js'
+import { ProcessorRuntimeOptions } from 'processor-runner-program.js'
 
 const { process_binding_count, process_binding_time, process_binding_error } = processMetrics
 
@@ -58,7 +59,7 @@ export class ProcessorServiceImpl implements ProcessorServiceImplementation {
   private preparedData: PreparedData | undefined
   readonly enablePartition: boolean
 
-  constructor(loader: () => Promise<any>, options?: any, shutdownHandler?: () => void) {
+  constructor(loader: () => Promise<any>, options?: ProcessorRuntimeOptions, shutdownHandler?: () => void) {
     this.loader = loader
     this.shutdownHandler = shutdownHandler
 
@@ -66,7 +67,7 @@ export class ProcessorServiceImpl implements ProcessorServiceImplementation {
       ? process.env['ENABLE_PREPROCESS'].toLowerCase() == 'true'
       : false
 
-    this.enablePartition = options?.['enable-partition'] == true
+    this.enablePartition = options?.enablePartition == true
   }
 
   async getConfig(request: ProcessConfigRequest, context: CallContext): Promise<ProcessConfigResponse> {