@sentio/runtime 2.62.0-rc.6 → 2.62.0-rc.8

This diff shows the changes between two publicly released versions of the package, as they appear in the public registry they were published to. It is provided for informational purposes only.
package/lib/service-worker.js CHANGED
@@ -10,7 +10,7 @@ import {
   require_cjs,
   require_lib3 as require_lib,
   require_lib4 as require_lib2
-} from "./chunk-RPV67F56.js";
+} from "./chunk-YBKSM3GO.js";
 import "./chunk-I5YHR3CE.js";
 import "./chunk-W3VN25ER.js";
 import {
@@ -68,7 +68,7 @@ async function service_worker_default({
   const { startRequest, configRequest, options } = Piscina.workerData;
   if (!started) {
     const logLevel = process.env["LOG_LEVEL"]?.toUpperCase();
-    setupLogger(options["log-format"] === "json", logLevel === "debug" ? true : options.debug, threadId);
+    setupLogger(options.logFormat === "json", logLevel === "debug" ? true : options.debug, threadId);
     configureEndpoints(options);
     if (startRequest) {
       await start(startRequest, options);
@@ -94,8 +94,8 @@ async function service_worker_default({
       ]
     );
   }
-  const timeout = (options["worker-timeout"] || 0) * 1e3;
-  const enablePartition = options["enable-partition"] || false;
+  const timeout = (options.workerTimeout || 0) * 1e3;
+  const enablePartition = options.enablePartition || false;
   await new Promise((resolve, reject) => {
     const subject = new import_rxjs.Subject();
     let timeoutId = void 0;
package/lib/service-worker.js.map CHANGED
@@ -1 +1 @@
(regenerated source map for service-worker.js: the embedded sourcesContent for ../src/service-worker.ts and the mappings pick up the same option renames as the compiled output above, i.e. options['log-format'] → options.logFormat, options['worker-timeout'] → options.workerTimeout, options['enable-partition'] → options.enablePartition)
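The rename that recurs throughout this release (options['log-format'] → options.logFormat, and so on) lines up with how Commander exposes parsed flags: `.opts()` camelCases multi-word option names, so kebab-case string lookups come back `undefined`. The worker receives that same parsed object through `Piscina.workerData`, which is why the bundled service-worker changes in lockstep. A minimal sketch of the behavior, assuming only Commander's documented `.opts()` semantics rather than anything specific to this package:

```ts
import { Command } from 'commander'

// Parse a fake argv; { from: 'node' } makes Commander skip the first two
// entries, exactly as it would for process.argv.
const program = new Command()
  .option('--log-format <format>', 'log format')
  .parse(['node', 'runner', '--log-format', 'json'], { from: 'node' })

const opts = program.opts()
console.log(opts.logFormat)      // 'json': multi-word flags come back camelCased
console.log(opts['log-format'])  // undefined: the kebab-case key never exists
```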
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@sentio/runtime",
-  "version": "2.62.0-rc.6",
+  "version": "2.62.0-rc.8",
   "license": "Apache-2.0",
   "type": "module",
   "exports": {
@@ -31,7 +31,7 @@
     "run": "tsx src/processor-runner.ts --log-format=json",
     "run-benchmark": "tsx src/decode-benchmark.ts",
     "start_js": "tsx ./lib/processor-runner.js $PWD/../../debug/dist/lib.js",
-    "start_ts": "tsx ./lib/processor-runner.js --log-format=json $PWD/../../debug/src/processor.ts",
+    "start_ts": "tsx src/processor-runner.ts --log-format=json $PWD/../../examples/x2y2/src/processor.ts",
     "test": "glob -c 'tsx --test' '**/*.test.ts'"
   }
 }
package/src/endpoints.ts CHANGED
@@ -15,7 +15,7 @@ export class Endpoints {
 }

 export function configureEndpoints(options: any) {
-  const fullPath = path.resolve(options['chains-config'])
+  const fullPath = path.resolve(options.chainsConfig)
   const chainsConfig = fs.readJsonSync(fullPath)

   const concurrencyOverride = process.env['OVERRIDE_CONCURRENCY']
@@ -26,9 +26,9 @@ export function configureEndpoints(options: any) {
     : undefined

   Endpoints.INSTANCE.concurrency = concurrencyOverride ?? options.concurrency
-  Endpoints.INSTANCE.batchCount = batchCountOverride ?? options['batch-count']
-  Endpoints.INSTANCE.chainQueryAPI = options['chainquery-server']
-  Endpoints.INSTANCE.priceFeedAPI = options['pricefeed-server']
+  Endpoints.INSTANCE.batchCount = batchCountOverride ?? options.batchCount
+  Endpoints.INSTANCE.chainQueryAPI = options.chainqueryServer
+  Endpoints.INSTANCE.priceFeedAPI = options.pricefeedServer

   for (const [id, config] of Object.entries(chainsConfig)) {
     const chainConfig = config as ChainConfig
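configureEndpoints keeps its environment overrides: `concurrencyOverride` comes from `OVERRIDE_CONCURRENCY` (parsed only when the variable is set, hence the `: undefined` branch), and `batchCountOverride` is defined just above this hunk. The `??` fallback matters here. A small sketch of the pattern with an illustrative helper (not the package's code):

```ts
// Illustrative helper: parse an env override only when the variable is set,
// then let ?? fall back to the CLI value.
function envOverride(name: string): number | undefined {
  const raw = process.env[name]
  return raw !== undefined ? parseInt(raw, 10) : undefined
}

// Mirrors `concurrencyOverride ?? options.concurrency` above: an explicit
// override of 0 survives ??, where || would have discarded it.
const concurrency = envOverride('OVERRIDE_CONCURRENCY') ?? 4
```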
package/src/processor-runner.ts CHANGED
@@ -55,11 +55,11 @@ const program = new Command()

 program
   .allowUnknownOption()
-  .allowExcessArguments()
+  // .allowExcessArguments()
   .name('processor-runner')
   .description('Sentio Processor Runtime')
   .version(packageJson.version)
-  .option('--target <path>', 'Path to the processor module to load')
+  .argument('<target>', 'Path to the processor module to load')
   .option('-p, --port <port>', 'Port to listen on', '4000')
   .option('--concurrency <number>', 'Number of concurrent workers', myParseInt, 4)
   .option('--batch-count <number>', 'Batch count for processing', myParseInt, 1)
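Two CLI-surface changes land in this hunk: the processor module moves from a `--target` option to a required positional `<target>` argument, and `allowExcessArguments()` is commented out, so stray positional arguments are no longer silently accepted (recent Commander versions error on excess arguments by default). A minimal sketch of the new invocation shape, assuming only Commander's documented `.argument()` / `.processedArgs` API:

```ts
import { Command } from 'commander'

const program = new Command()
  .name('processor-runner')
  .argument('<target>', 'Path to the processor module to load')
  .option('-p, --port <port>', 'Port to listen on', '4000')

// e.g. processor-runner ./lib/processor.js
program.parse(['node', 'processor-runner', './lib/processor.js'], { from: 'node' })

const options = program.opts()
// positional arguments land in processedArgs, not in opts()
options.target = program.processedArgs[0]
console.log(options.target) // './lib/processor.js'
```

Because `<target>` is declared required, Commander itself fails fast with a usage error when the path is missing, where the old `--target` option would simply have produced `undefined`.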
@@ -83,170 +83,162 @@ program
     'Enable binding data partition',
     process.env['SENTIO_ENABLE_BINDING_DATA_PARTITION'] === 'true'
   )
-  .action(async (options: any) => {
-    try {
-      await startServer(options)
-    } catch (error) {
-      console.error('Failed to start server:', error)
-      process.exit(1)
-    }
-  })
+  .parse()

-// Parse arguments
-program.parse()
+const options = program.opts()
+options.target = program.processedArgs[0]

-async function startServer(options: any): Promise<void> {
-  const logLevel = process.env['LOG_LEVEL']?.toLowerCase()
+const logLevel = process.env['LOG_LEVEL']?.toLowerCase()

-  setupLogger(options['log-format'] === 'json', logLevel === 'debug' ? true : options.debug)
-  console.debug('Starting with', options.target)
+setupLogger(options.logFormat === 'json', logLevel === 'debug' ? true : options.debug)
+console.debug('Starting with', options.target)

-  await setupOTLP(options['otlp-debug'])
+await setupOTLP(options.otlpDebug)

-  Error.stackTraceLimit = 20
+Error.stackTraceLimit = 20

-  configureEndpoints(options)
+configureEndpoints(options)

-  console.debug('Starting Server', options)
+console.debug('Starting Server', options)

-  let server: any
-  let baseService: ProcessorServiceImpl | ServiceManager
-  const loader = async () => {
-    const m = await import(options.target)
-    console.debug('Module loaded', m)
-    return m
-  }
-  if (options['start-action-server']) {
-    server = new ActionServer(loader)
-    server.listen(options.port)
-  } else {
-    server = createServer({
-      'grpc.max_send_message_length': 768 * 1024 * 1024,
-      'grpc.max_receive_message_length': 768 * 1024 * 1024,
-      'grpc.default_compression_algorithm': compressionAlgorithms.gzip
-    })
-      // .use(prometheusServerMiddleware())
-      .use(openTelemetryServerMiddleware())
-      .use(errorDetailsServerMiddleware)
+let server: any
+let baseService: ProcessorServiceImpl | ServiceManager
+const loader = async () => {
+  const m = await import(options.target)
+  console.debug('Module loaded', m)
+  return m
+}
+if (options.startActionServer) {
+  server = new ActionServer(loader)
+  server.listen(options.port)
+} else {
+  server = createServer({
+    'grpc.max_send_message_length': 768 * 1024 * 1024,
+    'grpc.max_receive_message_length': 768 * 1024 * 1024,
+    'grpc.default_compression_algorithm': compressionAlgorithms.gzip
+  })
+    // .use(prometheusServerMiddleware())
+    .use(openTelemetryServerMiddleware())
+    .use(errorDetailsServerMiddleware)

-    if (options.worker > 1) {
-      baseService = new ServiceManager(loader, options, server.shutdown)
-    } else {
-      baseService = new ProcessorServiceImpl(loader, options, server.shutdown)
-    }
+  if (options.worker > 1) {
+    baseService = new ServiceManager(loader, options, server.shutdown)
+  } else {
+    baseService = new ProcessorServiceImpl(loader, options, server.shutdown)
+  }

-    const service = new FullProcessorServiceImpl(baseService)
+  const service = new FullProcessorServiceImpl(baseService)

-    server.add(ProcessorDefinition, service)
-    server.add(
-      ProcessorV3Definition,
-      new FullProcessorServiceV3Impl(new ProcessorServiceImplV3(loader, options, server.shutdown))
-    )
+  server.add(ProcessorDefinition, service)
+  server.add(
+    ProcessorV3Definition,
+    new FullProcessorServiceV3Impl(new ProcessorServiceImplV3(loader, options, server.shutdown))
+  )

-    server.listen('0.0.0.0:' + options.port)
-    console.log('Processor Server Started at:', options.port)
-  }
-  const metricsPort = 4040
-
-  const httpServer = http
-    .createServer(async function (req, res) {
-      if (req.url) {
-        const reqUrl = new URL(req.url, `http://${req.headers.host}`)
-        const queries = reqUrl.searchParams
-        switch (reqUrl.pathname) {
-          // case '/metrics':
-          //   const metrics = await mergedRegistry.metrics()
-          //   res.write(metrics)
-          //   break
-          case '/heap': {
-            try {
-              const file = '/tmp/' + Date.now() + '.heapsnapshot'
-              await dumpHeap(file)
-              // send the file
-              const readStream = fs.createReadStream(file)
-              res.writeHead(200, { 'Content-Type': 'application/json' })
-              readStream.pipe(res)
-              res.end()
-            } catch {
-              res.writeHead(500)
-              res.end()
-            }
-            break
+  server.listen('0.0.0.0:' + options.port)
+  console.log('Processor Server Started at:', options.port)
+}
+const metricsPort = 4040
+
+const httpServer = http
+  .createServer(async function (req, res) {
+    if (req.url) {
+      const reqUrl = new URL(req.url, `http://${req.headers.host}`)
+      const queries = reqUrl.searchParams
+      switch (reqUrl.pathname) {
+        // case '/metrics':
+        //   const metrics = await mergedRegistry.metrics()
+        //   res.write(metrics)
+        //   break
+        case '/heap': {
+          try {
+            const file = '/tmp/' + Date.now() + '.heapsnapshot'
+            await dumpHeap(file)
+            // send the file
+            const readStream = fs.createReadStream(file)
+            res.writeHead(200, { 'Content-Type': 'application/json' })
+            readStream.pipe(res)
+            res.end()
+          } catch {
+            res.writeHead(500)
+            res.end()
           }
-          case '/profile': {
-            try {
-              const profileTime = parseInt(queries.get('t') || '1000', 10) || 1000
-              const session = new Session()
-              session.connect()
-
-              await session.post('Profiler.enable')
-              await session.post('Profiler.start')
-
-              await new Promise((resolve) => setTimeout(resolve, profileTime))
-              const { profile } = await session.post('Profiler.stop')
-
-              res.writeHead(200, { 'Content-Type': 'application/json' })
-              res.write(JSON.stringify(profile))
-              session.disconnect()
-            } catch {
-              res.writeHead(500)
-            }
-            break
+          break
+        }
+        case '/profile': {
+          try {
+            const profileTime = parseInt(queries.get('t') || '1000', 10) || 1000
+            const session = new Session()
+            session.connect()
+
+            await session.post('Profiler.enable')
+            await session.post('Profiler.start')
+
+            await new Promise((resolve) => setTimeout(resolve, profileTime))
+            const { profile } = await session.post('Profiler.stop')
+
+            res.writeHead(200, { 'Content-Type': 'application/json' })
+            res.write(JSON.stringify(profile))
+            session.disconnect()
+          } catch {
+            res.writeHead(500)
           }
-          default:
-            res.writeHead(404)
+          break
         }
-      } else {
-        res.writeHead(404)
+        default:
+          res.writeHead(404)
       }
-      res.end()
-    })
-    .listen(metricsPort)
+    } else {
+      res.writeHead(404)
+    }
+    res.end()
+  })
+  .listen(metricsPort)

-  console.log('Metric Server Started at:', metricsPort)
+console.log('Metric Server Started at:', metricsPort)

-  process
-    .on('SIGINT', function () {
-      shutdownServers(server, httpServer, 0)
-    })
-    .on('uncaughtException', (err) => {
-      console.error('Uncaught Exception, please checking if await is properly used', err)
-      if (baseService) {
-        baseService.unhandled = err
-      }
-      // shutdownServers(1)
-    })
-    .on('unhandledRejection', (reason, p) => {
-      // @ts-ignore ignore invalid ens error
-      if (reason?.message.startsWith('invalid ENS name (disallowed character: "*"')) {
-        return
-      }
-      console.error('Unhandled Rejection, please checking if await is properly', reason)
-      if (baseService) {
-        baseService.unhandled = reason as Error
-      }
-      // shutdownServers(1)
-    })
+process
+  .on('SIGINT', function () {
+    shutdownServers(0)
+  })
+  .on('uncaughtException', (err) => {
+    console.error('Uncaught Exception, please checking if await is properly used', err)
+    if (baseService) {
+      baseService.unhandled = err
+    }
+    // shutdownServers(1)
+  })
+  .on('unhandledRejection', (reason, p) => {
+    // @ts-ignore ignore invalid ens error
+    if (reason?.message.startsWith('invalid ENS name (disallowed character: "*"')) {
+      return
+    }
+    console.error('Unhandled Rejection, please checking if await is properly', reason)
+    if (baseService) {
+      baseService.unhandled = reason as Error
+    }
+    // shutdownServers(1)
+  })

-  if (process.env['OOM_DUMP_MEMORY_SIZE_GB']) {
-    let dumping = false
-    const memorySize = parseFloat(process.env['OOM_DUMP_MEMORY_SIZE_GB']!)
-    console.log('heap dumping is enabled, limit set to ', memorySize, 'gb')
-    const dir = process.env['OOM_DUMP_DIR'] || '/tmp'
-    setInterval(async () => {
-      const mem = process.memoryUsage()
-      console.log('Current Memory Usage', mem)
-      // if memory usage is greater this size, dump heap and exit
-      if (mem.heapTotal > memorySize * 1024 * 1024 * 1024 && !dumping) {
-        const file = path.join(dir, `${Date.now()}.heapsnapshot`)
-        dumping = true
-        await dumpHeap(file)
-        // force exit and keep pod running
-        process.exit(11)
-      }
-    }, 1000 * 60)
-  }
+if (process.env['OOM_DUMP_MEMORY_SIZE_GB']) {
+  let dumping = false
+  const memorySize = parseFloat(process.env['OOM_DUMP_MEMORY_SIZE_GB']!)
+  console.log('heap dumping is enabled, limit set to ', memorySize, 'gb')
+  const dir = process.env['OOM_DUMP_DIR'] || '/tmp'
+  setInterval(async () => {
+    const mem = process.memoryUsage()
+    console.log('Current Memory Usage', mem)
+    // if memory usage is greater this size, dump heap and exit
+    if (mem.heapTotal > memorySize * 1024 * 1024 * 1024 && !dumping) {
+      const file = path.join(dir, `${Date.now()}.heapsnapshot`)
+      dumping = true
+      await dumpHeap(file)
+      // force exit and keep pod running
+      process.exit(11)
+    }
+  }, 1000 * 60)
 }
+// }

 async function dumpHeap(file: string): Promise<void> {
   console.log('Heap dumping to', file)
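The restructuring above is mostly an indentation change: the body of `startServer()` is hoisted to module top level (the package is ESM, so top-level `await setupOTLP(...)` is legal), and the `.action()` wrapper with its try/catch is dropped, so a startup failure now escapes module evaluation under Node's default error handling instead of the explicit `console.error` plus `process.exit(1)`. Errors after startup still flow through the `uncaughtException`/`unhandledRejection` hooks, which park the error on `baseService.unhandled` rather than exiting; the service-worker in this release uses the same pattern. A condensed sketch of that park-and-surface pattern, adapted from the service-worker source embedded in the map above (the real code raises a gRPC RichServerError, not a plain Error):

```ts
// Park an unhandled error, then surface it on the next request instead of
// killing the process or worker thread.
let unhandled: Error | undefined

process
  .on('uncaughtException', (err) => {
    unhandled = err
  })
  .on('unhandledRejection', (reason) => {
    unhandled = reason as Error
  })

export function assertNoPriorFailure(): void {
  if (unhandled) {
    const err = unhandled
    unhandled = undefined // report each failure once
    throw new Error(`Unhandled exception/rejection in previous request: ${err.message}`)
  }
}
```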
@@ -267,8 +259,8 @@ async function dumpHeap(file: string): Promise<void> {
   }
 }

-function shutdownServers(server: any, httpServer: any, exitCode: number): void {
-  server?.forceShutdown()
+function shutdownServers(exitCode: number): void {
+  server.forceShutdown()
   console.log('RPC server shut down')

   httpServer.close(function () {
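With `server` and `httpServer` now created at module scope, `shutdownServers` can close over them instead of taking them as parameters, and the optional chaining on `forceShutdown` is dropped since startup assigns `server` before the SIGINT handler is registered. A sketch of the resulting shape (the `close` callback body is truncated in the hunk above, so the exit call is an assumption):

```ts
let server: any      // gRPC (or Action) server, assigned during top-level startup
let httpServer: any  // metrics/debug HTTP server, also module scope

function shutdownServers(exitCode: number): void {
  server.forceShutdown() // safe without ?.: server is set before handlers are installed
  console.log('RPC server shut down')

  httpServer.close(() => process.exit(exitCode)) // assumption: exit once closed
}
```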
package/src/service-worker.ts CHANGED
@@ -73,7 +73,7 @@ export default async function ({
   const { startRequest, configRequest, options } = Piscina.workerData
   if (!started) {
     const logLevel = process.env['LOG_LEVEL']?.toUpperCase()
-    setupLogger(options['log-format'] === 'json', logLevel === 'debug' ? true : options.debug, threadId)
+    setupLogger(options.logFormat === 'json', logLevel === 'debug' ? true : options.debug, threadId)

     configureEndpoints(options)

@@ -103,8 +103,8 @@ export default async function ({
       ]
     )
   }
-  const timeout = (options['worker-timeout'] || 0) * 1000 // convert to milliseconds
-  const enablePartition = options['enable-partition'] || false
+  const timeout = (options.workerTimeout || 0) * 1000 // convert to milliseconds
+  const enablePartition = options.enablePartition || false
   await new Promise<void>((resolve, reject) => {
     const subject = new Subject<DeepPartial<ProcessStreamResponse>>()
     let timeoutId: NodeJS.Timeout | undefined = undefined
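The same rename lands here in the TypeScript source, with the comment clarifying units: `--worker-timeout` is given in seconds and multiplied to milliseconds. The full source (visible in the map above) arms the timer in one of two ways: with `enablePartition`, the clock starts each time a `start` request arrives on the worker port; without it, the clock starts right after the first binding request is dispatched, and either way it is cleared when the final result is posted. A self-contained sketch of those two modes, with the gRPC plumbing faked by an EventEmitter instead of a MessagePort/Subject:

```ts
import { EventEmitter } from 'node:events'

// Illustration of the two timeout-arming modes; names follow the diff,
// the transport is simulated.
function runWithTimeout(port: EventEmitter, timeoutMs: number, enablePartition: boolean) {
  return new Promise<void>((resolve, reject) => {
    let timeoutId: NodeJS.Timeout | undefined
    const arm = () => {
      timeoutId = setTimeout(() => reject(new Error('Worker timeout exceeded')), timeoutMs)
    }

    port.on('message', (msg: { start?: boolean; result?: boolean }) => {
      if (msg.result) {
        // final result: stop the clock and finish
        if (timeoutId) clearTimeout(timeoutId)
        resolve()
      } else if (enablePartition && msg.start && timeoutMs > 0) {
        arm() // partitioned: clock starts when a start request arrives
      }
    })

    if (!enablePartition && timeoutMs > 0) {
      arm() // non-partitioned: clock starts with the first binding request
    }
  })
}
```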