@platformatic/runtime 2.74.3 → 3.0.0-alpha.2
This diff shows the contents of publicly available package versions as published to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their respective public registries.
- package/config.d.ts +1 -1
- package/index.d.ts +70 -21
- package/index.js +151 -18
- package/lib/config.js +160 -219
- package/lib/errors.js +150 -108
- package/lib/{generator/runtime-generator.js → generator.js} +36 -53
- package/lib/logger.js +7 -30
- package/lib/management-api.js +6 -56
- package/lib/runtime.js +233 -264
- package/lib/schema.js +2 -1
- package/lib/shared-http-cache.js +1 -1
- package/lib/upgrade.js +6 -4
- package/lib/utils.js +1 -48
- package/lib/worker/app.js +52 -68
- package/lib/worker/itc.js +16 -4
- package/lib/worker/main.js +6 -3
- package/lib/worker/messaging.js +2 -2
- package/package.json +22 -30
- package/schema.json +2 -1
- package/help/compile.txt +0 -8
- package/help/help.txt +0 -5
- package/help/start.txt +0 -21
- package/index.test-d.ts +0 -41
- package/lib/build-server.js +0 -67
- package/lib/compile.js +0 -108
- package/lib/generator/README.md +0 -32
- package/lib/generator/errors.js +0 -10
- package/lib/generator/runtime-generator.d.ts +0 -37
- package/lib/start.js +0 -211
- package/lib/worker/default-stackable.js +0 -33
- package/runtime.mjs +0 -54
package/lib/runtime.js
CHANGED
```diff
@@ -1,22 +1,30 @@
 'use strict'
 
 const { ITC } = require('@platformatic/itc')
-const { …
+const {
+  features,
+  ensureLoggableError,
+  ensureError,
+  executeWithTimeout,
+  deepmerge,
+  parseMemorySize,
+  kTimeout,
+  kMetadata
+} = require('@platformatic/foundation')
 const { once, EventEmitter } = require('node:events')
-const { …
-const { …
+const { existsSync } = require('node:fs')
+const { readFile } = require('node:fs/promises')
 const { STATUS_CODES } = require('node:http')
 const { join } = require('node:path')
+const { pathToFileURL } = require('node:url')
 const { setTimeout: sleep, setImmediate: immediate } = require('node:timers/promises')
 const { Worker } = require('node:worker_threads')
-const ts = require('tail-file-stream')
 const { Agent, interceptors: undiciInterceptors, request } = require('undici')
 const { createThreadInterceptor } = require('undici-thread-interceptor')
 const SonicBoom = require('sonic-boom')
-
 const { checkDependencies, topologicalSort } = require('./dependencies')
 const errors = require('./errors')
-const { createLogger } = require('./logger')
+const { abstractLogger, createLogger } = require('./logger')
 const { startManagementApi } = require('./management-api')
 const { startPrometheusServer } = require('./prom-server')
 const { startScheduler } = require('./scheduler')
@@ -37,7 +45,6 @@ const {
   kLastELU,
   kWorkersBroadcast
 } = require('./worker/symbols')
-
 const fastify = require('fastify')
 
 const platformaticVersion = require('../package.json').version
@@ -58,11 +65,12 @@ const telemetryPath = require.resolve('@platformatic/telemetry')
 const openTelemetrySetupPath = join(telemetryPath, '..', 'lib', 'node-telemetry.js')
 
 class Runtime extends EventEmitter {
-  #…
+  #root
+  #config
+  #env
+  #context
   #isProduction
   #runtimeTmpDir
-  #runtimeLogsDir
-  #env
   #servicesIds
   #entrypointId
   #url
@@ -85,27 +93,30 @@ class Runtime extends EventEmitter {
   #stdio
   #sharedContext
 
-  constructor (…
+  constructor (config, context) {
     super()
     this.setMaxListeners(MAX_LISTENERS_COUNT)
 
-    this.#…
-    this.#…
-    this.#…
-    this.#…
+    this.#config = config
+    this.#root = config[kMetadata].root
+    this.#env = config[kMetadata].env
+    this.#context = context ?? {}
+    this.#isProduction = this.#context.isProduction ?? this.#context.production ?? false
+    this.#runtimeTmpDir = getRuntimeTmpDir(this.#root)
     this.#workers = new RoundRobinMap()
     this.#servicesIds = []
     this.#url = undefined
     this.#meshInterceptor = createThreadInterceptor({
       domain: '.plt.local',
-      timeout: this.#…
+      timeout: this.#config.serviceTimeout
     })
+    this.logger = abstractLogger // This is replaced by the real logger in init() and eventually removed in close()
     this.#status = undefined
     this.#restartingWorkers = new Map()
     this.#sharedHttpCache = null
     this.servicesConfigsPatches = new Map()
 
-    if (!this.#…
+    if (!this.#config.logger.captureStdio) {
       this.#stdio = {
         stdout: new SonicBoom({ fd: process.stdout.fd }),
         stderr: new SonicBoom({ fd: process.stderr.fd })
```
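The constructor rewrite above is the heart of the 3.0 API change: instead of a `ConfigManager`, `Runtime` now receives an already-resolved config object whose root directory and environment travel with it under the `kMetadata` symbol, plus an optional context. A minimal usage sketch — the `loadConfig` helper and the assumption that `Runtime` is exported from the package root are not shown in this diff:

```js
// Hypothetical sketch: `loadConfig` and the `Runtime` export are assumptions.
const { Runtime } = require('@platformatic/runtime')
const { kMetadata } = require('@platformatic/foundation')

async function main () {
  // In 3.0 the config is expected to arrive already resolved, with its
  // root and env attached under the kMetadata symbol by the loader.
  const config = await loadConfig('/srv/app/platformatic.json')
  console.log(config[kMetadata].root) // e.g. '/srv/app'

  // The second argument is an optional context; `production` (or
  // `isProduction`) drives the runtime's production mode.
  const runtime = new Runtime(config, { production: true })
  await runtime.start()
}

main()
```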
```diff
@@ -129,14 +140,18 @@ class Runtime extends EventEmitter {
   }
 
   async init () {
-    …
+    if (typeof this.#status !== 'undefined') {
+      return
+    }
+
+    const config = this.#config
     const autoloadEnabled = config.autoload
 
     // This cannot be transferred to worker threads
     delete config.configManager
 
     if (config.managementApi) {
-      this.#managementApi = await startManagementApi(this, this.#…
+      this.#managementApi = await startManagementApi(this, this.#root)
     }
 
     if (config.metrics) {
@@ -144,17 +159,16 @@ class Runtime extends EventEmitter {
     }
 
     // Create the logger
-    const [logger, destination] = await createLogger(config…
+    const [logger, destination] = await createLogger(config)
     this.logger = logger
     this.#loggerDestination = destination
 
-    this.#isProduction = this.#configManager.args?.production ?? false
     this.#servicesIds = config.services.map(service => service.id)
     this.#createWorkersBroadcastChannel()
 
     const workersConfig = []
     for (const service of config.services) {
-      const count = service.workers ?? this.#…
+      const count = service.workers ?? this.#config.workers
       if (count > 1 && service.entrypoint && !features.node.reusePort) {
         this.logger.warn(
           `"${service.id}" is set as the entrypoint, but reusePort is not available in your OS; setting workers to 1 instead of ${count}`
@@ -181,7 +195,7 @@ class Runtime extends EventEmitter {
     if (!serviceConfig.path) {
       if (serviceConfig.url) {
         // Try to backfill the path for external services
-        serviceConfig.path = join(this.#…
+        serviceConfig.path = join(this.#root, config.resolvedServicesBasePath, serviceConfig.id)
 
         if (!existsSync(serviceConfig.path)) {
           const executable = globalThis.platformatic?.executable ?? 'platformatic'
@@ -244,7 +258,11 @@ class Runtime extends EventEmitter {
   }
 
   async start (silent = false) {
-    if (typeof this.#…
+    if (typeof this.#status === 'undefined') {
+      await this.init()
+    }
+
+    if (typeof this.#config.entrypoint === 'undefined') {
       throw new errors.MissingEntrypointError()
     }
     this.#updateStatus('starting')
```
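`start()` now bootstraps the runtime itself when `#status` is still undefined, so an explicit `init()` call becomes optional; `init()` in turn returns early once a status exists, which makes the pair idempotent. Sketched, continuing the earlier example:

```js
// Continuing the previous sketch: init() is now lazy and idempotent.
const runtime = new Runtime(config)

await runtime.start() // implicitly runs init() because #status is undefined
await runtime.init()  // no-op afterwards: #status has already been set
```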
```diff
@@ -256,16 +274,16 @@ class Runtime extends EventEmitter {
       await this.startService(service, silent)
     }
 
-    if (this.#…
-      const { port } = this.#…
+    if (this.#config.inspectorOptions) {
+      const { port } = this.#config.inspectorOptions
 
       const server = fastify({
         loggerInstance: this.logger.child({ name: 'inspector' }, { level: 'warn' })
       })
 
-      const version = await fetch(…
-        …
-      )
+      const version = await fetch(`http://127.0.0.1:${this.#config.inspectorOptions.port + 1}/json/version`).then(
+        res => res.json()
+      )
 
       const data = await Promise.all(
         Array.from(this.#workers.values()).map(async worker => {
@@ -363,21 +381,17 @@ class Runtime extends EventEmitter {
 
     await this.stop(silent)
 
-    …
-    …
-      setImmediate(() => {
-        this.#managementApi.close()
-      })
-    }
+    // The management API autocloses by itself via event in management-api.js.
+    // This is needed to let management API stop endpoint to reply.
 
     if (this.#prometheusServer) {
       await this.#prometheusServer.close()
     }
 
     if (this.logger) {
-      this.#loggerDestination…
+      this.#loggerDestination?.end()
 
-      this.logger = …
+      this.logger = abstractLogger
       this.#loggerDestination = null
     }
 
@@ -406,7 +420,7 @@ class Runtime extends EventEmitter {
       throw new errors.ApplicationAlreadyStartedError()
     }
 
-    const config = this.#…
+    const config = this.#config
     const serviceConfig = config.services.find(s => s.id === id)
 
     if (!serviceConfig) {
@@ -425,7 +439,7 @@ class Runtime extends EventEmitter {
   }
 
   async stopService (id, silent = false) {
-    const config = this.#…
+    const config = this.#config
     const serviceConfig = config.services.find(s => s.id === id)
 
     if (!serviceConfig) {
@@ -506,7 +520,7 @@ class Runtime extends EventEmitter {
   }
 
   async updateUndiciInterceptors (undiciConfig) {
-    this.#…
+    this.#config.undici = undiciConfig
 
     const promises = []
     for (const worker of this.#workers.values()) {
@@ -533,8 +547,7 @@ class Runtime extends EventEmitter {
         metrics = await this.getFormattedMetrics()
       } catch (error) {
         if (!(error instanceof errors.RuntimeExitedError)) {
-
-          console.error('Error collecting metrics', error)
+          this.logger.error({ err: ensureLoggableError(error) }, 'Error collecting metrics')
         }
         return
       }
@@ -547,87 +560,18 @@ class Runtime extends EventEmitter {
     }, COLLECT_METRICS_TIMEOUT).unref()
   }
 
-  async …
-    …
-    …
-    …
-    const runtimeLogFiles = await this.#getRuntimeLogFiles(runtimePID)
+  async addLoggerDestination (writableStream) {
+    // Add the stream - We output everything we get
+    this.#loggerDestination.add({ stream: writableStream, level: 1 })
 
-    …
-    …
-      return
-    }
-
-    let latestFileId = parseInt(runtimeLogFiles.at(-1).slice('logs.'.length))
-
-    let fileStream = null
-    let fileId = startLogId ?? latestFileId
-    let isClosed = false
-
-    const runtimeLogsDir = this.#getRuntimeLogsDir(runtimePID)
-
-    const watcher = watch(runtimeLogsDir, async (event, filename) => {
-      if (event === 'rename' && filename.startsWith('logs')) {
-        const logFileId = parseInt(filename.slice('logs.'.length))
-        if (logFileId > latestFileId) {
-          latestFileId = logFileId
-          fileStream.unwatch()
-        }
-      }
-    }).unref()
-
-    const streamLogFile = () => {
-      if (fileId > endLogId) {
-        writableStream.end()
-        return
-      }
-
-      const fileName = 'logs.' + fileId
-      const filePath = join(runtimeLogsDir, fileName)
-
-      const prevFileStream = fileStream
-
-      fileStream = ts.createReadStream(filePath)
-      fileStream.pipe(writableStream, { end: false, persistent: false })
-
-      if (prevFileStream) {
-        prevFileStream.unpipe(writableStream)
-        prevFileStream.destroy()
-      }
-
-      fileStream.on('close', () => {
-        if (latestFileId > fileId && !isClosed) {
-          streamLogFile(++fileId)
-        }
-      })
-
-      fileStream.on('error', err => {
-        isClosed = true
-        logger.error(err, 'Error streaming log file')
-        fileStream.destroy()
-        watcher.close()
-        writableStream.end()
-      })
-
-      fileStream.on('eof', () => {
-        if (fileId >= endLogId) {
-          writableStream.end()
-          return
-        }
-        if (latestFileId > fileId) {
-          fileStream.unwatch()
-        }
-      })
-
-      return fileStream
-    }
-
-    streamLogFile(fileId)
+    // Immediately get the counter of the lastId so we can use it to later remove it
+    const id = this.#loggerDestination.lastId
 
     const onClose = () => {
-      …
-      …
-      …
+      writableStream.removeListener('close', onClose)
+      writableStream.removeListener('error', onClose)
+      this.removeListener('closed', onClose)
+      this.#loggerDestination.remove(id)
     }
 
     writableStream.on('close', onClose)
```
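The whole file-based log streaming machinery (`tail-file-stream`, the `logs.N` files and their watcher) is gone; consumers now attach a writable stream directly to the runtime's logger destination via `addLoggerDestination`, and the `onClose` handler detaches it again. A sketch of how a consumer might use this, assuming a started runtime as in the earlier examples:

```js
const { PassThrough } = require('node:stream')

// Receive every log record the runtime produces (level 1 = everything).
const sink = new PassThrough()
sink.on('data', chunk => process.stderr.write(chunk))

await runtime.addLoggerDestination(sink)

// Destroying the stream emits 'close', which triggers onClose and
// removes the destination from the logger again.
sink.destroy()
```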
```diff
@@ -635,6 +579,10 @@ class Runtime extends EventEmitter {
     this.on('closed', onClose)
   }
 
+  async getUrl () {
+    return this.#url
+  }
+
   async getRuntimeMetadata () {
     const packageJson = await this.#getRuntimePackageJson()
     const entrypointDetails = await this.getEntrypointDetails()
@@ -646,7 +594,7 @@ class Runtime extends EventEmitter {
       uptimeSeconds: Math.floor(process.uptime()),
       execPath: process.execPath,
       nodeVersion: process.version,
-      projectDir: this.#…
+      projectDir: this.#root,
       packageName: packageJson.name ?? null,
       packageVersion: packageJson.version ?? null,
       url: entrypointDetails?.url ?? null,
@@ -655,11 +603,16 @@ class Runtime extends EventEmitter {
   }
 
   getRuntimeEnv () {
-    return this.#…
+    return this.#env
   }
 
-  getRuntimeConfig () {
-    …
+  getRuntimeConfig (includeMeta = false) {
+    if (includeMeta) {
+      return this.#config
+    }
+
+    const { [kMetadata]: _, ...config } = this.#config
+    return config
   }
 
   getInterceptor () {
```
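`getRuntimeConfig` strips the symbol-keyed metadata by default; the computed-key rest destructure works because object rest copies own enumerable properties, symbols included, except the ones explicitly plucked out. A stand-alone illustration of the technique (the local `kMetadata` symbol here is a stand-in for the one exported by `@platformatic/foundation`):

```js
const kMetadata = Symbol('plt.metadata') // stand-in for the real symbol

const config = {
  entrypoint: 'main',
  workers: 2,
  [kMetadata]: { root: '/srv/app', env: { PORT: '3042' } }
}

// Computed-key rest destructuring drops exactly the symbol property.
const { [kMetadata]: _, ...publicConfig } = config

console.log(publicConfig.workers) // 2
console.log(Object.getOwnPropertySymbols(publicConfig)) // []
```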
```diff
@@ -979,44 +932,6 @@ class Runtime extends EventEmitter {
     this.servicesConfigsPatches.delete(id)
   }
 
-  async getLogIds (runtimePID) {
-    runtimePID = runtimePID ?? process.pid
-
-    const runtimeLogFiles = await this.#getRuntimeLogFiles(runtimePID)
-    const runtimeLogIds = []
-
-    for (const logFile of runtimeLogFiles) {
-      const logId = parseInt(logFile.slice('logs.'.length))
-      runtimeLogIds.push(logId)
-    }
-    return runtimeLogIds
-  }
-
-  async getAllLogIds () {
-    const runtimesLogFiles = await this.#getAllLogsFiles()
-    const runtimesLogsIds = []
-
-    for (const runtime of runtimesLogFiles) {
-      const runtimeLogIds = []
-      for (const logFile of runtime.runtimeLogFiles) {
-        const logId = parseInt(logFile.slice('logs.'.length))
-        runtimeLogIds.push(logId)
-      }
-      runtimesLogsIds.push({
-        pid: runtime.runtimePID,
-        indexes: runtimeLogIds
-      })
-    }
-
-    return runtimesLogsIds
-  }
-
-  async getLogFileStream (logFileId, runtimePID) {
-    const runtimeLogsDir = this.#getRuntimeLogsDir(runtimePID)
-    const filePath = join(runtimeLogsDir, `logs.${logFileId}`)
-    return createReadStream(filePath)
-  }
-
   #getHttpCacheValue ({ request }) {
     if (!this.#sharedHttpCache) {
       return
@@ -1082,6 +997,7 @@ class Runtime extends EventEmitter {
       }
     }
 
+    this.logger.trace({ event, payload }, 'Runtime event')
     return super.emit(event, payload)
   }
 
@@ -1095,9 +1011,7 @@ class Runtime extends EventEmitter {
 
     const promises = []
     for (const worker of this.#workers.values()) {
-      promises.push(
-        sendViaITC(worker, 'setSharedContext', sharedContext)
-      )
+      promises.push(sendViaITC(worker, 'setSharedContext', sharedContext))
     }
 
     const results = await Promise.allSettled(promises)
@@ -1115,13 +1029,13 @@ class Runtime extends EventEmitter {
   }
 
   async #setDispatcher (undiciConfig) {
-    const config = this.#…
+    const config = this.#config
 
     const dispatcherOpts = { ...undiciConfig }
     const interceptors = [this.#meshInterceptor]
 
     if (config.httpCache) {
-      this.#sharedHttpCache = await createSharedStore(this.#…
+      this.#sharedHttpCache = await createSharedStore(this.#root, config.httpCache)
       interceptors.push(
         undiciInterceptors.cache({
           store: this.#sharedHttpCache,
@@ -1144,7 +1058,7 @@ class Runtime extends EventEmitter {
   async #setupService (serviceConfig) {
     if (this.#status === 'stopping' || this.#status === 'closed') return
 
-    const config = this.#…
+    const config = this.#config
     const workersCount = await this.#workers.getCount(serviceConfig.id)
     const id = serviceConfig.id
 
@@ -1162,9 +1076,9 @@ class Runtime extends EventEmitter {
     // Handle inspector
     let inspectorOptions
 
-    if (this.#…
+    if (this.#config.inspectorOptions) {
       inspectorOptions = {
-        ...this.#…
+        ...this.#config.inspectorOptions
       }
 
       inspectorOptions.port = inspectorOptions.port + this.#workers.size + 1
@@ -1183,11 +1097,12 @@ class Runtime extends EventEmitter {
 
     const execArgv = []
 
-    if (!serviceConfig.…
+    if (!serviceConfig.skipTelemetryHooks && config.telemetry && config.telemetry.enabled !== false) {
+      const hookUrl = pathToFileURL(require.resolve('@opentelemetry/instrumentation/hook.mjs'))
       // We need the following because otherwise some open telemetry instrumentations won't work with ESM (like express)
       // see: https://github.com/open-telemetry/opentelemetry-js/blob/main/doc/esm-support.md#instrumentation-hook-required-for-esm
-      execArgv.push('--…
-      execArgv.push('--…
+      execArgv.push('--import', `data:text/javascript, import { register } from 'node:module'; register('${hookUrl}')`)
+      execArgv.push('--import', pathToFileURL(openTelemetrySetupPath))
     }
 
     if ((serviceConfig.sourceMaps ?? config.sourceMaps) === true) {
```
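The telemetry hunk changes how the OpenTelemetry loader is injected: each worker now receives two `--import` flags, the first of which is an inline `data:` module that calls `register()` from `node:module` with the instrumentation hook, so ESM imports (such as express) can be patched at resolution time. Roughly, the resulting `execArgv` looks like this (the paths are placeholders):

```js
const { pathToFileURL } = require('node:url')

// Placeholder paths; in the real code these come from require.resolve().
const hookUrl = pathToFileURL('/srv/app/node_modules/@opentelemetry/instrumentation/hook.mjs')
const setupUrl = pathToFileURL('/srv/app/node_modules/@platformatic/telemetry/lib/node-telemetry.js')

const execArgv = [
  // 1) Register the ESM loader hook before any service code is imported.
  '--import',
  `data:text/javascript, import { register } from 'node:module'; register('${hookUrl}')`,
  // 2) Then run the telemetry bootstrap itself.
  '--import',
  setupUrl.href
]

// These flags are then passed to the service's worker thread,
// e.g. new Worker(main, { execArgv }).
```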
```diff
@@ -1202,8 +1117,12 @@ class Runtime extends EventEmitter {
       workerEnv['NODE_OPTIONS'] = `${originalNodeOptions} ${serviceConfig.nodeOptions}`.trim()
     }
 
-    const maxHeapTotal = …
-    …
+    const maxHeapTotal =
+      typeof health.maxHeapTotal === 'string' ? parseMemorySize(health.maxHeapTotal) : health.maxHeapTotal
+    const maxYoungGeneration =
+      typeof health.maxYoungGeneration === 'string'
+        ? parseMemorySize(health.maxYoungGeneration)
+        : health.maxYoungGeneration
 
     const maxOldGenerationSizeMb = Math.floor(
       (maxYoungGeneration > 0 ? maxHeapTotal - maxYoungGeneration : maxHeapTotal) / (1024 * 1024)
```
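`health.maxHeapTotal` and `health.maxYoungGeneration` may now be human-readable strings, normalized to bytes with `parseMemorySize` before the V8 resource limits are computed. A small worked example of the arithmetic above (the exact unit grammar `parseMemorySize` accepts is an assumption beyond what this diff shows):

```js
const { parseMemorySize } = require('@platformatic/foundation')

const health = { maxHeapTotal: '512MB', maxYoungGeneration: 64 * 1024 * 1024 }

const maxHeapTotal =
  typeof health.maxHeapTotal === 'string' ? parseMemorySize(health.maxHeapTotal) : health.maxHeapTotal
const maxYoungGeneration =
  typeof health.maxYoungGeneration === 'string'
    ? parseMemorySize(health.maxYoungGeneration)
    : health.maxYoungGeneration

// Same computation as the diff: the old-generation budget in MiB is the
// total heap minus the young generation, when one is configured.
const maxOldGenerationSizeMb = Math.floor(
  (maxYoungGeneration > 0 ? maxHeapTotal - maxYoungGeneration : maxHeapTotal) / (1024 * 1024)
)
console.log(maxOldGenerationSizeMb) // 448, if '512MB' parses as 512 MiB
```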
```diff
@@ -1224,8 +1143,7 @@ class Runtime extends EventEmitter {
           count: workersCount
         },
         inspectorOptions,
-        dirname: this.#…
-        runtimeLogsDir: this.#runtimeLogsDir
+        dirname: this.#root
       },
       argv: serviceConfig.arguments,
       execArgv,
@@ -1300,7 +1218,7 @@ class Runtime extends EventEmitter {
       worker[kInspectorOptions] = {
         port: inspectorOptions.port,
         id: serviceId,
-        dirname: this.#…
+        dirname: this.#root
       }
     }
 
@@ -1317,10 +1235,15 @@ class Runtime extends EventEmitter {
     })
     worker[kITC].listen()
 
+    // Forward events from the worker
+    worker[kITC].on('event', ({ event, payload }) => {
+      this.emit(`service:worker:event:${event}`, { ...eventPayload, payload })
+    })
+
     // Only activate watch for the first instance
     if (index === 0) {
       // Handle services changes
-      // This is not purposely activated on when this.#…
+      // This is not purposely activated on when this.#config.watch === true
       // so that services can eventually manually trigger a restart. This mechanism is current
       // used by the composer.
       worker[kITC].on('changed', async () => {
```
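Worker threads can now publish arbitrary events over ITC, and the runtime re-emits them under a `service:worker:event:` namespace together with the service and worker identifiers. On the consuming side this might look like the following sketch (the `cache:invalidated` event name is invented for illustration):

```js
// Custom events forwarded from a service worker:
runtime.on('service:worker:event:cache:invalidated', ({ service, worker, payload }) => {
  console.log(`worker ${worker} of "${service}" reported`, payload)
})

// Built-in lifecycle events renamed or added in this version:
runtime.on('service:worker:health', ({ service, worker, currentHealth }) => {
  console.log(service, worker, currentHealth.elu, currentHealth.heapUsed)
})
runtime.on('service:worker:unhealthy', ({ service, worker }) => {
  console.warn(`worker ${worker} of "${service}" is about to be replaced`)
})
```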
```diff
@@ -1334,13 +1257,14 @@ class Runtime extends EventEmitter {
             await this.startService(serviceId)
           }
 
-          this.logger…
+          this.logger.info(`The service "${serviceId}" has been successfully reloaded ...`)
+          this.emit('service:worker:reloaded', eventPayload)
 
           if (serviceConfig.entrypoint) {
             this.#showUrl()
           }
         } catch (e) {
-          this.logger…
+          this.logger.error(e)
         }
       })
     }
@@ -1379,9 +1303,7 @@ class Runtime extends EventEmitter {
     if (features.node.worker.getHeapStatistics) {
       const { used_heap_size: heapUsed, total_heap_size: heapTotal } = await worker.getHeapStatistics()
       const currentELU = worker.performance.eventLoopUtilization()
-      const elu = worker[kLastELU]
-        ? worker.performance.eventLoopUtilization(currentELU, worker[kLastELU])
-        : currentELU
+      const elu = worker[kLastELU] ? worker.performance.eventLoopUtilization(currentELU, worker[kLastELU]) : currentELU
       worker[kLastELU] = currentELU
       return { elu: elu.utilization, heapUsed, heapTotal }
     }
@@ -1416,11 +1338,10 @@ class Runtime extends EventEmitter {
       health = { elu: -1, heapUsed: -1, heapTotal: -1 }
     }
 
-
-    this.emit('health', {
+    this.emit('service:worker:health', {
       id: worker[kId],
-      service: …
-      worker: …
+      service: id,
+      worker: index,
       currentHealth: health,
       unhealthy,
       healthConfig: worker[kConfig].health
@@ -1446,6 +1367,8 @@ class Runtime extends EventEmitter {
 
     if (unhealthyChecks === maxUnhealthyChecks) {
       try {
+        this.emit('service:worker:unhealthy', { service: id, worker: index })
+
         this.logger.error(
           { elu: health.elu, maxELU, memoryUsage: health.heapUsed, maxMemoryUsage: maxHeapUsed },
           `The ${errorLabel} is unhealthy. Replacing it ...`
@@ -1480,7 +1403,7 @@ class Runtime extends EventEmitter {
     const label = this.#workerExtendedLabel(id, index, workersCount)
 
     if (!silent) {
-      this.logger…
+      this.logger.info(`Starting the ${label}...`)
     }
 
     if (!worker) {
@@ -1524,25 +1447,42 @@ class Runtime extends EventEmitter {
       this.#broadcastWorkers()
 
      if (!silent) {
-        this.logger…
+        this.logger.info(`Started the ${label}...`)
      }
 
      const { enabled, gracePeriod } = worker[kConfig].health
      if (enabled && config.restartOnError > 0) {
        // if gracePeriod is 0, it will be set to 1 to start health checks immediately
        // however, the health event will start when the worker is started
-        this.#setupHealthCheck(…
+        this.#setupHealthCheck(
+          config,
+          serviceConfig,
+          workersCount,
+          id,
+          index,
+          worker,
+          label,
+          gracePeriod > 0 ? gracePeriod : 1
+        )
      }
-    } catch (…
+    } catch (err) {
+      const error = ensureError(err)
+
      // TODO: handle port allocation error here
-      if (error.code === 'EADDRINUSE') throw error
+      if (error.code === 'EADDRINUSE' || error.code === 'EACCES') throw error
 
      this.#cleanupWorker(worker)
 
      if (worker[kWorkerStatus] !== 'exited') {
        // This prevent the exit handler to restart service
        worker[kWorkerStatus] = 'exited'
-
+
+        // Wait for the worker to exit gracefully, otherwise we terminate it
+        const waitTimeout = await executeWithTimeout(once(worker, 'exit'), config.gracefulShutdown.service)
+
+        if (waitTimeout === kTimeout) {
+          await worker.terminate()
+        }
      }
 
      this.emit('service:worker:start:error', { ...eventPayload, error })
```
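The error path of worker startup now waits for the thread to exit on its own before force-terminating it, using `executeWithTimeout` from `@platformatic/foundation`, which — as the diff suggests — resolves with the `kTimeout` sentinel instead of rejecting when the deadline passes. The pattern in isolation:

```js
const { executeWithTimeout, kTimeout } = require('@platformatic/foundation')
const { once } = require('node:events')

// Sketch of the shutdown fallback used above, assuming the sentinel
// semantics of executeWithTimeout described in the lead-in.
async function ensureWorkerExit (worker, gracePeriodMs) {
  const result = await executeWithTimeout(once(worker, 'exit'), gracePeriodMs)

  if (result === kTimeout) {
    // The worker did not exit within the grace period: force-terminate it.
    await worker.terminate()
  }
}
```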
```diff
@@ -1593,15 +1533,16 @@ class Runtime extends EventEmitter {
     const eventPayload = { service: id, worker: index, workersCount }
 
     worker[kWorkerStatus] = 'stopping'
+    worker[kITC].removeAllListeners('changed')
     this.emit('service:worker:stopping', eventPayload)
 
     const label = this.#workerExtendedLabel(id, index, workersCount)
 
     if (!silent) {
-      this.logger…
+      this.logger.info(`Stopping the ${label}...`)
     }
 
-    const exitTimeout = this.#…
+    const exitTimeout = this.#config.gracefulShutdown.runtime
     const exitPromise = once(worker, 'exit')
 
     // Always send the stop message, it will shut down workers that only had ITC and interceptors setup
@@ -1609,13 +1550,13 @@ class Runtime extends EventEmitter {
       await executeWithTimeout(sendViaITC(worker, 'stop'), exitTimeout)
     } catch (error) {
       this.emit('service:worker:stop:timeout', eventPayload)
-      this.logger…
+      this.logger.info({ error: ensureLoggableError(error) }, `Failed to stop ${label}. Killing a worker thread.`)
     } finally {
       worker[kITC].close()
     }
 
     if (!silent) {
-      this.logger…
+      this.logger.info(`Stopped the ${label}...`)
     }
 
     // Wait for the worker thread to finish, we're going to create a new one if the service is ever restarted
@@ -1834,7 +1775,7 @@ class Runtime extends EventEmitter {
     // Send the first port to the target
     const response = await executeWithTimeout(
       sendViaITC(target, 'saveMessagingChannel', port1, [port1]),
-      this.#…
+      this.#config.messagingTimeout
     )
 
     if (response === kTimeout) {
@@ -1847,56 +1788,13 @@ class Runtime extends EventEmitter {
   }
 
   async #getRuntimePackageJson () {
-    const runtimeDir = this.#…
+    const runtimeDir = this.#root
     const packageJsonPath = join(runtimeDir, 'package.json')
     const packageJsonFile = await readFile(packageJsonPath, 'utf8')
     const packageJson = JSON.parse(packageJsonFile)
     return packageJson
   }
 
-  #getRuntimeLogsDir (runtimePID) {
-    return join(this.#runtimeTmpDir, runtimePID.toString(), 'logs')
-  }
-
-  async #getRuntimeLogFiles (runtimePID) {
-    const runtimeLogsDir = this.#getRuntimeLogsDir(runtimePID)
-    const runtimeLogsFiles = await readdir(runtimeLogsDir)
-    return runtimeLogsFiles
-      .filter(file => file.startsWith('logs'))
-      .sort((log1, log2) => {
-        const index1 = parseInt(log1.slice('logs.'.length))
-        const index2 = parseInt(log2.slice('logs.'.length))
-        return index1 - index2
-      })
-  }
-
-  async #getAllLogsFiles () {
-    try {
-      await access(this.#runtimeTmpDir)
-    } catch (err) {
-      this.logger.error({ err: ensureLoggableError(err) }, 'Cannot access temporary folder.')
-      return []
-    }
-
-    const runtimePIDs = await readdir(this.#runtimeTmpDir)
-    const runtimesLogFiles = []
-
-    for (const runtimePID of runtimePIDs) {
-      const runtimeLogsDir = this.#getRuntimeLogsDir(runtimePID)
-      const runtimeLogsDirStat = await stat(runtimeLogsDir)
-      const runtimeLogFiles = await this.#getRuntimeLogFiles(runtimePID)
-      const lastModified = runtimeLogsDirStat.mtime
-
-      runtimesLogFiles.push({
-        runtimePID: parseInt(runtimePID),
-        runtimeLogFiles,
-        lastModified
-      })
-    }
-
-    return runtimesLogFiles.sort((runtime1, runtime2) => runtime1.lastModified - runtime2.lastModified)
-  }
-
   #handleWorkerStandardStreams (worker, serviceId, workerId) {
     const binding = { name: serviceId }
 
@@ -1931,7 +1829,7 @@ class Runtime extends EventEmitter {
   // label is the key in the logger object, either 'stdout' or 'stderr'
   #forwardThreadLog (logger, { level, caller }, data, label) {
     // When captureStdio is false, write directly to the logger destination
-    if (!this.#…
+    if (!this.#config.logger.captureStdio) {
       this.#stdio[label].write(data)
       return
     }
@@ -2007,7 +1905,7 @@ class Runtime extends EventEmitter {
   async #updateServiceConfigWorkers (serviceId, workers) {
     this.logger.info(`Updating service "${serviceId}" config workers to ${workers}`)
 
-    this.#…
+    this.#config.services.find(s => s.id === serviceId).workers = workers
     const service = await this.#getServiceById(serviceId)
     this.#workers.setCount(serviceId, workers)
     service[kConfig].workers = workers
@@ -2032,7 +1930,7 @@ class Runtime extends EventEmitter {
     this.logger.info(`Updating service "${serviceId}" config health heap to ${JSON.stringify(health)}`)
     const { maxHeapTotal, maxYoungGeneration } = health
 
-    const service = this.#…
+    const service = this.#config.services.find(s => s.id === serviceId)
     if (maxHeapTotal) {
       service.health.maxHeapTotal = maxHeapTotal
     }
@@ -2093,21 +1991,36 @@ class Runtime extends EventEmitter {
     }
 
     const ups = await this.#validateUpdateServiceResources(updates)
-    const config = this.#…
+    const config = this.#config
 
     const report = []
     for (const update of ups) {
       const { serviceId, config: serviceConfig, workers, health, currentWorkers, currentHealth } = update
 
       if (workers && health) {
-        const r = await this.#updateServiceWorkersAndHealth(…
+        const r = await this.#updateServiceWorkersAndHealth(
+          serviceId,
+          config,
+          serviceConfig,
+          workers,
+          health,
+          currentWorkers,
+          currentHealth
+        )
         report.push({
           service: serviceId,
           workers: r.workers,
           health: r.health
         })
       } else if (health) {
-        const r = await this.#updateServiceHealth(…
+        const r = await this.#updateServiceHealth(
+          serviceId,
+          config,
+          serviceConfig,
+          currentWorkers,
+          currentHealth,
+          health
+        )
         report.push({
           service: serviceId,
           health: r.health
```
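The resource-update path reads `service`, `workers`, `maxHeapTotal`, and `maxYoungGeneration` from each update entry and reports what was applied per service. A hypothetical call shape inferred from the validation code below; the public method name wrapping `#validateUpdateServiceResources` is an assumption:

```js
// Hypothetical method name; the payload shape follows the validation code.
const report = await runtime.updateServicesResources([
  {
    service: 'api',
    workers: 4, // scale up or down relative to the current worker count
    health: {
      maxHeapTotal: '768MB', // bytes as a number, or a memory-size string
      maxYoungGeneration: '64MB'
    }
  },
  { service: 'composer', health: { maxHeapTotal: '256MB' } }
])
// Each report entry echoes the service plus the applied workers/health.
```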
```diff
@@ -2132,7 +2045,7 @@ class Runtime extends EventEmitter {
       throw new errors.InvalidArgumentError('updates', 'must have at least one element')
     }
 
-    const config = this.#…
+    const config = this.#config
     const validatedUpdates = []
     for (const update of updates) {
       const { service: serviceId } = update
@@ -2181,7 +2094,10 @@ class Runtime extends EventEmitter {
           throw new errors.InvalidArgumentError('maxHeapTotal', 'must be greater than 0')
         }
       } else {
-        throw new errors.InvalidArgumentError(…
+        throw new errors.InvalidArgumentError(
+          'maxHeapTotal',
+          'must be a number or a string representing a memory size'
+        )
       }
 
       if (currentHealth.maxHeapTotal === maxHeapTotal) {
@@ -2203,7 +2119,10 @@ class Runtime extends EventEmitter {
           throw new errors.InvalidArgumentError('maxYoungGeneration', 'must be greater than 0')
         }
       } else {
-        throw new errors.InvalidArgumentError(…
+        throw new errors.InvalidArgumentError(
+          'maxYoungGeneration',
+          'must be a number or a string representing a memory size'
+        )
       }
 
       if (currentHealth.maxYoungGeneration && currentHealth.maxYoungGeneration === maxYoungGeneration) {
@@ -2216,7 +2135,7 @@ class Runtime extends EventEmitter {
       if (workers || maxHeapTotal || maxYoungGeneration) {
         let health
         if (maxHeapTotal || maxYoungGeneration) {
-          health = {…
+          health = {}
           if (maxHeapTotal) {
             health.maxHeapTotal = maxHeapTotal
           }
@@ -2231,12 +2150,27 @@ class Runtime extends EventEmitter {
     return validatedUpdates
   }
 
-  async #updateServiceWorkersAndHealth (…
+  async #updateServiceWorkersAndHealth (
+    serviceId,
+    config,
+    serviceConfig,
+    workers,
+    health,
+    currentWorkers,
+    currentHealth
+  ) {
     if (currentWorkers > workers) {
       // stop workers
       const reportWorkers = await this.#updateServiceWorkers(serviceId, config, serviceConfig, workers, currentWorkers)
       // update heap for current workers
-      const reportHealth = await this.#updateServiceHealth(…
+      const reportHealth = await this.#updateServiceHealth(
+        serviceId,
+        config,
+        serviceConfig,
+        workers,
+        currentHealth,
+        health
+      )
 
       return { workers: reportWorkers, health: reportHealth }
     } else {
@@ -2245,13 +2179,29 @@ class Runtime extends EventEmitter {
       // start new workers with new heap
       const reportWorkers = await this.#updateServiceWorkers(serviceId, config, serviceConfig, workers, currentWorkers)
       // update heap for current workers
-      const reportHealth = await this.#updateServiceHealth(…
+      const reportHealth = await this.#updateServiceHealth(
+        serviceId,
+        config,
+        serviceConfig,
+        currentWorkers,
+        currentHealth,
+        health,
+        false
+      )
 
       return { workers: reportWorkers, health: reportHealth }
     }
   }
 
-  async #updateServiceHealth (…
+  async #updateServiceHealth (
+    serviceId,
+    config,
+    serviceConfig,
+    currentWorkers,
+    currentHealth,
+    health,
+    updateConfig = true
+  ) {
     const report = {
       current: currentHealth,
       new: health,
@@ -2263,15 +2213,25 @@ class Runtime extends EventEmitter {
     }
 
     for (let i = 0; i < currentWorkers; i++) {
-      this.logger.info(…
+      this.logger.info(
+        { health: { current: currentHealth, new: health } },
+        `Restarting service "${serviceId}" worker ${i} to update config health heap...`
+      )
 
       const worker = await this.#getWorkerById(serviceId, i)
-      if (health.maxHeapTotal) {
-        …
+      if (health.maxHeapTotal) {
+        worker[kConfig].health.maxHeapTotal = health.maxHeapTotal
+      }
+      if (health.maxYoungGeneration) {
+        worker[kConfig].health.maxYoungGeneration = health.maxYoungGeneration
+      }
 
       await this.#replaceWorker(config, serviceConfig, currentWorkers, serviceId, i, worker)
       report.updated.push(i)
-      this.logger.info(…
+      this.logger.info(
+        { health: { current: currentHealth, new: health } },
+        `Restarted service "${serviceId}" worker ${i}`
+      )
     }
     report.success = true
   } catch (err) {
@@ -2279,7 +2239,10 @@ class Runtime extends EventEmitter {
       this.logger.error({ err }, 'Cannot update service health heap, no worker updated')
       await this.#updateServiceConfigHealth(serviceId, currentHealth)
     } else {
-      this.logger.error(…
+      this.logger.error(
+        { err },
+        `Cannot update service health heap, updated workers: ${report.updated.length} out of ${currentWorkers}`
+      )
     }
     report.success = false
   }
@@ -2306,7 +2269,10 @@ class Runtime extends EventEmitter {
       this.logger.error({ err }, 'Cannot start service workers, no worker started')
       await this.#updateServiceConfigWorkers(serviceId, currentWorkers)
     } else {
-      this.logger.error(…
+      this.logger.error(
+        { err },
+        `Cannot start service workers, started workers: ${report.started.length} out of ${workers}`
+      )
       await this.#updateServiceConfigWorkers(serviceId, currentWorkers + report.started.length)
     }
     report.success = false
@@ -2327,7 +2293,10 @@ class Runtime extends EventEmitter {
     if (report.stopped.length < 1) {
       this.logger.error({ err }, 'Cannot stop service workers, no worker stopped')
     } else {
-      this.logger.error(…
+      this.logger.error(
+        { err },
+        `Cannot stop service workers, stopped workers: ${report.stopped.length} out of ${workers}`
+      )
       await this.#updateServiceConfigWorkers(serviceId, currentWorkers - report.stopped)
     }
     report.success = false
```