@platformatic/runtime 2.72.0 → 3.0.0-alpha.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/config.d.ts +1 -1
- package/index.d.ts +51 -23
- package/index.js +99 -17
- package/lib/config.js +159 -218
- package/lib/errors.js +150 -108
- package/lib/{generator/runtime-generator.js → generator.js} +37 -33
- package/lib/logger.js +2 -2
- package/lib/management-api.js +2 -2
- package/lib/prom-server.js +3 -7
- package/lib/runtime.js +209 -125
- package/lib/schema.js +1 -0
- package/lib/upgrade.js +6 -4
- package/lib/utils.js +1 -42
- package/lib/worker/app.js +39 -71
- package/lib/worker/itc.js +1 -5
- package/lib/worker/main.js +1 -11
- package/lib/worker/messaging.js +2 -2
- package/package.json +16 -23
- package/schema.json +2 -1
- package/help/compile.txt +0 -8
- package/help/help.txt +0 -5
- package/help/start.txt +0 -21
- package/index.test-d.ts +0 -41
- package/lib/build-server.js +0 -67
- package/lib/compile.js +0 -108
- package/lib/generator/README.md +0 -32
- package/lib/generator/errors.js +0 -10
- package/lib/generator/runtime-generator.d.ts +0 -37
- package/lib/start.js +0 -211
- package/lib/worker/default-stackable.js +0 -33
- package/lib/worker/shared-context.js +0 -26
- package/runtime.mjs +0 -54
package/lib/runtime.js CHANGED

@@ -1,27 +1,35 @@
 'use strict'
 
 const { ITC } = require('@platformatic/itc')
-const { …
+const {
+  features,
+  ensureLoggableError,
+  executeWithTimeout,
+  deepmerge,
+  parseMemorySize,
+  kTimeout,
+  kMetadata
+} = require('@platformatic/utils')
 const { once, EventEmitter } = require('node:events')
 const { createReadStream, watch, existsSync } = require('node:fs')
 const { readdir, readFile, stat, access } = require('node:fs/promises')
 const { STATUS_CODES } = require('node:http')
 const { join } = require('node:path')
+const { pathToFileURL } = require('node:url')
 const { setTimeout: sleep, setImmediate: immediate } = require('node:timers/promises')
 const { Worker } = require('node:worker_threads')
 const ts = require('tail-file-stream')
 const { Agent, interceptors: undiciInterceptors, request } = require('undici')
 const { createThreadInterceptor } = require('undici-thread-interceptor')
 const SonicBoom = require('sonic-boom')
-
 const { checkDependencies, topologicalSort } = require('./dependencies')
 const errors = require('./errors')
-const { createLogger } = require('./logger')
+const { abstractLogger, createLogger } = require('./logger')
 const { startManagementApi } = require('./management-api')
 const { startPrometheusServer } = require('./prom-server')
 const { startScheduler } = require('./scheduler')
 const { createSharedStore } = require('./shared-http-cache')
-const { getRuntimeTmpDir } = require('./utils')
+const { getRuntimeTmpDir, getRuntimeLogsDir } = require('./utils')
 const { sendViaITC, waitEventFromITC } = require('./worker/itc')
 const { RoundRobinMap } = require('./worker/round-robin-map.js')
 const {
@@ -37,7 +45,6 @@ const {
   kLastELU,
   kWorkersBroadcast
 } = require('./worker/symbols')
-
 const fastify = require('fastify')
 
 const platformaticVersion = require('../package.json').version
@@ -58,11 +65,13 @@ const telemetryPath = require.resolve('@platformatic/telemetry')
 const openTelemetrySetupPath = join(telemetryPath, '..', 'lib', 'node-telemetry.js')
 
 class Runtime extends EventEmitter {
-  #…
+  #root
+  #config
+  #env
+  #context
   #isProduction
   #runtimeTmpDir
   #runtimeLogsDir
-  #env
   #servicesIds
   #entrypointId
   #url
@@ -83,29 +92,32 @@ class Runtime extends EventEmitter {
   servicesConfigsPatches
   #scheduler
   #stdio
-  #sharedContext
 
-  constructor (…
+  constructor (config, context) {
     super()
     this.setMaxListeners(MAX_LISTENERS_COUNT)
 
-    this.#…
-    this.#…
-    this.#…
-    this.#…
+    this.#config = config
+    this.#root = config[kMetadata].root
+    this.#env = config[kMetadata].env
+    this.#context = context ?? {}
+    this.#isProduction = this.#context.isProduction ?? this.#context.production ?? false
+    this.#runtimeTmpDir = getRuntimeTmpDir(this.#root)
+    this.#runtimeLogsDir = getRuntimeLogsDir(this.#root, process.pid)
     this.#workers = new RoundRobinMap()
     this.#servicesIds = []
     this.#url = undefined
     this.#meshInterceptor = createThreadInterceptor({
       domain: '.plt.local',
-      timeout: this.#…
+      timeout: this.#config.serviceTimeout
     })
+    this.logger = abstractLogger // This is replaced by the real logger in init() and eventually removed in close()
     this.#status = undefined
     this.#restartingWorkers = new Map()
     this.#sharedHttpCache = null
     this.servicesConfigsPatches = new Map()
 
-    if (!this.#…
+    if (!this.#config.logger.captureStdio) {
       this.#stdio = {
         stdout: new SonicBoom({ fd: process.stdout.fd }),
         stderr: new SonicBoom({ fd: process.stderr.fd })
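
The constructor now takes a resolved `config` object plus an optional `context`, replacing the old positional arguments (truncated in this diff). Below is a minimal sketch of the new contract, inferred only from the fields the constructor reads in the hunk above; the import path and the config fields beyond those are assumptions, not documented API:

```js
const { kMetadata } = require('@platformatic/utils')
// Assumed import path; the class is defined in lib/runtime.js.
const { Runtime } = require('@platformatic/runtime/lib/runtime')

async function main () {
  const config = {
    // ...a fully resolved runtime config (services, entrypoint, workers, ...)
    serviceTimeout: 300000, // read by the mesh interceptor in the constructor
    logger: { captureStdio: true }, // checked before wrapping process stdio in SonicBoom
    [kMetadata]: {
      // root and env now travel with the config under the kMetadata symbol
      root: '/path/to/project',
      env: process.env
    }
  }

  // `context.isProduction` (or the legacy `production` flag) replaces
  // the old `configManager.args?.production` lookup.
  const runtime = new Runtime(config, { isProduction: false })
  await runtime.init() // idempotent: returns early once #status is set
  await runtime.start() // start() calls init() itself when needed
}

main()
```
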
@@ -121,22 +133,23 @@ class Runtime extends EventEmitter {
       getHttpCacheValue: this.#getHttpCacheValue.bind(this),
       setHttpCacheValue: this.#setHttpCacheValue.bind(this),
       deleteHttpCacheValue: this.#deleteHttpCacheValue.bind(this),
-      invalidateHttpCache: this.invalidateHttpCache.bind(this),
-      updateSharedContext: this.updateSharedContext.bind(this),
-      getSharedContext: this.getSharedContext.bind(this)
+      invalidateHttpCache: this.invalidateHttpCache.bind(this)
     }
-    this.#sharedContext = {}
   }
 
   async init () {
-    …
+    if (typeof this.#status !== 'undefined') {
+      return
+    }
+
+    const config = this.#config
     const autoloadEnabled = config.autoload
 
     // This cannot be transferred to worker threads
     delete config.configManager
 
     if (config.managementApi) {
-      this.#managementApi = await startManagementApi(this, this.#…
+      this.#managementApi = await startManagementApi(this, this.#root)
     }
 
     if (config.metrics) {
@@ -148,13 +161,12 @@ class Runtime extends EventEmitter {
     this.logger = logger
     this.#loggerDestination = destination
 
-    this.#isProduction = this.#configManager.args?.production ?? false
     this.#servicesIds = config.services.map(service => service.id)
     this.#createWorkersBroadcastChannel()
 
     const workersConfig = []
     for (const service of config.services) {
-      const count = service.workers ?? this.#…
+      const count = service.workers ?? this.#config.workers
       if (count > 1 && service.entrypoint && !features.node.reusePort) {
         this.logger.warn(
           `"${service.id}" is set as the entrypoint, but reusePort is not available in your OS; setting workers to 1 instead of ${count}`
@@ -181,7 +193,7 @@ class Runtime extends EventEmitter {
       if (!serviceConfig.path) {
         if (serviceConfig.url) {
           // Try to backfill the path for external services
-          serviceConfig.path = join(this.#…
+          serviceConfig.path = join(this.#root, config.resolvedServicesBasePath, serviceConfig.id)
 
           if (!existsSync(serviceConfig.path)) {
             const executable = globalThis.platformatic?.executable ?? 'platformatic'
@@ -244,7 +256,11 @@ class Runtime extends EventEmitter {
   }
 
   async start (silent = false) {
-    if (typeof this.#…
+    if (typeof this.#status === 'undefined') {
+      await this.init()
+    }
+
+    if (typeof this.#config.entrypoint === 'undefined') {
       throw new errors.MissingEntrypointError()
     }
     this.#updateStatus('starting')
@@ -256,16 +272,16 @@ class Runtime extends EventEmitter {
       await this.startService(service, silent)
     }
 
-    if (this.#…
-      const { port } = this.#…
+    if (this.#config.inspectorOptions) {
+      const { port } = this.#config.inspectorOptions
 
       const server = fastify({
         loggerInstance: this.logger.child({ name: 'inspector' }, { level: 'warn' })
       })
 
-      const version = await fetch(…
-        …
-      )
+      const version = await fetch(`http://127.0.0.1:${this.#config.inspectorOptions.port + 1}/json/version`).then(
+        res => res.json()
+      )
 
       const data = await Promise.all(
         Array.from(this.#workers.values()).map(async worker => {
@@ -375,9 +391,9 @@ class Runtime extends EventEmitter {
     }
 
     if (this.logger) {
-      this.#loggerDestination…
+      this.#loggerDestination?.end()
 
-      this.logger = …
+      this.logger = abstractLogger
       this.#loggerDestination = null
     }
@@ -406,7 +422,7 @@ class Runtime extends EventEmitter {
       throw new errors.ApplicationAlreadyStartedError()
     }
 
-    const config = this.#…
+    const config = this.#config
     const serviceConfig = config.services.find(s => s.id === id)
 
     if (!serviceConfig) {
@@ -425,7 +441,7 @@ class Runtime extends EventEmitter {
   }
 
   async stopService (id, silent = false) {
-    const config = this.#…
+    const config = this.#config
     const serviceConfig = config.services.find(s => s.id === id)
 
     if (!serviceConfig) {
@@ -506,7 +522,7 @@ class Runtime extends EventEmitter {
   }
 
   async updateUndiciInterceptors (undiciConfig) {
-    this.#…
+    this.#config.undici = undiciConfig
 
     const promises = []
     for (const worker of this.#workers.values()) {
@@ -635,6 +651,10 @@ class Runtime extends EventEmitter {
     this.on('closed', onClose)
   }
 
+  async getUrl () {
+    return this.#url
+  }
+
   async getRuntimeMetadata () {
     const packageJson = await this.#getRuntimePackageJson()
     const entrypointDetails = await this.getEntrypointDetails()
@@ -646,7 +666,7 @@ class Runtime extends EventEmitter {
       uptimeSeconds: Math.floor(process.uptime()),
       execPath: process.execPath,
       nodeVersion: process.version,
-      projectDir: this.#…
+      projectDir: this.#root,
       packageName: packageJson.name ?? null,
       packageVersion: packageJson.version ?? null,
       url: entrypointDetails?.url ?? null,
@@ -655,11 +675,16 @@ class Runtime extends EventEmitter {
   }
 
   getRuntimeEnv () {
-    return this.#…
+    return this.#env
   }
 
-  getRuntimeConfig () {
-    …
+  getRuntimeConfig (includeMeta = false) {
+    if (includeMeta) {
+      return this.#config
+    }
+
+    const { [kMetadata]: _, ...config } = this.#config
+    return config
   }
 
   getInterceptor () {
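
Together with the `getUrl()` accessor added earlier in this diff, the config getter now hides the `kMetadata` entry unless the caller opts in. A usage fragment, assuming a started `runtime` instance is in scope:

```js
const { kMetadata } = require('@platformatic/utils')

async function inspect (runtime) {
  const url = await runtime.getUrl() // entrypoint URL, undefined before start()

  const config = runtime.getRuntimeConfig() // kMetadata stripped by default
  console.assert(config[kMetadata] === undefined)

  const fullConfig = runtime.getRuntimeConfig(true) // opt back in to metadata
  console.log(url, fullConfig[kMetadata].root)
}
```
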
@@ -1082,46 +1107,18 @@ class Runtime extends EventEmitter {
       }
     }
 
+    this.logger.trace({ event, payload }, 'Runtime event')
     return super.emit(event, payload)
   }
 
-  async updateSharedContext (options = {}) {
-    const { context, overwrite = false } = options
-
-    const sharedContext = overwrite ? {} : this.#sharedContext
-    Object.assign(sharedContext, context)
-
-    this.#sharedContext = sharedContext
-
-    const promises = []
-    for (const worker of this.#workers.values()) {
-      promises.push(
-        sendViaITC(worker, 'setSharedContext', sharedContext)
-      )
-    }
-
-    const results = await Promise.allSettled(promises)
-    for (const result of results) {
-      if (result.status === 'rejected') {
-        this.logger.error({ err: result.reason }, 'Cannot update shared context')
-      }
-    }
-
-    return sharedContext
-  }
-
-  getSharedContext () {
-    return this.#sharedContext
-  }
-
   async #setDispatcher (undiciConfig) {
-    const config = this.#…
+    const config = this.#config
 
     const dispatcherOpts = { ...undiciConfig }
     const interceptors = [this.#meshInterceptor]
 
     if (config.httpCache) {
-      this.#sharedHttpCache = await createSharedStore(this.#…
+      this.#sharedHttpCache = await createSharedStore(this.#root, config.httpCache)
       interceptors.push(
         undiciInterceptors.cache({
           store: this.#sharedHttpCache,
@@ -1144,7 +1141,7 @@ class Runtime extends EventEmitter {
   async #setupService (serviceConfig) {
     if (this.#status === 'stopping' || this.#status === 'closed') return
 
-    const config = this.#…
+    const config = this.#config
     const workersCount = await this.#workers.getCount(serviceConfig.id)
     const id = serviceConfig.id
@@ -1162,9 +1159,9 @@ class Runtime extends EventEmitter {
     // Handle inspector
     let inspectorOptions
 
-    if (this.#…
+    if (this.#config.inspectorOptions) {
       inspectorOptions = {
-        ...this.#…
+        ...this.#config.inspectorOptions
       }
 
       inspectorOptions.port = inspectorOptions.port + this.#workers.size + 1
@@ -1183,11 +1180,12 @@ class Runtime extends EventEmitter {
 
     const execArgv = []
 
-    if (!serviceConfig.…
+    if (!serviceConfig.skipTelemetryHooks && config.telemetry && config.telemetry.enabled !== false) {
+      const hookUrl = pathToFileURL(require.resolve('@opentelemetry/instrumentation/hook.mjs'))
       // We need the following because otherwise some open telemetry instrumentations won't work with ESM (like express)
      // see: https://github.com/open-telemetry/opentelemetry-js/blob/main/doc/esm-support.md#instrumentation-hook-required-for-esm
-      execArgv.push('--…
-      execArgv.push('--…
+      execArgv.push('--import', `data:text/javascript, import { register } from 'node:module'; register('${hookUrl}')`)
+      execArgv.push('--import', pathToFileURL(openTelemetrySetupPath))
     }
 
     if ((serviceConfig.sourceMaps ?? config.sourceMaps) === true) {
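
The two truncated `execArgv.push('--…')` lines were replaced by Node's `module.register()` bootstrap: a `data:` URL passed to `--import` registers the OpenTelemetry ESM hook before any service code loads. The same pattern in isolation, as a sketch with illustrative paths:

```js
const { Worker } = require('node:worker_threads')
const { pathToFileURL } = require('node:url')

// Any module that calls module.register() works here; this path is illustrative.
const hookUrl = pathToFileURL('/path/to/hook.mjs')

const worker = new Worker('/path/to/service/main.js', {
  execArgv: [
    // Registers the ESM customization hook before user code runs,
    // without needing a bootstrap file on disk.
    '--import',
    `data:text/javascript, import { register } from 'node:module'; register('${hookUrl}')`
  ]
})

worker.once('exit', code => console.log('service exited with', code))
```
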
@@ -1202,8 +1200,12 @@ class Runtime extends EventEmitter {
       workerEnv['NODE_OPTIONS'] = `${originalNodeOptions} ${serviceConfig.nodeOptions}`.trim()
     }
 
-    const maxHeapTotal = …
-    …
+    const maxHeapTotal =
+      typeof health.maxHeapTotal === 'string' ? parseMemorySize(health.maxHeapTotal) : health.maxHeapTotal
+    const maxYoungGeneration =
+      typeof health.maxYoungGeneration === 'string'
+        ? parseMemorySize(health.maxYoungGeneration)
+        : health.maxYoungGeneration
 
     const maxOldGenerationSizeMb = Math.floor(
       (maxYoungGeneration > 0 ? maxHeapTotal - maxYoungGeneration : maxHeapTotal) / (1024 * 1024)
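
Health heap limits may now be human-readable strings, converted to bytes via `parseMemorySize` from `@platformatic/utils`. A self-contained sketch of what the sizing above computes; the exact string formats the parser accepts ('512MB', '1.5GB', ...) are assumed here:

```js
const { parseMemorySize } = require('@platformatic/utils')

const health = { maxHeapTotal: '512MB', maxYoungGeneration: '64MB' }

const maxHeapTotal =
  typeof health.maxHeapTotal === 'string' ? parseMemorySize(health.maxHeapTotal) : health.maxHeapTotal
const maxYoungGeneration =
  typeof health.maxYoungGeneration === 'string'
    ? parseMemorySize(health.maxYoungGeneration)
    : health.maxYoungGeneration

// V8 wants the old-generation cap in MiB, excluding the young generation.
const maxOldGenerationSizeMb = Math.floor(
  (maxYoungGeneration > 0 ? maxHeapTotal - maxYoungGeneration : maxHeapTotal) / (1024 * 1024)
)
console.log(maxOldGenerationSizeMb) // 448 for the values above
```
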
@@ -1224,7 +1226,7 @@ class Runtime extends EventEmitter {
         count: workersCount
       },
       inspectorOptions,
-      dirname: this.#…
+      dirname: this.#root,
       runtimeLogsDir: this.#runtimeLogsDir
     },
     argv: serviceConfig.arguments,
@@ -1300,7 +1302,7 @@ class Runtime extends EventEmitter {
     worker[kInspectorOptions] = {
       port: inspectorOptions.port,
       id: serviceId,
-      dirname: this.#…
+      dirname: this.#root
     }
   }
@@ -1317,10 +1319,15 @@ class Runtime extends EventEmitter {
     })
     worker[kITC].listen()
 
+    // Forward events from the worker
+    worker[kITC].on('event', ({ event, payload }) => {
+      this.emit(`service:worker:event:${event}`, { ...eventPayload, payload })
+    })
+
     // Only activate watch for the first instance
     if (index === 0) {
       // Handle services changes
-      // This is not purposely activated on when this.#…
+      // This is not purposely activated on when this.#config.watch === true
       // so that services can eventually manually trigger a restart. This mechanism is current
       // used by the composer.
       worker[kITC].on('changed', async () => {
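
Worker threads can now bubble arbitrary events up to the runtime: anything a worker publishes over ITC as `event` is re-emitted as `service:worker:event:<name>`. A listener sketch; the event name is hypothetical and the payload shape is inferred from the hunk above:

```js
function watchWorkerEvents (runtime) {
  // `eventPayload` in the hunk carries { service, worker, workersCount };
  // the worker's own data arrives under `payload`.
  runtime.on('service:worker:event:mycustomevent', ({ service, worker, payload }) => {
    console.log(`worker ${worker} of "${service}" emitted mycustomevent`, payload)
  })
}
```
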
@@ -1334,13 +1341,14 @@ class Runtime extends EventEmitter {
           await this.startService(serviceId)
         }
 
-        this.logger…
+        this.logger.info(`The service "${serviceId}" has been successfully reloaded ...`)
+        this.emit('service:worker:reloaded', eventPayload)
 
         if (serviceConfig.entrypoint) {
           this.#showUrl()
         }
       } catch (e) {
-        this.logger…
+        this.logger.error(e)
       }
     })
   }
@@ -1379,9 +1387,7 @@ class Runtime extends EventEmitter {
     if (features.node.worker.getHeapStatistics) {
       const { used_heap_size: heapUsed, total_heap_size: heapTotal } = await worker.getHeapStatistics()
       const currentELU = worker.performance.eventLoopUtilization()
-      const elu = worker[kLastELU]
-        ? worker.performance.eventLoopUtilization(currentELU, worker[kLastELU])
-        : currentELU
+      const elu = worker[kLastELU] ? worker.performance.eventLoopUtilization(currentELU, worker[kLastELU]) : currentELU
       worker[kLastELU] = currentELU
       return { elu: elu.utilization, heapUsed, heapTotal }
     }
@@ -1416,11 +1422,10 @@ class Runtime extends EventEmitter {
       health = { elu: -1, heapUsed: -1, heapTotal: -1 }
     }
 
-
-    this.emit('health', {
+    this.emit('service:worker:health', {
       id: worker[kId],
-      service: …
-      worker: …
+      service: id,
+      worker: index,
       currentHealth: health,
       unhealthy,
       healthConfig: worker[kConfig].health
@@ -1446,6 +1451,8 @@ class Runtime extends EventEmitter {
 
     if (unhealthyChecks === maxUnhealthyChecks) {
       try {
+        this.emit('service:worker:unhealthy', { service: id, worker: index })
+
         this.logger.error(
           { elu: health.elu, maxELU, memoryUsage: health.heapUsed, maxMemoryUsage: maxHeapUsed },
           `The ${errorLabel} is unhealthy. Replacing it ...`
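
The old generic `health` event is renamed, and a dedicated `service:worker:unhealthy` event now fires right before a failing worker is replaced. A listener sketch, using the payload fields as emitted in the two hunks above:

```js
function watchWorkerHealth (runtime) {
  runtime.on('service:worker:health', ({ service, worker, currentHealth, unhealthy }) => {
    if (unhealthy) {
      console.warn(`worker ${worker} of "${service}" exceeded its limits`, currentHealth)
    }
  })

  runtime.on('service:worker:unhealthy', ({ service, worker }) => {
    // Fires once the unhealthy-check threshold is hit, just before replacement.
    console.warn(`replacing worker ${worker} of "${service}"`)
  })
}
```
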
@@ -1480,7 +1487,7 @@ class Runtime extends EventEmitter {
     const label = this.#workerExtendedLabel(id, index, workersCount)
 
     if (!silent) {
-      this.logger…
+      this.logger.info(`Starting the ${label}...`)
     }
 
     if (!worker) {
@@ -1524,14 +1531,23 @@ class Runtime extends EventEmitter {
     this.#broadcastWorkers()
 
     if (!silent) {
-      this.logger…
+      this.logger.info(`Started the ${label}...`)
     }
 
     const { enabled, gracePeriod } = worker[kConfig].health
     if (enabled && config.restartOnError > 0) {
       // if gracePeriod is 0, it will be set to 1 to start health checks immediately
       // however, the health event will start when the worker is started
-      this.#setupHealthCheck(…
+      this.#setupHealthCheck(
+        config,
+        serviceConfig,
+        workersCount,
+        id,
+        index,
+        worker,
+        label,
+        gracePeriod > 0 ? gracePeriod : 1
+      )
     }
   } catch (error) {
     // TODO: handle port allocation error here
@@ -1593,15 +1609,16 @@ class Runtime extends EventEmitter {
     const eventPayload = { service: id, worker: index, workersCount }
 
     worker[kWorkerStatus] = 'stopping'
+    worker[kITC].removeAllListeners('changed')
     this.emit('service:worker:stopping', eventPayload)
 
     const label = this.#workerExtendedLabel(id, index, workersCount)
 
     if (!silent) {
-      this.logger…
+      this.logger.info(`Stopping the ${label}...`)
     }
 
-    const exitTimeout = this.#…
+    const exitTimeout = this.#config.gracefulShutdown.runtime
     const exitPromise = once(worker, 'exit')
 
     // Always send the stop message, it will shut down workers that only had ITC and interceptors setup
@@ -1609,13 +1626,13 @@ class Runtime extends EventEmitter {
       await executeWithTimeout(sendViaITC(worker, 'stop'), exitTimeout)
     } catch (error) {
       this.emit('service:worker:stop:timeout', eventPayload)
-      this.logger…
+      this.logger.info({ error: ensureLoggableError(error) }, `Failed to stop ${label}. Killing a worker thread.`)
     } finally {
       worker[kITC].close()
     }
 
     if (!silent) {
-      this.logger…
+      this.logger.info(`Stopped the ${label}...`)
     }
 
     // Wait for the worker thread to finish, we're going to create a new one if the service is ever restarted
@@ -1819,11 +1836,7 @@ class Runtime extends EventEmitter {
       })
     }
 
-
-      this.#workersBroadcastChannel.postMessage(workers)
-    } catch (err) {
-      this.logger?.error({ err }, 'Error when broadcasting workers')
-    }
+    this.#workersBroadcastChannel.postMessage(workers)
   }
 
   async #getWorkerMessagingChannel ({ service, worker }, context) {
@@ -1834,7 +1847,7 @@ class Runtime extends EventEmitter {
     // Send the first port to the target
     const response = await executeWithTimeout(
       sendViaITC(target, 'saveMessagingChannel', port1, [port1]),
-      this.#…
+      this.#config.messagingTimeout
     )
 
     if (response === kTimeout) {
@@ -1847,7 +1860,7 @@ class Runtime extends EventEmitter {
   }
 
   async #getRuntimePackageJson () {
-    const runtimeDir = this.#…
+    const runtimeDir = this.#root
     const packageJsonPath = join(runtimeDir, 'package.json')
     const packageJsonFile = await readFile(packageJsonPath, 'utf8')
     const packageJson = JSON.parse(packageJsonFile)
@@ -1931,7 +1944,7 @@ class Runtime extends EventEmitter {
   // label is the key in the logger object, either 'stdout' or 'stderr'
   #forwardThreadLog (logger, { level, caller }, data, label) {
     // When captureStdio is false, write directly to the logger destination
-    if (!this.#…
+    if (!this.#config.logger.captureStdio) {
       this.#stdio[label].write(data)
       return
     }
@@ -2007,7 +2020,7 @@ class Runtime extends EventEmitter {
   async #updateServiceConfigWorkers (serviceId, workers) {
     this.logger.info(`Updating service "${serviceId}" config workers to ${workers}`)
 
-    this.#…
+    this.#config.services.find(s => s.id === serviceId).workers = workers
     const service = await this.#getServiceById(serviceId)
     this.#workers.setCount(serviceId, workers)
     service[kConfig].workers = workers
@@ -2032,7 +2045,7 @@ class Runtime extends EventEmitter {
     this.logger.info(`Updating service "${serviceId}" config health heap to ${JSON.stringify(health)}`)
     const { maxHeapTotal, maxYoungGeneration } = health
 
-    const service = this.#…
+    const service = this.#config.services.find(s => s.id === serviceId)
    if (maxHeapTotal) {
       service.health.maxHeapTotal = maxHeapTotal
     }
@@ -2093,21 +2106,36 @@ class Runtime extends EventEmitter {
     }
 
     const ups = await this.#validateUpdateServiceResources(updates)
-    const config = this.#…
+    const config = this.#config
 
     const report = []
     for (const update of ups) {
       const { serviceId, config: serviceConfig, workers, health, currentWorkers, currentHealth } = update
 
       if (workers && health) {
-        const r = await this.#updateServiceWorkersAndHealth(…
+        const r = await this.#updateServiceWorkersAndHealth(
+          serviceId,
+          config,
+          serviceConfig,
+          workers,
+          health,
+          currentWorkers,
+          currentHealth
+        )
         report.push({
           service: serviceId,
           workers: r.workers,
           health: r.health
         })
       } else if (health) {
-        const r = await this.#updateServiceHealth(…
+        const r = await this.#updateServiceHealth(
+          serviceId,
+          config,
+          serviceConfig,
+          currentWorkers,
+          currentHealth,
+          health
+        )
         report.push({
           service: serviceId,
           health: r.health
@@ -2132,7 +2160,7 @@ class Runtime extends EventEmitter {
       throw new errors.InvalidArgumentError('updates', 'must have at least one element')
     }
 
-    const config = this.#…
+    const config = this.#config
     const validatedUpdates = []
     for (const update of updates) {
       const { service: serviceId } = update
@@ -2181,7 +2209,10 @@ class Runtime extends EventEmitter {
         throw new errors.InvalidArgumentError('maxHeapTotal', 'must be greater than 0')
       }
     } else {
-      throw new errors.InvalidArgumentError(…
+      throw new errors.InvalidArgumentError(
+        'maxHeapTotal',
+        'must be a number or a string representing a memory size'
+      )
     }
 
     if (currentHealth.maxHeapTotal === maxHeapTotal) {
@@ -2203,7 +2234,10 @@ class Runtime extends EventEmitter {
         throw new errors.InvalidArgumentError('maxYoungGeneration', 'must be greater than 0')
       }
     } else {
-      throw new errors.InvalidArgumentError(…
+      throw new errors.InvalidArgumentError(
+        'maxYoungGeneration',
+        'must be a number or a string representing a memory size'
+      )
     }
 
     if (currentHealth.maxYoungGeneration && currentHealth.maxYoungGeneration === maxYoungGeneration) {
@@ -2216,7 +2250,7 @@ class Runtime extends EventEmitter {
     if (workers || maxHeapTotal || maxYoungGeneration) {
       let health
       if (maxHeapTotal || maxYoungGeneration) {
-        health = {…
+        health = {}
         if (maxHeapTotal) {
           health.maxHeapTotal = maxHeapTotal
         }
@@ -2231,12 +2265,27 @@ class Runtime extends EventEmitter {
     return validatedUpdates
   }
 
-  async #updateServiceWorkersAndHealth (…
+  async #updateServiceWorkersAndHealth (
+    serviceId,
+    config,
+    serviceConfig,
+    workers,
+    health,
+    currentWorkers,
+    currentHealth
+  ) {
     if (currentWorkers > workers) {
       // stop workers
       const reportWorkers = await this.#updateServiceWorkers(serviceId, config, serviceConfig, workers, currentWorkers)
       // update heap for current workers
-      const reportHealth = await this.#updateServiceHealth(…
+      const reportHealth = await this.#updateServiceHealth(
+        serviceId,
+        config,
+        serviceConfig,
+        workers,
+        currentHealth,
+        health
+      )
 
       return { workers: reportWorkers, health: reportHealth }
     } else {
@@ -2245,13 +2294,29 @@ class Runtime extends EventEmitter {
       // start new workers with new heap
       const reportWorkers = await this.#updateServiceWorkers(serviceId, config, serviceConfig, workers, currentWorkers)
       // update heap for current workers
-      const reportHealth = await this.#updateServiceHealth(…
+      const reportHealth = await this.#updateServiceHealth(
+        serviceId,
+        config,
+        serviceConfig,
+        currentWorkers,
+        currentHealth,
+        health,
+        false
+      )
 
       return { workers: reportWorkers, health: reportHealth }
     }
   }
 
-  async #updateServiceHealth (…
+  async #updateServiceHealth (
+    serviceId,
+    config,
+    serviceConfig,
+    currentWorkers,
+    currentHealth,
+    health,
+    updateConfig = true
+  ) {
     const report = {
       current: currentHealth,
       new: health,
@@ -2263,15 +2328,25 @@ class Runtime extends EventEmitter {
     }
 
     for (let i = 0; i < currentWorkers; i++) {
-      this.logger.info(…
+      this.logger.info(
+        { health: { current: currentHealth, new: health } },
+        `Restarting service "${serviceId}" worker ${i} to update config health heap...`
+      )
 
       const worker = await this.#getWorkerById(serviceId, i)
-      if (health.maxHeapTotal) {
-        …
+      if (health.maxHeapTotal) {
+        worker[kConfig].health.maxHeapTotal = health.maxHeapTotal
+      }
+      if (health.maxYoungGeneration) {
+        worker[kConfig].health.maxYoungGeneration = health.maxYoungGeneration
+      }
 
       await this.#replaceWorker(config, serviceConfig, currentWorkers, serviceId, i, worker)
       report.updated.push(i)
-      this.logger.info(…
+      this.logger.info(
+        { health: { current: currentHealth, new: health } },
+        `Restarted service "${serviceId}" worker ${i}`
+      )
     }
     report.success = true
   } catch (err) {
@@ -2279,7 +2354,10 @@ class Runtime extends EventEmitter {
       this.logger.error({ err }, 'Cannot update service health heap, no worker updated')
       await this.#updateServiceConfigHealth(serviceId, currentHealth)
     } else {
-      this.logger.error(…
+      this.logger.error(
+        { err },
+        `Cannot update service health heap, updated workers: ${report.updated.length} out of ${currentWorkers}`
+      )
     }
     report.success = false
   }
@@ -2306,7 +2384,10 @@ class Runtime extends EventEmitter {
      this.logger.error({ err }, 'Cannot start service workers, no worker started')
      await this.#updateServiceConfigWorkers(serviceId, currentWorkers)
    } else {
-      this.logger.error(…
+      this.logger.error(
+        { err },
+        `Cannot start service workers, started workers: ${report.started.length} out of ${workers}`
+      )
      await this.#updateServiceConfigWorkers(serviceId, currentWorkers + report.started.length)
    }
    report.success = false
@@ -2327,7 +2408,10 @@ class Runtime extends EventEmitter {
     if (report.stopped.length < 1) {
       this.logger.error({ err }, 'Cannot stop service workers, no worker stopped')
     } else {
-      this.logger.error(…
+      this.logger.error(
+        { err },
+        `Cannot stop service workers, stopped workers: ${report.stopped.length} out of ${workers}`
+      )
       await this.#updateServiceConfigWorkers(serviceId, currentWorkers - report.stopped)
     }
     report.success = false