@platformatic/runtime 2.75.0-alpha.0 → 2.75.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/config.d.ts +2 -1
- package/lib/errors.js +3 -1
- package/lib/prom-server.js +16 -8
- package/lib/runtime.js +130 -29
- package/lib/start.js +1 -1
- package/lib/worker/main.js +12 -2
- package/package.json +15 -15
- package/schema.json +5 -1
package/config.d.ts CHANGED

@@ -5,7 +5,7 @@
  * and run json-schema-to-typescript to regenerate this file.
  */

-export type HttpsSchemasPlatformaticDevPlatformaticRuntime2750Alpha0Json = {
+export type HttpsSchemasPlatformaticDevPlatformaticRuntime2750Json = {
   [k: string]: unknown;
 } & {
   $schema?: string;
@@ -129,6 +129,7 @@ export type HttpsSchemasPlatformaticDevPlatformaticRuntime2750Alpha0Json = {
   };
   startTimeout?: number;
   restartOnError?: boolean | number;
+  exitOnUnhandledErrors?: boolean;
   gracefulShutdown?: {
     runtime: number | string;
     service: number | string;
package/lib/errors.js CHANGED

@@ -112,5 +112,7 @@ module.exports = {
   CannotRemoveServiceOnUpdateError: createError(
     `${ERROR_PREFIX}_CANNOT_REMOVE_SERVICE_ON_UPDATE`,
     'Cannot remove service "%s" when updating a Runtime'
-  )
+  ),
+
+  MissingPprofCapture: createError(`${ERROR_PREFIX}_MISSING_PPROF_CAPTURE`, 'Please install @platformatic/watt-pprof-capture')
 }
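The new MissingPprofCapture error follows the same createError pattern as the rest of the file. A minimal sketch of how a caller might detect it; the error code shown assumes the usual PLT_RUNTIME prefix and an @fastify/error-style error class, neither of which is confirmed by this diff:

    'use strict'

    // Hypothetical consumer-side check around the profiling API shown in lib/runtime.js below.
    async function profileService (runtime, serviceId) {
      try {
        return await runtime.startServiceProfiling(serviceId)
      } catch (err) {
        // 'PLT_RUNTIME' is an assumed value of ERROR_PREFIX.
        if (err.code === 'PLT_RUNTIME_MISSING_PPROF_CAPTURE') {
          console.error('Profiling needs @platformatic/watt-pprof-capture in the runtime preload list.')
          return null
        }
        throw err
      }
    }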
package/lib/prom-server.js CHANGED

@@ -19,13 +19,21 @@ const DEFAULT_LIVENESS_FAIL_BODY = 'ERR'
 async function checkReadiness (runtime) {
   const workers = await runtime.getWorkers()

-  //
+  // Make sure there is at least one started worker
+  const services = new Set()
+  const started = new Set()
   for (const worker of Object.values(workers)) {
-
-
+    services.add(worker.service)
+
+    if (worker.status === 'started') {
+      started.add(worker.service)
     }
   }

+  if (started.size !== services.size) {
+    return { status: false }
+  }
+
   // perform custom readiness checks, get custom response content if any
   const checks = await runtime.getCustomReadinessChecks()

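The readiness check now requires every service to have at least one worker in the started state before the custom readiness checks run. A small self-contained sketch of that grouping rule; the sample worker map is made up for illustration, but the field names (service, status) mirror the ones used by checkReadiness above:

    'use strict'

    // Illustrative stand-in for the output of runtime.getWorkers().
    const workers = {
      'api:0': { service: 'api', status: 'started' },
      'api:1': { service: 'api', status: 'starting' },
      'jobs:0': { service: 'jobs', status: 'starting' }
    }

    const services = new Set()
    const started = new Set()
    for (const worker of Object.values(workers)) {
      services.add(worker.service)
      if (worker.status === 'started') {
        started.add(worker.service)
      }
    }

    // 'api' has a started worker, 'jobs' has none, so readiness fails.
    console.log(started.size === services.size ? 'ready' : 'not ready') // => not ready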
The remaining hunks in this file are whitespace-only re-indentations; the removed and added lines carry the same code:

@@ -89,7 +97,7 @@ async function startPrometheusServer (runtime, opts) {
         return reply.code(401).send({ message: 'Unauthorized' })
       }
       return done()
-    }
+    }
   })
   onRequestHook = promServer.basicAuth
 }
@@ -129,7 +137,7 @@ async function startPrometheusServer (runtime, opts) {
       reply.type('text/plain')
     }
     return (await runtime.getMetrics(reqType)).metrics
-  }
+  }
 })

 if (opts.readiness !== false) {
@@ -167,7 +175,7 @@ async function startPrometheusServer (runtime, opts) {
         reply.status(failStatusCode).send(failBody)
       }
     }
-  }
+  }
 })
 }

@@ -206,7 +214,7 @@ async function startPrometheusServer (runtime, opts) {
         reply.status(failStatusCode).send(readiness?.body || failBody)
       }
     }
-  }
+  }
 })
 }

@@ -215,5 +223,5 @@ async function startPrometheusServer (runtime, opts) {
 }

 module.exports = {
-  startPrometheusServer
+  startPrometheusServer
 }
package/lib/runtime.js CHANGED

@@ -1,7 +1,14 @@
 'use strict'

 const { ITC } = require('@platformatic/itc')
-const {
+const {
+  features,
+  ensureLoggableError,
+  executeWithTimeout,
+  deepmerge,
+  parseMemorySize,
+  kTimeout
+} = require('@platformatic/utils')
 const { once, EventEmitter } = require('node:events')
 const { createReadStream, watch, existsSync } = require('node:fs')
 const { readdir, readFile, stat, access } = require('node:fs/promises')
@@ -718,7 +725,9 @@ class Runtime extends EventEmitter {
       const label = `${service}:${i}`
       const worker = this.#workers.get(label)

-
+      if (worker) {
+        status[label] = await sendViaITC(worker, 'getCustomHealthCheck')
+      }
     }
   }

@@ -733,7 +742,9 @@ class Runtime extends EventEmitter {
       const label = `${service}:${i}`
       const worker = this.#workers.get(label)

-
+      if (worker) {
+        status[label] = await sendViaITC(worker, 'getCustomReadinessCheck')
+      }
     }
   }

@@ -795,14 +806,24 @@ class Runtime extends EventEmitter {
     return sendViaITC(service, 'getServiceEnv')
   }

+  #validatePprofCapturePreload () {
+    const found = this.#configManager.current.preload?.some(p => p.includes('watt-pprof-capture'))
+
+    if (!found) {
+      throw new errors.MissingPprofCapture()
+    }
+  }
+
   async startServiceProfiling (id, options = {}, ensureStarted = true) {
     const service = await this.#getServiceById(id, ensureStarted)
+    this.#validatePprofCapturePreload()

     return sendViaITC(service, 'startProfiling', options)
   }

   async stopServiceProfiling (id, ensureStarted = true) {
     const service = await this.#getServiceById(id, ensureStarted)
+    this.#validatePprofCapturePreload()

     return sendViaITC(service, 'stopProfiling')
   }
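startServiceProfiling and stopServiceProfiling now fail fast with the new MissingPprofCapture error unless an entry containing "watt-pprof-capture" is present in the runtime's preload list, instead of forwarding the request to the worker. A minimal runtime-config sketch that satisfies the check; whether preload takes a bare package name or a resolved file path, and the service layout, are assumptions here:

    {
      "$schema": "https://schemas.platformatic.dev/@platformatic/runtime/2.75.0.json",
      "preload": ["@platformatic/watt-pprof-capture"],
      "autoload": { "path": "services" }
    }

With that entry in place, both profiling methods forward startProfiling / stopProfiling to the service worker over ITC as before.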
@@ -1107,9 +1128,7 @@ class Runtime extends EventEmitter {

     const promises = []
     for (const worker of this.#workers.values()) {
-      promises.push(
-        sendViaITC(worker, 'setSharedContext', sharedContext)
-      )
+      promises.push(sendViaITC(worker, 'setSharedContext', sharedContext))
     }

     const results = await Promise.allSettled(promises)
@@ -1214,8 +1233,12 @@ class Runtime extends EventEmitter {
       workerEnv['NODE_OPTIONS'] = `${originalNodeOptions} ${serviceConfig.nodeOptions}`.trim()
     }

-    const maxHeapTotal =
-
+    const maxHeapTotal =
+      typeof health.maxHeapTotal === 'string' ? parseMemorySize(health.maxHeapTotal) : health.maxHeapTotal
+    const maxYoungGeneration =
+      typeof health.maxYoungGeneration === 'string'
+        ? parseMemorySize(health.maxYoungGeneration)
+        : health.maxYoungGeneration

     const maxOldGenerationSizeMb = Math.floor(
       (maxYoungGeneration > 0 ? maxHeapTotal - maxYoungGeneration : maxHeapTotal) / (1024 * 1024)
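health.maxHeapTotal and health.maxYoungGeneration may now be given as memory-size strings, converted with parseMemorySize from @platformatic/utils before the worker resource limits are derived. A self-contained sketch of the same arithmetic, using a simplified stand-in parser (the real parseMemorySize likely accepts more formats than this):

    'use strict'

    // Simplified stand-in for @platformatic/utils parseMemorySize, for illustration only.
    function parseMemorySize (value) {
      const match = /^(\d+(?:\.\d+)?)\s*(B|KB|MB|GB)$/i.exec(value)
      if (!match) throw new Error(`Invalid memory size: ${value}`)
      const units = { B: 1, KB: 1024, MB: 1024 ** 2, GB: 1024 ** 3 }
      return Math.floor(Number(match[1]) * units[match[2].toUpperCase()])
    }

    const health = { maxHeapTotal: '512MB', maxYoungGeneration: '64MB' }

    const maxHeapTotal =
      typeof health.maxHeapTotal === 'string' ? parseMemorySize(health.maxHeapTotal) : health.maxHeapTotal
    const maxYoungGeneration =
      typeof health.maxYoungGeneration === 'string'
        ? parseMemorySize(health.maxYoungGeneration)
        : health.maxYoungGeneration

    // Same derivation as the runtime: the old-generation budget is what is left
    // of the heap after the young generation, expressed in MiB.
    const maxOldGenerationSizeMb = Math.floor(
      (maxYoungGeneration > 0 ? maxHeapTotal - maxYoungGeneration : maxHeapTotal) / (1024 * 1024)
    )

    console.log(maxOldGenerationSizeMb) // => 448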
@@ -1391,9 +1414,7 @@ class Runtime extends EventEmitter {
     if (features.node.worker.getHeapStatistics) {
       const { used_heap_size: heapUsed, total_heap_size: heapTotal } = await worker.getHeapStatistics()
       const currentELU = worker.performance.eventLoopUtilization()
-      const elu = worker[kLastELU]
-        ? worker.performance.eventLoopUtilization(currentELU, worker[kLastELU])
-        : currentELU
+      const elu = worker[kLastELU] ? worker.performance.eventLoopUtilization(currentELU, worker[kLastELU]) : currentELU
       worker[kLastELU] = currentELU
       return { elu: elu.utilization, heapUsed, heapTotal }
     }
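The health metrics sample event loop utilization as a delta between consecutive readings rather than the since-startup aggregate. A standalone sketch of the same sampling pattern using Node's perf_hooks in the main thread; the runtime does the equivalent through worker.performance on each worker thread:

    'use strict'

    const { performance } = require('node:perf_hooks')
    const { setTimeout: sleep } = require('node:timers/promises')

    let lastELU = null

    function sampleELU () {
      const currentELU = performance.eventLoopUtilization()
      // Passing the previous reading yields the utilization for just this interval.
      const elu = lastELU ? performance.eventLoopUtilization(currentELU, lastELU) : currentELU
      lastELU = currentELU
      return elu.utilization // value in [0, 1]
    }

    async function main () {
      console.log('since startup:', sampleELU())
      await sleep(1000)
      console.log('last interval:', sampleELU())
    }

    main()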
@@ -1543,7 +1564,16 @@ class Runtime extends EventEmitter {
       if (enabled && config.restartOnError > 0) {
         // if gracePeriod is 0, it will be set to 1 to start health checks immediately
         // however, the health event will start when the worker is started
-        this.#setupHealthCheck(
+        this.#setupHealthCheck(
+          config,
+          serviceConfig,
+          workersCount,
+          id,
+          index,
+          worker,
+          label,
+          gracePeriod > 0 ? gracePeriod : 1
+        )
       }
     } catch (error) {
       // TODO: handle port allocation error here
@@ -1613,7 +1643,7 @@ class Runtime extends EventEmitter {
       this.logger?.info(`Stopping the ${label}...`)
     }

-    const exitTimeout = this.#configManager.current.gracefulShutdown.
+    const exitTimeout = this.#configManager.current.gracefulShutdown.service
     const exitPromise = once(worker, 'exit')

     // Always send the stop message, it will shut down workers that only had ITC and interceptors setup
@@ -2112,14 +2142,29 @@ class Runtime extends EventEmitter {
     const { serviceId, config: serviceConfig, workers, health, currentWorkers, currentHealth } = update

     if (workers && health) {
-      const r = await this.#updateServiceWorkersAndHealth(
+      const r = await this.#updateServiceWorkersAndHealth(
+        serviceId,
+        config,
+        serviceConfig,
+        workers,
+        health,
+        currentWorkers,
+        currentHealth
+      )
       report.push({
         service: serviceId,
         workers: r.workers,
         health: r.health
       })
     } else if (health) {
-      const r = await this.#updateServiceHealth(
+      const r = await this.#updateServiceHealth(
+        serviceId,
+        config,
+        serviceConfig,
+        currentWorkers,
+        currentHealth,
+        health
+      )
       report.push({
         service: serviceId,
         health: r.health
@@ -2193,7 +2238,10 @@ class Runtime extends EventEmitter {
         throw new errors.InvalidArgumentError('maxHeapTotal', 'must be greater than 0')
       }
     } else {
-      throw new errors.InvalidArgumentError(
+      throw new errors.InvalidArgumentError(
+        'maxHeapTotal',
+        'must be a number or a string representing a memory size'
+      )
     }

     if (currentHealth.maxHeapTotal === maxHeapTotal) {
@@ -2215,7 +2263,10 @@ class Runtime extends EventEmitter {
         throw new errors.InvalidArgumentError('maxYoungGeneration', 'must be greater than 0')
       }
     } else {
-      throw new errors.InvalidArgumentError(
+      throw new errors.InvalidArgumentError(
+        'maxYoungGeneration',
+        'must be a number or a string representing a memory size'
+      )
     }

     if (currentHealth.maxYoungGeneration && currentHealth.maxYoungGeneration === maxYoungGeneration) {
@@ -2228,7 +2279,7 @@ class Runtime extends EventEmitter {
     if (workers || maxHeapTotal || maxYoungGeneration) {
       let health
       if (maxHeapTotal || maxYoungGeneration) {
-        health = {
+        health = {}
         if (maxHeapTotal) {
           health.maxHeapTotal = maxHeapTotal
         }
@@ -2243,12 +2294,27 @@ class Runtime extends EventEmitter {
     return validatedUpdates
   }

-  async #updateServiceWorkersAndHealth (
+  async #updateServiceWorkersAndHealth (
+    serviceId,
+    config,
+    serviceConfig,
+    workers,
+    health,
+    currentWorkers,
+    currentHealth
+  ) {
     if (currentWorkers > workers) {
       // stop workers
       const reportWorkers = await this.#updateServiceWorkers(serviceId, config, serviceConfig, workers, currentWorkers)
       // update heap for current workers
-      const reportHealth = await this.#updateServiceHealth(
+      const reportHealth = await this.#updateServiceHealth(
+        serviceId,
+        config,
+        serviceConfig,
+        workers,
+        currentHealth,
+        health
+      )

       return { workers: reportWorkers, health: reportHealth }
     } else {
@@ -2257,13 +2323,29 @@ class Runtime extends EventEmitter {
       // start new workers with new heap
       const reportWorkers = await this.#updateServiceWorkers(serviceId, config, serviceConfig, workers, currentWorkers)
       // update heap for current workers
-      const reportHealth = await this.#updateServiceHealth(
+      const reportHealth = await this.#updateServiceHealth(
+        serviceId,
+        config,
+        serviceConfig,
+        currentWorkers,
+        currentHealth,
+        health,
+        false
+      )

       return { workers: reportWorkers, health: reportHealth }
     }
   }

-  async #updateServiceHealth (
+  async #updateServiceHealth (
+    serviceId,
+    config,
+    serviceConfig,
+    currentWorkers,
+    currentHealth,
+    health,
+    updateConfig = true
+  ) {
     const report = {
       current: currentHealth,
       new: health,
@@ -2275,15 +2357,25 @@ class Runtime extends EventEmitter {
     }

     for (let i = 0; i < currentWorkers; i++) {
-      this.logger.info(
+      this.logger.info(
+        { health: { current: currentHealth, new: health } },
+        `Restarting service "${serviceId}" worker ${i} to update config health heap...`
+      )

       const worker = await this.#getWorkerById(serviceId, i)
-      if (health.maxHeapTotal) {
-
+      if (health.maxHeapTotal) {
+        worker[kConfig].health.maxHeapTotal = health.maxHeapTotal
+      }
+      if (health.maxYoungGeneration) {
+        worker[kConfig].health.maxYoungGeneration = health.maxYoungGeneration
+      }

       await this.#replaceWorker(config, serviceConfig, currentWorkers, serviceId, i, worker)
       report.updated.push(i)
-      this.logger.info(
+      this.logger.info(
+        { health: { current: currentHealth, new: health } },
+        `Restarted service "${serviceId}" worker ${i}`
+      )
     }
     report.success = true
   } catch (err) {
@@ -2291,7 +2383,10 @@ class Runtime extends EventEmitter {
       this.logger.error({ err }, 'Cannot update service health heap, no worker updated')
       await this.#updateServiceConfigHealth(serviceId, currentHealth)
     } else {
-      this.logger.error(
+      this.logger.error(
+        { err },
+        `Cannot update service health heap, updated workers: ${report.updated.length} out of ${currentWorkers}`
+      )
     }
     report.success = false
   }
@@ -2318,7 +2413,10 @@ class Runtime extends EventEmitter {
       this.logger.error({ err }, 'Cannot start service workers, no worker started')
       await this.#updateServiceConfigWorkers(serviceId, currentWorkers)
     } else {
-      this.logger.error(
+      this.logger.error(
+        { err },
+        `Cannot start service workers, started workers: ${report.started.length} out of ${workers}`
+      )
       await this.#updateServiceConfigWorkers(serviceId, currentWorkers + report.started.length)
     }
     report.success = false
@@ -2339,7 +2437,10 @@ class Runtime extends EventEmitter {
     if (report.stopped.length < 1) {
       this.logger.error({ err }, 'Cannot stop service workers, no worker stopped')
     } else {
-      this.logger.error(
+      this.logger.error(
+        { err },
+        `Cannot stop service workers, stopped workers: ${report.stopped.length} out of ${workers}`
+      )
       await this.#updateServiceConfigWorkers(serviceId, currentWorkers - report.stopped)
     }
     report.success = false
package/lib/start.js CHANGED

@@ -150,7 +150,7 @@ async function startCommand (args, throwAllErrors = false, returnRuntime = false
   const runtime = startResult.runtime
   const res = startResult.address

-  closeWithGrace(async event => {
+  closeWithGrace({ delay: config.configManager.current.gracefulShutdown?.runtime ?? 10000 }, async event => {
     if (event.err instanceof Error) {
       console.error(event.err)
     }
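closeWithGrace now receives the configured runtime grace period as its delay option, falling back to 10000 ms when gracefulShutdown is not set, and the companion change in lib/runtime.js uses gracefulShutdown.service as the per-worker exit timeout when stopping a service. A config sketch with explicit millisecond values (the values themselves are illustrative):

    {
      "$schema": "https://schemas.platformatic.dev/@platformatic/runtime/2.75.0.json",
      "gracefulShutdown": {
        "runtime": 30000,
        "service": 10000
      }
    }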
package/lib/worker/main.js CHANGED

@@ -178,8 +178,18 @@ async function main () {
     !!config.watch
   )

-
-
+  if (config.exitOnUnhandledErrors) {
+    process.on('uncaughtException', handleUnhandled.bind(null, app, 'uncaught exception'))
+    process.on('unhandledRejection', handleUnhandled.bind(null, app, 'unhandled rejection'))
+
+    process.on('newListener', event => {
+      if (event === 'uncaughtException' || event === 'unhandledRejection') {
+        globalThis.platformatic.logger.warn(
+          `A listener has been added for the "process.${event}" event. This listener will be never triggered as Watt default behavior will kill the process before.\n To disable this behavior, set "exitOnUnhandledErrors" to false in the runtime config.`
+        )
+      }
+    })
+  }

   await app.init()
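With exitOnUnhandledErrors enabled (the schema default, see package/schema.json below), each worker installs its own uncaughtException and unhandledRejection handlers and logs a warning whenever application code registers competing listeners, since the process is terminated first. A config sketch that opts out, assuming the option sits at the top level of the runtime config as the schema change suggests:

    {
      "$schema": "https://schemas.platformatic.dev/@platformatic/runtime/2.75.0.json",
      "exitOnUnhandledErrors": false
    }

Only with the flag turned off does an application-level handler like the following get a chance to run (the handler body is illustrative):

    'use strict'

    // With exitOnUnhandledErrors left at its default of true, Watt's own handler
    // terminates the worker first and registering this listener only triggers a warning.
    process.on('unhandledRejection', err => {
      console.error('unhandled rejection, shutting down gracefully', err)
      process.exitCode = 1
    })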
package/package.json CHANGED

@@ -1,6 +1,6 @@
 {
   "name": "@platformatic/runtime",
-  "version": "2.75.0-alpha.0",
+  "version": "2.75.0",
   "description": "",
   "main": "index.js",
   "bin": {
@@ -37,12 +37,12 @@
     "typescript": "^5.5.4",
     "undici-oidc-interceptor": "^0.5.0",
     "why-is-node-running": "^2.2.2",
-    "@platformatic/composer": "2.75.0-alpha.0",
-    "@platformatic/db": "2.75.0-alpha.0",
-    "@platformatic/node": "2.75.0-alpha.0",
-    "@platformatic/service": "2.75.0-alpha.0",
-    "@platformatic/sql-graphql": "2.75.0-alpha.0",
-    "@platformatic/sql-mapper": "2.75.0-alpha.0"
+    "@platformatic/composer": "2.75.0",
+    "@platformatic/db": "2.75.0",
+    "@platformatic/node": "2.75.0",
+    "@platformatic/service": "2.75.0",
+    "@platformatic/sql-graphql": "2.75.0",
+    "@platformatic/sql-mapper": "2.75.0"
   },
   "dependencies": {
     "@fastify/accepts": "^5.0.0",
@@ -76,14 +76,14 @@
     "undici": "^7.0.0",
     "undici-thread-interceptor": "^0.14.0",
     "ws": "^8.16.0",
-    "@platformatic/basic": "2.75.0-alpha.0",
-    "@platformatic/
-    "@platformatic/
-    "@platformatic/metrics": "2.75.0-alpha.0",
-    "@platformatic/
-    "@platformatic/
-    "@platformatic/
-    "@platformatic/
+    "@platformatic/basic": "2.75.0",
+    "@platformatic/itc": "2.75.0",
+    "@platformatic/telemetry": "2.75.0",
+    "@platformatic/metrics": "2.75.0",
+    "@platformatic/ts-compiler": "2.75.0",
+    "@platformatic/utils": "2.75.0",
+    "@platformatic/generators": "2.75.0",
+    "@platformatic/config": "2.75.0"
   },
   "scripts": {
     "test": "pnpm run lint && borp --concurrency=1 --timeout=1200000 && tsd",
package/schema.json CHANGED

@@ -1,5 +1,5 @@
 {
-  "$id": "https://schemas.platformatic.dev/@platformatic/runtime/2.75.0-alpha.0.json",
+  "$id": "https://schemas.platformatic.dev/@platformatic/runtime/2.75.0.json",
   "$schema": "http://json-schema.org/draft-07/schema#",
   "type": "object",
   "properties": {
@@ -960,6 +960,10 @@
       }
     ]
   },
+  "exitOnUnhandledErrors": {
+    "default": true,
+    "type": "boolean"
+  },
   "gracefulShutdown": {
     "type": "object",
     "properties": {