@platformatic/runtime 2.75.0-alpha.0 → 2.75.0-alpha.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/config.d.ts CHANGED
@@ -5,7 +5,7 @@
5
5
  * and run json-schema-to-typescript to regenerate this file.
6
6
  */
7
7
 
8
- export type HttpsSchemasPlatformaticDevPlatformaticRuntime2750Alpha0Json = {
8
+ export type HttpsSchemasPlatformaticDevPlatformaticRuntime2750Alpha1Json = {
9
9
  [k: string]: unknown;
10
10
  } & {
11
11
  $schema?: string;
@@ -129,6 +129,7 @@ export type HttpsSchemasPlatformaticDevPlatformaticRuntime2750Alpha0Json = {
129
129
  };
130
130
  startTimeout?: number;
131
131
  restartOnError?: boolean | number;
132
+ exitOnUnhandledErrors?: boolean;
132
133
  gracefulShutdown?: {
133
134
  runtime: number | string;
134
135
  service: number | string;
@@ -19,13 +19,21 @@ const DEFAULT_LIVENESS_FAIL_BODY = 'ERR'
19
19
  async function checkReadiness (runtime) {
20
20
  const workers = await runtime.getWorkers()
21
21
 
22
- // check if all workers are started
22
+ // Make sure there is at least one started worker
23
+ const services = new Set()
24
+ const started = new Set()
23
25
  for (const worker of Object.values(workers)) {
24
- if (worker.status !== 'started') {
25
- return { status: false }
26
+ services.add(worker.service)
27
+
28
+ if (worker.status === 'started') {
29
+ started.add(worker.service)
26
30
  }
27
31
  }
28
32
 
33
+ if (started.size !== services.size) {
34
+ return { status: false }
35
+ }
36
+
29
37
  // perform custom readiness checks, get custom response content if any
30
38
  const checks = await runtime.getCustomReadinessChecks()
31
39
 
@@ -89,7 +97,7 @@ async function startPrometheusServer (runtime, opts) {
89
97
  return reply.code(401).send({ message: 'Unauthorized' })
90
98
  }
91
99
  return done()
92
- },
100
+ }
93
101
  })
94
102
  onRequestHook = promServer.basicAuth
95
103
  }
@@ -129,7 +137,7 @@ async function startPrometheusServer (runtime, opts) {
129
137
  reply.type('text/plain')
130
138
  }
131
139
  return (await runtime.getMetrics(reqType)).metrics
132
- },
140
+ }
133
141
  })
134
142
 
135
143
  if (opts.readiness !== false) {
@@ -167,7 +175,7 @@ async function startPrometheusServer (runtime, opts) {
167
175
  reply.status(failStatusCode).send(failBody)
168
176
  }
169
177
  }
170
- },
178
+ }
171
179
  })
172
180
  }
173
181
 
@@ -206,7 +214,7 @@ async function startPrometheusServer (runtime, opts) {
206
214
  reply.status(failStatusCode).send(readiness?.body || failBody)
207
215
  }
208
216
  }
209
- },
217
+ }
210
218
  })
211
219
  }
212
220
 
@@ -215,5 +223,5 @@ async function startPrometheusServer (runtime, opts) {
215
223
  }
216
224
 
217
225
  module.exports = {
218
- startPrometheusServer,
226
+ startPrometheusServer
219
227
  }
package/lib/runtime.js CHANGED
@@ -1,7 +1,14 @@
1
1
  'use strict'
2
2
 
3
3
  const { ITC } = require('@platformatic/itc')
4
- const { features, ensureLoggableError, executeWithTimeout, deepmerge, parseMemorySize, kTimeout } = require('@platformatic/utils')
4
+ const {
5
+ features,
6
+ ensureLoggableError,
7
+ executeWithTimeout,
8
+ deepmerge,
9
+ parseMemorySize,
10
+ kTimeout
11
+ } = require('@platformatic/utils')
5
12
  const { once, EventEmitter } = require('node:events')
6
13
  const { createReadStream, watch, existsSync } = require('node:fs')
7
14
  const { readdir, readFile, stat, access } = require('node:fs/promises')
@@ -718,7 +725,9 @@ class Runtime extends EventEmitter {
718
725
  const label = `${service}:${i}`
719
726
  const worker = this.#workers.get(label)
720
727
 
721
- status[label] = await sendViaITC(worker, 'getCustomHealthCheck')
728
+ if (worker) {
729
+ status[label] = await sendViaITC(worker, 'getCustomHealthCheck')
730
+ }
722
731
  }
723
732
  }
724
733
 
@@ -733,7 +742,9 @@ class Runtime extends EventEmitter {
733
742
  const label = `${service}:${i}`
734
743
  const worker = this.#workers.get(label)
735
744
 
736
- status[label] = await sendViaITC(worker, 'getCustomReadinessCheck')
745
+ if (worker) {
746
+ status[label] = await sendViaITC(worker, 'getCustomReadinessCheck')
747
+ }
737
748
  }
738
749
  }
739
750
 
@@ -1107,9 +1118,7 @@ class Runtime extends EventEmitter {
1107
1118
 
1108
1119
  const promises = []
1109
1120
  for (const worker of this.#workers.values()) {
1110
- promises.push(
1111
- sendViaITC(worker, 'setSharedContext', sharedContext)
1112
- )
1121
+ promises.push(sendViaITC(worker, 'setSharedContext', sharedContext))
1113
1122
  }
1114
1123
 
1115
1124
  const results = await Promise.allSettled(promises)
@@ -1214,8 +1223,12 @@ class Runtime extends EventEmitter {
1214
1223
  workerEnv['NODE_OPTIONS'] = `${originalNodeOptions} ${serviceConfig.nodeOptions}`.trim()
1215
1224
  }
1216
1225
 
1217
- const maxHeapTotal = typeof health.maxHeapTotal === 'string' ? parseMemorySize(health.maxHeapTotal) : health.maxHeapTotal
1218
- const maxYoungGeneration = typeof health.maxYoungGeneration === 'string' ? parseMemorySize(health.maxYoungGeneration) : health.maxYoungGeneration
1226
+ const maxHeapTotal =
1227
+ typeof health.maxHeapTotal === 'string' ? parseMemorySize(health.maxHeapTotal) : health.maxHeapTotal
1228
+ const maxYoungGeneration =
1229
+ typeof health.maxYoungGeneration === 'string'
1230
+ ? parseMemorySize(health.maxYoungGeneration)
1231
+ : health.maxYoungGeneration
1219
1232
 
1220
1233
  const maxOldGenerationSizeMb = Math.floor(
1221
1234
  (maxYoungGeneration > 0 ? maxHeapTotal - maxYoungGeneration : maxHeapTotal) / (1024 * 1024)
@@ -1391,9 +1404,7 @@ class Runtime extends EventEmitter {
1391
1404
  if (features.node.worker.getHeapStatistics) {
1392
1405
  const { used_heap_size: heapUsed, total_heap_size: heapTotal } = await worker.getHeapStatistics()
1393
1406
  const currentELU = worker.performance.eventLoopUtilization()
1394
- const elu = worker[kLastELU]
1395
- ? worker.performance.eventLoopUtilization(currentELU, worker[kLastELU])
1396
- : currentELU
1407
+ const elu = worker[kLastELU] ? worker.performance.eventLoopUtilization(currentELU, worker[kLastELU]) : currentELU
1397
1408
  worker[kLastELU] = currentELU
1398
1409
  return { elu: elu.utilization, heapUsed, heapTotal }
1399
1410
  }
@@ -1543,7 +1554,16 @@ class Runtime extends EventEmitter {
1543
1554
  if (enabled && config.restartOnError > 0) {
1544
1555
  // if gracePeriod is 0, it will be set to 1 to start health checks immediately
1545
1556
  // however, the health event will start when the worker is started
1546
- this.#setupHealthCheck(config, serviceConfig, workersCount, id, index, worker, label, gracePeriod > 0 ? gracePeriod : 1)
1557
+ this.#setupHealthCheck(
1558
+ config,
1559
+ serviceConfig,
1560
+ workersCount,
1561
+ id,
1562
+ index,
1563
+ worker,
1564
+ label,
1565
+ gracePeriod > 0 ? gracePeriod : 1
1566
+ )
1547
1567
  }
1548
1568
  } catch (error) {
1549
1569
  // TODO: handle port allocation error here
@@ -1613,7 +1633,7 @@ class Runtime extends EventEmitter {
1613
1633
  this.logger?.info(`Stopping the ${label}...`)
1614
1634
  }
1615
1635
 
1616
- const exitTimeout = this.#configManager.current.gracefulShutdown.runtime
1636
+ const exitTimeout = this.#configManager.current.gracefulShutdown.service
1617
1637
  const exitPromise = once(worker, 'exit')
1618
1638
 
1619
1639
  // Always send the stop message, it will shut down workers that only had ITC and interceptors setup
@@ -2112,14 +2132,29 @@ class Runtime extends EventEmitter {
2112
2132
  const { serviceId, config: serviceConfig, workers, health, currentWorkers, currentHealth } = update
2113
2133
 
2114
2134
  if (workers && health) {
2115
- const r = await this.#updateServiceWorkersAndHealth(serviceId, config, serviceConfig, workers, health, currentWorkers, currentHealth)
2135
+ const r = await this.#updateServiceWorkersAndHealth(
2136
+ serviceId,
2137
+ config,
2138
+ serviceConfig,
2139
+ workers,
2140
+ health,
2141
+ currentWorkers,
2142
+ currentHealth
2143
+ )
2116
2144
  report.push({
2117
2145
  service: serviceId,
2118
2146
  workers: r.workers,
2119
2147
  health: r.health
2120
2148
  })
2121
2149
  } else if (health) {
2122
- const r = await this.#updateServiceHealth(serviceId, config, serviceConfig, currentWorkers, currentHealth, health)
2150
+ const r = await this.#updateServiceHealth(
2151
+ serviceId,
2152
+ config,
2153
+ serviceConfig,
2154
+ currentWorkers,
2155
+ currentHealth,
2156
+ health
2157
+ )
2123
2158
  report.push({
2124
2159
  service: serviceId,
2125
2160
  health: r.health
@@ -2193,7 +2228,10 @@ class Runtime extends EventEmitter {
2193
2228
  throw new errors.InvalidArgumentError('maxHeapTotal', 'must be greater than 0')
2194
2229
  }
2195
2230
  } else {
2196
- throw new errors.InvalidArgumentError('maxHeapTotal', 'must be a number or a string representing a memory size')
2231
+ throw new errors.InvalidArgumentError(
2232
+ 'maxHeapTotal',
2233
+ 'must be a number or a string representing a memory size'
2234
+ )
2197
2235
  }
2198
2236
 
2199
2237
  if (currentHealth.maxHeapTotal === maxHeapTotal) {
@@ -2215,7 +2253,10 @@ class Runtime extends EventEmitter {
2215
2253
  throw new errors.InvalidArgumentError('maxYoungGeneration', 'must be greater than 0')
2216
2254
  }
2217
2255
  } else {
2218
- throw new errors.InvalidArgumentError('maxYoungGeneration', 'must be a number or a string representing a memory size')
2256
+ throw new errors.InvalidArgumentError(
2257
+ 'maxYoungGeneration',
2258
+ 'must be a number or a string representing a memory size'
2259
+ )
2219
2260
  }
2220
2261
 
2221
2262
  if (currentHealth.maxYoungGeneration && currentHealth.maxYoungGeneration === maxYoungGeneration) {
@@ -2228,7 +2269,7 @@ class Runtime extends EventEmitter {
2228
2269
  if (workers || maxHeapTotal || maxYoungGeneration) {
2229
2270
  let health
2230
2271
  if (maxHeapTotal || maxYoungGeneration) {
2231
- health = { }
2272
+ health = {}
2232
2273
  if (maxHeapTotal) {
2233
2274
  health.maxHeapTotal = maxHeapTotal
2234
2275
  }
@@ -2243,12 +2284,27 @@ class Runtime extends EventEmitter {
2243
2284
  return validatedUpdates
2244
2285
  }
2245
2286
 
2246
- async #updateServiceWorkersAndHealth (serviceId, config, serviceConfig, workers, health, currentWorkers, currentHealth) {
2287
+ async #updateServiceWorkersAndHealth (
2288
+ serviceId,
2289
+ config,
2290
+ serviceConfig,
2291
+ workers,
2292
+ health,
2293
+ currentWorkers,
2294
+ currentHealth
2295
+ ) {
2247
2296
  if (currentWorkers > workers) {
2248
2297
  // stop workers
2249
2298
  const reportWorkers = await this.#updateServiceWorkers(serviceId, config, serviceConfig, workers, currentWorkers)
2250
2299
  // update heap for current workers
2251
- const reportHealth = await this.#updateServiceHealth(serviceId, config, serviceConfig, workers, currentHealth, health)
2300
+ const reportHealth = await this.#updateServiceHealth(
2301
+ serviceId,
2302
+ config,
2303
+ serviceConfig,
2304
+ workers,
2305
+ currentHealth,
2306
+ health
2307
+ )
2252
2308
 
2253
2309
  return { workers: reportWorkers, health: reportHealth }
2254
2310
  } else {
@@ -2257,13 +2313,29 @@ class Runtime extends EventEmitter {
2257
2313
  // start new workers with new heap
2258
2314
  const reportWorkers = await this.#updateServiceWorkers(serviceId, config, serviceConfig, workers, currentWorkers)
2259
2315
  // update heap for current workers
2260
- const reportHealth = await this.#updateServiceHealth(serviceId, config, serviceConfig, currentWorkers, currentHealth, health, false)
2316
+ const reportHealth = await this.#updateServiceHealth(
2317
+ serviceId,
2318
+ config,
2319
+ serviceConfig,
2320
+ currentWorkers,
2321
+ currentHealth,
2322
+ health,
2323
+ false
2324
+ )
2261
2325
 
2262
2326
  return { workers: reportWorkers, health: reportHealth }
2263
2327
  }
2264
2328
  }
2265
2329
 
2266
- async #updateServiceHealth (serviceId, config, serviceConfig, currentWorkers, currentHealth, health, updateConfig = true) {
2330
+ async #updateServiceHealth (
2331
+ serviceId,
2332
+ config,
2333
+ serviceConfig,
2334
+ currentWorkers,
2335
+ currentHealth,
2336
+ health,
2337
+ updateConfig = true
2338
+ ) {
2267
2339
  const report = {
2268
2340
  current: currentHealth,
2269
2341
  new: health,
@@ -2275,15 +2347,25 @@ class Runtime extends EventEmitter {
2275
2347
  }
2276
2348
 
2277
2349
  for (let i = 0; i < currentWorkers; i++) {
2278
- this.logger.info({ health: { current: currentHealth, new: health } }, `Restarting service "${serviceId}" worker ${i} to update config health heap...`)
2350
+ this.logger.info(
2351
+ { health: { current: currentHealth, new: health } },
2352
+ `Restarting service "${serviceId}" worker ${i} to update config health heap...`
2353
+ )
2279
2354
 
2280
2355
  const worker = await this.#getWorkerById(serviceId, i)
2281
- if (health.maxHeapTotal) { worker[kConfig].health.maxHeapTotal = health.maxHeapTotal }
2282
- if (health.maxYoungGeneration) { worker[kConfig].health.maxYoungGeneration = health.maxYoungGeneration }
2356
+ if (health.maxHeapTotal) {
2357
+ worker[kConfig].health.maxHeapTotal = health.maxHeapTotal
2358
+ }
2359
+ if (health.maxYoungGeneration) {
2360
+ worker[kConfig].health.maxYoungGeneration = health.maxYoungGeneration
2361
+ }
2283
2362
 
2284
2363
  await this.#replaceWorker(config, serviceConfig, currentWorkers, serviceId, i, worker)
2285
2364
  report.updated.push(i)
2286
- this.logger.info({ health: { current: currentHealth, new: health } }, `Restarted service "${serviceId}" worker ${i}`)
2365
+ this.logger.info(
2366
+ { health: { current: currentHealth, new: health } },
2367
+ `Restarted service "${serviceId}" worker ${i}`
2368
+ )
2287
2369
  }
2288
2370
  report.success = true
2289
2371
  } catch (err) {
@@ -2291,7 +2373,10 @@ class Runtime extends EventEmitter {
2291
2373
  this.logger.error({ err }, 'Cannot update service health heap, no worker updated')
2292
2374
  await this.#updateServiceConfigHealth(serviceId, currentHealth)
2293
2375
  } else {
2294
- this.logger.error({ err }, `Cannot update service health heap, updated workers: ${report.updated.length} out of ${currentWorkers}`)
2376
+ this.logger.error(
2377
+ { err },
2378
+ `Cannot update service health heap, updated workers: ${report.updated.length} out of ${currentWorkers}`
2379
+ )
2295
2380
  }
2296
2381
  report.success = false
2297
2382
  }
@@ -2318,7 +2403,10 @@ class Runtime extends EventEmitter {
2318
2403
  this.logger.error({ err }, 'Cannot start service workers, no worker started')
2319
2404
  await this.#updateServiceConfigWorkers(serviceId, currentWorkers)
2320
2405
  } else {
2321
- this.logger.error({ err }, `Cannot start service workers, started workers: ${report.started.length} out of ${workers}`)
2406
+ this.logger.error(
2407
+ { err },
2408
+ `Cannot start service workers, started workers: ${report.started.length} out of ${workers}`
2409
+ )
2322
2410
  await this.#updateServiceConfigWorkers(serviceId, currentWorkers + report.started.length)
2323
2411
  }
2324
2412
  report.success = false
@@ -2339,7 +2427,10 @@ class Runtime extends EventEmitter {
2339
2427
  if (report.stopped.length < 1) {
2340
2428
  this.logger.error({ err }, 'Cannot stop service workers, no worker stopped')
2341
2429
  } else {
2342
- this.logger.error({ err }, `Cannot stop service workers, stopped workers: ${report.stopped.length} out of ${workers}`)
2430
+ this.logger.error(
2431
+ { err },
2432
+ `Cannot stop service workers, stopped workers: ${report.stopped.length} out of ${workers}`
2433
+ )
2343
2434
  await this.#updateServiceConfigWorkers(serviceId, currentWorkers - report.stopped)
2344
2435
  }
2345
2436
  report.success = false
package/lib/start.js CHANGED
@@ -150,7 +150,7 @@ async function startCommand (args, throwAllErrors = false, returnRuntime = false
150
150
  const runtime = startResult.runtime
151
151
  const res = startResult.address
152
152
 
153
- closeWithGrace(async event => {
153
+ closeWithGrace({ delay: config.configManager.current.gracefulShutdown?.runtime ?? 10000 }, async event => {
154
154
  if (event.err instanceof Error) {
155
155
  console.error(event.err)
156
156
  }
@@ -178,8 +178,18 @@ async function main () {
178
178
  !!config.watch
179
179
  )
180
180
 
181
- process.on('uncaughtException', handleUnhandled.bind(null, app, 'uncaught exception'))
182
- process.on('unhandledRejection', handleUnhandled.bind(null, app, 'unhandled rejection'))
181
+ if (config.exitOnUnhandledErrors) {
182
+ process.on('uncaughtException', handleUnhandled.bind(null, app, 'uncaught exception'))
183
+ process.on('unhandledRejection', handleUnhandled.bind(null, app, 'unhandled rejection'))
184
+
185
+ process.on('newListener', event => {
186
+ if (event === 'uncaughtException' || event === 'unhandledRejection') {
187
+ globalThis.platformatic.logger.warn(
188
+ `A listener has been added for the "process.${event}" event. This listener will never be triggered, because Watt's default behavior kills the process first.\nTo disable this behavior, set "exitOnUnhandledErrors" to false in the runtime config.`
189
+ )
190
+ }
191
+ })
192
+ }
183
193
 
184
194
  await app.init()
185
195
 
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@platformatic/runtime",
3
- "version": "2.75.0-alpha.0",
3
+ "version": "2.75.0-alpha.1",
4
4
  "description": "",
5
5
  "main": "index.js",
6
6
  "bin": {
@@ -37,12 +37,12 @@
37
37
  "typescript": "^5.5.4",
38
38
  "undici-oidc-interceptor": "^0.5.0",
39
39
  "why-is-node-running": "^2.2.2",
40
- "@platformatic/composer": "2.75.0-alpha.0",
41
- "@platformatic/db": "2.75.0-alpha.0",
42
- "@platformatic/node": "2.75.0-alpha.0",
43
- "@platformatic/service": "2.75.0-alpha.0",
44
- "@platformatic/sql-graphql": "2.75.0-alpha.0",
45
- "@platformatic/sql-mapper": "2.75.0-alpha.0"
40
+ "@platformatic/composer": "2.75.0-alpha.1",
41
+ "@platformatic/db": "2.75.0-alpha.1",
42
+ "@platformatic/node": "2.75.0-alpha.1",
43
+ "@platformatic/service": "2.75.0-alpha.1",
44
+ "@platformatic/sql-graphql": "2.75.0-alpha.1",
45
+ "@platformatic/sql-mapper": "2.75.0-alpha.1"
46
46
  },
47
47
  "dependencies": {
48
48
  "@fastify/accepts": "^5.0.0",
@@ -76,14 +76,14 @@
76
76
  "undici": "^7.0.0",
77
77
  "undici-thread-interceptor": "^0.14.0",
78
78
  "ws": "^8.16.0",
79
- "@platformatic/basic": "2.75.0-alpha.0",
80
- "@platformatic/config": "2.75.0-alpha.0",
81
- "@platformatic/generators": "2.75.0-alpha.0",
82
- "@platformatic/metrics": "2.75.0-alpha.0",
83
- "@platformatic/itc": "2.75.0-alpha.0",
84
- "@platformatic/telemetry": "2.75.0-alpha.0",
85
- "@platformatic/ts-compiler": "2.75.0-alpha.0",
86
- "@platformatic/utils": "2.75.0-alpha.0"
79
+ "@platformatic/basic": "2.75.0-alpha.1",
80
+ "@platformatic/config": "2.75.0-alpha.1",
81
+ "@platformatic/generators": "2.75.0-alpha.1",
82
+ "@platformatic/metrics": "2.75.0-alpha.1",
83
+ "@platformatic/itc": "2.75.0-alpha.1",
84
+ "@platformatic/telemetry": "2.75.0-alpha.1",
85
+ "@platformatic/ts-compiler": "2.75.0-alpha.1",
86
+ "@platformatic/utils": "2.75.0-alpha.1"
87
87
  },
88
88
  "scripts": {
89
89
  "test": "pnpm run lint && borp --concurrency=1 --timeout=1200000 && tsd",
package/schema.json CHANGED
@@ -1,5 +1,5 @@
1
1
  {
2
- "$id": "https://schemas.platformatic.dev/@platformatic/runtime/2.75.0-alpha.0.json",
2
+ "$id": "https://schemas.platformatic.dev/@platformatic/runtime/2.75.0-alpha.1.json",
3
3
  "$schema": "http://json-schema.org/draft-07/schema#",
4
4
  "type": "object",
5
5
  "properties": {
@@ -960,6 +960,10 @@
960
960
  }
961
961
  ]
962
962
  },
963
+ "exitOnUnhandledErrors": {
964
+ "default": true,
965
+ "type": "boolean"
966
+ },
963
967
  "gracefulShutdown": {
964
968
  "type": "object",
965
969
  "properties": {