@ynode/cluster 1.1.0 → 1.2.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -62,6 +62,9 @@ The `run(startWorker, options)` function accepts the following options:
62
62
  | `scaleDownThreshold` | `number` | `10` | Event loop lag (ms) threshold to trigger scaling down. |
63
63
  | `scalingCooldown` | `number` | `10000` | Minimum time (ms) between scaling actions. |
64
64
  | `scaleDownGrace` | `number` | `30000` | Grace period (ms) after scaling up before scaling down is allowed. |
65
+ | `autoScaleInterval` | `number` | `5000` | Interval (ms) for auto-scaling checks in "smart" mode. |
66
+ | `shutdownSignals` | `string[]` | `['SIGINT', 'SIGTERM', 'SIGQUIT']` | Signals to listen for to trigger graceful shutdown. |
67
+ | `shutdownTimeout` | `number` | `10000` | Time (ms) to wait for workers to shut down before forced exit. |
65
68
 
66
69
  ## Working with @ynode/autoshutdown
67
70
 
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@ynode/cluster",
3
- "version": "1.1.0",
3
+ "version": "1.2.0",
4
4
  "description": "Smart, auto-scaling Node.js cluster manager that monitors event loop lag to optimize performance and resource usage.",
5
5
  "main": "src/cluster.js",
6
6
  "exports": {
package/src/cluster.js CHANGED
@@ -67,6 +67,9 @@ export function run(startWorker, options = true, log = console) {
67
67
  mode = "smart", // 'smart' or 'max'
68
68
  scalingCooldown = 10000,
69
69
  scaleDownGrace = 30000,
70
+ autoScaleInterval = 5000,
71
+ shutdownSignals = ["SIGINT", "SIGTERM", "SIGQUIT"],
72
+ shutdownTimeout = 10000,
70
73
  } = typeof options === "object" ? options : {};
71
74
 
72
75
  if (minWorkers > maxWorkers) {
@@ -208,34 +211,30 @@ export function run(startWorker, options = true, log = console) {
208
211
 
209
212
  return;
210
213
  }
211
- }, 5000).unref();
214
+ return;
215
+ }, autoScaleInterval).unref();
212
216
  }
213
217
 
214
218
  // Graceful shutdown handling for Master
215
- const signals = ["SIGINT", "SIGTERM", "SIGQUIT"];
216
-
217
- signals.forEach((signal) => {
218
- process.on(signal, () => {
219
- log.info(`Master received ${signal}, shutting down workers...`);
220
- isShuttingDown = true;
221
- for (const worker of Object.values(cluster.workers)) {
222
- if (worker && worker.isConnected()) {
223
- worker.send("shutdown");
219
+ if (Array.isArray(shutdownSignals) && shutdownSignals.length > 0) {
220
+ shutdownSignals.forEach((signal) => {
221
+ process.on(signal, () => {
222
+ log.info(`Master received ${signal}, shutting down workers...`);
223
+ isShuttingDown = true;
224
+ for (const worker of Object.values(cluster.workers)) {
225
+ if (worker && worker.isConnected()) {
226
+ worker.send("shutdown");
227
+ }
224
228
  }
225
- }
226
229
 
227
- // Allow some time for workers to clean up?
228
- // Ideally we wait for them to exit, but for now we just let the process exit eventually
229
- // or rely on the fact that existing "shutdown" message logic in worker handles close.
230
- // We can just exit the master after a timeout if we want to force it,
231
- // but usually letting workers exit will cause master to exit if all handles are closed.
232
- // For safety in this template, we'll force exit after a timeout.
233
- setTimeout(() => {
234
- log.info("Master force exiting after timeout.");
235
- process.exit(0);
236
- }, 10000).unref();
230
+ // Allow some time for workers to clean up
231
+ if (shutdownTimeout > 0) {
232
+ setTimeout(() => {
233
+ log.warn(`Master force exiting after ${shutdownTimeout}ms timeout.`);
234
+ process.exit(0);
235
+ }, shutdownTimeout).unref();
236
+ }
237
+ });
237
238
  });
238
- });
239
- }
240
-
241
-
239
+ }
240
+ }