defuss-express 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/README.md +244 -0
- package/dist/index.cjs +610 -0
- package/dist/index.d.cts +289 -0
- package/dist/index.d.mts +289 -0
- package/dist/index.mjs +596 -0
- package/package.json +62 -0
package/dist/index.cjs
ADDED
|
@@ -0,0 +1,610 @@
|
|
|
1
|
+
'use strict';
|
|
2
|
+
|
|
3
|
+
Object.defineProperty(exports, '__esModule', { value: true });
|
|
4
|
+
|
|
5
|
+
var cluster = require('node:cluster');
|
|
6
|
+
var ultimateExpress = require('ultimate-express');
|
|
7
|
+
var process = require('node:process');
|
|
8
|
+
var os = require('node:os');
|
|
9
|
+
var defussOpenTelemetry = require('defuss-open-telemetry');
|
|
10
|
+
var net = require('node:net');
|
|
11
|
+
|
|
12
|
+
const express = ultimateExpress;
|
|
13
|
+
|
|
14
|
+
// Return the first candidate in priority order; fail loudly when the pool is empty
// so callers never silently proxy to nothing.
const withFallback = (candidates) => {
  const [preferred] = candidates;
  if (!preferred) {
    throw new Error("defuss-express: no healthy backend candidates are available");
  }
  return preferred;
};
|
|
21
|
+
// Round-robin strategy: rotate the candidate list so the cursor position comes
// first, then let withFallback pick the head (and throw on an empty pool).
const roundRobinLoadBalancer = ({ candidates, previousIndex }) => {
  const size = candidates.length;
  const start = size === 0 ? 0 : previousIndex % size;
  const rotated = [...candidates.slice(start), ...candidates.slice(0, start)];
  return withFallback(rotated);
};
|
|
28
|
+
// Least-connections strategy: prefer the backend currently proxying the fewest
// sockets. Sorts a copy so the caller's array is never mutated.
const leastConnectionsLoadBalancer = ({ candidates }) => {
  const ranked = candidates.slice().sort(
    (a, b) => a.activeProxyConnections - b.activeProxyConnections
  );
  return withFallback(ranked);
};
|
|
36
|
+
// Compute a load score for a backend candidate: lower is better. Weights:
// 10 per active proxied connection, 2 per CPU percentage point, and up to
// 100 for a fully used heap. Missing stats contribute zero.
const scoreResourceAware = (candidate) => {
  const { stats, activeProxyConnections } = candidate;
  const cpu = stats?.cpuPercent ?? 0;
  let heapRatio = 0;
  if (stats && stats.heapTotalBytes > 0) {
    heapRatio = stats.heapUsedBytes / stats.heapTotalBytes;
  }
  return activeProxyConnections * 10 + cpu * 2 + heapRatio * 100;
};
|
|
42
|
+
// Resource-aware strategy: rank backends by their composite load score
// (connections + CPU + heap pressure) and pick the least loaded one.
const resourceAwareLoadBalancer = ({ candidates }) => {
  const ranked = candidates.slice().sort(
    (a, b) => scoreResourceAware(a) - scoreResourceAware(b)
  );
  return withFallback(ranked);
};
|
|
50
|
+
const defaultLoadBalancer = roundRobinLoadBalancer;
|
|
51
|
+
const pickBackend = async (context, loadBalancer) => {
|
|
52
|
+
const chooser = loadBalancer ?? defaultLoadBalancer;
|
|
53
|
+
return chooser(context);
|
|
54
|
+
};
|
|
55
|
+
|
|
56
|
+
// Default logger: forwards each level to the matching console method,
// resolving `console.<level>` at call time so test doubles still work.
const defaultLogger = {
  debug(...args) {
    console.debug(...args);
  },
  info(...args) {
    console.info(...args);
  },
  warn(...args) {
    console.warn(...args);
  },
  error(...args) {
    console.error(...args);
  }
};
|
|
62
|
+
// CPU count for worker sizing: prefer os.availableParallelism (Node >= 18.14,
// respects container limits) and fall back to the raw core count.
const availableCpus = () => {
  if (typeof os.availableParallelism === "function") {
    return os.availableParallelism();
  }
  return os.cpus().length;
};
|
|
63
|
+
// Build the baseline server configuration from environment variables and
// hard defaults. A fresh object is produced on every call so later merges
// never share state.
const fallbackConfig = () => {
  const env = process.env;
  return {
    host: env.HOST ?? env.LISTEN_HOST ?? "0.0.0.0",
    port: Number(env.PORT ?? env.LISTEN_PORT ?? 3000),
    workerHost: env.WORKER_HOST ?? "127.0.0.1",
    baseWorkerPort: Number(env.BASE_WORKER_PORT ?? 3001),
    workers: availableCpus(),
    requestInspectionTimeoutMs: 10,
    maxHeaderBytes: 16 * 1024,
    allowHalfOpen: true,
    workerHeartbeatIntervalMs: 60000,
    workerHeartbeatStaleAfterMs: 150000,
    gracefulShutdownTimeoutMs: 10000,
    respawnWorkers: true,
    loadBalancer: defaultLoadBalancer,
    logger: defaultLogger,
    workerEnv: {},
    installSignalHandlers: true,
    telemetry: defussOpenTelemetry.noopTelemetrySink
  };
};
|
|
82
|
+
// Module-level mutable config; seeded from environment defaults.
let currentConfig = fallbackConfig();

// Merge `next` over the current config over fresh defaults. `workers`,
// `logger`, `workerEnv`, `loadBalancer`, and `telemetry` get dedicated merge
// rules; everything else is a shallow spread (later sources win).
const resolveServerConfig = (next) => {
  const defaults = fallbackConfig();

  // "auto" resolves to the CPU count; numbers are clamped to at least 1.
  const requestedWorkers = next?.workers ?? currentConfig.workers ?? defaults.workers;
  const workers = requestedWorkers === "auto"
    ? availableCpus()
    : Math.max(1, Math.floor(requestedWorkers));

  // Loggers merge per-method so a partial logger keeps the default levels.
  const logger = {
    ...defaultLogger,
    ...(currentConfig.logger ?? {}),
    ...(next?.logger ?? {})
  };

  // Worker env accumulates across calls rather than being replaced wholesale.
  const workerEnv = {
    ...(currentConfig.workerEnv ?? {}),
    ...(next?.workerEnv ?? {})
  };

  return {
    ...defaults,
    ...currentConfig,
    ...next,
    workers,
    logger,
    workerEnv,
    loadBalancer: next?.loadBalancer ?? currentConfig.loadBalancer ?? defaults.loadBalancer,
    telemetry: next?.telemetry ?? currentConfig.telemetry ?? defaults.telemetry
  };
};
|
|
106
|
+
// Merge `next` into the module-level config and return the resolved result.
const setServerConfig = (next) => {
  const resolved = resolveServerConfig(next);
  currentConfig = resolved;
  return resolved;
};

// Read the current (already resolved) server configuration.
const getServerConfig = () => currentConfig;
|
|
111
|
+
|
|
112
|
+
// CRLF CRLF terminates an HTTP/1.x header section.
const HEADER_END = Buffer.from("\r\n\r\n");

// True once the buffered bytes contain a complete header section.
const hasCompleteHttpHeaders = (buffer) => buffer.indexOf(HEADER_END) !== -1;
|
|
114
|
+
// Parse the buffered prelude of an HTTP/1.x request into a summary object for
// load-balancing decisions.
//
// Bug fix: the previous implementation reduced over *every* CRLF-separated
// line after the request line, so any body bytes already buffered past the
// blank header terminator (e.g. "key: value" inside a POST body) were
// misparsed as request headers. Header parsing now stops at the first empty
// line, per RFC 9112 (the header section ends at CRLF CRLF).
//
// @param {Buffer} buffer - bytes collected so far (request line, headers,
//   possibly the start of the body).
// @param {string|undefined} remoteAddress - peer address from the socket.
// @param {number|undefined} remotePort - peer port from the socket.
// @returns an object with method/path/httpVersion (undefined when absent),
//   lower-cased header map, host shortcut, peer info, protocol tag
//   ("http1" when a request line was found, else "unknown"), and the raw
//   latin1-decoded head for diagnostics.
const parseRequestHead = (buffer, remoteAddress, remotePort) => {
  // latin1 keeps a 1:1 byte-to-char mapping, so no bytes are lost or merged.
  const rawHead = buffer.toString("latin1");
  const lines = rawHead.split("\r\n");
  const requestLine = lines[0] ?? "";
  const [method, path, httpVersionToken] = requestLine.split(" ");
  const httpVersion = httpVersionToken?.startsWith("HTTP/") ? httpVersionToken.slice("HTTP/".length) : void 0;
  const headers = {};
  for (const line of lines.slice(1)) {
    if (line.length === 0) {
      // Blank line: end of the header section — anything after is body.
      break;
    }
    const separatorIndex = line.indexOf(":");
    if (separatorIndex <= 0) {
      // No colon (or a leading colon): not a usable header line; skip it.
      continue;
    }
    const name = line.slice(0, separatorIndex).trim().toLowerCase();
    const value = line.slice(separatorIndex + 1).trim();
    if (name.length > 0) {
      // Later duplicates overwrite earlier ones (matches prior behavior).
      headers[name] = value;
    }
  }
  return {
    method,
    path,
    httpVersion,
    host: headers.host,
    headers,
    remoteAddress,
    remotePort,
    protocol: method && path ? "http1" : "unknown",
    rawHead
  };
};
|
|
144
|
+
|
|
145
|
+
// Mutable state for the cluster primary: known backends keyed by worker
// index, worker handles in both directions, and a one-shot signal flag.
const primaryRuntime = {
  mode: "primary",
  backends: new Map(),
  workerByIndex: new Map(),
  workerIndexById: new Map(),
  signalHandlersInstalled: false
};

// Mutable state for a worker process: the sockets it is currently serving
// and a one-shot signal flag.
const workerRuntime = {
  mode: "worker",
  activeConnections: new Set(),
  signalHandlersInstalled: false
};
|
|
157
|
+
|
|
158
|
+
// Millisecond wall-clock timestamp (primary-side alias).
const now$1 = () => Date.now();

// Snapshot the currently usable backends: ready, healthy, and with a
// heartbeat newer than the staleness threshold. Returns plain candidate
// records decoupled from the live backend objects.
const asCandidates = (config) => {
  const usable = [];
  for (const backend of primaryRuntime.backends.values()) {
    if (!backend.ready) {
      continue;
    }
    // A backend with no heartbeat yet is not considered stale.
    const stale = backend.lastHeartbeatAt
      ? now$1() - backend.lastHeartbeatAt > config.workerHeartbeatStaleAfterMs
      : false;
    const healthy = backend.healthy && !stale;
    if (!healthy) {
      continue;
    }
    usable.push({
      id: backend.id,
      workerIndex: backend.workerIndex,
      host: backend.host,
      port: backend.port,
      pid: backend.pid,
      ready: backend.ready,
      healthy,
      lastHeartbeatAt: backend.lastHeartbeatAt,
      activeProxyConnections: backend.activeProxyConnections,
      stats: backend.stats
    });
  }
  return usable;
};
|
|
174
|
+
// Promise-based sleep for `ms` milliseconds.
const waitFor = (ms) => new Promise((done) => setTimeout(done, ms));
|
|
175
|
+
// Buffer the first bytes of a fresh client socket until a full HTTP header
// section arrives, the size cap is hit, the inspection timeout fires, or the
// socket ends/errors — whichever comes first. The socket is left paused so
// no data is lost before it is piped to a backend; resolves with the bytes
// read so far plus a parsed request head.
const collectPrelude = async (socket, config) => {
  socket.pause();
  return new Promise((resolve) => {
    const received = [];
    let receivedBytes = 0;
    let buffered = Buffer.alloc(0);
    let done = false;

    // Inspection deadline: resolve with whatever has arrived so far.
    const timer = setTimeout(() => settle(), config.requestInspectionTimeoutMs);
    timer.unref();

    const removeListeners = () => {
      clearTimeout(timer);
      socket.off("data", handleData);
      socket.off("close", handleEnd);
      socket.off("end", handleEnd);
      socket.off("error", handleError);
    };

    // Idempotent resolution: detach everything, re-pause, hand back the bytes.
    function settle() {
      if (done) {
        return;
      }
      done = true;
      removeListeners();
      socket.pause();
      resolve({
        buffered,
        request: parseRequestHead(buffered, socket.remoteAddress, socket.remotePort)
      });
    }

    function handleData(chunk) {
      received.push(chunk);
      receivedBytes += chunk.length;
      buffered = Buffer.concat(received);
      if (receivedBytes >= config.maxHeaderBytes || hasCompleteHttpHeaders(buffered)) {
        settle();
      }
    }

    function handleEnd() {
      settle();
    }

    function handleError() {
      settle();
    }

    socket.on("data", handleData);
    socket.once("close", handleEnd);
    socket.once("end", handleEnd);
    socket.once("error", handleError);
    socket.resume();
  });
};
|
|
220
|
+
// Apply a worker IPC message to the primary's backend table. Ready/stats
// messages mark the backend healthy and refresh its heartbeat; a
// "worker-stopping" message marks it unavailable. The healthy-backend gauge
// is refreshed after every update.
const updateBackendFromWorkerMessage = (message, config) => {
  const worker = primaryRuntime.workerByIndex.get(message.workerIndex);
  // Start from the known entry, or a fresh unhealthy skeleton for this index.
  const backend = primaryRuntime.backends.get(message.workerIndex) ?? {
    id: `worker-${message.workerIndex}`,
    workerIndex: message.workerIndex,
    host: config.workerHost,
    port: message.port,
    pid: message.pid,
    ready: false,
    healthy: false,
    activeProxyConnections: 0
  };
  const stopping = message.type === "defuss:worker-stopping";
  const next = stopping
    ? {
        ...backend,
        worker,
        ready: false,
        healthy: false,
        pid: message.pid,
        lastHeartbeatAt: now$1()
      }
    : {
        ...backend,
        worker,
        pid: message.pid,
        port: message.port,
        host: config.workerHost,
        ready: true,
        healthy: true,
        lastHeartbeatAt: now$1(),
        stats: message.stats
      };
  primaryRuntime.backends.set(message.workerIndex, next);
  config.telemetry.setGauge("healthy_backends", asCandidates(config).length);
};
|
|
257
|
+
// Fork one cluster worker for slot `index`, record it in both lookup maps,
// and wire its IPC messages into the backend table. The worker inherits the
// parent env plus config.workerEnv plus its slot index.
const forkWorker = (index, config) => {
  const childEnv = {
    ...process.env,
    ...config.workerEnv,
    DEFUSS_WORKER_INDEX: String(index)
  };
  const worker = cluster.fork(childEnv);
  primaryRuntime.workerByIndex.set(index, worker);
  primaryRuntime.workerIndexById.set(worker.id, index);
  worker.on("message", (message) => {
    // Only structured envelopes with a `type` field are ours; ignore the rest.
    const isEnvelope = Boolean(message) && typeof message === "object" && "type" in message;
    if (isEnvelope) {
      updateBackendFromWorkerMessage(message, config);
    }
  });
  return worker;
};
|
|
273
|
+
// Install SIGINT/SIGTERM handlers on the primary that trigger `stop`, at most
// once per process, and only when the config allows it.
const installSignalHandlers$1 = (stop, config) => {
  if (!config.installSignalHandlers) {
    return;
  }
  if (primaryRuntime.signalHandlersInstalled) {
    return;
  }
  primaryRuntime.signalHandlersInstalled = true;
  const onSignal = () => {
    // Fire-and-forget: shutdown resolution is tracked by stopPrimaryRuntime.
    void stop();
  };
  process.on("SIGINT", onSignal);
  process.on("SIGTERM", onSignal);
};
|
|
285
|
+
// Poll every 25ms until at least one backend reports healthy, giving up after
// max(5s, gracefulShutdownTimeoutMs) and throwing on startup failure.
const waitForAtLeastOneReadyBackend = async (config) => {
  const deadline = now$1() + Math.max(5e3, config.gracefulShutdownTimeoutMs);
  do {
    if (asCandidates(config).length > 0) {
      return;
    }
    await waitFor(25);
  } while (now$1() < deadline);
  throw new Error("defuss-express: no worker became ready during startup");
};
|
|
295
|
+
// Proxy one client socket to a chosen backend worker.
// Flow: pick a backend from the current healthy candidates, bump its
// connection count, open a TCP connection to it, replay the already-buffered
// prelude bytes, then pipe both directions until either side closes.
// NOTE(review): client.destroy(err) emits 'error' on the client socket; at
// this point no 'error' listener has been attached yet, which would surface
// as an uncaught exception — confirm the server-side socket has a handler.
const connectToBackend = async (client, config, request, buffered, previousIndexRef) => {
  const candidates = asCandidates(config);
  if (candidates.length === 0) {
    client.destroy(new Error("defuss-express: no healthy backends are available"));
    return;
  }
  // Delegate backend selection to the configured (possibly async) strategy.
  const backend = await pickBackend(
    {
      candidates,
      request,
      socket: client,
      previousIndex: previousIndexRef.value
    },
    config.loadBalancer
  );
  // Advance the shared round-robin cursor regardless of strategy used.
  previousIndexRef.value += 1;
  const entry = primaryRuntime.backends.get(backend.workerIndex);
  if (!entry) {
    // Backend was removed between snapshot and selection (e.g. worker exit).
    client.destroy(new Error("defuss-express: selected backend disappeared"));
    return;
  }
  entry.activeProxyConnections += 1;
  let released = false;
  // Decrement the backend's connection count exactly once, no matter how many
  // of the close/error paths below fire.
  const release = () => {
    if (released) {
      return;
    }
    released = true;
    entry.activeProxyConnections = Math.max(0, entry.activeProxyConnections - 1);
  };
  const upstream = net.connect({
    host: entry.host,
    port: entry.port,
    allowHalfOpen: config.allowHalfOpen
  });
  // Error path: tear down both sides and release the slot.
  const cleanup = () => {
    release();
    client.destroy();
    upstream.destroy();
  };
  upstream.once("connect", () => {
    // Disable Nagle on both legs to keep proxy latency low.
    client.setNoDelay(true);
    upstream.setNoDelay(true);
    // Replay the prelude bytes consumed during request inspection.
    if (buffered.length > 0) {
      upstream.write(buffered);
    }
    client.pipe(upstream);
    upstream.pipe(client);
    // The client was left paused by collectPrelude; start flowing again.
    client.resume();
  });
  upstream.once("error", cleanup);
  upstream.once("close", () => {
    // Backend finished (or died): release the slot and drop the client.
    release();
    client.destroy();
  });
  client.once("error", cleanup);
  client.once("close", () => {
    // Client went away: release the slot and drop the backend leg.
    release();
    upstream.destroy();
  });
};
|
|
356
|
+
// Create and start the primary's TCP load balancer. Each accepted socket is
// inspected (prelude collection + request-head parse) and then proxied to a
// backend worker. Resolves with the listening net.Server, or rejects if the
// listen call fails.
const createLoadBalancer = (config) => {
  // Shared round-robin cursor across all connections handled by this server.
  const previousIndexRef = { value: 0 };

  const handleConnection = async (client) => {
    config.telemetry.incrementCounter("connections");
    try {
      const startedAt = performance.now();
      const { buffered, request } = await collectPrelude(client, config);
      config.telemetry.recordHistogram("prelude_duration_ms", performance.now() - startedAt);
      await connectToBackend(client, config, request, buffered, previousIndexRef);
    } catch (error) {
      config.logger.error("[defuss-express] load balancer connection error", error);
      config.telemetry.incrementCounter("proxy_errors");
      client.destroy();
    }
  };

  const listenServer = net.createServer(
    { allowHalfOpen: config.allowHalfOpen },
    handleConnection
  );

  return new Promise((resolve, reject) => {
    listenServer.once("error", reject);
    listenServer.listen(config.port, config.host, () => {
      // Listening succeeded; stop treating server errors as startup failures.
      listenServer.off("error", reject);
      config.logger.info(
        `[defuss-express] primary ${process.pid} listening on tcp://${config.host}:${config.port}`
      );
      resolve(listenServer);
    });
  });
};
|
|
387
|
+
// Gracefully stop the primary: close the load-balancer server and SIGTERM all
// workers. Idempotent — concurrent calls share one stop promise. Resolves when
// the server closes or after the graceful-shutdown timeout, whichever is first.
const stopPrimaryRuntime = async (config = getServerConfig()) => {
  if (primaryRuntime.stopPromise) {
    return primaryRuntime.stopPromise;
  }
  if (!primaryRuntime.listenServer) {
    // Nothing was started; nothing to stop.
    return;
  }
  primaryRuntime.stopPromise = new Promise((resolve) => {
    // Safety valve: resolve even if the server never finishes closing.
    const deadline = setTimeout(resolve, config.gracefulShutdownTimeoutMs);
    deadline.unref();
    const finish = () => {
      clearTimeout(deadline);
      resolve();
    };
    try {
      primaryRuntime.listenServer?.close(finish);
    } catch {
      finish();
    }
    for (const worker of primaryRuntime.workerByIndex.values()) {
      worker.kill("SIGTERM");
    }
  });
  return primaryRuntime.stopPromise;
};
|
|
412
|
+
// Start the primary runtime: fork the configured number of workers, watch for
// worker exits (respawning unless shutting down), wait for the first healthy
// backend, then bring up the TCP load balancer. Idempotent via a shared start
// promise. Resolves with a summary of the primary's listen state.
const startPrimaryRuntime = async (_app, config) => {
  if (primaryRuntime.startPromise) {
    return primaryRuntime.startPromise;
  }

  // Drop bookkeeping for a dead worker and optionally respawn into its slot.
  const handleWorkerExit = (worker) => {
    const index = primaryRuntime.workerIndexById.get(worker.id);
    if (index === undefined) {
      return;
    }
    primaryRuntime.workerIndexById.delete(worker.id);
    primaryRuntime.workerByIndex.delete(index);
    primaryRuntime.backends.delete(index);
    if (config.respawnWorkers && !primaryRuntime.stopPromise) {
      config.logger.warn(
        `[defuss-express] worker ${worker.process.pid ?? "unknown"} exited, respawning index ${index}`
      );
      forkWorker(index, config);
    }
  };

  primaryRuntime.startPromise = (async () => {
    for (let index = 0; index < config.workers; index += 1) {
      forkWorker(index, config);
    }
    cluster.on("exit", handleWorkerExit);
    await waitForAtLeastOneReadyBackend(config);
    primaryRuntime.listenServer = await createLoadBalancer(config);
    installSignalHandlers$1(() => stopPrimaryRuntime(config), config);
    return {
      mode: "primary",
      pid: process.pid,
      host: config.host,
      port: config.port,
      workers: config.workers
    };
  })();
  return primaryRuntime.startPromise;
};
|
|
448
|
+
|
|
449
|
+
// Millisecond wall-clock timestamp (worker-side alias).
const now = () => Date.now();

// Worker slot index, injected by the primary via DEFUSS_WORKER_INDEX
// (defaults to 0 when unset).
const getWorkerIndex = () => {
  const raw = process.env.DEFUSS_WORKER_INDEX;
  return Number(raw ?? 0);
};

// Each worker listens on baseWorkerPort offset by its slot index.
const getWorkerPort = (config) => config.baseWorkerPort + getWorkerIndex();
|
|
452
|
+
// Snapshot this worker's memory usage, uptime, and connection count as a
// plain serializable object. cpuPercent is 0 here; the stats reporter
// overwrites it with a sampled value.
const serializeWorkerStats = () => {
  const { rss, heapUsed, heapTotal, external, arrayBuffers } = process.memoryUsage();
  return {
    pid: process.pid,
    cpuPercent: 0,
    rssBytes: rss,
    heapUsedBytes: heapUsed,
    heapTotalBytes: heapTotal,
    externalBytes: external,
    arrayBuffersBytes: arrayBuffers,
    uptimeMs: process.uptime() * 1e3,
    activeConnections: workerRuntime.activeConnections.size,
    sampledAt: now()
  };
};
|
|
467
|
+
// Build a stateful CPU sampler. Each call to the returned function reports
// this process's CPU usage as a percentage of wall time since the previous
// call (user + system combined; can exceed 100 on multi-core work).
const createCpuSampler = () => {
  let lastUsage = process.cpuUsage();
  let lastTick = process.hrtime.bigint();
  return () => {
    const delta = process.cpuUsage(lastUsage);
    const tick = process.hrtime.bigint();
    const elapsedMicros = Number(tick - lastTick) / 1e3;
    lastUsage = process.cpuUsage();
    lastTick = tick;
    if (elapsedMicros <= 0) {
      return 0;
    }
    return ((delta.user + delta.system) / elapsedMicros) * 100;
  };
};
|
|
483
|
+
// Build the worker's IPC reporter: announces readiness, heartbeats with
// fresh stats, a stopping notice, and a recurring (unref'd) heartbeat loop.
const createStatsReporter = (config, port) => {
  const sampleCpu = createCpuSampler();

  // Assemble a stats envelope of the given type and record telemetry.
  const buildMessage = (type) => {
    const cpuPercent = sampleCpu();
    const stats = {
      ...serializeWorkerStats(),
      cpuPercent
    };
    config.telemetry.setGauge("active_connections", stats.activeConnections);
    config.telemetry.recordHistogram("cpu_percent", cpuPercent);
    return {
      type,
      workerIndex: getWorkerIndex(),
      port,
      pid: process.pid,
      stats
    };
  };

  // process.send only exists in forked children; no-op otherwise.
  const send = (message) => process.send?.(message);

  return {
    announceReady: () => send(buildMessage("defuss:worker-ready")),
    announceHeartbeat: () => send(buildMessage("defuss:worker-stats")),
    announceStopping: () => send({
      type: "defuss:worker-stopping",
      workerIndex: getWorkerIndex(),
      port,
      pid: process.pid
    }),
    startHeartbeatLoop: () => {
      workerRuntime.heartbeatTimer = setInterval(
        () => send(buildMessage("defuss:worker-stats")),
        config.workerHeartbeatIntervalMs
      );
      // Don't let the heartbeat keep the worker process alive on its own.
      workerRuntime.heartbeatTimer.unref();
    }
  };
};
|
|
518
|
+
// Track a live server socket so graceful shutdown can destroy it; the entry
// is removed as soon as the socket closes or errors.
const registerConnectionTracking = (socket) => {
  workerRuntime.activeConnections.add(socket);
  const forget = () => {
    workerRuntime.activeConnections.delete(socket);
  };
  socket.once("close", forget);
  socket.once("error", forget);
};
|
|
526
|
+
// Gracefully stop this worker: stop heartbeating, destroy tracked sockets,
// and close the HTTP server. Idempotent via a shared stop promise; resolves
// when the server closes or after the graceful-shutdown timeout.
const stopWorkerRuntime = async (config = getServerConfig()) => {
  if (workerRuntime.stopPromise) {
    return workerRuntime.stopPromise;
  }
  if (!workerRuntime.appServer) {
    // Never started; nothing to tear down.
    return;
  }
  workerRuntime.stopPromise = new Promise((resolve) => {
    if (workerRuntime.heartbeatTimer) {
      clearInterval(workerRuntime.heartbeatTimer);
      workerRuntime.heartbeatTimer = void 0;
    }
    // Force-close in-flight sockets so server.close() can complete.
    for (const socket of workerRuntime.activeConnections) {
      socket.destroy();
    }
    const timeout = setTimeout(resolve, config.gracefulShutdownTimeoutMs);
    timeout.unref();
    const finish = () => {
      clearTimeout(timeout);
      resolve();
    };
    try {
      workerRuntime.appServer?.close(finish);
    } catch {
      finish();
    }
  });
  return workerRuntime.stopPromise;
};
|
|
555
|
+
// Install SIGINT/SIGTERM handlers on the worker: announce stopping to the
// primary, drain, then exit 0. Installed at most once per process and only
// when the config allows it.
const installSignalHandlers = (reporter, config) => {
  if (!config.installSignalHandlers) {
    return;
  }
  if (workerRuntime.signalHandlersInstalled) {
    return;
  }
  workerRuntime.signalHandlersInstalled = true;
  const handleSignal = () => {
    reporter.announceStopping();
    void stopWorkerRuntime(config).finally(() => process.exit(0));
  };
  process.on("SIGINT", handleSignal);
  process.on("SIGTERM", handleSignal);
};
|
|
567
|
+
// Start this worker: bind the Express app on its slot-derived port, begin
// connection tracking and the heartbeat loop, install signal handlers, and
// announce readiness to the primary. Resolves with a worker summary once the
// server is listening.
const startWorkerRuntime = async (app, config) => {
  const port = getWorkerPort(config);
  const reporter = createStatsReporter(config, port);
  return new Promise((resolve) => {
    const handleListening = () => {
      config.logger.info(
        `[defuss-express] worker ${process.pid} listening on http://${config.workerHost}:${port}`
      );
      reporter.announceReady();
      reporter.announceHeartbeat();
      resolve({
        mode: "worker",
        pid: process.pid,
        host: config.workerHost,
        port,
        workerIndex: getWorkerIndex(),
        workerPort: port
      });
    };
    const server = app.listen(port, config.workerHost, handleListening);
    workerRuntime.appServer = server;
    // Optional chaining: tolerate server objects without an `on` method.
    server.on?.("connection", registerConnectionTracking);
    reporter.startHeartbeatLoop();
    installSignalHandlers(reporter, config);
  });
};
|
|
592
|
+
|
|
593
|
+
// Persist an explicit config override, or re-resolve the current one.
const normalizeConfig = (next) => (next ? setServerConfig(next) : resolveServerConfig());

// Entry point: run as the cluster primary (load balancer + worker pool) or,
// inside a forked worker, as an HTTP backend for the given app.
const startServer = async (app, nextConfig) => {
  const config = normalizeConfig(nextConfig);
  if (cluster.isPrimary) {
    return startPrimaryRuntime(app, config);
  }
  return startWorkerRuntime(app, config);
};

// Stop whichever runtime this process is running.
const stopServer = async () => {
  const config = getServerConfig();
  return cluster.isPrimary ? stopPrimaryRuntime(config) : stopWorkerRuntime(config);
};
|
|
599
|
+
|
|
600
|
+
// Public CommonJS surface: the ultimate-express re-export (under three names
// for interop) plus config accessors, balancers, and lifecycle functions.
exports.default = express;
Object.assign(exports, {
  defaultLoadBalancer,
  express,
  expressDefault: express,
  getServerConfig,
  leastConnectionsLoadBalancer,
  resourceAwareLoadBalancer,
  roundRobinLoadBalancer,
  setServerConfig,
  startServer,
  stopServer
});
|