svelte-adapter-uws 0.4.14 → 0.5.0-next.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +127 -2
- package/client.d.ts +20 -4
- package/client.js +63 -13
- package/files/handler.js +240 -5
- package/files/utils.js +112 -0
- package/index.d.ts +221 -5
- package/index.js +2 -1
- package/package.json +16 -1
- package/plugins/dedup/server.d.ts +79 -0
- package/plugins/dedup/server.js +156 -0
- package/plugins/lock/server.d.ts +63 -0
- package/plugins/lock/server.js +121 -0
- package/plugins/session/server.d.ts +76 -0
- package/plugins/session/server.js +170 -0
- package/testing.js +13 -4
- package/vite.js +1 -1
package/README.md
CHANGED
````diff
@@ -887,6 +887,13 @@ sub.on('message', (channel, payload) => {
 });
 ```
 
+Every published frame is also stamped with a monotonic per-topic `seq` field in the envelope (first publish to a topic is `seq: 1`, then 2, 3, ...). Reconnecting clients can use this to detect dropped frames and resume from where they left off. Pass `{ seq: false }` to skip stamping for ephemeral or high-cardinality topics where the counter map would grow unbounded:
+
+```js
+// Skip seq for per-user cursor topics: counter map would grow with users
+platform.publish(`cursor:${userId}`, 'move', pos, { seq: false });
+```
+
 ```js
 // src/routes/todos/+page.server.js
 export const actions = {
````
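The hunk adds the stamping side only; a gap detector is left to the client. The contract above is enough to sketch one -- here `on('todos')` is the adapter's client store from earlier in the README, while the detector itself and `refetchTodos()` are illustrative, not adapter API:

```js
// Sketch: spot dropped frames using the per-topic seq.
// `refetchTodos()` is a hypothetical app-level recovery function.
let lastSeq = 0;

on('todos').subscribe((msg) => {
  if (!msg || msg.seq == null) return; // publisher opted out via { seq: false }
  if (lastSeq > 0 && msg.seq > lastSeq + 1) {
    // Frames lastSeq+1 .. msg.seq-1 never arrived, e.g. across a reconnect.
    refetchTodos();
  }
  lastSeq = Math.max(lastSeq, msg.seq);
});
```

Per the clustering caveat later in this diff, the counter is worker-local, so a detector like this is only gap-accurate on a single worker or with the Redis Lua INCR variant.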
````diff
@@ -953,6 +960,40 @@ export function message(ws, { data, platform }) {
 }
 ```
 
+### `platform.sendCoalesced(ws, { key, topic, event, data })`
+
+Send a message to a single connection with **coalesce-by-key** semantics. Each `(connection, key)` pair holds at most one pending message; if a newer call for the same `key` arrives before the previous frame drains to the wire, the older value is replaced in place.
+
+Use this for latest-value streams where intermediate values are noise -- price ticks, cursor positions, presence state, typing indicators, scroll position. Under load, this is the difference between the client lagging by a thousand stale frames and the client always seeing the most recent value.
+
+For at-least-once delivery use `platform.send()` or `platform.publish()` instead. `sendCoalesced` is explicitly drop-the-middle, keep-the-latest.
+
+```js
+// src/hooks.ws.js - cursor positions during a collaborative edit
+export function message(ws, { data, platform }) {
+  const msg = JSON.parse(Buffer.from(data).toString());
+  if (msg.event === 'cursor') {
+    const { docId, userId } = ws.getUserData();
+    // Coalesce per (connection, user) - one pending cursor frame per peer.
+    // High-frequency mousemove updates collapse cleanly under backpressure.
+    for (const peer of getPeersOf(docId)) {
+      platform.sendCoalesced(peer, {
+        key: 'cursor:' + userId,
+        topic: 'doc:' + docId,
+        event: 'cursor',
+        data: { userId, x: msg.data.x, y: msg.data.y }
+      });
+    }
+  }
+}
+```
+
+Three properties worth knowing:
+
+- **Latest value wins.** `set` on an existing key replaces the value but keeps the original slot, so coalescing one key never reorders the rest of the queue.
+- **Lazy serialization.** `data` is held as-is in the per-connection buffer and only `JSON.stringify`'d at flush time. A stream that overwrites the same key 1000 times before a single drain pays one serialization, not 1000.
+- **Auto-resume on drain.** When `maxBackpressure` is hit, pumping stops and resumes on the next uWS drain event automatically. No manual flow control.
+
 ### `platform.sendTo(filter, topic, event, data)`
 
 Send a message to all connections whose `userData` matches a filter function. Returns the number of connections the message was sent to.
````
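The "latest value wins" bullet leans on a standard `Map` property worth seeing in isolation: `set()` on an existing key replaces the value without moving the entry. A minimal model of the pending buffer's ordering guarantee (a sketch, not the adapter's internals):

```js
// Map.set on an existing key keeps the entry's original slot, so
// coalescing one key never reorders the other pending messages.
const pending = new Map();

pending.set('cursor:alice', { x: 1 });
pending.set('price:BTC', { usd: 64000 });
pending.set('cursor:alice', { x: 9 }); // overwritten in place, slot 1 kept

console.log([...pending.keys()]);         // ['cursor:alice', 'price:BTC']
console.log(pending.get('cursor:alice')); // { x: 9 } - the only value serialized at flush
```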
````diff
@@ -1006,6 +1047,77 @@ export async function GET({ platform, params }) {
 }
 ```
 
+### `platform.pressure` and `platform.onPressure(cb)`
+
+Worker-local backpressure signal. The adapter samples once per second (configurable) and reports the most urgent active stress as a single `reason` enum, so user code can degrade with intent instead of generic panic.
+
+```js
+platform.pressure
+// {
+//   active: false,
+//   subscriberRatio: 12.4, // total subscriptions / connections, on this worker
+//   publishRate: 240,      // platform.publish() calls/sec, last sample
+//   memoryMB: 128,         // process.memoryUsage().rss in MB
+//   reason: 'NONE'         // 'NONE' | 'PUBLISH_RATE' | 'SUBSCRIBERS' | 'MEMORY'
+// }
+```
+
+Reading `platform.pressure` is a property access -- safe in hot paths, no I/O. Use it for synchronous shed decisions in request handlers:
+
+```js
+// src/routes/api/heavy-write/+server.js
+export async function POST({ platform, request }) {
+  if (platform.pressure.reason === 'MEMORY') {
+    return new Response('Try again shortly', { status: 503 });
+  }
+  // ... normal write path
+}
+```
+
+`platform.onPressure(cb)` fires only on **transitions** (when `reason` changes between samples), not on every tick. Returns an unsubscribe function:
+
+```js
+// src/hooks.ws.js - notify the connected client when pressure state changes
+export function open(ws, { platform }) {
+  const off = platform.onPressure(({ reason, active }) => {
+    platform.send(ws, '__pressure', reason, { active });
+  });
+  ws.getUserData().__offPressure = off;
+}
+
+export function close(ws) {
+  ws.getUserData().__offPressure?.();
+}
+```
+
+**Reason precedence is fixed:** `MEMORY > PUBLISH_RATE > SUBSCRIBERS`. A worker under multiple stresses reports the most urgent one. Memory wins because the worker is approaching OOM and nothing else matters; publish rate is next because CPU saturation cascades fastest; subscriber ratio is last because heavy fan-out degrades gracefully.
+
+**Thresholds are configurable per-deployment.** Defaults are conservative -- a healthy small app should never trip them in steady state. Override via `WebSocketOptions.pressure`:
+
+```js
+// svelte.config.js
+import adapter from 'svelte-adapter-uws';
+
+export default {
+  kit: {
+    adapter: adapter({
+      websocket: {
+        pressure: {
+          memoryHeapUsedRatio: 0.9, // default 0.85
+          publishRatePerSec: 50000, // default 10000
+          subscriberRatio: false,   // disable this signal
+          sampleIntervalMs: 500     // default 1000; clamped to >=100
+        }
+      }
+    })
+  }
+};
+```
+
+Set any individual threshold to `false` to disable that signal. `sampleIntervalMs` is clamped to a minimum of 100 ms.
+
+> **Clustering:** `platform.pressure` is per-worker. Each worker samples its own counters and reports its own snapshot. There is no aggregate "cluster pressure" -- a hot worker should shed its own load without waiting for the rest of the cluster.
+
 ### `platform.topic(name)` - scoped helper
 
 Reduces repetition when publishing multiple events to the same topic:
````
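The transition callback also suits degrading producers, not just shedding requests. A sketch, assuming `platform` has been captured in scope (e.g. from a hook) and `readPrices()` is an app-level helper -- neither the loop nor the `prices` topic is adapter API:

```js
// Slow a periodic broadcast under publish-rate pressure instead of
// letting the worker saturate; restore the fast cadence once it clears.
let tickMs = 100;

platform.onPressure(({ reason }) => {
  tickMs = reason === 'PUBLISH_RATE' ? 1000 : 100;
});

async function broadcastPrices() {
  for (;;) {
    // { seq: false }: a high-frequency ticker gains nothing from gap detection
    platform.publish('prices', 'tick', await readPrices(), { seq: false });
    await new Promise((resolve) => setTimeout(resolve, tickMs));
  }
}
```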
````diff
@@ -2900,13 +3012,26 @@ Every message sent through `platform.publish()` or `platform.topic().created()`
 {
   "topic": "todos",
   "event": "created",
-  "data": { "id": 1, "text": "Buy milk", "done": false }
+  "data": { "id": 1, "text": "Buy milk", "done": false },
+  "seq": 42
 }
 ```
 
+The `seq` field is a monotonic per-topic sequence number stamped automatically on every `platform.publish()`. The first publish to a topic sends `seq: 1`, the next `seq: 2`, and so on; each topic has its own counter. Reconnecting clients can use the seq to detect dropped frames and resume from where they left off. Pass `{ seq: false }` to skip stamping when you don't care about gap detection or when topic cardinality is unbounded:
+
+```js
+// Standard publish - seq stamped automatically
+platform.publish('chat', 'message', msg);
+
+// Opt out for ephemeral or high-cardinality topics
+platform.publish(`cursor:${userId}`, 'move', pos, { seq: false });
+```
+
+> **Clustering:** the per-topic counter is worker-local. Each worker stamps its own publishes; relayed messages from other workers pass through with the originating worker's seq. For cluster-wide monotonic seq across all workers, wire up the Redis Lua INCR variant from the extensions package.
+
 The client store parses this automatically. When you use `on('todos')`, the store value is:
 ```js
-{ topic: 'todos', event: 'created', data: { id: 1, text: 'Buy milk', done: false } }
+{ topic: 'todos', event: 'created', data: { id: 1, text: 'Buy milk', done: false }, seq: 42 }
 ```
 
 When you use `on('todos', 'created')`, you get the payload wrapped in `{ data }`:
````
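`envelopePrefix` and `completeEnvelope` live in `package/files/utils.js`, which this diff lists (+112 lines) but does not show. From the JSON above and the handler changes below, the assembly is plausibly shaped like this -- a sketch, not the shipped source:

```js
// Assumed: envelopePrefix(topic, event) yields something like
//   '{"topic":"todos","event":"created","data":'
// completeEnvelope then closes the frame, appending seq only when stamped.
function completeEnvelope(prefix, data, seq) {
  let frame = prefix + JSON.stringify(data ?? null);
  if (seq != null) frame += ',"seq":' + seq;
  return frame + '}';
}

completeEnvelope('{"topic":"todos","event":"created","data":', { id: 1 }, 42);
// => '{"topic":"todos","event":"created","data":{"id":1},"seq":42}'
```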
package/client.d.ts
CHANGED
````diff
@@ -17,14 +17,20 @@ export interface ConnectOptions {
 
   /**
    * Base delay in ms before reconnecting after a disconnect.
-   *
+   * The actual delay grows as `base * 2.2^attempt` with a +/- 25%
+   * jitter, capped at `maxReconnectInterval`.
    * @default 3000
    */
   reconnectInterval?: number;
 
   /**
-   * Maximum delay in ms between reconnection attempts.
-   *
+   * Maximum delay in ms between reconnection attempts. Once the
+   * exponential curve hits this cap it stays there until the
+   * connection succeeds. The default 5 minute cap is long enough
+   * that 10K clients hammering a recovering server don't sustain the
+   * outage, short enough that a recovered server picks up its
+   * clients within a coffee break.
+   * @default 300000
    */
   maxReconnectInterval?: number;
 
````
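With the documented defaults the curve is easy to tabulate. A quick check of the "hits the cap by attempt 6" claim from the client.js changes below (pre-jitter values; each is then multiplied by a factor in [0.75, 1.25)):

```js
// Pre-jitter delay schedule: base 3000 ms, cap 300000 ms, 2.2x growth.
for (let attempt = 0; attempt <= 6; attempt++) {
  const capped = Math.min(3000 * Math.pow(2.2, attempt), 300000);
  console.log(attempt, Math.round(capped));
}
// 0 3000
// 1 6600
// 2 14520
// 3 31944
// 4 70277
// 5 154609
// 6 300000  (3000 * 2.2^6 ≈ 340140 ms, clamped to the 5 minute cap)
```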
````diff
@@ -92,6 +98,16 @@ export interface WSEvent<T = unknown> {
   event: string;
   /** The event payload. */
   data: T;
+  /**
+   * Monotonic per-topic sequence number stamped by the server on every
+   * `platform.publish()` (omitted when the publisher opts out via
+   * `{ seq: false }`). Each topic has an independent counter starting
+   * at 1.
+   *
+   * Worker-local in clustered mode unless an extension provides a
+   * cluster-wide source of truth (e.g. Redis Lua INCR).
+   */
+  seq?: number;
 }
 
 // -- Scannable store ----------------------------------------------------------
@@ -335,7 +351,7 @@ export function once<T = unknown>(topic: string, event: string, options?: { time
  * the new topic and the old one is released.
  *
  * Useful when the topic depends on runtime state like a user ID, selected item,
- * or route parameter
+ * or route parameter - no manual subscribe/unsubscribe lifecycle to manage.
  *
  * @example
  * ```svelte
````
package/client.js
CHANGED
````diff
@@ -498,6 +498,62 @@ const THROTTLE_CLOSE_CODES = new Set([
   4429, // Rate limited (custom)
 ]);
 
+/**
+ * Classify a WebSocket close code into one of three reconnect behaviors.
+ *
+ * - `'TERMINAL'`: the server has permanently rejected this client.
+ *   Reconnecting would be pointless. The client store transitions to a
+ *   permanently-closed state and stops trying. Codes: 1008 (policy
+ *   violation), 4401 (unauthorized), 4403 (forbidden).
+ * - `'THROTTLE'`: the server is rate-limiting. Reconnect is still
+ *   attempted but the client jumps ahead in the backoff curve to avoid
+ *   hammering a busy server. Code: 4429 (too many requests).
+ * - `'RETRY'`: every other code, including normal closes (1000/1001) and
+ *   abnormal ones (1006/1011/1012). The client reconnects with the
+ *   standard backoff curve.
+ *
+ * Pure: no I/O, no globals. Suitable for unit tests.
+ *
+ * @param {number | undefined} code
+ * @returns {'TERMINAL' | 'THROTTLE' | 'RETRY'}
+ */
+export function classifyCloseCode(code) {
+  if (TERMINAL_CLOSE_CODES.has(code)) return 'TERMINAL';
+  if (THROTTLE_CLOSE_CODES.has(code)) return 'THROTTLE';
+  return 'RETRY';
+}
+
+/**
+ * Compute the next reconnect delay using exponential backoff with
+ * proportional jitter.
+ *
+ * The capped delay is `min(base * 2.2^attempt, maxDelay)`. A random factor
+ * in `[0.75, 1.25]` is then applied multiplicatively, so the final delay
+ * spans +/- 25% of the capped value. Multiplicative jitter keeps spread
+ * meaningful at high attempt counts: with 10K clients all reconnecting
+ * after a server restart, additive +/- 500ms jitter clusters reconnects
+ * inside a 1 second window; proportional jitter spreads them across
+ * a window proportional to the current backoff.
+ *
+ * The 2.2 exponent with a 5 minute cap is aggressive enough to back off
+ * fast under sustained server pain (the default 3 second base hits the
+ * cap by attempt 6) and gentle enough that a brief restart resolves
+ * before the user notices.
+ *
+ * Pure: no I/O, no globals. Pass a deterministic `randFactor` for
+ * reproducible assertions in tests.
+ *
+ * @param {number} base base interval in ms (e.g. 3000)
+ * @param {number} maxDelay cap in ms (e.g. 300000)
+ * @param {number} attempt zero-based attempt counter
+ * @param {number} [randFactor] random factor in [0, 1); defaults to Math.random()
+ * @returns {number}
+ */
+export function nextReconnectDelay(base, maxDelay, attempt, randFactor = Math.random()) {
+  const capped = Math.min(base * Math.pow(2.2, attempt), maxDelay);
+  return capped * (0.75 + randFactor * 0.5);
+}
+
 /**
  * @param {import('./client.js').ConnectOptions} options
  * @returns {import('./client.js').WSConnection & { _onEvent: (topic: string, event: string) => import('svelte/store').Readable<unknown> }}
````
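Both helpers are exported and pure, so the "reproducible assertions" note is directly actionable. A sketch using Node's built-in test runner; the import specifier assumes the package exposes a `client` export, so adjust to wherever `client.js` resolves in your setup:

```js
// test/reconnect.test.js (sketch)
import { test } from 'node:test';
import assert from 'node:assert/strict';
import { classifyCloseCode, nextReconnectDelay } from 'svelte-adapter-uws/client';

test('close codes map to reconnect behaviors', () => {
  assert.equal(classifyCloseCode(4401), 'TERMINAL'); // unauthorized
  assert.equal(classifyCloseCode(4429), 'THROTTLE'); // rate limited
  assert.equal(classifyCloseCode(1006), 'RETRY');    // abnormal closure
});

test('jitter spans +/- 25% of the capped delay', () => {
  assert.equal(nextReconnectDelay(3000, 300000, 0, 0), 2250);      // 3000 * 0.75
  assert.equal(nextReconnectDelay(3000, 300000, 0, 0.5), 3000);    // 3000 * 1.0
  assert.equal(nextReconnectDelay(3000, 300000, 99, 0.5), 300000); // cap reached
});
```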
````diff
@@ -507,7 +563,7 @@ function createConnection(options) {
     url,
     path = '/ws',
     reconnectInterval = 3000,
-    maxReconnectInterval =
+    maxReconnectInterval = 300000,
     maxReconnectAttempts = Infinity,
     debug = false,
     auth = false
@@ -757,19 +813,19 @@ function createConnection(options) {
     if (debug) console.log('[ws] disconnected');
     if (intentionallyClosed) return;
 
-
+    const cls = classifyCloseCode(event?.code);
+    if (cls === 'TERMINAL') {
       // Server has permanently rejected this client - do not retry.
       // Use ws.close(4401) or ws.close(1008) on the server when credentials
       // are invalid or the connection is forbidden, to stop the retry loop.
-      if (debug) console.warn('[ws] connection permanently closed by server (code ' + event
+      if (debug) console.warn('[ws] connection permanently closed by server (code ' + event?.code + ')');
       terminalClosed = true;
       permaClosedStore.set(true);
       return;
     }
 
-    if (
-      //
-      // to avoid hammering it with immediate reconnect attempts.
+    if (cls === 'THROTTLE') {
+      // Jump ahead in the backoff curve to avoid hammering a rate-limited server.
       attempt = Math.max(attempt, 5);
     }
 
@@ -789,13 +845,7 @@ function createConnection(options) {
       permaClosedStore.set(true);
       return;
     }
-
-    // on server restarts. With 10K clients and additive ±500ms jitter all
-    // reconnections cluster in a 1s window; proportional jitter spreads them
-    // over ~15s at higher attempt counts where the base delay is large.
-    const base = Math.min(reconnectInterval * Math.pow(1.5, attempt), maxReconnectInterval);
-    const jitter = base * 0.25 * (Math.random() * 2 - 1);
-    const delay = Math.max(0, base + jitter);
+    const delay = nextReconnectDelay(reconnectInterval, maxReconnectInterval, attempt);
     attempt++;
     reconnectTimer = setTimeout(() => {
       reconnectTimer = null;
````
package/files/handler.js
CHANGED
````diff
@@ -12,7 +12,7 @@ import { manifest, prerendered, base } from 'MANIFEST';
 import { env } from 'ENV';
 import * as wsModule from 'WS_HANDLER';
 import { parseCookies, createCookies } from './cookies.js';
-import { mimeLookup, parse_as_bytes, parse_origin, writeChunkWithBackpressure } from './utils.js';
+import { mimeLookup, parse_as_bytes, parse_origin, writeChunkWithBackpressure, drainCoalesced, computePressureReason, nextTopicSeq, completeEnvelope } from './utils.js';
 
 /* global ENV_PREFIX */
 /* global PRECOMPRESS */
````
````diff
@@ -408,6 +408,160 @@ const wsConnections = new Set();
 // Read once at module load so it is never sampled inside a hot callback.
 const wsDebug = WS_ENABLED && env('WS_DEBUG', '') === '1';
 
+// -- Per-topic broadcast sequence numbers ------------------------------------
+// Each platform.publish() stamps a monotonic per-topic seq into the envelope
+// so reconnecting clients can detect gaps and resume from where they left
+// off. Worker-local in clustered mode: cross-worker authority requires the
+// extensions package's Lua INCR variant. See README "Sequence numbers" for
+// the cluster caveat. The map persists for process lifetime; one entry per
+// topic ever published. High-cardinality producers can opt out per-call
+// via { seq: false }.
+/** @type {Map<string, number>} */
+const topicSeqs = new Map();
+
+// -- Pressure tracking -------------------------------------------------------
+// Coarse 1 Hz sampler exposed as `platform.pressure` (snapshot) and
+// `platform.onPressure(cb)` (transition callback). State lives at module
+// scope so platform.publish() and the subscribe/unsubscribe handlers can
+// bump counters with one integer add - no allocations on the hot path.
+
+let publishCountWindow = 0;
+let totalSubscriptions = 0;
+
+/**
+ * @typedef {{
+ *   active: boolean,
+ *   subscriberRatio: number,
+ *   publishRate: number,
+ *   memoryMB: number,
+ *   reason: 'NONE' | 'PUBLISH_RATE' | 'SUBSCRIBERS' | 'MEMORY'
+ * }} PressureSnapshot
+ */
+
+/** @type {PressureSnapshot} */
+const pressureSnapshot = {
+  active: false,
+  subscriberRatio: 0,
+  publishRate: 0,
+  memoryMB: 0,
+  reason: 'NONE'
+};
+
+/** @type {Set<(snapshot: PressureSnapshot) => void>} */
+const pressureListeners = new Set();
+
+/** @type {ReturnType<typeof setInterval> | null} */
+let pressureTimer = null;
+
+/**
+ * Default pressure thresholds. Designed to be safe rather than tight: the
+ * goal is "no false positives in the steady state of a healthy small app,"
+ * not "perfectly tuned for sustained five-figure publish rates." Override
+ * per-deployment via the `pressure` field on the WebSocket options.
+ */
+const DEFAULT_PRESSURE_THRESHOLDS = {
+  memoryHeapUsedRatio: 0.85,
+  publishRatePerSec: 10000,
+  subscriberRatio: 50,
+  sampleIntervalMs: 1000
+};
+
+/**
+ * Sample once: read counters, fold them into the snapshot, fire listeners
+ * iff `reason` changed. Called by the 1 Hz timer; also extracted so a test
+ * harness can drive samples directly without spinning real timers.
+ *
+ * @param {{ memoryHeapUsedRatio: number | false, publishRatePerSec: number | false, subscriberRatio: number | false, sampleIntervalMs: number }} thresholds
+ */
+function samplePressure(thresholds) {
+  const interval = thresholds.sampleIntervalMs / 1000;
+  const publishRate = interval > 0 ? publishCountWindow / interval : 0;
+  publishCountWindow = 0;
+
+  const connections = wsConnections.size;
+  const subscriberRatio = connections > 0 ? totalSubscriptions / connections : 0;
+
+  const mem = process.memoryUsage();
+  const heapUsedRatio = mem.heapTotal > 0 ? mem.heapUsed / mem.heapTotal : 0;
+  const memoryMB = mem.rss / (1024 * 1024);
+
+  const reason = computePressureReason(
+    { heapUsedRatio, publishRate, subscriberRatio },
+    thresholds
+  );
+
+  const transitioned = reason !== pressureSnapshot.reason;
+  pressureSnapshot.subscriberRatio = subscriberRatio;
+  pressureSnapshot.publishRate = publishRate;
+  pressureSnapshot.memoryMB = memoryMB;
+  pressureSnapshot.reason = reason;
+  pressureSnapshot.active = reason !== 'NONE';
+
+  if (transitioned) {
+    for (const cb of pressureListeners) {
+      try {
+        cb(pressureSnapshot);
+      } catch (err) {
+        console.error('[pressure] listener threw:', err);
+      }
+    }
+  }
+}
+
+/**
+ * Merge user-supplied pressure options on top of the safe defaults. Each
+ * threshold accepts `false` to disable that signal. `sampleIntervalMs` is
+ * clamped to a sane minimum to avoid pathological tight-loop sampling if
+ * a user passes 0 or a negative number.
+ *
+ * @param {{ memoryHeapUsedRatio?: number | false, publishRatePerSec?: number | false, subscriberRatio?: number | false, sampleIntervalMs?: number } | undefined} opts
+ */
+function resolvePressureThresholds(opts) {
+  const merged = { ...DEFAULT_PRESSURE_THRESHOLDS, ...(opts || {}) };
+  if (typeof merged.sampleIntervalMs !== 'number' || merged.sampleIntervalMs < 100) {
+    merged.sampleIntervalMs = DEFAULT_PRESSURE_THRESHOLDS.sampleIntervalMs;
+  }
+  return merged;
+}
+
+/**
+ * Start the 1 Hz pressure sampler. Idempotent: a second call replaces the
+ * existing timer with a new one using the supplied thresholds.
+ *
+ * @param {Parameters<typeof resolvePressureThresholds>[0]} opts
+ */
+function startPressureSampling(opts) {
+  const thresholds = resolvePressureThresholds(opts);
+  if (pressureTimer) clearInterval(pressureTimer);
+  pressureTimer = setInterval(() => samplePressure(thresholds), thresholds.sampleIntervalMs);
+  if (typeof pressureTimer.unref === 'function') pressureTimer.unref();
+}
+
+function stopPressureSampling() {
+  if (pressureTimer) {
+    clearInterval(pressureTimer);
+    pressureTimer = null;
+  }
+}
+
+/**
+ * Drain any pending coalesce-by-key messages on a single connection.
+ * Serializes lazily: only the surviving (latest) value per key pays
+ * JSON.stringify cost.
+ *
+ * @param {import('uWebSockets.js').WebSocket<any>} ws
+ */
+function flushCoalescedFor(ws) {
+  const userData = ws.getUserData();
+  const pending = userData.__coalesced;
+  if (!pending || pending.size === 0) return;
+  drainCoalesced(pending, (msg) => ws.send(
+    envelopePrefix(msg.topic, msg.event) + JSON.stringify(msg.data ?? null) + '}',
+    false,
+    false
+  ));
+}
+
 /** @type {import('./index.js').Platform} */
 const platform = {
   /**
````
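`computePressureReason` itself lives in the unshown `utils.js`, but the call shape above plus the documented precedence (MEMORY > PUBLISH_RATE > SUBSCRIBERS, with `false` disabling a signal) pin down a plausible implementation. A sketch, not the shipped code:

```js
/**
 * Sketch: fixed-precedence threshold checks; a `false` threshold
 * disables its signal entirely.
 * @param {{ heapUsedRatio: number, publishRate: number, subscriberRatio: number }} sample
 * @param {{ memoryHeapUsedRatio: number | false, publishRatePerSec: number | false, subscriberRatio: number | false }} thresholds
 * @returns {'NONE' | 'PUBLISH_RATE' | 'SUBSCRIBERS' | 'MEMORY'}
 */
function computePressureReason(sample, thresholds) {
  // MEMORY first: approaching OOM trumps everything else.
  if (thresholds.memoryHeapUsedRatio !== false &&
      sample.heapUsedRatio >= thresholds.memoryHeapUsedRatio) return 'MEMORY';
  // PUBLISH_RATE next: CPU saturation cascades fastest.
  if (thresholds.publishRatePerSec !== false &&
      sample.publishRate >= thresholds.publishRatePerSec) return 'PUBLISH_RATE';
  // SUBSCRIBERS last: heavy fan-out degrades gracefully.
  if (thresholds.subscriberRatio !== false &&
      sample.subscriberRatio >= thresholds.subscriberRatio) return 'SUBSCRIBERS';
  return 'NONE';
}
```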
````diff
@@ -416,7 +570,11 @@ const platform = {
    * No-op if no clients are subscribed - safe to call unconditionally.
    */
   publish(topic, event, data, options) {
-
+    publishCountWindow++;
+    const seq = (options && options.seq === false)
+      ? null
+      : nextTopicSeq(topicSeqs, topic);
+    const envelope = completeEnvelope(envelopePrefix(topic, event), data, seq);
     const result = app.publish(topic, envelope, false, false);
     // Relay to other workers via main thread (no-op in single-process mode).
     // Pass { relay: false } when the message originates from an external
````
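`nextTopicSeq` is also imported from the unshown `utils.js`. Given the Map it is handed and the "first publish is `seq: 1`" contract, the obvious shape is a one-entry-per-topic increment (a sketch):

```js
/**
 * Sketch: increment-and-return a per-topic counter, creating the entry
 * on first publish so the first stamped seq is 1.
 * @param {Map<string, number>} seqs
 * @param {string} topic
 * @returns {number}
 */
function nextTopicSeq(seqs, topic) {
  const next = (seqs.get(topic) ?? 0) + 1;
  seqs.set(topic, next);
  return next;
}
```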
````diff
@@ -444,6 +602,38 @@
     return ws.send(envelopePrefix(topic, event) + JSON.stringify(data ?? null) + '}', false, false);
   },
 
+  /**
+   * Send a message to a single connection with coalesce-by-key semantics.
+   *
+   * Each (ws, key) pair holds at most one pending message. If a newer
+   * sendCoalesced for the same key arrives before the previous one drains
+   * out to the wire, the older message is dropped in place: latest value
+   * wins, original insertion order is preserved.
+   *
+   * Use for latest-value streams where intermediate values are noise:
+   * price ticks, cursor positions, presence state, typing indicators,
+   * scroll/scrub positions. For at-least-once delivery use send() or
+   * publish() instead.
+   *
+   * Serialization is deferred to the actual flush, so a stream that
+   * overwrites the same key 1000 times before a single drain pays only
+   * one JSON.stringify, not 1000.
+   *
+   * The flush attempts immediately and again on every uWS drain event.
+   * On BACKPRESSURE or DROPPED from ws.send, pumping stops and resumes
+   * on the next drain.
+   */
+  sendCoalesced(ws, { key, topic, event, data }) {
+    const userData = ws.getUserData();
+    let pending = userData.__coalesced;
+    if (!pending) {
+      pending = new Map();
+      userData.__coalesced = pending;
+    }
+    pending.set(key, { topic, event, data });
+    flushCoalescedFor(ws);
+  },
+
   /**
    * Send a message to connections matching a filter.
    * The filter receives each connection's userData (from the upgrade handler).
````
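`drainCoalesced` (again from the unshown `utils.js`) receives the pending Map and a send callback, and per the doc comment must stop pumping on BACKPRESSURE or DROPPED and leave the unsent tail for the next drain event. Assuming uWebSockets.js's documented send statuses (0 = backpressure, 1 = success, 2 = dropped), a plausible sketch:

```js
/**
 * Sketch: pump pending entries in insertion order; each surviving value
 * is serialized exactly once, inside the send callback.
 * @param {Map<string, { topic: string, event: string, data: unknown }>} pending
 * @param {(msg: { topic: string, event: string, data: unknown }) => number} send
 */
function drainCoalesced(pending, send) {
  for (const [key, msg] of pending) {
    const status = send(msg);
    if (status === 2) return; // DROPPED: keep the entry, retry on next drain
    pending.delete(key);      // frame accepted (sent or buffered by uWS)
    if (status === 0) return; // BACKPRESSURE: stop pumping until drain fires
  }
}
```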
````diff
@@ -493,6 +683,35 @@ const platform = {
     return results;
   },
 
+  /**
+   * Live snapshot of worker-local backpressure signals.
+   *
+   * `reason` is one of `'NONE'`, `'PUBLISH_RATE'`, `'SUBSCRIBERS'`,
+   * `'MEMORY'`. Precedence is fixed (MEMORY > PUBLISH_RATE > SUBSCRIBERS),
+   * so a worker under multiple stresses reports the most urgent one.
+   *
+   * Sampled by a coarse 1 Hz timer. Reading the snapshot is a property
+   * access; no I/O or computation per read. Use `onPressure` for
+   * push-style reaction on transitions.
+   */
+  get pressure() {
+    return pressureSnapshot;
+  },
+
+  /**
+   * Register a callback fired on each pressure-state transition (when
+   * `reason` changes between samples). Fired at most once per sample
+   * tick. Returns an unsubscribe function.
+   *
+   * Callbacks are invoked synchronously inside the sampler. A throwing
+   * listener does not break the sampler or other listeners; the error
+   * is logged and the next listener still runs.
+   */
+  onPressure(cb) {
+    pressureListeners.add(cb);
+    return () => pressureListeners.delete(cb);
+  },
+
   /**
    * Get a scoped helper for a topic - less repetition when publishing
    * multiple events to the same topic.
@@ -1791,14 +2010,19 @@ if (WS_ENABLED) {
       if (wsModule.subscribe && wsModule.subscribe(ws, msg.topic, { platform }) === false) {
         return;
       }
+      const subs = ws.getUserData().__subscriptions;
+      const isNew = !subs.has(msg.topic);
       ws.subscribe(msg.topic);
-
+      subs.add(msg.topic);
+      if (isNew) totalSubscriptions++;
       if (wsDebug) console.log('[ws] subscribe topic=%s', msg.topic);
       return;
     }
     if (msg.type === 'unsubscribe' && typeof msg.topic === 'string') {
       ws.unsubscribe(msg.topic);
-      ws.getUserData().__subscriptions.delete(msg.topic)
+      if (ws.getUserData().__subscriptions.delete(msg.topic)) {
+        totalSubscriptions--;
+      }
       if (wsDebug) console.log('[ws] unsubscribe topic=%s', msg.topic);
       wsModule.unsubscribe?.(ws, msg.topic, { platform });
       return;
@@ -1818,8 +2042,10 @@ if (WS_ENABLED) {
       }
       if (!valid) continue;
       if (wsModule.subscribe && wsModule.subscribe(ws, topic, { platform }) === false) continue;
+      const isNew = !userData.__subscriptions.has(topic);
       ws.subscribe(topic);
       userData.__subscriptions.add(topic);
+      if (isNew) totalSubscriptions++;
       subscribed++;
     }
     if (wsDebug) console.log('[ws] subscribe-batch count=%d', subscribed);
@@ -1833,13 +2059,19 @@ if (WS_ENABLED) {
       wsModule.message?.(ws, { data: message, isBinary, platform });
     },
 
-    drain:
+    drain: (ws) => {
+      // Resume any sendCoalesced traffic held back by backpressure
+      // before delegating to the user's drain hook.
+      flushCoalescedFor(ws);
+      wsModule.drain?.(ws, { platform });
+    },
 
     close: (ws, code, message) => {
       const subscriptions = ws.getUserData().__subscriptions || new Set();
       try {
        wsModule.close?.(ws, { code, message, platform, subscriptions });
       } finally {
+        totalSubscriptions -= subscriptions.size;
        wsConnections.delete(ws);
        if (wsDebug) console.log('[ws] close code=%d connections=%d', code, wsConnections.size);
       }
@@ -1860,6 +2092,8 @@ if (WS_ENABLED) {
   if (WS_PATH !== '/ws') {
     console.log(`Client must match: connect({ path: '${WS_PATH}' })`);
   }
+
+  startPressureSampling(wsOptions.pressure);
 }
 
 // Health check endpoint (before catch-all so it never hits SSR)
@@ -1931,6 +2165,7 @@ export function shutdown() {
     uWS.us_listen_socket_close(listenSocket);
     listenSocket = null;
   }
+  stopPressureSampling();
  for (const ws of wsConnections) {
    ws.close(1001, 'Server shutting down');
  }
````