@agentunion/kite 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (69)
  1. package/__init__.py +1 -0
  2. package/__main__.py +15 -0
  3. package/cli.js +70 -0
  4. package/core/__init__.py +0 -0
  5. package/core/__pycache__/__init__.cpython-313.pyc +0 -0
  6. package/core/event_hub/BENCHMARK.md +94 -0
  7. package/core/event_hub/__init__.py +0 -0
  8. package/core/event_hub/__pycache__/__init__.cpython-313.pyc +0 -0
  9. package/core/event_hub/__pycache__/bench.cpython-313.pyc +0 -0
  10. package/core/event_hub/__pycache__/bench_perf.cpython-313.pyc +0 -0
  11. package/core/event_hub/__pycache__/dedup.cpython-313.pyc +0 -0
  12. package/core/event_hub/__pycache__/entry.cpython-313.pyc +0 -0
  13. package/core/event_hub/__pycache__/hub.cpython-313.pyc +0 -0
  14. package/core/event_hub/__pycache__/router.cpython-313.pyc +0 -0
  15. package/core/event_hub/__pycache__/server.cpython-313.pyc +0 -0
  16. package/core/event_hub/bench.py +459 -0
  17. package/core/event_hub/bench_extreme.py +308 -0
  18. package/core/event_hub/bench_perf.py +350 -0
  19. package/core/event_hub/bench_results/.gitkeep +0 -0
  20. package/core/event_hub/bench_results/2026-02-28_13-26-48.json +51 -0
  21. package/core/event_hub/bench_results/2026-02-28_13-44-45.json +51 -0
  22. package/core/event_hub/bench_results/2026-02-28_13-45-39.json +51 -0
  23. package/core/event_hub/dedup.py +31 -0
  24. package/core/event_hub/entry.py +113 -0
  25. package/core/event_hub/hub.py +263 -0
  26. package/core/event_hub/module.md +21 -0
  27. package/core/event_hub/router.py +21 -0
  28. package/core/event_hub/server.py +138 -0
  29. package/core/event_hub_bench/entry.py +371 -0
  30. package/core/event_hub_bench/module.md +25 -0
  31. package/core/launcher/__init__.py +0 -0
  32. package/core/launcher/__pycache__/__init__.cpython-313.pyc +0 -0
  33. package/core/launcher/__pycache__/entry.cpython-313.pyc +0 -0
  34. package/core/launcher/__pycache__/module_scanner.cpython-313.pyc +0 -0
  35. package/core/launcher/__pycache__/process_manager.cpython-313.pyc +0 -0
  36. package/core/launcher/data/log/lifecycle.jsonl +1045 -0
  37. package/core/launcher/data/processes_14752.json +32 -0
  38. package/core/launcher/data/token.txt +1 -0
  39. package/core/launcher/entry.py +965 -0
  40. package/core/launcher/module.md +37 -0
  41. package/core/launcher/module_scanner.py +253 -0
  42. package/core/launcher/process_manager.py +435 -0
  43. package/core/registry/__init__.py +0 -0
  44. package/core/registry/__pycache__/__init__.cpython-313.pyc +0 -0
  45. package/core/registry/__pycache__/entry.cpython-313.pyc +0 -0
  46. package/core/registry/__pycache__/server.cpython-313.pyc +0 -0
  47. package/core/registry/__pycache__/store.cpython-313.pyc +0 -0
  48. package/core/registry/data/port.txt +1 -0
  49. package/core/registry/data/port_14752.txt +1 -0
  50. package/core/registry/data/port_484.txt +1 -0
  51. package/core/registry/entry.py +73 -0
  52. package/core/registry/module.md +30 -0
  53. package/core/registry/server.py +256 -0
  54. package/core/registry/store.py +232 -0
  55. package/extensions/__init__.py +0 -0
  56. package/extensions/__pycache__/__init__.cpython-313.pyc +0 -0
  57. package/extensions/services/__init__.py +0 -0
  58. package/extensions/services/__pycache__/__init__.cpython-313.pyc +0 -0
  59. package/extensions/services/watchdog/__init__.py +0 -0
  60. package/extensions/services/watchdog/__pycache__/__init__.cpython-313.pyc +0 -0
  61. package/extensions/services/watchdog/__pycache__/entry.cpython-313.pyc +0 -0
  62. package/extensions/services/watchdog/__pycache__/monitor.cpython-313.pyc +0 -0
  63. package/extensions/services/watchdog/__pycache__/server.cpython-313.pyc +0 -0
  64. package/extensions/services/watchdog/entry.py +143 -0
  65. package/extensions/services/watchdog/module.md +25 -0
  66. package/extensions/services/watchdog/monitor.py +420 -0
  67. package/extensions/services/watchdog/server.py +167 -0
  68. package/main.py +17 -0
  69. package/package.json +27 -0
package/__init__.py ADDED
@@ -0,0 +1 @@
1
+ # Kite framework package
package/__main__.py ADDED
@@ -0,0 +1,15 @@
1
+ """Allow running Kite as a package: python -m Kite"""
2
+ import os, sys
3
+ sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
4
+ import secrets
5
+ from core.launcher.entry import Launcher
6
+
7
+
8
+ def main():
9
+ token = secrets.token_hex(32)
10
+ print(f"[main] KITE_TOKEN generated ({len(token)} chars)")
11
+ Launcher(kite_token=token).run()
12
+
13
+
14
+ if __name__ == "__main__":
15
+ main()
package/cli.js ADDED
@@ -0,0 +1,70 @@
1
#!/usr/bin/env node
/**
 * Kite CLI — Node.js entry point.
 * Generates KITE_TOKEN, spawns the Python launcher, forwards output.
 *
 * Usage:
 *   kite                    # after npm install -g / npm link
 *   kite --cwd /some/path   # override working directory
 *   node cli.js             # direct execution
 *
 * Environment variables passed to Python:
 *   KITE_TOKEN — session token generated below
 *   KITE_HOME  — Kite framework root (where core/, extensions/ live)
 *   KITE_CWD   — directory where user executed the kite command
 */
const { spawn } = require('child_process');
const crypto = require('crypto');
const fs = require('fs');
const path = require('path');

// Parse CLI args: pull out `--cwd <dir>`, forward everything else to Python.
const args = process.argv.slice(2);
const pythonArgs = [];
let cwdOverride = null;

for (let i = 0; i < args.length; i++) {
  if (args[i] === '--cwd' && args[i + 1]) {
    cwdOverride = path.resolve(args[++i]);
  } else {
    pythonArgs.push(args[i]);
  }
}

const token = crypto.randomBytes(32).toString('hex');
const kiteHome = fs.realpathSync(__dirname);
const kiteCwd = cwdOverride || process.cwd();

console.log(`[main] KITE_TOKEN generated (${token.length} chars)`);
console.log(`[main] KITE_HOME: ${kiteHome}`);
console.log(`[main] KITE_CWD: ${kiteCwd}`);

// Spawn the Python launcher via its script entry point (main.py).
// PYTHONPATH points to Kite's parent directory so absolute imports resolve;
// the user's original directory travels in KITE_CWD (cwd stays at KITE_HOME).
const kiteParent = path.dirname(kiteHome);
const env = {
  ...process.env,
  // BUG FIX: the token was generated and logged but never exported, so the
  // Python side could not see it. Export it as documented in the header.
  KITE_TOKEN: token,
  KITE_HOME: kiteHome,
  KITE_CWD: kiteCwd,
  PYTHONPATH: kiteParent + (process.env.PYTHONPATH ? path.delimiter + process.env.PYTHONPATH : ''),
  PYTHONUTF8: '1',
};

const python = process.platform === 'win32' ? 'python' : 'python3';
const child = spawn(
  python,
  [path.join(kiteHome, 'main.py'), ...pythonArgs],
  { cwd: kiteHome, env, stdio: 'inherit' }
);

child.on('error', err => {
  console.error(`[main] Failed to start Python: ${err.message}`);
  console.error(`[main] Make sure '${python}' is in PATH and dependencies are installed`);
  process.exit(1);
});

// Mirror the child's exit status. A child killed by a signal reports
// code === null; that must not look like success, so exit non-zero.
child.on('exit', (code, signal) => process.exit(signal ? 1 : code ?? 0));

// Forward signals for graceful shutdown
for (const sig of ['SIGINT', 'SIGTERM']) {
  process.on(sig, () => child.kill(sig));
}
package/core/event_hub/BENCHMARK.md ADDED
@@ -0,0 +1,94 @@
1
+ # Event Hub 基准测试指南
2
+
3
+ ## 快速开始
4
+
5
+ 从 Kite 根目录运行:
6
+
7
+ ```bash
8
+ python -m core.event_hub.bench_perf
9
+ ```
10
+
11
+ 运行约 20 秒,结果自动保存到 `core/event_hub/bench_results/` 目录。
12
+
13
+ ## 测试项说明
14
+
15
+ ### 1. Throughput(吞吐量)
16
+
17
+ 一次性发送 10000 个事件,测量整条链路的处理速度。
18
+
19
+ | 指标 | 含义 |
20
+ |------|------|
21
+ | send_rate | publisher 每秒发送事件数(evt/s) |
22
+ | hub_queued | hub 成功放入订阅者队列的数量 |
23
+ | hub_routed | hub sender loop 成功发出的数量 |
24
+ | client_recv | 订阅者客户端实际收到的数量 |
25
+
26
+ 三个数字一致 = 零丢失。哪个数字变小,哪个环节就是瓶颈。
27
+
28
+ ### 2. Latency(延迟)
29
+
30
+ 逐条发送 200 个事件,测量端到端耗时。
31
+
32
+ | 指标 | 含义 |
33
+ |------|------|
34
+ | avg_ms | 平均延迟 |
35
+ | p50_ms | 中位数延迟(50% 的请求低于此值) |
36
+ | p95_ms | 95 分位延迟 |
37
+ | p99_ms | 99 分位延迟(最差情况) |
38
+
39
+ ### 3. Fan-out(扇出)
40
+
41
+ 1 个 publisher 发送 2000 个事件,N 个 subscriber 同时接收。
42
+
43
+ | 场景 | 含义 |
44
+ |------|------|
45
+ | x1 | 1 个订阅者,基线性能 |
46
+ | x10 | 10 个订阅者,中等负载 |
47
+ | x50 | 50 个订阅者,高负载 |
48
+
49
+ 每个场景报告 `avg_recv`(平均接收数)和 `min_recv`(最少接收数)。全部等于发送数 = 零丢失。
50
+
51
+ ## 结果文件
52
+
53
+ 每次运行自动保存到 `core/event_hub/bench_results/`:
54
+
55
+ ```
56
+ bench_results/
57
+ 2026-02-28_05-22-18.json
58
+ 2026-02-28_06-30-00.json
59
+ ```
60
+
61
+ JSON 结构:
62
+
63
+ ```json
64
+ {
65
+ "timestamp": "2026-02-28T05:22:18",
66
+ "env": { "platform": "win32", "python": "3.12.0" },
67
+ "throughput": { "send_rate": 9752, "hub_queued": 10000, ... },
68
+ "latency": { "avg_ms": 0.6, "p50_ms": 0.55, ... },
69
+ "fanout_1": { ... },
70
+ "fanout_10": { ... },
71
+ "fanout_50": { ... },
72
+ "hub_counters": { ... }
73
+ }
74
+ ```
75
+
76
+ ## 如何对比优化效果
77
+
78
+ 1. 改动代码前跑一次 `python -m core.event_hub.bench_perf`
79
+ 2. 实施优化
80
+ 3. 再跑一次
81
+ 4. 对比两个 JSON 文件中的关键指标
82
+
83
+ 重点关注:
84
+ - `throughput.send_rate` — 越高越好
85
+ - `latency.p99_ms` — 越低越好
86
+ - `fanout_50.min_recv` — 应等于 2000(零丢失)
87
+
88
+ ## 其他测试脚本
89
+
90
+ | 脚本 | 用途 | 耗时 |
91
+ |------|------|------|
92
+ | `bench_perf` | 快速基准对比 | ~20s |
93
+ | `bench_extreme` | 极限压测(50000 burst、100MB 消息、50 subscriber) | ~5min |
94
+ | `bench` | 长时间混合压力测试 | ~10min |
package/core/event_hub/bench.py ADDED
@@ -0,0 +1,459 @@
1
+ """
2
+ Event Hub stress test (standalone).
3
+
4
+ Usage: python -m core.event_hub.bench
5
+ (from Kite root directory)
6
+
7
+ Phase 1: 3-channel mixed stress test (10 minutes)
8
+ CH1 — high-freq small messages (500 evt/s)
9
+ CH2 — medium-freq large messages (50 evt/s, 10KB)
10
+ CH3 — bursty traffic (200-event burst every 2s)
11
+
12
+ Phase 2: 3 extreme tests
13
+ EX1 — max burst (ramp until failure)
14
+ EX2 — max concurrent connections
15
+ EX3 — max message size
16
+ """
17
+
18
+ import asyncio
19
+ import json
20
+ import os
21
+ import socket
22
+ import statistics
23
+ import sys
24
+ import threading
25
+ import time
26
+ import uuid
27
+ from datetime import datetime, timezone
28
+
29
+ _root = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
30
+ if _root not in sys.path:
31
+ sys.path.insert(0, _root)
32
+
33
+ import psutil
34
+ import uvicorn
35
+ import websockets
36
+ from fastapi import FastAPI, WebSocket, WebSocketDisconnect
37
+
38
+ from core.event_hub.hub import EventHub
39
+
40
+
41
+ # ── Minimal Event Hub server (no auth) ──
42
+
43
def _create_app(hub: EventHub) -> FastAPI:
    """Build a minimal FastAPI app exposing the hub on a single /ws endpoint.

    Bench-only: no authentication. Each connection registers with the hub
    under its ``id`` query parameter and is unregistered on disconnect.
    """
    app = FastAPI()

    @app.websocket("/ws")
    async def ws(ws: WebSocket):
        # Module id from ?id=...; fall back to a unique anonymous name.
        mid = ws.query_params.get("id", f"anon_{id(ws)}")
        await ws.accept()
        hub.add_connection(mid, ws)
        try:
            while True:
                raw = await ws.receive_text()
                await hub.handle_message(mid, ws, raw)
        except Exception:
            # Deliberate best-effort: a client disconnect or any receive/
            # handle error simply ends the loop; cleanup lives in finally.
            # (Original "(WebSocketDisconnect, Exception)" tuple was
            # redundant — Exception already covers WebSocketDisconnect.)
            pass
        finally:
            hub.remove_connection(mid)

    return app
61
+
62
+
63
+ def _free_port() -> int:
64
+ with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
65
+ s.bind(("127.0.0.1", 0))
66
+ return s.getsockname()[1]
67
+
68
+
69
+ # ── Client ──
70
+
71
class C:
    """Minimal websocket client used by the benchmark channels."""

    def __init__(self, url: str, name: str):
        self.url = url
        self.name = name
        self.ws = None  # populated by connect()

    async def connect(self):
        """Open the websocket, identifying as ``name`` via the query string."""
        self.ws = await websockets.connect(f"{self.url}?id={self.name}", max_size=None)

    async def sub(self, patterns):
        """Subscribe to a list of event patterns."""
        await self.ws.send(json.dumps({"type": "subscribe", "events": patterns}))

    async def pub(self, event: str, data: dict) -> str:
        """Publish one event envelope; return its generated event id."""
        eid = str(uuid.uuid4())
        envelope = {
            "type": "event",
            "event_id": eid,
            "event": event,
            "source": self.name,
            "timestamp": datetime.now(timezone.utc).isoformat(),
            "data": data,
        }
        await self.ws.send(json.dumps(envelope))
        return eid

    async def recv(self, timeout=5.0):
        """Receive and decode one JSON message; None on timeout or any error."""
        try:
            raw = await asyncio.wait_for(self.ws.recv(), timeout=timeout)
            return json.loads(raw)
        except Exception:
            return None

    async def drain(self, timeout=0.05):
        """Discard queued messages until a receive times out."""
        while True:
            if await self.recv(timeout=timeout) is None:
                break

    async def close(self):
        """Close the websocket if it was ever opened."""
        if self.ws:
            await self.ws.close()
104
+
105
+
106
+ # ── Stats collector ──
107
+
108
+ class Stats:
109
+ def __init__(self):
110
+ self.sent = 0
111
+ self.received = 0
112
+ self.acked = 0
113
+ self.latencies: list[float] = []
114
+ self.lock = asyncio.Lock()
115
+
116
+ async def record_latency(self, ts_iso: str):
117
+ try:
118
+ sent = datetime.fromisoformat(ts_iso)
119
+ ms = (datetime.now(timezone.utc) - sent).total_seconds() * 1000
120
+ async with self.lock:
121
+ self.latencies.append(ms)
122
+ except Exception:
123
+ pass
124
+
125
+ def summary(self) -> dict:
126
+ lat = sorted(self.latencies) if self.latencies else [0]
127
+ return {
128
+ "sent": self.sent,
129
+ "recv": self.received,
130
+ "acked": self.acked,
131
+ "loss": self.sent - self.received,
132
+ "lat_avg": round(statistics.mean(lat), 1),
133
+ "lat_p50": round(statistics.median(lat), 1),
134
+ "lat_p95": round(lat[int(len(lat) * 0.95)], 1),
135
+ "lat_p99": round(lat[int(len(lat) * 0.99)], 1),
136
+ }
137
+
138
+
139
+ # ── Benchmark ──
140
+
141
class Benchmark:
    """Self-contained Event Hub stress harness.

    Boots a throwaway uvicorn server around an in-process EventHub, then
    drives it with websocket clients: a 3-channel mixed load (phase 1) and
    three ramp-until-failure extreme tests (phase 2). Resource usage is
    sampled from this very process via psutil (server and clients share it).
    """

    def __init__(self):
        # Fresh hub + a free localhost port chosen at construction time.
        self.hub = EventHub()
        self.port = _free_port()
        self.ws_url = f"ws://127.0.0.1:{self.port}/ws"
        self._server: uvicorn.Server | None = None
        self.proc = psutil.Process()  # this process: server + clients combined

    def _start_server(self):
        """Run uvicorn in a daemon thread; block up to 5s until it reports started."""
        app = _create_app(self.hub)
        cfg = uvicorn.Config(app, host="127.0.0.1", port=self.port, log_level="warning")
        self._server = uvicorn.Server(cfg)
        threading.Thread(target=self._server.run, daemon=True).start()
        deadline = time.time() + 5
        while time.time() < deadline:
            if self._server.started:
                return
            time.sleep(0.05)
        raise RuntimeError("Server failed to start")

    def _res_snapshot(self) -> dict:
        """CPU percent (since last call) and RSS in MB for this process."""
        return {
            "cpu": self.proc.cpu_percent(),
            "mem_mb": round(self.proc.memory_info().rss / 1024 / 1024, 1),
        }

    async def _make_channel(self, pub_name: str, sub_name: str, pattern: str):
        """Create a pub/sub pair, subscribe, return (pub, sub, stats)."""
        pub = C(self.ws_url, pub_name)
        sub = C(self.ws_url, sub_name)
        await pub.connect()
        await sub.connect()
        await sub.sub([pattern])
        # Brief pause so the subscription is registered before publishing starts.
        await asyncio.sleep(0.1)
        return pub, sub, Stats()

    # ── ACK drainer (keeps publisher WS buffer clear) ──

    async def _drainer(self, pub: C, stats: Stats, stop: asyncio.Event):
        """Consume ack messages on the publisher socket until stop is set."""
        while not stop.is_set():
            msg = await pub.recv(timeout=0.3)
            if msg and msg.get("type") == "ack":
                stats.acked += 1

    # ── Subscriber receiver (shared by all channels) ──

    async def _receiver(self, sub: C, stats: Stats, stop: asyncio.Event):
        """Count delivered events and record their end-to-end latency."""
        while not stop.is_set():
            msg = await sub.recv(timeout=0.3)
            if not msg:
                continue
            if msg.get("type") == "event":
                stats.received += 1
                ts = msg.get("timestamp", "")
                if ts:
                    await stats.record_latency(ts)

    # ── CH1: high-freq small messages ──

    async def _ch1_sender(self, pub: C, stats: Stats, stop: asyncio.Event):
        """500 evt/s, tiny payload."""
        # NOTE(review): 0.002s sleep targets 500 evt/s, but send time is not
        # subtracted, so the effective rate is somewhat lower — confirm intent.
        while not stop.is_set():
            await pub.pub("ch1.fast", {"seq": stats.sent})
            stats.sent += 1
            await asyncio.sleep(0.002)

    # ── CH2: medium-freq large messages ──

    async def _ch2_sender(self, pub: C, stats: Stats, stop: asyncio.Event):
        """50 evt/s, 10KB payload."""
        payload = "X" * 10240
        while not stop.is_set():
            await pub.pub("ch2.large", {"p": payload})
            stats.sent += 1
            await asyncio.sleep(0.02)

    # ── CH3: bursty traffic ──

    async def _ch3_sender(self, pub: C, stats: Stats, stop: asyncio.Event):
        """200-event burst every 2s."""
        while not stop.is_set():
            for _ in range(200):
                await pub.pub("ch3.burst", {"b": stats.sent})
                stats.sent += 1
            await asyncio.sleep(2)

    # ── Phase 1: 10-min mixed stress ──

    async def phase1(self, duration=600):
        """Run the 3-channel mixed load for ``duration`` seconds.

        Spawns sender + ack-drainer + receiver tasks per channel, prints a
        progress report every 30s, then a final summary, and closes clients.
        """
        print("=" * 60)
        print(f"PHASE 1: 3-channel mixed stress test ({duration}s)")
        print("=" * 60)

        p1, s1, st1 = await self._make_channel("ch1_pub", "ch1_sub", "ch1.*")
        p2, s2, st2 = await self._make_channel("ch2_pub", "ch2_sub", "ch2.*")
        p3, s3, st3 = await self._make_channel("ch3_pub", "ch3_sub", "ch3.*")
        stop = asyncio.Event()

        tasks = [
            asyncio.create_task(self._ch1_sender(p1, st1, stop)),
            asyncio.create_task(self._drainer(p1, st1, stop)),
            asyncio.create_task(self._receiver(s1, st1, stop)),
            asyncio.create_task(self._ch2_sender(p2, st2, stop)),
            asyncio.create_task(self._drainer(p2, st2, stop)),
            asyncio.create_task(self._receiver(s2, st2, stop)),
            asyncio.create_task(self._ch3_sender(p3, st3, stop)),
            asyncio.create_task(self._drainer(p3, st3, stop)),
            asyncio.create_task(self._receiver(s3, st3, stop)),
        ]

        self.proc.cpu_percent()  # prime
        start = time.time()
        interval = 30  # report every 30s
        next_report = start + interval

        while time.time() - start < duration:
            await asyncio.sleep(1)
            if time.time() >= next_report:
                elapsed = round(time.time() - start)
                res = self._res_snapshot()
                # NOTE(review): reads the hub's private counter API — keep in
                # sync with EventHub._counters_dict().
                hub_stats = self.hub._counters_dict()
                print(f"\n [{elapsed:>4}s] cpu={res['cpu']:.0f}% mem={res['mem_mb']}MB "
                      f"hub_recv={hub_stats['events_received']} hub_route={hub_stats['events_routed']}")
                for name, st in [("CH1-fast", st1), ("CH2-large", st2), ("CH3-burst", st3)]:
                    s = st.summary()
                    print(f" {name}: sent={s['sent']} recv={s['recv']} loss={s['loss']} "
                          f"avg={s['lat_avg']}ms p95={s['lat_p95']}ms p99={s['lat_p99']}ms")
                next_report = time.time() + interval

        stop.set()
        for t in tasks:
            t.cancel()
        await asyncio.gather(*tasks, return_exceptions=True)

        # Final report
        print(f"\n FINAL ({duration}s):")
        res = self._res_snapshot()
        print(f" cpu={res['cpu']:.0f}% mem={res['mem_mb']}MB")
        for name, st in [("CH1-fast", st1), ("CH2-large", st2), ("CH3-burst", st3)]:
            s = st.summary()
            print(f" {name}: sent={s['sent']} recv={s['recv']} loss={s['loss']} "
                  f"avg={s['lat_avg']}ms p50={s['lat_p50']}ms p95={s['lat_p95']}ms p99={s['lat_p99']}ms")

        for c in [p1, s1, p2, s2, p3, s3]:
            await c.close()
        print()

    # ── Phase 2: Extreme tests ──

    async def extreme1_max_burst(self):
        """Ramp burst size until loss or timeout."""
        print("=" * 60)
        print("EXTREME 1: Max burst (ramp until loss)")
        print("=" * 60)

        pub, sub, _ = await self._make_channel("ex1_pub", "ex1_sub", "ex1.*")

        for size in [1000, 5000, 10000, 20000, 50000]:
            recvd = 0
            stop = asyncio.Event()

            async def _recv():
                nonlocal recvd
                while not stop.is_set():
                    msg = await sub.recv(timeout=0.3)
                    if msg and msg.get("type") == "event":
                        recvd += 1

            task = asyncio.create_task(_recv())
            start = time.time()
            for i in range(size):
                await pub.pub("ex1.burst", {"i": i})
            send_time = time.time() - start

            # Wait for delivery
            await asyncio.sleep(max(3, size / 2000))
            stop.set()
            await task
            await pub.drain()
            await sub.drain()

            rate = size / send_time if send_time > 0 else 0
            loss = size - recvd
            loss_pct = loss / size * 100
            res = self._res_snapshot()
            print(f" burst={size:>6}: recv={recvd} loss={loss}({loss_pct:.1f}%) "
                  f"rate={rate:.0f} evt/s cpu={res['cpu']:.0f}% mem={res['mem_mb']}MB")

            # Stop ramping once delivery degrades past 5% loss.
            if loss_pct > 5:
                print(f" >> Loss exceeded 5%, stopping ramp")
                break

        await pub.close()
        await sub.close()
        print()

    async def extreme2_max_connections(self):
        """Ramp concurrent connections, each publishing 1 evt/s."""
        print("=" * 60)
        print("EXTREME 2: Max concurrent connections")
        print("=" * 60)

        # One global subscriber
        gsub = C(self.ws_url, "ex2_gsub")
        await gsub.connect()
        await gsub.sub(["ex2.*"])
        await asyncio.sleep(0.1)

        for n_conn in [10, 50, 100, 200, 500]:
            clients = []
            try:
                for i in range(n_conn):
                    c = C(self.ws_url, f"ex2_c{i}")
                    await c.connect()
                    clients.append(c)
            except Exception as e:
                print(f" {len(clients)} connections: FAILED to open more ({e})")
                break

            # Each client sends 1 event
            recvd = 0
            stop = asyncio.Event()

            async def _recv():
                nonlocal recvd
                while not stop.is_set():
                    msg = await gsub.recv(timeout=0.3)
                    if msg and msg.get("type") == "event":
                        recvd += 1

            task = asyncio.create_task(_recv())
            start = time.time()
            for c in clients:
                await c.pub("ex2.ping", {"from": c.name})
            send_time = time.time() - start

            await asyncio.sleep(max(2, n_conn / 200))
            stop.set()
            await task

            res = self._res_snapshot()
            print(f" {n_conn:>4} conns: recv={recvd}/{n_conn} "
                  f"send_time={send_time:.2f}s cpu={res['cpu']:.0f}% mem={res['mem_mb']}MB")

            for c in clients:
                await c.close()

        await gsub.close()
        print()

    async def extreme3_max_message_size(self):
        """Ramp message size until failure."""
        print("=" * 60)
        print("EXTREME 3: Max message size")
        print("=" * 60)

        pub, sub, _ = await self._make_channel("ex3_pub", "ex3_sub", "ex3.*")

        for size_kb in [100, 500, 1000, 2000, 5000]:
            payload = "Z" * (size_kb * 1024)
            latencies = []
            ok = True

            # Send 5 copies at this size; abort the whole ramp on send failure.
            for _ in range(5):
                try:
                    await pub.pub("ex3.big", {"p": payload})
                except Exception as e:
                    print(f" {size_kb:>5} KB: SEND FAILED ({e})")
                    ok = False
                    break

            if not ok:
                break

            await asyncio.sleep(max(1, size_kb / 500))
            # Collect
            for _ in range(20):
                msg = await sub.recv(timeout=0.3)
                if msg and msg.get("type") == "event":
                    ts = msg.get("timestamp", "")
                    try:
                        sent = datetime.fromisoformat(ts)
                        latencies.append((datetime.now(timezone.utc) - sent).total_seconds() * 1000)
                    except Exception:
                        pass
                if msg is None:
                    break

            await pub.drain()
            res = self._res_snapshot()
            if latencies:
                avg = statistics.mean(latencies)
                print(f" {size_kb:>5} KB: recv={len(latencies)}/5 "
                      f"avg={avg:.1f}ms cpu={res['cpu']:.0f}% mem={res['mem_mb']}MB")
            else:
                print(f" {size_kb:>5} KB: recv=0/5 cpu={res['cpu']:.0f}% mem={res['mem_mb']}MB")

        await pub.close()
        await sub.close()
        print()

    # ── Entry ──

    async def run_all(self):
        """Start the server, run phase 1 then the three extreme tests, shut down."""
        self._start_server()
        print(f"Event Hub started on port {self.port}\n")
        try:
            await self.phase1(duration=600)
            await self.extreme1_max_burst()
            await self.extreme2_max_connections()
            await self.extreme3_max_message_size()
            print("All tests complete.")
        finally:
            # Ask the uvicorn thread to wind down even if a test raised.
            if self._server:
                self._server.should_exit = True
456
+
457
+
458
# Run the full benchmark suite (phase 1 + three extreme tests) when
# executed directly: python -m core.event_hub.bench
if __name__ == "__main__":
    asyncio.run(Benchmark().run_all())