@agentunion/kite 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/__init__.py +1 -0
- package/__main__.py +15 -0
- package/cli.js +70 -0
- package/core/__init__.py +0 -0
- package/core/__pycache__/__init__.cpython-313.pyc +0 -0
- package/core/event_hub/BENCHMARK.md +94 -0
- package/core/event_hub/__init__.py +0 -0
- package/core/event_hub/__pycache__/__init__.cpython-313.pyc +0 -0
- package/core/event_hub/__pycache__/bench.cpython-313.pyc +0 -0
- package/core/event_hub/__pycache__/bench_perf.cpython-313.pyc +0 -0
- package/core/event_hub/__pycache__/dedup.cpython-313.pyc +0 -0
- package/core/event_hub/__pycache__/entry.cpython-313.pyc +0 -0
- package/core/event_hub/__pycache__/hub.cpython-313.pyc +0 -0
- package/core/event_hub/__pycache__/router.cpython-313.pyc +0 -0
- package/core/event_hub/__pycache__/server.cpython-313.pyc +0 -0
- package/core/event_hub/bench.py +459 -0
- package/core/event_hub/bench_extreme.py +308 -0
- package/core/event_hub/bench_perf.py +350 -0
- package/core/event_hub/bench_results/.gitkeep +0 -0
- package/core/event_hub/bench_results/2026-02-28_13-26-48.json +51 -0
- package/core/event_hub/bench_results/2026-02-28_13-44-45.json +51 -0
- package/core/event_hub/bench_results/2026-02-28_13-45-39.json +51 -0
- package/core/event_hub/dedup.py +31 -0
- package/core/event_hub/entry.py +113 -0
- package/core/event_hub/hub.py +263 -0
- package/core/event_hub/module.md +21 -0
- package/core/event_hub/router.py +21 -0
- package/core/event_hub/server.py +138 -0
- package/core/event_hub_bench/entry.py +371 -0
- package/core/event_hub_bench/module.md +25 -0
- package/core/launcher/__init__.py +0 -0
- package/core/launcher/__pycache__/__init__.cpython-313.pyc +0 -0
- package/core/launcher/__pycache__/entry.cpython-313.pyc +0 -0
- package/core/launcher/__pycache__/module_scanner.cpython-313.pyc +0 -0
- package/core/launcher/__pycache__/process_manager.cpython-313.pyc +0 -0
- package/core/launcher/data/log/lifecycle.jsonl +1045 -0
- package/core/launcher/data/processes_14752.json +32 -0
- package/core/launcher/data/token.txt +1 -0
- package/core/launcher/entry.py +965 -0
- package/core/launcher/module.md +37 -0
- package/core/launcher/module_scanner.py +253 -0
- package/core/launcher/process_manager.py +435 -0
- package/core/registry/__init__.py +0 -0
- package/core/registry/__pycache__/__init__.cpython-313.pyc +0 -0
- package/core/registry/__pycache__/entry.cpython-313.pyc +0 -0
- package/core/registry/__pycache__/server.cpython-313.pyc +0 -0
- package/core/registry/__pycache__/store.cpython-313.pyc +0 -0
- package/core/registry/data/port.txt +1 -0
- package/core/registry/data/port_14752.txt +1 -0
- package/core/registry/data/port_484.txt +1 -0
- package/core/registry/entry.py +73 -0
- package/core/registry/module.md +30 -0
- package/core/registry/server.py +256 -0
- package/core/registry/store.py +232 -0
- package/extensions/__init__.py +0 -0
- package/extensions/__pycache__/__init__.cpython-313.pyc +0 -0
- package/extensions/services/__init__.py +0 -0
- package/extensions/services/__pycache__/__init__.cpython-313.pyc +0 -0
- package/extensions/services/watchdog/__init__.py +0 -0
- package/extensions/services/watchdog/__pycache__/__init__.cpython-313.pyc +0 -0
- package/extensions/services/watchdog/__pycache__/entry.cpython-313.pyc +0 -0
- package/extensions/services/watchdog/__pycache__/monitor.cpython-313.pyc +0 -0
- package/extensions/services/watchdog/__pycache__/server.cpython-313.pyc +0 -0
- package/extensions/services/watchdog/entry.py +143 -0
- package/extensions/services/watchdog/module.md +25 -0
- package/extensions/services/watchdog/monitor.py +420 -0
- package/extensions/services/watchdog/server.py +167 -0
- package/main.py +17 -0
- package/package.json +27 -0
|
@@ -0,0 +1,308 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Event Hub extreme tests (standalone).
|
|
3
|
+
|
|
4
|
+
Usage: python -m core.event_hub.bench_extreme
|
|
5
|
+
(from Kite root directory)
|
|
6
|
+
|
|
7
|
+
Test 1: Burst ramp — step 1000 from 1000 to 50000
|
|
8
|
+
Test 2: Message size ramp — 500KB to 100MB
|
|
9
|
+
Test 3: Fan-out — 1 publisher, N subscribers (N=1..50)
|
|
10
|
+
"""
|
|
11
|
+
|
|
12
|
+
import asyncio
|
|
13
|
+
import json
|
|
14
|
+
import os
|
|
15
|
+
import socket
|
|
16
|
+
import statistics
|
|
17
|
+
import sys
|
|
18
|
+
import threading
|
|
19
|
+
import time
|
|
20
|
+
import uuid
|
|
21
|
+
from datetime import datetime, timezone
|
|
22
|
+
|
|
23
|
+
_root = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
|
24
|
+
if _root not in sys.path:
|
|
25
|
+
sys.path.insert(0, _root)
|
|
26
|
+
|
|
27
|
+
import uvicorn
|
|
28
|
+
import websockets
|
|
29
|
+
from fastapi import FastAPI, WebSocket, WebSocketDisconnect
|
|
30
|
+
|
|
31
|
+
from core.event_hub.hub import EventHub
|
|
32
|
+
|
|
33
|
+
|
|
34
|
+
def _create_app(hub: EventHub) -> FastAPI:
    """Build a minimal FastAPI app exposing *hub* on a single ``/ws`` endpoint.

    Each connection identifies itself via the ``id`` query parameter (falling
    back to a per-object anonymous id); every received text frame is forwarded
    verbatim to ``hub.handle_message``.
    """
    app = FastAPI()

    @app.websocket("/ws")
    async def ws_endpoint(ws: WebSocket):
        # Fall back to a unique anonymous id when the client sends none.
        mid = ws.query_params.get("id", f"anon_{id(ws)}")
        await ws.accept()
        hub.add_connection(mid, ws)
        try:
            while True:
                raw = await ws.receive_text()
                await hub.handle_message(mid, ws, raw)
        except Exception:
            # FIX: the original caught (WebSocketDisconnect, Exception) —
            # WebSocketDisconnect is an Exception subclass, so the tuple was
            # redundant. Any receive/handle failure just ends the connection.
            pass
        finally:
            hub.remove_connection(mid)

    return app
|
|
52
|
+
|
|
53
|
+
|
|
54
|
+
def _free_port() -> int:
|
|
55
|
+
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
|
|
56
|
+
s.bind(("127.0.0.1", 0))
|
|
57
|
+
return s.getsockname()[1]
|
|
58
|
+
|
|
59
|
+
|
|
60
|
+
class C:
    """Thin websocket test client used by the extreme benchmarks."""

    def __init__(self, url: str, name: str):
        self.url = url
        self.name = name
        self.ws = None  # populated by connect()

    async def connect(self):
        """Open the websocket; the hub identifies us via the id query param."""
        endpoint = f"{self.url}?id={self.name}"
        self.ws = await websockets.connect(endpoint, max_size=None)

    async def sub(self, patterns):
        """Subscribe this connection to the given event patterns."""
        frame = {"type": "subscribe", "events": patterns}
        await self.ws.send(json.dumps(frame))

    async def pub(self, event: str, data: dict) -> str:
        """Publish one event; return the generated event id."""
        eid = str(uuid.uuid4())
        frame = {
            "type": "event",
            "event_id": eid,
            "event": event,
            "source": self.name,
            "timestamp": datetime.now(timezone.utc).isoformat(),
            "data": data,
        }
        await self.ws.send(json.dumps(frame))
        return eid

    async def recv(self, timeout=5.0):
        """Return the next JSON-decoded message, or None on timeout/error."""
        try:
            raw = await asyncio.wait_for(self.ws.recv(), timeout=timeout)
            return json.loads(raw)
        except Exception:
            return None

    async def drain(self, timeout=0.05):
        """Consume and discard queued messages until a receive times out."""
        while True:
            if await self.recv(timeout=timeout) is None:
                break

    async def close(self):
        if self.ws:
            await self.ws.close()
|
|
94
|
+
|
|
95
|
+
|
|
96
|
+
class BenchExtreme:
    """Drives three extreme-load scenarios against an in-process Event Hub.

    Scenario 1 ramps burst sizes, scenario 2 ramps message sizes, scenario 3
    fans a fixed event stream out to an increasing number of subscribers.
    """

    def __init__(self):
        self.hub = EventHub()
        self.port = _free_port()
        self.ws_url = f"ws://127.0.0.1:{self.port}/ws"
        self._server = None  # uvicorn.Server once _start_server() ran

    def _start_server(self):
        """Run uvicorn on a daemon thread; block until startup or 5s elapse."""
        application = _create_app(self.hub)
        config = uvicorn.Config(
            application, host="127.0.0.1", port=self.port, log_level="warning"
        )
        self._server = uvicorn.Server(config)
        threading.Thread(target=self._server.run, daemon=True).start()
        give_up_at = time.time() + 5
        while time.time() < give_up_at:
            if self._server.started:
                return
            time.sleep(0.05)
        raise RuntimeError("Server failed to start")

    async def _make_channel(self, pub_name, sub_name, pattern):
        """Connect a publisher/subscriber pair; subscriber follows *pattern*."""
        pub = C(self.ws_url, pub_name)
        sub = C(self.ws_url, sub_name)
        await pub.connect()
        await sub.connect()
        await sub.sub([pattern])
        await asyncio.sleep(0.1)  # let the subscription register on the hub
        return pub, sub

    # ── Test 1: Burst ramp ──

    async def test_burst_ramp(self):
        """Publish bursts of increasing size and report delivery loss."""
        print("=" * 60)
        print("TEST 1: Burst ramp (step 1000, 1000→50000)")
        print("=" * 60)

        pub, sub = await self._make_channel("burst_pub", "burst_sub", "burst.*")

        for size in range(1000, 50001, 1000):
            # Snapshot hub-internal counters so per-burst deltas can be shown.
            # NOTE(review): reads private _cnt_* attributes of EventHub.
            q_before = self.hub._cnt_queued
            r_before = self.hub._cnt_routed

            recvd = 0
            halt = asyncio.Event()

            async def _collector():
                nonlocal recvd
                while not halt.is_set():
                    msg = await sub.recv(timeout=0.3)
                    if msg and msg.get("type") == "event":
                        recvd += 1

            collector_task = asyncio.create_task(_collector())

            t_send = time.time()
            for i in range(size):
                await pub.pub("burst.test", {"i": i})
            send_time = time.time() - t_send
            rate = size / send_time if send_time > 0 else 0

            # Give delivery a grace period proportional to the burst size.
            await asyncio.sleep(max(3, size / 1000))
            halt.set()
            await collector_task
            await pub.drain()
            await sub.drain()

            # Hub-side deltas for this burst.
            hub_queued = self.hub._cnt_queued - q_before
            hub_routed = self.hub._cnt_routed - r_before

            loss = size - recvd
            loss_pct = loss / size * 100
            print(f" burst={size:>6}: recv={recvd:>6} loss={loss}({loss_pct:.1f}%) "
                  f"rate={rate:.0f} evt/s send={send_time:.2f}s "
                  f"hub_q={hub_queued} hub_r={hub_routed}")

            if loss_pct > 1:
                print(f" >> Loss at burst={size} — hub_queued={hub_queued} hub_routed={hub_routed} client_recv={recvd}")

        await pub.close()
        await sub.close()
        print()

    # ── Test 2: Message size ramp ──

    async def test_message_size_ramp(self):
        """Ramp payload sizes from 500KB to 100MB, three messages each."""
        print("=" * 60)
        print("TEST 2: Message size ramp (500KB→100MB)")
        print("=" * 60)

        pub, sub = await self._make_channel("size_pub", "size_sub", "size.*")

        for size_kb in [500, 1024, 2048, 5120, 10240, 20480, 51200, 102400]:
            size_label = f"{size_kb}KB" if size_kb < 1024 else f"{size_kb//1024}MB"
            payload = "X" * (size_kb * 1024)
            n_msgs = 3
            latencies = []
            ok = True

            for trial in range(n_msgs):
                try:
                    t0 = time.time()
                    await pub.pub("size.test", {"p": payload})
                except Exception as e:
                    print(f" {size_label:>6}: SEND FAILED on msg {trial+1} ({e})")
                    ok = False
                    break

                # Receive timeout scales with payload size.
                wait = max(10, size_kb / 200)
                msg = await sub.recv(timeout=wait)
                elapsed = (time.time() - t0) * 1000

                if msg and msg.get("type") == "event":
                    latencies.append(elapsed)
                await pub.drain()

            if not ok:
                break

            if latencies:
                avg = statistics.mean(latencies)
                print(f" {size_label:>6}: recv={len(latencies)}/{n_msgs} "
                      f"avg={avg:.0f}ms min={min(latencies):.0f}ms max={max(latencies):.0f}ms")
            else:
                # Nothing delivered at this size — larger sizes won't fare better.
                print(f" {size_label:>6}: recv=0/{n_msgs} DELIVERY FAILED")
                break

        await pub.close()
        await sub.close()
        print()

    # ── Test 3: Fan-out ──

    async def test_fanout(self):
        """One publisher, N subscribers; count deliveries per subscriber."""
        print("=" * 60)
        print("TEST 3: Fan-out (1 pub, N subs, 5000 events)")
        print("=" * 60)

        n_events = 5000

        for n_subs in [1, 5, 10, 20, 50]:
            pub = C(self.ws_url, "fanout_pub")
            await pub.connect()

            subs = []
            for i in range(n_subs):
                client = C(self.ws_url, f"fanout_sub_{i}")
                await client.connect()
                await client.sub(["fanout.*"])
                subs.append(client)
            await asyncio.sleep(0.2)

            # One delivery counter per subscriber.
            counts = [0] * n_subs

            async def _recv_loop(idx, client, stop_evt):
                while not stop_evt.is_set():
                    msg = await client.recv(timeout=0.3)
                    if msg and msg.get("type") == "event":
                        counts[idx] += 1

            stop = asyncio.Event()
            tasks = [asyncio.create_task(_recv_loop(i, s, stop)) for i, s in enumerate(subs)]

            t_send = time.time()
            for i in range(n_events):
                await pub.pub("fanout.test", {"i": i})
            send_time = time.time() - t_send

            # Grace period grows with the total delivery volume.
            await asyncio.sleep(max(5, n_events * n_subs / 5000))
            stop.set()
            for t in tasks:
                await t

            await pub.drain()
            for s in subs:
                await s.drain()

            min_recv = min(counts)
            max_recv = max(counts)
            avg_recv = sum(counts) / n_subs
            rate = n_events / send_time if send_time > 0 else 0

            print(f" subs={n_subs:>3}: avg_recv={avg_recv:.0f}/{n_events} "
                  f"min={min_recv} max={max_recv} "
                  f"pub_rate={rate:.0f} evt/s send={send_time:.2f}s")

            await pub.close()
            for s in subs:
                await s.close()

        print()

    # ── Entry ──

    async def run_all(self):
        """Start the server, run all three scenarios, then shut down."""
        self._start_server()
        print(f"Event Hub started on port {self.port}\n")
        try:
            await self.test_burst_ramp()
            await self.test_message_size_ramp()
            await self.test_fanout()
            print("All extreme tests complete.")
        finally:
            # Always signal uvicorn to exit, even when a test raised.
            if self._server:
                self._server.should_exit = True
|
|
305
|
+
|
|
306
|
+
|
|
307
|
+
if __name__ == "__main__":
    # Script entry point: run the full extreme-test suite.
    suite = BenchExtreme()
    asyncio.run(suite.run_all())
|
|
@@ -0,0 +1,350 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Event Hub performance benchmark — quick repeatable run.
|
|
3
|
+
|
|
4
|
+
Usage: python -m core.event_hub.bench_perf
|
|
5
|
+
(from Kite root directory)
|
|
6
|
+
|
|
7
|
+
Runs in ~20s, outputs a summary table for before/after comparison.
|
|
8
|
+
"""
|
|
9
|
+
|
|
10
|
+
import asyncio
|
|
11
|
+
import json
|
|
12
|
+
import os
|
|
13
|
+
import platform
|
|
14
|
+
import socket
|
|
15
|
+
import statistics
|
|
16
|
+
import sys
|
|
17
|
+
import threading
|
|
18
|
+
import time
|
|
19
|
+
import uuid
|
|
20
|
+
from datetime import datetime, timezone
|
|
21
|
+
|
|
22
|
+
_root = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
|
23
|
+
if _root not in sys.path:
|
|
24
|
+
sys.path.insert(0, _root)
|
|
25
|
+
|
|
26
|
+
import uvicorn
|
|
27
|
+
import websockets
|
|
28
|
+
from fastapi import FastAPI, WebSocket, WebSocketDisconnect
|
|
29
|
+
|
|
30
|
+
from core.event_hub.hub import EventHub
|
|
31
|
+
|
|
32
|
+
|
|
33
|
+
def _create_app(hub: EventHub) -> FastAPI:
    """Build a minimal FastAPI app exposing *hub* on a single ``/ws`` endpoint.

    Each connection identifies itself via the ``id`` query parameter (falling
    back to a per-object anonymous id); every received text frame is forwarded
    verbatim to ``hub.handle_message``.
    """
    app = FastAPI()

    @app.websocket("/ws")
    async def ws_endpoint(ws: WebSocket):
        # Fall back to a unique anonymous id when the client sends none.
        mid = ws.query_params.get("id", f"anon_{id(ws)}")
        await ws.accept()
        hub.add_connection(mid, ws)
        try:
            while True:
                raw = await ws.receive_text()
                await hub.handle_message(mid, ws, raw)
        except Exception:
            # FIX: the original caught (WebSocketDisconnect, Exception) —
            # WebSocketDisconnect is an Exception subclass, so the tuple was
            # redundant. Any receive/handle failure just ends the connection.
            pass
        finally:
            hub.remove_connection(mid)

    return app
|
|
51
|
+
|
|
52
|
+
|
|
53
|
+
def _free_port() -> int:
|
|
54
|
+
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
|
|
55
|
+
s.bind(("127.0.0.1", 0))
|
|
56
|
+
return s.getsockname()[1]
|
|
57
|
+
|
|
58
|
+
|
|
59
|
+
class Client:
    """Minimal websocket client wrapper used by the perf benchmark."""

    def __init__(self, url: str, name: str):
        self.url = url
        self.name = name
        self.ws = None  # set by connect()

    async def connect(self):
        """Open the websocket, identifying ourselves via the id query param."""
        target = f"{self.url}?id={self.name}"
        self.ws = await websockets.connect(target, max_size=None)

    async def sub(self, patterns):
        """Subscribe this connection to the given event patterns."""
        message = {"type": "subscribe", "events": patterns}
        await self.ws.send(json.dumps(message))

    async def pub(self, event: str, data: dict) -> str:
        """Publish an event and return its generated id."""
        eid = str(uuid.uuid4())
        message = {
            "type": "event",
            "event_id": eid,
            "event": event,
            "source": self.name,
            "timestamp": datetime.now(timezone.utc).isoformat(),
            "data": data,
        }
        await self.ws.send(json.dumps(message))
        return eid

    async def recv(self, timeout=5.0):
        """Return the next JSON-decoded message, or None on timeout/error."""
        try:
            frame = await asyncio.wait_for(self.ws.recv(), timeout=timeout)
            return json.loads(frame)
        except Exception:
            return None

    async def drain(self, timeout=0.05):
        """Consume and discard pending messages until a receive times out."""
        while True:
            if await self.recv(timeout=timeout) is None:
                break

    async def close(self):
        if self.ws:
            await self.ws.close()
|
|
96
|
+
|
|
97
|
+
|
|
98
|
+
class PerfBench:
    """Quick (~20s), repeatable performance benchmark for the Event Hub.

    Runs three scenarios — throughput, latency, fan-out — then prints a
    summary table and persists the raw numbers as timestamped JSON under
    ``bench_results/`` for before/after comparison.
    """

    def __init__(self):
        self.hub = EventHub()
        self.port = _free_port()
        self.ws_url = f"ws://127.0.0.1:{self.port}/ws"
        self._server = None   # uvicorn.Server once _start_server() ran
        self.results = {}     # scenario name -> metrics dict

    def _start_server(self):
        """Run uvicorn on a daemon thread; block until startup or 5s elapse."""
        app = _create_app(self.hub)
        cfg = uvicorn.Config(app, host="127.0.0.1", port=self.port, log_level="warning")
        self._server = uvicorn.Server(cfg)
        threading.Thread(target=self._server.run, daemon=True).start()
        deadline = time.time() + 5
        while time.time() < deadline:
            if self._server.started:
                return
            time.sleep(0.05)
        raise RuntimeError("Server failed to start")

    async def _make_pair(self, pub_name, sub_name, pattern):
        """Connect a publisher and a subscriber listening on *pattern*."""
        pub = Client(self.ws_url, pub_name)
        sub = Client(self.ws_url, sub_name)
        await pub.connect()
        await sub.connect()
        await sub.sub([pattern])
        await asyncio.sleep(0.1)  # let the subscription register on the hub
        return pub, sub

    # ── 1. Throughput: burst 10000 events, measure send+recv rate ──

    async def bench_throughput(self):
        """Publish N events back-to-back; record send rate and delivery."""
        N = 10000
        pub, sub = await self._make_pair("tp_pub", "tp_sub", "tp.*")
        recvd = 0
        stop = asyncio.Event()

        async def _recv():
            nonlocal recvd
            while not stop.is_set():
                try:
                    raw = await asyncio.wait_for(sub.ws.recv(), timeout=0.5)
                    # Cheap substring probe avoids a JSON parse per message.
                    if '"event"' in raw and '"ack"' not in raw:
                        recvd += 1
                except Exception:
                    pass

        async def _drain_acks():
            """Drain ACKs on publisher side to prevent server-side backpressure."""
            while not stop.is_set():
                try:
                    await asyncio.wait_for(pub.ws.recv(), timeout=0.5)
                except Exception:
                    pass

        recv_task = asyncio.create_task(_recv())
        ack_task = asyncio.create_task(_drain_acks())
        # NOTE(review): reads private _cnt_* attributes of EventHub.
        q_before = self.hub._cnt_queued
        r_before = self.hub._cnt_routed

        t0 = time.time()
        for i in range(N):
            await pub.pub("tp.test", {"i": i})
        send_time = time.time() - t0

        # Wait until hub finishes routing (or the deadline passes)
        deadline = time.time() + max(15, N / 300)
        while recvd < N and time.time() < deadline:
            await asyncio.sleep(0.1)
        recv_time = time.time() - t0
        stop.set()
        await recv_task
        # BUGFIX: the original called cancel() without awaiting the task, so
        # cancellation could still be pending when the sockets are closed
        # below ("Task was destroyed but it is pending"). Await it and absorb
        # the expected CancelledError.
        ack_task.cancel()
        try:
            await ack_task
        except asyncio.CancelledError:
            pass

        hub_queued = self.hub._cnt_queued - q_before
        hub_routed = self.hub._cnt_routed - r_before

        await pub.drain()
        await pub.close()
        await sub.close()

        self.results["throughput"] = {
            "events": N,
            "send_rate": round(N / send_time),
            "client_recv": recvd,
            "hub_queued": hub_queued,
            "hub_routed": hub_routed,
            "send_time": round(send_time, 2),
            "recv_time": round(recv_time, 2),
        }

    # ── 2. Latency: 200 sequential events, measure per-event RTT ──

    async def bench_latency(self):
        """Measure per-event round-trip time over N sequential publishes."""
        N = 200
        pub, sub = await self._make_pair("lat_pub", "lat_sub", "lat.*")
        latencies = []

        for i in range(N):
            t0 = time.time()
            await pub.pub("lat.test", {"i": i})
            # Wait for ack on pub side
            await pub.recv(timeout=2)
            # Wait for event on sub side
            msg = await sub.recv(timeout=2)
            if msg and msg.get("type") == "event":
                latencies.append((time.time() - t0) * 1000)

        await pub.close()
        await sub.close()

        if latencies:
            latencies.sort()
            self.results["latency"] = {
                "samples": len(latencies),
                "avg_ms": round(statistics.mean(latencies), 2),
                "p50_ms": round(latencies[len(latencies) // 2], 2),
                "p95_ms": round(latencies[int(len(latencies) * 0.95)], 2),
                "p99_ms": round(latencies[int(len(latencies) * 0.99)], 2),
            }

    # ── 3. Fan-out: 1 pub, N subs, 2000 events ──

    async def bench_fanout(self):
        """One publisher, N subscribers; record average/min deliveries."""
        N_EVENTS = 2000
        for n_subs in [1, 10, 50]:
            pub = Client(self.ws_url, "fo_pub")
            await pub.connect()

            subs = []
            for i in range(n_subs):
                s = Client(self.ws_url, f"fo_sub_{i}")
                await s.connect()
                await s.sub(["fo.*"])
                subs.append(s)
            await asyncio.sleep(0.2)

            counts = [0] * n_subs
            stop = asyncio.Event()

            async def _recv(idx, client):
                while not stop.is_set():
                    msg = await client.recv(timeout=0.3)
                    if msg and msg.get("type") == "event":
                        counts[idx] += 1

            tasks = [asyncio.create_task(_recv(i, s)) for i, s in enumerate(subs)]

            t0 = time.time()
            for i in range(N_EVENTS):
                await pub.pub("fo.test", {"i": i})
            send_time = time.time() - t0

            # Grace period grows with the total delivery volume.
            await asyncio.sleep(max(3, N_EVENTS * n_subs / 3000))
            stop.set()
            for t in tasks:
                await t

            await pub.drain()
            for s in subs:
                await s.drain()
                await s.close()
            await pub.close()

            avg_recv = sum(counts) / n_subs
            self.results[f"fanout_{n_subs}"] = {
                "subs": n_subs,
                "events": N_EVENTS,
                "send_rate": round(N_EVENTS / send_time),
                "avg_recv": round(avg_recv),
                "min_recv": min(counts),
            }

    # ── Summary ──

    def _print_summary(self):
        """Pretty-print the collected results plus hub-side counters."""
        print("\n" + "=" * 60)
        print("PERFORMANCE SUMMARY")
        print("=" * 60)

        tp = self.results.get("throughput", {})
        print(f"\n [Throughput] {tp.get('events',0)} events")
        print(f" send_rate: {tp.get('send_rate',0)} evt/s")
        print(f" hub_queued: {tp.get('hub_queued',0)} hub_routed: {tp.get('hub_routed',0)}")
        print(f" client_recv: {tp.get('client_recv',0)}")
        print(f" send: {tp.get('send_time',0)}s recv: {tp.get('recv_time',0)}s")

        lat = self.results.get("latency", {})
        print(f"\n [Latency] {lat.get('samples',0)} samples")
        print(f" avg: {lat.get('avg_ms',0)}ms p50: {lat.get('p50_ms',0)}ms "
              f"p95: {lat.get('p95_ms',0)}ms p99: {lat.get('p99_ms',0)}ms")

        for n in [1, 10, 50]:
            fo = self.results.get(f"fanout_{n}", {})
            if fo:
                print(f"\n [Fan-out x{n}] {fo.get('events',0)} events")
                print(f" send_rate: {fo.get('send_rate',0)} evt/s "
                      f"avg_recv: {fo.get('avg_recv',0)} min_recv: {fo.get('min_recv',0)}")

        hub = self.hub._counters_dict()
        print("\n [Hub counters]")
        print(f" received: {hub['events_received']} queued: {hub['events_queued']} "
              f"routed: {hub['events_routed']} dedup: {hub['events_deduplicated']}")
        print("=" * 60)

    # ── Save ──

    def _save_results(self):
        """Persist results (plus env info and hub counters) as timestamped JSON."""
        results_dir = os.path.join(
            os.path.dirname(os.path.abspath(__file__)), "bench_results"
        )
        os.makedirs(results_dir, exist_ok=True)

        now = datetime.now()
        filename = now.strftime("%Y-%m-%d_%H-%M-%S") + ".json"
        filepath = os.path.join(results_dir, filename)

        data = {
            "timestamp": now.isoformat(),
            "env": {
                "platform": sys.platform,
                "python": platform.python_version(),
            },
            **self.results,
            "hub_counters": self.hub._counters_dict(),
        }

        with open(filepath, "w", encoding="utf-8") as f:
            json.dump(data, f, indent=2, ensure_ascii=False)

        print(f"\n Results saved: {filepath}")

    # ── Entry ──

    async def run(self):
        """Start the server, run all benchmarks, print and save results."""
        self._start_server()
        print(f"Event Hub perf bench on port {self.port}")
        try:
            print("\n Running throughput test...")
            await self.bench_throughput()
            print(" Running latency test...")
            await self.bench_latency()
            print(" Running fan-out test...")
            await self.bench_fanout()
            self._print_summary()
            self._save_results()
        finally:
            # Always signal uvicorn to exit, even when a benchmark raised.
            if self._server:
                self._server.should_exit = True
|
|
347
|
+
|
|
348
|
+
|
|
349
|
+
if __name__ == "__main__":
    # Script entry point: run the full perf benchmark once.
    bench = PerfBench()
    asyncio.run(bench.run())
|
|
File without changes
|
|
@@ -0,0 +1,51 @@
|
|
|
1
|
+
{
|
|
2
|
+
"timestamp": "2026-02-28T13:26:48.644977",
|
|
3
|
+
"env": {
|
|
4
|
+
"platform": "win32",
|
|
5
|
+
"python": "3.13.12"
|
|
6
|
+
},
|
|
7
|
+
"throughput": {
|
|
8
|
+
"events": 10000,
|
|
9
|
+
"send_rate": 13121,
|
|
10
|
+
"client_recv": 10000,
|
|
11
|
+
"hub_queued": 10000,
|
|
12
|
+
"hub_routed": 10000,
|
|
13
|
+
"send_time": 0.76,
|
|
14
|
+
"recv_time": 1.59
|
|
15
|
+
},
|
|
16
|
+
"latency": {
|
|
17
|
+
"samples": 200,
|
|
18
|
+
"avg_ms": 0.69,
|
|
19
|
+
"p50_ms": 0.65,
|
|
20
|
+
"p95_ms": 0.92,
|
|
21
|
+
"p99_ms": 1.27
|
|
22
|
+
},
|
|
23
|
+
"fanout_1": {
|
|
24
|
+
"subs": 1,
|
|
25
|
+
"events": 2000,
|
|
26
|
+
"send_rate": 17292,
|
|
27
|
+
"avg_recv": 2000,
|
|
28
|
+
"min_recv": 2000
|
|
29
|
+
},
|
|
30
|
+
"fanout_10": {
|
|
31
|
+
"subs": 10,
|
|
32
|
+
"events": 2000,
|
|
33
|
+
"send_rate": 18074,
|
|
34
|
+
"avg_recv": 2000,
|
|
35
|
+
"min_recv": 2000
|
|
36
|
+
},
|
|
37
|
+
"fanout_50": {
|
|
38
|
+
"subs": 50,
|
|
39
|
+
"events": 2000,
|
|
40
|
+
"send_rate": 6138,
|
|
41
|
+
"avg_recv": 2000,
|
|
42
|
+
"min_recv": 2000
|
|
43
|
+
},
|
|
44
|
+
"hub_counters": {
|
|
45
|
+
"events_received": 16200,
|
|
46
|
+
"events_routed": 132200,
|
|
47
|
+
"events_queued": 132200,
|
|
48
|
+
"events_deduplicated": 0,
|
|
49
|
+
"errors": 0
|
|
50
|
+
}
|
|
51
|
+
}
|