zen-ai-pentest 2.2.0__py3-none-any.whl → 2.3.0__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package as published to a supported public registry. It is provided for informational purposes only.
@@ -0,0 +1,479 @@
+ """
+ API Performance Benchmarks
+
+ Measures API response times, throughput, and endpoint-specific latency.
+ """
+
+ import asyncio
+ import time
+ from typing import List, Dict, Any, Optional, Callable
+ from dataclasses import dataclass
+ from enum import Enum
+
+ import sys
+ from pathlib import Path
+ sys.path.insert(0, str(Path(__file__).parent.parent))
+
+ from modules.benchmark import (
+     BenchmarkRunner, BenchmarkResult, BenchmarkCategory,
+     measure_api_latency, ThroughputMetrics
+ )
+
+
+ class APIEndpointType(Enum):
+     """Types of API endpoints for benchmarking."""
+     AUTH = "authentication"
+     SCANS = "scans"
+     FINDINGS = "findings"
+     REPORTS = "reports"
+     WEBSOCKET = "websocket"
+
+
+ @dataclass
+ class APIBenchmarkConfig:
+     """Configuration for API benchmarks."""
+     base_url: str = "http://localhost:8000"
+     iterations: int = 100
+     concurrent_requests: int = 10
+     warmup_requests: int = 10
+     timeout_seconds: float = 30.0
+
+
+ class APIPerformanceBenchmark:
+     """Benchmark suite for API performance measurement."""
+
+     def __init__(self, output_dir: str = "benchmark_results"):
+         self.runner = BenchmarkRunner(output_dir=output_dir)
+         self.config = APIBenchmarkConfig()
+
+     def _get_test_endpoints(self) -> List[Dict[str, Any]]:
+         """Get test endpoint definitions."""
+         return [
+             {
+                 "name": "health_check",
+                 "path": "/health",
+                 "method": "GET",
+                 "type": APIEndpointType.AUTH,
+                 "payload": None
+             },
+             {
+                 "name": "auth_login",
+                 "path": "/auth/login",
+                 "method": "POST",
+                 "type": APIEndpointType.AUTH,
+                 "payload": {"username": "test", "password": "test"}
+             },
+             {
+                 "name": "list_scans",
+                 "path": "/scans",
+                 "method": "GET",
+                 "type": APIEndpointType.SCANS,
+                 "payload": None
+             },
+             {
+                 "name": "create_scan",
+                 "path": "/scans",
+                 "method": "POST",
+                 "type": APIEndpointType.SCANS,
+                 "payload": {"name": "test", "target": "127.0.0.1", "type": "quick"}
+             },
+             {
+                 "name": "list_findings",
+                 "path": "/findings",
+                 "method": "GET",
+                 "type": APIEndpointType.FINDINGS,
+                 "payload": None
+             },
+         ]
+
+     async def benchmark_endpoint_latency(
+         self,
+         endpoint_name: str,
+         request_func: Callable,
+         config: Optional[APIBenchmarkConfig] = None
+     ) -> BenchmarkResult:
+         """
+         Benchmark latency for a specific endpoint.
+
+         Args:
+             endpoint_name: Name of the endpoint
+             request_func: Async function that makes the request
+             config: Benchmark configuration
+
+         Returns:
+             BenchmarkResult with latency metrics
+         """
+         cfg = config or self.config
+
+         result = await self.runner.run_benchmark(
+             name=f"api_latency_{endpoint_name}",
+             category=BenchmarkCategory.API,
+             description=f"API latency for {endpoint_name}",
+             benchmark_func=request_func,
+             iterations=cfg.iterations,
+             warmup_iterations=cfg.warmup_requests,
+             monitor_resources=True
+         )
+
+         # Calculate RPS
+         result.custom_metrics["requests_per_second"] = (
+             cfg.iterations / result.timing.duration_seconds
+             if result.timing.duration_seconds > 0 else 0
+         )
+         result.custom_metrics["endpoint"] = endpoint_name
+
+         return result
+
+     async def benchmark_all_endpoints(self, config: Optional[APIBenchmarkConfig] = None) -> List[BenchmarkResult]:
+         """
+         Benchmark all API endpoints.
+
+         Returns:
+             List of BenchmarkResult for each endpoint
+         """
+         cfg = config or self.config
+         endpoints = self._get_test_endpoints()
+         results = []
+
+         for endpoint in endpoints:
+             # Create mock request function
+             async def mock_request(endpoint=endpoint):
+                 # Simulate request delay based on endpoint type
+                 delay_map = {
+                     APIEndpointType.AUTH: 0.05,
+                     APIEndpointType.SCANS: 0.1,
+                     APIEndpointType.FINDINGS: 0.08,
+                     APIEndpointType.REPORTS: 0.15,
+                     APIEndpointType.WEBSOCKET: 0.02
+                 }
+                 delay = delay_map.get(endpoint["type"], 0.1)
+                 delay += (hash(endpoint["name"]) % 50) / 1000 # Add variance
+                 await asyncio.sleep(delay)
+                 return {"status": "ok", "endpoint": endpoint["name"]}
+
+             try:
+                 result = await self.benchmark_endpoint_latency(
+                     endpoint["name"],
+                     mock_request,
+                     cfg
+                 )
+                 results.append(result)
+             except Exception as e:
+                 print(f" Failed to benchmark {endpoint['name']}: {e}")
+
+         return results
+
+     async def benchmark_concurrent_requests(self, config: Optional[APIBenchmarkConfig] = None) -> BenchmarkResult:
+         """
+         Benchmark API with concurrent requests.
+
+         Returns:
+             BenchmarkResult with concurrent request metrics
+         """
+         cfg = config or APIBenchmarkConfig(concurrent_requests=20)
+
+         async def concurrent_requests():
+             semaphore = asyncio.Semaphore(cfg.concurrent_requests)
+
+             async def make_request():
+                 async with semaphore:
+                     await asyncio.sleep(0.05) # Simulate request
+                     return {"status": "ok"}
+
+             # Make multiple batches of concurrent requests
+             total_requests = cfg.iterations * cfg.concurrent_requests
+             tasks = [make_request() for _ in range(total_requests)]
+             await asyncio.gather(*tasks)
+
+             return {"total_requests": total_requests}
+
+         result = await self.runner.run_benchmark(
+             name="api_concurrent_requests",
+             category=BenchmarkCategory.API,
+             description=f"API concurrent requests ({cfg.concurrent_requests} parallel)",
+             benchmark_func=concurrent_requests,
+             iterations=1,
+             monitor_resources=True
+         )
+
+         total_requests = cfg.iterations * cfg.concurrent_requests
+         result.throughput = ThroughputMetrics(
+             operations=total_requests,
+             duration_seconds=result.timing.duration_seconds
+         )
+         result.throughput.calculate()
+
+         result.custom_metrics["concurrent_limit"] = cfg.concurrent_requests
+         result.custom_metrics["total_requests"] = total_requests
+         result.custom_metrics["requests_per_second"] = result.throughput.ops_per_second
+
+         return result
+
+     async def benchmark_authentication_flow(self, config: Optional[APIBenchmarkConfig] = None) -> BenchmarkResult:
+         """
+         Benchmark complete authentication flow.
+
+         Returns:
+             BenchmarkResult with auth flow metrics
+         """
+         cfg = config or self.config
+
+         async def auth_flow():
+             # Step 1: Login
+             await asyncio.sleep(0.1)
+
+             # Step 2: Get token
+             await asyncio.sleep(0.05)
+
+             # Step 3: Validate token
+             await asyncio.sleep(0.03)
+
+             return {"authenticated": True}
+
+         result = await self.runner.run_benchmark(
+             name="api_authentication_flow",
+             category=BenchmarkCategory.API,
+             description="Complete authentication flow",
+             benchmark_func=auth_flow,
+             iterations=cfg.iterations,
+             monitor_resources=True
+         )
+
+         result.custom_metrics["flow_steps"] = 3
+         result.custom_metrics["avg_time_per_step"] = result.timing.avg_ms / 3
+
+         return result
+
+     async def benchmark_scan_workflow(self, config: Optional[APIBenchmarkConfig] = None) -> BenchmarkResult:
+         """
+         Benchmark complete scan workflow (create -> start -> poll -> results).
+
+         Returns:
+             BenchmarkResult with scan workflow metrics
+         """
+         cfg = config or self.config
+
+         async def scan_workflow():
+             # Step 1: Create scan
+             await asyncio.sleep(0.1)
+             scan_id = "scan_123"
+
+             # Step 2: Start scan
+             await asyncio.sleep(0.05)
+
+             # Step 3: Poll for completion (3 polls)
+             for _ in range(3):
+                 await asyncio.sleep(0.08)
+
+             # Step 4: Get results
+             await asyncio.sleep(0.1)
+
+             return {"scan_id": scan_id, "completed": True}
+
+         result = await self.runner.run_benchmark(
+             name="api_scan_workflow",
+             category=BenchmarkCategory.API,
+             description="Complete scan workflow",
+             benchmark_func=scan_workflow,
+             iterations=cfg.iterations // 10, # Fewer iterations as it's slower
+             monitor_resources=True
+         )
+
+         result.custom_metrics["workflow_steps"] = 6
+         result.custom_metrics["avg_time_per_step"] = result.timing.avg_ms / 6
+
+         return result
+
+     async def benchmark_websocket_performance(self, config: Optional[APIBenchmarkConfig] = None) -> BenchmarkResult:
+         """
+         Benchmark WebSocket connection performance.
+
+         Returns:
+             BenchmarkResult with WebSocket metrics
+         """
+         cfg = config or APIBenchmarkConfig(iterations=50)
+
+         async def websocket_benchmark():
+             # Simulate WebSocket connection and message exchange
+             messages = 10
+
+             # Connection establishment
+             await asyncio.sleep(0.05)
+
+             # Message exchange
+             for _ in range(messages):
+                 await asyncio.sleep(0.02)
+
+             # Disconnection
+             await asyncio.sleep(0.01)
+
+             return {"messages_exchanged": messages}
+
+         result = await self.runner.run_benchmark(
+             name="api_websocket_performance",
+             category=BenchmarkCategory.API,
+             description="WebSocket connection and messaging",
+             benchmark_func=websocket_benchmark,
+             iterations=cfg.iterations,
+             monitor_resources=True
+         )
+
+         result.custom_metrics["messages_per_connection"] = 10
+         result.custom_metrics["connections_per_second"] = (
+             cfg.iterations / result.timing.duration_seconds
+             if result.timing.duration_seconds > 0 else 0
+         )
+
+         return result
+
+     async def benchmark_memory_usage_under_load(self, config: Optional[APIBenchmarkConfig] = None) -> BenchmarkResult:
+         """
+         Benchmark API memory usage under sustained load.
+
+         Returns:
+             BenchmarkResult with memory usage metrics
+         """
+         cfg = config or APIBenchmarkConfig(
+             iterations=500,
+             concurrent_requests=50
+         )
+
+         async def sustained_load():
+             semaphore = asyncio.Semaphore(cfg.concurrent_requests)
+
+             async def make_request():
+                 async with semaphore:
+                     # Simulate varying response sizes
+                     await asyncio.sleep(0.03)
+                     return {"data": "x" * 1000} # 1KB response
+
+             # Sustained load
+             tasks = [make_request() for _ in range(cfg.iterations)]
+             await asyncio.gather(*tasks)
+
+             return {"requests_made": cfg.iterations}
+
+         result = await self.runner.run_benchmark(
+             name="api_memory_under_load",
+             category=BenchmarkCategory.API,
+             description="Memory usage under sustained load",
+             benchmark_func=sustained_load,
+             iterations=1,
+             monitor_resources=True
+         )
+
+         result.custom_metrics["total_requests"] = cfg.iterations
+         result.custom_metrics["memory_per_100_requests"] = (
+             result.memory.peak_mb / (cfg.iterations / 100)
+         )
+
+         return result
+
+     async def run_all(self) -> List[BenchmarkResult]:
+         """Run all API performance benchmarks."""
+         results = []
+
+         print("Running API performance benchmarks...")
+
+         # Benchmark individual endpoints
+         print(" Running: Individual endpoint latency...")
+         endpoint_results = await self.benchmark_all_endpoints()
+         results.extend(endpoint_results)
+         for result in endpoint_results:
+             self.runner.save_result(result)
+             print(f" ✓ {result.custom_metrics.get('endpoint', 'unknown')}: "
+                   f"{result.timing.avg_ms:.2f}ms avg")
+
+         # Run other benchmarks
+         benchmarks = [
+             ("Concurrent Requests", self.benchmark_concurrent_requests),
+             ("Authentication Flow", self.benchmark_authentication_flow),
+             ("Scan Workflow", self.benchmark_scan_workflow),
+             ("WebSocket Performance", self.benchmark_websocket_performance),
+             ("Memory Under Load", self.benchmark_memory_usage_under_load),
+         ]
+
+         for name, benchmark_func in benchmarks:
+             print(f" Running: {name}...")
+             try:
+                 result = await benchmark_func()
+                 results.append(result)
+                 self.runner.save_result(result)
+                 print(f" ✓ {name}: {result.timing.avg_ms:.2f}ms avg")
+             except Exception as e:
+                 print(f" ✗ {name} failed: {e}")
+
+         # Save combined results
+         self.runner.save_all_results("api_benchmarks.json")
+
+         return results
+
+     def get_summary(self) -> Dict[str, Any]:
+         """Get summary of API benchmark results."""
+         return self.runner.get_summary()
+
+
+ # Convenience function
+ async def measure_api_response_time(
+     endpoint: str,
+     request_func: Callable,
+     iterations: int = 100,
+     output_dir: str = "benchmark_results"
+ ) -> BenchmarkResult:
+     """
+     Quick function to measure API response time.
+
+     Args:
+         endpoint: API endpoint name/path
+         request_func: Async function that makes the request
+         iterations: Number of requests
+         output_dir: Directory for results
+
+     Returns:
+         BenchmarkResult with latency metrics
+     """
+     benchmark = APIPerformanceBenchmark(output_dir=output_dir)
+     result = await benchmark.benchmark_endpoint_latency(endpoint, request_func)
+     return result
+
+
+ # CLI interface
+ if __name__ == "__main__":
+     import argparse
+
+     parser = argparse.ArgumentParser(description="API Performance Benchmarks")
+     parser.add_argument("--output", default="benchmark_results", help="Output directory")
+     parser.add_argument("--iterations", type=int, default=100, help="Number of iterations")
+     parser.add_argument("--concurrent", type=int, default=10, help="Concurrent requests")
+     parser.add_argument("--base-url", default="http://localhost:8000", help="API base URL")
+
+     args = parser.parse_args()
+
+     async def main():
+         config = APIBenchmarkConfig(
+             iterations=args.iterations,
+             concurrent_requests=args.concurrent,
+             base_url=args.base_url
+         )
+
+         benchmark = APIPerformanceBenchmark(output_dir=args.output)
+         results = await benchmark.run_all()
+
+         print("\n" + "="*60)
+         print("API PERFORMANCE BENCHMARK RESULTS")
+         print("="*60)
+
+         for result in results:
+             print(f"\n{result.name}:")
+             print(f" Avg Latency: {result.timing.avg_ms:.2f}ms")
+             print(f" P95: {result.timing.p95_ms:.2f}ms")
+             print(f" P99: {result.timing.p99_ms:.2f}ms")
+
+             if "requests_per_second" in result.custom_metrics:
+                 rps = result.custom_metrics["requests_per_second"]
+                 print(f" RPS: {rps:.2f}")
+
+             if result.memory.peak_mb > 0:
+                 print(f" Peak Memory: {result.memory.peak_mb:.2f} MB")
+
+     asyncio.run(main())
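
For readers who want to exercise the new module by hand, the sketch below drives the measure_api_response_time helper added in this release against the default /health endpoint. It is illustrative only and not part of the diff above: the import path benchmarks.api_benchmarks and the aiohttp-based request function are assumptions, since the diff does not show where the file lives inside the wheel; any async callable works as request_func.

import asyncio

import aiohttp

from benchmarks.api_benchmarks import measure_api_response_time  # hypothetical module path


async def main() -> None:
    async with aiohttp.ClientSession() as session:

        async def fetch_health():
            # Matches the default base_url and the /health endpoint from _get_test_endpoints()
            async with session.get("http://localhost:8000/health") as resp:
                return await resp.text()

        # Runs the request 50 times (plus warmup) and returns a BenchmarkResult
        result = await measure_api_response_time("health_check", fetch_health, iterations=50)
        print(f"health_check avg latency: {result.timing.avg_ms:.2f} ms")


asyncio.run(main())

Alternatively, running the module directly uses its own CLI (--output, --iterations, --concurrent, --base-url) to execute the full suite via run_all().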