zen-ai-pentest 2.2.0__py3-none-any.whl → 2.3.0__py3-none-any.whl

This diff shows the changes between publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the packages as they appear in their respective public registries.
modules/benchmark.py ADDED
@@ -0,0 +1,706 @@
+ """
+ Zen-AI-Pentest Benchmark Framework Module
+
+ Core benchmark framework for performance metrics collection, timing measurements,
+ resource usage tracking, and baseline comparisons.
+
+ Author: Zen-AI Team
+ Version: 1.0.0
+ """
+
+ import asyncio
+ import json
+ import logging
+ import os
+ import platform
+ import psutil
+ import statistics
+ import time
+ import uuid
+ from abc import ABC, abstractmethod
+ from contextlib import contextmanager
+ from dataclasses import dataclass, field, asdict
+ from datetime import datetime
+ from enum import Enum, auto
+ from pathlib import Path
+ from typing import Callable, Dict, List, Optional, Any, Union, Tuple
+ from functools import wraps
+
+ # Configure logging
+ logger = logging.getLogger(__name__)
+
+
+ class MetricType(Enum):
+     """Types of performance metrics."""
+     TIMING = "timing"
+     MEMORY = "memory"
+     CPU = "cpu"
+     THROUGHPUT = "throughput"
+     LATENCY = "latency"
+     COUNT = "count"
+     PERCENTAGE = "percentage"
+
+
+ class BenchmarkCategory(Enum):
+     """Benchmark categories."""
+     SCAN = "scan"
+     AGENT = "agent"
+     API = "api"
+     TOOL = "tool"
+     OVERALL = "overall"
+
+
+ @dataclass
+ class MetricValue:
+     """Single metric measurement."""
+     name: str
+     value: float
+     unit: str
+     metric_type: MetricType
+     timestamp: datetime = field(default_factory=datetime.utcnow)
+     metadata: Dict[str, Any] = field(default_factory=dict)
+
+     def to_dict(self) -> Dict[str, Any]:
+         return {
+             "name": self.name,
+             "value": self.value,
+             "unit": self.unit,
+             "type": self.metric_type.value,
+             "timestamp": self.timestamp.isoformat(),
+             "metadata": self.metadata
+         }
+
+
+ @dataclass
+ class TimingMetrics:
+     """Timing-related metrics."""
+     start_time: Optional[datetime] = None
+     end_time: Optional[datetime] = None
+     duration_ms: float = 0.0
+     iterations: int = 0
+     min_ms: float = 0.0
+     max_ms: float = 0.0
+     avg_ms: float = 0.0
+     median_ms: float = 0.0
+     std_dev_ms: float = 0.0
+     p95_ms: float = 0.0
+     p99_ms: float = 0.0
+
+     @property
+     def duration_seconds(self) -> float:
+         """Get duration in seconds."""
+         return self.duration_ms / 1000.0
+
+     def to_dict(self) -> Dict[str, Any]:
+         return {
+             "start_time": self.start_time.isoformat() if self.start_time else None,
+             "end_time": self.end_time.isoformat() if self.end_time else None,
+             "duration_ms": self.duration_ms,
+             "duration_seconds": self.duration_seconds,
+             "iterations": self.iterations,
+             "min_ms": self.min_ms,
+             "max_ms": self.max_ms,
+             "avg_ms": self.avg_ms,
+             "median_ms": self.median_ms,
+             "std_dev_ms": self.std_dev_ms,
+             "p95_ms": self.p95_ms,
+             "p99_ms": self.p99_ms
+         }
+
+
+ @dataclass
+ class MemoryMetrics:
+     """Memory usage metrics."""
+     initial_mb: float = 0.0
+     peak_mb: float = 0.0
+     final_mb: float = 0.0
+     delta_mb: float = 0.0
+     avg_mb: float = 0.0
+     samples: List[float] = field(default_factory=list)
+
+     def to_dict(self) -> Dict[str, Any]:
+         return {
+             "initial_mb": self.initial_mb,
+             "peak_mb": self.peak_mb,
+             "final_mb": self.final_mb,
+             "delta_mb": self.delta_mb,
+             "avg_mb": self.avg_mb,
+             "sample_count": len(self.samples)
+         }
+
+
+ @dataclass
+ class CPUMetrics:
+     """CPU usage metrics."""
+     avg_percent: float = 0.0
+     peak_percent: float = 0.0
+     samples: List[float] = field(default_factory=list)
+
+     def to_dict(self) -> Dict[str, Any]:
+         return {
+             "avg_percent": self.avg_percent,
+             "peak_percent": self.peak_percent,
+             "sample_count": len(self.samples)
+         }
+
+
+ @dataclass
+ class ThroughputMetrics:
+     """Throughput metrics (operations per time unit)."""
+     operations: int = 0
+     duration_seconds: float = 0.0
+     ops_per_second: float = 0.0
+     ops_per_minute: float = 0.0
+
+     def calculate(self) -> None:
+         """Calculate throughput rates."""
+         if self.duration_seconds > 0:
+             self.ops_per_second = self.operations / self.duration_seconds
+             self.ops_per_minute = self.ops_per_second * 60
+
+     def to_dict(self) -> Dict[str, Any]:
+         return {
+             "operations": self.operations,
+             "duration_seconds": self.duration_seconds,
+             "ops_per_second": self.ops_per_second,
+             "ops_per_minute": self.ops_per_minute
+         }
+
+
+ @dataclass
+ class BenchmarkResult:
+     """Complete benchmark result."""
+     benchmark_id: str
+     name: str
+     category: BenchmarkCategory
+     description: str
+
+     # Timing
+     timing: TimingMetrics = field(default_factory=TimingMetrics)
+
+     # Resources
+     memory: MemoryMetrics = field(default_factory=MemoryMetrics)
+     cpu: CPUMetrics = field(default_factory=CPUMetrics)
+
+     # Throughput
+     throughput: ThroughputMetrics = field(default_factory=ThroughputMetrics)
+
+     # Additional metrics
+     custom_metrics: Dict[str, Any] = field(default_factory=dict)
+
+     # Metadata
+     timestamp: datetime = field(default_factory=datetime.utcnow)
+     version: str = "1.0.0"
+     environment: Dict[str, Any] = field(default_factory=dict)
+
+     def to_dict(self) -> Dict[str, Any]:
+         return {
+             "benchmark_id": self.benchmark_id,
+             "name": self.name,
+             "category": self.category.value,
+             "description": self.description,
+             "timing": self.timing.to_dict(),
+             "memory": self.memory.to_dict(),
+             "cpu": self.cpu.to_dict(),
+             "throughput": self.throughput.to_dict(),
+             "custom_metrics": self.custom_metrics,
+             "timestamp": self.timestamp.isoformat(),
+             "version": self.version,
+             "environment": self.environment
+         }
+
+     def to_json(self, indent: int = 2) -> str:
+         """Convert to JSON string."""
+         return json.dumps(self.to_dict(), indent=indent, default=str)
+
+
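As a quick illustration of the result container defined above, the following sketch (illustrative only; the import path modules.benchmark is assumed) builds a BenchmarkResult by hand and serializes it:

    from modules.benchmark import BenchmarkResult, BenchmarkCategory

    result = BenchmarkResult(
        benchmark_id="demo1234",
        name="example",
        category=BenchmarkCategory.TOOL,
        description="Hand-built result for illustration",
    )
    result.timing.duration_ms = 123.4
    result.custom_metrics["items_processed"] = 42
    print(result.to_json())                        # nested to_dict() output, JSON-encoded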
+ class ResourceMonitor:
+     """Monitor system resources during benchmark execution."""
+
+     def __init__(self, interval: float = 0.5):
+         self.interval = interval
+         self.monitoring = False
+         self.memory_samples: List[float] = []
+         self.cpu_samples: List[float] = []
+         self._monitor_task: Optional[asyncio.Task] = None
+         self._process = psutil.Process()
+
+     async def start(self) -> None:
+         """Start resource monitoring."""
+         self.monitoring = True
+         self.memory_samples = []
+         self.cpu_samples = []
+         self._monitor_task = asyncio.create_task(self._monitor_loop())
+         logger.debug("Resource monitoring started")
+
+     async def stop(self) -> None:
+         """Stop resource monitoring."""
+         self.monitoring = False
+         if self._monitor_task:
+             self._monitor_task.cancel()
+             try:
+                 await self._monitor_task
+             except asyncio.CancelledError:
+                 pass
+         logger.debug("Resource monitoring stopped")
+
+     async def _monitor_loop(self) -> None:
+         """Monitoring loop."""
+         while self.monitoring:
+             try:
+                 # Memory usage in MB
+                 mem_info = self._process.memory_info()
+                 memory_mb = mem_info.rss / (1024 * 1024)
+                 self.memory_samples.append(memory_mb)
+
+                 # CPU usage percentage
+                 cpu_percent = self._process.cpu_percent(interval=None)
+                 self.cpu_samples.append(cpu_percent)
+
+                 await asyncio.sleep(self.interval)
+             except Exception as e:
+                 logger.warning(f"Error in resource monitoring: {e}")
+                 break
+
+     def get_memory_metrics(self) -> MemoryMetrics:
+         """Get memory metrics from samples."""
+         if not self.memory_samples:
+             return MemoryMetrics()
+
+         return MemoryMetrics(
+             initial_mb=self.memory_samples[0] if self.memory_samples else 0,
+             peak_mb=max(self.memory_samples) if self.memory_samples else 0,
+             final_mb=self.memory_samples[-1] if self.memory_samples else 0,
+             delta_mb=(self.memory_samples[-1] - self.memory_samples[0])
+             if len(self.memory_samples) > 1 else 0,
+             avg_mb=statistics.mean(self.memory_samples) if self.memory_samples else 0,
+             samples=self.memory_samples.copy()
+         )
+
+     def get_cpu_metrics(self) -> CPUMetrics:
+         """Get CPU metrics from samples."""
+         if not self.cpu_samples:
+             return CPUMetrics()
+
+         return CPUMetrics(
+             avg_percent=statistics.mean(self.cpu_samples) if self.cpu_samples else 0,
+             peak_percent=max(self.cpu_samples) if self.cpu_samples else 0,
+             samples=self.cpu_samples.copy()
+         )
+
+
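A minimal usage sketch for ResourceMonitor (illustrative; it assumes the module is importable as modules.benchmark and that psutil is installed):

    import asyncio
    from modules.benchmark import ResourceMonitor

    async def main() -> None:
        monitor = ResourceMonitor(interval=0.25)   # sample every 250 ms
        await monitor.start()
        await asyncio.sleep(1.0)                   # stand-in for the workload being profiled
        await monitor.stop()
        print(monitor.get_memory_metrics().to_dict())
        print(monitor.get_cpu_metrics().to_dict())

    asyncio.run(main())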
+ class BenchmarkRunner:
+     """Runner for executing benchmarks with full metrics collection."""
+
+     def __init__(self, output_dir: str = "benchmark_results"):
+         self.output_dir = Path(output_dir)
+         self.output_dir.mkdir(parents=True, exist_ok=True)
+         self.results: List[BenchmarkResult] = []
+         self.baselines: Dict[str, Dict[str, Any]] = {}
+         self._load_baselines()
+
+     def _load_baselines(self) -> None:
+         """Load baseline metrics from file."""
+         baseline_file = self.output_dir / "baselines.json"
+         if baseline_file.exists():
+             try:
+                 with open(baseline_file) as f:
+                     self.baselines = json.load(f)
+                 logger.info(f"Loaded {len(self.baselines)} baselines")
+             except Exception as e:
+                 logger.warning(f"Could not load baselines: {e}")
+
+     def _save_baselines(self) -> None:
+         """Save baseline metrics to file."""
+         baseline_file = self.output_dir / "baselines.json"
+         try:
+             with open(baseline_file, 'w') as f:
+                 json.dump(self.baselines, f, indent=2)
+         except Exception as e:
+             logger.warning(f"Could not save baselines: {e}")
+
+     def _get_environment_info(self) -> Dict[str, Any]:
+         """Get environment information."""
+         return {
+             "platform": platform.system(),
+             "platform_version": platform.version(),
+             "python_version": platform.python_version(),
+             "cpu_count": psutil.cpu_count(),
+             "total_memory_gb": psutil.virtual_memory().total / (1024**3),
+             "timestamp": datetime.utcnow().isoformat()
+         }
+
+     @contextmanager
+     def measure(self, name: str, category: BenchmarkCategory,
+                 description: str = ""):
+         """Context manager for simple timing measurements."""
+         start_time = time.perf_counter()
+         start_dt = datetime.utcnow()
+
+         result = BenchmarkResult(
+             benchmark_id=str(uuid.uuid4())[:8],
+             name=name,
+             category=category,
+             description=description,
+             environment=self._get_environment_info()
+         )
+         result.timing.start_time = start_dt
+
+         try:
+             yield result
+         finally:
+             end_time = time.perf_counter()
+             result.timing.end_time = datetime.utcnow()
+             result.timing.duration_ms = (end_time - start_time) * 1000
+
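A short sketch of the measure() context manager (assuming the module imports as modules.benchmark); note that measure() only records wall-clock timing and does not append to runner.results or collect resource metrics:

    import time
    from modules.benchmark import BenchmarkRunner, BenchmarkCategory

    runner = BenchmarkRunner(output_dir="benchmark_results")
    with runner.measure("config_parse", BenchmarkCategory.TOOL, "parse a config file") as result:
        time.sleep(0.1)                            # stand-in for the code being timed
    print(f"{result.name}: {result.timing.duration_ms:.1f} ms")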
+     async def run_benchmark(
+         self,
+         name: str,
+         category: BenchmarkCategory,
+         benchmark_func: Callable,
+         description: str = "",
+         iterations: int = 1,
+         warmup_iterations: int = 0,
+         monitor_resources: bool = True,
+         **func_kwargs
+     ) -> BenchmarkResult:
+         """
+         Run a benchmark function with full metrics collection.
+
+         Args:
+             name: Benchmark name
+             category: Benchmark category
+             benchmark_func: Async function to benchmark
+             description: Benchmark description
+             iterations: Number of iterations to run
+             warmup_iterations: Number of warmup iterations (not measured)
+             monitor_resources: Whether to monitor resource usage
+             **func_kwargs: Arguments to pass to benchmark function
+
+         Returns:
+             BenchmarkResult with all metrics
+         """
+         benchmark_id = str(uuid.uuid4())[:8]
+
+         result = BenchmarkResult(
+             benchmark_id=benchmark_id,
+             name=name,
+             category=category,
+             description=description,
+             environment=self._get_environment_info()
+         )
+
+         logger.info(f"Starting benchmark: {name} ({benchmark_id})")
+
+         # Warmup iterations
+         for i in range(warmup_iterations):
+             logger.debug(f"Warmup iteration {i+1}/{warmup_iterations}")
+             await benchmark_func(**func_kwargs)
+
+         # Start resource monitoring
+         monitor = ResourceMonitor()
+         if monitor_resources:
+             await monitor.start()
+
+         # Run benchmark iterations
+         iteration_times = []
+         result.timing.start_time = datetime.utcnow()
+
+         for i in range(iterations):
+             iter_start = time.perf_counter()
+
+             try:
+                 await benchmark_func(**func_kwargs)
+             except Exception as e:
+                 logger.error(f"Benchmark iteration {i+1} failed: {e}")
+                 raise
+
+             iter_end = time.perf_counter()
+             iter_time_ms = (iter_end - iter_start) * 1000
+             iteration_times.append(iter_time_ms)
+
+         result.timing.end_time = datetime.utcnow()
+
+         # Stop resource monitoring
+         if monitor_resources:
+             await monitor.stop()
+             result.memory = monitor.get_memory_metrics()
+             result.cpu = monitor.get_cpu_metrics()
+
+         # Calculate timing metrics
+         result.timing.iterations = iterations
+         result.timing.duration_ms = sum(iteration_times)
+
+         if iteration_times:
+             result.timing.min_ms = min(iteration_times)
+             result.timing.max_ms = max(iteration_times)
+             result.timing.avg_ms = statistics.mean(iteration_times)
+             result.timing.median_ms = statistics.median(iteration_times)
+
+             if len(iteration_times) > 1:
+                 result.timing.std_dev_ms = statistics.stdev(iteration_times)
+
+             # Calculate percentiles
+             sorted_times = sorted(iteration_times)
+             p95_idx = int(len(sorted_times) * 0.95)
+             p99_idx = int(len(sorted_times) * 0.99)
+             result.timing.p95_ms = sorted_times[min(p95_idx, len(sorted_times)-1)]
+             result.timing.p99_ms = sorted_times[min(p99_idx, len(sorted_times)-1)]
+
+         # Store result
+         self.results.append(result)
+
+         logger.info(f"Benchmark completed: {name} - "
+                     f"Duration: {result.timing.duration_ms:.2f}ms, "
+                     f"Avg: {result.timing.avg_ms:.2f}ms")
+
+         return result
+
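An illustrative end-to-end call to run_benchmark(); the coroutine fetch_page and its delay argument are invented for the example, and the import path is assumed:

    import asyncio
    from modules.benchmark import BenchmarkRunner, BenchmarkCategory

    async def fetch_page(delay: float = 0.05) -> None:
        await asyncio.sleep(delay)                 # placeholder for real async work

    async def main() -> None:
        runner = BenchmarkRunner()
        result = await runner.run_benchmark(
            name="fetch_page",
            category=BenchmarkCategory.API,
            benchmark_func=fetch_page,
            description="Simulated page fetch",
            iterations=20,
            warmup_iterations=2,                   # executed but not timed
            delay=0.05,                            # forwarded to fetch_page via **func_kwargs
        )
        print(result.timing.avg_ms, result.timing.p95_ms, result.memory.peak_mb)
        runner.save_result(result)                 # writes <category>_<name>_<id>.json in output_dir

    asyncio.run(main())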
+     def compare_with_baseline(self, result: BenchmarkResult) -> Dict[str, Any]:
+         """Compare benchmark result with baseline."""
+         baseline_key = f"{result.category.value}:{result.name}"
+
+         if baseline_key not in self.baselines:
+             return {"status": "no_baseline", "message": "No baseline found"}
+
+         baseline = self.baselines[baseline_key]
+         comparison = {
+             "status": "compared",
+             "baseline_timestamp": baseline.get("timestamp"),
+             "metrics": {}
+         }
+
+         # Compare timing
+         if "timing" in baseline:
+             baseline_duration = baseline["timing"].get("duration_ms", 0)
+             current_duration = result.timing.duration_ms
+
+             if baseline_duration > 0:
+                 change_pct = ((current_duration - baseline_duration)
+                               / baseline_duration * 100)
+                 comparison["metrics"]["duration_ms"] = {
+                     "baseline": baseline_duration,
+                     "current": current_duration,
+                     "change_percent": change_pct,
+                     "regression": change_pct > 10  # 10% threshold
+                 }
+
+         # Compare memory
+         if "memory" in baseline and baseline["memory"]:
+             baseline_peak = baseline["memory"].get("peak_mb", 0)
+             current_peak = result.memory.peak_mb
+
+             if baseline_peak > 0:
+                 change_pct = ((current_peak - baseline_peak) / baseline_peak * 100)
+                 comparison["metrics"]["peak_memory_mb"] = {
+                     "baseline": baseline_peak,
+                     "current": current_peak,
+                     "change_percent": change_pct,
+                     "regression": change_pct > 20  # 20% threshold
+                 }
+
+         return comparison
+
+     def set_baseline(self, result: BenchmarkResult) -> None:
+         """Set benchmark result as new baseline."""
+         baseline_key = f"{result.category.value}:{result.name}"
+         self.baselines[baseline_key] = result.to_dict()
+         self._save_baselines()
+         logger.info(f"Set baseline for {baseline_key}")
+
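A sketch of the baseline workflow built on set_baseline() and compare_with_baseline(); the workload coroutine is hypothetical, and the 10% duration and 20% peak-memory thresholds come from the code above:

    import asyncio
    from modules.benchmark import BenchmarkRunner, BenchmarkCategory

    async def workload() -> None:
        await asyncio.sleep(0.02)                  # placeholder workload

    async def main() -> None:
        runner = BenchmarkRunner()
        first = await runner.run_benchmark("workload", BenchmarkCategory.TOOL, workload)
        runner.set_baseline(first)                 # persisted to benchmark_results/baselines.json

        second = await runner.run_benchmark("workload", BenchmarkCategory.TOOL, workload)
        report = runner.compare_with_baseline(second)
        if report["status"] == "compared":
            timing = report["metrics"].get("duration_ms", {})
            if timing.get("regression"):           # True when duration grew by more than 10%
                print(f"Regression: +{timing['change_percent']:.1f}% vs baseline")

    asyncio.run(main())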
+     def save_result(self, result: BenchmarkResult,
+                     filename: Optional[str] = None) -> Path:
+         """Save benchmark result to file."""
+         if filename is None:
+             filename = f"{result.category.value}_{result.name}_{result.benchmark_id}.json"
+             filename = filename.replace(" ", "_").lower()
+
+         filepath = self.output_dir / filename
+
+         with open(filepath, 'w') as f:
+             f.write(result.to_json())
+
+         logger.info(f"Saved benchmark result to {filepath}")
+         return filepath
+
+     def save_all_results(self, filename: str = "all_results.json") -> Path:
+         """Save all benchmark results to a single file."""
+         filepath = self.output_dir / filename
+
+         data = {
+             "timestamp": datetime.utcnow().isoformat(),
+             "count": len(self.results),
+             "results": [r.to_dict() for r in self.results]
+         }
+
+         with open(filepath, 'w') as f:
+             json.dump(data, f, indent=2)
+
+         return filepath
+
+     def get_summary(self) -> Dict[str, Any]:
+         """Get summary of all benchmark results."""
+         if not self.results:
+             return {"message": "No benchmark results"}
+
+         by_category = {}
+         for result in self.results:
+             cat = result.category.value
+             if cat not in by_category:
+                 by_category[cat] = []
+             by_category[cat].append({
+                 "name": result.name,
+                 "duration_ms": result.timing.duration_ms,
+                 "avg_ms": result.timing.avg_ms,
+                 "peak_memory_mb": result.memory.peak_mb
+             })
+
+         return {
+             "total_benchmarks": len(self.results),
+             "categories": list(by_category.keys()),
+             "by_category": by_category
+         }
+
+
+ def benchmark_timer(name: str, category: BenchmarkCategory = BenchmarkCategory.OVERALL):
+     """Decorator for timing function execution."""
+     def decorator(func: Callable) -> Callable:
+         @wraps(func)
+         async def async_wrapper(*args, **kwargs) -> Any:
+             start = time.perf_counter()
+             try:
+                 return await func(*args, **kwargs)
+             finally:
+                 duration = (time.perf_counter() - start) * 1000
+                 logger.debug(f"[Benchmark] {name}: {duration:.2f}ms")
+
+         @wraps(func)
+         def sync_wrapper(*args, **kwargs) -> Any:
+             start = time.perf_counter()
+             try:
+                 return func(*args, **kwargs)
+             finally:
+                 duration = (time.perf_counter() - start) * 1000
+                 logger.debug(f"[Benchmark] {name}: {duration:.2f}ms")
+
+         return async_wrapper if asyncio.iscoroutinefunction(func) else sync_wrapper
+     return decorator
+
+
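benchmark_timer wraps both sync and async callables and logs at DEBUG level, so logging must be configured accordingly; a small sketch (parse_report and fetch_token are invented examples):

    import asyncio
    import logging
    from modules.benchmark import benchmark_timer, BenchmarkCategory

    logging.basicConfig(level=logging.DEBUG)

    @benchmark_timer("parse_report", BenchmarkCategory.TOOL)
    def parse_report(text: str) -> int:
        return len(text.splitlines())

    @benchmark_timer("fetch_token", BenchmarkCategory.API)
    async def fetch_token() -> str:
        await asyncio.sleep(0.01)
        return "token"

    parse_report("a\nb\nc")                        # logs "[Benchmark] parse_report: ...ms"
    asyncio.run(fetch_token())                     # logs "[Benchmark] fetch_token: ...ms"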
+ class ThroughputCalculator:
+     """Calculate throughput metrics for operations."""
+
+     def __init__(self):
+         self.operations = 0
+         self.start_time: Optional[float] = None
+         self.end_time: Optional[float] = None
+
+     def start(self) -> None:
+         """Start throughput measurement."""
+         self.start_time = time.perf_counter()
+         self.operations = 0
+
+     def record_operation(self, count: int = 1) -> None:
+         """Record operations completed."""
+         self.operations += count
+
+     def stop(self) -> ThroughputMetrics:
+         """Stop measurement and return throughput metrics."""
+         self.end_time = time.perf_counter()
+
+         duration = (self.end_time - self.start_time) if self.start_time else 0
+
+         metrics = ThroughputMetrics(
+             operations=self.operations,
+             duration_seconds=duration
+         )
+         metrics.calculate()
+
+         return metrics
+
+
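A usage sketch for ThroughputCalculator; the loop body is a placeholder for real work:

    from modules.benchmark import ThroughputCalculator

    calc = ThroughputCalculator()
    calc.start()
    for _ in range(500):
        calc.record_operation()                    # one call per completed (placeholder) operation
    metrics = calc.stop()
    print(f"{metrics.ops_per_second:.0f} ops/s over {metrics.duration_seconds:.3f} s")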
+ # Convenience functions
+ async def measure_scan_throughput(
+     scan_func: Callable,
+     targets: List[str],
+     **kwargs
+ ) -> Tuple[BenchmarkResult, List[Any]]:
+     """
+     Measure scan throughput (targets per minute).
+
+     Args:
+         scan_func: Async scan function
+         targets: List of targets to scan
+         **kwargs: Additional arguments for scan function
+
+     Returns:
+         Tuple of (BenchmarkResult, scan results)
+     """
+     runner = BenchmarkRunner()
+     scan_results = []
+
+     async def benchmark_scan():
+         for target in targets:
+             result = await scan_func(target, **kwargs)
+             scan_results.append(result)
+
+     result = await runner.run_benchmark(
+         name="scan_throughput",
+         category=BenchmarkCategory.SCAN,
+         description=f"Scan throughput for {len(targets)} targets",
+         benchmark_func=benchmark_scan,
+         iterations=1,
+         monitor_resources=True
+     )
+
+     # Calculate throughput
+     result.throughput = ThroughputMetrics(
+         operations=len(targets),
+         duration_seconds=result.timing.duration_seconds
+     )
+     result.throughput.calculate()
+
+     # Store targets per minute as custom metric
+     result.custom_metrics["targets_per_minute"] = result.throughput.ops_per_minute
+     result.custom_metrics["target_count"] = len(targets)
+
+     return result, scan_results
+
+
+ async def measure_api_latency(
+     endpoint: str,
+     request_func: Callable,
+     iterations: int = 100,
+     **kwargs
+ ) -> BenchmarkResult:
+     """
+     Measure API endpoint latency.
+
+     Args:
+         endpoint: API endpoint name/path
+         request_func: Async function that makes the request
+         iterations: Number of requests to make
+         **kwargs: Arguments for request function
+
+     Returns:
+         BenchmarkResult with latency metrics
+     """
+     runner = BenchmarkRunner()
+
+     result = await runner.run_benchmark(
+         name=f"api_latency_{endpoint}",
+         category=BenchmarkCategory.API,
+         description=f"API latency for {endpoint}",
+         benchmark_func=request_func,
+         iterations=iterations,
+         monitor_resources=True,
+         **kwargs
+     )
+
+     # Calculate requests per second
+     result.custom_metrics["requests_per_second"] = (
+         iterations / result.timing.duration_seconds
+         if result.timing.duration_seconds > 0 else 0
+     )
+
+     return result
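Finally, an illustrative driver for the two convenience helpers; scan_target and ping_health are invented stand-ins for real scan and request coroutines, and the import path is assumed:

    import asyncio
    from modules.benchmark import measure_scan_throughput, measure_api_latency

    async def scan_target(target: str, timeout: float = 1.0) -> dict:
        await asyncio.sleep(0.02)                  # placeholder for a real scan
        return {"target": target, "open_ports": []}

    async def ping_health() -> None:
        await asyncio.sleep(0.005)                 # placeholder for a real HTTP request

    async def main() -> None:
        bench, scans = await measure_scan_throughput(scan_target, ["10.0.0.1", "10.0.0.2"], timeout=0.5)
        print(bench.custom_metrics["targets_per_minute"], len(scans))

        api = await measure_api_latency("health", ping_health, iterations=50)
        print(api.timing.p95_ms, api.custom_metrics["requests_per_second"])

    asyncio.run(main())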