runbooks-0.7.6-py3-none-any.whl → runbooks-0.7.9-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (111)
  1. runbooks/__init__.py +1 -1
  2. runbooks/base.py +5 -1
  3. runbooks/cfat/__init__.py +8 -4
  4. runbooks/cfat/assessment/collectors.py +171 -14
  5. runbooks/cfat/assessment/compliance.py +871 -0
  6. runbooks/cfat/assessment/runner.py +122 -11
  7. runbooks/cfat/models.py +6 -2
  8. runbooks/common/logger.py +14 -0
  9. runbooks/common/rich_utils.py +451 -0
  10. runbooks/enterprise/__init__.py +68 -0
  11. runbooks/enterprise/error_handling.py +411 -0
  12. runbooks/enterprise/logging.py +439 -0
  13. runbooks/enterprise/multi_tenant.py +583 -0
  14. runbooks/finops/README.md +468 -241
  15. runbooks/finops/__init__.py +39 -3
  16. runbooks/finops/cli.py +83 -18
  17. runbooks/finops/cross_validation.py +375 -0
  18. runbooks/finops/dashboard_runner.py +812 -164
  19. runbooks/finops/enhanced_dashboard_runner.py +525 -0
  20. runbooks/finops/finops_dashboard.py +1892 -0
  21. runbooks/finops/helpers.py +485 -51
  22. runbooks/finops/optimizer.py +823 -0
  23. runbooks/finops/tests/__init__.py +19 -0
  24. runbooks/finops/tests/results_test_finops_dashboard.xml +1 -0
  25. runbooks/finops/tests/run_comprehensive_tests.py +421 -0
  26. runbooks/finops/tests/run_tests.py +305 -0
  27. runbooks/finops/tests/test_finops_dashboard.py +705 -0
  28. runbooks/finops/tests/test_integration.py +477 -0
  29. runbooks/finops/tests/test_performance.py +380 -0
  30. runbooks/finops/tests/test_performance_benchmarks.py +500 -0
  31. runbooks/finops/tests/test_reference_images_validation.py +867 -0
  32. runbooks/finops/tests/test_single_account_features.py +715 -0
  33. runbooks/finops/tests/validate_test_suite.py +220 -0
  34. runbooks/finops/types.py +1 -1
  35. runbooks/hitl/enhanced_workflow_engine.py +725 -0
  36. runbooks/inventory/artifacts/scale-optimize-status.txt +12 -0
  37. runbooks/inventory/collectors/aws_comprehensive.py +442 -0
  38. runbooks/inventory/collectors/enterprise_scale.py +281 -0
  39. runbooks/inventory/core/collector.py +172 -13
  40. runbooks/inventory/discovery.md +1 -1
  41. runbooks/inventory/list_ec2_instances.py +18 -20
  42. runbooks/inventory/list_ssm_parameters.py +31 -3
  43. runbooks/inventory/organizations_discovery.py +1269 -0
  44. runbooks/inventory/rich_inventory_display.py +393 -0
  45. runbooks/inventory/run_on_multi_accounts.py +35 -19
  46. runbooks/inventory/runbooks.security.report_generator.log +0 -0
  47. runbooks/inventory/runbooks.security.run_script.log +0 -0
  48. runbooks/inventory/vpc_flow_analyzer.py +1030 -0
  49. runbooks/main.py +2215 -119
  50. runbooks/metrics/dora_metrics_engine.py +599 -0
  51. runbooks/operate/__init__.py +2 -2
  52. runbooks/operate/base.py +122 -10
  53. runbooks/operate/deployment_framework.py +1032 -0
  54. runbooks/operate/deployment_validator.py +853 -0
  55. runbooks/operate/dynamodb_operations.py +10 -6
  56. runbooks/operate/ec2_operations.py +319 -11
  57. runbooks/operate/executive_dashboard.py +779 -0
  58. runbooks/operate/mcp_integration.py +750 -0
  59. runbooks/operate/nat_gateway_operations.py +1120 -0
  60. runbooks/operate/networking_cost_heatmap.py +685 -0
  61. runbooks/operate/privatelink_operations.py +940 -0
  62. runbooks/operate/s3_operations.py +10 -6
  63. runbooks/operate/vpc_endpoints.py +644 -0
  64. runbooks/operate/vpc_operations.py +1038 -0
  65. runbooks/remediation/__init__.py +2 -2
  66. runbooks/remediation/acm_remediation.py +1 -1
  67. runbooks/remediation/base.py +1 -1
  68. runbooks/remediation/cloudtrail_remediation.py +1 -1
  69. runbooks/remediation/cognito_remediation.py +1 -1
  70. runbooks/remediation/dynamodb_remediation.py +1 -1
  71. runbooks/remediation/ec2_remediation.py +1 -1
  72. runbooks/remediation/ec2_unattached_ebs_volumes.py +1 -1
  73. runbooks/remediation/kms_enable_key_rotation.py +1 -1
  74. runbooks/remediation/kms_remediation.py +1 -1
  75. runbooks/remediation/lambda_remediation.py +1 -1
  76. runbooks/remediation/multi_account.py +1 -1
  77. runbooks/remediation/rds_remediation.py +1 -1
  78. runbooks/remediation/s3_block_public_access.py +1 -1
  79. runbooks/remediation/s3_enable_access_logging.py +1 -1
  80. runbooks/remediation/s3_encryption.py +1 -1
  81. runbooks/remediation/s3_remediation.py +1 -1
  82. runbooks/remediation/vpc_remediation.py +475 -0
  83. runbooks/security/__init__.py +3 -1
  84. runbooks/security/compliance_automation.py +632 -0
  85. runbooks/security/report_generator.py +10 -0
  86. runbooks/security/run_script.py +31 -5
  87. runbooks/security/security_baseline_tester.py +169 -30
  88. runbooks/security/security_export.py +477 -0
  89. runbooks/validation/__init__.py +10 -0
  90. runbooks/validation/benchmark.py +484 -0
  91. runbooks/validation/cli.py +356 -0
  92. runbooks/validation/mcp_validator.py +768 -0
  93. runbooks/vpc/__init__.py +38 -0
  94. runbooks/vpc/config.py +212 -0
  95. runbooks/vpc/cost_engine.py +347 -0
  96. runbooks/vpc/heatmap_engine.py +605 -0
  97. runbooks/vpc/manager_interface.py +634 -0
  98. runbooks/vpc/networking_wrapper.py +1260 -0
  99. runbooks/vpc/rich_formatters.py +679 -0
  100. runbooks/vpc/tests/__init__.py +5 -0
  101. runbooks/vpc/tests/conftest.py +356 -0
  102. runbooks/vpc/tests/test_cli_integration.py +530 -0
  103. runbooks/vpc/tests/test_config.py +458 -0
  104. runbooks/vpc/tests/test_cost_engine.py +479 -0
  105. runbooks/vpc/tests/test_networking_wrapper.py +512 -0
  106. {runbooks-0.7.6.dist-info → runbooks-0.7.9.dist-info}/METADATA +40 -12
  107. {runbooks-0.7.6.dist-info → runbooks-0.7.9.dist-info}/RECORD +111 -50
  108. {runbooks-0.7.6.dist-info → runbooks-0.7.9.dist-info}/WHEEL +0 -0
  109. {runbooks-0.7.6.dist-info → runbooks-0.7.9.dist-info}/entry_points.txt +0 -0
  110. {runbooks-0.7.6.dist-info → runbooks-0.7.9.dist-info}/licenses/LICENSE +0 -0
  111. {runbooks-0.7.6.dist-info → runbooks-0.7.9.dist-info}/top_level.txt +0 -0
runbooks/finops/tests/test_performance_benchmarks.py (new file)
@@ -0,0 +1,500 @@
+#!/usr/bin/env python3
+"""
+Performance Benchmark Test Suite for FinOps Dashboard.
+
+This module validates performance requirements for the finops module
+to ensure sub-second execution targets are maintained under enterprise load.
+
+Performance Requirements:
+- Complete analysis workflow: <2s
+- Individual component analysis: <1s
+- Export operations: <500ms per format
+- Memory usage: <500MB peak
+- AWS API efficiency: <100 API calls per analysis
+
+Author: CloudOps Runbooks Team
+Version: 0.7.8
+"""
+
+import gc
+import time
+import tracemalloc
+from datetime import datetime
+from unittest.mock import MagicMock, patch
+
+import pytest
+
+from runbooks.finops.finops_dashboard import (
+    EnterpriseDiscovery,
+    EnterpriseExecutiveDashboard,
+    EnterpriseExportEngine,
+    EnterpriseResourceAuditor,
+    FinOpsConfig,
+    MultiAccountCostTrendAnalyzer,
+    ResourceUtilizationHeatmapAnalyzer,
+    run_complete_finops_analysis,
+)
+
+
+class PerformanceMonitor:
+    """Performance monitoring utility for benchmarking."""
+
+    def __init__(self):
+        self.start_time = None
+        self.end_time = None
+        self.peak_memory = None
+        self.api_call_count = 0
+
+    def start_monitoring(self):
+        """Start performance monitoring."""
+        gc.collect()  # Clean up before monitoring
+        tracemalloc.start()
+        self.start_time = time.perf_counter()
+        self.api_call_count = 0
+
+    def stop_monitoring(self):
+        """Stop monitoring and collect results."""
+        self.end_time = time.perf_counter()
+        current, peak = tracemalloc.get_traced_memory()
+        tracemalloc.stop()
+
+        self.peak_memory = peak / (1024 * 1024)  # Convert to MB
+        return {
+            "execution_time": self.end_time - self.start_time,
+            "peak_memory_mb": self.peak_memory,
+            "api_calls": self.api_call_count,
+        }
+
+    def track_api_call(self):
+        """Track an API call for efficiency monitoring."""
+        self.api_call_count += 1
+
+
+@pytest.fixture
+def performance_monitor():
+    """Fixture providing performance monitoring."""
+    return PerformanceMonitor()
+
+
+@pytest.fixture
+def performance_config():
+    """Configuration optimized for performance testing."""
+    config = FinOpsConfig()
+    config.time_range_days = 30  # Standard 30-day analysis
+    config.min_account_threshold = 20  # Enterprise scale
+    config.target_savings_percent = 40  # Standard optimization target
+    return config
+
+
+class TestComponentPerformanceBenchmarks:
+    """Performance benchmarks for individual components."""
+
+    def test_cost_trend_analyzer_performance(self, performance_config, performance_monitor):
+        """Test cost trend analyzer meets <1s performance target."""
+        analyzer = MultiAccountCostTrendAnalyzer(performance_config)
+
+        performance_monitor.start_monitoring()
+
+        # Execute cost trend analysis
+        results = analyzer.analyze_cost_trends()
+
+        metrics = performance_monitor.stop_monitoring()
+
+        # Validate performance requirements
+        assert metrics["execution_time"] < 1.0, f"Cost analysis took {metrics['execution_time']:.3f}s (>1s target)"
+        assert metrics["peak_memory_mb"] < 100, f"Memory usage {metrics['peak_memory_mb']:.1f}MB (>100MB limit)"
+
+        # Validate results quality wasn't compromised for performance
+        assert results["status"] == "completed"
+        assert "cost_trends" in results
+        assert "optimization_opportunities" in results
+
+    def test_resource_heatmap_analyzer_performance(self, performance_config, performance_monitor):
+        """Test resource heatmap analyzer meets <1s performance target."""
+        # Create test data for heatmap analysis
+        trend_data = {
+            "cost_trends": {
+                "account_data": [
+                    {"account_id": f"perf-test-{i:03d}", "account_type": "production", "monthly_spend": 25000.0}
+                    for i in range(25)  # 25 accounts for enterprise scale
+                ]
+            }
+        }
+
+        analyzer = ResourceUtilizationHeatmapAnalyzer(performance_config, trend_data)
+
+        performance_monitor.start_monitoring()
+
+        # Execute heatmap analysis
+        results = analyzer.analyze_resource_utilization()
+
+        metrics = performance_monitor.stop_monitoring()
+
+        # Validate performance requirements
+        assert metrics["execution_time"] < 1.0, f"Heatmap analysis took {metrics['execution_time']:.3f}s (>1s target)"
+        assert metrics["peak_memory_mb"] < 150, f"Memory usage {metrics['peak_memory_mb']:.1f}MB (>150MB limit)"
+
+        # Validate results completeness
+        assert results["status"] == "completed"
+        assert "heatmap_data" in results
+        assert "efficiency_scoring" in results
+
+    def test_enterprise_auditor_performance(self, performance_config, performance_monitor):
+        """Test enterprise auditor meets <1s performance target."""
+        auditor = EnterpriseResourceAuditor(performance_config)
+
+        performance_monitor.start_monitoring()
+
+        # Execute compliance audit
+        results = auditor.run_compliance_audit()
+
+        metrics = performance_monitor.stop_monitoring()
+
+        # Validate performance requirements
+        assert metrics["execution_time"] < 1.0, f"Audit took {metrics['execution_time']:.3f}s (>1s target)"
+        assert metrics["peak_memory_mb"] < 200, f"Memory usage {metrics['peak_memory_mb']:.1f}MB (>200MB limit)"
+
+        # Validate audit completeness
+        assert results["status"] == "completed"
+        assert "audit_data" in results
+        assert results["audit_data"]["total_resources_scanned"] > 0
+
+    def test_account_discovery_performance(self, performance_config, performance_monitor):
+        """Test account discovery meets <500ms performance target."""
+        discovery = EnterpriseDiscovery(performance_config)
+
+        performance_monitor.start_monitoring()
+
+        # Mock AWS operations for consistent performance testing
+        with (
+            patch("runbooks.finops.finops_dashboard.get_aws_profiles") as mock_profiles,
+            patch("runbooks.finops.finops_dashboard.get_account_id") as mock_account_id,
+        ):
+            mock_profiles.return_value = ["profile1", "profile2", "profile3"]
+            mock_account_id.return_value = "123456789012"
+
+            # Execute account discovery
+            results = discovery.discover_accounts()
+
+        metrics = performance_monitor.stop_monitoring()
+
+        # Validate performance requirements (stricter for discovery)
+        assert metrics["execution_time"] < 0.5, f"Discovery took {metrics['execution_time']:.3f}s (>500ms target)"
+        assert metrics["peak_memory_mb"] < 50, f"Memory usage {metrics['peak_memory_mb']:.1f}MB (>50MB limit)"
+
+        # Validate discovery results
+        assert "configured_profiles" in results
+        assert "account_info" in results
+
+
+class TestExportPerformanceBenchmarks:
+    """Performance benchmarks for export operations."""
+
+    @pytest.fixture
+    def large_test_dataset(self):
+        """Large test dataset for export performance testing."""
+        return {
+            "discovery": {
+                "timestamp": datetime.now().isoformat(),
+                "status": "completed",
+                "available_profiles": [f"profile-{i}" for i in range(10)],
+            },
+            "cost_analysis": {
+                "status": "completed",
+                "cost_trends": {
+                    "total_monthly_spend": 500000.0,
+                    "total_accounts": 50,
+                    "account_data": [
+                        {
+                            "account_id": f"benchmark-{i:03d}",
+                            "account_type": "production" if i % 3 == 0 else "development",
+                            "monthly_spend": 10000.0,
+                            "optimization_potential": 0.3 + (i % 4) * 0.1,
+                        }
+                        for i in range(50)  # 50 accounts worth of data
+                    ],
+                },
+                "optimization_opportunities": {"annual_savings_potential": 2400000.0, "savings_percentage": 40.0},
+            },
+            "audit_results": {
+                "status": "completed",
+                "audit_data": {
+                    "total_resources_scanned": 10000,
+                    "risk_score": {"overall": 72},
+                    "recommendations": [
+                        {"priority": "high", "category": "cost", "description": f"Optimize resource group {i}"}
+                        for i in range(25)  # 25 recommendations
+                    ],
+                },
+            },
+        }
+
+    def test_json_export_performance(self, performance_config, performance_monitor, large_test_dataset):
+        """Test JSON export meets <500ms performance target."""
+        exporter = EnterpriseExportEngine(performance_config)
+
+        performance_monitor.start_monitoring()
+
+        # Execute JSON export with large dataset
+        filename = exporter._export_json(large_test_dataset)
+
+        metrics = performance_monitor.stop_monitoring()
+
+        # Validate export performance
+        assert metrics["execution_time"] < 0.5, f"JSON export took {metrics['execution_time']:.3f}s (>500ms target)"
+        assert metrics["peak_memory_mb"] < 100, f"Memory usage {metrics['peak_memory_mb']:.1f}MB (>100MB limit)"
+
+        # Validate export succeeded
+        assert filename.endswith(".json")
+        assert performance_config.report_timestamp in filename
+
+    def test_csv_export_performance(self, performance_config, performance_monitor, large_test_dataset):
+        """Test CSV export meets <500ms performance target."""
+        exporter = EnterpriseExportEngine(performance_config)
+
+        performance_monitor.start_monitoring()
+
+        # Execute CSV export with large dataset
+        filename = exporter._export_csv(large_test_dataset)
+
+        metrics = performance_monitor.stop_monitoring()
+
+        # Validate export performance
+        assert metrics["execution_time"] < 0.5, f"CSV export took {metrics['execution_time']:.3f}s (>500ms target)"
+        assert metrics["peak_memory_mb"] < 75, f"Memory usage {metrics['peak_memory_mb']:.1f}MB (>75MB limit)"
+
+        # Validate export succeeded
+        assert filename.endswith(".csv")
+
+    def test_html_export_performance(self, performance_config, performance_monitor, large_test_dataset):
+        """Test HTML export meets <500ms performance target."""
+        exporter = EnterpriseExportEngine(performance_config)
+
+        performance_monitor.start_monitoring()
+
+        # Execute HTML export with large dataset
+        filename = exporter._export_html(large_test_dataset)
+
+        metrics = performance_monitor.stop_monitoring()
+
+        # Validate export performance
+        assert metrics["execution_time"] < 0.5, f"HTML export took {metrics['execution_time']:.3f}s (>500ms target)"
+        assert metrics["peak_memory_mb"] < 80, f"Memory usage {metrics['peak_memory_mb']:.1f}MB (>80MB limit)"
+
+        # Validate export succeeded
+        assert filename.endswith(".html")
+
+    def test_multi_format_export_performance(self, performance_config, performance_monitor, large_test_dataset):
+        """Test multi-format export meets cumulative performance targets."""
+        performance_config.output_formats = ["json", "csv", "html"]  # All formats
+        exporter = EnterpriseExportEngine(performance_config)
+
+        performance_monitor.start_monitoring()
+
+        # Execute multi-format export
+        export_status = exporter.export_all_results(
+            large_test_dataset["discovery"],
+            large_test_dataset["cost_analysis"],
+            large_test_dataset["audit_results"],
+            {"executive_summary": "test"},
+        )
+
+        metrics = performance_monitor.stop_monitoring()
+
+        # Validate cumulative export performance (3 formats × 500ms = 1.5s max)
+        assert metrics["execution_time"] < 1.5, (
+            f"Multi-format export took {metrics['execution_time']:.3f}s (>1.5s target)"
+        )
+        assert metrics["peak_memory_mb"] < 200, f"Memory usage {metrics['peak_memory_mb']:.1f}MB (>200MB limit)"
+
+        # Validate all exports succeeded
+        assert len(export_status["successful_exports"]) == 3
+        assert len(export_status["failed_exports"]) == 0
+
+
+class TestWorkflowPerformanceBenchmarks:
+    """Performance benchmarks for complete workflow operations."""
+
+    def test_complete_workflow_performance_target(self, performance_monitor):
+        """Test complete workflow meets <2s performance target."""
+        performance_monitor.start_monitoring()
+
+        # Execute complete workflow
+        results = run_complete_finops_analysis()
+
+        metrics = performance_monitor.stop_monitoring()
+
+        # Validate primary performance requirement
+        assert metrics["execution_time"] < 2.0, f"Complete workflow took {metrics['execution_time']:.3f}s (>2s target)"
+        assert metrics["peak_memory_mb"] < 500, f"Memory usage {metrics['peak_memory_mb']:.1f}MB (>500MB limit)"
+
+        # Validate workflow completed successfully
+        assert results["workflow_status"] == "completed"
+        assert "timestamp" in results
+
+    def test_workflow_performance_consistency(self, performance_monitor):
+        """Test workflow performance is consistent across multiple runs."""
+        execution_times = []
+        memory_peaks = []
+
+        # Run workflow 5 times to test consistency
+        for run in range(5):
+            performance_monitor.start_monitoring()
+
+            results = run_complete_finops_analysis()
+
+            metrics = performance_monitor.stop_monitoring()
+
+            execution_times.append(metrics["execution_time"])
+            memory_peaks.append(metrics["peak_memory_mb"])
+
+            # Each run should complete successfully
+            assert results["workflow_status"] == "completed"
+
+        # Calculate performance statistics
+        avg_execution_time = sum(execution_times) / len(execution_times)
+        max_execution_time = max(execution_times)
+        avg_memory_peak = sum(memory_peaks) / len(memory_peaks)
+        max_memory_peak = max(memory_peaks)
+
+        # Validate performance consistency requirements
+        assert avg_execution_time < 1.5, f"Average execution time {avg_execution_time:.3f}s (>1.5s target)"
+        assert max_execution_time < 2.0, f"Maximum execution time {max_execution_time:.3f}s (>2s target)"
+        assert avg_memory_peak < 400, f"Average memory peak {avg_memory_peak:.1f}MB (>400MB limit)"
+        assert max_memory_peak < 500, f"Maximum memory peak {max_memory_peak:.1f}MB (>500MB limit)"
+
+        # Validate reasonable performance variance (< 50% variation)
+        time_variance = (max_execution_time - min(execution_times)) / avg_execution_time
+        assert time_variance < 0.5, f"Execution time variance {time_variance:.2%} (>50% limit)"
+
+    def test_enterprise_scale_performance(self, performance_monitor):
+        """Test performance under enterprise scale conditions."""
+        # Create enterprise-scale configuration
+        config = FinOpsConfig()
+        config.min_account_threshold = 60  # Large enterprise
+        config.time_range_days = 90  # Quarterly analysis
+
+        # Mock large-scale AWS environment
+        with patch("runbooks.finops.finops_dashboard.random.randint") as mock_randint:
+            mock_randint.return_value = 75  # 75 accounts
+
+            performance_monitor.start_monitoring()
+
+            # Execute workflow with enterprise scale
+            results = run_complete_finops_analysis()
+
+            metrics = performance_monitor.stop_monitoring()
+
+        # Enterprise scale should still meet performance targets
+        assert metrics["execution_time"] < 3.0, f"Enterprise scale took {metrics['execution_time']:.3f}s (>3s limit)"
+        assert metrics["peak_memory_mb"] < 750, f"Memory usage {metrics['peak_memory_mb']:.1f}MB (>750MB limit)"
+
+        # Validate enterprise scale was actually tested
+        if results["cost_analysis"]["status"] == "completed":
+            cost_trends = results["cost_analysis"]["cost_trends"]
+            assert cost_trends["total_accounts"] >= 60
+
+    def test_concurrent_workflow_performance(self, performance_monitor):
+        """Test performance impact of concurrent operations."""
+        import concurrent.futures
+
+        def run_workflow():
+            """Run workflow and return execution time."""
+            start = time.perf_counter()
+            results = run_complete_finops_analysis()
+            end = time.perf_counter()
+            return end - start, results["workflow_status"]
+
+        performance_monitor.start_monitoring()
+
+        # Run 3 concurrent workflows
+        with concurrent.futures.ThreadPoolExecutor(max_workers=3) as executor:
+            futures = [executor.submit(run_workflow) for _ in range(3)]
+            concurrent_results = [future.result() for future in concurrent.futures.as_completed(futures)]
+
+        metrics = performance_monitor.stop_monitoring()
+
+        # Validate all concurrent workflows completed
+        execution_times, statuses = zip(*concurrent_results)
+        assert all(status == "completed" for status in statuses)
+
+        # Validate concurrent performance (some degradation expected)
+        max_concurrent_time = max(execution_times)
+        assert max_concurrent_time < 4.0, f"Concurrent workflow took {max_concurrent_time:.3f}s (>4s limit)"
+
+        # Total memory usage should be reasonable for concurrent operations
+        assert metrics["peak_memory_mb"] < 1000, (
+            f"Concurrent memory usage {metrics['peak_memory_mb']:.1f}MB (>1GB limit)"
+        )
+
+
+class TestMemoryEfficiencyBenchmarks:
+    """Memory efficiency benchmarks for sustained operations."""
+
+    def test_memory_leak_detection(self, performance_monitor):
+        """Test for memory leaks during repeated operations."""
+        initial_memory = None
+        memory_samples = []
+
+        # Run workflow 10 times and monitor memory
+        for iteration in range(10):
+            performance_monitor.start_monitoring()
+
+            results = run_complete_finops_analysis()
+
+            metrics = performance_monitor.stop_monitoring()
+            memory_samples.append(metrics["peak_memory_mb"])
+
+            if initial_memory is None:
+                initial_memory = metrics["peak_memory_mb"]
+
+            # Validate each iteration completes
+            assert results["workflow_status"] == "completed"
+
+        # Calculate memory growth
+        final_memory = memory_samples[-1]
+        memory_growth = final_memory - initial_memory
+        memory_growth_percent = (memory_growth / initial_memory) * 100
+
+        # Validate no significant memory leaks (< 20% growth over 10 iterations)
+        assert memory_growth_percent < 20, f"Memory grew {memory_growth_percent:.1f}% (>20% limit)"
+        assert final_memory < 600, f"Final memory {final_memory:.1f}MB (>600MB limit)"
+
+    def test_garbage_collection_efficiency(self, performance_monitor):
+        """Test garbage collection efficiency during operations."""
+        gc.collect()  # Initial cleanup
+        initial_objects = len(gc.get_objects())
+
+        performance_monitor.start_monitoring()
+
+        # Run workflow
+        results = run_complete_finops_analysis()
+
+        # Force garbage collection
+        gc.collect()
+        final_objects = len(gc.get_objects())
+
+        metrics = performance_monitor.stop_monitoring()
+
+        # Validate workflow completed
+        assert results["workflow_status"] == "completed"
+
+        # Validate object growth is reasonable
+        object_growth = final_objects - initial_objects
+        object_growth_percent = (object_growth / initial_objects) * 100
+
+        # Object growth should be minimal after GC
+        assert object_growth_percent < 10, f"Object count grew {object_growth_percent:.1f}% (>10% limit)"
+        assert metrics["peak_memory_mb"] < 400, f"Peak memory {metrics['peak_memory_mb']:.1f}MB (>400MB limit)"
+
+
+if __name__ == "__main__":
+    """
+    Run the performance benchmark test suite.
+
+    Usage:
+        python test_performance_benchmarks.py
+        pytest test_performance_benchmarks.py -v -s
+        pytest test_performance_benchmarks.py::TestWorkflowPerformanceBenchmarks::test_complete_workflow_performance_target -v
+    """
+    pytest.main([__file__, "-v", "-s", "--tb=short"])
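
For readers who want to reuse the measurement approach outside pytest, the sketch below isolates the same pattern PerformanceMonitor uses above: tracemalloc for peak traced memory and time.perf_counter for wall-clock time. It relies only on the standard library; the helper name measure, the placeholder workload, and the thresholds (mirroring the <2s / <500MB workflow targets) are illustrative and are not part of the released package.

import gc
import time
import tracemalloc


def measure(fn, *args, **kwargs):
    """Run fn once and report wall-clock time and peak traced memory in MB."""
    gc.collect()  # start from a clean heap so earlier allocations are not counted
    tracemalloc.start()
    start = time.perf_counter()
    result = fn(*args, **kwargs)
    elapsed = time.perf_counter() - start
    _, peak = tracemalloc.get_traced_memory()
    tracemalloc.stop()
    return result, {"execution_time": elapsed, "peak_memory_mb": peak / (1024 * 1024)}


if __name__ == "__main__":
    # Placeholder workload; swap in a real callable (e.g. a workflow entry point)
    # to reproduce the kind of budget checks asserted in the benchmark suite above.
    _, metrics = measure(sum, range(1_000_000))
    assert metrics["execution_time"] < 2.0, metrics
    assert metrics["peak_memory_mb"] < 500, metrics
    print(metrics)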