runbooks-0.7.6-py3-none-any.whl → runbooks-0.7.9-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
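To reproduce a comparison like this locally, the sketch below uses only the Python standard library. It assumes both wheels have already been downloaded next to the script (a wheel is a zip archive, so any member file can be read and diffed directly); the wheel filenames and the chosen member path are illustrative, not part of this diff.

#!/usr/bin/env python3
"""Sketch: diff one member file between two locally downloaded wheels."""

import difflib
import zipfile

OLD_WHEEL = "runbooks-0.7.6-py3-none-any.whl"  # assumed already downloaded
NEW_WHEEL = "runbooks-0.7.9-py3-none-any.whl"
MEMBER = "runbooks/finops/types.py"  # any path listed in the wheel's RECORD


def read_member(wheel_path, member):
    """Return a member's text lines, or [] if absent (e.g. a newly added file)."""
    with zipfile.ZipFile(wheel_path) as wheel:
        if member not in wheel.namelist():
            return []
        return wheel.read(member).decode("utf-8").splitlines(keepends=True)


old_lines = read_member(OLD_WHEEL, MEMBER)
new_lines = read_member(NEW_WHEEL, MEMBER)
for line in difflib.unified_diff(old_lines, new_lines, fromfile=MEMBER, tofile=MEMBER):
    print(line, end="")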
Files changed (111)
  1. runbooks/__init__.py +1 -1
  2. runbooks/base.py +5 -1
  3. runbooks/cfat/__init__.py +8 -4
  4. runbooks/cfat/assessment/collectors.py +171 -14
  5. runbooks/cfat/assessment/compliance.py +871 -0
  6. runbooks/cfat/assessment/runner.py +122 -11
  7. runbooks/cfat/models.py +6 -2
  8. runbooks/common/logger.py +14 -0
  9. runbooks/common/rich_utils.py +451 -0
  10. runbooks/enterprise/__init__.py +68 -0
  11. runbooks/enterprise/error_handling.py +411 -0
  12. runbooks/enterprise/logging.py +439 -0
  13. runbooks/enterprise/multi_tenant.py +583 -0
  14. runbooks/finops/README.md +468 -241
  15. runbooks/finops/__init__.py +39 -3
  16. runbooks/finops/cli.py +83 -18
  17. runbooks/finops/cross_validation.py +375 -0
  18. runbooks/finops/dashboard_runner.py +812 -164
  19. runbooks/finops/enhanced_dashboard_runner.py +525 -0
  20. runbooks/finops/finops_dashboard.py +1892 -0
  21. runbooks/finops/helpers.py +485 -51
  22. runbooks/finops/optimizer.py +823 -0
  23. runbooks/finops/tests/__init__.py +19 -0
  24. runbooks/finops/tests/results_test_finops_dashboard.xml +1 -0
  25. runbooks/finops/tests/run_comprehensive_tests.py +421 -0
  26. runbooks/finops/tests/run_tests.py +305 -0
  27. runbooks/finops/tests/test_finops_dashboard.py +705 -0
  28. runbooks/finops/tests/test_integration.py +477 -0
  29. runbooks/finops/tests/test_performance.py +380 -0
  30. runbooks/finops/tests/test_performance_benchmarks.py +500 -0
  31. runbooks/finops/tests/test_reference_images_validation.py +867 -0
  32. runbooks/finops/tests/test_single_account_features.py +715 -0
  33. runbooks/finops/tests/validate_test_suite.py +220 -0
  34. runbooks/finops/types.py +1 -1
  35. runbooks/hitl/enhanced_workflow_engine.py +725 -0
  36. runbooks/inventory/artifacts/scale-optimize-status.txt +12 -0
  37. runbooks/inventory/collectors/aws_comprehensive.py +442 -0
  38. runbooks/inventory/collectors/enterprise_scale.py +281 -0
  39. runbooks/inventory/core/collector.py +172 -13
  40. runbooks/inventory/discovery.md +1 -1
  41. runbooks/inventory/list_ec2_instances.py +18 -20
  42. runbooks/inventory/list_ssm_parameters.py +31 -3
  43. runbooks/inventory/organizations_discovery.py +1269 -0
  44. runbooks/inventory/rich_inventory_display.py +393 -0
  45. runbooks/inventory/run_on_multi_accounts.py +35 -19
  46. runbooks/inventory/runbooks.security.report_generator.log +0 -0
  47. runbooks/inventory/runbooks.security.run_script.log +0 -0
  48. runbooks/inventory/vpc_flow_analyzer.py +1030 -0
  49. runbooks/main.py +2215 -119
  50. runbooks/metrics/dora_metrics_engine.py +599 -0
  51. runbooks/operate/__init__.py +2 -2
  52. runbooks/operate/base.py +122 -10
  53. runbooks/operate/deployment_framework.py +1032 -0
  54. runbooks/operate/deployment_validator.py +853 -0
  55. runbooks/operate/dynamodb_operations.py +10 -6
  56. runbooks/operate/ec2_operations.py +319 -11
  57. runbooks/operate/executive_dashboard.py +779 -0
  58. runbooks/operate/mcp_integration.py +750 -0
  59. runbooks/operate/nat_gateway_operations.py +1120 -0
  60. runbooks/operate/networking_cost_heatmap.py +685 -0
  61. runbooks/operate/privatelink_operations.py +940 -0
  62. runbooks/operate/s3_operations.py +10 -6
  63. runbooks/operate/vpc_endpoints.py +644 -0
  64. runbooks/operate/vpc_operations.py +1038 -0
  65. runbooks/remediation/__init__.py +2 -2
  66. runbooks/remediation/acm_remediation.py +1 -1
  67. runbooks/remediation/base.py +1 -1
  68. runbooks/remediation/cloudtrail_remediation.py +1 -1
  69. runbooks/remediation/cognito_remediation.py +1 -1
  70. runbooks/remediation/dynamodb_remediation.py +1 -1
  71. runbooks/remediation/ec2_remediation.py +1 -1
  72. runbooks/remediation/ec2_unattached_ebs_volumes.py +1 -1
  73. runbooks/remediation/kms_enable_key_rotation.py +1 -1
  74. runbooks/remediation/kms_remediation.py +1 -1
  75. runbooks/remediation/lambda_remediation.py +1 -1
  76. runbooks/remediation/multi_account.py +1 -1
  77. runbooks/remediation/rds_remediation.py +1 -1
  78. runbooks/remediation/s3_block_public_access.py +1 -1
  79. runbooks/remediation/s3_enable_access_logging.py +1 -1
  80. runbooks/remediation/s3_encryption.py +1 -1
  81. runbooks/remediation/s3_remediation.py +1 -1
  82. runbooks/remediation/vpc_remediation.py +475 -0
  83. runbooks/security/__init__.py +3 -1
  84. runbooks/security/compliance_automation.py +632 -0
  85. runbooks/security/report_generator.py +10 -0
  86. runbooks/security/run_script.py +31 -5
  87. runbooks/security/security_baseline_tester.py +169 -30
  88. runbooks/security/security_export.py +477 -0
  89. runbooks/validation/__init__.py +10 -0
  90. runbooks/validation/benchmark.py +484 -0
  91. runbooks/validation/cli.py +356 -0
  92. runbooks/validation/mcp_validator.py +768 -0
  93. runbooks/vpc/__init__.py +38 -0
  94. runbooks/vpc/config.py +212 -0
  95. runbooks/vpc/cost_engine.py +347 -0
  96. runbooks/vpc/heatmap_engine.py +605 -0
  97. runbooks/vpc/manager_interface.py +634 -0
  98. runbooks/vpc/networking_wrapper.py +1260 -0
  99. runbooks/vpc/rich_formatters.py +679 -0
  100. runbooks/vpc/tests/__init__.py +5 -0
  101. runbooks/vpc/tests/conftest.py +356 -0
  102. runbooks/vpc/tests/test_cli_integration.py +530 -0
  103. runbooks/vpc/tests/test_config.py +458 -0
  104. runbooks/vpc/tests/test_cost_engine.py +479 -0
  105. runbooks/vpc/tests/test_networking_wrapper.py +512 -0
  106. {runbooks-0.7.6.dist-info → runbooks-0.7.9.dist-info}/METADATA +40 -12
  107. {runbooks-0.7.6.dist-info → runbooks-0.7.9.dist-info}/RECORD +111 -50
  108. {runbooks-0.7.6.dist-info → runbooks-0.7.9.dist-info}/WHEEL +0 -0
  109. {runbooks-0.7.6.dist-info → runbooks-0.7.9.dist-info}/entry_points.txt +0 -0
  110. {runbooks-0.7.6.dist-info → runbooks-0.7.9.dist-info}/licenses/LICENSE +0 -0
  111. {runbooks-0.7.6.dist-info → runbooks-0.7.9.dist-info}/top_level.txt +0 -0
runbooks/finops/tests/test_performance.py (new file)
@@ -0,0 +1,380 @@
+#!/usr/bin/env python3
+"""
+Performance Tests for FinOps Dashboard Enterprise Components.
+
+This module provides performance testing to ensure the FinOps dashboard
+can handle enterprise-scale workloads efficiently.
+
+Test Coverage:
+- Large account dataset processing (100+ accounts)
+- High resource count analysis (10,000+ resources)
+- Memory usage optimization
+- Response time benchmarking
+- Concurrent analysis capabilities
+
+Author: CloudOps Runbooks Team
+Version: 0.7.8
+"""
+
+import threading
+import time
+from concurrent.futures import ThreadPoolExecutor, as_completed
+from unittest.mock import patch
+
+import psutil
+import pytest
+
+# Import the components we're testing
+from runbooks.finops.finops_dashboard import (
+    EnterpriseResourceAuditor,
+    FinOpsConfig,
+    MultiAccountCostTrendAnalyzer,
+    ResourceUtilizationHeatmapAnalyzer,
+    run_complete_finops_analysis,
+)
+
+
+class TestPerformanceBenchmarks:
+    """Performance benchmarks for FinOps dashboard components."""
+
+    def test_cost_analysis_response_time(self):
+        """Test cost analysis response time with various account counts."""
+        config = FinOpsConfig()
+        analyzer = MultiAccountCostTrendAnalyzer(config)
+
+        # Test with different account counts
+        test_cases = [5, 10, 25, 50, 85]  # Various account counts
+        response_times = []
+
+        for account_count in test_cases:
+            with patch("runbooks.finops.finops_dashboard.random.randint") as mock_randint:
+                mock_randint.return_value = account_count
+
+                start_time = time.perf_counter()
+                results = analyzer.analyze_cost_trends()
+                end_time = time.perf_counter()
+
+                response_time = end_time - start_time
+                response_times.append((account_count, response_time))
+
+                # Verify results are valid
+                assert results["status"] == "completed"
+                assert results["cost_trends"]["total_accounts"] == account_count
+
+                # Performance assertions
+                assert response_time < 5.0, f"Analysis took {response_time:.2f}s for {account_count} accounts"
+
+        # Verify performance scaling is reasonable
+        print("\nCost Analysis Response Times:")
+        for account_count, response_time in response_times:
+            print(f"  {account_count} accounts: {response_time:.3f}s")
+
+        # Response time should scale reasonably with account count
+        small_time = response_times[0][1]  # 5 accounts
+        large_time = response_times[-1][1]  # 85 accounts
+
+        # Large dataset shouldn't be more than 10x slower
+        assert large_time < small_time * 10
+
+    def test_memory_usage_optimization(self):
+        """Test memory usage with large datasets."""
+        config = FinOpsConfig()
+
+        # Get initial memory usage
+        process = psutil.Process()
+        initial_memory = process.memory_info().rss
+
+        # Run analysis with maximum account count
+        with patch("runbooks.finops.finops_dashboard.random.randint") as mock_randint:
+            mock_randint.return_value = 85  # Maximum accounts
+
+            analyzer = MultiAccountCostTrendAnalyzer(config)
+            results = analyzer.analyze_cost_trends()
+
+        # Check memory usage after analysis
+        peak_memory = process.memory_info().rss
+        memory_increase = peak_memory - initial_memory
+
+        # Memory increase should be reasonable (less than 100MB)
+        assert memory_increase < 100 * 1024 * 1024, f"Memory increased by {memory_increase / 1024 / 1024:.2f}MB"
+
+        # Verify results are valid
+        assert results["status"] == "completed"
+        assert len(results["cost_trends"]["account_data"]) == 85
+
+        print(f"\nMemory Usage:")
+        print(f"  Initial: {initial_memory / 1024 / 1024:.2f}MB")
+        print(f"  Peak: {peak_memory / 1024 / 1024:.2f}MB")
+        print(f"  Increase: {memory_increase / 1024 / 1024:.2f}MB")
+
+    def test_resource_heatmap_performance(self):
+        """Test resource heatmap performance with large resource counts."""
+        config = FinOpsConfig()
+
+        # Create large dataset with high-spend accounts (generates more resources)
+        large_account_data = []
+        for i in range(30):  # 30 high-spend accounts
+            large_account_data.append(
+                {
+                    "account_id": f"perf-test-{i:03d}",
+                    "account_type": "production",
+                    "monthly_spend": 75000.0,  # High spend = more resources
+                }
+            )
+
+        trend_data = {"cost_trends": {"account_data": large_account_data}}
+
+        analyzer = ResourceUtilizationHeatmapAnalyzer(config, trend_data)
+
+        # Measure performance
+        start_time = time.perf_counter()
+        results = analyzer.analyze_resource_utilization()
+        end_time = time.perf_counter()
+
+        response_time = end_time - start_time
+
+        # Performance assertions
+        assert response_time < 10.0, f"Heatmap analysis took {response_time:.2f}s"
+
+        # Verify results
+        assert results["status"] == "completed"
+        heatmap_data = results["heatmap_data"]
+        assert heatmap_data["total_accounts"] == 30
+        assert heatmap_data["total_resources"] > 1000  # Should generate many resources
+
+        print(f"\nResource Heatmap Performance:")
+        print(f"  Response time: {response_time:.3f}s")
+        print(f"  Total resources: {heatmap_data['total_resources']:,}")
+        print(f"  Resources per second: {heatmap_data['total_resources'] / response_time:,.0f}")
+
+    def test_complete_workflow_performance(self):
+        """Test complete workflow performance under realistic load."""
+        start_time = time.perf_counter()
+
+        # Get initial memory
+        process = psutil.Process()
+        initial_memory = process.memory_info().rss
+
+        # Run complete analysis
+        results = run_complete_finops_analysis()
+
+        end_time = time.perf_counter()
+        peak_memory = process.memory_info().rss
+
+        total_time = end_time - start_time
+        memory_increase = peak_memory - initial_memory
+
+        # Performance assertions
+        assert total_time < 30.0, f"Complete workflow took {total_time:.2f}s"
+        assert memory_increase < 200 * 1024 * 1024, f"Memory increased by {memory_increase / 1024 / 1024:.2f}MB"
+
+        # Verify workflow completed successfully
+        assert results["workflow_status"] == "completed"
+
+        print(f"\nComplete Workflow Performance:")
+        print(f"  Total time: {total_time:.3f}s")
+        print(f"  Memory increase: {memory_increase / 1024 / 1024:.2f}MB")
+
+        # Verify all components completed
+        assert results["cost_analysis"]["status"] == "completed"
+        assert results["audit_results"]["status"] == "completed"
+        assert "executive_summary" in results
+        assert "export_status" in results
+
+
+class TestConcurrencyAndThreadSafety:
+    """Test concurrent execution and thread safety."""
+
+    def test_concurrent_cost_analysis(self):
+        """Test concurrent cost analysis execution."""
+        config = FinOpsConfig()
+
+        def run_analysis():
+            """Run a single cost analysis."""
+            analyzer = MultiAccountCostTrendAnalyzer(config)
+            results = analyzer.analyze_cost_trends()
+            assert results["status"] == "completed"
+            return results
+
+        # Run multiple analyses concurrently
+        start_time = time.perf_counter()
+
+        with ThreadPoolExecutor(max_workers=4) as executor:
+            futures = [executor.submit(run_analysis) for _ in range(4)]
+            results = [future.result() for future in as_completed(futures)]
+
+        end_time = time.perf_counter()
+
+        # Verify all analyses completed successfully
+        assert len(results) == 4
+        for result in results:
+            assert result["status"] == "completed"
+            assert "cost_trends" in result
+            assert "optimization_opportunities" in result
+
+        concurrent_time = end_time - start_time
+
+        # Concurrent execution should be faster than sequential
+        print(f"\nConcurrent Analysis Performance:")
+        print(f"  4 concurrent analyses: {concurrent_time:.3f}s")
+
+        # Should complete within reasonable time
+        assert concurrent_time < 15.0, f"Concurrent analyses took {concurrent_time:.2f}s"
+
+    def test_thread_safety_data_integrity(self):
+        """Test data integrity under concurrent access."""
+        config = FinOpsConfig()
+        results_list = []
+
+        def analyze_and_store():
+            """Run analysis and store results."""
+            analyzer = MultiAccountCostTrendAnalyzer(config)
+            result = analyzer.analyze_cost_trends()
+            results_list.append(result)
+
+        # Run multiple threads
+        threads = []
+        for _ in range(3):
+            thread = threading.Thread(target=analyze_and_store)
+            threads.append(thread)
+            thread.start()
+
+        # Wait for all threads to complete
+        for thread in threads:
+            thread.join()
+
+        # Verify data integrity
+        assert len(results_list) == 3
+        for result in results_list:
+            assert result["status"] == "completed"
+            assert isinstance(result["cost_trends"]["total_monthly_spend"], (int, float))
+            assert result["cost_trends"]["total_monthly_spend"] > 0
+
+
+class TestScalabilityLimits:
+    """Test system behavior at scalability limits."""
+
+    def test_maximum_account_processing(self):
+        """Test processing with maximum supported account count."""
+        config = FinOpsConfig()
+
+        # Test with maximum account count
+        with patch("runbooks.finops.finops_dashboard.random.randint") as mock_randint:
+            mock_randint.return_value = 100  # Beyond normal maximum
+
+            analyzer = MultiAccountCostTrendAnalyzer(config)
+
+            start_time = time.perf_counter()
+            results = analyzer.analyze_cost_trends()
+            end_time = time.perf_counter()
+
+            response_time = end_time - start_time
+
+            # Should handle large account counts gracefully
+            assert results["status"] == "completed"
+            assert results["cost_trends"]["total_accounts"] == 100
+            assert response_time < 10.0, f"Max account processing took {response_time:.2f}s"
+
+            # Verify data quality isn't compromised
+            cost_trends = results["cost_trends"]
+            assert len(cost_trends["account_data"]) == 100
+            assert cost_trends["total_monthly_spend"] > 0
+            assert cost_trends["cost_trend_summary"]["average_account_spend"] > 0
+
+    def test_memory_limits_with_huge_datasets(self):
+        """Test memory usage with very large datasets."""
+        config = FinOpsConfig()
+
+        # Create dataset with many high-resource accounts
+        huge_account_data = []
+        for i in range(50):  # 50 very high-spend accounts
+            huge_account_data.append(
+                {
+                    "account_id": f"huge-account-{i:03d}",
+                    "account_type": "production",
+                    "monthly_spend": 100000.0,  # Very high spend
+                }
+            )
+
+        trend_data = {"cost_trends": {"account_data": huge_account_data}}
+
+        analyzer = ResourceUtilizationHeatmapAnalyzer(config, trend_data)
+
+        # Monitor memory usage during analysis
+        process = psutil.Process()
+        initial_memory = process.memory_info().rss
+
+        results = analyzer.analyze_resource_utilization()
+
+        peak_memory = process.memory_info().rss
+        memory_increase = peak_memory - initial_memory
+
+        # Verify results
+        assert results["status"] == "completed"
+
+        # Memory usage should remain reasonable even with huge datasets
+        assert memory_increase < 500 * 1024 * 1024, f"Memory increased by {memory_increase / 1024 / 1024:.2f}MB"
+
+        heatmap_data = results["heatmap_data"]
+        print(f"\nHuge Dataset Processing:")
+        print(f"  Total resources: {heatmap_data['total_resources']:,}")
+        print(f"  Memory increase: {memory_increase / 1024 / 1024:.2f}MB")
+        print(f"  Memory per resource: {memory_increase / heatmap_data['total_resources']:.0f} bytes")
+
+
+class TestResponseTimeConsistency:
+    """Test response time consistency and variance."""
+
+    def test_response_time_consistency(self):
+        """Test that response times are consistent across multiple runs."""
+        config = FinOpsConfig()
+
+        # Run same analysis multiple times
+        response_times = []
+        account_count = 25  # Fixed account count for consistency
+
+        for _ in range(10):  # 10 runs
+            with patch("runbooks.finops.finops_dashboard.random.randint") as mock_randint:
+                mock_randint.return_value = account_count
+
+                analyzer = MultiAccountCostTrendAnalyzer(config)
+
+                start_time = time.perf_counter()
+                results = analyzer.analyze_cost_trends()
+                end_time = time.perf_counter()
+
+                response_time = end_time - start_time
+                response_times.append(response_time)
+
+                assert results["status"] == "completed"
+
+        # Calculate statistics
+        avg_time = sum(response_times) / len(response_times)
+        min_time = min(response_times)
+        max_time = max(response_times)
+        variance = sum((t - avg_time) ** 2 for t in response_times) / len(response_times)
+        std_dev = variance**0.5
+
+        print(f"\nResponse Time Consistency (10 runs):")
+        print(f"  Average: {avg_time:.3f}s")
+        print(f"  Min: {min_time:.3f}s")
+        print(f"  Max: {max_time:.3f}s")
+        print(f"  Std Dev: {std_dev:.3f}s")
+
+        # Response times should be consistent (low variance)
+        assert std_dev < avg_time * 0.3, f"High variance in response times: {std_dev:.3f}s"
+
+        # Maximum time shouldn't be more than 2x average
+        assert max_time < avg_time * 2, f"Maximum time {max_time:.3f}s too high vs average {avg_time:.3f}s"
+
+
+if __name__ == "__main__":
+    """
+    Run the performance test suite directly.
+
+    Usage:
+        python test_performance.py
+        pytest test_performance.py -v -s  # -s to see print output
+        pytest test_performance.py::TestPerformanceBenchmarks::test_memory_usage_optimization -v -s
+    """
+    pytest.main([__file__, "-v", "-s"])
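A note on the measurement technique this new test module relies on: wall time comes from time.perf_counter() and memory growth from the resident-set-size (RSS) reported by psutil, sampled before and after each analysis run. A minimal standalone sketch of that pattern, independent of the runbooks package (the workload callable is a placeholder for anything under test):

#!/usr/bin/env python3
"""Minimal sketch of the perf_counter / RSS-delta pattern used in test_performance.py."""

import time

import psutil


def measure(workload):
    """Run `workload`, reporting wall time and RSS growth, and return its result."""
    process = psutil.Process()  # handle to the current process
    rss_before = process.memory_info().rss  # resident set size, in bytes

    start = time.perf_counter()
    result = workload()
    elapsed = time.perf_counter() - start

    rss_delta = process.memory_info().rss - rss_before
    print(f"elapsed: {elapsed:.3f}s, RSS delta: {rss_delta / 1024 / 1024:.2f}MB")
    return result


if __name__ == "__main__":
    # Placeholder workload: allocate a large list so the RSS delta is visible.
    measure(lambda: [i * i for i in range(1_000_000)])

RSS deltas are a coarse proxy for allocation behavior (the interpreter and its allocator may retain or reuse pages), which is presumably why the tests above assert only loose upper bounds (100MB to 500MB ceilings) rather than exact values.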