runbooks 0.7.6__py3-none-any.whl → 0.7.9__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (111)
  1. runbooks/__init__.py +1 -1
  2. runbooks/base.py +5 -1
  3. runbooks/cfat/__init__.py +8 -4
  4. runbooks/cfat/assessment/collectors.py +171 -14
  5. runbooks/cfat/assessment/compliance.py +871 -0
  6. runbooks/cfat/assessment/runner.py +122 -11
  7. runbooks/cfat/models.py +6 -2
  8. runbooks/common/logger.py +14 -0
  9. runbooks/common/rich_utils.py +451 -0
  10. runbooks/enterprise/__init__.py +68 -0
  11. runbooks/enterprise/error_handling.py +411 -0
  12. runbooks/enterprise/logging.py +439 -0
  13. runbooks/enterprise/multi_tenant.py +583 -0
  14. runbooks/finops/README.md +468 -241
  15. runbooks/finops/__init__.py +39 -3
  16. runbooks/finops/cli.py +83 -18
  17. runbooks/finops/cross_validation.py +375 -0
  18. runbooks/finops/dashboard_runner.py +812 -164
  19. runbooks/finops/enhanced_dashboard_runner.py +525 -0
  20. runbooks/finops/finops_dashboard.py +1892 -0
  21. runbooks/finops/helpers.py +485 -51
  22. runbooks/finops/optimizer.py +823 -0
  23. runbooks/finops/tests/__init__.py +19 -0
  24. runbooks/finops/tests/results_test_finops_dashboard.xml +1 -0
  25. runbooks/finops/tests/run_comprehensive_tests.py +421 -0
  26. runbooks/finops/tests/run_tests.py +305 -0
  27. runbooks/finops/tests/test_finops_dashboard.py +705 -0
  28. runbooks/finops/tests/test_integration.py +477 -0
  29. runbooks/finops/tests/test_performance.py +380 -0
  30. runbooks/finops/tests/test_performance_benchmarks.py +500 -0
  31. runbooks/finops/tests/test_reference_images_validation.py +867 -0
  32. runbooks/finops/tests/test_single_account_features.py +715 -0
  33. runbooks/finops/tests/validate_test_suite.py +220 -0
  34. runbooks/finops/types.py +1 -1
  35. runbooks/hitl/enhanced_workflow_engine.py +725 -0
  36. runbooks/inventory/artifacts/scale-optimize-status.txt +12 -0
  37. runbooks/inventory/collectors/aws_comprehensive.py +442 -0
  38. runbooks/inventory/collectors/enterprise_scale.py +281 -0
  39. runbooks/inventory/core/collector.py +172 -13
  40. runbooks/inventory/discovery.md +1 -1
  41. runbooks/inventory/list_ec2_instances.py +18 -20
  42. runbooks/inventory/list_ssm_parameters.py +31 -3
  43. runbooks/inventory/organizations_discovery.py +1269 -0
  44. runbooks/inventory/rich_inventory_display.py +393 -0
  45. runbooks/inventory/run_on_multi_accounts.py +35 -19
  46. runbooks/inventory/runbooks.security.report_generator.log +0 -0
  47. runbooks/inventory/runbooks.security.run_script.log +0 -0
  48. runbooks/inventory/vpc_flow_analyzer.py +1030 -0
  49. runbooks/main.py +2215 -119
  50. runbooks/metrics/dora_metrics_engine.py +599 -0
  51. runbooks/operate/__init__.py +2 -2
  52. runbooks/operate/base.py +122 -10
  53. runbooks/operate/deployment_framework.py +1032 -0
  54. runbooks/operate/deployment_validator.py +853 -0
  55. runbooks/operate/dynamodb_operations.py +10 -6
  56. runbooks/operate/ec2_operations.py +319 -11
  57. runbooks/operate/executive_dashboard.py +779 -0
  58. runbooks/operate/mcp_integration.py +750 -0
  59. runbooks/operate/nat_gateway_operations.py +1120 -0
  60. runbooks/operate/networking_cost_heatmap.py +685 -0
  61. runbooks/operate/privatelink_operations.py +940 -0
  62. runbooks/operate/s3_operations.py +10 -6
  63. runbooks/operate/vpc_endpoints.py +644 -0
  64. runbooks/operate/vpc_operations.py +1038 -0
  65. runbooks/remediation/__init__.py +2 -2
  66. runbooks/remediation/acm_remediation.py +1 -1
  67. runbooks/remediation/base.py +1 -1
  68. runbooks/remediation/cloudtrail_remediation.py +1 -1
  69. runbooks/remediation/cognito_remediation.py +1 -1
  70. runbooks/remediation/dynamodb_remediation.py +1 -1
  71. runbooks/remediation/ec2_remediation.py +1 -1
  72. runbooks/remediation/ec2_unattached_ebs_volumes.py +1 -1
  73. runbooks/remediation/kms_enable_key_rotation.py +1 -1
  74. runbooks/remediation/kms_remediation.py +1 -1
  75. runbooks/remediation/lambda_remediation.py +1 -1
  76. runbooks/remediation/multi_account.py +1 -1
  77. runbooks/remediation/rds_remediation.py +1 -1
  78. runbooks/remediation/s3_block_public_access.py +1 -1
  79. runbooks/remediation/s3_enable_access_logging.py +1 -1
  80. runbooks/remediation/s3_encryption.py +1 -1
  81. runbooks/remediation/s3_remediation.py +1 -1
  82. runbooks/remediation/vpc_remediation.py +475 -0
  83. runbooks/security/__init__.py +3 -1
  84. runbooks/security/compliance_automation.py +632 -0
  85. runbooks/security/report_generator.py +10 -0
  86. runbooks/security/run_script.py +31 -5
  87. runbooks/security/security_baseline_tester.py +169 -30
  88. runbooks/security/security_export.py +477 -0
  89. runbooks/validation/__init__.py +10 -0
  90. runbooks/validation/benchmark.py +484 -0
  91. runbooks/validation/cli.py +356 -0
  92. runbooks/validation/mcp_validator.py +768 -0
  93. runbooks/vpc/__init__.py +38 -0
  94. runbooks/vpc/config.py +212 -0
  95. runbooks/vpc/cost_engine.py +347 -0
  96. runbooks/vpc/heatmap_engine.py +605 -0
  97. runbooks/vpc/manager_interface.py +634 -0
  98. runbooks/vpc/networking_wrapper.py +1260 -0
  99. runbooks/vpc/rich_formatters.py +679 -0
  100. runbooks/vpc/tests/__init__.py +5 -0
  101. runbooks/vpc/tests/conftest.py +356 -0
  102. runbooks/vpc/tests/test_cli_integration.py +530 -0
  103. runbooks/vpc/tests/test_config.py +458 -0
  104. runbooks/vpc/tests/test_cost_engine.py +479 -0
  105. runbooks/vpc/tests/test_networking_wrapper.py +512 -0
  106. {runbooks-0.7.6.dist-info → runbooks-0.7.9.dist-info}/METADATA +40 -12
  107. {runbooks-0.7.6.dist-info → runbooks-0.7.9.dist-info}/RECORD +111 -50
  108. {runbooks-0.7.6.dist-info → runbooks-0.7.9.dist-info}/WHEEL +0 -0
  109. {runbooks-0.7.6.dist-info → runbooks-0.7.9.dist-info}/entry_points.txt +0 -0
  110. {runbooks-0.7.6.dist-info → runbooks-0.7.9.dist-info}/licenses/LICENSE +0 -0
  111. {runbooks-0.7.6.dist-info → runbooks-0.7.9.dist-info}/top_level.txt +0 -0
runbooks/finops/tests/__init__.py
@@ -0,0 +1,19 @@
+ """
+ Test suite for FinOps Dashboard Enterprise Components.
+
+ This package contains comprehensive tests for:
+ - Multi-account cost trend analysis
+ - Resource utilization heatmap generation
+ - Enterprise discovery and auditing
+ - Executive dashboard reporting
+ - Multi-format export functionality
+
+ Test Categories:
+ - Unit tests: Individual class and function testing
+ - Integration tests: AWS service integration with moto mocking
+ - Performance tests: Load testing for large account datasets
+ - End-to-end tests: Complete workflow validation
+
+ Author: CloudOps Runbooks Team
+ Version: 0.7.8
+ """
runbooks/finops/tests/results_test_finops_dashboard.xml
@@ -0,0 +1 @@
+ <?xml version="1.0" encoding="utf-8"?><testsuites name="pytest tests"><testsuite name="pytest" errors="0" failures="0" skipped="0" tests="28" time="12.113" timestamp="2025-08-27T19:48:38.255901+12:00" hostname="OS.local"><testcase classname="src.runbooks.finops.tests.test_finops_dashboard.TestFinOpsConfig" name="test_default_configuration" time="0.005" /><testcase classname="src.runbooks.finops.tests.test_finops_dashboard.TestFinOpsConfig" name="test_environment_variable_override" time="0.004" /><testcase classname="src.runbooks.finops.tests.test_finops_dashboard.TestFinOpsConfig" name="test_report_timestamp_format" time="0.004" /><testcase classname="src.runbooks.finops.tests.test_finops_dashboard.TestEnterpriseDiscovery" name="test_discovery_initialization" time="0.004" /><testcase classname="src.runbooks.finops.tests.test_finops_dashboard.TestEnterpriseDiscovery" name="test_successful_account_discovery" time="0.024" /><testcase classname="src.runbooks.finops.tests.test_finops_dashboard.TestEnterpriseDiscovery" name="test_discovery_error_handling" time="0.004" /><testcase classname="src.runbooks.finops.tests.test_finops_dashboard.TestMultiAccountCostTrendAnalyzer" name="test_analyzer_initialization" time="0.004" /><testcase classname="src.runbooks.finops.tests.test_finops_dashboard.TestMultiAccountCostTrendAnalyzer" name="test_cost_trend_analysis_success" time="0.248" /><testcase classname="src.runbooks.finops.tests.test_finops_dashboard.TestMultiAccountCostTrendAnalyzer" name="test_dynamic_account_discovery" time="1.135" /><testcase classname="src.runbooks.finops.tests.test_finops_dashboard.TestMultiAccountCostTrendAnalyzer" name="test_optimization_calculation_logic" time="0.005" /><testcase classname="src.runbooks.finops.tests.test_finops_dashboard.TestResourceUtilizationHeatmapAnalyzer" name="test_heatmap_analyzer_initialization" time="0.005" /><testcase classname="src.runbooks.finops.tests.test_finops_dashboard.TestResourceUtilizationHeatmapAnalyzer" name="test_resource_heatmap_analysis" time="0.005" /><testcase classname="src.runbooks.finops.tests.test_finops_dashboard.TestResourceUtilizationHeatmapAnalyzer" name="test_efficiency_scoring_calculation" time="0.005" /><testcase classname="src.runbooks.finops.tests.test_finops_dashboard.TestEnterpriseResourceAuditor" name="test_auditor_initialization" time="0.004" /><testcase classname="src.runbooks.finops.tests.test_finops_dashboard.TestEnterpriseResourceAuditor" name="test_compliance_audit_execution" time="2.437" /><testcase classname="src.runbooks.finops.tests.test_finops_dashboard.TestEnterpriseExecutiveDashboard" name="test_executive_dashboard_initialization" time="0.004" /><testcase classname="src.runbooks.finops.tests.test_finops_dashboard.TestEnterpriseExecutiveDashboard" name="test_executive_summary_generation" time="0.004" /><testcase classname="src.runbooks.finops.tests.test_finops_dashboard.TestEnterpriseExportEngine" name="test_export_engine_initialization" time="0.004" /><testcase classname="src.runbooks.finops.tests.test_finops_dashboard.TestEnterpriseExportEngine" name="test_export_all_results" time="0.004" /><testcase classname="src.runbooks.finops.tests.test_finops_dashboard.TestEnterpriseExportEngine" name="test_json_export" time="0.004" /><testcase classname="src.runbooks.finops.tests.test_finops_dashboard.TestEnterpriseExportEngine" name="test_csv_export" time="0.004" /><testcase classname="src.runbooks.finops.tests.test_finops_dashboard.TestEnterpriseExportEngine" name="test_html_export" time="0.004" /><testcase classname="src.runbooks.finops.tests.test_finops_dashboard.TestFactoryFunctions" name="test_create_finops_dashboard" time="0.004" /><testcase classname="src.runbooks.finops.tests.test_finops_dashboard.TestFactoryFunctions" name="test_create_finops_dashboard_with_custom_config" time="0.004" /><testcase classname="src.runbooks.finops.tests.test_finops_dashboard.TestFactoryFunctions" name="test_run_complete_finops_analysis" time="3.357" /><testcase classname="src.runbooks.finops.tests.test_finops_dashboard.TestErrorHandlingAndEdgeCases" name="test_cost_analyzer_with_invalid_trend_data" time="0.005" /><testcase classname="src.runbooks.finops.tests.test_finops_dashboard.TestErrorHandlingAndEdgeCases" name="test_heatmap_analyzer_with_empty_account_data" time="0.005" /><testcase classname="src.runbooks.finops.tests.test_finops_dashboard.TestErrorHandlingAndEdgeCases" name="test_export_with_invalid_format" time="0.004" /></testsuite></testsuites>
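The single line above is a standard JUnit XML report (28 tests, 0 failures, 12.113 s). A short standard-library sketch for summarizing such a report (a hypothetical helper, not part of this package):

    import xml.etree.ElementTree as ET

    def summarize_junit(path: str) -> dict:
        # The root <testsuites> wraps one <testsuite> whose attributes carry the totals.
        suite = ET.parse(path).getroot().find("testsuite")
        return {key: suite.get(key) for key in ("tests", "failures", "errors", "skipped", "time")}

    # summarize_junit("results_test_finops_dashboard.xml")
    # -> {'tests': '28', 'failures': '0', 'errors': '0', 'skipped': '0', 'time': '12.113'}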
runbooks/finops/tests/run_comprehensive_tests.py
@@ -0,0 +1,421 @@
+ #!/usr/bin/env python3
+ """
+ Comprehensive Test Runner for FinOps Dashboard Test Suite.
+
+ This script runs all test suites for the finops module, including:
+ 1. Core unit tests (test_finops_dashboard.py)
+ 2. Reference images validation (test_reference_images_validation.py)
+ 3. Performance benchmarks (test_performance_benchmarks.py)
+ 4. Integration tests (test_integration.py)
+
+ Generates comprehensive test reports with coverage analysis and performance metrics.
+
+ Author: CloudOps Runbooks Team
+ Version: 0.7.8
+ """
+
+ import json
+ import os
+ import subprocess
+ import sys
+ import time
+ from datetime import datetime
+ from pathlib import Path
+
+
+ class ComprehensiveTestRunner:
+     """Comprehensive test runner for FinOps dashboard test suite."""
+
+     def __init__(self):
+         self.test_dir = Path(__file__).parent
+         self.project_root = self.test_dir.parent.parent.parent.parent
+         self.results = {
+             "test_execution": {
+                 "timestamp": datetime.now().isoformat(),
+                 "test_suites": {},
+                 "summary": {},
+                 "performance_metrics": {},
+             }
+         }
+
+     def run_test_suite(self, test_file: str, description: str) -> dict:
+         """Run individual test suite and collect results."""
+         print(f"\n🧪 Running {description}")
+         print("=" * 60)
+
+         test_path = self.test_dir / test_file
+         if not test_path.exists():
+             return {
+                 "status": "skipped",
+                 "reason": f"Test file not found: {test_file}",
+                 "execution_time": 0,
+                 "test_count": 0,
+                 "passed": 0,
+                 "failed": 0,
+                 "errors": [],
+             }
+
+         # Run pytest with verbose output and timing
+         start_time = time.perf_counter()
+
+         try:
+             result = subprocess.run(
+                 [
+                     sys.executable,
+                     "-m",
+                     "pytest",
+                     str(test_path),
+                     "-v",
+                     "--tb=short",
+                     "--disable-warnings",
+                     f"--junit-xml={self.test_dir}/results_{test_file.replace('.py', '')}.xml",
+                 ],
+                 capture_output=True,
+                 text=True,
+                 timeout=300,  # 5 minute timeout per suite
+             )
+
+             execution_time = time.perf_counter() - start_time
+
+             # Parse pytest output for results
+             output_lines = result.stdout.split("\n")
+             test_results = self._parse_pytest_output(output_lines, result.returncode)
+             test_results["execution_time"] = execution_time
+             test_results["stdout"] = result.stdout
+             test_results["stderr"] = result.stderr
+
+             # Display results
+             self._display_suite_results(description, test_results)
+
+             return test_results
+
+         except subprocess.TimeoutExpired:
+             execution_time = time.perf_counter() - start_time
+             return {
+                 "status": "timeout",
+                 "execution_time": execution_time,
+                 "test_count": 0,
+                 "passed": 0,
+                 "failed": 0,
+                 "errors": ["Test suite exceeded 5 minute timeout"],
+             }
+         except Exception as e:
+             execution_time = time.perf_counter() - start_time
+             return {
+                 "status": "error",
+                 "execution_time": execution_time,
+                 "test_count": 0,
+                 "passed": 0,
+                 "failed": 0,
+                 "errors": [str(e)],
+             }
+
+     def _parse_pytest_output(self, output_lines: list, return_code: int) -> dict:
+         """Parse pytest output to extract test results."""
+         test_count = 0
+         passed = 0
+         failed = 0
+         errors = []
+
+         # Look for the pytest result summary line. Tokens are stripped of
+         # trailing commas because pytest prints "2 failed, 8 passed in 1.23s",
+         # so a bare equality check against "failed" would never match.
+         for line in output_lines:
+             if "passed" in line and ("failed" in line or "error" in line):
+                 parts = [part.strip(",") for part in line.split()]
+                 for i, part in enumerate(parts):
+                     if part == "passed":
+                         if i > 0 and parts[i - 1].isdigit():
+                             passed = int(parts[i - 1])
+                     elif part == "failed":
+                         if i > 0 and parts[i - 1].isdigit():
+                             failed = int(parts[i - 1])
+                     elif part == "error":
+                         if i > 0 and parts[i - 1].isdigit():
+                             errors.append(f"{parts[i - 1]} test errors")
+                 break
+             elif "passed in" in line and "failed" not in line:
+                 # Parse a line like "10 passed in 1.23s"
+                 parts = line.split()
+                 for i, part in enumerate(parts):
+                     if part == "passed":
+                         if i > 0 and parts[i - 1].isdigit():
+                             passed = int(parts[i - 1])
+                 break
+
+         test_count = passed + failed
+
+         # Collect error details
+         in_error_section = False
+         current_error = []
+
+         for line in output_lines:
+             if line.startswith("FAILED ") or line.startswith("ERROR "):
+                 in_error_section = True
+                 current_error = [line]
+             elif in_error_section:
+                 if line.startswith("=") or line.startswith("_"):
+                     if current_error:
+                         errors.append("\n".join(current_error))
+                         current_error = []
+                     in_error_section = False
+                 else:
+                     current_error.append(line)
+
+         # Add final error if exists
+         if current_error:
+             errors.append("\n".join(current_error))
+
+         status = "passed" if return_code == 0 else "failed"
+
+         return {"status": status, "test_count": test_count, "passed": passed, "failed": failed, "errors": errors}
+
+     def _display_suite_results(self, description: str, results: dict):
+         """Display test suite results."""
+         status_icon = "✅" if results["status"] == "passed" else "❌"
+
+         print(f"{status_icon} {description}")
+         print(f"   Status: {results['status'].upper()}")
+         print(f"   Tests: {results['test_count']} total")
+         print(f"   Passed: {results['passed']}")
+         print(f"   Failed: {results['failed']}")
+         print(f"   Duration: {results['execution_time']:.2f}s")
+
+         if results["errors"]:
+             print(f"   Errors: {len(results['errors'])}")
+             # Show the first line of each of the first 3 errors;
+             # chr(10) avoids a backslash inside the f-string expression.
+             for i, error in enumerate(results["errors"][:3], 1):
+                 print(f"     Error {i}: {error.split(chr(10))[0][:80]}...")
+
+     def run_all_test_suites(self):
+         """Run all test suites and generate comprehensive report."""
+         print("🚀 CloudOps FinOps Dashboard - Comprehensive Test Suite")
+         print("=" * 60)
+         print(f"Test execution started: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
+
+         # Define test suites
+         test_suites = [
+             {"file": "test_finops_dashboard.py", "description": "Core Unit Tests", "category": "unit"},
+             {
+                 "file": "test_reference_images_validation.py",
+                 "description": "Reference Images Validation (5 Use Cases)",
+                 "category": "validation",
+             },
+             {
+                 "file": "test_performance_benchmarks.py",
+                 "description": "Performance Benchmarks",
+                 "category": "performance",
+             },
+             {"file": "test_integration.py", "description": "Integration Tests", "category": "integration"},
+         ]
+
+         # Run each test suite
+         overall_start_time = time.perf_counter()
+
+         for suite_info in test_suites:
+             suite_results = self.run_test_suite(suite_info["file"], suite_info["description"])
+             self.results["test_execution"]["test_suites"][suite_info["file"]] = {
+                 "description": suite_info["description"],
+                 "category": suite_info["category"],
+                 "results": suite_results,
+             }
+
+         overall_execution_time = time.perf_counter() - overall_start_time
+
+         # Generate summary
+         self._generate_summary(overall_execution_time)
+
+         # Save results
+         self._save_results()
+
+         # Display final summary
+         self._display_final_summary()
+
+         return self.results
+
+     def _generate_summary(self, total_execution_time: float):
+         """Generate comprehensive summary of all test results."""
+         summary = {
+             "total_execution_time": total_execution_time,
+             "total_suites": len(self.results["test_execution"]["test_suites"]),
+             "suites_passed": 0,
+             "suites_failed": 0,
+             "suites_skipped": 0,
+             "total_tests": 0,
+             "total_passed": 0,
+             "total_failed": 0,
+             "total_errors": 0,
+             "success_rate": 0.0,
+             "performance_metrics": {"fastest_suite": None, "slowest_suite": None, "average_suite_time": 0.0},
+         }
+
+         suite_times = []
+
+         for suite_name, suite_data in self.results["test_execution"]["test_suites"].items():
+             results = suite_data["results"]
+
+             # Count suite statuses
+             if results["status"] == "passed":
+                 summary["suites_passed"] += 1
+             elif results["status"] == "skipped":
+                 summary["suites_skipped"] += 1
+             else:
+                 summary["suites_failed"] += 1
+
+             # Accumulate test counts
+             summary["total_tests"] += results["test_count"]
+             summary["total_passed"] += results["passed"]
+             summary["total_failed"] += results["failed"]
+             summary["total_errors"] += len(results["errors"])
+
+             # Track execution times
+             exec_time = results["execution_time"]
+             suite_times.append((suite_name, exec_time))
+
+         # Calculate success rate
+         if summary["total_tests"] > 0:
+             summary["success_rate"] = (summary["total_passed"] / summary["total_tests"]) * 100
+
+         # Calculate performance metrics
+         if suite_times:
+             suite_times.sort(key=lambda x: x[1])
+             summary["performance_metrics"]["fastest_suite"] = {"name": suite_times[0][0], "time": suite_times[0][1]}
+             summary["performance_metrics"]["slowest_suite"] = {"name": suite_times[-1][0], "time": suite_times[-1][1]}
+             summary["performance_metrics"]["average_suite_time"] = sum(t[1] for t in suite_times) / len(suite_times)
+
+         self.results["test_execution"]["summary"] = summary
+
+     def _save_results(self):
+         """Save comprehensive test results to file."""
+         results_file = self.test_dir / f"comprehensive_test_results_{datetime.now().strftime('%Y%m%d_%H%M%S')}.json"
+
+         with open(results_file, "w") as f:
+             json.dump(self.results, f, indent=2, default=str)
+
+         print(f"\n📄 Detailed results saved: {results_file}")
+
+     def _display_final_summary(self):
+         """Display final comprehensive summary."""
+         summary = self.results["test_execution"]["summary"]
+
+         print("\n" + "=" * 60)
+         print("📊 COMPREHENSIVE TEST SUMMARY")
+         print("=" * 60)
+
+         # Overall results
+         overall_status = "✅ PASSED" if summary["suites_failed"] == 0 else "❌ FAILED"
+         print(f"Overall Status: {overall_status}")
+         print(f"Total Execution Time: {summary['total_execution_time']:.2f}s")
+
+         print("\n📋 Test Suites:")
+         print(f"   Total: {summary['total_suites']}")
+         print(f"   Passed: {summary['suites_passed']} ✅")
+         print(f"   Failed: {summary['suites_failed']} ❌")
+         print(f"   Skipped: {summary['suites_skipped']} ⏭️")
+
+         print("\n🧪 Individual Tests:")
+         print(f"   Total: {summary['total_tests']}")
+         print(f"   Passed: {summary['total_passed']} ✅")
+         print(f"   Failed: {summary['total_failed']} ❌")
+         print(f"   Success Rate: {summary['success_rate']:.1f}%")
+
+         # Performance metrics
+         perf = summary["performance_metrics"]
+         if perf["fastest_suite"]:
+             print("\n⚡ Performance:")
+             print(f"   Fastest Suite: {perf['fastest_suite']['name']} ({perf['fastest_suite']['time']:.2f}s)")
+             print(f"   Slowest Suite: {perf['slowest_suite']['name']} ({perf['slowest_suite']['time']:.2f}s)")
+             print(f"   Average Suite Time: {perf['average_suite_time']:.2f}s")
+
+         # Quality assessment
+         print("\n🏆 Quality Assessment:")
+         if summary["success_rate"] >= 95:
+             print("   ✅ EXCELLENT - Success rate ≥95%")
+         elif summary["success_rate"] >= 90:
+             print("   ✅ GOOD - Success rate ≥90%")
+         elif summary["success_rate"] >= 80:
+             print("   ⚠️ FAIR - Success rate ≥80%")
+         else:
+             print("   ❌ POOR - Success rate <80%")
+
+         # Performance assessment
+         if summary["total_execution_time"] < 10:
+             print("   ⚡ FAST - Total execution <10s")
+         elif summary["total_execution_time"] < 30:
+             print("   ⚡ REASONABLE - Total execution <30s")
+         else:
+             print("   🐌 SLOW - Total execution ≥30s")
+
+         print("\n" + "=" * 60)
+
+         # Exit with appropriate code
+         exit_code = 0 if summary["suites_failed"] == 0 else 1
+         if exit_code != 0:
+             print("❌ Some tests failed. Check detailed results above.")
+
+         return exit_code
+
+     def run_specific_category(self, category: str):
+         """Run tests for a specific category only."""
+         category_mapping = {
+             "unit": ["test_finops_dashboard.py"],
+             "validation": ["test_reference_images_validation.py"],
+             "performance": ["test_performance_benchmarks.py"],
+             "integration": ["test_integration.py"],
+         }
+
+         if category not in category_mapping:
+             print(f"❌ Unknown category: {category}")
+             print(f"Available categories: {', '.join(category_mapping.keys())}")
+             return 1
+
+         print(f"🎯 Running {category.upper()} tests only")
+
+         for test_file in category_mapping[category]:
+             description = f"{category.title()} Tests"
+             suite_results = self.run_test_suite(test_file, description)
+
+             if suite_results["status"] != "passed":
+                 return 1
+
+         return 0
+
+
+ def main():
+     """Main entry point for comprehensive test runner."""
+     import argparse
+
+     parser = argparse.ArgumentParser(description="CloudOps FinOps Comprehensive Test Runner")
+     parser.add_argument(
+         "--category",
+         choices=["unit", "validation", "performance", "integration"],
+         help="Run specific test category only",
+     )
+     parser.add_argument("--quick", action="store_true", help="Run quick validation tests only")
+
+     args = parser.parse_args()
+
+     runner = ComprehensiveTestRunner()
+
+     try:
+         if args.category:
+             exit_code = runner.run_specific_category(args.category)
+         elif args.quick:
+             # Quick validation - core unit tests only
+             exit_code = runner.run_specific_category("unit")
+         else:
+             # Run the comprehensive test suite
+             results = runner.run_all_test_suites()
+             exit_code = 0 if results["test_execution"]["summary"]["suites_failed"] == 0 else 1
+
+         sys.exit(exit_code)
+
+     except KeyboardInterrupt:
+         print("\n⏹️ Test execution interrupted by user")
+         sys.exit(130)
+     except Exception as e:
+         print(f"\n💥 Test runner error: {e}")
+         sys.exit(1)
+
+
+ if __name__ == "__main__":
+     main()
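As the argparse block above shows, the runner can execute the full suite or a single category; typical invocations:

    python run_comprehensive_tests.py                      # all four suites plus a JSON report
    python run_comprehensive_tests.py --category unit      # core unit tests only
    python run_comprehensive_tests.py --quick              # alias for the unit category

The exit code is 0 only when every selected suite passes, so the script can gate CI pipelines directly.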