runbooks-0.7.6-py3-none-any.whl → runbooks-0.7.9-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- runbooks/__init__.py +1 -1
- runbooks/base.py +5 -1
- runbooks/cfat/__init__.py +8 -4
- runbooks/cfat/assessment/collectors.py +171 -14
- runbooks/cfat/assessment/compliance.py +871 -0
- runbooks/cfat/assessment/runner.py +122 -11
- runbooks/cfat/models.py +6 -2
- runbooks/common/logger.py +14 -0
- runbooks/common/rich_utils.py +451 -0
- runbooks/enterprise/__init__.py +68 -0
- runbooks/enterprise/error_handling.py +411 -0
- runbooks/enterprise/logging.py +439 -0
- runbooks/enterprise/multi_tenant.py +583 -0
- runbooks/finops/README.md +468 -241
- runbooks/finops/__init__.py +39 -3
- runbooks/finops/cli.py +83 -18
- runbooks/finops/cross_validation.py +375 -0
- runbooks/finops/dashboard_runner.py +812 -164
- runbooks/finops/enhanced_dashboard_runner.py +525 -0
- runbooks/finops/finops_dashboard.py +1892 -0
- runbooks/finops/helpers.py +485 -51
- runbooks/finops/optimizer.py +823 -0
- runbooks/finops/tests/__init__.py +19 -0
- runbooks/finops/tests/results_test_finops_dashboard.xml +1 -0
- runbooks/finops/tests/run_comprehensive_tests.py +421 -0
- runbooks/finops/tests/run_tests.py +305 -0
- runbooks/finops/tests/test_finops_dashboard.py +705 -0
- runbooks/finops/tests/test_integration.py +477 -0
- runbooks/finops/tests/test_performance.py +380 -0
- runbooks/finops/tests/test_performance_benchmarks.py +500 -0
- runbooks/finops/tests/test_reference_images_validation.py +867 -0
- runbooks/finops/tests/test_single_account_features.py +715 -0
- runbooks/finops/tests/validate_test_suite.py +220 -0
- runbooks/finops/types.py +1 -1
- runbooks/hitl/enhanced_workflow_engine.py +725 -0
- runbooks/inventory/artifacts/scale-optimize-status.txt +12 -0
- runbooks/inventory/collectors/aws_comprehensive.py +442 -0
- runbooks/inventory/collectors/enterprise_scale.py +281 -0
- runbooks/inventory/core/collector.py +172 -13
- runbooks/inventory/discovery.md +1 -1
- runbooks/inventory/list_ec2_instances.py +18 -20
- runbooks/inventory/list_ssm_parameters.py +31 -3
- runbooks/inventory/organizations_discovery.py +1269 -0
- runbooks/inventory/rich_inventory_display.py +393 -0
- runbooks/inventory/run_on_multi_accounts.py +35 -19
- runbooks/inventory/runbooks.security.report_generator.log +0 -0
- runbooks/inventory/runbooks.security.run_script.log +0 -0
- runbooks/inventory/vpc_flow_analyzer.py +1030 -0
- runbooks/main.py +2215 -119
- runbooks/metrics/dora_metrics_engine.py +599 -0
- runbooks/operate/__init__.py +2 -2
- runbooks/operate/base.py +122 -10
- runbooks/operate/deployment_framework.py +1032 -0
- runbooks/operate/deployment_validator.py +853 -0
- runbooks/operate/dynamodb_operations.py +10 -6
- runbooks/operate/ec2_operations.py +319 -11
- runbooks/operate/executive_dashboard.py +779 -0
- runbooks/operate/mcp_integration.py +750 -0
- runbooks/operate/nat_gateway_operations.py +1120 -0
- runbooks/operate/networking_cost_heatmap.py +685 -0
- runbooks/operate/privatelink_operations.py +940 -0
- runbooks/operate/s3_operations.py +10 -6
- runbooks/operate/vpc_endpoints.py +644 -0
- runbooks/operate/vpc_operations.py +1038 -0
- runbooks/remediation/__init__.py +2 -2
- runbooks/remediation/acm_remediation.py +1 -1
- runbooks/remediation/base.py +1 -1
- runbooks/remediation/cloudtrail_remediation.py +1 -1
- runbooks/remediation/cognito_remediation.py +1 -1
- runbooks/remediation/dynamodb_remediation.py +1 -1
- runbooks/remediation/ec2_remediation.py +1 -1
- runbooks/remediation/ec2_unattached_ebs_volumes.py +1 -1
- runbooks/remediation/kms_enable_key_rotation.py +1 -1
- runbooks/remediation/kms_remediation.py +1 -1
- runbooks/remediation/lambda_remediation.py +1 -1
- runbooks/remediation/multi_account.py +1 -1
- runbooks/remediation/rds_remediation.py +1 -1
- runbooks/remediation/s3_block_public_access.py +1 -1
- runbooks/remediation/s3_enable_access_logging.py +1 -1
- runbooks/remediation/s3_encryption.py +1 -1
- runbooks/remediation/s3_remediation.py +1 -1
- runbooks/remediation/vpc_remediation.py +475 -0
- runbooks/security/__init__.py +3 -1
- runbooks/security/compliance_automation.py +632 -0
- runbooks/security/report_generator.py +10 -0
- runbooks/security/run_script.py +31 -5
- runbooks/security/security_baseline_tester.py +169 -30
- runbooks/security/security_export.py +477 -0
- runbooks/validation/__init__.py +10 -0
- runbooks/validation/benchmark.py +484 -0
- runbooks/validation/cli.py +356 -0
- runbooks/validation/mcp_validator.py +768 -0
- runbooks/vpc/__init__.py +38 -0
- runbooks/vpc/config.py +212 -0
- runbooks/vpc/cost_engine.py +347 -0
- runbooks/vpc/heatmap_engine.py +605 -0
- runbooks/vpc/manager_interface.py +634 -0
- runbooks/vpc/networking_wrapper.py +1260 -0
- runbooks/vpc/rich_formatters.py +679 -0
- runbooks/vpc/tests/__init__.py +5 -0
- runbooks/vpc/tests/conftest.py +356 -0
- runbooks/vpc/tests/test_cli_integration.py +530 -0
- runbooks/vpc/tests/test_config.py +458 -0
- runbooks/vpc/tests/test_cost_engine.py +479 -0
- runbooks/vpc/tests/test_networking_wrapper.py +512 -0
- {runbooks-0.7.6.dist-info → runbooks-0.7.9.dist-info}/METADATA +40 -12
- {runbooks-0.7.6.dist-info → runbooks-0.7.9.dist-info}/RECORD +111 -50
- {runbooks-0.7.6.dist-info → runbooks-0.7.9.dist-info}/WHEEL +0 -0
- {runbooks-0.7.6.dist-info → runbooks-0.7.9.dist-info}/entry_points.txt +0 -0
- {runbooks-0.7.6.dist-info → runbooks-0.7.9.dist-info}/licenses/LICENSE +0 -0
- {runbooks-0.7.6.dist-info → runbooks-0.7.9.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,305 @@
|
|
1
|
+
#!/usr/bin/env python3
|
2
|
+
"""
|
3
|
+
Test Runner for FinOps Dashboard Enterprise Components.
|
4
|
+
|
5
|
+
This script runs all tests for the FinOps dashboard and provides
|
6
|
+
a comprehensive validation report.
|
7
|
+
|
8
|
+
Usage:
|
9
|
+
python run_tests.py # Run all tests
|
10
|
+
python run_tests.py --unit-only # Run only unit tests
|
11
|
+
python run_tests.py --integration-only # Run only integration tests
|
12
|
+
python run_tests.py --performance-only # Run only performance tests
|
13
|
+
python run_tests.py --quick # Run quick validation tests only
|
14
|
+
|
15
|
+
Author: CloudOps Runbooks Team
|
16
|
+
Version: 0.7.8
|
17
|
+
"""
|
18
|
+
|
19
|
+
import argparse
|
20
|
+
import sys
|
21
|
+
import time
|
22
|
+
from pathlib import Path
|
23
|
+
|
24
|
+
# Add the finops module to path for testing
|
25
|
+
sys.path.insert(0, str(Path(__file__).parent.parent))
|
26
|
+
|
27
|
+
|
28
|
+
def run_basic_validation():
    """Smoke-test that the finops dashboard module imports and its core objects construct."""
    print("🔍 Running Basic Validation...")

    try:
        # A single import statement exercises the whole public surface at once.
        from runbooks.finops.finops_dashboard import (
            EnterpriseDiscovery,
            EnterpriseExecutiveDashboard,
            EnterpriseExportEngine,
            EnterpriseResourceAuditor,
            FinOpsConfig,
            MultiAccountCostTrendAnalyzer,
            ResourceUtilizationHeatmapAnalyzer,
            create_finops_dashboard,
            run_complete_finops_analysis,
        )

        print(" ✅ All imports successful")

        # Exercise each constructor; any raise falls through to the handler below.
        cfg = FinOpsConfig()
        print(" ✅ FinOpsConfig creation successful")

        EnterpriseDiscovery(cfg)
        print(" ✅ EnterpriseDiscovery creation successful")

        MultiAccountCostTrendAnalyzer(cfg)
        print(" ✅ MultiAccountCostTrendAnalyzer creation successful")

        # The factory wires all components together in one call.
        create_finops_dashboard()
        print(" ✅ Factory function successful")

        print("✅ Basic validation passed\n")
        return True

    except Exception as e:
        # Any import or construction failure marks the whole validation as failed.
        print(f"❌ Basic validation failed: {e}\n")
        return False
|
68
|
+
|
69
|
+
|
70
|
+
def run_quick_functional_test():
    """Run the complete finops analysis once and sanity-check its result structure."""
    print("🚀 Running Quick Functional Test...")

    try:
        from runbooks.finops.finops_dashboard import run_complete_finops_analysis

        start = time.perf_counter()
        results = run_complete_finops_analysis()
        execution_time = time.perf_counter() - start

        # Contract: completed status plus the three top-level result sections.
        assert results["workflow_status"] == "completed"
        for section in ("cost_analysis", "audit_results", "executive_summary"):
            assert section in results

        print(f" ✅ Complete workflow executed in {execution_time:.2f}s")

        # Surface the headline numbers when the cost analysis actually ran.
        cost_section = results["cost_analysis"]
        if cost_section["status"] == "completed":
            cost_data = cost_section["cost_trends"]
            optimization = cost_section["optimization_opportunities"]

            print(f" 📊 Analyzed {cost_data['total_accounts']} accounts")
            print(f" 💰 Total monthly spend: ${cost_data['total_monthly_spend']:,.2f}")
            print(f" 🎯 Potential savings: {optimization['savings_percentage']:.1f}%")
            print(f" 💵 Annual impact: ${optimization['annual_savings_potential']:,.2f}")

        # Export status is optional; report counts only when present.
        if "export_status" in results:
            exports = results["export_status"]
            successful = len(exports["successful_exports"])
            failed = len(exports["failed_exports"])
            print(f" 📄 Exports: {successful} successful, {failed} failed")

        print("✅ Quick functional test passed\n")
        return True

    except Exception as e:
        print(f"❌ Quick functional test failed: {e}\n")
        return False
|
111
|
+
|
112
|
+
|
113
|
+
def run_dashboard_runner_integration():
    """Verify the dashboard_runner entry points exist and carry the expected arity."""
    print("🔗 Testing Dashboard Runner Integration...")

    try:
        from runbooks.finops.dashboard_runner import (
            _run_cost_trend_analysis,
            _run_executive_dashboard,
            _run_resource_heatmap_analysis,
            run_complete_finops_workflow,
        )

        print(" ✅ Dashboard runner imports successful")

        import inspect

        # (function, expected parameter count, success message), checked in order.
        expectations = [
            (_run_cost_trend_analysis, 2, " ✅ _run_cost_trend_analysis signature correct"),
            (_run_resource_heatmap_analysis, 3, " ✅ _run_resource_heatmap_analysis signature correct"),
            (_run_executive_dashboard, 4, " ✅ _run_executive_dashboard signature correct"),
        ]
        for func, arity, message in expectations:
            assert len(inspect.signature(func).parameters) == arity
            print(message)

        print("✅ Dashboard runner integration verified\n")
        return True

    except Exception as e:
        # Missing symbol or arity mismatch both land here.
        print(f"❌ Dashboard runner integration failed: {e}\n")
        return False
|
149
|
+
|
150
|
+
|
151
|
+
def run_module_exports_test():
    """Check that runbooks.finops re-exports its public API and reports the pinned version."""
    print("📦 Testing Module Exports...")

    try:
        # One import statement proves every expected symbol is re-exported,
        # covering both the pre-existing API and the v0.7.8 additions.
        from runbooks.finops import (
            EnterpriseDiscovery,
            EnterpriseExecutiveDashboard,
            EnterpriseExportEngine,
            EnterpriseResourceAuditor,
            FinOpsConfig,
            MultiAccountCostTrendAnalyzer,
            ResourceUtilizationHeatmapAnalyzer,
            _run_cost_trend_analysis,
            _run_executive_dashboard,
            _run_resource_heatmap_analysis,
            create_finops_dashboard,
            get_aws_profiles,
            get_cost_data,
            run_complete_finops_analysis,
            run_complete_finops_workflow,
            run_dashboard,
        )

        print(" ✅ All expected exports available")

        from runbooks.finops import __version__

        # NOTE(review): pinned to 0.7.8 although this ships in the 0.7.9 wheel —
        # confirm the package __version__ was intentionally left behind.
        assert __version__ == "0.7.8"
        print(f" ✅ Version {__version__} correct")

        print("✅ Module exports test passed\n")
        return True

    except Exception as e:
        print(f"❌ Module exports test failed: {e}\n")
        return False
|
191
|
+
|
192
|
+
|
193
|
+
def _run_pytest(test_file, verbose, extra_args=()):
    """Run pytest on *test_file* (relative to this directory) in a subprocess.

    Args:
        test_file: File name of the test module to run.
        verbose: When True, pass ``-v`` to pytest and stream its output;
            otherwise capture the output silently.
        extra_args: Additional pytest command-line arguments.

    Returns:
        The pytest process's exit code.
    """
    import subprocess

    cmd = [sys.executable, "-m", "pytest", str(Path(__file__).parent / test_file)]
    # Only append -v when requested; previously an empty-string argument was
    # passed in the non-verbose case, which pytest rejects as an unknown target.
    if verbose:
        cmd.append("-v")
    cmd.extend(extra_args)
    result = subprocess.run(cmd, capture_output=not verbose)
    return result.returncode


def main():
    """Parse CLI flags, run the selected test set, and return a process exit code.

    Returns:
        0 when every selected test passed (or the delegated pytest run
        succeeded), otherwise a non-zero exit code.
    """
    parser = argparse.ArgumentParser(description="FinOps Dashboard Test Runner")
    parser.add_argument("--unit-only", action="store_true", help="Run only unit tests")
    parser.add_argument("--integration-only", action="store_true", help="Run only integration tests")
    parser.add_argument("--performance-only", action="store_true", help="Run only performance tests")
    parser.add_argument("--quick", action="store_true", help="Run quick validation tests only")
    parser.add_argument("--verbose", "-v", action="store_true", help="Verbose output")

    args = parser.parse_args()

    print("🧪 FinOps Dashboard Test Runner v0.7.8")
    print("=" * 60)

    start_time = time.perf_counter()

    # The in-process validation suite, shared by --quick and the default mode.
    validation_tests = [
        run_basic_validation,
        run_quick_functional_test,
        run_dashboard_runner_integration,
        run_module_exports_test,
    ]

    # --quick takes precedence over the pytest-delegation flags, matching the
    # original branch order; the pytest modes return pytest's exit code directly.
    if args.quick:
        tests = validation_tests
    elif args.unit_only:
        print("Running unit tests with pytest...")
        return _run_pytest("test_finops_dashboard.py", args.verbose)
    elif args.integration_only:
        print("Running integration tests with pytest...")
        return _run_pytest("test_integration.py", args.verbose)
    elif args.performance_only:
        print("Running performance tests with pytest...")
        # -s: performance tests emit timing output that must not be captured.
        return _run_pytest("test_performance.py", args.verbose, extra_args=("-s",))
    else:
        tests = validation_tests
        print("Note: Use pytest directly to run comprehensive unit/integration/performance tests:")
        print(" pytest src/runbooks/finops/tests/ -v")
        print()

    # Run the selected tests; a crashing test counts as a failure rather than
    # aborting the whole run.
    results = []
    for test in tests:
        try:
            results.append(test())
        except Exception as e:
            print(f"❌ Test {test.__name__} crashed: {e}")
            results.append(False)

    total_time = time.perf_counter() - start_time

    # Summary banner and exit code.
    print("=" * 60)
    passed = sum(results)
    total = len(results)

    if passed == total:
        print(f"🎉 ALL TESTS PASSED ({passed}/{total}) in {total_time:.2f}s")
        print("\n✅ FinOps Dashboard v0.7.8 is ready for production deployment!")
        return 0
    else:
        print(f"❌ SOME TESTS FAILED ({passed}/{total}) in {total_time:.2f}s")
        print("\n⚠️ Please fix failing tests before deployment.")
        return 1
|
302
|
+
|
303
|
+
|
304
|
+
# Script entry point: propagate main()'s return value as the process exit code.
if __name__ == "__main__":
    raise SystemExit(main())
|