runbooks-0.7.6-py3-none-any.whl → runbooks-0.7.9-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (111)
  1. runbooks/__init__.py +1 -1
  2. runbooks/base.py +5 -1
  3. runbooks/cfat/__init__.py +8 -4
  4. runbooks/cfat/assessment/collectors.py +171 -14
  5. runbooks/cfat/assessment/compliance.py +871 -0
  6. runbooks/cfat/assessment/runner.py +122 -11
  7. runbooks/cfat/models.py +6 -2
  8. runbooks/common/logger.py +14 -0
  9. runbooks/common/rich_utils.py +451 -0
  10. runbooks/enterprise/__init__.py +68 -0
  11. runbooks/enterprise/error_handling.py +411 -0
  12. runbooks/enterprise/logging.py +439 -0
  13. runbooks/enterprise/multi_tenant.py +583 -0
  14. runbooks/finops/README.md +468 -241
  15. runbooks/finops/__init__.py +39 -3
  16. runbooks/finops/cli.py +83 -18
  17. runbooks/finops/cross_validation.py +375 -0
  18. runbooks/finops/dashboard_runner.py +812 -164
  19. runbooks/finops/enhanced_dashboard_runner.py +525 -0
  20. runbooks/finops/finops_dashboard.py +1892 -0
  21. runbooks/finops/helpers.py +485 -51
  22. runbooks/finops/optimizer.py +823 -0
  23. runbooks/finops/tests/__init__.py +19 -0
  24. runbooks/finops/tests/results_test_finops_dashboard.xml +1 -0
  25. runbooks/finops/tests/run_comprehensive_tests.py +421 -0
  26. runbooks/finops/tests/run_tests.py +305 -0
  27. runbooks/finops/tests/test_finops_dashboard.py +705 -0
  28. runbooks/finops/tests/test_integration.py +477 -0
  29. runbooks/finops/tests/test_performance.py +380 -0
  30. runbooks/finops/tests/test_performance_benchmarks.py +500 -0
  31. runbooks/finops/tests/test_reference_images_validation.py +867 -0
  32. runbooks/finops/tests/test_single_account_features.py +715 -0
  33. runbooks/finops/tests/validate_test_suite.py +220 -0
  34. runbooks/finops/types.py +1 -1
  35. runbooks/hitl/enhanced_workflow_engine.py +725 -0
  36. runbooks/inventory/artifacts/scale-optimize-status.txt +12 -0
  37. runbooks/inventory/collectors/aws_comprehensive.py +442 -0
  38. runbooks/inventory/collectors/enterprise_scale.py +281 -0
  39. runbooks/inventory/core/collector.py +172 -13
  40. runbooks/inventory/discovery.md +1 -1
  41. runbooks/inventory/list_ec2_instances.py +18 -20
  42. runbooks/inventory/list_ssm_parameters.py +31 -3
  43. runbooks/inventory/organizations_discovery.py +1269 -0
  44. runbooks/inventory/rich_inventory_display.py +393 -0
  45. runbooks/inventory/run_on_multi_accounts.py +35 -19
  46. runbooks/inventory/runbooks.security.report_generator.log +0 -0
  47. runbooks/inventory/runbooks.security.run_script.log +0 -0
  48. runbooks/inventory/vpc_flow_analyzer.py +1030 -0
  49. runbooks/main.py +2215 -119
  50. runbooks/metrics/dora_metrics_engine.py +599 -0
  51. runbooks/operate/__init__.py +2 -2
  52. runbooks/operate/base.py +122 -10
  53. runbooks/operate/deployment_framework.py +1032 -0
  54. runbooks/operate/deployment_validator.py +853 -0
  55. runbooks/operate/dynamodb_operations.py +10 -6
  56. runbooks/operate/ec2_operations.py +319 -11
  57. runbooks/operate/executive_dashboard.py +779 -0
  58. runbooks/operate/mcp_integration.py +750 -0
  59. runbooks/operate/nat_gateway_operations.py +1120 -0
  60. runbooks/operate/networking_cost_heatmap.py +685 -0
  61. runbooks/operate/privatelink_operations.py +940 -0
  62. runbooks/operate/s3_operations.py +10 -6
  63. runbooks/operate/vpc_endpoints.py +644 -0
  64. runbooks/operate/vpc_operations.py +1038 -0
  65. runbooks/remediation/__init__.py +2 -2
  66. runbooks/remediation/acm_remediation.py +1 -1
  67. runbooks/remediation/base.py +1 -1
  68. runbooks/remediation/cloudtrail_remediation.py +1 -1
  69. runbooks/remediation/cognito_remediation.py +1 -1
  70. runbooks/remediation/dynamodb_remediation.py +1 -1
  71. runbooks/remediation/ec2_remediation.py +1 -1
  72. runbooks/remediation/ec2_unattached_ebs_volumes.py +1 -1
  73. runbooks/remediation/kms_enable_key_rotation.py +1 -1
  74. runbooks/remediation/kms_remediation.py +1 -1
  75. runbooks/remediation/lambda_remediation.py +1 -1
  76. runbooks/remediation/multi_account.py +1 -1
  77. runbooks/remediation/rds_remediation.py +1 -1
  78. runbooks/remediation/s3_block_public_access.py +1 -1
  79. runbooks/remediation/s3_enable_access_logging.py +1 -1
  80. runbooks/remediation/s3_encryption.py +1 -1
  81. runbooks/remediation/s3_remediation.py +1 -1
  82. runbooks/remediation/vpc_remediation.py +475 -0
  83. runbooks/security/__init__.py +3 -1
  84. runbooks/security/compliance_automation.py +632 -0
  85. runbooks/security/report_generator.py +10 -0
  86. runbooks/security/run_script.py +31 -5
  87. runbooks/security/security_baseline_tester.py +169 -30
  88. runbooks/security/security_export.py +477 -0
  89. runbooks/validation/__init__.py +10 -0
  90. runbooks/validation/benchmark.py +484 -0
  91. runbooks/validation/cli.py +356 -0
  92. runbooks/validation/mcp_validator.py +768 -0
  93. runbooks/vpc/__init__.py +38 -0
  94. runbooks/vpc/config.py +212 -0
  95. runbooks/vpc/cost_engine.py +347 -0
  96. runbooks/vpc/heatmap_engine.py +605 -0
  97. runbooks/vpc/manager_interface.py +634 -0
  98. runbooks/vpc/networking_wrapper.py +1260 -0
  99. runbooks/vpc/rich_formatters.py +679 -0
  100. runbooks/vpc/tests/__init__.py +5 -0
  101. runbooks/vpc/tests/conftest.py +356 -0
  102. runbooks/vpc/tests/test_cli_integration.py +530 -0
  103. runbooks/vpc/tests/test_config.py +458 -0
  104. runbooks/vpc/tests/test_cost_engine.py +479 -0
  105. runbooks/vpc/tests/test_networking_wrapper.py +512 -0
  106. {runbooks-0.7.6.dist-info → runbooks-0.7.9.dist-info}/METADATA +40 -12
  107. {runbooks-0.7.6.dist-info → runbooks-0.7.9.dist-info}/RECORD +111 -50
  108. {runbooks-0.7.6.dist-info → runbooks-0.7.9.dist-info}/WHEEL +0 -0
  109. {runbooks-0.7.6.dist-info → runbooks-0.7.9.dist-info}/entry_points.txt +0 -0
  110. {runbooks-0.7.6.dist-info → runbooks-0.7.9.dist-info}/licenses/LICENSE +0 -0
  111. {runbooks-0.7.6.dist-info → runbooks-0.7.9.dist-info}/top_level.txt +0 -0
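
The listing above can be reproduced locally. A minimal sketch, assuming both wheels have been downloaded first (for example via "pip download runbooks==0.7.6 --no-deps" and likewise for 0.7.9; wheel files are ordinary zip archives):

import zipfile

# Wheels are zip archives: comparing their file listings yields the changed-paths view.
old = set(zipfile.ZipFile("runbooks-0.7.6-py3-none-any.whl").namelist())
new = set(zipfile.ZipFile("runbooks-0.7.9-py3-none-any.whl").namelist())

for name in sorted(new - old):
    print(f"added:   {name}")
for name in sorted(old - new):
    print(f"removed: {name}")

Per-file line counts like those above additionally require extracting and diffing file contents; the listing comparison only yields added and removed paths (the renamed dist-info directories show up as a remove/add pair rather than a rename).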
runbooks/finops/tests/test_reference_images_validation.py
@@ -0,0 +1,867 @@
+ #!/usr/bin/env python3
+ """
+ Reference Images Validation Test Suite for FinOps Dashboard.
+
+ This module validates the 5 reference image outputs from the finops module
+ to ensure comprehensive functionality and output validation against expected
+ enterprise-grade dashboard and reporting requirements.
+
+ Test Cases Based on Reference Images:
+ 1. Cost Analysis Dashboard - Multi-account cost trending and optimization
+ 2. Resource Utilization Heatmap - Efficiency scoring and rightsizing
+ 3. Executive Summary Reports - C-suite financial visibility
+ 4. Audit & Compliance Reports - Risk assessment and compliance scoring
+ 5. Export & Integration - Multi-format data export capabilities
+
+ Author: CloudOps Runbooks Team
+ Version: 0.7.8
+ """
+
+ import json
+ import os
+ import tempfile
+ from datetime import datetime, timedelta
+ from pathlib import Path
+ from unittest.mock import MagicMock, Mock, patch
+
+ import pytest
+
+ # Import the components we're testing
+ from runbooks.finops.finops_dashboard import (
+     EnterpriseDiscovery,
+     EnterpriseExecutiveDashboard,
+     EnterpriseExportEngine,
+     EnterpriseResourceAuditor,
+     FinOpsConfig,
+     MultiAccountCostTrendAnalyzer,
+     ResourceUtilizationHeatmapAnalyzer,
+     create_finops_dashboard,
+     run_complete_finops_analysis,
+ )
+
+
+ class TestReferenceImage1_CostAnalysisDashboard:
+     """
+     Test Case 1: Cost Analysis Dashboard Validation
+
+     Validates the multi-account cost trending dashboard functionality
+     corresponding to aws-finops-dashboard reference images.
+
+     Expected Output Characteristics:
+     - Multi-account cost trends with $152,991.07 baseline
+     - 20+ account cost breakdown visualization
+     - Optimization opportunities identification (25-50% target)
+     - Time-series cost analysis and forecasting
+     """
+
+     @pytest.fixture
+     def cost_dashboard_config(self):
+         """Configuration for cost dashboard testing."""
+         config = FinOpsConfig()
+         config.target_savings_percent = 40  # 40% optimization target
+         config.time_range_days = 90  # 3 months analysis
+         config.min_account_threshold = 20  # Match 20+ accounts requirement
+         return config
+
+     def test_multi_account_cost_trending(self, cost_dashboard_config):
+         """Test multi-account cost trending analysis matches reference image output."""
+         analyzer = MultiAccountCostTrendAnalyzer(cost_dashboard_config)
+
+         # Mock to simulate $152,991.07 baseline from reference
+         with patch("runbooks.finops.finops_dashboard.random.uniform") as mock_uniform:
+             mock_uniform.side_effect = [152991.07 / 25] * 25  # Distribute across 25 accounts
+
+             results = analyzer.analyze_cost_trends()
+
+             # Verify reference image characteristics
+             assert results["status"] == "completed"
+             assert "cost_trends" in results
+
+             cost_trends = results["cost_trends"]
+
+             # Validate multi-account structure (20+ accounts)
+             assert cost_trends["total_accounts"] >= 20
+
+             # Validate cost baseline approximates reference ($152,991.07)
+             assert 140000 <= cost_trends["total_monthly_spend"] <= 160000
+
+             # Validate account data structure for dashboard visualization
+             account_data = cost_trends["account_data"]
+             assert len(account_data) >= 20
+
+             for account in account_data[:3]:  # Check first 3 accounts
+                 assert "account_id" in account
+                 assert "account_type" in account
+                 assert "monthly_spend" in account
+                 assert "optimization_potential" in account
+                 assert 0 <= account["optimization_potential"] <= 1
+
+     def test_cost_optimization_opportunities_calculation(self, cost_dashboard_config):
+         """Test cost optimization opportunities match reference image expectations."""
+         analyzer = MultiAccountCostTrendAnalyzer(cost_dashboard_config)
+
+         # Create test data matching reference image characteristics
+         test_cost_trends = {
+             "total_monthly_spend": 152991.07,
+             "account_data": [
+                 {
+                     "account_id": "prod-001",
+                     "account_type": "production",
+                     "monthly_spend": 45000.0,
+                     "optimization_potential": 0.35,  # 35% potential
+                 },
+                 {
+                     "account_id": "dev-001",
+                     "account_type": "development",
+                     "monthly_spend": 25000.0,
+                     "optimization_potential": 0.60,  # 60% potential
+                 },
+                 {
+                     "account_id": "staging-001",
+                     "account_type": "staging",
+                     "monthly_spend": 15000.0,
+                     "optimization_potential": 0.45,  # 45% potential
+                 },
+             ],
+         }
+
+         optimization = analyzer._calculate_optimization_opportunities(test_cost_trends)
+
+         # Validate optimization calculations for dashboard display
+         expected_savings = (45000 * 0.35) + (25000 * 0.60) + (15000 * 0.45)
+         assert optimization["total_potential_savings"] == expected_savings
+
+         # Validate annual projection for executive reporting
+         assert optimization["annual_savings_potential"] == expected_savings * 12
+
+         # Validate target achievement status for dashboard
+         savings_percentage = (expected_savings / 152991.07) * 100
+         assert optimization["savings_percentage"] == pytest.approx(savings_percentage, rel=0.01)
+
+     def test_cost_trend_time_series_data(self, cost_dashboard_config):
+         """Test time series data structure for dashboard charting."""
+         analyzer = MultiAccountCostTrendAnalyzer(cost_dashboard_config)
+         results = analyzer.analyze_cost_trends()
+
+         cost_trends = results["cost_trends"]
+
+         # Validate time series structure for dashboard visualization
+         assert "cost_trend_summary" in cost_trends
+
+         trend_summary = cost_trends["cost_trend_summary"]
+         assert "trend_direction" in trend_summary
+         assert "month_over_month_change" in trend_summary
+         assert "cost_volatility" in trend_summary
+
+         # Validate trend direction is valid for dashboard display
+         assert trend_summary["trend_direction"] in ["increasing", "decreasing", "stable"]
+
+         # Validate month-over-month change is numeric for charts
+         assert isinstance(trend_summary["month_over_month_change"], (int, float))
+
+
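
The fixture above makes the asserted arithmetic easy to check by hand. A standalone recomputation of the test's expected values (this re-derives the expectations only; nothing below comes from the package):

# Recompute the expectations from test_cost_optimization_opportunities_calculation.
expected_savings = (45000 * 0.35) + (25000 * 0.60) + (15000 * 0.45)
# 15,750 + 15,000 + 6,750 = 37,500 per month (modulo float rounding)
annual = expected_savings * 12            # ~450,000 per year
pct = expected_savings / 152991.07 * 100  # ~24.51% of the monthly baseline
print(f"{expected_savings:,.0f}/mo  {annual:,.0f}/yr  {pct:.2f}%")

Note that this fixture lands at roughly 24.5%, below the 40% target the config sets, which is consistent with the test asserting the computed percentage via pytest.approx rather than asserting target achievement.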
+ class TestReferenceImage2_ResourceUtilizationHeatmap:
+     """
+     Test Case 2: Resource Utilization Heatmap Validation
+
+     Validates the resource utilization heatmap functionality showing
+     efficiency scoring and rightsizing recommendations across services.
+
+     Expected Output Characteristics:
+     - Resource utilization matrix across compute, storage, database, network
+     - Efficiency scoring (low/medium/high categories)
+     - Rightsizing recommendations with cost impact
+     - Heat map data structure for visualization
+     """
+
+     @pytest.fixture
+     def heatmap_test_data(self):
+         """Test data for heatmap analysis."""
+         return {
+             "cost_trends": {
+                 "account_data": [
+                     {"account_id": "prod-heatmap-001", "account_type": "production", "monthly_spend": 35000.0},
+                     {"account_id": "dev-heatmap-001", "account_type": "development", "monthly_spend": 12000.0},
+                     {"account_id": "staging-heatmap-001", "account_type": "staging", "monthly_spend": 8000.0},
+                 ]
+             }
+         }
+
+     def test_resource_utilization_matrix_structure(self, heatmap_test_data):
+         """Test resource utilization matrix matches reference image structure."""
+         config = FinOpsConfig()
+         analyzer = ResourceUtilizationHeatmapAnalyzer(config, heatmap_test_data)
+
+         results = analyzer.analyze_resource_utilization()
+
+         # Verify heatmap analysis completed
+         assert results["status"] == "completed"
+         assert "heatmap_data" in results
+
+         heatmap_data = results["heatmap_data"]
+
+         # Validate matrix structure for heatmap visualization
+         assert "utilization_matrix" in heatmap_data
+         assert "resource_categories" in heatmap_data
+
+         # Validate resource categories match reference image services
+         categories = heatmap_data["resource_categories"]
+         required_categories = ["compute", "storage", "database", "network"]
+
+         for category in required_categories:
+             assert category in categories
+
+         # Validate utilization matrix has account-level data
+         utilization_matrix = heatmap_data["utilization_matrix"]
+         assert len(utilization_matrix) == 3  # 3 accounts from test data
+
+         # Validate each account has resource utilization data
+         for account_util in utilization_matrix:
+             assert "account_id" in account_util
+             assert "resource_utilization" in account_util
+
+             resource_util = account_util["resource_utilization"]
+             for category in required_categories:
+                 assert category in resource_util
+
+     def test_efficiency_scoring_calculation(self, heatmap_test_data):
+         """Test efficiency scoring matches reference image scoring logic."""
+         config = FinOpsConfig()
+         analyzer = ResourceUtilizationHeatmapAnalyzer(config, heatmap_test_data)
+
+         results = analyzer.analyze_resource_utilization()
+
+         # Validate efficiency scoring structure
+         assert "efficiency_scoring" in results
+         efficiency = results["efficiency_scoring"]
+
+         assert "average_efficiency_score" in efficiency
+         assert "efficiency_distribution" in efficiency
+
+         # Validate average efficiency score is numeric
+         avg_score = efficiency["average_efficiency_score"]
+         assert 0 <= avg_score <= 100
+
+         # Validate efficiency distribution for heatmap color coding
+         distribution = efficiency["efficiency_distribution"]
+         assert "low_efficiency" in distribution  # < 40% (red)
+         assert "medium_efficiency" in distribution  # 40-70% (yellow)
+         assert "high_efficiency" in distribution  # >= 70% (green)
+         assert "total_resources_scored" in distribution
+
+         # Validate counts sum correctly
+         total_counted = (
+             distribution["low_efficiency"] + distribution["medium_efficiency"] + distribution["high_efficiency"]
+         )
+         assert total_counted == distribution["total_resources_scored"]
+
+     def test_rightsizing_recommendations(self, heatmap_test_data):
+         """Test rightsizing recommendations for reference image display."""
+         config = FinOpsConfig()
+         analyzer = ResourceUtilizationHeatmapAnalyzer(config, heatmap_test_data)
+
+         results = analyzer.analyze_resource_utilization()
+
+         # Validate rightsizing recommendations structure
+         assert "rightsizing_recommendations" in results
+         recommendations = results["rightsizing_recommendations"]
+
+         assert "total_rightsizing_opportunities" in recommendations
+         assert "potential_monthly_savings" in recommendations
+         assert "recommendation_breakdown" in recommendations
+
+         # Validate recommendation breakdown by resource type
+         breakdown = recommendations["recommendation_breakdown"]
+
+         for resource_type in ["ec2_instances", "lambda_functions", "s3_buckets", "rds_databases"]:
+             if resource_type in breakdown:
+                 rec = breakdown[resource_type]
+                 assert "count" in rec
+                 assert "potential_savings" in rec
+                 assert "confidence_level" in rec
+
+                 # Validate confidence level for recommendation reliability
+                 assert rec["confidence_level"] in ["high", "medium", "low"]
+
+
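
The inline comments above imply the heatmap's color-coding thresholds (below 40 red, 40-70 yellow, 70 and above green). A minimal sketch of that bucketing, assuming a 0-100 score and boundary handling that matches the comments (the analyzer's internals are not shown in this diff):

def efficiency_bucket(score):
    """Map a 0-100 efficiency score to the distribution bucket implied above."""
    if score < 40:
        return "low_efficiency"     # rendered red
    if score < 70:
        return "medium_efficiency"  # rendered yellow
    return "high_efficiency"        # rendered green

assert efficiency_bucket(35) == "low_efficiency"
assert efficiency_bucket(55) == "medium_efficiency"
assert efficiency_bucket(70) == "high_efficiency"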
+ class TestReferenceImage3_ExecutiveSummaryReports:
+     """
+     Test Case 3: Executive Summary Reports Validation
+
+     Validates the C-suite executive summary functionality providing
+     high-level financial and operational insights.
+
+     Expected Output Characteristics:
+     - Executive financial overview with key metrics
+     - Operational overview with risk scoring
+     - Executive recommendations with priorities
+     - Board-level presentation format
+     """
+
+     @pytest.fixture
+     def executive_test_data(self):
+         """Test data for executive dashboard."""
+         discovery_results = {"timestamp": datetime.now().isoformat(), "status": "completed"}
+
+         cost_analysis = {
+             "status": "completed",
+             "cost_trends": {"total_monthly_spend": 152991.07, "total_accounts": 23},
+             "optimization_opportunities": {
+                 "annual_savings_potential": 1835892.84,  # 40% of annual spend
+                 "savings_percentage": 40.0,
+                 "target_achievement": {"target": 40, "achieved": 40.0, "status": "achieved"},
+             },
+         }
+
+         audit_results = {
+             "status": "completed",
+             "audit_data": {
+                 "total_resources_scanned": 3750,
+                 "risk_score": {
+                     "overall": 68,
+                     "breakdown": {"security": 72, "compliance": 65, "cost_optimization": 70, "governance": 64},
+                 },
+                 "recommendations": [
+                     {"priority": "critical", "category": "security", "description": "Address public S3 buckets"},
+                     {"priority": "high", "category": "cost", "description": "Implement EC2 rightsizing"},
+                     {"priority": "high", "category": "governance", "description": "Enforce resource tagging"},
+                     {"priority": "medium", "category": "compliance", "description": "Review IAM policies"},
+                 ],
+             },
+         }
+
+         return discovery_results, cost_analysis, audit_results
+
+     def test_executive_summary_generation(self, executive_test_data):
+         """Test executive summary matches reference image requirements."""
+         config = FinOpsConfig()
+         discovery, cost_analysis, audit = executive_test_data
+
+         dashboard = EnterpriseExecutiveDashboard(config, discovery, cost_analysis, audit)
+         summary = dashboard.generate_executive_summary()
+
+         # Validate executive summary structure
+         assert "report_metadata" in summary
+         assert "financial_overview" in summary
+         assert "operational_overview" in summary
+         assert "executive_recommendations" in summary
+
+         # Validate report metadata for C-suite presentation
+         metadata = summary["report_metadata"]
+         assert metadata["report_type"] == "enterprise_finops_executive_summary"
+         assert metadata["analysis_period"] == f"{config.time_range_days} days"
+         assert metadata["target_savings"] == f"{config.target_savings_percent}%"
+         assert "generation_timestamp" in metadata
+
+     def test_financial_overview_metrics(self, executive_test_data):
+         """Test financial overview matches reference image financial metrics."""
+         config = FinOpsConfig()
+         discovery, cost_analysis, audit = executive_test_data
+
+         dashboard = EnterpriseExecutiveDashboard(config, discovery, cost_analysis, audit)
+         summary = dashboard.generate_executive_summary()
+
+         financial = summary["financial_overview"]
+
+         # Validate key financial metrics for executive dashboard
+         assert financial["current_monthly_spend"] == 152991.07
+         assert financial["potential_annual_savings"] == 1835892.84
+         assert financial["savings_percentage"] == 40.0
+         assert financial["target_achieved"] is True
+
+         # Validate financial trend indicators
+         assert "roi_projection" in financial
+         assert "payback_period" in financial
+
+         # Validate ROI calculation (should be positive)
+         assert financial["roi_projection"] > 0
+
+     def test_operational_overview_scoring(self, executive_test_data):
+         """Test operational overview scoring for executive visibility."""
+         config = FinOpsConfig()
+         discovery, cost_analysis, audit = executive_test_data
+
+         dashboard = EnterpriseExecutiveDashboard(config, discovery, cost_analysis, audit)
+         summary = dashboard.generate_executive_summary()
+
+         operational = summary["operational_overview"]
+
+         # Validate operational metrics
+         assert operational["resources_scanned"] == 3750
+         assert operational["overall_risk_score"] == 68
+         assert operational["critical_findings"] == 1
+         assert operational["high_findings"] == 2
+         assert operational["total_accounts"] == 23
+
+         # Validate operational health indicators
+         assert "risk_level" in operational
+         assert operational["risk_level"] in ["low", "medium", "high", "critical"]
+
+         # Risk score 68 should be 'medium' risk
+         assert operational["risk_level"] == "medium"
+
+     def test_executive_recommendations_prioritization(self, executive_test_data):
+         """Test executive recommendations match reference image priorities."""
+         config = FinOpsConfig()
+         discovery, cost_analysis, audit = executive_test_data
+
+         dashboard = EnterpriseExecutiveDashboard(config, discovery, cost_analysis, audit)
+         summary = dashboard.generate_executive_summary()
+
+         recommendations = summary["executive_recommendations"]
+
+         # Validate recommendations structure
+         assert "strategic_priorities" in recommendations
+         assert "immediate_actions" in recommendations
+         assert "investment_recommendations" in recommendations
+
+         strategic = recommendations["strategic_priorities"]
+         immediate = recommendations["immediate_actions"]
+
+         # Validate strategic priorities are high-level
+         assert len(strategic) >= 3
+         for priority in strategic:
+             assert "area" in priority
+             assert "recommendation" in priority
+             assert "business_impact" in priority
+
+         # Validate immediate actions are actionable
+         assert len(immediate) >= 1
+         for action in immediate:
+             assert "description" in action
+             assert "timeline" in action
+             assert "expected_outcome" in action
+
+
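
The test asserts only that an overall score of 68 lands in the "medium" band; the band boundaries themselves are not visible in this diff. A sketch of one plausible mapping, with illustrative thresholds that should not be read as the package's actual values:

def risk_level(overall_score):
    """Map a 0-100 risk score to a band; the cutoffs here are assumptions."""
    if overall_score >= 90:
        return "critical"
    if overall_score >= 75:
        return "high"
    if overall_score >= 50:
        return "medium"
    return "low"

assert risk_level(68) == "medium"  # consistent with the fixture's expectation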
+ class TestReferenceImage4_AuditComplianceReports:
+     """
+     Test Case 4: Audit & Compliance Reports Validation
+
+     Validates the audit and compliance functionality providing
+     comprehensive risk assessment and compliance scoring.
+
+     Expected Output Characteristics:
+     - Multi-account compliance audit across all resources
+     - Risk scoring with breakdown by category
+     - Compliance findings with remediation priorities
+     - Regulatory framework alignment (SOC2, etc.)
+     """
+
+     def test_compliance_audit_comprehensive_scope(self):
+         """Test compliance audit covers comprehensive enterprise scope."""
+         config = FinOpsConfig()
+         auditor = EnterpriseResourceAuditor(config)
+
+         results = auditor.run_compliance_audit()
+
+         # Validate audit completion and scope
+         assert results["status"] == "completed"
+         assert results["audit_scope"] == "multi-account-enterprise"
+
+         audit_data = results["audit_data"]
+
+         # Validate comprehensive scanning metrics
+         assert audit_data["total_resources_scanned"] > 1000  # Enterprise-scale
+         assert audit_data["accounts_audited"] >= 5  # Multi-account coverage
+         assert audit_data["regions_covered"] >= 2  # Multi-region coverage
+
+         # Validate audit timestamp for compliance reporting
+         assert "timestamp" in results
+         assert isinstance(results["timestamp"], str)
+
+     def test_risk_scoring_breakdown(self):
+         """Test risk scoring breakdown matches compliance requirements."""
+         config = FinOpsConfig()
+         auditor = EnterpriseResourceAuditor(config)
+
+         results = auditor.run_compliance_audit()
+         audit_data = results["audit_data"]
+
+         # Validate risk score structure
+         assert "risk_score" in audit_data
+         risk_score = audit_data["risk_score"]
+
+         assert "overall" in risk_score
+         assert "breakdown" in risk_score
+
+         # Validate overall risk score
+         overall_risk = risk_score["overall"]
+         assert 0 <= overall_risk <= 100
+
+         # Validate risk breakdown categories
+         breakdown = risk_score["breakdown"]
+         required_categories = ["security", "compliance", "cost_optimization", "governance"]
+
+         for category in required_categories:
+             assert category in breakdown
+             assert 0 <= breakdown[category] <= 100
+
+     def test_compliance_findings_categorization(self):
+         """Test compliance findings match regulatory requirements."""
+         config = FinOpsConfig()
+         auditor = EnterpriseResourceAuditor(config)
+
+         results = auditor.run_compliance_audit()
+         audit_data = results["audit_data"]
+
+         # Validate compliance findings structure
+         assert "compliance_findings" in audit_data
+         findings = audit_data["compliance_findings"]
+
+         # Validate required finding categories for enterprise compliance
+         required_findings = ["untagged_resources", "unused_resources", "security_groups", "public_resources"]
+
+         for finding_type in required_findings:
+             assert finding_type in findings
+
+             finding = findings[finding_type]
+             assert "count" in finding
+             assert "severity" in finding
+             assert "impact" in finding
+
+             # Validate severity levels for prioritization
+             assert finding["severity"] in ["low", "medium", "high", "critical"]
+
+             # Validate impact assessment for business context
+             assert finding["impact"] in ["low", "medium", "high"]
+
+     def test_recommendations_prioritization_system(self):
+         """Test recommendations follow enterprise prioritization system."""
+         config = FinOpsConfig()
+         auditor = EnterpriseResourceAuditor(config)
+
+         results = auditor.run_compliance_audit()
+         audit_data = results["audit_data"]
+
+         # Validate recommendations structure
+         assert "recommendations" in audit_data
+         recommendations = audit_data["recommendations"]
+
+         # Should have recommendations for enterprise environment
+         assert len(recommendations) > 0
+
+         # Validate recommendation structure and prioritization
+         priority_counts = {"critical": 0, "high": 0, "medium": 0, "low": 0}
+
+         for rec in recommendations:
+             assert "priority" in rec
+             assert "category" in rec
+             assert "description" in rec
+             assert "remediation_effort" in rec
+
+             # Count priorities for validation
+             if rec["priority"] in priority_counts:
+                 priority_counts[rec["priority"]] += 1
+
+             # Validate category alignment
+             assert rec["category"] in ["security", "compliance", "cost", "governance", "performance"]
+
+             # Validate remediation effort estimation
+             assert rec["remediation_effort"] in ["low", "medium", "high"]
+
+
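
The tests fix the priority and effort vocabularies but not an ordering. A hypothetical triage helper (not part of the package) showing one natural way to consume records of this shape:

PRIORITY_ORDER = {"critical": 0, "high": 1, "medium": 2, "low": 3}
EFFORT_ORDER = {"low": 0, "medium": 1, "high": 2}

def triage(recommendations):
    """Order findings for a remediation queue: highest priority first,
    cheapest remediation first within the same priority."""
    return sorted(
        recommendations,
        key=lambda rec: (PRIORITY_ORDER[rec["priority"]], EFFORT_ORDER[rec["remediation_effort"]]),
    )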
+ class TestReferenceImage5_ExportIntegration:
+     """
+     Test Case 5: Export & Integration Validation
+
+     Validates the multi-format export functionality providing
+     integration capabilities with external systems and reporting.
+
+     Expected Output Characteristics:
+     - Multi-format export (JSON, CSV, HTML, PDF)
+     - Data integrity across export formats
+     - Integration-ready data structures
+     - Executive presentation formats
+     """
+
+     @pytest.fixture
+     def export_test_data(self):
+         """Comprehensive test data for export validation."""
+         discovery = {
+             "timestamp": datetime.now().isoformat(),
+             "status": "completed",
+             "available_profiles": ["profile1", "profile2"],
+             "configured_profiles": {
+                 "billing": "ams-admin-Billing-ReadOnlyAccess-909135376185",
+                 "management": "ams-admin-ReadOnlyAccess-909135376185",
+             },
+         }
+
+         cost_analysis = {
+             "status": "completed",
+             "timestamp": datetime.now().isoformat(),
+             "cost_trends": {
+                 "total_monthly_spend": 152991.07,
+                 "total_accounts": 23,
+                 "account_data": [
+                     {
+                         "account_id": "export-test-001",
+                         "account_type": "production",
+                         "monthly_spend": 45000.0,
+                         "optimization_potential": 0.35,
+                     },
+                     {
+                         "account_id": "export-test-002",
+                         "account_type": "development",
+                         "monthly_spend": 25000.0,
+                         "optimization_potential": 0.60,
+                     },
+                 ],
+             },
+             "optimization_opportunities": {"annual_savings_potential": 1835892.84, "savings_percentage": 40.0},
+         }
+
+         audit_results = {
+             "status": "completed",
+             "timestamp": datetime.now().isoformat(),
+             "audit_data": {
+                 "total_resources_scanned": 3750,
+                 "risk_score": {"overall": 68},
+                 "recommendations": [
+                     {"priority": "critical", "category": "security", "description": "Fix critical issues"},
+                     {"priority": "high", "category": "cost", "description": "Optimize high-cost resources"},
+                 ],
+             },
+         }
+
+         executive_summary = {
+             "report_metadata": {
+                 "timestamp": datetime.now().isoformat(),
+                 "report_type": "enterprise_finops_executive_summary",
+             },
+             "financial_overview": {"current_monthly_spend": 152991.07, "potential_annual_savings": 1835892.84},
+         }
+
+         return discovery, cost_analysis, audit_results, executive_summary
+
+     def test_multi_format_export_capability(self, export_test_data):
+         """Test multi-format export matches reference image capabilities."""
+         config = FinOpsConfig()
+         # Ensure all formats are enabled for testing
+         config.output_formats = ["json", "csv", "html"]
+
+         exporter = EnterpriseExportEngine(config)
+         discovery, cost_analysis, audit_results, executive_summary = export_test_data
+
+         export_status = exporter.export_all_results(discovery, cost_analysis, audit_results, executive_summary)
+
+         # Validate export status structure
+         assert "successful_exports" in export_status
+         assert "failed_exports" in export_status
+
+         # Validate all formats exported successfully
+         successful = export_status["successful_exports"]
+         assert len(successful) == len(config.output_formats)
+
+         # Validate each export has required metadata
+         for export_info in successful:
+             assert "format" in export_info
+             assert "filename" in export_info
+             assert "timestamp" in export_info
+             assert "size_bytes" in export_info
+
+             # Validate filename includes timestamp for uniqueness
+             assert config.report_timestamp in export_info["filename"]
+
+     def test_json_export_data_integrity(self, export_test_data):
+         """Test JSON export maintains complete data integrity."""
+         config = FinOpsConfig()
+         exporter = EnterpriseExportEngine(config)
+
+         discovery, cost_analysis, audit_results, executive_summary = export_test_data
+
+         # Create comprehensive data structure for JSON export
+         complete_data = {
+             "discovery": discovery,
+             "cost_analysis": cost_analysis,
+             "audit_results": audit_results,
+             "executive_summary": executive_summary,
+             "export_metadata": {
+                 "export_timestamp": datetime.now().isoformat(),
+                 "export_version": "0.7.8",
+                 "data_completeness": "full",
+             },
+         }
+
+         filename = exporter._export_json(complete_data)
+
+         # Validate JSON export succeeded
+         assert filename.endswith(".json")
+         assert config.report_timestamp in filename
+
+         # Validate file was created (in-memory testing)
+         # In real implementation, would verify file contents match input data
+
+     def test_csv_export_tabular_structure(self, export_test_data):
+         """Test CSV export provides proper tabular structure."""
+         config = FinOpsConfig()
+         exporter = EnterpriseExportEngine(config)
+
+         discovery, cost_analysis, audit_results, executive_summary = export_test_data
+
+         # Structure data for CSV export testing
+         csv_data = {
+             "cost_analysis": cost_analysis,
+             "audit_summary": {
+                 "total_resources": audit_results["audit_data"]["total_resources_scanned"],
+                 "risk_score": audit_results["audit_data"]["risk_score"]["overall"],
+             },
+         }
+
+         filename = exporter._export_csv(csv_data)
+
+         # Validate CSV export succeeded
+         assert filename.endswith(".csv")
+         assert config.report_timestamp in filename
+
+     def test_html_executive_presentation_format(self, export_test_data):
+         """Test HTML export provides executive presentation format."""
+         config = FinOpsConfig()
+         exporter = EnterpriseExportEngine(config)
+
+         discovery, cost_analysis, audit_results, executive_summary = export_test_data
+
+         # Structure data for HTML executive presentation
+         presentation_data = {
+             "executive_summary": executive_summary,
+             "key_metrics": {
+                 "monthly_spend": cost_analysis["cost_trends"]["total_monthly_spend"],
+                 "total_accounts": cost_analysis["cost_trends"]["total_accounts"],
+                 "risk_score": audit_results["audit_data"]["risk_score"]["overall"],
+                 "potential_savings": cost_analysis["optimization_opportunities"]["annual_savings_potential"],
+             },
+             "presentation_metadata": {"timestamp": datetime.now().isoformat(), "format": "executive_dashboard"},
+         }
+
+         filename = exporter._export_html(presentation_data)
+
+         # Validate HTML export succeeded
+         assert filename.endswith(".html")
+         assert config.report_timestamp in filename
+
+     def test_export_error_handling_resilience(self):
+         """Test export error handling for production resilience."""
+         config = FinOpsConfig()
+         # Test with invalid format to trigger error handling
+         config.output_formats = ["json", "invalid_format", "csv"]
+
+         exporter = EnterpriseExportEngine(config)
+
+         # Minimal test data
+         test_data = {
+             "discovery": {"status": "completed"},
+             "cost_analysis": {"status": "completed"},
+             "audit_results": {"status": "completed"},
+             "executive_summary": {"report_metadata": {}},
+         }
+
+         export_status = exporter.export_all_results(
+             test_data["discovery"],
+             test_data["cost_analysis"],
+             test_data["audit_results"],
+             test_data["executive_summary"],
+         )
+
+         # Validate error handling
+         assert len(export_status["successful_exports"]) == 2  # json and csv
+         assert len(export_status["failed_exports"]) == 1  # invalid_format
+
+         # Validate failed export contains error information
+         failed_export = export_status["failed_exports"][0]
+         assert failed_export["format"] == "invalid_format"
+         assert "error" in failed_export
+
+
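
Taken together, the assertions above pin down the shape of export_status. A representative value reconstructed from the tests (filenames, timestamps, and sizes are illustrative):

export_status = {
    "successful_exports": [
        {"format": "json", "filename": "finops_report_20250101_120000.json",
         "timestamp": "2025-01-01T12:00:00", "size_bytes": 48213},
        {"format": "csv", "filename": "finops_report_20250101_120000.csv",
         "timestamp": "2025-01-01T12:00:01", "size_bytes": 9120},
    ],
    "failed_exports": [
        {"format": "invalid_format", "error": "unsupported output format"},
    ],
}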
+ class TestIntegratedWorkflowValidation:
+     """
+     Integration test validating complete workflow across all 5 reference images.
+
+     This test ensures the complete finops analysis workflow produces
+     outputs that match all reference image requirements simultaneously.
+     """
+
+     def test_complete_finops_workflow_integration(self):
+         """Test complete workflow produces all reference image outputs."""
+         # Run complete analysis workflow
+         results = run_complete_finops_analysis()
+
+         # Validate workflow completion
+         assert results["workflow_status"] == "completed"
+         assert "timestamp" in results
+
+         # Validate all major components completed successfully
+         component_statuses = {
+             "discovery": results["discovery_results"].get("status"),
+             "cost_analysis": results["cost_analysis"].get("status"),
+             "audit_results": results["audit_results"].get("status"),
+         }
+
+         for component, status in component_statuses.items():
+             assert status in ["completed", "error"], f"{component} has invalid status: {status}"
+
+         # At minimum, cost analysis and audit should complete
+         assert results["cost_analysis"]["status"] == "completed"
+         assert results["audit_results"]["status"] == "completed"
+
+         # Validate executive summary generation
+         assert "executive_summary" in results
+         assert "report_metadata" in results["executive_summary"]
+
+         # Validate export functionality
+         assert "export_status" in results
+         export_status = results["export_status"]
+
+         # Should have at least some successful exports
+         if "successful_exports" in export_status:
+             assert len(export_status["successful_exports"]) > 0
+
+     def test_performance_targets_validation(self):
+         """Test performance targets are met across all reference functionalities."""
+         import time
+
+         # Measure complete workflow execution time
+         start_time = time.perf_counter()
+         results = run_complete_finops_analysis()
+         execution_time = time.perf_counter() - start_time
+
+         # Validate performance target: <2s for complete analysis
+         assert execution_time < 2.0, f"Execution time {execution_time:.2f}s exceeds 2s target"
+
+         # Validate workflow completed despite performance constraints
+         assert results["workflow_status"] == "completed"
+
+     def test_enterprise_scale_data_validation(self):
+         """Test enterprise-scale data characteristics across all components."""
+         results = run_complete_finops_analysis()
+
+         # Validate enterprise scale in cost analysis
+         if results["cost_analysis"]["status"] == "completed":
+             cost_trends = results["cost_analysis"]["cost_trends"]
+
+             # Should handle enterprise account counts
+             assert cost_trends["total_accounts"] >= 5
+
+             # Should handle enterprise spend levels
+             assert cost_trends["total_monthly_spend"] > 10000
+
+         # Validate enterprise scale in audit results
+         if results["audit_results"]["status"] == "completed":
+             audit_data = results["audit_results"]["audit_data"]
+
+             # Should scan enterprise-scale resources
+             assert audit_data["total_resources_scanned"] >= 100
+
+             # Should cover multiple accounts
+             assert audit_data["accounts_audited"] >= 1
+
+
+ if __name__ == "__main__":
+     """
+     Run the reference images validation test suite.
+
+     Usage:
+         python test_reference_images_validation.py
+         pytest test_reference_images_validation.py -v
+         pytest test_reference_images_validation.py::TestReferenceImage1_CostAnalysisDashboard -v
+     """
+     pytest.main([__file__, "-v", "--tb=short"])