runbooks 0.7.6__py3-none-any.whl → 0.7.9__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (111)
  1. runbooks/__init__.py +1 -1
  2. runbooks/base.py +5 -1
  3. runbooks/cfat/__init__.py +8 -4
  4. runbooks/cfat/assessment/collectors.py +171 -14
  5. runbooks/cfat/assessment/compliance.py +871 -0
  6. runbooks/cfat/assessment/runner.py +122 -11
  7. runbooks/cfat/models.py +6 -2
  8. runbooks/common/logger.py +14 -0
  9. runbooks/common/rich_utils.py +451 -0
  10. runbooks/enterprise/__init__.py +68 -0
  11. runbooks/enterprise/error_handling.py +411 -0
  12. runbooks/enterprise/logging.py +439 -0
  13. runbooks/enterprise/multi_tenant.py +583 -0
  14. runbooks/finops/README.md +468 -241
  15. runbooks/finops/__init__.py +39 -3
  16. runbooks/finops/cli.py +83 -18
  17. runbooks/finops/cross_validation.py +375 -0
  18. runbooks/finops/dashboard_runner.py +812 -164
  19. runbooks/finops/enhanced_dashboard_runner.py +525 -0
  20. runbooks/finops/finops_dashboard.py +1892 -0
  21. runbooks/finops/helpers.py +485 -51
  22. runbooks/finops/optimizer.py +823 -0
  23. runbooks/finops/tests/__init__.py +19 -0
  24. runbooks/finops/tests/results_test_finops_dashboard.xml +1 -0
  25. runbooks/finops/tests/run_comprehensive_tests.py +421 -0
  26. runbooks/finops/tests/run_tests.py +305 -0
  27. runbooks/finops/tests/test_finops_dashboard.py +705 -0
  28. runbooks/finops/tests/test_integration.py +477 -0
  29. runbooks/finops/tests/test_performance.py +380 -0
  30. runbooks/finops/tests/test_performance_benchmarks.py +500 -0
  31. runbooks/finops/tests/test_reference_images_validation.py +867 -0
  32. runbooks/finops/tests/test_single_account_features.py +715 -0
  33. runbooks/finops/tests/validate_test_suite.py +220 -0
  34. runbooks/finops/types.py +1 -1
  35. runbooks/hitl/enhanced_workflow_engine.py +725 -0
  36. runbooks/inventory/artifacts/scale-optimize-status.txt +12 -0
  37. runbooks/inventory/collectors/aws_comprehensive.py +442 -0
  38. runbooks/inventory/collectors/enterprise_scale.py +281 -0
  39. runbooks/inventory/core/collector.py +172 -13
  40. runbooks/inventory/discovery.md +1 -1
  41. runbooks/inventory/list_ec2_instances.py +18 -20
  42. runbooks/inventory/list_ssm_parameters.py +31 -3
  43. runbooks/inventory/organizations_discovery.py +1269 -0
  44. runbooks/inventory/rich_inventory_display.py +393 -0
  45. runbooks/inventory/run_on_multi_accounts.py +35 -19
  46. runbooks/inventory/runbooks.security.report_generator.log +0 -0
  47. runbooks/inventory/runbooks.security.run_script.log +0 -0
  48. runbooks/inventory/vpc_flow_analyzer.py +1030 -0
  49. runbooks/main.py +2215 -119
  50. runbooks/metrics/dora_metrics_engine.py +599 -0
  51. runbooks/operate/__init__.py +2 -2
  52. runbooks/operate/base.py +122 -10
  53. runbooks/operate/deployment_framework.py +1032 -0
  54. runbooks/operate/deployment_validator.py +853 -0
  55. runbooks/operate/dynamodb_operations.py +10 -6
  56. runbooks/operate/ec2_operations.py +319 -11
  57. runbooks/operate/executive_dashboard.py +779 -0
  58. runbooks/operate/mcp_integration.py +750 -0
  59. runbooks/operate/nat_gateway_operations.py +1120 -0
  60. runbooks/operate/networking_cost_heatmap.py +685 -0
  61. runbooks/operate/privatelink_operations.py +940 -0
  62. runbooks/operate/s3_operations.py +10 -6
  63. runbooks/operate/vpc_endpoints.py +644 -0
  64. runbooks/operate/vpc_operations.py +1038 -0
  65. runbooks/remediation/__init__.py +2 -2
  66. runbooks/remediation/acm_remediation.py +1 -1
  67. runbooks/remediation/base.py +1 -1
  68. runbooks/remediation/cloudtrail_remediation.py +1 -1
  69. runbooks/remediation/cognito_remediation.py +1 -1
  70. runbooks/remediation/dynamodb_remediation.py +1 -1
  71. runbooks/remediation/ec2_remediation.py +1 -1
  72. runbooks/remediation/ec2_unattached_ebs_volumes.py +1 -1
  73. runbooks/remediation/kms_enable_key_rotation.py +1 -1
  74. runbooks/remediation/kms_remediation.py +1 -1
  75. runbooks/remediation/lambda_remediation.py +1 -1
  76. runbooks/remediation/multi_account.py +1 -1
  77. runbooks/remediation/rds_remediation.py +1 -1
  78. runbooks/remediation/s3_block_public_access.py +1 -1
  79. runbooks/remediation/s3_enable_access_logging.py +1 -1
  80. runbooks/remediation/s3_encryption.py +1 -1
  81. runbooks/remediation/s3_remediation.py +1 -1
  82. runbooks/remediation/vpc_remediation.py +475 -0
  83. runbooks/security/__init__.py +3 -1
  84. runbooks/security/compliance_automation.py +632 -0
  85. runbooks/security/report_generator.py +10 -0
  86. runbooks/security/run_script.py +31 -5
  87. runbooks/security/security_baseline_tester.py +169 -30
  88. runbooks/security/security_export.py +477 -0
  89. runbooks/validation/__init__.py +10 -0
  90. runbooks/validation/benchmark.py +484 -0
  91. runbooks/validation/cli.py +356 -0
  92. runbooks/validation/mcp_validator.py +768 -0
  93. runbooks/vpc/__init__.py +38 -0
  94. runbooks/vpc/config.py +212 -0
  95. runbooks/vpc/cost_engine.py +347 -0
  96. runbooks/vpc/heatmap_engine.py +605 -0
  97. runbooks/vpc/manager_interface.py +634 -0
  98. runbooks/vpc/networking_wrapper.py +1260 -0
  99. runbooks/vpc/rich_formatters.py +679 -0
  100. runbooks/vpc/tests/__init__.py +5 -0
  101. runbooks/vpc/tests/conftest.py +356 -0
  102. runbooks/vpc/tests/test_cli_integration.py +530 -0
  103. runbooks/vpc/tests/test_config.py +458 -0
  104. runbooks/vpc/tests/test_cost_engine.py +479 -0
  105. runbooks/vpc/tests/test_networking_wrapper.py +512 -0
  106. {runbooks-0.7.6.dist-info → runbooks-0.7.9.dist-info}/METADATA +40 -12
  107. {runbooks-0.7.6.dist-info → runbooks-0.7.9.dist-info}/RECORD +111 -50
  108. {runbooks-0.7.6.dist-info → runbooks-0.7.9.dist-info}/WHEEL +0 -0
  109. {runbooks-0.7.6.dist-info → runbooks-0.7.9.dist-info}/entry_points.txt +0 -0
  110. {runbooks-0.7.6.dist-info → runbooks-0.7.9.dist-info}/licenses/LICENSE +0 -0
  111. {runbooks-0.7.6.dist-info → runbooks-0.7.9.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,705 @@
1
+ #!/usr/bin/env python3
2
+ """
3
+ Comprehensive Unit Tests for FinOps Dashboard Enterprise Components.
4
+
5
+ This module provides extensive unit testing for all major components
6
+ of the enterprise FinOps dashboard system including:
7
+ - Configuration management
8
+ - Account discovery
9
+ - Cost trend analysis
10
+ - Resource utilization heatmaps
11
+ - Executive reporting
12
+ - Export functionality
13
+
14
+ Author: CloudOps Runbooks Team
15
+ Version: 0.7.8
16
+ """
17
+
18
+ import json
19
+ import os
20
+ import tempfile
21
+ from datetime import datetime, timedelta
22
+ from unittest.mock import MagicMock, Mock, patch
23
+
24
+ import pytest
25
+
26
+ # Import the components we're testing
27
+ from runbooks.finops.finops_dashboard import (
28
+ EnterpriseDiscovery,
29
+ EnterpriseExecutiveDashboard,
30
+ EnterpriseExportEngine,
31
+ EnterpriseResourceAuditor,
32
+ FinOpsConfig,
33
+ MultiAccountCostTrendAnalyzer,
34
+ ResourceUtilizationHeatmapAnalyzer,
35
+ create_finops_dashboard,
36
+ run_complete_finops_analysis,
37
+ )
38
+
39
+
40
class TestFinOpsConfig:
    """Unit tests for FinOpsConfig defaults, env-var overrides, and timestamping."""

    def test_default_configuration(self):
        """All documented defaults must hold on a freshly built config."""
        config = FinOpsConfig()

        # Default AWS profile names
        assert config.billing_profile == "ams-admin-Billing-ReadOnlyAccess-909135376185"
        assert config.management_profile == "ams-admin-ReadOnlyAccess-909135376185"
        assert config.operational_profile == "ams-centralised-ops-ReadOnlyAccess-335083429030"

        # Analysis parameters
        assert config.time_range_days == 30
        assert config.target_savings_percent == 40
        assert config.min_account_threshold == 5
        assert config.risk_threshold == 25

        # Safety controls all default to the conservative setting
        assert config.dry_run is True
        assert config.require_approval is True
        assert config.enable_cross_account is True
        assert config.audit_mode is True

        # Output configuration
        assert "json" in config.output_formats
        assert "csv" in config.output_formats
        assert "html" in config.output_formats
        assert config.enable_ou_analysis is True
        assert config.include_reserved_instance_recommendations is True

    def test_environment_variable_override(self):
        """Profile names must be overridable via environment variables."""
        overrides = {
            "BILLING_PROFILE": "test-billing-profile",
            "MANAGEMENT_PROFILE": "test-management-profile",
            "CENTRALISED_OPS_PROFILE": "test-ops-profile",
        }
        with patch.dict(os.environ, overrides):
            config = FinOpsConfig()

            assert config.billing_profile == "test-billing-profile"
            assert config.management_profile == "test-management-profile"
            assert config.operational_profile == "test-ops-profile"

    def test_report_timestamp_format(self):
        """Report timestamp must be exactly YYYYMMDD_HHMM.

        The original assertions only checked component lengths, so a value
        like ``"ABCDEFGH_WXYZ"`` would have passed.  Also round-trip through
        ``datetime.strptime`` to prove the parts are numeric and in range.
        """
        config = FinOpsConfig()

        # Shape: 8 date chars + "_" + 4 time chars
        assert len(config.report_timestamp) == 13
        assert "_" in config.report_timestamp

        date_part, time_part = config.report_timestamp.split("_")
        assert len(date_part) == 8  # YYYYMMDD
        assert len(time_part) == 4  # HHMM

        # Strict check: raises ValueError on non-numeric or out-of-range
        # components, which the length checks alone would miss.
        datetime.strptime(config.report_timestamp, "%Y%m%d_%H%M")
99
+
100
+
101
class TestEnterpriseDiscovery:
    """Tests covering the EnterpriseDiscovery account-discovery engine."""

    def test_discovery_initialization(self):
        """A freshly built engine holds its config and starts with no results."""
        cfg = FinOpsConfig()
        engine = EnterpriseDiscovery(cfg)

        assert engine.config == cfg
        assert engine.results == {}

    @patch("runbooks.finops.finops_dashboard.get_aws_profiles")
    @patch("runbooks.finops.finops_dashboard.get_account_id")
    def test_successful_account_discovery(self, mock_get_account_id, mock_get_profiles):
        """Happy-path discovery returns the full report structure."""
        # Stub out the AWS helpers so no real credentials are needed.
        mock_get_profiles.return_value = ["profile1", "profile2", "default"]
        mock_get_account_id.return_value = "123456789012"

        cfg = FinOpsConfig()
        report = EnterpriseDiscovery(cfg).discover_accounts()

        # Top-level report keys
        for key in ("timestamp", "available_profiles", "configured_profiles", "discovery_mode", "account_info"):
            assert key in report

        # Configured profiles mirror the config object
        configured = report["configured_profiles"]
        assert configured["billing"] == cfg.billing_profile
        assert configured["management"] == cfg.management_profile
        assert configured["operational"] == cfg.operational_profile

        # dry_run defaults to True, so discovery must label itself DRY-RUN
        assert report["discovery_mode"] == "DRY-RUN"

        info = report["account_info"]
        assert "billing" in info
        assert "management" in info
        assert "operational" in info

        # Each per-profile entry carries profile, status, and either an
        # account id or an error.
        for entry in info.values():
            assert "profile" in entry
            assert "account_id" in entry or "error" in entry
            assert "status" in entry

    def test_discovery_error_handling(self):
        """With AWS unavailable, discovery falls back to simulated data."""
        engine = EnterpriseDiscovery(FinOpsConfig())

        with patch("runbooks.finops.finops_dashboard.AWS_AVAILABLE", False):
            report = engine.discover_accounts()

        # The report structure survives even without AWS access.
        assert "timestamp" in report
        assert "account_info" in report

        for entry in report["account_info"].values():
            assert entry["status"] == "🔄 Simulated"
            assert entry["account_id"] == "simulated-account"
169
+
170
+
171
class TestMultiAccountCostTrendAnalyzer:
    """Tests for the multi-account cost trend analysis engine."""

    def test_analyzer_initialization(self):
        """Engine stores the config and starts with empty trend results."""
        cfg = FinOpsConfig()
        engine = MultiAccountCostTrendAnalyzer(cfg)

        assert engine.config == cfg
        assert engine.trend_results == {}

    def test_cost_trend_analysis_success(self):
        """Successful analysis returns the documented result layout."""
        cfg = FinOpsConfig()
        engine = MultiAccountCostTrendAnalyzer(cfg)

        # Pin the module's random helpers so the run is reproducible.
        with (
            patch("runbooks.finops.finops_dashboard.random.randint") as fake_randint,
            patch("runbooks.finops.finops_dashboard.random.uniform") as fake_uniform,
            patch("runbooks.finops.finops_dashboard.random.choice") as fake_choice,
        ):
            fake_randint.return_value = 10  # 10 accounts
            fake_uniform.return_value = 20000.0  # $20k base spend
            fake_choice.return_value = ("production", 15000, 65000)

            outcome = engine.analyze_cost_trends()

            assert outcome["status"] == "completed"
            assert "timestamp" in outcome
            assert "analysis_type" in outcome
            assert outcome["analysis_type"] == "multi_account_cost_trends"
            assert "target_savings" in outcome
            assert outcome["target_savings"] == cfg.target_savings_percent
            assert "cost_trends" in outcome
            assert "optimization_opportunities" in outcome

            # Cost-trend sub-structure
            trends = outcome["cost_trends"]
            for key in ("total_accounts", "total_monthly_spend", "account_data", "cost_trend_summary"):
                assert key in trends

            # Optimization sub-structure
            opportunities = outcome["optimization_opportunities"]
            for key in (
                "total_potential_savings",
                "savings_percentage",
                "target_achievement",
                "optimization_by_account_type",
                "annual_savings_potential",
            ):
                assert key in opportunities

    def test_dynamic_account_discovery(self):
        """Generated account counts stay within the configured bounds."""
        cfg = FinOpsConfig()
        engine = MultiAccountCostTrendAnalyzer(cfg)

        counts = [
            run["cost_trends"]["total_accounts"]
            for run in (engine.analyze_cost_trends() for _ in range(5))
            if run["status"] == "completed"
        ]

        # Counts must fall in [min_account_threshold, 85]; repeated values
        # are tolerated since the generator is random.
        assert all(cfg.min_account_threshold <= count <= 85 for count in counts)
        assert len(set(counts)) >= 1

    def test_optimization_calculation_logic(self):
        """Savings math on hand-built account data is exact."""
        cfg = FinOpsConfig()
        engine = MultiAccountCostTrendAnalyzer(cfg)

        trends = {
            "total_monthly_spend": 100000.0,
            "account_data": [
                {
                    "account_id": "test-001",
                    "account_type": "production",
                    "monthly_spend": 50000.0,
                    "optimization_potential": 0.4,  # 40% potential
                },
                {
                    "account_id": "test-002",
                    "account_type": "development",
                    "monthly_spend": 50000.0,
                    "optimization_potential": 0.6,  # 60% potential
                },
            ],
        }

        result = engine._calculate_optimization_opportunities(trends)

        # (50000 * 0.4) + (50000 * 0.6) = 50000 → 50% of the 100k spend,
        # or 600k annualized.
        assert result["total_potential_savings"] == 50000.0
        assert result["savings_percentage"] == 50.0
        assert result["annual_savings_potential"] == 600000.0

        achievement = result["target_achievement"]
        assert achievement["target"] == cfg.target_savings_percent
        assert achievement["achieved"] == 50.0
        assert achievement["status"] == "achieved"  # 50% beats the 40% target
        assert achievement["gap"] == 0  # target exceeded, so no gap
279
+
280
+
281
class TestResourceUtilizationHeatmapAnalyzer:
    """Tests for the resource-utilization heatmap analyzer."""

    def test_heatmap_analyzer_initialization(self):
        """Engine keeps config and trend data; results start empty."""
        cfg = FinOpsConfig()
        seed = {"cost_trends": {"account_data": []}}
        engine = ResourceUtilizationHeatmapAnalyzer(cfg, seed)

        assert engine.config == cfg
        assert engine.trend_data == seed
        assert engine.heatmap_results == {}

    def test_resource_heatmap_analysis(self):
        """Analysis over two accounts yields the documented structure."""
        cfg = FinOpsConfig()
        seed = {
            "cost_trends": {
                "account_data": [
                    {"account_id": "test-001", "account_type": "production", "monthly_spend": 25000.0},
                    {"account_id": "test-002", "account_type": "development", "monthly_spend": 5000.0},
                ]
            }
        }

        outcome = ResourceUtilizationHeatmapAnalyzer(cfg, seed).analyze_resource_utilization()

        assert outcome["status"] == "completed"
        assert "timestamp" in outcome
        assert "analysis_type" in outcome
        assert outcome["analysis_type"] == "resource_utilization_heatmap"
        assert "heatmap_data" in outcome
        assert "efficiency_scoring" in outcome
        assert "rightsizing_recommendations" in outcome

        heatmap = outcome["heatmap_data"]
        assert heatmap["total_accounts"] == 2
        assert heatmap["total_resources"] > 0
        assert "utilization_matrix" in heatmap
        assert "resource_categories" in heatmap

        # All four resource categories must be represented.
        for category in ("compute", "storage", "database", "network"):
            assert category in heatmap["resource_categories"]

    def test_efficiency_scoring_calculation(self):
        """Scoring over a known matrix produces the expected aggregates."""
        cfg = FinOpsConfig()
        engine = ResourceUtilizationHeatmapAnalyzer(cfg, {"cost_trends": {"account_data": []}})

        sample = {
            "utilization_matrix": [
                {
                    "account_id": "test-001",
                    "resource_utilization": {
                        "compute": {
                            "ec2_instances": {"efficiency_score": 60.0},
                            "lambda_functions": {"efficiency_score": 80.0},
                        },
                        "storage": {"s3_buckets": {"efficiency_score": 39.0}},
                    },
                }
            ]
        }

        scoring = engine._calculate_efficiency_scoring(sample)

        # Mean of 60, 80 and 39 is 59.666…, reported rounded to 59.7.
        assert scoring["average_efficiency_score"] == 59.7
        assert scoring["efficiency_distribution"]["total_resources_scored"] == 3

        spread = scoring["efficiency_distribution"]
        assert spread["low_efficiency"] == 1  # 39 < 40
        assert spread["medium_efficiency"] == 1  # 60 in the 40-70 band
        assert spread["high_efficiency"] == 1  # 80 >= 70
365
+
366
+
367
class TestEnterpriseResourceAuditor:
    """Tests for the enterprise compliance/resource auditor."""

    def test_auditor_initialization(self):
        """Auditor stores the config and starts with empty audit results."""
        cfg = FinOpsConfig()
        auditor = EnterpriseResourceAuditor(cfg)

        assert auditor.config == cfg
        assert auditor.audit_results == {}

    def test_compliance_audit_execution(self):
        """A compliance audit run returns the full report structure."""
        report = EnterpriseResourceAuditor(FinOpsConfig()).run_compliance_audit()

        assert report["status"] == "completed"
        assert "timestamp" in report
        assert "audit_scope" in report
        assert report["audit_scope"] == "multi-account-enterprise"
        assert "profiles_audited" in report
        assert "audit_data" in report

        # Audit payload keys
        data = report["audit_data"]
        for key in (
            "total_resources_scanned",
            "accounts_audited",
            "regions_covered",
            "compliance_findings",
            "risk_score",
            "recommendations",
        ):
            assert key in data

        # Compliance-finding categories
        findings = data["compliance_findings"]
        for key in ("untagged_resources", "unused_resources", "security_groups", "public_resources"):
            assert key in findings

        # Risk score is bounded to a 0-100 scale.
        risk = data["risk_score"]
        assert "overall" in risk
        assert "breakdown" in risk
        assert 0 <= risk["overall"] <= 100
414
+
415
+
416
class TestEnterpriseExecutiveDashboard:
    """Tests for executive summary generation."""

    def create_test_data(self):
        """Build canned discovery/cost/audit fixtures for dashboard tests."""
        discovery_results = {"timestamp": datetime.now().isoformat(), "status": "completed"}

        cost_analysis = {
            "status": "completed",
            "cost_trends": {"total_monthly_spend": 100000.0},
            "optimization_opportunities": {"annual_savings_potential": 480000.0, "savings_percentage": 40.0},
        }

        audit_results = {
            "status": "completed",
            "audit_data": {
                "total_resources_scanned": 2500,
                "risk_score": {"overall": 75},
                "recommendations": [
                    {"priority": "critical", "category": "security"},
                    {"priority": "high", "category": "cost"},
                    {"priority": "medium", "category": "governance"},
                ],
            },
        }

        return discovery_results, cost_analysis, audit_results

    def test_executive_dashboard_initialization(self):
        """Dashboard keeps references to every input it was built from."""
        cfg = FinOpsConfig()
        disc, costs, audit = self.create_test_data()
        board = EnterpriseExecutiveDashboard(cfg, disc, costs, audit)

        assert board.config == cfg
        assert board.discovery == disc
        assert board.cost_analysis == costs
        assert board.audit_results == audit

    def test_executive_summary_generation(self):
        """Summary reflects the canned fixtures exactly."""
        cfg = FinOpsConfig()
        disc, costs, audit = self.create_test_data()
        board = EnterpriseExecutiveDashboard(cfg, disc, costs, audit)

        summary = board.generate_executive_summary()

        # Four top-level sections
        for section in ("report_metadata", "financial_overview", "operational_overview", "executive_recommendations"):
            assert section in summary

        meta = summary["report_metadata"]
        assert meta["report_type"] == "enterprise_finops_executive_summary"
        assert meta["analysis_period"] == f"{cfg.time_range_days} days"
        assert meta["target_savings"] == f"{cfg.target_savings_percent}%"

        money = summary["financial_overview"]
        assert money["current_monthly_spend"] == 100000.0
        assert money["potential_annual_savings"] == 480000.0
        assert money["savings_percentage"] == 40.0
        assert money["target_achieved"] is True  # 40% meets the 40% target

        ops = summary["operational_overview"]
        assert ops["resources_scanned"] == 2500
        assert ops["overall_risk_score"] == 75
        assert ops["critical_findings"] == 1
        assert ops["high_findings"] == 1
488
+
489
+
490
class TestEnterpriseExportEngine:
    """Tests for multi-format export of analysis results."""

    def test_export_engine_initialization(self):
        """Exporter stores the config and starts with empty export results."""
        cfg = FinOpsConfig()
        engine = EnterpriseExportEngine(cfg)

        assert engine.config == cfg
        assert engine.export_results == {}

    def test_export_all_results(self):
        """Every configured output format yields a successful export."""
        cfg = FinOpsConfig()
        engine = EnterpriseExportEngine(cfg)

        payload = {
            "discovery": {"status": "completed"},
            "cost_analysis": {
                "status": "completed",
                "cost_trends": {"total_monthly_spend": 100000.0, "total_accounts": 15},
                "optimization_opportunities": {"annual_savings_potential": 480000.0, "savings_percentage": 25.0},
            },
            "audit_results": {
                "status": "completed",
                "audit_data": {
                    "total_resources_scanned": 2500,
                    "risk_score": {"overall": 75},
                    "recommendations": [
                        {"priority": "critical", "description": "Fix security issues"},
                        {"priority": "high", "description": "Optimize costs"},
                    ],
                },
            },
            "executive_summary": {"report_metadata": {"timestamp": datetime.now().isoformat()}},
        }

        status = engine.export_all_results(
            payload["discovery"],
            payload["cost_analysis"],
            payload["audit_results"],
            payload["executive_summary"],
        )

        assert "successful_exports" in status
        assert "failed_exports" in status

        # One success per configured format, and each format accounted for.
        done = status["successful_exports"]
        assert len(done) == len(cfg.output_formats)

        produced = [item["format"] for item in done]
        for fmt in cfg.output_formats:
            assert fmt in produced

    def test_json_export(self):
        """JSON export names its file with extension and report timestamp."""
        cfg = FinOpsConfig()
        engine = EnterpriseExportEngine(cfg)

        payload = {
            "metadata": {"timestamp": datetime.now().isoformat()},
            "test_value": 42,
            "nested_data": {"key": "value"},
        }

        # Valid data must export without raising.
        filename = engine._export_json(payload)
        assert filename.endswith(".json")
        assert cfg.report_timestamp in filename

    def test_csv_export(self):
        """CSV export names its file with extension and report timestamp."""
        cfg = FinOpsConfig()
        engine = EnterpriseExportEngine(cfg)

        payload = {
            "cost_analysis": {
                "status": "completed",
                "cost_trends": {"total_monthly_spend": 100000.0},
                "optimization_opportunities": {"annual_savings_potential": 480000.0, "savings_percentage": 40.0},
            }
        }

        filename = engine._export_csv(payload)
        assert filename.endswith(".csv")
        assert cfg.report_timestamp in filename

    def test_html_export(self):
        """HTML export names its file with extension and report timestamp."""
        cfg = FinOpsConfig()
        engine = EnterpriseExportEngine(cfg)

        payload = {"metadata": {"export_timestamp": datetime.now().isoformat()}}

        filename = engine._export_html(payload)
        assert filename.endswith(".html")
        assert cfg.report_timestamp in filename
591
+
592
+
593
class TestFactoryFunctions:
    """Tests for module-level factory and workflow helpers."""

    def test_create_finops_dashboard(self):
        """Factory builds one of each component wired to a shared config."""
        cfg, discovery, analyzer, auditor, exporter = create_finops_dashboard()

        assert isinstance(cfg, FinOpsConfig)
        assert isinstance(discovery, EnterpriseDiscovery)
        assert isinstance(analyzer, MultiAccountCostTrendAnalyzer)
        assert isinstance(auditor, EnterpriseResourceAuditor)
        assert isinstance(exporter, EnterpriseExportEngine)

        # Every component must share the single config instance.
        for component in (discovery, analyzer, auditor, exporter):
            assert component.config == cfg

    def test_create_finops_dashboard_with_custom_config(self):
        """Factory honors a caller-supplied config object."""
        mine = FinOpsConfig()
        mine.target_savings_percent = 50

        cfg, _discovery, _analyzer, _auditor, _exporter = create_finops_dashboard(mine)

        assert cfg == mine
        assert cfg.target_savings_percent == 50

    def test_run_complete_finops_analysis(self):
        """End-to-end workflow returns every section and completes."""
        outcome = run_complete_finops_analysis()

        for key in (
            "config",
            "discovery_results",
            "cost_analysis",
            "audit_results",
            "executive_summary",
            "export_status",
            "workflow_status",
            "timestamp",
        ):
            assert key in outcome

        assert outcome["workflow_status"] == "completed"

        # Discovery may degrade to an error payload; the analyses must not.
        assert outcome["discovery_results"]["status"] or "error" in outcome["discovery_results"]
        assert outcome["cost_analysis"]["status"] == "completed"
        assert outcome["audit_results"]["status"] == "completed"
644
+
645
+
646
class TestErrorHandlingAndEdgeCases:
    """Error-path and edge-case coverage for the dashboard components."""

    def test_cost_analyzer_with_invalid_trend_data(self):
        """A failure inside trend generation surfaces as an error result."""
        engine = MultiAccountCostTrendAnalyzer(FinOpsConfig())

        with patch.object(engine, "_generate_dynamic_account_cost_trends", side_effect=Exception("Test error")):
            outcome = engine.analyze_cost_trends()

            assert outcome["status"] == "error"
            assert "error" in outcome
            assert outcome["error"] == "Test error"

    def test_heatmap_analyzer_with_empty_account_data(self):
        """No accounts in the trend data is handled gracefully."""
        engine = ResourceUtilizationHeatmapAnalyzer(FinOpsConfig(), {"cost_trends": {"account_data": []}})

        outcome = engine.analyze_resource_utilization()

        assert outcome["status"] == "completed"
        assert outcome["heatmap_data"]["total_accounts"] == 0
        assert outcome["heatmap_data"]["total_resources"] == 0

    def test_export_with_invalid_format(self):
        """An unknown output format lands in failed_exports with an error."""
        cfg = FinOpsConfig()
        cfg.output_formats = ["invalid_format"]  # deliberately unsupported
        engine = EnterpriseExportEngine(cfg)

        status = engine.export_all_results({}, {}, {}, {})

        assert len(status["failed_exports"]) == 1
        assert status["failed_exports"][0]["format"] == "invalid_format"
        assert "error" in status["failed_exports"][0]
694
+
695
+
696
if __name__ == "__main__":
    # Run the suite directly, e.g.:
    #   python test_finops_dashboard.py
    #   pytest test_finops_dashboard.py -v
    #   pytest test_finops_dashboard.py::TestFinOpsConfig -v
    pytest.main([__file__, "-v"])