runbooks-1.1.4-py3-none-any.whl → runbooks-1.1.6-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- runbooks/__init__.py +31 -2
- runbooks/__init___optimized.py +18 -4
- runbooks/_platform/__init__.py +1 -5
- runbooks/_platform/core/runbooks_wrapper.py +141 -138
- runbooks/aws2/accuracy_validator.py +812 -0
- runbooks/base.py +7 -0
- runbooks/cfat/assessment/compliance.py +1 -1
- runbooks/cfat/assessment/runner.py +1 -0
- runbooks/cfat/cloud_foundations_assessment.py +227 -239
- runbooks/cli/__init__.py +1 -1
- runbooks/cli/commands/cfat.py +64 -23
- runbooks/cli/commands/finops.py +1005 -54
- runbooks/cli/commands/inventory.py +135 -91
- runbooks/cli/commands/operate.py +9 -36
- runbooks/cli/commands/security.py +42 -18
- runbooks/cli/commands/validation.py +432 -18
- runbooks/cli/commands/vpc.py +81 -17
- runbooks/cli/registry.py +22 -10
- runbooks/cloudops/__init__.py +20 -27
- runbooks/cloudops/base.py +96 -107
- runbooks/cloudops/cost_optimizer.py +544 -542
- runbooks/cloudops/infrastructure_optimizer.py +5 -4
- runbooks/cloudops/interfaces.py +224 -225
- runbooks/cloudops/lifecycle_manager.py +5 -4
- runbooks/cloudops/mcp_cost_validation.py +252 -235
- runbooks/cloudops/models.py +78 -53
- runbooks/cloudops/monitoring_automation.py +5 -4
- runbooks/cloudops/notebook_framework.py +177 -213
- runbooks/cloudops/security_enforcer.py +125 -159
- runbooks/common/accuracy_validator.py +17 -12
- runbooks/common/aws_pricing.py +349 -326
- runbooks/common/aws_pricing_api.py +211 -212
- runbooks/common/aws_profile_manager.py +40 -36
- runbooks/common/aws_utils.py +74 -79
- runbooks/common/business_logic.py +126 -104
- runbooks/common/cli_decorators.py +36 -60
- runbooks/common/comprehensive_cost_explorer_integration.py +455 -463
- runbooks/common/cross_account_manager.py +197 -204
- runbooks/common/date_utils.py +27 -39
- runbooks/common/decorators.py +29 -19
- runbooks/common/dry_run_examples.py +173 -208
- runbooks/common/dry_run_framework.py +157 -155
- runbooks/common/enhanced_exception_handler.py +15 -4
- runbooks/common/enhanced_logging_example.py +50 -64
- runbooks/common/enhanced_logging_integration_example.py +65 -37
- runbooks/common/env_utils.py +16 -16
- runbooks/common/error_handling.py +40 -38
- runbooks/common/lazy_loader.py +41 -23
- runbooks/common/logging_integration_helper.py +79 -86
- runbooks/common/mcp_cost_explorer_integration.py +476 -493
- runbooks/common/mcp_integration.py +99 -79
- runbooks/common/memory_optimization.py +140 -118
- runbooks/common/module_cli_base.py +37 -58
- runbooks/common/organizations_client.py +175 -193
- runbooks/common/patterns.py +23 -25
- runbooks/common/performance_monitoring.py +67 -71
- runbooks/common/performance_optimization_engine.py +283 -274
- runbooks/common/profile_utils.py +111 -37
- runbooks/common/rich_utils.py +315 -141
- runbooks/common/sre_performance_suite.py +177 -186
- runbooks/enterprise/__init__.py +1 -1
- runbooks/enterprise/logging.py +144 -106
- runbooks/enterprise/security.py +187 -204
- runbooks/enterprise/validation.py +43 -56
- runbooks/finops/__init__.py +26 -30
- runbooks/finops/account_resolver.py +1 -1
- runbooks/finops/advanced_optimization_engine.py +980 -0
- runbooks/finops/automation_core.py +268 -231
- runbooks/finops/business_case_config.py +184 -179
- runbooks/finops/cli.py +660 -139
- runbooks/finops/commvault_ec2_analysis.py +157 -164
- runbooks/finops/compute_cost_optimizer.py +336 -320
- runbooks/finops/config.py +20 -20
- runbooks/finops/cost_optimizer.py +484 -618
- runbooks/finops/cost_processor.py +332 -214
- runbooks/finops/dashboard_runner.py +1006 -172
- runbooks/finops/ebs_cost_optimizer.py +991 -657
- runbooks/finops/elastic_ip_optimizer.py +317 -257
- runbooks/finops/enhanced_mcp_integration.py +340 -0
- runbooks/finops/enhanced_progress.py +32 -29
- runbooks/finops/enhanced_trend_visualization.py +3 -2
- runbooks/finops/enterprise_wrappers.py +223 -285
- runbooks/finops/executive_export.py +203 -160
- runbooks/finops/helpers.py +130 -288
- runbooks/finops/iam_guidance.py +1 -1
- runbooks/finops/infrastructure/__init__.py +80 -0
- runbooks/finops/infrastructure/commands.py +506 -0
- runbooks/finops/infrastructure/load_balancer_optimizer.py +866 -0
- runbooks/finops/infrastructure/vpc_endpoint_optimizer.py +832 -0
- runbooks/finops/markdown_exporter.py +337 -174
- runbooks/finops/mcp_validator.py +1952 -0
- runbooks/finops/nat_gateway_optimizer.py +1512 -481
- runbooks/finops/network_cost_optimizer.py +657 -587
- runbooks/finops/notebook_utils.py +226 -188
- runbooks/finops/optimization_engine.py +1136 -0
- runbooks/finops/optimizer.py +19 -23
- runbooks/finops/rds_snapshot_optimizer.py +367 -411
- runbooks/finops/reservation_optimizer.py +427 -363
- runbooks/finops/scenario_cli_integration.py +64 -65
- runbooks/finops/scenarios.py +1277 -438
- runbooks/finops/schemas.py +218 -182
- runbooks/finops/snapshot_manager.py +2289 -0
- runbooks/finops/types.py +3 -3
- runbooks/finops/validation_framework.py +259 -265
- runbooks/finops/vpc_cleanup_exporter.py +189 -144
- runbooks/finops/vpc_cleanup_optimizer.py +591 -573
- runbooks/finops/workspaces_analyzer.py +171 -182
- runbooks/integration/__init__.py +89 -0
- runbooks/integration/mcp_integration.py +1920 -0
- runbooks/inventory/CLAUDE.md +816 -0
- runbooks/inventory/__init__.py +2 -2
- runbooks/inventory/aws_decorators.py +2 -3
- runbooks/inventory/check_cloudtrail_compliance.py +2 -4
- runbooks/inventory/check_controltower_readiness.py +152 -151
- runbooks/inventory/check_landingzone_readiness.py +85 -84
- runbooks/inventory/cloud_foundations_integration.py +144 -149
- runbooks/inventory/collectors/aws_comprehensive.py +1 -1
- runbooks/inventory/collectors/aws_networking.py +109 -99
- runbooks/inventory/collectors/base.py +4 -0
- runbooks/inventory/core/collector.py +495 -313
- runbooks/inventory/core/formatter.py +11 -0
- runbooks/inventory/draw_org_structure.py +8 -9
- runbooks/inventory/drift_detection_cli.py +69 -96
- runbooks/inventory/ec2_vpc_utils.py +2 -2
- runbooks/inventory/find_cfn_drift_detection.py +5 -7
- runbooks/inventory/find_cfn_orphaned_stacks.py +7 -9
- runbooks/inventory/find_cfn_stackset_drift.py +5 -6
- runbooks/inventory/find_ec2_security_groups.py +48 -42
- runbooks/inventory/find_landingzone_versions.py +4 -6
- runbooks/inventory/find_vpc_flow_logs.py +7 -9
- runbooks/inventory/inventory_mcp_cli.py +48 -46
- runbooks/inventory/inventory_modules.py +103 -91
- runbooks/inventory/list_cfn_stacks.py +9 -10
- runbooks/inventory/list_cfn_stackset_operation_results.py +1 -3
- runbooks/inventory/list_cfn_stackset_operations.py +79 -57
- runbooks/inventory/list_cfn_stacksets.py +8 -10
- runbooks/inventory/list_config_recorders_delivery_channels.py +49 -39
- runbooks/inventory/list_ds_directories.py +65 -53
- runbooks/inventory/list_ec2_availability_zones.py +2 -4
- runbooks/inventory/list_ec2_ebs_volumes.py +32 -35
- runbooks/inventory/list_ec2_instances.py +23 -28
- runbooks/inventory/list_ecs_clusters_and_tasks.py +26 -34
- runbooks/inventory/list_elbs_load_balancers.py +22 -20
- runbooks/inventory/list_enis_network_interfaces.py +26 -33
- runbooks/inventory/list_guardduty_detectors.py +2 -4
- runbooks/inventory/list_iam_policies.py +2 -4
- runbooks/inventory/list_iam_roles.py +5 -7
- runbooks/inventory/list_iam_saml_providers.py +4 -6
- runbooks/inventory/list_lambda_functions.py +38 -38
- runbooks/inventory/list_org_accounts.py +6 -8
- runbooks/inventory/list_org_accounts_users.py +55 -44
- runbooks/inventory/list_rds_db_instances.py +31 -33
- runbooks/inventory/list_rds_snapshots_aggregator.py +192 -208
- runbooks/inventory/list_route53_hosted_zones.py +3 -5
- runbooks/inventory/list_servicecatalog_provisioned_products.py +37 -41
- runbooks/inventory/list_sns_topics.py +2 -4
- runbooks/inventory/list_ssm_parameters.py +4 -7
- runbooks/inventory/list_vpc_subnets.py +2 -4
- runbooks/inventory/list_vpcs.py +7 -10
- runbooks/inventory/mcp_inventory_validator.py +554 -468
- runbooks/inventory/mcp_vpc_validator.py +359 -442
- runbooks/inventory/organizations_discovery.py +63 -55
- runbooks/inventory/recover_cfn_stack_ids.py +7 -8
- runbooks/inventory/requirements.txt +0 -1
- runbooks/inventory/rich_inventory_display.py +35 -34
- runbooks/inventory/run_on_multi_accounts.py +3 -5
- runbooks/inventory/unified_validation_engine.py +281 -253
- runbooks/inventory/verify_ec2_security_groups.py +1 -1
- runbooks/inventory/vpc_analyzer.py +735 -697
- runbooks/inventory/vpc_architecture_validator.py +293 -348
- runbooks/inventory/vpc_dependency_analyzer.py +384 -380
- runbooks/inventory/vpc_flow_analyzer.py +1 -1
- runbooks/main.py +49 -34
- runbooks/main_final.py +91 -60
- runbooks/main_minimal.py +22 -10
- runbooks/main_optimized.py +131 -100
- runbooks/main_ultra_minimal.py +7 -2
- runbooks/mcp/__init__.py +36 -0
- runbooks/mcp/integration.py +679 -0
- runbooks/monitoring/performance_monitor.py +9 -4
- runbooks/operate/dynamodb_operations.py +3 -1
- runbooks/operate/ec2_operations.py +145 -137
- runbooks/operate/iam_operations.py +146 -152
- runbooks/operate/networking_cost_heatmap.py +29 -8
- runbooks/operate/rds_operations.py +223 -254
- runbooks/operate/s3_operations.py +107 -118
- runbooks/operate/vpc_operations.py +646 -616
- runbooks/remediation/base.py +1 -1
- runbooks/remediation/commons.py +10 -7
- runbooks/remediation/commvault_ec2_analysis.py +70 -66
- runbooks/remediation/ec2_unattached_ebs_volumes.py +1 -0
- runbooks/remediation/multi_account.py +24 -21
- runbooks/remediation/rds_snapshot_list.py +86 -60
- runbooks/remediation/remediation_cli.py +92 -146
- runbooks/remediation/universal_account_discovery.py +83 -79
- runbooks/remediation/workspaces_list.py +46 -41
- runbooks/security/__init__.py +19 -0
- runbooks/security/assessment_runner.py +1150 -0
- runbooks/security/baseline_checker.py +812 -0
- runbooks/security/cloudops_automation_security_validator.py +509 -535
- runbooks/security/compliance_automation_engine.py +17 -17
- runbooks/security/config/__init__.py +2 -2
- runbooks/security/config/compliance_config.py +50 -50
- runbooks/security/config_template_generator.py +63 -76
- runbooks/security/enterprise_security_framework.py +1 -1
- runbooks/security/executive_security_dashboard.py +519 -508
- runbooks/security/multi_account_security_controls.py +959 -1210
- runbooks/security/real_time_security_monitor.py +422 -444
- runbooks/security/security_baseline_tester.py +1 -1
- runbooks/security/security_cli.py +143 -112
- runbooks/security/test_2way_validation.py +439 -0
- runbooks/security/two_way_validation_framework.py +852 -0
- runbooks/sre/production_monitoring_framework.py +167 -177
- runbooks/tdd/__init__.py +15 -0
- runbooks/tdd/cli.py +1071 -0
- runbooks/utils/__init__.py +14 -17
- runbooks/utils/logger.py +7 -2
- runbooks/utils/version_validator.py +50 -47
- runbooks/validation/__init__.py +6 -6
- runbooks/validation/cli.py +9 -3
- runbooks/validation/comprehensive_2way_validator.py +745 -704
- runbooks/validation/mcp_validator.py +906 -228
- runbooks/validation/terraform_citations_validator.py +104 -115
- runbooks/validation/terraform_drift_detector.py +461 -454
- runbooks/vpc/README.md +617 -0
- runbooks/vpc/__init__.py +8 -1
- runbooks/vpc/analyzer.py +577 -0
- runbooks/vpc/cleanup_wrapper.py +476 -413
- runbooks/vpc/cli_cloudtrail_commands.py +339 -0
- runbooks/vpc/cli_mcp_validation_commands.py +480 -0
- runbooks/vpc/cloudtrail_audit_integration.py +717 -0
- runbooks/vpc/config.py +92 -97
- runbooks/vpc/cost_engine.py +411 -148
- runbooks/vpc/cost_explorer_integration.py +553 -0
- runbooks/vpc/cross_account_session.py +101 -106
- runbooks/vpc/enhanced_mcp_validation.py +917 -0
- runbooks/vpc/eni_gate_validator.py +961 -0
- runbooks/vpc/heatmap_engine.py +185 -160
- runbooks/vpc/mcp_no_eni_validator.py +680 -639
- runbooks/vpc/nat_gateway_optimizer.py +358 -0
- runbooks/vpc/networking_wrapper.py +15 -8
- runbooks/vpc/pdca_remediation_planner.py +528 -0
- runbooks/vpc/performance_optimized_analyzer.py +219 -231
- runbooks/vpc/runbooks_adapter.py +1167 -241
- runbooks/vpc/tdd_red_phase_stubs.py +601 -0
- runbooks/vpc/test_data_loader.py +358 -0
- runbooks/vpc/tests/conftest.py +314 -4
- runbooks/vpc/tests/test_cleanup_framework.py +1022 -0
- runbooks/vpc/tests/test_cost_engine.py +0 -2
- runbooks/vpc/topology_generator.py +326 -0
- runbooks/vpc/unified_scenarios.py +1297 -1124
- runbooks/vpc/vpc_cleanup_integration.py +1943 -1115
- runbooks-1.1.6.dist-info/METADATA +327 -0
- runbooks-1.1.6.dist-info/RECORD +489 -0
- runbooks/finops/README.md +0 -414
- runbooks/finops/accuracy_cross_validator.py +0 -647
- runbooks/finops/business_cases.py +0 -950
- runbooks/finops/dashboard_router.py +0 -922
- runbooks/finops/ebs_optimizer.py +0 -973
- runbooks/finops/embedded_mcp_validator.py +0 -1629
- runbooks/finops/enhanced_dashboard_runner.py +0 -527
- runbooks/finops/finops_dashboard.py +0 -584
- runbooks/finops/finops_scenarios.py +0 -1218
- runbooks/finops/legacy_migration.py +0 -730
- runbooks/finops/multi_dashboard.py +0 -1519
- runbooks/finops/single_dashboard.py +0 -1113
- runbooks/finops/unlimited_scenarios.py +0 -393
- runbooks-1.1.4.dist-info/METADATA +0 -800
- runbooks-1.1.4.dist-info/RECORD +0 -468
- {runbooks-1.1.4.dist-info → runbooks-1.1.6.dist-info}/WHEEL +0 -0
- {runbooks-1.1.4.dist-info → runbooks-1.1.6.dist-info}/entry_points.txt +0 -0
- {runbooks-1.1.4.dist-info → runbooks-1.1.6.dist-info}/licenses/LICENSE +0 -0
- {runbooks-1.1.4.dist-info → runbooks-1.1.6.dist-info}/top_level.txt +0 -0
runbooks/finops/workspaces_analyzer.py

@@ -1,5 +1,5 @@
 """
-WorkSpaces Cost Optimization Analysis - Enterprise Framework:
+WorkSpaces Cost Optimization Analysis - Enterprise Framework:
 
 This module provides business-focused WorkSpaces analysis with enterprise patterns:
 - Real AWS API integration (no hardcoded values)
@@ -17,19 +17,27 @@ Strategic Alignment:
 import asyncio
 import json
 import logging
+from dataclasses import asdict, dataclass
 from datetime import datetime, timedelta, timezone
-from typing import Dict, List, Optional, Tuple
-from dataclasses import dataclass, asdict
+from typing import Any, Dict, List, Optional, Tuple
 
 import boto3
 from botocore.exceptions import ClientError
 
+from ..common.profile_utils import get_profile_for_operation
 from ..common.rich_utils import (
-    console,
-
+    console,
+    create_panel,
+    create_progress_bar,
+    create_table,
+    format_cost,
+    print_error,
+    print_header,
+    print_info,
+    print_success,
+    print_warning,
 )
-from ..
-from ..remediation.workspaces_list import get_workspaces, calculate_workspace_monthly_cost
+from ..remediation.workspaces_list import calculate_workspace_monthly_cost, get_workspaces
 
 logger = logging.getLogger(__name__)
 
@@ -37,6 +45,7 @@ logger = logging.getLogger(__name__)
 @dataclass
 class WorkSpaceAnalysisResult:
     """WorkSpace analysis result with cost optimization data."""
+
     workspace_id: str
     username: str
     state: str
@@ -49,7 +58,7 @@ class WorkSpaceAnalysisResult:
     is_unused: bool
     usage_hours: float
     connection_state: str
-
+
     def to_dict(self) -> Dict[str, Any]:
         """Convert to dictionary for JSON serialization."""
         return asdict(self)
@@ -58,6 +67,7 @@ class WorkSpaceAnalysisResult:
 @dataclass
 class WorkSpacesCostSummary:
     """Summary of WorkSpaces cost analysis."""
+
     total_workspaces: int
     unused_workspaces: int
     total_monthly_cost: float
@@ -65,7 +75,7 @@ class WorkSpacesCostSummary:
     potential_annual_savings: float
     target_achievement_rate: float
     analysis_timestamp: str
-
+
     def to_dict(self) -> Dict[str, Any]:
         """Convert to dictionary for JSON serialization."""
         return asdict(self)
@@ -74,97 +84,95 @@ class WorkSpacesCostSummary:
 class WorkSpacesCostAnalyzer:
     """
     WorkSpaces cost optimization analyzer following enterprise patterns.
-
+
     Implements WorkSpaces optimization requirements with proven profile management and Rich CLI standards.
     """
-
+
     def __init__(self, profile: Optional[str] = None):
         """Initialize analyzer with enterprise profile management."""
         # Apply proven profile management pattern from dashboard_runner.py
         self.profile = get_profile_for_operation("operational", profile)
         self.session = boto3.Session(profile_name=self.profile)
-
+
         # WorkSpaces optimization business targets
         self.target_annual_savings = 12518.0
         self.unused_threshold_days = 90
         self.analysis_period_days = 30
-
+
         logger.info(f"WorkSpaces analyzer initialized with profile: {self.profile}")
-
+
     def analyze_workspaces(
-        self,
-        unused_days: int = 90,
-        analysis_days: int = 30,
-        dry_run: bool = True
+        self, unused_days: int = 90, analysis_days: int = 30, dry_run: bool = True
     ) -> Tuple[List[WorkSpaceAnalysisResult], WorkSpacesCostSummary]:
         """
         Analyze WorkSpaces for cost optimization opportunities.
-
+
         Args:
             unused_days: Days threshold for unused WorkSpaces detection
             analysis_days: Period for usage analysis
            dry_run: Safety flag for preview mode
-
+
         Returns:
             Tuple of analysis results and summary
         """
         print_header("WorkSpaces Cost Optimization Analysis", f"Profile: {self.profile}")
-
+
         if dry_run:
             print_info("🔍 Running in DRY-RUN mode (safe preview)")
-
+
         try:
             # Get WorkSpaces client
-            ws_client = self.session.client(
-
+            ws_client = self.session.client("workspaces")
+
             # Calculate time ranges
             end_time = datetime.now(tz=timezone.utc)
             start_time = end_time - timedelta(days=analysis_days)
             unused_threshold = end_time - timedelta(days=unused_days)
-
-            console.print(
+
+            console.print(
+                f"[dim]Analysis period: {start_time.strftime('%Y-%m-%d')} to {end_time.strftime('%Y-%m-%d')}[/dim]"
+            )
             console.print(f"[dim]Unused threshold: {unused_days} days ({unused_threshold.strftime('%Y-%m-%d')})[/dim]")
-
+
             # Get all WorkSpaces with progress tracking
             print_info("Collecting WorkSpaces inventory...")
             paginator = ws_client.get_paginator("describe_workspaces")
             all_workspaces = []
-
+
             for page in paginator.paginate():
                 workspaces = page.get("Workspaces", [])
                 all_workspaces.extend(workspaces)
-
+
             console.print(f"[green]✅ Found {len(all_workspaces)} WorkSpaces[/green]")
-
+
             # Analyze each WorkSpace with progress bar
             analysis_results = []
             total_cost = 0.0
             unused_cost = 0.0
-
+
             with create_progress_bar() as progress:
-                task_id = progress.add_task(
-
-                    total=len(all_workspaces)
-                )
-
+                task_id = progress.add_task(f"Analyzing WorkSpaces cost optimization...", total=len(all_workspaces))
+
                 for workspace in all_workspaces:
                     result = self._analyze_single_workspace(
                         workspace, ws_client, start_time, end_time, unused_threshold
                     )
-
+
                     analysis_results.append(result)
                     total_cost += result.monthly_cost
-
+
                     if result.is_unused:
                         unused_cost += result.monthly_cost
-
+
                     progress.advance(task_id)
-
+
             # Create summary
             unused_count = len([r for r in analysis_results if r.is_unused])
             potential_annual_savings = unused_cost * 12
-            achievement_rate = (
-
+            achievement_rate = (
+                (potential_annual_savings / self.target_annual_savings * 100) if self.target_annual_savings > 0 else 0
+            )
+
             summary = WorkSpacesCostSummary(
                 total_workspaces=len(analysis_results),
                 unused_workspaces=unused_count,
@@ -172,11 +180,11 @@ class WorkSpacesCostAnalyzer:
                 unused_monthly_cost=unused_cost,
                 potential_annual_savings=potential_annual_savings,
                 target_achievement_rate=achievement_rate,
-                analysis_timestamp=datetime.now().isoformat()
+                analysis_timestamp=datetime.now().isoformat(),
             )
-
+
             return analysis_results, summary
-
+
         except ClientError as e:
             print_error(f"AWS API error: {e}")
             if "AccessDenied" in str(e):
@@ -186,14 +194,9 @@ class WorkSpacesCostAnalyzer:
         except Exception as e:
             print_error(f"Analysis failed: {e}")
             raise
-
+
     def _analyze_single_workspace(
-        self,
-        workspace: Dict[str, Any],
-        ws_client,
-        start_time: datetime,
-        end_time: datetime,
-        unused_threshold: datetime
+        self, workspace: Dict[str, Any], ws_client, start_time: datetime, end_time: datetime, unused_threshold: datetime
     ) -> WorkSpaceAnalysisResult:
         """Analyze a single WorkSpace for cost optimization."""
         workspace_id = workspace["WorkspaceId"]
@@ -201,23 +204,21 @@ class WorkSpacesCostAnalyzer:
         state = workspace["State"]
         bundle_id = workspace["BundleId"]
         running_mode = workspace["WorkspaceProperties"]["RunningMode"]
-
+
         # Get connection status
         last_connection = None
         connection_state = "UNKNOWN"
-
+
         try:
-            connection_response = ws_client.describe_workspaces_connection_status(
-
-            )
-
+            connection_response = ws_client.describe_workspaces_connection_status(WorkspaceIds=[workspace_id])
+
             connection_status_list = connection_response.get("WorkspacesConnectionStatus", [])
             if connection_status_list:
                 last_connection = connection_status_list[0].get("LastKnownUserConnectionTimestamp")
                 connection_state = connection_status_list[0].get("ConnectionState", "UNKNOWN")
         except ClientError as e:
             logger.warning(f"Could not get connection status for {workspace_id}: {e}")
-
+
         # Format connection info
         if last_connection:
             last_connection_str = last_connection.strftime("%Y-%m-%d %H:%M:%S")
@@ -225,17 +226,17 @@ class WorkSpacesCostAnalyzer:
         else:
             last_connection_str = None
             days_since_connection = 999
-
+
         # Get usage metrics
         usage_hours = self._get_workspace_usage(workspace_id, start_time, end_time)
-
+
         # Calculate costs
         monthly_cost = calculate_workspace_monthly_cost(bundle_id, running_mode)
         annual_cost = monthly_cost * 12
-
+
         # Determine if unused
         is_unused = last_connection is None or last_connection < unused_threshold
-
+
         return WorkSpaceAnalysisResult(
             workspace_id=workspace_id,
             username=username,
@@ -248,16 +249,14 @@ class WorkSpacesCostAnalyzer:
             days_since_connection=days_since_connection,
             is_unused=is_unused,
             usage_hours=usage_hours,
-            connection_state=connection_state
+            connection_state=connection_state,
         )
-
-    def _get_workspace_usage(
-        self, workspace_id: str, start_time: datetime, end_time: datetime
-    ) -> float:
+
+    def _get_workspace_usage(self, workspace_id: str, start_time: datetime, end_time: datetime) -> float:
         """Get WorkSpace usage hours from CloudWatch metrics."""
         try:
             cloudwatch = self.session.client("cloudwatch")
-
+
             response = cloudwatch.get_metric_statistics(
                 Namespace="AWS/WorkSpaces",
                 MetricName="UserConnected",
@@ -267,68 +266,68 @@ class WorkSpacesCostAnalyzer:
                 Period=3600,  # 1 hour intervals
                 Statistics=["Sum"],
             )
-
+
             usage_hours = sum(datapoint["Sum"] for datapoint in response.get("Datapoints", []))
             return round(usage_hours, 2)
-
+
         except ClientError as e:
             logger.warning(f"Could not get usage metrics for {workspace_id}: {e}")
             return 0.0
-
-    def display_analysis_results(
-        self,
-        results: List[WorkSpaceAnalysisResult],
-        summary: WorkSpacesCostSummary
-    ) -> None:
+
+    def display_analysis_results(self, results: List[WorkSpaceAnalysisResult], summary: WorkSpacesCostSummary) -> None:
         """Display analysis results using Rich CLI formatting."""
-
+
         # Summary table
         print_header("WorkSpaces Cost Analysis Summary")
-
+
         summary_table = create_table(
             title="WorkSpaces Optimization Summary",
             columns=[
                 {"header": "Metric", "style": "cyan"},
                 {"header": "Count", "style": "green bold"},
                 {"header": "Monthly Cost", "style": "red"},
-                {"header": "Annual Cost", "style": "red bold"}
-            ]
+                {"header": "Annual Cost", "style": "red bold"},
+            ],
         )
-
+
         summary_table.add_row(
             "Total WorkSpaces",
             str(summary.total_workspaces),
             format_cost(summary.total_monthly_cost),
-            format_cost(summary.total_monthly_cost * 12)
+            format_cost(summary.total_monthly_cost * 12),
         )
-
+
         summary_table.add_row(
             f"Unused WorkSpaces (>{self.unused_threshold_days} days)",
             str(summary.unused_workspaces),
             format_cost(summary.unused_monthly_cost),
-            format_cost(summary.potential_annual_savings)
+            format_cost(summary.potential_annual_savings),
         )
-
+
         summary_table.add_row(
             "🎯 Potential Savings",
             f"{summary.unused_workspaces} WorkSpaces",
             format_cost(summary.unused_monthly_cost),
-            format_cost(summary.potential_annual_savings)
+            format_cost(summary.potential_annual_savings),
         )
-
+
         console.print(summary_table)
-
+
         # Achievement analysis
         if summary.target_achievement_rate >= 80:
-            print_success(
+            print_success(
+                f"🎯 Target Achievement: {summary.target_achievement_rate:.1f}% of ${self.target_annual_savings:,.0f} annual savings target"
+            )
         else:
-            print_warning(
-
+            print_warning(
+                f"📊 Analysis: {summary.target_achievement_rate:.1f}% of ${self.target_annual_savings:,.0f} annual savings target"
+            )
+
         # Detailed unused WorkSpaces
         unused_results = [r for r in results if r.is_unused]
         if unused_results:
             print_warning(f"⚠ Found {len(unused_results)} unused WorkSpaces:")
-
+
             unused_table = create_table(
                 title="Unused WorkSpaces Details",
                 columns=[
@@ -337,10 +336,10 @@ class WorkSpacesCostAnalyzer:
                     {"header": "Days Unused", "style": "yellow"},
                     {"header": "Running Mode", "style": "green"},
                     {"header": "Monthly Cost", "style": "red"},
-                    {"header": "State", "style": "magenta"}
-                ]
+                    {"header": "State", "style": "magenta"},
+                ],
             )
-
+
             # Show first 10 for readability
             for ws in unused_results[:10]:
                 unused_table.add_row(
@@ -349,27 +348,27 @@ class WorkSpacesCostAnalyzer:
                     str(ws.days_since_connection),
                     ws.running_mode,
                     format_cost(ws.monthly_cost),
-                    ws.state
+                    ws.state,
                 )
-
+
             console.print(unused_table)
-
+
             if len(unused_results) > 10:
                 console.print(f"[dim]... and {len(unused_results) - 10} more unused WorkSpaces[/dim]")
-
+
     def export_results(
         self,
         results: List[WorkSpaceAnalysisResult],
         summary: WorkSpacesCostSummary,
         output_format: str = "json",
-        output_file: Optional[str] = None
+        output_file: Optional[str] = None,
     ) -> str:
         """Export analysis results in specified format."""
-
+
         if not output_file:
             timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
             output_file = f"./tmp/workspaces_analysis_{timestamp}.{output_format}"
-
+
         export_data = {
             "summary": summary.to_dict(),
             "workspaces": [result.to_dict() for result in results],
@@ -377,58 +376,57 @@ class WorkSpacesCostAnalyzer:
                 "analysis_timestamp": summary.analysis_timestamp,
                 "profile": self.profile,
                 "target_savings": self.target_annual_savings,
-                "version": "latest version"
-            }
+                "version": "latest version",
+            },
         }
-
+
         if output_format.lower() == "json":
-            with open(output_file,
+            with open(output_file, "w") as f:
                 json.dump(export_data, f, indent=2, default=str)
         elif output_format.lower() == "csv":
             import csv
-
+
+            with open(output_file, "w", newline="") as f:
                 if results:
                     writer = csv.DictWriter(f, fieldnames=results[0].to_dict().keys())
                     writer.writeheader()
                     for result in results:
                         writer.writerow(result.to_dict())
-
+
         print_success(f"Analysis results exported to: {output_file}")
         return output_file
-
+
     def cleanup_unused_workspaces(
-        self,
-        unused_results: List[WorkSpaceAnalysisResult],
-        dry_run: bool = True,
-        confirm: bool = False
+        self, unused_results: List[WorkSpaceAnalysisResult], dry_run: bool = True, confirm: bool = False
    ) -> Dict[str, Any]:
         """
         Cleanup unused WorkSpaces with enterprise safety controls.
-
+
         Args:
             unused_results: List of unused WorkSpaces to cleanup
             dry_run: Safety flag for preview mode
             confirm: Skip confirmation prompts
-
+
         Returns:
             Cleanup operation results
         """
         print_header("WorkSpaces Cleanup Operation", "🚨 HIGH-RISK OPERATION")
-
+
         if not unused_results:
             print_info("✅ No unused WorkSpaces found for cleanup")
             return {"status": "no_action", "deleted": 0, "message": "No unused WorkSpaces"}
-
+
         # Safety validation
         cleanup_candidates = [
-            ws
+            ws
+            for ws in unused_results
             if ws.state in ["AVAILABLE", "STOPPED"] and ws.days_since_connection >= self.unused_threshold_days
         ]
-
+
         if not cleanup_candidates:
             print_warning("⚠ No WorkSpaces meet the safety criteria for cleanup")
             return {"status": "no_candidates", "deleted": 0, "message": "No cleanup candidates"}
-
+
         # Display cleanup preview
         cleanup_table = create_table(
             title=f"Cleanup Candidates ({len(cleanup_candidates)} WorkSpaces)",
@@ -437,98 +435,93 @@ class WorkSpacesCostAnalyzer:
                 {"header": "Username", "style": "blue"},
                 {"header": "Days Unused", "style": "yellow"},
                 {"header": "Monthly Cost", "style": "red"},
-                {"header": "State", "style": "magenta"}
-            ]
+                {"header": "State", "style": "magenta"},
+            ],
         )
-
+
         total_cleanup_savings = 0.0
         for ws in cleanup_candidates:
             cleanup_table.add_row(
-                ws.workspace_id,
-                ws.username,
-                str(ws.days_since_connection),
-                format_cost(ws.monthly_cost),
-                ws.state
+                ws.workspace_id, ws.username, str(ws.days_since_connection), format_cost(ws.monthly_cost), ws.state
             )
             total_cleanup_savings += ws.monthly_cost
-
+
         console.print(cleanup_table)
-
+
         annual_cleanup_savings = total_cleanup_savings * 12
-        print_info(
-
+        print_info(
+            f"💰 Cleanup savings: {format_cost(total_cleanup_savings)}/month, {format_cost(annual_cleanup_savings)}/year"
+        )
+
         if dry_run:
             print_info("🔍 DRY-RUN: Preview mode - no WorkSpaces will be deleted")
             return {
                 "status": "dry_run",
                 "candidates": len(cleanup_candidates),
                 "monthly_savings": total_cleanup_savings,
-                "annual_savings": annual_cleanup_savings
+                "annual_savings": annual_cleanup_savings,
             }
-
+
         # Confirmation required for actual cleanup
         if not confirm:
             print_warning("🚨 DANGER: This will permanently delete WorkSpaces and all user data")
             print_warning(f"About to delete {len(cleanup_candidates)} WorkSpaces")
-
+
             if not console.input("Type 'DELETE' to confirm: ") == "DELETE":
                 print_error("Cleanup cancelled - confirmation failed")
                 return {"status": "cancelled", "deleted": 0}
-
+
         # Perform cleanup
         print_warning("🗑 Starting WorkSpaces cleanup...")
         ws_client = self.session.client("workspaces")
-
+
         deleted_count = 0
         failed_count = 0
         cleanup_results = []
-
+
         for ws in cleanup_candidates:
             try:
                 print_info(f"Deleting: {ws.workspace_id} ({ws.username})")
-
-                ws_client.terminate_workspaces(
-
-                )
-
+
+                ws_client.terminate_workspaces(TerminateWorkspaceRequests=[{"WorkspaceId": ws.workspace_id}])
+
                 deleted_count += 1
-                cleanup_results.append(
-
-
-
-
-
-
+                cleanup_results.append(
+                    {
+                        "workspace_id": ws.workspace_id,
+                        "username": ws.username,
+                        "status": "deleted",
+                        "monthly_saving": ws.monthly_cost,
+                    }
+                )
+
                 print_success(f"✅ Deleted: {ws.workspace_id}")
-
+
             except ClientError as e:
                 failed_count += 1
-                cleanup_results.append(
-                    "workspace_id": ws.workspace_id,
-
-                    "status": "failed",
-                    "error": str(e)
-                })
+                cleanup_results.append(
+                    {"workspace_id": ws.workspace_id, "username": ws.username, "status": "failed", "error": str(e)}
+                )
                 print_error(f"❌ Failed: {ws.workspace_id} - {e}")
-
+
         # Summary
         actual_monthly_savings = sum(
-            result.get("monthly_saving", 0)
-            for result in cleanup_results
-            if result["status"] == "deleted"
+            result.get("monthly_saving", 0) for result in cleanup_results if result["status"] == "deleted"
        )
        actual_annual_savings = actual_monthly_savings * 12
-
+
        print_success(f"🔄 Cleanup complete: {deleted_count} deleted, {failed_count} failed")
-        print_success(
-
+        print_success(
+            f"💰 Realized savings: {format_cost(actual_monthly_savings)}/month, {format_cost(actual_annual_savings)}/year"
+        )
+
        return {
            "status": "completed",
            "deleted": deleted_count,
            "failed": failed_count,
            "monthly_savings": actual_monthly_savings,
            "annual_savings": actual_annual_savings,
-            "details": cleanup_results
+            "details": cleanup_results,
        }
 
 
@@ -538,11 +531,11 @@ def analyze_workspaces(
     analysis_days: int = 30,
     output_format: str = "json",
     output_file: Optional[str] = None,
-    dry_run: bool = True
+    dry_run: bool = True,
 ) -> Dict[str, Any]:
     """
     WorkSpaces analysis wrapper for CLI and notebook integration.
-
+
     Args:
         profile: AWS profile to use
         unused_days: Days threshold for unused detection
@@ -550,7 +543,7 @@ def analyze_workspaces(
         output_format: Export format (json, csv)
        output_file: Optional output file path
        dry_run: Safety flag for preview mode
-
+
    Returns:
        Analysis results with cost optimization recommendations
    """
@@ -562,21 +555,17 @@ def analyze_workspaces(
 
     analyzer = WorkSpacesCostAnalyzer(profile=profile)
     results, summary = analyzer.analyze_workspaces(
-        unused_days=unused_days,
-        analysis_days=analysis_days,
-        dry_run=dry_run
+        unused_days=unused_days, analysis_days=analysis_days, dry_run=dry_run
     )
-
+
     # Display results
     analyzer.display_analysis_results(results, summary)
-
+
     # Export if requested
     export_file = None
     if output_file or output_format:
-        export_file = analyzer.export_results(
-
-        )
-
+        export_file = analyzer.export_results(results, summary, output_format, output_file)
+
     # Return comprehensive results
     if summary is not None:
         return {
@@ -584,7 +573,7 @@ def analyze_workspaces(
             "workspaces": [result.to_dict() for result in results],
             "export_file": export_file,
             "achievement_rate": summary.target_achievement_rate,
-            "status": "success"
+            "status": "success",
         }
     else:
         return {
@@ -592,7 +581,7 @@ def analyze_workspaces(
             "workspaces": [],
             "export_file": None,
             "achievement_rate": 0,
-            "status": "partial_failure"
+            "status": "partial_failure",
         }
 
     except Exception as e:
@@ -603,11 +592,11 @@ def analyze_workspaces(
             "summary": {"error": str(e)},
             "workspaces": [],
             "export_file": None,
-            "achievement_rate": 0
+            "achievement_rate": 0,
         }
 
 
 # Legacy alias for backward compatibility
 def analyze_workspaces_finops_24(*args, **kwargs):
     """Legacy alias for analyze_workspaces - deprecated, use analyze_workspaces instead."""
-    return analyze_workspaces(*args, **kwargs)
+    return analyze_workspaces(*args, **kwargs)