runbooks-0.9.7-py3-none-any.whl → runbooks-0.9.9-py3-none-any.whl
This diff shows the contents of publicly available package versions as published to a supported public registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in that registry.
- runbooks/__init__.py +1 -1
- runbooks/common/mcp_integration.py +174 -0
- runbooks/common/performance_monitor.py +4 -4
- runbooks/common/rich_utils.py +3 -0
- runbooks/enterprise/__init__.py +18 -10
- runbooks/enterprise/security.py +708 -0
- runbooks/finops/enhanced_dashboard_runner.py +2 -1
- runbooks/finops/finops_dashboard.py +322 -11
- runbooks/finops/markdown_exporter.py +226 -0
- runbooks/finops/optimizer.py +2 -0
- runbooks/finops/single_dashboard.py +16 -16
- runbooks/finops/vpc_cleanup_exporter.py +328 -0
- runbooks/finops/vpc_cleanup_optimizer.py +1318 -0
- runbooks/main.py +384 -15
- runbooks/operate/vpc_operations.py +8 -2
- runbooks/vpc/__init__.py +12 -0
- runbooks/vpc/cleanup_wrapper.py +757 -0
- runbooks/vpc/cost_engine.py +527 -3
- runbooks/vpc/networking_wrapper.py +29 -29
- runbooks/vpc/runbooks_adapter.py +479 -0
- runbooks/vpc/unified_scenarios.py +3199 -0
- runbooks/vpc/vpc_cleanup_integration.py +2629 -0
- {runbooks-0.9.7.dist-info → runbooks-0.9.9.dist-info}/METADATA +1 -1
- {runbooks-0.9.7.dist-info → runbooks-0.9.9.dist-info}/RECORD +28 -21
- {runbooks-0.9.7.dist-info → runbooks-0.9.9.dist-info}/WHEEL +0 -0
- {runbooks-0.9.7.dist-info → runbooks-0.9.9.dist-info}/entry_points.txt +0 -0
- {runbooks-0.9.7.dist-info → runbooks-0.9.9.dist-info}/licenses/LICENSE +0 -0
- {runbooks-0.9.7.dist-info → runbooks-0.9.9.dist-info}/top_level.txt +0 -0
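
The per-file `+`/`-` counts above summarize line-level changes between the two wheels. If you want to reproduce a similar comparison locally, a minimal sketch is shown below; it is not the registry's own diff tooling, and it assumes both wheel files are already in the working directory (for example via `pip download runbooks==0.9.7 --no-deps` and `pip download runbooks==0.9.9 --no-deps`).

```python
# Sketch: compare two local wheel files and print per-file added/removed line counts.
# Assumes runbooks-0.9.7-py3-none-any.whl and runbooks-0.9.9-py3-none-any.whl exist locally.
import difflib
import zipfile

OLD_WHEEL = "runbooks-0.9.7-py3-none-any.whl"
NEW_WHEEL = "runbooks-0.9.9-py3-none-any.whl"


def read_members(path: str) -> dict:
    """Return {archive_member_name: list_of_text_lines} for a wheel archive."""
    members = {}
    with zipfile.ZipFile(path) as zf:
        for name in zf.namelist():
            try:
                members[name] = zf.read(name).decode("utf-8").splitlines()
            except UnicodeDecodeError:
                members[name] = []  # skip binary members
    return members


old, new = read_members(OLD_WHEEL), read_members(NEW_WHEEL)
for name in sorted(set(old) | set(new)):
    added = removed = 0
    for line in difflib.unified_diff(old.get(name, []), new.get(name, []), lineterm=""):
        if line.startswith("+") and not line.startswith("+++"):
            added += 1
        elif line.startswith("-") and not line.startswith("---"):
            removed += 1
    if added or removed:
        print(f"{name} +{added} -{removed}")
```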
runbooks/finops/single_dashboard.py
(Removed lines appear as bare `-` markers; their original content is not captured in this diff view.)

@@ -117,9 +117,9 @@ class SingleAccountDashboard:
 
 # Show detailed configuration only for CLI users
 if self.context_console.config.show_technical_details:
-
-
-
+    print_info(f"🎯 Analysis Focus: TOP {top_services} Services")
+    print_info("• Optimization Target: Service-level insights")
+    print_info("• User Profile: Technical teams\n")
 
 # Get profile for analysis
 profile = self._determine_analysis_profile(args)

@@ -316,7 +316,7 @@ class SingleAccountDashboard:
 if EMBEDDED_MCP_AVAILABLE:
     self._run_embedded_mcp_validation([profile], cost_data, service_list, args)
 else:
-
+    print_warning("MCP validation requested but not available - check MCP server configuration")
 
 return 0

@@ -638,7 +638,7 @@ class SingleAccountDashboard:
 style="dim",
 )
 
-
+rich_console.print(table)
 
 # Summary panel (using filtered services for consistent analysis)
 total_current = sum(filtered_current_services.values())

@@ -681,7 +681,7 @@ class SingleAccountDashboard:
 • Services Analyzed: {len(all_services)}{period_info}
 """
 
-
+rich_console.print(Panel(summary_text.strip(), title="📊 Analysis Summary", style="info"))
 
 def _export_service_analysis(
     self, args: argparse.Namespace, cost_data: Dict[str, Any], service_costs: List[str], account_id: str

@@ -818,7 +818,7 @@ class SingleAccountDashboard:
 f.write("\n".join(lines))
 
 print_success(f"Markdown export saved to: {file_path}")
-
+print_info("📋 Ready for GitHub/MkDocs documentation")
 
 except Exception as e:
     print_warning(f"Markdown export failed: {str(e)[:50]}")

@@ -854,7 +854,7 @@ class SingleAccountDashboard:
 hasattr(args, 'report_type') and args.report_type):
     return
 
-
+print_info("📊 Processing export requests...")
 
 # Convert service data to ProfileData format compatible with existing export functions
 from .types import ProfileData

@@ -890,7 +890,7 @@ class SingleAccountDashboard:
 export_count = 0
 for report_type in args.report_type:
     if report_type == "pdf":
-
+        print_info("Generating PDF export...")
         pdf_path = export_cost_dashboard_to_pdf(
             export_data,
             args.report_name,

@@ -902,10 +902,10 @@ class SingleAccountDashboard:
 print_success(f"PDF export completed: {pdf_path}")
 export_count += 1
 else:
-
+    print_error("PDF export failed")
 
 elif report_type == "csv":
-
+    print_info("Generating CSV export...")
     from .cost_processor import export_to_csv
     csv_path = export_to_csv(
         export_data,

@@ -919,7 +919,7 @@ class SingleAccountDashboard:
 export_count += 1
 
 elif report_type == "json":
-
+    print_info("Generating JSON export...")
     from .cost_processor import export_to_json
     json_path = export_to_json(export_data, args.report_name, getattr(args, 'dir', None))
     if json_path:

@@ -927,7 +927,7 @@ class SingleAccountDashboard:
 export_count += 1
 
 elif report_type == "markdown":
-
+    print_info("Generating Markdown export...")
     # Use existing markdown export functionality
     self._export_service_table_to_markdown(
         services_data[:10], {}, {},  # Simplified data structure

@@ -939,12 +939,12 @@ class SingleAccountDashboard:
 export_count += 1
 
 if export_count > 0:
-
+    print_success(f"{export_count} exports completed successfully")
 else:
-
+    print_warning("No exports were generated")
 
 except Exception as e:
-
+    print_error(f"Export failed: {str(e)}")
     import traceback
     self.console.print(f"[red]Details: {traceback.format_exc()}[/]")
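
These hunks standardize console output in `SingleAccountDashboard` on the `print_info`/`print_success`/`print_warning`/`print_error` helpers and a shared `rich_console` instance (likely from `runbooks.common.rich_utils`, which also changed in this release). As context only, the sketch below shows the general shape of such Rich-based helpers; the actual implementations and styling in `rich_utils.py` are not part of this diff, so the bodies here are assumptions.

```python
# Illustrative sketch only -- NOT the actual runbooks.common.rich_utils code,
# which is not shown in this diff. It demonstrates the typical shape of
# Rich-based print helpers like the ones called in the hunks above.
from rich.console import Console

rich_console = Console()


def print_info(message: str) -> None:
    """Print an informational message (assumed cyan styling)."""
    rich_console.print(f"[cyan]{message}[/]")


def print_success(message: str) -> None:
    """Print a success message (assumed green styling)."""
    rich_console.print(f"[green]{message}[/]")


def print_warning(message: str) -> None:
    """Print a warning message (assumed yellow styling)."""
    rich_console.print(f"[yellow]{message}[/]")


def print_error(message: str) -> None:
    """Print an error message (assumed red styling)."""
    rich_console.print(f"[red]{message}[/]")
```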
runbooks/finops/vpc_cleanup_exporter.py (new file)

@@ -0,0 +1,328 @@
+#!/usr/bin/env python3
+"""
+VPC Cleanup Exporter Module - Enterprise VPC Cleanup Result Export
+
+This module provides export functionality for VPC cleanup analysis results,
+leveraging the existing markdown_exporter infrastructure with VPC-specific formatting.
+
+Author: CloudOps Runbooks Team
+Version: 0.9.9
+"""
+
+import csv
+import json
+import os
+from datetime import datetime
+from typing import Any, Dict, List
+
+from .markdown_exporter import MarkdownExporter
+
+
+def _format_tags_for_display(tags_dict: Dict[str, str]) -> str:
+    """Format tags for display with priority order."""
+    if not tags_dict:
+        return "No tags"
+
+    priority_keys = ['Name', 'Environment', 'Project', 'Owner', 'BusinessOwner', 'Team', 'CostCenter']
+    relevant_tags = []
+
+    for key in priority_keys:
+        if key in tags_dict and tags_dict[key]:
+            relevant_tags.append(f"{key}:{tags_dict[key]}")
+
+    # Add other important tags
+    for key, value in tags_dict.items():
+        if key not in priority_keys and value and len(relevant_tags) < 5:
+            relevant_tags.append(f"{key}:{value}")
+
+    return "; ".join(relevant_tags) if relevant_tags else f"({len(tags_dict)} tags)"
+
+
+def export_vpc_cleanup_results(vpc_result: Any, export_formats: List[str], output_dir: str = "./") -> Dict[str, str]:
+    """
+    Export VPC cleanup results in multiple formats.
+
+    Args:
+        vpc_result: VPC cleanup analysis result object
+        export_formats: List of formats to export (markdown, csv, json, pdf)
+        output_dir: Directory to save exported files
+
+    Returns:
+        Dict mapping format to exported filename
+    """
+    results = {}
+
+    # Extract VPC candidates from result - use correct attribute name
+    vpc_candidates = getattr(vpc_result, 'cleanup_candidates', [])
+    if not vpc_candidates:
+        # Fallback to other possible attribute names
+        vpc_candidates = getattr(vpc_result, 'vpc_candidates', [])
+
+    if 'markdown' in export_formats:
+        try:
+            exporter = MarkdownExporter()
+            markdown_filename = exporter.export_vpc_analysis_to_file(
+                vpc_candidates,
+                filename="vpc-cleanup-candidates.md",
+                output_dir=output_dir
+            )
+            results['markdown'] = markdown_filename
+        except Exception as e:
+            print(f"Warning: Markdown export failed: {e}")
+            results['markdown'] = None
+
+    # Real implementations for other formats
+    if 'csv' in export_formats:
+        try:
+            csv_filename = _export_vpc_candidates_csv(vpc_candidates, output_dir)
+            results['csv'] = csv_filename
+        except Exception as e:
+            print(f"Warning: CSV export failed: {e}")
+            results['csv'] = None
+
+    if 'json' in export_formats:
+        try:
+            json_filename = _export_vpc_candidates_json(vpc_candidates, output_dir)
+            results['json'] = json_filename
+        except Exception as e:
+            print(f"Warning: JSON export failed: {e}")
+            results['json'] = None
+
+    if 'pdf' in export_formats:
+        try:
+            pdf_filename = _export_vpc_candidates_pdf(vpc_candidates, output_dir)
+            results['pdf'] = pdf_filename
+        except Exception as e:
+            print(f"Warning: PDF export failed: {e}")
+            results['pdf'] = None
+
+    return results
+
+
+def _export_vpc_candidates_csv(vpc_candidates: List[Any], output_dir: str) -> str:
+    """Export VPC candidates to CSV format with all 15 columns."""
+    filename = os.path.join(output_dir, "vpc-cleanup-candidates.csv")
+
+    # 15-column headers for comprehensive VPC analysis
+    headers = [
+        "Account_ID", "VPC_ID", "VPC_Name", "CIDR_Block", "Overlapping",
+        "Is_Default", "ENI_Count", "Tags", "Flow_Logs", "TGW/Peering",
+        "LBs_Present", "IaC", "Timeline", "Decision", "Owners/Approvals", "Notes"
+    ]
+
+    with open(filename, 'w', newline='', encoding='utf-8') as csvfile:
+        writer = csv.writer(csvfile)
+        writer.writerow(headers)
+
+        for candidate in vpc_candidates:
+            # Extract data with enhanced tag and owner handling
+            tags_dict = getattr(candidate, 'tags', {}) or {}
+
+            # Enhanced tag display - prioritize important tags
+            if tags_dict:
+                priority_keys = ['Name', 'Environment', 'Project', 'Owner', 'BusinessOwner', 'Team']
+                relevant_tags = []
+                for key in priority_keys:
+                    if key in tags_dict and tags_dict[key]:
+                        relevant_tags.append(f"{key}:{tags_dict[key]}")
+
+                # Add other important tags
+                for key, value in tags_dict.items():
+                    if key not in priority_keys and value and len(relevant_tags) < 5:
+                        relevant_tags.append(f"{key}:{value}")
+
+                tags_str = "; ".join(relevant_tags)
+            else:
+                tags_str = "No tags"
+
+            load_balancers = getattr(candidate, 'load_balancers', []) or []
+            lbs_present = "Yes" if load_balancers else "No"
+
+            # Enhanced owner extraction
+            owners = getattr(candidate, 'owners_approvals', []) or []
+
+            # If no owners found via attributes, extract from tags directly
+            if not owners and tags_dict:
+                owner_keys = ['Owner', 'BusinessOwner', 'TechnicalOwner', 'Team', 'Contact', 'CreatedBy', 'ManagedBy']
+                for key in owner_keys:
+                    if key in tags_dict and tags_dict[key]:
+                        if 'business' in key.lower() or 'manager' in tags_dict[key].lower():
+                            owners.append(f"{tags_dict[key]} (Business)")
+                        elif 'technical' in key.lower() or any(tech in tags_dict[key].lower() for tech in ['ops', 'devops', 'engineering']):
+                            owners.append(f"{tags_dict[key]} (Technical)")
+                        else:
+                            owners.append(tags_dict[key])
+
+            if owners:
+                owners_str = "; ".join(owners)
+            else:
+                # Enhanced fallback for CSV
+                if getattr(candidate, 'is_default', False):
+                    owners_str = "System Default VPC"
+                elif getattr(candidate, 'iac_detected', False):
+                    owners_str = "IaC Managed"
+                else:
+                    owners_str = "No owner tags found"
+
+            row = [
+                getattr(candidate, 'account_id', 'Unknown'),
+                getattr(candidate, 'vpc_id', ''),
+                getattr(candidate, 'vpc_name', 'Unnamed'),
+                getattr(candidate, 'cidr_block', ''),
+                "No",  # Overlapping analysis would need CIDR comparison
+                "Yes" if getattr(candidate, 'is_default', False) else "No",
+                getattr(candidate, 'dependency_analysis', {}).eni_count if hasattr(candidate, 'dependency_analysis') else 0,
+                tags_str,
+                "Yes" if getattr(candidate, 'flow_logs_enabled', False) else "No",
+                "No",  # TGW/Peering analysis placeholder
+                lbs_present,
+                "Yes" if getattr(candidate, 'iac_detected', False) else "No",
+                "Unknown",  # Timeline analysis placeholder
+                getattr(candidate, 'cleanup_recommendation', 'unknown'),
+                owners_str,
+                "Generated by CloudOps Runbooks VPC Module"
+            ]
+            writer.writerow(row)
+
+    return filename
+
+
+def _export_vpc_candidates_json(vpc_candidates: List[Any], output_dir: str) -> str:
+    """Export VPC candidates to JSON format with full data structure."""
+    filename = os.path.join(output_dir, "vpc-cleanup-candidates.json")
+
+    # Convert candidates to serializable format
+    candidates_data = []
+    for candidate in vpc_candidates:
+        candidate_dict = {
+            "account_id": getattr(candidate, 'account_id', 'Unknown'),
+            "vpc_id": getattr(candidate, 'vpc_id', ''),
+            "vpc_name": getattr(candidate, 'vpc_name', 'Unnamed'),
+            "cidr_block": getattr(candidate, 'cidr_block', ''),
+            "region": getattr(candidate, 'region', 'unknown'),
+            "is_default": getattr(candidate, 'is_default', False),
+            "state": getattr(candidate, 'state', 'unknown'),
+            "tags": getattr(candidate, 'tags', {}) or {},
+            "tags_summary": _format_tags_for_display(getattr(candidate, 'tags', {}) or {}),
+            "flow_logs_enabled": getattr(candidate, 'flow_logs_enabled', False),
+            "load_balancers": getattr(candidate, 'load_balancers', []) or [],
+            "iac_detected": getattr(candidate, 'iac_detected', False),
+            "owners_approvals": getattr(candidate, 'owners_approvals', []) or [],
+            "cleanup_bucket": getattr(candidate, 'cleanup_bucket', 'unknown'),
+            "cleanup_recommendation": getattr(candidate, 'cleanup_recommendation', 'unknown'),
+            "risk_assessment": getattr(candidate, 'risk_assessment', 'unknown'),
+            "business_impact": getattr(candidate, 'business_impact', 'unknown')
+        }
+
+        # Add dependency analysis if available
+        if hasattr(candidate, 'dependency_analysis') and candidate.dependency_analysis:
+            dep_analysis = candidate.dependency_analysis
+            candidate_dict["dependency_analysis"] = {
+                "eni_count": getattr(dep_analysis, 'eni_count', 0),
+                "route_tables": getattr(dep_analysis, 'route_tables', []),
+                "security_groups": getattr(dep_analysis, 'security_groups', []),
+                "internet_gateways": getattr(dep_analysis, 'internet_gateways', []),
+                "nat_gateways": getattr(dep_analysis, 'nat_gateways', []),
+                "vpc_endpoints": getattr(dep_analysis, 'vpc_endpoints', []),
+                "peering_connections": getattr(dep_analysis, 'peering_connections', []),
+                "dependency_risk_level": getattr(dep_analysis, 'dependency_risk_level', 'unknown')
+            }
+
+        candidates_data.append(candidate_dict)
+
+    # Create export metadata
+    export_data = {
+        "metadata": {
+            "export_timestamp": datetime.now().isoformat(),
+            "total_candidates": len(candidates_data),
+            "generator": "CloudOps Runbooks VPC Module v0.9.9"
+        },
+        "vpc_candidates": candidates_data
+    }
+
+    with open(filename, 'w', encoding='utf-8') as jsonfile:
+        json.dump(export_data, jsonfile, indent=2, ensure_ascii=False)
+
+    return filename
+
+
+def _export_vpc_candidates_pdf(vpc_candidates: List[Any], output_dir: str) -> str:
+    """Export VPC candidates to PDF format for executive presentation."""
+    filename = os.path.join(output_dir, "vpc-cleanup-candidates.pdf")
+
+    try:
+        # Try to use reportlab for PDF generation
+        from reportlab.lib import colors
+        from reportlab.lib.pagesizes import letter, A4
+        from reportlab.platypus import SimpleDocTemplate, Table, TableStyle, Paragraph, Spacer
+        from reportlab.lib.styles import getSampleStyleSheet
+
+        doc = SimpleDocTemplate(filename, pagesize=A4)
+        styles = getSampleStyleSheet()
+        story = []
+
+        # Title
+        title = Paragraph("VPC Cleanup Analysis Report", styles['Title'])
+        story.append(title)
+        story.append(Spacer(1, 20))
+
+        # Summary
+        summary_text = f"""
+        <b>Generated:</b> {datetime.now().strftime('%Y-%m-%d %H:%M:%S UTC')}<br/>
+        <b>Total VPC Candidates:</b> {len(vpc_candidates)}<br/>
+        <b>Analysis Source:</b> CloudOps Runbooks VPC Module v0.9.9
+        """
+        summary = Paragraph(summary_text, styles['Normal'])
+        story.append(summary)
+        story.append(Spacer(1, 20))
+
+        # Create table data
+        table_data = [
+            ["Account ID", "VPC ID", "VPC Name", "CIDR", "Default", "ENI Count", "Decision"]
+        ]
+
+        for candidate in vpc_candidates:
+            row = [
+                str(getattr(candidate, 'account_id', 'Unknown'))[:15],  # Truncate for PDF width
+                str(getattr(candidate, 'vpc_id', ''))[:20],
+                str(getattr(candidate, 'vpc_name', 'Unnamed'))[:15],
+                str(getattr(candidate, 'cidr_block', ''))[:15],
+                "Yes" if getattr(candidate, 'is_default', False) else "No",
+                str(getattr(candidate, 'dependency_analysis', {}).eni_count if hasattr(candidate, 'dependency_analysis') else 0),
+                str(getattr(candidate, 'cleanup_recommendation', 'unknown'))[:10]
+            ]
+            table_data.append(row)
+
+        # Create table
+        table = Table(table_data)
+        table.setStyle(TableStyle([
+            ('BACKGROUND', (0, 0), (-1, 0), colors.grey),
+            ('TEXTCOLOR', (0, 0), (-1, 0), colors.whitesmoke),
+            ('ALIGN', (0, 0), (-1, -1), 'CENTER'),
+            ('FONTNAME', (0, 0), (-1, 0), 'Helvetica-Bold'),
+            ('FONTSIZE', (0, 0), (-1, 0), 10),
+            ('BOTTOMPADDING', (0, 0), (-1, 0), 12),
+            ('BACKGROUND', (0, 1), (-1, -1), colors.beige),
+            ('FONTSIZE', (0, 1), (-1, -1), 8),
+            ('GRID', (0, 0), (-1, -1), 1, colors.black)
+        ]))
+
+        story.append(table)
+        doc.build(story)
+
+    except ImportError:
+        # Fallback: create a simple text-based PDF placeholder
+        with open(filename, 'w', encoding='utf-8') as f:
+            f.write("VPC Cleanup Analysis Report (PDF)\n")
+            f.write("=" * 40 + "\n\n")
+            f.write(f"Generated: {datetime.now().isoformat()}\n")
+            f.write(f"Total VPC Candidates: {len(vpc_candidates)}\n\n")
+
+            for i, candidate in enumerate(vpc_candidates, 1):
+                f.write(f"{i}. VPC {getattr(candidate, 'vpc_id', 'Unknown')}\n")
+                f.write(f"   Account: {getattr(candidate, 'account_id', 'Unknown')}\n")
+                f.write(f"   CIDR: {getattr(candidate, 'cidr_block', 'Unknown')}\n")
+                f.write(f"   ENI Count: {getattr(candidate, 'dependency_analysis', {}).eni_count if hasattr(candidate, 'dependency_analysis') else 0}\n")
+                f.write(f"   Decision: {getattr(candidate, 'cleanup_recommendation', 'unknown')}\n\n")
+
+    return filename
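
For orientation, here is a minimal usage sketch of the new exporter driven directly from Python rather than the CLI. The candidate object is a stand-in built with `types.SimpleNamespace` whose attribute names mirror the `getattr()` calls above; the real cleanup-result classes live elsewhere in the `runbooks.vpc`/`runbooks.finops` modules and are not shown in this diff.

```python
# Hypothetical usage sketch for export_vpc_cleanup_results (runbooks 0.9.9 installed).
# Attribute names mirror the getattr() calls in vpc_cleanup_exporter.py.
from types import SimpleNamespace

from runbooks.finops.vpc_cleanup_exporter import export_vpc_cleanup_results

candidate = SimpleNamespace(
    account_id="111122223333",
    vpc_id="vpc-0abc123def4567890",
    vpc_name="legacy-staging",
    cidr_block="10.42.0.0/16",
    is_default=False,
    tags={"Name": "legacy-staging", "Owner": "platform-team"},
    flow_logs_enabled=False,
    load_balancers=[],
    iac_detected=False,
    owners_approvals=[],
    cleanup_recommendation="review",
)
result = SimpleNamespace(cleanup_candidates=[candidate])

# Writes vpc-cleanup-candidates.{md,csv,json} into the output directory and
# returns a dict mapping each requested format to its filename (or None on failure).
files = export_vpc_cleanup_results(result, ["markdown", "csv", "json"], output_dir="./")
print(files)
```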