runbooks-1.1.9-py3-none-any.whl → runbooks-1.1.10-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- runbooks/__init__.py +1 -1
- runbooks/__init___optimized.py +2 -1
- runbooks/_platform/__init__.py +1 -1
- runbooks/cfat/cli.py +4 -3
- runbooks/cfat/cloud_foundations_assessment.py +1 -2
- runbooks/cfat/tests/test_cli.py +4 -1
- runbooks/cli/commands/finops.py +68 -19
- runbooks/cli/commands/inventory.py +796 -7
- runbooks/cli/commands/operate.py +65 -4
- runbooks/cloudops/cost_optimizer.py +1 -3
- runbooks/common/cli_decorators.py +6 -4
- runbooks/common/config_loader.py +787 -0
- runbooks/common/config_schema.py +280 -0
- runbooks/common/dry_run_framework.py +14 -2
- runbooks/common/mcp_integration.py +238 -0
- runbooks/finops/ebs_cost_optimizer.py +7 -4
- runbooks/finops/elastic_ip_optimizer.py +7 -4
- runbooks/finops/infrastructure/__init__.py +3 -2
- runbooks/finops/infrastructure/commands.py +7 -4
- runbooks/finops/infrastructure/load_balancer_optimizer.py +7 -4
- runbooks/finops/infrastructure/vpc_endpoint_optimizer.py +7 -4
- runbooks/finops/nat_gateway_optimizer.py +7 -4
- runbooks/finops/tests/run_tests.py +1 -1
- runbooks/inventory/ArgumentsClass.py +2 -1
- runbooks/inventory/README.md +111 -12
- runbooks/inventory/Tests/test_Inventory_Modules.py +27 -10
- runbooks/inventory/Tests/test_cfn_describe_stacks.py +18 -7
- runbooks/inventory/Tests/test_ec2_describe_instances.py +30 -15
- runbooks/inventory/Tests/test_lambda_list_functions.py +17 -3
- runbooks/inventory/Tests/test_org_list_accounts.py +17 -4
- runbooks/inventory/account_class.py +0 -1
- runbooks/inventory/all_my_instances_wrapper.py +4 -8
- runbooks/inventory/aws_organization.png +0 -0
- runbooks/inventory/check_cloudtrail_compliance.py +4 -4
- runbooks/inventory/check_controltower_readiness.py +50 -47
- runbooks/inventory/check_landingzone_readiness.py +35 -31
- runbooks/inventory/cloud_foundations_integration.py +8 -3
- runbooks/inventory/core/collector.py +201 -1
- runbooks/inventory/discovery.md +2 -1
- runbooks/inventory/{draw_org_structure.py → draw_org.py} +55 -9
- runbooks/inventory/drift_detection_cli.py +8 -68
- runbooks/inventory/find_cfn_drift_detection.py +14 -4
- runbooks/inventory/find_cfn_orphaned_stacks.py +7 -5
- runbooks/inventory/find_cfn_stackset_drift.py +5 -5
- runbooks/inventory/find_ec2_security_groups.py +6 -3
- runbooks/inventory/find_landingzone_versions.py +5 -5
- runbooks/inventory/find_vpc_flow_logs.py +5 -5
- runbooks/inventory/inventory.sh +20 -7
- runbooks/inventory/inventory_mcp_cli.py +4 -0
- runbooks/inventory/inventory_modules.py +9 -7
- runbooks/inventory/list_cfn_stacks.py +18 -8
- runbooks/inventory/list_cfn_stackset_operation_results.py +2 -2
- runbooks/inventory/list_cfn_stackset_operations.py +32 -20
- runbooks/inventory/list_cfn_stacksets.py +7 -4
- runbooks/inventory/list_config_recorders_delivery_channels.py +4 -4
- runbooks/inventory/list_ds_directories.py +3 -3
- runbooks/inventory/list_ec2_availability_zones.py +7 -3
- runbooks/inventory/list_ec2_ebs_volumes.py +3 -3
- runbooks/inventory/list_ec2_instances.py +1 -1
- runbooks/inventory/list_ecs_clusters_and_tasks.py +8 -4
- runbooks/inventory/list_elbs_load_balancers.py +7 -3
- runbooks/inventory/list_enis_network_interfaces.py +3 -3
- runbooks/inventory/list_guardduty_detectors.py +9 -5
- runbooks/inventory/list_iam_policies.py +7 -3
- runbooks/inventory/list_iam_roles.py +3 -3
- runbooks/inventory/list_iam_saml_providers.py +8 -4
- runbooks/inventory/list_lambda_functions.py +8 -4
- runbooks/inventory/list_org_accounts.py +306 -276
- runbooks/inventory/list_org_accounts_users.py +45 -9
- runbooks/inventory/list_rds_db_instances.py +4 -4
- runbooks/inventory/list_route53_hosted_zones.py +3 -3
- runbooks/inventory/list_servicecatalog_provisioned_products.py +5 -5
- runbooks/inventory/list_sns_topics.py +4 -4
- runbooks/inventory/list_ssm_parameters.py +6 -3
- runbooks/inventory/list_vpc_subnets.py +8 -4
- runbooks/inventory/list_vpcs.py +15 -4
- runbooks/inventory/mcp_vpc_validator.py +6 -0
- runbooks/inventory/organizations_discovery.py +17 -3
- runbooks/inventory/organizations_utils.py +553 -0
- runbooks/inventory/output_formatters.py +422 -0
- runbooks/inventory/recover_cfn_stack_ids.py +5 -5
- runbooks/inventory/run_on_multi_accounts.py +3 -3
- runbooks/inventory/tag_coverage.py +481 -0
- runbooks/inventory/validation_utils.py +358 -0
- runbooks/inventory/verify_ec2_security_groups.py +18 -5
- runbooks/inventory/vpc_architecture_validator.py +7 -1
- runbooks/inventory/vpc_dependency_analyzer.py +6 -0
- runbooks/main_final.py +2 -2
- runbooks/main_ultra_minimal.py +2 -2
- runbooks/mcp/integration.py +6 -4
- runbooks/remediation/acm_remediation.py +2 -2
- runbooks/remediation/cloudtrail_remediation.py +2 -2
- runbooks/remediation/cognito_remediation.py +2 -2
- runbooks/remediation/dynamodb_remediation.py +2 -2
- runbooks/remediation/ec2_remediation.py +2 -2
- runbooks/remediation/kms_remediation.py +2 -2
- runbooks/remediation/lambda_remediation.py +2 -2
- runbooks/remediation/rds_remediation.py +2 -2
- runbooks/remediation/s3_remediation.py +1 -1
- runbooks/vpc/cloudtrail_audit_integration.py +1 -1
- {runbooks-1.1.9.dist-info → runbooks-1.1.10.dist-info}/METADATA +74 -4
- {runbooks-1.1.9.dist-info → runbooks-1.1.10.dist-info}/RECORD +106 -100
- runbooks/__init__.py.backup +0 -134
- {runbooks-1.1.9.dist-info → runbooks-1.1.10.dist-info}/WHEEL +0 -0
- {runbooks-1.1.9.dist-info → runbooks-1.1.10.dist-info}/entry_points.txt +0 -0
- {runbooks-1.1.9.dist-info → runbooks-1.1.10.dist-info}/licenses/LICENSE +0 -0
- {runbooks-1.1.9.dist-info → runbooks-1.1.10.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,422 @@
+#!/usr/bin/env python3
+"""
+Output formatting utilities for Organizations module.
+
+This module provides consistent multi-format export capabilities for
+Organizations inventory scripts supporting JSON, CSV, Markdown, and
+Rich table formats.
+
+Features:
+- Rich table formatting with CloudOps theme
+- Multi-format export (JSON, CSV, Markdown, Table)
+- Account metadata formatting utilities
+- Organizations hierarchy visualization helpers
+
+Author: CloudOps Runbooks Team
+Version: 1.1.10
+"""
+
+import csv
+import json
+import logging
+from io import StringIO
+from pathlib import Path
+from typing import Any, Dict, List, Optional
+
+from rich.table import Table
+
+from runbooks.common.rich_utils import console, create_table, print_error, print_success
+from runbooks.common.config_loader import get_config_loader
+
+logger = logging.getLogger(__name__)
+
+
+class OrganizationsFormatter:
+    """Format Organizations data for various output types."""
+
+    @staticmethod
+    def format_accounts_table(accounts: List[Dict], title: str = "AWS Organization Accounts") -> Table:
+        """
+        Create Rich table for accounts listing.
+
+        Args:
+            accounts: List of account dictionaries with keys:
+                - id: Account ID
+                - name: Account name
+                - email: Email address
+                - status: Account status
+                - profile: Mapped profile name
+            title: Table title
+
+        Returns:
+            Rich Table object with CloudOps theme
+        """
+        table = create_table(
+            title=title,
+            columns=[
+                {"name": "Account ID", "style": "cyan", "justify": "left"},
+                {"name": "Account Name", "style": "white", "justify": "left"},
+                {"name": "Email", "style": "dim", "justify": "left"},
+                {"name": "Status", "style": "green", "justify": "center"},
+                {"name": "Profile", "style": "yellow", "justify": "left"},
+            ],
+        )
+
+        for account in accounts:
+            # Color status based on value
+            status = account.get("status", "UNKNOWN")
+            status_style = "green" if status == "ACTIVE" else "red"
+            status_display = f"[{status_style}]{status}[/{status_style}]"
+
+            table.add_row(
+                account.get("id", ""),
+                account.get("name", ""),
+                account.get("email", ""),
+                status_display,
+                account.get("profile", ""),
+            )
+
+        return table
+
+    @staticmethod
+    def export_json(accounts: List[Dict], output_file: str, metadata: Optional[Dict] = None) -> None:
+        """
+        Export accounts to JSON format with config-aware 14+ columns.
+
+        Args:
+            accounts: List of account dictionaries
+            output_file: Output file path
+            metadata: Optional metadata to include in export
+
+        Raises:
+            IOError: If file write fails
+        """
+        try:
+            # Load tag mappings for field names (optional metadata)
+            config_loader = get_config_loader()
+            tag_mappings = config_loader.load_tag_mappings()
+
+            # Enhanced accounts with all tiers
+            enhanced_accounts = []
+            for account in accounts:
+                enhanced_account = {
+                    # Baseline fields (9 columns - unchanged for backward compatibility)
+                    'id': account.get('id'),
+                    'name': account.get('name'),
+                    'email': account.get('email'),
+                    'status': account.get('status'),
+                    'joined_method': account.get('joined_method'),
+                    'joined_timestamp': account.get('joined_timestamp'),
+                    'organizational_unit': account.get('organizational_unit'),
+                    'organizational_unit_id': account.get('organizational_unit_id'),
+                    'parent_id': account.get('parent_id'),
+
+                    # TIER 1: Business Metadata (config-aware)
+                    'wbs_code': account.get('wbs_code', 'N/A'),
+                    'cost_group': account.get('cost_group', 'N/A'),
+                    'technical_lead': account.get('technical_lead', 'N/A'),
+                    'account_owner': account.get('account_owner', 'N/A'),
+
+                    # TIER 2: Governance Metadata (config-aware)
+                    'business_unit': account.get('business_unit', 'N/A'),
+                    'functional_area': account.get('functional_area', 'N/A'),
+                    'managed_by': account.get('managed_by', 'N/A'),
+                    'product_owner': account.get('product_owner', 'N/A'),
+
+                    # TIER 3: Operational Metadata (config-aware)
+                    'purpose': account.get('purpose', 'N/A'),
+                    'environment': account.get('environment', 'N/A'),
+                    'compliance_scope': account.get('compliance_scope', 'N/A'),
+                    'data_classification': account.get('data_classification', 'N/A'),
+
+                    # TIER 4: Extended Metadata (optional, config-aware)
+                    'project_name': account.get('project_name', 'N/A'),
+                    'budget_code': account.get('budget_code', 'N/A'),
+                    'support_tier': account.get('support_tier', 'N/A'),
+                    'created_date': account.get('created_date', 'N/A'),
+                    'expiry_date': account.get('expiry_date', 'N/A'),
+
+                    # Computed fields (if present)
+                    'all_tags': account.get('all_tags', {}),
+                    'wbs_comparison': account.get('wbs_comparison', {}),
+                }
+
+                # Preserve any additional fields from source (forward compatibility)
+                for key in account:
+                    if key not in enhanced_account:
+                        enhanced_account[key] = account[key]
+
+                enhanced_accounts.append(enhanced_account)
+
+            output_data = {"accounts": enhanced_accounts}
+
+            if metadata:
+                output_data["metadata"] = metadata
+
+            # Add tag mapping metadata for reference
+            output_data["tag_mappings_used"] = tag_mappings
+            output_data["config_sources"] = config_loader.get_config_sources()
+
+            with open(output_file, "w", encoding="utf-8") as f:
+                json.dump(output_data, f, indent=2, ensure_ascii=False)
+
+            print_success(f"Exported {len(accounts)} accounts to {output_file} (JSON, {len(enhanced_accounts[0])} fields)")
+
+        except Exception as e:
+            error_msg = f"Failed to export JSON: {str(e)}"
+            logger.error(error_msg)
+            print_error(error_msg)
+            raise IOError(error_msg) from e
+
+    @staticmethod
+    def export_csv(accounts: List[Dict], output_file: str, include_header: bool = True) -> None:
+        """
+        Export accounts to CSV format with config-aware 14+ columns.
+
+        Args:
+            accounts: List of account dictionaries
+            output_file: Output file path
+            include_header: Include CSV header row
+
+        Raises:
+            IOError: If file write fails
+        """
+        if not accounts:
+            logger.warning("No accounts to export to CSV")
+            return
+
+        try:
+            # Define CSV headers (all tiers in priority order)
+            headers = [
+                # Baseline fields (9 columns)
+                'id', 'name', 'email', 'status', 'joined_method', 'joined_timestamp',
+                'organizational_unit', 'organizational_unit_id', 'parent_id',
+                # TIER 1: Business Metadata
+                'wbs_code', 'cost_group', 'technical_lead', 'account_owner',
+                # TIER 2: Governance Metadata
+                'business_unit', 'functional_area', 'managed_by', 'product_owner',
+                # TIER 3: Operational Metadata
+                'purpose', 'environment', 'compliance_scope', 'data_classification',
+                # TIER 4: Extended Metadata (optional)
+                'project_name', 'budget_code', 'support_tier', 'created_date', 'expiry_date'
+            ]
+
+            with open(output_file, "w", encoding="utf-8", newline="") as f:
+                writer = csv.DictWriter(f, fieldnames=headers, extrasaction='ignore')
+
+                if include_header:
+                    writer.writeheader()
+
+                # Write rows with N/A for missing fields
+                for account in accounts:
+                    row_data = {header: account.get(header, 'N/A') for header in headers}
+                    writer.writerow(row_data)
+
+            print_success(f"Exported {len(accounts)} accounts to {output_file} (CSV, {len(headers)} columns)")
+
+        except Exception as e:
+            error_msg = f"Failed to export CSV: {str(e)}"
+            logger.error(error_msg)
+            print_error(error_msg)
+            raise IOError(error_msg) from e
+
+    @staticmethod
+    def export_markdown(accounts: List[Dict], output_file: str, title: str = "AWS Organization Accounts") -> None:
+        """
+        Export accounts to Markdown table format with config-aware 14+ columns.
+
+        Args:
+            accounts: List of account dictionaries
+            output_file: Output file path
+            title: Markdown document title
+
+        Raises:
+            IOError: If file write fails
+        """
+        if not accounts:
+            logger.warning("No accounts to export to Markdown")
+            return
+
+        try:
+            # Load config for metadata
+            config_loader = get_config_loader()
+            tag_mappings = config_loader.load_tag_mappings()
+
+            # Priority columns for markdown display (top 12 most important)
+            display_columns = [
+                'id', 'name', 'status', 'email',
+                'wbs_code', 'cost_group', 'technical_lead',
+                'business_unit', 'environment',
+                'organizational_unit', 'managed_by', 'purpose'
+            ]
+
+            with open(output_file, "w", encoding="utf-8") as f:
+                # Write title
+                f.write(f"# {title}\n\n")
+
+                # Write metadata
+                f.write("## Configuration Details\n\n")
+                f.write(f"**Config Sources**: {' → '.join(config_loader.get_config_sources())}\n\n")
+                f.write(f"**Tag Mappings**: {len(tag_mappings)} fields configured\n\n")
+
+                # Write table header (display columns only)
+                header_names = [col.replace('_', ' ').title() for col in display_columns]
+                f.write("| " + " | ".join(header_names) + " |\n")
+                f.write("| " + " | ".join(["---"] * len(display_columns)) + " |\n")
+
+                # Write table rows
+                for account in accounts:
+                    values = [str(account.get(col, 'N/A')) for col in display_columns]
+                    # Truncate long values for readability
+                    values = [v[:50] + '...' if len(v) > 50 else v for v in values]
+                    f.write("| " + " | ".join(values) + " |\n")
+
+                # Write summary
+                f.write(f"\n**Total Accounts:** {len(accounts)}\n")
+                f.write(f"\n**Display Columns:** {len(display_columns)} (showing most important fields)\n")
+                f.write(f"\n**Full Export:** Use JSON/CSV format for complete {len(accounts[0])} field export\n")
+
+            print_success(f"Exported {len(accounts)} accounts to {output_file} (Markdown, {len(display_columns)} display columns)")

+
+        except Exception as e:
+            error_msg = f"Failed to export Markdown: {str(e)}"
+            logger.error(error_msg)
+            print_error(error_msg)
+            raise IOError(error_msg) from e
+
+    @staticmethod
+    def to_csv_string(accounts: List[Dict]) -> str:
+        """
+        Convert accounts to CSV string format (in-memory).
+
+        Args:
+            accounts: List of account dictionaries
+
+        Returns:
+            CSV formatted string
+        """
+        if not accounts:
+            return ""
+
+        output = StringIO()
+        fieldnames = list(accounts[0].keys())
+
+        writer = csv.DictWriter(output, fieldnames=fieldnames)
+        writer.writeheader()
+        writer.writerows(accounts)
+
+        return output.getvalue()
+
+    @staticmethod
+    def to_json_string(accounts: List[Dict], indent: int = 2) -> str:
+        """
+        Convert accounts to JSON string format (in-memory).
+
+        Args:
+            accounts: List of account dictionaries
+            indent: JSON indentation level
+
+        Returns:
+            JSON formatted string
+        """
+        return json.dumps({"accounts": accounts}, indent=indent, ensure_ascii=False)
+
+
+class HierarchyFormatter:
+    """Format Organizations hierarchy visualization data."""
+
+    @staticmethod
+    def format_hierarchy_tree(accounts: List[Dict], show_profiles: bool = True) -> str:
+        """
+        Format accounts as hierarchical tree structure.
+
+        Args:
+            accounts: List of account dictionaries
+            show_profiles: Include profile mappings in output
+
+        Returns:
+            Formatted tree string
+        """
+        tree_lines = []
+        tree_lines.append("AWS Organization Hierarchy")
+        tree_lines.append("=" * 50)
+        tree_lines.append("")
+
+        for idx, account in enumerate(accounts):
+            is_last = idx == len(accounts) - 1
+            prefix = "└── " if is_last else "├── "
+
+            account_line = f"{prefix}{account.get('name', 'N/A')} ({account.get('id', 'N/A')})"
+
+            if show_profiles:
+                account_line += f" → {account.get('profile', 'N/A')}"
+
+            tree_lines.append(account_line)
+
+            # Add status as sub-item
+            status_prefix = " " if is_last else "│ "
+            tree_lines.append(f"{status_prefix}Status: {account.get('status', 'UNKNOWN')}")
+
+            if not is_last:
+                tree_lines.append("│")
+
+        return "\n".join(tree_lines)
+
+    @staticmethod
+    def format_summary(accounts: List[Dict]) -> str:
+        """
+        Format summary statistics for accounts.
+
+        Args:
+            accounts: List of account dictionaries
+
+        Returns:
+            Formatted summary string
+        """
+        total = len(accounts)
+        active = sum(1 for a in accounts if a.get("status") == "ACTIVE")
+        suspended = sum(1 for a in accounts if a.get("status") == "SUSPENDED")
+        closed = sum(1 for a in accounts if a.get("status") == "CLOSED")
+
+        summary_lines = []
+        summary_lines.append("Account Summary")
+        summary_lines.append("=" * 40)
+        summary_lines.append(f"Total Accounts: {total}")
+        summary_lines.append(f"Active Accounts: {active}")
+        summary_lines.append(f"Suspended Accounts: {suspended}")
+        summary_lines.append(f"Closed Accounts: {closed}")
+
+        return "\n".join(summary_lines)
+
+
+def export_to_file(accounts: List[Dict], output_path: str, format_type: str = "json", **kwargs) -> None:
+    """
+    Universal export function supporting multiple formats.
+
+    Args:
+        accounts: List of account dictionaries
+        output_path: Output file path
+        format_type: Export format ('json', 'csv', 'markdown')
+        **kwargs: Additional format-specific arguments
+
+    Raises:
+        ValueError: If format_type is unsupported
+        IOError: If file write fails
+    """
+    format_type = format_type.lower()
+
+    if format_type == "json":
+        metadata = kwargs.get("metadata")
+        OrganizationsFormatter.export_json(accounts, output_path, metadata=metadata)
+
+    elif format_type == "csv":
+        include_header = kwargs.get("include_header", True)
+        OrganizationsFormatter.export_csv(accounts, output_path, include_header=include_header)
+
+    elif format_type == "markdown":
+        title = kwargs.get("title", "AWS Organization Accounts")
+        OrganizationsFormatter.export_markdown(accounts, output_path, title=title)
+
+    else:
+        supported_formats = ["json", "csv", "markdown"]
+        raise ValueError(f"Unsupported format: {format_type}. Supported formats: {supported_formats}")
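The new output_formatters.py hunk above centers on the OrganizationsFormatter class, the HierarchyFormatter helpers, and the export_to_file() dispatcher. Below is a minimal usage sketch, assuming the module is importable as runbooks.inventory.output_formatters (inferred from the file path shown in the change list); the account record and its values are hypothetical, matching only the keys documented in format_accounts_table().

    from runbooks.common.rich_utils import console
    from runbooks.inventory.output_formatters import (
        HierarchyFormatter,
        OrganizationsFormatter,
        export_to_file,
    )

    # Hypothetical account record; keys mirror the format_accounts_table() docstring
    accounts = [
        {
            "id": "111122223333",
            "name": "example-workloads",
            "email": "aws+workloads@example.com",
            "status": "ACTIVE",
            "profile": "example-profile",
        }
    ]

    # Render the Rich table and the plain-text summary to the console
    console.print(OrganizationsFormatter.format_accounts_table(accounts))
    console.print(HierarchyFormatter.format_summary(accounts))

    # Dispatch to a specific exporter; unsupported formats raise ValueError
    export_to_file(accounts, "accounts.csv", format_type="csv")

The CSV path is used here because, per the code above, export_csv() does not touch the config loader, whereas the JSON and Markdown exporters also read tag mappings via get_config_loader().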
@@ -6,19 +6,19 @@ from os.path import split
 from pprint import pprint
 from time import time
 
-import Inventory_Modules
+from runbooks.inventory import inventory_modules as Inventory_Modules
 import simplejson as json
-from account_class import aws_acct_access
-from ArgumentsClass import CommonArguments
+from runbooks.inventory.account_class import aws_acct_access
+from runbooks.inventory.ArgumentsClass import CommonArguments
 from runbooks.common.rich_utils import console
-from
+from runbooks.inventory.inventory_modules import get_credentials_for_accounts_in_org
+from runbooks import __version__
 
 """
 This script was created to help solve a testing problem for the "move_stack_instances.py" script.e
 Originally, that script didn't have built-in recovery, so we needed this script to "recover" those stack-instance ids that might have been lost during the move_stack_instances.py run. However, that script now has built-in recovery, so this script isn't really needed. However, it can still be used to find any stack-instances that have been orphaned from their original stack-set, if that happens.
 """
 
-__version__ = "2024.05.18"
 
 
 #########################
@@ -4,16 +4,16 @@ import logging
 import sys
 
 import Inventory_Modules
-from account_class import aws_acct_access
-from ArgumentsClass import CommonArguments
+from runbooks.inventory.account_class import aws_acct_access
+from runbooks.inventory.ArgumentsClass import CommonArguments
 from botocore.exceptions import ClientError
 from runbooks.common.rich_utils import console
 from rich.panel import Panel
 
 # Initialize Rich console with test mode support
 from runbooks.common.rich_utils import console
+from runbooks import __version__
 
-__version__ = "2023.05.04"
 
 parser = CommonArguments()
 parser.singleprofile()