runbooks 0.7.9__py3-none-any.whl → 0.9.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- runbooks/__init__.py +1 -1
- runbooks/cfat/README.md +12 -1
- runbooks/cfat/__init__.py +1 -1
- runbooks/cfat/assessment/compliance.py +4 -1
- runbooks/cfat/assessment/runner.py +42 -34
- runbooks/cfat/models.py +1 -1
- runbooks/cloudops/__init__.py +123 -0
- runbooks/cloudops/base.py +385 -0
- runbooks/cloudops/cost_optimizer.py +811 -0
- runbooks/cloudops/infrastructure_optimizer.py +29 -0
- runbooks/cloudops/interfaces.py +828 -0
- runbooks/cloudops/lifecycle_manager.py +29 -0
- runbooks/cloudops/mcp_cost_validation.py +678 -0
- runbooks/cloudops/models.py +251 -0
- runbooks/cloudops/monitoring_automation.py +29 -0
- runbooks/cloudops/notebook_framework.py +676 -0
- runbooks/cloudops/security_enforcer.py +449 -0
- runbooks/common/__init__.py +152 -0
- runbooks/common/accuracy_validator.py +1039 -0
- runbooks/common/context_logger.py +440 -0
- runbooks/common/cross_module_integration.py +594 -0
- runbooks/common/enhanced_exception_handler.py +1108 -0
- runbooks/common/enterprise_audit_integration.py +634 -0
- runbooks/common/mcp_cost_explorer_integration.py +900 -0
- runbooks/common/mcp_integration.py +548 -0
- runbooks/common/performance_monitor.py +387 -0
- runbooks/common/profile_utils.py +216 -0
- runbooks/common/rich_utils.py +172 -1
- runbooks/feedback/user_feedback_collector.py +440 -0
- runbooks/finops/README.md +377 -458
- runbooks/finops/__init__.py +4 -21
- runbooks/finops/account_resolver.py +279 -0
- runbooks/finops/accuracy_cross_validator.py +638 -0
- runbooks/finops/aws_client.py +721 -36
- runbooks/finops/budget_integration.py +313 -0
- runbooks/finops/cli.py +59 -5
- runbooks/finops/cost_optimizer.py +1340 -0
- runbooks/finops/cost_processor.py +211 -37
- runbooks/finops/dashboard_router.py +900 -0
- runbooks/finops/dashboard_runner.py +990 -232
- runbooks/finops/embedded_mcp_validator.py +288 -0
- runbooks/finops/enhanced_dashboard_runner.py +8 -7
- runbooks/finops/enhanced_progress.py +327 -0
- runbooks/finops/enhanced_trend_visualization.py +423 -0
- runbooks/finops/finops_dashboard.py +184 -1829
- runbooks/finops/helpers.py +509 -196
- runbooks/finops/iam_guidance.py +400 -0
- runbooks/finops/markdown_exporter.py +466 -0
- runbooks/finops/multi_dashboard.py +1502 -0
- runbooks/finops/optimizer.py +15 -15
- runbooks/finops/profile_processor.py +2 -2
- runbooks/finops/runbooks.inventory.organizations_discovery.log +0 -0
- runbooks/finops/runbooks.security.report_generator.log +0 -0
- runbooks/finops/runbooks.security.run_script.log +0 -0
- runbooks/finops/runbooks.security.security_export.log +0 -0
- runbooks/finops/schemas.py +589 -0
- runbooks/finops/service_mapping.py +195 -0
- runbooks/finops/single_dashboard.py +710 -0
- runbooks/finops/tests/test_reference_images_validation.py +1 -1
- runbooks/inventory/README.md +12 -1
- runbooks/inventory/core/collector.py +157 -29
- runbooks/inventory/list_ec2_instances.py +9 -6
- runbooks/inventory/list_ssm_parameters.py +10 -10
- runbooks/inventory/organizations_discovery.py +210 -164
- runbooks/inventory/rich_inventory_display.py +74 -107
- runbooks/inventory/run_on_multi_accounts.py +13 -13
- runbooks/inventory/runbooks.inventory.organizations_discovery.log +0 -0
- runbooks/inventory/runbooks.security.security_export.log +0 -0
- runbooks/main.py +1371 -240
- runbooks/metrics/dora_metrics_engine.py +711 -17
- runbooks/monitoring/performance_monitor.py +433 -0
- runbooks/operate/README.md +394 -0
- runbooks/operate/base.py +215 -47
- runbooks/operate/ec2_operations.py +435 -5
- runbooks/operate/iam_operations.py +598 -3
- runbooks/operate/privatelink_operations.py +1 -1
- runbooks/operate/rds_operations.py +508 -0
- runbooks/operate/s3_operations.py +508 -0
- runbooks/operate/vpc_endpoints.py +1 -1
- runbooks/remediation/README.md +489 -13
- runbooks/remediation/base.py +5 -3
- runbooks/remediation/commons.py +8 -4
- runbooks/security/ENTERPRISE_SECURITY_FRAMEWORK.md +506 -0
- runbooks/security/README.md +12 -1
- runbooks/security/__init__.py +265 -33
- runbooks/security/cloudops_automation_security_validator.py +1164 -0
- runbooks/security/compliance_automation.py +12 -10
- runbooks/security/compliance_automation_engine.py +1021 -0
- runbooks/security/enterprise_security_framework.py +930 -0
- runbooks/security/enterprise_security_policies.json +293 -0
- runbooks/security/executive_security_dashboard.py +1247 -0
- runbooks/security/integration_test_enterprise_security.py +879 -0
- runbooks/security/module_security_integrator.py +641 -0
- runbooks/security/multi_account_security_controls.py +2254 -0
- runbooks/security/real_time_security_monitor.py +1196 -0
- runbooks/security/report_generator.py +1 -1
- runbooks/security/run_script.py +4 -8
- runbooks/security/security_baseline_tester.py +39 -52
- runbooks/security/security_export.py +99 -120
- runbooks/sre/README.md +472 -0
- runbooks/sre/__init__.py +33 -0
- runbooks/sre/mcp_reliability_engine.py +1049 -0
- runbooks/sre/performance_optimization_engine.py +1032 -0
- runbooks/sre/production_monitoring_framework.py +584 -0
- runbooks/sre/reliability_monitoring_framework.py +1011 -0
- runbooks/validation/__init__.py +2 -2
- runbooks/validation/benchmark.py +154 -149
- runbooks/validation/cli.py +159 -147
- runbooks/validation/mcp_validator.py +291 -248
- runbooks/vpc/README.md +478 -0
- runbooks/vpc/__init__.py +2 -2
- runbooks/vpc/manager_interface.py +366 -351
- runbooks/vpc/networking_wrapper.py +68 -36
- runbooks/vpc/rich_formatters.py +22 -8
- runbooks-0.9.1.dist-info/METADATA +308 -0
- {runbooks-0.7.9.dist-info → runbooks-0.9.1.dist-info}/RECORD +120 -59
- {runbooks-0.7.9.dist-info → runbooks-0.9.1.dist-info}/entry_points.txt +1 -1
- runbooks/finops/cross_validation.py +0 -375
- runbooks-0.7.9.dist-info/METADATA +0 -636
- {runbooks-0.7.9.dist-info → runbooks-0.9.1.dist-info}/WHEEL +0 -0
- {runbooks-0.7.9.dist-info → runbooks-0.9.1.dist-info}/licenses/LICENSE +0 -0
- {runbooks-0.7.9.dist-info → runbooks-0.9.1.dist-info}/top_level.txt +0 -0
runbooks/main.py
CHANGED
@@ -24,6 +24,7 @@ entrypoint for all AWS cloud operations, designed for CloudOps, DevOps, and SRE
|
|
24
24
|
- `runbooks operate` - AWS resource operations (EC2, S3, VPC, NAT Gateway, DynamoDB, etc.)
|
25
25
|
- `runbooks org` - AWS Organizations management
|
26
26
|
- `runbooks finops` - Cost analysis and financial operations
|
27
|
+
- `runbooks cloudops` - Business scenario automation (cost optimization, security enforcement, governance)
|
27
28
|
|
28
29
|
## Standardized Options
|
29
30
|
|
@@ -46,6 +47,9 @@ runbooks security assess --output html --output-file security-report.html
|
|
46
47
|
# Operations (with safety)
|
47
48
|
runbooks operate ec2 start --instance-ids i-1234567890abcdef0 --dry-run
|
48
49
|
runbooks operate s3 create-bucket --bucket-name my-bucket --region us-west-2
|
50
|
+
runbooks operate s3 find-no-lifecycle --region us-east-1
|
51
|
+
runbooks operate s3 add-lifecycle-bulk --bucket-names bucket1,bucket2 --expiration-days 90
|
52
|
+
runbooks operate s3 analyze-lifecycle-compliance
|
49
53
|
runbooks operate vpc create-vpc --cidr-block 10.0.0.0/16 --vpc-name prod-vpc
|
50
54
|
runbooks operate vpc create-nat-gateway --subnet-id subnet-123 --nat-name prod-nat
|
51
55
|
runbooks operate dynamodb create-table --table-name employees
|
@@ -88,6 +92,12 @@ import boto3
|
|
88
92
|
|
89
93
|
from runbooks import __version__
|
90
94
|
from runbooks.cfat.runner import AssessmentRunner
|
95
|
+
from runbooks.common.performance_monitor import get_performance_benchmark
|
96
|
+
from runbooks.common.profile_utils import (
|
97
|
+
create_management_session,
|
98
|
+
create_operational_session,
|
99
|
+
get_profile_for_operation,
|
100
|
+
)
|
91
101
|
from runbooks.common.rich_utils import console, create_table, print_banner, print_header, print_status
|
92
102
|
from runbooks.config import load_config, save_config
|
93
103
|
from runbooks.inventory.core.collector import InventoryCollector
|
@@ -102,10 +112,10 @@ console = Console()
|
|
102
112
|
|
103
113
|
def get_account_id_for_context(profile: str = "default") -> str:
|
104
114
|
"""
|
105
|
-
Resolve actual AWS account ID for context creation.
|
115
|
+
Resolve actual AWS account ID for context creation using enterprise profile management.
|
106
116
|
|
107
117
|
This replaces hardcoded 'current' strings with actual account IDs
|
108
|
-
to fix Pydantic validation failures.
|
118
|
+
to fix Pydantic validation failures. Uses the proven three-tier profile system.
|
109
119
|
|
110
120
|
Args:
|
111
121
|
profile: AWS profile name
|
@@ -114,11 +124,14 @@ def get_account_id_for_context(profile: str = "default") -> str:
|
|
114
124
|
12-digit AWS account ID string
|
115
125
|
"""
|
116
126
|
try:
|
117
|
-
|
127
|
+
# Use enterprise profile management for session creation
|
128
|
+
resolved_profile = get_profile_for_operation("management", profile)
|
129
|
+
session = create_management_session(profile)
|
118
130
|
sts = session.client("sts")
|
119
131
|
response = sts.get_caller_identity()
|
120
132
|
return response["Account"]
|
121
|
-
except Exception:
|
133
|
+
except Exception as e:
|
134
|
+
console.log(f"[yellow]Warning: Could not resolve account ID, using fallback: {e}[/yellow]")
|
122
135
|
# Fallback to a valid format if STS call fails
|
123
136
|
return "123456789012" # Valid 12-digit format for validation
|
124
137
|
|
@@ -128,6 +141,51 @@ def get_account_id_for_context(profile: str = "default") -> str:
|
|
128
141
|
# ============================================================================
|
129
142
|
|
130
143
|
|
144
|
+
def preprocess_space_separated_profiles():
|
145
|
+
"""
|
146
|
+
Preprocess sys.argv to convert space-separated profiles to comma-separated format.
|
147
|
+
|
148
|
+
Converts: --profile prof1 prof2
|
149
|
+
To: --profile prof1,prof2
|
150
|
+
|
151
|
+
This enables backward compatibility with space-separated profile syntax
|
152
|
+
while using Click's standard option parsing.
|
153
|
+
"""
|
154
|
+
import sys
|
155
|
+
|
156
|
+
# Only process if we haven't already processed
|
157
|
+
if hasattr(preprocess_space_separated_profiles, "_processed"):
|
158
|
+
return
|
159
|
+
|
160
|
+
new_argv = []
|
161
|
+
i = 0
|
162
|
+
while i < len(sys.argv):
|
163
|
+
if sys.argv[i] == "--profile" and i + 1 < len(sys.argv):
|
164
|
+
# Found --profile flag, collect all following non-flag arguments
|
165
|
+
profiles = []
|
166
|
+
new_argv.append("--profile")
|
167
|
+
i += 1
|
168
|
+
|
169
|
+
# Collect profiles until we hit another flag or end of arguments
|
170
|
+
while i < len(sys.argv) and not sys.argv[i].startswith("-"):
|
171
|
+
profiles.append(sys.argv[i])
|
172
|
+
i += 1
|
173
|
+
|
174
|
+
# Join profiles with commas and add as single argument
|
175
|
+
if profiles:
|
176
|
+
new_argv.append(",".join(profiles))
|
177
|
+
|
178
|
+
# Don't increment i here as we want to process the current argument
|
179
|
+
continue
|
180
|
+
else:
|
181
|
+
new_argv.append(sys.argv[i])
|
182
|
+
i += 1
|
183
|
+
|
184
|
+
# Replace sys.argv with processed version
|
185
|
+
sys.argv = new_argv
|
186
|
+
preprocess_space_separated_profiles._processed = True
|
187
|
+
|
188
|
+
|
131
189
|
def common_aws_options(f):
|
132
190
|
"""
|
133
191
|
Standard AWS connection and safety options for all commands.
|
@@ -142,17 +200,26 @@ def common_aws_options(f):
|
|
142
200
|
Decorated function with AWS options
|
143
201
|
|
144
202
|
Added Options:
|
145
|
-
--profile: AWS profile name
|
203
|
+
--profile: AWS profile name(s) - supports repeated flag syntax
|
146
204
|
--region: AWS region identifier (default: 'ap-southeast-2')
|
147
205
|
--dry-run: Safety flag to preview operations without execution
|
148
206
|
|
149
207
|
Examples:
|
150
208
|
```bash
|
151
209
|
runbooks inventory ec2 --profile production --region us-west-2 --dry-run
|
152
|
-
runbooks
|
210
|
+
runbooks finops --profile prof1 prof2 # Space-separated (SUPPORTED via preprocessing)
|
211
|
+
runbooks finops --profile prof1 --profile prof2 # Multiple flags (Click standard)
|
212
|
+
runbooks finops --profile prof1,prof2 # Comma-separated (Alternative)
|
153
213
|
```
|
154
214
|
"""
|
155
|
-
|
215
|
+
# FIXED: Space-separated profiles now supported via preprocessing in cli_entry_point()
|
216
|
+
# All three formats work: --profile prof1 prof2, --profile prof1 --profile prof2, --profile prof1,prof2
|
217
|
+
f = click.option(
|
218
|
+
"--profile",
|
219
|
+
multiple=True,
|
220
|
+
default=("default",), # Tuple default for multiple=True
|
221
|
+
help="AWS profile(s) - supports: --profile prof1 prof2 OR --profile prof1 --profile prof2 OR --profile prof1,prof2",
|
222
|
+
)(f)
|
156
223
|
f = click.option("--region", default="ap-southeast-2", help="AWS region (default: 'ap-southeast-2')")(f)
|
157
224
|
f = click.option("--dry-run", is_flag=True, help="Enable dry-run mode for safety")(f)
|
158
225
|
return f
|
@@ -229,7 +296,7 @@ def common_filter_options(f):
|
|
229
296
|
# ============================================================================
|
230
297
|
|
231
298
|
|
232
|
-
@click.group(
|
299
|
+
@click.group()
|
233
300
|
@click.version_option(version=__version__)
|
234
301
|
@click.option("--debug", is_flag=True, help="Enable debug logging")
|
235
302
|
@common_aws_options
|
@@ -253,6 +320,7 @@ def main(ctx, debug, profile, region, dry_run, config):
|
|
253
320
|
• runbooks security → Security baseline testing
|
254
321
|
• runbooks org → Organizations management
|
255
322
|
• runbooks finops → Cost and usage analytics
|
323
|
+
• runbooks cloudops → Business scenario automation
|
256
324
|
|
257
325
|
Safety Features:
|
258
326
|
• --dry-run mode for all operations
|
@@ -994,6 +1062,178 @@ def sync(ctx, source_bucket, destination_bucket, source_prefix, destination_pref
|
|
994
1062
|
raise click.ClickException(str(e))
|
995
1063
|
|
996
1064
|
|
1065
|
+
@s3.command()
|
1066
|
+
@click.option("--region", help="AWS region to scan (scans all regions if not specified)")
|
1067
|
+
@click.option("--bucket-names", multiple=True, help="Specific bucket names to check (checks all buckets if not specified)")
|
1068
|
+
@click.pass_context
|
1069
|
+
def find_no_lifecycle(ctx, region, bucket_names):
|
1070
|
+
"""Find S3 buckets without lifecycle policies for cost optimization."""
|
1071
|
+
try:
|
1072
|
+
from runbooks.inventory.models.account import AWSAccount
|
1073
|
+
from runbooks.operate import S3Operations
|
1074
|
+
from runbooks.operate.base import OperationContext
|
1075
|
+
|
1076
|
+
console.print(f"[blue]🔍 Finding S3 buckets without lifecycle policies...[/blue]")
|
1077
|
+
|
1078
|
+
s3_ops = S3Operations(profile=ctx.obj["profile"], region=ctx.obj["region"], dry_run=ctx.obj["dry_run"])
|
1079
|
+
account = AWSAccount(account_id=get_account_id_for_context(ctx.obj["profile"]), account_name="current")
|
1080
|
+
context = OperationContext(
|
1081
|
+
account=account,
|
1082
|
+
region=region or ctx.obj["region"],
|
1083
|
+
operation_type="find_buckets_without_lifecycle",
|
1084
|
+
resource_types=["s3:bucket"],
|
1085
|
+
dry_run=ctx.obj["dry_run"]
|
1086
|
+
)
|
1087
|
+
|
1088
|
+
bucket_list = list(bucket_names) if bucket_names else None
|
1089
|
+
results = s3_ops.find_buckets_without_lifecycle(context, region=region, bucket_names=bucket_list)
|
1090
|
+
|
1091
|
+
for result in results:
|
1092
|
+
if result.success:
|
1093
|
+
data = result.response_data
|
1094
|
+
console.print(f"[green]✅ Scan completed: {data.get('total_count', 0)} non-compliant buckets found[/green]")
|
1095
|
+
else:
|
1096
|
+
console.print(f"[red]❌ Failed to scan buckets: {result.error_message}[/red]")
|
1097
|
+
|
1098
|
+
except Exception as e:
|
1099
|
+
console.print(f"[red]❌ Operation failed: {e}[/red]")
|
1100
|
+
raise click.ClickException(str(e))
|
1101
|
+
|
1102
|
+
|
1103
|
+
@s3.command()
|
1104
|
+
@click.option("--bucket-name", required=True, help="S3 bucket name to check")
|
1105
|
+
@click.pass_context
|
1106
|
+
def get_lifecycle(ctx, bucket_name):
|
1107
|
+
"""Get current lifecycle configuration for an S3 bucket."""
|
1108
|
+
try:
|
1109
|
+
from runbooks.inventory.models.account import AWSAccount
|
1110
|
+
from runbooks.operate import S3Operations
|
1111
|
+
from runbooks.operate.base import OperationContext
|
1112
|
+
|
1113
|
+
console.print(f"[blue]🔍 Getting lifecycle configuration for bucket: {bucket_name}[/blue]")
|
1114
|
+
|
1115
|
+
s3_ops = S3Operations(profile=ctx.obj["profile"], region=ctx.obj["region"], dry_run=ctx.obj["dry_run"])
|
1116
|
+
account = AWSAccount(account_id=get_account_id_for_context(ctx.obj["profile"]), account_name="current")
|
1117
|
+
context = OperationContext(
|
1118
|
+
account=account,
|
1119
|
+
region=ctx.obj["region"],
|
1120
|
+
operation_type="get_bucket_lifecycle",
|
1121
|
+
resource_types=["s3:bucket"],
|
1122
|
+
dry_run=ctx.obj["dry_run"]
|
1123
|
+
)
|
1124
|
+
|
1125
|
+
results = s3_ops.get_bucket_lifecycle(context, bucket_name=bucket_name)
|
1126
|
+
|
1127
|
+
for result in results:
|
1128
|
+
if result.success:
|
1129
|
+
data = result.response_data
|
1130
|
+
rules_count = data.get('rules_count', 0)
|
1131
|
+
console.print(f"[green]✅ Found {rules_count} lifecycle rule(s) for bucket {bucket_name}[/green]")
|
1132
|
+
else:
|
1133
|
+
console.print(f"[red]❌ Failed to get lifecycle configuration: {result.error_message}[/red]")
|
1134
|
+
|
1135
|
+
except Exception as e:
|
1136
|
+
console.print(f"[red]❌ Operation failed: {e}[/red]")
|
1137
|
+
raise click.ClickException(str(e))
|
1138
|
+
|
1139
|
+
|
1140
|
+
@s3.command()
|
1141
|
+
@click.option("--bucket-names", multiple=True, required=True, help="S3 bucket names to apply policies to (format: bucket1,bucket2)")
|
1142
|
+
@click.option("--regions", multiple=True, help="Corresponding regions for buckets (format: us-east-1,us-west-2)")
|
1143
|
+
@click.option("--expiration-days", default=30, help="Days after which objects expire (default: 30)")
|
1144
|
+
@click.option("--prefix", default="", help="Object prefix filter for lifecycle rule")
|
1145
|
+
@click.option("--noncurrent-days", default=30, help="Days before noncurrent versions are deleted (default: 30)")
|
1146
|
+
@click.option("--transition-ia-days", type=int, help="Days before transition to IA storage class")
|
1147
|
+
@click.option("--transition-glacier-days", type=int, help="Days before transition to Glacier")
|
1148
|
+
@click.pass_context
|
1149
|
+
def add_lifecycle_bulk(ctx, bucket_names, regions, expiration_days, prefix, noncurrent_days, transition_ia_days, transition_glacier_days):
|
1150
|
+
"""Add lifecycle policies to multiple S3 buckets for cost optimization."""
|
1151
|
+
try:
|
1152
|
+
from runbooks.inventory.models.account import AWSAccount
|
1153
|
+
from runbooks.operate import S3Operations
|
1154
|
+
from runbooks.operate.base import OperationContext
|
1155
|
+
|
1156
|
+
console.print(f"[blue]📋 Adding lifecycle policies to {len(bucket_names)} bucket(s)...[/blue]")
|
1157
|
+
|
1158
|
+
# Build bucket list with regions
|
1159
|
+
bucket_list = []
|
1160
|
+
for i, bucket_name in enumerate(bucket_names):
|
1161
|
+
bucket_region = regions[i] if i < len(regions) else ctx.obj["region"]
|
1162
|
+
bucket_list.append({
|
1163
|
+
"bucket_name": bucket_name,
|
1164
|
+
"region": bucket_region
|
1165
|
+
})
|
1166
|
+
|
1167
|
+
s3_ops = S3Operations(profile=ctx.obj["profile"], region=ctx.obj["region"], dry_run=ctx.obj["dry_run"])
|
1168
|
+
account = AWSAccount(account_id=get_account_id_for_context(ctx.obj["profile"]), account_name="current")
|
1169
|
+
context = OperationContext(
|
1170
|
+
account=account,
|
1171
|
+
region=ctx.obj["region"],
|
1172
|
+
operation_type="add_lifecycle_policy_bulk",
|
1173
|
+
resource_types=["s3:bucket"],
|
1174
|
+
dry_run=ctx.obj["dry_run"]
|
1175
|
+
)
|
1176
|
+
|
1177
|
+
results = s3_ops.add_lifecycle_policy_bulk(
|
1178
|
+
context,
|
1179
|
+
bucket_list=bucket_list,
|
1180
|
+
expiration_days=expiration_days,
|
1181
|
+
prefix=prefix,
|
1182
|
+
noncurrent_days=noncurrent_days,
|
1183
|
+
transition_ia_days=transition_ia_days,
|
1184
|
+
transition_glacier_days=transition_glacier_days
|
1185
|
+
)
|
1186
|
+
|
1187
|
+
successful = len([r for r in results if r.success])
|
1188
|
+
failed = len(results) - successful
|
1189
|
+
|
1190
|
+
console.print(f"[bold]Bulk Lifecycle Policy Summary:[/bold]")
|
1191
|
+
console.print(f"[green]✅ Successful: {successful}[/green]")
|
1192
|
+
if failed > 0:
|
1193
|
+
console.print(f"[red]❌ Failed: {failed}[/red]")
|
1194
|
+
|
1195
|
+
except Exception as e:
|
1196
|
+
console.print(f"[red]❌ Operation failed: {e}[/red]")
|
1197
|
+
raise click.ClickException(str(e))
|
1198
|
+
|
1199
|
+
|
1200
|
+
@s3.command()
|
1201
|
+
@click.option("--region", help="AWS region to analyze (analyzes all regions if not specified)")
|
1202
|
+
@click.pass_context
|
1203
|
+
def analyze_lifecycle_compliance(ctx, region):
|
1204
|
+
"""Analyze S3 lifecycle compliance and provide cost optimization recommendations."""
|
1205
|
+
try:
|
1206
|
+
from runbooks.inventory.models.account import AWSAccount
|
1207
|
+
from runbooks.operate import S3Operations
|
1208
|
+
from runbooks.operate.base import OperationContext
|
1209
|
+
|
1210
|
+
console.print(f"[blue]📊 Analyzing S3 lifecycle compliance across account...[/blue]")
|
1211
|
+
|
1212
|
+
s3_ops = S3Operations(profile=ctx.obj["profile"], region=ctx.obj["region"], dry_run=ctx.obj["dry_run"])
|
1213
|
+
account = AWSAccount(account_id=get_account_id_for_context(ctx.obj["profile"]), account_name="current")
|
1214
|
+
context = OperationContext(
|
1215
|
+
account=account,
|
1216
|
+
region=region or ctx.obj["region"],
|
1217
|
+
operation_type="analyze_lifecycle_compliance",
|
1218
|
+
resource_types=["s3:account"],
|
1219
|
+
dry_run=ctx.obj["dry_run"]
|
1220
|
+
)
|
1221
|
+
|
1222
|
+
results = s3_ops.analyze_lifecycle_compliance(context, region=region)
|
1223
|
+
|
1224
|
+
for result in results:
|
1225
|
+
if result.success:
|
1226
|
+
data = result.response_data
|
1227
|
+
compliance_pct = data.get('compliance_percentage', 0)
|
1228
|
+
console.print(f"[green]✅ Analysis completed: {compliance_pct:.1f}% compliance rate[/green]")
|
1229
|
+
else:
|
1230
|
+
console.print(f"[red]❌ Failed to analyze compliance: {result.error_message}[/red]")
|
1231
|
+
|
1232
|
+
except Exception as e:
|
1233
|
+
console.print(f"[red]❌ Operation failed: {e}[/red]")
|
1234
|
+
raise click.ClickException(str(e))
|
1235
|
+
|
1236
|
+
|
997
1237
|
@operate.group()
|
998
1238
|
@click.pass_context
|
999
1239
|
def cloudformation(ctx):
|
@@ -3141,20 +3381,24 @@ def assess(ctx, checks, export_formats):
|
|
3141
3381
|
from runbooks.security.security_baseline_tester import SecurityBaselineTester
|
3142
3382
|
|
3143
3383
|
console.print(f"[blue]🔒 Starting Security Assessment[/blue]")
|
3144
|
-
console.print(
|
3384
|
+
console.print(
|
3385
|
+
f"[dim]Profile: {ctx.obj['profile']} | Language: {ctx.obj['language']} | Export: {', '.join(export_formats)}[/dim]"
|
3386
|
+
)
|
3145
3387
|
|
3146
3388
|
# Initialize tester with export formats
|
3147
3389
|
tester = SecurityBaselineTester(
|
3148
|
-
profile=ctx.obj["profile"],
|
3149
|
-
lang_code=ctx.obj["language"],
|
3390
|
+
profile=ctx.obj["profile"],
|
3391
|
+
lang_code=ctx.obj["language"],
|
3150
3392
|
output_dir=ctx.obj.get("output_file"),
|
3151
|
-
export_formats=list(export_formats)
|
3393
|
+
export_formats=list(export_formats),
|
3152
3394
|
)
|
3153
3395
|
|
3154
3396
|
# Run assessment with Rich CLI
|
3155
3397
|
tester.run()
|
3156
3398
|
|
3157
|
-
console.print(
|
3399
|
+
console.print(
|
3400
|
+
f"[green]✅ Security assessment completed with export formats: {', '.join(export_formats)}[/green]"
|
3401
|
+
)
|
3158
3402
|
|
3159
3403
|
except Exception as e:
|
3160
3404
|
console.print(f"[red]❌ Security assessment failed: {e}[/red]")
|
@@ -4787,197 +5031,800 @@ def auto_fix(ctx, findings_file, severity, max_operations):
|
|
4787
5031
|
|
4788
5032
|
|
4789
5033
|
# ============================================================================
|
4790
|
-
#
|
5034
|
+
# SRE COMMANDS (Site Reliability Engineering)
|
4791
5035
|
# ============================================================================
|
4792
5036
|
|
4793
5037
|
|
4794
|
-
@
|
4795
|
-
@common_aws_options
|
4796
|
-
@click.option("--time-range", type=int, help="Time range in days (default: current month)")
|
5038
|
+
@click.command("sre")
|
4797
5039
|
@click.option(
|
4798
|
-
"--
|
5040
|
+
"--action",
|
5041
|
+
type=click.Choice(["health", "recovery", "optimize", "suite"]),
|
5042
|
+
default="health",
|
5043
|
+
help="SRE action to perform",
|
4799
5044
|
)
|
4800
|
-
@click.option("--
|
4801
|
-
@click.option("--
|
4802
|
-
@click.option("--
|
4803
|
-
@click.option("--regions", multiple=True, help="AWS regions to check")
|
4804
|
-
@click.option("--all", is_flag=True, help="Use all available AWS profiles")
|
4805
|
-
@click.option("--combine", is_flag=True, help="Combine profiles from the same AWS account")
|
4806
|
-
@click.option("--tag", multiple=True, help="Cost allocation tag to filter resources")
|
4807
|
-
@click.option("--trend", is_flag=True, help="Display trend report for past 6 months")
|
4808
|
-
@click.option("--audit", is_flag=True, help="Display audit report with cost anomalies and resource optimization")
|
5045
|
+
@click.option("--config", type=click.Path(), help="MCP configuration file path")
|
5046
|
+
@click.option("--save-report", is_flag=True, help="Save detailed report to artifacts")
|
5047
|
+
@click.option("--continuous", is_flag=True, help="Run continuous monitoring")
|
4809
5048
|
@click.pass_context
|
4810
|
-
def
|
4811
|
-
ctx,
|
4812
|
-
profile,
|
4813
|
-
region,
|
4814
|
-
dry_run,
|
4815
|
-
time_range,
|
4816
|
-
report_type,
|
4817
|
-
report_name,
|
4818
|
-
dir,
|
4819
|
-
profiles,
|
4820
|
-
regions,
|
4821
|
-
all,
|
4822
|
-
combine,
|
4823
|
-
tag,
|
4824
|
-
trend,
|
4825
|
-
audit,
|
4826
|
-
):
|
5049
|
+
def sre_reliability(ctx, action, config, save_report, continuous):
|
4827
5050
|
"""
|
4828
|
-
|
5051
|
+
SRE Automation - Enterprise MCP Reliability & Infrastructure Monitoring
|
4829
5052
|
|
4830
|
-
|
4831
|
-
|
5053
|
+
Provides comprehensive Site Reliability Engineering automation including:
|
5054
|
+
- MCP server health monitoring and diagnostics
|
5055
|
+
- Automated failure detection and recovery
|
5056
|
+
- Performance optimization and SLA validation
|
5057
|
+
- >99.9% uptime target achievement
|
4832
5058
|
|
4833
5059
|
Examples:
|
4834
|
-
runbooks
|
4835
|
-
runbooks
|
4836
|
-
runbooks
|
5060
|
+
runbooks sre --action health # Health check all MCP servers
|
5061
|
+
runbooks sre --action recovery # Automated recovery procedures
|
5062
|
+
runbooks sre --action optimize # Performance optimization
|
5063
|
+
runbooks sre --action suite # Complete reliability suite
|
4837
5064
|
"""
|
5065
|
+
import asyncio
|
4838
5066
|
|
4839
|
-
|
4840
|
-
|
4841
|
-
import argparse
|
5067
|
+
from runbooks.common.rich_utils import console, print_error, print_info, print_success
|
5068
|
+
from runbooks.sre.mcp_reliability_engine import MCPReliabilityEngine, run_mcp_reliability_suite
|
4842
5069
|
|
4843
|
-
|
5070
|
+
try:
|
5071
|
+
print_info(f"🚀 Starting SRE automation - Action: {action}")
|
4844
5072
|
|
4845
|
-
|
4846
|
-
|
4847
|
-
|
4848
|
-
dry_run=dry_run,
|
4849
|
-
time_range=time_range,
|
4850
|
-
report_type=list(report_type),
|
4851
|
-
report_name=report_name,
|
4852
|
-
dir=dir,
|
4853
|
-
profiles=list(profiles) if profiles else None,
|
4854
|
-
regions=list(regions) if regions else None,
|
4855
|
-
all=all,
|
4856
|
-
combine=combine,
|
4857
|
-
tag=list(tag) if tag else None,
|
4858
|
-
trend=trend,
|
4859
|
-
audit=audit,
|
4860
|
-
config_file=None, # Not exposed in Click interface yet
|
4861
|
-
)
|
4862
|
-
return run_dashboard(args)
|
4863
|
-
else:
|
4864
|
-
# Pass context to subcommands
|
4865
|
-
ctx.obj.update(
|
4866
|
-
{
|
4867
|
-
"profile": profile,
|
4868
|
-
"region": region,
|
4869
|
-
"dry_run": dry_run,
|
4870
|
-
"time_range": time_range,
|
4871
|
-
"report_type": list(report_type),
|
4872
|
-
"report_name": report_name,
|
4873
|
-
"dir": dir,
|
4874
|
-
"profiles": list(profiles) if profiles else None,
|
4875
|
-
"regions": list(regions) if regions else None,
|
4876
|
-
"all": all,
|
4877
|
-
"combine": combine,
|
4878
|
-
"tag": list(tag) if tag else None,
|
4879
|
-
"trend": trend,
|
4880
|
-
"audit": audit,
|
4881
|
-
}
|
4882
|
-
)
|
5073
|
+
if action == "suite":
|
5074
|
+
# Run complete reliability suite
|
5075
|
+
results = asyncio.run(run_mcp_reliability_suite())
|
4883
5076
|
|
5077
|
+
if results.get("overall_success", False):
|
5078
|
+
print_success("✅ SRE Reliability Suite completed successfully")
|
5079
|
+
console.print(f"🎯 Final Health: {results.get('final_health_percentage', 0):.1f}%")
|
5080
|
+
console.print(f"📈 Improvement: +{results.get('health_improvement', 0):.1f}%")
|
5081
|
+
else:
|
5082
|
+
print_error("❌ SRE Reliability Suite encountered issues")
|
5083
|
+
console.print("🔧 Review detailed logs for remediation guidance")
|
4884
5084
|
|
4885
|
-
|
4886
|
-
#
|
4887
|
-
|
5085
|
+
else:
|
5086
|
+
# Initialize reliability engine for specific actions
|
5087
|
+
from pathlib import Path
|
4888
5088
|
|
5089
|
+
config_path = Path(config) if config else None
|
5090
|
+
reliability_engine = MCPReliabilityEngine(config_path=config_path)
|
4889
5091
|
|
4890
|
-
|
4891
|
-
|
4892
|
-
|
5092
|
+
if action == "health":
|
5093
|
+
# Health check only
|
5094
|
+
results = asyncio.run(reliability_engine.run_comprehensive_health_check())
|
4893
5095
|
|
4894
|
-
|
4895
|
-
|
4896
|
-
|
5096
|
+
if results["health_percentage"] >= 99.9:
|
5097
|
+
print_success(f"✅ All systems healthy: {results['health_percentage']:.1f}%")
|
5098
|
+
else:
|
5099
|
+
console.print(f"⚠️ Health: {results['health_percentage']:.1f}% - Review recommendations")
|
4897
5100
|
|
5101
|
+
elif action == "recovery":
|
5102
|
+
# Automated recovery procedures
|
5103
|
+
results = asyncio.run(reliability_engine.implement_automated_recovery())
|
4898
5104
|
|
4899
|
-
|
4900
|
-
|
4901
|
-
|
5105
|
+
actions_taken = len(results.get("actions_taken", []))
|
5106
|
+
if actions_taken > 0:
|
5107
|
+
print_success(f"🔄 Recovery completed: {actions_taken} actions taken")
|
5108
|
+
else:
|
5109
|
+
print_info("✅ No recovery needed - all systems healthy")
|
4902
5110
|
|
4903
|
-
|
5111
|
+
elif action == "optimize":
|
5112
|
+
# Performance optimization
|
5113
|
+
results = asyncio.run(reliability_engine.run_performance_optimization())
|
4904
5114
|
|
4905
|
-
|
4906
|
-
|
4907
|
-
|
5115
|
+
optimizations = results.get("optimizations_applied", 0)
|
5116
|
+
if optimizations > 0:
|
5117
|
+
print_success(f"⚡ Optimization completed: {optimizations} improvements applied")
|
5118
|
+
else:
|
5119
|
+
print_info("✅ Performance already optimal")
|
4908
5120
|
|
4909
|
-
|
4910
|
-
|
4911
|
-
|
4912
|
-
|
4913
|
-
|
4914
|
-
formatter.to_html(output_file)
|
4915
|
-
elif output_format == "yaml":
|
4916
|
-
formatter.to_yaml(output_file)
|
5121
|
+
# Save detailed report if requested
|
5122
|
+
if save_report and "results" in locals():
|
5123
|
+
import json
|
5124
|
+
from datetime import datetime
|
5125
|
+
from pathlib import Path
|
4917
5126
|
|
4918
|
-
|
5127
|
+
artifacts_dir = Path("./artifacts/sre")
|
5128
|
+
artifacts_dir.mkdir(parents=True, exist_ok=True)
|
4919
5129
|
|
5130
|
+
timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
|
5131
|
+
report_file = artifacts_dir / f"sre_report_{action}_{timestamp}.json"
|
4920
5132
|
|
4921
|
-
|
4922
|
-
|
4923
|
-
console.print(f"\n[bold blue]📊 Cloud Foundations Assessment Results[/bold blue]")
|
4924
|
-
console.print(f"[dim]Score: {report.summary.compliance_score}/100 | Risk: {report.summary.risk_level}[/dim]")
|
5133
|
+
with open(report_file, "w") as f:
|
5134
|
+
json.dump(results, f, indent=2, default=str)
|
4925
5135
|
|
4926
|
-
|
4927
|
-
from rich.table import Table
|
5136
|
+
print_success(f"📊 Detailed report saved: {report_file}")
|
4928
5137
|
|
4929
|
-
|
4930
|
-
|
4931
|
-
|
4932
|
-
|
5138
|
+
# Continuous monitoring mode
|
5139
|
+
if continuous:
|
5140
|
+
print_info("🔄 Starting continuous monitoring mode...")
|
5141
|
+
console.print("Press Ctrl+C to stop monitoring")
|
4933
5142
|
|
4934
|
-
|
4935
|
-
|
4936
|
-
|
4937
|
-
table.add_row(
|
4938
|
-
"Critical Issues",
|
4939
|
-
str(report.summary.critical_issues),
|
4940
|
-
"🚨 Review Required" if report.summary.critical_issues > 0 else "✅ None",
|
4941
|
-
)
|
5143
|
+
try:
|
5144
|
+
while True:
|
5145
|
+
import time
|
4942
5146
|
|
4943
|
-
|
5147
|
+
time.sleep(60) # Check every minute
|
4944
5148
|
|
5149
|
+
# Quick health check
|
5150
|
+
reliability_engine = MCPReliabilityEngine()
|
5151
|
+
health_summary = reliability_engine.connection_pool.get_health_summary()
|
4945
5152
|
|
4946
|
-
|
4947
|
-
|
4948
|
-
|
4949
|
-
timestamp = report.timestamp.strftime("%Y%m%d_%H%M%S")
|
4950
|
-
output_file = f"cfat_report_{timestamp}.{output_format}"
|
5153
|
+
console.print(
|
5154
|
+
f"🏥 Health: {health_summary['healthy_servers']}/{health_summary['total_servers']} servers healthy"
|
5155
|
+
)
|
4951
5156
|
|
4952
|
-
|
4953
|
-
|
4954
|
-
elif output_format == "json":
|
4955
|
-
report.to_json(output_file)
|
4956
|
-
elif output_format == "csv":
|
4957
|
-
report.to_csv(output_file)
|
4958
|
-
elif output_format == "yaml":
|
4959
|
-
report.to_yaml(output_file)
|
5157
|
+
except KeyboardInterrupt:
|
5158
|
+
print_info("🛑 Continuous monitoring stopped")
|
4960
5159
|
|
4961
|
-
|
5160
|
+
except Exception as e:
|
5161
|
+
print_error(f"❌ SRE automation failed: {str(e)}")
|
5162
|
+
logger.error(f"SRE reliability command failed: {str(e)}")
|
5163
|
+
raise click.ClickException(str(e))
|
4962
5164
|
|
4963
5165
|
|
4964
|
-
|
4965
|
-
|
4966
|
-
from rich.table import Table
|
5166
|
+
# Add SRE command to CLI
|
5167
|
+
main.add_command(sre_reliability)
|
4967
5168
|
|
4968
|
-
table = Table(title="AWS Organizations Structure")
|
4969
|
-
table.add_column("Name", style="cyan")
|
4970
|
-
table.add_column("ID", style="green")
|
4971
|
-
table.add_column("Level", justify="center")
|
4972
|
-
table.add_column("Parent ID", style="blue")
|
4973
5169
|
|
4974
|
-
|
4975
|
-
|
4976
|
-
|
4977
|
-
f"{indent}{ou.get('Name', 'Unknown')}", ou.get("Id", ""), str(ou.get("Level", 0)), ou.get("ParentId", "")
|
4978
|
-
)
|
5170
|
+
# ============================================================================
|
5171
|
+
# CLOUDOPS COMMANDS (Business Scenario Automation)
|
5172
|
+
# ============================================================================
|
4979
5173
|
|
4980
|
-
|
5174
|
+
@click.group()
def cloudops():
    """CloudOps business scenario automation for cost optimization, security enforcement, and governance."""
    # Pure command container: subgroups (cost, security, governance) register
    # their own commands against this group; the group body itself does no work.
    pass
5178
|
+
|
5179
|
+
@cloudops.group()
def cost():
    """Cost optimization scenarios for emergency response and routine optimization."""
    # Container group only; concrete commands (mcp_validation, emergency_response,
    # nat_gateways, emergency) are attached via @cost.command() below.
    pass
5183
|
+
|
5184
|
+
@cost.command()
@click.option('--billing-profile', default='ams-admin-Billing-ReadOnlyAccess-909135376185', help='AWS billing profile with Cost Explorer access')
@click.option('--management-profile', default='ams-admin-ReadOnlyAccess-909135376185', help='AWS management profile with Organizations access')
@click.option('--tolerance-percent', default=5.0, help='MCP cross-validation tolerance percentage')
@click.option('--performance-target-ms', default=30000.0, help='Performance target in milliseconds')
@click.option('--export-evidence/--no-export', default=True, help='Export DoD validation evidence')
@common_aws_options
@click.pass_context
def mcp_validation(ctx, billing_profile, management_profile, tolerance_percent, performance_target_ms, export_evidence, profile, region):
    """
    MCP-validated cost optimization with comprehensive DoD validation.

    Technical Features:
    - Real-time Cost Explorer MCP validation
    - Cross-validation between estimates and AWS APIs
    - Performance benchmarking with sub-30s targets
    - Comprehensive evidence generation for DoD compliance

    Business Impact:
    - Replaces ALL estimated costs with real AWS data
    - >99.9% reliability through MCP cross-validation
    - Executive-ready reports with validated projections
    """
    # Lazy imports keep `runbooks --help` fast and cloudops deps optional.
    import asyncio
    from runbooks.cloudops.mcp_cost_validation import MCPCostValidationEngine
    from runbooks.common.rich_utils import console, print_header, print_success, print_error

    print_header("MCP Cost Validation - Technical CLI", "1.0.0")

    async def run_mcp_validation() -> int:
        # Returns a process exit code instead of calling ctx.exit() here.
        # BUG FIX: ctx.exit() raises click.exceptions.Exit, which subclasses
        # RuntimeError/Exception, so the previous code's broad `except Exception`
        # intercepted the success-path ctx.exit(0), printed
        # "MCP validation failed: 0" and exited with status 1.
        try:
            # Initialize MCP validation engine; fall back to the generic
            # --profile for either role when the dedicated option is empty.
            validation_engine = MCPCostValidationEngine(
                billing_profile=billing_profile or profile,
                management_profile=management_profile or profile,
                tolerance_percent=tolerance_percent,
                performance_target_ms=performance_target_ms
            )

            # Run comprehensive test suite
            test_results = await validation_engine.run_comprehensive_cli_test_suite()

            if export_evidence:
                # Export DoD validation report (may return a falsy path on failure).
                report_file = await validation_engine.export_dod_validation_report(test_results)
                if report_file:
                    print_success(f"📊 DoD validation report: {report_file}")

            # Summary: a single failed scenario makes the command exit non-zero.
            passed_tests = sum(1 for r in test_results if r.success)
            total_tests = len(test_results)

            if passed_tests == total_tests:
                print_success(f"✅ All {total_tests} MCP validation tests passed")
                return 0
            print_error(f"❌ {total_tests - passed_tests}/{total_tests} tests failed")
            return 1

        except Exception as e:
            print_error(f"MCP validation failed: {str(e)}")
            return 1

    try:
        exit_code = asyncio.run(run_mcp_validation())
    except KeyboardInterrupt:
        console.print("\n⚠️ MCP validation interrupted by user")
        exit_code = 130  # conventional SIGINT exit status
    # Exit exactly once, after all async work has settled.
    ctx.exit(exit_code)
5252
|
+
|
5253
|
+
@cost.command()
@click.option('--spike-threshold', default=25000.0, help='Cost spike threshold ($) that triggered emergency')
@click.option('--target-savings', default=30.0, help='Target cost reduction percentage')
@click.option('--analysis-days', default=7, help='Days to analyze for cost trends')
@click.option('--max-risk', default='medium', type=click.Choice(['low', 'medium', 'high']), help='Maximum acceptable risk level')
@click.option('--enable-mcp/--disable-mcp', default=True, help='Enable MCP cross-validation')
@click.option('--export-reports/--no-export', default=True, help='Export executive reports')
@common_aws_options
@click.pass_context
def emergency_response(ctx, spike_threshold, target_savings, analysis_days, max_risk, enable_mcp, export_reports, profile, region):
    """
    Emergency cost spike response with MCP validation.

    Business Scenario:
    - Rapid response to unexpected AWS cost spikes requiring immediate executive action
    - Typical triggers: Monthly bill increase >$5K, daily spending >200% budget
    - Target response time: <30 minutes for initial analysis and action plan

    Technical Features:
    - MCP Cost Explorer validation for real financial data
    - Cross-validation of cost projections against actual spend
    - Executive-ready reports with validated savings opportunities
    """
    # Lazy imports keep CLI start-up cheap and cloudops deps optional.
    from runbooks.cloudops.interfaces import emergency_cost_response
    from runbooks.cloudops.mcp_cost_validation import MCPCostValidationEngine
    from runbooks.common.rich_utils import console, print_header, print_success, print_error
    import asyncio

    print_header("Emergency Cost Response - MCP Validated", "1.0.0")

    try:
        # Execute emergency cost response via business interface.
        result = emergency_cost_response(
            profile=profile,
            cost_spike_threshold=spike_threshold,
            target_savings_percent=target_savings,
            analysis_days=analysis_days,
            max_risk_level=max_risk,
            require_approval=True,
            dry_run=True  # Always safe for CLI usage
        )

        # Display executive summary
        console.print(result.executive_summary)

        # MCP validation if enabled
        if enable_mcp:
            print_header("MCP Cross-Validation", "1.0.0")

            async def run_mcp_validation():
                validation_engine = MCPCostValidationEngine(
                    billing_profile=profile,
                    management_profile=profile,
                    tolerance_percent=5.0,
                    performance_target_ms=30000.0
                )

                # Validate emergency response scenario; the expected savings
                # window is 10%-50% of the spike that triggered the emergency.
                test_result = await validation_engine.validate_cost_optimization_scenario(
                    scenario_name='emergency_cost_response_validation',
                    cost_optimizer_params={
                        'profile': profile,
                        'cost_spike_threshold': spike_threshold,
                        'analysis_days': analysis_days
                    },
                    expected_savings_range=(spike_threshold * 0.1, spike_threshold * 0.5)
                )

                if test_result.success and test_result.mcp_validation:
                    print_success("✅ MCP validation passed - cost projections verified")
                    print_success(f"📊 Variance: {test_result.mcp_validation.variance_percentage:.2f}%")
                else:
                    print_error("⚠️ MCP validation encountered issues - review cost projections")

                return test_result

            try:
                mcp_result = asyncio.run(run_mcp_validation())
            except Exception as e:
                # MCP validation is best-effort: a failure is reported but does
                # not abort the emergency response itself.
                print_error(f"MCP validation failed: {str(e)}")
                print_error("📞 Contact CloudOps team for AWS Cost Explorer access configuration")

        # Export reports if requested
        if export_reports:
            exported = result.export_reports('/tmp/emergency-cost-reports')
            if exported.get('json'):
                print_success(f"📊 Executive reports exported to: /tmp/emergency-cost-reports")

        # BUG FIX: decide the exit code here but call ctx.exit() only after the
        # try/except. ctx.exit() raises click.exceptions.Exit (an Exception
        # subclass), so calling it inside this try block let the broad handler
        # below catch the success-path exit, print a bogus
        # "Emergency cost response failed: 0" and exit 1.
        if result.success:
            print_success("✅ Emergency cost response completed successfully")
            exit_code = 0
        else:
            print_error("❌ Emergency cost response encountered issues")
            exit_code = 1

    except Exception as e:
        print_error(f"Emergency cost response failed: {str(e)}")
        exit_code = 1

    ctx.exit(exit_code)
5352
|
+
|
5353
|
+
@cost.command()
@click.option('--regions', multiple=True, help='Target AWS regions')
@click.option('--idle-days', default=7, help='Days to consider NAT Gateway idle')
@click.option('--cost-threshold', default=0.0, help='Minimum monthly cost threshold ($)')
@click.option('--dry-run/--execute', default=True, help='Dry run mode (safe analysis)')
@common_aws_options
@click.pass_context
def nat_gateways(ctx, regions, idle_days, cost_threshold, dry_run, profile, region):
    """
    Optimize unused NAT Gateways - typical savings $45-90/month each.

    Business Impact:
    - Cost reduction: $45-90/month per unused NAT Gateway
    - Risk level: Low (network connectivity analysis performed)
    - Implementation time: 15-30 minutes
    """
    # Imported lazily so `runbooks --help` stays fast and cloudops deps are optional.
    import asyncio
    from runbooks.cloudops import CostOptimizer
    from runbooks.cloudops.models import ExecutionMode
    from runbooks.common.rich_utils import console, print_header

    print_header("NAT Gateway Cost Optimization", "1.0.0")

    try:
        # Initialize cost optimizer.
        # --dry-run (the default) requests analysis-only mode; --execute flips
        # both the flag and the ExecutionMode so the optimizer may make changes.
        execution_mode = ExecutionMode.DRY_RUN if dry_run else ExecutionMode.EXECUTE
        optimizer = CostOptimizer(profile=profile, dry_run=dry_run, execution_mode=execution_mode)

        # Execute NAT Gateway optimization.
        # regions=None delegates region selection to the optimizer — TODO confirm
        # which region set it uses by default.
        result = asyncio.run(optimizer.optimize_nat_gateways(
            regions=list(regions) if regions else None,
            idle_threshold_days=idle_days,
            cost_threshold=cost_threshold
        ))

        console.print(f"\n✅ NAT Gateway optimization completed")
        console.print(f"💰 Potential monthly savings: ${result.business_metrics.total_monthly_savings:,.2f}")

    except Exception as e:
        # Re-raise through click so the process exits non-zero with a clean message.
        console.print(f"❌ NAT Gateway optimization failed: {str(e)}", style="red")
        raise click.ClickException(str(e))
5394
|
+
|
5395
|
+
@cost.command()
@click.option('--spike-threshold', default=5000.0, help='Cost spike threshold ($) that triggered emergency')
@click.option('--analysis-days', default=7, help='Days to analyze for cost trends')
@common_aws_options
@click.pass_context
def emergency(ctx, spike_threshold, analysis_days, profile, region):
    """
    Emergency cost spike response - rapid analysis and remediation.

    Business Impact:
    - Response time: <30 minutes for initial analysis
    - Target savings: 25-50% of spike amount
    - Risk level: Medium (rapid changes require monitoring)
    """
    # Lazy imports: keep CLI start-up cheap.
    import asyncio
    from runbooks.cloudops import CostOptimizer
    from runbooks.common.rich_utils import console, print_header

    print_header("Emergency Cost Spike Response", "1.0.0")

    try:
        optimizer = CostOptimizer(profile=profile, dry_run=True)  # Always dry run for emergency analysis

        result = asyncio.run(optimizer.emergency_cost_response(
            cost_spike_threshold=spike_threshold,
            analysis_days=analysis_days
        ))

        console.print(f"\n🚨 Emergency cost analysis completed")
        console.print(f"💰 Immediate savings identified: ${result.business_metrics.total_monthly_savings:,.2f}")
        console.print(f"⏱️ Analysis time: {result.execution_time:.1f} seconds")

    except Exception as e:
        # Surface failure through click so the process exits non-zero.
        console.print(f"❌ Emergency cost response failed: {str(e)}", style="red")
        raise click.ClickException(str(e))
5430
|
+
|
5431
|
+
@cloudops.group()
def security():
    """Security enforcement scenarios for compliance and risk reduction."""
    # Container group only; commands such as s3_encryption are registered below.
    pass
5435
|
+
|
5436
|
+
@security.command()
@click.option('--regions', multiple=True, help='Target AWS regions')
@click.option('--dry-run/--execute', default=True, help='Dry run mode')
@common_aws_options
@click.pass_context
def s3_encryption(ctx, regions, dry_run, profile, region):
    """
    Enforce S3 bucket encryption for compliance (SOC2, PCI-DSS, HIPAA).

    Business Impact:
    - Compliance improvement: SOC2, PCI-DSS, HIPAA requirements
    - Risk reduction: Data protection and regulatory compliance
    - Implementation time: 10-20 minutes
    """
    # Lazy imports: keep CLI start-up cheap and cloudops deps optional.
    import asyncio
    from runbooks.cloudops import SecurityEnforcer
    from runbooks.cloudops.models import ExecutionMode
    from runbooks.common.rich_utils import console, print_header

    print_header("S3 Encryption Compliance Enforcement", "1.0.0")

    try:
        # --dry-run (default) keeps enforcement read-only; --execute applies changes.
        execution_mode = ExecutionMode.DRY_RUN if dry_run else ExecutionMode.EXECUTE
        enforcer = SecurityEnforcer(profile=profile, dry_run=dry_run, execution_mode=execution_mode)

        result = asyncio.run(enforcer.enforce_s3_encryption(
            regions=list(regions) if regions else None
        ))

        console.print(f"\n🔒 S3 encryption enforcement completed")
        # compliance_score_after is not guaranteed on every result type — guard it.
        if hasattr(result, 'compliance_score_after'):
            console.print(f"📈 Compliance score: {result.compliance_score_after:.1f}%")

    except Exception as e:
        console.print(f"❌ S3 encryption enforcement failed: {str(e)}", style="red")
        raise click.ClickException(str(e))
5472
|
+
|
5473
|
+
@cloudops.group()
def governance():
    """Multi-account governance campaigns for organizational compliance."""
    # Container group only; the `campaign` command is registered below.
    pass
5477
|
+
|
5478
|
+
@governance.command()
@click.option('--scope', type=click.Choice(['ORGANIZATION', 'OU', 'ACCOUNT_LIST']), default='ORGANIZATION', help='Governance campaign scope')
@click.option('--target-compliance', default=95.0, help='Target compliance percentage')
@click.option('--max-accounts', default=10, help='Maximum concurrent accounts to process')
@common_aws_options
@click.pass_context
def campaign(ctx, scope, target_compliance, max_accounts, profile, region):
    """
    Execute organization-wide governance campaign.

    Business Impact:
    - Governance compliance: >95% across organization
    - Cost optimization: 15-25% through standardization
    - Operational efficiency: 60% reduction in manual tasks
    """
    from runbooks.cloudops import ResourceLifecycleManager
    from runbooks.common.rich_utils import console, print_header

    print_header("Multi-Account Governance Campaign", "1.0.0")

    try:
        # NOTE(review): the manager is instantiated but never invoked below —
        # this command currently only announces the campaign parameters and
        # points the operator at the notebook that performs the real work.
        lifecycle_manager = ResourceLifecycleManager(profile=profile, dry_run=True)

        console.print(f"🏛️ Initiating governance campaign")
        console.print(f"📊 Scope: {scope}")
        console.print(f"🎯 Target compliance: {target_compliance}%")
        console.print(f"⚡ Max concurrent accounts: {max_accounts}")

        # This would execute the comprehensive governance campaign
        console.print(f"\n✅ Governance campaign framework initialized")
        console.print(f"📋 Use notebooks/cloudops-scenarios/multi-account-governance-campaign.ipynb for full execution")

    except Exception as e:
        console.print(f"❌ Governance campaign failed: {str(e)}", style="red")
        raise click.ClickException(str(e))
5513
|
+
|
5514
|
+
# Add CloudOps command to main CLI
|
5515
|
+
main.add_command(cloudops)
|
5516
|
+
|
5517
|
+
|
5518
|
+
# ============================================================================
|
5519
|
+
# FINOPS COMMANDS (Cost & Usage Analytics)
|
5520
|
+
# ============================================================================
|
5521
|
+
|
5522
|
+
|
5523
|
+
def _parse_profiles_parameter(profiles_tuple):
|
5524
|
+
"""Parse profiles parameter to handle multiple formats:
|
5525
|
+
- Multiple --profiles options: --profiles prof1 --profiles prof2
|
5526
|
+
- Comma-separated in single option: --profiles "prof1,prof2"
|
5527
|
+
- Space-separated in single option: --profiles "prof1 prof2"
|
5528
|
+
"""
|
5529
|
+
if not profiles_tuple:
|
5530
|
+
return None
|
5531
|
+
|
5532
|
+
all_profiles = []
|
5533
|
+
for profile_item in profiles_tuple:
|
5534
|
+
# Handle comma or space separated profiles in a single item
|
5535
|
+
if "," in profile_item:
|
5536
|
+
all_profiles.extend([p.strip() for p in profile_item.split(",")])
|
5537
|
+
elif " " in profile_item:
|
5538
|
+
all_profiles.extend([p.strip() for p in profile_item.split()])
|
5539
|
+
else:
|
5540
|
+
all_profiles.append(profile_item.strip())
|
5541
|
+
|
5542
|
+
return [p for p in all_profiles if p] # Remove empty strings
|
5543
|
+
|
5544
|
+
|
5545
|
+
@main.command()
@common_aws_options
@click.option("--time-range", type=int, help="Time range in days (default: current month)")
@click.option("--report-type", type=click.Choice(["csv", "json", "pdf", "markdown"]), help="Report type for export")
@click.option("--report-name", help="Base name for report files (without extension)")
@click.option("--dir", help="Directory to save report files (default: current directory)")
@click.option(
    "--profiles",
    multiple=True,
    help="AWS profiles: --profiles prof1 prof2 OR --profiles 'prof1,prof2' OR --profiles prof1 --profiles prof2",
)
@click.option("--regions", multiple=True, help="AWS regions to check")
@click.option("--all", is_flag=True, help="Use all available AWS profiles")
@click.option("--combine", is_flag=True, help="Combine profiles from the same AWS account")
@click.option("--tag", multiple=True, help="Cost allocation tag to filter resources")
@click.option("--trend", is_flag=True, help="Display trend report for past 6 months")
@click.option("--audit", is_flag=True, help="Display audit report with cost anomalies and resource optimization")
@click.option("--pdf", is_flag=True, help="Generate PDF report (convenience flag for --report-type pdf)")
@click.option(
    "--export-markdown", "--markdown", is_flag=True, help="Generate Rich-styled markdown export with 10-column format"
)
@click.option(
    "--profile-display-length",
    type=int,
    help="Maximum characters for profile name display (optional, no truncation if not specified)",
)
@click.option(
    "--service-name-length",
    type=int,
    help="Maximum characters for service name display (optional, no truncation if not specified)",
)
@click.option(
    "--max-services-text",
    type=int,
    help="Maximum number of services in text summaries (optional, no limit if not specified)",
)
@click.option(
    "--high-cost-threshold", type=float, default=5000, help="High cost threshold for highlighting (default: 5000)"
)
@click.option(
    "--medium-cost-threshold", type=float, default=1000, help="Medium cost threshold for highlighting (default: 1000)"
)
@click.pass_context
def finops(
    ctx,
    profile,
    region,
    dry_run,
    time_range,
    report_type,
    report_name,
    dir,
    profiles,
    regions,
    all,
    combine,
    tag,
    trend,
    audit,
    pdf,
    export_markdown,
    profile_display_length,
    service_name_length,
    max_services_text,
    high_cost_threshold,
    medium_cost_threshold,
):
    """
    AWS FinOps - Cost and usage analytics.

    Comprehensive cost analysis, optimization recommendations,
    and resource utilization reporting.

    Examples:
        runbooks finops --audit --report-type csv,json,pdf --report-name audit_report
        runbooks finops --trend --report-name cost_trend
        runbooks finops --time-range 30 --report-name monthly_costs
    """
    # NOTE(review): `dir` and `all` shadow Python builtins, but they mirror the
    # CLI option names and are part of the accepted signature — do not rename.
    # NOTE(review): the first docstring example passes "csv,json,pdf" to
    # --report-type, which is a click.Choice of single values — confirm whether
    # comma lists are meant to be supported.

    # Run finops dashboard with all options
    import argparse

    # Import enhanced routing for service-per-row layout (Enterprise requirement)
    try:
        from runbooks.finops.dashboard_router import route_finops_request

        use_enhanced_routing = True
        click.echo(click.style("🚀 Using Enhanced Service-Focused Dashboard", fg="cyan", bold=True))
    except Exception as e:
        # Fall back to the legacy dashboard when the enhanced router is unavailable.
        from runbooks.finops.dashboard_runner import run_dashboard

        use_enhanced_routing = False
        click.echo(click.style(f"⚠️ Enhanced routing failed ({str(e)[:50]}), using legacy mode", fg="yellow"))

    # Handle report type logic - support --report-type, --pdf, and --export-markdown flags
    # Precedence: --pdf wins over --export-markdown, which wins over --report-type.
    report_types = []
    if pdf:
        report_types = ["pdf"]
    elif export_markdown:
        report_types = ["markdown"]
        # Set default filename if none provided
        if not report_name:
            report_name = "finops_markdown_export"
        # Ensure exports directory exists
        if not dir:
            dir = "./exports"
        import os

        os.makedirs(dir, exist_ok=True)
        click.echo(
            click.style("📝 Rich-styled markdown export activated - 10-column format for MkDocs", fg="cyan", bold=True)
        )
    elif report_type:
        report_types = [report_type]
    elif report_name:  # If report name provided but no type, default to csv
        report_types = ["csv"]

    # Parse profiles from updated --profile parameter (now supports multiple=True)
    parsed_profiles = None
    if profile:
        # Handle the new tuple/list format from click.option(multiple=True)
        if isinstance(profile, (tuple, list)):
            # Flatten and handle comma-separated values within each element
            all_profiles = []
            for profile_item in profile:
                if profile_item and "," in profile_item:
                    all_profiles.extend([p.strip() for p in profile_item.split(",") if p.strip()])
                elif profile_item and profile_item.strip():
                    all_profiles.append(profile_item.strip())

            # Filter out empty and "default" profiles, keep actual profiles
            parsed_profiles = [p for p in all_profiles if p and p != "default"]
            # If no valid profiles after filtering, use default
            if not parsed_profiles:
                parsed_profiles = ["default"]
        else:
            # Legacy single string handling (backward compatibility)
            if "," in profile:
                parsed_profiles = [p.strip() for p in profile.split(",") if p.strip()]
            else:
                parsed_profiles = [profile.strip()]

    # Combine with --profiles parameter if both are provided
    if profiles:
        legacy_profiles = _parse_profiles_parameter(profiles)
        if parsed_profiles:
            parsed_profiles.extend(legacy_profiles)
        else:
            parsed_profiles = legacy_profiles

    # CRITICAL FIX: Ensure single profile is correctly handled for downstream processing
    # When multiple profiles are provided via --profile, use the first one as primary profile
    primary_profile = (
        parsed_profiles[0] if parsed_profiles else (profile[0] if isinstance(profile, tuple) and profile else profile)
    )

    # Bridge the click options into the argparse.Namespace shape that the
    # dashboard implementations were originally written against.
    args = argparse.Namespace(
        profile=primary_profile,  # Primary profile for single-profile operations
        region=region,
        dry_run=dry_run,
        time_range=time_range,
        report_type=report_types,
        report_name=report_name,
        dir=dir,
        profiles=parsed_profiles,  # Use parsed profiles from both --profile and --profiles
        regions=list(regions) if regions else None,
        all=all,
        combine=combine,
        tag=list(tag) if tag else None,
        trend=trend,
        audit=audit,
        export_markdown=export_markdown,  # Add export_markdown parameter
        config_file=None,  # Not exposed in Click interface yet
        # Display configuration parameters
        profile_display_length=profile_display_length,
        service_name_length=service_name_length,
        max_services_text=max_services_text,
        high_cost_threshold=high_cost_threshold,
        medium_cost_threshold=medium_cost_threshold,
    )
    # Route to appropriate dashboard implementation
    if use_enhanced_routing:
        return route_finops_request(args)
    else:
        return run_dashboard(args)
5730
|
+
|
5731
|
+
|
5732
|
+
# ============================================================================
|
5733
|
+
# HELPER FUNCTIONS
|
5734
|
+
# ============================================================================
|
5735
|
+
|
5736
|
+
|
5737
|
+
def display_inventory_results(results):
    """Render inventory collection results to the console as a formatted table."""
    from runbooks.inventory.core.formatter import InventoryFormatter

    rendered_table = InventoryFormatter(results).format_console_table()
    console.print(rendered_table)
5744
|
+
|
5745
|
+
|
5746
|
+
def save_inventory_results(results, output_format, output_file):
    """Save inventory results to file."""
    from runbooks.inventory.core.formatter import InventoryFormatter

    formatter = InventoryFormatter(results)

    # Default to a timestamped filename when the caller did not supply one.
    if not output_file:
        stamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        output_file = f"inventory_{stamp}.{output_format}"

    # Dispatch to the matching exporter; unrecognized formats write nothing,
    # exactly like the original if/elif chain.
    exporters = {
        "csv": formatter.to_csv,
        "json": formatter.to_json,
        "html": formatter.to_html,
        "yaml": formatter.to_yaml,
    }
    exporter = exporters.get(output_format)
    if exporter is not None:
        exporter(output_file)

    console.print(f"[green]💾 Results saved to: {output_file}[/green]")
5766
|
+
|
5767
|
+
|
5768
|
+
def display_assessment_results(report):
    """Display CFAT assessment results."""
    summary = report.summary
    console.print(f"\n[bold blue]📊 Cloud Foundations Assessment Results[/bold blue]")
    console.print(f"[dim]Score: {summary.compliance_score}/100 | Risk: {summary.risk_level}[/dim]")

    # Summary table
    from rich.table import Table

    summary_table = Table(title="Assessment Summary")
    for heading, column_style in (("Metric", "cyan"), ("Value", "bold"), ("Status", "green")):
        summary_table.add_column(heading, style=column_style)

    # Collect the rows first, then add them in one pass.
    summary_rows = [
        ("Compliance Score", f"{summary.compliance_score}/100", summary.risk_level),
        ("Total Checks", str(summary.total_checks), "✓ Completed"),
        ("Pass Rate", f"{summary.pass_rate:.1f}%", "📊 Analyzed"),
        (
            "Critical Issues",
            str(summary.critical_issues),
            "🚨 Review Required" if summary.critical_issues > 0 else "✅ None",
        ),
    ]
    for metric, value, status in summary_rows:
        summary_table.add_row(metric, value, status)

    console.print(summary_table)
5791
|
+
|
5792
|
+
|
5793
|
+
def save_assessment_results(report, output_format, output_file):
|
5794
|
+
"""Save assessment results to file."""
|
5795
|
+
if not output_file:
|
5796
|
+
timestamp = report.timestamp.strftime("%Y%m%d_%H%M%S")
|
5797
|
+
output_file = f"cfat_report_{timestamp}.{output_format}"
|
5798
|
+
|
5799
|
+
if output_format == "html":
|
5800
|
+
report.to_html(output_file)
|
5801
|
+
elif output_format == "json":
|
5802
|
+
report.to_json(output_file)
|
5803
|
+
elif output_format == "csv":
|
5804
|
+
report.to_csv(output_file)
|
5805
|
+
elif output_format == "yaml":
|
5806
|
+
report.to_yaml(output_file)
|
5807
|
+
|
5808
|
+
console.print(f"[green]💾 Assessment saved to: {output_file}[/green]")
|
5809
|
+
|
5810
|
+
|
5811
|
+
def display_ou_structure(ous):
    """Print the AWS Organizations OU hierarchy as a Rich table.

    Args:
        ous: Iterable of OU dicts with optional ``Name``, ``Id``, ``Level``
            and ``ParentId`` keys; ``Level`` drives the name indentation.
    """
    from rich.table import Table

    ou_table = Table(title="AWS Organizations Structure")
    ou_table.add_column("Name", style="cyan")
    ou_table.add_column("ID", style="green")
    ou_table.add_column("Level", justify="center")
    ou_table.add_column("Parent ID", style="blue")

    for entry in ous:
        # Indent the name proportionally to its depth in the hierarchy.
        level = entry.get("Level", 0)
        padded_name = f"{' ' * level}{entry.get('Name', 'Unknown')}"
        ou_table.add_row(padded_name, entry.get("Id", ""), str(level), entry.get("ParentId", ""))

    console.print(ou_table)
|
4981
5828
|
|
4982
5829
|
|
4983
5830
|
def save_ou_results(ous, output_format, output_file):
|
@@ -5300,6 +6147,273 @@ def scan(ctx, profile, region, dry_run, resources):
|
|
5300
6147
|
sys.exit(1)
|
5301
6148
|
|
5302
6149
|
|
6150
|
+
# ============================================================================
|
6151
|
+
# DORA METRICS COMMANDS (Enterprise SRE Monitoring)
|
6152
|
+
# ============================================================================
|
6153
|
+
|
6154
|
+
|
6155
|
+
# Click command group: subcommands (report/dashboard/track-deployment/simulate)
# attach via @dora.command(); the docstring below doubles as the CLI help text,
# so its wording is user-visible behavior.
@main.group()
@click.pass_context
def dora(ctx):
    """
    📊 DORA metrics and SRE performance monitoring

    Enterprise DORA metrics collection, analysis, and reporting for Site Reliability Engineering.
    Tracks Lead Time, Deployment Frequency, Mean Time to Recovery (MTTR), and Change Failure Rate.

    Features:
    - Real-time DORA metrics calculation
    - SLA compliance monitoring
    - Automated incident detection
    - Enterprise dashboard generation
    - CloudWatch/Datadog integration

    Examples:
        runbooks dora report              # Generate comprehensive DORA report
        runbooks dora dashboard           # Create SRE dashboard data
        runbooks dora track-deployment    # Track git deployment
        runbooks dora simulate            # Run demonstration simulation
    """
    # Group body is intentionally empty; ctx is provided for subcommands.
    pass
|
6178
|
+
|
6179
|
+
|
6180
|
+
@dora.command()
@click.option("--days", default=30, help="Number of days to analyze (default: 30)")
@click.option("--output-dir", default="./artifacts/sre-reports", help="Output directory for reports")
@click.option("--format", type=click.Choice(["json", "console"]), default="console", help="Output format")
@click.pass_context
def report(ctx, days, output_dir, format):
    """
    📊 Generate comprehensive DORA metrics report

    Creates enterprise-grade DORA metrics analysis including Lead Time,
    Deployment Frequency, MTTR, Change Failure Rate, and SLA compliance.

    Examples:
        runbooks dora report --days 7 --format json
        runbooks dora report --days 30 --output-dir ./reports
    """
    # NOTE: `format` shadows the builtin, but the name is fixed by the CLI flag.
    console.print("[cyan]📊 DORA Metrics Enterprise Report[/cyan]")

    try:
        # Lazy import so the CLI loads even when the metrics extras are absent.
        from runbooks.metrics.dora_metrics_engine import DORAMetricsEngine

        # Initialize DORA metrics engine
        engine = DORAMetricsEngine()

        # Generate comprehensive report
        console.print(f"[dim]Analyzing last {days} days...[/dim]")
        report_data = engine.generate_comprehensive_report(days_back=days)

        if format == "json":
            import json

            # default=str handles non-JSON-native values (datetimes etc.).
            output = json.dumps(report_data, indent=2, default=str)
            console.print(output)
        else:
            # Display formatted console output
            console.print(f"\n🎯 [bold]DORA Metrics Summary ({days} days)[/bold]")

            # Performance Analysis
            perf = report_data["performance_analysis"]
            console.print(
                f"Overall Performance: [bold]{perf['overall_performance_percentage']:.1f}%[/bold] ({perf['performance_grade']})"
            )
            console.print(f"SLA Compliance: [bold]{perf['sla_compliance_score']:.1f}%[/bold]")

            # DORA Metrics
            dora_metrics = report_data["dora_metrics"]
            console.print(f"\n📈 [bold]Core DORA Metrics[/bold]")
            console.print(
                f"• Lead Time: [cyan]{dora_metrics['lead_time']['value']:.2f}[/cyan] {dora_metrics['lead_time']['unit']}"
            )
            console.print(
                f"• Deploy Frequency: [cyan]{dora_metrics['deployment_frequency']['value']:.2f}[/cyan] {dora_metrics['deployment_frequency']['unit']}"
            )
            console.print(f"• Change Failure Rate: [cyan]{dora_metrics['change_failure_rate']['value']:.2%}[/cyan]")
            console.print(f"• MTTR: [cyan]{dora_metrics['mttr']['value']:.2f}[/cyan] {dora_metrics['mttr']['unit']}")

            # Recommendations
            recommendations = report_data["recommendations"]
            if recommendations:
                console.print(f"\n💡 [bold]SRE Recommendations[/bold]")
                for i, rec in enumerate(recommendations[:3], 1):  # Show top 3
                    console.print(f"{i}. {rec}")

            # Raw Data Summary
            raw_data = report_data["raw_data"]
            console.print(f"\n📋 [bold]Data Summary[/bold]")
            console.print(f"• Deployments: {raw_data['deployments_count']}")
            console.print(f"• Incidents: {raw_data['incidents_count']}")
            console.print(f"• Automation Rate: {raw_data['automation_rate']:.1f}%")

        console.print(f"\n[green]✅ DORA report generated for {days} days[/green]")
        # NOTE(review): this function never writes anything to output_dir —
        # presumably the engine persists the report there; confirm, otherwise
        # the "saved to" message below is misleading.
        console.print(f"[dim]💾 Report saved to: {output_dir}/[/dim]")

    except Exception as e:
        # Broad catch at the CLI boundary: report, log, and exit non-zero.
        console.print(f"[red]❌ Error generating DORA report: {e}[/red]")
        logger.error(f"DORA report failed: {e}")
        sys.exit(1)
|
6257
|
+
|
6258
|
+
|
6259
|
+
@dora.command()
@click.option("--days", default=30, help="Number of days to analyze for dashboard")
@click.option("--output-file", help="Output file for dashboard JSON data")
@click.option("--cloudwatch", is_flag=True, help="Export metrics to CloudWatch")
@click.pass_context
def dashboard(ctx, days, output_file, cloudwatch):
    """
    📊 Generate SRE dashboard data for visualization tools

    Creates dashboard-ready data for SRE tools like Datadog, Grafana,
    or CloudWatch with time series data and KPI summaries.

    Examples:
        runbooks dora dashboard --days 7 --cloudwatch
        runbooks dora dashboard --output-file dashboard.json
    """
    console.print("[cyan]📊 Generating SRE Dashboard Data[/cyan]")

    try:
        # Lazy import keeps CLI startup fast and the metrics extras optional.
        from runbooks.metrics.dora_metrics_engine import DORAMetricsEngine

        engine = DORAMetricsEngine()

        # Generate dashboard data
        console.print(f"[dim]Creating dashboard for last {days} days...[/dim]")
        dashboard_data = engine.generate_sre_dashboard(days_back=days)

        # Display KPI summary
        kpis = dashboard_data["kpi_summary"]
        console.print(f"\n🎯 [bold]Key Performance Indicators[/bold]")
        console.print(f"• Performance Score: [cyan]{kpis['overall_performance_score']:.1f}%[/cyan]")
        console.print(f"• SLA Compliance: [cyan]{kpis['sla_compliance_score']:.1f}%[/cyan]")
        console.print(f"• DORA Health: [cyan]{kpis['dora_metrics_health']:.1f}%[/cyan]")
        console.print(f"• Active Incidents: [cyan]{kpis['active_incidents']}[/cyan]")
        console.print(f"• Automation: [cyan]{kpis['automation_percentage']:.1f}%[/cyan]")

        # Export to file if requested
        if output_file:
            import json

            with open(output_file, "w") as f:
                # default=str serializes datetimes and other non-JSON values.
                json.dump(dashboard_data, f, indent=2, default=str)
            console.print(f"\n[green]💾 Dashboard data exported: {output_file}[/green]")

        # Export to CloudWatch if requested
        if cloudwatch:
            console.print(f"\n[dim]Exporting to CloudWatch...[/dim]")
            # The engine signals failure via a boolean rather than raising, so
            # a failed export is reported as a warning, not a crash.
            success = engine.export_cloudwatch_metrics()
            if success:
                console.print("[green]✅ Metrics published to CloudWatch[/green]")
            else:
                console.print("[yellow]⚠️ CloudWatch export failed (check AWS permissions)[/yellow]")

        console.print(f"\n[green]✅ SRE dashboard data generated[/green]")

    except Exception as e:
        # CLI boundary: surface the error, log it, exit non-zero.
        console.print(f"[red]❌ Error generating dashboard: {e}[/red]")
        logger.error(f"DORA dashboard failed: {e}")
        sys.exit(1)
|
6318
|
+
|
6319
|
+
|
6320
|
+
@dora.command()
@click.option("--commit-sha", required=True, help="Git commit SHA")
@click.option("--branch", default="main", help="Git branch name")
@click.option("--author", help="Commit author")
@click.option("--message", help="Commit message")
@click.pass_context
def track_deployment(ctx, commit_sha, branch, author, message):
    """
    🔗 Track deployment from git operations for DORA metrics

    Automatically records deployment events for DORA metrics collection,
    linking git commits to production deployments for lead time calculation.

    Examples:
        runbooks dora track-deployment --commit-sha abc123 --branch main --author developer
        runbooks dora track-deployment --commit-sha def456 --message "Feature update"
    """
    console.print("[cyan]🔗 Tracking Git Deployment for DORA Metrics[/cyan]")

    try:
        # Lazy import so the CLI loads without the metrics extras installed.
        from runbooks.metrics.dora_metrics_engine import DORAMetricsEngine

        engine = DORAMetricsEngine()

        # Track git deployment
        # --author / --message are optional click options (default None), so
        # they are normalized to safe strings before reaching the engine.
        deployment = engine.track_git_deployment(
            commit_sha=commit_sha, branch=branch, author=author or "unknown", message=message or ""
        )

        console.print(f"\n✅ [bold]Deployment Tracked[/bold]")
        console.print(f"• Deployment ID: [cyan]{deployment.deployment_id}[/cyan]")
        console.print(f"• Environment: [cyan]{deployment.environment}[/cyan]")
        console.print(f"• Version: [cyan]{deployment.version}[/cyan]")
        console.print(f"• Branch: [cyan]{branch}[/cyan]")
        console.print(f"• Author: [cyan]{author or 'unknown'}[/cyan]")

        console.print(f"\n[green]🎯 Deployment automatically tracked for DORA lead time calculation[/green]")

    except Exception as e:
        # CLI boundary: surface the error, log it, exit non-zero.
        console.print(f"[red]❌ Error tracking deployment: {e}[/red]")
        logger.error(f"DORA deployment tracking failed: {e}")
        sys.exit(1)
|
6362
|
+
|
6363
|
+
|
6364
|
+
@dora.command()
@click.option("--duration", default=5, help="Simulation duration in minutes")
@click.option("--show-report", is_flag=True, help="Display comprehensive report after simulation")
@click.pass_context
def simulate(ctx, duration, show_report):
    """
    🧪 Run DORA metrics simulation for demonstration

    Creates simulated deployment and incident events to demonstrate
    DORA metrics calculation and reporting capabilities.

    Examples:
        runbooks dora simulate --duration 2 --show-report
        runbooks dora simulate --duration 10
    """
    console.print("[cyan]🧪 Running DORA Metrics Simulation[/cyan]")

    try:
        import asyncio

        from runbooks.metrics.dora_metrics_engine import simulate_dora_metrics_collection

        # Run simulation
        console.print(f"[dim]Simulating {duration} minutes of operations...[/dim]")

        # Drive the async simulation to completion on a fresh event loop.
        sim_report = asyncio.run(simulate_dora_metrics_collection(duration_minutes=duration))

        # Display results
        analysis = sim_report["performance_analysis"]
        targets = analysis["targets_met"]
        console.print(f"\n🎯 [bold]Simulation Results[/bold]")
        console.print(f"• Performance Grade: [cyan]{analysis['performance_grade']}[/cyan]")
        console.print(f"• Targets Met: [cyan]{sum(targets.values())}/{len(targets)}[/cyan]")
        console.print(f"• Overall Score: [cyan]{analysis['overall_performance_percentage']:.1f}%[/cyan]")

        if show_report:
            # Display comprehensive report
            console.print(f"\n📊 [bold]Detailed DORA Metrics[/bold]")
            for metric_key, metric_info in sim_report["dora_metrics"].items():
                label = metric_key.replace("_", " ").title()
                console.print(f"• {label}: [cyan]{metric_info['value']:.2f}[/cyan] {metric_info['unit']}")

        console.print(f"\n[green]✅ DORA metrics simulation completed successfully[/green]")

    except Exception as e:
        console.print(f"[red]❌ Error running simulation: {e}[/red]")
        logger.error(f"DORA simulation failed: {e}")
        sys.exit(1)
|
6415
|
+
|
6416
|
+
|
5303
6417
|
# ============================================================================
|
5304
6418
|
# VPC NETWORKING COMMANDS (New Wrapper Architecture)
|
5305
6419
|
# ============================================================================
|
@@ -5448,15 +6562,16 @@ def optimize(ctx, profile, region, dry_run, billing_profile, target_reduction, o
|
|
5448
6562
|
# MCP VALIDATION FRAMEWORK
|
5449
6563
|
# ============================================================================
|
5450
6564
|
|
6565
|
+
|
5451
6566
|
@main.group()
|
5452
|
-
@click.pass_context
|
6567
|
+
@click.pass_context
|
5453
6568
|
def validate(ctx):
|
5454
6569
|
"""
|
5455
6570
|
🔍 MCP validation framework with 99.5% accuracy target
|
5456
|
-
|
6571
|
+
|
5457
6572
|
Comprehensive validation between runbooks outputs and MCP server results
|
5458
6573
|
for enterprise AWS operations with real-time performance monitoring.
|
5459
|
-
|
6574
|
+
|
5460
6575
|
Examples:
|
5461
6576
|
runbooks validate all # Full validation suite
|
5462
6577
|
runbooks validate costs # Cost Explorer validation
|
@@ -5465,34 +6580,33 @@ def validate(ctx):
|
|
5465
6580
|
"""
|
5466
6581
|
pass
|
5467
6582
|
|
6583
|
+
|
5468
6584
|
@validate.command()
|
5469
6585
|
@common_aws_options
|
5470
|
-
@click.option(
|
5471
|
-
@click.option(
|
5472
|
-
@click.option(
|
6586
|
+
@click.option("--tolerance", default=5.0, help="Tolerance percentage for variance detection")
|
6587
|
+
@click.option("--performance-target", default=30.0, help="Performance target in seconds")
|
6588
|
+
@click.option("--save-report", is_flag=True, help="Save detailed report to artifacts")
|
5473
6589
|
@click.pass_context
|
5474
6590
|
def all(ctx, profile, region, dry_run, tolerance, performance_target, save_report):
|
5475
6591
|
"""Run comprehensive MCP validation across all critical operations."""
|
5476
|
-
|
6592
|
+
|
5477
6593
|
console.print("[bold blue]🔍 Enterprise MCP Validation Framework[/bold blue]")
|
5478
6594
|
console.print(f"Target Accuracy: 99.5% | Tolerance: ±{tolerance}% | Performance: <{performance_target}s")
|
5479
|
-
|
6595
|
+
|
5480
6596
|
try:
|
5481
6597
|
import asyncio
|
6598
|
+
|
5482
6599
|
from runbooks.validation.mcp_validator import MCPValidator
|
5483
|
-
|
6600
|
+
|
5484
6601
|
# Initialize validator
|
5485
|
-
validator = MCPValidator(
|
5486
|
-
|
5487
|
-
performance_target_seconds=performance_target
|
5488
|
-
)
|
5489
|
-
|
6602
|
+
validator = MCPValidator(tolerance_percentage=tolerance, performance_target_seconds=performance_target)
|
6603
|
+
|
5490
6604
|
# Run validation
|
5491
6605
|
report = asyncio.run(validator.validate_all_operations())
|
5492
|
-
|
5493
|
-
# Display results
|
6606
|
+
|
6607
|
+
# Display results
|
5494
6608
|
validator.display_validation_report(report)
|
5495
|
-
|
6609
|
+
|
5496
6610
|
# Exit code based on results
|
5497
6611
|
if report.overall_accuracy >= 99.5:
|
5498
6612
|
console.print("[bold green]✅ Validation PASSED - Deploy with confidence[/bold green]")
|
@@ -5503,7 +6617,7 @@ def all(ctx, profile, region, dry_run, tolerance, performance_target, save_repor
|
|
5503
6617
|
else:
|
5504
6618
|
console.print("[bold red]❌ Validation FAILED - Address issues before deployment[/bold red]")
|
5505
6619
|
sys.exit(2)
|
5506
|
-
|
6620
|
+
|
5507
6621
|
except ImportError as e:
|
5508
6622
|
console.print(f"[red]❌ MCP validation dependencies not available: {e}[/red]")
|
5509
6623
|
console.print("[yellow]Install with: pip install runbooks[mcp][/yellow]")
|
@@ -5512,39 +6626,41 @@ def all(ctx, profile, region, dry_run, tolerance, performance_target, save_repor
|
|
5512
6626
|
console.print(f"[red]❌ Validation error: {e}[/red]")
|
5513
6627
|
sys.exit(3)
|
5514
6628
|
|
6629
|
+
|
5515
6630
|
@validate.command()
|
5516
6631
|
@common_aws_options
|
5517
|
-
@click.option(
|
6632
|
+
@click.option("--tolerance", default=5.0, help="Cost variance tolerance percentage")
|
5518
6633
|
@click.pass_context
|
5519
6634
|
def costs(ctx, profile, region, dry_run, tolerance):
|
5520
6635
|
"""Validate Cost Explorer data accuracy."""
|
5521
|
-
|
6636
|
+
|
5522
6637
|
console.print("[bold cyan]💰 Cost Explorer Validation[/bold cyan]")
|
5523
|
-
|
6638
|
+
|
5524
6639
|
try:
|
5525
6640
|
import asyncio
|
6641
|
+
|
5526
6642
|
from runbooks.validation.mcp_validator import MCPValidator
|
5527
|
-
|
6643
|
+
|
5528
6644
|
validator = MCPValidator(tolerance_percentage=tolerance)
|
5529
6645
|
result = asyncio.run(validator.validate_cost_explorer())
|
5530
|
-
|
6646
|
+
|
5531
6647
|
# Display result
|
5532
|
-
from rich.table import Table
|
5533
6648
|
from rich import box
|
5534
|
-
|
6649
|
+
from rich.table import Table
|
6650
|
+
|
5535
6651
|
table = Table(title="Cost Validation Result", box=box.ROUNDED)
|
5536
6652
|
table.add_column("Metric", style="cyan")
|
5537
6653
|
table.add_column("Value", style="bold")
|
5538
|
-
|
6654
|
+
|
5539
6655
|
status_color = "green" if result.status.value == "PASSED" else "red"
|
5540
6656
|
table.add_row("Status", f"[{status_color}]{result.status.value}[/{status_color}]")
|
5541
6657
|
table.add_row("Accuracy", f"{result.accuracy_percentage:.2f}%")
|
5542
6658
|
table.add_row("Execution Time", f"{result.execution_time:.2f}s")
|
5543
|
-
|
6659
|
+
|
5544
6660
|
console.print(table)
|
5545
|
-
|
6661
|
+
|
5546
6662
|
sys.exit(0 if result.status.value == "PASSED" else 1)
|
5547
|
-
|
6663
|
+
|
5548
6664
|
except ImportError as e:
|
5549
6665
|
console.print(f"[red]❌ MCP validation not available: {e}[/red]")
|
5550
6666
|
sys.exit(3)
|
@@ -5552,43 +6668,45 @@ def costs(ctx, profile, region, dry_run, tolerance):
|
|
5552
6668
|
console.print(f"[red]❌ Cost validation error: {e}[/red]")
|
5553
6669
|
sys.exit(3)
|
5554
6670
|
|
6671
|
+
|
5555
6672
|
@validate.command()
|
5556
6673
|
@common_aws_options
|
5557
6674
|
@click.pass_context
|
5558
6675
|
def organizations(ctx, profile, region, dry_run):
|
5559
6676
|
"""Validate Organizations API data accuracy."""
|
5560
|
-
|
6677
|
+
|
5561
6678
|
console.print("[bold cyan]🏢 Organizations Validation[/bold cyan]")
|
5562
|
-
|
6679
|
+
|
5563
6680
|
try:
|
5564
6681
|
import asyncio
|
6682
|
+
|
5565
6683
|
from runbooks.validation.mcp_validator import MCPValidator
|
5566
|
-
|
6684
|
+
|
5567
6685
|
validator = MCPValidator()
|
5568
6686
|
result = asyncio.run(validator.validate_organizations_data())
|
5569
|
-
|
6687
|
+
|
5570
6688
|
# Display result
|
5571
|
-
from rich.table import Table
|
5572
6689
|
from rich import box
|
5573
|
-
|
6690
|
+
from rich.table import Table
|
6691
|
+
|
5574
6692
|
table = Table(title="Organizations Validation Result", box=box.ROUNDED)
|
5575
6693
|
table.add_column("Metric", style="cyan")
|
5576
6694
|
table.add_column("Value", style="bold")
|
5577
|
-
|
6695
|
+
|
5578
6696
|
status_color = "green" if result.status.value == "PASSED" else "red"
|
5579
6697
|
table.add_row("Status", f"[{status_color}]{result.status.value}[/{status_color}]")
|
5580
6698
|
table.add_row("Accuracy", f"{result.accuracy_percentage:.2f}%")
|
5581
6699
|
table.add_row("Execution Time", f"{result.execution_time:.2f}s")
|
5582
|
-
|
6700
|
+
|
5583
6701
|
if result.variance_details:
|
5584
|
-
details = result.variance_details.get(
|
5585
|
-
table.add_row("Runbooks Accounts", str(details.get(
|
5586
|
-
table.add_row("MCP Accounts", str(details.get(
|
5587
|
-
|
6702
|
+
details = result.variance_details.get("details", {})
|
6703
|
+
table.add_row("Runbooks Accounts", str(details.get("runbooks_accounts", "N/A")))
|
6704
|
+
table.add_row("MCP Accounts", str(details.get("mcp_accounts", "N/A")))
|
6705
|
+
|
5588
6706
|
console.print(table)
|
5589
|
-
|
6707
|
+
|
5590
6708
|
sys.exit(0 if result.status.value == "PASSED" else 1)
|
5591
|
-
|
6709
|
+
|
5592
6710
|
except ImportError as e:
|
5593
6711
|
console.print(f"[red]❌ MCP validation not available: {e}[/red]")
|
5594
6712
|
sys.exit(3)
|
@@ -5596,29 +6714,28 @@ def organizations(ctx, profile, region, dry_run):
|
|
5596
6714
|
console.print(f"[red]❌ Organizations validation error: {e}[/red]")
|
5597
6715
|
sys.exit(3)
|
5598
6716
|
|
6717
|
+
|
5599
6718
|
@validate.command()
|
5600
|
-
@click.option(
|
5601
|
-
@click.option(
|
5602
|
-
@click.option(
|
6719
|
+
@click.option("--target-accuracy", default=99.5, help="Target accuracy percentage")
|
6720
|
+
@click.option("--iterations", default=5, help="Number of benchmark iterations")
|
6721
|
+
@click.option("--performance-target", default=30.0, help="Performance target in seconds")
|
5603
6722
|
@click.pass_context
|
5604
6723
|
def benchmark(ctx, target_accuracy, iterations, performance_target):
|
5605
6724
|
"""Run performance benchmark for MCP validation framework."""
|
5606
|
-
|
6725
|
+
|
5607
6726
|
console.print("[bold magenta]🏋️ MCP Validation Benchmark[/bold magenta]")
|
5608
6727
|
console.print(f"Target: {target_accuracy}% | Iterations: {iterations} | Performance: <{performance_target}s")
|
5609
|
-
|
6728
|
+
|
5610
6729
|
try:
|
5611
6730
|
import asyncio
|
6731
|
+
|
5612
6732
|
from runbooks.validation.benchmark import MCPBenchmarkRunner
|
5613
|
-
|
5614
|
-
runner = MCPBenchmarkRunner(
|
5615
|
-
|
5616
|
-
performance_target=performance_target
|
5617
|
-
)
|
5618
|
-
|
6733
|
+
|
6734
|
+
runner = MCPBenchmarkRunner(target_accuracy=target_accuracy, performance_target=performance_target)
|
6735
|
+
|
5619
6736
|
suite = asyncio.run(runner.run_benchmark(iterations))
|
5620
6737
|
runner.display_benchmark_results(suite)
|
5621
|
-
|
6738
|
+
|
5622
6739
|
# Exit based on benchmark results
|
5623
6740
|
overall_status = runner._assess_benchmark_results(suite)
|
5624
6741
|
if overall_status == "PASSED":
|
@@ -5627,7 +6744,7 @@ def benchmark(ctx, target_accuracy, iterations, performance_target):
|
|
5627
6744
|
sys.exit(1)
|
5628
6745
|
else:
|
5629
6746
|
sys.exit(2)
|
5630
|
-
|
6747
|
+
|
5631
6748
|
except ImportError as e:
|
5632
6749
|
console.print(f"[red]❌ MCP benchmark not available: {e}[/red]")
|
5633
6750
|
sys.exit(3)
|
@@ -5635,67 +6752,73 @@ def benchmark(ctx, target_accuracy, iterations, performance_target):
|
|
5635
6752
|
console.print(f"[red]❌ Benchmark error: {e}[/red]")
|
5636
6753
|
sys.exit(3)
|
5637
6754
|
|
6755
|
+
|
5638
6756
|
@validate.command()
@click.pass_context
def status(ctx):
    """Show MCP validation framework status.

    Probes optional dependencies (MCP integration, validation framework,
    benchmark suite) and the configured AWS profiles, then renders a
    component/status/details table to the console.
    """
    console.print("[bold cyan]📊 MCP Validation Framework Status[/bold cyan]")

    from rich import box
    from rich.table import Table

    table = Table(title="Framework Status", box=box.ROUNDED)
    table.add_column("Component", style="cyan")
    table.add_column("Status", style="bold")
    table.add_column("Details")

    # Check MCP integration
    try:
        from notebooks.mcp_integration import MCPIntegrationManager

        table.add_row("MCP Integration", "[green]✅ Available[/green]", "Ready for validation")
    except ImportError:
        table.add_row("MCP Integration", "[red]❌ Unavailable[/red]", "Install MCP dependencies")

    # Check validation framework
    try:
        from runbooks.validation.mcp_validator import MCPValidator

        table.add_row("Validation Framework", "[green]✅ Ready[/green]", "All components loaded")
    except ImportError as e:
        table.add_row("Validation Framework", "[red]❌ Missing[/red]", str(e))

    # Check benchmark suite
    try:
        from runbooks.validation.benchmark import MCPBenchmarkRunner

        table.add_row("Benchmark Suite", "[green]✅ Ready[/green]", "Performance testing available")
    except ImportError as e:
        table.add_row("Benchmark Suite", "[red]❌ Missing[/red]", str(e))

    # Check AWS profiles
    profiles = [
        "ams-admin-Billing-ReadOnlyAccess-909135376185",
        "ams-admin-ReadOnlyAccess-909135376185",
        "ams-centralised-ops-ReadOnlyAccess-335083429030",
        "ams-shared-services-non-prod-ReadOnlyAccess-499201730520",
    ]

    valid_profiles = 0
    for profile_name in profiles:
        try:
            session = boto3.Session(profile_name=profile_name)
            sts = session.client("sts")
            # get_caller_identity raises when the profile cannot authenticate;
            # the returned identity itself is not needed here.
            sts.get_caller_identity()
            valid_profiles += 1
        except Exception:
            # Fixed: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit. A failed probe just means the
            # profile is not counted as valid.
            pass

    if valid_profiles == len(profiles):
        table.add_row(
            "AWS Profiles", "[green]✅ All Valid[/green]", f"{valid_profiles}/{len(profiles)} profiles configured"
        )
    elif valid_profiles > 0:
        table.add_row("AWS Profiles", "[yellow]⚠️ Partial[/yellow]", f"{valid_profiles}/{len(profiles)} profiles valid")
    else:
        table.add_row("AWS Profiles", "[red]❌ None Valid[/red]", "Configure AWS profiles")

    console.print(table)
5701
6824
|
|
@@ -5703,5 +6826,13 @@ def status(ctx):
|
|
5703
6826
|
# MAIN ENTRY POINT
|
5704
6827
|
# ============================================================================
|
5705
6828
|
|
5706
|
-
|
6829
|
+
|
6830
|
+
def cli_entry_point():
    """Entry point with preprocessing for space-separated profiles."""
    # Preprocess command line to handle space-separated profiles
    # NOTE(review): presumably rewrites sys.argv before Click parses it, so it
    # must run before main() — confirm in preprocess_space_separated_profiles.
    preprocess_space_separated_profiles()
    main()


if __name__ == "__main__":
    cli_entry_point()
|