runbooks-0.7.9-py3-none-any.whl → runbooks-0.9.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (95)
  1. runbooks/__init__.py +1 -1
  2. runbooks/cfat/README.md +12 -1
  3. runbooks/cfat/__init__.py +1 -1
  4. runbooks/cfat/assessment/runner.py +42 -34
  5. runbooks/cfat/models.py +1 -1
  6. runbooks/common/__init__.py +152 -0
  7. runbooks/common/accuracy_validator.py +1039 -0
  8. runbooks/common/context_logger.py +440 -0
  9. runbooks/common/cross_module_integration.py +594 -0
  10. runbooks/common/enhanced_exception_handler.py +1108 -0
  11. runbooks/common/enterprise_audit_integration.py +634 -0
  12. runbooks/common/mcp_integration.py +539 -0
  13. runbooks/common/performance_monitor.py +387 -0
  14. runbooks/common/profile_utils.py +216 -0
  15. runbooks/common/rich_utils.py +171 -0
  16. runbooks/feedback/user_feedback_collector.py +440 -0
  17. runbooks/finops/README.md +339 -451
  18. runbooks/finops/__init__.py +4 -21
  19. runbooks/finops/account_resolver.py +279 -0
  20. runbooks/finops/accuracy_cross_validator.py +638 -0
  21. runbooks/finops/aws_client.py +721 -36
  22. runbooks/finops/budget_integration.py +313 -0
  23. runbooks/finops/cli.py +59 -5
  24. runbooks/finops/cost_processor.py +211 -37
  25. runbooks/finops/dashboard_router.py +900 -0
  26. runbooks/finops/dashboard_runner.py +990 -232
  27. runbooks/finops/embedded_mcp_validator.py +288 -0
  28. runbooks/finops/enhanced_dashboard_runner.py +8 -7
  29. runbooks/finops/enhanced_progress.py +327 -0
  30. runbooks/finops/enhanced_trend_visualization.py +423 -0
  31. runbooks/finops/finops_dashboard.py +29 -1880
  32. runbooks/finops/helpers.py +509 -196
  33. runbooks/finops/iam_guidance.py +400 -0
  34. runbooks/finops/markdown_exporter.py +466 -0
  35. runbooks/finops/multi_dashboard.py +1502 -0
  36. runbooks/finops/optimizer.py +15 -15
  37. runbooks/finops/profile_processor.py +2 -2
  38. runbooks/finops/runbooks.inventory.organizations_discovery.log +0 -0
  39. runbooks/finops/runbooks.security.report_generator.log +0 -0
  40. runbooks/finops/runbooks.security.run_script.log +0 -0
  41. runbooks/finops/runbooks.security.security_export.log +0 -0
  42. runbooks/finops/service_mapping.py +195 -0
  43. runbooks/finops/single_dashboard.py +710 -0
  44. runbooks/finops/tests/test_reference_images_validation.py +1 -1
  45. runbooks/inventory/README.md +12 -1
  46. runbooks/inventory/core/collector.py +157 -29
  47. runbooks/inventory/list_ec2_instances.py +9 -6
  48. runbooks/inventory/list_ssm_parameters.py +10 -10
  49. runbooks/inventory/organizations_discovery.py +210 -164
  50. runbooks/inventory/rich_inventory_display.py +74 -107
  51. runbooks/inventory/run_on_multi_accounts.py +13 -13
  52. runbooks/main.py +740 -134
  53. runbooks/metrics/dora_metrics_engine.py +711 -17
  54. runbooks/monitoring/performance_monitor.py +433 -0
  55. runbooks/operate/README.md +394 -0
  56. runbooks/operate/base.py +215 -47
  57. runbooks/operate/ec2_operations.py +7 -5
  58. runbooks/operate/privatelink_operations.py +1 -1
  59. runbooks/operate/vpc_endpoints.py +1 -1
  60. runbooks/remediation/README.md +489 -13
  61. runbooks/remediation/commons.py +8 -4
  62. runbooks/security/ENTERPRISE_SECURITY_FRAMEWORK.md +506 -0
  63. runbooks/security/README.md +12 -1
  64. runbooks/security/__init__.py +164 -33
  65. runbooks/security/compliance_automation.py +12 -10
  66. runbooks/security/compliance_automation_engine.py +1021 -0
  67. runbooks/security/enterprise_security_framework.py +931 -0
  68. runbooks/security/enterprise_security_policies.json +293 -0
  69. runbooks/security/integration_test_enterprise_security.py +879 -0
  70. runbooks/security/module_security_integrator.py +641 -0
  71. runbooks/security/report_generator.py +1 -1
  72. runbooks/security/run_script.py +4 -8
  73. runbooks/security/security_baseline_tester.py +36 -49
  74. runbooks/security/security_export.py +99 -120
  75. runbooks/sre/README.md +472 -0
  76. runbooks/sre/__init__.py +33 -0
  77. runbooks/sre/mcp_reliability_engine.py +1049 -0
  78. runbooks/sre/performance_optimization_engine.py +1032 -0
  79. runbooks/sre/reliability_monitoring_framework.py +1011 -0
  80. runbooks/validation/__init__.py +2 -2
  81. runbooks/validation/benchmark.py +154 -149
  82. runbooks/validation/cli.py +159 -147
  83. runbooks/validation/mcp_validator.py +265 -236
  84. runbooks/vpc/README.md +478 -0
  85. runbooks/vpc/__init__.py +2 -2
  86. runbooks/vpc/manager_interface.py +366 -351
  87. runbooks/vpc/networking_wrapper.py +62 -33
  88. runbooks/vpc/rich_formatters.py +22 -8
  89. {runbooks-0.7.9.dist-info → runbooks-0.9.0.dist-info}/METADATA +136 -54
  90. {runbooks-0.7.9.dist-info → runbooks-0.9.0.dist-info}/RECORD +94 -55
  91. {runbooks-0.7.9.dist-info → runbooks-0.9.0.dist-info}/entry_points.txt +1 -1
  92. runbooks/finops/cross_validation.py +0 -375
  93. {runbooks-0.7.9.dist-info → runbooks-0.9.0.dist-info}/WHEEL +0 -0
  94. {runbooks-0.7.9.dist-info → runbooks-0.9.0.dist-info}/licenses/LICENSE +0 -0
  95. {runbooks-0.7.9.dist-info → runbooks-0.9.0.dist-info}/top_level.txt +0 -0
runbooks/main.py CHANGED
@@ -88,6 +88,12 @@ import boto3
 
 from runbooks import __version__
 from runbooks.cfat.runner import AssessmentRunner
+from runbooks.common.performance_monitor import get_performance_benchmark
+from runbooks.common.profile_utils import (
+    create_management_session,
+    create_operational_session,
+    get_profile_for_operation,
+)
 from runbooks.common.rich_utils import console, create_table, print_banner, print_header, print_status
 from runbooks.config import load_config, save_config
 from runbooks.inventory.core.collector import InventoryCollector
@@ -102,10 +108,10 @@ console = Console()
 
 def get_account_id_for_context(profile: str = "default") -> str:
     """
-    Resolve actual AWS account ID for context creation.
+    Resolve actual AWS account ID for context creation using enterprise profile management.
 
     This replaces hardcoded 'current' strings with actual account IDs
-    to fix Pydantic validation failures.
+    to fix Pydantic validation failures. Uses the proven three-tier profile system.
 
     Args:
         profile: AWS profile name
@@ -114,11 +120,14 @@ def get_account_id_for_context(profile: str = "default") -> str:
         12-digit AWS account ID string
     """
     try:
-        session = boto3.Session(profile_name=profile)
+        # Use enterprise profile management for session creation
+        resolved_profile = get_profile_for_operation("management", profile)
+        session = create_management_session(profile)
         sts = session.client("sts")
         response = sts.get_caller_identity()
         return response["Account"]
-    except Exception:
+    except Exception as e:
+        console.log(f"[yellow]Warning: Could not resolve account ID, using fallback: {e}[/yellow]")
         # Fallback to a valid format if STS call fails
         return "123456789012"  # Valid 12-digit format for validation
 
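For readers following the change to `get_account_id_for_context`, a minimal standalone sketch of the same resolve-then-fallback pattern is shown below. It assumes only boto3 and a configured profile; the `FALLBACK_ACCOUNT_ID` constant and `resolve_account_id` name are illustrative, not part of the package.

```python
# Minimal sketch of the account-ID resolution pattern shown in the hunk above (illustrative only).
import boto3

FALLBACK_ACCOUNT_ID = "123456789012"  # valid 12-digit placeholder, mirroring the diff's fallback


def resolve_account_id(profile: str = "default") -> str:
    """Return the caller's AWS account ID, falling back to a placeholder on error."""
    try:
        session = boto3.Session(profile_name=profile)
        return session.client("sts").get_caller_identity()["Account"]
    except Exception as exc:  # broad on purpose, mirroring the fallback in main.py
        print(f"Warning: could not resolve account ID ({exc}); using fallback")
        return FALLBACK_ACCOUNT_ID


# Example: resolve_account_id("my-readonly-profile")
```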
@@ -128,6 +137,51 @@ def get_account_id_for_context(profile: str = "default") -> str:
 # ============================================================================
 
 
+def preprocess_space_separated_profiles():
+    """
+    Preprocess sys.argv to convert space-separated profiles to comma-separated format.
+
+    Converts: --profile prof1 prof2
+    To: --profile prof1,prof2
+
+    This enables backward compatibility with space-separated profile syntax
+    while using Click's standard option parsing.
+    """
+    import sys
+
+    # Only process if we haven't already processed
+    if hasattr(preprocess_space_separated_profiles, "_processed"):
+        return
+
+    new_argv = []
+    i = 0
+    while i < len(sys.argv):
+        if sys.argv[i] == "--profile" and i + 1 < len(sys.argv):
+            # Found --profile flag, collect all following non-flag arguments
+            profiles = []
+            new_argv.append("--profile")
+            i += 1
+
+            # Collect profiles until we hit another flag or end of arguments
+            while i < len(sys.argv) and not sys.argv[i].startswith("-"):
+                profiles.append(sys.argv[i])
+                i += 1
+
+            # Join profiles with commas and add as single argument
+            if profiles:
+                new_argv.append(",".join(profiles))
+
+            # Don't increment i here as we want to process the current argument
+            continue
+        else:
+            new_argv.append(sys.argv[i])
+            i += 1
+
+    # Replace sys.argv with processed version
+    sys.argv = new_argv
+    preprocess_space_separated_profiles._processed = True
+
+
 def common_aws_options(f):
     """
     Standard AWS connection and safety options for all commands.
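To make the effect of the `preprocess_space_separated_profiles()` addition above concrete, here is a small, self-contained rehearsal of the same argv rewrite; the command line and profile names are hypothetical.

```python
# Illustrative rehearsal of the argv rewrite performed by preprocess_space_separated_profiles().
import sys

sys.argv = ["runbooks", "finops", "--profile", "prod-billing", "prod-ops", "--dry-run"]

new_argv, i = [], 0
while i < len(sys.argv):
    if sys.argv[i] == "--profile" and i + 1 < len(sys.argv):
        new_argv.append("--profile")
        i += 1
        profiles = []
        # Collect profile names until the next flag
        while i < len(sys.argv) and not sys.argv[i].startswith("-"):
            profiles.append(sys.argv[i])
            i += 1
        if profiles:
            new_argv.append(",".join(profiles))
        continue
    new_argv.append(sys.argv[i])
    i += 1

print(new_argv)
# ['runbooks', 'finops', '--profile', 'prod-billing,prod-ops', '--dry-run']
```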
@@ -142,17 +196,26 @@ def common_aws_options(f):
         Decorated function with AWS options
 
     Added Options:
-        --profile: AWS profile name (default: 'default')
+        --profile: AWS profile name(s) - supports repeated flag syntax
         --region: AWS region identifier (default: 'ap-southeast-2')
         --dry-run: Safety flag to preview operations without execution
 
     Examples:
         ```bash
         runbooks inventory ec2 --profile production --region us-west-2 --dry-run
-        runbooks operate s3 create-bucket --profile dev --region eu-west-1
+        runbooks finops --profile prof1 prof2            # Space-separated (SUPPORTED via preprocessing)
+        runbooks finops --profile prof1 --profile prof2  # Multiple flags (Click standard)
+        runbooks finops --profile prof1,prof2            # Comma-separated (Alternative)
         ```
     """
-    f = click.option("--profile", default="default", help="AWS profile (default: 'default')")(f)
+    # FIXED: Space-separated profiles now supported via preprocessing in cli_entry_point()
+    # All three formats work: --profile prof1 prof2, --profile prof1 --profile prof2, --profile prof1,prof2
+    f = click.option(
+        "--profile",
+        multiple=True,
+        default=("default",),  # Tuple default for multiple=True
+        help="AWS profile(s) - supports: --profile prof1 prof2 OR --profile prof1 --profile prof2 OR --profile prof1,prof2",
+    )(f)
    f = click.option("--region", default="ap-southeast-2", help="AWS region (default: 'ap-southeast-2')")(f)
    f = click.option("--dry-run", is_flag=True, help="Enable dry-run mode for safety")(f)
    return f
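The decorator above now hands each command a tuple of profile values. A minimal sketch of how such a tuple can be flattened into a list (the same comma-splitting the finops command applies later in this diff) might look like the following; the command name and profile values are hypothetical.

```python
# Minimal sketch: a Click option with multiple=True plus comma-splitting, mirroring the new --profile handling.
import click


@click.command()
@click.option("--profile", multiple=True, default=("default",), help="AWS profile(s)")
def show_profiles(profile):
    parsed = []
    for item in profile:  # each repeated --profile value, possibly "a,b"
        parsed.extend(p.strip() for p in item.split(",") if p.strip())
    click.echo(f"Resolved profiles: {parsed}")


if __name__ == "__main__":
    show_profiles()

# e.g. `python demo.py --profile prof1,prof2 --profile prof3`
# -> Resolved profiles: ['prof1', 'prof2', 'prof3']
```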
@@ -229,7 +292,7 @@ def common_filter_options(f):
 # ============================================================================
 
 
-@click.group(invoke_without_command=True)
+@click.group()
 @click.version_option(version=__version__)
 @click.option("--debug", is_flag=True, help="Enable debug logging")
 @common_aws_options
@@ -3141,20 +3204,24 @@ def assess(ctx, checks, export_formats):
         from runbooks.security.security_baseline_tester import SecurityBaselineTester
 
         console.print(f"[blue]🔒 Starting Security Assessment[/blue]")
-        console.print(f"[dim]Profile: {ctx.obj['profile']} | Language: {ctx.obj['language']} | Export: {', '.join(export_formats)}[/dim]")
+        console.print(
+            f"[dim]Profile: {ctx.obj['profile']} | Language: {ctx.obj['language']} | Export: {', '.join(export_formats)}[/dim]"
+        )
 
         # Initialize tester with export formats
         tester = SecurityBaselineTester(
-            profile=ctx.obj["profile"],
-            lang_code=ctx.obj["language"],
+            profile=ctx.obj["profile"],
+            lang_code=ctx.obj["language"],
             output_dir=ctx.obj.get("output_file"),
-            export_formats=list(export_formats)
+            export_formats=list(export_formats),
         )
 
         # Run assessment with Rich CLI
         tester.run()
 
-        console.print(f"[green]✅ Security assessment completed with export formats: {', '.join(export_formats)}[/green]")
+        console.print(
+            f"[green]✅ Security assessment completed with export formats: {', '.join(export_formats)}[/green]"
+        )
 
     except Exception as e:
         console.print(f"[red]❌ Security assessment failed: {e}[/red]")
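For context, the reformatted call above reduces to the following hedged sketch. The constructor keywords come from the diff itself; the profile, language code, output directory, and export formats are placeholder values, not defaults documented by the package.

```python
# Hedged sketch of the SecurityBaselineTester invocation shown in this hunk (values are placeholders).
from runbooks.security.security_baseline_tester import SecurityBaselineTester

tester = SecurityBaselineTester(
    profile="security-audit",          # AWS profile to assess (placeholder)
    lang_code="EN",                    # report language code (placeholder)
    output_dir="./artifacts/security",
    export_formats=["csv", "json"],
)
tester.run()  # runs the baseline checks and writes the selected export formats
```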
@@ -4786,26 +4853,212 @@ def auto_fix(ctx, findings_file, severity, max_operations):
4786
4853
  raise click.ClickException(str(e))
4787
4854
 
4788
4855
 
4856
+ # ============================================================================
4857
+ # SRE COMMANDS (Site Reliability Engineering)
4858
+ # ============================================================================
4859
+
4860
+
4861
+ @click.command("sre")
4862
+ @click.option(
4863
+ "--action",
4864
+ type=click.Choice(["health", "recovery", "optimize", "suite"]),
4865
+ default="health",
4866
+ help="SRE action to perform",
4867
+ )
4868
+ @click.option("--config", type=click.Path(), help="MCP configuration file path")
4869
+ @click.option("--save-report", is_flag=True, help="Save detailed report to artifacts")
4870
+ @click.option("--continuous", is_flag=True, help="Run continuous monitoring")
4871
+ @click.pass_context
4872
+ def sre_reliability(ctx, action, config, save_report, continuous):
4873
+ """
4874
+ SRE Automation - Enterprise MCP Reliability & Infrastructure Monitoring
4875
+
4876
+ Provides comprehensive Site Reliability Engineering automation including:
4877
+ - MCP server health monitoring and diagnostics
4878
+ - Automated failure detection and recovery
4879
+ - Performance optimization and SLA validation
4880
+ - >99.9% uptime target achievement
4881
+
4882
+ Examples:
4883
+ runbooks sre --action health # Health check all MCP servers
4884
+ runbooks sre --action recovery # Automated recovery procedures
4885
+ runbooks sre --action optimize # Performance optimization
4886
+ runbooks sre --action suite # Complete reliability suite
4887
+ """
4888
+ import asyncio
4889
+
4890
+ from runbooks.common.rich_utils import console, print_error, print_info, print_success
4891
+ from runbooks.sre.mcp_reliability_engine import MCPReliabilityEngine, run_mcp_reliability_suite
4892
+
4893
+ try:
4894
+ print_info(f"🚀 Starting SRE automation - Action: {action}")
4895
+
4896
+ if action == "suite":
4897
+ # Run complete reliability suite
4898
+ results = asyncio.run(run_mcp_reliability_suite())
4899
+
4900
+ if results.get("overall_success", False):
4901
+ print_success("✅ SRE Reliability Suite completed successfully")
4902
+ console.print(f"🎯 Final Health: {results.get('final_health_percentage', 0):.1f}%")
4903
+ console.print(f"📈 Improvement: +{results.get('health_improvement', 0):.1f}%")
4904
+ else:
4905
+ print_error("❌ SRE Reliability Suite encountered issues")
4906
+ console.print("🔧 Review detailed logs for remediation guidance")
4907
+
4908
+ else:
4909
+ # Initialize reliability engine for specific actions
4910
+ from pathlib import Path
4911
+
4912
+ config_path = Path(config) if config else None
4913
+ reliability_engine = MCPReliabilityEngine(config_path=config_path)
4914
+
4915
+ if action == "health":
4916
+ # Health check only
4917
+ results = asyncio.run(reliability_engine.run_comprehensive_health_check())
4918
+
4919
+ if results["health_percentage"] >= 99.9:
4920
+ print_success(f"✅ All systems healthy: {results['health_percentage']:.1f}%")
4921
+ else:
4922
+ console.print(f"⚠️ Health: {results['health_percentage']:.1f}% - Review recommendations")
4923
+
4924
+ elif action == "recovery":
4925
+ # Automated recovery procedures
4926
+ results = asyncio.run(reliability_engine.implement_automated_recovery())
4927
+
4928
+ actions_taken = len(results.get("actions_taken", []))
4929
+ if actions_taken > 0:
4930
+ print_success(f"🔄 Recovery completed: {actions_taken} actions taken")
4931
+ else:
4932
+ print_info("✅ No recovery needed - all systems healthy")
4933
+
4934
+ elif action == "optimize":
4935
+ # Performance optimization
4936
+ results = asyncio.run(reliability_engine.run_performance_optimization())
4937
+
4938
+ optimizations = results.get("optimizations_applied", 0)
4939
+ if optimizations > 0:
4940
+ print_success(f"⚡ Optimization completed: {optimizations} improvements applied")
4941
+ else:
4942
+ print_info("✅ Performance already optimal")
4943
+
4944
+ # Save detailed report if requested
4945
+ if save_report and "results" in locals():
4946
+ import json
4947
+ from datetime import datetime
4948
+ from pathlib import Path
4949
+
4950
+ artifacts_dir = Path("./artifacts/sre")
4951
+ artifacts_dir.mkdir(parents=True, exist_ok=True)
4952
+
4953
+ timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
4954
+ report_file = artifacts_dir / f"sre_report_{action}_{timestamp}.json"
4955
+
4956
+ with open(report_file, "w") as f:
4957
+ json.dump(results, f, indent=2, default=str)
4958
+
4959
+ print_success(f"📊 Detailed report saved: {report_file}")
4960
+
4961
+ # Continuous monitoring mode
4962
+ if continuous:
4963
+ print_info("🔄 Starting continuous monitoring mode...")
4964
+ console.print("Press Ctrl+C to stop monitoring")
4965
+
4966
+ try:
4967
+ while True:
4968
+ import time
4969
+
4970
+ time.sleep(60) # Check every minute
4971
+
4972
+ # Quick health check
4973
+ reliability_engine = MCPReliabilityEngine()
4974
+ health_summary = reliability_engine.connection_pool.get_health_summary()
4975
+
4976
+ console.print(
4977
+ f"🏥 Health: {health_summary['healthy_servers']}/{health_summary['total_servers']} servers healthy"
4978
+ )
4979
+
4980
+ except KeyboardInterrupt:
4981
+ print_info("🛑 Continuous monitoring stopped")
4982
+
4983
+ except Exception as e:
4984
+ print_error(f"❌ SRE automation failed: {str(e)}")
4985
+ logger.error(f"SRE reliability command failed: {str(e)}")
4986
+ raise click.ClickException(str(e))
4987
+
4988
+
4989
+ # Add SRE command to CLI
4990
+ main.add_command(sre_reliability)
4991
+
4992
+
4789
4993
  # ============================================================================
4790
4994
  # FINOPS COMMANDS (Cost & Usage Analytics)
4791
4995
  # ============================================================================
4792
4996
 
4793
4997
 
4794
- @main.group(invoke_without_command=True)
4998
+ def _parse_profiles_parameter(profiles_tuple):
4999
+ """Parse profiles parameter to handle multiple formats:
5000
+ - Multiple --profiles options: --profiles prof1 --profiles prof2
5001
+ - Comma-separated in single option: --profiles "prof1,prof2"
5002
+ - Space-separated in single option: --profiles "prof1 prof2"
5003
+ """
5004
+ if not profiles_tuple:
5005
+ return None
5006
+
5007
+ all_profiles = []
5008
+ for profile_item in profiles_tuple:
5009
+ # Handle comma or space separated profiles in a single item
5010
+ if "," in profile_item:
5011
+ all_profiles.extend([p.strip() for p in profile_item.split(",")])
5012
+ elif " " in profile_item:
5013
+ all_profiles.extend([p.strip() for p in profile_item.split()])
5014
+ else:
5015
+ all_profiles.append(profile_item.strip())
5016
+
5017
+ return [p for p in all_profiles if p] # Remove empty strings
5018
+
5019
+
5020
+ @main.command()
4795
5021
  @common_aws_options
4796
5022
  @click.option("--time-range", type=int, help="Time range in days (default: current month)")
4797
- @click.option(
4798
- "--report-type", multiple=True, type=click.Choice(["csv", "json", "pdf"]), default=("csv",), help="Report types"
4799
- )
5023
+ @click.option("--report-type", type=click.Choice(["csv", "json", "pdf", "markdown"]), help="Report type for export")
4800
5024
  @click.option("--report-name", help="Base name for report files (without extension)")
4801
5025
  @click.option("--dir", help="Directory to save report files (default: current directory)")
4802
- @click.option("--profiles", multiple=True, help="Specific AWS profiles to use")
5026
+ @click.option(
5027
+ "--profiles",
5028
+ multiple=True,
5029
+ help="AWS profiles: --profiles prof1 prof2 OR --profiles 'prof1,prof2' OR --profiles prof1 --profiles prof2",
5030
+ )
4803
5031
  @click.option("--regions", multiple=True, help="AWS regions to check")
4804
5032
  @click.option("--all", is_flag=True, help="Use all available AWS profiles")
4805
5033
  @click.option("--combine", is_flag=True, help="Combine profiles from the same AWS account")
4806
5034
  @click.option("--tag", multiple=True, help="Cost allocation tag to filter resources")
4807
5035
  @click.option("--trend", is_flag=True, help="Display trend report for past 6 months")
4808
5036
  @click.option("--audit", is_flag=True, help="Display audit report with cost anomalies and resource optimization")
5037
+ @click.option("--pdf", is_flag=True, help="Generate PDF report (convenience flag for --report-type pdf)")
5038
+ @click.option(
5039
+ "--export-markdown", "--markdown", is_flag=True, help="Generate Rich-styled markdown export with 10-column format"
5040
+ )
5041
+ @click.option(
5042
+ "--profile-display-length",
5043
+ type=int,
5044
+ help="Maximum characters for profile name display (optional, no truncation if not specified)",
5045
+ )
5046
+ @click.option(
5047
+ "--service-name-length",
5048
+ type=int,
5049
+ help="Maximum characters for service name display (optional, no truncation if not specified)",
5050
+ )
5051
+ @click.option(
5052
+ "--max-services-text",
5053
+ type=int,
5054
+ help="Maximum number of services in text summaries (optional, no limit if not specified)",
5055
+ )
5056
+ @click.option(
5057
+ "--high-cost-threshold", type=float, default=5000, help="High cost threshold for highlighting (default: 5000)"
5058
+ )
5059
+ @click.option(
5060
+ "--medium-cost-threshold", type=float, default=1000, help="Medium cost threshold for highlighting (default: 1000)"
5061
+ )
4809
5062
  @click.pass_context
4810
5063
  def finops(
4811
5064
  ctx,
@@ -4823,6 +5076,13 @@ def finops(
     tag,
     trend,
     audit,
+    pdf,
+    export_markdown,
+    profile_display_length,
+    service_name_length,
+    max_services_text,
+    high_cost_threshold,
+    medium_cost_threshold,
 ):
     """
     AWS FinOps - Cost and usage analytics.
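The widened signature above pairs with the `_parse_profiles_parameter` helper introduced in the preceding hunk. A short, self-contained version of that normalisation logic behaves as follows; the function name `parse_profiles` and the sample profile names are illustrative.

```python
# Self-contained copy of the profile normalisation used by the new finops command (illustrative).
def parse_profiles(profiles_tuple):
    """Flatten repeated, comma-separated, or space-separated profile values into one list."""
    if not profiles_tuple:
        return None
    all_profiles = []
    for item in profiles_tuple:
        if "," in item:
            all_profiles.extend(p.strip() for p in item.split(","))
        elif " " in item:
            all_profiles.extend(p.strip() for p in item.split())
        else:
            all_profiles.append(item.strip())
    return [p for p in all_profiles if p]  # drop empty strings


print(parse_profiles(("prof1,prof2", "prof3 prof4", "prof5")))
# ['prof1', 'prof2', 'prof3', 'prof4', 'prof5']
```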
@@ -4836,50 +5096,112 @@ def finops(
4836
5096
  runbooks finops --time-range 30 --report-name monthly_costs
4837
5097
  """
4838
5098
 
4839
- if ctx.invoked_subcommand is None:
4840
- # Run default dashboard with all options
4841
- import argparse
5099
+ # Run finops dashboard with all options
5100
+ import argparse
5101
+
5102
+ # Import enhanced routing for service-per-row layout (Enterprise requirement)
5103
+ try:
5104
+ from runbooks.finops.dashboard_router import route_finops_request
4842
5105
 
5106
+ use_enhanced_routing = True
5107
+ click.echo(click.style("🚀 Using Enhanced Service-Focused Dashboard", fg="cyan", bold=True))
5108
+ except Exception as e:
4843
5109
  from runbooks.finops.dashboard_runner import run_dashboard
4844
5110
 
4845
- args = argparse.Namespace(
4846
- profile=profile,
4847
- region=region,
4848
- dry_run=dry_run,
4849
- time_range=time_range,
4850
- report_type=list(report_type),
4851
- report_name=report_name,
4852
- dir=dir,
4853
- profiles=list(profiles) if profiles else None,
4854
- regions=list(regions) if regions else None,
4855
- all=all,
4856
- combine=combine,
4857
- tag=list(tag) if tag else None,
4858
- trend=trend,
4859
- audit=audit,
4860
- config_file=None, # Not exposed in Click interface yet
4861
- )
4862
- return run_dashboard(args)
5111
+ use_enhanced_routing = False
5112
+ click.echo(click.style(f"⚠️ Enhanced routing failed ({str(e)[:50]}), using legacy mode", fg="yellow"))
5113
+
5114
+ # Handle report type logic - support --report-type, --pdf, and --export-markdown flags
5115
+ report_types = []
5116
+ if pdf:
5117
+ report_types = ["pdf"]
5118
+ elif export_markdown:
5119
+ report_types = ["markdown"]
5120
+ # Set default filename if none provided
5121
+ if not report_name:
5122
+ report_name = "finops_markdown_export"
5123
+ # Ensure exports directory exists
5124
+ if not dir:
5125
+ dir = "./exports"
5126
+ import os
5127
+
5128
+ os.makedirs(dir, exist_ok=True)
5129
+ click.echo(
5130
+ click.style("📝 Rich-styled markdown export activated - 10-column format for MkDocs", fg="cyan", bold=True)
5131
+ )
5132
+ elif report_type:
5133
+ report_types = [report_type]
5134
+ elif report_name: # If report name provided but no type, default to csv
5135
+ report_types = ["csv"]
5136
+
5137
+ # Parse profiles from updated --profile parameter (now supports multiple=True)
5138
+ parsed_profiles = None
5139
+ if profile:
5140
+ # Handle the new tuple/list format from click.option(multiple=True)
5141
+ if isinstance(profile, (tuple, list)):
5142
+ # Flatten and handle comma-separated values within each element
5143
+ all_profiles = []
5144
+ for profile_item in profile:
5145
+ if profile_item and "," in profile_item:
5146
+ all_profiles.extend([p.strip() for p in profile_item.split(",") if p.strip()])
5147
+ elif profile_item and profile_item.strip():
5148
+ all_profiles.append(profile_item.strip())
5149
+
5150
+ # Filter out empty and "default" profiles, keep actual profiles
5151
+ parsed_profiles = [p for p in all_profiles if p and p != "default"]
5152
+ # If no valid profiles after filtering, use default
5153
+ if not parsed_profiles:
5154
+ parsed_profiles = ["default"]
5155
+ else:
5156
+ # Legacy single string handling (backward compatibility)
5157
+ if "," in profile:
5158
+ parsed_profiles = [p.strip() for p in profile.split(",") if p.strip()]
5159
+ else:
5160
+ parsed_profiles = [profile.strip()]
5161
+
5162
+ # Combine with --profiles parameter if both are provided
5163
+ if profiles:
5164
+ legacy_profiles = _parse_profiles_parameter(profiles)
5165
+ if parsed_profiles:
5166
+ parsed_profiles.extend(legacy_profiles)
5167
+ else:
5168
+ parsed_profiles = legacy_profiles
5169
+
5170
+ # CRITICAL FIX: Ensure single profile is correctly handled for downstream processing
5171
+ # When multiple profiles are provided via --profile, use the first one as primary profile
5172
+ primary_profile = (
5173
+ parsed_profiles[0] if parsed_profiles else (profile[0] if isinstance(profile, tuple) and profile else profile)
5174
+ )
5175
+
5176
+ args = argparse.Namespace(
5177
+ profile=primary_profile, # Primary profile for single-profile operations
5178
+ region=region,
5179
+ dry_run=dry_run,
5180
+ time_range=time_range,
5181
+ report_type=report_types,
5182
+ report_name=report_name,
5183
+ dir=dir,
5184
+ profiles=parsed_profiles, # Use parsed profiles from both --profile and --profiles
5185
+ regions=list(regions) if regions else None,
5186
+ all=all,
5187
+ combine=combine,
5188
+ tag=list(tag) if tag else None,
5189
+ trend=trend,
5190
+ audit=audit,
5191
+ export_markdown=export_markdown, # Add export_markdown parameter
5192
+ config_file=None, # Not exposed in Click interface yet
5193
+ # Display configuration parameters
5194
+ profile_display_length=profile_display_length,
5195
+ service_name_length=service_name_length,
5196
+ max_services_text=max_services_text,
5197
+ high_cost_threshold=high_cost_threshold,
5198
+ medium_cost_threshold=medium_cost_threshold,
5199
+ )
5200
+ # Route to appropriate dashboard implementation
5201
+ if use_enhanced_routing:
5202
+ return route_finops_request(args)
4863
5203
  else:
4864
- # Pass context to subcommands
4865
- ctx.obj.update(
4866
- {
4867
- "profile": profile,
4868
- "region": region,
4869
- "dry_run": dry_run,
4870
- "time_range": time_range,
4871
- "report_type": list(report_type),
4872
- "report_name": report_name,
4873
- "dir": dir,
4874
- "profiles": list(profiles) if profiles else None,
4875
- "regions": list(regions) if regions else None,
4876
- "all": all,
4877
- "combine": combine,
4878
- "tag": list(tag) if tag else None,
4879
- "trend": trend,
4880
- "audit": audit,
4881
- }
4882
- )
5204
+ return run_dashboard(args)
4883
5205
 
4884
5206
 
4885
5207
  # ============================================================================
@@ -5300,6 +5622,273 @@ def scan(ctx, profile, region, dry_run, resources):
5300
5622
  sys.exit(1)
5301
5623
 
5302
5624
 
5625
+ # ============================================================================
5626
+ # DORA METRICS COMMANDS (Enterprise SRE Monitoring)
5627
+ # ============================================================================
5628
+
5629
+
5630
+ @main.group()
5631
+ @click.pass_context
5632
+ def dora(ctx):
5633
+ """
5634
+ 📊 DORA metrics and SRE performance monitoring
5635
+
5636
+ Enterprise DORA metrics collection, analysis, and reporting for Site Reliability Engineering.
5637
+ Tracks Lead Time, Deployment Frequency, Mean Time to Recovery (MTTR), and Change Failure Rate.
5638
+
5639
+ Features:
5640
+ - Real-time DORA metrics calculation
5641
+ - SLA compliance monitoring
5642
+ - Automated incident detection
5643
+ - Enterprise dashboard generation
5644
+ - CloudWatch/Datadog integration
5645
+
5646
+ Examples:
5647
+ runbooks dora report # Generate comprehensive DORA report
5648
+ runbooks dora dashboard # Create SRE dashboard data
5649
+ runbooks dora track-deployment # Track git deployment
5650
+ runbooks dora simulate # Run demonstration simulation
5651
+ """
5652
+ pass
5653
+
5654
+
5655
+ @dora.command()
5656
+ @click.option("--days", default=30, help="Number of days to analyze (default: 30)")
5657
+ @click.option("--output-dir", default="./artifacts/sre-reports", help="Output directory for reports")
5658
+ @click.option("--format", type=click.Choice(["json", "console"]), default="console", help="Output format")
5659
+ @click.pass_context
5660
+ def report(ctx, days, output_dir, format):
5661
+ """
5662
+ 📊 Generate comprehensive DORA metrics report
5663
+
5664
+ Creates enterprise-grade DORA metrics analysis including Lead Time,
5665
+ Deployment Frequency, MTTR, Change Failure Rate, and SLA compliance.
5666
+
5667
+ Examples:
5668
+ runbooks dora report --days 7 --format json
5669
+ runbooks dora report --days 30 --output-dir ./reports
5670
+ """
5671
+ console.print("[cyan]📊 DORA Metrics Enterprise Report[/cyan]")
5672
+
5673
+ try:
5674
+ from runbooks.metrics.dora_metrics_engine import DORAMetricsEngine
5675
+
5676
+ # Initialize DORA metrics engine
5677
+ engine = DORAMetricsEngine()
5678
+
5679
+ # Generate comprehensive report
5680
+ console.print(f"[dim]Analyzing last {days} days...[/dim]")
5681
+ report_data = engine.generate_comprehensive_report(days_back=days)
5682
+
5683
+ if format == "json":
5684
+ import json
5685
+
5686
+ output = json.dumps(report_data, indent=2, default=str)
5687
+ console.print(output)
5688
+ else:
5689
+ # Display formatted console output
5690
+ console.print(f"\n🎯 [bold]DORA Metrics Summary ({days} days)[/bold]")
5691
+
5692
+ # Performance Analysis
5693
+ perf = report_data["performance_analysis"]
5694
+ console.print(
5695
+ f"Overall Performance: [bold]{perf['overall_performance_percentage']:.1f}%[/bold] ({perf['performance_grade']})"
5696
+ )
5697
+ console.print(f"SLA Compliance: [bold]{perf['sla_compliance_score']:.1f}%[/bold]")
5698
+
5699
+ # DORA Metrics
5700
+ dora_metrics = report_data["dora_metrics"]
5701
+ console.print(f"\n📈 [bold]Core DORA Metrics[/bold]")
5702
+ console.print(
5703
+ f"• Lead Time: [cyan]{dora_metrics['lead_time']['value']:.2f}[/cyan] {dora_metrics['lead_time']['unit']}"
5704
+ )
5705
+ console.print(
5706
+ f"• Deploy Frequency: [cyan]{dora_metrics['deployment_frequency']['value']:.2f}[/cyan] {dora_metrics['deployment_frequency']['unit']}"
5707
+ )
5708
+ console.print(f"• Change Failure Rate: [cyan]{dora_metrics['change_failure_rate']['value']:.2%}[/cyan]")
5709
+ console.print(f"• MTTR: [cyan]{dora_metrics['mttr']['value']:.2f}[/cyan] {dora_metrics['mttr']['unit']}")
5710
+
5711
+ # Recommendations
5712
+ recommendations = report_data["recommendations"]
5713
+ if recommendations:
5714
+ console.print(f"\n💡 [bold]SRE Recommendations[/bold]")
5715
+ for i, rec in enumerate(recommendations[:3], 1): # Show top 3
5716
+ console.print(f"{i}. {rec}")
5717
+
5718
+ # Raw Data Summary
5719
+ raw_data = report_data["raw_data"]
5720
+ console.print(f"\n📋 [bold]Data Summary[/bold]")
5721
+ console.print(f"• Deployments: {raw_data['deployments_count']}")
5722
+ console.print(f"• Incidents: {raw_data['incidents_count']}")
5723
+ console.print(f"• Automation Rate: {raw_data['automation_rate']:.1f}%")
5724
+
5725
+ console.print(f"\n[green]✅ DORA report generated for {days} days[/green]")
5726
+ console.print(f"[dim]💾 Report saved to: {output_dir}/[/dim]")
5727
+
5728
+ except Exception as e:
5729
+ console.print(f"[red]❌ Error generating DORA report: {e}[/red]")
5730
+ logger.error(f"DORA report failed: {e}")
5731
+ sys.exit(1)
5732
+
5733
+
5734
+ @dora.command()
5735
+ @click.option("--days", default=30, help="Number of days to analyze for dashboard")
5736
+ @click.option("--output-file", help="Output file for dashboard JSON data")
5737
+ @click.option("--cloudwatch", is_flag=True, help="Export metrics to CloudWatch")
5738
+ @click.pass_context
5739
+ def dashboard(ctx, days, output_file, cloudwatch):
5740
+ """
5741
+ 📊 Generate SRE dashboard data for visualization tools
5742
+
5743
+ Creates dashboard-ready data for SRE tools like Datadog, Grafana,
5744
+ or CloudWatch with time series data and KPI summaries.
5745
+
5746
+ Examples:
5747
+ runbooks dora dashboard --days 7 --cloudwatch
5748
+ runbooks dora dashboard --output-file dashboard.json
5749
+ """
5750
+ console.print("[cyan]📊 Generating SRE Dashboard Data[/cyan]")
5751
+
5752
+ try:
5753
+ from runbooks.metrics.dora_metrics_engine import DORAMetricsEngine
5754
+
5755
+ engine = DORAMetricsEngine()
5756
+
5757
+ # Generate dashboard data
5758
+ console.print(f"[dim]Creating dashboard for last {days} days...[/dim]")
5759
+ dashboard_data = engine.generate_sre_dashboard(days_back=days)
5760
+
5761
+ # Display KPI summary
5762
+ kpis = dashboard_data["kpi_summary"]
5763
+ console.print(f"\n🎯 [bold]Key Performance Indicators[/bold]")
5764
+ console.print(f"• Performance Score: [cyan]{kpis['overall_performance_score']:.1f}%[/cyan]")
5765
+ console.print(f"• SLA Compliance: [cyan]{kpis['sla_compliance_score']:.1f}%[/cyan]")
5766
+ console.print(f"• DORA Health: [cyan]{kpis['dora_metrics_health']:.1f}%[/cyan]")
5767
+ console.print(f"• Active Incidents: [cyan]{kpis['active_incidents']}[/cyan]")
5768
+ console.print(f"• Automation: [cyan]{kpis['automation_percentage']:.1f}%[/cyan]")
5769
+
5770
+ # Export to file if requested
5771
+ if output_file:
5772
+ import json
5773
+
5774
+ with open(output_file, "w") as f:
5775
+ json.dump(dashboard_data, f, indent=2, default=str)
5776
+ console.print(f"\n[green]💾 Dashboard data exported: {output_file}[/green]")
5777
+
5778
+ # Export to CloudWatch if requested
5779
+ if cloudwatch:
5780
+ console.print(f"\n[dim]Exporting to CloudWatch...[/dim]")
5781
+ success = engine.export_cloudwatch_metrics()
5782
+ if success:
5783
+ console.print("[green]✅ Metrics published to CloudWatch[/green]")
5784
+ else:
5785
+ console.print("[yellow]⚠️ CloudWatch export failed (check AWS permissions)[/yellow]")
5786
+
5787
+ console.print(f"\n[green]✅ SRE dashboard data generated[/green]")
5788
+
5789
+ except Exception as e:
5790
+ console.print(f"[red]❌ Error generating dashboard: {e}[/red]")
5791
+ logger.error(f"DORA dashboard failed: {e}")
5792
+ sys.exit(1)
5793
+
5794
+
5795
+ @dora.command()
5796
+ @click.option("--commit-sha", required=True, help="Git commit SHA")
5797
+ @click.option("--branch", default="main", help="Git branch name")
5798
+ @click.option("--author", help="Commit author")
5799
+ @click.option("--message", help="Commit message")
5800
+ @click.pass_context
5801
+ def track_deployment(ctx, commit_sha, branch, author, message):
5802
+ """
5803
+ 🔗 Track deployment from git operations for DORA metrics
5804
+
5805
+ Automatically records deployment events for DORA metrics collection,
5806
+ linking git commits to production deployments for lead time calculation.
5807
+
5808
+ Examples:
5809
+ runbooks dora track-deployment --commit-sha abc123 --branch main --author developer
5810
+ runbooks dora track-deployment --commit-sha def456 --message "Feature update"
5811
+ """
5812
+ console.print("[cyan]🔗 Tracking Git Deployment for DORA Metrics[/cyan]")
5813
+
5814
+ try:
5815
+ from runbooks.metrics.dora_metrics_engine import DORAMetricsEngine
5816
+
5817
+ engine = DORAMetricsEngine()
5818
+
5819
+ # Track git deployment
5820
+ deployment = engine.track_git_deployment(
5821
+ commit_sha=commit_sha, branch=branch, author=author or "unknown", message=message or ""
5822
+ )
5823
+
5824
+ console.print(f"\n✅ [bold]Deployment Tracked[/bold]")
5825
+ console.print(f"• Deployment ID: [cyan]{deployment.deployment_id}[/cyan]")
5826
+ console.print(f"• Environment: [cyan]{deployment.environment}[/cyan]")
5827
+ console.print(f"• Version: [cyan]{deployment.version}[/cyan]")
5828
+ console.print(f"• Branch: [cyan]{branch}[/cyan]")
5829
+ console.print(f"• Author: [cyan]{author or 'unknown'}[/cyan]")
5830
+
5831
+ console.print(f"\n[green]🎯 Deployment automatically tracked for DORA lead time calculation[/green]")
5832
+
5833
+ except Exception as e:
5834
+ console.print(f"[red]❌ Error tracking deployment: {e}[/red]")
5835
+ logger.error(f"DORA deployment tracking failed: {e}")
5836
+ sys.exit(1)
5837
+
5838
+
5839
+ @dora.command()
5840
+ @click.option("--duration", default=5, help="Simulation duration in minutes")
5841
+ @click.option("--show-report", is_flag=True, help="Display comprehensive report after simulation")
5842
+ @click.pass_context
5843
+ def simulate(ctx, duration, show_report):
5844
+ """
5845
+ 🧪 Run DORA metrics simulation for demonstration
5846
+
5847
+ Creates simulated deployment and incident events to demonstrate
5848
+ DORA metrics calculation and reporting capabilities.
5849
+
5850
+ Examples:
5851
+ runbooks dora simulate --duration 2 --show-report
5852
+ runbooks dora simulate --duration 10
5853
+ """
5854
+ console.print("[cyan]🧪 Running DORA Metrics Simulation[/cyan]")
5855
+
5856
+ try:
5857
+ import asyncio
5858
+
5859
+ from runbooks.metrics.dora_metrics_engine import simulate_dora_metrics_collection
5860
+
5861
+ # Run simulation
5862
+ console.print(f"[dim]Simulating {duration} minutes of operations...[/dim]")
5863
+
5864
+ async def run_simulation():
5865
+ return await simulate_dora_metrics_collection(duration_minutes=duration)
5866
+
5867
+ report = asyncio.run(run_simulation())
5868
+
5869
+ # Display results
5870
+ perf = report["performance_analysis"]
5871
+ console.print(f"\n🎯 [bold]Simulation Results[/bold]")
5872
+ console.print(f"• Performance Grade: [cyan]{perf['performance_grade']}[/cyan]")
5873
+ console.print(f"• Targets Met: [cyan]{sum(perf['targets_met'].values())}/{len(perf['targets_met'])}[/cyan]")
5874
+ console.print(f"• Overall Score: [cyan]{perf['overall_performance_percentage']:.1f}%[/cyan]")
5875
+
5876
+ if show_report:
5877
+ # Display comprehensive report
5878
+ console.print(f"\n📊 [bold]Detailed DORA Metrics[/bold]")
5879
+ for metric_name, metric_data in report["dora_metrics"].items():
5880
+ console.print(
5881
+ f"• {metric_name.replace('_', ' ').title()}: [cyan]{metric_data['value']:.2f}[/cyan] {metric_data['unit']}"
5882
+ )
5883
+
5884
+ console.print(f"\n[green]✅ DORA metrics simulation completed successfully[/green]")
5885
+
5886
+ except Exception as e:
5887
+ console.print(f"[red]❌ Error running simulation: {e}[/red]")
5888
+ logger.error(f"DORA simulation failed: {e}")
5889
+ sys.exit(1)
5890
+
5891
+
5303
5892
  # ============================================================================
5304
5893
  # VPC NETWORKING COMMANDS (New Wrapper Architecture)
5305
5894
  # ============================================================================
@@ -5448,15 +6037,16 @@ def optimize(ctx, profile, region, dry_run, billing_profile, target_reduction, o
 # MCP VALIDATION FRAMEWORK
 # ============================================================================
 
+
 @main.group()
-@click.pass_context
+@click.pass_context
 def validate(ctx):
     """
     🔍 MCP validation framework with 99.5% accuracy target
-
+
     Comprehensive validation between runbooks outputs and MCP server results
     for enterprise AWS operations with real-time performance monitoring.
-
+
     Examples:
         runbooks validate all      # Full validation suite
         runbooks validate costs    # Cost Explorer validation
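A condensed sketch of the validation flow these subcommands wrap is shown below. The class and method names are taken from the diff (`MCPValidator`, `validate_all_operations`, `display_validation_report`, `overall_accuracy`) and the thresholds mirror the CLI defaults, but treat the snippet as illustrative rather than the package's exact API.

```python
# Hedged sketch of the MCP validation flow wrapped by `runbooks validate all` (illustrative).
import asyncio

from runbooks.validation.mcp_validator import MCPValidator

validator = MCPValidator(tolerance_percentage=5.0, performance_target_seconds=30.0)
report = asyncio.run(validator.validate_all_operations())
validator.display_validation_report(report)

if report.overall_accuracy >= 99.5:
    print("Validation PASSED - deploy with confidence")
else:
    print("Validation below the 99.5% target - review variances before deployment")
```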
@@ -5465,34 +6055,33 @@ def validate(ctx):
5465
6055
  """
5466
6056
  pass
5467
6057
 
6058
+
5468
6059
  @validate.command()
5469
6060
  @common_aws_options
5470
- @click.option('--tolerance', default=5.0, help='Tolerance percentage for variance detection')
5471
- @click.option('--performance-target', default=30.0, help='Performance target in seconds')
5472
- @click.option('--save-report', is_flag=True, help='Save detailed report to artifacts')
6061
+ @click.option("--tolerance", default=5.0, help="Tolerance percentage for variance detection")
6062
+ @click.option("--performance-target", default=30.0, help="Performance target in seconds")
6063
+ @click.option("--save-report", is_flag=True, help="Save detailed report to artifacts")
5473
6064
  @click.pass_context
5474
6065
  def all(ctx, profile, region, dry_run, tolerance, performance_target, save_report):
5475
6066
  """Run comprehensive MCP validation across all critical operations."""
5476
-
6067
+
5477
6068
  console.print("[bold blue]🔍 Enterprise MCP Validation Framework[/bold blue]")
5478
6069
  console.print(f"Target Accuracy: 99.5% | Tolerance: ±{tolerance}% | Performance: <{performance_target}s")
5479
-
6070
+
5480
6071
  try:
5481
6072
  import asyncio
6073
+
5482
6074
  from runbooks.validation.mcp_validator import MCPValidator
5483
-
6075
+
5484
6076
  # Initialize validator
5485
- validator = MCPValidator(
5486
- tolerance_percentage=tolerance,
5487
- performance_target_seconds=performance_target
5488
- )
5489
-
6077
+ validator = MCPValidator(tolerance_percentage=tolerance, performance_target_seconds=performance_target)
6078
+
5490
6079
  # Run validation
5491
6080
  report = asyncio.run(validator.validate_all_operations())
5492
-
5493
- # Display results
6081
+
6082
+ # Display results
5494
6083
  validator.display_validation_report(report)
5495
-
6084
+
5496
6085
  # Exit code based on results
5497
6086
  if report.overall_accuracy >= 99.5:
5498
6087
  console.print("[bold green]✅ Validation PASSED - Deploy with confidence[/bold green]")
@@ -5503,7 +6092,7 @@ def all(ctx, profile, region, dry_run, tolerance, performance_target, save_repor
5503
6092
  else:
5504
6093
  console.print("[bold red]❌ Validation FAILED - Address issues before deployment[/bold red]")
5505
6094
  sys.exit(2)
5506
-
6095
+
5507
6096
  except ImportError as e:
5508
6097
  console.print(f"[red]❌ MCP validation dependencies not available: {e}[/red]")
5509
6098
  console.print("[yellow]Install with: pip install runbooks[mcp][/yellow]")
@@ -5512,39 +6101,41 @@ def all(ctx, profile, region, dry_run, tolerance, performance_target, save_repor
5512
6101
  console.print(f"[red]❌ Validation error: {e}[/red]")
5513
6102
  sys.exit(3)
5514
6103
 
6104
+
5515
6105
  @validate.command()
5516
6106
  @common_aws_options
5517
- @click.option('--tolerance', default=5.0, help='Cost variance tolerance percentage')
6107
+ @click.option("--tolerance", default=5.0, help="Cost variance tolerance percentage")
5518
6108
  @click.pass_context
5519
6109
  def costs(ctx, profile, region, dry_run, tolerance):
5520
6110
  """Validate Cost Explorer data accuracy."""
5521
-
6111
+
5522
6112
  console.print("[bold cyan]💰 Cost Explorer Validation[/bold cyan]")
5523
-
6113
+
5524
6114
  try:
5525
6115
  import asyncio
6116
+
5526
6117
  from runbooks.validation.mcp_validator import MCPValidator
5527
-
6118
+
5528
6119
  validator = MCPValidator(tolerance_percentage=tolerance)
5529
6120
  result = asyncio.run(validator.validate_cost_explorer())
5530
-
6121
+
5531
6122
  # Display result
5532
- from rich.table import Table
5533
6123
  from rich import box
5534
-
6124
+ from rich.table import Table
6125
+
5535
6126
  table = Table(title="Cost Validation Result", box=box.ROUNDED)
5536
6127
  table.add_column("Metric", style="cyan")
5537
6128
  table.add_column("Value", style="bold")
5538
-
6129
+
5539
6130
  status_color = "green" if result.status.value == "PASSED" else "red"
5540
6131
  table.add_row("Status", f"[{status_color}]{result.status.value}[/{status_color}]")
5541
6132
  table.add_row("Accuracy", f"{result.accuracy_percentage:.2f}%")
5542
6133
  table.add_row("Execution Time", f"{result.execution_time:.2f}s")
5543
-
6134
+
5544
6135
  console.print(table)
5545
-
6136
+
5546
6137
  sys.exit(0 if result.status.value == "PASSED" else 1)
5547
-
6138
+
5548
6139
  except ImportError as e:
5549
6140
  console.print(f"[red]❌ MCP validation not available: {e}[/red]")
5550
6141
  sys.exit(3)
@@ -5552,43 +6143,45 @@ def costs(ctx, profile, region, dry_run, tolerance):
5552
6143
  console.print(f"[red]❌ Cost validation error: {e}[/red]")
5553
6144
  sys.exit(3)
5554
6145
 
6146
+
5555
6147
  @validate.command()
5556
6148
  @common_aws_options
5557
6149
  @click.pass_context
5558
6150
  def organizations(ctx, profile, region, dry_run):
5559
6151
  """Validate Organizations API data accuracy."""
5560
-
6152
+
5561
6153
  console.print("[bold cyan]🏢 Organizations Validation[/bold cyan]")
5562
-
6154
+
5563
6155
  try:
5564
6156
  import asyncio
6157
+
5565
6158
  from runbooks.validation.mcp_validator import MCPValidator
5566
-
6159
+
5567
6160
  validator = MCPValidator()
5568
6161
  result = asyncio.run(validator.validate_organizations_data())
5569
-
6162
+
5570
6163
  # Display result
5571
- from rich.table import Table
5572
6164
  from rich import box
5573
-
6165
+ from rich.table import Table
6166
+
5574
6167
  table = Table(title="Organizations Validation Result", box=box.ROUNDED)
5575
6168
  table.add_column("Metric", style="cyan")
5576
6169
  table.add_column("Value", style="bold")
5577
-
6170
+
5578
6171
  status_color = "green" if result.status.value == "PASSED" else "red"
5579
6172
  table.add_row("Status", f"[{status_color}]{result.status.value}[/{status_color}]")
5580
6173
  table.add_row("Accuracy", f"{result.accuracy_percentage:.2f}%")
5581
6174
  table.add_row("Execution Time", f"{result.execution_time:.2f}s")
5582
-
6175
+
5583
6176
  if result.variance_details:
5584
- details = result.variance_details.get('details', {})
5585
- table.add_row("Runbooks Accounts", str(details.get('runbooks_accounts', 'N/A')))
5586
- table.add_row("MCP Accounts", str(details.get('mcp_accounts', 'N/A')))
5587
-
6177
+ details = result.variance_details.get("details", {})
6178
+ table.add_row("Runbooks Accounts", str(details.get("runbooks_accounts", "N/A")))
6179
+ table.add_row("MCP Accounts", str(details.get("mcp_accounts", "N/A")))
6180
+
5588
6181
  console.print(table)
5589
-
6182
+
5590
6183
  sys.exit(0 if result.status.value == "PASSED" else 1)
5591
-
6184
+
5592
6185
  except ImportError as e:
5593
6186
  console.print(f"[red]❌ MCP validation not available: {e}[/red]")
5594
6187
  sys.exit(3)
@@ -5596,29 +6189,28 @@ def organizations(ctx, profile, region, dry_run):
5596
6189
  console.print(f"[red]❌ Organizations validation error: {e}[/red]")
5597
6190
  sys.exit(3)
5598
6191
 
6192
+
5599
6193
  @validate.command()
5600
- @click.option('--target-accuracy', default=99.5, help='Target accuracy percentage')
5601
- @click.option('--iterations', default=5, help='Number of benchmark iterations')
5602
- @click.option('--performance-target', default=30.0, help='Performance target in seconds')
6194
+ @click.option("--target-accuracy", default=99.5, help="Target accuracy percentage")
6195
+ @click.option("--iterations", default=5, help="Number of benchmark iterations")
6196
+ @click.option("--performance-target", default=30.0, help="Performance target in seconds")
5603
6197
  @click.pass_context
5604
6198
  def benchmark(ctx, target_accuracy, iterations, performance_target):
5605
6199
  """Run performance benchmark for MCP validation framework."""
5606
-
6200
+
5607
6201
  console.print("[bold magenta]🏋️ MCP Validation Benchmark[/bold magenta]")
5608
6202
  console.print(f"Target: {target_accuracy}% | Iterations: {iterations} | Performance: <{performance_target}s")
5609
-
6203
+
5610
6204
  try:
5611
6205
  import asyncio
6206
+
5612
6207
  from runbooks.validation.benchmark import MCPBenchmarkRunner
5613
-
5614
- runner = MCPBenchmarkRunner(
5615
- target_accuracy=target_accuracy,
5616
- performance_target=performance_target
5617
- )
5618
-
6208
+
6209
+ runner = MCPBenchmarkRunner(target_accuracy=target_accuracy, performance_target=performance_target)
6210
+
5619
6211
  suite = asyncio.run(runner.run_benchmark(iterations))
5620
6212
  runner.display_benchmark_results(suite)
5621
-
6213
+
5622
6214
  # Exit based on benchmark results
5623
6215
  overall_status = runner._assess_benchmark_results(suite)
5624
6216
  if overall_status == "PASSED":
@@ -5627,7 +6219,7 @@ def benchmark(ctx, target_accuracy, iterations, performance_target):
5627
6219
  sys.exit(1)
5628
6220
  else:
5629
6221
  sys.exit(2)
5630
-
6222
+
5631
6223
  except ImportError as e:
5632
6224
  console.print(f"[red]❌ MCP benchmark not available: {e}[/red]")
5633
6225
  sys.exit(3)
@@ -5635,67 +6227,73 @@ def benchmark(ctx, target_accuracy, iterations, performance_target):
5635
6227
  console.print(f"[red]❌ Benchmark error: {e}[/red]")
5636
6228
  sys.exit(3)
5637
6229
 
6230
+
5638
6231
  @validate.command()
5639
6232
  @click.pass_context
5640
6233
  def status(ctx):
5641
6234
  """Show MCP validation framework status."""
5642
-
6235
+
5643
6236
  console.print("[bold cyan]📊 MCP Validation Framework Status[/bold cyan]")
5644
-
5645
- from rich.table import Table
6237
+
5646
6238
  from rich import box
5647
-
6239
+ from rich.table import Table
6240
+
5648
6241
  table = Table(title="Framework Status", box=box.ROUNDED)
5649
6242
  table.add_column("Component", style="cyan")
5650
- table.add_column("Status", style="bold")
6243
+ table.add_column("Status", style="bold")
5651
6244
  table.add_column("Details")
5652
-
6245
+
5653
6246
  # Check MCP integration
5654
6247
  try:
5655
6248
  from notebooks.mcp_integration import MCPIntegrationManager
6249
+
5656
6250
  table.add_row("MCP Integration", "[green]✅ Available[/green]", "Ready for validation")
5657
6251
  except ImportError:
5658
6252
  table.add_row("MCP Integration", "[red]❌ Unavailable[/red]", "Install MCP dependencies")
5659
-
6253
+
5660
6254
  # Check validation framework
5661
6255
  try:
5662
6256
  from runbooks.validation.mcp_validator import MCPValidator
6257
+
5663
6258
  table.add_row("Validation Framework", "[green]✅ Ready[/green]", "All components loaded")
5664
6259
  except ImportError as e:
5665
6260
  table.add_row("Validation Framework", "[red]❌ Missing[/red]", str(e))
5666
-
5667
- # Check benchmark suite
6261
+
6262
+ # Check benchmark suite
5668
6263
  try:
5669
6264
  from runbooks.validation.benchmark import MCPBenchmarkRunner
6265
+
5670
6266
  table.add_row("Benchmark Suite", "[green]✅ Ready[/green]", "Performance testing available")
5671
6267
  except ImportError as e:
5672
6268
  table.add_row("Benchmark Suite", "[red]❌ Missing[/red]", str(e))
5673
-
6269
+
5674
6270
  # Check AWS profiles
5675
6271
  profiles = [
5676
- 'ams-admin-Billing-ReadOnlyAccess-909135376185',
5677
- 'ams-admin-ReadOnlyAccess-909135376185',
5678
- 'ams-centralised-ops-ReadOnlyAccess-335083429030',
5679
- 'ams-shared-services-non-prod-ReadOnlyAccess-499201730520'
6272
+ "ams-admin-Billing-ReadOnlyAccess-909135376185",
6273
+ "ams-admin-ReadOnlyAccess-909135376185",
6274
+ "ams-centralised-ops-ReadOnlyAccess-335083429030",
6275
+ "ams-shared-services-non-prod-ReadOnlyAccess-499201730520",
5680
6276
  ]
5681
-
6277
+
5682
6278
  valid_profiles = 0
5683
6279
  for profile_name in profiles:
5684
6280
  try:
5685
6281
  session = boto3.Session(profile_name=profile_name)
5686
- sts = session.client('sts')
6282
+ sts = session.client("sts")
5687
6283
  identity = sts.get_caller_identity()
5688
6284
  valid_profiles += 1
5689
6285
  except:
5690
6286
  pass
5691
-
6287
+
5692
6288
  if valid_profiles == len(profiles):
5693
- table.add_row("AWS Profiles", "[green]✅ All Valid[/green]", f"{valid_profiles}/{len(profiles)} profiles configured")
6289
+ table.add_row(
6290
+ "AWS Profiles", "[green]✅ All Valid[/green]", f"{valid_profiles}/{len(profiles)} profiles configured"
6291
+ )
5694
6292
  elif valid_profiles > 0:
5695
6293
  table.add_row("AWS Profiles", "[yellow]⚠️ Partial[/yellow]", f"{valid_profiles}/{len(profiles)} profiles valid")
5696
6294
  else:
5697
6295
  table.add_row("AWS Profiles", "[red]❌ None Valid[/red]", "Configure AWS profiles")
5698
-
6296
+
5699
6297
  console.print(table)
5700
6298
 
5701
6299
 
@@ -5703,5 +6301,13 @@ def status(ctx):
 # MAIN ENTRY POINT
 # ============================================================================
 
-if __name__ == "__main__":
+
+def cli_entry_point():
+    """Entry point with preprocessing for space-separated profiles."""
+    # Preprocess command line to handle space-separated profiles
+    preprocess_space_separated_profiles()
     main()
+
+
+if __name__ == "__main__":
+    cli_entry_point()
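Because `cli_entry_point()` rewrites `sys.argv` before Click parses it, test harnesses that bypass the shell (such as Click's `CliRunner`) do not go through that preprocessing and should pass the comma-separated form directly. A hedged smoke-test sketch, with hypothetical profile names, is shown below.

```python
# Hedged smoke test: exercising the finops command via CliRunner (bypasses the sys.argv preprocessing).
from click.testing import CliRunner

from runbooks.main import main

runner = CliRunner()
# Comma-separated profiles, since CliRunner does not invoke cli_entry_point()'s argv rewrite.
result = runner.invoke(main, ["finops", "--profile", "prof1,prof2", "--dry-run"])
print(result.exit_code)
print(result.output[:200])
```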