runbooks-1.1.4-py3-none-any.whl → runbooks-1.1.5-py3-none-any.whl
This diff represents the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the versions as they appear in their public registries.
- runbooks/__init__.py +31 -2
- runbooks/__init___optimized.py +18 -4
- runbooks/_platform/__init__.py +1 -5
- runbooks/_platform/core/runbooks_wrapper.py +141 -138
- runbooks/aws2/accuracy_validator.py +812 -0
- runbooks/base.py +7 -0
- runbooks/cfat/assessment/compliance.py +1 -1
- runbooks/cfat/assessment/runner.py +1 -0
- runbooks/cfat/cloud_foundations_assessment.py +227 -239
- runbooks/cli/__init__.py +1 -1
- runbooks/cli/commands/cfat.py +64 -23
- runbooks/cli/commands/finops.py +1005 -54
- runbooks/cli/commands/inventory.py +138 -35
- runbooks/cli/commands/operate.py +9 -36
- runbooks/cli/commands/security.py +42 -18
- runbooks/cli/commands/validation.py +432 -18
- runbooks/cli/commands/vpc.py +81 -17
- runbooks/cli/registry.py +22 -10
- runbooks/cloudops/__init__.py +20 -27
- runbooks/cloudops/base.py +96 -107
- runbooks/cloudops/cost_optimizer.py +544 -542
- runbooks/cloudops/infrastructure_optimizer.py +5 -4
- runbooks/cloudops/interfaces.py +224 -225
- runbooks/cloudops/lifecycle_manager.py +5 -4
- runbooks/cloudops/mcp_cost_validation.py +252 -235
- runbooks/cloudops/models.py +78 -53
- runbooks/cloudops/monitoring_automation.py +5 -4
- runbooks/cloudops/notebook_framework.py +177 -213
- runbooks/cloudops/security_enforcer.py +125 -159
- runbooks/common/accuracy_validator.py +11 -0
- runbooks/common/aws_pricing.py +349 -326
- runbooks/common/aws_pricing_api.py +211 -212
- runbooks/common/aws_profile_manager.py +40 -36
- runbooks/common/aws_utils.py +74 -79
- runbooks/common/business_logic.py +126 -104
- runbooks/common/cli_decorators.py +36 -60
- runbooks/common/comprehensive_cost_explorer_integration.py +455 -463
- runbooks/common/cross_account_manager.py +197 -204
- runbooks/common/date_utils.py +27 -39
- runbooks/common/decorators.py +29 -19
- runbooks/common/dry_run_examples.py +173 -208
- runbooks/common/dry_run_framework.py +157 -155
- runbooks/common/enhanced_exception_handler.py +15 -4
- runbooks/common/enhanced_logging_example.py +50 -64
- runbooks/common/enhanced_logging_integration_example.py +65 -37
- runbooks/common/env_utils.py +16 -16
- runbooks/common/error_handling.py +40 -38
- runbooks/common/lazy_loader.py +41 -23
- runbooks/common/logging_integration_helper.py +79 -86
- runbooks/common/mcp_cost_explorer_integration.py +476 -493
- runbooks/common/mcp_integration.py +63 -74
- runbooks/common/memory_optimization.py +140 -118
- runbooks/common/module_cli_base.py +37 -58
- runbooks/common/organizations_client.py +175 -193
- runbooks/common/patterns.py +23 -25
- runbooks/common/performance_monitoring.py +67 -71
- runbooks/common/performance_optimization_engine.py +283 -274
- runbooks/common/profile_utils.py +111 -37
- runbooks/common/rich_utils.py +201 -141
- runbooks/common/sre_performance_suite.py +177 -186
- runbooks/enterprise/__init__.py +1 -1
- runbooks/enterprise/logging.py +144 -106
- runbooks/enterprise/security.py +187 -204
- runbooks/enterprise/validation.py +43 -56
- runbooks/finops/__init__.py +26 -30
- runbooks/finops/account_resolver.py +1 -1
- runbooks/finops/advanced_optimization_engine.py +980 -0
- runbooks/finops/automation_core.py +268 -231
- runbooks/finops/business_case_config.py +184 -179
- runbooks/finops/cli.py +660 -139
- runbooks/finops/commvault_ec2_analysis.py +157 -164
- runbooks/finops/compute_cost_optimizer.py +336 -320
- runbooks/finops/config.py +20 -20
- runbooks/finops/cost_optimizer.py +484 -618
- runbooks/finops/cost_processor.py +332 -214
- runbooks/finops/dashboard_runner.py +1006 -172
- runbooks/finops/ebs_cost_optimizer.py +991 -657
- runbooks/finops/elastic_ip_optimizer.py +317 -257
- runbooks/finops/enhanced_mcp_integration.py +340 -0
- runbooks/finops/enhanced_progress.py +32 -29
- runbooks/finops/enhanced_trend_visualization.py +3 -2
- runbooks/finops/enterprise_wrappers.py +223 -285
- runbooks/finops/executive_export.py +203 -160
- runbooks/finops/helpers.py +130 -288
- runbooks/finops/iam_guidance.py +1 -1
- runbooks/finops/infrastructure/__init__.py +80 -0
- runbooks/finops/infrastructure/commands.py +506 -0
- runbooks/finops/infrastructure/load_balancer_optimizer.py +866 -0
- runbooks/finops/infrastructure/vpc_endpoint_optimizer.py +832 -0
- runbooks/finops/markdown_exporter.py +337 -174
- runbooks/finops/mcp_validator.py +1952 -0
- runbooks/finops/nat_gateway_optimizer.py +1512 -481
- runbooks/finops/network_cost_optimizer.py +657 -587
- runbooks/finops/notebook_utils.py +226 -188
- runbooks/finops/optimization_engine.py +1136 -0
- runbooks/finops/optimizer.py +19 -23
- runbooks/finops/rds_snapshot_optimizer.py +367 -411
- runbooks/finops/reservation_optimizer.py +427 -363
- runbooks/finops/scenario_cli_integration.py +64 -65
- runbooks/finops/scenarios.py +1277 -438
- runbooks/finops/schemas.py +218 -182
- runbooks/finops/snapshot_manager.py +2289 -0
- runbooks/finops/types.py +3 -3
- runbooks/finops/validation_framework.py +259 -265
- runbooks/finops/vpc_cleanup_exporter.py +189 -144
- runbooks/finops/vpc_cleanup_optimizer.py +591 -573
- runbooks/finops/workspaces_analyzer.py +171 -182
- runbooks/integration/__init__.py +89 -0
- runbooks/integration/mcp_integration.py +1920 -0
- runbooks/inventory/CLAUDE.md +816 -0
- runbooks/inventory/__init__.py +2 -2
- runbooks/inventory/cloud_foundations_integration.py +144 -149
- runbooks/inventory/collectors/aws_comprehensive.py +1 -1
- runbooks/inventory/collectors/aws_networking.py +109 -99
- runbooks/inventory/collectors/base.py +4 -0
- runbooks/inventory/core/collector.py +495 -313
- runbooks/inventory/drift_detection_cli.py +69 -96
- runbooks/inventory/inventory_mcp_cli.py +48 -46
- runbooks/inventory/list_rds_snapshots_aggregator.py +192 -208
- runbooks/inventory/mcp_inventory_validator.py +549 -465
- runbooks/inventory/mcp_vpc_validator.py +359 -442
- runbooks/inventory/organizations_discovery.py +55 -51
- runbooks/inventory/rich_inventory_display.py +33 -32
- runbooks/inventory/unified_validation_engine.py +278 -251
- runbooks/inventory/vpc_analyzer.py +732 -695
- runbooks/inventory/vpc_architecture_validator.py +293 -348
- runbooks/inventory/vpc_dependency_analyzer.py +382 -378
- runbooks/inventory/vpc_flow_analyzer.py +1 -1
- runbooks/main.py +49 -34
- runbooks/main_final.py +91 -60
- runbooks/main_minimal.py +22 -10
- runbooks/main_optimized.py +131 -100
- runbooks/main_ultra_minimal.py +7 -2
- runbooks/mcp/__init__.py +36 -0
- runbooks/mcp/integration.py +679 -0
- runbooks/monitoring/performance_monitor.py +9 -4
- runbooks/operate/dynamodb_operations.py +3 -1
- runbooks/operate/ec2_operations.py +145 -137
- runbooks/operate/iam_operations.py +146 -152
- runbooks/operate/networking_cost_heatmap.py +29 -8
- runbooks/operate/rds_operations.py +223 -254
- runbooks/operate/s3_operations.py +107 -118
- runbooks/operate/vpc_operations.py +646 -616
- runbooks/remediation/base.py +1 -1
- runbooks/remediation/commons.py +10 -7
- runbooks/remediation/commvault_ec2_analysis.py +70 -66
- runbooks/remediation/ec2_unattached_ebs_volumes.py +1 -0
- runbooks/remediation/multi_account.py +24 -21
- runbooks/remediation/rds_snapshot_list.py +86 -60
- runbooks/remediation/remediation_cli.py +92 -146
- runbooks/remediation/universal_account_discovery.py +83 -79
- runbooks/remediation/workspaces_list.py +46 -41
- runbooks/security/__init__.py +19 -0
- runbooks/security/assessment_runner.py +1150 -0
- runbooks/security/baseline_checker.py +812 -0
- runbooks/security/cloudops_automation_security_validator.py +509 -535
- runbooks/security/compliance_automation_engine.py +17 -17
- runbooks/security/config/__init__.py +2 -2
- runbooks/security/config/compliance_config.py +50 -50
- runbooks/security/config_template_generator.py +63 -76
- runbooks/security/enterprise_security_framework.py +1 -1
- runbooks/security/executive_security_dashboard.py +519 -508
- runbooks/security/multi_account_security_controls.py +959 -1210
- runbooks/security/real_time_security_monitor.py +422 -444
- runbooks/security/security_baseline_tester.py +1 -1
- runbooks/security/security_cli.py +143 -112
- runbooks/security/test_2way_validation.py +439 -0
- runbooks/security/two_way_validation_framework.py +852 -0
- runbooks/sre/production_monitoring_framework.py +167 -177
- runbooks/tdd/__init__.py +15 -0
- runbooks/tdd/cli.py +1071 -0
- runbooks/utils/__init__.py +14 -17
- runbooks/utils/logger.py +7 -2
- runbooks/utils/version_validator.py +50 -47
- runbooks/validation/__init__.py +6 -6
- runbooks/validation/cli.py +9 -3
- runbooks/validation/comprehensive_2way_validator.py +745 -704
- runbooks/validation/mcp_validator.py +906 -228
- runbooks/validation/terraform_citations_validator.py +104 -115
- runbooks/validation/terraform_drift_detector.py +447 -451
- runbooks/vpc/README.md +617 -0
- runbooks/vpc/__init__.py +8 -1
- runbooks/vpc/analyzer.py +577 -0
- runbooks/vpc/cleanup_wrapper.py +476 -413
- runbooks/vpc/cli_cloudtrail_commands.py +339 -0
- runbooks/vpc/cli_mcp_validation_commands.py +480 -0
- runbooks/vpc/cloudtrail_audit_integration.py +717 -0
- runbooks/vpc/config.py +92 -97
- runbooks/vpc/cost_engine.py +411 -148
- runbooks/vpc/cost_explorer_integration.py +553 -0
- runbooks/vpc/cross_account_session.py +101 -106
- runbooks/vpc/enhanced_mcp_validation.py +917 -0
- runbooks/vpc/eni_gate_validator.py +961 -0
- runbooks/vpc/heatmap_engine.py +185 -160
- runbooks/vpc/mcp_no_eni_validator.py +680 -639
- runbooks/vpc/nat_gateway_optimizer.py +358 -0
- runbooks/vpc/networking_wrapper.py +15 -8
- runbooks/vpc/pdca_remediation_planner.py +528 -0
- runbooks/vpc/performance_optimized_analyzer.py +219 -231
- runbooks/vpc/runbooks_adapter.py +1167 -241
- runbooks/vpc/tdd_red_phase_stubs.py +601 -0
- runbooks/vpc/test_data_loader.py +358 -0
- runbooks/vpc/tests/conftest.py +314 -4
- runbooks/vpc/tests/test_cleanup_framework.py +1022 -0
- runbooks/vpc/tests/test_cost_engine.py +0 -2
- runbooks/vpc/topology_generator.py +326 -0
- runbooks/vpc/unified_scenarios.py +1297 -1124
- runbooks/vpc/vpc_cleanup_integration.py +1943 -1115
- runbooks-1.1.5.dist-info/METADATA +328 -0
- {runbooks-1.1.4.dist-info → runbooks-1.1.5.dist-info}/RECORD +214 -193
- runbooks/finops/README.md +0 -414
- runbooks/finops/accuracy_cross_validator.py +0 -647
- runbooks/finops/business_cases.py +0 -950
- runbooks/finops/dashboard_router.py +0 -922
- runbooks/finops/ebs_optimizer.py +0 -973
- runbooks/finops/embedded_mcp_validator.py +0 -1629
- runbooks/finops/enhanced_dashboard_runner.py +0 -527
- runbooks/finops/finops_dashboard.py +0 -584
- runbooks/finops/finops_scenarios.py +0 -1218
- runbooks/finops/legacy_migration.py +0 -730
- runbooks/finops/multi_dashboard.py +0 -1519
- runbooks/finops/single_dashboard.py +0 -1113
- runbooks/finops/unlimited_scenarios.py +0 -393
- runbooks-1.1.4.dist-info/METADATA +0 -800
- {runbooks-1.1.4.dist-info → runbooks-1.1.5.dist-info}/WHEEL +0 -0
- {runbooks-1.1.4.dist-info → runbooks-1.1.5.dist-info}/entry_points.txt +0 -0
- {runbooks-1.1.4.dist-info → runbooks-1.1.5.dist-info}/licenses/LICENSE +0 -0
- {runbooks-1.1.4.dist-info → runbooks-1.1.5.dist-info}/top_level.txt +0 -0
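The hunks below are the source diff for `runbooks/cli/commands/validation.py` (+432 −18 in the listing above): the existing MCP validation commands are reformatted onto single-line calls, and a new `validation test` command is added.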
@@ -96,13 +96,11 @@ def create_validation_group():
             "billing": resolved_profile,
             "management": resolved_profile,
             "centralised_ops": resolved_profile,
-            "single_aws": resolved_profile
+            "single_aws": resolved_profile,
         }

         validator = MCPValidator(
-            profiles=profiles,
-            tolerance_percentage=tolerance,
-            performance_target_seconds=performance_target
+            profiles=profiles, tolerance_percentage=tolerance, performance_target_seconds=performance_target
         )

         # Run comprehensive validation
@@ -155,10 +153,7 @@ def create_validation_group():
         # Use ProfileManager for dynamic profile resolution (billing operation)
         resolved_profile = get_profile_for_operation("billing", profile)

-        validator = MCPValidator(
-            profiles={"billing": resolved_profile},
-            tolerance_percentage=tolerance
-        )
+        validator = MCPValidator(profiles={"billing": resolved_profile}, tolerance_percentage=tolerance)

         result = asyncio.run(validator.validate_cost_explorer())

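For orientation, the call chain these hunks reformat can be exercised on its own. The following is a minimal sketch, assuming valid AWS credentials; the import paths are the ones used elsewhere in this diff, and reading `accuracy_percentage` from the result mirrors how the new `test` command consumes it:

```python
import asyncio

# Import paths as used elsewhere in this diff (runbooks 1.1.5).
from runbooks.common.profile_utils import get_profile_for_operation
from runbooks.validation.mcp_validator import MCPValidator

# Resolve a billing profile (passing None is assumed to fall back to a default),
# then cross-check Cost Explorer data within a 5% tolerance.
resolved_profile = get_profile_for_operation("billing", None)
validator = MCPValidator(profiles={"billing": resolved_profile}, tolerance_percentage=5.0)
result = asyncio.run(validator.validate_cost_explorer())
print(result.get("accuracy_percentage", 0))
```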
@@ -176,7 +171,9 @@ def create_validation_group():

     @validation.command()
     @common_aws_options
-    @click.option(
+    @click.option(
+        "--all", is_flag=True, help="Use all available AWS profiles for multi-account organizations validation"
+    )
     @click.pass_context
     def organizations(ctx, profile, region, dry_run, all):
         """
@@ -248,7 +245,9 @@ def create_validation_group():
         import asyncio

         console.print(f"[bold magenta]🎯 Running MCP validation benchmark[/bold magenta]")
-        console.print(
+        console.print(
+            f"Target: {target_accuracy}% | Iterations: {iterations} | Performance: <{performance_target}s"
+        )

         # Use ProfileManager for dynamic profile resolution
         resolved_profile = get_profile_for_operation("operational", profile)
@@ -292,10 +291,12 @@ def create_validation_group():
         "--operation",
         type=click.Choice(["costs", "organizations", "ec2", "security", "vpc"]),
         required=True,
-        help="Specific operation to validate"
+        help="Specific operation to validate",
     )
     @click.option("--tolerance", default=5.0, help="Tolerance percentage")
-    @click.option(
+    @click.option(
+        "--all", is_flag=True, help="Use all available AWS profiles for multi-account single operation validation"
+    )
     @click.pass_context
     def single(ctx, profile, region, dry_run, operation, tolerance, all):
         """
@@ -325,13 +326,10 @@ def create_validation_group():
             "organizations": "management",
             "ec2": "operational",
             "security": "operational",
-            "vpc": "operational"
+            "vpc": "operational",
         }

-        resolved_profile = get_profile_for_operation(
-            operation_type_map.get(operation, "operational"),
-            profile
-        )
+        resolved_profile = get_profile_for_operation(operation_type_map.get(operation, "operational"), profile)

         validator = MCPValidator(tolerance_percentage=tolerance)

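The `single` command's profile selection collapsed in this hunk is just a dictionary lookup with an `"operational"` fallback. A small sketch follows; only the last four mappings are visible in the hunk, so the `"costs"` entry is an assumed example, and the import path is the one used elsewhere in this diff:

```python
from runbooks.common.profile_utils import get_profile_for_operation

operation_type_map = {
    "costs": "billing",  # assumed for illustration; not visible in this hunk
    "organizations": "management",
    "ec2": "operational",
    "security": "operational",
    "vpc": "operational",
}

def resolve_single_profile(operation, profile=None):
    # Unmapped operations fall back to "operational", matching the diff.
    return get_profile_for_operation(operation_type_map.get(operation, "operational"), profile)
```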
@@ -408,4 +406,420 @@ def create_validation_group():
             console.print(f"[red]❌ Status check failed: {e}[/red]")
             raise click.ClickException(str(e))

-
+    @validation.command()
+    @common_aws_options
+    @click.option(
+        "--module",
+        type=click.Choice(["finops", "inventory", "security", "vpc", "cfat", "operate"]),
+        required=True,
+        help="Module to test",
+    )
+    @click.option("--component", help="Specific component within module (e.g., 'ec2-snapshots' for finops module)")
+    @click.option(
+        "--test-type",
+        type=click.Choice(["basic", "profile-tests", "mcp-validation", "full-validation"]),
+        default="basic",
+        help="Type of testing to perform",
+    )
+    @click.option("--performance-target", default=30.0, help="Performance target in seconds")
+    @click.option("--accuracy-target", default=99.5, help="MCP validation accuracy target percentage")
+    @click.option(
+        "--generate-evidence", is_flag=True, help="Generate comprehensive evidence reports for manager review"
+    )
+    @click.option("--export-results", is_flag=True, help="Export test results to JSON file")
+    @click.pass_context
+    def test(
+        ctx,
+        profile,
+        region,
+        dry_run,
+        module,
+        component,
+        test_type,
+        performance_target,
+        accuracy_target,
+        generate_evidence,
+        export_results,
+    ):
+        """
+        Comprehensive test command integration for Sprint 1 validation framework.
+
+        STRATEGIC CONTEXT: Enterprise framework requires `/test` command validation
+        with ≥99.5% MCP validation accuracy for ALL deployments before completion claims.
+
+        Test Framework Features:
+        • Real AWS profile testing across all resolution scenarios
+        • MCP validation testing with configurable accuracy targets
+        • CLI parameter testing for all command combinations
+        • Evidence generation testing for manager reports
+        • Performance testing with enterprise targets (<30s analysis time)
+
+        Test Types:
+        • basic: Core functionality and CLI integration
+        • profile-tests: All AWS profile resolution scenarios
+        • mcp-validation: MCP accuracy validation ≥99.5%
+        • full-validation: Comprehensive end-to-end testing
+
+        Examples:
+            # Test finops ec2-snapshots with basic validation
+            runbooks validation test --module finops --component ec2-snapshots --test-type basic
+
+            # Test profile resolution scenarios
+            runbooks validation test --module finops --component ec2-snapshots --test-type profile-tests
+
+            # Test MCP validation accuracy
+            runbooks validation test --module finops --component ec2-snapshots --test-type mcp-validation --accuracy-target 99.5
+
+            # Full validation with evidence generation
+            runbooks validation test --module finops --component ec2-snapshots --test-type full-validation --generate-evidence --export-results
+
+            # Performance testing
+            runbooks validation test --module finops --component ec2-snapshots --test-type basic --performance-target 20
+
+        Sprint 1 Context:
+        Required for Sprint 1, Task 1 completion validation ensuring all identified
+        issues are fixed and comprehensive evidence generated for manager review.
+        """
+        try:
+            import asyncio
+            import time
+            import subprocess
+            import sys
+            from pathlib import Path
+
+            if component:
+                console.print(
+                    f"\n[bold blue]🧪 Enterprise Test Framework - {module.upper()} Component: {component}[/bold blue]"
+                )
+            else:
+                console.print(f"\n[bold blue]🧪 Enterprise Test Framework - {module.upper()}[/bold blue]")
+            console.print(
+                f"[dim]Type: {test_type} | Performance: <{performance_target}s | Accuracy: ≥{accuracy_target}%[/dim]\n"
+            )
+
+            # Resolve profile for testing
+            resolved_profile = profile or "default"
+
+            # Test execution tracking
+            test_results = {
+                "module": module,
+                "component": component,
+                "test_type": test_type,
+                "profile": resolved_profile,
+                "start_time": time.time(),
+                "tests_executed": [],
+                "failures": [],
+                "performance_metrics": {},
+                "mcp_validation_results": {},
+                "evidence_generated": [],
+            }
+
+            # Basic functionality testing
+            if test_type in ["basic", "full-validation"]:
+                console.print("[cyan]🔧 Basic Functionality Testing[/cyan]")
+
+                # Test module import
+                try:
+                    if module == "finops" and component == "ec2-snapshots":
+                        from runbooks.finops.snapshot_manager import EC2SnapshotManager
+
+                        console.print(" ✅ Module import successful")
+                        test_results["tests_executed"].append("module_import")
+                    else:
+                        console.print(f" ⚠️ Test configuration for {module}/{component} not implemented yet")
+                        test_results["tests_executed"].append("module_import_skipped")
+                except ImportError as e:
+                    console.print(f" ❌ Module import failed: {e}")
+                    test_results["failures"].append(f"module_import: {e}")
+
+                # Test CLI command availability
+                try:
+                    if module == "finops" and component == "ec2-snapshots":
+                        result = subprocess.run(
+                            [sys.executable, "-m", "runbooks", "finops", "ec2-snapshots", "--help"],
+                            capture_output=True,
+                            text=True,
+                            timeout=10,
+                        )
+
+                        if result.returncode == 0:
+                            console.print(" ✅ CLI command available")
+                            test_results["tests_executed"].append("cli_available")
+                        else:
+                            console.print(f" ❌ CLI command failed: {result.stderr}")
+                            test_results["failures"].append(f"cli_available: {result.stderr}")
+                    else:
+                        console.print(f" ⚠️ CLI test for {module}/{component} not configured")
+                        test_results["tests_executed"].append("cli_test_skipped")
+                except Exception as e:
+                    console.print(f" ❌ CLI test failed: {e}")
+                    test_results["failures"].append(f"cli_test: {e}")
+
+            # Profile resolution testing
+            if test_type in ["profile-tests", "full-validation"]:
+                console.print("\n[cyan]🔐 Profile Resolution Testing[/cyan]")
+
+                try:
+                    from runbooks.common.profile_utils import get_profile_for_operation
+
+                    # Test different profile scenarios
+                    profile_scenarios = [
+                        ("billing", resolved_profile),
+                        ("management", resolved_profile),
+                        ("operational", resolved_profile),
+                    ]
+
+                    for operation_type, test_profile in profile_scenarios:
+                        try:
+                            resolved = get_profile_for_operation(operation_type, test_profile)
+                            console.print(f" ✅ {operation_type}: {resolved or 'default'}")
+                            test_results["tests_executed"].append(f"profile_{operation_type}")
+                        except Exception as e:
+                            console.print(f" ❌ {operation_type}: {e}")
+                            test_results["failures"].append(f"profile_{operation_type}: {e}")
+
+                except ImportError as e:
+                    console.print(f" ❌ Profile utils not available: {e}")
+                    test_results["failures"].append(f"profile_utils: {e}")
+
+            # MCP validation testing
+            if test_type in ["mcp-validation", "full-validation"]:
+                console.print(f"\n[cyan]🎯 MCP Validation Testing (Target: ≥{accuracy_target}%)[/cyan]")
+
+                try:
+                    from runbooks.validation.mcp_validator import MCPValidator
+
+                    validator = MCPValidator(tolerance_percentage=5.0)
+
+                    if module == "finops":
+                        # Test cost validation
+                        start_time = time.time()
+                        result = asyncio.run(validator.validate_cost_explorer())
+                        validation_time = time.time() - start_time
+
+                        accuracy = result.get("accuracy_percentage", 0)
+                        if accuracy >= accuracy_target:
+                            console.print(f" ✅ Cost validation: {accuracy:.2f}% accuracy ({validation_time:.1f}s)")
+                            test_results["mcp_validation_results"]["cost_validation"] = {
+                                "accuracy": accuracy,
+                                "time": validation_time,
+                                "passed": True,
+                            }
+                        else:
+                            console.print(
+                                f" ❌ Cost validation: {accuracy:.2f}% accuracy (Required: ≥{accuracy_target}%)"
+                            )
+                            test_results["mcp_validation_results"]["cost_validation"] = {
+                                "accuracy": accuracy,
+                                "time": validation_time,
+                                "passed": False,
+                            }
+                            test_results["failures"].append(f"mcp_accuracy: {accuracy}% < {accuracy_target}%")
+                    else:
+                        console.print(f" ⚠️ MCP validation for {module} not configured")
+                        test_results["tests_executed"].append("mcp_validation_skipped")
+
+                except ImportError as e:
+                    console.print(f" ❌ MCP validator not available: {e}")
+                    test_results["failures"].append(f"mcp_validator: {e}")
+                except Exception as e:
+                    console.print(f" ❌ MCP validation failed: {e}")
+                    test_results["failures"].append(f"mcp_validation: {e}")
+
+            # Performance testing
+            if test_type in ["basic", "full-validation"]:
+                console.print(f"\n[cyan]⚡ Performance Testing (Target: <{performance_target}s)[/cyan]")
+
+                try:
+                    if module == "finops" and component == "ec2-snapshots":
+                        from runbooks.finops.snapshot_manager import EC2SnapshotManager
+
+                        manager = EC2SnapshotManager(profile=resolved_profile, dry_run=True)
+
+                        # Test session initialization performance
+                        start_time = time.time()
+                        session_result = manager.initialize_session()
+                        init_time = time.time() - start_time
+
+                        if init_time < performance_target:
+                            console.print(f" ✅ Session initialization: {init_time:.2f}s")
+                            test_results["performance_metrics"]["session_init"] = {
+                                "time": init_time,
+                                "target": performance_target,
+                                "passed": True,
+                            }
+                        else:
+                            console.print(
+                                f" ❌ Session initialization: {init_time:.2f}s (Target: <{performance_target}s)"
+                            )
+                            test_results["performance_metrics"]["session_init"] = {
+                                "time": init_time,
+                                "target": performance_target,
+                                "passed": False,
+                            }
+                            test_results["failures"].append(f"performance: {init_time:.2f}s > {performance_target}s")
+                    else:
+                        console.print(f" ⚠️ Performance test for {module}/{component} not configured")
+                        test_results["tests_executed"].append("performance_test_skipped")
+
+                except Exception as e:
+                    console.print(f" ❌ Performance test failed: {e}")
+                    test_results["failures"].append(f"performance_test: {e}")
+
+            # Unit test execution
+            if test_type in ["full-validation"]:
+                console.print("\n[cyan]🧪 Unit Test Execution[/cyan]")
+
+                try:
+                    if module == "finops" and component == "ec2-snapshots":
+                        test_file = "tests/finops/test_ec2_snapshot_manager.py"
+
+                        # Run the comprehensive test suite
+                        result = subprocess.run(
+                            [sys.executable, "-m", "pytest", test_file, "-v", "--tb=short"],
+                            capture_output=True,
+                            text=True,
+                            timeout=300,
+                        )
+
+                        if result.returncode == 0:
+                            passed_tests = result.stdout.count("PASSED")
+                            failed_tests = result.stdout.count("FAILED")
+                            console.print(f" ✅ Unit tests: {passed_tests} passed, {failed_tests} failed")
+                            test_results["tests_executed"].append("unit_tests")
+                            test_results["unit_test_results"] = {
+                                "passed": passed_tests,
+                                "failed": failed_tests,
+                                "output": result.stdout[:1000],  # Truncate for storage
+                            }
+                        else:
+                            console.print(f" ❌ Unit tests failed: {result.stderr[:200]}")
+                            test_results["failures"].append(f"unit_tests: {result.stderr[:200]}")
+                    else:
+                        console.print(f" ⚠️ Unit tests for {module}/{component} not configured")
+                        test_results["tests_executed"].append("unit_tests_skipped")
+
+                except Exception as e:
+                    console.print(f" ❌ Unit test execution failed: {e}")
+                    test_results["failures"].append(f"unit_tests: {e}")
+
+            # Calculate final results
+            test_results["end_time"] = time.time()
+            test_results["total_duration"] = test_results["end_time"] - test_results["start_time"]
+            test_results["success_count"] = len(test_results["tests_executed"])
+            test_results["failure_count"] = len(test_results["failures"])
+            test_results["success_rate"] = (
+                (test_results["success_count"] / (test_results["success_count"] + test_results["failure_count"])) * 100
+                if (test_results["success_count"] + test_results["failure_count"]) > 0
+                else 0
+            )
+
+            # Generate summary
+            console.print(f"\n[bold green]📊 Test Summary[/bold green]")
+            console.print(f"Duration: {test_results['total_duration']:.1f}s")
+            console.print(f"Tests Executed: {test_results['success_count']}")
+            console.print(f"Failures: {test_results['failure_count']}")
+            console.print(f"Success Rate: {test_results['success_rate']:.1f}%")
+
+            # Success/failure indicator
+            if test_results["failure_count"] == 0:
+                console.print(f"\n[bold green]✅ ALL TESTS PASSED[/bold green]")
+                console.print(f"[green]Enterprise validation requirements satisfied[/green]")
+            else:
+                console.print(f"\n[bold red]❌ {test_results['failure_count']} TEST(S) FAILED[/bold red]")
+                console.print(f"[red]Review failures and re-run validation[/red]")
+
+            # Evidence generation
+            if generate_evidence:
+                console.print(f"\n[cyan]📋 Generating Evidence Reports[/cyan]")
+
+                try:
+                    evidence_dir = Path("artifacts/test_evidence")
+                    evidence_dir.mkdir(parents=True, exist_ok=True)
+
+                    timestamp = time.strftime("%Y%m%d_%H%M%S")
+                    evidence_file = evidence_dir / f"test_evidence_{module}_{component}_{timestamp}.json"
+
+                    import json
+
+                    with open(evidence_file, "w") as f:
+                        json.dump(test_results, f, indent=2, default=str)
+
+                    console.print(f" ✅ Evidence saved: {evidence_file}")
+                    test_results["evidence_generated"].append(str(evidence_file))
+
+                    # Generate manager summary
+                    manager_summary = evidence_dir / f"manager_summary_{module}_{component}_{timestamp}.md"
+                    with open(manager_summary, "w") as f:
+                        f.write(f"# Test Validation Report: {module.upper()}")
+                        if component:
+                            f.write(f" - {component}")
+                        f.write(f"\n\n**Date**: {time.strftime('%Y-%m-%d %H:%M:%S')}\n")
+                        f.write(f"**Test Type**: {test_type}\n")
+                        f.write(f"**Profile**: {resolved_profile}\n\n")
+                        f.write(f"## Summary\n")
+                        f.write(f"- **Duration**: {test_results['total_duration']:.1f}s\n")
+                        f.write(f"- **Tests Executed**: {test_results['success_count']}\n")
+                        f.write(f"- **Failures**: {test_results['failure_count']}\n")
+                        f.write(f"- **Success Rate**: {test_results['success_rate']:.1f}%\n\n")
+
+                        if test_results["failure_count"] == 0:
+                            f.write("## ✅ VALIDATION STATUS: PASSED\n")
+                            f.write("All enterprise validation requirements satisfied.\n\n")
+                        else:
+                            f.write("## ❌ VALIDATION STATUS: FAILED\n")
+                            f.write("Review failures below and re-run validation.\n\n")
+                            f.write("### Failures:\n")
+                            for failure in test_results["failures"]:
+                                f.write(f"- {failure}\n")
+                            f.write("\n")
+
+                        f.write("## Strategic Context\n")
+                        f.write(
+                            "Enterprise framework requires `/test` command validation with ≥99.5% MCP validation accuracy for ALL deployments before completion claims.\n\n"
+                        )
+                        f.write(
+                            "**Agent Coordination**: qa-testing-specialist [3] (Primary), python-runbooks-engineer [1] (Support)\n"
+                        )
+
+                    console.print(f" ✅ Manager summary: {manager_summary}")
+                    test_results["evidence_generated"].append(str(manager_summary))
+
+                except Exception as e:
+                    console.print(f" ❌ Evidence generation failed: {e}")
+                    test_results["failures"].append(f"evidence_generation: {e}")
+
+            # Export results if requested
+            if export_results:
+                try:
+                    export_dir = Path("artifacts/test_results")
+                    export_dir.mkdir(parents=True, exist_ok=True)
+
+                    timestamp = time.strftime("%Y%m%d_%H%M%S")
+                    export_file = export_dir / f"test_results_{module}_{component}_{timestamp}.json"
+
+                    import json
+
+                    with open(export_file, "w") as f:
+                        json.dump(test_results, f, indent=2, default=str)
+
+                    console.print(f"\n[cyan]📤 Results exported: {export_file}[/cyan]")
+
+                except Exception as e:
+                    console.print(f"\n[red]❌ Export failed: {e}[/red]")
+
+            # Enterprise coordination confirmation
+            console.print(f"\n[dim]🏢 Enterprise coordination: qa-testing-specialist [3] (Primary)[/dim]")
+            console.print(f"[dim]🎯 Supporting: python-runbooks-engineer [1][/dim]")
+            console.print(f"[dim]📋 Strategic: ALL deployments require `/test` validation[/dim]")
+
+            return test_results
+
+        except ImportError as e:
+            console.print(f"[red]❌ Test framework not available: {e}[/red]")
+            raise click.ClickException("Test functionality not available")
+        except Exception as e:
+            console.print(f"[red]❌ Test execution failed: {e}[/red]")
+            raise click.ClickException(str(e))
+
+    return validation
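Both the evidence and export branches of the new `test` command write timestamped JSON under `artifacts/`. A self-contained sketch of that pattern follows; the directory name and the `default=str` serialization are taken from the hunk above, while the sample payload is purely illustrative:

```python
import json
import time
from pathlib import Path

def export_test_results(test_results: dict, out_dir: str = "artifacts/test_results") -> Path:
    """Write a timestamped JSON results file, mirroring the export step in the diff."""
    export_dir = Path(out_dir)
    export_dir.mkdir(parents=True, exist_ok=True)
    timestamp = time.strftime("%Y%m%d_%H%M%S")
    name = f"test_results_{test_results['module']}_{test_results.get('component')}_{timestamp}.json"
    export_file = export_dir / name
    with open(export_file, "w") as f:
        json.dump(test_results, f, indent=2, default=str)  # default=str covers non-JSON values such as Paths
    return export_file

# Illustrative payload; keys mirror the test_results dict built by the new command.
print(export_test_results({"module": "finops", "component": "ec2-snapshots", "failures": []}))
```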