runbooks-0.9.0-py3-none-any.whl → runbooks-0.9.1-py3-none-any.whl

This diff compares the contents of two publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the versions as they appear in their respective public registries.
Files changed (46)
  1. runbooks/__init__.py +1 -1
  2. runbooks/cfat/assessment/compliance.py +4 -1
  3. runbooks/cloudops/__init__.py +123 -0
  4. runbooks/cloudops/base.py +385 -0
  5. runbooks/cloudops/cost_optimizer.py +811 -0
  6. runbooks/cloudops/infrastructure_optimizer.py +29 -0
  7. runbooks/cloudops/interfaces.py +828 -0
  8. runbooks/cloudops/lifecycle_manager.py +29 -0
  9. runbooks/cloudops/mcp_cost_validation.py +678 -0
  10. runbooks/cloudops/models.py +251 -0
  11. runbooks/cloudops/monitoring_automation.py +29 -0
  12. runbooks/cloudops/notebook_framework.py +676 -0
  13. runbooks/cloudops/security_enforcer.py +449 -0
  14. runbooks/common/mcp_cost_explorer_integration.py +900 -0
  15. runbooks/common/mcp_integration.py +19 -10
  16. runbooks/common/rich_utils.py +1 -1
  17. runbooks/finops/README.md +31 -0
  18. runbooks/finops/cost_optimizer.py +1340 -0
  19. runbooks/finops/finops_dashboard.py +211 -5
  20. runbooks/finops/schemas.py +589 -0
  21. runbooks/inventory/runbooks.inventory.organizations_discovery.log +0 -0
  22. runbooks/inventory/runbooks.security.security_export.log +0 -0
  23. runbooks/main.py +525 -0
  24. runbooks/operate/ec2_operations.py +428 -0
  25. runbooks/operate/iam_operations.py +598 -3
  26. runbooks/operate/rds_operations.py +508 -0
  27. runbooks/operate/s3_operations.py +508 -0
  28. runbooks/remediation/base.py +5 -3
  29. runbooks/security/__init__.py +101 -0
  30. runbooks/security/cloudops_automation_security_validator.py +1164 -0
  31. runbooks/security/compliance_automation_engine.py +4 -4
  32. runbooks/security/enterprise_security_framework.py +4 -5
  33. runbooks/security/executive_security_dashboard.py +1247 -0
  34. runbooks/security/multi_account_security_controls.py +2254 -0
  35. runbooks/security/real_time_security_monitor.py +1196 -0
  36. runbooks/security/security_baseline_tester.py +3 -3
  37. runbooks/sre/production_monitoring_framework.py +584 -0
  38. runbooks/validation/mcp_validator.py +29 -15
  39. runbooks/vpc/networking_wrapper.py +6 -3
  40. runbooks-0.9.1.dist-info/METADATA +308 -0
  41. {runbooks-0.9.0.dist-info → runbooks-0.9.1.dist-info}/RECORD +45 -23
  42. runbooks-0.9.0.dist-info/METADATA +0 -718
  43. {runbooks-0.9.0.dist-info → runbooks-0.9.1.dist-info}/WHEEL +0 -0
  44. {runbooks-0.9.0.dist-info → runbooks-0.9.1.dist-info}/entry_points.txt +0 -0
  45. {runbooks-0.9.0.dist-info → runbooks-0.9.1.dist-info}/licenses/LICENSE +0 -0
  46. {runbooks-0.9.0.dist-info → runbooks-0.9.1.dist-info}/top_level.txt +0 -0
@@ -59,6 +59,10 @@ class S3Operations(BaseOperation):
         "set_public_access_block",
         "get_public_access_block",
         "sync_objects",
+        "find_buckets_without_lifecycle",
+        "add_lifecycle_policy_bulk",
+        "get_bucket_lifecycle",
+        "analyze_lifecycle_compliance",
     }
     requires_confirmation = True
 
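
The hunk above registers the four new lifecycle operations in the class-level allow-list; the matching dispatch follows in the next hunk. As a minimal stand-alone sketch of the gating pattern (the real BaseOperation plumbing is not shown in this diff, so the names here are illustrative; only the operation strings and the error message are taken from it):

# Hypothetical sketch of an operation allow-list gate.
ALLOWED_OPERATIONS = {
    "sync_objects",
    "find_buckets_without_lifecycle",
    "add_lifecycle_policy_bulk",
    "get_bucket_lifecycle",
    "analyze_lifecycle_compliance",
}

def check_operation(operation_type: str) -> None:
    # Fail fast on anything that was never registered.
    if operation_type not in ALLOWED_OPERATIONS:
        raise ValueError(f"Unsupported operation: {operation_type}")
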
@@ -174,6 +178,14 @@ class S3Operations(BaseOperation):
             return self.get_public_access_block(context, kwargs.get("account_id"))
         elif operation_type == "sync_objects":
             return self.sync_objects(context, **kwargs)
+        elif operation_type == "find_buckets_without_lifecycle":
+            return self.find_buckets_without_lifecycle(context, **kwargs)
+        elif operation_type == "add_lifecycle_policy_bulk":
+            return self.add_lifecycle_policy_bulk(context, **kwargs)
+        elif operation_type == "get_bucket_lifecycle":
+            return self.get_bucket_lifecycle(context, **kwargs)
+        elif operation_type == "analyze_lifecycle_compliance":
+            return self.analyze_lifecycle_compliance(context, **kwargs)
         else:
             raise ValueError(f"Unsupported operation: {operation_type}")
 
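
The router forwards **kwargs untouched, so each operation's accepted arguments are defined entirely by its method signature. A hedged call-site sketch follows; the name of the routing method and the constructors of S3Operations and OperationContext are not visible in this diff, so treat those as assumptions:

# Assumed entry point and constructors; only the operation_type strings and
# the keyword arguments come from the diff itself.
ops = S3Operations(profile="default", region="us-east-1")
context = OperationContext(dry_run=True)

# Routed to ops.find_buckets_without_lifecycle(context, region="us-east-1")
results = ops.execute_operation(context, "find_buckets_without_lifecycle", region="us-east-1")
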
@@ -723,6 +735,502 @@ class S3Operations(BaseOperation):
 
         return [result]
 
+    def find_buckets_without_lifecycle(
+        self,
+        context: OperationContext,
+        region: Optional[str] = None,
+        bucket_names: Optional[List[str]] = None
+    ) -> List[OperationResult]:
+        """
+        Find S3 buckets without lifecycle policies.
+
+        Enhanced from unSkript notebook: AWS_Add_Lifecycle_Policy_To_S3_Buckets.ipynb
+        Identifies buckets that do not have any configured lifecycle rules for
+        managing object lifecycle, valuable for optimizing storage costs.
+
+        Args:
+            context: Operation context
+            region: AWS region to search (if None, searches all regions)
+            bucket_names: Specific bucket names to check (if None, checks all buckets)
+
+        Returns:
+            List of operation results with buckets without lifecycle policies
+        """
+        result = self.create_operation_result(
+            context, "find_buckets_without_lifecycle", "s3:bucket", "lifecycle-audit"
+        )
+
+        try:
+            console.print(f"[bold blue]🔍 Scanning for S3 buckets without lifecycle policies...[/bold blue]")
+
+            buckets_without_policy = []
+
+            # Get list of regions to search
+            search_regions = []
+            if region:
+                search_regions = [region]
+            elif bucket_names and region:
+                search_regions = [region]
+            else:
+                # Get all regions if not specified - use boto3 to get all regions
+                try:
+                    ec2_client = self.get_client("ec2", "us-east-1")  # Use us-east-1 to get all regions
+                    regions_response = self.execute_aws_call(ec2_client, "describe_regions")
+                    search_regions = [r["RegionName"] for r in regions_response.get("Regions", [])]
+                except Exception:
+                    # Fallback to common regions
+                    search_regions = ["us-east-1", "us-west-2", "eu-west-1", "ap-southeast-1"]
+
+            # Process each region
+            for reg in search_regions:
+                try:
+                    s3_client = self.get_client("s3", reg)
+
+                    # Get buckets to check
+                    if bucket_names:
+                        # Check specific buckets
+                        buckets_to_check = bucket_names
+                    else:
+                        # Get all buckets in region
+                        response = self.execute_aws_call(s3_client, "list_buckets")
+                        buckets_to_check = [bucket["Name"] for bucket in response.get("Buckets", [])]
+
+                    # Check each bucket for lifecycle policies
+                    for bucket_name in buckets_to_check:
+                        try:
+                            # Get bucket location to ensure it's in the current region
+                            try:
+                                bucket_location = self.execute_aws_call(s3_client, "get_bucket_location", Bucket=bucket_name)
+                                bucket_region = bucket_location.get("LocationConstraint")
+
+                                # us-east-1 returns None for LocationConstraint
+                                if bucket_region is None:
+                                    bucket_region = "us-east-1"
+
+                                # Skip if bucket is not in current region (when checking all regions)
+                                if not bucket_names and bucket_region != reg:
+                                    continue
+
+                            except ClientError as e:
+                                if e.response["Error"]["Code"] in ["NoSuchBucket", "AccessDenied"]:
+                                    continue
+                                raise
+
+                            # Check for lifecycle configuration
+                            try:
+                                lifecycle_response = self.execute_aws_call(
+                                    s3_client, "get_bucket_lifecycle_configuration", Bucket=bucket_name
+                                )
+
+                                # If we get here, bucket has lifecycle rules
+                                rules = lifecycle_response.get("Rules", [])
+                                if not rules:
+                                    # Empty rules list means no active lifecycle
+                                    buckets_without_policy.append({
+                                        "bucket_name": bucket_name,
+                                        "region": bucket_region,
+                                        "issue": "Empty lifecycle configuration"
+                                    })
+
+                            except ClientError as e:
+                                if e.response["Error"]["Code"] == "NoSuchLifecycleConfiguration":
+                                    # No lifecycle configuration found
+                                    buckets_without_policy.append({
+                                        "bucket_name": bucket_name,
+                                        "region": bucket_region,
+                                        "issue": "No lifecycle configuration"
+                                    })
+                                else:
+                                    logger.warning(f"Could not check lifecycle for bucket {bucket_name}: {e}")
+
+                        except Exception as bucket_error:
+                            logger.warning(f"Error checking bucket {bucket_name}: {bucket_error}")
+                            continue
+
+                except Exception as region_error:
+                    logger.warning(f"Error processing region {reg}: {region_error}")
+                    continue
+
+            # Format results with Rich console output
+            if buckets_without_policy:
+                console.print(f"[bold yellow]⚠️ Found {len(buckets_without_policy)} bucket(s) without lifecycle policies:[/bold yellow]")
+
+                from rich.table import Table
+                table = Table(show_header=True, header_style="bold magenta")
+                table.add_column("Bucket Name", style="cyan")
+                table.add_column("Region", style="green")
+                table.add_column("Issue", style="yellow")
+
+                for bucket_info in buckets_without_policy:
+                    table.add_row(
+                        bucket_info["bucket_name"],
+                        bucket_info["region"],
+                        bucket_info["issue"]
+                    )
+
+                console.print(table)
+
+                result.response_data = {
+                    "buckets_without_lifecycle": buckets_without_policy,
+                    "total_count": len(buckets_without_policy),
+                    "regions_scanned": search_regions
+                }
+                result.mark_completed(OperationStatus.SUCCESS)
+            else:
+                console.print("[bold green]✅ All buckets have lifecycle policies configured![/bold green]")
+                result.response_data = {
+                    "buckets_without_lifecycle": [],
+                    "total_count": 0,
+                    "regions_scanned": search_regions,
+                    "message": "All buckets have lifecycle policies"
+                }
+                result.mark_completed(OperationStatus.SUCCESS)
+
+        except Exception as e:
+            error_msg = f"Failed to scan for buckets without lifecycle policies: {e}"
+            logger.error(error_msg)
+            result.mark_completed(OperationStatus.FAILED, error_msg)
+
+        return [result]
+
+    def add_lifecycle_policy_bulk(
+        self,
+        context: OperationContext,
+        bucket_list: List[Dict[str, str]],
+        expiration_days: int = 30,
+        prefix: str = "",
+        noncurrent_days: int = 30,
+        transition_ia_days: Optional[int] = None,
+        transition_glacier_days: Optional[int] = None,
+    ) -> List[OperationResult]:
+        """
+        Add lifecycle policies to multiple S3 buckets in bulk.
+
+        Enhanced from unSkript notebook: AWS_Add_Lifecycle_Policy_To_S3_Buckets.ipynb
+        Applies optimized lifecycle configuration for cost management.
+
+        Args:
+            context: Operation context
+            bucket_list: List of dictionaries with 'bucket_name' and 'region' keys
+            expiration_days: Days after which objects expire
+            prefix: Object prefix filter for lifecycle rule
+            noncurrent_days: Days before noncurrent versions are deleted
+            transition_ia_days: Days before transition to IA storage class
+            transition_glacier_days: Days before transition to Glacier
+
+        Returns:
+            List of operation results for each bucket processed
+        """
+        results = []
+
+        console.print(f"[bold blue]📋 Adding lifecycle policies to {len(bucket_list)} bucket(s)...[/bold blue]")
+
+        if context.dry_run:
+            console.print("[yellow]🧪 DRY-RUN MODE: No actual changes will be made[/yellow]")
+
+        for i, bucket_info in enumerate(bucket_list, 1):
+            bucket_name = bucket_info.get("bucket_name")
+            bucket_region = bucket_info.get("region")
+
+            if not bucket_name or not bucket_region:
+                logger.error(f"Invalid bucket info: {bucket_info}")
+                continue
+
+            console.print(f"[cyan]({i}/{len(bucket_list)}) Processing bucket: {bucket_name}[/cyan]")
+
+            result = self.create_operation_result(
+                context, "add_lifecycle_policy", "s3:bucket", bucket_name
+            )
+
+            try:
+                s3_client = self.get_client("s3", bucket_region)
+
+                # Build lifecycle configuration
+                lifecycle_rules = []
+
+                # Main lifecycle rule
+                rule = {
+                    "ID": f"lifecycle-rule-{int(datetime.now().timestamp())}",
+                    "Status": "Enabled",
+                    "Filter": {"Prefix": prefix} if prefix else {},
+                    "Expiration": {"Days": expiration_days},
+                }
+
+                # Add noncurrent version expiration
+                if noncurrent_days:
+                    rule["NoncurrentVersionExpiration"] = {"NoncurrentDays": noncurrent_days}
+
+                # Add storage class transitions
+                transitions = []
+                if transition_ia_days and transition_ia_days < expiration_days:
+                    transitions.append({
+                        "Days": transition_ia_days,
+                        "StorageClass": "STANDARD_IA"
+                    })
+
+                if transition_glacier_days and transition_glacier_days < expiration_days:
+                    transitions.append({
+                        "Days": transition_glacier_days,
+                        "StorageClass": "GLACIER"
+                    })
+
+                if transitions:
+                    rule["Transitions"] = transitions
+
+                lifecycle_rules.append(rule)
+
+                lifecycle_config = {"Rules": lifecycle_rules}
+
+                if context.dry_run:
+                    console.print(f"[yellow] [DRY-RUN] Would apply lifecycle policy to {bucket_name}[/yellow]")
+                    result.response_data = {
+                        "bucket_name": bucket_name,
+                        "region": bucket_region,
+                        "lifecycle_config": lifecycle_config,
+                        "dry_run": True
+                    }
+                    result.mark_completed(OperationStatus.DRY_RUN)
+                else:
+                    # Apply lifecycle configuration
+                    response = self.execute_aws_call(
+                        s3_client,
+                        "put_bucket_lifecycle_configuration",
+                        Bucket=bucket_name,
+                        LifecycleConfiguration=lifecycle_config
+                    )
+
+                    console.print(f"[green] ✅ Successfully applied lifecycle policy to {bucket_name}[/green]")
+
+                    result.response_data = {
+                        "bucket_name": bucket_name,
+                        "region": bucket_region,
+                        "lifecycle_config": lifecycle_config,
+                        "aws_response": response
+                    }
+                    result.mark_completed(OperationStatus.SUCCESS)
+
+            except ClientError as e:
+                error_msg = f"Failed to set lifecycle policy on {bucket_name}: {e}"
+                console.print(f"[red] ❌ {error_msg}[/red]")
+                logger.error(error_msg)
+                result.mark_completed(OperationStatus.FAILED, error_msg)
+            except Exception as e:
+                error_msg = f"Unexpected error for bucket {bucket_name}: {e}"
+                console.print(f"[red] ❌ {error_msg}[/red]")
+                logger.error(error_msg)
+                result.mark_completed(OperationStatus.FAILED, error_msg)
+
+            results.append(result)
+
+        # Summary report
+        successful = len([r for r in results if r.success])
+        failed = len(results) - successful
+
+        if context.dry_run:
+            console.print(f"[yellow]🧪 DRY-RUN SUMMARY: Would process {len(results)} bucket(s)[/yellow]")
+        else:
+            console.print(f"[bold blue]📊 BULK OPERATION SUMMARY:[/bold blue]")
+            console.print(f"[green] ✅ Successful: {successful}[/green]")
+            if failed > 0:
+                console.print(f"[red] ❌ Failed: {failed}[/red]")
+
+        return results
+
+    def get_bucket_lifecycle(
+        self, context: OperationContext, bucket_name: str
+    ) -> List[OperationResult]:
+        """
+        Get current lifecycle configuration for an S3 bucket.
+
+        Args:
+            context: Operation context
+            bucket_name: Name of bucket to check
+
+        Returns:
+            List of operation results with current lifecycle configuration
+        """
+        s3_client = self.get_client("s3")
+
+        result = self.create_operation_result(
+            context, "get_bucket_lifecycle", "s3:bucket", bucket_name
+        )
+
+        try:
+            console.print(f"[blue]🔍 Checking lifecycle configuration for bucket: {bucket_name}[/blue]")
+
+            try:
+                response = self.execute_aws_call(
+                    s3_client, "get_bucket_lifecycle_configuration", Bucket=bucket_name
+                )
+
+                rules = response.get("Rules", [])
+                console.print(f"[green]✅ Found {len(rules)} lifecycle rule(s) for bucket {bucket_name}[/green]")
+
+                # Display rules in a formatted table
+                if rules:
+                    from rich.table import Table
+                    table = Table(show_header=True, header_style="bold magenta")
+                    table.add_column("Rule ID", style="cyan")
+                    table.add_column("Status", style="green")
+                    table.add_column("Prefix", style="yellow")
+                    table.add_column("Expiration", style="red")
+
+                    for rule in rules:
+                        rule_id = rule.get("ID", "N/A")
+                        status = rule.get("Status", "N/A")
+
+                        # Handle different filter formats
+                        filter_info = rule.get("Filter", {})
+                        if isinstance(filter_info, dict):
+                            prefix = filter_info.get("Prefix", "")
+                        else:
+                            prefix = ""
+
+                        expiration = rule.get("Expiration", {})
+                        exp_days = expiration.get("Days", "N/A")
+
+                        table.add_row(rule_id, status, prefix or "All objects", str(exp_days))
+
+                    console.print(table)
+
+                result.response_data = {
+                    "bucket_name": bucket_name,
+                    "lifecycle_rules": rules,
+                    "rules_count": len(rules)
+                }
+                result.mark_completed(OperationStatus.SUCCESS)
+
+            except ClientError as e:
+                if e.response["Error"]["Code"] == "NoSuchLifecycleConfiguration":
+                    console.print(f"[yellow]⚠️ No lifecycle configuration found for bucket {bucket_name}[/yellow]")
+                    result.response_data = {
+                        "bucket_name": bucket_name,
+                        "lifecycle_rules": [],
+                        "rules_count": 0,
+                        "message": "No lifecycle configuration"
+                    }
+                    result.mark_completed(OperationStatus.SUCCESS)
+                else:
+                    raise e
+
+        except ClientError as e:
+            error_msg = f"Failed to get lifecycle configuration for {bucket_name}: {e}"
+            logger.error(error_msg)
+            result.mark_completed(OperationStatus.FAILED, error_msg)
+
+        return [result]
+
+    def analyze_lifecycle_compliance(
+        self, context: OperationContext, region: Optional[str] = None
+    ) -> List[OperationResult]:
+        """
+        Analyze lifecycle compliance across S3 buckets and provide cost optimization recommendations.
+
+        Args:
+            context: Operation context
+            region: AWS region to analyze (if None, analyzes all regions)
+
+        Returns:
+            List of operation results with compliance analysis and recommendations
+        """
+        result = self.create_operation_result(
+            context, "analyze_lifecycle_compliance", "s3:account", "compliance-analysis"
+        )
+
+        try:
+            console.print("[bold blue]📊 Analyzing S3 lifecycle compliance and cost optimization opportunities...[/bold blue]")
+
+            # Find buckets without lifecycle policies
+            find_results = self.find_buckets_without_lifecycle(context, region=region)
+
+            if not find_results or not find_results[0].success:
+                result.mark_completed(OperationStatus.FAILED, "Failed to analyze buckets")
+                return [result]
+
+            buckets_data = find_results[0].response_data
+            non_compliant_buckets = buckets_data.get("buckets_without_lifecycle", [])
+            total_buckets_scanned = buckets_data.get("total_count", 0)
+
+            # Calculate compliance metrics
+            s3_client = self.get_client("s3")
+
+            # Get total bucket count for compliance percentage
+            list_response = self.execute_aws_call(s3_client, "list_buckets")
+            total_buckets = len(list_response.get("Buckets", []))
+
+            compliance_percentage = ((total_buckets - len(non_compliant_buckets)) / total_buckets * 100) if total_buckets > 0 else 100
+
+            # Generate recommendations
+            recommendations = []
+            potential_savings = 0
+
+            if non_compliant_buckets:
+                recommendations.extend([
+                    "🎯 Implement lifecycle policies to automatically transition objects to cheaper storage classes",
+                    "💰 Configure automatic deletion of old object versions to reduce storage costs",
+                    "📈 Set up transitions: Standard → IA (30 days) → Glacier (90 days) → Deep Archive (365 days)",
+                    "🔧 Use prefixes to apply different policies to different object types",
+                    "📊 Monitor lifecycle rule effectiveness with CloudWatch metrics"
+                ])
+
+                # Estimate potential savings (rough calculation)
+                estimated_savings_per_bucket = 25  # Estimated 25% savings per bucket
+                potential_savings = len(non_compliant_buckets) * estimated_savings_per_bucket
+
+            # Create summary report
+            from rich.panel import Panel
+            from rich.table import Table
+
+            # Compliance summary table
+            summary_table = Table(show_header=True, header_style="bold magenta")
+            summary_table.add_column("Metric", style="cyan")
+            summary_table.add_column("Value", style="green")
+
+            summary_table.add_row("Total Buckets", str(total_buckets))
+            summary_table.add_row("Compliant Buckets", str(total_buckets - len(non_compliant_buckets)))
+            summary_table.add_row("Non-Compliant Buckets", str(len(non_compliant_buckets)))
+            summary_table.add_row("Compliance Percentage", f"{compliance_percentage:.1f}%")
+            summary_table.add_row("Potential Cost Savings", f"~{potential_savings}%")
+
+            console.print(Panel(summary_table, title="S3 Lifecycle Compliance Report", border_style="blue"))
+
+            # Display recommendations if any
+            if recommendations:
+                console.print("\n[bold yellow]💡 Cost Optimization Recommendations:[/bold yellow]")
+                for i, rec in enumerate(recommendations, 1):
+                    console.print(f" {i}. {rec}")
+
+            # Compliance status color coding
+            if compliance_percentage >= 90:
+                compliance_status = "[bold green]EXCELLENT[/bold green]"
+            elif compliance_percentage >= 75:
+                compliance_status = "[bold yellow]GOOD[/bold yellow]"
+            elif compliance_percentage >= 50:
+                compliance_status = "[bold orange]NEEDS IMPROVEMENT[/bold orange]"
+            else:
+                compliance_status = "[bold red]POOR[/bold red]"
+
+            console.print(f"\n[bold blue]Overall Compliance Status: {compliance_status}[/bold blue]")
+
+            result.response_data = {
+                "compliance_percentage": compliance_percentage,
+                "total_buckets": total_buckets,
+                "compliant_buckets": total_buckets - len(non_compliant_buckets),
+                "non_compliant_buckets": len(non_compliant_buckets),
+                "non_compliant_details": non_compliant_buckets,
+                "recommendations": recommendations,
+                "potential_savings_percentage": potential_savings,
+                "compliance_status": compliance_status.replace("[bold ", "").replace("[/bold ", "").replace("]", "")
+            }
+            result.mark_completed(OperationStatus.SUCCESS)
+
+        except Exception as e:
+            error_msg = f"Failed to analyze lifecycle compliance: {e}"
+            logger.error(error_msg)
+            result.mark_completed(OperationStatus.FAILED, error_msg)
+
+        return [result]
+
     def delete_bucket_and_objects(self, context: OperationContext, bucket_name: str) -> List[OperationResult]:
         """
         Delete S3 bucket and all its objects/versions (complete cleanup).
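
Taken together, the four methods form an audit-then-remediate workflow: find_buckets_without_lifecycle returns results whose response_data["buckets_without_lifecycle"] entries already carry the 'bucket_name' and 'region' keys that add_lifecycle_policy_bulk expects in bucket_list. A hedged end-to-end sketch (construction of ops and context is assumed, not shown in this diff):

# Signatures below are taken from the diff. With context.dry_run=True,
# add_lifecycle_policy_bulk only previews the configuration it would apply.
audit = ops.find_buckets_without_lifecycle(context, region="us-east-1")
candidates = audit[0].response_data["buckets_without_lifecycle"]

results = ops.add_lifecycle_policy_bulk(
    context,
    bucket_list=candidates,      # the extra 'issue' key is ignored by .get()
    expiration_days=365,
    transition_ia_days=30,       # Standard -> STANDARD_IA after 30 days
    transition_glacier_days=90,  # STANDARD_IA -> GLACIER after 90 days
)
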
@@ -79,6 +79,7 @@ from botocore.exceptions import BotoCoreError, ClientError
 from loguru import logger
 from pydantic import BaseModel, Field
 
+from runbooks.common.profile_utils import create_management_session
 from runbooks.inventory.models.account import AWSAccount
 
 
@@ -385,13 +386,14 @@ class BaseRemediation(ABC):
 
     @property
     def session(self) -> boto3.Session:
-        """Get or create AWS session with profile configuration."""
+        """Get or create AWS session with profile configuration using enterprise profile management."""
         if self._session is None:
             try:
-                self._session = boto3.Session(profile_name=self.profile, region_name=self.region)
+                # Use management profile for remediation operations requiring cross-account access
+                self._session = create_management_session(profile=self.profile)
             except Exception as e:
                 logger.warning(f"Failed to create session with profile {self.profile}: {e}")
-                self._session = boto3.Session(region_name=self.region)
+                self._session = create_management_session()  # Use default profile
         return self._session
 
     def get_client(self, service_name: str, region: str = None) -> Any:
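
create_management_session lives in runbooks.common.profile_utils and its internals are not part of this diff; all the hunk shows is that the control flow is preserved: try a profile-scoped session first, then warn and fall back to default credentials on failure. A minimal boto3-only sketch of that shape, mirroring the code being replaced rather than the new helper:

from typing import Optional

import boto3
from loguru import logger

def session_with_fallback(profile: Optional[str], region: Optional[str]) -> boto3.Session:
    # Prefer the named profile; on failure, warn and fall back to the
    # default credential chain, as the session property above does.
    try:
        return boto3.Session(profile_name=profile, region_name=region)
    except Exception as e:
        logger.warning(f"Failed to create session with profile {profile}: {e}")
        return boto3.Session(region_name=region)
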
@@ -136,6 +136,63 @@ from .enterprise_security_framework import (
     SecuritySeverity,
 )
 
+# CloudOps-Automation Security Validation
+from .cloudops_automation_security_validator import (
+    CloudOpsAutomationSecurityValidator,
+    CloudOpsSecurityComponent,
+    CloudOpsSecurityLevel,
+    ComplianceFrameworkEngine,
+    MCPSecurityIntegration,
+    MultiAccountSecurityController,
+    MultiAccountSecurityValidation,
+    RealTimeSecurityValidator,
+    ValidationCategory,
+)
+
+# Real-time Security Monitoring
+from .real_time_security_monitor import (
+    AutomatedResponseEngine,
+    MCPSecurityConnector,
+    RealTimeSecurityMonitor,
+    SecurityDashboard,
+    SecurityEvent,
+    SecurityEventProcessor,
+    SecurityEventType,
+    ThreatDetectionEngine,
+    ThreatLevel,
+)
+
+# Multi-Account Security Controls
+from .multi_account_security_controls import (
+    AccountSecurityProfile,
+    ControlStatus,
+    DeploymentStrategy,
+    MultiAccountDeploymentTracker,
+    MultiAccountSecurityController,
+    MultiAccountSecurityReport,
+    SecurityControl,
+    SecurityControlType,
+)
+
+# Executive Security Dashboard
+from .executive_security_dashboard import (
+    BusinessImpactCategory,
+    ComplianceFrameworkStatus,
+    ComplianceStatusAnalyzer,
+    ExecutiveMetricsCollector,
+    ExecutiveReportGenerator,
+    ExecutiveSecurityDashboard,
+    ExecutiveSecurityMetric,
+    ExecutiveSecurityReport,
+    IndustryBenchmarkAnalyzer,
+    RiskAppetite,
+    SecurityIncidentExecutiveSummary,
+    SecurityInvestmentROI,
+    SecurityMaturityLevel,
+    SecurityROICalculator,
+    SecurityVisualizationEngine,
+)
+
 # Cross-module security integration
 from .module_security_integrator import (
     CFATSecurityValidator,
@@ -185,6 +242,50 @@ __all__ = [
     "ComplianceAssessment",
     "ComplianceReport",
     "ComplianceMonitor",
+    # CloudOps-Automation Security Validation
+    "CloudOpsAutomationSecurityValidator",
+    "CloudOpsSecurityComponent",
+    "CloudOpsSecurityLevel",
+    "ComplianceFrameworkEngine",
+    "MCPSecurityIntegration",
+    "MultiAccountSecurityValidation",
+    "RealTimeSecurityValidator",
+    "ValidationCategory",
+    # Real-time Security Monitoring
+    "AutomatedResponseEngine",
+    "MCPSecurityConnector",
+    "RealTimeSecurityMonitor",
+    "SecurityDashboard",
+    "SecurityEvent",
+    "SecurityEventProcessor",
+    "SecurityEventType",
+    "ThreatDetectionEngine",
+    "ThreatLevel",
+    # Multi-Account Security Controls
+    "AccountSecurityProfile",
+    "ControlStatus",
+    "DeploymentStrategy",
+    "MultiAccountDeploymentTracker",
+    "MultiAccountSecurityController",
+    "MultiAccountSecurityReport",
+    "SecurityControl",
+    "SecurityControlType",
+    # Executive Security Dashboard
+    "BusinessImpactCategory",
+    "ComplianceFrameworkStatus",
+    "ComplianceStatusAnalyzer",
+    "ExecutiveMetricsCollector",
+    "ExecutiveReportGenerator",
+    "ExecutiveSecurityDashboard",
+    "ExecutiveSecurityMetric",
+    "ExecutiveSecurityReport",
+    "IndustryBenchmarkAnalyzer",
+    "RiskAppetite",
+    "SecurityIncidentExecutiveSummary",
+    "SecurityInvestmentROI",
+    "SecurityMaturityLevel",
+    "SecurityROICalculator",
+    "SecurityVisualizationEngine",
     # Cross-module security integration
     "ModuleSecurityIntegrator",
     "InventorySecurityValidator",