runbooks 0.9.2__py3-none-any.whl → 0.9.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (46)
  1. runbooks/__init__.py +15 -6
  2. runbooks/cfat/__init__.py +3 -1
  3. runbooks/cloudops/__init__.py +3 -1
  4. runbooks/common/aws_utils.py +367 -0
  5. runbooks/common/enhanced_logging_example.py +239 -0
  6. runbooks/common/enhanced_logging_integration_example.py +257 -0
  7. runbooks/common/logging_integration_helper.py +344 -0
  8. runbooks/common/profile_utils.py +8 -6
  9. runbooks/common/rich_utils.py +347 -3
  10. runbooks/enterprise/logging.py +400 -38
  11. runbooks/finops/README.md +262 -406
  12. runbooks/finops/__init__.py +2 -1
  13. runbooks/finops/accuracy_cross_validator.py +12 -3
  14. runbooks/finops/commvault_ec2_analysis.py +415 -0
  15. runbooks/finops/cost_processor.py +718 -42
  16. runbooks/finops/dashboard_router.py +44 -22
  17. runbooks/finops/dashboard_runner.py +302 -39
  18. runbooks/finops/embedded_mcp_validator.py +358 -48
  19. runbooks/finops/finops_scenarios.py +771 -0
  20. runbooks/finops/multi_dashboard.py +30 -15
  21. runbooks/finops/single_dashboard.py +386 -58
  22. runbooks/finops/types.py +29 -4
  23. runbooks/inventory/__init__.py +2 -1
  24. runbooks/main.py +522 -29
  25. runbooks/operate/__init__.py +3 -1
  26. runbooks/remediation/__init__.py +3 -1
  27. runbooks/remediation/commons.py +55 -16
  28. runbooks/remediation/commvault_ec2_analysis.py +259 -0
  29. runbooks/remediation/rds_snapshot_list.py +267 -102
  30. runbooks/remediation/workspaces_list.py +182 -31
  31. runbooks/security/__init__.py +3 -1
  32. runbooks/sre/__init__.py +2 -1
  33. runbooks/utils/__init__.py +81 -6
  34. runbooks/utils/version_validator.py +241 -0
  35. runbooks/vpc/__init__.py +2 -1
  36. {runbooks-0.9.2.dist-info → runbooks-0.9.4.dist-info}/METADATA +98 -60
  37. {runbooks-0.9.2.dist-info → runbooks-0.9.4.dist-info}/RECORD +41 -38
  38. {runbooks-0.9.2.dist-info → runbooks-0.9.4.dist-info}/entry_points.txt +1 -0
  39. runbooks/inventory/cloudtrail.md +0 -727
  40. runbooks/inventory/discovery.md +0 -81
  41. runbooks/remediation/CLAUDE.md +0 -100
  42. runbooks/remediation/DOME9.md +0 -218
  43. runbooks/security/ENTERPRISE_SECURITY_FRAMEWORK.md +0 -506
  44. {runbooks-0.9.2.dist-info → runbooks-0.9.4.dist-info}/WHEEL +0 -0
  45. {runbooks-0.9.2.dist-info → runbooks-0.9.4.dist-info}/licenses/LICENSE +0 -0
  46. {runbooks-0.9.2.dist-info → runbooks-0.9.4.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,257 @@
1
+ #!/usr/bin/env python3
2
+ """
3
+ Enhanced Multi-Level Logging Integration Example
4
+
5
+ This example demonstrates how modules can integrate with the enhanced
6
+ multi-level logging architecture for user-type specific content.
7
+
8
+ ## Usage Examples by Log Level
9
+
10
+ ### Tech Users (DEBUG level)
11
+ ```bash
12
+ runbooks finops --log-level DEBUG --profile my-profile
13
+ ```
14
+
15
+ ### Standard Users (INFO level - default)
16
+ ```bash
17
+ runbooks finops --profile my-profile
18
+ ```
19
+
20
+ ### Business Users (WARNING level)
21
+ ```bash
22
+ runbooks finops --log-level WARNING --profile my-profile
23
+ ```
24
+
25
+ ### Error Focus (ERROR level)
26
+ ```bash
27
+ runbooks finops --log-level ERROR --profile my-profile
28
+ ```
29
+
30
+ Author: CloudOps Runbooks Team
31
+ """
32
+
33
+ import time
34
+ from typing import Optional, Dict, Any, List
35
+
36
+ try:
37
+ import boto3
38
+ except ImportError:
39
+ boto3 = None
40
+
41
+ from runbooks.enterprise.logging import get_module_logger
42
+
43
+
44
class EnhancedLoggingIntegrationExample:
    """Example class demonstrating enhanced logging integration patterns.

    Each ``demonstrate_*`` method exercises one convenience API of the
    enhanced multi-level logger so output can be compared across the
    DEBUG / INFO / WARNING / ERROR levels.
    """

    def __init__(self, module_name: str = "example", log_level: str = "INFO", json_output: bool = False):
        """
        Initialize with enhanced logging.

        Args:
            module_name: Name of the module
            log_level: Logging level (DEBUG, INFO, WARNING, ERROR)
            json_output: Enable JSON output for programmatic use
        """
        self.logger = get_module_logger(module_name, level=log_level, json_output=json_output)
        self.log_level = log_level.upper()
        # Fix: remember the requested output mode directly. The original probed
        # hasattr(self.logger, 'json_output') later, which silently enabled the
        # JSON demo whenever the logger implementation lacked that attribute.
        self.json_output = json_output

    def demonstrate_aws_operation_logging(self):
        """Demonstrate AWS operation logging with different levels."""
        print(f"\n📊 AWS OPERATION LOGGING EXAMPLES ({self.log_level} level)")
        print("=" * 60)

        # Simulated AWS operations covering both success and failure outcomes.
        operations = [
            {"service": "cost-explorer", "operation": "get_cost_and_usage", "duration": 0.8, "success": True, "resource_count": 25},
            {"service": "ec2", "operation": "describe_instances", "duration": 1.2, "success": True, "resource_count": 10},
            {"service": "s3", "operation": "list_buckets", "duration": 0.3, "success": True, "resource_count": 5},
            {"service": "iam", "operation": "get_account_summary", "duration": 15.2, "success": False, "error": "AccessDenied: Insufficient permissions"}
        ]

        for op in operations:
            self.logger.log_aws_operation(
                operation=op["operation"],
                service=op["service"],
                duration=op["duration"],
                success=op["success"],
                resource_count=op.get("resource_count"),
                error=op.get("error"),
                request_id=f"req-{int(time.time())}-{hash(op['service']) % 10000}"
            )
            time.sleep(0.1)  # Brief pause for demonstration

    def demonstrate_cost_analysis_logging(self):
        """Demonstrate cost analysis logging with business focus."""
        print(f"\n💰 COST ANALYSIS LOGGING EXAMPLES ({self.log_level} level)")
        print("=" * 60)

        cost_scenarios = [
            {
                "operation": "monthly_ec2_spend_analysis",
                "cost_impact": 2500.0,
                "savings_opportunity": 750.0,
                "recommendation": "Consider Reserved Instances for consistent workloads"
            },
            {
                "operation": "s3_storage_optimization",
                "cost_impact": 150.0,
                "savings_opportunity": 45.0,
                "recommendation": "Implement lifecycle policies for infrequent access data"
            },
            {
                "operation": "unused_eip_analysis",
                "cost_impact": 50.0,
                "savings_opportunity": 50.0,
                "recommendation": "Release 10 unused Elastic IPs immediately"
            }
        ]

        for scenario in cost_scenarios:
            self.logger.log_cost_analysis(**scenario)
            time.sleep(0.1)

    def demonstrate_performance_logging(self):
        """Demonstrate performance metric logging."""
        print(f"\n⚡ PERFORMANCE LOGGING EXAMPLES ({self.log_level} level)")
        print("=" * 60)

        performance_scenarios = [
            {"operation": "inventory_collection", "duration": 2.1, "threshold": 5.0, "memory_usage": 52428800},  # Fast operation
            {"operation": "large_cost_analysis", "duration": 8.5, "threshold": 5.0, "memory_usage": 104857600},  # Slow operation
            {"operation": "security_scan", "duration": 0.8, "threshold": 2.0, "memory_usage": 26214400}  # Quick scan
        ]

        for scenario in performance_scenarios:
            self.logger.log_performance_metric(**scenario)
            time.sleep(0.1)

    def demonstrate_security_logging(self):
        """Demonstrate security finding logging."""
        print(f"\n🔒 SECURITY LOGGING EXAMPLES ({self.log_level} level)")
        print("=" * 60)

        security_findings = [
            {
                "finding": "S3 bucket with public read access detected",
                "severity": "high",
                "remediation_steps": [
                    "Review bucket policy for public access",
                    "Remove public read permissions if not required",
                    "Enable bucket logging for audit trail"
                ]
            },
            {
                "finding": "IAM user without MFA enabled",
                "severity": "medium",
                "remediation_steps": [
                    "Enable MFA for the affected user",
                    "Review IAM policies for excessive permissions",
                    "Consider using IAM roles instead of users"
                ]
            },
            {
                "finding": "Security group with overly permissive rules",
                "severity": "low",
                "remediation_steps": ["Review and tighten security group rules"]
            }
        ]

        for finding in security_findings:
            self.logger.log_security_finding(**finding)
            time.sleep(0.1)

    def demonstrate_operation_context(self):
        """Demonstrate operation context logging (success and failure paths)."""
        print(f"\n🔄 OPERATION CONTEXT EXAMPLES ({self.log_level} level)")
        print("=" * 60)

        # Successful operation
        with self.logger.operation_context("cost_dashboard_generation", account_count=5, region_count=3):
            time.sleep(1.0)  # Simulate work
            self.logger.info_standard("Generated cost dashboard", resource_count=25)

        time.sleep(0.2)

        # Failed operation (simulated) — the context manager should record the
        # failure before the exception propagates.
        try:
            with self.logger.operation_context("unauthorized_operation", api_call="iam:GetAccountSummary"):
                time.sleep(0.5)
                raise PermissionError("Access denied: Insufficient IAM permissions for operation")
        except PermissionError:
            pass  # Expected for demonstration

    def demonstrate_json_output(self):
        """Demonstrate JSON output for programmatic use."""
        print(f"\n📋 JSON OUTPUT EXAMPLE ({self.log_level} level)")
        print("=" * 60)

        # Create a dedicated JSON logger at the same level as this instance.
        json_logger = get_module_logger("example_json", level=self.log_level, json_output=True)

        json_logger.info_standard("JSON output demonstration", resource_count=42, operation_status="completed")
        json_logger.log_cost_analysis(
            "json_cost_analysis",
            cost_impact=1200.0,
            savings_opportunity=360.0,
            recommendation="Optimize resource allocation for cost efficiency"
        )

    def run_all_demonstrations(self):
        """Run all logging demonstrations for the configured level."""
        print(f"\n🎯 ENHANCED MULTI-LEVEL LOGGING DEMONSTRATION")
        print(f"Current Log Level: {self.log_level}")
        print(f"User Type Focus: {self._get_user_type_description()}")
        print("=" * 80)

        self.demonstrate_aws_operation_logging()
        self.demonstrate_cost_analysis_logging()
        self.demonstrate_performance_logging()
        self.demonstrate_security_logging()
        self.demonstrate_operation_context()

        # Fix: decide from the stored constructor flag rather than probing the
        # logger object for a 'json_output' attribute.
        if not self.json_output:
            self.demonstrate_json_output()

    def _get_user_type_description(self) -> str:
        """Get user type description for current log level."""
        descriptions = {
            "DEBUG": "Tech Users (SRE/DevOps) - Full technical details, API traces, performance metrics",
            "INFO": "Standard Users - Clean operation status, progress indicators, business-friendly output",
            "WARNING": "Business Users - Cost insights, recommendations, optimization opportunities",
            "ERROR": "All Users - Clear error messages with solutions and troubleshooting steps"
        }
        return descriptions.get(self.log_level, "Unknown user type")
225
+
226
+
227
def main():
    """Run the full demonstration once per supported log level."""
    print("🚀 ENHANCED MULTI-LEVEL LOGGING ARCHITECTURE DEMONSTRATION")
    print("=" * 80)
    print("This demonstration shows how logging adapts content based on user type:")
    print("• DEBUG Level: Technical users (SRE/DevOps)")
    print("• INFO Level: Standard users (default)")
    print("• WARNING Level: Business users")
    print("• ERROR Level: All users (minimal output)")
    print("=" * 80)

    # Walk through every supported level; a separator goes between runs but
    # not after the last one.
    levels = ("DEBUG", "INFO", "WARNING", "ERROR")
    for position, level in enumerate(levels, start=1):
        EnhancedLoggingIntegrationExample("enhanced_logging_demo", level).run_all_demonstrations()
        if position < len(levels):
            print("\n" + "=" * 80 + "\n")

    print("\n✅ DEMONSTRATION COMPLETE")
    print("\nTo use enhanced logging in your module:")
    print("1. from runbooks.enterprise.logging import get_module_logger")
    print("2. logger = get_module_logger('your_module_name')")
    print("3. Use logger.info_standard(), logger.debug_tech(), logger.warning_business(), etc.")
    print("4. Use logger.log_aws_operation(), logger.log_cost_analysis() for convenience")


if __name__ == "__main__":
    main()
@@ -0,0 +1,344 @@
1
+ #!/usr/bin/env python3
2
+ """
3
+ Logging Integration Helper
4
+
5
+ This module provides helper functions to make it easy for existing modules
6
+ to upgrade to the enhanced multi-level logging architecture.
7
+
8
+ Author: CloudOps Runbooks Team
9
+ """
10
+
11
+ from typing import Optional, Dict, Any, Callable
12
+ from functools import wraps
13
+ import time
14
+ import boto3
15
+ from botocore.exceptions import ClientError, BotoCoreError
16
+ from runbooks.enterprise.logging import get_module_logger, EnterpriseRichLogger
17
+
18
+
19
def create_enhanced_module_logger(module_name: str, log_level: Optional[str] = None,
                                  json_output: bool = False) -> EnterpriseRichLogger:
    """
    Create an enhanced logger for a module with automatic level detection.

    Args:
        module_name: Name of the module (e.g., 'finops', 'inventory')
        log_level: Override log level, or None to use CLI/environment setting
        json_output: Enable JSON output for programmatic use

    Returns:
        Configured enhanced logger
    """
    # Try to detect log level from CLI context if not provided.
    if log_level is None:
        import os
        import sys

        # Environment variable is the baseline; an explicit CLI flag wins.
        log_level = os.getenv('RUNBOOKS_LOG_LEVEL', 'INFO')

        # Scan command-line arguments for --log-level. Fix vs. original:
        # match the flag exactly (or its '=' form) so flags that merely share
        # the prefix (e.g. --log-level-file) are not misread, and split on the
        # first '=' only so values containing '=' survive intact.
        for idx, arg in enumerate(sys.argv):
            if arg.startswith('--log-level='):
                # --log-level=DEBUG form
                log_level = arg.split('=', 1)[1].upper()
                break
            if arg == '--log-level':
                # --log-level DEBUG form: the next argument holds the level.
                if idx + 1 < len(sys.argv):
                    log_level = sys.argv[idx + 1].upper()
                break

    return get_module_logger(module_name, level=log_level or 'INFO', json_output=json_output)
56
+
57
+
58
def log_aws_operation(func: Callable) -> Callable:
    """
    Decorator to automatically log AWS operations with enhanced context.

    Usage:
        @log_aws_operation
        def describe_instances(session, region):
            client = session.client('ec2', region_name=region)
            return client.describe_instances()
    """

    def _count_resources(payload):
        # Best-effort resource count pulled from a typical AWS response dict;
        # returns None when no countable collection is recognized.
        if not isinstance(payload, dict):
            return None
        if 'Reservations' in payload:
            return sum(len(r.get('Instances', [])) for r in payload['Reservations'])
        if 'Buckets' in payload:
            return len(payload['Buckets'])
        if isinstance(payload.get('ResponseMetadata'), dict):
            # Looks like an AWS response: count the first list-valued field.
            for key, value in payload.items():
                if isinstance(value, list) and key != 'ResponseMetadata':
                    return len(value)
        return None

    @wraps(func)
    def wrapper(*args, **kwargs):
        # Derive the logger name from the wrapped function's module.
        owning_module = func.__module__.split('.')[-1] if hasattr(func, '__module__') else 'unknown'
        logger = create_enhanced_module_logger(owning_module)

        operation_name = func.__name__
        service_name = 'aws'  # Default, could be improved by parsing function or args

        started = time.time()
        try:
            outcome = func(*args, **kwargs)
        except (ClientError, BotoCoreError) as exc:
            logger.error_all(
                f"AWS operation failed: {operation_name}",
                solution="Check AWS permissions and service availability",
                aws_error=str(exc),
                suggested_command=f"aws {service_name} {operation_name.replace('_', '-')} --help"
            )
            raise
        except Exception as exc:
            logger.error_all(
                f"Operation failed: {operation_name}",
                solution="Check the error details above and verify inputs",
                aws_error=str(exc)
            )
            raise

        # Success path: record duration and any recognizable resource count.
        logger.log_aws_operation(
            operation=operation_name,
            service=service_name,
            duration=time.time() - started,
            success=True,
            resource_count=_count_resources(outcome)
        )
        return outcome

    return wrapper
131
+
132
+
133
def log_cost_operation(func: Callable) -> Callable:
    """
    Decorator to automatically log cost-related operations.

    Usage:
        @log_cost_operation
        def analyze_monthly_costs(cost_data):
            # ... cost analysis logic ...
            return {"total_cost": 1500.0, "savings": 300.0}
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        owning_module = func.__module__.split('.')[-1] if hasattr(func, '__module__') else 'finops'
        logger = create_enhanced_module_logger(owning_module)

        name = func.__name__
        started = time.time()

        try:
            outcome = func(*args, **kwargs)
            elapsed = time.time() - started

            # Pull cost figures out of dict-shaped results, accepting both
            # naming conventions ('total_cost'/'cost_impact', etc.).
            cost_impact = savings_opportunity = None
            if isinstance(outcome, dict):
                cost_impact = outcome.get('total_cost') or outcome.get('cost_impact')
                savings_opportunity = outcome.get('savings') or outcome.get('savings_opportunity')

            logger.log_cost_analysis(
                operation=name,
                cost_impact=cost_impact,
                savings_opportunity=savings_opportunity
            )

            # Also surface slow analyses via the performance channel.
            logger.log_performance_metric(name, elapsed)

            return outcome

        except Exception as exc:
            logger.error_all(
                f"Cost analysis failed: {name}",
                solution="Check input data format and AWS permissions",
                aws_error=str(exc)
            )
            raise

    return wrapper
183
+
184
+
185
def log_security_operation(func: Callable) -> Callable:
    """
    Decorator to automatically log security operations.

    Usage:
        @log_security_operation
        def scan_s3_permissions(bucket_name):
            # ... security scan logic ...
            return {"findings": [...], "severity": "medium"}
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        owning_module = func.__module__.split('.')[-1] if hasattr(func, '__module__') else 'security'
        logger = create_enhanced_module_logger(owning_module)

        name = func.__name__

        try:
            outcome = func(*args, **kwargs)

            # Report each finding individually when the result carries them.
            if isinstance(outcome, dict) and 'findings' in outcome:
                findings = outcome['findings']
                default_severity = outcome.get('severity', 'medium')

                if findings:
                    for entry in findings:
                        if isinstance(entry, dict):
                            logger.log_security_finding(
                                finding=entry.get('description', str(entry)),
                                severity=entry.get('severity', default_severity),
                                remediation_steps=entry.get('remediation_steps')
                            )
                        else:
                            logger.log_security_finding(str(entry), severity=default_severity)
                else:
                    # NOTE(review): the rendered diff strips indentation, so the
                    # pairing of this branch is ambiguous in the original;
                    # treated here as the empty-findings case — confirm upstream.
                    logger.info_standard(f"Security scan completed: {name} (no findings)")

            return outcome

        except Exception as exc:
            logger.error_all(
                f"Security operation failed: {name}",
                solution="Check security scan configuration and permissions",
                aws_error=str(exc)
            )
            raise

    return wrapper
234
+
235
+
236
class LoggingMigrationHelper:
    """Helper class to assist with migrating existing modules to enhanced logging."""

    def __init__(self, module_name: str):
        # Keep the module name for reference and build its enhanced logger.
        self.module_name = module_name
        self.logger = create_enhanced_module_logger(module_name)

    def replace_print_statements(self, message: str, level: str = "info", **kwargs):
        """
        Replace print statements with appropriate logging calls.

        Args:
            message: The message to log
            level: Log level (debug, info, warning, error)
            **kwargs: Additional context for enhanced logging
        """
        # Dispatch table instead of an if/elif ladder; unrecognized levels
        # fall back to standard info output.
        emitters = {
            "debug": self.logger.debug_tech,
            "warning": self.logger.warning_business,
            "error": self.logger.error_all,
        }
        emitters.get(level.lower(), self.logger.info_standard)(message, **kwargs)

    def log_operation_start(self, operation: str, **context):
        """Log the start of an operation."""
        return self.logger.operation_context(operation, **context)

    def log_aws_call(self, service: str, operation: str, duration: float = None,
                     success: bool = True, **kwargs):
        """Log an AWS API call."""
        self.logger.log_aws_operation(
            operation=operation,
            service=service,
            duration=duration,
            success=success,
            **kwargs
        )

    def log_cost_finding(self, operation: str, cost: float = None, savings: float = None,
                         recommendation: str = None):
        """Log a cost-related finding."""
        self.logger.log_cost_analysis(
            operation=operation,
            cost_impact=cost,
            savings_opportunity=savings,
            recommendation=recommendation
        )
285
+
286
+
287
# Convenience functions for quick integration
def quick_log_info(module_name: str, message: str, **kwargs):
    """Quick logging for info messages."""
    create_enhanced_module_logger(module_name).info_standard(message, **kwargs)


def quick_log_error(module_name: str, message: str, solution: str = None, **kwargs):
    """Quick logging for error messages."""
    create_enhanced_module_logger(module_name).error_all(message, solution=solution, **kwargs)


def quick_log_warning(module_name: str, message: str, recommendation: str = None, **kwargs):
    """Quick logging for warning messages."""
    create_enhanced_module_logger(module_name).warning_business(message, recommendation=recommendation, **kwargs)


def quick_log_debug(module_name: str, message: str, **kwargs):
    """Quick logging for debug messages."""
    create_enhanced_module_logger(module_name).debug_tech(message, **kwargs)
311
+
312
# Module upgrade checklist function
def print_upgrade_checklist(module_name: str):
    """Print upgrade checklist for a module."""
    # Assemble the checklist once and emit it with a single print; the output
    # is identical to printing each line individually.
    checklist = [
        f"\n📋 ENHANCED LOGGING UPGRADE CHECKLIST FOR {module_name.upper()}",
        "=" * 60,
        "1. Replace logger imports:",
        " OLD: from logging import getLogger",
        " NEW: from runbooks.common.logging_integration_helper import create_enhanced_module_logger",
        "",
        "2. Update logger initialization:",
        " OLD: logger = getLogger(__name__)",
        f" NEW: logger = create_enhanced_module_logger('{module_name}')",
        "",
        "3. Replace print statements:",
        " OLD: print('Operation completed')",
        " NEW: logger.info_standard('Operation completed', operation_status='completed')",
        "",
        "4. Use context-aware logging methods:",
        " • logger.debug_tech() - for technical details",
        " • logger.info_standard() - for standard operations",
        " • logger.warning_business() - for business insights",
        " • logger.error_all() - for errors with solutions",
        "",
        "5. Use convenience methods:",
        " • logger.log_aws_operation() - for AWS API calls",
        " • logger.log_cost_analysis() - for cost operations",
        " • logger.log_security_finding() - for security scans",
        "",
        "6. Use operation context:",
        " with logger.operation_context('operation_name'):",
        " # ... operation code ...",
        "",
        "✅ After upgrade, users can control output with --log-level DEBUG|INFO|WARNING|ERROR",
    ]
    print("\n".join(checklist))
@@ -59,9 +59,10 @@ def get_profile_for_operation(operation_type: str, user_specified_profile: Optio
59
59
 
60
60
  # PRIORITY 2: Environment variables (only when no user input)
61
61
  profile_map = {
62
- "billing": os.getenv("BILLING_PROFILE"),
63
- "management": os.getenv("MANAGEMENT_PROFILE"),
64
- "operational": os.getenv("CENTRALISED_OPS_PROFILE"),
62
+ "billing": os.getenv("AWS_BILLING_PROFILE") or os.getenv("BILLING_PROFILE"),
63
+ "management": os.getenv("AWS_MANAGEMENT_PROFILE") or os.getenv("MANAGEMENT_PROFILE"),
64
+ "operational": os.getenv("AWS_CENTRALISED_OPS_PROFILE") or os.getenv("CENTRALISED_OPS_PROFILE"),
65
+ "single_account": os.getenv("AWS_SINGLE_ACCOUNT_PROFILE") or os.getenv("SINGLE_AWS_PROFILE"),
65
66
  }
66
67
 
67
68
  env_profile = profile_map.get(operation_type)
@@ -101,9 +102,10 @@ def resolve_profile_for_operation_silent(operation_type: str, user_specified_pro
101
102
 
102
103
  # PRIORITY 2: Environment variables (only when no user input)
103
104
  profile_map = {
104
- "billing": os.getenv("BILLING_PROFILE"),
105
- "management": os.getenv("MANAGEMENT_PROFILE"),
106
- "operational": os.getenv("CENTRALISED_OPS_PROFILE"),
105
+ "billing": os.getenv("AWS_BILLING_PROFILE") or os.getenv("BILLING_PROFILE"),
106
+ "management": os.getenv("AWS_MANAGEMENT_PROFILE") or os.getenv("MANAGEMENT_PROFILE"),
107
+ "operational": os.getenv("AWS_CENTRALISED_OPS_PROFILE") or os.getenv("CENTRALISED_OPS_PROFILE"),
108
+ "single_account": os.getenv("AWS_SINGLE_ACCOUNT_PROFILE") or os.getenv("SINGLE_AWS_PROFILE"),
107
109
  }
108
110
 
109
111
  env_profile = profile_map.get(operation_type)