runbooks-0.7.9-py3-none-any.whl → runbooks-0.9.1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (122)
  1. runbooks/__init__.py +1 -1
  2. runbooks/cfat/README.md +12 -1
  3. runbooks/cfat/__init__.py +1 -1
  4. runbooks/cfat/assessment/compliance.py +4 -1
  5. runbooks/cfat/assessment/runner.py +42 -34
  6. runbooks/cfat/models.py +1 -1
  7. runbooks/cloudops/__init__.py +123 -0
  8. runbooks/cloudops/base.py +385 -0
  9. runbooks/cloudops/cost_optimizer.py +811 -0
  10. runbooks/cloudops/infrastructure_optimizer.py +29 -0
  11. runbooks/cloudops/interfaces.py +828 -0
  12. runbooks/cloudops/lifecycle_manager.py +29 -0
  13. runbooks/cloudops/mcp_cost_validation.py +678 -0
  14. runbooks/cloudops/models.py +251 -0
  15. runbooks/cloudops/monitoring_automation.py +29 -0
  16. runbooks/cloudops/notebook_framework.py +676 -0
  17. runbooks/cloudops/security_enforcer.py +449 -0
  18. runbooks/common/__init__.py +152 -0
  19. runbooks/common/accuracy_validator.py +1039 -0
  20. runbooks/common/context_logger.py +440 -0
  21. runbooks/common/cross_module_integration.py +594 -0
  22. runbooks/common/enhanced_exception_handler.py +1108 -0
  23. runbooks/common/enterprise_audit_integration.py +634 -0
  24. runbooks/common/mcp_cost_explorer_integration.py +900 -0
  25. runbooks/common/mcp_integration.py +548 -0
  26. runbooks/common/performance_monitor.py +387 -0
  27. runbooks/common/profile_utils.py +216 -0
  28. runbooks/common/rich_utils.py +172 -1
  29. runbooks/feedback/user_feedback_collector.py +440 -0
  30. runbooks/finops/README.md +377 -458
  31. runbooks/finops/__init__.py +4 -21
  32. runbooks/finops/account_resolver.py +279 -0
  33. runbooks/finops/accuracy_cross_validator.py +638 -0
  34. runbooks/finops/aws_client.py +721 -36
  35. runbooks/finops/budget_integration.py +313 -0
  36. runbooks/finops/cli.py +59 -5
  37. runbooks/finops/cost_optimizer.py +1340 -0
  38. runbooks/finops/cost_processor.py +211 -37
  39. runbooks/finops/dashboard_router.py +900 -0
  40. runbooks/finops/dashboard_runner.py +990 -232
  41. runbooks/finops/embedded_mcp_validator.py +288 -0
  42. runbooks/finops/enhanced_dashboard_runner.py +8 -7
  43. runbooks/finops/enhanced_progress.py +327 -0
  44. runbooks/finops/enhanced_trend_visualization.py +423 -0
  45. runbooks/finops/finops_dashboard.py +184 -1829
  46. runbooks/finops/helpers.py +509 -196
  47. runbooks/finops/iam_guidance.py +400 -0
  48. runbooks/finops/markdown_exporter.py +466 -0
  49. runbooks/finops/multi_dashboard.py +1502 -0
  50. runbooks/finops/optimizer.py +15 -15
  51. runbooks/finops/profile_processor.py +2 -2
  52. runbooks/finops/runbooks.inventory.organizations_discovery.log +0 -0
  53. runbooks/finops/runbooks.security.report_generator.log +0 -0
  54. runbooks/finops/runbooks.security.run_script.log +0 -0
  55. runbooks/finops/runbooks.security.security_export.log +0 -0
  56. runbooks/finops/schemas.py +589 -0
  57. runbooks/finops/service_mapping.py +195 -0
  58. runbooks/finops/single_dashboard.py +710 -0
  59. runbooks/finops/tests/test_reference_images_validation.py +1 -1
  60. runbooks/inventory/README.md +12 -1
  61. runbooks/inventory/core/collector.py +157 -29
  62. runbooks/inventory/list_ec2_instances.py +9 -6
  63. runbooks/inventory/list_ssm_parameters.py +10 -10
  64. runbooks/inventory/organizations_discovery.py +210 -164
  65. runbooks/inventory/rich_inventory_display.py +74 -107
  66. runbooks/inventory/run_on_multi_accounts.py +13 -13
  67. runbooks/inventory/runbooks.inventory.organizations_discovery.log +0 -0
  68. runbooks/inventory/runbooks.security.security_export.log +0 -0
  69. runbooks/main.py +1371 -240
  70. runbooks/metrics/dora_metrics_engine.py +711 -17
  71. runbooks/monitoring/performance_monitor.py +433 -0
  72. runbooks/operate/README.md +394 -0
  73. runbooks/operate/base.py +215 -47
  74. runbooks/operate/ec2_operations.py +435 -5
  75. runbooks/operate/iam_operations.py +598 -3
  76. runbooks/operate/privatelink_operations.py +1 -1
  77. runbooks/operate/rds_operations.py +508 -0
  78. runbooks/operate/s3_operations.py +508 -0
  79. runbooks/operate/vpc_endpoints.py +1 -1
  80. runbooks/remediation/README.md +489 -13
  81. runbooks/remediation/base.py +5 -3
  82. runbooks/remediation/commons.py +8 -4
  83. runbooks/security/ENTERPRISE_SECURITY_FRAMEWORK.md +506 -0
  84. runbooks/security/README.md +12 -1
  85. runbooks/security/__init__.py +265 -33
  86. runbooks/security/cloudops_automation_security_validator.py +1164 -0
  87. runbooks/security/compliance_automation.py +12 -10
  88. runbooks/security/compliance_automation_engine.py +1021 -0
  89. runbooks/security/enterprise_security_framework.py +930 -0
  90. runbooks/security/enterprise_security_policies.json +293 -0
  91. runbooks/security/executive_security_dashboard.py +1247 -0
  92. runbooks/security/integration_test_enterprise_security.py +879 -0
  93. runbooks/security/module_security_integrator.py +641 -0
  94. runbooks/security/multi_account_security_controls.py +2254 -0
  95. runbooks/security/real_time_security_monitor.py +1196 -0
  96. runbooks/security/report_generator.py +1 -1
  97. runbooks/security/run_script.py +4 -8
  98. runbooks/security/security_baseline_tester.py +39 -52
  99. runbooks/security/security_export.py +99 -120
  100. runbooks/sre/README.md +472 -0
  101. runbooks/sre/__init__.py +33 -0
  102. runbooks/sre/mcp_reliability_engine.py +1049 -0
  103. runbooks/sre/performance_optimization_engine.py +1032 -0
  104. runbooks/sre/production_monitoring_framework.py +584 -0
  105. runbooks/sre/reliability_monitoring_framework.py +1011 -0
  106. runbooks/validation/__init__.py +2 -2
  107. runbooks/validation/benchmark.py +154 -149
  108. runbooks/validation/cli.py +159 -147
  109. runbooks/validation/mcp_validator.py +291 -248
  110. runbooks/vpc/README.md +478 -0
  111. runbooks/vpc/__init__.py +2 -2
  112. runbooks/vpc/manager_interface.py +366 -351
  113. runbooks/vpc/networking_wrapper.py +68 -36
  114. runbooks/vpc/rich_formatters.py +22 -8
  115. runbooks-0.9.1.dist-info/METADATA +308 -0
  116. {runbooks-0.7.9.dist-info → runbooks-0.9.1.dist-info}/RECORD +120 -59
  117. {runbooks-0.7.9.dist-info → runbooks-0.9.1.dist-info}/entry_points.txt +1 -1
  118. runbooks/finops/cross_validation.py +0 -375
  119. runbooks-0.7.9.dist-info/METADATA +0 -636
  120. {runbooks-0.7.9.dist-info → runbooks-0.9.1.dist-info}/WHEEL +0 -0
  121. {runbooks-0.7.9.dist-info → runbooks-0.9.1.dist-info}/licenses/LICENSE +0 -0
  122. {runbooks-0.7.9.dist-info → runbooks-0.9.1.dist-info}/top_level.txt +0 -0
runbooks/common/rich_utils.py

@@ -76,6 +76,25 @@ def get_console() -> Console:
     return console


+def get_context_aware_console():
+    """
+    Get a context-aware console that adapts to CLI vs Jupyter environments.
+
+    This function is a bridge to the context_logger module to maintain
+    backward compatibility while enabling context awareness.
+
+    Returns:
+        Context-aware console instance
+    """
+    try:
+        from runbooks.common.context_logger import get_context_console
+
+        return get_context_console()
+    except ImportError:
+        # Fallback to regular console if context_logger not available
+        return console
+
+
 def print_header(title: str, version: str = "0.7.8") -> None:
     """
     Print a consistent header for all modules.
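
For orientation, a minimal usage sketch (not part of the diff) of the helper added above, assuming runbooks 0.9.1 is installed: the returned object is the context-aware console when runbooks.common.context_logger is importable and the module-level Rich console otherwise, so .print() should work in both cases.

# Sketch only: exercising get_context_aware_console() added in 0.9.1.
from runbooks.common.rich_utils import get_context_aware_console

console = get_context_aware_console()  # CLI- or Jupyter-aware; falls back on ImportError
console.print("[green]Context-aware console ready[/green]")
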
@@ -105,7 +124,7 @@ def print_banner() -> None:
     ║ | |____| | (_) | |_| | (_| | |__| | |_) \__ \ | |_) | |_| | | |║
     ║ \_____|_|\___/ \__,_|\__,_|\____/| .__/|___/ |____/ \__,_|_| |║
     ║ | | ║
-    ║ Enterprise AWS Automation |_| Platform v0.7.8
+    ║ Enterprise AWS Automation |_| Platform v1.0.0
     ╚═══════════════════════════════════════════════════════════════╝
     """
     console.print(banner, style="header")
@@ -328,6 +347,154 @@ def format_resource_count(count: int, resource_type: str) -> Text:
     return text


+def create_display_profile_name(profile_name: str, max_length: int = 25, context_aware: bool = True) -> str:
+    """
+    Create user-friendly display version of AWS profile names for better readability.
+
+    This function intelligently truncates long enterprise profile names while preserving
+    meaningful information for identification. Full names remain available for AWS API calls.
+
+    Examples:
+        'ams-admin-Billing-ReadOnlyAccess-909135376185' → 'ams-admin-Billing-9091...'
+        'ams-centralised-ops-ReadOnlyAccess-335083429030' → 'ams-centralised-ops-3350...'
+        'short-profile' → 'short-profile' (no truncation needed)
+
+    Args:
+        profile_name: Full AWS profile name
+        max_length: Maximum display length (default 25 for table formatting)
+        context_aware: Whether to adapt truncation based on execution context
+
+    Returns:
+        User-friendly display name for console output
+    """
+    if not profile_name or len(profile_name) <= max_length:
+        return profile_name
+
+    # Context-aware length adjustment
+    if context_aware:
+        try:
+            from runbooks.common.context_logger import ExecutionContext, get_context_config
+
+            config = get_context_config()
+
+            if config.context == ExecutionContext.JUPYTER:
+                # Shorter names for notebook tables
+                max_length = min(max_length, 20)
+            elif config.context == ExecutionContext.CLI:
+                # Slightly longer for CLI terminals
+                max_length = min(max_length + 5, 30)
+        except ImportError:
+            # Fallback if context_logger not available
+            pass
+
+    # Smart truncation strategy for AWS profile patterns
+    # Common patterns: ams-{type}-{service}-{permissions}-{account_id}
+
+    if "-" in profile_name:
+        parts = profile_name.split("-")
+
+        # Strategy 1: Keep meaningful prefix + account ID suffix
+        if len(parts) >= 4 and parts[-1].isdigit():
+            # Enterprise pattern: ams-admin-Billing-ReadOnlyAccess-909135376185
+            account_id = parts[-1]
+            prefix_parts = parts[:-2]  # Skip permissions part for brevity
+
+            prefix = "-".join(prefix_parts)
+            account_short = account_id[:4]  # First 4 digits of account ID
+
+            truncated = f"{prefix}-{account_short}..."
+
+            if len(truncated) <= max_length:
+                return truncated
+
+        # Strategy 2: Keep first few meaningful parts
+        meaningful_parts = []
+        current_length = 0
+
+        for part in parts:
+            # Skip common noise words but keep meaningful ones
+            if part.lower() in ["readonlyaccess", "fullaccess", "access"]:
+                continue
+
+            part_with_sep = f"{part}-" if meaningful_parts else part
+            if current_length + len(part_with_sep) + 3 <= max_length:  # +3 for "..."
+                meaningful_parts.append(part)
+                current_length += len(part_with_sep)
+            else:
+                break
+
+        if len(meaningful_parts) >= 2:
+            return f"{'-'.join(meaningful_parts)}..."
+
+    # Strategy 3: Simple prefix truncation with ellipsis
+    return f"{profile_name[: max_length - 3]}..."
+
+
+def format_profile_name(profile_name: str, style: str = "cyan", display_max_length: int = 25) -> Text:
+    """
+    Format profile name with consistent styling and intelligent truncation.
+
+    This function creates a Rich Text object with:
+    - Smart truncation for display readability
+    - Consistent styling across all modules
+    - Hover-friendly formatting (full name in tooltip would be future enhancement)
+
+    Args:
+        profile_name: AWS profile name
+        style: Rich style for the profile name
+        display_max_length: Maximum length for display
+
+    Returns:
+        Rich Text object with formatted profile name
+    """
+    display_name = create_display_profile_name(profile_name, display_max_length)
+
+    text = Text()
+
+    # Add visual indicators for truncated names
+    if display_name.endswith("..."):
+        # Truncated name - use slightly different style
+        text.append(display_name, style=f"{style} italic")
+    else:
+        # Full name - normal style
+        text.append(display_name, style=style)
+
+    return text
+
+
+def format_account_name(
+    account_name: str, account_id: str, style: str = "bold bright_white", max_length: int = 35
+) -> str:
+    """
+    Format account name with ID for consistent enterprise display in tables.
+
+    This function provides consistent account display formatting across all FinOps dashboards:
+    - Account name with intelligent truncation
+    - Account ID as secondary line for identification
+    - Rich markup for professional presentation
+
+    Args:
+        account_name: Resolved account name from Organizations API
+        account_id: AWS account ID
+        style: Rich style for the account name
+        max_length: Maximum display length for account name
+
+    Returns:
+        Formatted display string with Rich markup
+
+    Example:
+        "Data Management"
+        "123456789012"
+    """
+    if account_name and account_name != account_id and len(account_name.strip()) > 0:
+        # We have a resolved account name - format with both name and ID
+        display_name = account_name if len(account_name) <= max_length else account_name[: max_length - 3] + "..."
+        return f"[{style}]{display_name}[/]\n[dim]{account_id}[/]"
+    else:
+        # No resolved name available - show account ID prominently
+        return f"[{style}]{account_id}[/]"
+
+
 def create_layout(sections: Dict[str, Any]) -> Layout:
     """
     Create a layout for complex displays.
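
A small sketch (not part of the diff) of the three formatting helpers above; the profile names and expected truncations are taken from the docstring examples, and context_aware=False is passed so the result does not depend on whether the code runs in a CLI or a notebook.

# Sketch only: profile/account display helpers added in 0.9.1.
from runbooks.common.rich_utils import (
    create_display_profile_name,
    format_account_name,
    format_profile_name,
)

# Strategy 1: keep the prefix plus the first 4 digits of the trailing account ID.
print(create_display_profile_name("ams-admin-Billing-ReadOnlyAccess-909135376185", context_aware=False))
# → 'ams-admin-Billing-9091...'

# Short names are returned unchanged.
print(create_display_profile_name("short-profile", context_aware=False))  # → 'short-profile'

# Rich Text, rendered italic when the displayed name was truncated.
styled = format_profile_name("ams-centralised-ops-ReadOnlyAccess-335083429030")

# Two-line Rich markup: resolved account name, then the account ID dimmed underneath.
print(format_account_name("Data Management", "123456789012"))
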
@@ -429,6 +596,7 @@ __all__ = [
     "STATUS_INDICATORS",
     "console",
     "get_console",
+    "get_context_aware_console",
     "print_header",
     "print_banner",
     "create_table",
@@ -443,6 +611,9 @@ __all__ = [
     "create_panel",
     "format_cost",
     "format_resource_count",
+    "create_display_profile_name",
+    "format_profile_name",
+    "format_account_name",
     "create_layout",
     "print_json",
     "print_markdown",
runbooks/feedback/user_feedback_collector.py (new file)

@@ -0,0 +1,440 @@
+#!/usr/bin/env python3
+"""
+User Feedback Collection System for CloudOps Runbooks Platform.
+
+Collects user feedback on Rich CLI improvements, performance, and feature usage
+to drive continuous improvement and measure deployment success.
+
+Features:
+- CLI experience feedback collection
+- Performance satisfaction tracking
+- Feature usage analytics
+- Terminal compatibility reporting
+- A/B testing support for CLI improvements
+
+Author: Enterprise Product Owner
+Version: 1.0.0 - Phase 2 Production Deployment
+"""
+
+import json
+import os
+import platform
+import uuid
+from datetime import datetime
+from pathlib import Path
+from typing import Any, Dict, List, Optional
+
+from rich.console import Console
+from rich.panel import Panel
+from rich.progress import track
+from rich.prompt import Confirm, Prompt
+from rich.table import Table
+
+console = Console()
+
+
+class UserFeedbackCollector:
+    """
+    Enterprise user feedback collection for CloudOps Runbooks platform.
+
+    Collects structured feedback on Rich CLI enhancements, performance,
+    and overall user experience to guide continuous improvement.
+    """
+
+    def __init__(self):
+        """Initialize user feedback collection system."""
+        self.feedback_file = Path("artifacts/feedback/user_feedback.json")
+        self.feedback_file.parent.mkdir(parents=True, exist_ok=True)
+
+        self.session_id = str(uuid.uuid4())[:8]
+        self.system_info = self._collect_system_info()
+
+    def _collect_system_info(self) -> Dict[str, str]:
+        """Collect system information for compatibility analysis."""
+        return {
+            "platform": platform.system(),
+            "platform_version": platform.version(),
+            "python_version": platform.python_version(),
+            "terminal": os.environ.get("TERM", "unknown"),
+            "terminal_program": os.environ.get("TERM_PROGRAM", "unknown"),
+            "color_support": str(console.color_system),
+            "width": str(console.size.width),
+            "height": str(console.size.height),
+        }
+
+    def collect_cli_experience_feedback(self) -> Dict[str, Any]:
+        """
+        Collect feedback specifically on Rich CLI improvements.
+
+        Returns:
+            Structured feedback data
+        """
+        console.print(
+            Panel(
+                "[bold blue]📋 Rich CLI Experience Feedback[/bold blue]\n\n"
+                "Help us improve the CloudOps Runbooks CLI experience!\n"
+                "Your feedback drives our continuous improvement.",
+                title="User Feedback Collection",
+                border_style="blue",
+            )
+        )
+
+        feedback = {
+            "session_id": self.session_id,
+            "timestamp": datetime.now().isoformat(),
+            "feedback_type": "cli_experience",
+            "system_info": self.system_info,
+        }
+
+        # Overall satisfaction rating
+        satisfaction = Prompt.ask(
+            "\n[cyan]Overall CLI Experience Rating[/cyan] (1-10, where 10 is excellent)",
+            choices=[str(i) for i in range(1, 11)],
+            default="8",
+        )
+        feedback["overall_satisfaction"] = int(satisfaction)
+
+        # Rich CLI specific feedback
+        console.print("\n[yellow]📊 Rich CLI Features Feedback[/yellow]")
+
+        # Color coding effectiveness
+        color_rating = Prompt.ask(
+            "How helpful are the color-coded messages? (1-10)", choices=[str(i) for i in range(1, 11)], default="8"
+        )
+        feedback["color_coding_rating"] = int(color_rating)
+
+        # Progress indicators
+        progress_rating = Prompt.ask(
+            "How useful are the progress indicators? (1-10)", choices=[str(i) for i in range(1, 11)], default="8"
+        )
+        feedback["progress_indicators_rating"] = int(progress_rating)
+
+        # Error messages clarity
+        error_clarity = Prompt.ask(
+            "How clear are the error messages? (1-10)", choices=[str(i) for i in range(1, 11)], default="8"
+        )
+        feedback["error_message_clarity"] = int(error_clarity)
+
+        # Terminal compatibility
+        compatibility_issues = Confirm.ask("\nDid you experience any display issues in your terminal?")
+        feedback["compatibility_issues"] = compatibility_issues
+
+        if compatibility_issues:
+            issues_description = Prompt.ask(
+                "Please describe the display issues (optional)", default="No description provided"
+            )
+            feedback["issues_description"] = issues_description
+
+        # Feature usage
+        console.print("\n[green]🚀 Feature Usage[/green]")
+
+        modules_used = []
+        available_modules = ["operate", "cfat", "inventory", "security", "finops", "vpc"]
+
+        for module in available_modules:
+            if Confirm.ask(f"Have you used the {module} module?"):
+                modules_used.append(module)
+
+        feedback["modules_used"] = modules_used
+
+        # Most valuable features
+        if modules_used:
+            favorite_module = Prompt.ask(
+                "Which module do you find most valuable?",
+                choices=modules_used,
+                default=modules_used[0] if modules_used else "operate",
+            )
+            feedback["favorite_module"] = favorite_module
+
+        # Performance satisfaction
+        console.print("\n[blue]⚡ Performance Feedback[/blue]")
+
+        performance_rating = Prompt.ask(
+            "How satisfied are you with operation speed? (1-10)", choices=[str(i) for i in range(1, 11)], default="8"
+        )
+        feedback["performance_satisfaction"] = int(performance_rating)
+
+        # Free-form feedback
+        console.print("\n[magenta]💬 Additional Feedback[/magenta]")
+
+        improvements = Prompt.ask("What improvements would you like to see? (optional)", default="No suggestions")
+        feedback["suggested_improvements"] = improvements
+
+        # Recommendation likelihood (NPS-style)
+        nps_score = Prompt.ask(
+            "How likely are you to recommend CloudOps Runbooks? (0-10)",
+            choices=[str(i) for i in range(0, 11)],
+            default="8",
+        )
+        feedback["nps_score"] = int(nps_score)
+
+        return feedback
+
+    def collect_performance_feedback(self, module: str, operation: str, execution_time: float) -> Dict[str, Any]:
+        """
+        Collect performance-specific feedback after operations.
+
+        Args:
+            module: Module name that was used
+            operation: Operation that was performed
+            execution_time: Actual execution time
+
+        Returns:
+            Performance feedback data
+        """
+        feedback = {
+            "session_id": self.session_id,
+            "timestamp": datetime.now().isoformat(),
+            "feedback_type": "performance",
+            "module": module,
+            "operation": operation,
+            "execution_time": execution_time,
+            "system_info": self.system_info,
+        }
+
+        # Quick performance satisfaction
+        performance_acceptable = Confirm.ask(
+            f"\n[cyan]Was the {module} {operation} performance acceptable?[/cyan] (took {execution_time:.2f}s)"
+        )
+        feedback["performance_acceptable"] = performance_acceptable
+
+        if not performance_acceptable:
+            expected_time = Prompt.ask("What would be an acceptable time for this operation? (seconds)", default="5")
+            feedback["expected_time"] = float(expected_time)
+
+        return feedback
+
+    def collect_feature_request(self) -> Dict[str, Any]:
+        """
+        Collect feature requests and enhancement suggestions.
+
+        Returns:
+            Feature request data
+        """
+        console.print(
+            Panel(
+                "[bold green]💡 Feature Request & Enhancement Ideas[/bold green]\n\n"
+                "Share your ideas to help us enhance CloudOps Runbooks!",
+                title="Feature Requests",
+                border_style="green",
+            )
+        )
+
+        feedback = {
+            "session_id": self.session_id,
+            "timestamp": datetime.now().isoformat(),
+            "feedback_type": "feature_request",
+            "system_info": self.system_info,
+        }
+
+        # Feature category
+        categories = ["cli_experience", "new_module", "performance", "reporting", "integration", "other"]
+        category = Prompt.ask(
+            "What category does your request fall into?", choices=categories, default="cli_experience"
+        )
+        feedback["category"] = category
+
+        # Priority level
+        priority = Prompt.ask(
+            "How important is this to you?", choices=["low", "medium", "high", "critical"], default="medium"
+        )
+        feedback["priority"] = priority
+
+        # Description
+        description = Prompt.ask("Please describe your feature request or enhancement idea", default="Feature request")
+        feedback["description"] = description
+
+        # Use case
+        use_case = Prompt.ask(
+            "What problem would this solve or what value would it add?", default="General improvement"
+        )
+        feedback["use_case"] = use_case
+
+        return feedback
+
+    def store_feedback(self, feedback_data: Dict[str, Any]) -> None:
+        """
+        Store feedback data to persistent storage.
+
+        Args:
+            feedback_data: Structured feedback data
+        """
+        try:
+            # Load existing feedback
+            if self.feedback_file.exists():
+                with open(self.feedback_file, "r") as f:
+                    all_feedback = json.load(f)
+            else:
+                all_feedback = {"feedback_entries": []}
+
+            # Add new feedback
+            all_feedback["feedback_entries"].append(feedback_data)
+
+            # Save updated feedback
+            with open(self.feedback_file, "w") as f:
+                json.dump(all_feedback, f, indent=2)
+
+            console.print(
+                f"[green]✅ Thank you! Feedback saved (ID: {feedback_data.get('session_id', 'unknown')})[/green]"
+            )
+
+        except Exception as e:
+            console.print(f"[red]❌ Error saving feedback: {e}[/red]")
+
+    def analyze_feedback_trends(self) -> Dict[str, Any]:
+        """
+        Analyze collected feedback for trends and insights.
+
+        Returns:
+            Analysis results and trends
+        """
+        if not self.feedback_file.exists():
+            return {"status": "no_data", "message": "No feedback data available"}
+
+        try:
+            with open(self.feedback_file, "r") as f:
+                data = json.load(f)
+
+            entries = data.get("feedback_entries", [])
+
+            if not entries:
+                return {"status": "no_entries", "message": "No feedback entries found"}
+
+            # Overall statistics
+            total_entries = len(entries)
+            cli_feedback = [e for e in entries if e.get("feedback_type") == "cli_experience"]
+
+            analysis = {
+                "status": "success",
+                "total_entries": total_entries,
+                "analysis_date": datetime.now().isoformat(),
+                "feedback_breakdown": {
+                    "cli_experience": len([e for e in entries if e.get("feedback_type") == "cli_experience"]),
+                    "performance": len([e for e in entries if e.get("feedback_type") == "performance"]),
+                    "feature_request": len([e for e in entries if e.get("feedback_type") == "feature_request"]),
+                },
+            }
+
+            # CLI experience analysis
+            if cli_feedback:
+                satisfaction_scores = [e.get("overall_satisfaction", 0) for e in cli_feedback]
+                nps_scores = [e.get("nps_score", 0) for e in cli_feedback]
+                color_ratings = [e.get("color_coding_rating", 0) for e in cli_feedback]
+
+                analysis["cli_analysis"] = {
+                    "average_satisfaction": sum(satisfaction_scores) / len(satisfaction_scores),
+                    "average_nps": sum(nps_scores) / len(nps_scores),
+                    "average_color_rating": sum(color_ratings) / len(color_ratings),
+                    "compatibility_issues_rate": sum(1 for e in cli_feedback if e.get("compatibility_issues", False))
+                    / len(cli_feedback)
+                    * 100,
+                }
+
+            # Module usage analysis
+            module_usage = {}
+            for entry in cli_feedback:
+                modules = entry.get("modules_used", [])
+                for module in modules:
+                    module_usage[module] = module_usage.get(module, 0) + 1
+
+            analysis["module_usage"] = module_usage
+
+            # System compatibility
+            systems = {}
+            terminals = {}
+            for entry in entries:
+                sys_info = entry.get("system_info", {})
+                system = sys_info.get("platform", "unknown")
+                terminal = sys_info.get("terminal_program", "unknown")
+
+                systems[system] = systems.get(system, 0) + 1
+                terminals[terminal] = terminals.get(terminal, 0) + 1
+
+            analysis["system_compatibility"] = {"platforms": systems, "terminals": terminals}
+
+            return analysis
+
+        except Exception as e:
+            return {"status": "error", "message": str(e)}
+
+    def display_feedback_summary(self) -> None:
+        """Display a formatted summary of feedback analysis."""
+        analysis = self.analyze_feedback_trends()
+
+        if analysis["status"] != "success":
+            console.print(f"[yellow]⚠️ {analysis['message']}[/yellow]")
+            return
+
+        # Summary panel
+        summary_panel = Panel(
+            f"[green]Total Feedback Entries:[/green] {analysis['total_entries']}\n"
+            f"[blue]CLI Experience:[/blue] {analysis['feedback_breakdown']['cli_experience']}\n"
+            f"[cyan]Performance:[/cyan] {analysis['feedback_breakdown']['performance']}\n"
+            f"[magenta]Feature Requests:[/magenta] {analysis['feedback_breakdown']['feature_request']}",
+            title="📊 Feedback Summary",
+            border_style="blue",
+        )
+
+        console.print(summary_panel)
+
+        # CLI analysis
+        if "cli_analysis" in analysis:
+            cli_analysis = analysis["cli_analysis"]
+
+            cli_panel = Panel(
+                f"[green]Average Satisfaction:[/green] {cli_analysis['average_satisfaction']:.1f}/10\n"
+                f"[blue]Average NPS Score:[/blue] {cli_analysis['average_nps']:.1f}/10\n"
+                f"[cyan]Color Rating:[/cyan] {cli_analysis['average_color_rating']:.1f}/10\n"
+                f"[yellow]Compatibility Issues:[/yellow] {cli_analysis['compatibility_issues_rate']:.1f}%",
+                title="🎨 Rich CLI Analysis",
+                border_style="green" if cli_analysis["average_satisfaction"] >= 8 else "yellow",
+            )
+
+            console.print(cli_panel)
+
+        # Module usage table
+        if analysis.get("module_usage"):
+            usage_table = Table(title="Module Usage Popularity")
+            usage_table.add_column("Module", style="bold")
+            usage_table.add_column("Usage Count", justify="center")
+            usage_table.add_column("Popularity", justify="center")
+
+            total_usage = sum(analysis["module_usage"].values())
+
+            for module, count in sorted(analysis["module_usage"].items(), key=lambda x: x[1], reverse=True):
+                popularity = count / total_usage * 100
+                usage_table.add_row(module.title(), str(count), f"{popularity:.1f}%")
+
+            console.print(usage_table)
+
+
+# Command-line interface for feedback collection
+def main():
+    """Main CLI interface for feedback collection."""
+    collector = UserFeedbackCollector()
+
+    console.print("[bold blue]🎯 CloudOps Runbooks Feedback System[/bold blue]")
+
+    action = Prompt.ask(
+        "\nWhat would you like to do?",
+        choices=["give_feedback", "request_feature", "view_summary", "quit"],
+        default="give_feedback",
+    )
+
+    if action == "give_feedback":
+        feedback = collector.collect_cli_experience_feedback()
+        collector.store_feedback(feedback)

+    elif action == "request_feature":
+        request = collector.collect_feature_request()
+        collector.store_feedback(request)
+
+    elif action == "view_summary":
+        collector.display_feedback_summary()
+
+    elif action == "quit":
+        console.print("[dim]Thank you for using CloudOps Runbooks![/dim]")
+
+
+if __name__ == "__main__":
+    main()
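
A minimal driver sketch for the new module (not part of the diff). It assumes an interactive terminal, since the collection methods block on Rich Prompt/Confirm input; store_feedback() writes to artifacts/feedback/user_feedback.json under the current working directory, and analyze_feedback_trends() reads the same file back. Given the __main__ guard above, running the file directly should enter the same flow via main().

# Sketch only: exercising UserFeedbackCollector from runbooks 0.9.1.
from runbooks.feedback.user_feedback_collector import UserFeedbackCollector

collector = UserFeedbackCollector()                   # creates artifacts/feedback/ if missing
entry = collector.collect_cli_experience_feedback()   # interactive Rich prompts
collector.store_feedback(entry)                       # appends to user_feedback.json

trends = collector.analyze_feedback_trends()          # {"status": "success", ...} once entries exist
collector.display_feedback_summary()                  # Rich panels plus module usage table
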