crackerjack 0.32.0__py3-none-any.whl → 0.33.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of crackerjack might be problematic.

Files changed (200)
  1. crackerjack/__main__.py +1350 -34
  2. crackerjack/adapters/__init__.py +17 -0
  3. crackerjack/adapters/lsp_client.py +358 -0
  4. crackerjack/adapters/rust_tool_adapter.py +194 -0
  5. crackerjack/adapters/rust_tool_manager.py +193 -0
  6. crackerjack/adapters/skylos_adapter.py +231 -0
  7. crackerjack/adapters/zuban_adapter.py +560 -0
  8. crackerjack/agents/base.py +7 -3
  9. crackerjack/agents/coordinator.py +271 -33
  10. crackerjack/agents/documentation_agent.py +9 -15
  11. crackerjack/agents/dry_agent.py +3 -15
  12. crackerjack/agents/formatting_agent.py +1 -1
  13. crackerjack/agents/import_optimization_agent.py +36 -180
  14. crackerjack/agents/performance_agent.py +17 -98
  15. crackerjack/agents/performance_helpers.py +7 -31
  16. crackerjack/agents/proactive_agent.py +1 -3
  17. crackerjack/agents/refactoring_agent.py +16 -85
  18. crackerjack/agents/refactoring_helpers.py +7 -42
  19. crackerjack/agents/security_agent.py +9 -48
  20. crackerjack/agents/test_creation_agent.py +356 -513
  21. crackerjack/agents/test_specialist_agent.py +0 -4
  22. crackerjack/api.py +6 -25
  23. crackerjack/cli/cache_handlers.py +204 -0
  24. crackerjack/cli/cache_handlers_enhanced.py +683 -0
  25. crackerjack/cli/facade.py +100 -0
  26. crackerjack/cli/handlers.py +224 -9
  27. crackerjack/cli/interactive.py +6 -4
  28. crackerjack/cli/options.py +642 -55
  29. crackerjack/cli/utils.py +2 -1
  30. crackerjack/code_cleaner.py +58 -117
  31. crackerjack/config/global_lock_config.py +8 -48
  32. crackerjack/config/hooks.py +53 -62
  33. crackerjack/core/async_workflow_orchestrator.py +24 -34
  34. crackerjack/core/autofix_coordinator.py +3 -17
  35. crackerjack/core/enhanced_container.py +64 -6
  36. crackerjack/core/file_lifecycle.py +12 -89
  37. crackerjack/core/performance.py +2 -2
  38. crackerjack/core/performance_monitor.py +15 -55
  39. crackerjack/core/phase_coordinator.py +257 -218
  40. crackerjack/core/resource_manager.py +14 -90
  41. crackerjack/core/service_watchdog.py +62 -95
  42. crackerjack/core/session_coordinator.py +149 -0
  43. crackerjack/core/timeout_manager.py +14 -72
  44. crackerjack/core/websocket_lifecycle.py +13 -78
  45. crackerjack/core/workflow_orchestrator.py +558 -240
  46. crackerjack/docs/INDEX.md +11 -0
  47. crackerjack/docs/generated/api/API_REFERENCE.md +10895 -0
  48. crackerjack/docs/generated/api/CLI_REFERENCE.md +109 -0
  49. crackerjack/docs/generated/api/CROSS_REFERENCES.md +1755 -0
  50. crackerjack/docs/generated/api/PROTOCOLS.md +3 -0
  51. crackerjack/docs/generated/api/SERVICES.md +1252 -0
  52. crackerjack/documentation/__init__.py +31 -0
  53. crackerjack/documentation/ai_templates.py +756 -0
  54. crackerjack/documentation/dual_output_generator.py +765 -0
  55. crackerjack/documentation/mkdocs_integration.py +518 -0
  56. crackerjack/documentation/reference_generator.py +977 -0
  57. crackerjack/dynamic_config.py +55 -50
  58. crackerjack/executors/async_hook_executor.py +10 -15
  59. crackerjack/executors/cached_hook_executor.py +117 -43
  60. crackerjack/executors/hook_executor.py +8 -34
  61. crackerjack/executors/hook_lock_manager.py +26 -183
  62. crackerjack/executors/individual_hook_executor.py +13 -11
  63. crackerjack/executors/lsp_aware_hook_executor.py +270 -0
  64. crackerjack/executors/tool_proxy.py +417 -0
  65. crackerjack/hooks/lsp_hook.py +79 -0
  66. crackerjack/intelligence/adaptive_learning.py +25 -10
  67. crackerjack/intelligence/agent_orchestrator.py +2 -5
  68. crackerjack/intelligence/agent_registry.py +34 -24
  69. crackerjack/intelligence/agent_selector.py +5 -7
  70. crackerjack/interactive.py +17 -6
  71. crackerjack/managers/async_hook_manager.py +0 -1
  72. crackerjack/managers/hook_manager.py +79 -1
  73. crackerjack/managers/publish_manager.py +66 -13
  74. crackerjack/managers/test_command_builder.py +5 -17
  75. crackerjack/managers/test_executor.py +1 -3
  76. crackerjack/managers/test_manager.py +109 -7
  77. crackerjack/managers/test_manager_backup.py +10 -9
  78. crackerjack/mcp/cache.py +2 -2
  79. crackerjack/mcp/client_runner.py +1 -1
  80. crackerjack/mcp/context.py +191 -68
  81. crackerjack/mcp/dashboard.py +7 -5
  82. crackerjack/mcp/enhanced_progress_monitor.py +31 -28
  83. crackerjack/mcp/file_monitor.py +30 -23
  84. crackerjack/mcp/progress_components.py +31 -21
  85. crackerjack/mcp/progress_monitor.py +50 -53
  86. crackerjack/mcp/rate_limiter.py +6 -6
  87. crackerjack/mcp/server_core.py +161 -32
  88. crackerjack/mcp/service_watchdog.py +2 -1
  89. crackerjack/mcp/state.py +4 -7
  90. crackerjack/mcp/task_manager.py +11 -9
  91. crackerjack/mcp/tools/core_tools.py +174 -33
  92. crackerjack/mcp/tools/error_analyzer.py +3 -2
  93. crackerjack/mcp/tools/execution_tools.py +15 -12
  94. crackerjack/mcp/tools/execution_tools_backup.py +42 -30
  95. crackerjack/mcp/tools/intelligence_tool_registry.py +7 -5
  96. crackerjack/mcp/tools/intelligence_tools.py +5 -2
  97. crackerjack/mcp/tools/monitoring_tools.py +33 -70
  98. crackerjack/mcp/tools/proactive_tools.py +24 -11
  99. crackerjack/mcp/tools/progress_tools.py +5 -8
  100. crackerjack/mcp/tools/utility_tools.py +20 -14
  101. crackerjack/mcp/tools/workflow_executor.py +62 -40
  102. crackerjack/mcp/websocket/app.py +8 -0
  103. crackerjack/mcp/websocket/endpoints.py +352 -357
  104. crackerjack/mcp/websocket/jobs.py +40 -57
  105. crackerjack/mcp/websocket/monitoring_endpoints.py +2935 -0
  106. crackerjack/mcp/websocket/server.py +7 -25
  107. crackerjack/mcp/websocket/websocket_handler.py +6 -17
  108. crackerjack/mixins/__init__.py +3 -0
  109. crackerjack/mixins/error_handling.py +145 -0
  110. crackerjack/models/config.py +21 -1
  111. crackerjack/models/config_adapter.py +49 -1
  112. crackerjack/models/protocols.py +176 -107
  113. crackerjack/models/resource_protocols.py +55 -210
  114. crackerjack/models/task.py +3 -0
  115. crackerjack/monitoring/ai_agent_watchdog.py +13 -13
  116. crackerjack/monitoring/metrics_collector.py +426 -0
  117. crackerjack/monitoring/regression_prevention.py +8 -8
  118. crackerjack/monitoring/websocket_server.py +643 -0
  119. crackerjack/orchestration/advanced_orchestrator.py +11 -6
  120. crackerjack/orchestration/coverage_improvement.py +3 -3
  121. crackerjack/orchestration/execution_strategies.py +26 -6
  122. crackerjack/orchestration/test_progress_streamer.py +8 -5
  123. crackerjack/plugins/base.py +2 -2
  124. crackerjack/plugins/hooks.py +7 -0
  125. crackerjack/plugins/managers.py +11 -8
  126. crackerjack/security/__init__.py +0 -1
  127. crackerjack/security/audit.py +90 -105
  128. crackerjack/services/anomaly_detector.py +392 -0
  129. crackerjack/services/api_extractor.py +615 -0
  130. crackerjack/services/backup_service.py +2 -2
  131. crackerjack/services/bounded_status_operations.py +15 -152
  132. crackerjack/services/cache.py +127 -1
  133. crackerjack/services/changelog_automation.py +395 -0
  134. crackerjack/services/config.py +18 -11
  135. crackerjack/services/config_merge.py +30 -85
  136. crackerjack/services/config_template.py +506 -0
  137. crackerjack/services/contextual_ai_assistant.py +48 -22
  138. crackerjack/services/coverage_badge_service.py +171 -0
  139. crackerjack/services/coverage_ratchet.py +41 -17
  140. crackerjack/services/debug.py +3 -3
  141. crackerjack/services/dependency_analyzer.py +460 -0
  142. crackerjack/services/dependency_monitor.py +14 -11
  143. crackerjack/services/documentation_generator.py +491 -0
  144. crackerjack/services/documentation_service.py +675 -0
  145. crackerjack/services/enhanced_filesystem.py +6 -5
  146. crackerjack/services/enterprise_optimizer.py +865 -0
  147. crackerjack/services/error_pattern_analyzer.py +676 -0
  148. crackerjack/services/file_hasher.py +1 -1
  149. crackerjack/services/git.py +41 -45
  150. crackerjack/services/health_metrics.py +10 -8
  151. crackerjack/services/heatmap_generator.py +735 -0
  152. crackerjack/services/initialization.py +30 -33
  153. crackerjack/services/input_validator.py +5 -97
  154. crackerjack/services/intelligent_commit.py +327 -0
  155. crackerjack/services/log_manager.py +15 -12
  156. crackerjack/services/logging.py +4 -3
  157. crackerjack/services/lsp_client.py +628 -0
  158. crackerjack/services/memory_optimizer.py +409 -0
  159. crackerjack/services/metrics.py +42 -33
  160. crackerjack/services/parallel_executor.py +416 -0
  161. crackerjack/services/pattern_cache.py +1 -1
  162. crackerjack/services/pattern_detector.py +6 -6
  163. crackerjack/services/performance_benchmarks.py +250 -576
  164. crackerjack/services/performance_cache.py +382 -0
  165. crackerjack/services/performance_monitor.py +565 -0
  166. crackerjack/services/predictive_analytics.py +510 -0
  167. crackerjack/services/quality_baseline.py +234 -0
  168. crackerjack/services/quality_baseline_enhanced.py +646 -0
  169. crackerjack/services/quality_intelligence.py +785 -0
  170. crackerjack/services/regex_patterns.py +605 -524
  171. crackerjack/services/regex_utils.py +43 -123
  172. crackerjack/services/secure_path_utils.py +5 -164
  173. crackerjack/services/secure_status_formatter.py +30 -141
  174. crackerjack/services/secure_subprocess.py +11 -92
  175. crackerjack/services/security.py +61 -30
  176. crackerjack/services/security_logger.py +18 -22
  177. crackerjack/services/server_manager.py +124 -16
  178. crackerjack/services/status_authentication.py +16 -159
  179. crackerjack/services/status_security_manager.py +4 -131
  180. crackerjack/services/terminal_utils.py +0 -0
  181. crackerjack/services/thread_safe_status_collector.py +19 -125
  182. crackerjack/services/unified_config.py +21 -13
  183. crackerjack/services/validation_rate_limiter.py +5 -54
  184. crackerjack/services/version_analyzer.py +459 -0
  185. crackerjack/services/version_checker.py +1 -1
  186. crackerjack/services/websocket_resource_limiter.py +10 -144
  187. crackerjack/services/zuban_lsp_service.py +390 -0
  188. crackerjack/slash_commands/__init__.py +2 -7
  189. crackerjack/slash_commands/run.md +2 -2
  190. crackerjack/tools/validate_input_validator_patterns.py +14 -40
  191. crackerjack/tools/validate_regex_patterns.py +19 -48
  192. {crackerjack-0.32.0.dist-info → crackerjack-0.33.1.dist-info}/METADATA +197 -26
  193. crackerjack-0.33.1.dist-info/RECORD +229 -0
  194. crackerjack/CLAUDE.md +0 -207
  195. crackerjack/RULES.md +0 -380
  196. crackerjack/py313.py +0 -234
  197. crackerjack-0.32.0.dist-info/RECORD +0 -180
  198. {crackerjack-0.32.0.dist-info → crackerjack-0.33.1.dist-info}/WHEEL +0 -0
  199. {crackerjack-0.32.0.dist-info → crackerjack-0.33.1.dist-info}/entry_points.txt +0 -0
  200. {crackerjack-0.32.0.dist-info → crackerjack-0.33.1.dist-info}/licenses/LICENSE +0 -0
crackerjack/services/predictive_analytics.py (new file)
@@ -0,0 +1,510 @@
+ """Advanced predictive analytics engine for quality metrics and trends."""
+
+ import logging
+ import statistics
+ import typing as t
+ from collections import defaultdict, deque
+ from dataclasses import dataclass, field
+ from datetime import datetime, timedelta
+ from pathlib import Path
+
+ logger = logging.getLogger(__name__)
+
+
+ @dataclass
+ class TrendAnalysis:
+     """Trend analysis result for a metric."""
+
+     metric_type: str
+     trend_direction: str  # increasing, decreasing, stable, volatile
+     trend_strength: float  # 0.0 to 1.0
+     predicted_values: list[float]
+     confidence_intervals: list[tuple[float, float]]
+     analysis_period: timedelta
+     last_updated: datetime
+
+
+ @dataclass
+ class Prediction:
+     """Prediction for a specific metric at a future time."""
+
+     metric_type: str
+     predicted_at: datetime
+     predicted_for: datetime
+     predicted_value: float
+     confidence_interval: tuple[float, float]
+     confidence_level: float
+     model_accuracy: float
+     metadata: dict[str, t.Any] = field(default_factory=dict[str, t.Any])
+
+
+ @dataclass
+ class CapacityForecast:
+     """Capacity planning forecast."""
+
+     resource_type: str
+     current_usage: float
+     predicted_usage: list[tuple[datetime, float]]
+     capacity_threshold: float
+     estimated_exhaustion: datetime | None
+     recommended_actions: list[str]
+     confidence: float
+
+
+ class MovingAveragePredictor:
+     """Simple moving average predictor."""
+
+     def __init__(self, window_size: int = 10):
+         self.window_size = window_size
+
+     def predict(self, values: list[float], periods: int = 1) -> list[float]:
+         """Predict future values using moving average."""
+         if len(values) < self.window_size:
+             return [values[-1]] * periods if values else [0.0] * periods
+
+         recent_values = values[-self.window_size :]
+         ma = statistics.mean(recent_values)
+
+         return [ma] * periods
+
+
+ class LinearTrendPredictor:
+     """Linear trend-based predictor."""
+
+     def predict(self, values: list[float], periods: int = 1) -> list[float]:
+         """Predict future values using linear regression."""
+         if len(values) < 2:
+             return [values[-1]] * periods if values else [0.0] * periods
+
+         # Simple linear regression
+         n = len(values)
+         x = list[t.Any](range(n))
+         y = values
+
+         # Calculate slope and intercept
+         x_mean = statistics.mean(x)
+         y_mean = statistics.mean(y)
+
+         numerator = sum((x[i] - x_mean) * (y[i] - y_mean) for i in range(n))
+         denominator = sum((x[i] - x_mean) ** 2 for i in range(n))
+
+         if denominator == 0:
+             return [y_mean] * periods
+
+         slope = numerator / denominator
+         intercept = y_mean - slope * x_mean
+
+         # Predict future values
+         predictions = []
+         for i in range(1, periods + 1):
+             future_x = n + i - 1
+             prediction = slope * future_x + intercept
+             predictions.append(prediction)
+
+         return predictions
+
+
+ class SeasonalPredictor:
+     """Seasonal pattern-based predictor."""
+
+     def __init__(self, season_length: int = 24):
+         self.season_length = season_length
+
+     def predict(self, values: list[float], periods: int = 1) -> list[float]:
+         """Predict using seasonal patterns."""
+         if len(values) < self.season_length:
+             return [values[-1]] * periods if values else [0.0] * periods
+
+         predictions = []
+         for i in range(periods):
+             # Use seasonal pattern
+             season_index = (len(values) + i) % self.season_length
+             if season_index < len(values):
+                 seasonal_value = values[-(self.season_length - season_index)]
+                 predictions.append(seasonal_value)
+             else:
+                 predictions.append(values[-1])
+
+         return predictions
+
+
+ class PredictiveAnalyticsEngine:
+     """Advanced predictive analytics system for quality metrics."""
+
+     def __init__(self, history_limit: int = 1000):
+         """Initialize predictive analytics engine."""
+         self.history_limit = history_limit
+
+         # Data storage
+         self.metric_history: dict[str, deque[tuple[datetime, float]]] = defaultdict(
+             lambda: deque[tuple[datetime, float]](maxlen=history_limit)
+         )
+
+         # Predictors
+         self.predictors = {
+             "moving_average": MovingAveragePredictor(window_size=10),
+             "linear_trend": LinearTrendPredictor(),
+             "seasonal": SeasonalPredictor(season_length=24),
+         }
+
+         # Cached analyses
+         self.trend_analyses: dict[str, TrendAnalysis] = {}
+         self.predictions_cache: dict[str, list[Prediction]] = defaultdict(list)
+
+         # Configuration
+         self.metric_configs = {
+             "test_pass_rate": {
+                 "critical_threshold": 0.8,
+                 "optimal_range": (0.95, 1.0),
+                 "predictor": "moving_average",
+             },
+             "coverage_percentage": {
+                 "critical_threshold": 0.7,
+                 "optimal_range": (0.9, 1.0),
+                 "predictor": "linear_trend",
+             },
+             "execution_time": {
+                 "critical_threshold": 300.0,
+                 "predictor": "seasonal",
+             },
+             "memory_usage": {
+                 "critical_threshold": 1024.0,
+                 "predictor": "linear_trend",
+             },
+             "complexity_score": {
+                 "critical_threshold": 15.0,
+                 "predictor": "moving_average",
+             },
+         }
+
+     def add_metric(
+         self,
+         metric_type: str,
+         value: float,
+         timestamp: datetime | None = None,
+     ) -> None:
+         """Add new metric data point."""
+         if timestamp is None:
+             timestamp = datetime.now()
+
+         self.metric_history[metric_type].append((timestamp, value))
+
+         # Update trend analysis if we have enough data
+         if len(self.metric_history[metric_type]) >= 10:
+             self._update_trend_analysis(metric_type)
+
+     def _update_trend_analysis(self, metric_type: str) -> None:
+         """Update trend analysis for a metric."""
+         history: list[tuple[datetime, float]] = list(self.metric_history[metric_type])
+         values = [point[1] for point in history]
+         timestamps = [point[0] for point in history]
+
+         # Calculate trend direction and strength
+         trend_direction, trend_strength = self._calculate_trend(values)
+
+         # Generate predictions
+         config = self.metric_configs.get(metric_type, {})
+         predictor_name = config.get("predictor", "moving_average")
+         predictor = self.predictors[predictor_name]
+
+         predicted_values = predictor.predict(values, periods=24)  # 24 periods ahead
+         confidence_intervals = self._calculate_confidence_intervals(
+             values, predicted_values
+         )
+
+         self.trend_analyses[metric_type] = TrendAnalysis(
+             metric_type=metric_type,
+             trend_direction=trend_direction,
+             trend_strength=trend_strength,
+             predicted_values=predicted_values,
+             confidence_intervals=confidence_intervals,
+             analysis_period=timestamps[-1] - timestamps[0],
+             last_updated=datetime.now(),
+         )
+
+     def _calculate_trend(self, values: list[float]) -> tuple[str, float]:
+         """Calculate trend direction and strength."""
+         if len(values) < 3:
+             return "stable", 0.0
+
+         # Use linear regression to determine trend
+         n = len(values)
+         x: list[int] = list(range(n))
+         y = values
+
+         x_mean = statistics.mean(x)
+         y_mean = statistics.mean(y)
+
+         numerator = sum((x[i] - x_mean) * (y[i] - y_mean) for i in range(n))
+         denominator = sum((x[i] - x_mean) ** 2 for i in range(n))
+
+         if denominator == 0:
+             return "stable", 0.0
+
+         slope = numerator / denominator
+
+         # Calculate R-squared for trend strength
+         y_pred = [slope * xi + (y_mean - slope * x_mean) for xi in x]
+         ss_res = sum((y[i] - y_pred[i]) ** 2 for i in range(n))
+         ss_tot = sum((y[i] - y_mean) ** 2 for i in range(n))
+
+         r_squared = 1 - (ss_res / ss_tot) if ss_tot != 0 else 0
+         trend_strength = max(0.0, min(1.0, r_squared))
+
+         # Determine direction
+         if abs(slope) < 0.01:
+             direction = "stable"
+         elif slope > 0:
+             direction = "increasing"
+         else:
+             direction = "decreasing"
+
+         # Check for volatility
+         if trend_strength < 0.3:
+             recent_std = statistics.stdev(values[-10:]) if len(values) >= 10 else 0
+             overall_std = statistics.stdev(values)
+             if recent_std > overall_std * 1.5:
+                 direction = "volatile"
+
+         return direction, trend_strength
+
+     def _calculate_confidence_intervals(
+         self, historical: list[float], predictions: list[float]
+     ) -> list[tuple[float, float]]:
+         """Calculate confidence intervals for predictions."""
+         if len(historical) < 2:
+             return [(pred, pred) for pred in predictions]
+
+         # Use historical standard deviation for confidence intervals
+         std_dev = statistics.stdev(historical)
+         confidence_multiplier = 1.96  # 95% confidence
+
+         intervals = []
+         for pred in predictions:
+             lower = pred - confidence_multiplier * std_dev
+             upper = pred + confidence_multiplier * std_dev
+             intervals.append((lower, upper))
+
+         return intervals
+
+     def predict_metric(
+         self,
+         metric_type: str,
+         periods_ahead: int = 1,
+         predictor_name: str | None = None,
+     ) -> list[Prediction]:
+         """Generate predictions for a metric."""
+         if metric_type not in self.metric_history:
+             return []
+
+         history: list[tuple[datetime, float]] = list(self.metric_history[metric_type])
+         values = [point[1] for point in history]
+         last_timestamp = history[-1][0] if history else datetime.now()
+
+         # Select predictor
+         if predictor_name is None:
+             config = self.metric_configs.get(metric_type, {})
+             predictor_name = t.cast(str, config.get("predictor", "moving_average"))
+
+         predictor = self.predictors[predictor_name]
+         predicted_values = predictor.predict(values, periods_ahead)
+
+         # Calculate confidence intervals
+         confidence_intervals = self._calculate_confidence_intervals(
+             values, predicted_values
+         )
+
+         # Calculate model accuracy
+         accuracy = self._calculate_model_accuracy(metric_type, predictor_name)
+
+         # Generate predictions
+         predictions = []
+         for i, (pred_value, conf_interval) in enumerate(
+             zip(predicted_values, confidence_intervals)
+         ):
+             prediction_time = last_timestamp + timedelta(hours=i + 1)
+
+             prediction = Prediction(
+                 metric_type=metric_type,
+                 predicted_at=datetime.now(),
+                 predicted_for=prediction_time,
+                 predicted_value=pred_value,
+                 confidence_interval=conf_interval,
+                 confidence_level=0.95,
+                 model_accuracy=accuracy,
+                 metadata={"predictor": predictor_name},
+             )
+             predictions.append(prediction)
+
+         # Cache predictions
+         self.predictions_cache[metric_type] = predictions
+
+         return predictions
+
+     def _calculate_model_accuracy(self, metric_type: str, predictor_name: str) -> float:
+         """Calculate historical accuracy of the prediction model."""
+         if len(self.metric_history[metric_type]) < 20:
+             return 0.5  # Default accuracy for insufficient data
+
+         history: list[tuple[datetime, float]] = list(self.metric_history[metric_type])
+         values = [point[1] for point in history]
+
+         # Use last 10 points for validation
+         train_data = values[:-10]
+         validation_data = values[-10:]
+
+         if len(train_data) < 5:
+             return 0.5
+
+         # Generate predictions for validation period
+         predictor = self.predictors[predictor_name]
+         predictions = predictor.predict(train_data, periods=len(validation_data))
+
+         # Calculate accuracy (inverse of mean absolute error)
+         mae = statistics.mean(
+             abs(pred - actual) for pred, actual in zip(predictions, validation_data)
+         )
+
+         # Convert to accuracy score (0-1)
+         if mae == 0:
+             return 1.0
+
+         avg_value = statistics.mean(validation_data)
+         relative_error = mae / abs(avg_value) if avg_value != 0 else mae
+
+         return max(0.1, min(1.0, 1.0 - relative_error))
+
+     def analyze_capacity_requirements(
+         self, resource_type: str, current_usage: float, threshold: float = 0.8
+     ) -> CapacityForecast:
+         """Analyze capacity requirements and forecast exhaustion."""
+         if resource_type not in self.metric_history:
+             return CapacityForecast(
+                 resource_type=resource_type,
+                 current_usage=current_usage,
+                 predicted_usage=[],
+                 capacity_threshold=threshold,
+                 estimated_exhaustion=None,
+                 recommended_actions=["Insufficient data for analysis"],
+                 confidence=0.0,
+             )
+
+         # Get predictions for the next 30 days
+         predictions = self.predict_metric(resource_type, periods_ahead=24 * 30)
+
+         predicted_usage = [
+             (pred.predicted_for, pred.predicted_value) for pred in predictions
+         ]
+
+         # Find when threshold will be exceeded
+         estimated_exhaustion = None
+         for timestamp, usage in predicted_usage:
+             if usage >= threshold:
+                 estimated_exhaustion = timestamp
+                 break
+
+         # Generate recommendations
+         recommendations = self._generate_capacity_recommendations(
+             resource_type, current_usage, threshold, estimated_exhaustion
+         )
+
+         # Calculate confidence based on prediction accuracy
+         avg_accuracy = statistics.mean(pred.model_accuracy for pred in predictions)
+
+         return CapacityForecast(
+             resource_type=resource_type,
+             current_usage=current_usage,
+             predicted_usage=predicted_usage,
+             capacity_threshold=threshold,
+             estimated_exhaustion=estimated_exhaustion,
+             recommended_actions=recommendations,
+             confidence=avg_accuracy,
+         )
+
+     def _generate_capacity_recommendations(
+         self,
+         resource_type: str,
+         current_usage: float,
+         threshold: float,
+         estimated_exhaustion: datetime | None,
+     ) -> list[str]:
+         """Generate capacity planning recommendations."""
+         recommendations: list[str] = []
+
+         utilization = current_usage / threshold if threshold > 0 else 0
+
+         if estimated_exhaustion:
+             days_until = (estimated_exhaustion - datetime.now()).days
+             if days_until < 7:
+                 recommendations.extend(
+                     (
+                         f"URGENT: {resource_type} capacity will be exceeded in {days_until} days",
+                         "Consider immediate scaling or optimization",
+                     )
+                 )
+             elif days_until < 30:
+                 recommendations.append(
+                     f"Plan capacity increase for {resource_type} within {days_until} days"
+                 )
+             else:
+                 recommendations.append(
+                     f"Monitor {resource_type} usage, capacity limit expected in {days_until} days"
+                 )
+
+         if utilization > 0.7:
+             recommendations.extend(
+                 (
+                     f"High {resource_type} utilization ({utilization:.1%})",
+                     "Consider proactive scaling",
+                 )
+             )
+
+         if not recommendations:
+             recommendations.append(f"{resource_type} capacity is within normal limits")
+
+         return recommendations
+
+     def get_trend_summary(self) -> dict[str, dict[str, t.Any]]:
+         """Get summary of all trend analyses."""
+         summary = {}
+
+         for metric_type, analysis in self.trend_analyses.items():
+             summary[metric_type] = {
+                 "trend_direction": analysis.trend_direction,
+                 "trend_strength": analysis.trend_strength,
+                 "next_predicted_value": analysis.predicted_values[0]
+                 if analysis.predicted_values
+                 else None,
+                 "confidence_range": analysis.confidence_intervals[0]
+                 if analysis.confidence_intervals
+                 else None,
+                 "last_updated": analysis.last_updated.isoformat(),
+             }
+
+         return summary
+
+     def export_analytics_data(self, output_path: str | Path) -> None:
+         """Export analytics data for external analysis."""
+         import json
+
+         data = {
+             "trend_analyses": {
+                 metric_type: {
+                     "metric_type": analysis.metric_type,
+                     "trend_direction": analysis.trend_direction,
+                     "trend_strength": analysis.trend_strength,
+                     "predicted_values": analysis.predicted_values[:10],  # Limit size
+                     "analysis_period": analysis.analysis_period.total_seconds(),
+                     "last_updated": analysis.last_updated.isoformat(),
+                 }
+                 for metric_type, analysis in self.trend_analyses.items()
+             },
+             "predictions_summary": {
+                 metric_type: len(predictions)
+                 for metric_type, predictions in self.predictions_cache.items()
+             },
+             "exported_at": datetime.now().isoformat(),
+         }
+
+         with open(output_path, "w", encoding="utf-8") as f:
+             json.dump(data, f, indent=2)
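
For orientation, the new crackerjack/services/predictive_analytics.py module shown above can be exercised roughly as follows. This is a minimal sketch based only on the signatures visible in this diff; the import path is inferred from the file list, and the sample metric values are made up for illustration.

from datetime import datetime, timedelta

# Import path inferred from the file list above (not verified against the installed wheel).
from crackerjack.services.predictive_analytics import PredictiveAnalyticsEngine

engine = PredictiveAnalyticsEngine(history_limit=500)

# Feed hourly coverage samples; trend analysis kicks in once 10 points exist.
start = datetime.now() - timedelta(hours=12)
samples = [0.81, 0.82, 0.82, 0.84, 0.85, 0.85, 0.86, 0.88, 0.88, 0.89, 0.90, 0.91]
for hour, coverage in enumerate(samples):
    engine.add_metric("coverage_percentage", coverage, timestamp=start + timedelta(hours=hour))

# "coverage_percentage" maps to the linear_trend predictor in metric_configs,
# so the next three hourly values are extrapolated from a fitted line.
for prediction in engine.predict_metric("coverage_percentage", periods_ahead=3):
    print(prediction.predicted_for, round(prediction.predicted_value, 3), prediction.confidence_interval)

print(engine.get_trend_summary()["coverage_percentage"]["trend_direction"])

The same engine also backs analyze_capacity_requirements and export_analytics_data, which wrap predict_metric output into CapacityForecast records and a JSON export respectively.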