crackerjack 0.33.0__py3-none-any.whl → 0.33.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of crackerjack might be problematic. Click here for more details.
- crackerjack/__main__.py +1350 -34
- crackerjack/adapters/__init__.py +17 -0
- crackerjack/adapters/lsp_client.py +358 -0
- crackerjack/adapters/rust_tool_adapter.py +194 -0
- crackerjack/adapters/rust_tool_manager.py +193 -0
- crackerjack/adapters/skylos_adapter.py +231 -0
- crackerjack/adapters/zuban_adapter.py +560 -0
- crackerjack/agents/base.py +7 -3
- crackerjack/agents/coordinator.py +271 -33
- crackerjack/agents/documentation_agent.py +9 -15
- crackerjack/agents/dry_agent.py +3 -15
- crackerjack/agents/formatting_agent.py +1 -1
- crackerjack/agents/import_optimization_agent.py +36 -180
- crackerjack/agents/performance_agent.py +17 -98
- crackerjack/agents/performance_helpers.py +7 -31
- crackerjack/agents/proactive_agent.py +1 -3
- crackerjack/agents/refactoring_agent.py +16 -85
- crackerjack/agents/refactoring_helpers.py +7 -42
- crackerjack/agents/security_agent.py +9 -48
- crackerjack/agents/test_creation_agent.py +356 -513
- crackerjack/agents/test_specialist_agent.py +0 -4
- crackerjack/api.py +6 -25
- crackerjack/cli/cache_handlers.py +204 -0
- crackerjack/cli/cache_handlers_enhanced.py +683 -0
- crackerjack/cli/facade.py +100 -0
- crackerjack/cli/handlers.py +224 -9
- crackerjack/cli/interactive.py +6 -4
- crackerjack/cli/options.py +642 -55
- crackerjack/cli/utils.py +2 -1
- crackerjack/code_cleaner.py +58 -117
- crackerjack/config/global_lock_config.py +8 -48
- crackerjack/config/hooks.py +53 -62
- crackerjack/core/async_workflow_orchestrator.py +24 -34
- crackerjack/core/autofix_coordinator.py +3 -17
- crackerjack/core/enhanced_container.py +4 -13
- crackerjack/core/file_lifecycle.py +12 -89
- crackerjack/core/performance.py +2 -2
- crackerjack/core/performance_monitor.py +15 -55
- crackerjack/core/phase_coordinator.py +104 -204
- crackerjack/core/resource_manager.py +14 -90
- crackerjack/core/service_watchdog.py +62 -95
- crackerjack/core/session_coordinator.py +149 -0
- crackerjack/core/timeout_manager.py +14 -72
- crackerjack/core/websocket_lifecycle.py +13 -78
- crackerjack/core/workflow_orchestrator.py +171 -174
- crackerjack/docs/INDEX.md +11 -0
- crackerjack/docs/generated/api/API_REFERENCE.md +10895 -0
- crackerjack/docs/generated/api/CLI_REFERENCE.md +109 -0
- crackerjack/docs/generated/api/CROSS_REFERENCES.md +1755 -0
- crackerjack/docs/generated/api/PROTOCOLS.md +3 -0
- crackerjack/docs/generated/api/SERVICES.md +1252 -0
- crackerjack/documentation/__init__.py +31 -0
- crackerjack/documentation/ai_templates.py +756 -0
- crackerjack/documentation/dual_output_generator.py +765 -0
- crackerjack/documentation/mkdocs_integration.py +518 -0
- crackerjack/documentation/reference_generator.py +977 -0
- crackerjack/dynamic_config.py +55 -50
- crackerjack/executors/async_hook_executor.py +10 -15
- crackerjack/executors/cached_hook_executor.py +117 -43
- crackerjack/executors/hook_executor.py +8 -34
- crackerjack/executors/hook_lock_manager.py +26 -183
- crackerjack/executors/individual_hook_executor.py +13 -11
- crackerjack/executors/lsp_aware_hook_executor.py +270 -0
- crackerjack/executors/tool_proxy.py +417 -0
- crackerjack/hooks/lsp_hook.py +79 -0
- crackerjack/intelligence/adaptive_learning.py +25 -10
- crackerjack/intelligence/agent_orchestrator.py +2 -5
- crackerjack/intelligence/agent_registry.py +34 -24
- crackerjack/intelligence/agent_selector.py +5 -7
- crackerjack/interactive.py +17 -6
- crackerjack/managers/async_hook_manager.py +0 -1
- crackerjack/managers/hook_manager.py +79 -1
- crackerjack/managers/publish_manager.py +44 -8
- crackerjack/managers/test_command_builder.py +1 -15
- crackerjack/managers/test_executor.py +1 -3
- crackerjack/managers/test_manager.py +98 -7
- crackerjack/managers/test_manager_backup.py +10 -9
- crackerjack/mcp/cache.py +2 -2
- crackerjack/mcp/client_runner.py +1 -1
- crackerjack/mcp/context.py +191 -68
- crackerjack/mcp/dashboard.py +7 -5
- crackerjack/mcp/enhanced_progress_monitor.py +31 -28
- crackerjack/mcp/file_monitor.py +30 -23
- crackerjack/mcp/progress_components.py +31 -21
- crackerjack/mcp/progress_monitor.py +50 -53
- crackerjack/mcp/rate_limiter.py +6 -6
- crackerjack/mcp/server_core.py +17 -16
- crackerjack/mcp/service_watchdog.py +2 -1
- crackerjack/mcp/state.py +4 -7
- crackerjack/mcp/task_manager.py +11 -9
- crackerjack/mcp/tools/core_tools.py +173 -32
- crackerjack/mcp/tools/error_analyzer.py +3 -2
- crackerjack/mcp/tools/execution_tools.py +8 -10
- crackerjack/mcp/tools/execution_tools_backup.py +42 -30
- crackerjack/mcp/tools/intelligence_tool_registry.py +7 -5
- crackerjack/mcp/tools/intelligence_tools.py +5 -2
- crackerjack/mcp/tools/monitoring_tools.py +33 -70
- crackerjack/mcp/tools/proactive_tools.py +24 -11
- crackerjack/mcp/tools/progress_tools.py +5 -8
- crackerjack/mcp/tools/utility_tools.py +20 -14
- crackerjack/mcp/tools/workflow_executor.py +62 -40
- crackerjack/mcp/websocket/app.py +8 -0
- crackerjack/mcp/websocket/endpoints.py +352 -357
- crackerjack/mcp/websocket/jobs.py +40 -57
- crackerjack/mcp/websocket/monitoring_endpoints.py +2935 -0
- crackerjack/mcp/websocket/server.py +7 -25
- crackerjack/mcp/websocket/websocket_handler.py +6 -17
- crackerjack/mixins/__init__.py +0 -2
- crackerjack/mixins/error_handling.py +1 -70
- crackerjack/models/config.py +12 -1
- crackerjack/models/config_adapter.py +49 -1
- crackerjack/models/protocols.py +122 -122
- crackerjack/models/resource_protocols.py +55 -210
- crackerjack/monitoring/ai_agent_watchdog.py +13 -13
- crackerjack/monitoring/metrics_collector.py +426 -0
- crackerjack/monitoring/regression_prevention.py +8 -8
- crackerjack/monitoring/websocket_server.py +643 -0
- crackerjack/orchestration/advanced_orchestrator.py +11 -6
- crackerjack/orchestration/coverage_improvement.py +3 -3
- crackerjack/orchestration/execution_strategies.py +26 -6
- crackerjack/orchestration/test_progress_streamer.py +8 -5
- crackerjack/plugins/base.py +2 -2
- crackerjack/plugins/hooks.py +7 -0
- crackerjack/plugins/managers.py +11 -8
- crackerjack/security/__init__.py +0 -1
- crackerjack/security/audit.py +6 -35
- crackerjack/services/anomaly_detector.py +392 -0
- crackerjack/services/api_extractor.py +615 -0
- crackerjack/services/backup_service.py +2 -2
- crackerjack/services/bounded_status_operations.py +15 -152
- crackerjack/services/cache.py +127 -1
- crackerjack/services/changelog_automation.py +395 -0
- crackerjack/services/config.py +15 -9
- crackerjack/services/config_merge.py +19 -80
- crackerjack/services/config_template.py +506 -0
- crackerjack/services/contextual_ai_assistant.py +48 -22
- crackerjack/services/coverage_badge_service.py +171 -0
- crackerjack/services/coverage_ratchet.py +27 -25
- crackerjack/services/debug.py +3 -3
- crackerjack/services/dependency_analyzer.py +460 -0
- crackerjack/services/dependency_monitor.py +14 -11
- crackerjack/services/documentation_generator.py +491 -0
- crackerjack/services/documentation_service.py +675 -0
- crackerjack/services/enhanced_filesystem.py +6 -5
- crackerjack/services/enterprise_optimizer.py +865 -0
- crackerjack/services/error_pattern_analyzer.py +676 -0
- crackerjack/services/file_hasher.py +1 -1
- crackerjack/services/git.py +8 -25
- crackerjack/services/health_metrics.py +10 -8
- crackerjack/services/heatmap_generator.py +735 -0
- crackerjack/services/initialization.py +11 -30
- crackerjack/services/input_validator.py +5 -97
- crackerjack/services/intelligent_commit.py +327 -0
- crackerjack/services/log_manager.py +15 -12
- crackerjack/services/logging.py +4 -3
- crackerjack/services/lsp_client.py +628 -0
- crackerjack/services/memory_optimizer.py +19 -87
- crackerjack/services/metrics.py +42 -33
- crackerjack/services/parallel_executor.py +9 -67
- crackerjack/services/pattern_cache.py +1 -1
- crackerjack/services/pattern_detector.py +6 -6
- crackerjack/services/performance_benchmarks.py +18 -59
- crackerjack/services/performance_cache.py +20 -81
- crackerjack/services/performance_monitor.py +27 -95
- crackerjack/services/predictive_analytics.py +510 -0
- crackerjack/services/quality_baseline.py +234 -0
- crackerjack/services/quality_baseline_enhanced.py +646 -0
- crackerjack/services/quality_intelligence.py +785 -0
- crackerjack/services/regex_patterns.py +605 -524
- crackerjack/services/regex_utils.py +43 -123
- crackerjack/services/secure_path_utils.py +5 -164
- crackerjack/services/secure_status_formatter.py +30 -141
- crackerjack/services/secure_subprocess.py +11 -92
- crackerjack/services/security.py +9 -41
- crackerjack/services/security_logger.py +12 -24
- crackerjack/services/server_manager.py +124 -16
- crackerjack/services/status_authentication.py +16 -159
- crackerjack/services/status_security_manager.py +4 -131
- crackerjack/services/thread_safe_status_collector.py +19 -125
- crackerjack/services/unified_config.py +21 -13
- crackerjack/services/validation_rate_limiter.py +5 -54
- crackerjack/services/version_analyzer.py +459 -0
- crackerjack/services/version_checker.py +1 -1
- crackerjack/services/websocket_resource_limiter.py +10 -144
- crackerjack/services/zuban_lsp_service.py +390 -0
- crackerjack/slash_commands/__init__.py +2 -7
- crackerjack/slash_commands/run.md +2 -2
- crackerjack/tools/validate_input_validator_patterns.py +14 -40
- crackerjack/tools/validate_regex_patterns.py +19 -48
- {crackerjack-0.33.0.dist-info → crackerjack-0.33.1.dist-info}/METADATA +196 -25
- crackerjack-0.33.1.dist-info/RECORD +229 -0
- crackerjack/CLAUDE.md +0 -207
- crackerjack/RULES.md +0 -380
- crackerjack/py313.py +0 -234
- crackerjack-0.33.0.dist-info/RECORD +0 -187
- {crackerjack-0.33.0.dist-info → crackerjack-0.33.1.dist-info}/WHEEL +0 -0
- {crackerjack-0.33.0.dist-info → crackerjack-0.33.1.dist-info}/entry_points.txt +0 -0
- {crackerjack-0.33.0.dist-info → crackerjack-0.33.1.dist-info}/licenses/LICENSE +0 -0
|
@@ -0,0 +1,646 @@
|
|
|
1
|
+
"""Enhanced Quality Baseline Service with trending, alerts, and export capabilities."""
|
|
2
|
+
|
|
3
|
+
import json
|
|
4
|
+
import typing as t
|
|
5
|
+
from dataclasses import asdict, dataclass, field
|
|
6
|
+
from datetime import datetime, timedelta
|
|
7
|
+
from enum import Enum
|
|
8
|
+
from pathlib import Path
|
|
9
|
+
|
|
10
|
+
from crackerjack.services.cache import CrackerjackCache
|
|
11
|
+
from crackerjack.services.quality_baseline import QualityBaselineService, QualityMetrics
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
class TrendDirection(str, Enum):
    """Direction of a quality-score trend over an analysis window.

    Inherits from ``str`` so members serialize as their plain string
    values (e.g. when dumped to JSON via ``asdict``).
    """

    IMPROVING = "improving"  # scores rising over the period
    DECLINING = "declining"  # scores falling over the period
    STABLE = "stable"  # negligible slope
    VOLATILE = "volatile"  # high score variance, overrides slope
|
|
21
|
+
|
|
22
|
+
|
|
23
|
+
class AlertSeverity(str, Enum):
    """Severity level attached to a :class:`QualityAlert`.

    Inherits from ``str`` so members serialize as their plain string
    values in exported reports.
    """

    INFO = "info"
    WARNING = "warning"
    CRITICAL = "critical"
|
|
29
|
+
|
|
30
|
+
|
|
31
|
+
@dataclass
class QualityTrend:
    """Quality trend analysis over time.

    Produced by ``EnhancedQualityBaselineService.analyze_quality_trend``
    from a linear fit over recent quality scores.
    """

    direction: TrendDirection
    change_rate: float  # regression slope, in score points per day
    confidence: float  # 0.0 to 1.0; higher with more data / less volatility
    period_days: int  # length of the analysis window in days
    recent_scores: list[int] = field(default_factory=list)  # up to the last 10 scores

    def to_dict(self) -> dict[str, t.Any]:
        """Return a JSON-serializable dict of all fields."""
        return asdict(self)
|
|
43
|
+
|
|
44
|
+
|
|
45
|
+
@dataclass
class QualityAlert:
    """Alert raised when a quality metric crosses a configured threshold."""

    severity: AlertSeverity
    message: str
    metric_name: str
    current_value: float
    threshold_value: float
    triggered_at: datetime
    git_hash: str | None = None

    def to_dict(self) -> dict[str, t.Any]:
        """Serialize to a dict, rendering the timestamp as an ISO-8601 string."""
        # Merge-override keeps field order while replacing the datetime value.
        return {**asdict(self), "triggered_at": self.triggered_at.isoformat()}
|
|
61
|
+
|
|
62
|
+
|
|
63
|
+
@dataclass
class UnifiedMetrics:
    """Unified metrics snapshot for the real-time monitoring dashboard."""

    timestamp: datetime
    quality_score: int
    test_coverage: float  # coverage percent
    hook_duration: float  # seconds (0.0 when not measured)
    active_jobs: int
    error_count: int  # aggregate of hook/security/type/lint issue counts
    trend_direction: TrendDirection
    # Fixed: plain ``dict`` as the factory; the previous
    # ``default_factory=dict[str, t.Any]`` called a subscripted generic,
    # which happens to work but is unidiomatic and confusing.
    predictions: dict[str, t.Any] = field(default_factory=dict)

    def to_dict(self) -> dict[str, t.Any]:
        """Serialize to a dict with an ISO-8601 timestamp."""
        data = asdict(self)
        data["timestamp"] = self.timestamp.isoformat()
        return data
|
|
80
|
+
|
|
81
|
+
|
|
82
|
+
@dataclass
class SystemHealthStatus:
    """System health status for monitoring."""

    overall_status: str  # "healthy", "warning", "critical"
    cpu_usage: float  # percent
    memory_usage: float  # percent
    disk_usage: float  # percent
    # Fixed: factory was ``dict[str, t.Any]``, inconsistent with the
    # ``dict[str, str]`` annotation; a plain ``dict`` is correct either way.
    service_status: dict[str, str] = field(default_factory=dict)

    def to_dict(self) -> dict[str, t.Any]:
        """Return a JSON-serializable dict of all fields."""
        return asdict(self)
|
|
94
|
+
|
|
95
|
+
|
|
96
|
+
@dataclass
class DashboardState:
    """Complete dashboard state for real-time monitoring.

    Aggregates the latest unified metrics, recent history, active alerts,
    system health, and recommendation strings into one serializable unit.
    """

    current_metrics: UnifiedMetrics
    historical_data: list[UnifiedMetrics]
    active_alerts: list[QualityAlert]
    system_health: SystemHealthStatus
    recommendations: list[str]
    last_updated: datetime = field(default_factory=datetime.now)

    def to_dict(self) -> dict[str, t.Any]:
        """Serialize the whole state tree to a JSON-ready dict."""
        return {
            "current_metrics": self.current_metrics.to_dict(),
            "historical_data": [metrics.to_dict() for metrics in self.historical_data],
            "active_alerts": [alert.to_dict() for alert in self.active_alerts],
            "system_health": self.system_health.to_dict(),
            "recommendations": self.recommendations,
            "last_updated": self.last_updated.isoformat(),
        }
|
|
116
|
+
|
|
117
|
+
|
|
118
|
+
@dataclass
class QualityReport:
    """Comprehensive quality report: metrics, trend, alerts, and advice."""

    current_metrics: QualityMetrics | None
    trend: QualityTrend | None
    alerts: list[QualityAlert]
    historical_data: list[QualityMetrics]
    recommendations: list[str]
    generated_at: datetime = field(default_factory=datetime.now)

    def to_dict(self) -> dict[str, t.Any]:
        """Serialize the report to a JSON-ready dict (None-safe)."""
        metrics = self.current_metrics
        return {
            "current_metrics": None if metrics is None else metrics.to_dict(),
            "trend": None if self.trend is None else self.trend.to_dict(),
            "alerts": [a.to_dict() for a in self.alerts],
            "historical_data": [m.to_dict() for m in self.historical_data],
            "recommendations": self.recommendations,
            "generated_at": self.generated_at.isoformat(),
        }
|
|
140
|
+
|
|
141
|
+
|
|
142
|
+
class EnhancedQualityBaselineService(QualityBaselineService):
|
|
143
|
+
"""Enhanced quality baseline service with advanced analytics."""
|
|
144
|
+
|
|
145
|
+
    def __init__(
        self,
        cache: CrackerjackCache | None = None,
        alert_thresholds: dict[str, float] | None = None,
    ) -> None:
        """Initialize the enhanced service.

        Args:
            cache: Optional cache, forwarded to the base
                ``QualityBaselineService``.
            alert_thresholds: Per-metric alert thresholds; when omitted
                (or falsy), the defaults below are used.
        """
        super().__init__(cache)
        self.alert_thresholds = alert_thresholds or {
            "quality_score_drop": 10.0,  # Alert if score drops by 10+ points
            "coverage_drop": 5.0,  # Alert if coverage drops by 5%+
            "test_pass_rate_drop": 10.0,  # Alert if pass rate drops by 10%+
            "security_issues_increase": 1,  # Alert on any security issue increase
            "type_errors_threshold": 10,  # Alert if type errors exceed 10
        }
|
|
158
|
+
|
|
159
|
+
    def analyze_quality_trend(
        self, days: int = 30, min_data_points: int = 5
    ) -> QualityTrend | None:
        """Analyze quality trend over specified period.

        Fits a simple least-squares line to the quality scores recorded
        within the last *days* days and classifies the slope; a high score
        variance overrides the slope classification as VOLATILE.

        Args:
            days: Length of the analysis window in days.
            min_data_points: Minimum number of baselines required (both
                overall and within the window).

        Returns:
            A ``QualityTrend``, or ``None`` when there is too little data.
        """
        baselines = self.get_recent_baselines(
            limit=days * 2
        )  # Get more data for analysis

        if len(baselines) < min_data_points:
            return None

        # Filter to specified period
        cutoff_date = datetime.now() - timedelta(days=days)
        recent_baselines = [b for b in baselines if b.timestamp >= cutoff_date]

        if len(recent_baselines) < min_data_points:
            return None

        # Calculate trend
        scores = [b.quality_score for b in recent_baselines]
        # X axis: days elapsed since the cutoff (seconds / 86400).
        timestamps = [
            (b.timestamp - cutoff_date).total_seconds() / 86400
            for b in recent_baselines
        ]

        # Simple linear regression for trend
        n = len(scores)
        sum_x = sum(timestamps)
        sum_y = sum(scores)
        sum_xy = sum(x * y for x, y in zip(timestamps, scores))
        sum_x2 = sum(x * x for x in timestamps)

        # Guard against a zero denominator (degenerate x values).
        if n * sum_x2 - sum_x * sum_x == 0:
            slope: float = 0.0
        else:
            slope = (n * sum_xy - sum_x * sum_y) / (n * sum_x2 - sum_x * sum_x)

        # Determine direction and confidence
        abs_slope = abs(slope)

        if abs_slope < 0.1:
            direction = TrendDirection.STABLE
        elif slope > 0:
            direction = TrendDirection.IMPROVING
        else:
            direction = TrendDirection.DECLINING

        # Calculate volatility (standard deviation of scores)
        mean_score = sum(scores) / len(scores)
        variance = sum((score - mean_score) ** 2 for score in scores) / len(scores)
        volatility = variance**0.5

        if volatility > 15:  # High volatility threshold
            # High variance overrides the slope-based classification.
            direction = TrendDirection.VOLATILE

        # Confidence based on data consistency and amount
        confidence = min(1.0, (len(scores) / 10) * (1 / (volatility + 1)))

        return QualityTrend(
            direction=direction,
            change_rate=slope,
            confidence=confidence,
            period_days=days,
            recent_scores=scores[-10:],  # Last 10 scores
        )
|
|
224
|
+
|
|
225
|
+
def check_quality_alerts(
|
|
226
|
+
self, current_metrics: dict[str, t.Any], baseline_git_hash: str | None = None
|
|
227
|
+
) -> list[QualityAlert]:
|
|
228
|
+
"""Check for quality alerts based on thresholds."""
|
|
229
|
+
alerts: list[QualityAlert] = []
|
|
230
|
+
baseline = self.get_baseline(baseline_git_hash)
|
|
231
|
+
|
|
232
|
+
if not baseline:
|
|
233
|
+
return alerts
|
|
234
|
+
|
|
235
|
+
# Filter metrics to only include parameters that calculate_quality_score accepts
|
|
236
|
+
score_metrics = {
|
|
237
|
+
k: v
|
|
238
|
+
for k, v in current_metrics.items()
|
|
239
|
+
if k
|
|
240
|
+
in (
|
|
241
|
+
"coverage_percent",
|
|
242
|
+
"test_pass_rate",
|
|
243
|
+
"hook_failures",
|
|
244
|
+
"complexity_violations",
|
|
245
|
+
"security_issues",
|
|
246
|
+
"type_errors",
|
|
247
|
+
"linting_issues",
|
|
248
|
+
)
|
|
249
|
+
}
|
|
250
|
+
current_score = self.calculate_quality_score(**score_metrics)
|
|
251
|
+
git_hash = self.get_current_git_hash()
|
|
252
|
+
|
|
253
|
+
# Quality score drop alert
|
|
254
|
+
score_drop = baseline.quality_score - current_score
|
|
255
|
+
if score_drop >= self.alert_thresholds["quality_score_drop"]:
|
|
256
|
+
alerts.append(
|
|
257
|
+
QualityAlert(
|
|
258
|
+
severity=AlertSeverity.CRITICAL
|
|
259
|
+
if score_drop >= 20
|
|
260
|
+
else AlertSeverity.WARNING,
|
|
261
|
+
message=f"Quality score dropped by {score_drop:.1f} points (from {baseline.quality_score} to {current_score})",
|
|
262
|
+
metric_name="quality_score",
|
|
263
|
+
current_value=current_score,
|
|
264
|
+
threshold_value=baseline.quality_score
|
|
265
|
+
- self.alert_thresholds["quality_score_drop"],
|
|
266
|
+
triggered_at=datetime.now(),
|
|
267
|
+
git_hash=git_hash,
|
|
268
|
+
)
|
|
269
|
+
)
|
|
270
|
+
|
|
271
|
+
# Coverage drop alert
|
|
272
|
+
coverage_drop = baseline.coverage_percent - current_metrics.get(
|
|
273
|
+
"coverage_percent", 0
|
|
274
|
+
)
|
|
275
|
+
if coverage_drop >= self.alert_thresholds["coverage_drop"]:
|
|
276
|
+
alerts.append(
|
|
277
|
+
QualityAlert(
|
|
278
|
+
severity=AlertSeverity.WARNING,
|
|
279
|
+
message=f"Test coverage dropped by {coverage_drop:.1f}% (from {baseline.coverage_percent:.1f}% to {current_metrics.get('coverage_percent', 0):.1f}%)",
|
|
280
|
+
metric_name="coverage_percent",
|
|
281
|
+
current_value=current_metrics.get("coverage_percent", 0),
|
|
282
|
+
threshold_value=baseline.coverage_percent
|
|
283
|
+
- self.alert_thresholds["coverage_drop"],
|
|
284
|
+
triggered_at=datetime.now(),
|
|
285
|
+
git_hash=git_hash,
|
|
286
|
+
)
|
|
287
|
+
)
|
|
288
|
+
|
|
289
|
+
# Security issues increase alert
|
|
290
|
+
security_increase = (
|
|
291
|
+
current_metrics.get("security_issues", 0) - baseline.security_issues
|
|
292
|
+
)
|
|
293
|
+
if security_increase >= self.alert_thresholds["security_issues_increase"]:
|
|
294
|
+
alerts.append(
|
|
295
|
+
QualityAlert(
|
|
296
|
+
severity=AlertSeverity.CRITICAL,
|
|
297
|
+
message=f"Security issues increased by {security_increase} (from {baseline.security_issues} to {current_metrics.get('security_issues', 0)})",
|
|
298
|
+
metric_name="security_issues",
|
|
299
|
+
current_value=current_metrics.get("security_issues", 0),
|
|
300
|
+
threshold_value=baseline.security_issues
|
|
301
|
+
+ self.alert_thresholds["security_issues_increase"]
|
|
302
|
+
- 1,
|
|
303
|
+
triggered_at=datetime.now(),
|
|
304
|
+
git_hash=git_hash,
|
|
305
|
+
)
|
|
306
|
+
)
|
|
307
|
+
|
|
308
|
+
# Type errors threshold alert
|
|
309
|
+
type_errors = current_metrics.get("type_errors", 0)
|
|
310
|
+
if type_errors >= self.alert_thresholds["type_errors_threshold"]:
|
|
311
|
+
alerts.append(
|
|
312
|
+
QualityAlert(
|
|
313
|
+
severity=AlertSeverity.WARNING,
|
|
314
|
+
message=f"Type errors ({type_errors}) exceed threshold ({self.alert_thresholds['type_errors_threshold']})",
|
|
315
|
+
metric_name="type_errors",
|
|
316
|
+
current_value=type_errors,
|
|
317
|
+
threshold_value=self.alert_thresholds["type_errors_threshold"],
|
|
318
|
+
triggered_at=datetime.now(),
|
|
319
|
+
git_hash=git_hash,
|
|
320
|
+
)
|
|
321
|
+
)
|
|
322
|
+
|
|
323
|
+
return alerts
|
|
324
|
+
|
|
325
|
+
def generate_recommendations(
|
|
326
|
+
self,
|
|
327
|
+
current_metrics: dict[str, t.Any],
|
|
328
|
+
trend: QualityTrend | None,
|
|
329
|
+
alerts: list[QualityAlert],
|
|
330
|
+
) -> list[str]:
|
|
331
|
+
"""Generate actionable recommendations."""
|
|
332
|
+
recommendations: list[str] = []
|
|
333
|
+
|
|
334
|
+
# Generate different types of recommendations
|
|
335
|
+
self._add_coverage_recommendations(current_metrics, recommendations)
|
|
336
|
+
self._add_error_recommendations(current_metrics, recommendations)
|
|
337
|
+
self._add_trend_recommendations(trend, recommendations)
|
|
338
|
+
self._add_alert_recommendations(alerts, recommendations)
|
|
339
|
+
self._add_general_recommendations(current_metrics, recommendations)
|
|
340
|
+
|
|
341
|
+
return recommendations
|
|
342
|
+
|
|
343
|
+
def _add_coverage_recommendations(
|
|
344
|
+
self, metrics: dict[str, t.Any], recommendations: list[str]
|
|
345
|
+
) -> None:
|
|
346
|
+
"""Add coverage-based recommendations."""
|
|
347
|
+
coverage = metrics.get("coverage_percent", 0)
|
|
348
|
+
if coverage < 80:
|
|
349
|
+
recommendations.append(
|
|
350
|
+
f"📊 Increase test coverage from {coverage:.1f}% to 80%+ by adding tests for uncovered code paths"
|
|
351
|
+
)
|
|
352
|
+
elif coverage < 95:
|
|
353
|
+
recommendations.append(
|
|
354
|
+
f"🎯 Consider targeting 95%+ coverage (currently {coverage:.1f}%) for better code quality"
|
|
355
|
+
)
|
|
356
|
+
|
|
357
|
+
def _add_error_recommendations(
|
|
358
|
+
self, metrics: dict[str, t.Any], recommendations: list[str]
|
|
359
|
+
) -> None:
|
|
360
|
+
"""Add error-based recommendations."""
|
|
361
|
+
type_errors = metrics.get("type_errors", 0)
|
|
362
|
+
if type_errors > 0:
|
|
363
|
+
recommendations.append(
|
|
364
|
+
f"🔧 Fix {type_errors} type errors to improve code reliability"
|
|
365
|
+
)
|
|
366
|
+
|
|
367
|
+
security_issues = metrics.get("security_issues", 0)
|
|
368
|
+
if security_issues > 0:
|
|
369
|
+
recommendations.append(
|
|
370
|
+
f"🔒 Address {security_issues} security issues immediately"
|
|
371
|
+
)
|
|
372
|
+
|
|
373
|
+
def _add_trend_recommendations(
|
|
374
|
+
self, trend: QualityTrend | None, recommendations: list[str]
|
|
375
|
+
) -> None:
|
|
376
|
+
"""Add trend-based recommendations."""
|
|
377
|
+
if not trend:
|
|
378
|
+
return
|
|
379
|
+
|
|
380
|
+
trend_messages = {
|
|
381
|
+
TrendDirection.DECLINING: "📉 Quality trend is declining - consider code review process improvements",
|
|
382
|
+
TrendDirection.VOLATILE: "⚠️ Quality is volatile - implement more consistent testing practices",
|
|
383
|
+
TrendDirection.IMPROVING: "📈 Great job! Quality is improving - maintain current practices",
|
|
384
|
+
}
|
|
385
|
+
|
|
386
|
+
message = trend_messages.get(trend.direction)
|
|
387
|
+
if message:
|
|
388
|
+
recommendations.append(message)
|
|
389
|
+
|
|
390
|
+
def _add_alert_recommendations(
|
|
391
|
+
self, alerts: list[QualityAlert], recommendations: list[str]
|
|
392
|
+
) -> None:
|
|
393
|
+
"""Add alert-based recommendations."""
|
|
394
|
+
critical_alerts = [a for a in alerts if a.severity == AlertSeverity.CRITICAL]
|
|
395
|
+
if critical_alerts:
|
|
396
|
+
recommendations.append(
|
|
397
|
+
f"🚨 Address {len(critical_alerts)} critical quality issues before proceeding"
|
|
398
|
+
)
|
|
399
|
+
|
|
400
|
+
def _add_general_recommendations(
|
|
401
|
+
self, metrics: dict[str, t.Any], recommendations: list[str]
|
|
402
|
+
) -> None:
|
|
403
|
+
"""Add general recommendations."""
|
|
404
|
+
hook_failures = metrics.get("hook_failures", 0)
|
|
405
|
+
if hook_failures > 0:
|
|
406
|
+
recommendations.append(
|
|
407
|
+
f"⚙️ Fix {hook_failures} pre-commit hook failures to streamline development"
|
|
408
|
+
)
|
|
409
|
+
|
|
410
|
+
    def generate_comprehensive_report(
        self, current_metrics: dict[str, t.Any] | None = None, days: int = 30
    ) -> QualityReport:
        """Generate comprehensive quality report.

        Args:
            current_metrics: Fresh metric values; when omitted (or falsy)
                the latest stored baseline is used instead.
            days: Window, in days, for trend analysis and historical data.

        Returns:
            A ``QualityReport`` combining current metrics, trend, alerts,
            history, and recommendations.
        """
        # Get current metrics or create from latest baseline
        current_baseline = None
        if current_metrics:
            git_hash = self.get_current_git_hash()
            if git_hash:
                # Filter metrics to only include parameters that calculate_quality_score accepts
                score_metrics = {
                    k: v
                    for k, v in current_metrics.items()
                    if k
                    in (
                        "coverage_percent",
                        "test_pass_rate",
                        "hook_failures",
                        "complexity_violations",
                        "security_issues",
                        "type_errors",
                        "linting_issues",
                    )
                }
                quality_score = self.calculate_quality_score(**score_metrics)
                current_baseline = QualityMetrics(
                    git_hash=git_hash,
                    timestamp=datetime.now(),
                    coverage_percent=current_metrics.get("coverage_percent", 0.0),
                    test_count=current_metrics.get("test_count", 0),
                    test_pass_rate=current_metrics.get("test_pass_rate", 0.0),
                    hook_failures=current_metrics.get("hook_failures", 0),
                    complexity_violations=current_metrics.get(
                        "complexity_violations", 0
                    ),
                    security_issues=current_metrics.get("security_issues", 0),
                    type_errors=current_metrics.get("type_errors", 0),
                    linting_issues=current_metrics.get("linting_issues", 0),
                    quality_score=quality_score,
                )
        else:
            current_baseline = self.get_baseline()

        # Analyze trend
        trend = self.analyze_quality_trend(days=days)

        # Check alerts (only meaningful with fresh metrics)
        alerts = []
        if current_metrics:
            alerts = self.check_quality_alerts(current_metrics)

        # Get historical data
        historical_data = self.get_recent_baselines(limit=days)

        # Generate recommendations from fresh metrics when given, otherwise
        # from the baseline's stored values; empty dict when neither exists.
        metrics_dict = current_metrics or (
            {
                "coverage_percent": current_baseline.coverage_percent,
                "test_count": current_baseline.test_count,
                "test_pass_rate": current_baseline.test_pass_rate,
                "hook_failures": current_baseline.hook_failures,
                "complexity_violations": current_baseline.complexity_violations,
                "security_issues": current_baseline.security_issues,
                "type_errors": current_baseline.type_errors,
                "linting_issues": current_baseline.linting_issues,
            }
            if current_baseline
            else {}
        )

        recommendations = self.generate_recommendations(metrics_dict, trend, alerts)

        return QualityReport(
            current_metrics=current_baseline,
            trend=trend,
            alerts=alerts,
            historical_data=historical_data,
            recommendations=recommendations,
        )
|
|
489
|
+
|
|
490
|
+
def export_report(
|
|
491
|
+
self, report: QualityReport, output_path: Path, format: str = "json"
|
|
492
|
+
) -> None:
|
|
493
|
+
"""Export quality report to file."""
|
|
494
|
+
if format.lower() == "json":
|
|
495
|
+
with output_path.open("w") as f:
|
|
496
|
+
json.dump(report.to_dict(), f, indent=2, default=str)
|
|
497
|
+
else:
|
|
498
|
+
raise ValueError(f"Unsupported export format: {format}")
|
|
499
|
+
|
|
500
|
+
    def set_alert_threshold(self, metric: str, threshold: float) -> None:
        """Update alert threshold for specific metric.

        Args:
            metric: Threshold key, e.g. ``"coverage_drop"``.
            threshold: New threshold value.
        """
        self.alert_thresholds[metric] = threshold
|
|
503
|
+
|
|
504
|
+
def get_alert_thresholds(self) -> dict[str, float]:
|
|
505
|
+
"""Get current alert thresholds."""
|
|
506
|
+
return self.alert_thresholds.copy()
|
|
507
|
+
|
|
508
|
+
    def create_unified_metrics(
        self, current_metrics: dict[str, t.Any], active_job_count: int = 0
    ) -> UnifiedMetrics:
        """Create UnifiedMetrics from current quality data.

        Args:
            current_metrics: Raw metric values keyed by metric name.
            active_job_count: Number of currently running jobs.

        Returns:
            A ``UnifiedMetrics`` snapshot; includes a 7-day score
            prediction when the 7-day trend is confident enough (> 0.5).
        """
        # Calculate quality score from only the accepted parameters
        score_metrics = {
            k: v
            for k, v in current_metrics.items()
            if k
            in (
                "coverage_percent",
                "test_pass_rate",
                "hook_failures",
                "complexity_violations",
                "security_issues",
                "type_errors",
                "linting_issues",
            )
        }
        quality_score = self.calculate_quality_score(**score_metrics)

        # Get trend direction (short 7-day window for dashboard use)
        trend = self.analyze_quality_trend(days=7)
        trend_direction = trend.direction if trend else TrendDirection.STABLE

        # Calculate error count as the sum of all issue categories
        error_count = (
            current_metrics.get("hook_failures", 0)
            + current_metrics.get("security_issues", 0)
            + current_metrics.get("type_errors", 0)
            + current_metrics.get("linting_issues", 0)
        )

        # Create predictions based on trend
        predictions = {}
        if trend and trend.confidence > 0.5:
            days_ahead = 7
            # Linear extrapolation, clamped to the 0-100 score range.
            predicted_score = quality_score + (trend.change_rate * days_ahead)
            predictions["quality_score_7_days"] = max(0.0, min(100.0, predicted_score))

        return UnifiedMetrics(
            timestamp=datetime.now(),
            quality_score=quality_score,
            test_coverage=current_metrics.get("coverage_percent", 0.0),
            hook_duration=current_metrics.get("hook_duration", 0.0),
            active_jobs=active_job_count,
            error_count=error_count,
            trend_direction=trend_direction,
            predictions=predictions,
        )
|
|
558
|
+
|
|
559
|
+
def get_system_health(self) -> SystemHealthStatus:
    """Get current system health status.

    Samples CPU, memory, and disk usage via psutil (when installed) and
    derives an overall status from fixed thresholds. Falls back to a basic
    "healthy" status with zeroed usage numbers when psutil is unavailable.

    Returns:
        SystemHealthStatus with overall status, resource usage percentages,
        and per-service health flags.
    """
    try:
        # Import inside the try so a missing psutil actually triggers the
        # fallback below. Previously the import sat outside this handler,
        # so an uninstalled psutil raised an uncaught ImportError and the
        # except branch was unreachable.
        import psutil

        cpu_percent = psutil.cpu_percent(interval=1)
        memory = psutil.virtual_memory()
        disk = psutil.disk_usage("/")

        # Determine overall status: critical beats warning beats healthy.
        if cpu_percent > 90 or memory.percent > 90 or disk.percent > 95:
            overall_status = "critical"
        elif cpu_percent > 70 or memory.percent > 80 or disk.percent > 85:
            overall_status = "warning"
        else:
            overall_status = "healthy"

        service_status = {
            "quality_baseline": "healthy",
            "git": "healthy" if self.get_current_git_hash() else "warning",
            "cache": "healthy" if self.cache else "warning",
        }

        return SystemHealthStatus(
            overall_status=overall_status,
            cpu_usage=cpu_percent,
            memory_usage=memory.percent,
            disk_usage=disk.percent,
            service_status=service_status,
        )
    except ImportError:
        # psutil not available, return basic status
        return SystemHealthStatus(
            overall_status="healthy",
            cpu_usage=0.0,
            memory_usage=0.0,
            disk_usage=0.0,
            service_status={"quality_baseline": "healthy"},
        )
|
|
598
|
+
|
|
599
|
+
def create_dashboard_state(
    self,
    current_metrics: dict[str, t.Any],
    active_job_count: int = 0,
    historical_days: int = 30,
) -> DashboardState:
    """Assemble the complete dashboard state for monitoring."""
    # Snapshot of the current quality metrics.
    unified_metrics = self.create_unified_metrics(current_metrics, active_job_count)

    # Convert the most recent baselines (last 10 points) into UnifiedMetrics.
    recent_baselines = self.get_recent_baselines(limit=historical_days)[-10:]
    historical_unified = []
    for baseline in recent_baselines:
        total_errors = (
            baseline.hook_failures
            + baseline.security_issues
            + baseline.type_errors
            + baseline.linting_issues
        )
        historical_unified.append(
            UnifiedMetrics(
                timestamp=baseline.timestamp,
                quality_score=baseline.quality_score,
                test_coverage=baseline.coverage_percent,
                hook_duration=0.0,  # not tracked in baseline records
                active_jobs=0,  # historical data carries no live job count
                error_count=total_errors,
                trend_direction=TrendDirection.STABLE,  # per-point trend not computed
                predictions={},
            )
        )

    # Active alerts and live system health.
    alerts = self.check_quality_alerts(current_metrics)
    system_health = self.get_system_health()

    # Recommendations derived from the 7-day trend and current alerts.
    trend = self.analyze_quality_trend(days=7)
    recommendations = self.generate_recommendations(current_metrics, trend, alerts)

    return DashboardState(
        current_metrics=unified_metrics,
        historical_data=historical_unified,
        active_alerts=alerts,
        system_health=system_health,
        recommendations=recommendations,
    )
|