empathy-framework 4.6.6-py3-none-any.whl → 4.7.0-py3-none-any.whl
This diff covers publicly released package versions as published to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions exactly as they appear in that public registry.
- {empathy_framework-4.6.6.dist-info → empathy_framework-4.7.0.dist-info}/METADATA +7 -6
- empathy_framework-4.7.0.dist-info/RECORD +354 -0
- {empathy_framework-4.6.6.dist-info → empathy_framework-4.7.0.dist-info}/top_level.txt +0 -2
- empathy_healthcare_plugin/monitors/monitoring/__init__.py +9 -9
- empathy_llm_toolkit/agent_factory/__init__.py +6 -6
- empathy_llm_toolkit/agent_factory/adapters/wizard_adapter.py +7 -10
- empathy_llm_toolkit/agents_md/__init__.py +22 -0
- empathy_llm_toolkit/agents_md/loader.py +218 -0
- empathy_llm_toolkit/agents_md/parser.py +271 -0
- empathy_llm_toolkit/agents_md/registry.py +307 -0
- empathy_llm_toolkit/commands/__init__.py +51 -0
- empathy_llm_toolkit/commands/context.py +375 -0
- empathy_llm_toolkit/commands/loader.py +301 -0
- empathy_llm_toolkit/commands/models.py +231 -0
- empathy_llm_toolkit/commands/parser.py +371 -0
- empathy_llm_toolkit/commands/registry.py +429 -0
- empathy_llm_toolkit/config/__init__.py +8 -8
- empathy_llm_toolkit/config/unified.py +3 -7
- empathy_llm_toolkit/context/__init__.py +22 -0
- empathy_llm_toolkit/context/compaction.py +455 -0
- empathy_llm_toolkit/context/manager.py +434 -0
- empathy_llm_toolkit/hooks/__init__.py +24 -0
- empathy_llm_toolkit/hooks/config.py +306 -0
- empathy_llm_toolkit/hooks/executor.py +289 -0
- empathy_llm_toolkit/hooks/registry.py +302 -0
- empathy_llm_toolkit/hooks/scripts/__init__.py +39 -0
- empathy_llm_toolkit/hooks/scripts/evaluate_session.py +201 -0
- empathy_llm_toolkit/hooks/scripts/first_time_init.py +285 -0
- empathy_llm_toolkit/hooks/scripts/pre_compact.py +207 -0
- empathy_llm_toolkit/hooks/scripts/session_end.py +183 -0
- empathy_llm_toolkit/hooks/scripts/session_start.py +163 -0
- empathy_llm_toolkit/hooks/scripts/suggest_compact.py +225 -0
- empathy_llm_toolkit/learning/__init__.py +30 -0
- empathy_llm_toolkit/learning/evaluator.py +438 -0
- empathy_llm_toolkit/learning/extractor.py +514 -0
- empathy_llm_toolkit/learning/storage.py +560 -0
- empathy_llm_toolkit/providers.py +4 -11
- empathy_llm_toolkit/security/__init__.py +17 -17
- empathy_llm_toolkit/utils/tokens.py +2 -5
- empathy_os/__init__.py +202 -70
- empathy_os/cache_monitor.py +5 -3
- empathy_os/cli/__init__.py +11 -55
- empathy_os/cli/__main__.py +29 -15
- empathy_os/cli/commands/inspection.py +21 -12
- empathy_os/cli/commands/memory.py +4 -12
- empathy_os/cli/commands/profiling.py +198 -0
- empathy_os/cli/commands/utilities.py +27 -7
- empathy_os/cli.py +28 -57
- empathy_os/cli_unified.py +525 -1164
- empathy_os/cost_tracker.py +9 -3
- empathy_os/dashboard/server.py +200 -2
- empathy_os/hot_reload/__init__.py +7 -7
- empathy_os/hot_reload/config.py +6 -7
- empathy_os/hot_reload/integration.py +35 -35
- empathy_os/hot_reload/reloader.py +57 -57
- empathy_os/hot_reload/watcher.py +28 -28
- empathy_os/hot_reload/websocket.py +2 -2
- empathy_os/memory/__init__.py +11 -4
- empathy_os/memory/claude_memory.py +1 -1
- empathy_os/memory/cross_session.py +8 -12
- empathy_os/memory/edges.py +6 -6
- empathy_os/memory/file_session.py +770 -0
- empathy_os/memory/graph.py +30 -30
- empathy_os/memory/nodes.py +6 -6
- empathy_os/memory/short_term.py +15 -9
- empathy_os/memory/unified.py +606 -140
- empathy_os/meta_workflows/agent_creator.py +3 -9
- empathy_os/meta_workflows/cli_meta_workflows.py +113 -53
- empathy_os/meta_workflows/form_engine.py +6 -18
- empathy_os/meta_workflows/intent_detector.py +64 -24
- empathy_os/meta_workflows/models.py +3 -1
- empathy_os/meta_workflows/pattern_learner.py +13 -31
- empathy_os/meta_workflows/plan_generator.py +55 -47
- empathy_os/meta_workflows/session_context.py +2 -3
- empathy_os/meta_workflows/workflow.py +20 -51
- empathy_os/models/cli.py +2 -2
- empathy_os/models/tasks.py +1 -2
- empathy_os/models/telemetry.py +4 -1
- empathy_os/models/token_estimator.py +3 -1
- empathy_os/monitoring/alerts.py +938 -9
- empathy_os/monitoring/alerts_cli.py +346 -183
- empathy_os/orchestration/execution_strategies.py +12 -29
- empathy_os/orchestration/pattern_learner.py +20 -26
- empathy_os/orchestration/real_tools.py +6 -15
- empathy_os/platform_utils.py +2 -1
- empathy_os/plugins/__init__.py +2 -2
- empathy_os/plugins/base.py +64 -64
- empathy_os/plugins/registry.py +32 -32
- empathy_os/project_index/index.py +49 -15
- empathy_os/project_index/models.py +1 -2
- empathy_os/project_index/reports.py +1 -1
- empathy_os/project_index/scanner.py +1 -0
- empathy_os/redis_memory.py +10 -7
- empathy_os/resilience/__init__.py +1 -1
- empathy_os/resilience/health.py +10 -10
- empathy_os/routing/__init__.py +7 -7
- empathy_os/routing/chain_executor.py +37 -37
- empathy_os/routing/classifier.py +36 -36
- empathy_os/routing/smart_router.py +40 -40
- empathy_os/routing/{wizard_registry.py → workflow_registry.py} +47 -47
- empathy_os/scaffolding/__init__.py +8 -8
- empathy_os/scaffolding/__main__.py +1 -1
- empathy_os/scaffolding/cli.py +28 -28
- empathy_os/socratic/__init__.py +3 -19
- empathy_os/socratic/ab_testing.py +25 -36
- empathy_os/socratic/blueprint.py +38 -38
- empathy_os/socratic/cli.py +34 -20
- empathy_os/socratic/collaboration.py +30 -28
- empathy_os/socratic/domain_templates.py +9 -1
- empathy_os/socratic/embeddings.py +17 -13
- empathy_os/socratic/engine.py +135 -70
- empathy_os/socratic/explainer.py +70 -60
- empathy_os/socratic/feedback.py +24 -19
- empathy_os/socratic/forms.py +15 -10
- empathy_os/socratic/generator.py +51 -35
- empathy_os/socratic/llm_analyzer.py +25 -23
- empathy_os/socratic/mcp_server.py +99 -159
- empathy_os/socratic/session.py +19 -13
- empathy_os/socratic/storage.py +98 -67
- empathy_os/socratic/success.py +38 -27
- empathy_os/socratic/visual_editor.py +51 -39
- empathy_os/socratic/web_ui.py +99 -66
- empathy_os/telemetry/cli.py +3 -1
- empathy_os/telemetry/usage_tracker.py +1 -3
- empathy_os/test_generator/__init__.py +3 -3
- empathy_os/test_generator/cli.py +28 -28
- empathy_os/test_generator/generator.py +64 -66
- empathy_os/test_generator/risk_analyzer.py +11 -11
- empathy_os/vscode_bridge.py +173 -0
- empathy_os/workflows/__init__.py +212 -120
- empathy_os/workflows/batch_processing.py +8 -24
- empathy_os/workflows/bug_predict.py +1 -1
- empathy_os/workflows/code_review.py +20 -5
- empathy_os/workflows/code_review_pipeline.py +13 -8
- empathy_os/workflows/keyboard_shortcuts/workflow.py +6 -2
- empathy_os/workflows/manage_documentation.py +1 -0
- empathy_os/workflows/orchestrated_health_check.py +6 -11
- empathy_os/workflows/orchestrated_release_prep.py +3 -3
- empathy_os/workflows/pr_review.py +18 -10
- empathy_os/workflows/progressive/__init__.py +2 -12
- empathy_os/workflows/progressive/cli.py +14 -37
- empathy_os/workflows/progressive/core.py +12 -12
- empathy_os/workflows/progressive/orchestrator.py +166 -144
- empathy_os/workflows/progressive/reports.py +22 -31
- empathy_os/workflows/progressive/telemetry.py +8 -14
- empathy_os/workflows/progressive/test_gen.py +29 -48
- empathy_os/workflows/progressive/workflow.py +31 -70
- empathy_os/workflows/release_prep.py +21 -6
- empathy_os/workflows/release_prep_crew.py +1 -0
- empathy_os/workflows/secure_release.py +13 -6
- empathy_os/workflows/security_audit.py +8 -3
- empathy_os/workflows/test_coverage_boost_crew.py +3 -2
- empathy_os/workflows/test_maintenance_crew.py +1 -0
- empathy_os/workflows/test_runner.py +16 -12
- empathy_software_plugin/SOFTWARE_PLUGIN_README.md +25 -703
- empathy_software_plugin/cli.py +0 -122
- coach_wizards/__init__.py +0 -45
- coach_wizards/accessibility_wizard.py +0 -91
- coach_wizards/api_wizard.py +0 -91
- coach_wizards/base_wizard.py +0 -209
- coach_wizards/cicd_wizard.py +0 -91
- coach_wizards/code_reviewer_README.md +0 -60
- coach_wizards/code_reviewer_wizard.py +0 -180
- coach_wizards/compliance_wizard.py +0 -91
- coach_wizards/database_wizard.py +0 -91
- coach_wizards/debugging_wizard.py +0 -91
- coach_wizards/documentation_wizard.py +0 -91
- coach_wizards/generate_wizards.py +0 -347
- coach_wizards/localization_wizard.py +0 -173
- coach_wizards/migration_wizard.py +0 -91
- coach_wizards/monitoring_wizard.py +0 -91
- coach_wizards/observability_wizard.py +0 -91
- coach_wizards/performance_wizard.py +0 -91
- coach_wizards/prompt_engineering_wizard.py +0 -661
- coach_wizards/refactoring_wizard.py +0 -91
- coach_wizards/scaling_wizard.py +0 -90
- coach_wizards/security_wizard.py +0 -92
- coach_wizards/testing_wizard.py +0 -91
- empathy_framework-4.6.6.dist-info/RECORD +0 -410
- empathy_llm_toolkit/wizards/__init__.py +0 -43
- empathy_llm_toolkit/wizards/base_wizard.py +0 -364
- empathy_llm_toolkit/wizards/customer_support_wizard.py +0 -190
- empathy_llm_toolkit/wizards/healthcare_wizard.py +0 -378
- empathy_llm_toolkit/wizards/patient_assessment_README.md +0 -64
- empathy_llm_toolkit/wizards/patient_assessment_wizard.py +0 -193
- empathy_llm_toolkit/wizards/technology_wizard.py +0 -209
- empathy_os/wizard_factory_cli.py +0 -170
- empathy_software_plugin/wizards/__init__.py +0 -42
- empathy_software_plugin/wizards/advanced_debugging_wizard.py +0 -395
- empathy_software_plugin/wizards/agent_orchestration_wizard.py +0 -511
- empathy_software_plugin/wizards/ai_collaboration_wizard.py +0 -503
- empathy_software_plugin/wizards/ai_context_wizard.py +0 -441
- empathy_software_plugin/wizards/ai_documentation_wizard.py +0 -503
- empathy_software_plugin/wizards/base_wizard.py +0 -288
- empathy_software_plugin/wizards/book_chapter_wizard.py +0 -519
- empathy_software_plugin/wizards/code_review_wizard.py +0 -604
- empathy_software_plugin/wizards/debugging/__init__.py +0 -50
- empathy_software_plugin/wizards/debugging/bug_risk_analyzer.py +0 -414
- empathy_software_plugin/wizards/debugging/config_loaders.py +0 -446
- empathy_software_plugin/wizards/debugging/fix_applier.py +0 -469
- empathy_software_plugin/wizards/debugging/language_patterns.py +0 -385
- empathy_software_plugin/wizards/debugging/linter_parsers.py +0 -470
- empathy_software_plugin/wizards/debugging/verification.py +0 -369
- empathy_software_plugin/wizards/enhanced_testing_wizard.py +0 -537
- empathy_software_plugin/wizards/memory_enhanced_debugging_wizard.py +0 -816
- empathy_software_plugin/wizards/multi_model_wizard.py +0 -501
- empathy_software_plugin/wizards/pattern_extraction_wizard.py +0 -422
- empathy_software_plugin/wizards/pattern_retriever_wizard.py +0 -400
- empathy_software_plugin/wizards/performance/__init__.py +0 -9
- empathy_software_plugin/wizards/performance/bottleneck_detector.py +0 -221
- empathy_software_plugin/wizards/performance/profiler_parsers.py +0 -278
- empathy_software_plugin/wizards/performance/trajectory_analyzer.py +0 -429
- empathy_software_plugin/wizards/performance_profiling_wizard.py +0 -305
- empathy_software_plugin/wizards/prompt_engineering_wizard.py +0 -425
- empathy_software_plugin/wizards/rag_pattern_wizard.py +0 -461
- empathy_software_plugin/wizards/security/__init__.py +0 -32
- empathy_software_plugin/wizards/security/exploit_analyzer.py +0 -290
- empathy_software_plugin/wizards/security/owasp_patterns.py +0 -241
- empathy_software_plugin/wizards/security/vulnerability_scanner.py +0 -604
- empathy_software_plugin/wizards/security_analysis_wizard.py +0 -322
- empathy_software_plugin/wizards/security_learning_wizard.py +0 -740
- empathy_software_plugin/wizards/tech_debt_wizard.py +0 -726
- empathy_software_plugin/wizards/testing/__init__.py +0 -27
- empathy_software_plugin/wizards/testing/coverage_analyzer.py +0 -459
- empathy_software_plugin/wizards/testing/quality_analyzer.py +0 -525
- empathy_software_plugin/wizards/testing/test_suggester.py +0 -533
- empathy_software_plugin/wizards/testing_wizard.py +0 -274
- wizards/__init__.py +0 -82
- wizards/admission_assessment_wizard.py +0 -644
- wizards/care_plan.py +0 -321
- wizards/clinical_assessment.py +0 -769
- wizards/discharge_planning.py +0 -77
- wizards/discharge_summary_wizard.py +0 -468
- wizards/dosage_calculation.py +0 -497
- wizards/incident_report_wizard.py +0 -454
- wizards/medication_reconciliation.py +0 -85
- wizards/nursing_assessment.py +0 -171
- wizards/patient_education.py +0 -654
- wizards/quality_improvement.py +0 -705
- wizards/sbar_report.py +0 -324
- wizards/sbar_wizard.py +0 -608
- wizards/shift_handoff_wizard.py +0 -535
- wizards/soap_note_wizard.py +0 -679
- wizards/treatment_plan.py +0 -15
- {empathy_framework-4.6.6.dist-info → empathy_framework-4.7.0.dist-info}/WHEEL +0 -0
- {empathy_framework-4.6.6.dist-info → empathy_framework-4.7.0.dist-info}/entry_points.txt +0 -0
- {empathy_framework-4.6.6.dist-info → empathy_framework-4.7.0.dist-info}/licenses/LICENSE +0 -0
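To reproduce the file-level listing above yourself, a minimal sketch along these lines should work once both wheels have been downloaded locally (for example with `pip download empathy-framework==4.6.6 --no-deps`; the PyPI project name and the exact wheel filenames are assumptions taken from the header):

```python
# Minimal sketch: compare the member lists of the two wheels named in the header.
# Assumes both .whl files are already in the current directory (wheels are zip archives).
import zipfile


def members(wheel_path: str) -> set[str]:
    with zipfile.ZipFile(wheel_path) as wheel:
        return set(wheel.namelist())


old = members("empathy_framework-4.6.6-py3-none-any.whl")
new = members("empathy_framework-4.7.0-py3-none-any.whl")

print("removed:\n  " + "\n  ".join(sorted(old - new)))
print("added:\n  " + "\n  ".join(sorted(new - old)))
```

This only recovers which files were added or removed; the per-file `+/-` line counts above come from diffing the extracted file contents. The hunk reproduced below is the 537-line deletion of `empathy_software_plugin/wizards/enhanced_testing_wizard.py` (the `+0 -537` entry in the list above).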
@@ -1,537 +0,0 @@
-"""Enhanced Testing Wizard (Level 4)
-
-Goes beyond test coverage to analyze test QUALITY and predict which untested code will cause bugs.
-
-Level 4: Anticipatory - predicts which missing tests will cause production issues.
-
-Copyright 2025 Smart AI Memory, LLC
-Licensed under Fair Source 0.9
-"""
-
-import logging
-import re
-from pathlib import Path
-from typing import Any
-
-from .base_wizard import BaseWizard
-
-logger = logging.getLogger(__name__)
-
-
-class EnhancedTestingWizard(BaseWizard):
-    """Enhanced Testing Wizard - Level 4
-
-    Beyond coverage metrics:
-    - Test quality analysis (do tests actually catch bugs?)
-    - Bug-risk prediction for untested code
-    - Brittle test detection
-    - Smart test suggestions based on risk
-    """
-
-    @property
-    def name(self) -> str:
-        return "Enhanced Testing Wizard"
-
-    @property
-    def level(self) -> int:
-        return 4
-
-    def __init__(self):
-        super().__init__()
-
-        # High-risk code patterns that need tests
-        self.high_risk_patterns = {
-            "error_handling": {
-                "patterns": [r"try\s*:", r"except\s+\w+:", r"catch\s*\("],
-                "risk": "HIGH",
-                "reason": "Error paths often cause production failures",
-            },
-            "database_operations": {
-                "patterns": [r"\.execute\(", r"\.query\(", r"\.save\(", r"\.update\("],
-                "risk": "HIGH",
-                "reason": "Database operations prone to edge case bugs",
-            },
-            "user_input": {
-                "patterns": [r"request\.", r"input\(", r"readLine\("],
-                "risk": "CRITICAL",
-                "reason": "Unvalidated input leads to security issues",
-            },
-            "authentication": {
-                "patterns": [r"login", r"authenticate", r"verify.*password"],
-                "risk": "CRITICAL",
-                "reason": "Auth bugs cause security breaches",
-            },
-            "payment_processing": {
-                "patterns": [r"payment|charge|transaction|bill|invoice"],
-                "risk": "CRITICAL",
-                "reason": "Payment bugs cause financial loss",
-            },
-            "financial_calculations": {
-                "patterns": [r"price\s*=", r"total\s*=", r"amount\s*=", r"\.round\("],
-                "risk": "HIGH",
-                "reason": "Money calculations must be exact",
-            },
-        }
-
-    async def analyze(self, context: dict[str, Any]) -> dict[str, Any]:
-        """Analyze test quality and coverage.
-
-        Context expects:
-        - project_path: Path to project
-        - coverage_report: Coverage data (JSON or dict)
-        - test_files: List of test file paths (optional)
-        - source_files: List of source file paths (optional)
-
-        Returns:
-            Analysis with test quality, coverage gaps, bug risk predictions
-
-        """
-        project_path = context.get("project_path", ".")
-        coverage_data = context.get("coverage_report", {})
-        test_files = context.get("test_files", [])
-        source_files = context.get("source_files", [])
-
-        # Auto-discover if not provided
-        if not source_files:
-            source_files = self._discover_source_files(project_path)
-
-        if not test_files:
-            test_files = self._discover_test_files(project_path)
-
-        # Phase 1: Analyze test coverage
-        coverage_analysis = self._analyze_coverage(coverage_data, source_files)
-
-        # Phase 2: Analyze test quality
-        test_quality = self._analyze_test_quality(test_files, source_files)
-
-        # Phase 3: Identify high-risk untested code (Level 4)
-        risk_gaps = self._identify_risk_gaps(source_files, coverage_data)
-
-        # Phase 4: Detect brittle tests
-        brittle_tests = self._detect_brittle_tests(test_files)
-
-        # Phase 5: Generate smart test suggestions (Level 4)
-        test_suggestions = self._generate_test_suggestions(risk_gaps, coverage_analysis)
-
-        # Phase 6: Predictions (Level 4)
-        predictions = self._generate_predictions(risk_gaps, brittle_tests, coverage_analysis)
-
-        # Phase 7: Recommendations
-        recommendations = self._generate_recommendations(coverage_analysis, test_quality, risk_gaps)
-
-        return {
-            "coverage": coverage_analysis,
-            "test_quality": test_quality,
-            "risk_gaps": risk_gaps,
-            "high_risk_gaps": risk_gaps,  # Alias for compatibility
-            "brittle_tests": brittle_tests,
-            "test_suggestions": test_suggestions,
-            # Standard wizard outputs
-            "predictions": predictions,
-            "recommendations": recommendations,
-            "confidence": 0.85,
-        }
-
-    def _discover_source_files(self, project_path: str) -> list[str]:
-        """Discover source files in project"""
-        source_files = []
-        project = Path(project_path)
-
-        # Python
-        for file in project.rglob("*.py"):
-            if "/test" not in str(file) and "_test.py" not in file.name:
-                source_files.append(str(file))
-
-        # JavaScript/TypeScript
-        for ext in ["*.js", "*.ts", "*.jsx", "*.tsx"]:
-            for file in project.rglob(ext):
-                if (
-                    "/test" not in str(file)
-                    and ".test." not in file.name
-                    and ".spec." not in file.name
-                ):
-                    source_files.append(str(file))
-
-        return source_files[:100]  # Limit for performance
-
-    def _discover_test_files(self, project_path: str) -> list[str]:
-        """Discover test files in project"""
-        test_files = []
-        project = Path(project_path)
-
-        # Python tests
-        for file in project.rglob("test_*.py"):
-            test_files.append(str(file))
-        for file in project.rglob("*_test.py"):
-            test_files.append(str(file))
-
-        # JavaScript/TypeScript tests
-        for file in project.rglob("*.test.js"):
-            test_files.append(str(file))
-        for file in project.rglob("*.spec.ts"):
-            test_files.append(str(file))
-
-        return test_files
-
-    def _analyze_coverage(
-        self,
-        coverage_data: dict[str, Any],
-        source_files: list[str],
-    ) -> dict[str, Any]:
-        """Analyze test coverage"""
-        if not coverage_data:
-            return {
-                "overall_coverage": 0,
-                "line_coverage": 0,
-                "branch_coverage": 0,
-                "uncovered_files": len(source_files),
-                "status": "no_coverage_data",
-            }
-
-        # Extract coverage metrics
-        # Assuming coverage_data format: {filename: {lines_covered, lines_total, branches_covered, branches_total}}
-
-        total_lines = 0
-        covered_lines = 0
-        total_branches = 0
-        covered_branches = 0
-
-        for _file_path, metrics in coverage_data.items():
-            total_lines += metrics.get("lines_total", 0)
-            covered_lines += metrics.get("lines_covered", 0)
-            total_branches += metrics.get("branches_total", 0)
-            covered_branches += metrics.get("branches_covered", 0)
-
-        line_coverage = (covered_lines / total_lines * 100) if total_lines > 0 else 0
-        branch_coverage = (covered_branches / total_branches * 100) if total_branches > 0 else 0
-
-        return {
-            "overall_coverage": (line_coverage + branch_coverage) / 2,
-            "line_coverage": line_coverage,
-            "branch_coverage": branch_coverage,
-            "total_lines": total_lines,
-            "covered_lines": covered_lines,
-            "uncovered_lines": total_lines - covered_lines,
-            "status": "analyzed",
-        }
-
-    def _analyze_test_quality(
-        self,
-        test_files: list[str],
-        source_files: list[str],
-    ) -> dict[str, Any]:
-        """Analyze test quality beyond coverage"""
-        total_tests = len(test_files)
-        assertions_found = 0
-        tests_with_assertions = 0
-
-        for test_file in test_files:
-            try:
-                with open(test_file) as f:
-                    content = f.read()
-
-                # Count assertions (word boundaries to avoid matching in comments/strings)
-                assertion_count = len(
-                    re.findall(
-                        r"\bassert\s|\bexpect\(|\bshould\(|\btoEqual\(|\bassertEqual\(",
-                        content,
-                    ),
-                )
-
-                if assertion_count > 0:
-                    tests_with_assertions += 1
-                    assertions_found += assertion_count
-
-            except Exception as e:
-                logger.warning(f"Could not read test file {test_file}: {e}")
-
-        # Test-to-source ratio
-        ratio = total_tests / len(source_files) if source_files else 0
-
-        return {
-            "total_test_files": total_tests,
-            "tests_with_assertions": tests_with_assertions,
-            "total_assertions": assertions_found,
-            "test_to_source_ratio": ratio,
-            "quality_score": self._calculate_quality_score(
-                total_tests,
-                tests_with_assertions,
-                assertions_found,
-                ratio,
-            ),
-        }
-
-    def _calculate_quality_score(
-        self,
-        total_tests: int,
-        tests_with_assertions: int,
-        assertions_found: int,
-        ratio: float,
-    ) -> float:
-        """Calculate overall test quality score (0-100)"""
-        if total_tests == 0:
-            return 0
-
-        # Factor 1: Percentage of tests with assertions
-        assertion_score = (tests_with_assertions / total_tests * 100) if total_tests > 0 else 0
-
-        # Factor 2: Average assertions per test
-        avg_assertions = assertions_found / total_tests if total_tests > 0 else 0
-        assertion_depth_score = min(avg_assertions / 3 * 100, 100)  # 3+ assertions = 100
-
-        # Factor 3: Test-to-source ratio
-        ratio_score = min(ratio * 100, 100)  # 1:1 or better = 100
-
-        # Weighted average
-        quality_score = assertion_score * 0.4 + assertion_depth_score * 0.3 + ratio_score * 0.3
-
-        return round(quality_score, 2)
-
-    def _identify_risk_gaps(
-        self,
-        source_files: list[str],
-        coverage_data: dict[str, Any],
-    ) -> list[dict[str, Any]]:
-        """Identify high-risk code that lacks tests.
-
-        This is Level 4 - predicting which gaps will cause bugs.
-        """
-        risk_gaps = []
-
-        for source_file in source_files[:50]:  # Limit for performance
-            try:
-                with open(source_file) as f:
-                    content = f.read()
-
-                # Check if file is covered
-                is_covered = source_file in coverage_data
-
-                # Check for high-risk patterns
-                for pattern_name, pattern_info in self.high_risk_patterns.items():
-                    for pattern in pattern_info["patterns"]:
-                        matches = re.findall(pattern, content, re.IGNORECASE)
-
-                        if matches and not is_covered:
-                            risk_gaps.append(
-                                {
-                                    "file": source_file,
-                                    "pattern": pattern_name,
-                                    "risk_level": pattern_info["risk"],
-                                    "reason": pattern_info["reason"],
-                                    "occurrences": len(matches),
-                                    "prediction": f"In our experience, untested {pattern_name} causes production bugs",
-                                },
-                            )
-                            break  # One gap per file per pattern
-
-            except Exception as e:
-                logger.warning(f"Could not analyze {source_file}: {e}")
-
-        # Sort by risk level
-        risk_order = {"CRITICAL": 0, "HIGH": 1, "MEDIUM": 2, "LOW": 3}
-        risk_gaps.sort(key=lambda x: risk_order.get(x["risk_level"], 4))
-
-        return risk_gaps
-
-    def _detect_brittle_tests(self, test_files: list[str]) -> list[dict[str, Any]]:
-        """Detect tests that are likely to break often"""
-        brittle_tests = []
-
-        brittle_patterns = {
-            "sleep/wait": {
-                "pattern": r"sleep\(|wait\(|setTimeout\(",
-                "reason": "Timing-based tests are flaky",
-            },
-            "hardcoded_values": {
-                "pattern": r"==\s*\d{10,}|===\s*\d{10,}",  # Long numbers
-                "reason": "Hardcoded IDs/timestamps break easily",
-            },
-            "test_order_dependency": {
-                "pattern": r"test.*order|sequential|beforeAll.*state",
-                "reason": "Tests should be independent",
-            },
-        }
-
-        for test_file in test_files[:50]:
-            try:
-                with open(test_file) as f:
-                    content = f.read()
-
-                for pattern_name, pattern_info in brittle_patterns.items():
-                    if re.search(pattern_info["pattern"], content):
-                        brittle_tests.append(
-                            {
-                                "file": test_file,
-                                "pattern": pattern_name,
-                                "reason": pattern_info["reason"],
-                            },
-                        )
-
-            except Exception as e:
-                logger.warning(f"Could not analyze {test_file}: {e}")
-
-        return brittle_tests
-
-    def _generate_test_suggestions(
-        self,
-        risk_gaps: list[dict[str, Any]],
-        coverage_analysis: dict[str, Any],
-    ) -> list[dict[str, Any]]:
-        """Generate smart test suggestions based on risk"""
-        suggestions = []
-
-        # Suggest tests for high-risk gaps first
-        for gap in risk_gaps[:10]:  # Top 10
-            suggestions.append(
-                {
-                    "priority": gap["risk_level"],
-                    "file": gap["file"],
-                    "test_type": f"Test {gap['pattern']}",
-                    "rationale": gap["reason"],
-                    "suggested_tests": self._suggest_specific_tests(gap["pattern"]),
-                },
-            )
-
-        return suggestions
-
-    def _suggest_specific_tests(self, pattern: str) -> list[str]:
-        """Suggest specific tests for pattern"""
-        test_templates = {
-            "error_handling": [
-                "Test with invalid input",
-                "Test with network failure",
-                "Test with timeout",
-                "Verify error message clarity",
-            ],
-            "database_operations": [
-                "Test with empty result set",
-                "Test with duplicate keys",
-                "Test with connection failure",
-                "Test transaction rollback",
-            ],
-            "user_input": [
-                "Test with malformed input",
-                "Test with SQL injection attempt",
-                "Test with XSS payload",
-                "Test with empty/null input",
-            ],
-            "authentication": [
-                "Test with invalid credentials",
-                "Test with expired token",
-                "Test with missing permissions",
-                "Test session timeout",
-            ],
-            "financial_calculations": [
-                "Test with edge case amounts (0, negative)",
-                "Test rounding behavior",
-                "Test currency conversion",
-                "Test overflow/underflow",
-            ],
-        }
-
-        return test_templates.get(pattern, ["Add comprehensive tests"])
-
-    def _generate_predictions(
-        self,
-        risk_gaps: list[dict[str, Any]],
-        brittle_tests: list[dict[str, Any]],
-        coverage_analysis: dict[str, Any],
-    ) -> list[dict[str, Any]]:
-        """Generate Level 4 predictions"""
-        predictions = []
-
-        # Prediction 1: Critical risk gaps
-        critical_gaps = [g for g in risk_gaps if g["risk_level"] == "CRITICAL"]
-        if critical_gaps:
-            predictions.append(
-                {
-                    "type": "production_bug_risk",
-                    "severity": "critical",
-                    "description": (
-                        f"{len(critical_gaps)} critical code paths lack tests. "
-                        f"In our experience, untested {critical_gaps[0]['pattern']} "
-                        "causes production incidents."
-                    ),
-                    "affected_files": [g["file"] for g in critical_gaps[:5]],
-                    "prevention_steps": [
-                        f"Add tests for {g['pattern']}" for g in critical_gaps[:3]
-                    ],
-                },
-            )
-
-        # Prediction 2: Brittle test maintenance burden
-        if len(brittle_tests) > 5:
-            predictions.append(
-                {
-                    "type": "test_maintenance_burden",
-                    "severity": "medium",
-                    "description": (
-                        f"{len(brittle_tests)} brittle tests detected. "
-                        "In our experience, these break often and slow development."
-                    ),
-                    "prevention_steps": [
-                        "Refactor tests to remove timing dependencies",
-                        "Use test fixtures instead of hardcoded values",
-                        "Ensure test independence",
-                    ],
-                },
-            )
-
-        # Prediction 3: Low coverage trajectory
-        if coverage_analysis["overall_coverage"] < 60:
-            predictions.append(
-                {
-                    "type": "coverage_trajectory",
-                    "severity": "high",
-                    "description": (
-                        f"Coverage at {coverage_analysis['overall_coverage']:.1f}%. "
-                        "In our experience, projects below 70% see more production bugs."
-                    ),
-                    "prevention_steps": [
-                        "Set coverage target of 70%+",
-                        "Add pre-commit hooks to prevent coverage drops",
-                        "Focus on high-risk code first",
-                    ],
-                },
-            )
-
-        return predictions
-
-    def _generate_recommendations(
-        self,
-        coverage_analysis: dict[str, Any],
-        test_quality: dict[str, Any],
-        risk_gaps: list[dict[str, Any]],
-    ) -> list[str]:
-        """Generate actionable recommendations"""
-        recommendations = []
-
-        # Coverage recommendations
-        coverage = coverage_analysis.get("overall_coverage", 0)
-        if coverage < 70:
-            recommendations.append(
-                f"Increase coverage from {coverage:.1f}% to 70%+ (industry standard)",
-            )
-
-        # Quality recommendations
-        quality_score = test_quality.get("quality_score", 0)
-        if quality_score < 60:
-            recommendations.append(f"Improve test quality score from {quality_score:.1f} to 60+")
-
-        # Risk-based recommendations
-        critical_gaps = [g for g in risk_gaps if g["risk_level"] == "CRITICAL"]
-        if critical_gaps:
-            recommendations.append(
-                f"URGENT: Add tests for {len(critical_gaps)} critical code paths",
-            )
-
-        high_risk_gaps = [g for g in risk_gaps if g["risk_level"] == "HIGH"]
-        if high_risk_gaps:
-            recommendations.append(f"HIGH PRIORITY: Test {len(high_risk_gaps)} high-risk areas")
-
-        # Test-to-source ratio
-        ratio = test_quality.get("test_to_source_ratio", 0)
-        if ratio < 0.5:
-            recommendations.append(f"Add more test files (currently {ratio:.2f}:1, target 1:1)")
-
-        return recommendations
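For context on what 4.7.0 drops, here is a minimal sketch of how the deleted wizard was presumably driven, pieced together only from the `analyze()` docstring and return dict above; the coverage-report shape follows the comment in `_analyze_coverage`, the import path mirrors the deleted module's path in the file listing, and the sample filename is hypothetical. It can only run against 4.6.6, where the module still exists.

```python
# Sketch only: exercises the removed EnhancedTestingWizard against empathy-framework 4.6.6.
import asyncio

from empathy_software_plugin.wizards.enhanced_testing_wizard import EnhancedTestingWizard


async def main() -> None:
    wizard = EnhancedTestingWizard()
    result = await wizard.analyze(
        {
            "project_path": ".",
            # Shape taken from the _analyze_coverage comment:
            # {filename: {lines_covered, lines_total, branches_covered, branches_total}}
            "coverage_report": {
                "src/billing.py": {  # hypothetical file
                    "lines_covered": 40,
                    "lines_total": 100,
                    "branches_covered": 5,
                    "branches_total": 20,
                },
            },
            # "test_files" / "source_files" are optional; when omitted the wizard
            # auto-discovers them under project_path.
        },
    )
    print(result["coverage"]["overall_coverage"])
    print(result["recommendations"])


asyncio.run(main())
```

With that sample report, `_analyze_coverage` yields 40% line coverage and 25% branch coverage, so `overall_coverage` is their average, 32.5%, which is low enough to trigger both the `coverage_trajectory` prediction (below 60%) and the "Increase coverage ... to 70%+" recommendation.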