empathy-framework 4.6.6-py3-none-any.whl → 4.7.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {empathy_framework-4.6.6.dist-info → empathy_framework-4.7.0.dist-info}/METADATA +7 -6
- empathy_framework-4.7.0.dist-info/RECORD +354 -0
- {empathy_framework-4.6.6.dist-info → empathy_framework-4.7.0.dist-info}/top_level.txt +0 -2
- empathy_healthcare_plugin/monitors/monitoring/__init__.py +9 -9
- empathy_llm_toolkit/agent_factory/__init__.py +6 -6
- empathy_llm_toolkit/agent_factory/adapters/wizard_adapter.py +7 -10
- empathy_llm_toolkit/agents_md/__init__.py +22 -0
- empathy_llm_toolkit/agents_md/loader.py +218 -0
- empathy_llm_toolkit/agents_md/parser.py +271 -0
- empathy_llm_toolkit/agents_md/registry.py +307 -0
- empathy_llm_toolkit/commands/__init__.py +51 -0
- empathy_llm_toolkit/commands/context.py +375 -0
- empathy_llm_toolkit/commands/loader.py +301 -0
- empathy_llm_toolkit/commands/models.py +231 -0
- empathy_llm_toolkit/commands/parser.py +371 -0
- empathy_llm_toolkit/commands/registry.py +429 -0
- empathy_llm_toolkit/config/__init__.py +8 -8
- empathy_llm_toolkit/config/unified.py +3 -7
- empathy_llm_toolkit/context/__init__.py +22 -0
- empathy_llm_toolkit/context/compaction.py +455 -0
- empathy_llm_toolkit/context/manager.py +434 -0
- empathy_llm_toolkit/hooks/__init__.py +24 -0
- empathy_llm_toolkit/hooks/config.py +306 -0
- empathy_llm_toolkit/hooks/executor.py +289 -0
- empathy_llm_toolkit/hooks/registry.py +302 -0
- empathy_llm_toolkit/hooks/scripts/__init__.py +39 -0
- empathy_llm_toolkit/hooks/scripts/evaluate_session.py +201 -0
- empathy_llm_toolkit/hooks/scripts/first_time_init.py +285 -0
- empathy_llm_toolkit/hooks/scripts/pre_compact.py +207 -0
- empathy_llm_toolkit/hooks/scripts/session_end.py +183 -0
- empathy_llm_toolkit/hooks/scripts/session_start.py +163 -0
- empathy_llm_toolkit/hooks/scripts/suggest_compact.py +225 -0
- empathy_llm_toolkit/learning/__init__.py +30 -0
- empathy_llm_toolkit/learning/evaluator.py +438 -0
- empathy_llm_toolkit/learning/extractor.py +514 -0
- empathy_llm_toolkit/learning/storage.py +560 -0
- empathy_llm_toolkit/providers.py +4 -11
- empathy_llm_toolkit/security/__init__.py +17 -17
- empathy_llm_toolkit/utils/tokens.py +2 -5
- empathy_os/__init__.py +202 -70
- empathy_os/cache_monitor.py +5 -3
- empathy_os/cli/__init__.py +11 -55
- empathy_os/cli/__main__.py +29 -15
- empathy_os/cli/commands/inspection.py +21 -12
- empathy_os/cli/commands/memory.py +4 -12
- empathy_os/cli/commands/profiling.py +198 -0
- empathy_os/cli/commands/utilities.py +27 -7
- empathy_os/cli.py +28 -57
- empathy_os/cli_unified.py +525 -1164
- empathy_os/cost_tracker.py +9 -3
- empathy_os/dashboard/server.py +200 -2
- empathy_os/hot_reload/__init__.py +7 -7
- empathy_os/hot_reload/config.py +6 -7
- empathy_os/hot_reload/integration.py +35 -35
- empathy_os/hot_reload/reloader.py +57 -57
- empathy_os/hot_reload/watcher.py +28 -28
- empathy_os/hot_reload/websocket.py +2 -2
- empathy_os/memory/__init__.py +11 -4
- empathy_os/memory/claude_memory.py +1 -1
- empathy_os/memory/cross_session.py +8 -12
- empathy_os/memory/edges.py +6 -6
- empathy_os/memory/file_session.py +770 -0
- empathy_os/memory/graph.py +30 -30
- empathy_os/memory/nodes.py +6 -6
- empathy_os/memory/short_term.py +15 -9
- empathy_os/memory/unified.py +606 -140
- empathy_os/meta_workflows/agent_creator.py +3 -9
- empathy_os/meta_workflows/cli_meta_workflows.py +113 -53
- empathy_os/meta_workflows/form_engine.py +6 -18
- empathy_os/meta_workflows/intent_detector.py +64 -24
- empathy_os/meta_workflows/models.py +3 -1
- empathy_os/meta_workflows/pattern_learner.py +13 -31
- empathy_os/meta_workflows/plan_generator.py +55 -47
- empathy_os/meta_workflows/session_context.py +2 -3
- empathy_os/meta_workflows/workflow.py +20 -51
- empathy_os/models/cli.py +2 -2
- empathy_os/models/tasks.py +1 -2
- empathy_os/models/telemetry.py +4 -1
- empathy_os/models/token_estimator.py +3 -1
- empathy_os/monitoring/alerts.py +938 -9
- empathy_os/monitoring/alerts_cli.py +346 -183
- empathy_os/orchestration/execution_strategies.py +12 -29
- empathy_os/orchestration/pattern_learner.py +20 -26
- empathy_os/orchestration/real_tools.py +6 -15
- empathy_os/platform_utils.py +2 -1
- empathy_os/plugins/__init__.py +2 -2
- empathy_os/plugins/base.py +64 -64
- empathy_os/plugins/registry.py +32 -32
- empathy_os/project_index/index.py +49 -15
- empathy_os/project_index/models.py +1 -2
- empathy_os/project_index/reports.py +1 -1
- empathy_os/project_index/scanner.py +1 -0
- empathy_os/redis_memory.py +10 -7
- empathy_os/resilience/__init__.py +1 -1
- empathy_os/resilience/health.py +10 -10
- empathy_os/routing/__init__.py +7 -7
- empathy_os/routing/chain_executor.py +37 -37
- empathy_os/routing/classifier.py +36 -36
- empathy_os/routing/smart_router.py +40 -40
- empathy_os/routing/{wizard_registry.py → workflow_registry.py} +47 -47
- empathy_os/scaffolding/__init__.py +8 -8
- empathy_os/scaffolding/__main__.py +1 -1
- empathy_os/scaffolding/cli.py +28 -28
- empathy_os/socratic/__init__.py +3 -19
- empathy_os/socratic/ab_testing.py +25 -36
- empathy_os/socratic/blueprint.py +38 -38
- empathy_os/socratic/cli.py +34 -20
- empathy_os/socratic/collaboration.py +30 -28
- empathy_os/socratic/domain_templates.py +9 -1
- empathy_os/socratic/embeddings.py +17 -13
- empathy_os/socratic/engine.py +135 -70
- empathy_os/socratic/explainer.py +70 -60
- empathy_os/socratic/feedback.py +24 -19
- empathy_os/socratic/forms.py +15 -10
- empathy_os/socratic/generator.py +51 -35
- empathy_os/socratic/llm_analyzer.py +25 -23
- empathy_os/socratic/mcp_server.py +99 -159
- empathy_os/socratic/session.py +19 -13
- empathy_os/socratic/storage.py +98 -67
- empathy_os/socratic/success.py +38 -27
- empathy_os/socratic/visual_editor.py +51 -39
- empathy_os/socratic/web_ui.py +99 -66
- empathy_os/telemetry/cli.py +3 -1
- empathy_os/telemetry/usage_tracker.py +1 -3
- empathy_os/test_generator/__init__.py +3 -3
- empathy_os/test_generator/cli.py +28 -28
- empathy_os/test_generator/generator.py +64 -66
- empathy_os/test_generator/risk_analyzer.py +11 -11
- empathy_os/vscode_bridge.py +173 -0
- empathy_os/workflows/__init__.py +212 -120
- empathy_os/workflows/batch_processing.py +8 -24
- empathy_os/workflows/bug_predict.py +1 -1
- empathy_os/workflows/code_review.py +20 -5
- empathy_os/workflows/code_review_pipeline.py +13 -8
- empathy_os/workflows/keyboard_shortcuts/workflow.py +6 -2
- empathy_os/workflows/manage_documentation.py +1 -0
- empathy_os/workflows/orchestrated_health_check.py +6 -11
- empathy_os/workflows/orchestrated_release_prep.py +3 -3
- empathy_os/workflows/pr_review.py +18 -10
- empathy_os/workflows/progressive/__init__.py +2 -12
- empathy_os/workflows/progressive/cli.py +14 -37
- empathy_os/workflows/progressive/core.py +12 -12
- empathy_os/workflows/progressive/orchestrator.py +166 -144
- empathy_os/workflows/progressive/reports.py +22 -31
- empathy_os/workflows/progressive/telemetry.py +8 -14
- empathy_os/workflows/progressive/test_gen.py +29 -48
- empathy_os/workflows/progressive/workflow.py +31 -70
- empathy_os/workflows/release_prep.py +21 -6
- empathy_os/workflows/release_prep_crew.py +1 -0
- empathy_os/workflows/secure_release.py +13 -6
- empathy_os/workflows/security_audit.py +8 -3
- empathy_os/workflows/test_coverage_boost_crew.py +3 -2
- empathy_os/workflows/test_maintenance_crew.py +1 -0
- empathy_os/workflows/test_runner.py +16 -12
- empathy_software_plugin/SOFTWARE_PLUGIN_README.md +25 -703
- empathy_software_plugin/cli.py +0 -122
- coach_wizards/__init__.py +0 -45
- coach_wizards/accessibility_wizard.py +0 -91
- coach_wizards/api_wizard.py +0 -91
- coach_wizards/base_wizard.py +0 -209
- coach_wizards/cicd_wizard.py +0 -91
- coach_wizards/code_reviewer_README.md +0 -60
- coach_wizards/code_reviewer_wizard.py +0 -180
- coach_wizards/compliance_wizard.py +0 -91
- coach_wizards/database_wizard.py +0 -91
- coach_wizards/debugging_wizard.py +0 -91
- coach_wizards/documentation_wizard.py +0 -91
- coach_wizards/generate_wizards.py +0 -347
- coach_wizards/localization_wizard.py +0 -173
- coach_wizards/migration_wizard.py +0 -91
- coach_wizards/monitoring_wizard.py +0 -91
- coach_wizards/observability_wizard.py +0 -91
- coach_wizards/performance_wizard.py +0 -91
- coach_wizards/prompt_engineering_wizard.py +0 -661
- coach_wizards/refactoring_wizard.py +0 -91
- coach_wizards/scaling_wizard.py +0 -90
- coach_wizards/security_wizard.py +0 -92
- coach_wizards/testing_wizard.py +0 -91
- empathy_framework-4.6.6.dist-info/RECORD +0 -410
- empathy_llm_toolkit/wizards/__init__.py +0 -43
- empathy_llm_toolkit/wizards/base_wizard.py +0 -364
- empathy_llm_toolkit/wizards/customer_support_wizard.py +0 -190
- empathy_llm_toolkit/wizards/healthcare_wizard.py +0 -378
- empathy_llm_toolkit/wizards/patient_assessment_README.md +0 -64
- empathy_llm_toolkit/wizards/patient_assessment_wizard.py +0 -193
- empathy_llm_toolkit/wizards/technology_wizard.py +0 -209
- empathy_os/wizard_factory_cli.py +0 -170
- empathy_software_plugin/wizards/__init__.py +0 -42
- empathy_software_plugin/wizards/advanced_debugging_wizard.py +0 -395
- empathy_software_plugin/wizards/agent_orchestration_wizard.py +0 -511
- empathy_software_plugin/wizards/ai_collaboration_wizard.py +0 -503
- empathy_software_plugin/wizards/ai_context_wizard.py +0 -441
- empathy_software_plugin/wizards/ai_documentation_wizard.py +0 -503
- empathy_software_plugin/wizards/base_wizard.py +0 -288
- empathy_software_plugin/wizards/book_chapter_wizard.py +0 -519
- empathy_software_plugin/wizards/code_review_wizard.py +0 -604
- empathy_software_plugin/wizards/debugging/__init__.py +0 -50
- empathy_software_plugin/wizards/debugging/bug_risk_analyzer.py +0 -414
- empathy_software_plugin/wizards/debugging/config_loaders.py +0 -446
- empathy_software_plugin/wizards/debugging/fix_applier.py +0 -469
- empathy_software_plugin/wizards/debugging/language_patterns.py +0 -385
- empathy_software_plugin/wizards/debugging/linter_parsers.py +0 -470
- empathy_software_plugin/wizards/debugging/verification.py +0 -369
- empathy_software_plugin/wizards/enhanced_testing_wizard.py +0 -537
- empathy_software_plugin/wizards/memory_enhanced_debugging_wizard.py +0 -816
- empathy_software_plugin/wizards/multi_model_wizard.py +0 -501
- empathy_software_plugin/wizards/pattern_extraction_wizard.py +0 -422
- empathy_software_plugin/wizards/pattern_retriever_wizard.py +0 -400
- empathy_software_plugin/wizards/performance/__init__.py +0 -9
- empathy_software_plugin/wizards/performance/bottleneck_detector.py +0 -221
- empathy_software_plugin/wizards/performance/profiler_parsers.py +0 -278
- empathy_software_plugin/wizards/performance/trajectory_analyzer.py +0 -429
- empathy_software_plugin/wizards/performance_profiling_wizard.py +0 -305
- empathy_software_plugin/wizards/prompt_engineering_wizard.py +0 -425
- empathy_software_plugin/wizards/rag_pattern_wizard.py +0 -461
- empathy_software_plugin/wizards/security/__init__.py +0 -32
- empathy_software_plugin/wizards/security/exploit_analyzer.py +0 -290
- empathy_software_plugin/wizards/security/owasp_patterns.py +0 -241
- empathy_software_plugin/wizards/security/vulnerability_scanner.py +0 -604
- empathy_software_plugin/wizards/security_analysis_wizard.py +0 -322
- empathy_software_plugin/wizards/security_learning_wizard.py +0 -740
- empathy_software_plugin/wizards/tech_debt_wizard.py +0 -726
- empathy_software_plugin/wizards/testing/__init__.py +0 -27
- empathy_software_plugin/wizards/testing/coverage_analyzer.py +0 -459
- empathy_software_plugin/wizards/testing/quality_analyzer.py +0 -525
- empathy_software_plugin/wizards/testing/test_suggester.py +0 -533
- empathy_software_plugin/wizards/testing_wizard.py +0 -274
- wizards/__init__.py +0 -82
- wizards/admission_assessment_wizard.py +0 -644
- wizards/care_plan.py +0 -321
- wizards/clinical_assessment.py +0 -769
- wizards/discharge_planning.py +0 -77
- wizards/discharge_summary_wizard.py +0 -468
- wizards/dosage_calculation.py +0 -497
- wizards/incident_report_wizard.py +0 -454
- wizards/medication_reconciliation.py +0 -85
- wizards/nursing_assessment.py +0 -171
- wizards/patient_education.py +0 -654
- wizards/quality_improvement.py +0 -705
- wizards/sbar_report.py +0 -324
- wizards/sbar_wizard.py +0 -608
- wizards/shift_handoff_wizard.py +0 -535
- wizards/soap_note_wizard.py +0 -679
- wizards/treatment_plan.py +0 -15
- {empathy_framework-4.6.6.dist-info → empathy_framework-4.7.0.dist-info}/WHEEL +0 -0
- {empathy_framework-4.6.6.dist-info → empathy_framework-4.7.0.dist-info}/entry_points.txt +0 -0
- {empathy_framework-4.6.6.dist-info → empathy_framework-4.7.0.dist-info}/licenses/LICENSE +0 -0
empathy_software_plugin/wizards/memory_enhanced_debugging_wizard.py (removed)
@@ -1,816 +0,0 @@
"""Memory-Enhanced Debugging Wizard (Level 4+)

Debugging wizard that correlates current errors with historical patterns.
Demonstrates what's possible with persistent memory: AI that remembers
past bugs and how they were fixed.

"This error looks like something we fixed 3 months ago—here's what worked."

Key capabilities enabled by persistent memory:
- Cross-session bug correlation
- Historical fix recommendations
- Resolution time predictions
- Team knowledge accumulation

Copyright 2025 Smart AI Memory, LLC
Licensed under Fair Source 0.9
"""

import hashlib
import json
import logging
import re
from dataclasses import dataclass, field
from datetime import datetime
from pathlib import Path
from typing import Any

from .base_wizard import BaseWizard

logger = logging.getLogger(__name__)


@dataclass
class DebuggingWizardConfig:
    """Configuration for deployment mode (web vs local)"""

    deployment_mode: str = "local"  # "web" or "local"
    max_files: int | None = None  # None = unlimited
    max_file_size_mb: float | None = None
    folder_upload_enabled: bool = True
    show_upgrade_cta: bool = False

    @classmethod
    def web_config(cls) -> "DebuggingWizardConfig":
        """Website deployment - limited features"""
        return cls(
            deployment_mode="web",
            max_files=5,
            max_file_size_mb=1.0,
            folder_upload_enabled=False,
            show_upgrade_cta=True,
        )

    @classmethod
    def local_config(cls) -> "DebuggingWizardConfig":
        """Local installation - full features"""
        return cls(
            deployment_mode="local",
            max_files=None,
            max_file_size_mb=None,
            folder_upload_enabled=True,
            show_upgrade_cta=False,
        )


@dataclass
class BugResolution:
    """A historical bug resolution pattern"""

    bug_id: str
    date: str
    file_path: str
    error_type: str
    error_message: str
    root_cause: str
    fix_applied: str
    fix_code: str | None
    resolution_time_minutes: int
    resolved_by: str
    confidence: float = 1.0


@dataclass
class HistoricalMatch:
    """A match from historical bug patterns"""

    resolution: BugResolution
    similarity_score: float
    matching_factors: list[str] = field(default_factory=list)


class MemoryEnhancedDebuggingWizard(BaseWizard):
    """Memory-Enhanced Debugging Wizard - Level 4+

    What's now possible that wasn't before:

    WITHOUT PERSISTENT MEMORY (Before):
    - Every debugging session starts from zero
    - Same bugs diagnosed repeatedly
    - Fix knowledge lost between sessions
    - No learning from team's collective experience

    WITH PERSISTENT MEMORY (After):
    - AI remembers past bugs and fixes
    - "This looks like bug #247 from 3 months ago"
    - Recommends proven fixes
    - Team knowledge compounds over time

    Example:
        >>> wizard = MemoryEnhancedDebuggingWizard()
        >>> result = await wizard.analyze({
        ...     "error_message": "TypeError: Cannot read property 'map' of undefined",
        ...     "file_path": "src/components/UserList.tsx",
        ...     "stack_trace": "...",
        ...     "correlate_with_history": True
        ... })
        >>> print(result["historical_matches"])
        # Shows similar bugs from the past with their fixes

    """

    @property
    def name(self) -> str:
        return "Memory-Enhanced Debugging Wizard"

    @property
    def level(self) -> int:
        return 4  # Level 4+ with memory enhancement

    def __init__(
        self,
        pattern_storage_path: str = "./patterns/debugging",
        config: DebuggingWizardConfig | None = None,
    ):
        """Initialize the memory-enhanced debugging wizard.

        Args:
            pattern_storage_path: Path to git-based pattern storage
            config: Deployment configuration (web vs local mode)

        """
        super().__init__()
        self.pattern_storage_path = Path(pattern_storage_path)
        self.pattern_storage_path.mkdir(parents=True, exist_ok=True)
        self.config = config or DebuggingWizardConfig.local_config()

        # Error pattern classifiers
        self.error_patterns = {
            "null_reference": [
                r"cannot read property .* of (undefined|null)",
                r"TypeError:.*undefined",
                r"NoneType.*has no attribute",
                r"null pointer",
            ],
            "type_mismatch": [
                r"TypeError:.*expected.*got",
                r"cannot assign.*to.*",
                r"incompatible types",
            ],
            "async_timing": [
                r"promise.*rejected",
                r"unhandled.*promise",
                r"await.*undefined",
                r"race condition",
            ],
            "import_error": [
                r"cannot find module",
                r"ModuleNotFoundError",
                r"ImportError",
                r"no module named",
            ],
            "api_error": [
                r"fetch.*failed",
                r"network.*error",
                r"connection.*refused",
                r"timeout.*exceeded",
            ],
        }

    def validate_file_inputs(
        self,
        files: list[dict[str, Any]],
        is_folder_upload: bool = False,
    ) -> dict[str, Any]:
        """Validate file inputs against the deployment configuration limits.

        Args:
            files: List of file dicts with 'path' and 'size_bytes' keys
            is_folder_upload: Whether this is a folder upload operation

        Returns:
            Validation result dict with:
            - valid: bool indicating if inputs pass validation
            - errors: list of validation error messages
            - warnings: list of warning messages (e.g., upgrade CTA)

        """
        errors: list[str] = []
        warnings: list[str] = []

        # Check folder upload permission
        if is_folder_upload and not self.config.folder_upload_enabled:
            errors.append(
                "Folder upload is not available in web mode. "
                "Please upload individual files or install locally for full features.",
            )

        # Check file count limit
        if self.config.max_files is not None:
            if len(files) > self.config.max_files:
                errors.append(
                    f"Too many files: {len(files)} provided, "
                    f"maximum {self.config.max_files} allowed in {self.config.deployment_mode} mode.",
                )

        # Check individual file sizes
        if self.config.max_file_size_mb is not None:
            max_bytes = self.config.max_file_size_mb * 1024 * 1024
            for file_info in files:
                file_path = file_info.get("path", "unknown")
                file_size = file_info.get("size_bytes", 0)
                if file_size > max_bytes:
                    errors.append(
                        f"File '{file_path}' exceeds size limit: "
                        f"{file_size / (1024 * 1024):.2f}MB > {self.config.max_file_size_mb}MB",
                    )

        # Add upgrade CTA if configured
        if self.config.show_upgrade_cta and errors:
            warnings.append(
                "Upgrade to local installation for unlimited files and folder uploads. "
                "Visit https://empathy-framework.dev/install for details.",
            )

        return {
            "valid": len(errors) == 0,
            "errors": errors,
            "warnings": warnings,
            "config": {
                "deployment_mode": self.config.deployment_mode,
                "max_files": self.config.max_files,
                "max_file_size_mb": self.config.max_file_size_mb,
                "folder_upload_enabled": self.config.folder_upload_enabled,
            },
        }

    async def analyze(self, context: dict[str, Any]) -> dict[str, Any]:
        """Analyze a bug with historical correlation.

        Context expects:
        - error_message: The error message
        - file_path: File where error occurred
        - stack_trace: Optional stack trace
        - line_number: Optional line number
        - code_snippet: Optional surrounding code
        - correlate_with_history: Enable historical matching (default True)

        Returns:
            Analysis with:
            - error_classification: Type of error
            - historical_matches: Similar past bugs
            - recommended_fix: AI-suggested fix based on history
            - confidence: Confidence in recommendation
            - predictions: Level 4 predictions

        """
        error_message = context.get("error_message", "")
        file_path = context.get("file_path", "unknown")
        stack_trace = context.get("stack_trace", "")
        line_number = context.get("line_number")
        code_snippet = context.get("code_snippet", "")
        correlate_history = context.get("correlate_with_history", True)

        # Step 1: Classify the error
        error_type = self._classify_error(error_message, stack_trace)

        # Step 2: Analyze current context
        current_analysis = {
            "error_type": error_type,
            "error_message": error_message,
            "file_path": file_path,
            "line_number": line_number,
            "file_type": Path(file_path).suffix if file_path else "unknown",
            "likely_causes": self._identify_likely_causes(error_type, code_snippet),
        }

        # Step 3: Historical correlation (the magic!)
        historical_matches = []
        recommended_fix = None
        fix_confidence = 0.5

        if correlate_history:
            historical_matches = self._find_historical_matches(
                error_type=error_type,
                error_message=error_message,
                file_path=file_path,
            )

            if historical_matches:
                # Use the best match to recommend a fix
                best_match = historical_matches[0]
                recommended_fix = self._generate_fix_recommendation(best_match, current_analysis)
                fix_confidence = best_match.similarity_score

        # Step 4: Generate predictions (Level 4)
        predictions = self._generate_predictions(error_type, current_analysis, historical_matches)

        # Step 5: Generate recommendations
        recommendations = self._generate_recommendations(
            current_analysis,
            historical_matches,
            recommended_fix,
        )

        result = {
            "error_classification": current_analysis,
            "historical_matches": [
                {
                    "date": m.resolution.date,
                    "file": m.resolution.file_path,
                    "error_type": m.resolution.error_type,
                    "root_cause": m.resolution.root_cause,
                    "fix_applied": m.resolution.fix_applied,
                    "fix_code": m.resolution.fix_code,
                    "resolution_time_minutes": m.resolution.resolution_time_minutes,
                    "similarity_score": m.similarity_score,
                    "matching_factors": m.matching_factors,
                }
                for m in historical_matches[:5]  # Top 5 matches
            ],
            "historical_correlation_enabled": correlate_history,
            "matches_found": len(historical_matches),
            "recommended_fix": recommended_fix,
            "predictions": predictions,
            "recommendations": recommendations,
            "confidence": fix_confidence,
            "memory_benefit": self._calculate_memory_benefit(historical_matches),
        }

        # Store this bug for future correlation
        await self._store_bug_pattern(context, result)

        return result

    def _classify_error(self, error_message: str, stack_trace: str) -> str:
        """Classify the error type based on patterns"""
        combined_text = f"{error_message} {stack_trace}".lower()

        for error_type, patterns in self.error_patterns.items():
            for pattern in patterns:
                if re.search(pattern, combined_text, re.IGNORECASE):
                    return error_type

        return "unknown"

    def _identify_likely_causes(self, error_type: str, code_snippet: str) -> list[dict[str, Any]]:
        """Identify likely causes based on error type"""
        causes_by_type = {
            "null_reference": [
                {
                    "cause": "Accessing property before data loads",
                    "check": "Add null/undefined check before access",
                    "likelihood": 0.7,
                },
                {
                    "cause": "API returned null unexpectedly",
                    "check": "Verify API response structure",
                    "likelihood": 0.5,
                },
                {
                    "cause": "Optional chaining missing",
                    "check": "Use ?. operator or default values",
                    "likelihood": 0.6,
                },
            ],
            "type_mismatch": [
                {
                    "cause": "Wrong data type from API",
                    "check": "Validate API response types",
                    "likelihood": 0.6,
                },
                {
                    "cause": "Type conversion missing",
                    "check": "Add explicit type conversion",
                    "likelihood": 0.5,
                },
            ],
            "async_timing": [
                {
                    "cause": "Missing await keyword",
                    "check": "Verify all async calls are awaited",
                    "likelihood": 0.7,
                },
                {
                    "cause": "Race condition in state updates",
                    "check": "Use proper state management",
                    "likelihood": 0.5,
                },
            ],
            "import_error": [
                {
                    "cause": "Module not installed",
                    "check": "Run npm install or pip install",
                    "likelihood": 0.8,
                },
                {
                    "cause": "Wrong import path",
                    "check": "Verify relative/absolute path",
                    "likelihood": 0.6,
                },
            ],
            "api_error": [
                {
                    "cause": "Network connectivity",
                    "check": "Verify server is running",
                    "likelihood": 0.5,
                },
                {
                    "cause": "CORS policy",
                    "check": "Check server CORS configuration",
                    "likelihood": 0.6,
                },
            ],
        }

        return causes_by_type.get(error_type, [{"cause": "Unknown", "likelihood": 0.3}])

    def _find_historical_matches(
        self,
        error_type: str,
        error_message: str,
        file_path: str,
    ) -> list[HistoricalMatch]:
        """Find historical bug patterns that match the current error.

        This is where persistent memory enables what wasn't possible before:
        searching through accumulated team knowledge of past bugs.
        """
        matches = []

        # Search pattern storage
        pattern_files = list(self.pattern_storage_path.glob("*.json"))

        for pattern_file in pattern_files:
            try:
                with open(pattern_file, encoding="utf-8") as f:
                    stored = json.load(f)

                resolution = BugResolution(
                    bug_id=stored.get("bug_id", pattern_file.stem),
                    date=stored.get("date", "unknown"),
                    file_path=stored.get("file_path", ""),
                    error_type=stored.get("error_type", ""),
                    error_message=stored.get("error_message", ""),
                    root_cause=stored.get("root_cause", ""),
                    fix_applied=stored.get("fix_applied", ""),
                    fix_code=stored.get("fix_code"),
                    resolution_time_minutes=stored.get("resolution_time_minutes", 0),
                    resolved_by=stored.get("resolved_by", "unknown"),
                )

                # Calculate similarity
                similarity, factors = self._calculate_similarity(
                    error_type,
                    error_message,
                    file_path,
                    resolution,
                )

                if similarity > 0.3:  # Threshold for relevance
                    matches.append(
                        HistoricalMatch(
                            resolution=resolution,
                            similarity_score=similarity,
                            matching_factors=factors,
                        ),
                    )

            except (json.JSONDecodeError, KeyError) as e:
                logger.warning(f"Could not parse pattern {pattern_file}: {e}")
                continue

        # Sort by similarity
        matches.sort(key=lambda m: m.similarity_score, reverse=True)
        return matches

    def _calculate_similarity(
        self,
        error_type: str,
        error_message: str,
        file_path: str,
        resolution: BugResolution,
    ) -> tuple[float, list[str]]:
        """Calculate similarity score between current error and historical pattern"""
        score = 0.0
        factors = []

        # Same error type (strong signal)
        if error_type == resolution.error_type:
            score += 0.4
            factors.append(f"Same error type: {error_type}")

        # Similar file type
        current_ext = Path(file_path).suffix
        historical_ext = Path(resolution.file_path).suffix
        if current_ext == historical_ext:
            score += 0.15
            factors.append(f"Same file type: {current_ext}")

        # Similar file path pattern
        if self._paths_similar(file_path, resolution.file_path):
            score += 0.15
            factors.append("Similar file location")

        # Error message similarity
        msg_similarity = self._message_similarity(error_message, resolution.error_message)
        if msg_similarity > 0.5:
            score += 0.3 * msg_similarity
            factors.append(f"Similar error message ({int(msg_similarity * 100)}% match)")

        return min(score, 1.0), factors

    def _paths_similar(self, path1: str, path2: str) -> bool:
        """Check if two file paths are in similar locations"""
        parts1 = Path(path1).parts
        parts2 = Path(path2).parts

        # Check if they share common directory patterns
        common_dirs = {"src", "components", "api", "utils", "lib", "services"}
        dirs1 = set(parts1) & common_dirs
        dirs2 = set(parts2) & common_dirs

        return len(dirs1 & dirs2) > 0

    def _message_similarity(self, msg1: str, msg2: str) -> float:
        """Calculate similarity between two error messages"""
        # Simple word overlap similarity
        words1 = set(msg1.lower().split())
        words2 = set(msg2.lower().split())

        if not words1 or not words2:
            return 0.0

        intersection = len(words1 & words2)
        union = len(words1 | words2)

        return intersection / union if union > 0 else 0.0

    def _generate_fix_recommendation(
        self,
        match: HistoricalMatch,
        current_analysis: dict[str, Any],
    ) -> dict[str, Any]:
        """Generate a fix recommendation based on historical match"""
        return {
            "based_on": f"Bug #{match.resolution.bug_id} from {match.resolution.date}",
            "original_fix": match.resolution.fix_applied,
            "fix_code": match.resolution.fix_code,
            "expected_resolution_time": f"{match.resolution.resolution_time_minutes} minutes",
            "confidence": match.similarity_score,
            "adaptation_notes": self._generate_adaptation_notes(match, current_analysis),
        }

    def _generate_adaptation_notes(
        self,
        match: HistoricalMatch,
        current_analysis: dict[str, Any],
    ) -> list[str]:
        """Generate notes on how to adapt the historical fix"""
        notes = []

        if match.resolution.file_path != current_analysis.get("file_path"):
            notes.append(
                f"Original fix was in {Path(match.resolution.file_path).name}, "
                f"adapt for {Path(current_analysis.get('file_path', '')).name}",
            )

        if match.similarity_score < 0.8:
            notes.append("Moderate similarity - verify fix applies to your specific case")

        return notes

    def _generate_predictions(
        self,
        error_type: str,
        current_analysis: dict[str, Any],
        historical_matches: list[HistoricalMatch],
    ) -> list[dict[str, Any]]:
        """Generate Level 4 predictions"""
        predictions = []

        # Predict based on error type patterns
        if error_type == "null_reference":
            predictions.append(
                {
                    "type": "related_null_errors",
                    "severity": "medium",
                    "description": (
                        "Based on patterns, null reference errors often cluster. "
                        "Check similar components for the same issue."
                    ),
                    "prevention_steps": [
                        "Add defensive null checks across related files",
                        "Consider TypeScript strict null checks",
                        "Review API contract for nullable fields",
                    ],
                },
            )

        # Predict based on historical patterns
        if len(historical_matches) >= 2:
            avg_resolution_time = sum(
                m.resolution.resolution_time_minutes for m in historical_matches[:3]
            ) / min(len(historical_matches), 3)

            predictions.append(
                {
                    "type": "resolution_time_estimate",
                    "severity": "info",
                    "description": (
                        f"Based on {len(historical_matches)} similar past bugs, "
                        f"expect ~{int(avg_resolution_time)} minute resolution time."
                    ),
                    "prevention_steps": [],
                },
            )

        # Predict recurrence if same error type appears multiple times
        same_type_count = sum(
            1 for m in historical_matches if m.resolution.error_type == error_type
        )
        if same_type_count >= 3:
            predictions.append(
                {
                    "type": "recurring_pattern",
                    "severity": "high",
                    "description": (
                        f"This error type has occurred {same_type_count} times before. "
                        "Consider a systematic fix to prevent recurrence."
                    ),
                    "prevention_steps": [
                        "Add linting rule to catch this pattern",
                        "Create code review checklist item",
                        "Consider architectural change to eliminate root cause",
                    ],
                },
            )

        return predictions

    def _generate_recommendations(
        self,
        current_analysis: dict[str, Any],
        historical_matches: list[HistoricalMatch],
        recommended_fix: dict[str, Any] | None,
    ) -> list[str]:
        """Generate actionable recommendations"""
        recommendations = []

        # Historical match recommendations
        if recommended_fix:
            recommendations.append(
                f"📚 Historical match found! Try: {recommended_fix['original_fix']}",
            )
            if recommended_fix.get("fix_code"):
                recommendations.append(
                    f"💡 Example fix code available from {recommended_fix['based_on']}",
                )

        # Likely cause recommendations
        for cause in current_analysis.get("likely_causes", [])[:2]:
            cause_text = cause.get("cause", "Unknown")
            check_text = cause.get("check", "Investigate further")
            recommendations.append(f"🔍 Check: {cause_text} - {check_text}")

        # Memory benefit reminder
        if historical_matches:
            recommendations.append(
                f"⏱️ Memory saved you time: {len(historical_matches)} similar bugs found instantly",
            )
        else:
            recommendations.append(
                "💾 Tip: After fixing, resolution will be stored for future reference",
            )

        return recommendations

    def _calculate_memory_benefit(
        self,
        historical_matches: list[HistoricalMatch],
    ) -> dict[str, Any]:
        """Calculate the benefit provided by persistent memory"""
        if not historical_matches:
            return {
                "matches_found": 0,
                "time_saved_estimate": "N/A - no historical data yet",
                "value_statement": "Once resolved, this bug will help future debugging",
            }

        # Estimate time saved
        avg_resolution = sum(
            m.resolution.resolution_time_minutes for m in historical_matches[:3]
        ) / min(len(historical_matches), 3)

        time_saved = int(avg_resolution * 0.6)  # Estimate 60% time savings

        return {
            "matches_found": len(historical_matches),
            "time_saved_estimate": f"~{time_saved} minutes",
            "value_statement": (
                f"Persistent memory found {len(historical_matches)} similar bugs. "
                f"Without memory, you'd start from zero every time."
            ),
            "historical_insight": (
                f"Best match: {historical_matches[0].resolution.fix_applied}"
                if historical_matches
                else None
            ),
        }

    async def _store_bug_pattern(self, context: dict[str, Any], result: dict[str, Any]) -> None:
        """Store this bug for future correlation (when resolved)"""
        # Only store if we have meaningful information
        if not context.get("error_message"):
            return

        # Generate bug ID
        bug_hash = hashlib.md5(
            f"{context.get('error_message', '')}{context.get('file_path', '')}{datetime.now().isoformat()}".encode(),
            usedforsecurity=False,
        ).hexdigest()[:8]

        bug_id = f"bug_{datetime.now().strftime('%Y%m%d')}_{bug_hash}"

        # Create pattern record (will be updated when fix is applied)
        pattern = {
            "bug_id": bug_id,
            "date": datetime.now().isoformat(),
            "file_path": context.get("file_path", ""),
            "error_type": result.get("error_classification", {}).get("error_type", "unknown"),
            "error_message": context.get("error_message", ""),
            "root_cause": "",  # To be filled when resolved
            "fix_applied": "",  # To be filled when resolved
            "fix_code": None,  # To be filled when resolved
            "resolution_time_minutes": 0,  # To be filled when resolved
            "resolved_by": "",  # To be filled when resolved
            "status": "investigating",
        }

        # Store in pattern storage
        pattern_file = self.pattern_storage_path / f"{bug_id}.json"
        try:
            with open(pattern_file, "w", encoding="utf-8") as f:
                json.dump(pattern, f, indent=2)
            logger.debug(f"Stored bug pattern: {bug_id}")
        except OSError as e:
            logger.warning(f"Could not store bug pattern: {e}")

    async def record_resolution(
        self,
        bug_id: str,
        root_cause: str,
        fix_applied: str,
        fix_code: str | None = None,
        resolution_time_minutes: int = 0,
        resolved_by: str = "developer",
    ) -> bool:
        """Record the resolution of a bug (updates stored pattern).

        Call this after successfully fixing a bug to store the knowledge
        for future correlation.

        Args:
            bug_id: The bug ID from the analyze result
            root_cause: What caused the bug
            fix_applied: Description of the fix
            fix_code: Optional code snippet of the fix
            resolution_time_minutes: How long it took to fix
            resolved_by: Who fixed it

        Returns:
            True if recorded successfully

        """
        pattern_file = self.pattern_storage_path / f"{bug_id}.json"

        if not pattern_file.exists():
            logger.warning(f"Bug {bug_id} not found in storage")
            return False

        try:
            with open(pattern_file, encoding="utf-8") as f:
                pattern = json.load(f)

            # Update with resolution
            pattern.update(
                {
                    "root_cause": root_cause,
                    "fix_applied": fix_applied,
                    "fix_code": fix_code,
                    "resolution_time_minutes": resolution_time_minutes,
                    "resolved_by": resolved_by,
                    "status": "resolved",
                    "resolved_date": datetime.now().isoformat(),
                },
            )

            with open(pattern_file, "w", encoding="utf-8") as f:
                json.dump(pattern, f, indent=2)

            logger.info(f"Recorded resolution for {bug_id}")
            return True

        except (OSError, json.JSONDecodeError) as e:
            logger.error(f"Could not record resolution: {e}")
            return False