empathy-framework 4.6.6__py3-none-any.whl → 4.7.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {empathy_framework-4.6.6.dist-info → empathy_framework-4.7.0.dist-info}/METADATA +7 -6
- empathy_framework-4.7.0.dist-info/RECORD +354 -0
- {empathy_framework-4.6.6.dist-info → empathy_framework-4.7.0.dist-info}/top_level.txt +0 -2
- empathy_healthcare_plugin/monitors/monitoring/__init__.py +9 -9
- empathy_llm_toolkit/agent_factory/__init__.py +6 -6
- empathy_llm_toolkit/agent_factory/adapters/wizard_adapter.py +7 -10
- empathy_llm_toolkit/agents_md/__init__.py +22 -0
- empathy_llm_toolkit/agents_md/loader.py +218 -0
- empathy_llm_toolkit/agents_md/parser.py +271 -0
- empathy_llm_toolkit/agents_md/registry.py +307 -0
- empathy_llm_toolkit/commands/__init__.py +51 -0
- empathy_llm_toolkit/commands/context.py +375 -0
- empathy_llm_toolkit/commands/loader.py +301 -0
- empathy_llm_toolkit/commands/models.py +231 -0
- empathy_llm_toolkit/commands/parser.py +371 -0
- empathy_llm_toolkit/commands/registry.py +429 -0
- empathy_llm_toolkit/config/__init__.py +8 -8
- empathy_llm_toolkit/config/unified.py +3 -7
- empathy_llm_toolkit/context/__init__.py +22 -0
- empathy_llm_toolkit/context/compaction.py +455 -0
- empathy_llm_toolkit/context/manager.py +434 -0
- empathy_llm_toolkit/hooks/__init__.py +24 -0
- empathy_llm_toolkit/hooks/config.py +306 -0
- empathy_llm_toolkit/hooks/executor.py +289 -0
- empathy_llm_toolkit/hooks/registry.py +302 -0
- empathy_llm_toolkit/hooks/scripts/__init__.py +39 -0
- empathy_llm_toolkit/hooks/scripts/evaluate_session.py +201 -0
- empathy_llm_toolkit/hooks/scripts/first_time_init.py +285 -0
- empathy_llm_toolkit/hooks/scripts/pre_compact.py +207 -0
- empathy_llm_toolkit/hooks/scripts/session_end.py +183 -0
- empathy_llm_toolkit/hooks/scripts/session_start.py +163 -0
- empathy_llm_toolkit/hooks/scripts/suggest_compact.py +225 -0
- empathy_llm_toolkit/learning/__init__.py +30 -0
- empathy_llm_toolkit/learning/evaluator.py +438 -0
- empathy_llm_toolkit/learning/extractor.py +514 -0
- empathy_llm_toolkit/learning/storage.py +560 -0
- empathy_llm_toolkit/providers.py +4 -11
- empathy_llm_toolkit/security/__init__.py +17 -17
- empathy_llm_toolkit/utils/tokens.py +2 -5
- empathy_os/__init__.py +202 -70
- empathy_os/cache_monitor.py +5 -3
- empathy_os/cli/__init__.py +11 -55
- empathy_os/cli/__main__.py +29 -15
- empathy_os/cli/commands/inspection.py +21 -12
- empathy_os/cli/commands/memory.py +4 -12
- empathy_os/cli/commands/profiling.py +198 -0
- empathy_os/cli/commands/utilities.py +27 -7
- empathy_os/cli.py +28 -57
- empathy_os/cli_unified.py +525 -1164
- empathy_os/cost_tracker.py +9 -3
- empathy_os/dashboard/server.py +200 -2
- empathy_os/hot_reload/__init__.py +7 -7
- empathy_os/hot_reload/config.py +6 -7
- empathy_os/hot_reload/integration.py +35 -35
- empathy_os/hot_reload/reloader.py +57 -57
- empathy_os/hot_reload/watcher.py +28 -28
- empathy_os/hot_reload/websocket.py +2 -2
- empathy_os/memory/__init__.py +11 -4
- empathy_os/memory/claude_memory.py +1 -1
- empathy_os/memory/cross_session.py +8 -12
- empathy_os/memory/edges.py +6 -6
- empathy_os/memory/file_session.py +770 -0
- empathy_os/memory/graph.py +30 -30
- empathy_os/memory/nodes.py +6 -6
- empathy_os/memory/short_term.py +15 -9
- empathy_os/memory/unified.py +606 -140
- empathy_os/meta_workflows/agent_creator.py +3 -9
- empathy_os/meta_workflows/cli_meta_workflows.py +113 -53
- empathy_os/meta_workflows/form_engine.py +6 -18
- empathy_os/meta_workflows/intent_detector.py +64 -24
- empathy_os/meta_workflows/models.py +3 -1
- empathy_os/meta_workflows/pattern_learner.py +13 -31
- empathy_os/meta_workflows/plan_generator.py +55 -47
- empathy_os/meta_workflows/session_context.py +2 -3
- empathy_os/meta_workflows/workflow.py +20 -51
- empathy_os/models/cli.py +2 -2
- empathy_os/models/tasks.py +1 -2
- empathy_os/models/telemetry.py +4 -1
- empathy_os/models/token_estimator.py +3 -1
- empathy_os/monitoring/alerts.py +938 -9
- empathy_os/monitoring/alerts_cli.py +346 -183
- empathy_os/orchestration/execution_strategies.py +12 -29
- empathy_os/orchestration/pattern_learner.py +20 -26
- empathy_os/orchestration/real_tools.py +6 -15
- empathy_os/platform_utils.py +2 -1
- empathy_os/plugins/__init__.py +2 -2
- empathy_os/plugins/base.py +64 -64
- empathy_os/plugins/registry.py +32 -32
- empathy_os/project_index/index.py +49 -15
- empathy_os/project_index/models.py +1 -2
- empathy_os/project_index/reports.py +1 -1
- empathy_os/project_index/scanner.py +1 -0
- empathy_os/redis_memory.py +10 -7
- empathy_os/resilience/__init__.py +1 -1
- empathy_os/resilience/health.py +10 -10
- empathy_os/routing/__init__.py +7 -7
- empathy_os/routing/chain_executor.py +37 -37
- empathy_os/routing/classifier.py +36 -36
- empathy_os/routing/smart_router.py +40 -40
- empathy_os/routing/{wizard_registry.py → workflow_registry.py} +47 -47
- empathy_os/scaffolding/__init__.py +8 -8
- empathy_os/scaffolding/__main__.py +1 -1
- empathy_os/scaffolding/cli.py +28 -28
- empathy_os/socratic/__init__.py +3 -19
- empathy_os/socratic/ab_testing.py +25 -36
- empathy_os/socratic/blueprint.py +38 -38
- empathy_os/socratic/cli.py +34 -20
- empathy_os/socratic/collaboration.py +30 -28
- empathy_os/socratic/domain_templates.py +9 -1
- empathy_os/socratic/embeddings.py +17 -13
- empathy_os/socratic/engine.py +135 -70
- empathy_os/socratic/explainer.py +70 -60
- empathy_os/socratic/feedback.py +24 -19
- empathy_os/socratic/forms.py +15 -10
- empathy_os/socratic/generator.py +51 -35
- empathy_os/socratic/llm_analyzer.py +25 -23
- empathy_os/socratic/mcp_server.py +99 -159
- empathy_os/socratic/session.py +19 -13
- empathy_os/socratic/storage.py +98 -67
- empathy_os/socratic/success.py +38 -27
- empathy_os/socratic/visual_editor.py +51 -39
- empathy_os/socratic/web_ui.py +99 -66
- empathy_os/telemetry/cli.py +3 -1
- empathy_os/telemetry/usage_tracker.py +1 -3
- empathy_os/test_generator/__init__.py +3 -3
- empathy_os/test_generator/cli.py +28 -28
- empathy_os/test_generator/generator.py +64 -66
- empathy_os/test_generator/risk_analyzer.py +11 -11
- empathy_os/vscode_bridge.py +173 -0
- empathy_os/workflows/__init__.py +212 -120
- empathy_os/workflows/batch_processing.py +8 -24
- empathy_os/workflows/bug_predict.py +1 -1
- empathy_os/workflows/code_review.py +20 -5
- empathy_os/workflows/code_review_pipeline.py +13 -8
- empathy_os/workflows/keyboard_shortcuts/workflow.py +6 -2
- empathy_os/workflows/manage_documentation.py +1 -0
- empathy_os/workflows/orchestrated_health_check.py +6 -11
- empathy_os/workflows/orchestrated_release_prep.py +3 -3
- empathy_os/workflows/pr_review.py +18 -10
- empathy_os/workflows/progressive/__init__.py +2 -12
- empathy_os/workflows/progressive/cli.py +14 -37
- empathy_os/workflows/progressive/core.py +12 -12
- empathy_os/workflows/progressive/orchestrator.py +166 -144
- empathy_os/workflows/progressive/reports.py +22 -31
- empathy_os/workflows/progressive/telemetry.py +8 -14
- empathy_os/workflows/progressive/test_gen.py +29 -48
- empathy_os/workflows/progressive/workflow.py +31 -70
- empathy_os/workflows/release_prep.py +21 -6
- empathy_os/workflows/release_prep_crew.py +1 -0
- empathy_os/workflows/secure_release.py +13 -6
- empathy_os/workflows/security_audit.py +8 -3
- empathy_os/workflows/test_coverage_boost_crew.py +3 -2
- empathy_os/workflows/test_maintenance_crew.py +1 -0
- empathy_os/workflows/test_runner.py +16 -12
- empathy_software_plugin/SOFTWARE_PLUGIN_README.md +25 -703
- empathy_software_plugin/cli.py +0 -122
- coach_wizards/__init__.py +0 -45
- coach_wizards/accessibility_wizard.py +0 -91
- coach_wizards/api_wizard.py +0 -91
- coach_wizards/base_wizard.py +0 -209
- coach_wizards/cicd_wizard.py +0 -91
- coach_wizards/code_reviewer_README.md +0 -60
- coach_wizards/code_reviewer_wizard.py +0 -180
- coach_wizards/compliance_wizard.py +0 -91
- coach_wizards/database_wizard.py +0 -91
- coach_wizards/debugging_wizard.py +0 -91
- coach_wizards/documentation_wizard.py +0 -91
- coach_wizards/generate_wizards.py +0 -347
- coach_wizards/localization_wizard.py +0 -173
- coach_wizards/migration_wizard.py +0 -91
- coach_wizards/monitoring_wizard.py +0 -91
- coach_wizards/observability_wizard.py +0 -91
- coach_wizards/performance_wizard.py +0 -91
- coach_wizards/prompt_engineering_wizard.py +0 -661
- coach_wizards/refactoring_wizard.py +0 -91
- coach_wizards/scaling_wizard.py +0 -90
- coach_wizards/security_wizard.py +0 -92
- coach_wizards/testing_wizard.py +0 -91
- empathy_framework-4.6.6.dist-info/RECORD +0 -410
- empathy_llm_toolkit/wizards/__init__.py +0 -43
- empathy_llm_toolkit/wizards/base_wizard.py +0 -364
- empathy_llm_toolkit/wizards/customer_support_wizard.py +0 -190
- empathy_llm_toolkit/wizards/healthcare_wizard.py +0 -378
- empathy_llm_toolkit/wizards/patient_assessment_README.md +0 -64
- empathy_llm_toolkit/wizards/patient_assessment_wizard.py +0 -193
- empathy_llm_toolkit/wizards/technology_wizard.py +0 -209
- empathy_os/wizard_factory_cli.py +0 -170
- empathy_software_plugin/wizards/__init__.py +0 -42
- empathy_software_plugin/wizards/advanced_debugging_wizard.py +0 -395
- empathy_software_plugin/wizards/agent_orchestration_wizard.py +0 -511
- empathy_software_plugin/wizards/ai_collaboration_wizard.py +0 -503
- empathy_software_plugin/wizards/ai_context_wizard.py +0 -441
- empathy_software_plugin/wizards/ai_documentation_wizard.py +0 -503
- empathy_software_plugin/wizards/base_wizard.py +0 -288
- empathy_software_plugin/wizards/book_chapter_wizard.py +0 -519
- empathy_software_plugin/wizards/code_review_wizard.py +0 -604
- empathy_software_plugin/wizards/debugging/__init__.py +0 -50
- empathy_software_plugin/wizards/debugging/bug_risk_analyzer.py +0 -414
- empathy_software_plugin/wizards/debugging/config_loaders.py +0 -446
- empathy_software_plugin/wizards/debugging/fix_applier.py +0 -469
- empathy_software_plugin/wizards/debugging/language_patterns.py +0 -385
- empathy_software_plugin/wizards/debugging/linter_parsers.py +0 -470
- empathy_software_plugin/wizards/debugging/verification.py +0 -369
- empathy_software_plugin/wizards/enhanced_testing_wizard.py +0 -537
- empathy_software_plugin/wizards/memory_enhanced_debugging_wizard.py +0 -816
- empathy_software_plugin/wizards/multi_model_wizard.py +0 -501
- empathy_software_plugin/wizards/pattern_extraction_wizard.py +0 -422
- empathy_software_plugin/wizards/pattern_retriever_wizard.py +0 -400
- empathy_software_plugin/wizards/performance/__init__.py +0 -9
- empathy_software_plugin/wizards/performance/bottleneck_detector.py +0 -221
- empathy_software_plugin/wizards/performance/profiler_parsers.py +0 -278
- empathy_software_plugin/wizards/performance/trajectory_analyzer.py +0 -429
- empathy_software_plugin/wizards/performance_profiling_wizard.py +0 -305
- empathy_software_plugin/wizards/prompt_engineering_wizard.py +0 -425
- empathy_software_plugin/wizards/rag_pattern_wizard.py +0 -461
- empathy_software_plugin/wizards/security/__init__.py +0 -32
- empathy_software_plugin/wizards/security/exploit_analyzer.py +0 -290
- empathy_software_plugin/wizards/security/owasp_patterns.py +0 -241
- empathy_software_plugin/wizards/security/vulnerability_scanner.py +0 -604
- empathy_software_plugin/wizards/security_analysis_wizard.py +0 -322
- empathy_software_plugin/wizards/security_learning_wizard.py +0 -740
- empathy_software_plugin/wizards/tech_debt_wizard.py +0 -726
- empathy_software_plugin/wizards/testing/__init__.py +0 -27
- empathy_software_plugin/wizards/testing/coverage_analyzer.py +0 -459
- empathy_software_plugin/wizards/testing/quality_analyzer.py +0 -525
- empathy_software_plugin/wizards/testing/test_suggester.py +0 -533
- empathy_software_plugin/wizards/testing_wizard.py +0 -274
- wizards/__init__.py +0 -82
- wizards/admission_assessment_wizard.py +0 -644
- wizards/care_plan.py +0 -321
- wizards/clinical_assessment.py +0 -769
- wizards/discharge_planning.py +0 -77
- wizards/discharge_summary_wizard.py +0 -468
- wizards/dosage_calculation.py +0 -497
- wizards/incident_report_wizard.py +0 -454
- wizards/medication_reconciliation.py +0 -85
- wizards/nursing_assessment.py +0 -171
- wizards/patient_education.py +0 -654
- wizards/quality_improvement.py +0 -705
- wizards/sbar_report.py +0 -324
- wizards/sbar_wizard.py +0 -608
- wizards/shift_handoff_wizard.py +0 -535
- wizards/soap_note_wizard.py +0 -679
- wizards/treatment_plan.py +0 -15
- {empathy_framework-4.6.6.dist-info → empathy_framework-4.7.0.dist-info}/WHEEL +0 -0
- {empathy_framework-4.6.6.dist-info → empathy_framework-4.7.0.dist-info}/entry_points.txt +0 -0
- {empathy_framework-4.6.6.dist-info → empathy_framework-4.7.0.dist-info}/licenses/LICENSE +0 -0
|
@@ -1,400 +0,0 @@
|
|
|
1
|
-
"""Pattern Retriever Wizard
|
|
2
|
-
|
|
3
|
-
Retrieves relevant patterns from storage based on context.
|
|
4
|
-
Level 3 (Proactive) - anticipates pattern needs.
|
|
5
|
-
|
|
6
|
-
Example Usage:
|
|
7
|
-
from empathy_software_plugin.wizards import PatternRetrieverWizard
|
|
8
|
-
|
|
9
|
-
wizard = PatternRetrieverWizard()
|
|
10
|
-
result = await wizard.analyze({
|
|
11
|
-
"query": "null reference",
|
|
12
|
-
"pattern_type": "debugging",
|
|
13
|
-
"limit": 5
|
|
14
|
-
})
|
|
15
|
-
|
|
16
|
-
for pattern in result["matching_patterns"]:
|
|
17
|
-
print(f"{pattern['id']}: {pattern['relevance_score']:.0%}")
|
|
18
|
-
|
|
19
|
-
Copyright 2025 Smart AI Memory, LLC
|
|
20
|
-
Licensed under Fair Source 0.9
|
|
21
|
-
"""
|
|
22
|
-
|
|
23
|
-
import json
|
|
24
|
-
import logging
|
|
25
|
-
from datetime import datetime
|
|
26
|
-
from pathlib import Path
|
|
27
|
-
from typing import Any
|
|
28
|
-
|
|
29
|
-
from .base_wizard import BaseWizard
|
|
30
|
-
|
|
31
|
-
logger = logging.getLogger(__name__)
|
|
32
|
-
|
|
33
|
-
|
|
34
|
-
class PatternRetrieverWizard(BaseWizard):
|
|
35
|
-
"""Pattern Retriever Wizard - Level 3 (Proactive)
|
|
36
|
-
|
|
37
|
-
Retrieves relevant patterns from storage for given context.
|
|
38
|
-
Searches across bug patterns, security decisions, and tech debt history.
|
|
39
|
-
|
|
40
|
-
Features:
|
|
41
|
-
- Full-text search across all pattern types
|
|
42
|
-
- Relevance ranking
|
|
43
|
-
- Type and classification filtering
|
|
44
|
-
- Pattern metadata enrichment
|
|
45
|
-
"""
|
|
46
|
-
|
|
47
|
-
@property
|
|
48
|
-
def name(self) -> str:
|
|
49
|
-
return "Pattern Retriever Wizard"
|
|
50
|
-
|
|
51
|
-
@property
|
|
52
|
-
def level(self) -> int:
|
|
53
|
-
return 3 # Proactive - anticipates pattern needs
|
|
54
|
-
|
|
55
|
-
def __init__(
|
|
56
|
-
self,
|
|
57
|
-
pattern_storage_path: str = "./patterns",
|
|
58
|
-
**kwargs,
|
|
59
|
-
):
|
|
60
|
-
"""Initialize Pattern Retriever Wizard.
|
|
61
|
-
|
|
62
|
-
Args:
|
|
63
|
-
pattern_storage_path: Path to patterns directory
|
|
64
|
-
**kwargs: Passed to BaseWizard
|
|
65
|
-
|
|
66
|
-
"""
|
|
67
|
-
super().__init__(**kwargs)
|
|
68
|
-
self.pattern_storage_path = Path(pattern_storage_path)
|
|
69
|
-
self.pattern_storage_path.mkdir(parents=True, exist_ok=True)
|
|
70
|
-
|
|
71
|
-
async def analyze(self, context: dict[str, Any]) -> dict[str, Any]:
|
|
72
|
-
"""Retrieve relevant patterns for given context.
|
|
73
|
-
|
|
74
|
-
Args:
|
|
75
|
-
context: Dictionary with:
|
|
76
|
-
- query: Search query string
|
|
77
|
-
- pattern_type: Optional filter (debugging, security, tech_debt)
|
|
78
|
-
- limit: Max results (default 10)
|
|
79
|
-
- include_metadata: Include full pattern data (default True)
|
|
80
|
-
|
|
81
|
-
Returns:
|
|
82
|
-
Dictionary with:
|
|
83
|
-
- query: Original query
|
|
84
|
-
- matching_patterns: List of matching patterns with relevance
|
|
85
|
-
- summary: Count and distribution stats
|
|
86
|
-
- predictions: Level 3 predictions
|
|
87
|
-
- recommendations: Usage recommendations
|
|
88
|
-
- confidence: Retrieval confidence
|
|
89
|
-
|
|
90
|
-
"""
|
|
91
|
-
query = context.get("query", "")
|
|
92
|
-
pattern_type = context.get("pattern_type")
|
|
93
|
-
limit = context.get("limit", 10)
|
|
94
|
-
include_metadata = context.get("include_metadata", True)
|
|
95
|
-
|
|
96
|
-
# Load all patterns
|
|
97
|
-
all_patterns = self._load_all_patterns()
|
|
98
|
-
|
|
99
|
-
# Filter by type if specified
|
|
100
|
-
if pattern_type:
|
|
101
|
-
all_patterns = [p for p in all_patterns if p.get("_type") == pattern_type]
|
|
102
|
-
|
|
103
|
-
# Search and rank
|
|
104
|
-
if query:
|
|
105
|
-
matching = self._search_patterns(query, all_patterns)
|
|
106
|
-
ranked = self._rank_by_relevance(query, matching)
|
|
107
|
-
else:
|
|
108
|
-
ranked = all_patterns
|
|
109
|
-
|
|
110
|
-
# Limit results
|
|
111
|
-
limited = ranked[:limit]
|
|
112
|
-
|
|
113
|
-
# Generate predictions and recommendations
|
|
114
|
-
predictions = self._generate_predictions(limited, context)
|
|
115
|
-
recommendations = self._generate_recommendations(limited, query)
|
|
116
|
-
|
|
117
|
-
# Format output
|
|
118
|
-
formatted_patterns = []
|
|
119
|
-
for p in limited:
|
|
120
|
-
formatted = {
|
|
121
|
-
"id": p.get("_id", "unknown"),
|
|
122
|
-
"type": p.get("_type", "unknown"),
|
|
123
|
-
"relevance_score": p.get("_relevance_score", 0.0),
|
|
124
|
-
"summary": p.get("_summary", ""),
|
|
125
|
-
}
|
|
126
|
-
if include_metadata:
|
|
127
|
-
formatted["data"] = {k: v for k, v in p.items() if not k.startswith("_")}
|
|
128
|
-
formatted_patterns.append(formatted)
|
|
129
|
-
|
|
130
|
-
return {
|
|
131
|
-
"query": query,
|
|
132
|
-
"matching_patterns": formatted_patterns,
|
|
133
|
-
"summary": {
|
|
134
|
-
"total_available": len(all_patterns),
|
|
135
|
-
"total_matched": len(ranked),
|
|
136
|
-
"returned": len(limited),
|
|
137
|
-
"by_type": self._count_by_type(ranked),
|
|
138
|
-
},
|
|
139
|
-
"predictions": predictions,
|
|
140
|
-
"recommendations": recommendations,
|
|
141
|
-
"confidence": self._calculate_confidence(limited, query),
|
|
142
|
-
"metadata": {
|
|
143
|
-
"wizard": self.name,
|
|
144
|
-
"level": self.level,
|
|
145
|
-
"timestamp": datetime.now().isoformat(),
|
|
146
|
-
},
|
|
147
|
-
}
|
|
148
|
-
|
|
149
|
-
def _load_all_patterns(self) -> list[dict[str, Any]]:
|
|
150
|
-
"""Load all patterns from storage."""
|
|
151
|
-
patterns = []
|
|
152
|
-
|
|
153
|
-
# Load bug patterns
|
|
154
|
-
for bug_dir in ["debugging", "debugging_demo", "repo_test/debugging"]:
|
|
155
|
-
dir_path = self.pattern_storage_path / bug_dir
|
|
156
|
-
if not dir_path.exists():
|
|
157
|
-
continue
|
|
158
|
-
|
|
159
|
-
for json_file in dir_path.glob("bug_*.json"):
|
|
160
|
-
try:
|
|
161
|
-
with open(json_file, encoding="utf-8") as f:
|
|
162
|
-
data = json.load(f)
|
|
163
|
-
data["_type"] = "debugging"
|
|
164
|
-
data["_id"] = data.get("bug_id", json_file.stem)
|
|
165
|
-
# fmt: off
|
|
166
|
-
data["_summary"] = (
|
|
167
|
-
f"{data.get('error_type', 'unknown')}: {data.get('root_cause', 'N/A')}"
|
|
168
|
-
)
|
|
169
|
-
# fmt: on
|
|
170
|
-
patterns.append(data)
|
|
171
|
-
except (json.JSONDecodeError, OSError):
|
|
172
|
-
pass
|
|
173
|
-
|
|
174
|
-
# Load security decisions
|
|
175
|
-
for sec_dir in ["security", "security_demo", "repo_test/security"]:
|
|
176
|
-
decisions_file = self.pattern_storage_path / sec_dir / "team_decisions.json"
|
|
177
|
-
if not decisions_file.exists():
|
|
178
|
-
continue
|
|
179
|
-
|
|
180
|
-
try:
|
|
181
|
-
with open(decisions_file, encoding="utf-8") as f:
|
|
182
|
-
data = json.load(f)
|
|
183
|
-
for decision in data.get("decisions", []):
|
|
184
|
-
decision["_type"] = "security"
|
|
185
|
-
decision["_id"] = f"sec_{decision.get('finding_hash', 'unknown')}"
|
|
186
|
-
# fmt: off
|
|
187
|
-
decision["_summary"] = (
|
|
188
|
-
f"{decision.get('finding_hash', 'unknown')}: {decision.get('decision', 'N/A')}"
|
|
189
|
-
)
|
|
190
|
-
# fmt: on
|
|
191
|
-
patterns.append(decision)
|
|
192
|
-
except (json.JSONDecodeError, OSError):
|
|
193
|
-
pass
|
|
194
|
-
|
|
195
|
-
# Load tech debt snapshots
|
|
196
|
-
for debt_dir in ["tech_debt", "tech_debt_demo", "repo_test/tech_debt"]:
|
|
197
|
-
history_file = self.pattern_storage_path / debt_dir / "debt_history.json"
|
|
198
|
-
if not history_file.exists():
|
|
199
|
-
continue
|
|
200
|
-
|
|
201
|
-
try:
|
|
202
|
-
with open(history_file, encoding="utf-8") as f:
|
|
203
|
-
data = json.load(f)
|
|
204
|
-
# Only include most recent snapshot as a pattern
|
|
205
|
-
snapshots = data.get("snapshots", [])
|
|
206
|
-
if snapshots:
|
|
207
|
-
latest = max(snapshots, key=lambda s: s.get("date", ""))
|
|
208
|
-
latest["_type"] = "tech_debt"
|
|
209
|
-
latest["_id"] = f"debt_{latest.get('date', 'latest')[:10]}"
|
|
210
|
-
# fmt: off
|
|
211
|
-
latest["_summary"] = (
|
|
212
|
-
f"{latest.get('total_items', 0)} items, hotspots: {', '.join(latest.get('hotspots', [])[:2])}"
|
|
213
|
-
)
|
|
214
|
-
# fmt: on
|
|
215
|
-
patterns.append(latest)
|
|
216
|
-
except (json.JSONDecodeError, OSError):
|
|
217
|
-
pass
|
|
218
|
-
|
|
219
|
-
return patterns
|
|
220
|
-
|
|
221
|
-
def _search_patterns(self, query: str, patterns: list[dict]) -> list[dict]:
|
|
222
|
-
"""Search patterns by query text."""
|
|
223
|
-
query_lower = query.lower()
|
|
224
|
-
results = []
|
|
225
|
-
|
|
226
|
-
for pattern in patterns:
|
|
227
|
-
# Search across all string fields
|
|
228
|
-
searchable_text = self._extract_searchable_text(pattern)
|
|
229
|
-
|
|
230
|
-
if query_lower in searchable_text.lower():
|
|
231
|
-
results.append(pattern)
|
|
232
|
-
|
|
233
|
-
return results
|
|
234
|
-
|
|
235
|
-
def _extract_searchable_text(self, pattern: dict) -> str:
|
|
236
|
-
"""Extract searchable text from a pattern."""
|
|
237
|
-
parts = []
|
|
238
|
-
|
|
239
|
-
# Common fields to search
|
|
240
|
-
search_fields = [
|
|
241
|
-
"error_type",
|
|
242
|
-
"error_message",
|
|
243
|
-
"root_cause",
|
|
244
|
-
"fix_applied",
|
|
245
|
-
"finding_hash",
|
|
246
|
-
"decision",
|
|
247
|
-
"reason",
|
|
248
|
-
"hotspots",
|
|
249
|
-
"_summary",
|
|
250
|
-
]
|
|
251
|
-
|
|
252
|
-
for field in search_fields:
|
|
253
|
-
value = pattern.get(field)
|
|
254
|
-
if isinstance(value, str):
|
|
255
|
-
parts.append(value)
|
|
256
|
-
elif isinstance(value, list):
|
|
257
|
-
parts.extend(str(v) for v in value)
|
|
258
|
-
|
|
259
|
-
return " ".join(parts)
|
|
260
|
-
|
|
261
|
-
def _rank_by_relevance(self, query: str, patterns: list[dict]) -> list[dict]:
|
|
262
|
-
"""Rank patterns by relevance to query."""
|
|
263
|
-
query_lower = query.lower()
|
|
264
|
-
query_terms = query_lower.split()
|
|
265
|
-
|
|
266
|
-
for pattern in patterns:
|
|
267
|
-
score = 0.0
|
|
268
|
-
searchable = self._extract_searchable_text(pattern).lower()
|
|
269
|
-
|
|
270
|
-
# Exact phrase match
|
|
271
|
-
if query_lower in searchable:
|
|
272
|
-
score += 0.5
|
|
273
|
-
|
|
274
|
-
# Term matches
|
|
275
|
-
for term in query_terms:
|
|
276
|
-
if term in searchable:
|
|
277
|
-
score += 0.2
|
|
278
|
-
|
|
279
|
-
# Type-specific boosts
|
|
280
|
-
if pattern.get("_type") == "debugging" and any(
|
|
281
|
-
t in query_lower for t in ["bug", "error", "fix"]
|
|
282
|
-
):
|
|
283
|
-
score += 0.1
|
|
284
|
-
if pattern.get("_type") == "security" and any(
|
|
285
|
-
t in query_lower for t in ["security", "vulnerability"]
|
|
286
|
-
):
|
|
287
|
-
score += 0.1
|
|
288
|
-
|
|
289
|
-
pattern["_relevance_score"] = min(score, 1.0)
|
|
290
|
-
|
|
291
|
-
# Sort by relevance
|
|
292
|
-
return sorted(patterns, key=lambda p: p.get("_relevance_score", 0), reverse=True)
|
|
293
|
-
|
|
294
|
-
def _count_by_type(self, patterns: list[dict]) -> dict[str, int]:
|
|
295
|
-
"""Count patterns by type."""
|
|
296
|
-
counts: dict[str, int] = {}
|
|
297
|
-
for p in patterns:
|
|
298
|
-
pt = p.get("_type", "unknown")
|
|
299
|
-
counts[pt] = counts.get(pt, 0) + 1
|
|
300
|
-
return counts
|
|
301
|
-
|
|
302
|
-
def _calculate_confidence(self, patterns: list[dict], query: str) -> float:
|
|
303
|
-
"""Calculate confidence in retrieval results."""
|
|
304
|
-
if not patterns:
|
|
305
|
-
return 0.3
|
|
306
|
-
|
|
307
|
-
if not query:
|
|
308
|
-
return 0.5
|
|
309
|
-
|
|
310
|
-
# Average relevance score
|
|
311
|
-
avg_relevance = sum(p.get("_relevance_score", 0) for p in patterns) / len(patterns)
|
|
312
|
-
|
|
313
|
-
# Boost if high relevance matches
|
|
314
|
-
if patterns and patterns[0].get("_relevance_score", 0) > 0.7:
|
|
315
|
-
avg_relevance += 0.2
|
|
316
|
-
|
|
317
|
-
return float(min(avg_relevance + 0.3, 1.0))
|
|
318
|
-
|
|
319
|
-
def _generate_predictions(self, patterns: list[dict], context: dict) -> list[dict]:
|
|
320
|
-
"""Generate Level 3 predictions about pattern utility."""
|
|
321
|
-
predictions = []
|
|
322
|
-
|
|
323
|
-
if not patterns:
|
|
324
|
-
predictions.append(
|
|
325
|
-
{
|
|
326
|
-
"type": "no_matches",
|
|
327
|
-
"severity": "info",
|
|
328
|
-
"description": f"No patterns match query '{context.get('query')}'. Consider storing relevant patterns.",
|
|
329
|
-
},
|
|
330
|
-
)
|
|
331
|
-
return predictions
|
|
332
|
-
|
|
333
|
-
# Check for low relevance
|
|
334
|
-
if patterns and patterns[0].get("_relevance_score", 0) < 0.4:
|
|
335
|
-
predictions.append(
|
|
336
|
-
{
|
|
337
|
-
"type": "low_relevance",
|
|
338
|
-
"severity": "info",
|
|
339
|
-
"description": "Top results have low relevance. Consider refining your query.",
|
|
340
|
-
},
|
|
341
|
-
)
|
|
342
|
-
|
|
343
|
-
# Check pattern age (if dates available)
|
|
344
|
-
for p in patterns[:3]:
|
|
345
|
-
if "date" in p:
|
|
346
|
-
try:
|
|
347
|
-
pattern_date = datetime.fromisoformat(p["date"].replace("Z", "+00:00"))
|
|
348
|
-
age_days = (datetime.now(pattern_date.tzinfo) - pattern_date).days
|
|
349
|
-
if age_days > 90:
|
|
350
|
-
predictions.append(
|
|
351
|
-
{
|
|
352
|
-
"type": "stale_pattern",
|
|
353
|
-
"severity": "warning",
|
|
354
|
-
"description": f"Pattern '{p.get('_id')}' is {age_days} days old. Verify it's still relevant.",
|
|
355
|
-
},
|
|
356
|
-
)
|
|
357
|
-
break
|
|
358
|
-
except (ValueError, TypeError):
|
|
359
|
-
pass
|
|
360
|
-
|
|
361
|
-
return predictions
|
|
362
|
-
|
|
363
|
-
def _generate_recommendations(self, patterns: list[dict], query: str) -> list[str]:
|
|
364
|
-
"""Generate recommendations for using patterns."""
|
|
365
|
-
recommendations = []
|
|
366
|
-
|
|
367
|
-
if not patterns:
|
|
368
|
-
recommendations.append("No patterns found. Run the relevant wizard to store patterns.")
|
|
369
|
-
return recommendations
|
|
370
|
-
|
|
371
|
-
recommendations.append(f"Found {len(patterns)} relevant pattern(s)")
|
|
372
|
-
|
|
373
|
-
# Recommend based on top match type
|
|
374
|
-
top_type = patterns[0].get("_type") if patterns else None
|
|
375
|
-
|
|
376
|
-
if top_type == "debugging":
|
|
377
|
-
recommendations.append("Review bug fixes before implementing similar code")
|
|
378
|
-
elif top_type == "security":
|
|
379
|
-
recommendations.append("Check if security finding matches team decisions")
|
|
380
|
-
elif top_type == "tech_debt":
|
|
381
|
-
recommendations.append("Consider debt hotspots when planning changes")
|
|
382
|
-
|
|
383
|
-
return recommendations
|
|
384
|
-
|
|
385
|
-
|
|
386
|
-
# CLI support
|
|
387
|
-
if __name__ == "__main__":
|
|
388
|
-
import asyncio
|
|
389
|
-
|
|
390
|
-
async def main():
|
|
391
|
-
wizard = PatternRetrieverWizard()
|
|
392
|
-
result = await wizard.analyze(
|
|
393
|
-
{
|
|
394
|
-
"query": "null reference",
|
|
395
|
-
"limit": 5,
|
|
396
|
-
},
|
|
397
|
-
)
|
|
398
|
-
print(json.dumps(result, indent=2, default=str))
|
|
399
|
-
|
|
400
|
-
asyncio.run(main())
|
|
@@ -1,221 +0,0 @@
|
|
|
1
|
-
"""Bottleneck Detector
|
|
2
|
-
|
|
3
|
-
Identifies performance bottlenecks in code.
|
|
4
|
-
|
|
5
|
-
Copyright 2025 Smart AI Memory, LLC
|
|
6
|
-
Licensed under Fair Source 0.9
|
|
7
|
-
"""
|
|
8
|
-
|
|
9
|
-
from dataclasses import dataclass
|
|
10
|
-
from enum import Enum
|
|
11
|
-
from typing import Any
|
|
12
|
-
|
|
13
|
-
from .profiler_parsers import FunctionProfile
|
|
14
|
-
|
|
15
|
-
|
|
16
|
-
class BottleneckType(str, Enum):
|
|
17
|
-
"""Types of performance bottlenecks (str-based enum for easy comparison)"""
|
|
18
|
-
|
|
19
|
-
HOT_PATH = "hot_path" # Function taking most total time
|
|
20
|
-
CPU_BOUND = "cpu_bound" # Heavy computation
|
|
21
|
-
IO_BOUND = "io_bound" # Waiting for I/O
|
|
22
|
-
N_PLUS_ONE = "n_plus_one" # Database N+1 query pattern
|
|
23
|
-
MEMORY_LEAK = "memory_leak" # Growing memory usage
|
|
24
|
-
SYNCHRONOUS_IO = "synchronous_io" # Blocking I/O operations
|
|
25
|
-
|
|
26
|
-
|
|
27
|
-
@dataclass
|
|
28
|
-
class Bottleneck:
|
|
29
|
-
"""Identified performance bottleneck"""
|
|
30
|
-
|
|
31
|
-
type: BottleneckType
|
|
32
|
-
function_name: str
|
|
33
|
-
file_path: str
|
|
34
|
-
line_number: int
|
|
35
|
-
severity: str # "CRITICAL", "HIGH", "MEDIUM", "LOW"
|
|
36
|
-
time_cost: float # seconds
|
|
37
|
-
percent_total: float
|
|
38
|
-
reasoning: str
|
|
39
|
-
fix_suggestion: str
|
|
40
|
-
metadata: dict[str, Any]
|
|
41
|
-
|
|
42
|
-
def to_dict(self) -> dict[str, Any]:
|
|
43
|
-
"""Convert to dictionary"""
|
|
44
|
-
return {
|
|
45
|
-
"type": self.type.value,
|
|
46
|
-
"function_name": self.function_name,
|
|
47
|
-
"file_path": self.file_path,
|
|
48
|
-
"line_number": self.line_number,
|
|
49
|
-
"severity": self.severity,
|
|
50
|
-
"time_cost": self.time_cost,
|
|
51
|
-
"percent_total": self.percent_total,
|
|
52
|
-
"reasoning": self.reasoning,
|
|
53
|
-
"fix_suggestion": self.fix_suggestion,
|
|
54
|
-
"metadata": self.metadata,
|
|
55
|
-
}
|
|
56
|
-
|
|
57
|
-
|
|
58
|
-
class BottleneckDetector:
    """Detects performance bottlenecks from profiling data."""

    def __init__(self):
        # Substrings of a function name that hint at I/O work
        # (files, sockets, HTTP, database access).
        self.io_patterns = [
            "read", "write", "open", "close",
            "socket", "request", "query", "execute",
            "fetch", "select", "insert", "update",
        ]

        # Substrings that hint at CPU-heavy computation.
        self.computation_patterns = [
            "sort", "calculate", "compute", "process", "transform",
            "encode", "decode", "compress", "encrypt", "hash",
        ]

    def detect_bottlenecks(
        self,
        profiles: list[FunctionProfile],
        threshold_percent: float = 5.0,
    ) -> list[Bottleneck]:
        """Detect bottlenecks from profiling data.

        Args:
            profiles: List of function profiles
            threshold_percent: Min percentage of total time to consider

        Returns:
            List of detected bottlenecks, ordered by severity then time cost

        """
        found: list[Bottleneck] = []

        # Walk the biggest time consumers first.
        for profile in sorted(profiles, key=lambda p: p.total_time, reverse=True):
            if profile.percent_total < threshold_percent:
                continue  # below the caller's noise floor

            # Fields shared by every bottleneck raised for this profile.
            shared = {
                "function_name": profile.function_name,
                "file_path": profile.file_path,
                "line_number": profile.line_number,
                "time_cost": profile.total_time,
                "percent_total": profile.percent_total,
            }

            # Hot path: a single function eating >= 20% of the run.
            if profile.percent_total >= 20:
                found.append(
                    Bottleneck(
                        type=BottleneckType.HOT_PATH,
                        severity=self._determine_severity(profile.percent_total),
                        reasoning=f"Consumes {profile.percent_total:.1f}% of total execution time",
                        fix_suggestion=self._suggest_hot_path_fix(profile),
                        metadata={"call_count": profile.call_count},
                        **shared,
                    ),
                )

            # I/O-bound work (named like a read/write/network/db call).
            if self._is_io_bound(profile):
                found.append(
                    Bottleneck(
                        type=BottleneckType.IO_BOUND,
                        severity=self._determine_severity(profile.percent_total),
                        reasoning=f"I/O operation taking {profile.total_time:.2f}s",
                        fix_suggestion=self._suggest_io_fix(profile),
                        metadata={"call_count": profile.call_count},
                        **shared,
                    ),
                )

            # Repeated query calls that look like an N+1 pattern.
            if self._is_n_plus_one(profile):
                found.append(
                    Bottleneck(
                        type=BottleneckType.N_PLUS_ONE,
                        severity="HIGH",
                        reasoning=f"Database query called {profile.call_count} times - potential N+1",
                        fix_suggestion="Add eager loading or batch queries",
                        metadata={"call_count": profile.call_count},
                        **shared,
                    ),
                )

        # Most severe first; within a severity, most expensive first.
        rank = {"CRITICAL": 0, "HIGH": 1, "MEDIUM": 2, "LOW": 3}
        found.sort(key=lambda b: (rank.get(b.severity, 4), -b.time_cost))
        return found

    def _is_io_bound(self, profile: FunctionProfile) -> bool:
        """Return True when the function name suggests I/O work."""
        name = profile.function_name.lower()
        return any(token in name for token in self.io_patterns)

    def _is_cpu_bound(self, profile: FunctionProfile) -> bool:
        """Return True when the function name suggests heavy computation."""
        name = profile.function_name.lower()
        return any(token in name for token in self.computation_patterns)

    def _is_n_plus_one(self, profile: FunctionProfile) -> bool:
        """Heuristically flag a repeated database lookup (N+1 pattern)."""
        name = profile.function_name.lower()
        looks_like_query = any(token in name for token in ("query", "select", "fetch", "get"))
        # Many invocations of the same query function is the classic N+1 signature.
        return looks_like_query and profile.call_count > 50

    def _determine_severity(self, percent_total: float) -> str:
        """Map a share of total runtime onto a severity label."""
        for floor, label in ((30, "CRITICAL"), (20, "HIGH"), (10, "MEDIUM")):
            if percent_total > floor:
                return label
        return "LOW"

    def _suggest_hot_path_fix(self, profile: FunctionProfile) -> str:
        """Pick a remediation hint for a hot-path bottleneck."""
        if self._is_cpu_bound(profile):
            return "Optimize algorithm or consider caching results"
        if self._is_io_bound(profile):
            return "Use async I/O or connection pooling"
        return "Profile function internally to identify specific bottleneck"

    def _suggest_io_fix(self, profile: FunctionProfile) -> str:
        """Pick a remediation hint for an I/O bottleneck."""
        name = profile.function_name.lower()
        if "query" in name or "select" in name:
            return "Add database indexes, use query optimization, or implement caching"
        if "request" in name:
            return "Implement request batching, caching, or use async HTTP client"
        if "file" in name or "read" in name:
            return "Use buffered I/O, async file operations, or caching"
        return "Consider async I/O operations or connection pooling"