empathy-framework 4.6.6-py3-none-any.whl → 4.7.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {empathy_framework-4.6.6.dist-info → empathy_framework-4.7.0.dist-info}/METADATA +7 -6
- empathy_framework-4.7.0.dist-info/RECORD +354 -0
- {empathy_framework-4.6.6.dist-info → empathy_framework-4.7.0.dist-info}/top_level.txt +0 -2
- empathy_healthcare_plugin/monitors/monitoring/__init__.py +9 -9
- empathy_llm_toolkit/agent_factory/__init__.py +6 -6
- empathy_llm_toolkit/agent_factory/adapters/wizard_adapter.py +7 -10
- empathy_llm_toolkit/agents_md/__init__.py +22 -0
- empathy_llm_toolkit/agents_md/loader.py +218 -0
- empathy_llm_toolkit/agents_md/parser.py +271 -0
- empathy_llm_toolkit/agents_md/registry.py +307 -0
- empathy_llm_toolkit/commands/__init__.py +51 -0
- empathy_llm_toolkit/commands/context.py +375 -0
- empathy_llm_toolkit/commands/loader.py +301 -0
- empathy_llm_toolkit/commands/models.py +231 -0
- empathy_llm_toolkit/commands/parser.py +371 -0
- empathy_llm_toolkit/commands/registry.py +429 -0
- empathy_llm_toolkit/config/__init__.py +8 -8
- empathy_llm_toolkit/config/unified.py +3 -7
- empathy_llm_toolkit/context/__init__.py +22 -0
- empathy_llm_toolkit/context/compaction.py +455 -0
- empathy_llm_toolkit/context/manager.py +434 -0
- empathy_llm_toolkit/hooks/__init__.py +24 -0
- empathy_llm_toolkit/hooks/config.py +306 -0
- empathy_llm_toolkit/hooks/executor.py +289 -0
- empathy_llm_toolkit/hooks/registry.py +302 -0
- empathy_llm_toolkit/hooks/scripts/__init__.py +39 -0
- empathy_llm_toolkit/hooks/scripts/evaluate_session.py +201 -0
- empathy_llm_toolkit/hooks/scripts/first_time_init.py +285 -0
- empathy_llm_toolkit/hooks/scripts/pre_compact.py +207 -0
- empathy_llm_toolkit/hooks/scripts/session_end.py +183 -0
- empathy_llm_toolkit/hooks/scripts/session_start.py +163 -0
- empathy_llm_toolkit/hooks/scripts/suggest_compact.py +225 -0
- empathy_llm_toolkit/learning/__init__.py +30 -0
- empathy_llm_toolkit/learning/evaluator.py +438 -0
- empathy_llm_toolkit/learning/extractor.py +514 -0
- empathy_llm_toolkit/learning/storage.py +560 -0
- empathy_llm_toolkit/providers.py +4 -11
- empathy_llm_toolkit/security/__init__.py +17 -17
- empathy_llm_toolkit/utils/tokens.py +2 -5
- empathy_os/__init__.py +202 -70
- empathy_os/cache_monitor.py +5 -3
- empathy_os/cli/__init__.py +11 -55
- empathy_os/cli/__main__.py +29 -15
- empathy_os/cli/commands/inspection.py +21 -12
- empathy_os/cli/commands/memory.py +4 -12
- empathy_os/cli/commands/profiling.py +198 -0
- empathy_os/cli/commands/utilities.py +27 -7
- empathy_os/cli.py +28 -57
- empathy_os/cli_unified.py +525 -1164
- empathy_os/cost_tracker.py +9 -3
- empathy_os/dashboard/server.py +200 -2
- empathy_os/hot_reload/__init__.py +7 -7
- empathy_os/hot_reload/config.py +6 -7
- empathy_os/hot_reload/integration.py +35 -35
- empathy_os/hot_reload/reloader.py +57 -57
- empathy_os/hot_reload/watcher.py +28 -28
- empathy_os/hot_reload/websocket.py +2 -2
- empathy_os/memory/__init__.py +11 -4
- empathy_os/memory/claude_memory.py +1 -1
- empathy_os/memory/cross_session.py +8 -12
- empathy_os/memory/edges.py +6 -6
- empathy_os/memory/file_session.py +770 -0
- empathy_os/memory/graph.py +30 -30
- empathy_os/memory/nodes.py +6 -6
- empathy_os/memory/short_term.py +15 -9
- empathy_os/memory/unified.py +606 -140
- empathy_os/meta_workflows/agent_creator.py +3 -9
- empathy_os/meta_workflows/cli_meta_workflows.py +113 -53
- empathy_os/meta_workflows/form_engine.py +6 -18
- empathy_os/meta_workflows/intent_detector.py +64 -24
- empathy_os/meta_workflows/models.py +3 -1
- empathy_os/meta_workflows/pattern_learner.py +13 -31
- empathy_os/meta_workflows/plan_generator.py +55 -47
- empathy_os/meta_workflows/session_context.py +2 -3
- empathy_os/meta_workflows/workflow.py +20 -51
- empathy_os/models/cli.py +2 -2
- empathy_os/models/tasks.py +1 -2
- empathy_os/models/telemetry.py +4 -1
- empathy_os/models/token_estimator.py +3 -1
- empathy_os/monitoring/alerts.py +938 -9
- empathy_os/monitoring/alerts_cli.py +346 -183
- empathy_os/orchestration/execution_strategies.py +12 -29
- empathy_os/orchestration/pattern_learner.py +20 -26
- empathy_os/orchestration/real_tools.py +6 -15
- empathy_os/platform_utils.py +2 -1
- empathy_os/plugins/__init__.py +2 -2
- empathy_os/plugins/base.py +64 -64
- empathy_os/plugins/registry.py +32 -32
- empathy_os/project_index/index.py +49 -15
- empathy_os/project_index/models.py +1 -2
- empathy_os/project_index/reports.py +1 -1
- empathy_os/project_index/scanner.py +1 -0
- empathy_os/redis_memory.py +10 -7
- empathy_os/resilience/__init__.py +1 -1
- empathy_os/resilience/health.py +10 -10
- empathy_os/routing/__init__.py +7 -7
- empathy_os/routing/chain_executor.py +37 -37
- empathy_os/routing/classifier.py +36 -36
- empathy_os/routing/smart_router.py +40 -40
- empathy_os/routing/{wizard_registry.py → workflow_registry.py} +47 -47
- empathy_os/scaffolding/__init__.py +8 -8
- empathy_os/scaffolding/__main__.py +1 -1
- empathy_os/scaffolding/cli.py +28 -28
- empathy_os/socratic/__init__.py +3 -19
- empathy_os/socratic/ab_testing.py +25 -36
- empathy_os/socratic/blueprint.py +38 -38
- empathy_os/socratic/cli.py +34 -20
- empathy_os/socratic/collaboration.py +30 -28
- empathy_os/socratic/domain_templates.py +9 -1
- empathy_os/socratic/embeddings.py +17 -13
- empathy_os/socratic/engine.py +135 -70
- empathy_os/socratic/explainer.py +70 -60
- empathy_os/socratic/feedback.py +24 -19
- empathy_os/socratic/forms.py +15 -10
- empathy_os/socratic/generator.py +51 -35
- empathy_os/socratic/llm_analyzer.py +25 -23
- empathy_os/socratic/mcp_server.py +99 -159
- empathy_os/socratic/session.py +19 -13
- empathy_os/socratic/storage.py +98 -67
- empathy_os/socratic/success.py +38 -27
- empathy_os/socratic/visual_editor.py +51 -39
- empathy_os/socratic/web_ui.py +99 -66
- empathy_os/telemetry/cli.py +3 -1
- empathy_os/telemetry/usage_tracker.py +1 -3
- empathy_os/test_generator/__init__.py +3 -3
- empathy_os/test_generator/cli.py +28 -28
- empathy_os/test_generator/generator.py +64 -66
- empathy_os/test_generator/risk_analyzer.py +11 -11
- empathy_os/vscode_bridge.py +173 -0
- empathy_os/workflows/__init__.py +212 -120
- empathy_os/workflows/batch_processing.py +8 -24
- empathy_os/workflows/bug_predict.py +1 -1
- empathy_os/workflows/code_review.py +20 -5
- empathy_os/workflows/code_review_pipeline.py +13 -8
- empathy_os/workflows/keyboard_shortcuts/workflow.py +6 -2
- empathy_os/workflows/manage_documentation.py +1 -0
- empathy_os/workflows/orchestrated_health_check.py +6 -11
- empathy_os/workflows/orchestrated_release_prep.py +3 -3
- empathy_os/workflows/pr_review.py +18 -10
- empathy_os/workflows/progressive/__init__.py +2 -12
- empathy_os/workflows/progressive/cli.py +14 -37
- empathy_os/workflows/progressive/core.py +12 -12
- empathy_os/workflows/progressive/orchestrator.py +166 -144
- empathy_os/workflows/progressive/reports.py +22 -31
- empathy_os/workflows/progressive/telemetry.py +8 -14
- empathy_os/workflows/progressive/test_gen.py +29 -48
- empathy_os/workflows/progressive/workflow.py +31 -70
- empathy_os/workflows/release_prep.py +21 -6
- empathy_os/workflows/release_prep_crew.py +1 -0
- empathy_os/workflows/secure_release.py +13 -6
- empathy_os/workflows/security_audit.py +8 -3
- empathy_os/workflows/test_coverage_boost_crew.py +3 -2
- empathy_os/workflows/test_maintenance_crew.py +1 -0
- empathy_os/workflows/test_runner.py +16 -12
- empathy_software_plugin/SOFTWARE_PLUGIN_README.md +25 -703
- empathy_software_plugin/cli.py +0 -122
- coach_wizards/__init__.py +0 -45
- coach_wizards/accessibility_wizard.py +0 -91
- coach_wizards/api_wizard.py +0 -91
- coach_wizards/base_wizard.py +0 -209
- coach_wizards/cicd_wizard.py +0 -91
- coach_wizards/code_reviewer_README.md +0 -60
- coach_wizards/code_reviewer_wizard.py +0 -180
- coach_wizards/compliance_wizard.py +0 -91
- coach_wizards/database_wizard.py +0 -91
- coach_wizards/debugging_wizard.py +0 -91
- coach_wizards/documentation_wizard.py +0 -91
- coach_wizards/generate_wizards.py +0 -347
- coach_wizards/localization_wizard.py +0 -173
- coach_wizards/migration_wizard.py +0 -91
- coach_wizards/monitoring_wizard.py +0 -91
- coach_wizards/observability_wizard.py +0 -91
- coach_wizards/performance_wizard.py +0 -91
- coach_wizards/prompt_engineering_wizard.py +0 -661
- coach_wizards/refactoring_wizard.py +0 -91
- coach_wizards/scaling_wizard.py +0 -90
- coach_wizards/security_wizard.py +0 -92
- coach_wizards/testing_wizard.py +0 -91
- empathy_framework-4.6.6.dist-info/RECORD +0 -410
- empathy_llm_toolkit/wizards/__init__.py +0 -43
- empathy_llm_toolkit/wizards/base_wizard.py +0 -364
- empathy_llm_toolkit/wizards/customer_support_wizard.py +0 -190
- empathy_llm_toolkit/wizards/healthcare_wizard.py +0 -378
- empathy_llm_toolkit/wizards/patient_assessment_README.md +0 -64
- empathy_llm_toolkit/wizards/patient_assessment_wizard.py +0 -193
- empathy_llm_toolkit/wizards/technology_wizard.py +0 -209
- empathy_os/wizard_factory_cli.py +0 -170
- empathy_software_plugin/wizards/__init__.py +0 -42
- empathy_software_plugin/wizards/advanced_debugging_wizard.py +0 -395
- empathy_software_plugin/wizards/agent_orchestration_wizard.py +0 -511
- empathy_software_plugin/wizards/ai_collaboration_wizard.py +0 -503
- empathy_software_plugin/wizards/ai_context_wizard.py +0 -441
- empathy_software_plugin/wizards/ai_documentation_wizard.py +0 -503
- empathy_software_plugin/wizards/base_wizard.py +0 -288
- empathy_software_plugin/wizards/book_chapter_wizard.py +0 -519
- empathy_software_plugin/wizards/code_review_wizard.py +0 -604
- empathy_software_plugin/wizards/debugging/__init__.py +0 -50
- empathy_software_plugin/wizards/debugging/bug_risk_analyzer.py +0 -414
- empathy_software_plugin/wizards/debugging/config_loaders.py +0 -446
- empathy_software_plugin/wizards/debugging/fix_applier.py +0 -469
- empathy_software_plugin/wizards/debugging/language_patterns.py +0 -385
- empathy_software_plugin/wizards/debugging/linter_parsers.py +0 -470
- empathy_software_plugin/wizards/debugging/verification.py +0 -369
- empathy_software_plugin/wizards/enhanced_testing_wizard.py +0 -537
- empathy_software_plugin/wizards/memory_enhanced_debugging_wizard.py +0 -816
- empathy_software_plugin/wizards/multi_model_wizard.py +0 -501
- empathy_software_plugin/wizards/pattern_extraction_wizard.py +0 -422
- empathy_software_plugin/wizards/pattern_retriever_wizard.py +0 -400
- empathy_software_plugin/wizards/performance/__init__.py +0 -9
- empathy_software_plugin/wizards/performance/bottleneck_detector.py +0 -221
- empathy_software_plugin/wizards/performance/profiler_parsers.py +0 -278
- empathy_software_plugin/wizards/performance/trajectory_analyzer.py +0 -429
- empathy_software_plugin/wizards/performance_profiling_wizard.py +0 -305
- empathy_software_plugin/wizards/prompt_engineering_wizard.py +0 -425
- empathy_software_plugin/wizards/rag_pattern_wizard.py +0 -461
- empathy_software_plugin/wizards/security/__init__.py +0 -32
- empathy_software_plugin/wizards/security/exploit_analyzer.py +0 -290
- empathy_software_plugin/wizards/security/owasp_patterns.py +0 -241
- empathy_software_plugin/wizards/security/vulnerability_scanner.py +0 -604
- empathy_software_plugin/wizards/security_analysis_wizard.py +0 -322
- empathy_software_plugin/wizards/security_learning_wizard.py +0 -740
- empathy_software_plugin/wizards/tech_debt_wizard.py +0 -726
- empathy_software_plugin/wizards/testing/__init__.py +0 -27
- empathy_software_plugin/wizards/testing/coverage_analyzer.py +0 -459
- empathy_software_plugin/wizards/testing/quality_analyzer.py +0 -525
- empathy_software_plugin/wizards/testing/test_suggester.py +0 -533
- empathy_software_plugin/wizards/testing_wizard.py +0 -274
- wizards/__init__.py +0 -82
- wizards/admission_assessment_wizard.py +0 -644
- wizards/care_plan.py +0 -321
- wizards/clinical_assessment.py +0 -769
- wizards/discharge_planning.py +0 -77
- wizards/discharge_summary_wizard.py +0 -468
- wizards/dosage_calculation.py +0 -497
- wizards/incident_report_wizard.py +0 -454
- wizards/medication_reconciliation.py +0 -85
- wizards/nursing_assessment.py +0 -171
- wizards/patient_education.py +0 -654
- wizards/quality_improvement.py +0 -705
- wizards/sbar_report.py +0 -324
- wizards/sbar_wizard.py +0 -608
- wizards/shift_handoff_wizard.py +0 -535
- wizards/soap_note_wizard.py +0 -679
- wizards/treatment_plan.py +0 -15
- {empathy_framework-4.6.6.dist-info → empathy_framework-4.7.0.dist-info}/WHEEL +0 -0
- {empathy_framework-4.6.6.dist-info → empathy_framework-4.7.0.dist-info}/entry_points.txt +0 -0
- {empathy_framework-4.6.6.dist-info → empathy_framework-4.7.0.dist-info}/licenses/LICENSE +0 -0
empathy_os/routing/classifier.py
CHANGED
@@ -1,7 +1,7 @@
 """LLM-based Request Classifier
 
 Uses a cheap model (Haiku) to classify developer requests
-and route them to appropriate
+and route them to appropriate workflow(s).
 
 Copyright 2025 Smart AI Memory, LLC
 Licensed under Fair Source 0.9
@@ -12,15 +12,15 @@ import os
 from dataclasses import dataclass, field
 from typing import Any
 
-from .
+from .workflow_registry import WorkflowRegistry
 
 
 @dataclass
 class ClassificationResult:
     """Result of classifying a developer request."""
 
-
-
+    primary_workflow: str
+    secondary_workflows: list[str] = field(default_factory=list)
     confidence: float = 0.0
     reasoning: str = ""
     suggested_chain: list[str] = field(default_factory=list)
@@ -28,7 +28,7 @@ class ClassificationResult:
 
 
 class HaikuClassifier:
-    """Uses Claude Haiku to classify requests to
+    """Uses Claude Haiku to classify requests to workflows.
 
     Why Haiku:
     - Cheapest tier model
@@ -46,7 +46,7 @@ class HaikuClassifier:
         """
         self._api_key = api_key or os.getenv("ANTHROPIC_API_KEY")
         self._client = None
-        self._registry =
+        self._registry = WorkflowRegistry()
 
     def _get_client(self):
         """Lazy-load the Anthropic client."""
@@ -63,41 +63,41 @@ class HaikuClassifier:
         self,
         request: str,
         context: dict[str, Any] | None = None,
-
+        available_workflows: dict[str, str] | None = None,
     ) -> ClassificationResult:
-        """Classify a developer request and determine which
+        """Classify a developer request and determine which workflow(s) to invoke.
 
         Args:
             request: The developer's natural language request
             context: Optional context (current file, project type, etc.)
-
+            available_workflows: Override for available workflow descriptions
 
         Returns:
-            ClassificationResult with primary and secondary
+            ClassificationResult with primary and secondary workflow recommendations
 
         """
-        if
-
+        if available_workflows is None:
+            available_workflows = self._registry.get_descriptions_for_classification()
 
         # Build classification prompt
-
+        workflow_list = "\n".join(f"- {name}: {desc}" for name, desc in available_workflows.items())
 
         context_str = ""
         if context:
             context_str = f"\n\nContext:\n{json.dumps(context, indent=2)}"
 
-        system_prompt = """You are a request router that classifies requests to the appropriate
+        system_prompt = """You are a request router that classifies requests to the appropriate workflow.
 
 Analyze the request and determine:
-1. The PRIMARY
-2. Any SECONDARY
+1. The PRIMARY workflow that best handles this request
+2. Any SECONDARY workflows that could provide additional value
 3. Your confidence level (0.0 - 1.0)
 4. Brief reasoning for your choice
 
 Respond in JSON format:
 {
-    "
-    "
+    "primary_workflow": "workflow-name",
+    "secondary_workflows": ["workflow-name-2"],
     "confidence": 0.85,
     "reasoning": "Brief explanation",
     "extracted_context": {
@@ -106,8 +106,8 @@ Respond in JSON format:
     }
 }"""
 
-        user_prompt = f"""Available
-{
+        user_prompt = f"""Available workflows:
+{workflow_list}
 
 Developer request: "{request}"{context_str}
 
@@ -136,8 +136,8 @@ Classify this request."""
 
             data = json.loads(content.strip())
             return ClassificationResult(
-
-
+                primary_workflow=data.get("primary_workflow", "code-review"),
+                secondary_workflows=data.get("secondary_workflows", []),
                 confidence=data.get("confidence", 0.5),
                 reasoning=data.get("reasoning", ""),
                 extracted_context=data.get("extracted_context", {}),
@@ -149,22 +149,22 @@ Classify this request."""
             print(f"LLM classification error: {e}")
 
             # Fallback to keyword-based classification
-            return self._keyword_classify(request,
+            return self._keyword_classify(request, available_workflows)
 
     def _keyword_classify(
         self,
         request: str,
-
+        available_workflows: dict[str, str],
     ) -> ClassificationResult:
         """Fallback keyword-based classification."""
         request_lower = request.lower()
 
-        # Score each
+        # Score each workflow based on keyword matches
        scores: dict[str, float] = {}
 
-        for
+        for workflow in self._registry.list_all():
             score = 0.0
-            for keyword in
+            for keyword in workflow.keywords:
                 if keyword in request_lower:
                     score += 1.0
                     # Exact word match bonus
@@ -172,25 +172,25 @@ Classify this request."""
                         score += 0.5
 
             if score > 0:
-                scores[
+                scores[workflow.name] = score
 
         if not scores:
             # Default to code-review
             return ClassificationResult(
-
+                primary_workflow="code-review",
                 confidence=0.3,
                 reasoning="No keyword matches, defaulting to code-review",
             )
 
         # Sort by score
-
-        primary =
-        primary_score =
+        sorted_workflows = sorted(scores.items(), key=lambda x: x[1], reverse=True)
+        primary = sorted_workflows[0][0]
+        primary_score = sorted_workflows[0][1]
 
         # Get secondary if significantly different
         secondary = []
-        if len(
-            for name, score in
+        if len(sorted_workflows) > 1:
+            for name, score in sorted_workflows[1:3]:
                 if score >= primary_score * 0.5:
                     secondary.append(name)
 
@@ -199,8 +199,8 @@ Classify this request."""
         confidence = min(primary_score / max_possible, 1.0)
 
         return ClassificationResult(
-
-
+            primary_workflow=primary,
+            secondary_workflows=secondary,
            confidence=confidence,
            reasoning=f"Keyword match score: {primary_score}",
        )
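The hunks above rename the classification fields (primary_workflow, secondary_workflows) and parse the model's JSON reply with the new keys. A minimal sketch of how that JSON maps onto ClassificationResult, assuming the module is importable at its file path; the payload below is illustrative, not output from a real model call:

    # Illustrative mapping of the new JSON keys onto ClassificationResult (fields shown in the diff above).
    import json

    from empathy_os.routing.classifier import ClassificationResult

    payload = json.loads(
        '{"primary_workflow": "security-audit", "secondary_workflows": ["code-review"],'
        ' "confidence": 0.85, "reasoning": "Mentions injection risk", "extracted_context": {}}'
    )
    result = ClassificationResult(
        primary_workflow=payload.get("primary_workflow", "code-review"),
        secondary_workflows=payload.get("secondary_workflows", []),
        confidence=payload.get("confidence", 0.5),
        reasoning=payload.get("reasoning", ""),
        extracted_context=payload.get("extracted_context", {}),
    )
    print(result.primary_workflow, result.secondary_workflows)  # security-audit ['code-review']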
empathy_os/routing/smart_router.py
CHANGED
@@ -1,7 +1,7 @@
 """Smart Router
 
 Intelligent dispatcher that analyzes developer input and routes
-to the appropriate
+to the appropriate workflow(s) using LLM classification.
 
 Copyright 2025 Smart AI Memory, LLC
 Licensed under Fair Source 0.9
@@ -11,15 +11,15 @@ from dataclasses import dataclass, field
 from typing import Any
 
 from .classifier import ClassificationResult, HaikuClassifier
-from .
+from .workflow_registry import WorkflowInfo, WorkflowRegistry
 
 
 @dataclass
 class RoutingDecision:
     """Decision from the smart router."""
 
-
-
+    primary_workflow: str
+    secondary_workflows: list[str] = field(default_factory=list)
     confidence: float = 0.0
     reasoning: str = ""
     suggested_chain: list[str] = field(default_factory=list)
@@ -31,17 +31,17 @@ class RoutingDecision:
 
 
 class SmartRouter:
-    """Routes developer requests to appropriate
+    """Routes developer requests to appropriate workflow(s).
 
     Uses LLM classification (Haiku) to understand natural language
-    requests and route them to the best
+    requests and route them to the best workflow(s).
 
     Usage:
         router = SmartRouter()
 
         # Async routing (preferred - uses LLM)
         decision = await router.route("Fix the security issue in auth.py")
-        print(f"Primary: {decision.
+        print(f"Primary: {decision.primary_workflow}")
         print(f"Confidence: {decision.confidence}")
 
         # Sync routing (keyword fallback)
@@ -55,7 +55,7 @@ class SmartRouter:
             api_key: Optional Anthropic API key for LLM classification
 
         """
-        self._registry =
+        self._registry = WorkflowRegistry()
         self._classifier = HaikuClassifier(api_key=api_key)
 
     async def route(
@@ -63,7 +63,7 @@ class SmartRouter:
         request: str,
         context: dict[str, Any] | None = None,
     ) -> RoutingDecision:
-        """Route a request to the appropriate
+        """Route a request to the appropriate workflow(s).
 
         Uses LLM classification for accurate natural language understanding.
 
@@ -72,7 +72,7 @@ class SmartRouter:
             context: Optional context (current file, project info, etc.)
 
         Returns:
-            RoutingDecision with
+            RoutingDecision with workflow recommendations
 
         """
         # Classify the request
@@ -81,15 +81,15 @@ class SmartRouter:
             context=context,
         )
 
-        # Build suggested chain based on
+        # Build suggested chain based on workflow triggers
         suggested_chain = self._build_chain(classification)
 
         # Merge extracted context
         merged_context = {**(context or {}), **classification.extracted_context}
 
         return RoutingDecision(
-
-
+            primary_workflow=classification.primary_workflow,
+            secondary_workflows=classification.secondary_workflows,
             confidence=classification.confidence,
             reasoning=classification.reasoning,
             suggested_chain=suggested_chain,
@@ -112,7 +112,7 @@ class SmartRouter:
             context: Optional context
 
         Returns:
-            RoutingDecision with
+            RoutingDecision with workflow recommendations
 
         """
         classification = self._classifier.classify_sync(
@@ -123,8 +123,8 @@ class SmartRouter:
         suggested_chain = self._build_chain(classification)
 
         return RoutingDecision(
-
-
+            primary_workflow=classification.primary_workflow,
+            secondary_workflows=classification.secondary_workflows,
             confidence=classification.confidence,
             reasoning=classification.reasoning,
             suggested_chain=suggested_chain,
@@ -134,39 +134,39 @@ class SmartRouter:
         )
 
     def _build_chain(self, classification: ClassificationResult) -> list[str]:
-        """Build suggested
-        chain = [classification.
+        """Build suggested workflow chain based on triggers."""
+        chain = [classification.primary_workflow]
 
-        # Add secondary
-        for secondary in classification.
+        # Add secondary workflows to chain
+        for secondary in classification.secondary_workflows:
             if secondary not in chain:
                 chain.append(secondary)
 
-        # Check for auto-chain triggers from primary
-        triggers = self._registry.get_chain_triggers(classification.
+        # Check for auto-chain triggers from primary workflow
+        triggers = self._registry.get_chain_triggers(classification.primary_workflow)
         for trigger in triggers:
-
-            if
-            chain.append(
+            next_workflow = trigger.get("next")
+            if next_workflow and next_workflow not in chain:
+                chain.append(next_workflow)
 
         return chain
 
-    def
-        """Get information about a specific
+    def get_workflow_info(self, name: str) -> WorkflowInfo | None:
+        """Get information about a specific workflow."""
         return self._registry.get(name)
 
-    def
-        """List all available
+    def list_workflows(self) -> list[WorkflowInfo]:
+        """List all available workflows."""
         return self._registry.list_all()
 
     def suggest_for_file(self, file_path: str) -> list[str]:
-        """Suggest
+        """Suggest workflows based on file type.
 
         Args:
             file_path: Path to the file
 
         Returns:
-            List of suggested
+            List of suggested workflow names
 
         """
         suggestions = []
@@ -175,9 +175,9 @@ class SmartRouter:
         ext = "." + file_path.rsplit(".", 1)[-1] if "." in file_path else ""
         filename = file_path.rsplit("/", 1)[-1]
 
-        for
-            if ext in
-                suggestions.append(
+        for workflow in self._registry.list_all():
+            if ext in workflow.handles_file_types or filename in workflow.handles_file_types:
+                suggestions.append(workflow.name)
 
         # Default suggestions if no matches
         if not suggestions:
@@ -186,19 +186,19 @@ class SmartRouter:
         return suggestions
 
     def suggest_for_error(self, error_type: str) -> list[str]:
-        """Suggest
+        """Suggest workflows based on error type.
 
         Args:
             error_type: Type of error (e.g., "TypeError", "SecurityError")
 
         Returns:
-            List of suggested
+            List of suggested workflow names
 
         """
         error_lower = error_type.lower()
 
-        # Map error types to
-
+        # Map error types to workflows
+        error_workflow_map = {
             "security": ["security-audit", "code-review"],
             "type": ["code-review", "bug-predict"],
             "null": ["bug-predict", "test-gen"],
@@ -211,9 +211,9 @@ class SmartRouter:
             "test": ["test-gen", "bug-predict"],
         }
 
-        for keyword,
+        for keyword, workflows in error_workflow_map.items():
             if keyword in error_lower:
-                return
+                return workflows
 
         return ["bug-predict", "code-review"]
 
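RoutingDecision carries the same renamed fields, so callers read decision.primary_workflow and decision.secondary_workflows. A minimal sketch of routing one request, assuming the package is installed and ANTHROPIC_API_KEY is set (without a key the classifier falls back to keyword matching internally); the request string follows the docstring usage shown above:

    # Route a request and read the renamed decision fields.
    import asyncio

    from empathy_os.routing.smart_router import SmartRouter

    async def main() -> None:
        router = SmartRouter()
        decision = await router.route("Fix the security issue in auth.py")
        print(f"Primary: {decision.primary_workflow}")
        print(f"Secondary: {decision.secondary_workflows}")
        print(f"Chain: {decision.suggested_chain}")

        # The sync helpers shown in the diff need no API key at all.
        print(router.suggest_for_error("SecurityError"))  # ['security-audit', 'code-review']

    asyncio.run(main())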
empathy_os/routing/{wizard_registry.py → workflow_registry.py}
CHANGED
@@ -1,6 +1,6 @@
-"""
+"""Workflow Registry
 
-Central registry of available
+Central registry of available workflows with their descriptions,
 capabilities, and auto-chain rules.
 
 Copyright 2025 Smart AI Memory, LLC
@@ -13,8 +13,8 @@ from typing import Any
 
 
 @dataclass
-class
-    """Information about a registered
+class WorkflowInfo:
+    """Information about a registered workflow."""
 
     name: str
     description: str
@@ -29,15 +29,15 @@ class WizardInfo:
     auto_chain: bool = False
     chain_triggers: list[dict[str, Any]] = field(default_factory=list)
 
-    # Optional
-
+    # Optional workflow class or factory
+    workflow_class: type | None = None
     factory: Callable[..., Any] | None = None
 
 
-# Default
-
+# Default workflow registry
+WORKFLOW_REGISTRY: dict[str, WorkflowInfo] = {
     # Workflows (cost-optimized pipelines)
-    "security-audit":
+    "security-audit": WorkflowInfo(
         name="security-audit",
         description="Analyze code for security vulnerabilities, injection risks, compliance",
         keywords=[
@@ -65,7 +65,7 @@ WIZARD_REGISTRY: dict[str, WizardInfo] = {
             {"condition": "vulnerability_type == 'injection'", "next": "code-review"},
         ],
     ),
-    "code-review":
+    "code-review": WorkflowInfo(
         name="code-review",
         description="Review code for quality, best practices, maintainability, and bugs",
         keywords=[
@@ -89,7 +89,7 @@ WIZARD_REGISTRY: dict[str, WizardInfo] = {
             {"condition": "has_complexity_issues", "next": "refactor-plan"},
         ],
     ),
-    "bug-predict":
+    "bug-predict": WorkflowInfo(
         name="bug-predict",
         description="Predict potential bugs based on code patterns and historical data",
         keywords=[
@@ -112,7 +112,7 @@ WIZARD_REGISTRY: dict[str, WizardInfo] = {
             {"condition": "risk_score > 0.7", "next": "test-gen"},
         ],
     ),
-    "perf-audit":
+    "perf-audit": WorkflowInfo(
         name="perf-audit",
         description="Analyze code for performance issues, bottlenecks, optimizations",
         keywords=[
@@ -136,7 +136,7 @@ WIZARD_REGISTRY: dict[str, WizardInfo] = {
             {"condition": "hotspot_count > 5", "next": "refactor-plan"},
         ],
     ),
-    "refactor-plan":
+    "refactor-plan": WorkflowInfo(
         name="refactor-plan",
         description="Plan code refactoring to improve structure, reduce complexity",
         keywords=[
@@ -153,7 +153,7 @@ WIZARD_REGISTRY: dict[str, WizardInfo] = {
         primary_domain="architecture",
         auto_chain=False,  # Require approval for refactoring
     ),
-    "test-gen":
+    "test-gen": WorkflowInfo(
         name="test-gen",
         description="Generate test cases and improve test coverage",
         keywords=[
@@ -175,7 +175,7 @@ WIZARD_REGISTRY: dict[str, WizardInfo] = {
             {"condition": "coverage_low", "next": "bug-predict"},
         ],
     ),
-    "doc-gen":
+    "doc-gen": WorkflowInfo(
         name="doc-gen",
         description="Generate documentation from code including API docs, READMEs, and guides",
         keywords=[
@@ -192,7 +192,7 @@ WIZARD_REGISTRY: dict[str, WizardInfo] = {
         primary_domain="documentation",
         auto_chain=False,
     ),
-    "dependency-check":
+    "dependency-check": WorkflowInfo(
         name="dependency-check",
         description="Audit dependencies for vulnerabilities, updates, and license issues",
         keywords=[
@@ -214,7 +214,7 @@ WIZARD_REGISTRY: dict[str, WizardInfo] = {
             {"condition": "critical_vuln_count > 0", "next": "security-audit"},
         ],
     ),
-    "release-prep":
+    "release-prep": WorkflowInfo(
         name="release-prep",
         description="Pre-release quality gate with health checks, security scan, and changelog",
         keywords=[
@@ -230,7 +230,7 @@ WIZARD_REGISTRY: dict[str, WizardInfo] = {
         primary_domain="release",
         auto_chain=False,  # Always require approval
     ),
-    "research":
+    "research": WorkflowInfo(
         name="research",
         description="Research and synthesize information from multiple sources",
         keywords=[
@@ -249,59 +249,59 @@ WIZARD_REGISTRY: dict[str, WizardInfo] = {
 }
 
 
-class
-    """Registry for managing available
+class WorkflowRegistry:
+    """Registry for managing available workflows.
 
     Usage:
-        registry =
+        registry = WorkflowRegistry()
         info = registry.get("security-audit")
-
+        all_workflows = registry.list_all()
     """
 
     def __init__(self):
-        """Initialize with default
-        self.
+        """Initialize with default workflows."""
+        self._workflows: dict[str, WorkflowInfo] = dict(WORKFLOW_REGISTRY)
 
-    def register(self, info:
-        """Register a new
-        self.
+    def register(self, info: WorkflowInfo) -> None:
+        """Register a new workflow."""
+        self._workflows[info.name] = info
 
-    def get(self, name: str) ->
-        """Get
-        return self.
+    def get(self, name: str) -> WorkflowInfo | None:
+        """Get workflow info by name."""
+        return self._workflows.get(name)
 
-    def list_all(self) -> list[
-        """List all registered
-        return list(self.
+    def list_all(self) -> list[WorkflowInfo]:
+        """List all registered workflows."""
+        return list(self._workflows.values())
 
-    def find_by_domain(self, domain: str) -> list[
-        """Find
-        return [w for w in self.
+    def find_by_domain(self, domain: str) -> list[WorkflowInfo]:
+        """Find workflows by primary domain."""
+        return [w for w in self._workflows.values() if w.primary_domain == domain]
 
-    def find_by_keyword(self, keyword: str) -> list[
-        """Find
+    def find_by_keyword(self, keyword: str) -> list[WorkflowInfo]:
+        """Find workflows that handle a keyword."""
         keyword = keyword.lower()
         return [
-            w for w in self.
+            w for w in self._workflows.values() if any(keyword in kw.lower() for kw in w.keywords)
         ]
 
     def get_descriptions_for_classification(self) -> dict[str, str]:
-        """Get
+        """Get workflow name to description mapping for LLM classification."""
         return {
             name: f"{info.description} (domain: {info.primary_domain})"
-            for name, info in self.
+            for name, info in self._workflows.items()
         }
 
-    def get_chain_triggers(self,
-        """Get auto-chain triggers for a
-        info = self.
+    def get_chain_triggers(self, workflow_name: str) -> list[dict[str, Any]]:
+        """Get auto-chain triggers for a workflow."""
+        info = self._workflows.get(workflow_name)
         if info and info.auto_chain:
             return info.chain_triggers
         return []
 
     def unregister(self, name: str) -> bool:
-        """Remove a
-        if name in self.
-        del self.
+        """Remove a workflow from the registry."""
+        if name in self._workflows:
+            del self._workflows[name]
             return True
         return False
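The registry keeps the same shape under the new names, so custom entries can still be added at runtime. A minimal sketch of registering and looking up a workflow, assuming the import path matches the renamed module and that WorkflowInfo fields not shown in the hunks above have defaults; the "license-scan" entry and its keywords are hypothetical:

    from empathy_os.routing.workflow_registry import WorkflowInfo, WorkflowRegistry

    registry = WorkflowRegistry()
    registry.register(
        WorkflowInfo(
            name="license-scan",  # hypothetical entry, not part of WORKFLOW_REGISTRY
            description="Scan third-party dependencies for license obligations",
            keywords=["license", "spdx", "copyleft"],
            primary_domain="compliance",
            auto_chain=False,
        )
    )

    print([w.name for w in registry.find_by_domain("compliance")])  # includes "license-scan"
    print(registry.get("security-audit").description)               # built-in entry from WORKFLOW_REGISTRY
    print(registry.unregister("license-scan"))                      # True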
empathy_os/scaffolding/__init__.py
CHANGED
@@ -1,24 +1,24 @@
-"""Methodology Scaffolding for
+"""Methodology Scaffolding for Workflow Factory.
 
-Provides CLI tools and methodologies for creating new
+Provides CLI tools and methodologies for creating new workflows quickly
 using proven patterns.
 
 Methodologies:
-- Pattern-Compose: Select patterns, compose
-- TDD-First: Write tests first, implement
+- Pattern-Compose: Select patterns, compose workflow (Recommended)
+- TDD-First: Write tests first, implement workflow
 - Prototype-Refine: Quick prototype, then refactor
 - Risk-Driven: Focus on high-risk paths first
 - Empathy-Centered: Design for user experience
 
 Usage:
-    # Create
-    python -m scaffolding create
+    # Create workflow using Pattern-Compose (recommended)
+    python -m scaffolding create my_workflow --domain healthcare
 
     # Create with specific methodology
-    python -m scaffolding create
+    python -m scaffolding create my_workflow --methodology tdd
 
     # Interactive mode
-    python -m scaffolding create
+    python -m scaffolding create my_workflow --interactive
 
 Copyright 2025 Smart AI Memory, LLC
 Licensed under Fair Source 0.9