adaptive-memory-multi-model-router 1.2.2 → 1.3.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/README.md +146 -66
- package/dist/index.d.ts +1 -1
- package/dist/index.js +1 -1
- package/dist/integrations/airtable.js +20 -0
- package/dist/integrations/discord.js +18 -0
- package/dist/integrations/github.js +23 -0
- package/dist/integrations/gmail.js +19 -0
- package/dist/integrations/google-calendar.js +18 -0
- package/dist/integrations/index.js +61 -0
- package/dist/integrations/jira.js +21 -0
- package/dist/integrations/linear.js +19 -0
- package/dist/integrations/notion.js +19 -0
- package/dist/integrations/slack.js +18 -0
- package/dist/integrations/telegram.js +19 -0
- package/dist/providers/registry.js +7 -3
- package/docs/ARCHITECTURAL-IMPROVEMENTS-2025.md +1391 -0
- package/docs/ARCHITECTURAL-IMPROVEMENTS-REVISED-2025.md +1051 -0
- package/docs/CONFIGURATION.md +476 -0
- package/docs/COUNCIL_DECISION.json +308 -0
- package/docs/COUNCIL_SUMMARY.md +265 -0
- package/docs/COUNCIL_V2.2_DECISION.md +416 -0
- package/docs/IMPROVEMENT_ROADMAP.md +515 -0
- package/docs/LLM_COUNCIL_DECISION.md +508 -0
- package/docs/QUICK_START_VISIBILITY.md +782 -0
- package/docs/REDDIT_GAP_ANALYSIS.md +299 -0
- package/docs/RESEARCH_BACKED_IMPROVEMENTS.md +1180 -0
- package/docs/TMLPD_QNA.md +751 -0
- package/docs/TMLPD_V2.1_COMPLETE.md +763 -0
- package/docs/TMLPD_V2.2_RESEARCH_ROADMAP.md +754 -0
- package/docs/V2.2_IMPLEMENTATION_COMPLETE.md +446 -0
- package/docs/V2_IMPLEMENTATION_GUIDE.md +388 -0
- package/docs/VISIBILITY_ADOPTION_PLAN.md +1005 -0
- package/docs/launch-content/LAUNCH_EXECUTION_CHECKLIST.md +421 -0
- package/docs/launch-content/README.md +457 -0
- package/docs/launch-content/assets/cost_comparison_100_tasks.png +0 -0
- package/docs/launch-content/assets/cumulative_savings.png +0 -0
- package/docs/launch-content/assets/parallel_speedup.png +0 -0
- package/docs/launch-content/assets/provider_pricing_comparison.png +0 -0
- package/docs/launch-content/assets/task_breakdown_comparison.png +0 -0
- package/docs/launch-content/generate_charts.py +313 -0
- package/docs/launch-content/hn_show_post.md +139 -0
- package/docs/launch-content/partner_outreach_templates.md +745 -0
- package/docs/launch-content/reddit_posts.md +467 -0
- package/docs/launch-content/twitter_thread.txt +460 -0
- package/examples/QUICKSTART.md +1 -1
- package/openclaw-alexa-bridge/ALL_REMAINING_FIXES_PLAN.md +313 -0
- package/openclaw-alexa-bridge/REMAINING_FIXES_SUMMARY.md +277 -0
- package/openclaw-alexa-bridge/src/alexa_handler_no_tmlpd.js +1234 -0
- package/openclaw-alexa-bridge/test_fixes.js +77 -0
- package/package.json +120 -29
- package/package.json.tmp +0 -0
- package/qna/TMLPD_QNA.md +3 -3
- package/skill/SKILL.md +2 -2
- package/src/__tests__/integration/tmpld_integration.test.py +540 -0
- package/src/agents/skill_enhanced_agent.py +318 -0
- package/src/memory/__init__.py +15 -0
- package/src/memory/agentic_memory.py +353 -0
- package/src/memory/semantic_memory.py +444 -0
- package/src/memory/simple_memory.py +466 -0
- package/src/memory/working_memory.py +447 -0
- package/src/orchestration/__init__.py +52 -0
- package/src/orchestration/execution_engine.py +353 -0
- package/src/orchestration/halo_orchestrator.py +367 -0
- package/src/orchestration/mcts_workflow.py +498 -0
- package/src/orchestration/role_assigner.py +473 -0
- package/src/orchestration/task_planner.py +522 -0
- package/src/providers/__init__.py +67 -0
- package/src/providers/anthropic.py +304 -0
- package/src/providers/base.py +241 -0
- package/src/providers/cerebras.py +373 -0
- package/src/providers/registry.py +476 -0
- package/src/routing/__init__.py +30 -0
- package/src/routing/universal_router.py +621 -0
- package/src/skills/TMLPD-QUICKREF.md +210 -0
- package/src/skills/TMLPD-SETUP-SUMMARY.md +157 -0
- package/src/skills/TMLPD.md +540 -0
- package/src/skills/__tests__/skill_manager.test.ts +328 -0
- package/src/skills/skill_manager.py +385 -0
- package/src/skills/test-tmlpd.sh +108 -0
- package/src/skills/tmlpd-category.yaml +67 -0
- package/src/skills/tmlpd-monitoring.yaml +188 -0
- package/src/skills/tmlpd-phase.yaml +132 -0
- package/src/state/__init__.py +17 -0
- package/src/state/simple_checkpoint.py +508 -0
- package/src/tmlpd_agent.py +464 -0
- package/src/tmpld_v2.py +427 -0
- package/src/workflows/__init__.py +18 -0
- package/src/workflows/advanced_difficulty_classifier.py +377 -0
- package/src/workflows/chaining_executor.py +417 -0
- package/src/workflows/difficulty_integration.py +209 -0
- package/src/workflows/orchestrator.py +469 -0
- package/src/workflows/orchestrator_executor.py +456 -0
- package/src/workflows/parallelization_executor.py +382 -0
- package/src/workflows/router.py +311 -0
- package/test_integration_simple.py +86 -0
- package/test_mcts_workflow.py +150 -0
- package/test_templd_integration.py +262 -0
- package/test_universal_router.py +275 -0
- package/tmlpd-pi-extension/README.md +36 -0
- package/tmlpd-pi-extension/dist/cache/prefixCache.d.ts +114 -0
- package/tmlpd-pi-extension/dist/cache/prefixCache.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/cache/prefixCache.js +285 -0
- package/tmlpd-pi-extension/dist/cache/prefixCache.js.map +1 -0
- package/tmlpd-pi-extension/dist/cache/responseCache.d.ts +58 -0
- package/tmlpd-pi-extension/dist/cache/responseCache.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/cache/responseCache.js +153 -0
- package/tmlpd-pi-extension/dist/cache/responseCache.js.map +1 -0
- package/tmlpd-pi-extension/dist/cli.js +59 -0
- package/tmlpd-pi-extension/dist/cost/costTracker.d.ts +95 -0
- package/tmlpd-pi-extension/dist/cost/costTracker.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/cost/costTracker.js +240 -0
- package/tmlpd-pi-extension/dist/cost/costTracker.js.map +1 -0
- package/tmlpd-pi-extension/dist/index.d.ts +723 -0
- package/tmlpd-pi-extension/dist/index.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/index.js +239 -0
- package/tmlpd-pi-extension/dist/index.js.map +1 -0
- package/tmlpd-pi-extension/dist/memory/episodicMemory.d.ts +82 -0
- package/tmlpd-pi-extension/dist/memory/episodicMemory.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/memory/episodicMemory.js +145 -0
- package/tmlpd-pi-extension/dist/memory/episodicMemory.js.map +1 -0
- package/tmlpd-pi-extension/dist/orchestration/haloOrchestrator.d.ts +102 -0
- package/tmlpd-pi-extension/dist/orchestration/haloOrchestrator.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/orchestration/haloOrchestrator.js +207 -0
- package/tmlpd-pi-extension/dist/orchestration/haloOrchestrator.js.map +1 -0
- package/tmlpd-pi-extension/dist/orchestration/mctsWorkflow.d.ts +85 -0
- package/tmlpd-pi-extension/dist/orchestration/mctsWorkflow.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/orchestration/mctsWorkflow.js +210 -0
- package/tmlpd-pi-extension/dist/orchestration/mctsWorkflow.js.map +1 -0
- package/tmlpd-pi-extension/dist/providers/localProvider.d.ts +102 -0
- package/tmlpd-pi-extension/dist/providers/localProvider.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/providers/localProvider.js +338 -0
- package/tmlpd-pi-extension/dist/providers/localProvider.js.map +1 -0
- package/tmlpd-pi-extension/dist/providers/registry.d.ts +55 -0
- package/tmlpd-pi-extension/dist/providers/registry.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/providers/registry.js +138 -0
- package/tmlpd-pi-extension/dist/providers/registry.js.map +1 -0
- package/tmlpd-pi-extension/dist/routing/advancedRouter.d.ts +68 -0
- package/tmlpd-pi-extension/dist/routing/advancedRouter.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/routing/advancedRouter.js +332 -0
- package/tmlpd-pi-extension/dist/routing/advancedRouter.js.map +1 -0
- package/tmlpd-pi-extension/dist/tools/tmlpdTools.d.ts +101 -0
- package/tmlpd-pi-extension/dist/tools/tmlpdTools.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/tools/tmlpdTools.js +368 -0
- package/tmlpd-pi-extension/dist/tools/tmlpdTools.js.map +1 -0
- package/tmlpd-pi-extension/dist/utils/batchProcessor.d.ts +96 -0
- package/tmlpd-pi-extension/dist/utils/batchProcessor.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/utils/batchProcessor.js +170 -0
- package/tmlpd-pi-extension/dist/utils/batchProcessor.js.map +1 -0
- package/tmlpd-pi-extension/dist/utils/compression.d.ts +61 -0
- package/tmlpd-pi-extension/dist/utils/compression.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/utils/compression.js +281 -0
- package/tmlpd-pi-extension/dist/utils/compression.js.map +1 -0
- package/tmlpd-pi-extension/dist/utils/reliability.d.ts +74 -0
- package/tmlpd-pi-extension/dist/utils/reliability.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/utils/reliability.js +177 -0
- package/tmlpd-pi-extension/dist/utils/reliability.js.map +1 -0
- package/tmlpd-pi-extension/dist/utils/speculativeDecoding.d.ts +117 -0
- package/tmlpd-pi-extension/dist/utils/speculativeDecoding.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/utils/speculativeDecoding.js +246 -0
- package/tmlpd-pi-extension/dist/utils/speculativeDecoding.js.map +1 -0
- package/tmlpd-pi-extension/dist/utils/tokenUtils.d.ts +50 -0
- package/tmlpd-pi-extension/dist/utils/tokenUtils.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/utils/tokenUtils.js +124 -0
- package/tmlpd-pi-extension/dist/utils/tokenUtils.js.map +1 -0
- package/tmlpd-pi-extension/examples/QUICKSTART.md +183 -0
- package/tmlpd-pi-extension/package-lock.json +75 -0
- package/tmlpd-pi-extension/package.json +172 -0
- package/tmlpd-pi-extension/python/examples.py +53 -0
- package/tmlpd-pi-extension/python/integrations.py +330 -0
- package/tmlpd-pi-extension/python/setup.py +28 -0
- package/tmlpd-pi-extension/python/tmlpd.py +369 -0
- package/tmlpd-pi-extension/qna/REDDIT_GAP_ANALYSIS.md +299 -0
- package/tmlpd-pi-extension/qna/TMLPD_QNA.md +751 -0
- package/tmlpd-pi-extension/skill/SKILL.md +238 -0
- package/{src → tmlpd-pi-extension/src}/index.ts +1 -1
- package/tmlpd-pi-extension/tsconfig.json +18 -0
- package/demo/research-demo.js +0 -266
- package/notebooks/quickstart.ipynb +0 -157
- package/rust/tmlpd.h +0 -268
- package/src/cache/prefixCache.ts +0 -365
- package/src/routing/advancedRouter.ts +0 -406
- package/src/utils/speculativeDecoding.ts +0 -344
- /package/{src → tmlpd-pi-extension/src}/cache/responseCache.ts +0 -0
- /package/{src → tmlpd-pi-extension/src}/cost/costTracker.ts +0 -0
- /package/{src → tmlpd-pi-extension/src}/memory/episodicMemory.ts +0 -0
- /package/{src → tmlpd-pi-extension/src}/orchestration/haloOrchestrator.ts +0 -0
- /package/{src → tmlpd-pi-extension/src}/orchestration/mctsWorkflow.ts +0 -0
- /package/{src → tmlpd-pi-extension/src}/providers/localProvider.ts +0 -0
- /package/{src → tmlpd-pi-extension/src}/providers/registry.ts +0 -0
- /package/{src → tmlpd-pi-extension/src}/tools/tmlpdTools.ts +0 -0
- /package/{src → tmlpd-pi-extension/src}/utils/batchProcessor.ts +0 -0
- /package/{src → tmlpd-pi-extension/src}/utils/compression.ts +0 -0
- /package/{src → tmlpd-pi-extension/src}/utils/reliability.ts +0 -0
- /package/{src → tmlpd-pi-extension/src}/utils/tokenUtils.ts +0 -0
|
@@ -0,0 +1,417 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Phase 4a: Chaining Executor
|
|
3
|
+
|
|
4
|
+
Sequential task execution with context passing.
|
|
5
|
+
Based on agent orchestration patterns from arXiv:2506.12508.
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
import asyncio
|
|
9
|
+
from typing import Dict, List, Any, Optional, Callable
|
|
10
|
+
from datetime import datetime
|
|
11
|
+
from ..providers.registry import MultiProviderExecutor
|
|
12
|
+
from ..skills.skill_manager import SkillManager
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
class ChainingExecutor:
    """
    Execute tasks sequentially with context passing between steps.

    Based on agent orchestration patterns (arXiv:2506.12508)

    Features:
    - Sequential step execution
    - Context passing between steps
    - Conditional branching
    - Error handling and rollback
    - Progress tracking
    """

    def __init__(
        self,
        provider_executor: Optional["MultiProviderExecutor"] = None,
        skill_manager: Optional["SkillManager"] = None
    ):
        """
        Initialize chaining executor.

        Args:
            provider_executor: Multi-provider executor for LLM calls
                (required only for steps of type "llm").
            skill_manager: Skill manager for loading skills.
        """
        # NOTE: the project-type annotations are string forward refs so the
        # class can be defined even when the provider/skill modules are not
        # importable (e.g. in isolated tests).
        self.provider_executor = provider_executor
        # NOTE(review): skill_manager is stored but not used by any method in
        # this class — presumably reserved for future skill-aware steps.
        self.skill_manager = skill_manager

    async def execute_chain(
        self,
        steps: List[Dict[str, Any]],
        initial_context: Optional[Dict[str, Any]] = None,
        continue_on_error: bool = False
    ) -> Dict[str, Any]:
        """
        Execute a chain of tasks sequentially.

        Each step may declare:
        - "condition": skipped (not failed) when it evaluates falsy.
        - "output_key": step result is stored in context under this key.
        - "name": step result is also stored as ``step_<name>``.

        Args:
            steps: List of step definitions.
            initial_context: Initial context to pass to first step.
            continue_on_error: Whether to continue if a step fails.

        Returns:
            Execution result with success flag, per-step results, final
            context, and timing metadata.
        """
        context = initial_context or {}
        step_results: List[Dict[str, Any]] = []

        start_time = datetime.now()

        for i, step in enumerate(steps):
            step_number = i + 1
            step_start = datetime.now()

            print(f"\n🔗 Executing Step {step_number}/{len(steps)}: {step.get('name', 'Unnamed')}")

            # Conditional gating: a falsy condition skips the step entirely.
            if "condition" in step:
                if not self._evaluate_condition(step["condition"], context):
                    print(f" ⏭️ Skipped (condition not met)")
                    step_results.append({
                        "step": step_number,
                        "name": step.get("name", "Unnamed"),
                        "status": "skipped",
                        "condition": step["condition"]
                    })
                    continue

            # Execute step
            try:
                step_result = await self._execute_step(step, context)

                step_end = datetime.now()
                execution_time = (step_end - step_start).total_seconds()

                # Update context with step output
                if "output_key" in step:
                    context[step["output_key"]] = step_result.get("result")

                # Add to context with step name
                if "name" in step:
                    context[f"step_{step.get('name')}"] = step_result.get("result")

                # NOTE(review): a step is recorded "success" whenever it does
                # not raise, even if the inner result carries success=False
                # (e.g. a failed LLM response) — confirm this is intended.
                result_entry = {
                    "step": step_number,
                    "name": step.get("name", "Unnamed"),
                    "status": "success",
                    "result": step_result.get("result"),
                    "execution_time": execution_time,
                    "timestamp": step_end.isoformat()
                }

                step_results.append(result_entry)

                print(f" ✅ Success ({execution_time:.2f}s)")

            except Exception as e:
                step_end = datetime.now()
                execution_time = (step_end - step_start).total_seconds()

                error_result = {
                    "step": step_number,
                    "name": step.get("name", "Unnamed"),
                    "status": "error",
                    "error": str(e),
                    "execution_time": execution_time,
                    "timestamp": step_end.isoformat()
                }

                step_results.append(error_result)

                print(f" ❌ Error: {e}")

                # Decide whether to continue
                if not continue_on_error:
                    print(f"\n🛑 Stopping chain execution due to error")
                    break
                else:
                    print(f" ⚠️ Continuing despite error")

        end_time = datetime.now()
        total_time = (end_time - start_time).total_seconds()

        successful_steps = sum(
            1 for r in step_results if r["status"] == "success"
        )
        # BUG FIX: previously computed as len(steps) - successful_steps, which
        # miscounted skipped steps — and steps never reached after an early
        # break — as failures. Count actual errors instead.
        failed_steps = sum(
            1 for r in step_results if r["status"] == "error"
        )

        return {
            "success": all(r["status"] in ["success", "skipped"] for r in step_results),
            "total_steps": len(steps),
            "successful_steps": successful_steps,
            "failed_steps": failed_steps,
            "step_results": step_results,
            "final_context": context,
            "total_time": total_time,
            "started_at": start_time.isoformat(),
            "completed_at": end_time.isoformat()
        }

    async def _execute_step(
        self,
        step: Dict[str, Any],
        context: Dict[str, Any]
    ) -> Dict[str, Any]:
        """
        Execute a single step by dispatching on its "type" (default "llm").

        Args:
            step: Step definition.
            context: Current context.

        Returns:
            Step result dict (at minimum "result" and "success").

        Raises:
            ValueError: If the step type is not recognized.
        """
        step_type = step.get("type", "llm")

        if step_type == "llm":
            return await self._execute_llm_step(step, context)

        elif step_type == "function":
            return await self._execute_function_step(step, context)

        elif step_type == "parallel":
            return await self._execute_parallel_step(step, context)

        elif step_type == "conditional":
            return await self._execute_conditional_step(step, context)

        else:
            raise ValueError(f"Unknown step type: {step_type}")

    async def _execute_llm_step(
        self,
        step: Dict[str, Any],
        context: Dict[str, Any]
    ) -> Dict[str, Any]:
        """Execute LLM-based step via the configured provider executor."""
        if not self.provider_executor:
            raise ValueError("provider_executor required for LLM steps")

        # Build prompt with context variables substituted into the template.
        prompt = self._build_prompt_with_context(step["prompt"], context)

        # Execute with provider; extra step-level parameters pass through.
        response = await self.provider_executor.execute({
            "description": prompt,
            **step.get("execution_params", {})
        })

        return {
            # On failure the error string becomes the "result" payload.
            "result": response.content if response.success else response.error,
            "success": response.success,
            "tokens_used": response.tokens_used,
            "cost": response.cost
        }

    async def _execute_function_step(
        self,
        step: Dict[str, Any],
        context: Dict[str, Any]
    ) -> Dict[str, Any]:
        """Execute a plain (sync or async) callable step."""
        if "function" not in step:
            raise ValueError("function step requires 'function' key")

        func = step["function"]

        # Prepare arguments; context placeholders in kwargs are substituted.
        args = step.get("args", [])
        kwargs = step.get("kwargs", {})
        kwargs = self._replace_context_variables(kwargs, context)

        # Await coroutine functions; call regular callables directly.
        if asyncio.iscoroutinefunction(func):
            result = await func(*args, **kwargs)
        else:
            result = func(*args, **kwargs)

        return {
            "result": result,
            "success": True
        }

    async def _execute_parallel_step(
        self,
        step: Dict[str, Any],
        context: Dict[str, Any]
    ) -> Dict[str, Any]:
        """Execute sub-steps concurrently; individual failures are captured."""
        if "sub_steps" not in step:
            raise ValueError("parallel step requires 'sub_steps' key")

        # Create tasks for all sub-steps (they share the same context).
        tasks = []
        for sub_step in step["sub_steps"]:
            task = self._execute_step(sub_step, context)
            tasks.append(task)

        # return_exceptions=True so one failing sub-step doesn't cancel the rest.
        results = await asyncio.gather(*tasks, return_exceptions=True)

        # Normalize results: exceptions become error entries.
        processed_results = []
        for i, result in enumerate(results):
            if isinstance(result, Exception):
                processed_results.append({
                    "sub_step": i + 1,
                    "status": "error",
                    "error": str(result)
                })
            else:
                processed_results.append({
                    "sub_step": i + 1,
                    "status": "success",
                    **result
                })

        return {
            "result": processed_results,
            "success": all(r["status"] == "success" for r in processed_results)
        }

    async def _execute_conditional_step(
        self,
        step: Dict[str, Any],
        context: Dict[str, Any]
    ) -> Dict[str, Any]:
        """Execute conditional step: first matching branch wins, else default."""
        if "branches" not in step:
            raise ValueError("conditional step requires 'branches' key")

        # A branch without a "condition" key acts as an unconditional match.
        for branch in step["branches"]:
            if "condition" not in branch or self._evaluate_condition(
                branch["condition"],
                context
            ):
                return await self._execute_step(branch["step"], context)

        # No condition matched, execute default if present
        if "default" in step:
            return await self._execute_step(step["default"], context)

        return {
            "result": None,
            "success": True,
            "message": "No condition matched"
        }

    def _build_prompt_with_context(
        self,
        prompt_template: str,
        context: Dict[str, Any]
    ) -> str:
        """Build prompt by replacing ``{variable}`` placeholders with context values."""
        prompt = prompt_template

        for key, value in context.items():
            placeholder = f"{{{key}}}"
            if placeholder in prompt:
                prompt = prompt.replace(placeholder, str(value))

        return prompt

    def _replace_context_variables(
        self,
        obj: Any,
        context: Dict[str, Any]
    ) -> Any:
        """Recursively substitute context placeholders in strings/dicts/lists."""
        if isinstance(obj, str):
            return self._build_prompt_with_context(obj, context)

        elif isinstance(obj, dict):
            return {
                k: self._replace_context_variables(v, context)
                for k, v in obj.items()
            }

        elif isinstance(obj, list):
            return [
                self._replace_context_variables(item, context)
                for item in obj
            ]

        else:
            return obj

    def _evaluate_condition(
        self,
        condition: Any,
        context: Dict[str, Any]
    ) -> bool:
        """
        Evaluate condition against context.

        Supports:
        - Boolean values (returned as-is)
        - Strings (truthiness of the named context variable)
        - Callables (invoked with the context)
        """
        if isinstance(condition, bool):
            return condition

        elif isinstance(condition, str):
            # BUG FIX: coerce to bool — context values can be arbitrary
            # objects, and the declared return type is bool.
            return bool(context.get(condition, False))

        elif callable(condition):
            # Evaluate lambda function
            return condition(context)

        else:
            return bool(condition)

    def explain_chain(self, steps: List[Dict[str, Any]]) -> Dict[str, Any]:
        """
        Explain execution plan for a chain without running it.

        Returns detailed information about how the chain will execute.
        """
        step_explanations = []

        for i, step in enumerate(steps):
            explanation = {
                "step": i + 1,
                "name": step.get("name", "Unnamed"),
                "type": step.get("type", "llm"),
                "description": step.get("description", ""),
                "conditional": "condition" in step,
                "output_key": step.get("output_key"),
                "estimated_difficulty": step.get("difficulty", "unknown")
            }

            step_explanations.append(explanation)

        return {
            "total_steps": len(steps),
            "steps": step_explanations,
            "requires_provider": any(
                s.get("type") == "llm" for s in steps
            ),
            "estimated_complexity": self._estimate_chain_complexity(steps)
        }

    def _estimate_chain_complexity(self, steps: List[Dict[str, Any]]) -> str:
        """Heuristic complexity bucket based on counts of step types."""
        llm_steps = sum(1 for s in steps if s.get("type") == "llm")
        parallel_steps = sum(1 for s in steps if s.get("type") == "parallel")
        conditional_steps = sum(1 for s in steps if s.get("type") == "conditional")

        if llm_steps >= 5 or parallel_steps >= 2:
            return "complex"
        elif llm_steps >= 3 or conditional_steps >= 2:
            return "medium"
        else:
            return "simple"
|
|
@@ -0,0 +1,209 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Phase 2a: Difficulty-Aware Routing Integration
|
|
3
|
+
|
|
4
|
+
Integrates difficulty-aware routing with existing skill system.
|
|
5
|
+
Combines multi-provider selection with skill-based context.
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
from typing import Dict, List, Any, Optional
|
|
9
|
+
from ..providers.registry import MultiProviderExecutor, IntelligentRouter
|
|
10
|
+
from ..skills.skill_manager import SkillManager
|
|
11
|
+
from ..agents.skill_enhanced_agent import TMLEnhancedAgent
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
class DifficultyAwareSkillAgent:
    """
    Agent that combines difficulty-aware routing with skills.

    Process:
    1. Classify task difficulty
    2. Route to optimal provider
    3. Load relevant skills
    4. Execute with enhanced context
    """

    # Difficulty bands over a 0–100 score, half-open and contiguous.
    # BUG FIX: EXPERT was range(80, 100), which excluded a score of exactly
    # 100; the top band now closes at 101 so the full scale is covered.
    # NOTE(review): this table is not consumed within this class — confirm
    # external consumers before further changes.
    DIFFICULTY_LEVELS = {
        "TRIVIAL": range(0, 20),
        "SIMPLE": range(20, 40),
        "MEDIUM": range(40, 60),
        "COMPLEX": range(60, 80),
        "EXPERT": range(80, 101)
    }

    def __init__(
        self,
        skills_dir: str = "tmlpd-skills",
        provider_config: Optional[str] = None
    ):
        """
        Initialize the agent.

        Args:
            skills_dir: Directory containing skill definitions.
            provider_config: Optional path/name of provider configuration.
        """
        # NOTE: project-type annotations elsewhere in this class are string
        # forward refs so the class can be defined without those modules.
        self.skill_manager = SkillManager(skills_dir)
        self.provider_executor = MultiProviderExecutor(provider_config)
        self.router = IntelligentRouter(self.provider_executor.registry)

    async def execute(
        self,
        task: Dict[str, Any],
        difficulty_override: Optional[str] = None,
        provider_override: Optional[str] = None,
        **kwargs
    ) -> Dict[str, Any]:
        """
        Execute task with difficulty-aware routing and skills.

        Args:
            task: Task to execute
            difficulty_override: Force specific difficulty level
            provider_override: Force specific provider
            **kwargs: Additional execution parameters

        Returns:
            Execution result with metadata
        """
        # Step 1: Classify difficulty
        difficulty = difficulty_override or self.router.classify_difficulty(task)

        # Step 2: Get relevant skills
        relevant_skills = self.skill_manager.get_relevant_skills(
            task.get("description", ""),
            top_k=3
        )

        # Step 3: Get provider (with override support)
        if provider_override:
            provider = self.provider_executor.registry.get_provider(provider_override)
        else:
            provider = self.router.route(task, difficulty_override=difficulty)

        if not provider:
            return {
                "success": False,
                "error": "No healthy providers available",
                "difficulty": difficulty,
                "skills": relevant_skills
            }

        # Step 4: Build enhanced prompt with skills
        enhanced_prompt = self._build_prompt_with_skills(
            task,
            relevant_skills,
            difficulty
        )

        # Step 5: Execute with provider
        from datetime import datetime
        start_time = datetime.now()

        try:
            provider_response = await provider.execute_with_retry(
                enhanced_prompt,
                **kwargs
            )

            end_time = datetime.now()
            execution_time = (end_time - start_time).total_seconds()

            return {
                "success": provider_response.success,
                "content": provider_response.content,
                "tokens_used": provider_response.tokens_used,
                "cost": provider_response.cost,
                "latency_ms": provider_response.latency_ms,
                "execution_time": execution_time,
                "provider": provider_response.provider,
                "model": provider_response.model,
                "difficulty": difficulty,
                "skills_used": relevant_skills,
                "timestamp": provider_response.timestamp
            }

        except Exception as e:
            # NOTE(review): error paths report skills under "skills" while
            # the success path uses "skills_used" — callers may depend on
            # both keys, so the inconsistency is preserved here.
            return {
                "success": False,
                "error": str(e),
                "difficulty": difficulty,
                "skills": relevant_skills
            }

    def _build_prompt_with_skills(
        self,
        task: Dict[str, Any],
        skills: List[str],
        difficulty: str
    ) -> str:
        """
        Build enhanced prompt with skill context.

        Sections in order: task header (with difficulty), optional context,
        optional requirements, loaded skill bodies, and final instructions.
        """
        parts = []

        # Add task context
        parts.append(f"# Task (Difficulty: {difficulty})")
        parts.append(f"{task.get('description', '')}\n")

        # Add context and requirements
        if "context" in task:
            parts.append(f"## Context\n{task['context']}\n")

        if "requirements" in task:
            parts.append(f"## Requirements\n{task['requirements']}\n")

        # Add skill contexts
        if skills:
            parts.append("## Relevant Skills\n")
            parts.append("The following skills provide expert guidance:\n")

            for skill_name in skills:
                try:
                    skill = self.skill_manager.load_skill(skill_name)
                    parts.append(f"\n### {skill.name}\n")
                    parts.append(f"{skill.content}\n")
                except Exception as e:
                    # A skill that fails to load is reported inline rather
                    # than aborting the whole prompt build.
                    parts.append(f"\n### {skill_name}\n")
                    parts.append(f"(Error loading skill: {e})\n")

        # Add execution guidance
        parts.append(f"\n## Instructions\n")
        parts.append(
            f"Complete this {difficulty.lower()} task following the guidance "
            "from the relevant skills above. Use best practices and patterns "
            "recommended by the skills."
        )

        return "\n".join(parts)

    def explain_execution_plan(
        self,
        task: Dict[str, Any]
    ) -> Dict[str, Any]:
        """
        Explain the execution plan for transparency.

        Returns detailed information about how a task will be executed.
        """
        # Classify difficulty
        difficulty = self.router.classify_difficulty(task)

        # Get routing info
        routing = self.router.explain_routing(task)

        # Get relevant skills
        skills = self.skill_manager.get_relevant_skills(
            task.get("description", ""),
            top_k=3
        )

        return {
            "task": task.get("description", ""),
            "difficulty": difficulty,
            "provider_selection": routing,
            "skills": skills,
            "reasoning": (
                f"Task classified as '{difficulty}' difficulty. "
                f"Will use {routing['selected_provider']} ({routing['selected_model']}) "
                f"with {len(skills)} relevant skills."
            )
        }

    async def start(self):
        """Start background services (provider health checks etc.)."""
        await self.provider_executor.start()

    async def stop(self):
        """Stop background services."""
        await self.provider_executor.stop()
|