adaptive-memory-multi-model-router 1.2.2 → 1.3.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/README.md +146 -66
- package/dist/index.d.ts +1 -1
- package/dist/index.js +1 -1
- package/dist/integrations/airtable.js +20 -0
- package/dist/integrations/discord.js +18 -0
- package/dist/integrations/github.js +23 -0
- package/dist/integrations/gmail.js +19 -0
- package/dist/integrations/google-calendar.js +18 -0
- package/dist/integrations/index.js +61 -0
- package/dist/integrations/jira.js +21 -0
- package/dist/integrations/linear.js +19 -0
- package/dist/integrations/notion.js +19 -0
- package/dist/integrations/slack.js +18 -0
- package/dist/integrations/telegram.js +19 -0
- package/dist/providers/registry.js +7 -3
- package/docs/ARCHITECTURAL-IMPROVEMENTS-2025.md +1391 -0
- package/docs/ARCHITECTURAL-IMPROVEMENTS-REVISED-2025.md +1051 -0
- package/docs/CONFIGURATION.md +476 -0
- package/docs/COUNCIL_DECISION.json +308 -0
- package/docs/COUNCIL_SUMMARY.md +265 -0
- package/docs/COUNCIL_V2.2_DECISION.md +416 -0
- package/docs/IMPROVEMENT_ROADMAP.md +515 -0
- package/docs/LLM_COUNCIL_DECISION.md +508 -0
- package/docs/QUICK_START_VISIBILITY.md +782 -0
- package/docs/REDDIT_GAP_ANALYSIS.md +299 -0
- package/docs/RESEARCH_BACKED_IMPROVEMENTS.md +1180 -0
- package/docs/TMLPD_QNA.md +751 -0
- package/docs/TMLPD_V2.1_COMPLETE.md +763 -0
- package/docs/TMLPD_V2.2_RESEARCH_ROADMAP.md +754 -0
- package/docs/V2.2_IMPLEMENTATION_COMPLETE.md +446 -0
- package/docs/V2_IMPLEMENTATION_GUIDE.md +388 -0
- package/docs/VISIBILITY_ADOPTION_PLAN.md +1005 -0
- package/docs/launch-content/LAUNCH_EXECUTION_CHECKLIST.md +421 -0
- package/docs/launch-content/README.md +457 -0
- package/docs/launch-content/assets/cost_comparison_100_tasks.png +0 -0
- package/docs/launch-content/assets/cumulative_savings.png +0 -0
- package/docs/launch-content/assets/parallel_speedup.png +0 -0
- package/docs/launch-content/assets/provider_pricing_comparison.png +0 -0
- package/docs/launch-content/assets/task_breakdown_comparison.png +0 -0
- package/docs/launch-content/generate_charts.py +313 -0
- package/docs/launch-content/hn_show_post.md +139 -0
- package/docs/launch-content/partner_outreach_templates.md +745 -0
- package/docs/launch-content/reddit_posts.md +467 -0
- package/docs/launch-content/twitter_thread.txt +460 -0
- package/examples/QUICKSTART.md +1 -1
- package/openclaw-alexa-bridge/ALL_REMAINING_FIXES_PLAN.md +313 -0
- package/openclaw-alexa-bridge/REMAINING_FIXES_SUMMARY.md +277 -0
- package/openclaw-alexa-bridge/src/alexa_handler_no_tmlpd.js +1234 -0
- package/openclaw-alexa-bridge/test_fixes.js +77 -0
- package/package.json +120 -29
- package/package.json.tmp +0 -0
- package/qna/TMLPD_QNA.md +3 -3
- package/skill/SKILL.md +2 -2
- package/src/__tests__/integration/tmpld_integration.test.py +540 -0
- package/src/agents/skill_enhanced_agent.py +318 -0
- package/src/memory/__init__.py +15 -0
- package/src/memory/agentic_memory.py +353 -0
- package/src/memory/semantic_memory.py +444 -0
- package/src/memory/simple_memory.py +466 -0
- package/src/memory/working_memory.py +447 -0
- package/src/orchestration/__init__.py +52 -0
- package/src/orchestration/execution_engine.py +353 -0
- package/src/orchestration/halo_orchestrator.py +367 -0
- package/src/orchestration/mcts_workflow.py +498 -0
- package/src/orchestration/role_assigner.py +473 -0
- package/src/orchestration/task_planner.py +522 -0
- package/src/providers/__init__.py +67 -0
- package/src/providers/anthropic.py +304 -0
- package/src/providers/base.py +241 -0
- package/src/providers/cerebras.py +373 -0
- package/src/providers/registry.py +476 -0
- package/src/routing/__init__.py +30 -0
- package/src/routing/universal_router.py +621 -0
- package/src/skills/TMLPD-QUICKREF.md +210 -0
- package/src/skills/TMLPD-SETUP-SUMMARY.md +157 -0
- package/src/skills/TMLPD.md +540 -0
- package/src/skills/__tests__/skill_manager.test.ts +328 -0
- package/src/skills/skill_manager.py +385 -0
- package/src/skills/test-tmlpd.sh +108 -0
- package/src/skills/tmlpd-category.yaml +67 -0
- package/src/skills/tmlpd-monitoring.yaml +188 -0
- package/src/skills/tmlpd-phase.yaml +132 -0
- package/src/state/__init__.py +17 -0
- package/src/state/simple_checkpoint.py +508 -0
- package/src/tmlpd_agent.py +464 -0
- package/src/tmpld_v2.py +427 -0
- package/src/workflows/__init__.py +18 -0
- package/src/workflows/advanced_difficulty_classifier.py +377 -0
- package/src/workflows/chaining_executor.py +417 -0
- package/src/workflows/difficulty_integration.py +209 -0
- package/src/workflows/orchestrator.py +469 -0
- package/src/workflows/orchestrator_executor.py +456 -0
- package/src/workflows/parallelization_executor.py +382 -0
- package/src/workflows/router.py +311 -0
- package/test_integration_simple.py +86 -0
- package/test_mcts_workflow.py +150 -0
- package/test_templd_integration.py +262 -0
- package/test_universal_router.py +275 -0
- package/tmlpd-pi-extension/README.md +36 -0
- package/tmlpd-pi-extension/dist/cache/prefixCache.d.ts +114 -0
- package/tmlpd-pi-extension/dist/cache/prefixCache.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/cache/prefixCache.js +285 -0
- package/tmlpd-pi-extension/dist/cache/prefixCache.js.map +1 -0
- package/tmlpd-pi-extension/dist/cache/responseCache.d.ts +58 -0
- package/tmlpd-pi-extension/dist/cache/responseCache.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/cache/responseCache.js +153 -0
- package/tmlpd-pi-extension/dist/cache/responseCache.js.map +1 -0
- package/tmlpd-pi-extension/dist/cli.js +59 -0
- package/tmlpd-pi-extension/dist/cost/costTracker.d.ts +95 -0
- package/tmlpd-pi-extension/dist/cost/costTracker.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/cost/costTracker.js +240 -0
- package/tmlpd-pi-extension/dist/cost/costTracker.js.map +1 -0
- package/tmlpd-pi-extension/dist/index.d.ts +723 -0
- package/tmlpd-pi-extension/dist/index.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/index.js +239 -0
- package/tmlpd-pi-extension/dist/index.js.map +1 -0
- package/tmlpd-pi-extension/dist/memory/episodicMemory.d.ts +82 -0
- package/tmlpd-pi-extension/dist/memory/episodicMemory.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/memory/episodicMemory.js +145 -0
- package/tmlpd-pi-extension/dist/memory/episodicMemory.js.map +1 -0
- package/tmlpd-pi-extension/dist/orchestration/haloOrchestrator.d.ts +102 -0
- package/tmlpd-pi-extension/dist/orchestration/haloOrchestrator.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/orchestration/haloOrchestrator.js +207 -0
- package/tmlpd-pi-extension/dist/orchestration/haloOrchestrator.js.map +1 -0
- package/tmlpd-pi-extension/dist/orchestration/mctsWorkflow.d.ts +85 -0
- package/tmlpd-pi-extension/dist/orchestration/mctsWorkflow.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/orchestration/mctsWorkflow.js +210 -0
- package/tmlpd-pi-extension/dist/orchestration/mctsWorkflow.js.map +1 -0
- package/tmlpd-pi-extension/dist/providers/localProvider.d.ts +102 -0
- package/tmlpd-pi-extension/dist/providers/localProvider.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/providers/localProvider.js +338 -0
- package/tmlpd-pi-extension/dist/providers/localProvider.js.map +1 -0
- package/tmlpd-pi-extension/dist/providers/registry.d.ts +55 -0
- package/tmlpd-pi-extension/dist/providers/registry.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/providers/registry.js +138 -0
- package/tmlpd-pi-extension/dist/providers/registry.js.map +1 -0
- package/tmlpd-pi-extension/dist/routing/advancedRouter.d.ts +68 -0
- package/tmlpd-pi-extension/dist/routing/advancedRouter.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/routing/advancedRouter.js +332 -0
- package/tmlpd-pi-extension/dist/routing/advancedRouter.js.map +1 -0
- package/tmlpd-pi-extension/dist/tools/tmlpdTools.d.ts +101 -0
- package/tmlpd-pi-extension/dist/tools/tmlpdTools.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/tools/tmlpdTools.js +368 -0
- package/tmlpd-pi-extension/dist/tools/tmlpdTools.js.map +1 -0
- package/tmlpd-pi-extension/dist/utils/batchProcessor.d.ts +96 -0
- package/tmlpd-pi-extension/dist/utils/batchProcessor.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/utils/batchProcessor.js +170 -0
- package/tmlpd-pi-extension/dist/utils/batchProcessor.js.map +1 -0
- package/tmlpd-pi-extension/dist/utils/compression.d.ts +61 -0
- package/tmlpd-pi-extension/dist/utils/compression.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/utils/compression.js +281 -0
- package/tmlpd-pi-extension/dist/utils/compression.js.map +1 -0
- package/tmlpd-pi-extension/dist/utils/reliability.d.ts +74 -0
- package/tmlpd-pi-extension/dist/utils/reliability.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/utils/reliability.js +177 -0
- package/tmlpd-pi-extension/dist/utils/reliability.js.map +1 -0
- package/tmlpd-pi-extension/dist/utils/speculativeDecoding.d.ts +117 -0
- package/tmlpd-pi-extension/dist/utils/speculativeDecoding.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/utils/speculativeDecoding.js +246 -0
- package/tmlpd-pi-extension/dist/utils/speculativeDecoding.js.map +1 -0
- package/tmlpd-pi-extension/dist/utils/tokenUtils.d.ts +50 -0
- package/tmlpd-pi-extension/dist/utils/tokenUtils.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/utils/tokenUtils.js +124 -0
- package/tmlpd-pi-extension/dist/utils/tokenUtils.js.map +1 -0
- package/tmlpd-pi-extension/examples/QUICKSTART.md +183 -0
- package/tmlpd-pi-extension/package-lock.json +75 -0
- package/tmlpd-pi-extension/package.json +172 -0
- package/tmlpd-pi-extension/python/examples.py +53 -0
- package/tmlpd-pi-extension/python/integrations.py +330 -0
- package/tmlpd-pi-extension/python/setup.py +28 -0
- package/tmlpd-pi-extension/python/tmlpd.py +369 -0
- package/tmlpd-pi-extension/qna/REDDIT_GAP_ANALYSIS.md +299 -0
- package/tmlpd-pi-extension/qna/TMLPD_QNA.md +751 -0
- package/tmlpd-pi-extension/skill/SKILL.md +238 -0
- package/{src → tmlpd-pi-extension/src}/index.ts +1 -1
- package/tmlpd-pi-extension/tsconfig.json +18 -0
- package/demo/research-demo.js +0 -266
- package/notebooks/quickstart.ipynb +0 -157
- package/rust/tmlpd.h +0 -268
- package/src/cache/prefixCache.ts +0 -365
- package/src/routing/advancedRouter.ts +0 -406
- package/src/utils/speculativeDecoding.ts +0 -344
- /package/{src → tmlpd-pi-extension/src}/cache/responseCache.ts +0 -0
- /package/{src → tmlpd-pi-extension/src}/cost/costTracker.ts +0 -0
- /package/{src → tmlpd-pi-extension/src}/memory/episodicMemory.ts +0 -0
- /package/{src → tmlpd-pi-extension/src}/orchestration/haloOrchestrator.ts +0 -0
- /package/{src → tmlpd-pi-extension/src}/orchestration/mctsWorkflow.ts +0 -0
- /package/{src → tmlpd-pi-extension/src}/providers/localProvider.ts +0 -0
- /package/{src → tmlpd-pi-extension/src}/providers/registry.ts +0 -0
- /package/{src → tmlpd-pi-extension/src}/tools/tmlpdTools.ts +0 -0
- /package/{src → tmlpd-pi-extension/src}/utils/batchProcessor.ts +0 -0
- /package/{src → tmlpd-pi-extension/src}/utils/compression.ts +0 -0
- /package/{src → tmlpd-pi-extension/src}/utils/reliability.ts +0 -0
- /package/{src → tmlpd-pi-extension/src}/utils/tokenUtils.ts +0 -0
|
@@ -0,0 +1,382 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Phase 4b: Parallelization Executor
|
|
3
|
+
|
|
4
|
+
Concurrent execution of independent tasks.
|
|
5
|
+
Based on agent orchestration patterns from arXiv:2506.12508.
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
import asyncio
|
|
9
|
+
from typing import Dict, List, Any, Optional, Callable
|
|
10
|
+
from datetime import datetime
|
|
11
|
+
from concurrent.futures import ThreadPoolExecutor
|
|
12
|
+
from ..providers.registry import MultiProviderExecutor
|
|
13
|
+
from ..skills.skill_manager import SkillManager
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
class ParallelizationExecutor:
    """
    Execute independent tasks concurrently for improved performance.

    Based on agent orchestration patterns (arXiv:2506.12508)

    Features:
    - Concurrent task execution
    - Dependency resolution
    - Progress tracking
    - Error isolation (one failure doesn't stop others)
    - Resource limits

    Task definitions are plain dicts. Supported ``type`` values:
    - "llm": requires ``provider_executor`` and a "prompt" key
    - "function": requires a "function" key (sync or async callable),
      optional "args" / "kwargs"
    - "chain": requires a "steps" key; delegated to ChainingExecutor
    """

    def __init__(
        self,
        provider_executor: Optional["MultiProviderExecutor"] = None,
        skill_manager: Optional["SkillManager"] = None,
        max_concurrent: int = 10
    ):
        """
        Initialize parallelization executor.

        Args:
            provider_executor: Multi-provider executor for LLM calls
            skill_manager: Skill manager for loading skills
            max_concurrent: Maximum number of tasks running at the same time
        """
        self.provider_executor = provider_executor
        self.skill_manager = skill_manager
        self.max_concurrent = max_concurrent

    async def execute_parallel(
        self,
        tasks: List[Dict[str, Any]],
        fail_on_error: bool = False,
        return_exceptions: bool = True
    ) -> Dict[str, Any]:
        """
        Execute multiple tasks concurrently.

        Args:
            tasks: List of task definitions
            fail_on_error: Whether the aggregate "success" flag should be
                False when any task failed (individual failures are always
                recorded either way)
            return_exceptions: Whether gather() collects exceptions into the
                result list instead of raising the first one

        Returns:
            Execution result with per-task results and timing statistics
        """
        start_time = datetime.now()

        print(f"\n🚀 Executing {len(tasks)} tasks in parallel (max {self.max_concurrent} concurrent)")

        # Semaphore caps how many tasks actually run at once.
        semaphore = asyncio.Semaphore(self.max_concurrent)

        # Wrap every task in a semaphore-guarded coroutine.
        async_tasks = []
        for i, task in enumerate(tasks):
            task_id = task.get("id", f"task_{i+1}")
            async_task = self._execute_with_semaphore(
                task_id,
                task,
                semaphore
            )
            async_tasks.append(async_task)

        # Execute all tasks
        if return_exceptions:
            results = await asyncio.gather(*async_tasks, return_exceptions=True)
        else:
            results = await asyncio.gather(*async_tasks)

        # Normalize results and count outcomes. Results arrive in the same
        # order as the input tasks.
        task_results = []
        successful = 0
        failed = 0

        for i, result in enumerate(results):
            task_id = tasks[i].get("id", f"task_{i+1}")

            if isinstance(result, Exception):
                # A raw exception escaped _execute_task (e.g. cancellation);
                # _execute_task normally converts failures into dicts.
                task_results.append({
                    "task_id": task_id,
                    "status": "error",
                    "error": str(result),
                    "result": None
                })
                failed += 1
            else:
                task_results.append({
                    "task_id": task_id,
                    "status": "success" if result.get("success") else "error",
                    **result
                })

                if result.get("success"):
                    successful += 1
                else:
                    failed += 1

        end_time = datetime.now()
        total_time = (end_time - start_time).total_seconds()

        # Speedup relative to the caller-provided per-task time estimates
        # (defaults to 1s per task when not specified).
        sequential_time = sum(
            t.get("estimated_time", 1.0) for t in tasks
        )
        speedup = sequential_time / total_time if total_time > 0 else 1.0

        print(f"\n✅ Completed: {successful} successful, {failed} failed")
        print(f"⏱️ Total time: {total_time:.2f}s (estimated sequential: {sequential_time:.2f}s)")
        print(f"🚀 Speedup: {speedup:.2f}x")

        return {
            "success": failed == 0 or not fail_on_error,
            "total_tasks": len(tasks),
            "successful_tasks": successful,
            "failed_tasks": failed,
            "task_results": task_results,
            "total_time": total_time,
            "sequential_time_estimate": sequential_time,
            "speedup": speedup,
            "started_at": start_time.isoformat(),
            "completed_at": end_time.isoformat()
        }

    async def _execute_with_semaphore(
        self,
        task_id: str,
        task: Dict[str, Any],
        semaphore: asyncio.Semaphore
    ) -> Dict[str, Any]:
        """Execute task with semaphore (concurrency limit)"""
        async with semaphore:
            return await self._execute_task(task_id, task)

    async def _execute_task(
        self,
        task_id: str,
        task: Dict[str, Any]
    ) -> Dict[str, Any]:
        """
        Execute a single task, dispatching on its "type".

        Never raises for task-level failures: any exception is converted
        into a ``{"success": False, "error": ...}`` result so one failing
        task cannot abort its siblings (error isolation).
        """
        task_start = datetime.now()
        task_type = task.get("type", "llm")

        print(f" ⚙️ {task_id}: Starting ({task_type})")

        try:
            if task_type == "llm":
                result = await self._execute_llm_task(task)
            elif task_type == "function":
                result = await self._execute_function_task(task)
            elif task_type == "chain":
                result = await self._execute_chain_task(task)
            else:
                raise ValueError(f"Unknown task type: {task_type}")

            task_end = datetime.now()
            execution_time = (task_end - task_start).total_seconds()

            result["execution_time"] = execution_time
            result["timestamp"] = task_end.isoformat()

            status = "✅" if result.get("success") else "❌"
            print(f" {status} {task_id}: Complete ({execution_time:.2f}s)")

            return result

        except Exception as e:
            task_end = datetime.now()
            execution_time = (task_end - task_start).total_seconds()

            print(f" ❌ {task_id}: Failed - {e}")

            return {
                "success": False,
                "error": str(e),
                "execution_time": execution_time,
                "timestamp": task_end.isoformat()
            }

    async def _execute_llm_task(self, task: Dict[str, Any]) -> Dict[str, Any]:
        """
        Execute an LLM task via the provider executor.

        Requires a "prompt" key; extra provider options can be supplied
        under "execution_params".

        Raises:
            ValueError: If no provider_executor was configured.
        """
        if not self.provider_executor:
            raise ValueError("provider_executor required for LLM tasks")

        response = await self.provider_executor.execute({
            "description": task["prompt"],
            **task.get("execution_params", {})
        })

        return {
            "result": response.content if response.success else response.error,
            "success": response.success,
            "tokens_used": response.tokens_used,
            "cost": response.cost
        }

    async def _execute_function_task(self, task: Dict[str, Any]) -> Dict[str, Any]:
        """
        Execute a callable task (sync or async).

        Reads "function" (required), plus optional "args" and "kwargs".

        Raises:
            ValueError: If the task has no "function" key.
        """
        if "function" not in task:
            raise ValueError("function task requires 'function' key")

        func = task["function"]
        args = task.get("args", [])
        kwargs = task.get("kwargs", {})

        if asyncio.iscoroutinefunction(func):
            result = await func(*args, **kwargs)
        else:
            # Run synchronous function in thread pool to avoid blocking.
            # get_running_loop() is the supported API inside a coroutine;
            # get_event_loop() is deprecated here since Python 3.10.
            loop = asyncio.get_running_loop()
            result = await loop.run_in_executor(
                None,
                lambda: func(*args, **kwargs)
            )

        return {
            "result": result,
            "success": True
        }

    async def _execute_chain_task(self, task: Dict[str, Any]) -> Dict[str, Any]:
        """
        Execute chain task (sequential sub-tasks) via ChainingExecutor.

        Reads "steps" (required), optional "context" and "continue_on_error".

        Raises:
            ValueError: If the task has no "steps" key.
        """
        # Local import avoids a circular dependency between the two
        # workflow executor modules.
        from .chaining_executor import ChainingExecutor

        if "steps" not in task:
            raise ValueError("chain task requires 'steps' key")

        chaining_executor = ChainingExecutor(
            self.provider_executor,
            self.skill_manager
        )

        return await chaining_executor.execute_chain(
            task["steps"],
            initial_context=task.get("context"),
            continue_on_error=task.get("continue_on_error", False)
        )

    async def execute_with_dependencies(
        self,
        tasks: List[Dict[str, Any]]
    ) -> Dict[str, Any]:
        """
        Execute tasks with dependency resolution.

        Tasks will execute as soon as their dependencies are satisfied:
        each round, every task whose "depends_on" ids are all completed is
        batched through execute_parallel. NOTE: task ids must be unique;
        an unknown or circular dependency surfaces as ValueError.

        Args:
            tasks: List of task definitions with optional 'depends_on' field

        Returns:
            Execution result; "task_results" is keyed by task id.

        Raises:
            ValueError: If no task is ready in a round (circular or
                missing dependency).
        """
        print("\n🔗 Executing tasks with dependency resolution")

        # Build dependency graph (id -> task definition)
        task_map = {task.get("id", f"task_{i+1}"): task for i, task in enumerate(tasks)}

        # Track completed tasks
        completed = set()
        results = {}

        start_time = datetime.now()

        while len(completed) < len(tasks):
            # Find tasks whose dependencies are satisfied
            ready_tasks = []

            for task_id, task in task_map.items():
                if task_id in completed:
                    continue

                dependencies = task.get("depends_on", [])

                if all(dep in completed for dep in dependencies):
                    ready_tasks.append((task_id, task))

            if not ready_tasks:
                # Circular dependency or missing dependency
                raise ValueError("No ready tasks - possible circular dependency")

            # Execute ready tasks in parallel
            print(f"\n🚀 Executing batch of {len(ready_tasks)} tasks")

            batch_results = await self.execute_parallel(
                [task for _, task in ready_tasks],
                return_exceptions=True
            )

            # Mark as completed; batch results come back in submission order.
            for i, (task_id, task) in enumerate(ready_tasks):
                completed.add(task_id)
                results[task_id] = batch_results["task_results"][i]

        end_time = datetime.now()
        total_time = (end_time - start_time).total_seconds()

        successful = sum(
            1 for r in results.values() if r.get("status") == "success"
        )

        return {
            "success": successful == len(tasks),
            "total_tasks": len(tasks),
            "successful_tasks": successful,
            "task_results": results,
            "total_time": total_time,
            "started_at": start_time.isoformat(),
            "completed_at": end_time.isoformat()
        }

    def explain_execution(
        self,
        tasks: List[Dict[str, Any]]
    ) -> Dict[str, Any]:
        """
        Explain execution plan for parallel tasks.

        Returns information about how tasks will execute, including a rough
        best-case speedup estimate: dependency-free tasks are assumed to
        run fully in parallel, dependent ones sequentially.
        """
        task_explanations = []

        for i, task in enumerate(tasks):
            task_id = task.get("id", f"task_{i+1}")

            explanation = {
                "task_id": task_id,
                "type": task.get("type", "llm"),
                "description": task.get("description", ""),
                "dependencies": task.get("depends_on", []),
                "estimated_time": task.get("estimated_time", 1.0),
                # Only tasks with no dependencies can start immediately.
                "can_parallelize": len(task.get("depends_on", [])) == 0
            }

            task_explanations.append(explanation)

        # Calculate parallelization potential
        independent_tasks = sum(
            1 for e in task_explanations if e["can_parallelize"]
        )

        sequential_estimate = sum(e["estimated_time"] for e in task_explanations)

        # Estimate parallel time (rough approximation): the slowest
        # independent task plus all dependent tasks run back-to-back.
        if independent_tasks > 1:
            parallel_estimate = (
                max(e["estimated_time"] for e in task_explanations if e["can_parallelize"]) +
                sum(e["estimated_time"] for e in task_explanations if not e["can_parallelize"])
            )
        else:
            parallel_estimate = sequential_estimate

        potential_speedup = sequential_estimate / parallel_estimate if parallel_estimate > 0 else 1.0

        return {
            "total_tasks": len(tasks),
            "independent_tasks": independent_tasks,
            "tasks_with_dependencies": len(tasks) - independent_tasks,
            "tasks": task_explanations,
            "sequential_time_estimate": sequential_estimate,
            "parallel_time_estimate": parallel_estimate,
            "potential_speedup": potential_speedup,
            "max_concurrent": self.max_concurrent
        }
|
|
@@ -0,0 +1,311 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Task Router - Workflow Phase 2
|
|
3
|
+
|
|
4
|
+
Routes incoming tasks to appropriate skills or workflows based on classification.
|
|
5
|
+
Follows the Routing workflow pattern from Anthropic's Agent specification.
|
|
6
|
+
|
|
7
|
+
Decision Framework:
|
|
8
|
+
- 80%: Single LLM call + Skills (direct routing)
|
|
9
|
+
- 15%: Workflows (chaining, routing, parallelization)
|
|
10
|
+
- 5%: True autonomous agents
|
|
11
|
+
"""
|
|
12
|
+
|
|
13
|
+
from typing import Dict, List, Any, Optional
|
|
14
|
+
from pathlib import Path
|
|
15
|
+
import re
|
|
16
|
+
import json
|
|
17
|
+
|
|
18
|
+
from ..skills.skill_manager import SkillManager, Skill
|
|
19
|
+
from ..agents.skill_enhanced_agent import TMLEnhancedAgent
|
|
20
|
+
|
|
21
|
+
|
|
22
|
+
class TaskRouter:
|
|
23
|
+
"""
|
|
24
|
+
Routes tasks to appropriate handlers based on classification.
|
|
25
|
+
|
|
26
|
+
Implements the Routing workflow pattern:
|
|
27
|
+
1. Classify incoming task
|
|
28
|
+
2. Route to relevant skill(s)
|
|
29
|
+
3. Execute with appropriate agent
|
|
30
|
+
4. Return result
|
|
31
|
+
"""
|
|
32
|
+
|
|
33
|
+
def __init__(
    self,
    skills_dir: str = "tmlpd-skills",
    config_path: Optional[str] = None
):
    """
    Initialize Task Router

    Args:
        skills_dir: Directory containing skill definitions
        config_path: Optional path to routing configuration
    """
    self.skill_manager = SkillManager(skills_dir)
    self.config = self._load_config(config_path)
    # Per-strategy counters, bumped on every route() call.
    self.routing_stats = {
        counter: 0
        for counter in (
            "total_tasks",
            "skill_routes",
            "workflow_routes",
            "agent_routes",
            "fallback_routes",
        )
    }
|
|
54
|
+
|
|
55
|
+
def _load_config(self, config_path: Optional[str]) -> Dict[str, Any]:
|
|
56
|
+
"""Load routing configuration"""
|
|
57
|
+
if config_path and Path(config_path).exists():
|
|
58
|
+
with open(config_path, 'r') as f:
|
|
59
|
+
return json.load(f)
|
|
60
|
+
|
|
61
|
+
# Default routing rules
|
|
62
|
+
return {
|
|
63
|
+
"keyword_mappings": {
|
|
64
|
+
"react": ["frontend", "react", "component", "jsx", "tsx"],
|
|
65
|
+
"node": ["backend", "api", "express", "server"],
|
|
66
|
+
"jest": ["test", "testing", "spec", "mock"],
|
|
67
|
+
"docs": ["documentation", "readme", "guide", "docs"]
|
|
68
|
+
},
|
|
69
|
+
"complexity_threshold": 0.7,
|
|
70
|
+
"enable_llm_classification": False,
|
|
71
|
+
"default_skill": None
|
|
72
|
+
}
|
|
73
|
+
|
|
74
|
+
def classify_task(self, task: Dict[str, Any]) -> Dict[str, Any]:
|
|
75
|
+
"""
|
|
76
|
+
Classify task to determine routing strategy.
|
|
77
|
+
|
|
78
|
+
Args:
|
|
79
|
+
task: Task dictionary with description and metadata
|
|
80
|
+
|
|
81
|
+
Returns:
|
|
82
|
+
Classification dictionary with:
|
|
83
|
+
- complexity: float (0-1)
|
|
84
|
+
- task_type: str ("simple", "workflow", "agent")
|
|
85
|
+
- suggested_skills: List[str]
|
|
86
|
+
- reasoning: str
|
|
87
|
+
"""
|
|
88
|
+
description = task.get("description", "")
|
|
89
|
+
task_words = set(re.findall(r'\w+', description.lower()))
|
|
90
|
+
|
|
91
|
+
# Calculate complexity based on multiple factors
|
|
92
|
+
complexity_score = self._calculate_complexity(task, task_words)
|
|
93
|
+
|
|
94
|
+
# Determine task type based on complexity
|
|
95
|
+
if complexity_score < 0.4:
|
|
96
|
+
task_type = "simple" # Direct LLM + Skills
|
|
97
|
+
reasoning = "Low complexity: Single LLM call with relevant skills sufficient"
|
|
98
|
+
elif complexity_score < 0.7:
|
|
99
|
+
task_type = "workflow" # May need chaining/routing
|
|
100
|
+
reasoning = "Medium complexity: May benefit from workflow patterns"
|
|
101
|
+
else:
|
|
102
|
+
task_type = "agent" # Complex multi-step task
|
|
103
|
+
reasoning = "High complexity: Requires agent-like orchestration"
|
|
104
|
+
|
|
105
|
+
# Find relevant skills
|
|
106
|
+
suggested_skills = self.skill_manager.get_relevant_skills(
|
|
107
|
+
description,
|
|
108
|
+
top_k=3,
|
|
109
|
+
threshold=0.1
|
|
110
|
+
)
|
|
111
|
+
|
|
112
|
+
return {
|
|
113
|
+
"complexity": complexity_score,
|
|
114
|
+
"task_type": task_type,
|
|
115
|
+
"suggested_skills": suggested_skills,
|
|
116
|
+
"reasoning": reasoning
|
|
117
|
+
}
|
|
118
|
+
|
|
119
|
+
def _calculate_complexity(self, task: Dict[str, Any], task_words: set) -> float:
|
|
120
|
+
"""
|
|
121
|
+
Calculate task complexity score (0-1).
|
|
122
|
+
|
|
123
|
+
Factors:
|
|
124
|
+
- Task length (longer = more complex)
|
|
125
|
+
- Number of distinct requirements
|
|
126
|
+
- Presence of multi-step keywords
|
|
127
|
+
- Dependencies or constraints
|
|
128
|
+
"""
|
|
129
|
+
score = 0.0
|
|
130
|
+
|
|
131
|
+
# Factor 1: Task length (0-0.3)
|
|
132
|
+
description = task.get("description", "")
|
|
133
|
+
word_count = len(description.split())
|
|
134
|
+
score += min(word_count / 100, 0.3)
|
|
135
|
+
|
|
136
|
+
# Factor 2: Multi-step indicators (0-0.4)
|
|
137
|
+
multi_step_keywords = [
|
|
138
|
+
"then", "after", "before", "followed by",
|
|
139
|
+
"multiple", "several", "sequence", "chain"
|
|
140
|
+
]
|
|
141
|
+
multi_step_count = sum(1 for keyword in multi_step_keywords if keyword in task_words)
|
|
142
|
+
score += min(multi_step_count * 0.1, 0.4)
|
|
143
|
+
|
|
144
|
+
# Factor 3: Requirements/constraints (0-0.2)
|
|
145
|
+
requirements = task.get("requirements", "")
|
|
146
|
+
context = task.get("context", "")
|
|
147
|
+
if requirements or context:
|
|
148
|
+
score += 0.2
|
|
149
|
+
|
|
150
|
+
# Factor 4: Dependency indicators (0-0.1)
|
|
151
|
+
dependency_keywords = ["depends", "requires", "needs", "after"]
|
|
152
|
+
if any(keyword in task_words for keyword in dependency_keywords):
|
|
153
|
+
score += 0.1
|
|
154
|
+
|
|
155
|
+
return min(score, 1.0)
|
|
156
|
+
|
|
157
|
+
def route(self, task: Dict[str, Any]) -> Dict[str, Any]:
|
|
158
|
+
"""
|
|
159
|
+
Route task to appropriate handler.
|
|
160
|
+
|
|
161
|
+
Args:
|
|
162
|
+
task: Task to route
|
|
163
|
+
|
|
164
|
+
Returns:
|
|
165
|
+
Routing result with execution strategy
|
|
166
|
+
"""
|
|
167
|
+
# Update stats
|
|
168
|
+
self.routing_stats["total_tasks"] += 1
|
|
169
|
+
|
|
170
|
+
# Classify task
|
|
171
|
+
classification = self.classify_task(task)
|
|
172
|
+
|
|
173
|
+
# Determine routing strategy
|
|
174
|
+
if classification["task_type"] == "simple":
|
|
175
|
+
return self._route_to_skill(task, classification)
|
|
176
|
+
elif classification["task_type"] == "workflow":
|
|
177
|
+
return self._route_to_workflow(task, classification)
|
|
178
|
+
else:
|
|
179
|
+
return self._route_to_agent(task, classification)
|
|
180
|
+
|
|
181
|
+
def _route_to_skill(
    self,
    task: Dict[str, Any],
    classification: Dict[str, Any]
) -> Dict[str, Any]:
    """Route to a single skill — the common (~80%) case."""
    self.routing_stats["skill_routes"] += 1

    skills = classification["suggested_skills"]
    # Lightweight agent pre-bound to the suggested skills.
    executor = TMLEnhancedAgent(
        agent_id="task-router",
        provider="anthropic",  # Default provider
        model="claude-sonnet-4",
        assigned_skills=skills,
    )
    plan = {
        "type": "single_llm_with_skills",
        "skills": skills,
        "agent": executor,
    }
    return {
        "strategy": "direct_skill",
        "classification": classification,
        "execution_plan": plan,
    }
def _route_to_workflow(
|
|
205
|
+
self,
|
|
206
|
+
task: Dict[str, Any],
|
|
207
|
+
classification: Dict[str, Any]
|
|
208
|
+
) -> Dict[str, Any]:
|
|
209
|
+
"""Route to workflow pattern (15% case)"""
|
|
210
|
+
self.routing_stats["workflow_routes"] += 1
|
|
211
|
+
|
|
212
|
+
# Determine workflow type
|
|
213
|
+
description = task.get("description", "").lower()
|
|
214
|
+
|
|
215
|
+
if any(word in description for word in ["parallel", "simultaneous", "concurrent"]):
|
|
216
|
+
workflow_type = "parallelization"
|
|
217
|
+
elif any(word in description for word in ["then", "after", "followed"]):
|
|
218
|
+
workflow_type = "chaining"
|
|
219
|
+
else:
|
|
220
|
+
workflow_type = "routing"
|
|
221
|
+
|
|
222
|
+
return {
|
|
223
|
+
"strategy": "workflow",
|
|
224
|
+
"workflow_type": workflow_type,
|
|
225
|
+
"classification": classification,
|
|
226
|
+
"execution_plan": {
|
|
227
|
+
"type": "workflow_pattern",
|
|
228
|
+
"pattern": workflow_type,
|
|
229
|
+
"skills": classification["suggested_skills"]
|
|
230
|
+
}
|
|
231
|
+
}
|
|
232
|
+
|
|
233
|
+
def _route_to_agent(
    self,
    task: Dict[str, Any],
    classification: Dict[str, Any]
) -> Dict[str, Any]:
    """Route to a full autonomous agent — the ~5% case."""
    self.routing_stats["agent_routes"] += 1

    skills = classification["suggested_skills"]
    orchestrator = TMLEnhancedAgent(
        agent_id="orchestrator",
        provider="anthropic",
        model="claude-sonnet-4",
        assigned_skills=skills,
    )
    # Complex tasks get the full agent loop with memory and
    # checkpointing enabled.
    return {
        "strategy": "agent",
        "classification": classification,
        "execution_plan": {
            "type": "autonomous_agent",
            "skills": skills,
            "agent": orchestrator,
            "enable_memory": True,
            "enable_checkpointing": True,
        },
    }
def get_routing_stats(self) -> Dict[str, Any]:
    """Return routing statistics, with per-strategy percentages once any task was routed."""
    total = self.routing_stats["total_tasks"]

    # Nothing routed yet: return raw counters and avoid a zero division.
    if not total:
        return self.routing_stats

    report = dict(self.routing_stats)
    for kind in ("skill", "workflow", "agent"):
        report[f"{kind}_route_percentage"] = (
            self.routing_stats[f"{kind}_routes"] / total
        ) * 100
    return report
def reset_stats(self):
    """Reset all routing counters back to zero."""
    self.routing_stats = dict.fromkeys(
        (
            "total_tasks",
            "skill_routes",
            "workflow_routes",
            "agent_routes",
            "fallback_routes",
        ),
        0,
    )
|
|
284
|
+
# Convenience functions for common routing patterns
|
|
285
|
+
|
|
286
|
+
def route_and_execute(task: Dict[str, Any], router: TaskRouter) -> Dict[str, Any]:
    """
    Route a task and execute it with the strategy the router selected.

    Args:
        task: Task to execute
        router: TaskRouter instance

    Returns:
        Execution result
    """
    plan = router.route(task)

    # Workflow plans are not executed here; they are handed back for a
    # dedicated workflow executor to run.
    if plan["strategy"] == "workflow":
        return {
            "success": True,
            "workflow_plan": plan,
            "message": "Workflow pattern identified - use workflow executor",
        }

    # Both "direct_skill" and "agent" strategies carry a ready-to-run
    # agent inside their execution plan.
    return plan["execution_plan"]["agent"].execute_task(task)
|