adaptive-memory-multi-model-router 1.2.2 → 1.3.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/README.md +146 -66
- package/dist/index.d.ts +1 -1
- package/dist/index.js +1 -1
- package/dist/integrations/airtable.js +20 -0
- package/dist/integrations/discord.js +18 -0
- package/dist/integrations/github.js +23 -0
- package/dist/integrations/gmail.js +19 -0
- package/dist/integrations/google-calendar.js +18 -0
- package/dist/integrations/index.js +61 -0
- package/dist/integrations/jira.js +21 -0
- package/dist/integrations/linear.js +19 -0
- package/dist/integrations/notion.js +19 -0
- package/dist/integrations/slack.js +18 -0
- package/dist/integrations/telegram.js +19 -0
- package/dist/providers/registry.js +7 -3
- package/docs/ARCHITECTURAL-IMPROVEMENTS-2025.md +1391 -0
- package/docs/ARCHITECTURAL-IMPROVEMENTS-REVISED-2025.md +1051 -0
- package/docs/CONFIGURATION.md +476 -0
- package/docs/COUNCIL_DECISION.json +308 -0
- package/docs/COUNCIL_SUMMARY.md +265 -0
- package/docs/COUNCIL_V2.2_DECISION.md +416 -0
- package/docs/IMPROVEMENT_ROADMAP.md +515 -0
- package/docs/LLM_COUNCIL_DECISION.md +508 -0
- package/docs/QUICK_START_VISIBILITY.md +782 -0
- package/docs/REDDIT_GAP_ANALYSIS.md +299 -0
- package/docs/RESEARCH_BACKED_IMPROVEMENTS.md +1180 -0
- package/docs/TMLPD_QNA.md +751 -0
- package/docs/TMLPD_V2.1_COMPLETE.md +763 -0
- package/docs/TMLPD_V2.2_RESEARCH_ROADMAP.md +754 -0
- package/docs/V2.2_IMPLEMENTATION_COMPLETE.md +446 -0
- package/docs/V2_IMPLEMENTATION_GUIDE.md +388 -0
- package/docs/VISIBILITY_ADOPTION_PLAN.md +1005 -0
- package/docs/launch-content/LAUNCH_EXECUTION_CHECKLIST.md +421 -0
- package/docs/launch-content/README.md +457 -0
- package/docs/launch-content/assets/cost_comparison_100_tasks.png +0 -0
- package/docs/launch-content/assets/cumulative_savings.png +0 -0
- package/docs/launch-content/assets/parallel_speedup.png +0 -0
- package/docs/launch-content/assets/provider_pricing_comparison.png +0 -0
- package/docs/launch-content/assets/task_breakdown_comparison.png +0 -0
- package/docs/launch-content/generate_charts.py +313 -0
- package/docs/launch-content/hn_show_post.md +139 -0
- package/docs/launch-content/partner_outreach_templates.md +745 -0
- package/docs/launch-content/reddit_posts.md +467 -0
- package/docs/launch-content/twitter_thread.txt +460 -0
- package/examples/QUICKSTART.md +1 -1
- package/openclaw-alexa-bridge/ALL_REMAINING_FIXES_PLAN.md +313 -0
- package/openclaw-alexa-bridge/REMAINING_FIXES_SUMMARY.md +277 -0
- package/openclaw-alexa-bridge/src/alexa_handler_no_tmlpd.js +1234 -0
- package/openclaw-alexa-bridge/test_fixes.js +77 -0
- package/package.json +120 -29
- package/package.json.tmp +0 -0
- package/qna/TMLPD_QNA.md +3 -3
- package/skill/SKILL.md +2 -2
- package/src/__tests__/integration/tmpld_integration.test.py +540 -0
- package/src/agents/skill_enhanced_agent.py +318 -0
- package/src/memory/__init__.py +15 -0
- package/src/memory/agentic_memory.py +353 -0
- package/src/memory/semantic_memory.py +444 -0
- package/src/memory/simple_memory.py +466 -0
- package/src/memory/working_memory.py +447 -0
- package/src/orchestration/__init__.py +52 -0
- package/src/orchestration/execution_engine.py +353 -0
- package/src/orchestration/halo_orchestrator.py +367 -0
- package/src/orchestration/mcts_workflow.py +498 -0
- package/src/orchestration/role_assigner.py +473 -0
- package/src/orchestration/task_planner.py +522 -0
- package/src/providers/__init__.py +67 -0
- package/src/providers/anthropic.py +304 -0
- package/src/providers/base.py +241 -0
- package/src/providers/cerebras.py +373 -0
- package/src/providers/registry.py +476 -0
- package/src/routing/__init__.py +30 -0
- package/src/routing/universal_router.py +621 -0
- package/src/skills/TMLPD-QUICKREF.md +210 -0
- package/src/skills/TMLPD-SETUP-SUMMARY.md +157 -0
- package/src/skills/TMLPD.md +540 -0
- package/src/skills/__tests__/skill_manager.test.ts +328 -0
- package/src/skills/skill_manager.py +385 -0
- package/src/skills/test-tmlpd.sh +108 -0
- package/src/skills/tmlpd-category.yaml +67 -0
- package/src/skills/tmlpd-monitoring.yaml +188 -0
- package/src/skills/tmlpd-phase.yaml +132 -0
- package/src/state/__init__.py +17 -0
- package/src/state/simple_checkpoint.py +508 -0
- package/src/tmlpd_agent.py +464 -0
- package/src/tmpld_v2.py +427 -0
- package/src/workflows/__init__.py +18 -0
- package/src/workflows/advanced_difficulty_classifier.py +377 -0
- package/src/workflows/chaining_executor.py +417 -0
- package/src/workflows/difficulty_integration.py +209 -0
- package/src/workflows/orchestrator.py +469 -0
- package/src/workflows/orchestrator_executor.py +456 -0
- package/src/workflows/parallelization_executor.py +382 -0
- package/src/workflows/router.py +311 -0
- package/test_integration_simple.py +86 -0
- package/test_mcts_workflow.py +150 -0
- package/test_templd_integration.py +262 -0
- package/test_universal_router.py +275 -0
- package/tmlpd-pi-extension/README.md +36 -0
- package/tmlpd-pi-extension/dist/cache/prefixCache.d.ts +114 -0
- package/tmlpd-pi-extension/dist/cache/prefixCache.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/cache/prefixCache.js +285 -0
- package/tmlpd-pi-extension/dist/cache/prefixCache.js.map +1 -0
- package/tmlpd-pi-extension/dist/cache/responseCache.d.ts +58 -0
- package/tmlpd-pi-extension/dist/cache/responseCache.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/cache/responseCache.js +153 -0
- package/tmlpd-pi-extension/dist/cache/responseCache.js.map +1 -0
- package/tmlpd-pi-extension/dist/cli.js +59 -0
- package/tmlpd-pi-extension/dist/cost/costTracker.d.ts +95 -0
- package/tmlpd-pi-extension/dist/cost/costTracker.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/cost/costTracker.js +240 -0
- package/tmlpd-pi-extension/dist/cost/costTracker.js.map +1 -0
- package/tmlpd-pi-extension/dist/index.d.ts +723 -0
- package/tmlpd-pi-extension/dist/index.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/index.js +239 -0
- package/tmlpd-pi-extension/dist/index.js.map +1 -0
- package/tmlpd-pi-extension/dist/memory/episodicMemory.d.ts +82 -0
- package/tmlpd-pi-extension/dist/memory/episodicMemory.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/memory/episodicMemory.js +145 -0
- package/tmlpd-pi-extension/dist/memory/episodicMemory.js.map +1 -0
- package/tmlpd-pi-extension/dist/orchestration/haloOrchestrator.d.ts +102 -0
- package/tmlpd-pi-extension/dist/orchestration/haloOrchestrator.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/orchestration/haloOrchestrator.js +207 -0
- package/tmlpd-pi-extension/dist/orchestration/haloOrchestrator.js.map +1 -0
- package/tmlpd-pi-extension/dist/orchestration/mctsWorkflow.d.ts +85 -0
- package/tmlpd-pi-extension/dist/orchestration/mctsWorkflow.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/orchestration/mctsWorkflow.js +210 -0
- package/tmlpd-pi-extension/dist/orchestration/mctsWorkflow.js.map +1 -0
- package/tmlpd-pi-extension/dist/providers/localProvider.d.ts +102 -0
- package/tmlpd-pi-extension/dist/providers/localProvider.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/providers/localProvider.js +338 -0
- package/tmlpd-pi-extension/dist/providers/localProvider.js.map +1 -0
- package/tmlpd-pi-extension/dist/providers/registry.d.ts +55 -0
- package/tmlpd-pi-extension/dist/providers/registry.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/providers/registry.js +138 -0
- package/tmlpd-pi-extension/dist/providers/registry.js.map +1 -0
- package/tmlpd-pi-extension/dist/routing/advancedRouter.d.ts +68 -0
- package/tmlpd-pi-extension/dist/routing/advancedRouter.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/routing/advancedRouter.js +332 -0
- package/tmlpd-pi-extension/dist/routing/advancedRouter.js.map +1 -0
- package/tmlpd-pi-extension/dist/tools/tmlpdTools.d.ts +101 -0
- package/tmlpd-pi-extension/dist/tools/tmlpdTools.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/tools/tmlpdTools.js +368 -0
- package/tmlpd-pi-extension/dist/tools/tmlpdTools.js.map +1 -0
- package/tmlpd-pi-extension/dist/utils/batchProcessor.d.ts +96 -0
- package/tmlpd-pi-extension/dist/utils/batchProcessor.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/utils/batchProcessor.js +170 -0
- package/tmlpd-pi-extension/dist/utils/batchProcessor.js.map +1 -0
- package/tmlpd-pi-extension/dist/utils/compression.d.ts +61 -0
- package/tmlpd-pi-extension/dist/utils/compression.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/utils/compression.js +281 -0
- package/tmlpd-pi-extension/dist/utils/compression.js.map +1 -0
- package/tmlpd-pi-extension/dist/utils/reliability.d.ts +74 -0
- package/tmlpd-pi-extension/dist/utils/reliability.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/utils/reliability.js +177 -0
- package/tmlpd-pi-extension/dist/utils/reliability.js.map +1 -0
- package/tmlpd-pi-extension/dist/utils/speculativeDecoding.d.ts +117 -0
- package/tmlpd-pi-extension/dist/utils/speculativeDecoding.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/utils/speculativeDecoding.js +246 -0
- package/tmlpd-pi-extension/dist/utils/speculativeDecoding.js.map +1 -0
- package/tmlpd-pi-extension/dist/utils/tokenUtils.d.ts +50 -0
- package/tmlpd-pi-extension/dist/utils/tokenUtils.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/utils/tokenUtils.js +124 -0
- package/tmlpd-pi-extension/dist/utils/tokenUtils.js.map +1 -0
- package/tmlpd-pi-extension/examples/QUICKSTART.md +183 -0
- package/tmlpd-pi-extension/package-lock.json +75 -0
- package/tmlpd-pi-extension/package.json +172 -0
- package/tmlpd-pi-extension/python/examples.py +53 -0
- package/tmlpd-pi-extension/python/integrations.py +330 -0
- package/tmlpd-pi-extension/python/setup.py +28 -0
- package/tmlpd-pi-extension/python/tmlpd.py +369 -0
- package/tmlpd-pi-extension/qna/REDDIT_GAP_ANALYSIS.md +299 -0
- package/tmlpd-pi-extension/qna/TMLPD_QNA.md +751 -0
- package/tmlpd-pi-extension/skill/SKILL.md +238 -0
- package/{src → tmlpd-pi-extension/src}/index.ts +1 -1
- package/tmlpd-pi-extension/tsconfig.json +18 -0
- package/demo/research-demo.js +0 -266
- package/notebooks/quickstart.ipynb +0 -157
- package/rust/tmlpd.h +0 -268
- package/src/cache/prefixCache.ts +0 -365
- package/src/routing/advancedRouter.ts +0 -406
- package/src/utils/speculativeDecoding.ts +0 -344
- /package/{src → tmlpd-pi-extension/src}/cache/responseCache.ts +0 -0
- /package/{src → tmlpd-pi-extension/src}/cost/costTracker.ts +0 -0
- /package/{src → tmlpd-pi-extension/src}/memory/episodicMemory.ts +0 -0
- /package/{src → tmlpd-pi-extension/src}/orchestration/haloOrchestrator.ts +0 -0
- /package/{src → tmlpd-pi-extension/src}/orchestration/mctsWorkflow.ts +0 -0
- /package/{src → tmlpd-pi-extension/src}/providers/localProvider.ts +0 -0
- /package/{src → tmlpd-pi-extension/src}/providers/registry.ts +0 -0
- /package/{src → tmlpd-pi-extension/src}/tools/tmlpdTools.ts +0 -0
- /package/{src → tmlpd-pi-extension/src}/utils/batchProcessor.ts +0 -0
- /package/{src → tmlpd-pi-extension/src}/utils/compression.ts +0 -0
- /package/{src → tmlpd-pi-extension/src}/utils/reliability.ts +0 -0
- /package/{src → tmlpd-pi-extension/src}/utils/tokenUtils.ts +0 -0
|
@@ -0,0 +1,456 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Phase 4c: Orchestrator Executor
|
|
3
|
+
|
|
4
|
+
Hierarchical task breakdown and delegation.
|
|
5
|
+
Based on agent orchestration patterns from arXiv:2506.12508 and arXiv:2509.11079.
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
import asyncio
|
|
9
|
+
from typing import Dict, List, Any, Optional
|
|
10
|
+
from datetime import datetime
|
|
11
|
+
from ..providers.registry import MultiProviderExecutor
|
|
12
|
+
from ..skills.skill_manager import SkillManager
|
|
13
|
+
from .chaining_executor import ChainingExecutor
|
|
14
|
+
from .parallelization_executor import ParallelizationExecutor
|
|
15
|
+
from .advanced_difficulty_classifier import AdvancedDifficultyClassifier
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
class OrchestratorExecutor:
    """
    Hierarchical task breakdown and intelligent delegation.

    Based on agent orchestration patterns (arXiv:2506.12508)
    and difficulty-aware routing (arXiv:2509.11079)

    Features:
    - Automatic task decomposition
    - Intelligent delegation (chain vs parallel)
    - Sub-task coordination
    - Progress tracking
    - Adaptive execution strategy
    """

    def __init__(
        self,
        provider_executor: Optional[MultiProviderExecutor] = None,
        skill_manager: Optional[SkillManager] = None
    ):
        """
        Initialize orchestrator executor.

        Args:
            provider_executor: Multi-provider executor for LLM calls.
                May be None, but direct execution and decomposition
                raise ValueError when it is missing.
            skill_manager: Skill manager for loading skills; forwarded
                to the sub-executors unchanged.
        """
        self.provider_executor = provider_executor
        self.skill_manager = skill_manager
        # Difficulty classifier drives strategy selection in execute().
        self.difficulty_classifier = AdvancedDifficultyClassifier()

        # Create sub-executors; both share this instance's provider
        # executor and skill manager.
        self.chaining_executor = ChainingExecutor(provider_executor, skill_manager)
        self.parallel_executor = ParallelizationExecutor(provider_executor, skill_manager)
|
|
52
|
+
|
|
53
|
+
async def execute(
|
|
54
|
+
self,
|
|
55
|
+
task: Dict[str, Any],
|
|
56
|
+
strategy: str = "auto",
|
|
57
|
+
max_depth: int = 3
|
|
58
|
+
) -> Dict[str, Any]:
|
|
59
|
+
"""
|
|
60
|
+
Execute task with hierarchical breakdown and delegation.
|
|
61
|
+
|
|
62
|
+
Args:
|
|
63
|
+
task: Task to execute
|
|
64
|
+
strategy: Execution strategy ("auto", "decompose", "direct", "parallel", "chain")
|
|
65
|
+
max_depth: Maximum decomposition depth
|
|
66
|
+
|
|
67
|
+
Returns:
|
|
68
|
+
Execution result
|
|
69
|
+
"""
|
|
70
|
+
start_time = datetime.now()
|
|
71
|
+
|
|
72
|
+
print(f"\n🎯 Orchestrator: Executing task")
|
|
73
|
+
print(f" Description: {task.get('description', 'No description')[:100]}...")
|
|
74
|
+
|
|
75
|
+
# Step 1: Classify task
|
|
76
|
+
classification = self.difficulty_classifier.classify_difficulty(task)
|
|
77
|
+
|
|
78
|
+
print(f" Difficulty: {classification['difficulty']} (score: {classification['score']:.1f})")
|
|
79
|
+
print(f" Confidence: {classification['confidence']*100:.0f}%")
|
|
80
|
+
|
|
81
|
+
# Step 2: Determine execution strategy
|
|
82
|
+
if strategy == "auto":
|
|
83
|
+
strategy = self._determine_strategy(classification, task)
|
|
84
|
+
|
|
85
|
+
print(f" Strategy: {strategy}")
|
|
86
|
+
|
|
87
|
+
# Step 3: Execute based on strategy
|
|
88
|
+
if strategy == "direct":
|
|
89
|
+
result = await self._execute_direct(task)
|
|
90
|
+
|
|
91
|
+
elif strategy == "decompose":
|
|
92
|
+
result = await self._execute_with_decomposition(task, max_depth)
|
|
93
|
+
|
|
94
|
+
elif strategy == "chain":
|
|
95
|
+
result = await self._execute_chain(task)
|
|
96
|
+
|
|
97
|
+
elif strategy == "parallel":
|
|
98
|
+
result = await self._execute_parallel(task)
|
|
99
|
+
|
|
100
|
+
else:
|
|
101
|
+
raise ValueError(f"Unknown strategy: {strategy}")
|
|
102
|
+
|
|
103
|
+
end_time = datetime.now()
|
|
104
|
+
execution_time = (end_time - start_time).total_seconds()
|
|
105
|
+
|
|
106
|
+
# Add metadata
|
|
107
|
+
result["orchestrator_metadata"] = {
|
|
108
|
+
"difficulty": classification["difficulty"],
|
|
109
|
+
"difficulty_score": classification["score"],
|
|
110
|
+
"strategy": strategy,
|
|
111
|
+
"execution_time": execution_time,
|
|
112
|
+
"started_at": start_time.isoformat(),
|
|
113
|
+
"completed_at": end_time.isoformat()
|
|
114
|
+
}
|
|
115
|
+
|
|
116
|
+
return result
|
|
117
|
+
|
|
118
|
+
def _determine_strategy(
|
|
119
|
+
self,
|
|
120
|
+
classification: Dict[str, Any],
|
|
121
|
+
task: Dict[str, Any]
|
|
122
|
+
) -> str:
|
|
123
|
+
"""
|
|
124
|
+
Determine optimal execution strategy.
|
|
125
|
+
|
|
126
|
+
Based on difficulty, task type, and requirements.
|
|
127
|
+
"""
|
|
128
|
+
difficulty = classification["difficulty"]
|
|
129
|
+
|
|
130
|
+
# TRIVIAL tasks: direct execution
|
|
131
|
+
if difficulty == "TRIVIAL":
|
|
132
|
+
return "direct"
|
|
133
|
+
|
|
134
|
+
# SIMPLE tasks: direct or chain if multi-step
|
|
135
|
+
elif difficulty == "SIMPLE":
|
|
136
|
+
# Check if multi-step
|
|
137
|
+
if classification["breakdown"]["multi_step"] > 5:
|
|
138
|
+
return "chain"
|
|
139
|
+
return "direct"
|
|
140
|
+
|
|
141
|
+
# MEDIUM tasks: decompose or chain
|
|
142
|
+
elif difficulty == "MEDIUM":
|
|
143
|
+
# Check for explicit dependencies
|
|
144
|
+
if "depends_on" in task or "dependencies" in task:
|
|
145
|
+
return "chain"
|
|
146
|
+
return "decompose"
|
|
147
|
+
|
|
148
|
+
# COMPLEX tasks: decompose
|
|
149
|
+
elif difficulty == "COMPLEX":
|
|
150
|
+
# Check for parallelizable sub-tasks
|
|
151
|
+
description = task.get("description", "").lower()
|
|
152
|
+
parallel_keywords = ["simultaneously", "concurrent", "parallel", "multiple"]
|
|
153
|
+
if any(kw in description for kw in parallel_keywords):
|
|
154
|
+
return "parallel"
|
|
155
|
+
return "decompose"
|
|
156
|
+
|
|
157
|
+
# EXPERT tasks: always decompose
|
|
158
|
+
else:
|
|
159
|
+
return "decompose"
|
|
160
|
+
|
|
161
|
+
async def _execute_direct(self, task: Dict[str, Any]) -> Dict[str, Any]:
|
|
162
|
+
"""Execute task directly without decomposition"""
|
|
163
|
+
print(" ⚡ Direct execution (no decomposition)")
|
|
164
|
+
|
|
165
|
+
if not self.provider_executor:
|
|
166
|
+
raise ValueError("provider_executor required for direct execution")
|
|
167
|
+
|
|
168
|
+
response = await self.provider_executor.execute(task)
|
|
169
|
+
|
|
170
|
+
return {
|
|
171
|
+
"success": response.success,
|
|
172
|
+
"result": response.content if response.success else response.error,
|
|
173
|
+
"tokens_used": response.tokens_used,
|
|
174
|
+
"cost": response.cost,
|
|
175
|
+
"provider": response.provider,
|
|
176
|
+
"model": response.model,
|
|
177
|
+
"decomposition": "none"
|
|
178
|
+
}
|
|
179
|
+
|
|
180
|
+
async def _execute_with_decomposition(
|
|
181
|
+
self,
|
|
182
|
+
task: Dict[str, Any],
|
|
183
|
+
max_depth: int
|
|
184
|
+
) -> Dict[str, Any]:
|
|
185
|
+
"""Execute task with hierarchical decomposition"""
|
|
186
|
+
print(f" 🧩 Decomposing task (max depth: {max_depth})")
|
|
187
|
+
|
|
188
|
+
# Use LLM to decompose task
|
|
189
|
+
sub_tasks = await self._decompose_task(task, max_depth)
|
|
190
|
+
|
|
191
|
+
if not sub_tasks:
|
|
192
|
+
# Fall back to direct execution
|
|
193
|
+
print(" ⚠️ No sub-tasks generated, falling back to direct execution")
|
|
194
|
+
return await self._execute_direct(task)
|
|
195
|
+
|
|
196
|
+
print(f" ✅ Generated {len(sub_tasks)} sub-tasks")
|
|
197
|
+
|
|
198
|
+
# Determine if sub-tasks can run in parallel
|
|
199
|
+
can_parallelize = self._can_parallelize(sub_tasks)
|
|
200
|
+
|
|
201
|
+
if can_parallelize:
|
|
202
|
+
print(" 🚀 Executing sub-tasks in parallel")
|
|
203
|
+
result = await self.parallel_executor.execute_parallel(sub_tasks)
|
|
204
|
+
else:
|
|
205
|
+
print(" 🔗 Executing sub-tasks in chain")
|
|
206
|
+
# Convert sub-tasks to chain steps
|
|
207
|
+
steps = [
|
|
208
|
+
{
|
|
209
|
+
"name": t.get("id", f"step_{i+1}"),
|
|
210
|
+
"type": "llm",
|
|
211
|
+
"prompt": t["description"],
|
|
212
|
+
"execution_params": t
|
|
213
|
+
}
|
|
214
|
+
for i, t in enumerate(sub_tasks)
|
|
215
|
+
]
|
|
216
|
+
result = await self.chaining_executor.execute_chain(steps)
|
|
217
|
+
|
|
218
|
+
result["decomposition"] = "hierarchical"
|
|
219
|
+
result["sub_tasks_count"] = len(sub_tasks)
|
|
220
|
+
result["parallelized": can_parallelize] = can_parallelize
|
|
221
|
+
|
|
222
|
+
return result
|
|
223
|
+
|
|
224
|
+
    async def _decompose_task(
        self,
        task: Dict[str, Any],
        max_depth: int
    ) -> List[Dict[str, Any]]:
        """
        Decompose task into sub-tasks using LLM.

        Builds a decomposition prompt from the task's description,
        requirements, and context, sends it through the provider
        executor, and parses the JSON array out of the reply
        (stripping markdown code fences when present).

        Args:
            task: Task to decompose
            max_depth: Maximum decomposition depth
                NOTE(review): currently unused inside this method —
                confirm whether depth limiting was intended here.

        Returns:
            List of sub-tasks, each with "id", "description",
            "depends_on", and "estimated_difficulty" filled in.
            Returns an empty list on any failure (best-effort: callers
            fall back to direct execution).

        Raises:
            ValueError: If no provider executor was configured.
        """
        if not self.provider_executor:
            raise ValueError("provider_executor required for decomposition")

        # Build decomposition prompt
        prompt = f"""Decompose the following task into 3-7 sub-tasks.

Task: {task.get('description', '')}

Requirements:
{task.get('requirements', 'N/A')}

Context:
{task.get('context', 'N/A')}

Please break this down into specific, actionable sub-tasks. Format your response as a JSON array:

[
{{
"id": "subtask_1",
"description": "Brief description of what to do",
"estimated_difficulty": "TRIVIAL|SIMPLE|MEDIUM|COMPLEX|EXPERT",
"depends_on": [] // IDs of sub-tasks this depends on
}}
]

Ensure sub-tasks are:
- Specific and actionable
- Ordered logically (dependencies in 'depends_on')
- Roughly equal in complexity
- Independent where possible (few dependencies)
"""

        response = await self.provider_executor.execute({
            "description": prompt
        })

        if not response.success:
            print(f" ❌ Decomposition failed: {response.error}")
            return []

        # Parse sub-tasks from response
        import json
        try:
            # Extract JSON from response
            content = response.content.strip()

            # Try to find JSON array: prefer an explicit ```json fence,
            # then any generic ``` fence, else parse the raw content.
            if "```json" in content:
                json_start = content.find("```json") + 7
                json_end = content.find("```", json_start)
                content = content[json_start:json_end].strip()
            elif "```" in content:
                json_start = content.find("```") + 3
                json_end = content.find("```", json_start)
                content = content[json_start:json_end].strip()

            # NOTE(review): if the model copies the example's // comment
            # into its output, strict json.loads will reject it and we
            # fall through to the empty-list fallback below.
            sub_tasks = json.loads(content)

            # Validate and enhance: drop entries with no description,
            # then backfill defaults for the optional fields.
            valid_sub_tasks = []
            for i, sub_task in enumerate(sub_tasks):
                if "description" not in sub_task:
                    continue

                # Add defaults
                sub_task.setdefault("id", f"subtask_{i+1}")
                sub_task.setdefault("depends_on", [])
                sub_task.setdefault("estimated_difficulty", "MEDIUM")

                valid_sub_tasks.append(sub_task)

            return valid_sub_tasks

        # Deliberately broad: any parse/shape problem degrades to "no
        # sub-tasks" rather than crashing the orchestration.
        except Exception as e:
            print(f" ❌ Failed to parse sub-tasks: {e}")
            return []
|
|
315
|
+
|
|
316
|
+
async def _execute_chain(self, task: Dict[str, Any]) -> Dict[str, Any]:
|
|
317
|
+
"""Execute task as a chain"""
|
|
318
|
+
print(" 🔗 Chain execution")
|
|
319
|
+
|
|
320
|
+
# Decompose into chain steps
|
|
321
|
+
sub_tasks = await self._decompose_task(task, max_depth=1)
|
|
322
|
+
|
|
323
|
+
if not sub_tasks:
|
|
324
|
+
return await self._execute_direct(task)
|
|
325
|
+
|
|
326
|
+
# Convert to chain steps
|
|
327
|
+
steps = [
|
|
328
|
+
{
|
|
329
|
+
"name": t.get("id", f"step_{i+1}"),
|
|
330
|
+
"type": "llm",
|
|
331
|
+
"prompt": t["description"],
|
|
332
|
+
"output_key": t.get("id"),
|
|
333
|
+
"execution_params": t
|
|
334
|
+
}
|
|
335
|
+
for i, t in enumerate(sub_tasks)
|
|
336
|
+
]
|
|
337
|
+
|
|
338
|
+
result = await self.chaining_executor.execute_chain(steps)
|
|
339
|
+
|
|
340
|
+
result["decomposition"] = "chain"
|
|
341
|
+
result["sub_tasks_count"] = len(sub_tasks)
|
|
342
|
+
|
|
343
|
+
return result
|
|
344
|
+
|
|
345
|
+
async def _execute_parallel(self, task: Dict[str, Any]) -> Dict[str, Any]:
|
|
346
|
+
"""Execute task as parallel sub-tasks"""
|
|
347
|
+
print(" 🚀 Parallel execution")
|
|
348
|
+
|
|
349
|
+
# Decompose into parallel tasks
|
|
350
|
+
sub_tasks = await self._decompose_task(task, max_depth=1)
|
|
351
|
+
|
|
352
|
+
if not sub_tasks:
|
|
353
|
+
return await self._execute_direct(task)
|
|
354
|
+
|
|
355
|
+
# Add type and convert format
|
|
356
|
+
for sub_task in sub_tasks:
|
|
357
|
+
sub_task["type"] = "llm"
|
|
358
|
+
sub_task["prompt"] = sub_task["description"]
|
|
359
|
+
sub_task.setdefault("id", f"task_{sub_tasks.index(sub_task)+1}")
|
|
360
|
+
|
|
361
|
+
result = await self.parallel_executor.execute_parallel(sub_tasks)
|
|
362
|
+
|
|
363
|
+
result["decomposition"] = "parallel"
|
|
364
|
+
result["sub_tasks_count"] = len(sub_tasks)
|
|
365
|
+
|
|
366
|
+
return result
|
|
367
|
+
|
|
368
|
+
def _can_parallelize(self, sub_tasks: List[Dict[str, Any]]) -> bool:
|
|
369
|
+
"""
|
|
370
|
+
Determine if sub-tasks can run in parallel.
|
|
371
|
+
|
|
372
|
+
Checks for dependencies between sub-tasks.
|
|
373
|
+
"""
|
|
374
|
+
# If any sub-task has dependencies, can't parallelize
|
|
375
|
+
for sub_task in sub_tasks:
|
|
376
|
+
if sub_task.get("depends_on"):
|
|
377
|
+
return False
|
|
378
|
+
|
|
379
|
+
# If no dependencies, can parallelize
|
|
380
|
+
return len(sub_tasks) > 1
|
|
381
|
+
|
|
382
|
+
def explain_plan(
|
|
383
|
+
self,
|
|
384
|
+
task: Dict[str, Any],
|
|
385
|
+
strategy: str = "auto"
|
|
386
|
+
) -> Dict[str, Any]:
|
|
387
|
+
"""
|
|
388
|
+
Explain execution plan without running.
|
|
389
|
+
|
|
390
|
+
Returns detailed information about how the task will be executed.
|
|
391
|
+
"""
|
|
392
|
+
# Classify task
|
|
393
|
+
classification = self.difficulty_classifier.classify_difficulty(task)
|
|
394
|
+
|
|
395
|
+
# Determine strategy
|
|
396
|
+
if strategy == "auto":
|
|
397
|
+
strategy = self._determine_strategy(classification, task)
|
|
398
|
+
|
|
399
|
+
# Build explanation
|
|
400
|
+
explanation = {
|
|
401
|
+
"task": task.get("description", "")[:200],
|
|
402
|
+
"classification": {
|
|
403
|
+
"difficulty": classification["difficulty"],
|
|
404
|
+
"score": classification["score"],
|
|
405
|
+
"confidence": classification["confidence"],
|
|
406
|
+
"breakdown": classification["breakdown"]
|
|
407
|
+
},
|
|
408
|
+
"strategy": strategy,
|
|
409
|
+
"reasoning": self._explain_strategy_choice(strategy, classification),
|
|
410
|
+
"expected_execution": {
|
|
411
|
+
"decomposition": "none" if strategy == "direct" else "hierarchical",
|
|
412
|
+
"parallelization_possible": strategy in ["parallel", "decompose"],
|
|
413
|
+
"estimated_subtasks": self._estimate_subtasks(classification)
|
|
414
|
+
}
|
|
415
|
+
}
|
|
416
|
+
|
|
417
|
+
return explanation
|
|
418
|
+
|
|
419
|
+
def _explain_strategy_choice(
|
|
420
|
+
self,
|
|
421
|
+
strategy: str,
|
|
422
|
+
classification: Dict[str, Any]
|
|
423
|
+
) -> str:
|
|
424
|
+
"""Explain why a strategy was chosen"""
|
|
425
|
+
difficulty = classification["difficulty"]
|
|
426
|
+
|
|
427
|
+
reasoning = f"Task is '{difficulty}' difficulty (score: {classification['score']:.1f}). "
|
|
428
|
+
|
|
429
|
+
if strategy == "direct":
|
|
430
|
+
reasoning += "Simple enough for direct execution without decomposition."
|
|
431
|
+
|
|
432
|
+
elif strategy == "chain":
|
|
433
|
+
reasoning += "Multi-step task requiring sequential execution."
|
|
434
|
+
|
|
435
|
+
elif strategy == "parallel":
|
|
436
|
+
reasoning += "Can be broken into independent sub-tasks for parallel execution."
|
|
437
|
+
|
|
438
|
+
elif strategy == "decompose":
|
|
439
|
+
reasoning += "Complex task requiring hierarchical breakdown and delegation."
|
|
440
|
+
|
|
441
|
+
return reasoning
|
|
442
|
+
|
|
443
|
+
def _estimate_subtasks(self, classification: Dict[str, Any]) -> int:
|
|
444
|
+
"""Estimate number of sub-tasks based on difficulty"""
|
|
445
|
+
difficulty = classification["difficulty"]
|
|
446
|
+
|
|
447
|
+
if difficulty == "TRIVIAL":
|
|
448
|
+
return 1
|
|
449
|
+
elif difficulty == "SIMPLE":
|
|
450
|
+
return 2
|
|
451
|
+
elif difficulty == "MEDIUM":
|
|
452
|
+
return 3
|
|
453
|
+
elif difficulty == "COMPLEX":
|
|
454
|
+
return 5
|
|
455
|
+
else: # EXPERT
|
|
456
|
+
return 7
|