adaptive-memory-multi-model-router 1.2.2 → 1.3.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/README.md +146 -66
- package/dist/index.d.ts +1 -1
- package/dist/index.js +1 -1
- package/dist/integrations/airtable.js +20 -0
- package/dist/integrations/discord.js +18 -0
- package/dist/integrations/github.js +23 -0
- package/dist/integrations/gmail.js +19 -0
- package/dist/integrations/google-calendar.js +18 -0
- package/dist/integrations/index.js +61 -0
- package/dist/integrations/jira.js +21 -0
- package/dist/integrations/linear.js +19 -0
- package/dist/integrations/notion.js +19 -0
- package/dist/integrations/slack.js +18 -0
- package/dist/integrations/telegram.js +19 -0
- package/dist/providers/registry.js +7 -3
- package/docs/ARCHITECTURAL-IMPROVEMENTS-2025.md +1391 -0
- package/docs/ARCHITECTURAL-IMPROVEMENTS-REVISED-2025.md +1051 -0
- package/docs/CONFIGURATION.md +476 -0
- package/docs/COUNCIL_DECISION.json +308 -0
- package/docs/COUNCIL_SUMMARY.md +265 -0
- package/docs/COUNCIL_V2.2_DECISION.md +416 -0
- package/docs/IMPROVEMENT_ROADMAP.md +515 -0
- package/docs/LLM_COUNCIL_DECISION.md +508 -0
- package/docs/QUICK_START_VISIBILITY.md +782 -0
- package/docs/REDDIT_GAP_ANALYSIS.md +299 -0
- package/docs/RESEARCH_BACKED_IMPROVEMENTS.md +1180 -0
- package/docs/TMLPD_QNA.md +751 -0
- package/docs/TMLPD_V2.1_COMPLETE.md +763 -0
- package/docs/TMLPD_V2.2_RESEARCH_ROADMAP.md +754 -0
- package/docs/V2.2_IMPLEMENTATION_COMPLETE.md +446 -0
- package/docs/V2_IMPLEMENTATION_GUIDE.md +388 -0
- package/docs/VISIBILITY_ADOPTION_PLAN.md +1005 -0
- package/docs/launch-content/LAUNCH_EXECUTION_CHECKLIST.md +421 -0
- package/docs/launch-content/README.md +457 -0
- package/docs/launch-content/assets/cost_comparison_100_tasks.png +0 -0
- package/docs/launch-content/assets/cumulative_savings.png +0 -0
- package/docs/launch-content/assets/parallel_speedup.png +0 -0
- package/docs/launch-content/assets/provider_pricing_comparison.png +0 -0
- package/docs/launch-content/assets/task_breakdown_comparison.png +0 -0
- package/docs/launch-content/generate_charts.py +313 -0
- package/docs/launch-content/hn_show_post.md +139 -0
- package/docs/launch-content/partner_outreach_templates.md +745 -0
- package/docs/launch-content/reddit_posts.md +467 -0
- package/docs/launch-content/twitter_thread.txt +460 -0
- package/examples/QUICKSTART.md +1 -1
- package/openclaw-alexa-bridge/ALL_REMAINING_FIXES_PLAN.md +313 -0
- package/openclaw-alexa-bridge/REMAINING_FIXES_SUMMARY.md +277 -0
- package/openclaw-alexa-bridge/src/alexa_handler_no_tmlpd.js +1234 -0
- package/openclaw-alexa-bridge/test_fixes.js +77 -0
- package/package.json +120 -29
- package/package.json.tmp +0 -0
- package/qna/TMLPD_QNA.md +3 -3
- package/skill/SKILL.md +2 -2
- package/src/__tests__/integration/tmpld_integration.test.py +540 -0
- package/src/agents/skill_enhanced_agent.py +318 -0
- package/src/memory/__init__.py +15 -0
- package/src/memory/agentic_memory.py +353 -0
- package/src/memory/semantic_memory.py +444 -0
- package/src/memory/simple_memory.py +466 -0
- package/src/memory/working_memory.py +447 -0
- package/src/orchestration/__init__.py +52 -0
- package/src/orchestration/execution_engine.py +353 -0
- package/src/orchestration/halo_orchestrator.py +367 -0
- package/src/orchestration/mcts_workflow.py +498 -0
- package/src/orchestration/role_assigner.py +473 -0
- package/src/orchestration/task_planner.py +522 -0
- package/src/providers/__init__.py +67 -0
- package/src/providers/anthropic.py +304 -0
- package/src/providers/base.py +241 -0
- package/src/providers/cerebras.py +373 -0
- package/src/providers/registry.py +476 -0
- package/src/routing/__init__.py +30 -0
- package/src/routing/universal_router.py +621 -0
- package/src/skills/TMLPD-QUICKREF.md +210 -0
- package/src/skills/TMLPD-SETUP-SUMMARY.md +157 -0
- package/src/skills/TMLPD.md +540 -0
- package/src/skills/__tests__/skill_manager.test.ts +328 -0
- package/src/skills/skill_manager.py +385 -0
- package/src/skills/test-tmlpd.sh +108 -0
- package/src/skills/tmlpd-category.yaml +67 -0
- package/src/skills/tmlpd-monitoring.yaml +188 -0
- package/src/skills/tmlpd-phase.yaml +132 -0
- package/src/state/__init__.py +17 -0
- package/src/state/simple_checkpoint.py +508 -0
- package/src/tmlpd_agent.py +464 -0
- package/src/tmpld_v2.py +427 -0
- package/src/workflows/__init__.py +18 -0
- package/src/workflows/advanced_difficulty_classifier.py +377 -0
- package/src/workflows/chaining_executor.py +417 -0
- package/src/workflows/difficulty_integration.py +209 -0
- package/src/workflows/orchestrator.py +469 -0
- package/src/workflows/orchestrator_executor.py +456 -0
- package/src/workflows/parallelization_executor.py +382 -0
- package/src/workflows/router.py +311 -0
- package/test_integration_simple.py +86 -0
- package/test_mcts_workflow.py +150 -0
- package/test_templd_integration.py +262 -0
- package/test_universal_router.py +275 -0
- package/tmlpd-pi-extension/README.md +36 -0
- package/tmlpd-pi-extension/dist/cache/prefixCache.d.ts +114 -0
- package/tmlpd-pi-extension/dist/cache/prefixCache.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/cache/prefixCache.js +285 -0
- package/tmlpd-pi-extension/dist/cache/prefixCache.js.map +1 -0
- package/tmlpd-pi-extension/dist/cache/responseCache.d.ts +58 -0
- package/tmlpd-pi-extension/dist/cache/responseCache.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/cache/responseCache.js +153 -0
- package/tmlpd-pi-extension/dist/cache/responseCache.js.map +1 -0
- package/tmlpd-pi-extension/dist/cli.js +59 -0
- package/tmlpd-pi-extension/dist/cost/costTracker.d.ts +95 -0
- package/tmlpd-pi-extension/dist/cost/costTracker.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/cost/costTracker.js +240 -0
- package/tmlpd-pi-extension/dist/cost/costTracker.js.map +1 -0
- package/tmlpd-pi-extension/dist/index.d.ts +723 -0
- package/tmlpd-pi-extension/dist/index.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/index.js +239 -0
- package/tmlpd-pi-extension/dist/index.js.map +1 -0
- package/tmlpd-pi-extension/dist/memory/episodicMemory.d.ts +82 -0
- package/tmlpd-pi-extension/dist/memory/episodicMemory.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/memory/episodicMemory.js +145 -0
- package/tmlpd-pi-extension/dist/memory/episodicMemory.js.map +1 -0
- package/tmlpd-pi-extension/dist/orchestration/haloOrchestrator.d.ts +102 -0
- package/tmlpd-pi-extension/dist/orchestration/haloOrchestrator.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/orchestration/haloOrchestrator.js +207 -0
- package/tmlpd-pi-extension/dist/orchestration/haloOrchestrator.js.map +1 -0
- package/tmlpd-pi-extension/dist/orchestration/mctsWorkflow.d.ts +85 -0
- package/tmlpd-pi-extension/dist/orchestration/mctsWorkflow.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/orchestration/mctsWorkflow.js +210 -0
- package/tmlpd-pi-extension/dist/orchestration/mctsWorkflow.js.map +1 -0
- package/tmlpd-pi-extension/dist/providers/localProvider.d.ts +102 -0
- package/tmlpd-pi-extension/dist/providers/localProvider.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/providers/localProvider.js +338 -0
- package/tmlpd-pi-extension/dist/providers/localProvider.js.map +1 -0
- package/tmlpd-pi-extension/dist/providers/registry.d.ts +55 -0
- package/tmlpd-pi-extension/dist/providers/registry.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/providers/registry.js +138 -0
- package/tmlpd-pi-extension/dist/providers/registry.js.map +1 -0
- package/tmlpd-pi-extension/dist/routing/advancedRouter.d.ts +68 -0
- package/tmlpd-pi-extension/dist/routing/advancedRouter.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/routing/advancedRouter.js +332 -0
- package/tmlpd-pi-extension/dist/routing/advancedRouter.js.map +1 -0
- package/tmlpd-pi-extension/dist/tools/tmlpdTools.d.ts +101 -0
- package/tmlpd-pi-extension/dist/tools/tmlpdTools.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/tools/tmlpdTools.js +368 -0
- package/tmlpd-pi-extension/dist/tools/tmlpdTools.js.map +1 -0
- package/tmlpd-pi-extension/dist/utils/batchProcessor.d.ts +96 -0
- package/tmlpd-pi-extension/dist/utils/batchProcessor.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/utils/batchProcessor.js +170 -0
- package/tmlpd-pi-extension/dist/utils/batchProcessor.js.map +1 -0
- package/tmlpd-pi-extension/dist/utils/compression.d.ts +61 -0
- package/tmlpd-pi-extension/dist/utils/compression.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/utils/compression.js +281 -0
- package/tmlpd-pi-extension/dist/utils/compression.js.map +1 -0
- package/tmlpd-pi-extension/dist/utils/reliability.d.ts +74 -0
- package/tmlpd-pi-extension/dist/utils/reliability.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/utils/reliability.js +177 -0
- package/tmlpd-pi-extension/dist/utils/reliability.js.map +1 -0
- package/tmlpd-pi-extension/dist/utils/speculativeDecoding.d.ts +117 -0
- package/tmlpd-pi-extension/dist/utils/speculativeDecoding.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/utils/speculativeDecoding.js +246 -0
- package/tmlpd-pi-extension/dist/utils/speculativeDecoding.js.map +1 -0
- package/tmlpd-pi-extension/dist/utils/tokenUtils.d.ts +50 -0
- package/tmlpd-pi-extension/dist/utils/tokenUtils.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/utils/tokenUtils.js +124 -0
- package/tmlpd-pi-extension/dist/utils/tokenUtils.js.map +1 -0
- package/tmlpd-pi-extension/examples/QUICKSTART.md +183 -0
- package/tmlpd-pi-extension/package-lock.json +75 -0
- package/tmlpd-pi-extension/package.json +172 -0
- package/tmlpd-pi-extension/python/examples.py +53 -0
- package/tmlpd-pi-extension/python/integrations.py +330 -0
- package/tmlpd-pi-extension/python/setup.py +28 -0
- package/tmlpd-pi-extension/python/tmlpd.py +369 -0
- package/tmlpd-pi-extension/qna/REDDIT_GAP_ANALYSIS.md +299 -0
- package/tmlpd-pi-extension/qna/TMLPD_QNA.md +751 -0
- package/tmlpd-pi-extension/skill/SKILL.md +238 -0
- package/{src → tmlpd-pi-extension/src}/index.ts +1 -1
- package/tmlpd-pi-extension/tsconfig.json +18 -0
- package/demo/research-demo.js +0 -266
- package/notebooks/quickstart.ipynb +0 -157
- package/rust/tmlpd.h +0 -268
- package/src/cache/prefixCache.ts +0 -365
- package/src/routing/advancedRouter.ts +0 -406
- package/src/utils/speculativeDecoding.ts +0 -344
- /package/{src → tmlpd-pi-extension/src}/cache/responseCache.ts +0 -0
- /package/{src → tmlpd-pi-extension/src}/cost/costTracker.ts +0 -0
- /package/{src → tmlpd-pi-extension/src}/memory/episodicMemory.ts +0 -0
- /package/{src → tmlpd-pi-extension/src}/orchestration/haloOrchestrator.ts +0 -0
- /package/{src → tmlpd-pi-extension/src}/orchestration/mctsWorkflow.ts +0 -0
- /package/{src → tmlpd-pi-extension/src}/providers/localProvider.ts +0 -0
- /package/{src → tmlpd-pi-extension/src}/providers/registry.ts +0 -0
- /package/{src → tmlpd-pi-extension/src}/tools/tmlpdTools.ts +0 -0
- /package/{src → tmlpd-pi-extension/src}/utils/batchProcessor.ts +0 -0
- /package/{src → tmlpd-pi-extension/src}/utils/compression.ts +0 -0
- /package/{src → tmlpd-pi-extension/src}/utils/reliability.ts +0 -0
- /package/{src → tmlpd-pi-extension/src}/utils/tokenUtils.ts +0 -0
|
@@ -0,0 +1,469 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Orchestrator-Workers Workflow - Workflow Phase 5 (Optional)
|
|
3
|
+
|
|
4
|
+
Implements dynamic task breakdown and delegation for complex multi-step tasks.
|
|
5
|
+
Only for the 5% case where simple workflows aren't sufficient.
|
|
6
|
+
|
|
7
|
+
When to use:
|
|
8
|
+
- Truly unpredictable tasks
|
|
9
|
+
- Tasks requiring dynamic subtask creation
|
|
10
|
+
- Complex multi-step reasoning
|
|
11
|
+
- Tasks with unknown dependencies
|
|
12
|
+
|
|
13
|
+
Philosophy:
|
|
14
|
+
- Use sparingly (only 5% of cases)
|
|
15
|
+
- Dynamic task breakdown via LLM
|
|
16
|
+
- Skill delegation for subtasks
|
|
17
|
+
- Result synthesis
|
|
18
|
+
"""
|
|
19
|
+
|
|
20
|
+
from typing import Dict, List, Any, Optional
|
|
21
|
+
from pathlib import Path
|
|
22
|
+
import json
|
|
23
|
+
from datetime import datetime
|
|
24
|
+
import asyncio
|
|
25
|
+
|
|
26
|
+
from ..skills.skill_manager import SkillManager
|
|
27
|
+
from ..agents.skill_enhanced_agent import TMLEnhancedAgent
|
|
28
|
+
from ..memory.simple_memory import SimpleProjectMemory
|
|
29
|
+
from ..state.simple_checkpoint import SimpleCheckpoint
|
|
30
|
+
|
|
31
|
+
|
|
32
|
+
class OrchestratorWorkflow:
    """
    Orchestrator-Workers workflow for complex tasks (the ~5% case where
    simple single-agent workflows aren't sufficient).

    Process:
        1. Receive a complex task
        2. Break it down into subtasks (heuristic here; an LLM in production)
        3. Delegate each subtask to a skill-matched agent
        4. Synthesize the subtask results
        5. Return the final result
    """

    def __init__(
        self,
        skills_dir: str = "tmlpd-skills",
        memory_file: str = ".taskmaster/memory.json",
        checkpoint_dir: str = ".taskmaster/checkpoints"
    ):
        """
        Initialize Orchestrator Workflow.

        Args:
            skills_dir: Directory containing skill definitions
            memory_file: Path to the memory file
            checkpoint_dir: Path to the checkpoint directory
        """
        self.skill_manager = SkillManager(skills_dir)
        self.memory = SimpleProjectMemory(memory_file)
        self.checkpoint = SimpleCheckpoint(checkpoint_dir)

        # Lifetime counters; exposed (plus derived metrics) via get_stats().
        self.stats = {
            "tasks_orchestrated": 0,
            "subtasks_created": 0,
            "subtasks_completed": 0,
            "subtasks_failed": 0
        }

    async def execute_task(
        self,
        task: Dict[str, Any],
        max_iterations: int = 10,
        enable_checkpointing: bool = True
    ) -> Dict[str, Any]:
        """
        Execute a complex task using the orchestrator-workers pattern.

        Args:
            task: Complex task to execute (expects at least a "description")
            max_iterations: Maximum number of subtasks to execute; any
                subtasks beyond this limit are silently dropped
            enable_checkpointing: Whether to save checkpoints at each phase

        Returns:
            Final synthesized execution result

        Raises:
            Exception: re-raises any error from breakdown/synthesis after
                recording an error checkpoint (individual subtask failures
                are captured in the result instead of raised).
        """
        task_id = f"orchestrator_{datetime.now().strftime('%Y%m%d_%H%M%S')}"
        self.stats["tasks_orchestrated"] += 1

        # Initial checkpoint
        if enable_checkpointing:
            self.checkpoint.create_checkpoint(
                state={"task": task, "status": "started"},
                name=f"{task_id}_initial"
            )

        try:
            # Step 1: Break down task into subtasks
            subtasks = await self._break_down_task(task)

            # Checkpoint after breakdown
            if enable_checkpointing:
                self.checkpoint.create_checkpoint(
                    state={"subtasks": subtasks, "status": "broken_down"},
                    name=f"{task_id}_broken_down"
                )

            # Step 2: Execute subtasks sequentially, capped at max_iterations.
            results = []
            for i, subtask in enumerate(subtasks[:max_iterations]):
                self.stats["subtasks_created"] += 1

                # Checkpoint before each subtask so a crash can resume here.
                if enable_checkpointing:
                    self.checkpoint.create_checkpoint(
                        state={
                            "current_subtask": i,
                            "subtask": subtask,
                            "results_so_far": results
                        },
                        name=f"{task_id}_before_subtask_{i}"
                    )

                # Execute subtask; a failure is recorded, not fatal, so the
                # remaining subtasks still run.
                try:
                    result = await self._execute_subtask(subtask)
                    results.append({
                        "subtask": subtask,
                        "result": result,
                        "status": "completed"
                    })
                    self.stats["subtasks_completed"] += 1

                except Exception as e:
                    results.append({
                        "subtask": subtask,
                        "error": str(e),
                        "status": "failed"
                    })
                    self.stats["subtasks_failed"] += 1

            # Checkpoint after all subtasks
            if enable_checkpointing:
                self.checkpoint.create_checkpoint(
                    state={"results": results, "status": "subtasks_complete"},
                    name=f"{task_id}_subtasks_complete"
                )

            # Step 3: Synthesize results
            final_result = await self._synthesize_results(task, results)

            # Final checkpoint
            if enable_checkpointing:
                self.checkpoint.create_checkpoint(
                    state={"final_result": final_result, "status": "complete"},
                    name=f"{task_id}_final"
                )

            return final_result

        except Exception as e:
            # Error checkpoint, then propagate to the caller.
            if enable_checkpointing:
                self.checkpoint.create_checkpoint(
                    state={"error": str(e), "status": "failed"},
                    name=f"{task_id}_error"
                )

            raise

    async def _break_down_task(
        self,
        task: Dict[str, Any]
    ) -> List[Dict[str, Any]]:
        """
        Break down a complex task into subtasks.

        Uses a simple heuristic (split on the connector "then"); in
        production this would call an LLM.

        Args:
            task: Complex task (reads "description", "requirements",
                "context")

        Returns:
            List of subtask dicts, each with id/description/requirements/
            context. Always contains at least one entry.
        """
        import re  # local: `re` is not imported at module level

        description = task.get("description", "")

        # FIX: the original lowercased the text for the membership check but
        # split the original-case string, so "Plan Then execute" passed the
        # check yet produced a single unsplit subtask. Split
        # case-insensitively instead, and drop empty fragments.
        parts = [
            part.strip()
            for part in re.split(r" then ", description, flags=re.IGNORECASE)
            if part.strip()
        ]

        if len(parts) > 1:
            return [
                {
                    "id": f"subtask_{i}",
                    "description": part,
                    "requirements": task.get("requirements", ""),
                    "context": task.get("context", "")
                }
                for i, part in enumerate(parts)
            ]

        # Single task (no multi-step connector found).
        return [{
            "id": "subtask_0",
            "description": description,
            "requirements": task.get("requirements", ""),
            "context": task.get("context", "")
        }]

    async def _execute_subtask(
        self,
        subtask: Dict[str, Any]
    ) -> Dict[str, Any]:
        """
        Execute a single subtask with a skill-matched agent.

        Args:
            subtask: Subtask to execute (reads "description")

        Returns:
            Subtask result as returned by the agent (expected to contain
            at least a "success" flag).
        """
        # Find skills relevant to this subtask's description.
        relevant_skills = self.skill_manager.get_relevant_skills(
            subtask["description"],
            top_k=3
        )

        # Check memory for an agent that has done well on similar tasks.
        best_agent = self.memory.get_best_agent_for_task(subtask)

        if best_agent:
            # Reuse the remembered agent identity and its skill set.
            agent = TMLEnhancedAgent(
                agent_id=best_agent["agent_id"],
                provider="anthropic",
                model="claude-sonnet-4",
                assigned_skills=best_agent["skills"]
            )
        else:
            # No history: create a fresh worker with the relevant skills.
            agent = TMLEnhancedAgent(
                agent_id="orchestrator-worker",
                provider="anthropic",
                model="claude-sonnet-4",
                assigned_skills=relevant_skills
            )

        # Execute subtask (TMLEnhancedAgent.execute_task appears synchronous
        # throughout this module — confirm if it ever becomes a coroutine).
        result = agent.execute_task(subtask)

        # Remember successful patterns so future tasks can reuse this agent.
        if result.get("success"):
            self.memory.remember_pattern(
                task=subtask,
                result=result,
                agent_id=agent.agent_id,
                skills_used=relevant_skills
            )

        return result

    async def _synthesize_results(
        self,
        original_task: Dict[str, Any],
        subtask_results: List[Dict[str, Any]]
    ) -> Dict[str, Any]:
        """
        Synthesize subtask results into a final result.

        Args:
            original_task: The original complex task
            subtask_results: Per-subtask entries produced by execute_task

        Returns:
            Synthesized final result; "success" is False if any subtask
            raised or reported success=False.
        """
        # Any subtask that raised during execution?
        failed = [r for r in subtask_results if r["status"] == "failed"]

        if failed:
            return {
                "success": False,
                "error": f"{len(failed)} subtasks failed",
                "failed_subtasks": failed,
                "completed_subtasks": len(subtask_results) - len(failed)
            }

        # All ran to completion — did every one report success?
        all_successful = all(r["result"].get("success", False) for r in subtask_results)

        if not all_successful:
            return {
                "success": False,
                "error": "Some subtasks did not complete successfully",
                "subtask_results": subtask_results
            }

        # Combine outputs in subtask order.
        outputs = [r["result"].get("output", "") for r in subtask_results]

        return {
            "success": True,
            "output": "\n\n".join(outputs),
            "subtask_count": len(subtask_results),
            "original_task": original_task.get("description"),
            "subtask_results": subtask_results
        }

    def get_stats(self) -> Dict[str, Any]:
        """
        Get orchestrator statistics.

        Returns:
            The raw counters plus derived "total_subtasks" and
            "success_rate" (0 when no subtasks have run).
        """
        total_subtasks = (
            self.stats["subtasks_completed"] +
            self.stats["subtasks_failed"]
        )

        success_rate = (
            self.stats["subtasks_completed"] / total_subtasks
            if total_subtasks > 0
            else 0
        )

        return {
            **self.stats,
            "total_subtasks": total_subtasks,
            "success_rate": success_rate
        }
|
|
337
|
+
|
|
338
|
+
|
|
339
|
+
class OrchestratorAgent:
    """
    Convenience class for creating an orchestrator-enhanced agent.

    Combines the orchestrator workflow with memory and checkpointing;
    simple tasks are routed to a base agent, complex tasks (by a
    word-count heuristic) to the orchestrator.
    """

    def __init__(
        self,
        agent_id: str,
        provider: str,
        model: str,
        skills_dir: str = "tmlpd-skills",
        enable_orchestrator: bool = True,
        enable_memory: bool = True,
        enable_checkpointing: bool = True
    ):
        """
        Initialize Orchestrator Agent.

        Args:
            agent_id: Unique agent identifier
            provider: LLM provider
            model: Model name
            skills_dir: Skills directory
            enable_orchestrator: Enable the orchestrator workflow
            enable_memory: Enable pattern memory (only effective when the
                orchestrator is enabled, since memory lives on it)
            enable_checkpointing: Enable checkpointing
        """
        self.agent_id = agent_id
        self.provider = provider
        self.model = model

        self.skill_manager = SkillManager(skills_dir)

        self.base_agent = TMLEnhancedAgent(
            agent_id=agent_id,
            provider=provider,
            model=model,
            skills_dir=skills_dir
        )

        # The orchestrator also owns the memory/checkpoint stores.
        self.orchestrator = None
        if enable_orchestrator:
            self.orchestrator = OrchestratorWorkflow(
                skills_dir=skills_dir,
                memory_file=".taskmaster/memory.json",
                checkpoint_dir=".taskmaster/checkpoints"
            )

        self.enable_memory = enable_memory
        self.enable_checkpointing = enable_checkpointing

    async def execute_task(
        self,
        task: Dict[str, Any],
        force_orchestrator: bool = False
    ) -> Dict[str, Any]:
        """
        Execute a task, using the orchestrator if the task looks complex.

        Args:
            task: Task to execute (reads "description")
            force_orchestrator: Force use of the orchestrator

        Returns:
            Execution result
        """
        # Decide whether to use orchestrator
        use_orchestrator = force_orchestrator

        if not use_orchestrator and self.orchestrator:
            # Simple heuristic: orchestrate descriptions over 50 words.
            description = task.get("description", "")
            use_orchestrator = len(description.split()) > 50

        if use_orchestrator and self.orchestrator:
            # Use orchestrator workflow
            return await self.orchestrator.execute_task(
                task,
                enable_checkpointing=self.enable_checkpointing
            )
        else:
            # Use base agent directly.
            result = self.base_agent.execute_task(task)

            # Remember successful patterns. FIX: memory lives on the
            # orchestrator, which is None when enable_orchestrator=False —
            # the original dereferenced self.orchestrator.memory here and
            # raised AttributeError; guard on it instead.
            if self.enable_memory and self.orchestrator and result.get("success"):
                skills = self.base_agent.get_assigned_skills()
                self.orchestrator.memory.remember_pattern(
                    task=task,
                    result=result,
                    agent_id=self.agent_id,
                    skills_used=skills
                )

            return result

    def get_stats(self) -> Dict[str, Any]:
        """Get agent statistics (plus orchestrator stats when enabled)."""
        stats = {
            "agent_id": self.agent_id,
            "provider": self.provider,
            "model": self.model,
            "assigned_skills": self.base_agent.get_assigned_skills()
        }

        if self.orchestrator:
            stats["orchestrator_stats"] = self.orchestrator.get_stats()

        return stats
|
|
450
|
+
|
|
451
|
+
|
|
452
|
+
# Convenience function for quick orchestrator execution
|
|
453
|
+
|
|
454
|
+
async def orchestrate_task(
    task: Dict[str, Any],
    skills_dir: str = "tmlpd-skills"
) -> Dict[str, Any]:
    """
    One-shot helper: build an OrchestratorWorkflow and run *task* through it.

    Args:
        task: Complex task to orchestrate
        skills_dir: Skills directory

    Returns:
        Final result
    """
    workflow = OrchestratorWorkflow(skills_dir=skills_dir)
    final_result = await workflow.execute_task(task)
    return final_result
|