adaptive-memory-multi-model-router 1.2.2 → 1.3.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/README.md +146 -66
- package/dist/index.d.ts +1 -1
- package/dist/index.js +1 -1
- package/dist/integrations/airtable.js +20 -0
- package/dist/integrations/discord.js +18 -0
- package/dist/integrations/github.js +23 -0
- package/dist/integrations/gmail.js +19 -0
- package/dist/integrations/google-calendar.js +18 -0
- package/dist/integrations/index.js +61 -0
- package/dist/integrations/jira.js +21 -0
- package/dist/integrations/linear.js +19 -0
- package/dist/integrations/notion.js +19 -0
- package/dist/integrations/slack.js +18 -0
- package/dist/integrations/telegram.js +19 -0
- package/dist/providers/registry.js +7 -3
- package/docs/ARCHITECTURAL-IMPROVEMENTS-2025.md +1391 -0
- package/docs/ARCHITECTURAL-IMPROVEMENTS-REVISED-2025.md +1051 -0
- package/docs/CONFIGURATION.md +476 -0
- package/docs/COUNCIL_DECISION.json +308 -0
- package/docs/COUNCIL_SUMMARY.md +265 -0
- package/docs/COUNCIL_V2.2_DECISION.md +416 -0
- package/docs/IMPROVEMENT_ROADMAP.md +515 -0
- package/docs/LLM_COUNCIL_DECISION.md +508 -0
- package/docs/QUICK_START_VISIBILITY.md +782 -0
- package/docs/REDDIT_GAP_ANALYSIS.md +299 -0
- package/docs/RESEARCH_BACKED_IMPROVEMENTS.md +1180 -0
- package/docs/TMLPD_QNA.md +751 -0
- package/docs/TMLPD_V2.1_COMPLETE.md +763 -0
- package/docs/TMLPD_V2.2_RESEARCH_ROADMAP.md +754 -0
- package/docs/V2.2_IMPLEMENTATION_COMPLETE.md +446 -0
- package/docs/V2_IMPLEMENTATION_GUIDE.md +388 -0
- package/docs/VISIBILITY_ADOPTION_PLAN.md +1005 -0
- package/docs/launch-content/LAUNCH_EXECUTION_CHECKLIST.md +421 -0
- package/docs/launch-content/README.md +457 -0
- package/docs/launch-content/assets/cost_comparison_100_tasks.png +0 -0
- package/docs/launch-content/assets/cumulative_savings.png +0 -0
- package/docs/launch-content/assets/parallel_speedup.png +0 -0
- package/docs/launch-content/assets/provider_pricing_comparison.png +0 -0
- package/docs/launch-content/assets/task_breakdown_comparison.png +0 -0
- package/docs/launch-content/generate_charts.py +313 -0
- package/docs/launch-content/hn_show_post.md +139 -0
- package/docs/launch-content/partner_outreach_templates.md +745 -0
- package/docs/launch-content/reddit_posts.md +467 -0
- package/docs/launch-content/twitter_thread.txt +460 -0
- package/examples/QUICKSTART.md +1 -1
- package/openclaw-alexa-bridge/ALL_REMAINING_FIXES_PLAN.md +313 -0
- package/openclaw-alexa-bridge/REMAINING_FIXES_SUMMARY.md +277 -0
- package/openclaw-alexa-bridge/src/alexa_handler_no_tmlpd.js +1234 -0
- package/openclaw-alexa-bridge/test_fixes.js +77 -0
- package/package.json +120 -29
- package/package.json.tmp +0 -0
- package/qna/TMLPD_QNA.md +3 -3
- package/skill/SKILL.md +2 -2
- package/src/__tests__/integration/tmpld_integration.test.py +540 -0
- package/src/agents/skill_enhanced_agent.py +318 -0
- package/src/memory/__init__.py +15 -0
- package/src/memory/agentic_memory.py +353 -0
- package/src/memory/semantic_memory.py +444 -0
- package/src/memory/simple_memory.py +466 -0
- package/src/memory/working_memory.py +447 -0
- package/src/orchestration/__init__.py +52 -0
- package/src/orchestration/execution_engine.py +353 -0
- package/src/orchestration/halo_orchestrator.py +367 -0
- package/src/orchestration/mcts_workflow.py +498 -0
- package/src/orchestration/role_assigner.py +473 -0
- package/src/orchestration/task_planner.py +522 -0
- package/src/providers/__init__.py +67 -0
- package/src/providers/anthropic.py +304 -0
- package/src/providers/base.py +241 -0
- package/src/providers/cerebras.py +373 -0
- package/src/providers/registry.py +476 -0
- package/src/routing/__init__.py +30 -0
- package/src/routing/universal_router.py +621 -0
- package/src/skills/TMLPD-QUICKREF.md +210 -0
- package/src/skills/TMLPD-SETUP-SUMMARY.md +157 -0
- package/src/skills/TMLPD.md +540 -0
- package/src/skills/__tests__/skill_manager.test.ts +328 -0
- package/src/skills/skill_manager.py +385 -0
- package/src/skills/test-tmlpd.sh +108 -0
- package/src/skills/tmlpd-category.yaml +67 -0
- package/src/skills/tmlpd-monitoring.yaml +188 -0
- package/src/skills/tmlpd-phase.yaml +132 -0
- package/src/state/__init__.py +17 -0
- package/src/state/simple_checkpoint.py +508 -0
- package/src/tmlpd_agent.py +464 -0
- package/src/tmpld_v2.py +427 -0
- package/src/workflows/__init__.py +18 -0
- package/src/workflows/advanced_difficulty_classifier.py +377 -0
- package/src/workflows/chaining_executor.py +417 -0
- package/src/workflows/difficulty_integration.py +209 -0
- package/src/workflows/orchestrator.py +469 -0
- package/src/workflows/orchestrator_executor.py +456 -0
- package/src/workflows/parallelization_executor.py +382 -0
- package/src/workflows/router.py +311 -0
- package/test_integration_simple.py +86 -0
- package/test_mcts_workflow.py +150 -0
- package/test_templd_integration.py +262 -0
- package/test_universal_router.py +275 -0
- package/tmlpd-pi-extension/README.md +36 -0
- package/tmlpd-pi-extension/dist/cache/prefixCache.d.ts +114 -0
- package/tmlpd-pi-extension/dist/cache/prefixCache.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/cache/prefixCache.js +285 -0
- package/tmlpd-pi-extension/dist/cache/prefixCache.js.map +1 -0
- package/tmlpd-pi-extension/dist/cache/responseCache.d.ts +58 -0
- package/tmlpd-pi-extension/dist/cache/responseCache.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/cache/responseCache.js +153 -0
- package/tmlpd-pi-extension/dist/cache/responseCache.js.map +1 -0
- package/tmlpd-pi-extension/dist/cli.js +59 -0
- package/tmlpd-pi-extension/dist/cost/costTracker.d.ts +95 -0
- package/tmlpd-pi-extension/dist/cost/costTracker.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/cost/costTracker.js +240 -0
- package/tmlpd-pi-extension/dist/cost/costTracker.js.map +1 -0
- package/tmlpd-pi-extension/dist/index.d.ts +723 -0
- package/tmlpd-pi-extension/dist/index.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/index.js +239 -0
- package/tmlpd-pi-extension/dist/index.js.map +1 -0
- package/tmlpd-pi-extension/dist/memory/episodicMemory.d.ts +82 -0
- package/tmlpd-pi-extension/dist/memory/episodicMemory.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/memory/episodicMemory.js +145 -0
- package/tmlpd-pi-extension/dist/memory/episodicMemory.js.map +1 -0
- package/tmlpd-pi-extension/dist/orchestration/haloOrchestrator.d.ts +102 -0
- package/tmlpd-pi-extension/dist/orchestration/haloOrchestrator.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/orchestration/haloOrchestrator.js +207 -0
- package/tmlpd-pi-extension/dist/orchestration/haloOrchestrator.js.map +1 -0
- package/tmlpd-pi-extension/dist/orchestration/mctsWorkflow.d.ts +85 -0
- package/tmlpd-pi-extension/dist/orchestration/mctsWorkflow.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/orchestration/mctsWorkflow.js +210 -0
- package/tmlpd-pi-extension/dist/orchestration/mctsWorkflow.js.map +1 -0
- package/tmlpd-pi-extension/dist/providers/localProvider.d.ts +102 -0
- package/tmlpd-pi-extension/dist/providers/localProvider.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/providers/localProvider.js +338 -0
- package/tmlpd-pi-extension/dist/providers/localProvider.js.map +1 -0
- package/tmlpd-pi-extension/dist/providers/registry.d.ts +55 -0
- package/tmlpd-pi-extension/dist/providers/registry.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/providers/registry.js +138 -0
- package/tmlpd-pi-extension/dist/providers/registry.js.map +1 -0
- package/tmlpd-pi-extension/dist/routing/advancedRouter.d.ts +68 -0
- package/tmlpd-pi-extension/dist/routing/advancedRouter.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/routing/advancedRouter.js +332 -0
- package/tmlpd-pi-extension/dist/routing/advancedRouter.js.map +1 -0
- package/tmlpd-pi-extension/dist/tools/tmlpdTools.d.ts +101 -0
- package/tmlpd-pi-extension/dist/tools/tmlpdTools.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/tools/tmlpdTools.js +368 -0
- package/tmlpd-pi-extension/dist/tools/tmlpdTools.js.map +1 -0
- package/tmlpd-pi-extension/dist/utils/batchProcessor.d.ts +96 -0
- package/tmlpd-pi-extension/dist/utils/batchProcessor.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/utils/batchProcessor.js +170 -0
- package/tmlpd-pi-extension/dist/utils/batchProcessor.js.map +1 -0
- package/tmlpd-pi-extension/dist/utils/compression.d.ts +61 -0
- package/tmlpd-pi-extension/dist/utils/compression.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/utils/compression.js +281 -0
- package/tmlpd-pi-extension/dist/utils/compression.js.map +1 -0
- package/tmlpd-pi-extension/dist/utils/reliability.d.ts +74 -0
- package/tmlpd-pi-extension/dist/utils/reliability.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/utils/reliability.js +177 -0
- package/tmlpd-pi-extension/dist/utils/reliability.js.map +1 -0
- package/tmlpd-pi-extension/dist/utils/speculativeDecoding.d.ts +117 -0
- package/tmlpd-pi-extension/dist/utils/speculativeDecoding.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/utils/speculativeDecoding.js +246 -0
- package/tmlpd-pi-extension/dist/utils/speculativeDecoding.js.map +1 -0
- package/tmlpd-pi-extension/dist/utils/tokenUtils.d.ts +50 -0
- package/tmlpd-pi-extension/dist/utils/tokenUtils.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/utils/tokenUtils.js +124 -0
- package/tmlpd-pi-extension/dist/utils/tokenUtils.js.map +1 -0
- package/tmlpd-pi-extension/examples/QUICKSTART.md +183 -0
- package/tmlpd-pi-extension/package-lock.json +75 -0
- package/tmlpd-pi-extension/package.json +172 -0
- package/tmlpd-pi-extension/python/examples.py +53 -0
- package/tmlpd-pi-extension/python/integrations.py +330 -0
- package/tmlpd-pi-extension/python/setup.py +28 -0
- package/tmlpd-pi-extension/python/tmlpd.py +369 -0
- package/tmlpd-pi-extension/qna/REDDIT_GAP_ANALYSIS.md +299 -0
- package/tmlpd-pi-extension/qna/TMLPD_QNA.md +751 -0
- package/tmlpd-pi-extension/skill/SKILL.md +238 -0
- package/{src → tmlpd-pi-extension/src}/index.ts +1 -1
- package/tmlpd-pi-extension/tsconfig.json +18 -0
- package/demo/research-demo.js +0 -266
- package/notebooks/quickstart.ipynb +0 -157
- package/rust/tmlpd.h +0 -268
- package/src/cache/prefixCache.ts +0 -365
- package/src/routing/advancedRouter.ts +0 -406
- package/src/utils/speculativeDecoding.ts +0 -344
- /package/{src → tmlpd-pi-extension/src}/cache/responseCache.ts +0 -0
- /package/{src → tmlpd-pi-extension/src}/cost/costTracker.ts +0 -0
- /package/{src → tmlpd-pi-extension/src}/memory/episodicMemory.ts +0 -0
- /package/{src → tmlpd-pi-extension/src}/orchestration/haloOrchestrator.ts +0 -0
- /package/{src → tmlpd-pi-extension/src}/orchestration/mctsWorkflow.ts +0 -0
- /package/{src → tmlpd-pi-extension/src}/providers/localProvider.ts +0 -0
- /package/{src → tmlpd-pi-extension/src}/providers/registry.ts +0 -0
- /package/{src → tmlpd-pi-extension/src}/tools/tmlpdTools.ts +0 -0
- /package/{src → tmlpd-pi-extension/src}/utils/batchProcessor.ts +0 -0
- /package/{src → tmlpd-pi-extension/src}/utils/compression.ts +0 -0
- /package/{src → tmlpd-pi-extension/src}/utils/reliability.ts +0 -0
- /package/{src → tmlpd-pi-extension/src}/utils/tokenUtils.ts +0 -0
package/src/orchestration/task_planner.py
@@ -0,0 +1,522 @@
"""
TaskPlanner - High-Level Task Decomposition

Based on arXiv:2505.13516 (HALO) and arXiv:2506.12508v3 (AgentOrchestra)

This module implements the first tier of HALO orchestration:
- Decomposes complex tasks into subtasks
- Identifies dependencies between subtasks
- Estimates difficulty and resource requirements
- Creates execution DAG (Directed Acyclic Graph)
"""

import asyncio
from typing import Dict, List, Any, Optional, Set
from dataclasses import dataclass, field
from enum import Enum
import json
from datetime import datetime


class TaskType(Enum):
    """Types of tasks for specialized routing"""
    PLANNING = "planning"      # High-level strategy
    CODING = "coding"          # Implementation/Writing
    ANALYSIS = "analysis"      # Data processing/reasoning
    RESEARCH = "research"      # Information gathering
    TESTING = "testing"        # Verification/Validation
    DEPLOYMENT = "deployment"  # Release/Infrastructure


@dataclass
class SubTask:
    """Individual subtask in the decomposition"""
    id: str
    description: str
    task_type: TaskType
    difficulty: float  # 0-100
    estimated_duration_seconds: int
    dependencies: List[str] = field(default_factory=list)
    required_capabilities: List[str] = field(default_factory=list)
    context: Dict[str, Any] = field(default_factory=dict)

    def to_dict(self) -> Dict:
        return {
            "id": self.id,
            "description": self.description,
            "task_type": self.task_type.value,
            "difficulty": self.difficulty,
            "estimated_duration_seconds": self.estimated_duration_seconds,
            "dependencies": self.dependencies,
            "required_capabilities": self.required_capabilities,
            "context": self.context
        }


@dataclass
class TaskDecomposition:
    """Result of task decomposition"""
    original_task: Dict[str, Any]
    subtasks: List[SubTask]
    dependency_graph: Dict[str, List[str]]  # task_id -> dependent task_ids
    execution_order: List[str]  # Topological sort
    estimated_total_duration: int
    metadata: Dict[str, Any] = field(default_factory=dict)


class TaskPlanner:
    """
    High-level task decomposition planner

    Implements Tier 1 of HALO orchestration:
    - Analyzes task complexity
    - Decomposes into manageable subtasks
    - Identifies dependencies and constraints
    - Creates optimal execution order
    """

    def __init__(self, llm_client=None):
        """
        Initialize task planner

        Args:
            llm_client: LLM client for decomposition (if None, uses mock)
        """
        self.llm_client = llm_client
        self.decomposition_history = []

    async def decompose(
        self,
        task: Dict[str, Any],
        max_subtasks: int = 10,
        max_depth: int = 3
    ) -> TaskDecomposition:
        """
        Decompose a complex task into subtasks

        Args:
            task: Original task with 'description' and optional context
            max_subtasks: Maximum number of subtasks to create
            max_depth: Maximum decomposition depth (for recursion)

        Returns:
            TaskDecomposition with subtasks and execution plan
        """
        description = task.get("description", "")
        context = task.get("context", {})

        # Step 1: Analyze task complexity
        complexity_analysis = await self._analyze_complexity(description, context)

        # Step 2: Determine if decomposition is needed
        if complexity_analysis["difficulty_score"] < 40:
            # Simple task - no decomposition needed
            return self._create_simple_decomposition(task, complexity_analysis)

        # Step 3: Decompose into subtasks
        subtasks = await self._generate_subtasks(
            description,
            context,
            complexity_analysis,
            max_subtasks
        )

        # Step 4: Identify dependencies
        dependency_graph = await self._identify_dependencies(subtasks)

        # Step 5: Create execution order (topological sort)
        execution_order = self._topological_sort(subtasks, dependency_graph)

        # Step 6: Estimate total duration
        total_duration = sum(st.estimated_duration_seconds for st in subtasks)

        decomposition = TaskDecomposition(
            original_task=task,
            subtasks=subtasks,
            dependency_graph=dependency_graph,
            execution_order=execution_order,
            estimated_total_duration=total_duration,
            metadata={
                "decomposition_method": "halo_planner",
                "complexity_analysis": complexity_analysis,
                "decomposition_depth": 1,
                "timestamp": datetime.now().isoformat()
            }
        )

        # Log for learning
        self.decomposition_history.append(decomposition)

        return decomposition

    async def _analyze_complexity(
        self,
        description: str,
        context: Dict[str, Any]
    ) -> Dict[str, Any]:
        """
        Analyze task complexity to determine decomposition strategy

        Returns:
            Dict with complexity metrics:
            - difficulty_score: 0-100 overall difficulty
            - num_steps: Estimated number of steps
            - requires_research: Whether external info needed
            - requires_coding: Whether implementation needed
            - domains: List of domains involved
        """
        # Heuristic-based analysis (can be enhanced with LLM)
        text = description.lower()

        # Complexity factors
        factors = {
            "length": len(description.split()),
            "multi_step": any(word in text for word in [
                "then", "after", "next", "finally", "followed by"
            ]),
            "technical": any(word in text for word in [
                "api", "database", "algorithm", "architecture", "system"
            ]),
            "requirements": any(word in text for word in [
                "requirement", "constraint", "must", "should", "specification"
            ]),
            "dependencies": any(word in text for word in [
                "using", "with", "from", "based on", "integrating"
            ]),
            "domain_specific": any(word in text for word in [
                "machine learning", "data science", "web development",
                "mobile", "backend", "frontend"
            ]),
            "ambiguous": any(word in text for word in [
                "maybe", "possibly", "might", "could", "consider"
            ])
        }

        # Calculate difficulty score
        difficulty = 0
        if factors["length"] > 50:
            difficulty += 15
        if factors["multi_step"]:
            difficulty += 15
        if factors["technical"]:
            difficulty += 15
        if factors["requirements"]:
            difficulty += 10
        if factors["dependencies"]:
            difficulty += 10
        if factors["domain_specific"]:
            difficulty += 10
        if factors["ambiguous"]:
            difficulty += 15

        difficulty = min(100, difficulty)

        # Determine characteristics
        num_steps = max(1, difficulty // 20)
        requires_research = any(word in text for word in [
            "research", "find", "investigate", "explore", "lookup"
        ])
        requires_coding = any(word in text for word in [
            "implement", "code", "build", "create", "develop"
        ])

        # Identify domains
        domains = []
        domain_keywords = {
            "web": ["web", "frontend", "backend", "api", "http"],
            "data": ["data", "database", "sql", "query", "analytics"],
            "ml": ["machine learning", "model", "training", "inference"],
            "mobile": ["mobile", "ios", "android", "app"],
            "devops": ["deploy", "infrastructure", "ci/cd", "docker"]
        }

        for domain, keywords in domain_keywords.items():
            if any(keyword in text for keyword in keywords):
                domains.append(domain)

        return {
            "difficulty_score": difficulty,
            "num_steps": num_steps,
            "requires_research": requires_research,
            "requires_coding": requires_coding,
            "domains": domains,
            "factors": factors
        }

    def _create_simple_decomposition(
        self,
        task: Dict[str, Any],
        complexity: Dict[str, Any]
    ) -> TaskDecomposition:
        """Create decomposition for simple tasks (no subtasks)"""
        subtask = SubTask(
            id="task_1",
            description=task.get("description", ""),
            task_type=TaskType.ANALYSIS,
            difficulty=complexity["difficulty_score"],
            estimated_duration_seconds=60,
            dependencies=[],
            required_capabilities=[],
            context=task.get("context", {})
        )

        return TaskDecomposition(
            original_task=task,
            subtasks=[subtask],
            dependency_graph={"task_1": []},
            execution_order=["task_1"],
            estimated_total_duration=60,
            metadata={
                "decomposition_method": "simple",
                "complexity_analysis": complexity,
                "timestamp": datetime.now().isoformat()
            }
        )

    async def _generate_subtasks(
        self,
        description: str,
        context: Dict[str, Any],
        complexity: Dict[str, Any],
        max_subtasks: int
    ) -> List[SubTask]:
        """
        Generate subtasks based on task analysis

        This is a simplified implementation. In production, use LLM to generate
        more sophisticated subtask decompositions.
        """
        subtasks = []

        # Determine task type and create appropriate subtasks
        if complexity["requires_coding"]:
            subtasks = self._create_coding_subtasks(description, context, complexity)
        elif complexity["requires_research"]:
            subtasks = self._create_research_subtasks(description, context, complexity)
        else:
            subtasks = self._create_analysis_subtasks(description, context, complexity)

        # Limit to max_subtasks
        return subtasks[:max_subtasks]

    def _create_coding_subtasks(
        self,
        description: str,
        context: Dict[str, Any],
        complexity: Dict[str, Any]
    ) -> List[SubTask]:
        """Create subtasks for coding tasks"""
        subtasks = []

        # Subtask 1: Planning/Design
        subtasks.append(SubTask(
            id="task_1",
            description=f"Plan architecture for: {description}",
            task_type=TaskType.PLANNING,
            difficulty=complexity["difficulty_score"] * 0.7,
            estimated_duration_seconds=300,
            dependencies=[],
            required_capabilities=["planning", "architecture"],
            context=context
        ))

        # Subtask 2: Implementation
        subtasks.append(SubTask(
            id="task_2",
            description=f"Implement: {description}",
            task_type=TaskType.CODING,
            difficulty=complexity["difficulty_score"],
            estimated_duration_seconds=600,
            dependencies=["task_1"],
            required_capabilities=["coding", "implementation"],
            context=context
        ))

        # Subtask 3: Testing
        subtasks.append(SubTask(
            id="task_3",
            description=f"Test implementation of: {description}",
            task_type=TaskType.TESTING,
            difficulty=complexity["difficulty_score"] * 0.6,
            estimated_duration_seconds=300,
            dependencies=["task_2"],
            required_capabilities=["testing", "validation"],
            context=context
        ))

        return subtasks

    def _create_research_subtasks(
        self,
        description: str,
        context: Dict[str, Any],
        complexity: Dict[str, Any]
    ) -> List[SubTask]:
        """Create subtasks for research tasks"""
        subtasks = []

        # Subtask 1: Initial research
        subtasks.append(SubTask(
            id="task_1",
            description=f"Research: {description}",
            task_type=TaskType.RESEARCH,
            difficulty=complexity["difficulty_score"] * 0.8,
            estimated_duration_seconds=400,
            dependencies=[],
            required_capabilities=["research", "information_gathering"],
            context=context
        ))

        # Subtask 2: Analysis
        subtasks.append(SubTask(
            id="task_2",
            description=f"Analyze research findings for: {description}",
            task_type=TaskType.ANALYSIS,
            difficulty=complexity["difficulty_score"],
            estimated_duration_seconds=300,
            dependencies=["task_1"],
            required_capabilities=["analysis", "synthesis"],
            context=context
        ))

        return subtasks

    def _create_analysis_subtasks(
        self,
        description: str,
        context: Dict[str, Any],
        complexity: Dict[str, Any]
    ) -> List[SubTask]:
        """Create subtasks for analysis tasks"""
        subtasks = []

        # Subtask 1: Understand requirements
        subtasks.append(SubTask(
            id="task_1",
            description=f"Understand requirements for: {description}",
            task_type=TaskType.ANALYSIS,
            difficulty=complexity["difficulty_score"] * 0.5,
            estimated_duration_seconds=200,
            dependencies=[],
            required_capabilities=["analysis"],
            context=context
        ))

        # Subtask 2: Execute analysis
        subtasks.append(SubTask(
            id="task_2",
            description=f"Perform analysis: {description}",
            task_type=TaskType.ANALYSIS,
            difficulty=complexity["difficulty_score"],
            estimated_duration_seconds=400,
            dependencies=["task_1"],
            required_capabilities=["analysis", "reasoning"],
            context=context
        ))

        return subtasks

    async def _identify_dependencies(
        self,
        subtasks: List[SubTask]
    ) -> Dict[str, List[str]]:
        """
        Identify dependencies between subtasks

        Returns:
            Dict mapping task_id -> list of task_ids that depend on it
        """
        dependency_graph = {}

        # Build dependency graph from subtask dependencies
        for subtask in subtasks:
            dependency_graph[subtask.id] = []

        for subtask in subtasks:
            for dep_id in subtask.dependencies:
                if dep_id in dependency_graph:
                    dependency_graph[dep_id].append(subtask.id)

        return dependency_graph

    def _topological_sort(
        self,
        subtasks: List[SubTask],
        dependency_graph: Dict[str, List[str]]
    ) -> List[str]:
        """
        Create topological sort of subtasks for execution order

        Returns:
            List of task IDs in execution order
        """
        # Kahn's algorithm for topological sorting
        in_degree = {task.id: len(task.dependencies) for task in subtasks}
        queue = [task_id for task_id, degree in in_degree.items() if degree == 0]
        execution_order = []

        while queue:
            # Sort queue by difficulty (execute easier tasks first)
            queue.sort(key=lambda tid: next(
                st.difficulty for st in subtasks if st.id == tid
            ))

            task_id = queue.pop(0)
            execution_order.append(task_id)

            # Reduce in-degree for dependent tasks
            for dependent_id in dependency_graph.get(task_id, []):
                in_degree[dependent_id] -= 1
                if in_degree[dependent_id] == 0:
                    queue.append(dependent_id)

        return execution_order

    def get_decomposition_stats(self) -> Dict[str, Any]:
        """Get statistics about decompositions performed"""
        if not self.decomposition_history:
            return {"total_decompositions": 0}

        total_subtasks = sum(len(d.subtasks) for d in self.decomposition_history)
        avg_subtasks = total_subtasks / len(self.decomposition_history)

        return {
            "total_decompositions": len(self.decomposition_history),
            "total_subtasks_created": total_subtasks,
            "average_subtasks_per_decomposition": avg_subtasks,
            "latest_decomposition": self.decomposition_history[-1].metadata
        }


# Example usage
async def main():
    """Example of TaskPlanner usage"""
    planner = TaskPlanner()

    # Simple task
    simple_task = {
        "description": "What is 2+2?",
        "context": {}
    }

    result = await planner.decompose(simple_task)
    print(f"Simple task: {len(result.subtasks)} subtasks")
    print(f"Execution order: {result.execution_order}")

    # Complex task
    complex_task = {
        "description": "Build a REST API with user authentication and database integration",
        "context": {"requirements": ["JWT", "PostgreSQL"]}
    }

    result = await planner.decompose(complex_task)
    print(f"\nComplex task: {len(result.subtasks)} subtasks")
    print(f"Execution order: {result.execution_order}")
    print(f"Total duration: {result.estimated_total_duration}s")

    for subtask in result.subtasks:
        print(f"  - {subtask.id}: {subtask.description} (difficulty: {subtask.difficulty})")


if __name__ == "__main__":
    asyncio.run(main())
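
Worth noting about the heuristic above: scoring is per-factor, not per-keyword. The REST API example in main() earns 15 for the technical factor ("api" and "database" both match, but the factor is a boolean) plus 10 for the dependencies factor ("with"), for a difficulty of 25. That is below the decompose threshold of 40, so as written this "complex" example takes the single-subtask path; a description needs to trip several factor categories (multi-step wording, explicit requirements, ambiguity, and so on) before the planner actually decomposes it.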
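The dependency_graph returned by the planner maps each task id to its dependents, which supports more than the linear execution_order: subtasks with no unmet dependencies can be grouped into waves and run concurrently. A minimal sketch of that grouping, assuming inputs shaped like the planner's output (the level_schedule helper is illustrative, not part of the package):

from typing import Dict, List


def level_schedule(dependents: Dict[str, List[str]],
                   deps: Dict[str, List[str]]) -> List[List[str]]:
    """Group task ids into waves: every task in a wave has all of its
    dependencies satisfied by earlier waves, so each wave can run concurrently.
    dependents matches TaskDecomposition.dependency_graph (id -> dependent ids);
    deps maps each id to its own SubTask.dependencies."""
    in_degree = {tid: len(deps.get(tid, [])) for tid in dependents}
    ready = [tid for tid, d in in_degree.items() if d == 0]
    waves: List[List[str]] = []
    while ready:
        waves.append(sorted(ready))
        nxt: List[str] = []
        for tid in ready:
            for child in dependents.get(tid, []):
                in_degree[child] -= 1
                if in_degree[child] == 0:
                    nxt.append(child)
        ready = nxt
    return waves


# The coding decomposition above is a pure chain, so each wave holds one task;
# a planner emitting independent subtasks would yield wider waves.
dependents = {"task_1": ["task_2"], "task_2": ["task_3"], "task_3": []}
deps = {"task_1": [], "task_2": ["task_1"], "task_3": ["task_2"]}
print(level_schedule(dependents, deps))  # [['task_1'], ['task_2'], ['task_3']]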
package/src/providers/__init__.py
@@ -0,0 +1,67 @@
"""
Multi-Provider System Module

This module provides a unified interface for multiple LLM providers
with health monitoring, intelligent routing, and automatic failover.

Components:
- BaseProvider: Abstract base class for all providers
- Individual Providers: Anthropic, OpenAI, Cerebras, Groq, Together
- ProviderRegistry: Central registry with health monitoring
- IntelligentRouter: Difficulty-aware routing (arXiv:2509.11079)
- MultiProviderExecutor: High-level executor

Usage:
    executor = MultiProviderExecutor()
    await executor.start()

    result = await executor.execute({
        "description": "Create a React component"
    })

    await executor.stop()
"""

from .base import (
    BaseProvider,
    ProviderConfig,
    ProviderHealth,
    ProviderResponse
)

from .anthropic import (
    AnthropicProvider,
    OpenAIProvider
)

from .cerebras import (
    CerebrasProvider,
    GroqProvider,
    TogetherProvider
)

from .registry import (
    ProviderRegistry,
    IntelligentRouter,
    MultiProviderExecutor
)

__all__ = [
    # Base classes
    "BaseProvider",
    "ProviderConfig",
    "ProviderHealth",
    "ProviderResponse",

    # Individual providers
    "AnthropicProvider",
    "OpenAIProvider",
    "CerebrasProvider",
    "GroqProvider",
    "TogetherProvider",

    # Registry and routing
    "ProviderRegistry",
    "IntelligentRouter",
    "MultiProviderExecutor",
]
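
The docstring above names difficulty-aware routing with automatic failover, but the registry internals are not part of this hunk. A minimal sketch of that routing pattern, with assumed difficulty bands and provider names (none of these identifiers are confirmed package API):

import asyncio
from typing import Awaitable, Callable, List, Optional, Tuple

# Assumed bands: fast/cheap providers for easy tasks, stronger providers
# for hard ones. Thresholds and provider order are illustrative only.
ROUTES: List[Tuple[int, List[str]]] = [
    (40, ["cerebras", "groq"]),      # difficulty < 40
    (75, ["together", "openai"]),    # 40 <= difficulty < 75
    (101, ["anthropic", "openai"]),  # difficulty >= 75
]


async def route(difficulty: float, call: Callable[[str], Awaitable[str]]) -> str:
    """Pick the band matching the difficulty score, then try its providers
    in order, falling through to the next one on failure (the failover part)."""
    for ceiling, providers in ROUTES:
        if difficulty < ceiling:
            last_err: Optional[Exception] = None
            for name in providers:
                try:
                    return await call(name)
                except Exception as err:  # provider down, timeout, rate limit...
                    last_err = err
            raise RuntimeError(f"all providers in band failed: {last_err}")
    raise ValueError("difficulty must be in [0, 100]")


async def fake_call(name: str) -> str:
    """Stand-in for a real provider client."""
    if name == "cerebras":
        raise TimeoutError("simulated outage")
    return f"answer from {name}"


print(asyncio.run(route(25, fake_call)))  # failover -> "answer from groq"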