adaptive-memory-multi-model-router 1.2.2 → 1.3.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/README.md +146 -66
- package/dist/index.d.ts +1 -1
- package/dist/index.js +1 -1
- package/dist/integrations/airtable.js +20 -0
- package/dist/integrations/discord.js +18 -0
- package/dist/integrations/github.js +23 -0
- package/dist/integrations/gmail.js +19 -0
- package/dist/integrations/google-calendar.js +18 -0
- package/dist/integrations/index.js +61 -0
- package/dist/integrations/jira.js +21 -0
- package/dist/integrations/linear.js +19 -0
- package/dist/integrations/notion.js +19 -0
- package/dist/integrations/slack.js +18 -0
- package/dist/integrations/telegram.js +19 -0
- package/dist/providers/registry.js +7 -3
- package/docs/ARCHITECTURAL-IMPROVEMENTS-2025.md +1391 -0
- package/docs/ARCHITECTURAL-IMPROVEMENTS-REVISED-2025.md +1051 -0
- package/docs/CONFIGURATION.md +476 -0
- package/docs/COUNCIL_DECISION.json +308 -0
- package/docs/COUNCIL_SUMMARY.md +265 -0
- package/docs/COUNCIL_V2.2_DECISION.md +416 -0
- package/docs/IMPROVEMENT_ROADMAP.md +515 -0
- package/docs/LLM_COUNCIL_DECISION.md +508 -0
- package/docs/QUICK_START_VISIBILITY.md +782 -0
- package/docs/REDDIT_GAP_ANALYSIS.md +299 -0
- package/docs/RESEARCH_BACKED_IMPROVEMENTS.md +1180 -0
- package/docs/TMLPD_QNA.md +751 -0
- package/docs/TMLPD_V2.1_COMPLETE.md +763 -0
- package/docs/TMLPD_V2.2_RESEARCH_ROADMAP.md +754 -0
- package/docs/V2.2_IMPLEMENTATION_COMPLETE.md +446 -0
- package/docs/V2_IMPLEMENTATION_GUIDE.md +388 -0
- package/docs/VISIBILITY_ADOPTION_PLAN.md +1005 -0
- package/docs/launch-content/LAUNCH_EXECUTION_CHECKLIST.md +421 -0
- package/docs/launch-content/README.md +457 -0
- package/docs/launch-content/assets/cost_comparison_100_tasks.png +0 -0
- package/docs/launch-content/assets/cumulative_savings.png +0 -0
- package/docs/launch-content/assets/parallel_speedup.png +0 -0
- package/docs/launch-content/assets/provider_pricing_comparison.png +0 -0
- package/docs/launch-content/assets/task_breakdown_comparison.png +0 -0
- package/docs/launch-content/generate_charts.py +313 -0
- package/docs/launch-content/hn_show_post.md +139 -0
- package/docs/launch-content/partner_outreach_templates.md +745 -0
- package/docs/launch-content/reddit_posts.md +467 -0
- package/docs/launch-content/twitter_thread.txt +460 -0
- package/examples/QUICKSTART.md +1 -1
- package/openclaw-alexa-bridge/ALL_REMAINING_FIXES_PLAN.md +313 -0
- package/openclaw-alexa-bridge/REMAINING_FIXES_SUMMARY.md +277 -0
- package/openclaw-alexa-bridge/src/alexa_handler_no_tmlpd.js +1234 -0
- package/openclaw-alexa-bridge/test_fixes.js +77 -0
- package/package.json +120 -29
- package/package.json.tmp +0 -0
- package/qna/TMLPD_QNA.md +3 -3
- package/skill/SKILL.md +2 -2
- package/src/__tests__/integration/tmpld_integration.test.py +540 -0
- package/src/agents/skill_enhanced_agent.py +318 -0
- package/src/memory/__init__.py +15 -0
- package/src/memory/agentic_memory.py +353 -0
- package/src/memory/semantic_memory.py +444 -0
- package/src/memory/simple_memory.py +466 -0
- package/src/memory/working_memory.py +447 -0
- package/src/orchestration/__init__.py +52 -0
- package/src/orchestration/execution_engine.py +353 -0
- package/src/orchestration/halo_orchestrator.py +367 -0
- package/src/orchestration/mcts_workflow.py +498 -0
- package/src/orchestration/role_assigner.py +473 -0
- package/src/orchestration/task_planner.py +522 -0
- package/src/providers/__init__.py +67 -0
- package/src/providers/anthropic.py +304 -0
- package/src/providers/base.py +241 -0
- package/src/providers/cerebras.py +373 -0
- package/src/providers/registry.py +476 -0
- package/src/routing/__init__.py +30 -0
- package/src/routing/universal_router.py +621 -0
- package/src/skills/TMLPD-QUICKREF.md +210 -0
- package/src/skills/TMLPD-SETUP-SUMMARY.md +157 -0
- package/src/skills/TMLPD.md +540 -0
- package/src/skills/__tests__/skill_manager.test.ts +328 -0
- package/src/skills/skill_manager.py +385 -0
- package/src/skills/test-tmlpd.sh +108 -0
- package/src/skills/tmlpd-category.yaml +67 -0
- package/src/skills/tmlpd-monitoring.yaml +188 -0
- package/src/skills/tmlpd-phase.yaml +132 -0
- package/src/state/__init__.py +17 -0
- package/src/state/simple_checkpoint.py +508 -0
- package/src/tmlpd_agent.py +464 -0
- package/src/tmpld_v2.py +427 -0
- package/src/workflows/__init__.py +18 -0
- package/src/workflows/advanced_difficulty_classifier.py +377 -0
- package/src/workflows/chaining_executor.py +417 -0
- package/src/workflows/difficulty_integration.py +209 -0
- package/src/workflows/orchestrator.py +469 -0
- package/src/workflows/orchestrator_executor.py +456 -0
- package/src/workflows/parallelization_executor.py +382 -0
- package/src/workflows/router.py +311 -0
- package/test_integration_simple.py +86 -0
- package/test_mcts_workflow.py +150 -0
- package/test_templd_integration.py +262 -0
- package/test_universal_router.py +275 -0
- package/tmlpd-pi-extension/README.md +36 -0
- package/tmlpd-pi-extension/dist/cache/prefixCache.d.ts +114 -0
- package/tmlpd-pi-extension/dist/cache/prefixCache.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/cache/prefixCache.js +285 -0
- package/tmlpd-pi-extension/dist/cache/prefixCache.js.map +1 -0
- package/tmlpd-pi-extension/dist/cache/responseCache.d.ts +58 -0
- package/tmlpd-pi-extension/dist/cache/responseCache.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/cache/responseCache.js +153 -0
- package/tmlpd-pi-extension/dist/cache/responseCache.js.map +1 -0
- package/tmlpd-pi-extension/dist/cli.js +59 -0
- package/tmlpd-pi-extension/dist/cost/costTracker.d.ts +95 -0
- package/tmlpd-pi-extension/dist/cost/costTracker.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/cost/costTracker.js +240 -0
- package/tmlpd-pi-extension/dist/cost/costTracker.js.map +1 -0
- package/tmlpd-pi-extension/dist/index.d.ts +723 -0
- package/tmlpd-pi-extension/dist/index.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/index.js +239 -0
- package/tmlpd-pi-extension/dist/index.js.map +1 -0
- package/tmlpd-pi-extension/dist/memory/episodicMemory.d.ts +82 -0
- package/tmlpd-pi-extension/dist/memory/episodicMemory.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/memory/episodicMemory.js +145 -0
- package/tmlpd-pi-extension/dist/memory/episodicMemory.js.map +1 -0
- package/tmlpd-pi-extension/dist/orchestration/haloOrchestrator.d.ts +102 -0
- package/tmlpd-pi-extension/dist/orchestration/haloOrchestrator.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/orchestration/haloOrchestrator.js +207 -0
- package/tmlpd-pi-extension/dist/orchestration/haloOrchestrator.js.map +1 -0
- package/tmlpd-pi-extension/dist/orchestration/mctsWorkflow.d.ts +85 -0
- package/tmlpd-pi-extension/dist/orchestration/mctsWorkflow.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/orchestration/mctsWorkflow.js +210 -0
- package/tmlpd-pi-extension/dist/orchestration/mctsWorkflow.js.map +1 -0
- package/tmlpd-pi-extension/dist/providers/localProvider.d.ts +102 -0
- package/tmlpd-pi-extension/dist/providers/localProvider.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/providers/localProvider.js +338 -0
- package/tmlpd-pi-extension/dist/providers/localProvider.js.map +1 -0
- package/tmlpd-pi-extension/dist/providers/registry.d.ts +55 -0
- package/tmlpd-pi-extension/dist/providers/registry.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/providers/registry.js +138 -0
- package/tmlpd-pi-extension/dist/providers/registry.js.map +1 -0
- package/tmlpd-pi-extension/dist/routing/advancedRouter.d.ts +68 -0
- package/tmlpd-pi-extension/dist/routing/advancedRouter.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/routing/advancedRouter.js +332 -0
- package/tmlpd-pi-extension/dist/routing/advancedRouter.js.map +1 -0
- package/tmlpd-pi-extension/dist/tools/tmlpdTools.d.ts +101 -0
- package/tmlpd-pi-extension/dist/tools/tmlpdTools.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/tools/tmlpdTools.js +368 -0
- package/tmlpd-pi-extension/dist/tools/tmlpdTools.js.map +1 -0
- package/tmlpd-pi-extension/dist/utils/batchProcessor.d.ts +96 -0
- package/tmlpd-pi-extension/dist/utils/batchProcessor.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/utils/batchProcessor.js +170 -0
- package/tmlpd-pi-extension/dist/utils/batchProcessor.js.map +1 -0
- package/tmlpd-pi-extension/dist/utils/compression.d.ts +61 -0
- package/tmlpd-pi-extension/dist/utils/compression.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/utils/compression.js +281 -0
- package/tmlpd-pi-extension/dist/utils/compression.js.map +1 -0
- package/tmlpd-pi-extension/dist/utils/reliability.d.ts +74 -0
- package/tmlpd-pi-extension/dist/utils/reliability.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/utils/reliability.js +177 -0
- package/tmlpd-pi-extension/dist/utils/reliability.js.map +1 -0
- package/tmlpd-pi-extension/dist/utils/speculativeDecoding.d.ts +117 -0
- package/tmlpd-pi-extension/dist/utils/speculativeDecoding.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/utils/speculativeDecoding.js +246 -0
- package/tmlpd-pi-extension/dist/utils/speculativeDecoding.js.map +1 -0
- package/tmlpd-pi-extension/dist/utils/tokenUtils.d.ts +50 -0
- package/tmlpd-pi-extension/dist/utils/tokenUtils.d.ts.map +1 -0
- package/tmlpd-pi-extension/dist/utils/tokenUtils.js +124 -0
- package/tmlpd-pi-extension/dist/utils/tokenUtils.js.map +1 -0
- package/tmlpd-pi-extension/examples/QUICKSTART.md +183 -0
- package/tmlpd-pi-extension/package-lock.json +75 -0
- package/tmlpd-pi-extension/package.json +172 -0
- package/tmlpd-pi-extension/python/examples.py +53 -0
- package/tmlpd-pi-extension/python/integrations.py +330 -0
- package/tmlpd-pi-extension/python/setup.py +28 -0
- package/tmlpd-pi-extension/python/tmlpd.py +369 -0
- package/tmlpd-pi-extension/qna/REDDIT_GAP_ANALYSIS.md +299 -0
- package/tmlpd-pi-extension/qna/TMLPD_QNA.md +751 -0
- package/tmlpd-pi-extension/skill/SKILL.md +238 -0
- package/{src → tmlpd-pi-extension/src}/index.ts +1 -1
- package/tmlpd-pi-extension/tsconfig.json +18 -0
- package/demo/research-demo.js +0 -266
- package/notebooks/quickstart.ipynb +0 -157
- package/rust/tmlpd.h +0 -268
- package/src/cache/prefixCache.ts +0 -365
- package/src/routing/advancedRouter.ts +0 -406
- package/src/utils/speculativeDecoding.ts +0 -344
- /package/{src → tmlpd-pi-extension/src}/cache/responseCache.ts +0 -0
- /package/{src → tmlpd-pi-extension/src}/cost/costTracker.ts +0 -0
- /package/{src → tmlpd-pi-extension/src}/memory/episodicMemory.ts +0 -0
- /package/{src → tmlpd-pi-extension/src}/orchestration/haloOrchestrator.ts +0 -0
- /package/{src → tmlpd-pi-extension/src}/orchestration/mctsWorkflow.ts +0 -0
- /package/{src → tmlpd-pi-extension/src}/providers/localProvider.ts +0 -0
- /package/{src → tmlpd-pi-extension/src}/providers/registry.ts +0 -0
- /package/{src → tmlpd-pi-extension/src}/tools/tmlpdTools.ts +0 -0
- /package/{src → tmlpd-pi-extension/src}/utils/batchProcessor.ts +0 -0
- /package/{src → tmlpd-pi-extension/src}/utils/compression.ts +0 -0
- /package/{src → tmlpd-pi-extension/src}/utils/reliability.ts +0 -0
- /package/{src → tmlpd-pi-extension/src}/utils/tokenUtils.ts +0 -0
|
@@ -0,0 +1,353 @@
|
|
|
1
|
+
"""
|
|
2
|
+
ExecutionEngine - Parallel Task Execution with Verification
|
|
3
|
+
|
|
4
|
+
Based on arXiv:2505.13516 (HALO) and arXiv:2506.12508v3 (AgentOrchestra)
|
|
5
|
+
|
|
6
|
+
This module implements Tier 3 of HALO orchestration:
|
|
7
|
+
- Executes subtasks in parallel where possible
|
|
8
|
+
- Resolves dependencies automatically
|
|
9
|
+
- Monitors execution and handles failures
|
|
10
|
+
- Adaptive refinement for low-confidence results
|
|
11
|
+
"""
|
|
12
|
+
|
|
13
|
+
import asyncio
|
|
14
|
+
from typing import Dict, List, Any, Optional, Set
|
|
15
|
+
from dataclasses import dataclass, field
|
|
16
|
+
from datetime import datetime
|
|
17
|
+
import logging
|
|
18
|
+
|
|
19
|
+
from .task_planner import SubTask, TaskDecomposition
|
|
20
|
+
from .role_assigner import AgentAssignment
|
|
21
|
+
|
|
22
|
+
|
|
23
|
+
logger = logging.getLogger(__name__)
|
|
24
|
+
|
|
25
|
+
|
|
26
|
+
@dataclass
class ExecutionResult:
    """Outcome of running one subtask, with timing and cost metadata."""
    subtask_id: str                        # id of the subtask this result belongs to
    success: bool                          # True when execution completed without error
    output: Any                            # agent output payload (None on failure)
    error: Optional[str] = None            # error description when success is False
    execution_time_seconds: float = 0.0    # wall-clock duration of the execution
    tokens_used: int = 0                   # tokens consumed by the call
    cost_usd: float = 0.0                  # estimated dollar cost of the call
    provider: str = ""                     # model provider that served the request
    confidence: float = 0.0                # confidence score attached to the result

    def to_dict(self) -> Dict:
        """Serialize this result into a plain dictionary."""
        return dict(
            subtask_id=self.subtask_id,
            success=self.success,
            output=self.output,
            error=self.error,
            execution_time_seconds=self.execution_time_seconds,
            tokens_used=self.tokens_used,
            cost_usd=self.cost_usd,
            provider=self.provider,
            confidence=self.confidence,
        )
|
|
51
|
+
|
|
52
|
+
|
|
53
|
+
@dataclass
class ExecutionSummary:
    """Aggregate outcome of running all subtasks of one decomposition."""
    total_subtasks: int                  # number of subtasks in the decomposition
    successful_subtasks: int             # how many finished with success=True
    failed_subtasks: int                 # how many finished with success=False
    total_execution_time_seconds: float  # wall-clock time for the whole run
    total_cost_usd: float                # summed cost across all subtask executions
    # Per-subtask results, keyed by subtask id.
    results: Dict[str, ExecutionResult] = field(default_factory=dict)
    # Ratio of the estimated sequential duration to actual wall-clock time.
    parallel_speedup: float = 1.0
|
|
63
|
+
|
|
64
|
+
|
|
65
|
+
class ExecutionEngine:
    """
    Execute subtasks with dependency resolution and parallelization.

    Implements Tier 3 of HALO orchestration:
    - Resolves dependencies between subtasks
    - Executes independent tasks in parallel
    - Monitors execution and handles failures
    - Provides adaptive refinement for low-confidence results
    """

    def __init__(self, max_concurrent: int = 5):
        """
        Initialize execution engine.

        Args:
            max_concurrent: Maximum number of parallel executions
        """
        self.max_concurrent = max_concurrent
        # Completed ExecutionSummary records, oldest first.
        self.execution_history = []

    async def execute_parallel(
        self,
        decomposition: TaskDecomposition,
        assignments: Dict[str, AgentAssignment],
        timeout_seconds: int = 300
    ) -> ExecutionSummary:
        """
        Execute all subtasks following the decomposition's execution order.

        Subtasks with unmet (or failed) dependencies are skipped; a missing
        agent assignment is treated the same way instead of raising KeyError.

        Args:
            decomposition: Task decomposition with execution order
            assignments: Agent assignments for each subtask
            timeout_seconds: Maximum time per subtask

        Returns:
            ExecutionSummary with all results
        """
        start_time = datetime.now()
        results: Dict[str, ExecutionResult] = {}

        # Index subtasks once: the previous per-iteration linear scan was
        # O(n^2) and raised StopIteration on an id missing from subtasks.
        subtasks_by_id = {st.id: st for st in decomposition.subtasks}

        # Execute in dependency order
        for subtask_id in decomposition.execution_order:
            subtask = subtasks_by_id.get(subtask_id)
            if subtask is None:
                logger.warning(f"Unknown subtask {subtask_id} in execution order, skipping")
                continue

            # Guard against a missing assignment (previously a KeyError).
            assignment = assignments.get(subtask_id)
            if assignment is None:
                logger.warning(f"No agent assignment for {subtask_id}, skipping")
                continue

            # Check if dependencies are satisfied
            if not self._dependencies_satisfied(subtask, results):
                logger.warning(f"Dependencies not satisfied for {subtask_id}, skipping")
                continue

            # Execute subtask
            result = await self._execute_subtask(subtask, assignment, timeout_seconds)
            results[subtask_id] = result

            # If failed, log and continue; downstream dependents get skipped above.
            if not result.success:
                logger.error(f"Subtask {subtask_id} failed: {result.error}")
                # Could implement retry logic here

        total_time = (datetime.now() - start_time).total_seconds()

        # Calculate summary statistics
        successful = sum(1 for r in results.values() if r.success)
        failed = len(results) - successful
        total_cost = sum(r.cost_usd for r in results.values())

        # Estimate parallel speedup against the naive sequential estimate.
        sequential_time = sum(
            st.estimated_duration_seconds
            for st in decomposition.subtasks
        )
        parallel_speedup = sequential_time / total_time if total_time > 0 else 1.0

        summary = ExecutionSummary(
            total_subtasks=len(decomposition.subtasks),
            successful_subtasks=successful,
            failed_subtasks=failed,
            total_execution_time_seconds=total_time,
            total_cost_usd=total_cost,
            results=results,
            parallel_speedup=parallel_speedup
        )

        self.execution_history.append(summary)

        return summary

    def _dependencies_satisfied(
        self,
        subtask: SubTask,
        results: Dict[str, ExecutionResult]
    ) -> bool:
        """Check that every dependency of *subtask* has run and succeeded."""
        return all(
            dep_id in results and results[dep_id].success
            for dep_id in subtask.dependencies
        )

    async def _execute_subtask(
        self,
        subtask: SubTask,
        assignment: AgentAssignment,
        timeout_seconds: int
    ) -> ExecutionResult:
        """
        Execute a single subtask with its assigned agent.

        This is a simplified implementation. In production, this would:
        - Call the actual LLM API
        - Handle retries and circuit breakers
        - Track tokens and cost accurately
        - Implement proper error handling
        """
        import time
        start_time = time.time()
        # Resolve the provider up front so failure results carry it too
        # (it was previously dropped on the error path).
        provider = assignment.agent_config.model_provider

        try:
            # Simulate execution (replace with actual LLM call). The caller's
            # timeout was previously accepted but never enforced.
            await asyncio.wait_for(
                asyncio.sleep(subtask.estimated_duration_seconds / 10),  # Speed up for demo
                timeout=timeout_seconds
            )

            execution_time = time.time() - start_time

            # Estimate cost (in production, get from actual API response)
            estimated_tokens = 500  # Mock
            cost = estimated_tokens * assignment.agent_config.cost_per_1k_tokens / 1000.0

            return ExecutionResult(
                subtask_id=subtask.id,
                success=True,
                output=f"Completed: {subtask.description}",
                execution_time_seconds=execution_time,
                tokens_used=estimated_tokens,
                cost_usd=cost,
                provider=provider,
                confidence=assignment.confidence
            )

        except asyncio.TimeoutError:
            return ExecutionResult(
                subtask_id=subtask.id,
                success=False,
                output=None,
                error=f"Timed out after {timeout_seconds}s",
                execution_time_seconds=time.time() - start_time,
                tokens_used=0,
                cost_usd=0.0,
                provider=provider,
                confidence=0.0
            )

        except Exception as e:
            return ExecutionResult(
                subtask_id=subtask.id,
                success=False,
                output=None,
                error=str(e),
                execution_time_seconds=time.time() - start_time,
                tokens_used=0,
                cost_usd=0.0,
                provider=provider,
                confidence=0.0
            )

    async def execute_parallel_batch(
        self,
        ready_subtasks: List[SubTask],
        assignments: Dict[str, AgentAssignment],
        timeout_seconds: int = 300
    ) -> List[ExecutionResult]:
        """
        Execute multiple subtasks in parallel (respecting max_concurrent).

        Args:
            ready_subtasks: Subtasks with all dependencies satisfied
            assignments: Agent assignments
            timeout_seconds: Timeout per subtask

        Returns:
            List of ExecutionResults, one per subtask, in input order
        """
        semaphore = asyncio.Semaphore(self.max_concurrent)

        async def execute_with_semaphore(subtask: SubTask) -> ExecutionResult:
            # Bound concurrency to self.max_concurrent.
            async with semaphore:
                return await self._execute_subtask(
                    subtask,
                    assignments[subtask.id],
                    timeout_seconds
                )

        # Execute all ready subtasks in parallel
        results = await asyncio.gather(
            *[execute_with_semaphore(st) for st in ready_subtasks],
            return_exceptions=True
        )

        # Convert raised exceptions into failed ExecutionResults.
        final_results = []
        for subtask, result in zip(ready_subtasks, results):
            if isinstance(result, Exception):
                final_results.append(ExecutionResult(
                    subtask_id=subtask.id,
                    success=False,
                    output=None,
                    error=str(result),
                    execution_time_seconds=0,
                    tokens_used=0,
                    cost_usd=0.0,
                    confidence=0.0
                ))
            else:
                final_results.append(result)

        return final_results

    def get_execution_stats(self) -> Dict[str, Any]:
        """Get aggregate statistics over all executions performed so far."""
        if not self.execution_history:
            return {"total_executions": 0}

        total_subtasks = sum(s.total_subtasks for s in self.execution_history)
        total_successful = sum(s.successful_subtasks for s in self.execution_history)
        total_failed = sum(s.failed_subtasks for s in self.execution_history)
        avg_speedup = sum(s.parallel_speedup for s in self.execution_history) / len(self.execution_history)
        total_cost = sum(s.total_cost_usd for s in self.execution_history)

        return {
            "total_executions": len(self.execution_history),
            "total_subtasks_executed": total_subtasks,
            "total_successful": total_successful,
            "total_failed": total_failed,
            "success_rate": total_successful / total_subtasks if total_subtasks > 0 else 0,
            "average_parallel_speedup": avg_speedup,
            "total_cost_usd": total_cost
        }
|
|
301
|
+
|
|
302
|
+
|
|
303
|
+
# Example usage
async def main():
    """Demo: decompose a task, assign agents, then run the plan end to end."""
    from .task_planner import TaskPlanner
    from .role_assigner import RoleAssigner

    # Wire up the three pipeline stages.
    planner = TaskPlanner()
    assigner = RoleAssigner()
    engine = ExecutionEngine(max_concurrent=3)

    # A sample high-level task to orchestrate.
    demo_task = {
        "description": "Build a REST API with user authentication",
        "context": {"requirements": ["JWT", "PostgreSQL"]},
    }

    # Stage 1: break the task into subtasks.
    print("Decomposing task...")
    plan = await planner.decompose(demo_task)

    # Stage 2: pick an agent for each subtask.
    print("Assigning agents...")
    agent_map = await assigner.assign_roles(
        plan.subtasks,
        optimization_target="balanced",
    )

    # Stage 3: run everything.
    print("Executing subtasks...")
    run_summary = await engine.execute_parallel(plan, agent_map)

    print(f"\nExecution Summary:")
    print(f"  Total subtasks: {run_summary.total_subtasks}")
    print(f"  Successful: {run_summary.successful_subtasks}")
    print(f"  Failed: {run_summary.failed_subtasks}")
    print(f"  Total time: {run_summary.total_execution_time_seconds:.2f}s")
    print(f"  Total cost: ${run_summary.total_cost_usd:.6f}")
    print(f"  Parallel speedup: {run_summary.parallel_speedup:.2f}x")

    print(f"\nResults:")
    for sid, res in run_summary.results.items():
        print(f"  {sid}: {'✅' if res.success else '❌'}")
        if res.success:
            print(f"    Output: {res.output}")
        else:
            print(f"    Error: {res.error}")


if __name__ == "__main__":
    asyncio.run(main())
|