hanzo 0.3.21__py3-none-any.whl → 0.3.23__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release. This version of hanzo might be problematic.
- hanzo/base_agent.py +517 -0
- hanzo/batch_orchestrator.py +988 -0
- hanzo/cli.py +1 -1
- hanzo/commands/repl.py +5 -2
- hanzo/dev.py +463 -261
- hanzo/fallback_handler.py +78 -52
- hanzo/memory_manager.py +145 -122
- hanzo/model_registry.py +399 -0
- hanzo/rate_limiter.py +59 -74
- hanzo/streaming.py +91 -70
- {hanzo-0.3.21.dist-info → hanzo-0.3.23.dist-info}/METADATA +1 -1
- {hanzo-0.3.21.dist-info → hanzo-0.3.23.dist-info}/RECORD +14 -11
- {hanzo-0.3.21.dist-info → hanzo-0.3.23.dist-info}/WHEEL +0 -0
- {hanzo-0.3.21.dist-info → hanzo-0.3.23.dist-info}/entry_points.txt +0 -0
hanzo/batch_orchestrator.py
@@ -0,0 +1,988 @@
+"""Batch Orchestrator for Hanzo Dev - Unified Parallel Agent Execution.
+
+This module provides a single, DRY implementation for all batch operations,
+consensus mechanisms, and critic chains using the unified base classes.
+"""
+
+import re
+import asyncio
+import logging
+from typing import Any, Dict, List, Optional, AsyncIterator, Callable
+from dataclasses import dataclass, field
+from datetime import datetime
+from pathlib import Path
+import json
+import subprocess
+
+from rich.console import Console
+from rich.progress import Progress, TaskID, TextColumn, SpinnerColumn, BarColumn
+from rich.table import Table
+from rich.panel import Panel
+
+try:
+    # Try to import from hanzo-mcp if available
+    from hanzo_mcp.core.model_registry import registry
+    from hanzo_mcp.core.base_agent import AgentConfig, AgentResult, AgentOrchestrator
+except ImportError:
+    # Fall back to local imports if hanzo-mcp is not installed
+    from .model_registry import registry
+    from .base_agent import AgentConfig, AgentResult, AgentOrchestrator
+
+logger = logging.getLogger(__name__)
+console = Console()
+
+
+@dataclass
+class BatchTask:
+    """Represents a single task in a batch operation."""
+
+    id: str
+    description: str
+    file_path: Optional[Path] = None
+    agent_model: str = field(default_factory=lambda: registry.resolve("claude"))
+    status: str = "pending"  # pending, running, completed, failed
+    result: Optional[AgentResult] = None
+    start_time: Optional[datetime] = None
+    end_time: Optional[datetime] = None
+
+    def duration(self) -> Optional[float]:
+        """Calculate task duration in seconds."""
+        if self.start_time and self.end_time:
+            return (self.end_time - self.start_time).total_seconds()
+        return None
+
+    @property
+    def success(self) -> bool:
+        """Check if task succeeded."""
+        return self.status == "completed" and self.result and self.result.success
+
+    @property
+    def error(self) -> Optional[str]:
+        """Get error message if any."""
+        if self.result and not self.result.success:
+            return self.result.error
+        return None
+
+
+@dataclass
+class BatchConfig:
+    """Configuration for batch operations."""
+
+    batch_size: int = 5  # Default concurrent tasks
+    agent_model: str = field(default_factory=lambda: registry.resolve("claude"))
+    operation: str = ""
+    target_pattern: str = "**/*"  # File pattern
+    max_retries: int = 3
+    timeout_seconds: int = 300
+    stream_results: bool = True
+    use_mcp_tools: bool = True
+    use_worktrees: bool = False  # Use git worktrees for parallel editing
+    worktree_base: str = ".worktrees"  # Base dir for worktrees
+
+    # Consensus and critic features
+    consensus_mode: bool = False
+    consensus_models: List[str] = field(default_factory=list)
+    consensus_threshold: float = 0.66  # Agreement threshold
+    critic_mode: bool = False
+    critic_models: List[str] = field(default_factory=list)
+    critic_chain: bool = False  # Chain critics sequentially
+
+    def __post_init__(self) -> None:
+        """Resolve all model names using registry."""
+        self.agent_model = registry.resolve(self.agent_model)
+        self.consensus_models = [registry.resolve(m) for m in self.consensus_models]
+        self.critic_models = [registry.resolve(m) for m in self.critic_models]
+
+    @classmethod
+    def from_command(cls, command: str) -> "BatchConfig":
+        """Parse batch command syntax.
+
+        Examples:
+            batch:5 add copyright to all files  # Defaults to Claude
+            batch:100 agent:claude add copyright to all files
+            batch:50 agent:codex fix typing in *.py
+            batch:5 worktree:true parallel edits  # Use git worktrees
+
+            consensus:3 agent:gemini,claude,codex review code
+            consensus:3 llm:gpt-5,opus-4.1,sonnet-4.1 analyze
+
+            critic:3 agent:claude,codex,gemini review implementation
+            critic:3 chain:true progressive review  # Chain critics
+        """
+        config = cls()
+
+        # Parse consensus mode
+        consensus_match = re.search(r'consensus:(\d+)', command)
+        if consensus_match:
+            config.consensus_mode = True
+            config.batch_size = int(consensus_match.group(1))
+
+            # Parse consensus agents/models
+            agent_list_match = re.search(r'agent:([a-zA-Z0-9,\-_.]+)', command)
+            llm_list_match = re.search(r'llm:([a-zA-Z0-9,\-_.]+)', command)
+
+            if agent_list_match:
+                agents = agent_list_match.group(1).split(',')
+                config.consensus_models = agents  # Will be resolved in __post_init__
+            elif llm_list_match:
+                models = llm_list_match.group(1).split(',')
+                config.consensus_models = models  # Will be resolved in __post_init__
+
+        # Parse critic mode
+        critic_match = re.search(r'critic:(\d+)', command)
+        if critic_match:
+            config.critic_mode = True
+            config.batch_size = int(critic_match.group(1))
+
+            # Parse critic chain option
+            chain_match = re.search(r'chain:(true|false)', command)
+            if chain_match:
+                config.critic_chain = chain_match.group(1) == 'true'
+
+            # Parse critic agents/models
+            agent_list_match = re.search(r'agent:([a-zA-Z0-9,\-_.]+)', command)
+            if agent_list_match:
+                agents = agent_list_match.group(1).split(',')
+                config.critic_models = agents  # Will be resolved in __post_init__
+
+        # Parse batch size (if not consensus/critic)
+        if not config.consensus_mode and not config.critic_mode:
+            batch_match = re.search(r'batch:(\d+)', command)
+            if batch_match:
+                config.batch_size = int(batch_match.group(1))
+
+        # Parse single agent model (for regular batch)
+        if not config.consensus_mode and not config.critic_mode:
+            agent_match = re.search(r'agent:(\w+)', command)
+            if agent_match:
+                config.agent_model = agent_match.group(1)  # Will be resolved in __post_init__
+
+        # Parse worktree option
+        worktree_match = re.search(r'worktree:(true|false)', command)
+        if worktree_match:
+            config.use_worktrees = worktree_match.group(1) == 'true'
+
+        # Parse file pattern
+        pattern_match = re.search(r'files:([^\s]+)', command)
+        if pattern_match:
+            config.target_pattern = pattern_match.group(1)
+
+        # Extract operation (remove all config parts)
+        operation = command
+        operation = re.sub(r'(batch|consensus|critic):\d+\s*', '', operation)
+        operation = re.sub(r'agent:[a-zA-Z0-9,\-_.]+\s*', '', operation)
+        operation = re.sub(r'llm:[a-zA-Z0-9,\-_.]+\s*', '', operation)
+        operation = re.sub(r'chain:(true|false)\s*', '', operation)
+        operation = re.sub(r'worktree:(true|false)\s*', '', operation)
+        operation = re.sub(r'files:[^\s]+\s*', '', operation)
+        config.operation = operation.strip()
+
+        # Trigger __post_init__ to resolve model names
+        config.__post_init__()
+
+        return config
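As a quick orientation on the command parser above, here is a minimal usage sketch (hypothetical, assuming the released wheel exposes the module as hanzo.batch_orchestrator and that the bundled registry resolves short names such as "codex"):

    from hanzo.batch_orchestrator import BatchConfig

    # Prefixes become configuration fields; whatever is left over is the operation.
    config = BatchConfig.from_command("batch:50 agent:codex files:**/*.py fix typing")

    print(config.batch_size)      # 50
    print(config.target_pattern)  # **/*.py
    print(config.operation)       # fix typing
    print(config.agent_model)     # whatever registry.resolve("codex") returns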
+
+
+class BatchOrchestrator:
+    """Orchestrates parallel batch operations using unified agent system."""
+
+    def __init__(
+        self,
+        mcp_client: Optional[Any] = None,
+        hanzo_client: Optional[Any] = None,
+    ):
+        """Initialize batch orchestrator.
+
+        Args:
+            mcp_client: MCP client for tool access
+            hanzo_client: Hanzo client for AI operations
+        """
+        self.mcp_client = mcp_client
+        self.hanzo_client = hanzo_client
+        self.agent_orchestrator = AgentOrchestrator()
+        self.active_tasks: Dict[str, BatchTask] = {}
+        self.completed_tasks: List[BatchTask] = []
+        self.failed_tasks: List[BatchTask] = []
+        self._task_counter = 0
+        self._progress: Optional[Progress] = None
+        self._worktrees: Dict[str, Path] = {}  # Track worktrees
+
+    def _generate_task_id(self) -> str:
+        """Generate unique task ID."""
+        self._task_counter += 1
+        return f"task_{self._task_counter:04d}"
+
+    async def _setup_worktree(self, task_id: str, config: BatchConfig) -> Optional[Path]:
+        """Setup git worktree for parallel editing.
+
+        Args:
+            task_id: Task identifier
+            config: Batch configuration
+
+        Returns:
+            Path to worktree or None if not using worktrees
+        """
+        if not config.use_worktrees:
+            return None
+
+        try:
+            # Create worktree directory
+            worktree_path = Path(config.worktree_base) / task_id
+            worktree_path.parent.mkdir(parents=True, exist_ok=True)
+
+            # Create worktree
+            import subprocess
+            result = subprocess.run(
+                ["git", "worktree", "add", str(worktree_path), "HEAD"],
+                capture_output=True,
+                text=True,
+            )
+
+            if result.returncode != 0:
+                logger.error(f"Failed to create worktree: {result.stderr}")
+                return None
+
+            self._worktrees[task_id] = worktree_path
+            return worktree_path
+
+        except Exception as e:
+            logger.error(f"Error setting up worktree: {e}")
+            return None
+
+    async def _cleanup_worktree(self, task_id: str) -> None:
+        """Cleanup git worktree after task completion.
+
+        Args:
+            task_id: Task identifier
+        """
+        if task_id not in self._worktrees:
+            return
+
+        try:
+            worktree_path = self._worktrees[task_id]
+
+            # Remove worktree
+            import subprocess
+            subprocess.run(
+                ["git", "worktree", "remove", str(worktree_path), "--force"],
+                capture_output=True,
+            )
+
+            del self._worktrees[task_id]
+
+        except Exception as e:
+            logger.error(f"Error cleaning up worktree: {e}")
+
+    async def _find_target_files(self, pattern: str) -> List[Path]:
+        """Find files matching the target pattern.
+
+        Args:
+            pattern: Glob pattern for files
+
+        Returns:
+            List of matching file paths
+        """
+        if self.mcp_client:
+            # Use MCP find tool
+            try:
+                result = await self.mcp_client.call_tool(
+                    "find",
+                    {"pattern": pattern}
+                )
+                if isinstance(result, str):
+                    # Parse file paths from result
+                    files = []
+                    for line in result.split('\n'):
+                        if line.strip():
+                            files.append(Path(line.strip()))
+                    return files
+            except Exception as e:
+                logger.error(f"MCP find failed: {e}")
+
+        # Fallback to Path.glob
+        base_path = Path.cwd()
+        return list(base_path.glob(pattern))
+
+    async def _execute_agent_task(
+        self,
+        task: BatchTask,
+        config: BatchConfig,
+        progress_task: Optional[TaskID] = None,
+    ) -> None:
+        """Execute a single agent task.
+
+        Args:
+            task: The task to execute
+            config: Batch configuration
+            progress_task: Optional progress bar task ID
+        """
+        task.status = "running"
+        task.start_time = datetime.now()
+        worktree_path = None
+
+        try:
+            # Setup worktree if needed
+            worktree_path = await self._setup_worktree(task.id, config)
+
+            # Build the prompt for the agent
+            prompt = config.operation
+            if task.file_path:
+                # If using worktree, use the worktree path
+                if worktree_path:
+                    file_path = worktree_path / task.file_path.relative_to(Path.cwd())
+                    prompt = f"{config.operation} for file: {file_path}"
+                else:
+                    prompt = f"{config.operation} for file: {task.file_path}"
+
+            # Use MCP agent tool if available
+            if self.mcp_client and config.use_mcp_tools:
+                # If using worktree, set working directory context
+                context = {}
+                if worktree_path:
+                    context["working_dir"] = str(worktree_path)
+
+                result = await self.mcp_client.call_tool(
+                    "agent",
+                    {
+                        "prompt": prompt,
+                        "model": config.agent_model,
+                        "max_iterations": 5,
+                        **context,
+                    }
+                )
+                task.result = result
+
+            # Use Hanzo client for direct AI calls
+            elif self.hanzo_client:
+                response = await self.hanzo_client.chat.completions.create(
+                    model=config.agent_model,
+                    messages=[
+                        {"role": "system", "content": "You are a helpful coding assistant."},
+                        {"role": "user", "content": prompt}
+                    ],
+                    stream=False,
+                )
+                task.result = response.choices[0].message.content
+
+            else:
+                # Simulate agent execution for testing
+                await asyncio.sleep(0.1)  # Simulate work
+                task.result = f"Completed: {prompt}"
+
+            task.status = "completed"
+
+            # If using worktree, merge changes back
+            if worktree_path and task.status == "completed":
+                await self._merge_worktree_changes(task.id, worktree_path)
+
+        except asyncio.TimeoutError:
+            task.status = "failed"
+            task.error = "Task timed out"
+
+        except Exception as e:
+            task.status = "failed"
+            task.error = str(e)
+            logger.error(f"Task {task.id} failed: {e}")
+
+        finally:
+            task.end_time = datetime.now()
+
+            # Cleanup worktree
+            if worktree_path:
+                await self._cleanup_worktree(task.id)
+
+            # Update progress if available
+            if self._progress and progress_task is not None:
+                self._progress.update(progress_task, advance=1)
+
+            # Stream result if enabled
+            if config.stream_results:
+                await self._stream_result(task)
+
+    async def _merge_worktree_changes(self, task_id: str, worktree_path: Path) -> None:
+        """Merge changes from worktree back to main branch.
+
+        Args:
+            task_id: Task identifier
+            worktree_path: Path to worktree
+        """
+        try:
+            import subprocess
+
+            # Stage and commit changes in worktree
+            subprocess.run(
+                ["git", "add", "-A"],
+                cwd=worktree_path,
+                capture_output=True,
+            )
+
+            subprocess.run(
+                ["git", "commit", "-m", f"Task {task_id}: Automated changes"],
+                cwd=worktree_path,
+                capture_output=True,
+            )
+
+            # Cherry-pick to main branch
+            result = subprocess.run(
+                ["git", "rev-parse", "HEAD"],
+                cwd=worktree_path,
+                capture_output=True,
+                text=True,
+            )
+
+            if result.returncode == 0:
+                commit_hash = result.stdout.strip()
+                subprocess.run(
+                    ["git", "cherry-pick", commit_hash],
+                    capture_output=True,
+                )
+
+        except Exception as e:
+            logger.error(f"Error merging worktree changes: {e}")
+
+    async def _stream_result(self, task: BatchTask) -> None:
+        """Stream task result to console.
+
+        Args:
+            task: Completed task to stream
+        """
+        status_color = "green" if task.status == "completed" else "red"
+        status_icon = "✓" if task.status == "completed" else "✗"
+
+        # Create result panel
+        content = task.result if task.status == "completed" else task.error
+        panel = Panel(
+            content or "No output",
+            title=f"[{status_color}]{status_icon}[/{status_color}] {task.id}: {task.description}",
+            border_style=status_color,
+        )
+        console.print(panel)
+
+    async def _execute_consensus(
+        self,
+        prompt: str,
+        models: List[str],
+        config: BatchConfig,
+    ) -> Dict[str, Any]:
+        """Execute consensus operation with multiple models.
+
+        Args:
+            prompt: The prompt to send to all models
+            models: List of model names
+            config: Batch configuration
+
+        Returns:
+            Consensus result with individual responses
+        """
+        responses = []
+
+        # Execute with all models in parallel
+        async def get_response(model: str) -> Dict[str, Any]:
+            try:
+                if self.mcp_client:
+                    result = await self.mcp_client.call_tool(
+                        "llm",
+                        {
+                            "prompt": prompt,
+                            "model": model,
+                        }
+                    )
+                    return {"model": model, "response": result, "success": True}
+                elif self.hanzo_client:
+                    response = await self.hanzo_client.chat.completions.create(
+                        model=model,
+                        messages=[{"role": "user", "content": prompt}],
+                    )
+                    return {
+                        "model": model,
+                        "response": response.choices[0].message.content,
+                        "success": True,
+                    }
+                else:
+                    return {"model": model, "response": f"Mock response from {model}", "success": True}
+            except Exception as e:
+                return {"model": model, "error": str(e), "success": False}
+
+        # Get all responses in parallel
+        responses = await asyncio.gather(*[get_response(model) for model in models])
+
+        # Analyze consensus
+        successful_responses = [r for r in responses if r["success"]]
+        agreement_score = len(successful_responses) / len(models) if models else 0
+
+        # Simple consensus: majority agreement or summarize
+        consensus_result = {
+            "consensus_reached": agreement_score >= config.consensus_threshold,
+            "agreement_score": agreement_score,
+            "individual_responses": responses,
+            "models_used": models,
+        }
+
+        # If consensus reached, combine insights
+        if consensus_result["consensus_reached"] and successful_responses:
+            combined = "\n\n".join([
+                f"[{r['model']}]: {r['response']}"
+                for r in successful_responses
+            ])
+            consensus_result["combined_response"] = combined
+
+        return consensus_result
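To make the threshold arithmetic above concrete (illustrative numbers): with three models and one failed call, agreement_score is 2/3, roughly 0.67, which just clears the default consensus_threshold of 0.66, so consensus_reached is True and the successful responses are joined into combined_response; with two failures the score drops to about 0.33 and no combined response is produced. Note that the score counts calls that returned at all, not semantic agreement between the answers.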
+
+    async def _execute_critic_chain(
+        self,
+        initial_content: str,
+        models: List[str],
+        config: BatchConfig,
+    ) -> Dict[str, Any]:
+        """Execute critic chain with sequential review.
+
+        Args:
+            initial_content: Content to review
+            models: List of critic models
+            config: Batch configuration
+
+        Returns:
+            Chain of critic reviews
+        """
+        reviews = []
+        current_content = initial_content
+
+        for i, model in enumerate(models):
+            # Build critic prompt
+            if i == 0:
+                prompt = f"Please review the following:\n\n{current_content}"
+            else:
+                prompt = f"""Please review the following, taking into account previous reviews:
+
+Original content:
+{initial_content}
+
+Previous reviews:
+{chr(10).join([f"[{r['model']}]: {r['review']}" for r in reviews])}
+
+Provide your critical analysis:"""
+
+            try:
+                if self.mcp_client:
+                    # Use critic tool if available
+                    result = await self.mcp_client.call_tool(
+                        "critic",
+                        {
+                            "analysis": prompt,
+                            "model": model,
+                        }
+                    )
+                    review = result
+                else:
+                    # Fallback to LLM
+                    if self.hanzo_client:
+                        response = await self.hanzo_client.chat.completions.create(
+                            model=model,
+                            messages=[
+                                {"role": "system", "content": "You are a thorough code critic."},
+                                {"role": "user", "content": prompt}
+                            ],
+                        )
+                        review = response.choices[0].message.content
+                    else:
+                        review = f"Mock critic review from {model}"
+
+                reviews.append({
+                    "model": model,
+                    "review": review,
+                    "iteration": i + 1,
+                })
+
+                # Update content for next critic
+                current_content = review
+
+            except Exception as e:
+                reviews.append({
+                    "model": model,
+                    "error": str(e),
+                    "iteration": i + 1,
+                })
+
+        return {
+            "critic_chain": reviews,
+            "final_review": reviews[-1]["review"] if reviews and "review" in reviews[-1] else None,
+            "models_used": models,
+            "chain_length": len(reviews),
+        }
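For orientation, a chained-critic run is driven through the same command syntax handled by execute_batch below. A minimal sketch (hypothetical usage, same import assumption as the earlier sketch; with no MCP or Hanzo client attached the critics fall back to the mock reviews above, so this exercises only the plumbing):

    import asyncio
    from hanzo.batch_orchestrator import BatchOrchestrator

    async def main():
        orchestrator = BatchOrchestrator()  # no clients: mock critic responses
        result = await orchestrator.execute_batch(
            "critic:3 agent:claude,codex,gemini chain:true review implementation"
        )
        for review in result["critic_chain"]:
            # Each entry records the model, its review text, and its position in the chain.
            print(review["iteration"], review["model"])

    asyncio.run(main())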
+
+    async def execute_batch(
+        self,
+        command: str,
+        stream_callback: Optional[Callable[[str], None]] = None,
+    ) -> Dict[str, Any]:
+        """Execute batch operation with parallel agents.
+
+        Args:
+            command: Batch command to execute
+            stream_callback: Optional callback for streaming results
+
+        Returns:
+            Summary of batch execution results
+        """
+        # Parse configuration
+        config = BatchConfig.from_command(command)
+
+        # Handle consensus mode
+        if config.consensus_mode:
+            console.print(f"[bold cyan]Consensus Configuration:[/bold cyan]")
+            console.print(f" Models: {', '.join(config.consensus_models)}")
+            console.print(f" Operation: {config.operation}")
+            console.print(f" Threshold: {config.consensus_threshold}")
+
+            result = await self._execute_consensus(
+                config.operation,
+                config.consensus_models,
+                config,
+            )
+
+            # Display consensus result
+            if result["consensus_reached"]:
+                console.print("[bold green]✓ Consensus reached![/bold green]")
+            else:
+                console.print("[bold yellow]⚠ No consensus[/bold yellow]")
+
+            console.print(f"Agreement Score: {result['agreement_score']:.1%}")
+
+            for resp in result["individual_responses"]:
+                if resp["success"]:
+                    console.print(Panel(
+                        resp["response"][:500] + "..." if len(resp.get("response", "")) > 500 else resp.get("response", ""),
+                        title=f"[cyan]{resp['model']}[/cyan]",
+                    ))
+
+            return result
+
+        # Handle critic mode
+        elif config.critic_mode:
+            console.print(f"[bold cyan]Critic Configuration:[/bold cyan]")
+            console.print(f" Models: {', '.join(config.critic_models)}")
+            console.print(f" Chain Mode: {config.critic_chain}")
+            console.print(f" Operation: {config.operation}")
+
+            if config.critic_chain:
+                result = await self._execute_critic_chain(
+                    config.operation,
+                    config.critic_models,
+                    config,
+                )
+
+                # Display critic chain
+                for review in result["critic_chain"]:
+                    if "review" in review:
+                        console.print(Panel(
+                            review["review"][:500] + "..." if len(review["review"]) > 500 else review["review"],
+                            title=f"[cyan]Critic {review['iteration']}: {review['model']}[/cyan]",
+                        ))
+
+                return result
+            else:
+                # Parallel critics (use consensus mechanism)
+                result = await self._execute_consensus(
+                    f"Please provide critical review: {config.operation}",
+                    config.critic_models,
+                    config,
+                )
+                return result
+
+        # Regular batch mode
+        console.print(f"[bold cyan]Batch Configuration:[/bold cyan]")
+        console.print(f" Batch Size: {config.batch_size}")
+        console.print(f" Agent Model: {config.agent_model}")
+        console.print(f" Operation: {config.operation}")
+        console.print(f" Target Pattern: {config.target_pattern}")
+
+        # Find target files
+        target_files = await self._find_target_files(config.target_pattern)
+        console.print(f"[bold]Found {len(target_files)} files to process[/bold]")
+
+        # Create tasks
+        tasks = []
+        for file_path in target_files:
+            task = BatchTask(
+                id=self._generate_task_id(),
+                description=f"{config.operation} - {file_path.name}",
+                file_path=file_path,
+                agent_model=config.agent_model,
+            )
+            tasks.append(task)
+            self.active_tasks[task.id] = task
+
+        # Setup concurrency control
+        self._semaphore = asyncio.Semaphore(config.batch_size)
+
+        # Create progress bar
+        with Progress(
+            SpinnerColumn(),
+            TextColumn("[progress.description]{task.description}"),
+            BarColumn(),
+            TextColumn("[progress.percentage]{task.percentage:>3.0f}%"),
+            console=console,
+        ) as progress:
+            self._progress = progress
+            progress_task = progress.add_task(
+                f"Processing {len(tasks)} tasks...",
+                total=len(tasks),
+            )
+
+            # Execute tasks with concurrency limit
+            async def run_with_semaphore(task: BatchTask):
+                async with self._semaphore:
+                    await self._execute_agent_task(task, config, progress_task)
+
+            # Run all tasks
+            await asyncio.gather(
+                *[run_with_semaphore(task) for task in tasks],
+                return_exceptions=True,
+            )
+
+        # Collect results
+        for task in tasks:
+            if task.status == "completed":
+                self.completed_tasks.append(task)
+            else:
+                self.failed_tasks.append(task)
+            del self.active_tasks[task.id]
+
+        # Generate summary
+        total_duration = sum(
+            t.duration() or 0 for t in self.completed_tasks + self.failed_tasks
+        )
+
+        summary = {
+            "total_tasks": len(tasks),
+            "completed": len(self.completed_tasks),
+            "failed": len(self.failed_tasks),
+            "total_duration": total_duration,
+            "average_duration": total_duration / len(tasks) if tasks else 0,
+            "batch_size": config.batch_size,
+            "agent_model": config.agent_model,
+        }
+
+        # Display summary
+        self._display_summary(summary)
+
+        return summary
+
+    def _display_summary(self, summary: Dict[str, Any]) -> None:
+        """Display execution summary.
+
+        Args:
+            summary: Execution summary data
+        """
+        table = Table(title="Batch Execution Summary", show_header=False)
+        table.add_column("Metric", style="cyan")
+        table.add_column("Value", style="white")
+
+        table.add_row("Total Tasks", str(summary["total_tasks"]))
+        table.add_row("Completed", f"[green]{summary['completed']}[/green]")
+        table.add_row("Failed", f"[red]{summary['failed']}[/red]")
+        table.add_row("Total Duration", f"{summary['total_duration']:.2f}s")
+        table.add_row("Average Duration", f"{summary['average_duration']:.2f}s")
+        table.add_row("Batch Size", str(summary["batch_size"]))
+        table.add_row("Agent Model", summary["agent_model"])
+
+        console.print(table)
+
+    async def stream_batch_results(self) -> AsyncIterator[BatchTask]:
+        """Stream batch results as they complete.
+
+        Yields:
+            Completed batch tasks
+        """
+        while self.active_tasks:
+            for task_id, task in list(self.active_tasks.items()):
+                if task.status in ["completed", "failed"]:
+                    yield task
+                    del self.active_tasks[task_id]
+            await asyncio.sleep(0.1)
+
+    def get_status(self) -> Dict[str, Any]:
+        """Get current orchestrator status.
+
+        Returns:
+            Status information
+        """
+        return {
+            "active": len(self.active_tasks),
+            "completed": len(self.completed_tasks),
+            "failed": len(self.failed_tasks),
+            "active_tasks": [
+                {
+                    "id": task.id,
+                    "description": task.description,
+                    "status": task.status,
+                    "duration": task.duration(),
+                }
+                for task in self.active_tasks.values()
+            ],
+        }
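Putting the pieces together, a plain batch run through the public entry point looks roughly like this (hypothetical usage under the same import assumption as above; with neither an MCP client nor a Hanzo client configured, the per-file agent work is simulated, as in _execute_agent_task):

    import asyncio
    from hanzo.batch_orchestrator import BatchOrchestrator

    async def main():
        orchestrator = BatchOrchestrator()  # optionally pass mcp_client=... or hanzo_client=...
        summary = await orchestrator.execute_batch(
            "batch:5 agent:claude files:src/**/*.py add copyright header"
        )
        # The returned dict mirrors the table printed by _display_summary.
        print(summary["completed"], "completed,", summary["failed"], "failed")

    asyncio.run(main())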
+
+
+class MetaAIOrchestrator:
+    """Meta AI orchestrator that manages other AI agents."""
+
+    def __init__(
+        self,
+        primary_model: str = "claude-3-5-sonnet-20241022",
+        mcp_client: Optional[Any] = None,
+    ):
+        """Initialize meta AI orchestrator.
+
+        Args:
+            primary_model: Primary model for meta reasoning
+            mcp_client: MCP client for tool access
+        """
+        self.primary_model = primary_model
+        self.mcp_client = mcp_client
+        self.batch_orchestrator = BatchOrchestrator(mcp_client=mcp_client)
+        self.agent_pool: Dict[str, Any] = {}
+        self.task_queue: asyncio.Queue = asyncio.Queue()
+        self.results_queue: asyncio.Queue = asyncio.Queue()
+
+    async def parse_and_execute(self, command: str) -> Dict[str, Any]:
+        """Parse natural language command and execute appropriate action.
+
+        Args:
+            command: Natural language command
+
+        Returns:
+            Execution results
+        """
+        # Check if it's a batch command
+        if "batch:" in command or command.startswith("batch"):
+            return await self.batch_orchestrator.execute_batch(command)
+
+        # Use meta AI to understand intent
+        intent = await self._analyze_intent(command)
+
+        if intent["type"] == "batch_operation":
+            # Convert natural language to batch syntax
+            batch_command = self._build_batch_command(intent)
+            return await self.batch_orchestrator.execute_batch(batch_command)
+
+        elif intent["type"] == "single_task":
+            # Execute single agent task
+            return await self._execute_single_task(intent)
+
+        else:
+            return {"error": f"Unknown command type: {intent['type']}"}
+
+    async def _analyze_intent(self, command: str) -> Dict[str, Any]:
+        """Analyze user intent from natural language.
+
+        Args:
+            command: User command
+
+        Returns:
+            Intent analysis
+        """
+        # Use primary model to analyze intent
+        prompt = f"""
+        Analyze the following command and determine the intent:
+        Command: {command}
+
+        Determine:
+        1. Is this a batch operation (multiple files/tasks)?
+        2. What is the main operation?
+        3. What agent/model should be used?
+        4. What are the target files/patterns?
+
+        Return as JSON.
+        """
+
+        if self.mcp_client:
+            result = await self.mcp_client.call_tool(
+                "llm",
+                {
+                    "prompt": prompt,
+                    "model": self.primary_model,
+                    "response_format": "json",
+                }
+            )
+            try:
+                return json.loads(result)
+            except:
+                pass
+
+        # Fallback intent detection
+        if any(word in command.lower() for word in ["all", "every", "each", "files"]):
+            return {
+                "type": "batch_operation",
+                "operation": command,
+                "model": "claude-3-5-sonnet-20241022",
+                "pattern": "**/*",
+            }
+        else:
+            return {
+                "type": "single_task",
+                "operation": command,
+                "model": "claude-3-5-sonnet-20241022",
+            }
+
+    def _build_batch_command(self, intent: Dict[str, Any]) -> str:
+        """Build batch command from intent.
+
+        Args:
+            intent: Analyzed intent
+
+        Returns:
+            Batch command string
+        """
+        batch_size = intent.get("batch_size", 10)
+        model = intent.get("model", "claude")
+        operation = intent.get("operation", "")
+        pattern = intent.get("pattern", "**/*")
+
+        # Map model names
+        model_short = {
+            "claude-3-5-sonnet-20241022": "claude",
+            "gpt-4-turbo": "codex",
+            "gemini-1.5-pro": "gemini",
+        }.get(model, model)
+
+        return f"batch:{batch_size} agent:{model_short} files:{pattern} {operation}"
+
+    async def _execute_single_task(self, intent: Dict[str, Any]) -> Dict[str, Any]:
+        """Execute single agent task.
+
+        Args:
+            intent: Task intent
+
+        Returns:
+            Execution result
+        """
+        task = BatchTask(
+            id=self.batch_orchestrator._generate_task_id(),
+            description=intent["operation"],
+            agent_model=intent.get("model", self.primary_model),
+        )
+
+        config = BatchConfig(
+            batch_size=1,
+            agent_model=task.agent_model,
+            operation=intent["operation"],
+        )
+
+        await self.batch_orchestrator._execute_agent_task(task, config)
+
+        return {
+            "task_id": task.id,
+            "status": task.status,
+            "result": task.result,
+            "error": task.error,
+            "duration": task.duration(),
+        }
+
+
+# Export main classes
+__all__ = [
+    "BatchTask",
+    "BatchConfig",
+    "BatchOrchestrator",
+    "MetaAIOrchestrator",
+]