daita-agents 0.2.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- daita/__init__.py +216 -0
- daita/agents/__init__.py +33 -0
- daita/agents/base.py +743 -0
- daita/agents/substrate.py +1141 -0
- daita/cli/__init__.py +145 -0
- daita/cli/__main__.py +7 -0
- daita/cli/ascii_art.py +44 -0
- daita/cli/core/__init__.py +0 -0
- daita/cli/core/create.py +254 -0
- daita/cli/core/deploy.py +473 -0
- daita/cli/core/deployments.py +309 -0
- daita/cli/core/import_detector.py +219 -0
- daita/cli/core/init.py +481 -0
- daita/cli/core/logs.py +239 -0
- daita/cli/core/managed_deploy.py +709 -0
- daita/cli/core/run.py +648 -0
- daita/cli/core/status.py +421 -0
- daita/cli/core/test.py +239 -0
- daita/cli/core/webhooks.py +172 -0
- daita/cli/main.py +588 -0
- daita/cli/utils.py +541 -0
- daita/config/__init__.py +62 -0
- daita/config/base.py +159 -0
- daita/config/settings.py +184 -0
- daita/core/__init__.py +262 -0
- daita/core/decision_tracing.py +701 -0
- daita/core/exceptions.py +480 -0
- daita/core/focus.py +251 -0
- daita/core/interfaces.py +76 -0
- daita/core/plugin_tracing.py +550 -0
- daita/core/relay.py +779 -0
- daita/core/reliability.py +381 -0
- daita/core/scaling.py +459 -0
- daita/core/tools.py +554 -0
- daita/core/tracing.py +770 -0
- daita/core/workflow.py +1144 -0
- daita/display/__init__.py +1 -0
- daita/display/console.py +160 -0
- daita/execution/__init__.py +58 -0
- daita/execution/client.py +856 -0
- daita/execution/exceptions.py +92 -0
- daita/execution/models.py +317 -0
- daita/llm/__init__.py +60 -0
- daita/llm/anthropic.py +291 -0
- daita/llm/base.py +530 -0
- daita/llm/factory.py +101 -0
- daita/llm/gemini.py +355 -0
- daita/llm/grok.py +219 -0
- daita/llm/mock.py +172 -0
- daita/llm/openai.py +220 -0
- daita/plugins/__init__.py +141 -0
- daita/plugins/base.py +37 -0
- daita/plugins/base_db.py +167 -0
- daita/plugins/elasticsearch.py +849 -0
- daita/plugins/mcp.py +481 -0
- daita/plugins/mongodb.py +520 -0
- daita/plugins/mysql.py +362 -0
- daita/plugins/postgresql.py +342 -0
- daita/plugins/redis_messaging.py +500 -0
- daita/plugins/rest.py +537 -0
- daita/plugins/s3.py +770 -0
- daita/plugins/slack.py +729 -0
- daita/utils/__init__.py +18 -0
- daita_agents-0.2.0.dist-info/METADATA +409 -0
- daita_agents-0.2.0.dist-info/RECORD +69 -0
- daita_agents-0.2.0.dist-info/WHEEL +5 -0
- daita_agents-0.2.0.dist-info/entry_points.txt +2 -0
- daita_agents-0.2.0.dist-info/licenses/LICENSE +56 -0
- daita_agents-0.2.0.dist-info/top_level.txt +1 -0
daita/core/scaling.py
ADDED
|
@@ -0,0 +1,459 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Agent Pool Scaling for Daita Agents.
|
|
3
|
+
|
|
4
|
+
Provides manual horizontal scaling of agent instances for handling concurrent workloads.
|
|
5
|
+
Uses a simple, MVP-focused approach without complex auto-scaling logic.
|
|
6
|
+
|
|
7
|
+
Features:
|
|
8
|
+
- Manual agent pool management
|
|
9
|
+
- Round-robin load balancing
|
|
10
|
+
- Simple instance lifecycle management
|
|
11
|
+
- Integration with existing reliability features
|
|
12
|
+
- Async-safe concurrent task execution
|
|
13
|
+
|
|
14
|
+
Example:
|
|
15
|
+
```python
|
|
16
|
+
from daita.core.scaling import AgentPool
|
|
17
|
+
from daita.agents.substrate import SubstrateAgent
|
|
18
|
+
|
|
19
|
+
# Create agent factory
|
|
20
|
+
def create_processor():
|
|
21
|
+
return SubstrateAgent(name="Processor", preset="analysis")
|
|
22
|
+
|
|
23
|
+
# Create agent pool with 5 instances
|
|
24
|
+
pool = AgentPool(
|
|
25
|
+
agent_factory=create_processor,
|
|
26
|
+
instances=5,
|
|
27
|
+
pool_name="processors"
|
|
28
|
+
)
|
|
29
|
+
|
|
30
|
+
await pool.start()
|
|
31
|
+
|
|
32
|
+
# Submit tasks to pool (load balanced)
|
|
33
|
+
result = await pool.submit_task("analyze", data={"text": "Hello"})
|
|
34
|
+
|
|
35
|
+
await pool.stop()
|
|
36
|
+
```
|
|
37
|
+
"""
|
|
38
|
+
|
|
39
|
+
import asyncio
|
|
40
|
+
import logging
|
|
41
|
+
import time
|
|
42
|
+
import uuid
|
|
43
|
+
from typing import Dict, Any, Optional, List, Callable, Union
|
|
44
|
+
from dataclasses import dataclass, field
|
|
45
|
+
from enum import Enum
|
|
46
|
+
|
|
47
|
+
logger = logging.getLogger(__name__)
|
|
48
|
+
|
|
49
|
+
class PoolStatus(str, Enum):
    """Status of an agent pool.

    Inherits from ``str`` so values serialize naturally (e.g. in JSON stats)
    and compare equal to their string literals.
    """
    CREATED = "created"    # pool constructed, start() not yet called
    STARTING = "starting"  # start() in progress, instances being created
    RUNNING = "running"    # pool accepting tasks via submit_task()
    STOPPING = "stopping"  # stop() in progress, instances shutting down
    STOPPED = "stopped"    # stop() completed, all instances removed
    ERROR = "error"        # start()/stop() raised; pool unusable
|
|
57
|
+
|
|
58
|
+
@dataclass
class AgentInstance:
    """A single managed agent inside an :class:`AgentPool`.

    Bundles the wrapped agent object with the bookkeeping the pool needs
    for load balancing and statistics.
    """
    id: str                                              # unique instance identifier within the pool
    agent: Any                                           # the wrapped agent object produced by the factory
    created_at: float = field(default_factory=time.time) # creation timestamp (epoch seconds)
    task_count: int = 0                                  # lifetime number of tasks routed here
    last_task_at: Optional[float] = None                 # timestamp of the most recent task, if any
    current_tasks: int = 0                               # tasks executing right now
    status: str = "idle"                                 # "idle" or "busy"

    def is_available(self) -> bool:
        """Return True when this instance can accept a new task.

        An instance is available only while it has no in-flight tasks
        and its status is "idle".
        """
        if self.current_tasks != 0:
            return False
        return self.status == "idle"
|
|
72
|
+
|
|
73
|
+
class LoadBalancer:
    """Round-robin selector over the currently available agent instances."""

    def __init__(self):
        # Monotonic cursor; reduced modulo the number of available
        # instances at each selection, so it need never be reset.
        self.current_index = 0

    def select_instance(self, instances: List[AgentInstance]) -> Optional[AgentInstance]:
        """
        Pick the next available instance in round-robin order.

        Args:
            instances: List of agent instances

        Returns:
            Selected agent instance or None if none available
        """
        # Only instances reporting themselves available are candidates;
        # an empty input list naturally yields no candidates.
        candidates = [candidate for candidate in instances if candidate.is_available()]
        if not candidates:
            return None

        # Advance the cursor only when a selection actually happens.
        slot = self.current_index % len(candidates)
        self.current_index += 1
        return candidates[slot]
|
|
103
|
+
|
|
104
|
+
class AgentPool:
    """
    Agent pool for horizontal scaling with manual instance management.

    Provides load balancing across multiple agent instances to handle
    concurrent workloads. Uses simple round-robin balancing and manual
    instance count management.
    """

    def __init__(
        self,
        agent_factory: Callable[[], Any],
        instances: int = 1,
        pool_name: Optional[str] = None,
        max_concurrent_per_instance: int = 1
    ):
        """
        Initialize agent pool.

        Args:
            agent_factory: Factory function to create agent instances
            instances: Number of agent instances to create
            pool_name: Optional name for the pool (for logging)
            max_concurrent_per_instance: Max concurrent tasks per agent (default: 1)

        NOTE(review): max_concurrent_per_instance is stored but never consulted;
        AgentInstance.is_available() requires current_tasks == 0, so effective
        concurrency per instance is always 1 — confirm whether this is intended.
        """
        self.agent_factory = agent_factory
        self.instance_count = max(1, instances)  # At least 1 instance
        # Auto-generate a short unique name when none is supplied.
        self.pool_name = pool_name or f"pool_{uuid.uuid4().hex[:8]}"
        self.max_concurrent_per_instance = max_concurrent_per_instance

        # Pool state
        self.status = PoolStatus.CREATED
        self.instances: List[AgentInstance] = []
        self.load_balancer = LoadBalancer()

        # Statistics
        self.total_tasks = 0
        self.failed_tasks = 0
        self.created_at = time.time()

        # Async locks — guards structural changes to self.instances
        # (start/stop/resize); submit_task intentionally does not take it.
        self._pool_lock = asyncio.Lock()

        logger.debug(f"AgentPool '{self.pool_name}' created with {self.instance_count} instances")

    async def start(self) -> None:
        """Start the agent pool and create all agent instances.

        Idempotent-ish: a warning is logged and nothing happens unless the
        pool is still in CREATED state. On any instance-creation failure the
        pool is left in ERROR state and the exception is re-raised.

        NOTE(review): the status check happens before the lock is taken, so
        two concurrent start() calls could race past it — confirm callers
        only start once.
        """
        if self.status != PoolStatus.CREATED:
            logger.warning(f"Pool '{self.pool_name}' already started or in invalid state")
            return

        self.status = PoolStatus.STARTING
        logger.info(f"Starting agent pool '{self.pool_name}' with {self.instance_count} instances")

        try:
            async with self._pool_lock:
                # Create all agent instances
                for i in range(self.instance_count):
                    await self._create_instance(f"{self.pool_name}_instance_{i}")

            self.status = PoolStatus.RUNNING
            logger.info(f"Agent pool '{self.pool_name}' started successfully")

        except Exception as e:
            self.status = PoolStatus.ERROR
            logger.error(f"Failed to start agent pool '{self.pool_name}': {e}")
            raise

    async def stop(self) -> None:
        """Stop the agent pool and cleanup all instances.

        Best-effort: per-instance stop() errors are logged and skipped so one
        bad agent cannot prevent the rest from shutting down. No-op when the
        pool is already stopped or stopping.
        """
        if self.status in [PoolStatus.STOPPED, PoolStatus.STOPPING]:
            return

        self.status = PoolStatus.STOPPING
        logger.info(f"Stopping agent pool '{self.pool_name}'")

        try:
            async with self._pool_lock:
                # Stop all agent instances
                for instance in self.instances:
                    try:
                        # Agents without a stop() method are simply dropped.
                        if hasattr(instance.agent, 'stop'):
                            await instance.agent.stop()
                    except Exception as e:
                        logger.warning(f"Error stopping instance {instance.id}: {e}")

                self.instances.clear()

            self.status = PoolStatus.STOPPED
            logger.info(f"Agent pool '{self.pool_name}' stopped")

        except Exception as e:
            self.status = PoolStatus.ERROR
            logger.error(f"Error stopping agent pool '{self.pool_name}': {e}")
            raise

    async def _create_instance(self, instance_id: str) -> AgentInstance:
        """
        Create and start a new agent instance.

        Caller is expected to hold self._pool_lock (mutates self.instances).

        Args:
            instance_id: Unique ID for the instance

        Returns:
            Created agent instance
        """
        try:
            # Create agent using factory
            agent = self.agent_factory()

            # Start the agent if it has a start method
            if hasattr(agent, 'start'):
                await agent.start()

            # Create instance record
            instance = AgentInstance(
                id=instance_id,
                agent=agent
            )

            self.instances.append(instance)
            logger.debug(f"Created agent instance {instance_id}")

            return instance

        except Exception as e:
            logger.error(f"Failed to create agent instance {instance_id}: {e}")
            raise

    async def submit_task(
        self,
        task: str,
        data: Any = None,
        context: Optional[Dict[str, Any]] = None,
        **kwargs
    ) -> Any:
        """
        Submit task to an available agent instance.

        Args:
            task: Task name/type
            data: Task data
            context: Optional task context
            **kwargs: Additional task parameters

        Returns:
            Task result

        Raises:
            RuntimeError: If the pool is not RUNNING or no instance is free.
        """
        if self.status != PoolStatus.RUNNING:
            raise RuntimeError(f"Pool '{self.pool_name}' is not running (status: {self.status})")

        # Select available instance
        instance = self.load_balancer.select_instance(self.instances)
        if not instance:
            raise RuntimeError(f"No available agent instances in pool '{self.pool_name}'")

        # Track task execution — counters are bumped before dispatch so the
        # instance reads as busy to other submitters, and unwound in finally.
        self.total_tasks += 1
        instance.task_count += 1
        instance.current_tasks += 1
        instance.last_task_at = time.time()
        instance.status = "busy"

        try:
            # Execute task on selected agent
            logger.debug(f"Submitting task '{task}' to instance {instance.id}")

            # Agent pools typically used for workflow parallelization
            # Check task type and route appropriately
            if task == "relay_message" and hasattr(instance.agent, 'receive_message'):
                # Workflow relay message
                result = await instance.agent.receive_message(
                    data=data,
                    source_agent=context.get('source_agent', 'pool') if context else 'pool',
                    channel=context.get('channel', 'default') if context else 'default',
                    workflow_name=context.get('workflow') if context else None
                )
            elif hasattr(instance.agent, '_process'):
                # Fallback to internal API
                # NOTE(review): reaches into the agent's private _process API;
                # confirm this stays in sync with the agent base class.
                result = await instance.agent._process(task, data, context or {}, **kwargs)
            else:
                # Last resort: try run_detailed
                prompt = context.get('prompt', str(data)) if context else str(data)
                result = await instance.agent.run_detailed(prompt)

            logger.debug(f"Task '{task}' completed on instance {instance.id}")
            return result

        except Exception as e:
            self.failed_tasks += 1
            logger.error(f"Task '{task}' failed on instance {instance.id}: {e}")
            raise
        finally:
            # Update instance state — clamped at 0 as a defensive measure.
            instance.current_tasks = max(0, instance.current_tasks - 1)
            if instance.current_tasks == 0:
                instance.status = "idle"

    async def resize(self, new_instance_count: int) -> None:
        """
        Resize the agent pool (manual scaling).

        Scale-up creates new instances; scale-down drains the least-busy
        instances (waiting up to 30s each for in-flight tasks) and stops them.

        Args:
            new_instance_count: New number of instances

        Raises:
            RuntimeError: If the pool is not RUNNING.
        """
        if self.status != PoolStatus.RUNNING:
            raise RuntimeError(f"Cannot resize pool '{self.pool_name}' - not running")

        new_instance_count = max(1, new_instance_count)  # At least 1 instance
        current_count = len(self.instances)

        if new_instance_count == current_count:
            logger.debug(f"Pool '{self.pool_name}' already has {current_count} instances")
            return

        async with self._pool_lock:
            if new_instance_count > current_count:
                # Scale up - add instances
                instances_to_add = new_instance_count - current_count
                logger.info(f"Scaling up pool '{self.pool_name}' from {current_count} to {new_instance_count} instances")

                for i in range(instances_to_add):
                    instance_id = f"{self.pool_name}_instance_{current_count + i}"
                    await self._create_instance(instance_id)

            else:
                # Scale down - remove instances
                instances_to_remove = current_count - new_instance_count
                logger.info(f"Scaling down pool '{self.pool_name}' from {current_count} to {new_instance_count} instances")

                # Remove least busy instances
                instances_by_load = sorted(self.instances, key=lambda x: x.current_tasks)

                for _ in range(instances_to_remove):
                    if instances_by_load:
                        instance = instances_by_load.pop(0)

                        # Wait for current tasks to complete (with timeout)
                        timeout_seconds = 30
                        wait_start = time.time()

                        while instance.current_tasks > 0 and (time.time() - wait_start) < timeout_seconds:
                            await asyncio.sleep(0.1)

                        # Stop and remove instance
                        try:
                            if hasattr(instance.agent, 'stop'):
                                await instance.agent.stop()
                        except Exception as e:
                            logger.warning(f"Error stopping instance {instance.id}: {e}")

                        self.instances.remove(instance)
                        logger.debug(f"Removed instance {instance.id}")

            self.instance_count = new_instance_count
            logger.info(f"Pool '{self.pool_name}' resized to {new_instance_count} instances")

    def get_stats(self) -> Dict[str, Any]:
        """Get agent pool statistics.

        Returns a plain dict (JSON-serializable) with pool-level counters;
        a reduced shape is returned when no instances exist.
        """
        if not self.instances:
            return {
                "pool_name": self.pool_name,
                "status": self.status.value,
                "instance_count": 0,
                "total_tasks": self.total_tasks,
                "failed_tasks": self.failed_tasks,
                "success_rate": 0.0,
                "uptime_seconds": time.time() - self.created_at
            }

        # Calculate instance statistics
        busy_instances = sum(1 for inst in self.instances if inst.current_tasks > 0)
        total_current_tasks = sum(inst.current_tasks for inst in self.instances)
        avg_tasks_per_instance = sum(inst.task_count for inst in self.instances) / len(self.instances)

        # Success rate as a percentage; 0.0 when no tasks were ever submitted.
        success_rate = 0.0
        if self.total_tasks > 0:
            success_rate = ((self.total_tasks - self.failed_tasks) / self.total_tasks) * 100

        return {
            "pool_name": self.pool_name,
            "status": self.status.value,
            "instance_count": len(self.instances),
            "busy_instances": busy_instances,
            "idle_instances": len(self.instances) - busy_instances,
            "total_current_tasks": total_current_tasks,
            "total_tasks": self.total_tasks,
            "failed_tasks": self.failed_tasks,
            "success_rate": round(success_rate, 2),
            "avg_tasks_per_instance": round(avg_tasks_per_instance, 2),
            "uptime_seconds": round(time.time() - self.created_at, 2)
        }

    def get_instance_stats(self) -> List[Dict[str, Any]]:
        """Get detailed statistics for each instance."""
        return [
            {
                "id": inst.id,
                "status": inst.status,
                "task_count": inst.task_count,
                "current_tasks": inst.current_tasks,
                "last_task_at": inst.last_task_at,
                "uptime_seconds": round(time.time() - inst.created_at, 2),
                "is_available": inst.is_available()
            }
            for inst in self.instances
        ]

    # Context manager support
    async def __aenter__(self) -> "AgentPool":
        """Async context manager entry."""
        await self.start()
        return self

    async def __aexit__(self, exc_type, exc_val, exc_tb) -> None:
        """Async context manager exit."""
        await self.stop()
|
|
421
|
+
|
|
422
|
+
# Utility functions for pool management
|
|
423
|
+
|
|
424
|
+
def create_agent_pool(
    agent_factory: Callable[[], Any],
    instances: int = 1,
    pool_name: Optional[str] = None,
    max_concurrent_per_instance: int = 1
) -> AgentPool:
    """
    Create an agent pool with the specified configuration.

    Args:
        agent_factory: Factory function to create agent instances
        instances: Number of agent instances
        pool_name: Optional pool name
        max_concurrent_per_instance: Max concurrent tasks per agent
            (default: 1, matching AgentPool's own default)

    Returns:
        Configured AgentPool instance

    Example:
        ```python
        from daita.core.scaling import create_agent_pool
        from daita.agents.substrate import SubstrateAgent

        # Create pool factory
        def make_processor():
            return SubstrateAgent(name="Processor")

        pool = create_agent_pool(make_processor, instances=5, pool_name="processors")

        async with pool:
            result = await pool.submit_task("analyze", data={"text": "Hello"})
        ```
    """
    # Forward every AgentPool constructor option; previously
    # max_concurrent_per_instance could not be set through this helper.
    return AgentPool(
        agent_factory=agent_factory,
        instances=instances,
        pool_name=pool_name,
        max_concurrent_per_instance=max_concurrent_per_instance
    )
|