strands-swarms 0.1.1__py3-none-any.whl → 0.1.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- strands_swarms/__init__.py +29 -42
- strands_swarms/definition.py +169 -0
- strands_swarms/dynamic_swarm.py +439 -0
- strands_swarms/orchestrator.py +70 -219
- strands_swarms-0.1.2.dist-info/METADATA +171 -0
- strands_swarms-0.1.2.dist-info/RECORD +8 -0
- strands_swarms/events.py +0 -563
- strands_swarms/py.typed +0 -0
- strands_swarms/swarm.py +0 -739
- strands_swarms-0.1.1.dist-info/METADATA +0 -391
- strands_swarms-0.1.1.dist-info/RECORD +0 -9
- {strands_swarms-0.1.1.dist-info → strands_swarms-0.1.2.dist-info}/WHEEL +0 -0
- {strands_swarms-0.1.1.dist-info → strands_swarms-0.1.2.dist-info}/licenses/LICENSE +0 -0
strands_swarms/__init__.py
CHANGED
|
@@ -26,37 +26,35 @@ Example:
|
|
|
26
26
|
'''Search the web.'''
|
|
27
27
|
return f"Results for: {query}"
|
|
28
28
|
|
|
29
|
-
|
|
30
|
-
|
|
31
|
-
verbose=True,
|
|
32
|
-
)
|
|
29
|
+
# Basic usage
|
|
30
|
+
swarm = DynamicSwarm(available_tools={"search_web": search_web})
|
|
33
31
|
result = swarm.execute("Research AI trends and summarize")
|
|
32
|
+
|
|
33
|
+
# Streaming trajectory
|
|
34
|
+
import asyncio
|
|
35
|
+
|
|
36
|
+
async def run():
|
|
37
|
+
async for event in swarm.stream_async(
|
|
38
|
+
"Research AI trends and summarize",
|
|
39
|
+
include_subagent_events=False,
|
|
40
|
+
):
|
|
41
|
+
print(event)
|
|
42
|
+
|
|
43
|
+
asyncio.run(run())
|
|
34
44
|
"""
|
|
35
45
|
|
|
36
46
|
# Re-export strands types for convenience
|
|
37
|
-
from strands.hooks import HookProvider, HookRegistry
|
|
38
47
|
from strands.multiagent.base import Status
|
|
39
48
|
|
|
40
|
-
from .
|
|
41
|
-
|
|
42
|
-
|
|
43
|
-
|
|
44
|
-
|
|
45
|
-
|
|
46
|
-
PlanningStartedEvent,
|
|
47
|
-
# Hook provider
|
|
48
|
-
PrintingHookProvider,
|
|
49
|
-
SwarmCompletedEvent,
|
|
50
|
-
SwarmFailedEvent,
|
|
51
|
-
# Planning/Orchestration events
|
|
52
|
-
SwarmStartedEvent,
|
|
53
|
-
TaskCompletedEvent,
|
|
54
|
-
TaskCreatedEvent,
|
|
55
|
-
TaskFailedEvent,
|
|
56
|
-
TaskStartedEvent,
|
|
49
|
+
from .definition import (
|
|
50
|
+
AgentDefinition,
|
|
51
|
+
DynamicSwarmCapabilities,
|
|
52
|
+
SessionConfig,
|
|
53
|
+
SwarmDefinition,
|
|
54
|
+
TaskDefinition,
|
|
57
55
|
)
|
|
56
|
+
from .dynamic_swarm import DynamicSwarm, DynamicSwarmResult, build_swarm
|
|
58
57
|
from .orchestrator import create_orchestrator_agent
|
|
59
|
-
from .swarm import DynamicSwarm, DynamicSwarmResult
|
|
60
58
|
|
|
61
59
|
__version__ = "0.1.1"
|
|
62
60
|
|
|
@@ -64,25 +62,14 @@ __all__ = [
|
|
|
64
62
|
# Main API
|
|
65
63
|
"DynamicSwarm",
|
|
66
64
|
"DynamicSwarmResult",
|
|
67
|
-
|
|
65
|
+
"build_swarm",
|
|
66
|
+
# Definition types
|
|
67
|
+
"DynamicSwarmCapabilities",
|
|
68
|
+
"SwarmDefinition",
|
|
69
|
+
"AgentDefinition",
|
|
70
|
+
"TaskDefinition",
|
|
71
|
+
"SessionConfig",
|
|
72
|
+
# Orchestrator
|
|
68
73
|
"create_orchestrator_agent",
|
|
69
|
-
# Status enum
|
|
70
74
|
"Status",
|
|
71
|
-
# Events (for custom hooks)
|
|
72
|
-
"SwarmStartedEvent",
|
|
73
|
-
"PlanningStartedEvent",
|
|
74
|
-
"AgentSpawnedEvent",
|
|
75
|
-
"TaskCreatedEvent",
|
|
76
|
-
"PlanningCompletedEvent",
|
|
77
|
-
"ExecutionStartedEvent",
|
|
78
|
-
"TaskStartedEvent",
|
|
79
|
-
"TaskCompletedEvent",
|
|
80
|
-
"TaskFailedEvent",
|
|
81
|
-
"ExecutionCompletedEvent",
|
|
82
|
-
"SwarmCompletedEvent",
|
|
83
|
-
"SwarmFailedEvent",
|
|
84
|
-
# Hook system
|
|
85
|
-
"PrintingHookProvider",
|
|
86
|
-
"HookProvider",
|
|
87
|
-
"HookRegistry",
|
|
88
75
|
]
|
|
@@ -0,0 +1,169 @@
|
|
|
1
|
+
"""Swarm definition types - data structures for defining multi-agent workflows."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
from dataclasses import dataclass, field
|
|
6
|
+
from typing import TYPE_CHECKING, Any, Callable
|
|
7
|
+
|
|
8
|
+
from strands.session.file_session_manager import FileSessionManager
|
|
9
|
+
|
|
10
|
+
# ANSI Color Constants (for consistent agent output coloring)
|
|
11
|
+
AGENT_COLORS: list[str] = [
|
|
12
|
+
"\033[94m", # Blue
|
|
13
|
+
"\033[92m", # Green
|
|
14
|
+
"\033[93m", # Yellow
|
|
15
|
+
"\033[95m", # Magenta
|
|
16
|
+
"\033[96m", # Cyan
|
|
17
|
+
"\033[91m", # Red
|
|
18
|
+
]
|
|
19
|
+
|
|
20
|
+
if TYPE_CHECKING:
|
|
21
|
+
from strands.models import Model
|
|
22
|
+
|
|
23
|
+
|
|
24
|
+
@dataclass(frozen=True)
class DynamicSwarmCapabilities:
    """Immutable configuration for available tools and models.

    Holds the fixed inventory of callables and model instances a swarm may
    draw from, plus an optional default model name for sub-agents.
    """

    available_tools: dict[str, Callable[..., Any]]
    available_models: dict[str, "Model"]
    default_model: str | None = None

    def validate_tools(self, tool_names: list[str]) -> None:
        """Validate that all tool names exist in available tools.

        Raises:
            ValueError: on the first name not present in ``available_tools``.
        """
        for requested in tool_names:
            if requested in self.available_tools:
                continue
            raise ValueError(
                f"Tool '{requested}' not in available tools: {list(self.available_tools.keys())}"
            )

    def validate_model(self, model_name: str | None) -> None:
        """Validate that the model name exists in available models.

        A falsy ``model_name`` (None or empty string) is accepted as-is.

        Raises:
            ValueError: when the name is set but unknown.
        """
        if not model_name:
            return
        if model_name in self.available_models:
            return
        raise ValueError(
            f"Model '{model_name}' not in available models: {list(self.available_models.keys())}"
        )

    @property
    def available_tool_names(self) -> list[str]:
        """Names of all registered tools, in insertion order."""
        return [*self.available_tools]

    @property
    def available_model_names(self) -> list[str]:
        """Names of all registered models, in insertion order."""
        return [*self.available_models]
|
|
56
|
+
|
|
57
|
+
|
|
58
|
+
@dataclass
class AgentDefinition:
    """Definition of a sub-agent to be spawned.

    `color` is assigned by the swarm definition at registration time and is
    used for consistent console coloring of this agent's output.
    """

    name: str
    role: str
    instructions: str | None = None
    tools: list[str] = field(default_factory=list)
    model: str | None = None
    color: str | None = None

    def build_system_prompt(self) -> str:
        """Compose the system prompt from the role and optional instructions."""
        prompt = f"You are a {self.role}."
        if not self.instructions:
            return prompt
        # The joined form deliberately leaves a blank gap before the
        # "Instructions:" section.
        return prompt + f"\n\n\nInstructions:\n{self.instructions}"
|
|
74
|
+
|
|
75
|
+
|
|
76
|
+
@dataclass
class TaskDefinition:
    """Definition of a task to be executed by a sub-agent."""

    # Unique task name; also used as the graph node id when the swarm is built.
    name: str
    # Name of a previously registered AgentDefinition that runs this task.
    agent: str
    # Optional human-readable description of what the task should do.
    description: str | None = None
    # Names of tasks that must complete before this one starts
    # (must already be registered when this task is registered).
    depends_on: list[str] = field(default_factory=list)
|
|
84
|
+
|
|
85
|
+
|
|
86
|
+
@dataclass
class SessionConfig:
    """Configuration for session persistence.

    Produces per-scope `FileSessionManager` instances whose session ids are
    derived from a shared base `session_id`.
    """

    session_id: str
    storage_dir: str = "./.swarm_sessions"

    def _manager(self, suffix: str) -> FileSessionManager:
        # All managers share the storage dir and differ only in the id suffix.
        return FileSessionManager(
            session_id=f"{self.session_id}-{suffix}",
            storage_dir=self.storage_dir,
        )

    def for_agent(self, agent_name: str) -> FileSessionManager:
        """Session manager scoped to a single named agent."""
        return self._manager(agent_name)

    def for_graph(self) -> FileSessionManager:
        """Session manager scoped to the whole execution graph."""
        return self._manager("graph")
|
|
104
|
+
|
|
105
|
+
|
|
106
|
+
class SwarmDefinition:
    """Per-query definition of sub-agents and tasks created during planning.

    Mutable registry that the orchestrator fills in while planning; it
    validates each registration against the immutable capabilities.
    """

    def __init__(self, capabilities: DynamicSwarmCapabilities) -> None:
        self._capabilities = capabilities
        self.sub_agents: dict[str, AgentDefinition] = {}
        self.tasks: dict[str, TaskDefinition] = {}
        # Index into AGENT_COLORS, advanced on every agent registration.
        self._color_index = 0

    @property
    def capabilities(self) -> DynamicSwarmCapabilities:
        """The tool/model capabilities this definition validates against."""
        return self._capabilities

    def register_agent(self, definition: AgentDefinition) -> None:
        """Register a sub-agent definition."""
        name = definition.name
        if name in self.sub_agents:
            raise ValueError(f"Agent '{name}' already exists")

        self._capabilities.validate_tools(definition.tools)
        self._capabilities.validate_model(definition.model)

        # Assign a display color, cycling through the palette. Note this
        # mutates the caller's AgentDefinition in place.
        definition.color = AGENT_COLORS[self._color_index % len(AGENT_COLORS)]
        self._color_index += 1

        self.sub_agents[name] = definition

    def register_task(self, definition: TaskDefinition) -> None:
        """Register a task definition.

        Note: dependencies must already be registered (create dependency tasks first).
        """
        name = definition.name
        if name in self.tasks:
            raise ValueError(f"Task '{name}' already exists")

        if definition.agent not in self.sub_agents:
            raise ValueError(
                f"Agent '{definition.agent}' not found. Available: {list(self.sub_agents.keys())}"
            )

        for dep in definition.depends_on:
            if dep in self.tasks:
                continue
            raise ValueError(
                f"Dependency '{dep}' not found. Available: {list(self.tasks.keys())}"
            )

        self.tasks[name] = definition

    def get_summary(self) -> str:
        """Get a summary of registered sub-agents and tasks."""
        lines: list[str] = [f"Agents ({len(self.sub_agents)}):"]
        for name, agent in self.sub_agents.items():
            lines.append(f"  - {name}: {agent.role}")
        lines.append(f"\nTasks ({len(self.tasks)}):")
        for name, task in self.tasks.items():
            suffix = f" (depends: {task.depends_on})" if task.depends_on else ""
            lines.append(f"  - {name} -> {task.agent}" + suffix)
        return "\n".join(lines)
|
|
@@ -0,0 +1,439 @@
|
|
|
1
|
+
"""DynamicSwarm - automatically construct and execute multi-agent workflows."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
import asyncio
|
|
6
|
+
from collections.abc import AsyncIterator
|
|
7
|
+
from dataclasses import dataclass, field
|
|
8
|
+
from typing import TYPE_CHECKING, Any, Callable
|
|
9
|
+
|
|
10
|
+
from strands import Agent
|
|
11
|
+
from strands.multiagent.base import MultiAgentResult, Status
|
|
12
|
+
from strands.multiagent.graph import (
|
|
13
|
+
Graph,
|
|
14
|
+
GraphBuilder,
|
|
15
|
+
GraphResult,
|
|
16
|
+
)
|
|
17
|
+
|
|
18
|
+
from .definition import (
|
|
19
|
+
AgentDefinition,
|
|
20
|
+
DynamicSwarmCapabilities,
|
|
21
|
+
SessionConfig,
|
|
22
|
+
SwarmDefinition,
|
|
23
|
+
TaskDefinition,
|
|
24
|
+
)
|
|
25
|
+
|
|
26
|
+
if TYPE_CHECKING:
|
|
27
|
+
from strands.models import Model
|
|
28
|
+
|
|
29
|
+
|
|
30
|
+
# =============================================================================
|
|
31
|
+
# DynamicSwarm
|
|
32
|
+
# =============================================================================
|
|
33
|
+
|
|
34
|
+
class DynamicSwarm:
    """Dynamically construct and execute multi-agent workflows.

    An orchestrator agent analyzes queries and coordinates workflows by:
    1. Planning: Creating specialized sub-agents with tools
    2. Execution: Running tasks with dependencies in parallel where possible
    3. Synthesis: Combining task outputs into a final response

    All progress is surfaced through ``stream_async`` as plain dict events
    keyed by ``"type"``; ``execute``/``execute_async`` drain that stream and
    return the terminal ``DynamicSwarmResult``.
    """

    def __init__(
        self,
        available_tools: dict[str, Callable[..., Any]] | None = None,
        available_models: dict[str, "Model"] | None = None,
        *,
        orchestrator_model: "Model" | None = None,
        default_agent_model: str | None = None,
        execution_timeout: float = 900.0,
        task_timeout: float = 300.0,
        session_id: str | None = None,
        session_storage_dir: str = "./.swarm_sessions",
    ) -> None:
        # None inputs collapse to empty inventories; the capabilities object
        # is frozen, so they cannot be extended after construction.
        self._capabilities = DynamicSwarmCapabilities(
            available_tools=available_tools or {},
            available_models=available_models or {},
            default_model=default_agent_model,
        )
        self._orchestrator_model = orchestrator_model
        self._execution_timeout = execution_timeout
        self._task_timeout = task_timeout
        # Session persistence is opt-in: only configured when a session_id
        # is provided.
        self._session_config = (
            SessionConfig(session_id=session_id, storage_dir=session_storage_dir)
            if session_id
            else None
        )

    def execute(self, query: str) -> DynamicSwarmResult:
        """Execute a query synchronously.

        NOTE(review): asyncio.run() raises if called from inside a running
        event loop — callers already in async context should use
        execute_async() instead.
        """
        return asyncio.run(self.execute_async(query))

    async def execute_async(self, query: str) -> DynamicSwarmResult:
        """Execute a query asynchronously (non-streaming).

        Drains the event stream and returns the payload of the final
        "swarm_result" event.

        Raises:
            RuntimeError: if the stream ends without a DynamicSwarmResult.
        """
        result: DynamicSwarmResult | None = None
        async for event in self.stream_async(query):
            if event.get("type") == "swarm_result":
                result = event.get("result")
        if not isinstance(result, DynamicSwarmResult):
            raise RuntimeError("DynamicSwarm stream ended without producing a result")
        return result

    async def stream_async(
        self,
        query: str,
        *,
        include_subagent_events: bool = False,
    ) -> AsyncIterator[dict[str, Any]]:
        """Stream execution events, including planning and graph execution.

        This yields:
        - High-level DynamicSwarm events (planning/execution/synthesis)
        - Raw `strands` Graph stream events (e.g. multiagent_node_start/stop, multiagent_result)

        By default, per-node agent stream events (type: "multiagent_node_stream") are
        filtered to avoid interleaved token/tool output when tasks run in parallel.
        Pass `include_subagent_events=True` to include them.

        Every failure path still emits a terminal "swarm_result" event so that
        execute_async() always receives a result.
        """
        # Fresh per-query definition; the orchestrator's planning tools
        # populate it as a side effect of _run_planning.
        definition = SwarmDefinition(capabilities=self._capabilities)

        yield {
            "type": "swarm_started",
            "query": query,
            "available_tools": self._capabilities.available_tool_names,
            "available_models": self._capabilities.available_model_names,
        }

        # Phase 1: Planning
        yield {"type": "planning_started"}
        planning_result = await self._run_planning(query, definition)

        if not planning_result.success:
            error = planning_result.error or "Planning failed"
            yield {"type": "planning_failed", "error": error}
            yield {
                "type": "swarm_result",
                "result": DynamicSwarmResult(
                    status=Status.FAILED,
                    planning_output=planning_result.output,
                    agents_spawned=list(definition.sub_agents.values()),
                    tasks_created=list(definition.tasks.values()),
                    error=error,
                ),
            }
            return

        # Planning "succeeded" but produced no tasks — treat as a failure.
        if not definition.tasks:
            error = "No tasks were created"
            yield {"type": "planning_failed", "error": error}
            yield {
                "type": "swarm_result",
                "result": DynamicSwarmResult(
                    status=Status.FAILED,
                    planning_output=planning_result.output,
                    agents_spawned=list(definition.sub_agents.values()),
                    tasks_created=[],
                    error=error,
                ),
            }
            return

        # NOTE(review): "agents"/"tasks" are live references to the
        # definition's dicts, not copies — consumers should not mutate them.
        yield {
            "type": "planning_completed",
            "planning_output": planning_result.output,
            "summary": definition.get_summary(),
            "agents": definition.sub_agents,
            "tasks": definition.tasks,
        }

        # Phase 2: Execution
        graph = build_swarm(
            definition,
            execution_timeout=self._execution_timeout,
            task_timeout=self._task_timeout,
            session_config=self._session_config,
        )
        yield {"type": "execution_started", "tasks": list(definition.tasks.keys())}

        execution_result: GraphResult | None = None
        execution_error: str | None = None
        try:
            async for event in graph.stream_async(query):
                # Capture the terminal graph result as it flows past.
                if event.get("type") == "multiagent_result":
                    candidate = event.get("result")
                    if isinstance(candidate, GraphResult):
                        execution_result = candidate
                # Suppress per-node token/tool chatter unless requested.
                if not include_subagent_events and event.get("type") == "multiagent_node_stream":
                    continue
                yield event
        except Exception as e:
            # Deferred: we cannot yield from inside the except block's
            # logical flow cleanly, so the error is reported below.
            execution_error = str(e)

        if execution_error:
            yield {"type": "execution_failed", "error": execution_error}
            yield {
                "type": "swarm_result",
                "result": DynamicSwarmResult(
                    status=Status.FAILED,
                    planning_output=planning_result.output,
                    execution_result=execution_result,
                    final_response=None,
                    agents_spawned=list(definition.sub_agents.values()),
                    tasks_created=list(definition.tasks.values()),
                    error=execution_error,
                ),
            }
            return

        if not execution_result:
            error = "Graph completed without producing a result"
            yield {"type": "execution_failed", "error": error}
            yield {
                "type": "swarm_result",
                "result": DynamicSwarmResult(
                    status=Status.FAILED,
                    planning_output=planning_result.output,
                    execution_result=None,
                    final_response=None,
                    agents_spawned=list(definition.sub_agents.values()),
                    tasks_created=list(definition.tasks.values()),
                    error=error,
                ),
            }
            return

        yield {"type": "execution_completed", "status": execution_result.status.value}

        # Phase 3: Synthesis
        yield {"type": "synthesis_started"}
        # Planning succeeded, so _run_planning stored the orchestrator.
        assert planning_result.orchestrator is not None
        final_response = await self._synthesize_final_response(
            query, definition, execution_result, planning_result.orchestrator
        )
        yield {"type": "synthesis_completed", "final_response": final_response}

        result = DynamicSwarmResult(
            status=execution_result.status,
            planning_output=planning_result.output,
            execution_result=execution_result,
            final_response=final_response,
            agents_spawned=list(definition.sub_agents.values()),
            tasks_created=list(definition.tasks.values()),
            error=(
                None
                if execution_result.status == Status.COMPLETED
                else f"Execution ended with status {execution_result.status.value}"
            ),
        )

        yield {"type": "swarm_result", "result": result}

    async def _run_planning(
        self, query: str, definition: SwarmDefinition
    ) -> _PlanningResult:
        """Run the planning phase: the orchestrator populates `definition`.

        Never raises — failures are wrapped in _PlanningResult(success=False).
        """
        # Local import avoids a circular import with .orchestrator.
        from .orchestrator import create_orchestrator_agent

        orchestrator = create_orchestrator_agent(
            definition=definition,
            model=self._orchestrator_model,
        )

        prompt = PLANNING_PROMPT.format(
            query=query,
            available_tools=definition.capabilities.available_tool_names or ["none"],
            available_models=definition.capabilities.available_model_names or ["default"],
        )

        try:
            # NOTE(review): orchestrator(prompt) is a synchronous call inside
            # an async method and blocks the event loop for the duration of
            # planning — confirm whether strands offers an async invoke.
            output = _extract_message_text(orchestrator(prompt))
            return _PlanningResult(success=True, output=output, orchestrator=orchestrator)
        except Exception as e:
            return _PlanningResult(success=False, error=str(e))

    async def _synthesize_final_response(
        self,
        query: str,
        definition: SwarmDefinition,
        execution_result: MultiAgentResult | None,
        orchestrator: Agent,
    ) -> str | None:
        """Combine per-task outputs into a final answer via the orchestrator.

        Best-effort: returns None when there is nothing to synthesize or the
        orchestrator call fails.
        """
        if not execution_result or not hasattr(execution_result, "results"):
            return None

        # Collect outputs in task-registration order; tasks with no node
        # result (e.g. skipped) are silently omitted.
        task_outputs: list[str] = []
        for task_name in definition.tasks:
            node_result = execution_result.results.get(task_name)
            if node_result:
                task_outputs.append(f"[{task_name}]:\n{node_result.result}")

        if not task_outputs:
            return None

        prompt = SYNTHESIS_PROMPT.format(
            query=query,
            task_outputs="\n\n".join(task_outputs),
        )

        try:
            # NOTE(review): synchronous orchestrator call — see _run_planning.
            return _extract_message_text(orchestrator(prompt))
        except Exception:
            # Deliberate best-effort: a failed synthesis degrades to None
            # rather than failing the whole swarm.
            return None
|
|
282
|
+
|
|
283
|
+
|
|
284
|
+
# =============================================================================
|
|
285
|
+
# Results
|
|
286
|
+
# =============================================================================
|
|
287
|
+
|
|
288
|
+
|
|
289
|
+
@dataclass
class DynamicSwarmResult:
    """Result from DynamicSwarm execution.

    Truthiness reflects success: ``bool(result)`` is True only when the
    status is ``Status.COMPLETED``.
    """

    status: Status
    planning_output: str | None = None
    execution_result: GraphResult | None = None
    final_response: str | None = None
    agents_spawned: list[AgentDefinition] = field(default_factory=list)
    tasks_created: list[TaskDefinition] = field(default_factory=list)
    error: str | None = None

    @property
    def agents_spawned_count(self) -> int:
        """Number of sub-agents created during planning."""
        return len(self.agents_spawned)

    @property
    def tasks_created_count(self) -> int:
        """Number of tasks created during planning."""
        return len(self.tasks_created)

    def get_output(self, task_name: str) -> Any | None:
        """Return the stringified output of a task node, or None if absent."""
        execution = self.execution_result
        if not execution or not hasattr(execution, "results"):
            return None
        node_result = execution.results.get(task_name)
        return str(node_result.result) if node_result else None

    def __bool__(self) -> bool:
        """True iff execution completed successfully."""
        return self.status == Status.COMPLETED
|
|
318
|
+
|
|
319
|
+
|
|
320
|
+
@dataclass
|
|
321
|
+
class _PlanningResult:
|
|
322
|
+
success: bool
|
|
323
|
+
output: str | None = None
|
|
324
|
+
error: str | None = None
|
|
325
|
+
orchestrator: Agent | None = None
|
|
326
|
+
|
|
327
|
+
|
|
328
|
+
# =============================================================================
|
|
329
|
+
# Graph Building
|
|
330
|
+
# =============================================================================
|
|
331
|
+
|
|
332
|
+
def build_swarm(
    definition: SwarmDefinition,
    *,
    execution_timeout: float = 900.0,
    task_timeout: float = 300.0,
    session_config: SessionConfig | None = None,
) -> Graph:
    """Build a strands Graph from a SwarmDefinition.

    Raises:
        ValueError: when the definition has no registered tasks.
    """
    if not definition.tasks:
        raise ValueError("No tasks registered - cannot build swarm")

    capabilities = definition.capabilities

    # Per-agent constructor kwargs, resolved once per agent definition.
    #
    # Note: strands Graph requires each node executor to be a unique object
    # instance, so these configs are used to create a fresh Agent per task.
    agent_configs: dict[str, dict[str, Any]] = {}
    for agent_name, agent_def in definition.sub_agents.items():
        resolved_tools = [capabilities.available_tools[t] for t in agent_def.tools]
        chosen_model = agent_def.model or capabilities.default_model
        agent_configs[agent_name] = {
            "system_prompt": agent_def.build_system_prompt(),
            "model": (
                capabilities.available_models.get(chosen_model) if chosen_model else None
            ),
            "tools": resolved_tools or None,
        }

    # Build the execution graph: one node per task, then dependency edges.
    builder = GraphBuilder()
    for task_name, task in definition.tasks.items():
        config = agent_configs[task.agent]
        executor = Agent(
            name=task.agent,
            system_prompt=config["system_prompt"],
            model=config["model"],
            tools=config["tools"],  # type: ignore[arg-type]
            callback_handler=None,
        )
        builder.add_node(executor, task_name)

    for task_name, task in definition.tasks.items():
        for dep in task.depends_on:
            builder.add_edge(dep, task_name)

    builder.set_execution_timeout(execution_timeout)
    builder.set_node_timeout(task_timeout)
    if session_config:
        builder.set_session_manager(session_config.for_graph())

    return builder.build()
|
|
384
|
+
|
|
385
|
+
|
|
386
|
+
# =============================================================================
|
|
387
|
+
# Prompts
|
|
388
|
+
# =============================================================================
|
|
389
|
+
|
|
390
|
+
|
|
391
|
+
# Prompt sent to the orchestrator during Phase 1 (planning). Placeholders:
# {query}, {available_tools}, {available_models}. The spawn_agent/create_task/
# finalize_plan tools referenced here are provided by the orchestrator agent.
PLANNING_PROMPT = """\
Analyze this request and design a multi-agent workflow to complete it:

REQUEST: {query}

AVAILABLE TOOLS: {available_tools}
AVAILABLE MODELS: {available_models}

INSTRUCTIONS:
1. Break down the request into logical steps
2. Create specialized agents with appropriate tools using spawn_agent()
3. Create tasks with dependencies using create_task()
   - Important: dependencies must already exist (create dependency tasks first)
4. Call finalize_plan() when done

Keep the workflow simple - only create agents and tasks that are necessary."""

# Prompt sent to the orchestrator during Phase 3 (synthesis). Placeholders:
# {task_outputs} (joined per-task results), {query} (the original request).
SYNTHESIS_PROMPT = """\
The workflow has completed. Here are the results from each task:

{task_outputs}

ORIGINAL REQUEST: {query}

Synthesize these results into a final response. \
Be direct - deliver the answer without mentioning the workflow or agents."""
|
|
417
|
+
|
|
418
|
+
|
|
419
|
+
# =============================================================================
|
|
420
|
+
# Helpers
|
|
421
|
+
# =============================================================================
|
|
422
|
+
|
|
423
|
+
|
|
424
|
+
def _extract_message_text(result: Any) -> str | None:
|
|
425
|
+
"""Extract the first text block from a strands Agent result, if present."""
|
|
426
|
+
message = getattr(result, "message", None)
|
|
427
|
+
if not isinstance(message, dict):
|
|
428
|
+
return None
|
|
429
|
+
|
|
430
|
+
content = message.get("content")
|
|
431
|
+
if not isinstance(content, list) or not content:
|
|
432
|
+
return None
|
|
433
|
+
|
|
434
|
+
first = content[0]
|
|
435
|
+
if not isinstance(first, dict):
|
|
436
|
+
return None
|
|
437
|
+
|
|
438
|
+
text = first.get("text")
|
|
439
|
+
return text if isinstance(text, str) else None
|