dao-ai 0.0.25__py3-none-any.whl → 0.1.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- dao_ai/__init__.py +29 -0
- dao_ai/agent_as_code.py +5 -5
- dao_ai/cli.py +245 -40
- dao_ai/config.py +1863 -338
- dao_ai/genie/__init__.py +38 -0
- dao_ai/genie/cache/__init__.py +43 -0
- dao_ai/genie/cache/base.py +72 -0
- dao_ai/genie/cache/core.py +79 -0
- dao_ai/genie/cache/lru.py +347 -0
- dao_ai/genie/cache/semantic.py +970 -0
- dao_ai/genie/core.py +35 -0
- dao_ai/graph.py +27 -228
- dao_ai/hooks/__init__.py +9 -6
- dao_ai/hooks/core.py +27 -195
- dao_ai/logging.py +56 -0
- dao_ai/memory/__init__.py +10 -0
- dao_ai/memory/core.py +65 -30
- dao_ai/memory/databricks.py +402 -0
- dao_ai/memory/postgres.py +79 -38
- dao_ai/messages.py +6 -4
- dao_ai/middleware/__init__.py +125 -0
- dao_ai/middleware/assertions.py +806 -0
- dao_ai/middleware/base.py +50 -0
- dao_ai/middleware/core.py +67 -0
- dao_ai/middleware/guardrails.py +420 -0
- dao_ai/middleware/human_in_the_loop.py +232 -0
- dao_ai/middleware/message_validation.py +586 -0
- dao_ai/middleware/summarization.py +197 -0
- dao_ai/models.py +1306 -114
- dao_ai/nodes.py +261 -166
- dao_ai/optimization.py +674 -0
- dao_ai/orchestration/__init__.py +52 -0
- dao_ai/orchestration/core.py +294 -0
- dao_ai/orchestration/supervisor.py +278 -0
- dao_ai/orchestration/swarm.py +271 -0
- dao_ai/prompts.py +128 -31
- dao_ai/providers/databricks.py +645 -172
- dao_ai/state.py +157 -21
- dao_ai/tools/__init__.py +13 -5
- dao_ai/tools/agent.py +1 -3
- dao_ai/tools/core.py +64 -11
- dao_ai/tools/email.py +232 -0
- dao_ai/tools/genie.py +144 -295
- dao_ai/tools/mcp.py +220 -133
- dao_ai/tools/memory.py +50 -0
- dao_ai/tools/python.py +9 -14
- dao_ai/tools/search.py +14 -0
- dao_ai/tools/slack.py +22 -10
- dao_ai/tools/sql.py +202 -0
- dao_ai/tools/time.py +30 -7
- dao_ai/tools/unity_catalog.py +165 -88
- dao_ai/tools/vector_search.py +360 -40
- dao_ai/utils.py +218 -16
- dao_ai-0.1.2.dist-info/METADATA +455 -0
- dao_ai-0.1.2.dist-info/RECORD +64 -0
- {dao_ai-0.0.25.dist-info → dao_ai-0.1.2.dist-info}/WHEEL +1 -1
- dao_ai/chat_models.py +0 -204
- dao_ai/guardrails.py +0 -112
- dao_ai/tools/human_in_the_loop.py +0 -100
- dao_ai-0.0.25.dist-info/METADATA +0 -1165
- dao_ai-0.0.25.dist-info/RECORD +0 -41
- {dao_ai-0.0.25.dist-info → dao_ai-0.1.2.dist-info}/entry_points.txt +0 -0
- {dao_ai-0.0.25.dist-info → dao_ai-0.1.2.dist-info}/licenses/LICENSE +0 -0
dao_ai/orchestration/__init__.py (new file)

```diff
@@ -0,0 +1,52 @@
+"""
+Orchestration patterns for DAO AI multi-agent systems.
+
+This package provides factory functions for creating LangGraph workflows
+that orchestrate multiple agents using the supervisor and swarm patterns.
+
+Supervisor Pattern:
+    A central supervisor coordinates specialized worker agents. The supervisor
+    hands off control to agents who then control the conversation. Agents can
+    hand back to the supervisor when done or hand off to other agents.
+
+Swarm Pattern:
+    Agents can directly transfer control to each other using handoff tools.
+    The active agent changes, and the user may continue interacting with
+    the new agent. This provides decentralized, peer-to-peer collaboration.
+
+Both patterns use Command(goto=...) for routing between agent nodes in
+the workflow graph.
+
+See: https://docs.langchain.com/oss/python/langchain/multi-agent
+See: https://github.com/langchain-ai/langgraph-supervisor-py
+See: https://github.com/langchain-ai/langgraph-swarm-py
+"""
+
+from dao_ai.orchestration.core import (
+    SUPERVISOR_NODE,
+    OutputMode,
+    create_agent_node_handler,
+    create_checkpointer,
+    create_handoff_tool,
+    create_orchestration_graph,
+    create_store,
+    extract_agent_response,
+    filter_messages_for_agent,
+    get_handoff_description,
+)
+
+__all__ = [
+    # Constants
+    "SUPERVISOR_NODE",
+    "OutputMode",
+    # Core utilities
+    "create_store",
+    "create_checkpointer",
+    "filter_messages_for_agent",
+    "extract_agent_response",
+    "create_agent_node_handler",
+    "create_handoff_tool",
+    "get_handoff_description",
+    # Main factory
+    "create_orchestration_graph",
+]
```
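Not part of the diff above: a minimal usage sketch of the new public surface. It assumes an `AppConfig` instance has already been built elsewhere, that no checkpointer is configured (otherwise a thread ID would be required), and that the graph accepts LangGraph-style `{"messages": [...]}` input, which is how the state is read in `core.py` below.

```python
# Hypothetical usage sketch; `app_config` and the question text are invented.
from dao_ai.config import AppConfig
from dao_ai.orchestration import create_orchestration_graph


def answer(app_config: AppConfig, question: str) -> str:
    # Dispatches to the supervisor or swarm factory based on
    # app_config.app.orchestration, then runs one turn through the graph.
    graph = create_orchestration_graph(app_config)
    state = graph.invoke({"messages": [{"role": "user", "content": question}]})
    return state["messages"][-1].content
```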
dao_ai/orchestration/core.py (new file)

```diff
@@ -0,0 +1,294 @@
+"""
+Core orchestration utilities and infrastructure.
+
+This module provides the foundational utilities for multi-agent orchestration:
+- Memory and checkpointer creation
+- Message filtering and extraction
+- Agent node handlers
+- Handoff tools
+- Main orchestration graph factory
+"""
+
+from typing import Awaitable, Callable, Literal
+
+from langchain.tools import ToolRuntime, tool
+from langchain_core.messages import AIMessage, BaseMessage, HumanMessage, ToolMessage
+from langchain_core.tools import BaseTool
+from langgraph.checkpoint.base import BaseCheckpointSaver
+from langgraph.graph.state import CompiledStateGraph
+from langgraph.runtime import Runtime
+from langgraph.store.base import BaseStore
+from langgraph.types import Command
+from loguru import logger
+
+from dao_ai.config import AgentModel, AppConfig, OrchestrationModel
+from dao_ai.messages import last_ai_message
+from dao_ai.state import AgentState, Context
+
+# Constant for supervisor node name
+SUPERVISOR_NODE = "supervisor"
+
+# Output mode for agent responses
+# - "full_history": Include all messages from the agent's execution
+# - "last_message": Include only the final AI message from the agent
+OutputMode = Literal["full_history", "last_message"]
+
+
+def create_store(orchestration: OrchestrationModel) -> BaseStore | None:
+    """
+    Create a memory store from orchestration config.
+
+    Args:
+        orchestration: The orchestration configuration
+
+    Returns:
+        The configured store, or None if not configured
+    """
+    if orchestration.memory and orchestration.memory.store:
+        store = orchestration.memory.store.as_store()
+        logger.debug("Memory store configured", store_type=type(store).__name__)
+        return store
+    return None
+
+
+def create_checkpointer(
+    orchestration: OrchestrationModel,
+) -> BaseCheckpointSaver | None:
+    """
+    Create a checkpointer from orchestration config.
+
+    Args:
+        orchestration: The orchestration configuration
+
+    Returns:
+        The configured checkpointer, or None if not configured
+    """
+    if orchestration.memory and orchestration.memory.checkpointer:
+        checkpointer = orchestration.memory.checkpointer.as_checkpointer()
+        logger.debug(
+            "Checkpointer configured", checkpointer_type=type(checkpointer).__name__
+        )
+        return checkpointer
+    return None
+
+
+def filter_messages_for_agent(messages: list[BaseMessage]) -> list[BaseMessage]:
+    """
+    Filter messages for a worker agent to avoid tool_use/tool_result pairing errors.
+
+    When the supervisor hands off to an agent, the agent should only see:
+    - HumanMessage (user queries)
+    - AIMessage with content (previous responses, but not tool calls)
+
+    This prevents the agent from seeing orphaned ToolMessages or AIMessages
+    with tool_calls that don't belong to the agent's context.
+
+    Args:
+        messages: The full message history from parent state
+
+    Returns:
+        Filtered messages safe for the agent to process
+    """
+    filtered: list[BaseMessage] = []
+    for msg in messages:
+        if isinstance(msg, HumanMessage):
+            # Always include user messages
+            filtered.append(msg)
+        elif isinstance(msg, AIMessage):
+            # Include AI messages but strip tool_calls to avoid confusion
+            if msg.content and not msg.tool_calls:
+                filtered.append(msg)
+            elif msg.content and msg.tool_calls:
+                # Include content but create clean AIMessage without tool_calls
+                filtered.append(AIMessage(content=msg.content, id=msg.id))
+        # Skip ToolMessages - they belong to the supervisor's context
+    return filtered
+
+
+def extract_agent_response(
+    messages: list[BaseMessage],
+    output_mode: OutputMode = "last_message",
+) -> list[BaseMessage]:
+    """
+    Extract the agent's response based on the output mode.
+
+    Args:
+        messages: The agent's full message history after execution
+        output_mode: How to extract the response
+            - "full_history": Return all messages (may cause issues)
+            - "last_message": Return only the final AI message
+
+    Returns:
+        Messages to include in the parent state update
+    """
+    if output_mode == "full_history":
+        return messages
+
+    # Find the last AI message with content
+    final_response: AIMessage | None = last_ai_message(messages)
+
+    if final_response:
+        # Return clean AIMessage without tool_calls
+        if final_response.tool_calls:
+            return [AIMessage(content=final_response.content, id=final_response.id)]
+        return [final_response]
+
+    return []
+
+
+def create_agent_node_handler(
+    agent_name: str,
+    agent: CompiledStateGraph,
+    output_mode: OutputMode = "last_message",
+) -> Callable[[AgentState, Runtime[Context]], Awaitable[AgentState]]:
+    """
+    Create a handler that wraps an agent subgraph with message filtering.
+
+    This filters messages before passing to the agent and extracts only
+    the relevant response, avoiding tool_use/tool_result pairing errors.
+
+    Used by both supervisor and swarm patterns to ensure consistent
+    message handling when agents are CompiledStateGraphs.
+
+    Based on langgraph-supervisor-py output_mode pattern.
+
+    Args:
+        agent_name: Name of the agent (for logging)
+        agent: The compiled agent subgraph
+        output_mode: How to extract response ("last_message" or "full_history")
+
+    Returns:
+        An async handler function for the workflow node
+    """
+
+    async def handler(state: AgentState, runtime: Runtime[Context]) -> AgentState:
+        # Filter messages to avoid tool_use/tool_result pairing errors
+        original_messages = state.get("messages", [])
+        filtered_messages = filter_messages_for_agent(original_messages)
+
+        logger.trace(
+            "Agent receiving filtered messages",
+            agent=agent_name,
+            filtered_count=len(filtered_messages),
+            original_count=len(original_messages),
+        )
+
+        # Create state with filtered messages for the agent
+        agent_state: AgentState = {
+            **state,
+            "messages": filtered_messages,
+        }
+
+        # Invoke the agent
+        result: AgentState = await agent.ainvoke(agent_state, context=runtime.context)
+
+        # Extract agent response based on output mode
+        result_messages = result.get("messages", [])
+        response_messages = extract_agent_response(result_messages, output_mode)
+
+        logger.debug(
+            "Agent completed",
+            agent=agent_name,
+            response_count=len(response_messages),
+            total_messages=len(result_messages),
+            output_mode=output_mode,
+        )
+
+        # Return state update with extracted response
+        return {
+            **result,
+            "messages": response_messages,
+        }
+
+    return handler
+
+
+def create_handoff_tool(
+    target_agent_name: str,
+    description: str,
+) -> BaseTool:
+    """
+    Create a handoff tool that transfers control to another agent.
+
+    The tool returns a Command object with goto to directly route
+    to the target agent node in the parent graph.
+
+    Args:
+        target_agent_name: The name of the agent to hand off to
+        description: Description of what this agent handles
+
+    Returns:
+        A tool that triggers a handoff to the target agent via Command
+    """
+
+    @tool
+    def handoff_tool(runtime: ToolRuntime[Context, AgentState]) -> Command:
+        """Transfer control to another agent."""
+        tool_call_id: str = runtime.tool_call_id
+        logger.debug("Handoff to agent", target_agent=target_agent_name)
+
+        return Command(
+            update={
+                "active_agent": target_agent_name,
+                "messages": [
+                    ToolMessage(
+                        content=f"Transferred to {target_agent_name}",
+                        tool_call_id=tool_call_id,
+                    )
+                ],
+            },
+            goto=target_agent_name,
+            graph=Command.PARENT,
+        )
+
+    # Set the tool name and description
+    handoff_tool.name = f"handoff_to_{target_agent_name}"
+    handoff_tool.__doc__ = f"Transfer to {target_agent_name}: {description}"
+    handoff_tool.description = f"Transfer to {target_agent_name}: {description}"
+
+    return handoff_tool
+
+
+def get_handoff_description(agent: AgentModel) -> str:
+    """
+    Get the handoff description for an agent.
+
+    Priority: handoff_prompt > description > default message
+
+    Args:
+        agent: The agent to get the handoff description for
+
+    Returns:
+        The handoff description string
+    """
+    return (
+        agent.handoff_prompt
+        or agent.description
+        or f"Handles {agent.name} related tasks and inquiries"
+    )
+
+
+def create_orchestration_graph(config: AppConfig) -> CompiledStateGraph:
+    """
+    Create the main orchestration graph based on the configuration.
+
+    This factory function creates either a supervisor or swarm graph
+    depending on the configuration.
+
+    Args:
+        config: The application configuration
+
+    Returns:
+        A compiled LangGraph state machine
+    """
+    from dao_ai.orchestration.supervisor import create_supervisor_graph
+    from dao_ai.orchestration.swarm import create_swarm_graph
+
+    orchestration: OrchestrationModel = config.app.orchestration
+    if orchestration.supervisor:
+        return create_supervisor_graph(config)
+
+    if orchestration.swarm:
+        return create_swarm_graph(config)
+
+    raise ValueError("No valid orchestration model found in the configuration.")
```
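To make the filtering behaviour above concrete, here is an illustrative sketch that is not taken from the package or its tests; the message history is invented. AIMessages that only carry tool calls, and the ToolMessages that answer them, are dropped, so a worker agent never receives half of a tool_use/tool_result pair.

```python
from langchain_core.messages import AIMessage, HumanMessage, ToolMessage

from dao_ai.orchestration import filter_messages_for_agent

history = [
    HumanMessage(content="What is the return policy?"),
    # Supervisor's handoff call: empty content, only a tool call -> dropped.
    AIMessage(
        content="",
        tool_calls=[{"name": "handoff_to_returns", "args": {}, "id": "call_1"}],
    ),
    # Tool result belongs to the supervisor's context -> dropped.
    ToolMessage(content="Transferred to returns", tool_call_id="call_1"),
    AIMessage(content="Returns are accepted within 30 days."),
]

filtered = filter_messages_for_agent(history)
# filtered == [HumanMessage(...), AIMessage("Returns are accepted within 30 days.")]
```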
dao_ai/orchestration/supervisor.py (new file)

```diff
@@ -0,0 +1,278 @@
+"""
+Supervisor pattern for multi-agent orchestration.
+
+The supervisor pattern uses a central supervisor agent that coordinates
+specialized worker agents. The supervisor hands off control to agents
+who then control the conversation. Agents can hand back to the supervisor
+when done.
+
+Based on: https://github.com/langchain-ai/langgraph-supervisor-py
+"""
+
+from langchain.agents import create_agent
+from langchain.agents.middleware import AgentMiddleware as LangchainAgentMiddleware
+from langchain.tools import ToolRuntime, tool
+from langchain_core.language_models import LanguageModelLike
+from langchain_core.messages import ToolMessage
+from langchain_core.tools import BaseTool
+from langgraph.checkpoint.base import BaseCheckpointSaver
+from langgraph.graph import StateGraph
+from langgraph.graph.state import CompiledStateGraph
+from langgraph.store.base import BaseStore
+from langgraph.types import Command
+from langmem import create_manage_memory_tool
+from loguru import logger
+
+from dao_ai.config import (
+    AppConfig,
+    MemoryModel,
+    OrchestrationModel,
+    SupervisorModel,
+)
+from dao_ai.middleware.base import AgentMiddleware
+from dao_ai.middleware.core import create_factory_middleware
+from dao_ai.nodes import create_agent_node
+from dao_ai.orchestration import (
+    SUPERVISOR_NODE,
+    create_agent_node_handler,
+    create_checkpointer,
+    create_handoff_tool,
+    create_store,
+    get_handoff_description,
+)
+from dao_ai.prompts import make_prompt
+from dao_ai.state import AgentState, Context
+from dao_ai.tools import create_tools
+from dao_ai.tools.memory import create_search_memory_tool
+
+
+def _create_handoff_back_to_supervisor_tool() -> BaseTool:
+    """
+    Create a tool for agents to hand control back to the supervisor.
+
+    This is used in the supervisor pattern when an agent has completed
+    its task and wants to return control to the supervisor for further
+    coordination or to complete the conversation.
+
+    Returns:
+        A tool that routes back to the supervisor node
+    """
+
+    @tool
+    def handoff_to_supervisor(
+        summary: str,
+        runtime: ToolRuntime[Context, AgentState],
+    ) -> Command:
+        """
+        Hand control back to the supervisor.
+
+        Use this when you have completed your task and want to return
+        control to the supervisor for further coordination.
+
+        Args:
+            summary: A brief summary of what was accomplished
+        """
+        tool_call_id: str = runtime.tool_call_id
+        logger.debug("Agent handing back to supervisor", summary_preview=summary[:100])
+
+        return Command(
+            update={
+                "active_agent": None,
+                "messages": [
+                    ToolMessage(
+                        content=f"Task completed: {summary}",
+                        tool_call_id=tool_call_id,
+                    )
+                ],
+            },
+            goto=SUPERVISOR_NODE,
+            graph=Command.PARENT,
+        )
+
+    return handoff_to_supervisor
+
+
+def _create_supervisor_agent(
+    config: AppConfig,
+    tools: list[BaseTool],
+    handoff_tools: list[BaseTool],
+    middlewares: list[AgentMiddleware],
+) -> CompiledStateGraph:
+    """
+    Create a supervisor agent with handoff tools for each worker agent.
+
+    The supervisor coordinates worker agents by handing off control.
+    Worker agents take over the conversation and can hand back to
+    the supervisor when done.
+
+    Args:
+        config: Application configuration
+        tools: Additional tools for the supervisor (e.g., memory tools)
+        handoff_tools: Handoff tools to route to worker agents
+        middlewares: Middleware to apply to the supervisor
+
+    Returns:
+        Compiled supervisor agent
+    """
+    orchestration: OrchestrationModel = config.app.orchestration
+    supervisor: SupervisorModel = orchestration.supervisor
+
+    all_tools: list[BaseTool] = list(tools) + list(handoff_tools)
+
+    model: LanguageModelLike = supervisor.model.as_chat_model()
+
+    # Get the prompt as middleware (always returns AgentMiddleware or None)
+    prompt_middleware: LangchainAgentMiddleware | None = make_prompt(supervisor.prompt)
+
+    # Add prompt middleware at the beginning for priority
+    if prompt_middleware is not None:
+        middlewares.insert(0, prompt_middleware)
+
+    # Create the supervisor agent
+    # Handoff tools route to worker agents in the parent workflow graph
+    supervisor_agent: CompiledStateGraph = create_agent(
+        name=SUPERVISOR_NODE,
+        model=model,
+        tools=all_tools,
+        middleware=middlewares,
+        state_schema=AgentState,
+        context_schema=Context,
+    )
+
+    return supervisor_agent
+
+
+def create_supervisor_graph(config: AppConfig) -> CompiledStateGraph:
+    """
+    Create a supervisor-based multi-agent system using handoffs.
+
+    This implements a supervisor pattern where:
+    1. Supervisor receives user input and decides which agent to hand off to
+    2. Agent takes control of the conversation and interacts with user
+    3. Agent can hand back to supervisor or complete the task
+
+    The supervisor and all worker agents are nodes in a workflow graph.
+    Handoff tools use Command(goto=..., graph=Command.PARENT) to route
+    between nodes.
+
+    Args:
+        config: The application configuration
+
+    Returns:
+        A compiled LangGraph state machine
+
+    Based on: https://github.com/langchain-ai/langgraph-supervisor-py
+    """
+    orchestration: OrchestrationModel = config.app.orchestration
+    supervisor_config: SupervisorModel = orchestration.supervisor
+
+    logger.info(
+        "Creating supervisor graph",
+        pattern="handoff",
+        agents_count=len(config.app.agents),
+    )
+
+    # Create handoff tools for supervisor to route to agents
+    handoff_tools: list[BaseTool] = []
+    for registered_agent in config.app.agents:
+        description: str = get_handoff_description(registered_agent)
+        handoff_tool: BaseTool = create_handoff_tool(
+            target_agent_name=registered_agent.name,
+            description=description,
+        )
+        handoff_tools.append(handoff_tool)
+        logger.debug("Created handoff tool for supervisor", agent=registered_agent.name)
+
+    # Create supervisor's own tools (e.g., memory tools)
+    logger.debug(
+        "Creating tools for supervisor", tools_count=len(supervisor_config.tools)
+    )
+    supervisor_tools: list[BaseTool] = list(create_tools(supervisor_config.tools))
+
+    # Create middleware from configuration
+    middlewares: list[AgentMiddleware] = []
+
+    for middleware_config in supervisor_config.middleware:
+        logger.trace(
+            "Creating middleware for supervisor",
+            middleware_name=middleware_config.name,
+        )
+        middleware: LangchainAgentMiddleware = create_factory_middleware(
+            function_name=middleware_config.name,
+            args=middleware_config.args,
+        )
+        if middleware is not None:
+            middlewares.append(middleware)
+            logger.debug(
+                "Created supervisor middleware", middleware=middleware_config.name
+            )
+
+    # Set up memory store and checkpointer
+    store: BaseStore | None = create_store(orchestration)
+    checkpointer: BaseCheckpointSaver | None = create_checkpointer(orchestration)
+
+    # Add memory tools if store is configured with namespace
+    if (
+        orchestration.memory
+        and orchestration.memory.store
+        and orchestration.memory.store.namespace
+    ):
+        namespace: tuple[str, ...] = ("memory", orchestration.memory.store.namespace)
+        logger.debug("Memory store namespace configured", namespace=namespace)
+        # Use Databricks-compatible search_memory tool (omits problematic filter field)
+        supervisor_tools += [
+            create_manage_memory_tool(namespace=namespace),
+            create_search_memory_tool(namespace=namespace),
+        ]
+
+    # Create the supervisor agent
+    supervisor_agent: CompiledStateGraph = _create_supervisor_agent(
+        config=config,
+        tools=supervisor_tools,
+        handoff_tools=handoff_tools,
+        middlewares=middlewares,
+    )
+
+    # Create worker agent subgraphs
+    # Each worker gets a handoff_to_supervisor tool to return control
+    agent_subgraphs: dict[str, CompiledStateGraph] = {}
+    memory: MemoryModel | None = orchestration.memory
+    for registered_agent in config.app.agents:
+        # Create handoff back to supervisor tool
+        supervisor_handoff: BaseTool = _create_handoff_back_to_supervisor_tool()
+
+        # Create the worker agent with handoff back to supervisor
+        agent_subgraph: CompiledStateGraph = create_agent_node(
+            agent=registered_agent,
+            memory=memory,
+            chat_history=config.app.chat_history,
+            additional_tools=[supervisor_handoff],
+        )
+        agent_subgraphs[registered_agent.name] = agent_subgraph
+        logger.debug("Created worker agent subgraph", agent=registered_agent.name)
+
+    # Build the workflow graph
+    # All agents are nodes, handoffs route between them via Command
+    workflow: StateGraph = StateGraph(
+        AgentState,
+        input=AgentState,
+        output=AgentState,
+        context_schema=Context,
+    )
+
+    # Add supervisor node
+    workflow.add_node(SUPERVISOR_NODE, supervisor_agent)
+
+    # Add worker agent nodes with message filtering handlers
+    for agent_name, agent_subgraph in agent_subgraphs.items():
+        handler = create_agent_node_handler(
+            agent_name=agent_name,
+            agent=agent_subgraph,
+            output_mode="last_message",  # Only return final response to avoid issues
+        )
+        workflow.add_node(agent_name, handler)
+
+    # Supervisor is the entry point
+    workflow.set_entry_point(SUPERVISOR_NODE)
+
+    return workflow.compile(checkpointer=checkpointer, store=store)
```
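Not part of the diff: an invocation sketch for the compiled supervisor graph. It assumes `app_config` is a valid AppConfig with `app.orchestration.supervisor` set; the `thread_id` is only needed when `orchestration.memory.checkpointer` is configured.

```python
# Hypothetical invocation sketch; `app_config` and the session ID are invented.
from dao_ai.orchestration.supervisor import create_supervisor_graph

graph = create_supervisor_graph(app_config)
result = graph.invoke(
    {"messages": [{"role": "user", "content": "I need help with a refund"}]},
    config={"configurable": {"thread_id": "session-123"}},
)
for message in result["messages"]:
    print(type(message).__name__, message.content)
```

The supervisor node runs first (it is the graph's entry point); any worker it hands off to contributes only its final AIMessage back to the parent state, because the worker nodes are wrapped with `output_mode="last_message"`.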