dao-ai 0.0.25__py3-none-any.whl → 0.1.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (63)
  1. dao_ai/__init__.py +29 -0
  2. dao_ai/agent_as_code.py +5 -5
  3. dao_ai/cli.py +245 -40
  4. dao_ai/config.py +1863 -338
  5. dao_ai/genie/__init__.py +38 -0
  6. dao_ai/genie/cache/__init__.py +43 -0
  7. dao_ai/genie/cache/base.py +72 -0
  8. dao_ai/genie/cache/core.py +79 -0
  9. dao_ai/genie/cache/lru.py +347 -0
  10. dao_ai/genie/cache/semantic.py +970 -0
  11. dao_ai/genie/core.py +35 -0
  12. dao_ai/graph.py +27 -228
  13. dao_ai/hooks/__init__.py +9 -6
  14. dao_ai/hooks/core.py +27 -195
  15. dao_ai/logging.py +56 -0
  16. dao_ai/memory/__init__.py +10 -0
  17. dao_ai/memory/core.py +65 -30
  18. dao_ai/memory/databricks.py +402 -0
  19. dao_ai/memory/postgres.py +79 -38
  20. dao_ai/messages.py +6 -4
  21. dao_ai/middleware/__init__.py +125 -0
  22. dao_ai/middleware/assertions.py +806 -0
  23. dao_ai/middleware/base.py +50 -0
  24. dao_ai/middleware/core.py +67 -0
  25. dao_ai/middleware/guardrails.py +420 -0
  26. dao_ai/middleware/human_in_the_loop.py +232 -0
  27. dao_ai/middleware/message_validation.py +586 -0
  28. dao_ai/middleware/summarization.py +197 -0
  29. dao_ai/models.py +1306 -114
  30. dao_ai/nodes.py +261 -166
  31. dao_ai/optimization.py +674 -0
  32. dao_ai/orchestration/__init__.py +52 -0
  33. dao_ai/orchestration/core.py +294 -0
  34. dao_ai/orchestration/supervisor.py +278 -0
  35. dao_ai/orchestration/swarm.py +271 -0
  36. dao_ai/prompts.py +128 -31
  37. dao_ai/providers/databricks.py +645 -172
  38. dao_ai/state.py +157 -21
  39. dao_ai/tools/__init__.py +13 -5
  40. dao_ai/tools/agent.py +1 -3
  41. dao_ai/tools/core.py +64 -11
  42. dao_ai/tools/email.py +232 -0
  43. dao_ai/tools/genie.py +144 -295
  44. dao_ai/tools/mcp.py +220 -133
  45. dao_ai/tools/memory.py +50 -0
  46. dao_ai/tools/python.py +9 -14
  47. dao_ai/tools/search.py +14 -0
  48. dao_ai/tools/slack.py +22 -10
  49. dao_ai/tools/sql.py +202 -0
  50. dao_ai/tools/time.py +30 -7
  51. dao_ai/tools/unity_catalog.py +165 -88
  52. dao_ai/tools/vector_search.py +360 -40
  53. dao_ai/utils.py +218 -16
  54. dao_ai-0.1.2.dist-info/METADATA +455 -0
  55. dao_ai-0.1.2.dist-info/RECORD +64 -0
  56. {dao_ai-0.0.25.dist-info → dao_ai-0.1.2.dist-info}/WHEEL +1 -1
  57. dao_ai/chat_models.py +0 -204
  58. dao_ai/guardrails.py +0 -112
  59. dao_ai/tools/human_in_the_loop.py +0 -100
  60. dao_ai-0.0.25.dist-info/METADATA +0 -1165
  61. dao_ai-0.0.25.dist-info/RECORD +0 -41
  62. {dao_ai-0.0.25.dist-info → dao_ai-0.1.2.dist-info}/entry_points.txt +0 -0
  63. {dao_ai-0.0.25.dist-info → dao_ai-0.1.2.dist-info}/licenses/LICENSE +0 -0
@@ -0,0 +1,271 @@
1
+ """
2
+ Swarm pattern for multi-agent orchestration.
3
+
4
+ The swarm pattern allows agents to directly hand off control to each other
5
+ without a central coordinator. Each agent has handoff tools for the agents
6
+ they are allowed to transfer control to. This provides decentralized,
7
+ peer-to-peer collaboration.
8
+
9
+ Based on: https://github.com/langchain-ai/langgraph-swarm-py
10
+ """
11
+
12
+ from typing import Callable, Sequence
13
+
14
+ from langchain_core.tools import BaseTool
15
+ from langgraph.checkpoint.base import BaseCheckpointSaver
16
+ from langgraph.graph import StateGraph
17
+ from langgraph.graph.state import CompiledStateGraph
18
+ from langgraph.store.base import BaseStore
19
+ from loguru import logger
20
+
21
+ from dao_ai.config import (
22
+ AgentModel,
23
+ AppConfig,
24
+ MemoryModel,
25
+ OrchestrationModel,
26
+ SwarmModel,
27
+ )
28
+ from dao_ai.nodes import create_agent_node
29
+ from dao_ai.orchestration import (
30
+ create_agent_node_handler,
31
+ create_checkpointer,
32
+ create_handoff_tool,
33
+ create_store,
34
+ get_handoff_description,
35
+ )
36
+ from dao_ai.state import AgentState, Context
37
+
38
+
39
def _handoffs_for_agent(
    agent: AgentModel,
    config: AppConfig,
) -> Sequence[BaseTool]:
    """
    Create handoff tools for an agent based on configuration.

    Handoff tools route to the parent graph since agents are subgraphs
    wrapped in handlers.

    If the swarm configuration does not list explicit handoffs for this
    agent, every registered agent is treated as a valid handoff target
    (the agent itself is always excluded).

    Args:
        agent: The agent to create handoff tools for
        config: The application configuration

    Returns:
        List of handoff tools for the agent
    """
    handoff_tools: list[BaseTool] = []

    handoffs: dict[str, Sequence[AgentModel | str] | None] = (
        config.app.orchestration.swarm.handoffs or {}
    )
    agent_handoffs: Sequence[AgentModel | str] | None = handoffs.get(agent.name)
    if agent_handoffs is None:
        # No explicit handoff list configured: allow every registered agent.
        agent_handoffs = config.app.agents

    for handoff_to_agent in agent_handoffs:
        if isinstance(handoff_to_agent, str):
            # Bind the target name to a dedicated variable before resolving.
            # The original lambda closed over `handoff_to_agent`, which this
            # same statement reassigns — that only works while find_agents is
            # consumed eagerly by next(iter(...)). Binding `target_name`
            # removes the dependence on evaluation order.
            target_name: str = handoff_to_agent
            handoff_to_agent = next(
                iter(config.find_agents(lambda a: a.name == target_name)), None
            )

        if handoff_to_agent is None:
            logger.warning("Handoff agent not found in configuration", agent=agent.name)
            continue
        if agent.name == handoff_to_agent.name:
            # Never create a handoff tool that targets the agent itself.
            continue
        logger.debug(
            "Creating handoff tool",
            from_agent=agent.name,
            to_agent=handoff_to_agent.name,
        )

        handoff_description: str = get_handoff_description(handoff_to_agent)

        handoff_tools.append(
            create_handoff_tool(
                target_agent_name=handoff_to_agent.name,
                description=handoff_description,
            )
        )
    return handoff_tools
91
+
92
+
93
def _create_swarm_router(
    default_agent: str,
    agent_names: list[str],
) -> Callable[[AgentState], str]:
    """
    Build the entry-point router for the swarm pattern.

    The returned callable inspects the ``active_agent`` field of the state
    and picks the agent node that should run next. This enables:
    1. Resuming conversations with the last active agent (from checkpointer)
    2. Routing to the default agent for new conversations
    3. Following handoffs that set ``active_agent``

    Args:
        default_agent: Agent name used when ``active_agent`` is unset or invalid
        agent_names: Names of all valid agent nodes in the graph

    Returns:
        A function mapping the current state to the name of the next agent
    """
    # Membership checks run per step; a set keeps them O(1).
    valid_agents = set(agent_names)

    def router(state: AgentState) -> str:
        active_agent: str | None = state.get("active_agent")

        # New conversation: nothing recorded yet, start with the default.
        if not active_agent:
            logger.trace(
                "No active agent in state, routing to default",
                default_agent=default_agent,
            )
            return default_agent

        # Known agent recorded in state: resume with it.
        if active_agent in valid_agents:
            logger.trace("Routing to active agent", active_agent=active_agent)
            return active_agent

        # Stale/unknown agent name: fall back to the default and warn.
        logger.warning(
            "Invalid active agent, routing to default",
            active_agent=active_agent,
            default_agent=default_agent,
        )
        return default_agent

    return router
139
+
140
+
141
def create_swarm_graph(config: AppConfig) -> CompiledStateGraph:
    """
    Create a swarm-based multi-agent graph.

    The swarm pattern allows agents to directly hand off control to each other
    without a central coordinator. Each agent has handoff tools for the agents
    they are allowed to transfer control to.

    Key features:
    1. Router function checks `active_agent` state to resume with last active agent
    2. Handoff tools update `active_agent` and use Command(goto=...) to route
    3. Agents are CompiledStateGraphs wrapped in handlers for message filtering
    4. Checkpointer persists state to enable conversation resumption

    Args:
        config: The application configuration

    Returns:
        A compiled LangGraph state machine

    See: https://github.com/langchain-ai/langgraph-swarm-py
    """
    # Hoisted out of the per-agent loop below: importing inside the loop
    # re-runs the import machinery on every iteration for no benefit.
    from copy import deepcopy

    orchestration: OrchestrationModel = config.app.orchestration
    swarm: SwarmModel = orchestration.swarm

    # Determine the default agent name (config may hold a model or a bare name)
    default_agent: str
    if isinstance(swarm.default_agent, AgentModel):
        default_agent = swarm.default_agent.name
    else:
        default_agent = swarm.default_agent

    logger.info(
        "Creating swarm graph",
        pattern="handoff",
        default_agent=default_agent,
        agents_count=len(config.app.agents),
    )

    # Create agent subgraphs with their specific handoff tools
    # Each agent gets handoff tools only for agents they're allowed to hand off to
    agent_subgraphs: dict[str, CompiledStateGraph] = {}
    memory: MemoryModel | None = orchestration.memory

    # Get swarm-level middleware to apply to all agents
    swarm_middleware: list = swarm.middleware if swarm.middleware else []
    if swarm_middleware:
        logger.info(
            "Applying swarm-level middleware to all agents",
            middleware_count=len(swarm_middleware),
            middleware_names=[mw.name for mw in swarm_middleware],
        )

    for registered_agent in config.app.agents:
        # Get handoff tools for this agent
        handoff_tools: Sequence[BaseTool] = _handoffs_for_agent(
            agent=registered_agent,
            config=config,
        )

        # Merge swarm-level middleware with agent-specific middleware.
        # Swarm middleware is applied first, then agent middleware.
        if swarm_middleware:
            # Deep-copy so the shared config object is never mutated.
            agent_with_middleware = deepcopy(registered_agent)
            agent_with_middleware.middleware = (
                swarm_middleware + agent_with_middleware.middleware
            )

            logger.debug(
                "Merged middleware for agent",
                agent=registered_agent.name,
                swarm_middleware_count=len(swarm_middleware),
                agent_middleware_count=len(registered_agent.middleware),
                total_middleware_count=len(agent_with_middleware.middleware),
            )
        else:
            agent_with_middleware = registered_agent

        agent_subgraph: CompiledStateGraph = create_agent_node(
            agent=agent_with_middleware,
            memory=memory,
            chat_history=config.app.chat_history,
            additional_tools=handoff_tools,
        )
        agent_subgraphs[registered_agent.name] = agent_subgraph
        logger.debug(
            "Created swarm agent subgraph",
            agent=registered_agent.name,
            handoffs_count=len(handoff_tools),
        )

    # Set up memory store and checkpointer
    store: BaseStore | None = create_store(orchestration)
    checkpointer: BaseCheckpointSaver | None = create_checkpointer(orchestration)

    # Get list of agent names for the router
    agent_names: list[str] = list(agent_subgraphs.keys())

    # Create the workflow graph.
    # All agents are nodes wrapped in handlers, handoffs route via Command.
    workflow: StateGraph = StateGraph(
        AgentState,
        input=AgentState,
        output=AgentState,
        context_schema=Context,
    )

    # Add agent nodes with message filtering handlers.
    # This ensures consistent behavior with supervisor pattern.
    for agent_name, agent_subgraph in agent_subgraphs.items():
        handler = create_agent_node_handler(
            agent_name=agent_name,
            agent=agent_subgraph,
            output_mode="last_message",
        )
        workflow.add_node(agent_name, handler)

    # Create the swarm router that checks active_agent state.
    # This enables resuming conversations with the last active agent.
    router = _create_swarm_router(default_agent, agent_names)

    # Use conditional entry point to route based on active_agent.
    # This is the key pattern from langgraph-swarm-py.
    workflow.set_conditional_entry_point(router)

    return workflow.compile(checkpointer=checkpointer, store=store)
dao_ai/prompts.py CHANGED
@@ -1,47 +1,144 @@
1
- from typing import Any, Callable, Optional, Sequence
1
+ """
2
+ Prompt utilities for DAO AI agents.
2
3
 
3
- from langchain.prompts import PromptTemplate
4
- from langchain_core.messages import (
5
- BaseMessage,
6
- SystemMessage,
4
+ This module provides utilities for creating dynamic prompts using
5
+ LangChain v1's @dynamic_prompt middleware decorator pattern.
6
+ """
7
+
8
+ from typing import Any, Optional
9
+
10
+ from langchain.agents.middleware import (
11
+ AgentMiddleware,
12
+ ModelRequest,
13
+ dynamic_prompt,
7
14
  )
8
- from langchain_core.runnables import RunnableConfig
15
+ from langchain_core.prompts import PromptTemplate
9
16
  from loguru import logger
10
17
 
11
18
  from dao_ai.config import PromptModel
12
- from dao_ai.state import SharedState
19
+ from dao_ai.state import Context
13
20
 
14
21
 
15
22
def make_prompt(
    base_system_prompt: Optional[str | PromptModel],
) -> AgentMiddleware | None:
    """
    Create a dynamic prompt middleware from configuration.

    For LangChain v1's create_agent, this function returns an
    AgentMiddleware instance for use with the middleware parameter
    whenever a prompt is configured. This provides a consistent
    interface regardless of whether the prompt template has variables
    or not.

    Args:
        base_system_prompt: The system prompt string or PromptModel

    Returns:
        An AgentMiddleware created by @dynamic_prompt, or None if no prompt
    """
    logger.trace("Creating prompt middleware", has_prompt=bool(base_system_prompt))

    if not base_system_prompt:
        return None

    # Extract template string from PromptModel or use string directly
    template: str
    if isinstance(base_system_prompt, PromptModel):
        template = base_system_prompt.template
    else:
        template = base_system_prompt

    # Create prompt template (handles both static and dynamic prompts)
    prompt_template: PromptTemplate = PromptTemplate.from_template(template)

    if prompt_template.input_variables:
        logger.trace(
            "Dynamic prompt with variables", variables=prompt_template.input_variables
        )
    else:
        logger.trace("Static prompt (no variables, using middleware for consistency)")

    @dynamic_prompt
    def dynamic_system_prompt(request: ModelRequest) -> str:
        """Generate dynamic system prompt based on runtime context."""
        # Default every template variable to "" so formatting never raises
        # on a missing key.
        params: dict[str, Any] = {
            input_variable: "" for input_variable in prompt_template.input_variables
        }

        # Access context from runtime. Any context field whose name matches a
        # template variable overrides the default; this single loop covers
        # user_id, thread_id, and every other field uniformly (the previous
        # explicit user_id/thread_id branches were dead code subsumed by it).
        context: Context = request.runtime.context
        if context:
            context_dict = context.model_dump()
            for key, value in context_dict.items():
                if key in params and value is not None:
                    params[key] = value

        # Format the prompt
        formatted_prompt: str = prompt_template.format(**params)
        logger.trace(
            "Formatted dynamic prompt with context",
            prompt_prefix=formatted_prompt[:200],
        )

        return formatted_prompt

    return dynamic_system_prompt
92
+
93
+
94
def create_prompt_middleware(
    base_system_prompt: Optional[str | PromptModel],
) -> AgentMiddleware | None:
    """
    Create a dynamic prompt middleware from configuration.

    Returns an AgentMiddleware suitable for use with LangChain v1's
    middleware system when a prompt is configured.

    Args:
        base_system_prompt: The system prompt string or PromptModel

    Returns:
        An AgentMiddleware created by @dynamic_prompt, or None if no prompt
    """
    if not base_system_prompt:
        return None

    # A PromptModel carries its template on `.template`; a plain string is
    # already the template text.
    template_str: str = (
        base_system_prompt.template
        if isinstance(base_system_prompt, PromptModel)
        else base_system_prompt
    )

    prompt_template: PromptTemplate = PromptTemplate.from_template(template_str)

    @dynamic_prompt
    def prompt_middleware(request: ModelRequest) -> str:
        """Generate system prompt based on runtime context."""
        # Start every template variable at "" so formatting cannot fail,
        # then overlay any matching non-None context fields.
        params: dict[str, Any] = dict.fromkeys(prompt_template.input_variables, "")

        context: Context = request.runtime.context
        if context:
            for key, value in context.model_dump().items():
                if key in params and value is not None:
                    params[key] = value

        formatted_prompt: str = prompt_template.format(**params)
        logger.trace("Formatted dynamic prompt with context")

        return formatted_prompt

    return prompt_middleware