dao-ai 0.0.36__py3-none-any.whl → 0.1.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (59)
  1. dao_ai/__init__.py +29 -0
  2. dao_ai/cli.py +195 -30
  3. dao_ai/config.py +770 -244
  4. dao_ai/genie/__init__.py +1 -22
  5. dao_ai/genie/cache/__init__.py +1 -2
  6. dao_ai/genie/cache/base.py +20 -70
  7. dao_ai/genie/cache/core.py +75 -0
  8. dao_ai/genie/cache/lru.py +44 -21
  9. dao_ai/genie/cache/semantic.py +390 -109
  10. dao_ai/genie/core.py +35 -0
  11. dao_ai/graph.py +27 -253
  12. dao_ai/hooks/__init__.py +9 -6
  13. dao_ai/hooks/core.py +22 -190
  14. dao_ai/memory/__init__.py +10 -0
  15. dao_ai/memory/core.py +23 -5
  16. dao_ai/memory/databricks.py +389 -0
  17. dao_ai/memory/postgres.py +2 -2
  18. dao_ai/messages.py +6 -4
  19. dao_ai/middleware/__init__.py +125 -0
  20. dao_ai/middleware/assertions.py +778 -0
  21. dao_ai/middleware/base.py +50 -0
  22. dao_ai/middleware/core.py +61 -0
  23. dao_ai/middleware/guardrails.py +415 -0
  24. dao_ai/middleware/human_in_the_loop.py +228 -0
  25. dao_ai/middleware/message_validation.py +554 -0
  26. dao_ai/middleware/summarization.py +192 -0
  27. dao_ai/models.py +1177 -108
  28. dao_ai/nodes.py +118 -161
  29. dao_ai/optimization.py +664 -0
  30. dao_ai/orchestration/__init__.py +52 -0
  31. dao_ai/orchestration/core.py +287 -0
  32. dao_ai/orchestration/supervisor.py +264 -0
  33. dao_ai/orchestration/swarm.py +226 -0
  34. dao_ai/prompts.py +126 -29
  35. dao_ai/providers/databricks.py +126 -381
  36. dao_ai/state.py +139 -21
  37. dao_ai/tools/__init__.py +8 -5
  38. dao_ai/tools/core.py +57 -4
  39. dao_ai/tools/email.py +280 -0
  40. dao_ai/tools/genie.py +47 -24
  41. dao_ai/tools/mcp.py +4 -3
  42. dao_ai/tools/memory.py +50 -0
  43. dao_ai/tools/python.py +4 -12
  44. dao_ai/tools/search.py +14 -0
  45. dao_ai/tools/slack.py +1 -1
  46. dao_ai/tools/unity_catalog.py +8 -6
  47. dao_ai/tools/vector_search.py +16 -9
  48. dao_ai/utils.py +72 -8
  49. dao_ai-0.1.1.dist-info/METADATA +1878 -0
  50. dao_ai-0.1.1.dist-info/RECORD +62 -0
  51. dao_ai/chat_models.py +0 -204
  52. dao_ai/guardrails.py +0 -112
  53. dao_ai/tools/genie/__init__.py +0 -236
  54. dao_ai/tools/human_in_the_loop.py +0 -100
  55. dao_ai-0.0.36.dist-info/METADATA +0 -951
  56. dao_ai-0.0.36.dist-info/RECORD +0 -47
  57. {dao_ai-0.0.36.dist-info → dao_ai-0.1.1.dist-info}/WHEEL +0 -0
  58. {dao_ai-0.0.36.dist-info → dao_ai-0.1.1.dist-info}/entry_points.txt +0 -0
  59. {dao_ai-0.0.36.dist-info → dao_ai-0.1.1.dist-info}/licenses/LICENSE +0 -0
@@ -0,0 +1,226 @@
1
+ """
2
+ Swarm pattern for multi-agent orchestration.
3
+
4
+ The swarm pattern allows agents to directly hand off control to each other
5
+ without a central coordinator. Each agent has handoff tools for the agents
6
+ they are allowed to transfer control to. This provides decentralized,
7
+ peer-to-peer collaboration.
8
+
9
+ Based on: https://github.com/langchain-ai/langgraph-swarm-py
10
+ """
11
+
12
+ from typing import Callable, Sequence
13
+
14
+ from langchain_core.tools import BaseTool
15
+ from langgraph.checkpoint.base import BaseCheckpointSaver
16
+ from langgraph.graph import StateGraph
17
+ from langgraph.graph.state import CompiledStateGraph
18
+ from langgraph.store.base import BaseStore
19
+ from loguru import logger
20
+
21
+ from dao_ai.config import (
22
+ AgentModel,
23
+ AppConfig,
24
+ MemoryModel,
25
+ OrchestrationModel,
26
+ SwarmModel,
27
+ )
28
+ from dao_ai.nodes import create_agent_node
29
+ from dao_ai.orchestration import (
30
+ create_agent_node_handler,
31
+ create_checkpointer,
32
+ create_handoff_tool,
33
+ create_store,
34
+ get_handoff_description,
35
+ )
36
+ from dao_ai.state import AgentState, Context
37
+
38
+
39
def _handoffs_for_agent(
    agent: AgentModel,
    config: AppConfig,
) -> Sequence[BaseTool]:
    """
    Build the handoff tools available to a single agent.

    Handoff tools route to the parent graph since agents are subgraphs
    wrapped in handlers.

    Args:
        agent: The agent to create handoff tools for
        config: The application configuration

    Returns:
        List of handoff tools for the agent
    """
    tools: list[BaseTool] = []

    configured: dict[str, Sequence[AgentModel | str] | None] = (
        config.app.orchestration.swarm.handoffs or {}
    )
    targets: Sequence[AgentModel | str] | None = configured.get(agent.name)
    if targets is None:
        # No explicit handoff list for this agent: allow every agent.
        targets = config.app.agents

    for target in targets:
        if isinstance(target, str):
            # Resolve an agent name to its AgentModel, if one exists.
            target = next(
                iter(config.find_agents(lambda a: a.name == target)), None
            )

        if target is None:
            logger.warning(
                f"Handoff agent not found in configuration for agent {agent.name}"
            )
            continue
        if agent.name == target.name:
            # An agent never hands off to itself.
            continue
        logger.debug(
            f"Creating handoff tool from agent {agent.name} to {target.name}"
        )

        tools.append(
            create_handoff_tool(
                target_agent_name=target.name,
                description=get_handoff_description(target),
            )
        )
    return tools
91
+
92
+
93
def _create_swarm_router(
    default_agent: str,
    agent_names: list[str],
) -> Callable[[AgentState], str]:
    """
    Build the entry-point router for the swarm pattern.

    The returned function inspects the ``active_agent`` field of state and
    decides which agent node should handle the next step. This enables:
    1. Resuming conversations with the last active agent (from checkpointer)
    2. Routing to the default agent for new conversations
    3. Following handoffs that set active_agent

    Args:
        default_agent: The default agent to route to if active_agent is not set
        agent_names: List of valid agent names

    Returns:
        A router function that returns the agent name to route to
    """

    def route(state: AgentState) -> str:
        current: str | None = state.get("active_agent")

        if current and current in agent_names:
            # A known agent was active last turn: keep routing to it.
            logger.debug(f"Routing to active_agent: {current}")
            return current

        if not current:
            # Fresh conversation: nothing active yet.
            logger.debug(
                f"No active_agent in state, routing to default: {default_agent}"
            )
        else:
            # active_agent is set but not a known agent: fall back.
            logger.warning(
                f"Invalid active_agent '{current}', routing to default: {default_agent}"
            )
        return default_agent

    return route
136
+
137
+
138
def create_swarm_graph(config: AppConfig) -> CompiledStateGraph:
    """
    Create a swarm-based multi-agent graph.

    The swarm pattern allows agents to directly hand off control to each other
    without a central coordinator. Each agent has handoff tools for the agents
    they are allowed to transfer control to.

    Key features:
    1. Router function checks `active_agent` state to resume with last active agent
    2. Handoff tools update `active_agent` and use Command(goto=...) to route
    3. Agents are CompiledStateGraphs wrapped in handlers for message filtering
    4. Checkpointer persists state to enable conversation resumption

    Args:
        config: The application configuration

    Returns:
        A compiled LangGraph state machine

    See: https://github.com/langchain-ai/langgraph-swarm-py
    """
    logger.debug("Creating swarm graph (handoff pattern)")

    orchestration: OrchestrationModel = config.app.orchestration
    swarm: SwarmModel = orchestration.swarm

    # Resolve the default agent name (config may hold a model or a bare name).
    default_agent: str = (
        swarm.default_agent.name
        if isinstance(swarm.default_agent, AgentModel)
        else swarm.default_agent
    )

    # Build one subgraph per agent, each armed only with the handoff tools
    # its configuration permits.
    memory: MemoryModel | None = orchestration.memory
    subgraphs: dict[str, CompiledStateGraph] = {}
    for registered_agent in config.app.agents:
        handoff_tools: Sequence[BaseTool] = _handoffs_for_agent(
            agent=registered_agent,
            config=config,
        )
        subgraphs[registered_agent.name] = create_agent_node(
            agent=registered_agent,
            memory=memory,
            chat_history=config.app.chat_history,
            additional_tools=handoff_tools,
        )
        logger.debug(f"Created swarm agent subgraph: {registered_agent.name}")

    # Persistence: long-term store plus per-thread checkpointer.
    store: BaseStore | None = create_store(orchestration)
    checkpointer: BaseCheckpointSaver | None = create_checkpointer(orchestration)

    workflow: StateGraph = StateGraph(
        AgentState,
        input=AgentState,
        output=AgentState,
        context_schema=Context,
    )

    # Wrap every subgraph in a handler so message filtering matches the
    # supervisor pattern, then register it as a node. Handoffs route between
    # these nodes via Command.
    for name, subgraph in subgraphs.items():
        workflow.add_node(
            name,
            create_agent_node_handler(
                agent_name=name,
                agent=subgraph,
                output_mode="last_message",
            ),
        )

    # Conditional entry point keyed on `active_agent` so resumed
    # conversations pick up with the last active agent — the key pattern
    # from langgraph-swarm-py.
    workflow.set_conditional_entry_point(
        _create_swarm_router(default_agent, list(subgraphs.keys()))
    )

    return workflow.compile(checkpointer=checkpointer, store=store)
dao_ai/prompts.py CHANGED
@@ -1,47 +1,144 @@
1
- from typing import Any, Callable, Optional, Sequence
1
+ """
2
+ Prompt utilities for DAO AI agents.
2
3
 
3
- from langchain_core.messages import (
4
- BaseMessage,
5
- SystemMessage,
4
+ This module provides utilities for creating dynamic prompts using
5
+ LangChain v1's @dynamic_prompt middleware decorator pattern.
6
+ """
7
+
8
+ from typing import Any, Optional
9
+
10
+ from langchain.agents.middleware import (
11
+ AgentMiddleware,
12
+ ModelRequest,
13
+ dynamic_prompt,
6
14
  )
7
15
  from langchain_core.prompts import PromptTemplate
8
- from langchain_core.runnables import RunnableConfig
9
16
  from loguru import logger
10
17
 
11
18
  from dao_ai.config import PromptModel
12
- from dao_ai.state import SharedState
19
+ from dao_ai.state import Context
13
20
 
14
21
 
15
22
def make_prompt(
    base_system_prompt: Optional[str | PromptModel],
) -> AgentMiddleware | None:
    """
    Create a dynamic prompt middleware from configuration.

    For LangChain v1's create_agent, this function always returns an
    AgentMiddleware instance for use with the middleware parameter,
    giving a uniform interface whether or not the template declares
    any variables.

    Args:
        base_system_prompt: The system prompt string or PromptModel

    Returns:
        An AgentMiddleware created by @dynamic_prompt, or None if no prompt
    """
    logger.debug(f"make_prompt: {base_system_prompt}")

    if not base_system_prompt:
        return None

    # A PromptModel carries its template; a plain string *is* the template.
    raw_template: str = (
        base_system_prompt.template
        if isinstance(base_system_prompt, PromptModel)
        else base_system_prompt
    )

    # One code path handles both static and dynamic prompts.
    prompt_template: PromptTemplate = PromptTemplate.from_template(raw_template)

    if prompt_template.input_variables:
        logger.debug(
            f"Dynamic prompt with variables: {prompt_template.input_variables}"
        )
    else:
        logger.debug("Static prompt (no variables, using middleware for consistency)")

    @dynamic_prompt
    def dynamic_system_prompt(request: ModelRequest) -> str:
        """Generate dynamic system prompt based on runtime context."""
        # Default every template variable to the empty string, then fill in
        # whatever the runtime context can supply.
        values: dict[str, Any] = dict.fromkeys(prompt_template.input_variables, "")

        context: Context = request.runtime.context
        if context:
            if context.user_id and "user_id" in values:
                values["user_id"] = context.user_id
            if context.thread_id and "thread_id" in values:
                values["thread_id"] = context.thread_id
            # Custom context entries fill only declared template variables.
            for key, value in context.custom.items():
                if key in values:
                    values[key] = value

        formatted_prompt: str = prompt_template.format(**values)
        logger.debug("Formatted dynamic prompt with context")
        logger.trace(f"Prompt: {formatted_prompt[:200]}...")

        return formatted_prompt

    return dynamic_system_prompt
89
+
90
+
91
def create_prompt_middleware(
    base_system_prompt: Optional[str | PromptModel],
) -> AgentMiddleware | None:
    """
    Create a dynamic prompt middleware from configuration.

    This always returns an AgentMiddleware suitable for use with
    LangChain v1's middleware system.

    Args:
        base_system_prompt: The system prompt string or PromptModel

    Returns:
        An AgentMiddleware created by @dynamic_prompt, or None if no prompt
    """
    if not base_system_prompt:
        return None

    # A PromptModel carries its template; a plain string *is* the template.
    template_str: str = (
        base_system_prompt.template
        if isinstance(base_system_prompt, PromptModel)
        else base_system_prompt
    )

    prompt_template: PromptTemplate = PromptTemplate.from_template(template_str)

    @dynamic_prompt
    def prompt_middleware(request: ModelRequest) -> str:
        """Generate system prompt based on runtime context."""
        # Default every template variable to the empty string, then fill in
        # whatever the runtime context can supply.
        values: dict[str, Any] = dict.fromkeys(prompt_template.input_variables, "")

        context: Context = request.runtime.context
        if context:
            if context.user_id and "user_id" in values:
                values["user_id"] = context.user_id
            if context.thread_id and "thread_id" in values:
                values["thread_id"] = context.thread_id
            # Custom context entries fill only declared template variables.
            for key, value in context.custom.items():
                if key in values:
                    values[key] = value

        formatted_prompt: str = prompt_template.format(**values)
        logger.debug("Formatted dynamic prompt with context")

        return formatted_prompt

    return prompt_middleware