dao-ai 0.0.35__py3-none-any.whl → 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (58) hide show
  1. dao_ai/__init__.py +29 -0
  2. dao_ai/cli.py +195 -30
  3. dao_ai/config.py +797 -242
  4. dao_ai/genie/__init__.py +38 -0
  5. dao_ai/genie/cache/__init__.py +43 -0
  6. dao_ai/genie/cache/base.py +72 -0
  7. dao_ai/genie/cache/core.py +75 -0
  8. dao_ai/genie/cache/lru.py +329 -0
  9. dao_ai/genie/cache/semantic.py +919 -0
  10. dao_ai/genie/core.py +35 -0
  11. dao_ai/graph.py +27 -253
  12. dao_ai/hooks/__init__.py +9 -6
  13. dao_ai/hooks/core.py +22 -190
  14. dao_ai/memory/__init__.py +10 -0
  15. dao_ai/memory/core.py +23 -5
  16. dao_ai/memory/databricks.py +389 -0
  17. dao_ai/memory/postgres.py +2 -2
  18. dao_ai/messages.py +6 -4
  19. dao_ai/middleware/__init__.py +125 -0
  20. dao_ai/middleware/assertions.py +778 -0
  21. dao_ai/middleware/base.py +50 -0
  22. dao_ai/middleware/core.py +61 -0
  23. dao_ai/middleware/guardrails.py +415 -0
  24. dao_ai/middleware/human_in_the_loop.py +228 -0
  25. dao_ai/middleware/message_validation.py +554 -0
  26. dao_ai/middleware/summarization.py +192 -0
  27. dao_ai/models.py +1177 -108
  28. dao_ai/nodes.py +118 -161
  29. dao_ai/optimization.py +664 -0
  30. dao_ai/orchestration/__init__.py +52 -0
  31. dao_ai/orchestration/core.py +287 -0
  32. dao_ai/orchestration/supervisor.py +264 -0
  33. dao_ai/orchestration/swarm.py +226 -0
  34. dao_ai/prompts.py +126 -29
  35. dao_ai/providers/databricks.py +126 -381
  36. dao_ai/state.py +139 -21
  37. dao_ai/tools/__init__.py +11 -5
  38. dao_ai/tools/core.py +57 -4
  39. dao_ai/tools/email.py +280 -0
  40. dao_ai/tools/genie.py +108 -35
  41. dao_ai/tools/mcp.py +4 -3
  42. dao_ai/tools/memory.py +50 -0
  43. dao_ai/tools/python.py +4 -12
  44. dao_ai/tools/search.py +14 -0
  45. dao_ai/tools/slack.py +1 -1
  46. dao_ai/tools/unity_catalog.py +8 -6
  47. dao_ai/tools/vector_search.py +16 -9
  48. dao_ai/utils.py +72 -8
  49. dao_ai-0.1.0.dist-info/METADATA +1878 -0
  50. dao_ai-0.1.0.dist-info/RECORD +62 -0
  51. dao_ai/chat_models.py +0 -204
  52. dao_ai/guardrails.py +0 -112
  53. dao_ai/tools/human_in_the_loop.py +0 -100
  54. dao_ai-0.0.35.dist-info/METADATA +0 -1169
  55. dao_ai-0.0.35.dist-info/RECORD +0 -41
  56. {dao_ai-0.0.35.dist-info → dao_ai-0.1.0.dist-info}/WHEEL +0 -0
  57. {dao_ai-0.0.35.dist-info → dao_ai-0.1.0.dist-info}/entry_points.txt +0 -0
  58. {dao_ai-0.0.35.dist-info → dao_ai-0.1.0.dist-info}/licenses/LICENSE +0 -0
@@ -0,0 +1,192 @@
1
+ """
2
+ Summarization middleware for DAO AI agents.
3
+
4
+ This module provides a LoggingSummarizationMiddleware that extends LangChain's
5
+ built-in SummarizationMiddleware with logging capabilities, and provides
6
+ helper utilities for creating summarization middleware from DAO AI configuration.
7
+
8
+ The middleware automatically:
9
+ - Summarizes older messages using a separate LLM call when thresholds are exceeded
10
+ - Replaces them with a summary message in State (permanently)
11
+ - Keeps recent messages intact for context
12
+ - Logs when summarization is triggered and completed
13
+
14
+ Example:
15
+ from dao_ai.middleware import create_summarization_middleware
16
+ from dao_ai.config import ChatHistoryModel, LLMModel
17
+
18
+ chat_history = ChatHistoryModel(
19
+ model=LLMModel(name="gpt-4o-mini"),
20
+ max_tokens=256,
21
+ max_tokens_before_summary=4000,
22
+ )
23
+
24
+ middleware = create_summarization_middleware(chat_history)
25
+ """
26
+
27
+ from typing import Any, Tuple
28
+
29
+ from langchain.agents.middleware import SummarizationMiddleware
30
+ from langchain_core.language_models import LanguageModelLike
31
+ from langchain_core.messages import BaseMessage
32
+ from langgraph.runtime import Runtime
33
+ from loguru import logger
34
+
35
+ from dao_ai.config import ChatHistoryModel
36
+
37
+ __all__ = [
38
+ "SummarizationMiddleware",
39
+ "LoggingSummarizationMiddleware",
40
+ "create_summarization_middleware",
41
+ ]
42
+
43
+
44
class LoggingSummarizationMiddleware(SummarizationMiddleware):
    """
    SummarizationMiddleware that logs whenever conversation summarization occurs.

    Extends LangChain's SummarizationMiddleware, adding INFO-level log records
    when summarization is triggered and completed, so operators can see when
    conversation history is being condensed.

    Logged metrics:
    - Message count and approximate token count before summarization
    - Message count and approximate token count after summarization
    - How many messages were condensed into the summary
    """

    def _is_remove_message(self, msg: Any) -> bool:
        """Return True when *msg* is a RemoveMessage (matched by class name)."""
        return type(msg).__name__ == "RemoveMessage"

    def _log_summarization(
        self,
        original_message_count: int,
        original_token_count: int,
        result_messages: list[Any],
    ) -> None:
        """Emit before/after metrics for a completed summarization pass."""
        # The state update looks like [RemoveMessage, summary, ...preserved];
        # drop RemoveMessage entries so counts reflect what actually remains.
        kept = [m for m in result_messages if not self._is_remove_message(m)]
        new_message_count = len(kept)
        new_token_count = self.token_counter(kept) if kept else 0

        # One of the kept messages is the freshly generated summary; the rest
        # were preserved verbatim from the original history.
        preserved_count = max(new_message_count - 1, 0)
        summarized_count = original_message_count - preserved_count

        logger.info(
            f"Conversation summarized: "
            f"BEFORE: {original_message_count} messages (~{original_token_count:,} tokens) → "
            f"AFTER: {new_message_count} messages (~{new_token_count:,} tokens) | "
            f"{summarized_count} messages condensed into 1 summary"
        )
        logger.debug(
            f"Summarization details: trigger={self.trigger}, keep={self.keep}, "
            f"preserved_messages={preserved_count}, "
            f"token_reduction={original_token_count - new_token_count:,}"
        )

    def before_model(
        self, state: dict[str, Any], runtime: Runtime
    ) -> dict[str, Any] | None:
        """Run the parent summarization hook, logging metrics when it fires."""
        history: list[BaseMessage] = state.get("messages", [])
        count_before = len(history)
        tokens_before = self.token_counter(history) if history else 0

        update = super().before_model(state, runtime)
        if update is None:
            # No summarization this turn — nothing to report.
            return None

        self._log_summarization(count_before, tokens_before, update.get("messages", []))
        return update

    async def abefore_model(
        self, state: dict[str, Any], runtime: Runtime
    ) -> dict[str, Any] | None:
        """Async variant of before_model with identical logging behavior."""
        history: list[BaseMessage] = state.get("messages", [])
        count_before = len(history)
        tokens_before = self.token_counter(history) if history else 0

        update = await super().abefore_model(state, runtime)
        if update is None:
            return None

        self._log_summarization(count_before, tokens_before, update.get("messages", []))
        return update
135
def create_summarization_middleware(
    chat_history: ChatHistoryModel,
) -> LoggingSummarizationMiddleware:
    """
    Create a LoggingSummarizationMiddleware from DAO AI ChatHistoryModel configuration.

    This factory function creates a LoggingSummarizationMiddleware instance
    configured according to the DAO AI ChatHistoryModel settings. The middleware
    includes logging at INFO level when summarization is triggered.

    Trigger selection precedence:
    1. max_tokens_before_summary (token-count trigger), if set and non-zero
    2. max_messages_before_summary (message-count trigger), if set and non-zero
    3. fallback: a token trigger of max_tokens * 10

    Args:
        chat_history: ChatHistoryModel configuration for summarization

    Returns:
        LoggingSummarizationMiddleware configured with the specified parameters

    Example:
        from dao_ai.config import ChatHistoryModel, LLMModel

        chat_history = ChatHistoryModel(
            model=LLMModel(name="gpt-4o-mini"),
            max_tokens=256,
            max_tokens_before_summary=4000,
        )

        middleware = create_summarization_middleware(chat_history)
    """
    logger.debug(
        f"Creating summarization middleware with max_tokens: {chat_history.max_tokens}, "
        f"max_tokens_before_summary: {chat_history.max_tokens_before_summary}, "
        f"max_messages_before_summary: {chat_history.max_messages_before_summary}"
    )

    # The summarization LLM used to condense older messages.
    model: LanguageModelLike = chat_history.model.as_chat_model()

    # LangChain expects ("tokens", value) or ("messages", value) tuples.
    # NOTE: builtin tuple[...] annotations (PEP 585) used for consistency with
    # the modern dict[str, Any] | None annotations elsewhere in this module.
    trigger: tuple[str, int]
    if chat_history.max_tokens_before_summary:
        trigger = ("tokens", chat_history.max_tokens_before_summary)
    elif chat_history.max_messages_before_summary:
        trigger = ("messages", chat_history.max_messages_before_summary)
    else:
        # Default to a reasonable token threshold relative to the keep budget.
        trigger = ("tokens", chat_history.max_tokens * 10)

    # Keep condition: how many recent tokens to preserve after summarization.
    keep: tuple[str, int] = ("tokens", chat_history.max_tokens)

    logger.info(f"Summarization middleware configured: trigger={trigger}, keep={keep}")

    return LoggingSummarizationMiddleware(
        model=model,
        trigger=trigger,
        keep=keep,
    )