dao-ai 0.1.19__py3-none-any.whl → 0.1.21__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,227 @@
1
+ """
2
+ Tool call observability middleware for DAO AI agents.
3
+
4
+ This middleware logs detailed information about tool call patterns to help
5
+ diagnose whether tools are being called in parallel or sequentially.
6
+
7
+ Example YAML config::
8
+
9
+ middleware:
10
+ - name: dao_ai.middleware.tool_call_observability.\
11
+ create_tool_call_observability_middleware
12
+ args:
13
+ log_level: INFO
14
+ include_args: false
15
+ track_timing: true
16
+ """
17
+
18
+ from __future__ import annotations
19
+
20
+ import time
21
+ from typing import Any
22
+
23
+ from langchain.agents.middleware import AgentMiddleware
24
+ from langchain_core.messages import AIMessage, BaseMessage
25
+ from langgraph.runtime import Runtime
26
+ from loguru import logger
27
+
28
+ from dao_ai.state import AgentState, Context
29
+
30
+ __all__ = [
31
+ "ToolCallObservabilityMiddleware",
32
+ "create_tool_call_observability_middleware",
33
+ ]
34
+
35
+
36
class ToolCallObservabilityMiddleware(AgentMiddleware[AgentState, Context]):
    """
    Middleware that observes and logs tool call patterns.

    Tracks:
        - Number of tool calls per model response
        - Whether tools are called in parallel (multiple per response) or
          sequentially
        - Tool execution timing (reported via ``record_tool_execution_time``)
        - Cumulative statistics across the conversation

    Statistics live on the middleware instance and are reset in
    ``before_agent``, so a single instance should not be shared across
    concurrent agent runs.
    NOTE(review): no locking is performed — confirm the parent graph invokes
    this middleware from a single run at a time.
    """

    def __init__(
        self,
        log_level: str = "INFO",
        include_args: bool = False,
        track_timing: bool = True,
    ):
        """
        Initialize the observability middleware.

        Args:
            log_level: Logging level ("DEBUG", "INFO", "WARNING"). An
                unrecognized level silently falls back to INFO (see ``_log``).
            include_args: Whether to log tool call arguments (may be verbose)
            track_timing: Whether to track execution timing
        """
        self.log_level = log_level.upper()
        self.include_args = include_args
        self.track_timing = track_timing

        # Statistics tracking (per-run, reset on before_agent)
        self._total_model_calls = 0
        self._total_tool_calls = 0
        self._parallel_batches = 0  # Responses with 2+ tool calls
        self._sequential_calls = 0  # Responses with exactly 1 tool call
        # Maps tool name -> list of execution durations in seconds.
        # FIX: nothing in this class populates this mapping automatically, so
        # the summary's avg_tool_times_ms was always empty; callers (or a
        # tool-wrapping hook) must feed it via record_tool_execution_time().
        self._tool_execution_times: dict[str, list[float]] = {}
        self._run_start_time: float | None = None

    def _log(self, message: str, **kwargs: Any) -> None:
        """Log *message* at the configured level, defaulting to INFO.

        NOTE(review): extra kwargs are forwarded to loguru's formatting args;
        when *message* has no matching ``{}`` placeholders they may not appear
        in plain-text output unless a serializing sink is configured — verify
        against the project's loguru setup.
        """
        log_fn = getattr(logger, self.log_level.lower(), logger.info)
        log_fn(message, **kwargs)

    def record_tool_execution_time(self, tool_name: str, seconds: float) -> None:
        """
        Record one tool execution duration for the end-of-run timing summary.

        This is the only way ``_tool_execution_times`` is populated; without
        calls to this hook the ``avg_tool_times_ms`` summary field stays empty.
        No-op when ``track_timing`` is disabled.

        Args:
            tool_name: Name of the tool that was executed.
            seconds: Wall-clock execution time in seconds.
        """
        if not self.track_timing:
            return
        self._tool_execution_times.setdefault(tool_name, []).append(seconds)

    def before_agent(
        self,
        state: AgentState,
        runtime: Runtime[Context],
    ) -> dict[str, Any] | None:
        """Reset statistics at the start of each agent run.

        Returns:
            None (no state update).
        """
        self._total_model_calls = 0
        self._total_tool_calls = 0
        self._parallel_batches = 0
        self._sequential_calls = 0
        self._tool_execution_times = {}
        self._run_start_time = time.time()

        self._log(
            "Tool call observability: Agent run started",
            # Context may be absent depending on how the graph is invoked.
            thread_id=runtime.context.thread_id if runtime.context else None,
        )
        return None

    def after_model(
        self,
        state: AgentState,
        runtime: Runtime[Context],
    ) -> dict[str, Any] | None:
        """Analyze tool calls in the model response and update counters.

        Classifies the latest AI message as a parallel batch (2+ tool calls)
        or a sequential call (exactly one), logging details either way.

        Returns:
            None (no state update).
        """
        self._total_model_calls += 1

        # Extract tool calls from state messages (last message is model response)
        messages: list[BaseMessage] = state.get("messages", [])

        # Look at the last message which should be the model's response.
        # Slicing with [-1:] is safe on an empty list (loop body just skips).
        for msg in messages[-1:]:
            if isinstance(msg, AIMessage) and msg.tool_calls:
                num_tool_calls = len(msg.tool_calls)
                self._total_tool_calls += num_tool_calls

                if num_tool_calls > 1:
                    self._parallel_batches += 1
                    self._log(
                        "PARALLEL tool calls detected",
                        num_tools=num_tool_calls,
                        tool_names=[tc["name"] for tc in msg.tool_calls],
                        model_call_number=self._total_model_calls,
                    )

                    if self.include_args:
                        for tc in msg.tool_calls:
                            self._log(
                                f"  Tool: {tc['name']}",
                                args=tc.get("args", {}),
                            )
                elif num_tool_calls == 1:
                    self._sequential_calls += 1
                    tc = msg.tool_calls[0]
                    log_kwargs: dict[str, Any] = {
                        "tool_name": tc["name"],
                        "model_call_number": self._total_model_calls,
                    }
                    if self.include_args:
                        log_kwargs["args"] = tc.get("args", {})
                    self._log("Sequential tool call", **log_kwargs)

        return None

    def after_agent(
        self,
        state: AgentState,
        runtime: Runtime[Context],
    ) -> dict[str, Any] | None:
        """Log final statistics and a parallel-vs-sequential verdict.

        Returns:
            None (no state update).
        """
        # Guard against after_agent firing without before_agent having run.
        total_time = time.time() - self._run_start_time if self._run_start_time else 0

        # Calculate parallelism ratio (percentage of tool-bearing responses
        # that contained more than one tool call).
        total_responses_with_tools = self._parallel_batches + self._sequential_calls
        parallelism_ratio = (
            self._parallel_batches / total_responses_with_tools * 100
            if total_responses_with_tools > 0
            else 0
        )

        # Calculate average tool times in milliseconds. Empty unless
        # record_tool_execution_time() was called during the run.
        avg_times = {
            name: round(sum(times) / len(times) * 1000, 2)
            for name, times in self._tool_execution_times.items()
        }

        self._log(
            "Tool Call Observability Summary",
            total_model_calls=self._total_model_calls,
            total_tool_calls=self._total_tool_calls,
            parallel_batches=self._parallel_batches,
            sequential_calls=self._sequential_calls,
            parallelism_ratio=f"{parallelism_ratio:.1f}%",
            total_time_ms=round(total_time * 1000, 2),
            avg_tool_times_ms=avg_times,
        )

        # Log verdict
        if self._parallel_batches > 0:
            logger.success(
                f"Parallel tool calling IS happening: "
                f"{self._parallel_batches} batches with multiple tools"
            )
        elif self._sequential_calls > 0:
            logger.warning(
                f"All tool calls are SEQUENTIAL: "
                f"{self._sequential_calls} single-tool responses. "
                f"Consider prompt engineering to encourage parallel calls."
            )

        return None
189
+
190
+
191
def create_tool_call_observability_middleware(
    log_level: str = "INFO",
    include_args: bool = False,
    track_timing: bool = True,
) -> ToolCallObservabilityMiddleware:
    """
    Factory function to create tool call observability middleware.

    Args:
        log_level: Logging level ("DEBUG", "INFO", "WARNING")
        include_args: Whether to log tool call arguments
        track_timing: Whether to track execution timing

    Returns:
        ToolCallObservabilityMiddleware instance

    Example YAML config::

        middleware:
          - name: dao_ai.middleware.tool_call_observability.\
create_tool_call_observability_middleware
            args:
              log_level: INFO
              include_args: false
              track_timing: true
    """
    # Collect the configuration once so the debug log and the constructor
    # are guaranteed to see identical values.
    settings: dict[str, Any] = {
        "log_level": log_level,
        "include_args": include_args,
        "track_timing": track_timing,
    }
    logger.debug("Creating tool call observability middleware", **settings)
    return ToolCallObservabilityMiddleware(**settings)
dao_ai/nodes.py CHANGED
@@ -259,8 +259,6 @@ def create_agent_node(
259
259
  else:
260
260
  logger.debug("No custom prompt configured", agent=agent.name)
261
261
 
262
- checkpointer: bool = memory is not None and memory.checkpointer is not None
263
-
264
262
  # Get the prompt as middleware (always returns AgentMiddleware or None)
265
263
  prompt_middleware: AgentMiddleware | None = make_prompt(agent.prompt)
266
264
 
@@ -291,12 +289,14 @@ def create_agent_node(
291
289
  # Use LangChain v1's create_agent with middleware
292
290
  # AgentState extends MessagesState with additional DAO AI fields
293
291
  # System prompt is provided via middleware (dynamic_prompt)
292
+ # NOTE: checkpointer=False because these agents are used as subgraphs
293
+ # within the parent orchestration graph (swarm/supervisor) which handles
294
+ # checkpointing at the root level. Subgraphs cannot have checkpointer=True.
294
295
  logger.info(
295
296
  "Creating LangChain agent",
296
297
  agent=agent.name,
297
298
  tools_count=len(tools),
298
299
  middleware_count=len(middleware_list),
299
- has_checkpointer=checkpointer,
300
300
  )
301
301
 
302
302
  compiled_agent: CompiledStateGraph = create_agent(
@@ -304,7 +304,7 @@ def create_agent_node(
304
304
  model=llm,
305
305
  tools=tools,
306
306
  middleware=middleware_list,
307
- checkpointer=checkpointer,
307
+ checkpointer=False,
308
308
  state_schema=AgentState,
309
309
  context_schema=Context,
310
310
  response_format=response_format, # Add structured output support
dao_ai/tools/__init__.py CHANGED
@@ -1,4 +1,4 @@
1
- from dao_ai.genie.cache import LRUCacheService, SemanticCacheService
1
+ from dao_ai.genie.cache import LRUCacheService, PostgresContextAwareGenieService
2
2
  from dao_ai.hooks.core import create_hooks
3
3
  from dao_ai.tools.agent import create_agent_endpoint_tool
4
4
  from dao_ai.tools.core import create_tools, say_hello_tool
@@ -44,8 +44,8 @@ __all__ = [
44
44
  "format_time_tool",
45
45
  "is_business_hours_tool",
46
46
  "LRUCacheService",
47
+ "PostgresContextAwareGenieService",
47
48
  "say_hello_tool",
48
- "SemanticCacheService",
49
49
  "time_difference_tool",
50
50
  "time_in_timezone_tool",
51
51
  "time_until_tool",
dao_ai/tools/genie.py CHANGED
@@ -6,7 +6,7 @@ interact with Databricks Genie.
6
6
 
7
7
  For the core Genie service and cache implementations, see:
8
8
  - dao_ai.genie: GenieService, GenieServiceBase
9
- - dao_ai.genie.cache: LRUCacheService, SemanticCacheService
9
+ - dao_ai.genie.cache: LRUCacheService, PostgresContextAwareGenieService, InMemoryContextAwareGenieService
10
10
  """
11
11
 
12
12
  import json
@@ -25,18 +25,18 @@ from pydantic import BaseModel
25
25
  from dao_ai.config import (
26
26
  AnyVariable,
27
27
  CompositeVariableModel,
28
+ GenieContextAwareCacheParametersModel,
28
29
  GenieInMemorySemanticCacheParametersModel,
29
30
  GenieLRUCacheParametersModel,
30
31
  GenieRoomModel,
31
- GenieSemanticCacheParametersModel,
32
32
  value_of,
33
33
  )
34
34
  from dao_ai.genie import GenieService, GenieServiceBase
35
35
  from dao_ai.genie.cache import (
36
36
  CacheResult,
37
- InMemorySemanticCacheService,
37
+ InMemoryContextAwareGenieService,
38
38
  LRUCacheService,
39
- SemanticCacheService,
39
+ PostgresContextAwareGenieService,
40
40
  )
41
41
  from dao_ai.state import AgentState, Context, SessionState
42
42
 
@@ -70,7 +70,7 @@ def create_genie_tool(
70
70
  persist_conversation: bool = True,
71
71
  truncate_results: bool = False,
72
72
  lru_cache_parameters: GenieLRUCacheParametersModel | dict[str, Any] | None = None,
73
- semantic_cache_parameters: GenieSemanticCacheParametersModel
73
+ semantic_cache_parameters: GenieContextAwareCacheParametersModel
74
74
  | dict[str, Any]
75
75
  | None = None,
76
76
  in_memory_semantic_cache_parameters: GenieInMemorySemanticCacheParametersModel
@@ -118,7 +118,7 @@ def create_genie_tool(
118
118
  lru_cache_parameters = GenieLRUCacheParametersModel(**lru_cache_parameters)
119
119
 
120
120
  if isinstance(semantic_cache_parameters, dict):
121
- semantic_cache_parameters = GenieSemanticCacheParametersModel(
121
+ semantic_cache_parameters = GenieContextAwareCacheParametersModel(
122
122
  **semantic_cache_parameters
123
123
  )
124
124
 
@@ -182,17 +182,17 @@ GenieResponse: A response object containing the conversation ID and result from
182
182
 
183
183
  genie_service: GenieServiceBase = GenieService(genie)
184
184
 
185
- # Wrap with semantic cache first (checked second/third due to decorator pattern)
185
+ # Wrap with context-aware cache first (checked second/third due to decorator pattern)
186
186
  if semantic_cache_parameters is not None:
187
- genie_service = SemanticCacheService(
187
+ genie_service = PostgresContextAwareGenieService(
188
188
  impl=genie_service,
189
189
  parameters=semantic_cache_parameters,
190
190
  workspace_client=workspace_client,
191
191
  ).initialize()
192
192
 
193
- # Wrap with in-memory semantic cache (alternative to PostgreSQL semantic cache)
193
+ # Wrap with in-memory context-aware cache (alternative to PostgreSQL context-aware cache)
194
194
  if in_memory_semantic_cache_parameters is not None:
195
- genie_service = InMemorySemanticCacheService(
195
+ genie_service = InMemoryContextAwareGenieService(
196
196
  impl=genie_service,
197
197
  parameters=in_memory_semantic_cache_parameters,
198
198
  workspace_client=workspace_client,
dao_ai/utils.py CHANGED
@@ -152,10 +152,14 @@ def dao_ai_version() -> str:
152
152
  return "dev"
153
153
 
154
154
 
155
- def get_installed_packages() -> dict[str, str]:
156
- """Get all installed packages with versions"""
155
+ def get_installed_packages() -> list[str]:
156
+ """Get all installed packages with versions.
157
157
 
158
- packages: Sequence[str] = [
158
+ Returns a list of pip requirement strings for packages used by dao-ai.
159
+ This is used for MLflow model logging to ensure all dependencies are captured.
160
+ """
161
+
162
+ packages: list[str] = [
159
163
  f"databricks-agents=={version('databricks-agents')}",
160
164
  f"databricks-langchain[memory]=={version('databricks-langchain')}",
161
165
  f"databricks-mcp=={version('databricks-mcp')}",
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: dao-ai
3
- Version: 0.1.19
3
+ Version: 0.1.21
4
4
  Summary: DAO AI: A modular, multi-agent orchestration framework for complex AI workflows. Supports agent handoff, tool integration, and dynamic configuration via YAML.
5
5
  Project-URL: Homepage, https://github.com/natefleming/dao-ai
6
6
  Project-URL: Documentation, https://natefleming.github.io/dao-ai
@@ -1,31 +1,35 @@
1
1
  dao_ai/__init__.py,sha256=18P98ExEgUaJ1Byw440Ct1ty59v6nxyWtc5S6Uq2m9Q,1062
2
2
  dao_ai/catalog.py,sha256=sPZpHTD3lPx4EZUtIWeQV7VQM89WJ6YH__wluk1v2lE,4947
3
- dao_ai/cli.py,sha256=Mcw03hemsT4O63lAH6mqTaPZjx0Q01YTgj5CN0thODI,52121
4
- dao_ai/config.py,sha256=CseNaB42FRKFz0NnfoCKmFROwVItwAHNAiipQqdwZbA,150445
3
+ dao_ai/cli.py,sha256=7hVCC8mn9S3c4wW-eRt-WoFKzV1wPdJVAeNhkyhfUGc,53251
4
+ dao_ai/config.py,sha256=alAPiAtglojEIBUm4dwrpnirkRbgDPBS-6DZAKCxyuE,159798
5
5
  dao_ai/evaluation.py,sha256=4dveWDwFnUxaybswr0gag3ydZ5RGVCTRaiE3eKLClD4,18161
6
6
  dao_ai/graph.py,sha256=1-uQlo7iXZQTT3uU8aYu0N5rnhw5_g_2YLwVsAs6M-U,1119
7
7
  dao_ai/logging.py,sha256=lYy4BmucCHvwW7aI3YQkQXKJtMvtTnPDu9Hnd7_O4oc,1556
8
8
  dao_ai/messages.py,sha256=4ZBzO4iFdktGSLrmhHzFjzMIt2tpaL-aQLHOQJysGnY,6959
9
9
  dao_ai/models.py,sha256=NaHj91Gra4M8thlKX1DSufLqtJfZSZ55lm1H1dJL_O8,77320
10
- dao_ai/nodes.py,sha256=7W6Ek6Uk9-pKa-H06nVCwuDllCrgX02IYy3rHtuL0aM,10777
10
+ dao_ai/nodes.py,sha256=H7_C0ev0TpS5KWkGZD6eE4Wn6ouBwnN5HgYUyBeKe0A,10881
11
11
  dao_ai/optimization.py,sha256=phK6t4wYmWPObCjGUBHdZzsaFXGhQOjhAek2bAEfwXo,22971
12
12
  dao_ai/state.py,sha256=ifDTAC7epdowk3Z1CP3Xqw4uH2dIxQEVF3C747dA8yI,6436
13
13
  dao_ai/types.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
14
- dao_ai/utils.py,sha256=ImgH0jnHPCK2AR7KcueG_Zb7kltcBzTw78ujDUpARIE,17184
14
+ dao_ai/utils.py,sha256=nQ5U92IU-JWLIikBg04s4O5gHUNX3j_Dy3YfAnT_ccM,17340
15
15
  dao_ai/vector_search.py,sha256=PfmT2PDMymk-3dTm2uszlOZNyHyiDge--imxKpKJRsY,4440
16
16
  dao_ai/apps/__init__.py,sha256=RLuhZf4gQ4pemwKDz1183aXib8UfaRhwfKvRx68GRlM,661
17
17
  dao_ai/apps/handlers.py,sha256=6-IhhklHSPnS8aqKp155wPaSnYWTU1BSOPwbdWYBkFU,3594
18
18
  dao_ai/apps/model_serving.py,sha256=XLt3_0pGSRceMK6YtOrND9Jnh7mKLPCtwjVDLIaptQU,847
19
19
  dao_ai/apps/resources.py,sha256=5l6UxfMq6uspOql-HNDyUikfqRAa9eH_TiJHrGgMb6s,40029
20
20
  dao_ai/apps/server.py,sha256=neWbVnC2z9f-tJZBnho70FytNDEVOdOM1YngoGc5KHI,1264
21
- dao_ai/genie/__init__.py,sha256=vdEyGhrt6L8GlK75SyYvTnl8QpHKDCJC5hJKLg4DesQ,1063
22
- dao_ai/genie/core.py,sha256=HPKbocvhnnw_PkQwfoq5bpgQmL9lZyyS6_goTJL8yiY,1073
23
- dao_ai/genie/cache/__init__.py,sha256=ssLDdBgPFeiCcvHgd1cujomUu2R0COHCmBpPGDMU17w,1318
24
- dao_ai/genie/cache/base.py,sha256=_MhHqYrHejVGrJjSLX26TdHwvQZb-HgiantRYSB8fJY,1961
21
+ dao_ai/genie/__init__.py,sha256=UpSvP6gZO8H-eAPokYpkshvFxYD4ETYZHz-pRPoK2sI,2786
22
+ dao_ai/genie/core.py,sha256=eKZo4pRagwI6QglAXqHYjUaC3AicmGMiy_9WIuZ-tzw,9119
23
+ dao_ai/genie/cache/__init__.py,sha256=gUKBcbSMicalQj6sc_bvuBUeVUUTZ90xIMytf9Bk_m8,2242
24
+ dao_ai/genie/cache/base.py,sha256=nbWl-KTstUPGagdUtO8xtVUSosuqkaNc_hx-PgT1ROo,7155
25
25
  dao_ai/genie/cache/core.py,sha256=48sDY7dbrsmflb96OFEE8DYarNB6zyiFxZQG-qfhXD4,2537
26
- dao_ai/genie/cache/in_memory_semantic.py,sha256=1Q7dpqZUcnpxRkzwWp13G9u7-iV4DG2gcx2N7KbUJx0,32426
27
- dao_ai/genie/cache/lru.py,sha256=c8_qkJ6NkCz9Jnr3KuCgTZDBWvHeuGnWyjMNy-l510I,12130
28
- dao_ai/genie/cache/semantic.py,sha256=Fshc2qB1UGgLAO_1gwH7v_taEJw3838bppfrUI_x4o4,39411
26
+ dao_ai/genie/cache/lru.py,sha256=dWoNottME8y6y_OKnQZ1xH4NmQxk2PdXvUgKcdzjlxI,19935
27
+ dao_ai/genie/cache/context_aware/__init__.py,sha256=8uxG-0_9SIqjmiNKyPXjn_pOhOi82RrcziljQOYdrwc,1946
28
+ dao_ai/genie/cache/context_aware/base.py,sha256=rxiXqJF5VZFTKEAr_MKqGH1hcqF5rxeqsC75Z7nlrWk,42819
29
+ dao_ai/genie/cache/context_aware/in_memory.py,sha256=9V7uDyXYe3A8GU3rxJq4icbt3IxSTuHhoPN6sJxOEDU,26809
30
+ dao_ai/genie/cache/context_aware/optimization.py,sha256=foUCjK5g0aRUH1wXWoPIVaGC6DqSstTrsP5naQGpw7U,31758
31
+ dao_ai/genie/cache/context_aware/persistent.py,sha256=cpn25Go6ZyN65lY_vh5cWcuqr__nNH7RPWJA_LP7wcE,28154
32
+ dao_ai/genie/cache/context_aware/postgres.py,sha256=HGadN7FmSXbDXc8itq6vz1tSbhyklxOnWB3qDegrVKc,53087
29
33
  dao_ai/hooks/__init__.py,sha256=uA4DQdP9gDf4SyNjNx9mWPoI8UZOcTyFsCXV0NraFvQ,463
30
34
  dao_ai/hooks/core.py,sha256=yZAfRfB0MyMo--uwGr4STtVxxen5s4ZUrNTnR3a3qkA,1721
31
35
  dao_ai/memory/__init__.py,sha256=Us3wFehvug_h83m-UJ7OXdq2qZ0e9nHBQE7m5RwoAd8,559
@@ -33,7 +37,7 @@ dao_ai/memory/base.py,sha256=99nfr2UZJ4jmfTL_KrqUlRSCoRxzkZyWyx5WqeUoMdQ,338
33
37
  dao_ai/memory/core.py,sha256=38H-JLIyUrRDIECLvpXK3iJlWG35X97E-DTo_4c3Jzc,6317
34
38
  dao_ai/memory/databricks.py,sha256=SM6nwLjhSRJO4hLc3GUuht5YydYtTi3BAOae6jPwTm4,14377
35
39
  dao_ai/memory/postgres.py,sha256=bSjtvEht0h6jy2ADN2vqISVQDxm_DeM586VDdGaShJQ,23168
36
- dao_ai/middleware/__init__.py,sha256=Qy8wbvjXF7TrUzi3tWziOwxqsrUcT1rzE3UWd3x5CrU,5108
40
+ dao_ai/middleware/__init__.py,sha256=cmXRtyY4rvyv1p2BJktw-SllI5qZLk_EEeiWJtOkbiM,5358
37
41
  dao_ai/middleware/assertions.py,sha256=C1K-TnNZfBEwWouioHCt6c48i1ux9QKfQaX6AzghhgE,27408
38
42
  dao_ai/middleware/base.py,sha256=uG2tpdnjL5xY5jCKvb_m3UTBtl4ZC6fJQUkDsQvV8S4,1279
39
43
  dao_ai/middleware/context_editing.py,sha256=5rNKqH1phFFQTVW-4nzlVH5cbqomD-HFEIy2Z841D4I,7687
@@ -46,6 +50,7 @@ dao_ai/middleware/model_retry.py,sha256=SlWjAcaEmvj6KBOkjUicChYjhlg7bAJM7-e6KLpH
46
50
  dao_ai/middleware/pii.py,sha256=zetfoz1WlJ-V0vjJp37v8NGimXB27EkZfetUHpGCXno,5137
47
51
  dao_ai/middleware/summarization.py,sha256=gp2s9uc4DEJat-mWjWEzMaR-zAAeUOXYvu5EEYtqae4,7143
48
52
  dao_ai/middleware/tool_call_limit.py,sha256=WQ3NmA3pLo-pNPBmwM7KwkYpT1segEnWqkhgW1xNkCE,6321
53
+ dao_ai/middleware/tool_call_observability.py,sha256=mU52HwF82Yb05TIAYhu5cNkn3jcPGn6cKhsxT3rkS8M,7673
49
54
  dao_ai/middleware/tool_retry.py,sha256=QfJ7yTHneME8VtnA88QcmnjXIegSFeJztyngy49wTgM,5568
50
55
  dao_ai/middleware/tool_selector.py,sha256=POj72YdzZEiNGfW4AQXPBeVVS1RUBsiG7PBuSENEhe0,4516
51
56
  dao_ai/orchestration/__init__.py,sha256=i85CLfRR335NcCFhaXABcMkn6WZfXnJ8cHH4YZsZN0s,1622
@@ -60,11 +65,11 @@ dao_ai/prompts/verifier.yaml,sha256=9snFQuxfYuEr46F4gv13VqL9q2PJCtWlbBhN3_IO2zI,
60
65
  dao_ai/providers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
61
66
  dao_ai/providers/base.py,sha256=cJGo3UjUTPgS91dv38ePOHwQQtYhIa84ebb167CBXjk,2111
62
67
  dao_ai/providers/databricks.py,sha256=cg-TY9IS3-OqIo1gkLe1YwOR1H-s8YTBGrqDrkOWR6c,73569
63
- dao_ai/tools/__init__.py,sha256=NfRpAKds_taHbx6gzLPWgtPXve-YpwzkoOAUflwxceM,1734
68
+ dao_ai/tools/__init__.py,sha256=4dX_N6G_WrkV2BhS7hN8tR9zrMNlorLTKQYM388KTR4,1758
64
69
  dao_ai/tools/agent.py,sha256=plIWALywRjaDSnot13nYehBsrHRpBUpsVZakoGeajOE,1858
65
70
  dao_ai/tools/core.py,sha256=bRIN3BZhRQX8-Kpu3HPomliodyskCqjxynQmYbk6Vjs,3783
66
71
  dao_ai/tools/email.py,sha256=A3TsCoQgJR7UUWR0g45OPRGDpVoYwctFs1MOZMTt_d4,7389
67
- dao_ai/tools/genie.py,sha256=Zq3k7sfz0Jy5cm-RM5uNaWR5Q2sq-syaTwcgmxCWXUs,12114
72
+ dao_ai/tools/genie.py,sha256=MWW2nCutl5-Wxwt4m7AxrS0ufqZimTKXa-lbojhwRYQ,12219
68
73
  dao_ai/tools/instructed_retriever.py,sha256=iEu7oH1Z9_-Id0SMaq-dAgCNigeRrJDDTSZTcOJLl6k,12990
69
74
  dao_ai/tools/instruction_reranker.py,sha256=_1kGwrXkJk4QR2p8n3lAaYkUVoidxCxV9wNCtoS0qco,6730
70
75
  dao_ai/tools/mcp.py,sha256=4uvag52OJPInUEnxFLwpE0JRugTrgHeWbkP5lzIx4lg,22620
@@ -78,8 +83,8 @@ dao_ai/tools/time.py,sha256=tufJniwivq29y0LIffbgeBTIDE6VgrLpmVf8Qr90qjw,9224
78
83
  dao_ai/tools/unity_catalog.py,sha256=oBlW6pH-Ne08g60QW9wVi_tyeVYDiecuNoxQbIIFmN8,16515
79
84
  dao_ai/tools/vector_search.py,sha256=34uhd58FKHzvcdgHHoACRdZAUJWTaUuPYiwIqBwvGqk,29061
80
85
  dao_ai/tools/verifier.py,sha256=ociBVsGkQNyhWS6F6G8x17V7zAQfSuTe4Xcd6Y-7lPE,4975
81
- dao_ai-0.1.19.dist-info/METADATA,sha256=KnF0S-dwcgBA2YPOWBoVQFhBk9mVW57XAQ0bV8rL3RY,16954
82
- dao_ai-0.1.19.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
83
- dao_ai-0.1.19.dist-info/entry_points.txt,sha256=Xa-UFyc6gWGwMqMJOt06ZOog2vAfygV_DSwg1AiP46g,43
84
- dao_ai-0.1.19.dist-info/licenses/LICENSE,sha256=YZt3W32LtPYruuvHE9lGk2bw6ZPMMJD8yLrjgHybyz4,1069
85
- dao_ai-0.1.19.dist-info/RECORD,,
86
+ dao_ai-0.1.21.dist-info/METADATA,sha256=v1iHbA1jzdcEUmkmOa0kyh16V7-BMhIx5HkAMWUnRlE,16954
87
+ dao_ai-0.1.21.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
88
+ dao_ai-0.1.21.dist-info/entry_points.txt,sha256=Xa-UFyc6gWGwMqMJOt06ZOog2vAfygV_DSwg1AiP46g,43
89
+ dao_ai-0.1.21.dist-info/licenses/LICENSE,sha256=YZt3W32LtPYruuvHE9lGk2bw6ZPMMJD8yLrjgHybyz4,1069
90
+ dao_ai-0.1.21.dist-info/RECORD,,