dao-ai 0.0.18__py3-none-any.whl → 0.0.19__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
dao_ai/config.py CHANGED
@@ -39,6 +39,7 @@ from mlflow.models import ModelConfig
 from mlflow.models.resources import (
     DatabricksFunction,
     DatabricksGenieSpace,
+    DatabricksLakebase,
     DatabricksResource,
     DatabricksServingEndpoint,
     DatabricksSQLWarehouse,
@@ -274,10 +275,8 @@ class TableModel(BaseModel, HasFullName, IsDatabricksResource):
             "_assessment_logs",
             "_request_logs",
         ]
-
-        excluded_prefixes: Sequence[str] = [
-            "trace_logs_"
-        ]
+
+        excluded_prefixes: Sequence[str] = ["trace_logs_"]

         if self.name:
             resources.append(
@@ -299,8 +298,12 @@ class TableModel(BaseModel, HasFullName, IsDatabricksResource):
                     on_behalf_of_user=self.on_behalf_of_user,
                 )
                 for table in tables
-                if not any(table.name.endswith(suffix) for suffix in excluded_suffixes)
-                and not any(table.name.startswith(prefix) for prefix in excluded_prefixes)
+                if not any(
+                    table.name.endswith(suffix) for suffix in excluded_suffixes
+                )
+                and not any(
+                    table.name.startswith(prefix) for prefix in excluded_prefixes
+                )
             ]
         )

@@ -413,7 +416,7 @@ class GenieRoomModel(BaseModel, IsDatabricksResource):
     model_config = ConfigDict(use_enum_values=True, extra="forbid")
     name: str
     description: Optional[str] = None
-    space_id: str
+    space_id: AnyVariable

     @property
     def api_scopes(self) -> Sequence[str]:
@@ -428,6 +431,11 @@ class GenieRoomModel(BaseModel, IsDatabricksResource):
             )
         ]

+    @model_validator(mode="after")
+    def update_space_id(self):
+        self.space_id = value_of(self.space_id)
+        return self
+

 class VolumeModel(BaseModel, HasFullName):
     model_config = ConfigDict(use_enum_values=True, extra="forbid")
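Widening space_id from str to AnyVariable lets the Genie space ID come either as a literal string or as a variable reference that value_of resolves once validation completes. A minimal sketch of the pattern; EnvVariableModel and this value_of are simplified stand-ins, not dao_ai.config's actual definitions:

```python
# Minimal sketch of the AnyVariable/value_of pattern (stand-ins, not dao_ai's real types).
import os
from typing import Union

from pydantic import BaseModel, model_validator


class EnvVariableModel(BaseModel):
    env: str  # name of the environment variable that holds the real value


AnyVariable = Union[str, EnvVariableModel]


def value_of(value: AnyVariable) -> str:
    # Literals pass through unchanged; references resolve to concrete strings.
    if isinstance(value, EnvVariableModel):
        return os.environ[value.env]
    return value


class GenieRoomModel(BaseModel):
    name: str
    space_id: AnyVariable

    @model_validator(mode="after")
    def update_space_id(self):
        # After validation, space_id is always a plain string.
        self.space_id = value_of(self.space_id)
        return self


os.environ.setdefault("GENIE_SPACE_ID", "01ef-example")
room = GenieRoomModel(name="sales", space_id=EnvVariableModel(env="GENIE_SPACE_ID"))
print(room.space_id)  # 01ef-example
```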
@@ -648,6 +656,7 @@ class ConnectionModel(BaseModel, HasFullName, IsDatabricksResource):
     def api_scopes(self) -> Sequence[str]:
         return [
             "catalog.connections",
+            "serving.serving-endpoints",
         ]

     def as_resources(self) -> Sequence[DatabricksResource]:
@@ -662,7 +671,7 @@ class WarehouseModel(BaseModel, IsDatabricksResource):
     model_config = ConfigDict()
     name: str
     description: Optional[str] = None
-    warehouse_id: str
+    warehouse_id: AnyVariable

     @property
     def api_scopes(self) -> Sequence[str]:
@@ -678,8 +687,13 @@ class WarehouseModel(BaseModel, IsDatabricksResource):
             )
         ]

+    @model_validator(mode="after")
+    def update_warehouse_id(self):
+        self.warehouse_id = value_of(self.warehouse_id)
+        return self
+

-class DatabaseModel(BaseModel):
+class DatabaseModel(BaseModel, IsDatabricksResource):
     model_config = ConfigDict(frozen=True)
     name: str
     description: Optional[str] = None
@@ -695,6 +709,18 @@ class DatabaseModel(BaseModel):
     client_secret: Optional[AnyVariable] = None
     workspace_host: Optional[AnyVariable] = None

+    @property
+    def api_scopes(self) -> Sequence[str]:
+        return []
+
+    def as_resources(self) -> Sequence[DatabricksResource]:
+        return [
+            DatabricksLakebase(
+                database_instance_name=self.name,
+                on_behalf_of_user=self.on_behalf_of_user,
+            )
+        ]
+
     @model_validator(mode="after")
     def validate_auth_methods(self):
         oauth_fields: Sequence[Any] = [
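With DatabaseModel now implementing IsDatabricksResource, each configured Lakebase instance is declared as a model resource alongside endpoints, warehouses, and Genie spaces. A hedged sketch of how such resources are typically attached when logging an agent with MLflow; the endpoint, instance, and file names here are illustrative, not from dao-ai's configs:

```python
# Hedged sketch: attaching declared resources when logging an agent with MLflow.
import mlflow
from mlflow.models.resources import DatabricksLakebase, DatabricksServingEndpoint

resources = [
    DatabricksServingEndpoint(endpoint_name="databricks-claude-sonnet-4"),
    # DatabaseModel.as_resources() contributes entries like this one:
    DatabricksLakebase(database_instance_name="my-lakebase-instance"),
]

with mlflow.start_run():
    mlflow.pyfunc.log_model(
        name="agent",
        python_model="agent_as_code.py",  # models-from-code entry point
        resources=resources,  # lets serving provision credentials for these
    )
```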
@@ -1190,13 +1216,13 @@ class AppModel(BaseModel):
     endpoint_name: Optional[str] = None
     tags: Optional[dict[str, Any]] = Field(default_factory=dict)
     scale_to_zero: Optional[bool] = True
-    environment_vars: Optional[dict[str, Any]] = Field(default_factory=dict)
+    environment_vars: Optional[dict[str, AnyVariable]] = Field(default_factory=dict)
     budget_policy_id: Optional[str] = None
     workload_size: Optional[WorkloadSize] = "Small"
     permissions: Optional[list[AppPermissionModel]] = Field(default_factory=list)
     agents: list[AgentModel] = Field(default_factory=list)

-    orchestration: OrchestrationModel
+    orchestration: Optional[OrchestrationModel] = None
     alias: Optional[str] = None
     initialization_hooks: Optional[FunctionHook | list[FunctionHook]] = Field(
         default_factory=list
@@ -1215,7 +1241,38 @@ class AppModel(BaseModel):
     @model_validator(mode="after")
     def validate_agents_not_empty(self):
         if not self.agents:
-            raise ValueError("agents must contain at least one item")
+            raise ValueError("At least one agent must be specified")
+        return self
+
+    @model_validator(mode="after")
+    def update_environment_vars(self):
+        for key, value in self.environment_vars.items():
+            if isinstance(value, SecretVariableModel):
+                updated_value = str(value)
+            else:
+                updated_value = value_of(value)
+
+            self.environment_vars[key] = updated_value
+        return self
+
+    @model_validator(mode="after")
+    def set_default_orchestration(self):
+        if self.orchestration is None:
+            if len(self.agents) > 1:
+                default_agent: AgentModel = self.agents[0]
+                self.orchestration = OrchestrationModel(
+                    swarm=SupervisorModel(model=default_agent.model)
+                )
+            elif len(self.agents) == 1:
+                default_agent: AgentModel = self.agents[0]
+                self.orchestration = OrchestrationModel(
+                    supervisor=SwarmModel(
+                        model=default_agent.model, default_agent=default_agent
+                    )
+                )
+            else:
+                raise ValueError("At least one agent must be specified")
+
         return self

     @model_validator(mode="after")
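Two behavioral notes on the AppModel changes: environment_vars values that are secrets are rendered with str() rather than resolved, so the deployed app receives a secret reference instead of the plaintext value, and orchestration is now optional with a default derived from the first agent. A sketch of the secret-rendering half, assuming SecretVariableModel renders to the {{secrets/scope/key}} placeholder syntax that Databricks serving resolves at deploy time; the field names here are illustrative stand-ins:

```python
# Hedged sketch of environment_vars normalization; SecretVariableModel's real fields
# and rendering live in dao_ai.config, this stand-in assumes the Databricks
# secret-reference placeholder format for serving environment variables.
from pydantic import BaseModel


class SecretVariableModel(BaseModel):
    scope: str
    key: str

    def __str__(self) -> str:
        # Databricks serving resolves this placeholder at deploy time.
        return f"{{{{secrets/{self.scope}/{self.key}}}}}"


env_vars = {"OPENAI_API_KEY": SecretVariableModel(scope="ai", key="openai")}
resolved = {
    k: str(v) if isinstance(v, SecretVariableModel) else v
    for k, v in env_vars.items()
}
print(resolved)  # {'OPENAI_API_KEY': '{{secrets/ai/openai}}'}
```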
dao_ai/graph.py CHANGED
@@ -136,8 +136,8 @@ def _create_supervisor_graph(config: AppConfig) -> CompiledStateGraph:

     workflow: StateGraph = StateGraph(
         SharedState,
-        input=IncomingState,
-        output=OutgoingState,
+        input_schema=IncomingState,
+        output_schema=OutgoingState,
         context_schema=Context,
     )

@@ -200,8 +200,8 @@ def _create_swarm_graph(config: AppConfig) -> CompiledStateGraph:

     workflow: StateGraph = StateGraph(
         SharedState,
-        input=IncomingState,
-        output=OutgoingState,
+        input_schema=IncomingState,
+        output_schema=OutgoingState,
         context_schema=Context,
     )

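The graph.py change tracks langgraph 0.6's rename of StateGraph's input/output keyword arguments to input_schema/output_schema (context_schema already follows the 0.6 naming). A minimal runnable sketch of the new keywords, with toy state classes standing in for dao_ai's SharedState, IncomingState, and OutgoingState:

```python
# Minimal sketch of the renamed StateGraph keywords (langgraph >= 0.6).
from typing import TypedDict

from langgraph.graph import END, START, StateGraph


class InState(TypedDict):
    question: str


class OutState(TypedDict):
    answer: str


class State(InState, OutState):
    pass


def answer(state: State) -> dict:
    return {"answer": state["question"].upper()}


workflow = StateGraph(State, input_schema=InState, output_schema=OutState)
workflow.add_node("answer", answer)
workflow.add_edge(START, "answer")
workflow.add_edge("answer", END)
graph = workflow.compile()
print(graph.invoke({"question": "hello"}))  # {'answer': 'HELLO'}
```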
dao_ai/memory/core.py CHANGED
@@ -99,13 +99,13 @@ class CheckpointManager:
                     checkpointer_manager
                 )
             case StorageType.POSTGRES:
-                from dao_ai.memory.postgres import PostgresCheckpointerManager
+                from dao_ai.memory.postgres import AsyncPostgresCheckpointerManager

                 checkpointer_manager = cls.checkpoint_managers.get(
                     checkpointer_model.database.name
                 )
                 if checkpointer_manager is None:
-                    checkpointer_manager = PostgresCheckpointerManager(
+                    checkpointer_manager = AsyncPostgresCheckpointerManager(
                         checkpointer_model
                     )
                     cls.checkpoint_managers[checkpointer_model.database.name] = (
dao_ai/memory/postgres.py CHANGED
@@ -20,6 +20,137 @@ from dao_ai.memory.base import (
 )


+class PatchedAsyncPostgresStore(AsyncPostgresStore):
+    """
+    Patched version of AsyncPostgresStore that properly handles event loop initialization
+    and task lifecycle management.
+
+    The issues occur because:
+    1. AsyncBatchedBaseStore.__init__ calls asyncio.get_running_loop() and fails if no event loop is running
+    2. The background _task can complete/fail, causing assertions in asearch/other methods to fail
+    3. The destructor tries to access _task even when it doesn't exist
+
+    This patch ensures proper initialization and handles the task lifecycle robustly.
+    """
+
+    def __init__(self, *args, **kwargs):
+        # Ensure we have a running event loop before calling super().__init__()
+        loop = None
+        try:
+            loop = asyncio.get_running_loop()
+        except RuntimeError:
+            # No running loop - create one temporarily for initialization
+            loop = asyncio.new_event_loop()
+            asyncio.set_event_loop(loop)
+
+        try:
+            super().__init__(*args, **kwargs)
+        except Exception as e:
+            # If parent initialization fails, ensure _task is at least defined
+            if not hasattr(self, "_task"):
+                self._task = None
+            logger.warning(f"AsyncPostgresStore initialization failed: {e}")
+            raise
+
+    def _ensure_task_running(self):
+        """
+        Ensure the background task is running. Recreate it if necessary.
+        """
+        if not hasattr(self, "_task") or self._task is None:
+            logger.error("AsyncPostgresStore task not initialized")
+            raise RuntimeError("Store task not properly initialized")
+
+        if self._task.done():
+            logger.warning(
+                "AsyncPostgresStore background task completed, attempting to restart"
+            )
+            # Try to get the task exception for debugging
+            try:
+                exception = self._task.exception()
+                if exception:
+                    logger.error(f"Background task failed with: {exception}")
+                else:
+                    logger.info("Background task completed normally")
+            except Exception as e:
+                logger.warning(f"Could not determine task completion reason: {e}")
+
+            # Try to restart the task
+            try:
+                import weakref
+
+                from langgraph.store.base.batch import _run
+
+                self._task = self._loop.create_task(
+                    _run(self._aqueue, weakref.ref(self))
+                )
+                logger.info("Successfully restarted AsyncPostgresStore background task")
+            except Exception as e:
+                logger.error(f"Failed to restart background task: {e}")
+                raise RuntimeError(
+                    f"Store background task failed and could not be restarted: {e}"
+                )
+
+    async def asearch(
+        self,
+        namespace_prefix,
+        /,
+        *,
+        query=None,
+        filter=None,
+        limit=10,
+        offset=0,
+        refresh_ttl=None,
+    ):
+        """
+        Override asearch to handle task lifecycle issues gracefully.
+        """
+        self._ensure_task_running()
+
+        # Call parent implementation if task is healthy
+        return await super().asearch(
+            namespace_prefix,
+            query=query,
+            filter=filter,
+            limit=limit,
+            offset=offset,
+            refresh_ttl=refresh_ttl,
+        )
+
+    async def aget(self, namespace, key, /, *, refresh_ttl=None):
+        """Override aget with task lifecycle management."""
+        self._ensure_task_running()
+        return await super().aget(namespace, key, refresh_ttl=refresh_ttl)
+
+    async def aput(self, namespace, key, value, /, *, refresh_ttl=None):
+        """Override aput with task lifecycle management."""
+        self._ensure_task_running()
+        return await super().aput(namespace, key, value, refresh_ttl=refresh_ttl)
+
+    async def adelete(self, namespace, key):
+        """Override adelete with task lifecycle management."""
+        self._ensure_task_running()
+        return await super().adelete(namespace, key)
+
+    async def alist_namespaces(self, *, prefix=None):
+        """Override alist_namespaces with task lifecycle management."""
+        self._ensure_task_running()
+        return await super().alist_namespaces(prefix=prefix)
+
+    def __del__(self):
+        """
+        Override destructor to handle missing _task attribute gracefully.
+        """
+        try:
+            # Only try to cancel if _task exists and is not None
+            if hasattr(self, "_task") and self._task is not None:
+                if not self._task.done():
+                    self._task.cancel()
+        except Exception as e:
+            # Log but don't raise - destructors should not raise exceptions
+            logger.debug(f"AsyncPostgresStore destructor cleanup: {e}")
+            pass
+
+
 class AsyncPostgresPoolManager:
     _pools: dict[str, AsyncConnectionPool] = {}
     _lock: asyncio.Lock = asyncio.Lock()
@@ -119,8 +250,9 @@ class AsyncPostgresStoreManager(StoreManagerBase):
                 self.store_model.database
             )

-            # Create store with the shared pool
-            self._store = AsyncPostgresStore(conn=self.pool)
+            # Create store with the shared pool (using patched version)
+            self._store = PatchedAsyncPostgresStore(conn=self.pool)
+
             await self._store.setup()

             self._setup_complete = True
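The patch above combines two defenses: it guarantees an event loop exists before AsyncBatchedBaseStore.__init__ calls asyncio.get_running_loop(), and it restarts the store's background batching task when that task has already finished. The restart path leans on langgraph internals (_run, _aqueue, _loop), so only the loop guard generalizes; a minimal sketch of that idiom using plain asyncio:

```python
# Hedged sketch of the event-loop guard the patched store relies on: make sure some
# loop exists before constructing objects whose __init__ calls asyncio.get_running_loop().
import asyncio


def ensure_event_loop() -> asyncio.AbstractEventLoop:
    try:
        # Succeeds only when called from inside a running loop (i.e. async code).
        return asyncio.get_running_loop()
    except RuntimeError:
        # Sync context with no loop: create and register one for this thread.
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
        return loop


loop = ensure_event_loop()
print(loop.run_until_complete(asyncio.sleep(0, result="ok")))  # ok
```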
dao_ai/models.py CHANGED
@@ -227,14 +227,28 @@ class LanggraphResponsesAgent(ResponsesAgent):
         import asyncio

         async def _async_invoke():
-            return await self.graph.ainvoke(
-                {"messages": messages}, context=context, config=custom_inputs
-            )
+            try:
+                return await self.graph.ainvoke(
+                    {"messages": messages}, context=context, config=custom_inputs
+                )
+            except Exception as e:
+                logger.error(f"Error in graph.ainvoke: {e}")
+                raise

-        loop = asyncio.get_event_loop()
-        response: dict[str, Sequence[BaseMessage]] = loop.run_until_complete(
-            _async_invoke()
-        )
+        try:
+            loop = asyncio.get_event_loop()
+        except RuntimeError:
+            # Handle case where no event loop exists (common in some deployment scenarios)
+            loop = asyncio.new_event_loop()
+            asyncio.set_event_loop(loop)
+
+        try:
+            response: dict[str, Sequence[BaseMessage]] = loop.run_until_complete(
+                _async_invoke()
+            )
+        except Exception as e:
+            logger.error(f"Error in async execution: {e}")
+            raise

         # Convert response to ResponsesAgent format
         last_message: BaseMessage = response["messages"][-1]
@@ -243,8 +257,9 @@ class LanggraphResponsesAgent(ResponsesAgent):
             text=last_message.content, id=f"msg_{uuid.uuid4().hex[:8]}"
         )

+        custom_outputs = custom_inputs
         return ResponsesAgentResponse(
-            output=[output_item], custom_outputs=request.custom_inputs
+            output=[output_item], custom_outputs=custom_outputs
         )

     def predict_stream(
@@ -271,46 +286,59 @@ class LanggraphResponsesAgent(ResponsesAgent):
             item_id = f"msg_{uuid.uuid4().hex[:8]}"
             accumulated_content = ""

-            async for nodes, stream_mode, messages_batch in self.graph.astream(
-                {"messages": messages},
-                context=context,
-                config=custom_inputs,
-                stream_mode=["messages", "custom"],
-                subgraphs=True,
-            ):
-                nodes: tuple[str, ...]
-                stream_mode: str
-                messages_batch: Sequence[BaseMessage]
-
-                for message in messages_batch:
-                    if (
-                        isinstance(
-                            message,
-                            (
-                                AIMessageChunk,
-                                AIMessage,
-                            ),
-                        )
-                        and message.content
-                        and "summarization" not in nodes
-                    ):
-                        content = message.content
-                        accumulated_content += content
-
-                        # Yield streaming delta
-                        yield ResponsesAgentStreamEvent(
-                            **self.create_text_delta(delta=content, item_id=item_id)
-                        )
-
-            # Yield final output item
-            yield ResponsesAgentStreamEvent(
-                type="response.output_item.done",
-                item=self.create_text_output_item(text=accumulated_content, id=item_id),
-                custom_outputs=request.custom_inputs,
-            )
+            try:
+                async for nodes, stream_mode, messages_batch in self.graph.astream(
+                    {"messages": messages},
+                    context=context,
+                    config=custom_inputs,
+                    stream_mode=["messages", "custom"],
+                    subgraphs=True,
+                ):
+                    nodes: tuple[str, ...]
+                    stream_mode: str
+                    messages_batch: Sequence[BaseMessage]
+
+                    for message in messages_batch:
+                        if (
+                            isinstance(
+                                message,
+                                (
+                                    AIMessageChunk,
+                                    AIMessage,
+                                ),
+                            )
+                            and message.content
+                            and "summarization" not in nodes
+                        ):
+                            content = message.content
+                            accumulated_content += content
+
+                            # Yield streaming delta
+                            yield ResponsesAgentStreamEvent(
+                                **self.create_text_delta(delta=content, item_id=item_id)
+                            )
+
+                custom_outputs = custom_inputs
+                # Yield final output item
+                yield ResponsesAgentStreamEvent(
+                    type="response.output_item.done",
+                    item=self.create_text_output_item(
+                        text=accumulated_content, id=item_id
+                    ),
+                    custom_outputs=custom_outputs,
+                )
+            except Exception as e:
+                logger.error(f"Error in graph.astream: {e}")
+                raise

         # Convert async generator to sync generator
-        loop = asyncio.get_event_loop()
+        try:
+            loop = asyncio.get_event_loop()
+        except RuntimeError:
+            # Handle case where no event loop exists (common in some deployment scenarios)
+            loop = asyncio.new_event_loop()
+            asyncio.set_event_loop(loop)
+
         async_gen = _async_stream()

         try:
@@ -320,8 +348,14 @@ class LanggraphResponsesAgent(ResponsesAgent):
                 yield item
             except StopAsyncIteration:
                 break
+            except Exception as e:
+                logger.error(f"Error in streaming: {e}")
+                raise
         finally:
-            loop.run_until_complete(async_gen.aclose())
+            try:
+                loop.run_until_complete(async_gen.aclose())
+            except Exception as e:
+                logger.warning(f"Error closing async generator: {e}")

     def _extract_text_from_content(
         self,
@@ -522,7 +556,14 @@ def _process_langchain_messages_stream(
             yield message

     # Convert async generator to sync generator
-    loop = asyncio.get_event_loop()
+
+    try:
+        loop = asyncio.get_event_loop()
+    except RuntimeError:
+        # Handle case where no event loop exists (common in some deployment scenarios)
+        loop = asyncio.new_event_loop()
+        asyncio.set_event_loop(loop)
+
     async_gen = _async_stream()

     try:
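All three call sites in models.py use the same sync-over-async bridge: try asyncio.get_event_loop(), fall back to creating a loop when the thread has none, then drive coroutines or async generators with run_until_complete. A self-contained sketch of the generator case; the chunk values are illustrative:

```python
# Hedged sketch of the sync-over-async bridge used in predict_stream: obtain a loop
# (creating one when absent), then drain an async generator with run_until_complete.
import asyncio
from typing import AsyncIterator, Iterator


async def _async_stream() -> AsyncIterator[str]:
    for chunk in ("hello", "world"):
        yield chunk


def sync_stream() -> Iterator[str]:
    try:
        loop = asyncio.get_event_loop()
    except RuntimeError:
        # No event loop in this thread (common under some servers): create one.
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)

    async_gen = _async_stream()
    try:
        while True:
            try:
                # Pull one item at a time so callers can consume lazily.
                yield loop.run_until_complete(async_gen.__anext__())
            except StopAsyncIteration:
                break
    finally:
        loop.run_until_complete(async_gen.aclose())


print(list(sync_stream()))  # ['hello', 'world']
```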
dao_ai/providers/databricks.py CHANGED
@@ -42,6 +42,7 @@ import dao_ai
 from dao_ai.config import (
     AppConfig,
     ConnectionModel,
+    DatabaseModel,
     DatasetModel,
     FunctionModel,
     GenieRoomModel,
@@ -224,6 +225,7 @@ class DatabricksProvider(ServiceProvider):
         connections: Sequence[ConnectionModel] = list(
             config.resources.connections.values()
         )
+        databases: Sequence[DatabaseModel] = list(config.resources.databases.values())

         resources: Sequence[IsDatabricksResource] = (
             llms
@@ -233,6 +235,7 @@ class DatabricksProvider(ServiceProvider):
             + functions
             + tables
             + connections
+            + databases
         )

         # Flatten all resources from all models into a single list
dao_ai-0.0.18.dist-info/METADATA → dao_ai-0.0.19.dist-info/METADATA RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: dao-ai
-Version: 0.0.18
+Version: 0.0.19
 Summary: DAO AI: A modular, multi-agent orchestration framework for complex AI workflows. Supports agent handoff, tool integration, and dynamic configuration via YAML.
 Project-URL: Homepage, https://github.com/natefleming/dao-ai
 Project-URL: Documentation, https://natefleming.github.io/dao-ai
@@ -24,22 +24,22 @@ Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
 Classifier: Topic :: Software Development :: Libraries :: Python Modules
 Classifier: Topic :: System :: Distributed Computing
 Requires-Python: >=3.12
-Requires-Dist: databricks-agents>=1.2.0
-Requires-Dist: databricks-langchain>=0.4.2
-Requires-Dist: databricks-sdk[openai]>=0.55.0
+Requires-Dist: databricks-agents>=1.6.0
+Requires-Dist: databricks-langchain>=0.8.0
+Requires-Dist: databricks-sdk[openai]>=0.66.0
 Requires-Dist: duckduckgo-search>=8.0.2
 Requires-Dist: grandalf>=0.8
-Requires-Dist: langchain-mcp-adapters>=0.1.9
+Requires-Dist: langchain-mcp-adapters>=0.1.10
 Requires-Dist: langchain-tavily>=0.2.11
 Requires-Dist: langchain>=0.3.27
 Requires-Dist: langgraph-checkpoint-postgres>=2.0.23
 Requires-Dist: langgraph-supervisor>=0.0.29
 Requires-Dist: langgraph-swarm>=0.0.14
-Requires-Dist: langgraph>=0.6.5
+Requires-Dist: langgraph>=0.6.7
 Requires-Dist: langmem>=0.0.29
 Requires-Dist: loguru>=0.7.3
-Requires-Dist: mcp>=1.9.1
-Requires-Dist: mlflow>=3.3.2
+Requires-Dist: mcp>=1.14.1
+Requires-Dist: mlflow>=3.4.0
 Requires-Dist: nest-asyncio>=1.6.0
 Requires-Dist: openevals>=0.0.19
 Requires-Dist: openpyxl>=3.1.5
dao_ai-0.0.18.dist-info/RECORD → dao_ai-0.0.19.dist-info/RECORD RENAMED
@@ -3,11 +3,11 @@ dao_ai/agent_as_code.py,sha256=kPSeDz2-1jRaed1TMs4LA3VECoyqe9_Ed2beRLB9gXQ,472
 dao_ai/catalog.py,sha256=sPZpHTD3lPx4EZUtIWeQV7VQM89WJ6YH__wluk1v2lE,4947
 dao_ai/chat_models.py,sha256=uhwwOTeLyHWqoTTgHrs4n5iSyTwe4EQcLKnh3jRxPWI,8626
 dao_ai/cli.py,sha256=Aez2TQW3Q8Ho1IaIkRggt0NevDxAAVPjXkePC5GPJF0,20429
-dao_ai/config.py,sha256=JlYC8N_7UL8VVkdSepiCUnR9NA5OsCVAigLjse7dMFM,49922
-dao_ai/graph.py,sha256=kXaGLGFVekDWqm-AHzti6LmrXnyi99VQ-AdCGuNb_xM,7831
+dao_ai/config.py,sha256=N_Vc-rJHvBzbia4TyAExGhCvZKXlk49bskrI_sbxwjg,51869
+dao_ai/graph.py,sha256=gmD9mxODfXuvn9xWeBfewm1FiuVAWMLEdnZz7DNmSH0,7859
 dao_ai/guardrails.py,sha256=-Qh0f_2Db9t4Nbrrx9FM7tnpqShjMoyxepZ0HByItfU,4027
 dao_ai/messages.py,sha256=xl_3-WcFqZKCFCiov8sZOPljTdM3gX3fCHhxq-xFg2U,7005
-dao_ai/models.py,sha256=h_xFMK5FHQwPApEAYhvrt69y7ZUljmqThHTjp-yde_o,25368
+dao_ai/models.py,sha256=Xb23U-lhDG8KyNRIijcJ4InluadlaGNy4rrYx7Cjgfg,26939
 dao_ai/nodes.py,sha256=SSuFNTXOdFaKg_aX-yUkQO7fM9wvNGu14lPXKDapU1U,8461
 dao_ai/prompts.py,sha256=vpmIbWs_szXUgNNDs5Gh2LcxKZti5pHDKSfoClUcgX0,1289
 dao_ai/state.py,sha256=GwbMbd1TWZx1T5iQrEOX6_rpxOitlmyeJ8dMr2o_pag,1031
@@ -18,11 +18,11 @@ dao_ai/hooks/__init__.py,sha256=LlHGIuiZt6vGW8K5AQo1XJEkBP5vDVtMhq0IdjcLrD4,417
 dao_ai/hooks/core.py,sha256=ZShHctUSoauhBgdf1cecy9-D7J6-sGn-pKjuRMumW5U,6663
 dao_ai/memory/__init__.py,sha256=1kHx_p9abKYFQ6EYD05nuc1GS5HXVEpufmjBGw_7Uho,260
 dao_ai/memory/base.py,sha256=99nfr2UZJ4jmfTL_KrqUlRSCoRxzkZyWyx5WqeUoMdQ,338
-dao_ai/memory/core.py,sha256=K45iCEFbqJCVxMi4m3vmBJi4c6TQ-UtKGzyugDTkPP0,4141
-dao_ai/memory/postgres.py,sha256=YILzA7xtqawPAOLFaGG_i17zW7cQxXTzTD8yd-ipe8k,12480
+dao_ai/memory/core.py,sha256=g7chjBgVgx3iKjR2hghl0QL1j3802uIM_e7mgszur9M,4151
+dao_ai/memory/postgres.py,sha256=ncvEKFYX-ZjUDYVmuWBMcZnykcp2eK4TP-ojzqkwDsk,17433
 dao_ai/providers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 dao_ai/providers/base.py,sha256=-fjKypCOk28h6vioPfMj9YZSw_3Kcbi2nMuAyY7vX9k,1383
-dao_ai/providers/databricks.py,sha256=KLYrLccOA3Uws9nWJcJUZTbMz-MdR_onhlQeztbplCM,28073
+dao_ai/providers/databricks.py,sha256=fZ8mGotfA3W3t5yUej2xGmGHSybjBFYr895mOctT418,28203
 dao_ai/tools/__init__.py,sha256=ye6MHaJY7tUnJ8336YJiLxuZr55zDPNdOw6gm7j5jlc,1103
 dao_ai/tools/agent.py,sha256=WbQnyziiT12TLMrA7xK0VuOU029tdmUBXbUl-R1VZ0Q,1886
 dao_ai/tools/core.py,sha256=Kei33S8vrmvPOAyrFNekaWmV2jqZ-IPS1QDSvU7RZF0,1984
@@ -33,8 +33,8 @@ dao_ai/tools/python.py,sha256=XcQiTMshZyLUTVR5peB3vqsoUoAAy8gol9_pcrhddfI,1831
 dao_ai/tools/time.py,sha256=Y-23qdnNHzwjvnfkWvYsE7PoWS1hfeKy44tA7sCnNac,8759
 dao_ai/tools/unity_catalog.py,sha256=PXfLj2EgyQgaXq4Qq3t25AmTC4KyVCF_-sCtg6enens,1404
 dao_ai/tools/vector_search.py,sha256=EDYQs51zIPaAP0ma1D81wJT77GQ-v-cjb2XrFVWfWdg,2621
-dao_ai-0.0.18.dist-info/METADATA,sha256=9lTAXjEqQHxl6dmRMyiqUnYT1Nh_wJpSeJXRG8bGZGg,41378
-dao_ai-0.0.18.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
-dao_ai-0.0.18.dist-info/entry_points.txt,sha256=Xa-UFyc6gWGwMqMJOt06ZOog2vAfygV_DSwg1AiP46g,43
-dao_ai-0.0.18.dist-info/licenses/LICENSE,sha256=YZt3W32LtPYruuvHE9lGk2bw6ZPMMJD8yLrjgHybyz4,1069
-dao_ai-0.0.18.dist-info/RECORD,,
+dao_ai-0.0.19.dist-info/METADATA,sha256=hus4RZHOCTgDR6Rs8zS9l0OusplrFzryWCLsXZpTxgw,41380
+dao_ai-0.0.19.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+dao_ai-0.0.19.dist-info/entry_points.txt,sha256=Xa-UFyc6gWGwMqMJOt06ZOog2vAfygV_DSwg1AiP46g,43
+dao_ai-0.0.19.dist-info/licenses/LICENSE,sha256=YZt3W32LtPYruuvHE9lGk2bw6ZPMMJD8yLrjgHybyz4,1069
+dao_ai-0.0.19.dist-info/RECORD,,