dao-ai 0.1.16__py3-none-any.whl → 0.1.18__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
dao_ai/tools/verifier.py ADDED
@@ -0,0 +1,159 @@
+ """
+ Result verifier for validating search results against user constraints.
+
+ Provides structured feedback for intelligent retry when results don't match intent.
+ """
+
+ import json
+ from pathlib import Path
+ from typing import Any
+
+ import mlflow
+ import yaml
+ from langchain_core.documents import Document
+ from langchain_core.language_models import BaseChatModel
+ from langchain_core.runnables import Runnable
+ from loguru import logger
+ from mlflow.entities import SpanType
+
+ from dao_ai.config import VerificationResult
+
+ # Load prompt template
+ _PROMPT_PATH = Path(__file__).parent.parent / "prompts" / "verifier.yaml"
+
+
+ def _load_prompt_template() -> dict[str, Any]:
+     """Load the verifier prompt template from YAML."""
+     with open(_PROMPT_PATH) as f:
+         return yaml.safe_load(f)
+
+
+ def _format_results_summary(documents: list[Document], max_docs: int = 5) -> str:
+     """Format top documents for verification prompt."""
+     if not documents:
+         return "No results retrieved."
+
+     summaries = []
+     for i, doc in enumerate(documents[:max_docs]):
+         metadata_str = ", ".join(
+             f"{k}: {v}"
+             for k, v in doc.metadata.items()
+             if not k.startswith("_") and k not in ("rrf_score", "reranker_score")
+         )
+         content_preview = (
+             doc.page_content[:200] + "..."
+             if len(doc.page_content) > 200
+             else doc.page_content
+         )
+         summaries.append(f"{i + 1}. {content_preview}\n Metadata: {metadata_str}")
+
+     return "\n\n".join(summaries)
+
+
+ def _format_constraints(constraints: list[str] | None) -> str:
+     """Format constraints list for prompt."""
+     if not constraints:
+         return "No explicit constraints specified."
+     return "\n".join(f"- {c}" for c in constraints)
+
+
+ @mlflow.trace(name="verify_results", span_type=SpanType.LLM)
+ def verify_results(
+     llm: BaseChatModel,
+     query: str,
+     documents: list[Document],
+     schema_description: str,
+     constraints: list[str] | None = None,
+     previous_feedback: str | None = None,
+ ) -> VerificationResult:
+     """
+     Verify that search results satisfy user constraints.
+
+     Args:
+         llm: Language model for verification
+         query: User's original search query
+         documents: Retrieved documents to verify
+         schema_description: Column names, types, and filter syntax
+         constraints: Explicit constraints to verify
+         previous_feedback: Feedback from previous failed attempt (for retry)
+
+     Returns:
+         VerificationResult with pass/fail status and structured feedback
+     """
+     prompt_config = _load_prompt_template()
+     prompt_template = prompt_config["template"]
+
+     prompt = prompt_template.format(
+         query=query,
+         schema_description=schema_description,
+         constraints=_format_constraints(constraints),
+         num_results=len(documents),
+         results_summary=_format_results_summary(documents),
+         previous_feedback=previous_feedback or "N/A (first attempt)",
+     )
+
+     logger.trace("Verifying results", query=query[:100], num_docs=len(documents))
+
+     # Use LangChain's with_structured_output for automatic strategy selection
+     # (JSON schema vs tool calling based on model capabilities)
+     try:
+         structured_llm: Runnable[str, VerificationResult] = llm.with_structured_output(
+             VerificationResult
+         )
+         result: VerificationResult = structured_llm.invoke(prompt)
+     except Exception as e:
+         logger.warning(
+             "Verifier failed, treating as passed with low confidence", error=str(e)
+         )
+         return VerificationResult(
+             passed=True,
+             confidence=0.0,
+             feedback="Verification failed to produce a valid result",
+         )
+
+     # Log for observability
+     mlflow.log_text(
+         json.dumps(result.model_dump(), indent=2),
+         "verification_result.json",
+     )
+
+     logger.debug(
+         "Verification complete",
+         passed=result.passed,
+         confidence=result.confidence,
+         unmet_constraints=result.unmet_constraints,
+     )
+
+     return result
+
+
+ def add_verification_metadata(
+     documents: list[Document],
+     result: VerificationResult,
+     exhausted: bool = False,
+ ) -> list[Document]:
+     """
+     Add verification metadata to documents.
+
+     Args:
+         documents: Documents to annotate
+         result: Verification result
+         exhausted: Whether max retries were exhausted
+
+     Returns:
+         Documents with verification metadata added
+     """
+     status = "exhausted" if exhausted else ("passed" if result.passed else "failed")
+
+     annotated = []
+     for doc in documents:
+         metadata = {
+             **doc.metadata,
+             "_verification_status": status,
+             "_verification_confidence": result.confidence,
+         }
+         if result.feedback:
+             metadata["_verification_feedback"] = result.feedback
+         annotated.append(Document(page_content=doc.page_content, metadata=metadata))
+
+     return annotated
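For orientation, a minimal sketch of how this new verifier could sit in a retrieval loop. The `llm`, `retriever`, `query`, schema string, and constraints are illustrative assumptions, not part of the diff; the import path follows the RECORD entry `dao_ai/tools/verifier.py`.

```python
# Hedged sketch: `llm`, `retriever`, and `query` are assumed to exist in the
# caller's scope; the schema description and constraints are made-up examples.
from dao_ai.tools.verifier import add_verification_metadata, verify_results

documents = retriever.invoke(query)  # hypothetical LangChain retriever
result = verify_results(
    llm=llm,
    query=query,
    documents=documents,
    schema_description="price: double, brand: string",
    constraints=["price under 50", "brand is Acme"],
)
if not result.passed:
    # Structured feedback from the failed attempt seeds the retry.
    documents = retriever.invoke(query)
    result = verify_results(
        llm=llm,
        query=query,
        documents=documents,
        schema_description="price: double, brand: string",
        constraints=["price under 50", "brand is Acme"],
        previous_feedback=result.feedback,
    )
# Annotate documents with _verification_* metadata for downstream consumers.
documents = add_verification_metadata(documents, result)
```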
dao_ai/utils.py CHANGED
@@ -1,17 +1,22 @@
  import importlib
  import importlib.metadata
+ import json
  import os
  import re
  import site
  from importlib.metadata import PackageNotFoundError, version
  from pathlib import Path
- from typing import Any, Callable, Sequence
+ from typing import Any, Callable, Sequence, TypeVar

+ from langchain_core.language_models import BaseChatModel
  from langchain_core.tools import BaseTool
  from loguru import logger
+ from pydantic import BaseModel

  import dao_ai

+ T = TypeVar("T", bound=BaseModel)
+

  def is_lib_provided(lib_name: str, pip_requirements: Sequence[str]) -> bool:
      return any(
@@ -152,7 +157,7 @@ def get_installed_packages() -> dict[str, str]:

      packages: Sequence[str] = [
          f"databricks-agents=={version('databricks-agents')}",
-         f"databricks-langchain=={version('databricks-langchain')}",
+         f"databricks-langchain[memory]=={version('databricks-langchain')}",
          f"databricks-mcp=={version('databricks-mcp')}",
          f"databricks-sdk[openai]=={version('databricks-sdk')}",
          f"ddgs=={version('ddgs')}",
@@ -322,3 +327,178 @@ def is_in_model_serving() -> bool:
          return True

      return False
+
+
+ def get_databricks_response_format(model_class: type[BaseModel]) -> dict[str, Any]:
+     """Create a Databricks-compatible response_format for structured output.
+
+     Databricks requires the json_schema response format to have a 'name' field.
+     This function creates the properly formatted response_format dictionary
+     from a Pydantic model.
+
+     Args:
+         model_class: A Pydantic model class to use as the output schema
+
+     Returns:
+         A dictionary suitable for use with llm.bind(response_format=...)
+
+     Example:
+         >>> response_format = get_databricks_response_format(MyModel)
+         >>> bound_llm = llm.bind(response_format=response_format)
+         >>> result = bound_llm.invoke(prompt)
+     """
+     schema = model_class.model_json_schema()
+
+     # Remove $defs from the schema - Databricks doesn't support complex refs
+     # We need to inline any referenced definitions
+     if "$defs" in schema:
+         schema = _inline_schema_defs(schema)
+
+     return {
+         "type": "json_schema",
+         "json_schema": {
+             "name": model_class.__name__,
+             "schema": schema,
+             "strict": True,
+         },
+     }
+
+
+ def _inline_schema_defs(schema: dict[str, Any]) -> dict[str, Any]:
+     """Inline $defs references in a JSON schema.
+
+     Databricks doesn't support $ref and complex nested definitions,
+     so we need to inline them.
+
+     Args:
+         schema: The original JSON schema with $defs
+
+     Returns:
+         A schema with all references inlined
+     """
+     defs = schema.pop("$defs", {})
+     if not defs:
+         return schema
+
+     def resolve_refs(obj: Any) -> Any:
+         if isinstance(obj, dict):
+             if "$ref" in obj:
+                 # Extract the definition name from #/$defs/DefinitionName
+                 ref_path = obj["$ref"]
+                 if ref_path.startswith("#/$defs/"):
+                     def_name = ref_path[len("#/$defs/") :]
+                     if def_name in defs:
+                         # Return a copy of the definition with refs resolved
+                         return resolve_refs(defs[def_name].copy())
+                 return obj
+             return {k: resolve_refs(v) for k, v in obj.items()}
+         elif isinstance(obj, list):
+             return [resolve_refs(item) for item in obj]
+         return obj
+
+     return resolve_refs(schema)
+
+
+ def _repair_json(content: str) -> str | None:
+     """Attempt to repair malformed JSON from LLM output.
+
+     Handles common issues:
+     - Extra text before/after JSON object
+     - Truncated JSON (unclosed brackets/braces)
+     - Trailing commas
+
+     Args:
+         content: The potentially malformed JSON string
+
+     Returns:
+         Repaired JSON string if successful, None otherwise
+     """
+     # 1. Extract JSON object if wrapped in extra text
+     start = content.find("{")
+     end = content.rfind("}")
+     if start == -1 or end == -1 or start >= end:
+         return None
+     content = content[start : end + 1]
+
+     # 2. Try parsing as-is first
+     try:
+         json.loads(content)
+         return content
+     except json.JSONDecodeError:
+         pass
+
+     # 3. Fix trailing commas before closing brackets
+     content = re.sub(r",\s*}", "}", content)
+     content = re.sub(r",\s*]", "]", content)
+
+     # 4. Try to close unclosed brackets/braces
+     open_braces = content.count("{") - content.count("}")
+     open_brackets = content.count("[") - content.count("]")
+
+     if open_braces > 0 or open_brackets > 0:
+         # Remove trailing comma if present
+         content = content.rstrip().rstrip(",")
+         content += "]" * open_brackets + "}" * open_braces
+
+     # 5. Final validation
+     try:
+         json.loads(content)
+         return content
+     except json.JSONDecodeError:
+         return None
+
+
+ def invoke_with_structured_output(
+     llm: BaseChatModel,
+     prompt: str,
+     model_class: type[T],
+ ) -> T | None:
+     """Invoke an LLM with Databricks-compatible structured output.
+
+     Uses response_format with json_schema type and proper 'name' field
+     as required by Databricks Foundation Model APIs.
+
+     Args:
+         llm: The language model to invoke
+         prompt: The prompt to send to the model
+         model_class: The Pydantic model class for the expected output
+
+     Returns:
+         An instance of model_class, or None if parsing fails
+     """
+     response_format = get_databricks_response_format(model_class)
+     bound_llm = llm.bind(response_format=response_format)
+
+     response = bound_llm.invoke(prompt)
+
+     content = response.content
+     if not isinstance(content, str):
+         return None
+
+     try:
+         # Try parsing the JSON directly
+         result_dict = json.loads(content)
+         return model_class.model_validate(result_dict)
+     except json.JSONDecodeError as e:
+         # Attempt JSON repair
+         repaired = _repair_json(content)
+         if repaired:
+             try:
+                 result_dict = json.loads(repaired)
+                 logger.debug("JSON repair successful", model_class=model_class.__name__)
+                 return model_class.model_validate(result_dict)
+             except (json.JSONDecodeError, Exception):
+                 pass
+         logger.warning(
+             "Failed to parse structured output",
+             error=str(e),
+             model_class=model_class.__name__,
+         )
+         return None
+     except Exception as e:
+         logger.warning(
+             "Failed to parse structured output",
+             error=str(e),
+             model_class=model_class.__name__,
+         )
+         return None
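A minimal sketch of how the new helper might be called. The `WeatherReport` model and the serving endpoint name are illustrative assumptions, not from the diff; `ChatDatabricks` is the chat model class shipped by `databricks-langchain`.

```python
# Hedged sketch: model and endpoint name are illustrative.
from databricks_langchain import ChatDatabricks
from pydantic import BaseModel

from dao_ai.utils import invoke_with_structured_output


class WeatherReport(BaseModel):
    city: str
    temperature_c: float
    summary: str


llm = ChatDatabricks(endpoint="databricks-meta-llama-3-3-70b-instruct")
report = invoke_with_structured_output(
    llm,
    "Summarize today's weather in Oslo as JSON.",
    WeatherReport,
)
if report is None:
    # Direct parse and _repair_json both failed; the caller picks a fallback.
    print("structured output unavailable")
else:
    print(report.city, report.temperature_c)
```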
dao_ai/vector_search.py CHANGED
@@ -64,7 +64,12 @@ def index_exists(
          return True
      except Exception as e:
          # Check if this is a "not exists" error or something else
-         if "RESOURCE_DOES_NOT_EXIST" not in str(e):
+         # Handle both "RESOURCE_DOES_NOT_EXIST" and "does not exist" error patterns
+         error_str = str(e).lower()
+         if (
+             "does not exist" not in error_str
+             and "resource_does_not_exist" not in error_str
+         ):
              # For unexpected errors, provide a more helpful message
              print(
                  "Unexpected error describing the index. This could be a permission issue."
@@ -106,6 +111,9 @@ def find_index(
              return (True, endpoint_name)
          except Exception:
              # Index not on this endpoint, try next
+             # Catches both "does not exist" and "RESOURCE_DOES_NOT_EXIST" errors,
+             # as well as other errors (permission issues, etc.) - we continue
+             # searching other endpoints regardless of error type
              continue

      return (False, None)
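Both the SDK-style error code and the plain-English message now fall through the same branch; a small standalone sketch of the broadened matching (the error strings are illustrative):

```python
# Illustrative error strings; the real ones come from the Databricks SDK.
errors = [
    "RESOURCE_DOES_NOT_EXIST: Index main.schema.idx not found",
    "Index main.schema.idx does not exist",
]
for e in errors:
    error_str = e.lower()
    missing = (
        "does not exist" in error_str or "resource_does_not_exist" in error_str
    )
    assert missing  # both shapes are treated as "index not found"
```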
{dao_ai-0.1.16.dist-info → dao_ai-0.1.18.dist-info}/METADATA RENAMED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: dao-ai
- Version: 0.1.16
+ Version: 0.1.18
  Summary: DAO AI: A modular, multi-agent orchestration framework for complex AI workflows. Supports agent handoff, tool integration, and dynamic configuration via YAML.
  Project-URL: Homepage, https://github.com/natefleming/dao-ai
  Project-URL: Documentation, https://natefleming.github.io/dao-ai
@@ -26,7 +26,7 @@ Classifier: Topic :: Software Development :: Libraries :: Python Modules
  Classifier: Topic :: System :: Distributed Computing
  Requires-Python: >=3.11
  Requires-Dist: databricks-agents>=1.9.0
- Requires-Dist: databricks-langchain[memory]>=0.12.1
+ Requires-Dist: databricks-langchain[memory]>=0.13.0
  Requires-Dist: databricks-mcp>=0.5.0
  Requires-Dist: databricks-sdk[openai]>=0.77.0
  Requires-Dist: ddgs>=9.10.0
@@ -235,7 +235,7 @@ app:
        - *assistant
    orchestration:
      swarm:
-       model: *default_llm
+       default_agent: *assistant
  ```

  **💡 What's happening here?**
{dao_ai-0.1.16.dist-info → dao_ai-0.1.18.dist-info}/RECORD RENAMED
@@ -1,18 +1,18 @@
  dao_ai/__init__.py,sha256=18P98ExEgUaJ1Byw440Ct1ty59v6nxyWtc5S6Uq2m9Q,1062
  dao_ai/catalog.py,sha256=sPZpHTD3lPx4EZUtIWeQV7VQM89WJ6YH__wluk1v2lE,4947
- dao_ai/cli.py,sha256=6qwlS07_Tei6iEPXIJ-19cQVnLXd7vJDpuY4Qu0k96E,51634
- dao_ai/config.py,sha256=9RcyCNiLg0YlARZd-ZJ9M4oirNX5emxaIS23VHca_Z4,130328
+ dao_ai/cli.py,sha256=Mcw03hemsT4O63lAH6mqTaPZjx0Q01YTgj5CN0thODI,52121
+ dao_ai/config.py,sha256=lPWOCTL8NxoARkI8MRZlDJ2HiCAXgBB-SAyqCQQUVC8,145474
+ dao_ai/evaluation.py,sha256=4dveWDwFnUxaybswr0gag3ydZ5RGVCTRaiE3eKLClD4,18161
  dao_ai/graph.py,sha256=1-uQlo7iXZQTT3uU8aYu0N5rnhw5_g_2YLwVsAs6M-U,1119
  dao_ai/logging.py,sha256=lYy4BmucCHvwW7aI3YQkQXKJtMvtTnPDu9Hnd7_O4oc,1556
  dao_ai/messages.py,sha256=4ZBzO4iFdktGSLrmhHzFjzMIt2tpaL-aQLHOQJysGnY,6959
  dao_ai/models.py,sha256=NaHj91Gra4M8thlKX1DSufLqtJfZSZ55lm1H1dJL_O8,77320
  dao_ai/nodes.py,sha256=7W6Ek6Uk9-pKa-H06nVCwuDllCrgX02IYy3rHtuL0aM,10777
  dao_ai/optimization.py,sha256=phK6t4wYmWPObCjGUBHdZzsaFXGhQOjhAek2bAEfwXo,22971
- dao_ai/prompts.py,sha256=4cz5bZ7cOzrjyQ8hMp-K4evK6cVYrkGrAGdUl8-KDEM,2784
  dao_ai/state.py,sha256=ifDTAC7epdowk3Z1CP3Xqw4uH2dIxQEVF3C747dA8yI,6436
  dao_ai/types.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- dao_ai/utils.py,sha256=_Urd7Nj2VzrgPKf3NS4E6vt0lWRhEUddBqWN9BksqeE,11543
- dao_ai/vector_search.py,sha256=8d3xROg9zSIYNXjRRl6rSexsJTlufjRl5Fy1ZA8daKA,4019
+ dao_ai/utils.py,sha256=ImgH0jnHPCK2AR7KcueG_Zb7kltcBzTw78ujDUpARIE,17184
+ dao_ai/vector_search.py,sha256=PfmT2PDMymk-3dTm2uszlOZNyHyiDge--imxKpKJRsY,4440
  dao_ai/apps/__init__.py,sha256=RLuhZf4gQ4pemwKDz1183aXib8UfaRhwfKvRx68GRlM,661
  dao_ai/apps/handlers.py,sha256=6-IhhklHSPnS8aqKp155wPaSnYWTU1BSOPwbdWYBkFU,3594
  dao_ai/apps/model_serving.py,sha256=XLt3_0pGSRceMK6YtOrND9Jnh7mKLPCtwjVDLIaptQU,847
@@ -31,7 +31,7 @@ dao_ai/memory/__init__.py,sha256=Us3wFehvug_h83m-UJ7OXdq2qZ0e9nHBQE7m5RwoAd8,559
  dao_ai/memory/base.py,sha256=99nfr2UZJ4jmfTL_KrqUlRSCoRxzkZyWyx5WqeUoMdQ,338
  dao_ai/memory/core.py,sha256=38H-JLIyUrRDIECLvpXK3iJlWG35X97E-DTo_4c3Jzc,6317
  dao_ai/memory/databricks.py,sha256=SM6nwLjhSRJO4hLc3GUuht5YydYtTi3BAOae6jPwTm4,14377
- dao_ai/memory/postgres.py,sha256=DeLmexSzz91eXEJN5zW4YJigLCby8j9qNAZleumaVHU,17481
+ dao_ai/memory/postgres.py,sha256=bSjtvEht0h6jy2ADN2vqISVQDxm_DeM586VDdGaShJQ,23168
  dao_ai/middleware/__init__.py,sha256=Qy8wbvjXF7TrUzi3tWziOwxqsrUcT1rzE3UWd3x5CrU,5108
  dao_ai/middleware/assertions.py,sha256=C1K-TnNZfBEwWouioHCt6c48i1ux9QKfQaX6AzghhgE,27408
  dao_ai/middleware/base.py,sha256=uG2tpdnjL5xY5jCKvb_m3UTBtl4ZC6fJQUkDsQvV8S4,1279
@@ -48,28 +48,37 @@ dao_ai/middleware/tool_call_limit.py,sha256=WQ3NmA3pLo-pNPBmwM7KwkYpT1segEnWqkhg
  dao_ai/middleware/tool_retry.py,sha256=QfJ7yTHneME8VtnA88QcmnjXIegSFeJztyngy49wTgM,5568
  dao_ai/middleware/tool_selector.py,sha256=POj72YdzZEiNGfW4AQXPBeVVS1RUBsiG7PBuSENEhe0,4516
  dao_ai/orchestration/__init__.py,sha256=i85CLfRR335NcCFhaXABcMkn6WZfXnJ8cHH4YZsZN0s,1622
- dao_ai/orchestration/core.py,sha256=qoU7uMXBJCth-sqfu0jRE1L0GOn5H4LoZdRUY1Ib3DI,9585
- dao_ai/orchestration/supervisor.py,sha256=alKMEEo9G5LhdpMvTVdAMel234cZj5_MguWl4wFB7XQ,9873
- dao_ai/orchestration/swarm.py,sha256=8tp1eGmsQqqWpaDcjPoJckddPWohZdmmN0RGRJ_xzOA,9198
+ dao_ai/orchestration/core.py,sha256=8bPigzWtHUZ0Gw4Q_91uvcREucVQstxlelC06W_qmn0,10683
+ dao_ai/orchestration/supervisor.py,sha256=FoQ1fYP_e0taKC4ByITqJLOYvwJd1zecYYLs4RcY1lk,10605
+ dao_ai/orchestration/swarm.py,sha256=BloDI0TWhGisv9r3-zTgJWZQy9l3hbQ5tXYggovr5i8,9467
+ dao_ai/prompts/__init__.py,sha256=r91BY_yq28iUL0Pz5NbMo1VEDQh-aE44GqN0tBrIKfc,3011
+ dao_ai/prompts/instructed_retriever_decomposition.yaml,sha256=OkBLLlgU8MXtvGPlhESXgFfwYCUGRDcyD8O1iWOsmbk,2107
+ dao_ai/prompts/instruction_reranker.yaml,sha256=4OGZLNbdcWk6slBY5vnt_C-nGLPZM6e21smTNyaRPmE,406
+ dao_ai/prompts/router.yaml,sha256=79C_O98cpNndeMO0Vdn91CC7vxZx0hZ1rl1BAgnGjYc,1319
+ dao_ai/prompts/verifier.yaml,sha256=9snFQuxfYuEr46F4gv13VqL9q2PJCtWlbBhN3_IO2zI,1455
  dao_ai/providers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  dao_ai/providers/base.py,sha256=cJGo3UjUTPgS91dv38ePOHwQQtYhIa84ebb167CBXjk,2111
- dao_ai/providers/databricks.py,sha256=bI8lWZ2DkNac9aJWCIJzTG3lCE8MJ8n2BPurEHM1SeE,72791
+ dao_ai/providers/databricks.py,sha256=cg-TY9IS3-OqIo1gkLe1YwOR1H-s8YTBGrqDrkOWR6c,73569
  dao_ai/tools/__init__.py,sha256=NfRpAKds_taHbx6gzLPWgtPXve-YpwzkoOAUflwxceM,1734
  dao_ai/tools/agent.py,sha256=plIWALywRjaDSnot13nYehBsrHRpBUpsVZakoGeajOE,1858
  dao_ai/tools/core.py,sha256=bRIN3BZhRQX8-Kpu3HPomliodyskCqjxynQmYbk6Vjs,3783
  dao_ai/tools/email.py,sha256=A3TsCoQgJR7UUWR0g45OPRGDpVoYwctFs1MOZMTt_d4,7389
  dao_ai/tools/genie.py,sha256=b0R51N5D58H1vpOCUCA88ALjLs58KSMn6nl80ap8_c0,11009
- dao_ai/tools/mcp.py,sha256=K1yMQ39UgJ0Q4xhMpNWV3AVNx929w9vxZlLoCq_jrws,22016
+ dao_ai/tools/instructed_retriever.py,sha256=iEu7oH1Z9_-Id0SMaq-dAgCNigeRrJDDTSZTcOJLl6k,12990
+ dao_ai/tools/instruction_reranker.py,sha256=_1kGwrXkJk4QR2p8n3lAaYkUVoidxCxV9wNCtoS0qco,6730
+ dao_ai/tools/mcp.py,sha256=4uvag52OJPInUEnxFLwpE0JRugTrgHeWbkP5lzIx4lg,22620
  dao_ai/tools/memory.py,sha256=lwObKimAand22Nq3Y63tsv-AXQ5SXUigN9PqRjoWKes,1836
  dao_ai/tools/python.py,sha256=jWFnZPni2sCdtd8D1CqXnZIPHnWkdK27bCJnBXpzhvo,1879
+ dao_ai/tools/router.py,sha256=YIVzSk4-ZFQHgvkhyrPHGLbDyzE9koa5QmbcTT-npnI,2872
  dao_ai/tools/search.py,sha256=cJ3D9FKr1GAR6xz55dLtRkjtQsI0WRueGt9TPDFpOxc,433
  dao_ai/tools/slack.py,sha256=QnMsA7cYD1MnEcqGqqSr6bKIhV0RgDpkyaiPmDqnAts,5433
  dao_ai/tools/sql.py,sha256=FG-Aa0FAUAnhCuZvao1J-y-cMM6bU5eCujNbsYn0xDw,7864
  dao_ai/tools/time.py,sha256=tufJniwivq29y0LIffbgeBTIDE6VgrLpmVf8Qr90qjw,9224
  dao_ai/tools/unity_catalog.py,sha256=oBlW6pH-Ne08g60QW9wVi_tyeVYDiecuNoxQbIIFmN8,16515
- dao_ai/tools/vector_search.py,sha256=LF_72vlEF6TwUjKVv6nkUetLK766l9Kl6DQQTc9ebJI,15888
- dao_ai-0.1.16.dist-info/METADATA,sha256=hKYbqBIQ8eNjVtpkhysLAJNS1ISg9Tuy1ek9SggxSC4,16830
- dao_ai-0.1.16.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
- dao_ai-0.1.16.dist-info/entry_points.txt,sha256=Xa-UFyc6gWGwMqMJOt06ZOog2vAfygV_DSwg1AiP46g,43
- dao_ai-0.1.16.dist-info/licenses/LICENSE,sha256=YZt3W32LtPYruuvHE9lGk2bw6ZPMMJD8yLrjgHybyz4,1069
- dao_ai-0.1.16.dist-info/RECORD,,
+ dao_ai/tools/vector_search.py,sha256=34uhd58FKHzvcdgHHoACRdZAUJWTaUuPYiwIqBwvGqk,29061
+ dao_ai/tools/verifier.py,sha256=ociBVsGkQNyhWS6F6G8x17V7zAQfSuTe4Xcd6Y-7lPE,4975
+ dao_ai-0.1.18.dist-info/METADATA,sha256=_458v0KntzrWOU-pc8hs0YsRjfW0Oacgf91gqNloT5w,16836
+ dao_ai-0.1.18.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
+ dao_ai-0.1.18.dist-info/entry_points.txt,sha256=Xa-UFyc6gWGwMqMJOt06ZOog2vAfygV_DSwg1AiP46g,43
+ dao_ai-0.1.18.dist-info/licenses/LICENSE,sha256=YZt3W32LtPYruuvHE9lGk2bw6ZPMMJD8yLrjgHybyz4,1069
+ dao_ai-0.1.18.dist-info/RECORD,,