dao-ai 0.0.22__py3-none-any.whl → 0.0.24__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- dao_ai/config.py +47 -3
- dao_ai/prompts.py +13 -5
- dao_ai/providers/databricks.py +78 -0
- dao_ai/tools/__init__.py +2 -0
- dao_ai/tools/mcp.py +135 -76
- dao_ai/tools/slack.py +136 -0
- dao_ai/utils.py +4 -0
- {dao_ai-0.0.22.dist-info → dao_ai-0.0.24.dist-info}/METADATA +32 -9
- {dao_ai-0.0.22.dist-info → dao_ai-0.0.24.dist-info}/RECORD +12 -11
- {dao_ai-0.0.22.dist-info → dao_ai-0.0.24.dist-info}/WHEEL +0 -0
- {dao_ai-0.0.22.dist-info → dao_ai-0.0.24.dist-info}/entry_points.txt +0 -0
- {dao_ai-0.0.22.dist-info → dao_ai-0.0.24.dist-info}/licenses/LICENSE +0 -0
dao_ai/config.py
CHANGED

@@ -666,6 +666,10 @@ class ConnectionModel(BaseModel, HasFullName, IsDatabricksResource):
         return [
             "catalog.connections",
             "serving.serving-endpoints",
+            "mcp.genie",
+            "mcp.functions",
+            "mcp.vectorsearch",
+            "mcp.external",
         ]

     def as_resources(self) -> Sequence[DatabricksResource]:

@@ -988,6 +992,7 @@ class McpFunctionModel(BaseFunctionModel, HasFullName):
     transport: TransportType = TransportType.STREAMABLE_HTTP
     command: Optional[str] = "python"
     url: Optional[AnyVariable] = None
+    connection: Optional[ConnectionModel] = None
     headers: dict[str, AnyVariable] = Field(default_factory=dict)
     args: list[str] = Field(default_factory=list)
     pat: Optional[AnyVariable] = None

@@ -1007,8 +1012,12 @@ class McpFunctionModel(BaseFunctionModel, HasFullName):

     @model_validator(mode="after")
     def validate_mutually_exclusive(self):
-        if self.transport == TransportType.STREAMABLE_HTTP and not
-        … (old line 1011 truncated in source)
+        if self.transport == TransportType.STREAMABLE_HTTP and not (
+            self.url or self.connection
+        ):
+            raise ValueError(
+                "url or connection must be provided for STREAMABLE_HTTP transport"
+            )
         if self.transport == TransportType.STDIO and not self.command:
             raise ValueError("command must not be provided for STDIO transport")
         if self.transport == TransportType.STDIO and not self.args:

@@ -1157,6 +1166,40 @@ class MemoryModel(BaseModel):
 FunctionHook: TypeAlias = PythonFunctionModel | FactoryFunctionModel | str


+class PromptModel(BaseModel, HasFullName):
+    model_config = ConfigDict(use_enum_values=True, extra="forbid")
+    schema_model: Optional[SchemaModel] = Field(default=None, alias="schema")
+    name: str
+    description: Optional[str] = None
+    default_template: Optional[str] = None
+    alias: Optional[str] = None
+    version: Optional[int] = None
+    tags: Optional[dict[str, Any]] = Field(default_factory=dict)
+
+    @property
+    def template(self) -> str:
+        from dao_ai.providers.databricks import DatabricksProvider
+
+        provider: DatabricksProvider = DatabricksProvider()
+        prompt: str = provider.get_prompt(self)
+        return prompt
+
+    @property
+    def full_name(self) -> str:
+        if self.schema_model:
+            name: str = ""
+            if self.name:
+                name = f".{self.name}"
+            return f"{self.schema_model.catalog_name}.{self.schema_model.schema_name}{name}"
+        return self.name
+
+    @model_validator(mode="after")
+    def validate_mutually_exclusive(self):
+        if self.alias and self.version:
+            raise ValueError("Cannot specify both alias and version")
+        return self
+
+
 class AgentModel(BaseModel):
     model_config = ConfigDict(use_enum_values=True, extra="forbid")
     name: str

@@ -1164,7 +1207,7 @@ class AgentModel(BaseModel):
     model: LLMModel
     tools: list[ToolModel] = Field(default_factory=list)
     guardrails: list[GuardrailModel] = Field(default_factory=list)
-    prompt: Optional[str] = None
+    prompt: Optional[str | PromptModel] = None
     handoff_prompt: Optional[str] = None
     create_agent_hook: Optional[FunctionHook] = None
     pre_agent_hook: Optional[FunctionHook] = None

@@ -1490,6 +1533,7 @@ class AppConfig(BaseModel):
     tools: dict[str, ToolModel] = Field(default_factory=dict)
     guardrails: dict[str, GuardrailModel] = Field(default_factory=dict)
     memory: Optional[MemoryModel] = None
+    prompts: dict[str, PromptModel] = Field(default_factory=dict)
     agents: dict[str, AgentModel] = Field(default_factory=dict)
     app: Optional[AppModel] = None
     evaluation: Optional[EvaluationModel] = None
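The `PromptModel` added above resolves its text through the MLflow Prompt Registry via `DatabricksProvider.get_prompt` and rejects configurations that set both `alias` and `version`. A minimal sketch of how the new model might be used, assuming `SchemaModel` accepts `catalog_name` and `schema_name` fields (its exact constructor is not shown in this diff):

```python
# Sketch only: field names for SchemaModel are assumptions; PromptModel fields
# follow the diff above (schema is the alias for schema_model).
from dao_ai.config import PromptModel, SchemaModel

prompt = PromptModel(
    name="customer_support",  # hypothetical prompt name
    schema=SchemaModel(catalog_name="main", schema_name="agents"),
    alias="production",       # or version=3, but not both
    default_template="You are a helpful assistant for {store}.",
)

print(prompt.full_name)  # -> "main.agents.customer_support"
# prompt.template resolves via DatabricksProvider.get_prompt() at access time.
# PromptModel(name="p", alias="production", version=3) would raise ValueError.
```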
dao_ai/prompts.py
CHANGED

@@ -1,4 +1,4 @@
-from typing import Any, Callable, Sequence
+from typing import Any, Callable, Optional, Sequence

 from langchain.prompts import PromptTemplate
 from langchain_core.messages import (

@@ -8,18 +8,26 @@ from langchain_core.messages import (
 from langchain_core.runnables import RunnableConfig
 from loguru import logger

+from dao_ai.config import PromptModel
 from dao_ai.state import SharedState


-def make_prompt(
+def make_prompt(
+    base_system_prompt: Optional[str | PromptModel],
+) -> Callable[[dict, RunnableConfig], list]:
     logger.debug(f"make_prompt: {base_system_prompt}")

     def prompt(state: SharedState, config: RunnableConfig) -> list:
         system_prompt: str = ""
         if base_system_prompt:
-            … (old lines 20-21 truncated in source)
-            )
+            # Extract template string from PromptModel or use string directly
+            template_str: str
+            if isinstance(base_system_prompt, PromptModel):
+                template_str = base_system_prompt.template
+            else:
+                template_str = base_system_prompt
+
+            prompt_template: PromptTemplate = PromptTemplate.from_template(template_str)

             params: dict[str, Any] = {
                 input_variable: "" for input_variable in prompt_template.input_variables
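`make_prompt` now accepts either a plain string or a `PromptModel` and normalizes both to a template string before building the `PromptTemplate`. A rough illustration of that dispatch; the `resolve_template` helper below is ours, not part of dao_ai:

```python
# Illustrative sketch of the new str | PromptModel handling in make_prompt.
from langchain.prompts import PromptTemplate

from dao_ai.config import PromptModel


def resolve_template(base_system_prompt: str | PromptModel) -> str:
    # A PromptModel defers to the MLflow registry via its .template property;
    # a plain string is used verbatim.
    if isinstance(base_system_prompt, PromptModel):
        return base_system_prompt.template
    return base_system_prompt


prompt_template = PromptTemplate.from_template(
    resolve_template("You are a {role} for {store}.")
)
print(sorted(prompt_template.input_variables))  # ['role', 'store']
```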
dao_ai/providers/databricks.py
CHANGED

@@ -30,6 +30,7 @@ from databricks.vector_search.index import VectorSearchIndex
 from loguru import logger
 from mlflow import MlflowClient
 from mlflow.entities import Experiment
+from mlflow.entities.model_registry import PromptVersion
 from mlflow.entities.model_registry.model_version import ModelVersion
 from mlflow.models.auth_policy import AuthPolicy, SystemAuthPolicy, UserAuthPolicy
 from mlflow.models.model import ModelInfo

@@ -52,6 +53,7 @@ from dao_ai.config import (
     IndexModel,
     IsDatabricksResource,
     LLMModel,
+    PromptModel,
     SchemaModel,
     TableModel,
     UnityCatalogFunctionSqlModel,

@@ -1023,3 +1025,79 @@ class DatabricksProvider(ServiceProvider):
                 f"Error creating instance role '{role_name}' for database {instance_name}: {e}"
             )
             raise
+
+    def get_prompt(self, prompt_model: PromptModel) -> str:
+        """Load prompt from MLflow Prompt Registry or fall back to default_template."""
+        prompt_name: str = prompt_model.full_name
+
+        # Build prompt URI based on alias, version, or default to latest
+        if prompt_model.alias:
+            prompt_uri = f"prompts:/{prompt_name}@{prompt_model.alias}"
+        elif prompt_model.version:
+            prompt_uri = f"prompts:/{prompt_name}/{prompt_model.version}"
+        else:
+            prompt_uri = f"prompts:/{prompt_name}@latest"
+
+        try:
+            from mlflow.genai.prompts import Prompt
+
+            prompt_obj: Prompt = mlflow.genai.load_prompt(prompt_uri)
+            return prompt_obj.to_single_brace_format()
+
+        except Exception as e:
+            logger.warning(f"Failed to load prompt '{prompt_name}' from registry: {e}")
+
+            if prompt_model.default_template:
+                logger.info(f"Using default_template for '{prompt_name}'")
+                self._sync_default_template_to_registry(
+                    prompt_name, prompt_model.default_template, prompt_model.description
+                )
+                return prompt_model.default_template
+
+            raise ValueError(
+                f"Prompt '{prompt_name}' not found in registry and no default_template provided"
+            ) from e
+
+    def _sync_default_template_to_registry(
+        self, prompt_name: str, default_template: str, description: str | None = None
+    ) -> None:
+        """Register default_template to prompt registry under 'default' alias if changed."""
+        try:
+            # Check if default alias already has the same template
+            try:
+                logger.debug(f"Loading prompt '{prompt_name}' from registry...")
+                existing: PromptVersion = mlflow.genai.load_prompt(
+                    f"prompts:/{prompt_name}@default"
+                )
+                if (
+                    existing.to_single_brace_format().strip()
+                    == default_template.strip()
+                ):
+                    logger.debug(f"Prompt '{prompt_name}' is already up-to-date")
+                    return  # Already up-to-date
+            except Exception:
+                logger.debug(
+                    f"Default alias for prompt '{prompt_name}' doesn't exist yet"
+                )
+
+            # Register new version and set as default alias
+            commit_message = description or "Auto-synced from default_template"
+            prompt_version: PromptVersion = mlflow.genai.register_prompt(
+                name=prompt_name,
+                template=default_template,
+                commit_message=commit_message,
+            )
+
+            logger.debug(f"Setting default alias for prompt '{prompt_name}'")
+            mlflow.genai.set_prompt_alias(
+                name=prompt_name,
+                alias="default",
+                version=prompt_version.version,
+            )
+
+            logger.info(
+                f"Synced prompt '{prompt_name}' v{prompt_version.version} to registry"
+            )
+
+        except Exception as e:
+            logger.warning(f"Failed to sync '{prompt_name}' to registry: {e}")
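`get_prompt` builds an MLflow prompt URI from the model's `full_name` plus an optional alias or version, and only falls back to `default_template` (syncing it under the `default` alias) when the registry lookup fails. A standalone sketch of just the URI selection, with a hypothetical prompt name:

```python
# Standalone sketch mirroring the URI selection in DatabricksProvider.get_prompt.
from typing import Optional


def build_prompt_uri(name: str, alias: Optional[str] = None, version: Optional[int] = None) -> str:
    if alias:
        return f"prompts:/{name}@{alias}"
    if version:
        return f"prompts:/{name}/{version}"
    return f"prompts:/{name}@latest"


assert build_prompt_uri("main.agents.support", alias="production") == "prompts:/main.agents.support@production"
assert build_prompt_uri("main.agents.support", version=3) == "prompts:/main.agents.support/3"
assert build_prompt_uri("main.agents.support") == "prompts:/main.agents.support@latest"
```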
dao_ai/tools/__init__.py
CHANGED

@@ -7,6 +7,7 @@ from dao_ai.tools.core import (
 from dao_ai.tools.genie import create_genie_tool
 from dao_ai.tools.mcp import create_mcp_tools
 from dao_ai.tools.python import create_factory_tool, create_python_tool
+from dao_ai.tools.slack import create_send_slack_message_tool
 from dao_ai.tools.time import (
     add_time_tool,
     current_time_tool,

@@ -27,6 +28,7 @@ __all__ = [
     "create_hooks",
     "create_mcp_tools",
     "create_python_tool",
+    "create_send_slack_message_tool",
     "create_tools",
     "create_uc_tools",
     "create_vector_search_tool",
dao_ai/tools/mcp.py
CHANGED

@@ -1,10 +1,14 @@
 import asyncio
 from typing import Any, Sequence

+from databricks_mcp import DatabricksOAuthClientProvider
 from langchain_core.runnables.base import RunnableLike
 from langchain_core.tools import tool as create_tool
 from langchain_mcp_adapters.client import MultiServerMCPClient
+from langchain_mcp_adapters.tools import load_mcp_tools
 from loguru import logger
+from mcp import ClientSession
+from mcp.client.streamable_http import streamablehttp_client
 from mcp.types import ListToolsResult, Tool

 from dao_ai.config import (

@@ -20,98 +24,153 @@ def create_mcp_tools(
     """
     Create tools for invoking Databricks MCP functions.

+    Supports both direct MCP connections and UC Connection-based MCP access.
     Uses session-based approach to handle authentication token expiration properly.
+
+    Based on: https://docs.databricks.com/aws/en/generative-ai/mcp/external-mcp
     """
     logger.debug(f"create_mcp_tools: {function}")

-    … (old lines 27-30 truncated in source)
-            return {
-                "command": function.command,
-                "args": function.args,
-                "transport": function.transport,
-            }
-
-        # For HTTP transport, generate fresh headers
-        headers = function.headers.copy() if function.headers else {}
+    # Check if using UC Connection or direct MCP connection
+    if function.connection:
+        # Use UC Connection approach with DatabricksOAuthClientProvider
+        logger.debug(f"Using UC Connection for MCP: {function.connection.name}")

-        if
-    … (old lines 41-43 truncated in source)
-            from dao_ai.providers.databricks import DatabricksProvider
-
-            try:
-                provider = DatabricksProvider(
-                    workspace_host=value_of(function.workspace_host),
-                    client_id=value_of(function.client_id),
-                    client_secret=value_of(function.client_secret),
-                    pat=value_of(function.pat),
-                )
-                headers["Authorization"] = f"Bearer {provider.create_token()}"
-                logger.debug("Generated fresh authentication token")
-            except Exception as e:
-                logger.error(f"Failed to create fresh token: {e}")
+        # Construct URL if not provided
+        if function.url:
+            mcp_url = function.url
+            logger.debug(f"Using provided MCP URL: {mcp_url}")
         else:
-    … (old line 58 truncated in source)
+            # Construct URL from workspace host and connection name
+            # Pattern: https://{workspace_host}/api/2.0/mcp/external/{connection_name}
+            workspace_client = function.connection.workspace_client
+            workspace_host = workspace_client.config.host
+            connection_name = function.connection.name
+            mcp_url = f"{workspace_host}/api/2.0/mcp/external/{connection_name}"
+            logger.debug(f"Constructed MCP URL from connection: {mcp_url}")
+
+        async def _get_tools_with_connection():
+            """Get tools using DatabricksOAuthClientProvider."""
+            workspace_client = function.connection.workspace_client
+
+            async with streamablehttp_client(
+                mcp_url, auth=DatabricksOAuthClientProvider(workspace_client)
+            ) as (read_stream, write_stream, _):
+                async with ClientSession(read_stream, write_stream) as session:
+                    # Initialize and list tools
+                    await session.initialize()
+                    tools = await load_mcp_tools(session)
+                    return tools

-    … (old lines 60-65 truncated in source)
-        return response
+        try:
+            langchain_tools = asyncio.run(_get_tools_with_connection())
+            logger.debug(
+                f"Retrieved {len(langchain_tools)} MCP tools via UC Connection"
+            )

-    … (old lines 68-71 truncated in source)
+            # Wrap tools with human-in-the-loop if needed
+            wrapped_tools = [
+                as_human_in_the_loop(tool, function) for tool in langchain_tools
+            ]
+            return wrapped_tools

-        try:
-            async with client.session(function.name) as session:
-                return await session.list_tools()
         except Exception as e:
-            logger.error(f"Failed to
-    … (old lines 78-103 truncated in source)
+            logger.error(f"Failed to get tools from MCP server via UC Connection: {e}")
+            raise RuntimeError(
+                f"Failed to list MCP tools for function '{function.name}' via UC Connection '{function.connection.name}': {e}"
+            )
+
+    else:
+        # Use direct MCP connection with MultiServerMCPClient
+        logger.debug("Using direct MCP connection with MultiServerMCPClient")
+
+        def _create_fresh_connection() -> dict[str, Any]:
+            """Create connection config with fresh authentication headers."""
+            logger.debug("Creating fresh connection...")
+
+            if function.transport == TransportType.STDIO:
+                return {
+                    "command": function.command,
+                    "args": function.args,
+                    "transport": function.transport,
+                }
+
+            # For HTTP transport, generate fresh headers
+            headers = function.headers.copy() if function.headers else {}
+
+            if "Authorization" not in headers:
+                logger.debug("Generating fresh authentication token for MCP function")
+
+                from dao_ai.config import value_of
+                from dao_ai.providers.databricks import DatabricksProvider
+
+                try:
+                    provider = DatabricksProvider(
+                        workspace_host=value_of(function.workspace_host),
+                        client_id=value_of(function.client_id),
+                        client_secret=value_of(function.client_secret),
+                        pat=value_of(function.pat),
+                    )
+                    headers["Authorization"] = f"Bearer {provider.create_token()}"
+                    logger.debug("Generated fresh authentication token")
+                except Exception as e:
+                    logger.error(f"Failed to create fresh token: {e}")
+            else:
+                logger.debug("Using existing authentication token")

+            return {
+                "url": function.url,
+                "transport": function.transport,
+                "headers": headers,
+            }
+
+        # Get available tools from MCP server
+        async def _list_mcp_tools():
             connection = _create_fresh_connection()
             client = MultiServerMCPClient({function.name: connection})

             try:
                 async with client.session(function.name) as session:
-                    return await session.
+                    return await session.list_tools()
             except Exception as e:
-                logger.error(f"
-    … (old line 113 truncated in source)
+                logger.error(f"Failed to list MCP tools: {e}")
+                return []

-    … (old line 115 truncated in source)
+        # Note: This still needs to run sync during tool creation/registration
+        # The actual tool execution will be async
+        try:
+            mcp_tools: list[Tool] | ListToolsResult = asyncio.run(_list_mcp_tools())
+            if isinstance(mcp_tools, ListToolsResult):
+                mcp_tools = mcp_tools.tools

-    … (old line 117 truncated in source)
+            logger.debug(f"Retrieved {len(mcp_tools)} MCP tools")
+        except Exception as e:
+            logger.error(f"Failed to get tools from MCP server: {e}")
+            raise RuntimeError(
+                f"Failed to list MCP tools for function '{function.name}' with transport '{function.transport}' and URL '{function.url}': {e}"
+            )
+
+        # Create wrapper tools with fresh session per invocation
+        def _create_tool_wrapper(mcp_tool: Tool) -> RunnableLike:
+            @create_tool(
+                mcp_tool.name,
+                description=mcp_tool.description or f"MCP tool: {mcp_tool.name}",
+                args_schema=mcp_tool.inputSchema,
+            )
+            async def tool_wrapper(**kwargs):
+                """Execute MCP tool with fresh session and authentication."""
+                logger.debug(f"Invoking MCP tool {mcp_tool.name} with fresh session")
+
+                connection = _create_fresh_connection()
+                client = MultiServerMCPClient({function.name: connection})
+
+                try:
+                    async with client.session(function.name) as session:
+                        return await session.call_tool(mcp_tool.name, kwargs)
+                except Exception as e:
+                    logger.error(f"MCP tool {mcp_tool.name} failed: {e}")
+                    raise
+
+            return as_human_in_the_loop(tool_wrapper, function)
+
+        return [_create_tool_wrapper(tool) for tool in mcp_tools]
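With the new `connection` field on `McpFunctionModel`, `create_mcp_tools` takes the UC Connection path (OAuth via `DatabricksOAuthClientProvider`) instead of building headers for `MultiServerMCPClient`. A hedged sketch of wiring this up in Python; the exact required fields of `McpFunctionModel` and `ConnectionModel` are not shown in this diff, so the constructor arguments below are assumptions:

```python
# Sketch under assumptions: McpFunctionModel/ConnectionModel accept these kwargs.
from dao_ai.config import ConnectionModel, McpFunctionModel, TransportType
from dao_ai.tools.mcp import create_mcp_tools

github_connection = ConnectionModel(name="github_u2m_connection")  # hypothetical UC connection

github_mcp = McpFunctionModel(
    name="github_mcp",
    transport=TransportType.STREAMABLE_HTTP,
    connection=github_connection,
    # url may be omitted; it is then derived as
    # {workspace_host}/api/2.0/mcp/external/{connection_name}
)

tools = create_mcp_tools(github_mcp)  # LangChain tools, wrapped with human-in-the-loop
```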
dao_ai/tools/slack.py
ADDED

@@ -0,0 +1,136 @@
+from typing import Any, Callable, Optional
+
+from databricks.sdk.service.serving import ExternalFunctionRequestHttpMethod
+from langchain_core.tools import tool
+from loguru import logger
+from requests import Response
+
+from dao_ai.config import ConnectionModel
+
+
+def _find_channel_id_by_name(
+    connection: ConnectionModel, channel_name: str
+) -> Optional[str]:
+    """
+    Find a Slack channel ID by channel name using the conversations.list API.
+
+    Based on: https://docs.databricks.com/aws/en/generative-ai/agent-framework/slack-agent
+
+    Args:
+        connection: ConnectionModel with workspace_client
+        channel_name: Name of the Slack channel (with or without '#' prefix)
+
+    Returns:
+        Channel ID if found, None otherwise
+    """
+    # Remove '#' prefix if present
+    clean_name = channel_name.lstrip("#")
+
+    logger.debug(f"Looking up Slack channel ID for channel name: {clean_name}")
+
+    try:
+        # Call Slack API to list conversations
+        response: Response = connection.workspace_client.serving_endpoints.http_request(
+            conn=connection.name,
+            method=ExternalFunctionRequestHttpMethod.GET,
+            path="/api/conversations.list",
+        )
+
+        if response.status_code != 200:
+            logger.error(f"Failed to list Slack channels: {response.text}")
+            return None
+
+        # Parse response
+        data = response.json()
+
+        if not data.get("ok"):
+            logger.error(f"Slack API returned error: {data.get('error')}")
+            return None
+
+        # Search for channel by name
+        channels = data.get("channels", [])
+        for channel in channels:
+            if channel.get("name") == clean_name:
+                channel_id = channel.get("id")
+                logger.debug(
+                    f"Found channel ID '{channel_id}' for channel name '{clean_name}'"
+                )
+                return channel_id
+
+        logger.warning(f"Channel '{clean_name}' not found in Slack workspace")
+        return None
+
+    except Exception as e:
+        logger.error(f"Error looking up Slack channel: {e}")
+        return None
+
+
+def create_send_slack_message_tool(
+    connection: ConnectionModel | dict[str, Any],
+    channel_id: Optional[str] = None,
+    channel_name: Optional[str] = None,
+    name: Optional[str] = None,
+    description: Optional[str] = None,
+) -> Callable[[str], Any]:
+    """
+    Create a tool that sends a message to a Slack channel.
+
+    Args:
+        connection: Unity Catalog connection to Slack (ConnectionModel or dict)
+        channel_id: Slack channel ID (e.g., 'C1234567890'). If not provided, channel_name is used.
+        channel_name: Slack channel name (e.g., 'general' or '#general'). Used to lookup channel_id if not provided.
+        name: Custom tool name (default: 'send_slack_message')
+        description: Custom tool description
+
+    Returns:
+        A tool function that sends messages to the specified Slack channel
+
+    Based on: https://docs.databricks.com/aws/en/generative-ai/agent-framework/slack-agent
+    """
+    logger.debug("create_send_slack_message_tool")
+
+    # Validate inputs
+    if channel_id is None and channel_name is None:
+        raise ValueError("Either channel_id or channel_name must be provided")
+
+    # Convert connection dict to ConnectionModel if needed
+    if isinstance(connection, dict):
+        connection = ConnectionModel(**connection)
+
+    # Look up channel_id from channel_name if needed
+    if channel_id is None and channel_name is not None:
+        logger.debug(f"Looking up channel_id for channel_name: {channel_name}")
+        channel_id = _find_channel_id_by_name(connection, channel_name)
+        if channel_id is None:
+            raise ValueError(f"Could not find Slack channel with name '{channel_name}'")
+        logger.debug(
+            f"Resolved channel_name '{channel_name}' to channel_id '{channel_id}'"
+        )
+
+    if name is None:
+        name = "send_slack_message"
+
+    if description is None:
+        description = "Send a message to a Slack channel"
+
+    @tool(
+        name_or_callable=name,
+        description=description,
+    )
+    def send_slack_message(text: str) -> str:
+        response: Response = connection.workspace_client.serving_endpoints.http_request(
+            conn=connection.name,
+            method=ExternalFunctionRequestHttpMethod.POST,
+            path="/api/chat.postMessage",
+            json={"channel": channel_id, "text": text},
+        )
+
+        if response.status_code == 200:
+            return "Successful request sent to Slack: " + response.text
+        else:
+            return (
+                "Encountered failure when executing request. Message from Call: "
+                + response.text
+            )
+
+    return send_slack_message
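The new Slack tool resolves a channel ID (given directly or looked up by name via the UC HTTP connection) and then posts through `/api/chat.postMessage`. A minimal usage sketch, assuming a UC connection named `slack_connection` already exists and that `ConnectionModel` accepts a `name` field:

```python
# Sketch only: the UC connection name and channel are hypothetical.
from dao_ai.config import ConnectionModel
from dao_ai.tools.slack import create_send_slack_message_tool

slack_connection = ConnectionModel(name="slack_connection")

send_message = create_send_slack_message_tool(
    connection=slack_connection,
    channel_name="#support",  # resolved to a channel ID via conversations.list
)

# The returned object is a LangChain tool; it can be invoked directly or handed to an agent.
result = send_message.invoke({"text": "Order 1234 has shipped."})
print(result)
```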
dao_ai/utils.py
CHANGED

@@ -43,6 +43,7 @@ def get_installed_packages() -> dict[str, str]:
     packages: Sequence[str] = [
         f"databricks-agents=={version('databricks-agents')}",
         f"databricks-langchain=={version('databricks-langchain')}",
+        f"databricks-mcp=={version('databricks-mcp')}",
         f"databricks-sdk[openai]=={version('databricks-sdk')}",
         f"duckduckgo-search=={version('duckduckgo-search')}",
         f"langchain=={version('langchain')}",

@@ -56,11 +57,14 @@
         f"langgraph-swarm=={version('langgraph-swarm')}",
         f"langmem=={version('langmem')}",
         f"loguru=={version('loguru')}",
+        f"mcp=={version('mcp')}",
         f"mlflow=={version('mlflow')}",
+        f"nest-asyncio=={version('nest-asyncio')}",
         f"openevals=={version('openevals')}",
         f"openpyxl=={version('openpyxl')}",
         f"psycopg[binary,pool]=={version('psycopg')}",
         f"pydantic=={version('pydantic')}",
+        f"pyyaml=={version('pyyaml')}",
         f"unitycatalog-ai[databricks]=={version('unitycatalog-ai')}",
         f"unitycatalog-langchain[databricks]=={version('unitycatalog-langchain')}",
     ]
{dao_ai-0.0.22.dist-info → dao_ai-0.0.24.dist-info}/METADATA
CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: dao-ai
-Version: 0.0.22
+Version: 0.0.24
 Summary: DAO AI: A modular, multi-agent orchestration framework for complex AI workflows. Supports agent handoff, tool integration, and dynamic configuration via YAML.
 Project-URL: Homepage, https://github.com/natefleming/dao-ai
 Project-URL: Documentation, https://natefleming.github.io/dao-ai

@@ -24,27 +24,28 @@ Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
 Classifier: Topic :: Software Development :: Libraries :: Python Modules
 Classifier: Topic :: System :: Distributed Computing
 Requires-Python: >=3.12
-Requires-Dist: databricks-agents>=1.
-Requires-Dist: databricks-langchain>=0.8.
+Requires-Dist: databricks-agents>=1.7.0
+Requires-Dist: databricks-langchain>=0.8.1
+Requires-Dist: databricks-mcp>=0.3.0
 Requires-Dist: databricks-sdk[openai]>=0.67.0
 Requires-Dist: duckduckgo-search>=8.0.2
 Requires-Dist: grandalf>=0.8
 Requires-Dist: langchain-mcp-adapters>=0.1.10
 Requires-Dist: langchain-tavily>=0.2.11
 Requires-Dist: langchain>=0.3.27
-Requires-Dist: langgraph-checkpoint-postgres>=2.0.
+Requires-Dist: langgraph-checkpoint-postgres>=2.0.25
 Requires-Dist: langgraph-supervisor>=0.0.29
 Requires-Dist: langgraph-swarm>=0.0.14
-Requires-Dist: langgraph>=0.6.
+Requires-Dist: langgraph>=0.6.10
 Requires-Dist: langmem>=0.0.29
 Requires-Dist: loguru>=0.7.3
-Requires-Dist: mcp>=1.
+Requires-Dist: mcp>=1.17.0
 Requires-Dist: mlflow>=3.4.0
 Requires-Dist: nest-asyncio>=1.6.0
 Requires-Dist: openevals>=0.0.19
 Requires-Dist: openpyxl>=3.1.5
 Requires-Dist: psycopg[binary,pool]>=3.2.9
-Requires-Dist: pydantic>=2.
+Requires-Dist: pydantic>=2.12.0
 Requires-Dist: python-dotenv>=1.1.0
 Requires-Dist: pyyaml>=6.0.2
 Requires-Dist: rich>=14.0.0

@@ -653,7 +654,7 @@ test:
 #### 4. MCP (Model Context Protocol) Tools (`type: mcp`)
 MCP tools allow interaction with external services that implement the Model Context Protocol, supporting both HTTP and stdio transports.

-**Configuration Example:**
+**Configuration Example (Direct URL):**
 ```yaml
 tools:
   weather_tool_mcp:

@@ -664,8 +665,30 @@ test:
     transport: streamable_http
     url: http://localhost:8000/mcp
 ```
+
+**Configuration Example (Unity Catalog Connection):**
+MCP tools can also use Unity Catalog Connections for secure, governed access with on-behalf-of-user capabilities. The connection provides OAuth authentication, while the URL specifies the endpoint:
+```yaml
+resources:
+  connections:
+    github_connection:
+      name: github_u2m_connection # UC Connection name
+
+tools:
+  github_mcp:
+    name: github_mcp
+    function:
+      type: mcp
+      name: github_mcp
+      transport: streamable_http
+      url: https://workspace.databricks.com/api/2.0/mcp/external/github_u2m_connection # MCP endpoint URL
+      connection: *github_connection # UC Connection provides OAuth authentication
+```
+
 **Development:**
-Ensure the MCP service is running and accessible at the specified URL or command.
+- **For direct URL connections**: Ensure the MCP service is running and accessible at the specified URL or command. Provide OAuth credentials (client_id, client_secret) or PAT for authentication.
+- **For UC Connection**: URL is required to specify the endpoint. The connection provides OAuth authentication via the workspace client. Ensure the connection is configured in Unity Catalog with appropriate MCP scopes (`mcp.genie`, `mcp.functions`, `mcp.vectorsearch`, `mcp.external`).
+- The framework will handle the MCP protocol communication automatically, including session management and authentication.

 ### Configuring New Agents

{dao_ai-0.0.22.dist-info → dao_ai-0.0.24.dist-info}/RECORD
CHANGED

@@ -3,16 +3,16 @@ dao_ai/agent_as_code.py,sha256=kPSeDz2-1jRaed1TMs4LA3VECoyqe9_Ed2beRLB9gXQ,472
 dao_ai/catalog.py,sha256=sPZpHTD3lPx4EZUtIWeQV7VQM89WJ6YH__wluk1v2lE,4947
 dao_ai/chat_models.py,sha256=uhwwOTeLyHWqoTTgHrs4n5iSyTwe4EQcLKnh3jRxPWI,8626
 dao_ai/cli.py,sha256=Aez2TQW3Q8Ho1IaIkRggt0NevDxAAVPjXkePC5GPJF0,20429
-dao_ai/config.py,sha256=
+dao_ai/config.py,sha256=j9SAdf7UHSoS2pLos-oypJNDPS48A2rRq55OEW1wsMI,56755
 dao_ai/graph.py,sha256=APYc2y3cig4P52X4sOHSFSZNK8j5EtEPJLFwWeJ3KQQ,7956
 dao_ai/guardrails.py,sha256=4TKArDONRy8RwHzOT1plZ1rhy3x9GF_aeGpPCRl6wYA,4016
 dao_ai/messages.py,sha256=xl_3-WcFqZKCFCiov8sZOPljTdM3gX3fCHhxq-xFg2U,7005
 dao_ai/models.py,sha256=8r8GIG3EGxtVyWsRNI56lVaBjiNrPkzh4HdwMZRq8iw,31689
 dao_ai/nodes.py,sha256=SSuFNTXOdFaKg_aX-yUkQO7fM9wvNGu14lPXKDapU1U,8461
-dao_ai/prompts.py,sha256=
+dao_ai/prompts.py,sha256=7Hcstmv514P0s9s-TVoIlbkDV2XXOphGCW6gcPeyUYE,1628
 dao_ai/state.py,sha256=_lF9krAYYjvFDMUwZzVKOn0ZnXKcOrbjWKdre0C5B54,1137
 dao_ai/types.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-dao_ai/utils.py,sha256=
+dao_ai/utils.py,sha256=yXgqHrYdO5qDxgxUs2G5XJeLFgwg8D0BIJvbFkqSbhs,4519
 dao_ai/vector_search.py,sha256=jlaFS_iizJ55wblgzZmswMM3UOL-qOp2BGJc0JqXYSg,2839
 dao_ai/hooks/__init__.py,sha256=LlHGIuiZt6vGW8K5AQo1XJEkBP5vDVtMhq0IdjcLrD4,417
 dao_ai/hooks/core.py,sha256=ZShHctUSoauhBgdf1cecy9-D7J6-sGn-pKjuRMumW5U,6663

@@ -22,19 +22,20 @@ dao_ai/memory/core.py,sha256=DnEjQO3S7hXr3CDDd7C2eE7fQUmcCS_8q9BXEgjPH3U,4271
 dao_ai/memory/postgres.py,sha256=vvI3osjx1EoU5GBA6SCUstTBKillcmLl12hVgDMjfJY,15346
 dao_ai/providers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 dao_ai/providers/base.py,sha256=-fjKypCOk28h6vioPfMj9YZSw_3Kcbi2nMuAyY7vX9k,1383
-dao_ai/providers/databricks.py,sha256=
-dao_ai/tools/__init__.py,sha256=
+dao_ai/providers/databricks.py,sha256=1BPYQxi4-Z4I1ygZYlKV8ycdxZTtWNXplySToayHCEI,43096
+dao_ai/tools/__init__.py,sha256=G5-5Yi6zpQOH53b5IzLdtsC6g0Ep6leI5GxgxOmgw7Q,1203
 dao_ai/tools/agent.py,sha256=WbQnyziiT12TLMrA7xK0VuOU029tdmUBXbUl-R1VZ0Q,1886
 dao_ai/tools/core.py,sha256=Kei33S8vrmvPOAyrFNekaWmV2jqZ-IPS1QDSvU7RZF0,1984
 dao_ai/tools/genie.py,sha256=8HSOCzSg6PlBzBYXMmNfUnl-LO03p3Ki3fxLPm_dhPg,15051
 dao_ai/tools/human_in_the_loop.py,sha256=yk35MO9eNETnYFH-sqlgR-G24TrEgXpJlnZUustsLkI,3681
-dao_ai/tools/mcp.py,sha256=
+dao_ai/tools/mcp.py,sha256=CYv59yn-LIY11atUgNtN2W6vR7C6Qyo7-rvPcVJnXVk,7461
 dao_ai/tools/python.py,sha256=XcQiTMshZyLUTVR5peB3vqsoUoAAy8gol9_pcrhddfI,1831
+dao_ai/tools/slack.py,sha256=SCvyVcD9Pv_XXPXePE_fSU1Pd8VLTEkKDLvoGTZWy2Y,4775
 dao_ai/tools/time.py,sha256=Y-23qdnNHzwjvnfkWvYsE7PoWS1hfeKy44tA7sCnNac,8759
 dao_ai/tools/unity_catalog.py,sha256=uX_h52BuBAr4c9UeqSMI7DNz3BPRLeai5tBVW4sJqRI,13113
 dao_ai/tools/vector_search.py,sha256=EDYQs51zIPaAP0ma1D81wJT77GQ-v-cjb2XrFVWfWdg,2621
-dao_ai-0.0.
-dao_ai-0.0.
-dao_ai-0.0.
-dao_ai-0.0.
-dao_ai-0.0.
+dao_ai-0.0.24.dist-info/METADATA,sha256=69qYBqdZg8tZF1ni90RBrSG9OAmE4jXdf5lec6U0TL8,42639
+dao_ai-0.0.24.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+dao_ai-0.0.24.dist-info/entry_points.txt,sha256=Xa-UFyc6gWGwMqMJOt06ZOog2vAfygV_DSwg1AiP46g,43
+dao_ai-0.0.24.dist-info/licenses/LICENSE,sha256=YZt3W32LtPYruuvHE9lGk2bw6ZPMMJD8yLrjgHybyz4,1069
+dao_ai-0.0.24.dist-info/RECORD,,

{dao_ai-0.0.22.dist-info → dao_ai-0.0.24.dist-info}/WHEEL
File without changes

{dao_ai-0.0.22.dist-info → dao_ai-0.0.24.dist-info}/entry_points.txt
File without changes

{dao_ai-0.0.22.dist-info → dao_ai-0.0.24.dist-info}/licenses/LICENSE
File without changes