dao-ai 0.1.5__py3-none-any.whl → 0.1.7__py3-none-any.whl

This diff shows the contents of two publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the changes between those versions as they appear in the public registry.
@@ -0,0 +1,129 @@
+ """
+ Tool selector middleware for intelligently filtering tools before LLM calls.
+
+ This middleware uses an LLM to select relevant tools from a large set, improving
+ performance and accuracy by reducing context size and improving focus.
+ """
+
+ from __future__ import annotations
+
+ from typing import Any
+
+ from langchain.agents.middleware import LLMToolSelectorMiddleware
+ from langchain_core.language_models import LanguageModelLike
+ from loguru import logger
+
+ from dao_ai.config import ToolModel
+
+
+ def create_llm_tool_selector_middleware(
+     model: LanguageModelLike,
+     max_tools: int = 3,
+     always_include: list[str | ToolModel | dict[str, Any]] | None = None,
+ ) -> LLMToolSelectorMiddleware:
+     """
+     Create an LLMToolSelectorMiddleware for intelligent tool selection.
+
+     Uses an LLM to analyze the current query and select the most relevant tools
+     before calling the main model. This is particularly useful for agents with
+     many tools (10+) where most aren't relevant for any given query.
+
+     Benefits:
+     - Reduces token usage by filtering irrelevant tools
+     - Improves model focus and accuracy
+     - Optimizes cost for agents with large tool sets
+     - Maintains context window efficiency
+
+     Args:
+         model: The LLM to use for tool selection. Typically a smaller, faster
+             model like "gpt-4o-mini" or similar.
+         max_tools: Maximum number of tools to select for each query.
+             Default 3. Adjust based on your use case - higher values
+             increase context but improve tool coverage.
+         always_include: List of tools that should always be included regardless
+             of the LLM's selection. Can be:
+             - str: Tool name
+             - ToolModel: Full tool configuration
+             - dict: Tool configuration dictionary
+             Use this for critical tools that should always be available.
+
+     Returns:
+         LLMToolSelectorMiddleware configured with the specified parameters
+
+     Example:
+         from dao_ai.middleware import create_llm_tool_selector_middleware
+         from dao_ai.llms import create_llm
+
+         # Use a fast, cheap model for tool selection
+         selector_llm = create_llm("databricks-gpt-4o-mini")
+
+         middleware = create_llm_tool_selector_middleware(
+             model=selector_llm,
+             max_tools=3,
+             always_include=["search_web"],  # Always include search
+         )
+
+     Use Cases:
+     - Large tool sets (10+ tools) where most are specialized
+     - Cost optimization by reducing tokens in main model calls
+     - Improved accuracy by reducing tool confusion
+     - Dynamic tool filtering based on query relevance
+
+     Note:
+         The selector model makes an additional LLM call for each agent turn.
+         Choose a fast, inexpensive model to minimize latency and cost overhead.
+     """
+     # Extract tool names from always_include
+     always_include_names: list[str] = []
+     if always_include:
+         always_include_names = _resolve_tool_names(always_include)
+
+     logger.debug(
+         "Creating LLM tool selector middleware",
+         max_tools=max_tools,
+         always_include_count=len(always_include_names),
+         always_include=always_include_names,
+     )
+
+     return LLMToolSelectorMiddleware(
+         model=model,
+         max_tools=max_tools,
+         always_include=always_include_names if always_include_names else None,
+     )
+
+
+ def _resolve_tool_names(tools: list[str | ToolModel | dict[str, Any]]) -> list[str]:
+     """
+     Extract tool names from a list of tool specifications.
+
+     Args:
+         tools: List of tool specifications (strings, ToolModels, or dicts)
+
+     Returns:
+         List of tool names as strings
+     """
+     names: list[str] = []
+
+     for tool_spec in tools:
+         if isinstance(tool_spec, str):
+             # Simple string tool name
+             names.append(tool_spec)
+         elif isinstance(tool_spec, ToolModel):
+             # ToolModel - use its name
+             names.append(tool_spec.name)
+         elif isinstance(tool_spec, dict):
+             # Dictionary - try to extract name
+             if "name" in tool_spec:
+                 names.append(tool_spec["name"])
+             else:
+                 logger.warning(
+                     "Tool dict missing 'name' field, skipping",
+                     tool_spec=tool_spec,
+                 )
+         else:
+             logger.warning(
+                 "Unknown tool specification type, skipping",
+                 tool_spec_type=type(tool_spec).__name__,
+             )
+
+     return names
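
The new middleware is meant to sit in front of an agent's main model call and prune the tool list each turn. A minimal wiring sketch under stated assumptions: that LangChain v1's create_agent accepts a middleware list, that create_llm resolves Databricks-served endpoints as in the docstring example above, and that the endpoint names and tool list below are purely illustrative placeholders.

# Sketch only: endpoint names and the all_tools list are placeholders, and the
# create_agent(middleware=[...]) wiring assumes LangChain v1's agent API.
from langchain.agents import create_agent

from dao_ai.llms import create_llm
from dao_ai.middleware import create_llm_tool_selector_middleware

selector_llm = create_llm("databricks-gpt-4o-mini")  # small, fast selector model

tool_selector = create_llm_tool_selector_middleware(
    model=selector_llm,
    max_tools=3,                    # at most 3 tools offered to the main model per turn
    always_include=["search_web"],  # critical tool that is never filtered out
)

all_tools: list = []  # populate with the agent's full (large) tool set

agent = create_agent(
    model=create_llm("databricks-claude-sonnet"),  # illustrative main-model endpoint
    tools=all_tools,
    middleware=[tool_selector],     # selector runs before each main-model call
)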
dao_ai/prompts.py CHANGED
@@ -61,19 +61,14 @@ def make_prompt(
      @dynamic_prompt
      def dynamic_system_prompt(request: ModelRequest) -> str:
          """Generate dynamic system prompt based on runtime context."""
-         # Get parameters from runtime context
+         # Initialize parameters for template variables
          params: dict[str, Any] = {
              input_variable: "" for input_variable in prompt_template.input_variables
          }

-         # Access context from runtime
+         # Apply context fields as template parameters
          context: Context = request.runtime.context
          if context:
-             if context.user_id and "user_id" in params:
-                 params["user_id"] = context.user_id
-             if context.thread_id and "thread_id" in params:
-                 params["thread_id"] = context.thread_id
-             # Apply all context fields as template parameters
              context_dict = context.model_dump()
              for key, value in context_dict.items():
                  if key in params and value is not None:
@@ -89,56 +84,3 @@ def make_prompt(
          return formatted_prompt

      return dynamic_system_prompt
-
-
- def create_prompt_middleware(
-     base_system_prompt: Optional[str | PromptModel],
- ) -> AgentMiddleware | None:
-     """
-     Create a dynamic prompt middleware from configuration.
-
-     This always returns an AgentMiddleware suitable for use with
-     LangChain v1's middleware system.
-
-     Args:
-         base_system_prompt: The system prompt string or PromptModel
-
-     Returns:
-         An AgentMiddleware created by @dynamic_prompt, or None if no prompt
-     """
-     if not base_system_prompt:
-         return None
-
-     # Extract template string from PromptModel or use string directly
-     template_str: str
-     if isinstance(base_system_prompt, PromptModel):
-         template_str = base_system_prompt.template
-     else:
-         template_str = base_system_prompt
-
-     prompt_template: PromptTemplate = PromptTemplate.from_template(template_str)
-
-     @dynamic_prompt
-     def prompt_middleware(request: ModelRequest) -> str:
-         """Generate system prompt based on runtime context."""
-         # Get parameters from runtime context
-         params: dict[str, Any] = {
-             input_variable: "" for input_variable in prompt_template.input_variables
-         }
-
-         # Access context from runtime
-         context: Context = request.runtime.context
-         if context:
-             # Apply all context fields as template parameters
-             context_dict = context.model_dump()
-             for key, value in context_dict.items():
-                 if key in params and value is not None:
-                     params[key] = value
-
-         # Format the prompt
-         formatted_prompt: str = prompt_template.format(**params)
-         logger.trace("Formatted dynamic prompt with context")
-
-         return formatted_prompt
-
-     return prompt_middleware
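
The rewritten dynamic_system_prompt above drops the per-field user_id/thread_id branches and instead maps every Context field onto any matching template variable. A small standalone sketch of that mapping, using a stand-in pydantic Context model (the real dao_ai Context may carry different fields):

# Stand-in Context model for illustration; dao_ai's actual Context class may differ.
from typing import Any, Optional

from langchain_core.prompts import PromptTemplate
from pydantic import BaseModel


class Context(BaseModel):
    user_id: Optional[str] = None
    thread_id: Optional[str] = None
    store_num: Optional[str] = None


prompt_template = PromptTemplate.from_template(
    "You are a retail assistant for store {store_num}. Current user: {user_id}."
)
context = Context(user_id="u-123", store_num="042")

# Every template variable starts out empty...
params: dict[str, Any] = {name: "" for name in prompt_template.input_variables}

# ...and any matching, non-None context field fills it in, with no per-field branches.
for key, value in context.model_dump().items():
    if key in params and value is not None:
        params[key] = value

print(prompt_template.format(**params))
# You are a retail assistant for store 042. Current user: u-123.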
dao_ai/tools/__init__.py CHANGED
@@ -4,7 +4,7 @@ from dao_ai.tools.agent import create_agent_endpoint_tool
  from dao_ai.tools.core import create_tools, say_hello_tool
  from dao_ai.tools.email import create_send_email_tool
  from dao_ai.tools.genie import create_genie_tool
- from dao_ai.tools.mcp import create_mcp_tools
+ from dao_ai.tools.mcp import MCPToolInfo, create_mcp_tools, list_mcp_tools
  from dao_ai.tools.memory import create_search_memory_tool
  from dao_ai.tools.python import create_factory_tool, create_python_tool
  from dao_ai.tools.search import create_search_tool
@@ -30,6 +30,8 @@ __all__ = [
      "create_genie_tool",
      "create_hooks",
      "create_mcp_tools",
+     "list_mcp_tools",
+     "MCPToolInfo",
      "create_python_tool",
      "create_search_memory_tool",
      "create_search_tool",
dao_ai/tools/mcp.py CHANGED
@@ -7,10 +7,16 @@ MCP SDK and langchain-mcp-adapters library.
  For compatibility with Databricks APIs, we use manual tool wrappers
  that give us full control over the response format.

+ Public API:
+ - list_mcp_tools(): List available tools from an MCP server (for discovery/UI)
+ - create_mcp_tools(): Create LangChain tools for agent execution
+
  Reference: https://docs.langchain.com/oss/python/langchain/mcp
  """

  import asyncio
+ import fnmatch
+ from dataclasses import dataclass
  from typing import Any, Sequence

  from langchain_core.runnables.base import RunnableLike
@@ -26,6 +32,117 @@ from dao_ai.config import (
  )


+ @dataclass
+ class MCPToolInfo:
+     """
+     Information about an MCP tool for display and selection.
+
+     This is a simplified representation of an MCP tool that contains
+     only the information needed for UI display and tool selection.
+     It's designed to be easily serializable for use in web UIs.
+
+     Attributes:
+         name: The unique identifier/name of the tool
+         description: Human-readable description of what the tool does
+         input_schema: JSON Schema describing the tool's input parameters
+     """
+
+     name: str
+     description: str | None
+     input_schema: dict[str, Any]
+
+     def to_dict(self) -> dict[str, Any]:
+         """Convert to dictionary for JSON serialization."""
+         return {
+             "name": self.name,
+             "description": self.description,
+             "input_schema": self.input_schema,
+         }
+
+
+ def _matches_pattern(tool_name: str, patterns: list[str]) -> bool:
+     """
+     Check if tool name matches any of the provided patterns.
+
+     Supports glob patterns:
+     - * matches any characters
+     - ? matches single character
+     - [abc] matches any char in set
+     - [!abc] matches any char NOT in set
+
+     Args:
+         tool_name: Name of the tool to check
+         patterns: List of exact names or glob patterns
+
+     Returns:
+         True if tool name matches any pattern
+
+     Examples:
+         >>> _matches_pattern("query_sales", ["query_*"])
+         True
+         >>> _matches_pattern("list_tables", ["query_*"])
+         False
+         >>> _matches_pattern("tool_a", ["tool_?"])
+         True
+     """
+     for pattern in patterns:
+         if fnmatch.fnmatch(tool_name, pattern):
+             return True
+     return False
+
+
+ def _should_include_tool(
+     tool_name: str,
+     include_tools: list[str] | None,
+     exclude_tools: list[str] | None,
+ ) -> bool:
+     """
+     Determine if a tool should be included based on include/exclude filters.
+
+     Logic:
+     1. If exclude_tools specified and tool matches: EXCLUDE (highest priority)
+     2. If include_tools specified and tool matches: INCLUDE
+     3. If include_tools specified and tool doesn't match: EXCLUDE
+     4. If no filters specified: INCLUDE (default)
+
+     Args:
+         tool_name: Name of the tool
+         include_tools: Optional list of tools/patterns to include
+         exclude_tools: Optional list of tools/patterns to exclude
+
+     Returns:
+         True if tool should be included
+
+     Examples:
+         >>> _should_include_tool("query_sales", ["query_*"], None)
+         True
+         >>> _should_include_tool("drop_table", None, ["drop_*"])
+         False
+         >>> _should_include_tool("query_sales", ["query_*"], ["*_sales"])
+         False  # exclude takes precedence
+     """
+     # Exclude has highest priority
+     if exclude_tools and _matches_pattern(tool_name, exclude_tools):
+         logger.debug("Tool excluded by exclude_tools", tool_name=tool_name)
+         return False
+
+     # If include list exists, tool must match it
+     if include_tools:
+         if _matches_pattern(tool_name, include_tools):
+             logger.debug("Tool included by include_tools", tool_name=tool_name)
+             return True
+         else:
+             logger.debug(
+                 "Tool not in include_tools",
+                 tool_name=tool_name,
+                 include_patterns=include_tools,
+             )
+             return False
+
+     # Default: include all tools
+     return True
+
+
  def _build_connection_config(
      function: McpFunctionModel,
  ) -> dict[str, Any]:
@@ -124,69 +241,33 @@ def _extract_text_content(result: CallToolResult) -> str:
      return "\n".join(text_parts)


- def create_mcp_tools(
-     function: McpFunctionModel,
- ) -> Sequence[RunnableLike]:
+ def _fetch_tools_from_server(function: McpFunctionModel) -> list[Tool]:
      """
-     Create tools for invoking Databricks MCP functions.
+     Fetch raw MCP tools from the server.

-     Supports both direct MCP connections and UC Connection-based MCP access.
-     Uses manual tool wrappers to ensure response format compatibility with
-     Databricks APIs (which reject extra fields in tool results).
-
-     Based on: https://docs.databricks.com/aws/en/generative-ai/mcp/external-mcp
+     This is the core async operation that connects to the MCP server
+     and retrieves the list of available tools.

      Args:
          function: The MCP function model configuration.

      Returns:
-         A sequence of LangChain tools that can be used by agents.
-     """
-     mcp_url = function.mcp_url
-     logger.debug("Creating MCP tools", mcp_url=mcp_url)
+         List of raw MCP Tool objects from the server.

+     Raises:
+         RuntimeError: If connection to MCP server fails.
+     """
      connection_config = _build_connection_config(function)
-
-     if function.connection:
-         logger.debug(
-             "Using UC Connection for MCP",
-             connection_name=function.connection.name,
-             mcp_url=mcp_url,
-         )
-     else:
-         logger.debug(
-             "Using direct connection for MCP",
-             transport=function.transport,
-             mcp_url=mcp_url,
-         )
-
-     # Create client to list available tools
      client = MultiServerMCPClient({"mcp_function": connection_config})

-     async def _list_tools() -> list[Tool]:
-         """List available MCP tools from the server."""
+     async def _list_tools_async() -> list[Tool]:
+         """Async helper to list tools from MCP server."""
          async with client.session("mcp_function") as session:
              result = await session.list_tools()
              return result.tools if hasattr(result, "tools") else list(result)

      try:
-         mcp_tools: list[Tool] = asyncio.run(_list_tools())
-
-         # Log discovered tools
-         logger.info(
-             "Discovered MCP tools",
-             tools_count=len(mcp_tools),
-             mcp_url=mcp_url,
-         )
-         for mcp_tool in mcp_tools:
-             logger.debug(
-                 "MCP tool discovered",
-                 tool_name=mcp_tool.name,
-                 tool_description=(
-                     mcp_tool.description[:100] if mcp_tool.description else None
-                 ),
-             )
-
+         return asyncio.run(_list_tools_async())
      except Exception as e:
          if function.connection:
             logger.error(
@@ -210,6 +291,216 @@
                  f"and URL '{function.url}': {e}"
              ) from e

+
+ def list_mcp_tools(
+     function: McpFunctionModel,
+     apply_filters: bool = True,
+ ) -> list[MCPToolInfo]:
+     """
+     List available tools from an MCP server.
+
+     This function connects to an MCP server and returns information about
+     all available tools. It's designed for:
+     - Tool discovery and exploration
+     - UI-based tool selection (e.g., in DAO AI Builder)
+     - Debugging and validation of MCP configurations
+
+     The returned MCPToolInfo objects contain all information needed to
+     display tools in a UI and allow users to select which tools to use.
+
+     Args:
+         function: The MCP function model configuration containing:
+             - Connection details (url, connection, headers, etc.)
+             - Optional filtering (include_tools, exclude_tools)
+         apply_filters: Whether to apply include_tools/exclude_tools filters.
+             Set to False to get the complete list of available tools
+             regardless of filter configuration. Default True.
+
+     Returns:
+         List of MCPToolInfo objects describing available tools.
+         Each contains name, description, and input_schema.
+
+     Raises:
+         RuntimeError: If connection to MCP server fails.
+
+     Example:
+         # List all tools from a DBSQL MCP server
+         from dao_ai.config import McpFunctionModel
+         from dao_ai.tools.mcp import list_mcp_tools
+
+         function = McpFunctionModel(sql=True)
+         tools = list_mcp_tools(function)
+
+         for tool in tools:
+             print(f"{tool.name}: {tool.description}")
+
+         # Get unfiltered list (ignore include_tools/exclude_tools)
+         all_tools = list_mcp_tools(function, apply_filters=False)
+
+     Note:
+         For creating executable LangChain tools, use create_mcp_tools() instead.
+         This function is for discovery/display purposes only.
+     """
+     mcp_url = function.mcp_url
+     logger.debug("Listing MCP tools", mcp_url=mcp_url, apply_filters=apply_filters)
+
+     # Log connection type
+     if function.connection:
+         logger.debug(
+             "Using UC Connection for MCP",
+             connection_name=function.connection.name,
+             mcp_url=mcp_url,
+         )
+     else:
+         logger.debug(
+             "Using direct connection for MCP",
+             transport=function.transport,
+             mcp_url=mcp_url,
+         )
+
+     # Fetch tools from server
+     mcp_tools: list[Tool] = _fetch_tools_from_server(function)
+
+     # Log discovered tools
+     logger.info(
+         "Discovered MCP tools from server",
+         tools_count=len(mcp_tools),
+         tool_names=[t.name for t in mcp_tools],
+         mcp_url=mcp_url,
+     )
+
+     # Apply filtering if requested and configured
+     if apply_filters and (function.include_tools or function.exclude_tools):
+         original_count = len(mcp_tools)
+         mcp_tools = [
+             tool
+             for tool in mcp_tools
+             if _should_include_tool(
+                 tool.name,
+                 function.include_tools,
+                 function.exclude_tools,
+             )
+         ]
+         filtered_count = original_count - len(mcp_tools)
+
+         logger.info(
+             "Filtered MCP tools",
+             original_count=original_count,
+             filtered_count=filtered_count,
+             final_count=len(mcp_tools),
+             include_patterns=function.include_tools,
+             exclude_patterns=function.exclude_tools,
+         )
+
+     # Convert to MCPToolInfo for cleaner API
+     tool_infos: list[MCPToolInfo] = []
+     for mcp_tool in mcp_tools:
+         tool_info = MCPToolInfo(
+             name=mcp_tool.name,
+             description=mcp_tool.description,
+             input_schema=mcp_tool.inputSchema or {},
+         )
+         tool_infos.append(tool_info)
+
+         logger.debug(
+             "MCP tool available",
+             tool_name=mcp_tool.name,
+             tool_description=(
+                 mcp_tool.description[:100] if mcp_tool.description else None
+             ),
+         )
+
+     return tool_infos
+
+
+ def create_mcp_tools(
+     function: McpFunctionModel,
+ ) -> Sequence[RunnableLike]:
+     """
+     Create executable LangChain tools for invoking Databricks MCP functions.
+
+     Supports both direct MCP connections and UC Connection-based MCP access.
+     Uses manual tool wrappers to ensure response format compatibility with
+     Databricks APIs (which reject extra fields in tool results).
+
+     This function:
+     1. Fetches available tools from the MCP server
+     2. Applies include_tools/exclude_tools filters
+     3. Wraps each tool for LangChain agent execution
+
+     For tool discovery without creating executable tools, use list_mcp_tools().
+
+     Based on: https://docs.databricks.com/aws/en/generative-ai/mcp/external-mcp
+
+     Args:
+         function: The MCP function model configuration containing:
+             - Connection details (url, connection, headers, etc.)
+             - Optional filtering (include_tools, exclude_tools)
+
+     Returns:
+         A sequence of LangChain tools that can be used by agents.
+
+     Raises:
+         RuntimeError: If connection to MCP server fails.
+
+     Example:
+         from dao_ai.config import McpFunctionModel
+         from dao_ai.tools.mcp import create_mcp_tools
+
+         function = McpFunctionModel(sql=True)
+         tools = create_mcp_tools(function)
+
+         # Use tools in an agent
+         agent = create_agent(model=model, tools=tools)
+     """
+     mcp_url = function.mcp_url
+     logger.debug("Creating MCP tools", mcp_url=mcp_url)
+
+     # Fetch and filter tools using shared logic
+     # We need the raw Tool objects here, not MCPToolInfo
+     mcp_tools: list[Tool] = _fetch_tools_from_server(function)
+
+     # Log discovered tools
+     logger.info(
+         "Discovered MCP tools from server",
+         tools_count=len(mcp_tools),
+         tool_names=[t.name for t in mcp_tools],
+         mcp_url=mcp_url,
+     )
+
+     # Apply filtering if configured
+     if function.include_tools or function.exclude_tools:
+         original_count = len(mcp_tools)
+         mcp_tools = [
+             tool
+             for tool in mcp_tools
+             if _should_include_tool(
+                 tool.name,
+                 function.include_tools,
+                 function.exclude_tools,
+             )
+         ]
+         filtered_count = original_count - len(mcp_tools)
+
+         logger.info(
+             "Filtered MCP tools",
+             original_count=original_count,
+             filtered_count=filtered_count,
+             final_count=len(mcp_tools),
+             include_patterns=function.include_tools,
+             exclude_patterns=function.exclude_tools,
+         )
+
+     # Log final tool list
+     for mcp_tool in mcp_tools:
+         logger.debug(
+             "MCP tool available",
+             tool_name=mcp_tool.name,
+             tool_description=(
+                 mcp_tool.description[:100] if mcp_tool.description else None
+             ),
+         )
+
      def _create_tool_wrapper(mcp_tool: Tool) -> RunnableLike:
          """
          Create a LangChain tool wrapper for an MCP tool.
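
The filtering helpers added above give exclude_tools priority over include_tools, treat include_tools as an allow-list, and admit every tool when neither is set. A self-contained sketch of that precedence, re-implemented locally with fnmatch so it runs without dao_ai installed (the tool names are made up):

# Local re-implementation of the precedence used by _should_include_tool,
# for illustration only; within dao_ai the private helpers above do this work.
import fnmatch


def should_include(name: str, include: list[str] | None, exclude: list[str] | None) -> bool:
    if exclude and any(fnmatch.fnmatch(name, p) for p in exclude):
        return False  # exclude always wins
    if include:
        return any(fnmatch.fnmatch(name, p) for p in include)  # allow-list
    return True  # no filters configured: include everything


server_tools = ["query_sales", "query_inventory", "list_tables", "drop_table"]
include = ["query_*", "list_tables"]
exclude = ["*_inventory"]

selected = [t for t in server_tools if should_include(t, include, exclude)]
print(selected)  # ['query_sales', 'list_tables']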
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: dao-ai
- Version: 0.1.5
+ Version: 0.1.7
  Summary: DAO AI: A modular, multi-agent orchestration framework for complex AI workflows. Supports agent handoff, tool integration, and dynamic configuration via YAML.
  Project-URL: Homepage, https://github.com/natefleming/dao-ai
  Project-URL: Documentation, https://natefleming.github.io/dao-ai