dao-ai 0.1.9__py3-none-any.whl → 0.1.11__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- dao_ai/apps/__init__.py +24 -0
- dao_ai/{app_server.py → apps/handlers.py} +20 -39
- dao_ai/apps/model_serving.py +29 -0
- dao_ai/apps/resources.py +1072 -0
- dao_ai/apps/server.py +39 -0
- dao_ai/cli.py +51 -4
- dao_ai/config.py +34 -4
- dao_ai/memory/postgres.py +29 -4
- dao_ai/models.py +327 -370
- dao_ai/providers/databricks.py +62 -20
- dao_ai/tools/mcp.py +165 -68
- {dao_ai-0.1.9.dist-info → dao_ai-0.1.11.dist-info}/METADATA +2 -2
- {dao_ai-0.1.9.dist-info → dao_ai-0.1.11.dist-info}/RECORD +16 -13
- dao_ai/agent_as_code.py +0 -22
- {dao_ai-0.1.9.dist-info → dao_ai-0.1.11.dist-info}/WHEEL +0 -0
- {dao_ai-0.1.9.dist-info → dao_ai-0.1.11.dist-info}/entry_points.txt +0 -0
- {dao_ai-0.1.9.dist-info → dao_ai-0.1.11.dist-info}/licenses/LICENSE +0 -0
dao_ai/providers/databricks.py
CHANGED
@@ -327,7 +327,7 @@ class DatabricksProvider(ServiceProvider):
 raise FileNotFoundError(f"Code path does not exist: {path}")

 model_root_path: Path = Path(dao_ai.__file__).parent
-model_path: Path = model_root_path / "
+model_path: Path = model_root_path / "apps" / "model_serving.py"

 pip_requirements: Sequence[str] = config.app.pip_requirements

@@ -558,6 +558,16 @@ class DatabricksProvider(ServiceProvider):

 logger.info("Using workspace source path", source_path=source_path)

+# Get or create experiment for this app (for tracing and tracking)
+from mlflow.entities import Experiment
+
+experiment: Experiment = self.get_or_create_experiment(config)
+logger.info(
+    "Using MLflow experiment for app",
+    experiment_name=experiment.name,
+    experiment_id=experiment.experiment_id,
+)
+
 # Upload the configuration file to the workspace
 source_config_path: str | None = config.source_config_path
 if source_config_path:
@@ -596,22 +606,19 @@ class DatabricksProvider(ServiceProvider):
     "model_config.yaml exists in the app source directory."
 )

-# Generate and upload app.yaml
-
-
-
-
-
-
-
-
-
-
-
-  - name: DAO_AI_CONFIG_PATH
-    value: "model_config.yaml"
-"""
+# Generate and upload app.yaml with dynamically discovered resources
+from dao_ai.apps.resources import generate_app_yaml
+
+app_yaml_content: str = generate_app_yaml(
+    config,
+    command=[
+        "/bin/bash",
+        "-c",
+        "pip install dao-ai && python -m dao_ai.apps.server",
+    ],
+    include_resources=True,
+)
+
 app_yaml_path: str = f"{source_path}/app.yaml"
 self.w.workspace.upload(
     path=app_yaml_path,
@@ -619,7 +626,29 @@ env:
     format=ImportFormat.AUTO,
     overwrite=True,
 )
-logger.info("app.yaml uploaded", path=app_yaml_path)
+logger.info("app.yaml with resources uploaded", path=app_yaml_path)
+
+# Generate SDK resources from the config (including experiment)
+from dao_ai.apps.resources import (
+    generate_sdk_resources,
+    generate_user_api_scopes,
+)
+
+sdk_resources = generate_sdk_resources(config, experiment_id=experiment.experiment_id)
+if sdk_resources:
+    logger.info(
+        "Discovered app resources from config",
+        resource_count=len(sdk_resources),
+        resources=[r.name for r in sdk_resources],
+    )
+
+# Generate user API scopes for on-behalf-of-user resources
+user_api_scopes = generate_user_api_scopes(config)
+if user_api_scopes:
+    logger.info(
+        "Discovered user API scopes for OBO resources",
+        scopes=user_api_scopes,
+    )

 # Check if app exists
 app_exists: bool = False
@@ -630,20 +659,33 @@ env:
 except NotFound:
     logger.debug("Creating new app", app_name=app_name)

-# Create or
+# Create or update the app with resources and user_api_scopes
 if not app_exists:
     logger.info("Creating Databricks App", app_name=app_name)
     app_spec = App(
         name=app_name,
         description=config.app.description or f"DAO AI Agent: {app_name}",
+        resources=sdk_resources if sdk_resources else None,
+        user_api_scopes=user_api_scopes if user_api_scopes else None,
     )
     app: App = self.w.apps.create_and_wait(app=app_spec)
     logger.info("App created", app_name=app.name, app_url=app.url)
 else:
     app = existing_app
+    # Update resources and scopes on existing app
+    if sdk_resources or user_api_scopes:
+        logger.info("Updating app resources and scopes", app_name=app_name)
+        updated_app = App(
+            name=app_name,
+            description=config.app.description or app.description,
+            resources=sdk_resources if sdk_resources else None,
+            user_api_scopes=user_api_scopes if user_api_scopes else None,
+        )
+        app = self.w.apps.update(name=app_name, app=updated_app)
+        logger.info("App resources and scopes updated", app_name=app_name)

 # Deploy the app with source code
-# The app will use the dao_ai.
+# The app will use the dao_ai.apps.server module as the entry point
 logger.info("Deploying app", app_name=app_name)

 # Create deployment configuration
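Taken together, the new provider code renders an app.yaml that carries the discovered resources, translates the config into SDK resources and on-behalf-of-user API scopes, and attaches both when creating or updating the Databricks App. The following is a condensed sketch of that flow under stated assumptions: the wrapper function name, the SDK import paths, and the `content=` argument to `workspace.upload` are illustrative and not taken from this package.

```python
# Condensed sketch of the deploy flow above. `deploy_app_sketch`, the SDK import
# paths, and the upload `content` argument are assumptions for illustration.
import io

from databricks.sdk import WorkspaceClient
from databricks.sdk.errors import NotFound
from databricks.sdk.service.apps import App
from databricks.sdk.service.workspace import ImportFormat

from dao_ai.apps.resources import (
    generate_app_yaml,
    generate_sdk_resources,
    generate_user_api_scopes,
)
from dao_ai.config import AppConfig


def deploy_app_sketch(
    w: WorkspaceClient,
    config: AppConfig,
    app_name: str,
    source_path: str,
    experiment_id: str,
) -> None:
    # Render app.yaml (startup command + discovered resources) and upload it
    # next to the app source in the workspace.
    app_yaml = generate_app_yaml(
        config,
        command=["/bin/bash", "-c", "pip install dao-ai && python -m dao_ai.apps.server"],
        include_resources=True,
    )
    w.workspace.upload(
        path=f"{source_path}/app.yaml",
        content=io.BytesIO(app_yaml.encode("utf-8")),
        format=ImportFormat.AUTO,
        overwrite=True,
    )

    # Translate the config into SDK resources and on-behalf-of-user API scopes.
    resources = generate_sdk_resources(config, experiment_id=experiment_id)
    scopes = generate_user_api_scopes(config)
    spec = App(
        name=app_name,
        resources=resources or None,
        user_api_scopes=scopes or None,
    )

    # Create the app with those grants, or patch them onto an existing app.
    try:
        w.apps.get(name=app_name)
        w.apps.update(name=app_name, app=spec)
    except NotFound:
        w.apps.create_and_wait(app=spec)
```

The point of the sketch is only to show how the resource and scope discovery feeds both the generated app.yaml and the App spec; the real provider also sets a description, tracks the MLflow experiment, and logs each step as shown in the diff.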
dao_ai/tools/mcp.py
CHANGED
@@ -261,12 +261,12 @@ def _extract_text_content(result: CallToolResult) -> str:
     return "\n".join(text_parts)


-def _fetch_tools_from_server(function: McpFunctionModel) -> list[Tool]:
+async def _afetch_tools_from_server(function: McpFunctionModel) -> list[Tool]:
     """
-    Fetch raw MCP tools from the server.
+    Async version: Fetch raw MCP tools from the server.

-    This is the
-    and
+    This is the primary async implementation that handles the actual MCP connection
+    and tool listing. It's used by both alist_mcp_tools() and acreate_mcp_tools().

     Args:
         function: The MCP function model configuration.
@@ -280,14 +280,10 @@ def _fetch_tools_from_server(function: McpFunctionModel) -> list[Tool]:
     connection_config = _build_connection_config(function)
     client = MultiServerMCPClient({"mcp_function": connection_config})

-
-        """Async helper to list tools from MCP server."""
+    try:
         async with client.session("mcp_function") as session:
             result = await session.list_tools()
             return result.tools if hasattr(result, "tools") else list(result)
-
-    try:
-        return asyncio.run(_list_tools_async())
     except Exception as e:
         if function.connection:
             logger.error(
@@ -312,57 +308,48 @@ def _fetch_tools_from_server(function: McpFunctionModel) -> list[Tool]:
             ) from e


-def list_mcp_tools(
-    function: McpFunctionModel,
-    apply_filters: bool = True,
-) -> list[MCPToolInfo]:
+def _fetch_tools_from_server(function: McpFunctionModel) -> list[Tool]:
     """
-
-
-    This function connects to an MCP server and returns information about
-    all available tools. It's designed for:
-    - Tool discovery and exploration
-    - UI-based tool selection (e.g., in DAO AI Builder)
-    - Debugging and validation of MCP configurations
+    Sync wrapper: Fetch raw MCP tools from the server.

-
-    display tools in a UI and allow users to select which tools to use.
+    For async contexts, use _afetch_tools_from_server() directly.

     Args:
-        function: The MCP function model configuration
-            - Connection details (url, connection, headers, etc.)
-            - Optional filtering (include_tools, exclude_tools)
-        apply_filters: Whether to apply include_tools/exclude_tools filters.
-            Set to False to get the complete list of available tools
-            regardless of filter configuration. Default True.
+        function: The MCP function model configuration.

     Returns:
-        List of
-            Each contains name, description, and input_schema.
+        List of raw MCP Tool objects from the server.

     Raises:
         RuntimeError: If connection to MCP server fails.
+    """
+    return asyncio.run(_afetch_tools_from_server(function))

-    Example:
-        # List all tools from a DBSQL MCP server
-        from dao_ai.config import McpFunctionModel
-        from dao_ai.tools.mcp import list_mcp_tools

-
-
+async def alist_mcp_tools(
+    function: McpFunctionModel,
+    apply_filters: bool = True,
+) -> list[MCPToolInfo]:
+    """
+    Async version: List available tools from an MCP server.
+
+    This is the primary async implementation for tool discovery.
+    For sync contexts, use list_mcp_tools() instead.

-
-
+    Args:
+        function: The MCP function model configuration.
+        apply_filters: Whether to apply include_tools/exclude_tools filters.

-
-
+    Returns:
+        List of MCPToolInfo objects describing available tools.

-
-
-    This function is for discovery/display purposes only.
+    Raises:
+        RuntimeError: If connection to MCP server fails.
     """
     mcp_url = function.mcp_url
-    logger.debug(
+    logger.debug(
+        "Listing MCP tools (async)", mcp_url=mcp_url, apply_filters=apply_filters
+    )

     # Log connection type
     if function.connection:
@@ -378,8 +365,8 @@ def list_mcp_tools(
         mcp_url=mcp_url,
     )

-    # Fetch tools from server
-    mcp_tools: list[Tool] =
+    # Fetch tools from server (async)
+    mcp_tools: list[Tool] = await _afetch_tools_from_server(function)

     # Log discovered tools
     logger.info(
@@ -433,45 +420,155 @@ def list_mcp_tools(
     return tool_infos


-def
+def list_mcp_tools(
     function: McpFunctionModel,
-
+    apply_filters: bool = True,
+) -> list[MCPToolInfo]:
     """
-
+    Sync wrapper: List available tools from an MCP server.
+
+    For async contexts, use alist_mcp_tools() directly.
+
+    Args:
+        function: The MCP function model configuration.
+        apply_filters: Whether to apply include_tools/exclude_tools filters.

-
-
-
+    Returns:
+        List of MCPToolInfo objects describing available tools.
+
+    Raises:
+        RuntimeError: If connection to MCP server fails.
+    """
+    return asyncio.run(alist_mcp_tools(function, apply_filters))

-    This function:
-    1. Fetches available tools from the MCP server
-    2. Applies include_tools/exclude_tools filters
-    3. Wraps each tool for LangChain agent execution

-
+async def acreate_mcp_tools(
+    function: McpFunctionModel,
+) -> Sequence[RunnableLike]:
+    """
+    Async version: Create executable LangChain tools for invoking Databricks MCP functions.

-
+    This is the primary async implementation. For sync contexts, use create_mcp_tools().

     Args:
-        function: The MCP function model configuration
-            - Connection details (url, connection, headers, etc.)
-            - Optional filtering (include_tools, exclude_tools)
+        function: The MCP function model configuration.

     Returns:
         A sequence of LangChain tools that can be used by agents.

     Raises:
         RuntimeError: If connection to MCP server fails.
+    """
+    mcp_url = function.mcp_url
+    logger.debug("Creating MCP tools (async)", mcp_url=mcp_url)
+
+    # Fetch tools from server (async)
+    mcp_tools: list[Tool] = await _afetch_tools_from_server(function)
+
+    # Log discovered tools
+    logger.info(
+        "Discovered MCP tools from server",
+        tools_count=len(mcp_tools),
+        tool_names=[t.name for t in mcp_tools],
+        mcp_url=mcp_url,
+    )
+
+    # Apply filtering if configured
+    if function.include_tools or function.exclude_tools:
+        original_count = len(mcp_tools)
+        mcp_tools = [
+            tool
+            for tool in mcp_tools
+            if _should_include_tool(
+                tool.name,
+                function.include_tools,
+                function.exclude_tools,
+            )
+        ]
+        filtered_count = original_count - len(mcp_tools)
+
+        logger.info(
+            "Filtered MCP tools",
+            original_count=original_count,
+            filtered_count=filtered_count,
+            final_count=len(mcp_tools),
+            include_patterns=function.include_tools,
+            exclude_patterns=function.exclude_tools,
+        )
+
+    # Log final tool list
+    for mcp_tool in mcp_tools:
+        logger.debug(
+            "MCP tool available",
+            tool_name=mcp_tool.name,
+            tool_description=(
+                mcp_tool.description[:100] if mcp_tool.description else None
+            ),
+        )
+
+    def _create_tool_wrapper(mcp_tool: Tool) -> RunnableLike:
+        """
+        Create a LangChain tool wrapper for an MCP tool.
+        """
+
+        @create_tool(
+            mcp_tool.name,
+            description=mcp_tool.description or f"MCP tool: {mcp_tool.name}",
+            args_schema=mcp_tool.inputSchema,
+        )
+        async def tool_wrapper(**kwargs: Any) -> str:
+            """Execute MCP tool with fresh session."""
+            logger.trace("Invoking MCP tool", tool_name=mcp_tool.name, args=kwargs)
+
+            invocation_client = MultiServerMCPClient(
+                {"mcp_function": _build_connection_config(function)}
+            )
+
+            try:
+                async with invocation_client.session("mcp_function") as session:
+                    result: CallToolResult = await session.call_tool(
+                        mcp_tool.name, kwargs
+                    )
+
+                    text_result = _extract_text_content(result)
+
+                    logger.trace(
+                        "MCP tool completed",
+                        tool_name=mcp_tool.name,
+                        result_length=len(text_result),
+                    )
+
+                    return text_result
+
+            except Exception as e:
+                logger.error(
+                    "MCP tool failed",
+                    tool_name=mcp_tool.name,
+                    error=str(e),
+                )
+                raise
+
+        return tool_wrapper
+
+    return [_create_tool_wrapper(tool) for tool in mcp_tools]

-    Example:
-        from dao_ai.config import McpFunctionModel
-        from dao_ai.tools.mcp import create_mcp_tools

-
-
+def create_mcp_tools(
+    function: McpFunctionModel,
+) -> Sequence[RunnableLike]:
+    """
+    Sync wrapper: Create executable LangChain tools for invoking Databricks MCP functions.

-
-
+    For async contexts, use acreate_mcp_tools() directly.
+
+    Args:
+        function: The MCP function model configuration.
+
+    Returns:
+        A sequence of LangChain tools that can be used by agents.
+
+    Raises:
+        RuntimeError: If connection to MCP server fails.
     """
     mcp_url = function.mcp_url
     logger.debug("Creating MCP tools", mcp_url=mcp_url)
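The refactor splits each MCP entry point into an async implementation (`_afetch_tools_from_server`, `alist_mcp_tools`, `acreate_mcp_tools`) and a thin sync wrapper built on `asyncio.run()`. A small usage sketch follows; how the `McpFunctionModel` is constructed is an assumption here, since its fields are not shown in this diff (it normally comes from the YAML-driven config).

```python
# Usage sketch for the sync/async split; building the McpFunctionModel is
# assumed to happen elsewhere (e.g. via the loaded AppConfig).
from dao_ai.config import McpFunctionModel
from dao_ai.tools.mcp import (
    acreate_mcp_tools,
    alist_mcp_tools,
    create_mcp_tools,
    list_mcp_tools,
)


def discover_tools(function: McpFunctionModel) -> None:
    # Sync context: the wrapper drives the event loop via asyncio.run() internally.
    for info in list_mcp_tools(function, apply_filters=False):
        print(info)


def build_tools_sync(function: McpFunctionModel):
    # Also sync-only: safe outside any running event loop.
    return create_mcp_tools(function)


async def build_tools_async(function: McpFunctionModel):
    # Inside an already-running event loop the sync wrappers would fail,
    # because asyncio.run() refuses to nest; call the async variants directly.
    infos = await alist_mcp_tools(function)
    tools = await acreate_mcp_tools(function)
    return infos, tools
```

Note that each generated tool wrapper opens a fresh `MultiServerMCPClient` session per invocation, so the returned tools are not tied to the event loop that created them.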
{dao_ai-0.1.9.dist-info → dao_ai-0.1.11.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: dao-ai
-Version: 0.1.9
+Version: 0.1.11
 Summary: DAO AI: A modular, multi-agent orchestration framework for complex AI workflows. Supports agent handoff, tool integration, and dynamic configuration via YAML.
 Project-URL: Homepage, https://github.com/natefleming/dao-ai
 Project-URL: Documentation, https://natefleming.github.io/dao-ai
@@ -28,7 +28,7 @@ Requires-Python: >=3.11
 Requires-Dist: databricks-agents>=1.9.0
 Requires-Dist: databricks-langchain[memory]>=0.12.1
 Requires-Dist: databricks-mcp>=0.5.0
-Requires-Dist: databricks-sdk[openai]>=0.
+Requires-Dist: databricks-sdk[openai]>=0.77.0
 Requires-Dist: ddgs>=9.10.0
 Requires-Dist: dspy>=2.6.27
 Requires-Dist: flashrank>=0.2.10

{dao_ai-0.1.9.dist-info → dao_ai-0.1.11.dist-info}/RECORD
CHANGED
@@ -1,13 +1,11 @@
 dao_ai/__init__.py,sha256=18P98ExEgUaJ1Byw440Ct1ty59v6nxyWtc5S6Uq2m9Q,1062
-dao_ai/agent_as_code.py,sha256=xIlLDpPVfmDVzLvbdY_V_CrC4Jvj2ItCWJ-NzdrszTo,538
-dao_ai/app_server.py,sha256=QKpl068z-s1gLF67dPW-3fT77i33t_Oab4_ugmxISWs,3010
 dao_ai/catalog.py,sha256=sPZpHTD3lPx4EZUtIWeQV7VQM89WJ6YH__wluk1v2lE,4947
-dao_ai/cli.py,sha256=
-dao_ai/config.py,sha256=
+dao_ai/cli.py,sha256=1Ox8qjLKRlrKu2YXozm0lWoeZnDCouECeZSGVPkQgIQ,50923
+dao_ai/config.py,sha256=9G_JiPbr_ihUCaqYPvnMbzLKtyppXTjraQfVOxnqeBA,129323
 dao_ai/graph.py,sha256=1-uQlo7iXZQTT3uU8aYu0N5rnhw5_g_2YLwVsAs6M-U,1119
 dao_ai/logging.py,sha256=lYy4BmucCHvwW7aI3YQkQXKJtMvtTnPDu9Hnd7_O4oc,1556
 dao_ai/messages.py,sha256=4ZBzO4iFdktGSLrmhHzFjzMIt2tpaL-aQLHOQJysGnY,6959
-dao_ai/models.py,sha256=
+dao_ai/models.py,sha256=NaHj91Gra4M8thlKX1DSufLqtJfZSZ55lm1H1dJL_O8,77320
 dao_ai/nodes.py,sha256=7W6Ek6Uk9-pKa-H06nVCwuDllCrgX02IYy3rHtuL0aM,10777
 dao_ai/optimization.py,sha256=phK6t4wYmWPObCjGUBHdZzsaFXGhQOjhAek2bAEfwXo,22971
 dao_ai/prompts.py,sha256=4cz5bZ7cOzrjyQ8hMp-K4evK6cVYrkGrAGdUl8-KDEM,2784
@@ -15,6 +13,11 @@ dao_ai/state.py,sha256=ifDTAC7epdowk3Z1CP3Xqw4uH2dIxQEVF3C747dA8yI,6436
 dao_ai/types.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 dao_ai/utils.py,sha256=_Urd7Nj2VzrgPKf3NS4E6vt0lWRhEUddBqWN9BksqeE,11543
 dao_ai/vector_search.py,sha256=8d3xROg9zSIYNXjRRl6rSexsJTlufjRl5Fy1ZA8daKA,4019
+dao_ai/apps/__init__.py,sha256=RLuhZf4gQ4pemwKDz1183aXib8UfaRhwfKvRx68GRlM,661
+dao_ai/apps/handlers.py,sha256=NN81uDV2hy83zT-kY36mxyBgCQIHBJApX4bnUceGB8k,2614
+dao_ai/apps/model_serving.py,sha256=XLt3_0pGSRceMK6YtOrND9Jnh7mKLPCtwjVDLIaptQU,847
+dao_ai/apps/resources.py,sha256=kAXqYx-Xwba1SaltPZIAtB2xoIHxH98Q41b8YGPLfI0,38029
+dao_ai/apps/server.py,sha256=D3R3J1svtpxnpDjoM-oxg66dMDI8USgiQHPRvRHc7oQ,1276
 dao_ai/genie/__init__.py,sha256=vdEyGhrt6L8GlK75SyYvTnl8QpHKDCJC5hJKLg4DesQ,1063
 dao_ai/genie/core.py,sha256=HPKbocvhnnw_PkQwfoq5bpgQmL9lZyyS6_goTJL8yiY,1073
 dao_ai/genie/cache/__init__.py,sha256=JfgCJl1NYQ1aZvZ4kly4T6uQK6ZCJ6PX_htuq7nJF50,1203
@@ -28,7 +31,7 @@ dao_ai/memory/__init__.py,sha256=Us3wFehvug_h83m-UJ7OXdq2qZ0e9nHBQE7m5RwoAd8,559
 dao_ai/memory/base.py,sha256=99nfr2UZJ4jmfTL_KrqUlRSCoRxzkZyWyx5WqeUoMdQ,338
 dao_ai/memory/core.py,sha256=38H-JLIyUrRDIECLvpXK3iJlWG35X97E-DTo_4c3Jzc,6317
 dao_ai/memory/databricks.py,sha256=SM6nwLjhSRJO4hLc3GUuht5YydYtTi3BAOae6jPwTm4,14377
-dao_ai/memory/postgres.py,sha256=
+dao_ai/memory/postgres.py,sha256=DeLmexSzz91eXEJN5zW4YJigLCby8j9qNAZleumaVHU,17481
 dao_ai/middleware/__init__.py,sha256=Qy8wbvjXF7TrUzi3tWziOwxqsrUcT1rzE3UWd3x5CrU,5108
 dao_ai/middleware/assertions.py,sha256=C1K-TnNZfBEwWouioHCt6c48i1ux9QKfQaX6AzghhgE,27408
 dao_ai/middleware/base.py,sha256=uG2tpdnjL5xY5jCKvb_m3UTBtl4ZC6fJQUkDsQvV8S4,1279
@@ -50,13 +53,13 @@ dao_ai/orchestration/supervisor.py,sha256=alKMEEo9G5LhdpMvTVdAMel234cZj5_MguWl4w
 dao_ai/orchestration/swarm.py,sha256=8tp1eGmsQqqWpaDcjPoJckddPWohZdmmN0RGRJ_xzOA,9198
 dao_ai/providers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 dao_ai/providers/base.py,sha256=cJGo3UjUTPgS91dv38ePOHwQQtYhIa84ebb167CBXjk,2111
-dao_ai/providers/databricks.py,sha256=
+dao_ai/providers/databricks.py,sha256=EZokRfbScW5K2h8EOCQ95XjoXt981ySslQk0d0DJmeI,70687
 dao_ai/tools/__init__.py,sha256=NfRpAKds_taHbx6gzLPWgtPXve-YpwzkoOAUflwxceM,1734
 dao_ai/tools/agent.py,sha256=plIWALywRjaDSnot13nYehBsrHRpBUpsVZakoGeajOE,1858
 dao_ai/tools/core.py,sha256=bRIN3BZhRQX8-Kpu3HPomliodyskCqjxynQmYbk6Vjs,3783
 dao_ai/tools/email.py,sha256=A3TsCoQgJR7UUWR0g45OPRGDpVoYwctFs1MOZMTt_d4,7389
 dao_ai/tools/genie.py,sha256=4e_5MeAe7kDzHbYeXuNPFbY5z8ci3ouj8l5254CZ2lA,8874
-dao_ai/tools/mcp.py,sha256=
+dao_ai/tools/mcp.py,sha256=tfn-sdKwfNY31RsDFlafdGyN4XlKGfniXG_mO-Meh4E,21030
 dao_ai/tools/memory.py,sha256=lwObKimAand22Nq3Y63tsv-AXQ5SXUigN9PqRjoWKes,1836
 dao_ai/tools/python.py,sha256=jWFnZPni2sCdtd8D1CqXnZIPHnWkdK27bCJnBXpzhvo,1879
 dao_ai/tools/search.py,sha256=cJ3D9FKr1GAR6xz55dLtRkjtQsI0WRueGt9TPDFpOxc,433
@@ -65,8 +68,8 @@ dao_ai/tools/sql.py,sha256=tKd1gjpLuKdQDyfmyYYtMiNRHDW6MGRbdEVaeqyB8Ok,7632
 dao_ai/tools/time.py,sha256=tufJniwivq29y0LIffbgeBTIDE6VgrLpmVf8Qr90qjw,9224
 dao_ai/tools/unity_catalog.py,sha256=AjQfW7bvV8NurqDLIyntYRv2eJuTwNdbvex1L5CRjOk,15534
 dao_ai/tools/vector_search.py,sha256=oe2uBwl2TfeJIXPpwiS6Rmz7wcHczSxNyqS9P3hE6co,14542
-dao_ai-0.1.
-dao_ai-0.1.
-dao_ai-0.1.
-dao_ai-0.1.
-dao_ai-0.1.
+dao_ai-0.1.11.dist-info/METADATA,sha256=uSlkpK84xgQlBOkLAzdOCycWxUC1ovzrrrU_uCVMOd0,16698
+dao_ai-0.1.11.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
+dao_ai-0.1.11.dist-info/entry_points.txt,sha256=Xa-UFyc6gWGwMqMJOt06ZOog2vAfygV_DSwg1AiP46g,43
+dao_ai-0.1.11.dist-info/licenses/LICENSE,sha256=YZt3W32LtPYruuvHE9lGk2bw6ZPMMJD8yLrjgHybyz4,1069
+dao_ai-0.1.11.dist-info/RECORD,,
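Beyond the version bump, the only metadata change is a higher floor on databricks-sdk (>=0.77.0), presumably to pick up the App resource and user_api_scopes fields used in databricks.py above. A quick way to check an existing environment against the new floor, assuming the `packaging` library is available:

```python
# Verify an installed environment satisfies the new databricks-sdk floor.
# importlib.metadata is stdlib; `packaging` is an assumed extra dependency here.
from importlib.metadata import version

from packaging.version import Version

if Version(version("databricks-sdk")) < Version("0.77.0"):
    raise RuntimeError("dao-ai 0.1.11 requires databricks-sdk>=0.77.0")
```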
dao_ai/agent_as_code.py
DELETED
@@ -1,22 +0,0 @@
-import mlflow
-from mlflow.models import ModelConfig
-from mlflow.pyfunc import ResponsesAgent
-
-from dao_ai.config import AppConfig
-from dao_ai.logging import configure_logging
-
-mlflow.set_registry_uri("databricks-uc")
-mlflow.set_tracking_uri("databricks")
-
-mlflow.langchain.autolog()
-
-model_config: ModelConfig = ModelConfig()
-config: AppConfig = AppConfig(**model_config.to_dict())
-
-log_level: str = config.app.log_level
-
-configure_logging(level=log_level)
-
-app: ResponsesAgent = config.as_responses_agent()
-
-mlflow.models.set_model(app)
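agent_as_code.py was the MLflow models-from-code entry point; the updated `model_path` in databricks.py now points at `dao_ai/apps/model_serving.py` instead. That new module is not included in this diff, so the following is only a hedged sketch of what such an entry module looks like, reusing the deleted file's pattern.

```python
# Hypothetical models-from-code entry module in the style of the deleted
# agent_as_code.py; the real dao_ai/apps/model_serving.py may differ.
import mlflow
from mlflow.models import ModelConfig
from mlflow.pyfunc import ResponsesAgent

from dao_ai.config import AppConfig
from dao_ai.logging import configure_logging

mlflow.set_registry_uri("databricks-uc")
mlflow.set_tracking_uri("databricks")
mlflow.langchain.autolog()

# Rehydrate the YAML-driven config and expose the agent to MLflow.
config: AppConfig = AppConfig(**ModelConfig().to_dict())
configure_logging(level=config.app.log_level)

app: ResponsesAgent = config.as_responses_agent()
mlflow.models.set_model(app)
```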
{dao_ai-0.1.9.dist-info → dao_ai-0.1.11.dist-info}/WHEEL
File without changes

{dao_ai-0.1.9.dist-info → dao_ai-0.1.11.dist-info}/entry_points.txt
File without changes

{dao_ai-0.1.9.dist-info → dao_ai-0.1.11.dist-info}/licenses/LICENSE
File without changes