dao-ai 0.1.5__py3-none-any.whl → 0.1.20__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (57)
  1. dao_ai/apps/__init__.py +24 -0
  2. dao_ai/apps/handlers.py +105 -0
  3. dao_ai/apps/model_serving.py +29 -0
  4. dao_ai/apps/resources.py +1122 -0
  5. dao_ai/apps/server.py +39 -0
  6. dao_ai/cli.py +446 -16
  7. dao_ai/config.py +1034 -103
  8. dao_ai/evaluation.py +543 -0
  9. dao_ai/genie/__init__.py +55 -7
  10. dao_ai/genie/cache/__init__.py +34 -7
  11. dao_ai/genie/cache/base.py +143 -2
  12. dao_ai/genie/cache/context_aware/__init__.py +31 -0
  13. dao_ai/genie/cache/context_aware/base.py +1151 -0
  14. dao_ai/genie/cache/context_aware/in_memory.py +609 -0
  15. dao_ai/genie/cache/context_aware/persistent.py +802 -0
  16. dao_ai/genie/cache/context_aware/postgres.py +1166 -0
  17. dao_ai/genie/cache/core.py +1 -1
  18. dao_ai/genie/cache/lru.py +257 -75
  19. dao_ai/genie/cache/optimization.py +890 -0
  20. dao_ai/genie/core.py +235 -11
  21. dao_ai/memory/postgres.py +175 -39
  22. dao_ai/middleware/__init__.py +5 -0
  23. dao_ai/middleware/tool_selector.py +129 -0
  24. dao_ai/models.py +327 -370
  25. dao_ai/nodes.py +4 -4
  26. dao_ai/orchestration/core.py +33 -9
  27. dao_ai/orchestration/supervisor.py +23 -8
  28. dao_ai/orchestration/swarm.py +6 -1
  29. dao_ai/{prompts.py → prompts/__init__.py} +12 -61
  30. dao_ai/prompts/instructed_retriever_decomposition.yaml +58 -0
  31. dao_ai/prompts/instruction_reranker.yaml +14 -0
  32. dao_ai/prompts/router.yaml +37 -0
  33. dao_ai/prompts/verifier.yaml +46 -0
  34. dao_ai/providers/base.py +28 -2
  35. dao_ai/providers/databricks.py +352 -33
  36. dao_ai/state.py +1 -0
  37. dao_ai/tools/__init__.py +5 -3
  38. dao_ai/tools/genie.py +103 -26
  39. dao_ai/tools/instructed_retriever.py +366 -0
  40. dao_ai/tools/instruction_reranker.py +202 -0
  41. dao_ai/tools/mcp.py +539 -97
  42. dao_ai/tools/router.py +89 -0
  43. dao_ai/tools/slack.py +13 -2
  44. dao_ai/tools/sql.py +7 -3
  45. dao_ai/tools/unity_catalog.py +32 -10
  46. dao_ai/tools/vector_search.py +493 -160
  47. dao_ai/tools/verifier.py +159 -0
  48. dao_ai/utils.py +182 -2
  49. dao_ai/vector_search.py +9 -1
  50. {dao_ai-0.1.5.dist-info → dao_ai-0.1.20.dist-info}/METADATA +10 -8
  51. dao_ai-0.1.20.dist-info/RECORD +89 -0
  52. dao_ai/agent_as_code.py +0 -22
  53. dao_ai/genie/cache/semantic.py +0 -970
  54. dao_ai-0.1.5.dist-info/RECORD +0 -70
  55. {dao_ai-0.1.5.dist-info → dao_ai-0.1.20.dist-info}/WHEEL +0 -0
  56. {dao_ai-0.1.5.dist-info → dao_ai-0.1.20.dist-info}/entry_points.txt +0 -0
  57. {dao_ai-0.1.5.dist-info → dao_ai-0.1.20.dist-info}/licenses/LICENSE +0 -0
dao_ai/apps/server.py ADDED
@@ -0,0 +1,39 @@
1
+ """
2
+ App server module for running dao-ai agents as Databricks Apps.
3
+
4
+ This module provides the entry point for deploying dao-ai agents as Databricks Apps
5
+ using MLflow's AgentServer. It follows the same pattern as model_serving.py but
6
+ uses the AgentServer for the Databricks Apps runtime.
7
+
8
+ Configuration Loading:
9
+ The config path is specified via the DAO_AI_CONFIG_PATH environment variable,
10
+ or defaults to dao_ai.yaml in the current directory.
11
+
12
+ Usage:
13
+ # With environment variable
14
+ DAO_AI_CONFIG_PATH=/path/to/config.yaml python -m dao_ai.apps.server
15
+
16
+ # With default dao_ai.yaml in current directory
17
+ python -m dao_ai.apps.server
18
+ """
19
+
20
+ from mlflow.genai.agent_server import AgentServer
21
+
22
+ # Import the agent handlers to register the invoke and stream decorators
23
+ # This MUST happen before creating the AgentServer instance
24
+ import dao_ai.apps.handlers # noqa: E402, F401
25
+
26
+ # Create the AgentServer instance
27
+ agent_server = AgentServer("ResponsesAgent", enable_chat_proxy=True)
28
+
29
+ # Define the app as a module level variable to enable multiple workers
30
+ app = agent_server.app
31
+
32
+
33
+ def main() -> None:
34
+ """Entry point for running the agent server."""
35
+ agent_server.run(app_import_string="dao_ai.apps.server:app")
36
+
37
+
38
+ if __name__ == "__main__":
39
+ main()
dao_ai/cli.py CHANGED
@@ -2,12 +2,13 @@ import argparse
2
2
  import getpass
3
3
  import json
4
4
  import os
5
+ import signal
5
6
  import subprocess
6
7
  import sys
7
8
  import traceback
8
9
  from argparse import ArgumentParser, Namespace
9
10
  from pathlib import Path
10
- from typing import Optional, Sequence
11
+ from typing import Any, Optional, Sequence
11
12
 
12
13
  from dotenv import find_dotenv, load_dotenv
13
14
  from loguru import logger
@@ -63,16 +64,27 @@ def detect_cloud_provider(profile: Optional[str] = None) -> Optional[str]:
63
64
  Cloud provider string ('azure', 'aws', 'gcp') or None if detection fails
64
65
  """
65
66
  try:
67
+ import os
68
+
66
69
  from databricks.sdk import WorkspaceClient
67
70
 
71
+ # Check for environment variables that might override profile
72
+ if profile and os.environ.get("DATABRICKS_HOST"):
73
+ logger.warning(
74
+ f"DATABRICKS_HOST environment variable is set, which may override --profile {profile}"
75
+ )
76
+
68
77
  # Create workspace client with optional profile
69
78
  if profile:
79
+ logger.debug(f"Creating WorkspaceClient with profile: {profile}")
70
80
  w = WorkspaceClient(profile=profile)
71
81
  else:
82
+ logger.debug("Creating WorkspaceClient with default/ambient credentials")
72
83
  w = WorkspaceClient()
73
84
 
74
85
  # Get the workspace URL from config
75
86
  host = w.config.host
87
+ logger.debug(f"WorkspaceClient host: {host}, profile used: {profile}")
76
88
  if not host:
77
89
  logger.warning("Could not determine workspace URL for cloud detection")
78
90
  return None
@@ -114,6 +126,7 @@ Examples:
114
126
  dao-ai validate -c config/model_config.yaml # Validate a specific configuration file
115
127
  dao-ai graph -o architecture.png -c my_config.yaml -v # Generate visual graph with verbose output
116
128
  dao-ai chat -c config/retail.yaml --custom-input store_num=87887 # Start interactive chat session
129
+ dao-ai list-mcp-tools -c config/mcp_config.yaml --apply-filters # List filtered MCP tools only
117
130
  dao-ai validate # Validate with detailed logging
118
131
  dao-ai bundle --deploy # Deploy the DAO AI asset bundle
119
132
  """,
@@ -284,6 +297,15 @@ Examples:
284
297
  action="store_true",
285
298
  help="Perform a dry run without executing the deployment or run commands",
286
299
  )
300
+ bundle_parser.add_argument(
301
+ "--deployment-target",
302
+ type=str,
303
+ choices=["model_serving", "apps"],
304
+ default=None,
305
+ help="Agent deployment target: 'model_serving' or 'apps'. "
306
+ "If not specified, uses app.deployment_target from config file, "
307
+ "or defaults to 'model_serving'. Passed to the deploy notebook.",
308
+ )
287
309
 
288
310
  # Deploy command
289
311
  deploy_parser: ArgumentParser = subparsers.add_parser(
@@ -308,6 +330,63 @@ Examples:
308
330
  metavar="FILE",
309
331
  help="Path to the model configuration file to validate",
310
332
  )
333
+ deploy_parser.add_argument(
334
+ "-t",
335
+ "--target",
336
+ type=str,
337
+ choices=["model_serving", "apps"],
338
+ default=None,
339
+ help="Deployment target: 'model_serving' or 'apps'. "
340
+ "If not specified, uses app.deployment_target from config file, "
341
+ "or defaults to 'model_serving'.",
342
+ )
343
+
344
+ # List MCP tools command
345
+ list_mcp_parser: ArgumentParser = subparsers.add_parser(
346
+ "list-mcp-tools",
347
+ help="List available MCP tools from configuration",
348
+ description="""
349
+ List all available MCP tools from the configured MCP servers.
350
+ This command shows:
351
+ - All MCP servers/functions in the configuration
352
+ - Available tools from each server
353
+ - Full descriptions for each tool (no truncation)
354
+ - Tool parameters in readable format (type, required/optional, descriptions)
355
+ - Which tools are included/excluded based on filters
356
+ - Filter patterns (include_tools, exclude_tools)
357
+
358
+ Use this command to:
359
+ - Discover available tools before configuring agents
360
+ - Review tool descriptions and parameter schemas
361
+ - Debug tool filtering configuration
362
+ - Verify MCP server connectivity
363
+
364
+ Options:
365
+ - Use --apply-filters to only show tools that will be loaded (hides excluded tools)
366
+ - Without --apply-filters, see all available tools with include/exclude status
367
+
368
+ Note: Schemas are displayed in a concise, readable format instead of verbose JSON
369
+ """,
370
+ epilog="""Examples:
371
+ dao-ai list-mcp-tools -c config/model_config.yaml
372
+ dao-ai list-mcp-tools -c config/model_config.yaml --apply-filters
373
+ """,
374
+ formatter_class=argparse.RawDescriptionHelpFormatter,
375
+ )
376
+ list_mcp_parser.add_argument(
377
+ "-c",
378
+ "--config",
379
+ type=str,
380
+ default="./config/model_config.yaml",
381
+ required=False,
382
+ metavar="FILE",
383
+ help="Path to the model configuration file (default: ./config/model_config.yaml)",
384
+ )
385
+ list_mcp_parser.add_argument(
386
+ "--apply-filters",
387
+ action="store_true",
388
+ help="Only show tools that pass include/exclude filters (hide excluded tools)",
389
+ )
311
390
 
312
391
  chat_parser: ArgumentParser = subparsers.add_parser(
313
392
  "chat",
@@ -376,6 +455,18 @@ def handle_chat_command(options: Namespace) -> None:
376
455
  """Interactive chat REPL with the DAO AI system with Human-in-the-Loop support."""
377
456
  logger.debug("Starting chat session with DAO AI system...")
378
457
 
458
+ # Set up signal handler for clean Ctrl+C handling
459
+ def signal_handler(sig: int, frame: Any) -> None:
460
+ try:
461
+ print("\n\n👋 Chat session interrupted. Goodbye!")
462
+ sys.stdout.flush()
463
+ except Exception:
464
+ pass
465
+ sys.exit(0)
466
+
467
+ # Store original handler and set our handler
468
+ original_handler = signal.signal(signal.SIGINT, signal_handler)
469
+
379
470
  try:
380
471
  # Set default user_id if not provided
381
472
  if options.user_id is None:
@@ -443,14 +534,19 @@ def handle_chat_command(options: Namespace) -> None:
443
534
  )
444
535
  continue
445
536
 
537
+ # Normalize user_id for memory namespace compatibility (replace . with _)
538
+ # This matches the normalization in models.py _convert_to_context
539
+ if configurable.get("user_id"):
540
+ configurable["user_id"] = configurable["user_id"].replace(".", "_")
541
+
446
542
  # Create Context object from configurable dict
447
543
  from dao_ai.state import Context
448
544
 
449
545
  context = Context(**configurable)
450
546
 
451
- # Prepare config with thread_id for checkpointer
452
- # Note: thread_id is needed in config for checkpointer/memory
453
- config = {"configurable": {"thread_id": options.thread_id}}
547
+ # Prepare config with all context fields for checkpointer/memory
548
+ # Note: langmem tools require user_id in config.configurable for namespace resolution
549
+ config = {"configurable": context.model_dump()}
454
550
 
455
551
  # Invoke the graph and handle interrupts (HITL)
456
552
  # Wrap in async function to maintain connection pool throughout
@@ -584,6 +680,12 @@ def handle_chat_command(options: Namespace) -> None:
584
680
 
585
681
  try:
586
682
  result = loop.run_until_complete(_invoke_with_hitl())
683
+ except KeyboardInterrupt:
684
+ # Re-raise to be caught by outer handler
685
+ raise
686
+ except asyncio.CancelledError:
687
+ # Treat cancellation like KeyboardInterrupt
688
+ raise KeyboardInterrupt
587
689
  except Exception as e:
588
690
  logger.error(f"Error invoking graph: {e}")
589
691
  print(f"\n❌ Error: {e}")
@@ -649,23 +751,34 @@ def handle_chat_command(options: Namespace) -> None:
649
751
  logger.error(f"Response processing error: {e}")
650
752
  logger.error(f"Stack trace: {traceback.format_exc()}")
651
753
 
652
- except EOFError:
653
- # Handle Ctrl-D
654
- print("\n\n👋 Goodbye! Chat session ended.")
655
- break
656
- except KeyboardInterrupt:
657
- # Handle Ctrl-C
658
- print("\n\n👋 Chat session interrupted. Goodbye!")
754
+ except (EOFError, KeyboardInterrupt):
755
+ # Handle Ctrl-D (EOF) or Ctrl-C (interrupt)
756
+ # Use try/except for print in case stdout is closed
757
+ try:
758
+ print("\n\n👋 Goodbye! Chat session ended.")
759
+ sys.stdout.flush()
760
+ except Exception:
761
+ pass
659
762
  break
660
763
  except Exception as e:
661
764
  print(f"\n❌ Error: {e}")
662
765
  logger.error(f"Chat error: {e}")
663
766
  traceback.print_exc()
664
767
 
768
+ except (EOFError, KeyboardInterrupt):
769
+ # Handle interrupts during initialization
770
+ try:
771
+ print("\n\n👋 Chat session interrupted. Goodbye!")
772
+ sys.stdout.flush()
773
+ except Exception:
774
+ pass
665
775
  except Exception as e:
666
776
  logger.error(f"Failed to initialize chat session: {e}")
667
777
  print(f"❌ Failed to start chat session: {e}")
668
778
  sys.exit(1)
779
+ finally:
780
+ # Restore original signal handler
781
+ signal.signal(signal.SIGINT, original_handler)
669
782
 
670
783
 
671
784
  def handle_schema_command(options: Namespace) -> None:
@@ -681,11 +794,28 @@ def handle_graph_command(options: Namespace) -> None:
681
794
 
682
795
 
683
796
  def handle_deploy_command(options: Namespace) -> None:
797
+ from dao_ai.config import DeploymentTarget
798
+
684
799
  logger.debug(f"Validating configuration from {options.config}...")
685
800
  try:
686
801
  config: AppConfig = AppConfig.from_file(options.config)
802
+
803
+ # Hybrid target resolution:
804
+ # 1. CLI --target takes precedence
805
+ # 2. Fall back to config.app.deployment_target
806
+ # 3. Default to MODEL_SERVING (handled in deploy_agent)
807
+ target: DeploymentTarget | None = None
808
+ if options.target is not None:
809
+ target = DeploymentTarget(options.target)
810
+ logger.info(f"Using CLI-specified deployment target: {target.value}")
811
+ elif config.app is not None and config.app.deployment_target is not None:
812
+ target = config.app.deployment_target
813
+ logger.info(f"Using config file deployment target: {target.value}")
814
+ else:
815
+ logger.info("No deployment target specified, defaulting to model_serving")
816
+
687
817
  config.create_agent()
688
- config.deploy_agent()
818
+ config.deploy_agent(target=target)
689
819
  sys.exit(0)
690
820
  except Exception as e:
691
821
  logger.error(f"Deployment failed: {e}")
@@ -704,6 +834,275 @@ def handle_validate_command(options: Namespace) -> None:
704
834
  sys.exit(1)
705
835
 
706
836
 
837
+ def _format_schema_pretty(schema: dict[str, Any], indent: int = 0) -> str:
838
+ """
839
+ Format a JSON schema in a more readable, concise format.
840
+
841
+ Args:
842
+ schema: The JSON schema to format
843
+ indent: Current indentation level
844
+
845
+ Returns:
846
+ Pretty-formatted schema string
847
+ """
848
+ if not schema:
849
+ return ""
850
+
851
+ lines: list[str] = []
852
+ indent_str = " " * indent
853
+
854
+ # Get required fields
855
+ required_fields = set(schema.get("required", []))
856
+
857
+ # Handle object type with properties
858
+ if schema.get("type") == "object" and "properties" in schema:
859
+ properties = schema["properties"]
860
+
861
+ for prop_name, prop_schema in properties.items():
862
+ is_required = prop_name in required_fields
863
+ req_marker = " (required)" if is_required else " (optional)"
864
+
865
+ prop_type = prop_schema.get("type", "any")
866
+ prop_desc = prop_schema.get("description", "")
867
+
868
+ # Handle different types
869
+ if prop_type == "array":
870
+ items = prop_schema.get("items", {})
871
+ item_type = items.get("type", "any")
872
+ type_str = f"array<{item_type}>"
873
+ elif prop_type == "object":
874
+ type_str = "object"
875
+ else:
876
+ type_str = prop_type
877
+
878
+ # Format enum values if present
879
+ if "enum" in prop_schema:
880
+ enum_values = ", ".join(str(v) for v in prop_schema["enum"])
881
+ type_str = f"{type_str} (one of: {enum_values})"
882
+
883
+ # Build the line
884
+ line = f"{indent_str}{prop_name}: {type_str}{req_marker}"
885
+ if prop_desc:
886
+ line += f"\n{indent_str} └─ {prop_desc}"
887
+
888
+ lines.append(line)
889
+
890
+ # Recursively handle nested objects
891
+ if prop_type == "object" and "properties" in prop_schema:
892
+ nested = _format_schema_pretty(prop_schema, indent + 1)
893
+ if nested:
894
+ lines.append(nested)
895
+
896
+ # Handle simple types without properties
897
+ elif "type" in schema:
898
+ schema_type = schema["type"]
899
+ if schema.get("description"):
900
+ lines.append(f"{indent_str}Type: {schema_type}")
901
+ lines.append(f"{indent_str}└─ {schema['description']}")
902
+ else:
903
+ lines.append(f"{indent_str}Type: {schema_type}")
904
+
905
+ return "\n".join(lines)
906
+
907
+
908
+ def handle_list_mcp_tools_command(options: Namespace) -> None:
909
+ """
910
+ List available MCP tools from configuration.
911
+
912
+ Shows all MCP servers and their available tools, indicating which
913
+ are included/excluded based on filter configuration.
914
+ """
915
+ logger.debug(f"Listing MCP tools from configuration: {options.config}")
916
+
917
+ try:
918
+ from dao_ai.config import McpFunctionModel
919
+ from dao_ai.tools.mcp import MCPToolInfo, _matches_pattern, list_mcp_tools
920
+
921
+ # Load configuration
922
+ config: AppConfig = AppConfig.from_file(options.config)
923
+
924
+ # Find all MCP tools in configuration
925
+ mcp_tools_config: list[tuple[str, McpFunctionModel]] = []
926
+ if config.tools:
927
+ for tool_name, tool_model in config.tools.items():
928
+ logger.debug(
929
+ f"Checking tool: {tool_name}, function type: {type(tool_model.function)}"
930
+ )
931
+ if tool_model.function and isinstance(
932
+ tool_model.function, McpFunctionModel
933
+ ):
934
+ mcp_tools_config.append((tool_name, tool_model.function))
935
+
936
+ if not mcp_tools_config:
937
+ logger.warning("No MCP tools found in configuration")
938
+ print("\n⚠️ No MCP tools configured in this file.")
939
+ print(f" Configuration: {options.config}")
940
+ print(
941
+ "\nTo add MCP tools, define them in the 'tools' section with 'type: mcp'"
942
+ )
943
+ sys.exit(0)
944
+
945
+ # Collect all results first (aggregate before displaying)
946
+ results: list[dict[str, Any]] = []
947
+ for tool_name, mcp_function in mcp_tools_config:
948
+ result = {
949
+ "tool_name": tool_name,
950
+ "mcp_function": mcp_function,
951
+ "error": None,
952
+ "all_tools": [],
953
+ "included_tools": [],
954
+ "excluded_tools": [],
955
+ }
956
+
957
+ try:
958
+ logger.info(f"Connecting to MCP server: {mcp_function.mcp_url}")
959
+
960
+ # Get all available tools (unfiltered)
961
+ all_tools: list[MCPToolInfo] = list_mcp_tools(
962
+ mcp_function, apply_filters=False
963
+ )
964
+
965
+ # Get filtered tools (what will actually be loaded)
966
+ filtered_tools: list[MCPToolInfo] = list_mcp_tools(
967
+ mcp_function, apply_filters=True
968
+ )
969
+
970
+ included_names = {t.name for t in filtered_tools}
971
+
972
+ # Categorize tools
973
+ for tool in sorted(all_tools, key=lambda t: t.name):
974
+ if tool.name in included_names:
975
+ result["included_tools"].append(tool)
976
+ else:
977
+ # Determine why it was excluded
978
+ reason = ""
979
+ if mcp_function.exclude_tools:
980
+ if _matches_pattern(tool.name, mcp_function.exclude_tools):
981
+ matching_patterns = [
982
+ p
983
+ for p in mcp_function.exclude_tools
984
+ if _matches_pattern(tool.name, [p])
985
+ ]
986
+ reason = f" (matches exclude pattern: {', '.join(matching_patterns)})"
987
+ if not reason and mcp_function.include_tools:
988
+ reason = " (not in include list)"
989
+ result["excluded_tools"].append((tool, reason))
990
+
991
+ result["all_tools"] = all_tools
992
+
993
+ except KeyboardInterrupt:
994
+ result["error"] = "Connection interrupted by user"
995
+ results.append(result)
996
+ break
997
+ except Exception as e:
998
+ logger.error(f"Failed to list tools from MCP server: {e}")
999
+ result["error"] = str(e)
1000
+
1001
+ results.append(result)
1002
+
1003
+ # Now display all results at once (no logging interleaving)
1004
+ print(f"\n{'=' * 80}")
1005
+ print("MCP TOOLS DISCOVERY")
1006
+ print(f"Configuration: {options.config}")
1007
+ print(f"{'=' * 80}\n")
1008
+
1009
+ for result in results:
1010
+ tool_name = result["tool_name"]
1011
+ mcp_function = result["mcp_function"]
1012
+
1013
+ print(f"📦 Tool: {tool_name}")
1014
+ print(f" Server: {mcp_function.mcp_url}")
1015
+
1016
+ # Show connection type
1017
+ if mcp_function.connection:
1018
+ print(f" Connection: UC Connection '{mcp_function.connection.name}'")
1019
+ else:
1020
+ print(f" Transport: {mcp_function.transport.value}")
1021
+
1022
+ # Show filters if configured
1023
+ if mcp_function.include_tools or mcp_function.exclude_tools:
1024
+ print("\n Filters:")
1025
+ if mcp_function.include_tools:
1026
+ print(f" Include: {', '.join(mcp_function.include_tools)}")
1027
+ if mcp_function.exclude_tools:
1028
+ print(f" Exclude: {', '.join(mcp_function.exclude_tools)}")
1029
+
1030
+ # Check for errors
1031
+ if result["error"]:
1032
+ print(f"\n ❌ Error: {result['error']}")
1033
+ print(" Could not connect to MCP server")
1034
+ if result["error"] != "Connection interrupted by user":
1035
+ print(
1036
+ " Tip: Verify server URL, authentication, and network connectivity"
1037
+ )
1038
+ else:
1039
+ all_tools = result["all_tools"]
1040
+ included_tools = result["included_tools"]
1041
+ excluded_tools = result["excluded_tools"]
1042
+
1043
+ # Show stats based on --apply-filters flag
1044
+ if options.apply_filters:
1045
+ # Simplified view: only show filtered tools count
1046
+ print(
1047
+ f"\n Available Tools: {len(included_tools)} (after filters)"
1048
+ )
1049
+ else:
1050
+ # Full view: show all, included, and excluded counts
1051
+ print(f"\n Available Tools: {len(all_tools)} total")
1052
+ print(f" ├─ ✓ Included: {len(included_tools)}")
1053
+ print(f" └─ ✗ Excluded: {len(excluded_tools)}")
1054
+
1055
+ # Show included tools with FULL descriptions and schemas
1056
+ if included_tools:
1057
+ if options.apply_filters:
1058
+ print(f"\n Tools ({len(included_tools)}):")
1059
+ else:
1060
+ print(f"\n ✓ Included Tools ({len(included_tools)}):")
1061
+
1062
+ for tool in included_tools:
1063
+ print(f"\n • {tool.name}")
1064
+ if tool.description:
1065
+ # Show full description (no truncation)
1066
+ print(f" Description: {tool.description}")
1067
+ if tool.input_schema:
1068
+ # Pretty print schema in readable format
1069
+ print(" Parameters:")
1070
+ pretty_schema = _format_schema_pretty(
1071
+ tool.input_schema, indent=0
1072
+ )
1073
+ if pretty_schema:
1074
+ # Indent the schema for better readability
1075
+ for line in pretty_schema.split("\n"):
1076
+ print(f" {line}")
1077
+ else:
1078
+ print(" (none)")
1079
+
1080
+ # Show excluded tools only if NOT applying filters
1081
+ if excluded_tools and not options.apply_filters:
1082
+ print(f"\n ✗ Excluded Tools ({len(excluded_tools)}):")
1083
+ for tool, reason in excluded_tools:
1084
+ print(f" • {tool.name}{reason}")
1085
+
1086
+ print(f"\n{'-' * 80}\n")
1087
+
1088
+ # Summary
1089
+ print(f"{'=' * 80}")
1090
+ print(f"Summary: Found {len(mcp_tools_config)} MCP server(s)")
1091
+ print(f"{'=' * 80}\n")
1092
+
1093
+ sys.exit(0)
1094
+
1095
+ except FileNotFoundError:
1096
+ logger.error(f"Configuration file not found: {options.config}")
1097
+ print(f"\n❌ Error: Configuration file not found: {options.config}")
1098
+ sys.exit(1)
1099
+ except Exception as e:
1100
+ logger.error(f"Failed to list MCP tools: {e}")
1101
+ logger.debug(traceback.format_exc())
1102
+ print(f"\n❌ Error: {e}")
1103
+ sys.exit(1)
1104
+
1105
+
707
1106
  def setup_logging(verbosity: int) -> None:
708
1107
  levels: dict[int, str] = {
709
1108
  0: "ERROR",
@@ -766,6 +1165,7 @@ def run_databricks_command(
766
1165
  target: Optional[str] = None,
767
1166
  cloud: Optional[str] = None,
768
1167
  dry_run: bool = False,
1168
+ deployment_target: Optional[str] = None,
769
1169
  ) -> None:
770
1170
  """Execute a databricks CLI command with optional profile, target, and cloud.
771
1171
 
@@ -776,6 +1176,8 @@ def run_databricks_command(
776
1176
  target: Optional bundle target name (if not provided, auto-generated from app name and cloud)
777
1177
  cloud: Optional cloud provider ('azure', 'aws', 'gcp'). Auto-detected if not specified.
778
1178
  dry_run: If True, print the command without executing
1179
+ deployment_target: Optional agent deployment target ('model_serving' or 'apps').
1180
+ Passed to the deploy notebook via bundle variable.
779
1181
  """
780
1182
  config_path = Path(config) if config else None
781
1183
 
@@ -787,7 +1189,7 @@ def run_databricks_command(
787
1189
  app_config: AppConfig = AppConfig.from_file(config_path) if config_path else None
788
1190
  normalized_name: str = normalize_name(app_config.app.name) if app_config else None
789
1191
 
790
- # Auto-detect cloud provider if not specified
1192
+ # Auto-detect cloud provider if not specified (used for node_type selection)
791
1193
  if not cloud:
792
1194
  cloud = detect_cloud_provider(profile)
793
1195
  if cloud:
@@ -800,10 +1202,12 @@ def run_databricks_command(
800
1202
  if config_path and app_config:
801
1203
  generate_bundle_from_template(config_path, normalized_name)
802
1204
 
803
- # Use cloud as target (azure, aws, gcp) - can be overridden with explicit --target
1205
+ # Use app-specific cloud target: {app_name}-{cloud}
1206
+ # This ensures each app has unique deployment identity while supporting cloud-specific settings
1207
+ # Can be overridden with explicit --target
804
1208
  if not target:
805
- target = cloud
806
- logger.debug(f"Using cloud-based target: {target}")
1209
+ target = f"{normalized_name}-{cloud}"
1210
+ logger.info(f"Using app-specific cloud target: {target}")
807
1211
 
808
1212
  # Build databricks command
809
1213
  # --profile is a global flag, --target is a subcommand flag for 'bundle'
@@ -831,6 +1235,26 @@ def run_databricks_command(
831
1235
 
832
1236
  cmd.append(f'--var="config_path={relative_config}"')
833
1237
 
1238
+ # Add deployment_target variable for notebooks (hybrid resolution)
1239
+ # Priority: CLI arg > config file > default (model_serving)
1240
+ resolved_deployment_target: str = "model_serving"
1241
+ if deployment_target is not None:
1242
+ resolved_deployment_target = deployment_target
1243
+ logger.debug(
1244
+ f"Using CLI-specified deployment target: {resolved_deployment_target}"
1245
+ )
1246
+ elif app_config and app_config.app and app_config.app.deployment_target:
1247
+ # deployment_target is DeploymentTarget enum (str subclass) or string
1248
+ # str() works for both since DeploymentTarget inherits from str
1249
+ resolved_deployment_target = str(app_config.app.deployment_target)
1250
+ logger.debug(
1251
+ f"Using config file deployment target: {resolved_deployment_target}"
1252
+ )
1253
+ else:
1254
+ logger.debug("Using default deployment target: model_serving")
1255
+
1256
+ cmd.append(f'--var="deployment_target={resolved_deployment_target}"')
1257
+
834
1258
  logger.debug(f"Executing command: {' '.join(cmd)}")
835
1259
 
836
1260
  if dry_run:
@@ -873,6 +1297,7 @@ def handle_bundle_command(options: Namespace) -> None:
873
1297
  target: Optional[str] = options.target
874
1298
  cloud: Optional[str] = options.cloud
875
1299
  dry_run: bool = options.dry_run
1300
+ deployment_target: Optional[str] = options.deployment_target
876
1301
 
877
1302
  if options.deploy:
878
1303
  logger.info("Deploying DAO AI asset bundle...")
@@ -883,6 +1308,7 @@ def handle_bundle_command(options: Namespace) -> None:
883
1308
  target=target,
884
1309
  cloud=cloud,
885
1310
  dry_run=dry_run,
1311
+ deployment_target=deployment_target,
886
1312
  )
887
1313
  if options.run:
888
1314
  logger.info("Running DAO AI system with current configuration...")
@@ -894,6 +1320,7 @@ def handle_bundle_command(options: Namespace) -> None:
894
1320
  target=target,
895
1321
  cloud=cloud,
896
1322
  dry_run=dry_run,
1323
+ deployment_target=deployment_target,
897
1324
  )
898
1325
  if options.destroy:
899
1326
  logger.info("Destroying DAO AI system with current configuration...")
@@ -904,6 +1331,7 @@ def handle_bundle_command(options: Namespace) -> None:
904
1331
  target=target,
905
1332
  cloud=cloud,
906
1333
  dry_run=dry_run,
1334
+ deployment_target=deployment_target,
907
1335
  )
908
1336
  else:
909
1337
  logger.warning("No action specified. Use --deploy, --run or --destroy flags.")
@@ -925,6 +1353,8 @@ def main() -> None:
925
1353
  handle_deploy_command(options)
926
1354
  case "chat":
927
1355
  handle_chat_command(options)
1356
+ case "list-mcp-tools":
1357
+ handle_list_mcp_tools_command(options)
928
1358
  case _:
929
1359
  logger.error(f"Unknown command: {options.command}")
930
1360
  sys.exit(1)