datarobot-genai 0.2.30__py3-none-any.whl → 0.2.32__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (23)
  1. datarobot_genai/drmcp/__init__.py +2 -2
  2. datarobot_genai/drmcp/core/exceptions.py +0 -4
  3. datarobot_genai/drmcp/core/logging.py +2 -2
  4. datarobot_genai/drmcp/test_utils/clients/__init__.py +0 -0
  5. datarobot_genai/drmcp/test_utils/clients/anthropic.py +68 -0
  6. datarobot_genai/drmcp/test_utils/{openai_llm_mcp_client.py → clients/base.py} +38 -40
  7. datarobot_genai/drmcp/test_utils/clients/dr_gateway.py +58 -0
  8. datarobot_genai/drmcp/test_utils/clients/openai.py +68 -0
  9. datarobot_genai/drmcp/test_utils/test_interactive.py +16 -16
  10. datarobot_genai/drmcp/test_utils/tool_base_ete.py +1 -1
  11. datarobot_genai/drmcp/test_utils/utils.py +1 -1
  12. datarobot_genai/drmcp/tools/clients/gdrive.py +91 -0
  13. datarobot_genai/drmcp/tools/gdrive/tools.py +100 -1
  14. datarobot_genai/drmcp/tools/predictive/data.py +5 -5
  15. datarobot_genai/drmcp/tools/predictive/model.py +87 -52
  16. datarobot_genai/drmcp/tools/predictive/project.py +2 -2
  17. datarobot_genai/drmcp/tools/predictive/training.py +14 -14
  18. {datarobot_genai-0.2.30.dist-info → datarobot_genai-0.2.32.dist-info}/METADATA +1 -1
  19. {datarobot_genai-0.2.30.dist-info → datarobot_genai-0.2.32.dist-info}/RECORD +23 -19
  20. {datarobot_genai-0.2.30.dist-info → datarobot_genai-0.2.32.dist-info}/WHEEL +0 -0
  21. {datarobot_genai-0.2.30.dist-info → datarobot_genai-0.2.32.dist-info}/entry_points.txt +0 -0
  22. {datarobot_genai-0.2.30.dist-info → datarobot_genai-0.2.32.dist-info}/licenses/AUTHORS +0 -0
  23. {datarobot_genai-0.2.30.dist-info → datarobot_genai-0.2.32.dist-info}/licenses/LICENSE +0 -0

datarobot_genai/drmcp/__init__.py

@@ -19,11 +19,11 @@ A reusable library for building Model Context Protocol (MCP) servers with DataRo
  """
 
  # Export main server components
+ from datarobot_genai.drmcp.test_utils.clients.openai import OpenAILLMMCPClient
  from datarobot_genai.drmcp.test_utils.mcp_utils_ete import ete_test_mcp_session
  from datarobot_genai.drmcp.test_utils.mcp_utils_ete import get_dr_mcp_server_url
  from datarobot_genai.drmcp.test_utils.mcp_utils_ete import get_headers
  from datarobot_genai.drmcp.test_utils.mcp_utils_integration import integration_test_mcp_session
- from datarobot_genai.drmcp.test_utils.openai_llm_mcp_client import LLMMCPClient
  from datarobot_genai.drmcp.test_utils.tool_base_ete import ETETestExpectations
  from datarobot_genai.drmcp.test_utils.tool_base_ete import ToolBaseE2E
  from datarobot_genai.drmcp.test_utils.tool_base_ete import ToolCallTestExpectations
@@ -70,7 +70,7 @@ __all__ = [
  "get_dr_mcp_server_url",
  "get_headers",
  "ete_test_mcp_session",
- "LLMMCPClient",
+ "OpenAILLMMCPClient",
  "ETETestExpectations",
  "ToolBaseE2E",
  "ToolCallTestExpectations",

datarobot_genai/drmcp/core/exceptions.py

@@ -19,7 +19,3 @@ class DynamicToolRegistrationError(Exception):
 
  class DynamicPromptRegistrationError(Exception):
  """Exception raised for errors in the dynamic prompt registration process."""
-
-
- class MCPError(Exception):
- """Base class for MCP errors."""

datarobot_genai/drmcp/core/logging.py

@@ -20,7 +20,7 @@ from collections.abc import Callable
  from typing import Any
  from typing import TypeVar
 
- from .exceptions import MCPError
+ from fastmcp.exceptions import ToolError
 
  # Secret patterns to redact from logs
  SECRET_PATTERNS = [
@@ -93,6 +93,6 @@ def log_execution(func: F) -> F:
  return result
  except Exception as e:
  error_msg = _log_error(logger, func.__name__, e, args=args, kwargs=kwargs)
- raise MCPError(error_msg)
+ raise ToolError(error_msg)
 
  return wrapper # type: ignore[return-value]
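
Together with the removal of MCPError above, this changes the failure contract of log_execution-decorated tools: callers that previously caught MCPError should now catch fastmcp's ToolError. A self-contained sketch of that contract with a stand-in decorator (not the library's own implementation):

```python
# Stand-in mimicking the error path shown in the hunk above: exceptions raised
# inside a wrapped tool are re-raised as fastmcp.exceptions.ToolError.
import functools

from fastmcp.exceptions import ToolError


def log_execution_like(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except Exception as e:
            # The real decorator formats and redacts the message via _log_error().
            raise ToolError(f"{func.__name__} failed: {e}")
    return wrapper


@log_execution_like
def flaky_tool() -> str:
    raise ValueError("boom")


try:
    flaky_tool()
except ToolError as err:
    print(f"Caught tool error: {err}")
```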

datarobot_genai/drmcp/test_utils/clients/__init__.py

File without content changes (new, empty package marker).

datarobot_genai/drmcp/test_utils/clients/anthropic.py

@@ -0,0 +1,68 @@
+ # Copyright 2026 DataRobot, Inc.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ """Anthropic LLM MCP Client implementation (example).
+
+ This is an example implementation showing how easy it is to add a new LLM provider.
+ Anthropic's API is OpenAI-compatible, so we can use the OpenAI SDK with their endpoint.
+ """
+
+ import openai
+
+ from .base import BaseLLMMCPClient
+
+
+ class AnthropicMCPClient(BaseLLMMCPClient):
+ """
+ Client for interacting with LLMs via MCP using Anthropic Claude.
+
+ Note: Elicitation is handled at the protocol level by FastMCP's ctx.elicit().
+ Tools using FastMCP's built-in elicitation will work automatically.
+
+ Example:
+ ```python
+ config = {
+ "anthropic_api_key": "sk-ant-...",
+ "model": "claude-3-5-sonnet-20241022",
+ }
+ client = AnthropicMCPClient(str(config))
+ ```
+ """
+
+ def __init__(
+ self,
+ config: str | dict,
+ ):
+ """
+ Initialize the LLM MCP client.
+
+ Args:
+ config: Configuration string or dict with:
+ - anthropic_api_key: Anthropic API key
+ - model: Model name (default: "claude-3-5-sonnet-20241022")
+ - save_llm_responses: Whether to save responses (default: True)
+ """
+ super().__init__(config)
+
+ def _create_llm_client(self, config_dict: dict) -> tuple[openai.OpenAI, str]:
+ """Create the LLM client for Anthropic (OpenAI-compatible endpoint)."""
+ anthropic_api_key = config_dict.get("anthropic_api_key")
+ model = config_dict.get("model", "claude-3-5-sonnet-20241022")
+
+ # Anthropic provides an OpenAI-compatible endpoint
+ client = openai.OpenAI(
+ api_key=anthropic_api_key,
+ base_url="https://api.anthropic.com/v1",
+ )
+ return client, model

datarobot_genai/drmcp/test_utils/{openai_llm_mcp_client.py → clients/base.py}

@@ -1,4 +1,4 @@
- # Copyright 2025 DataRobot, Inc.
+ # Copyright 2026 DataRobot, Inc.
  #
  # Licensed under the Apache License, Version 2.0 (the "License");
  # you may not use this file except in compliance with the License.
@@ -12,7 +12,11 @@
  # See the License for the specific language governing permissions and
  # limitations under the License.
 
+ """Base classes for LLM MCP clients."""
+
  import json
+ from abc import ABC
+ from abc import abstractmethod
  from ast import literal_eval
  from typing import Any
 
@@ -23,7 +27,7 @@ from mcp.types import ListToolsResult
  from mcp.types import TextContent
  from openai.types.chat.chat_completion import ChatCompletion
 
- from .utils import save_response_to_file
+ from datarobot_genai.drmcp.test_utils.utils import save_response_to_file
 
 
  class ToolCall:
@@ -44,9 +48,9 @@ class LLMResponse:
  self.tool_results = tool_results
 
 
- class LLMMCPClient:
+ class BaseLLMMCPClient(ABC):
  """
- Client for interacting with LLMs via MCP.
+ Base class for LLM MCP clients.
 
  Note: Elicitation is handled at the protocol level by FastMCP's ctx.elicit().
  Tools using FastMCP's built-in elicitation will work automatically.
@@ -54,54 +58,48 @@ class LLMMCPClient:
 
  def __init__(
  self,
- config: str,
+ config: str | dict,
  ):
  """
  Initialize the LLM MCP client.
 
  Args:
- config: Configuration string or dict with:
- - openai_api_key: OpenAI API key
- - openai_api_base: Optional Azure OpenAI endpoint
- - openai_api_deployment_id: Optional Azure deployment ID
- - openai_api_version: Optional Azure API version
- - model: Model name (default: "gpt-3.5-turbo")
- - save_llm_responses: Whether to save responses (default: True)
+ config: Configuration string or dict with provider-specific keys.
  """
- # Parse config string to extract parameters
+ config_dict = self._parse_config(config)
+ self.openai_client, self.model = self._create_llm_client(config_dict)
+ self.save_llm_responses = config_dict.get("save_llm_responses", True)
+ self.available_tools: list[dict[str, Any]] = []
+ self.available_prompts: list[dict[str, Any]] = []
+ self.available_resources: list[dict[str, Any]] = []
+
+ @staticmethod
+ def _parse_config(config: str | dict) -> dict:
+ """Parse config string to dict."""
  if isinstance(config, str):
  # Try JSON first (safer), fall back to literal_eval for Python dict strings
  try:
- config_dict = json.loads(config)
+ return json.loads(config)
  except json.JSONDecodeError:
  # Fall back to literal_eval for Python dict literal strings
- config_dict = literal_eval(config)
- else:
- config_dict = config
-
- openai_api_key = config_dict.get("openai_api_key")
- openai_api_base = config_dict.get("openai_api_base")
- openai_api_deployment_id = config_dict.get("openai_api_deployment_id")
- model = config_dict.get("model", "gpt-3.5-turbo")
- save_llm_responses = config_dict.get("save_llm_responses", True)
-
- if openai_api_base and openai_api_deployment_id:
- # Azure OpenAI
- self.openai_client = openai.AzureOpenAI(
- api_key=openai_api_key,
- azure_endpoint=openai_api_base,
- api_version=config_dict.get("openai_api_version", "2024-02-15-preview"),
- )
- self.model = openai_api_deployment_id
- else:
- # Regular OpenAI
- self.openai_client = openai.OpenAI(api_key=openai_api_key) # type: ignore[assignment]
- self.model = model
+ return literal_eval(config)
+ return config
 
- self.save_llm_responses = save_llm_responses
- self.available_tools: list[dict[str, Any]] = []
- self.available_prompts: list[dict[str, Any]] = []
- self.available_resources: list[dict[str, Any]] = []
+ @abstractmethod
+ def _create_llm_client(
+ self, config_dict: dict
+ ) -> tuple[openai.OpenAI | openai.AzureOpenAI, str]:
+ """
+ Create the LLM client.
+
+ Args:
+ config_dict: Parsed configuration dictionary
+
+ Returns
+ -------
+ Tuple of (LLM client instance, model name)
+ """
+ pass
 
  async def _add_mcp_tool_to_available_tools(self, mcp_session: ClientSession) -> None:
  """Add a tool to the available tools."""

datarobot_genai/drmcp/test_utils/clients/dr_gateway.py

@@ -0,0 +1,58 @@
+ # Copyright 2026 DataRobot, Inc.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ """DataRobot LLM Gateway MCP Client implementation."""
+
+ import openai
+
+ from .base import BaseLLMMCPClient
+
+
+ class DRLLMGatewayMCPClient(BaseLLMMCPClient):
+ """
+ Client for interacting with LLMs via MCP using DataRobot LLM Gateway.
+
+ Note: Elicitation is handled at the protocol level by FastMCP's ctx.elicit().
+ Tools using FastMCP's built-in elicitation will work automatically.
+ """
+
+ def __init__(
+ self,
+ config: str | dict,
+ ):
+ """
+ Initialize the LLM MCP client.
+
+ Args:
+ config: Configuration string or dict with:
+ - datarobot_api_token: DataRobot API token
+ - datarobot_endpoint: DataRobot endpoint URL (default: "https://app.datarobot.com/api/v2")
+ - model: Model name (default: "gpt-4o-mini")
+ - save_llm_responses: Whether to save responses (default: True)
+ """
+ super().__init__(config)
+
+ def _create_llm_client(self, config_dict: dict) -> tuple[openai.OpenAI, str]:
+ """Create the LLM client for DataRobot LLM Gateway."""
+ datarobot_api_token = config_dict.get("datarobot_api_token")
+ datarobot_endpoint = config_dict.get(
+ "datarobot_endpoint", "https://app.datarobot.com/api/v2"
+ )
+ model = config_dict.get("model", "gpt-4o-mini")
+
+ # Build gateway URL: {endpoint}/genai/llmgw
+ gateway_url = datarobot_endpoint.rstrip("/") + "/genai/llmgw"
+
+ client = openai.OpenAI(api_key=datarobot_api_token, base_url=gateway_url)
+ return client, model
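
A construction sketch for the new gateway-backed client, mirroring the config keys documented in the class above (environment variable names follow test_interactive.py later in this diff; the token value is a placeholder):

```python
import os

from datarobot_genai.drmcp.test_utils.clients.dr_gateway import DRLLMGatewayMCPClient

config = {
    "datarobot_api_token": os.environ.get("DATAROBOT_API_TOKEN", "<your-token>"),
    "datarobot_endpoint": os.environ.get("DATAROBOT_ENDPOINT", "https://app.datarobot.com/api/v2"),
    "model": "gpt-4o-mini",
    "save_llm_responses": False,
}
# A dict works directly; str(config) also works, as in test_interactive.py.
client = DRLLMGatewayMCPClient(config)
```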

datarobot_genai/drmcp/test_utils/clients/openai.py

@@ -0,0 +1,68 @@
+ # Copyright 2026 DataRobot, Inc.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ """OpenAI LLM MCP Client implementation."""
+
+ import openai
+
+ from .base import BaseLLMMCPClient
+
+
+ class OpenAILLMMCPClient(BaseLLMMCPClient):
+ """
+ Client for interacting with LLMs via MCP using OpenAI or Azure OpenAI.
+
+ Note: Elicitation is handled at the protocol level by FastMCP's ctx.elicit().
+ Tools using FastMCP's built-in elicitation will work automatically.
+ """
+
+ def __init__(
+ self,
+ config: str | dict,
+ ):
+ """
+ Initialize the LLM MCP client.
+
+ Args:
+ config: Configuration string or dict with:
+ - openai_api_key: OpenAI API key
+ - openai_api_base: Optional Azure OpenAI endpoint
+ - openai_api_deployment_id: Optional Azure deployment ID
+ - openai_api_version: Optional Azure API version
+ - model: Model name (default: "gpt-3.5-turbo")
+ - save_llm_responses: Whether to save responses (default: True)
+ """
+ super().__init__(config)
+
+ def _create_llm_client(
+ self, config_dict: dict
+ ) -> tuple[openai.OpenAI | openai.AzureOpenAI, str]:
+ """Create the LLM client for OpenAI or Azure OpenAI."""
+ openai_api_key = config_dict.get("openai_api_key")
+ openai_api_base = config_dict.get("openai_api_base")
+ openai_api_deployment_id = config_dict.get("openai_api_deployment_id")
+ model = config_dict.get("model", "gpt-3.5-turbo")
+
+ if openai_api_base and openai_api_deployment_id:
+ # Azure OpenAI
+ client = openai.AzureOpenAI(
+ api_key=openai_api_key,
+ azure_endpoint=openai_api_base,
+ api_version=config_dict.get("openai_api_version", "2024-02-15-preview"),
+ )
+ return client, openai_api_deployment_id
+ else:
+ # Regular OpenAI
+ client = openai.OpenAI(api_key=openai_api_key) # type: ignore[assignment]
+ return client, model
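
The same class covers both plain OpenAI and Azure OpenAI; which branch of _create_llm_client runs depends on whether both openai_api_base and openai_api_deployment_id are present. A hedged sketch with placeholder values:

```python
from datarobot_genai.drmcp.test_utils.clients.openai import OpenAILLMMCPClient

# Plain OpenAI: only the API key (and optionally a model name) is needed.
plain = OpenAILLMMCPClient({"openai_api_key": "sk-...", "model": "gpt-4o-mini"})

# Azure OpenAI: endpoint + deployment id switch the client to openai.AzureOpenAI,
# and the deployment id is used as the model name.
azure = OpenAILLMMCPClient(
    {
        "openai_api_key": "<azure-key>",
        "openai_api_base": "https://my-resource.openai.azure.com",
        "openai_api_deployment_id": "my-gpt4o-deployment",
        "openai_api_version": "2024-02-15-preview",
    }
)
```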

datarobot_genai/drmcp/test_utils/test_interactive.py

@@ -40,40 +40,40 @@ from mcp.types import ElicitResult
 
  from datarobot_genai.drmcp import get_dr_mcp_server_url
  from datarobot_genai.drmcp import get_headers
- from datarobot_genai.drmcp.test_utils.openai_llm_mcp_client import LLMMCPClient
- from datarobot_genai.drmcp.test_utils.openai_llm_mcp_client import LLMResponse
- from datarobot_genai.drmcp.test_utils.openai_llm_mcp_client import ToolCall
+ from datarobot_genai.drmcp.test_utils.clients.base import LLMResponse
+ from datarobot_genai.drmcp.test_utils.clients.base import ToolCall
+ from datarobot_genai.drmcp.test_utils.clients.dr_gateway import DRLLMGatewayMCPClient
 
  # Re-export for backwards compatibility
- __all__ = ["LLMMCPClient", "LLMResponse", "ToolCall", "test_mcp_interactive"]
+ __all__ = ["DRLLMGatewayMCPClient", "LLMResponse", "ToolCall", "test_mcp_interactive"]
 
 
  async def test_mcp_interactive() -> None:
  """Test the MCP server interactively with LLM agent."""
  # Check for required environment variables
- openai_api_key = os.environ.get("OPENAI_API_KEY")
- if not openai_api_key:
- print("❌ Error: OPENAI_API_KEY environment variable is required")
+ datarobot_api_token = os.environ.get("DATAROBOT_API_TOKEN")
+ if not datarobot_api_token:
+ print("❌ Error: DATAROBOT_API_TOKEN environment variable is required")
  print("Please set it in your .env file or export it")
  return
 
- # Optional Azure OpenAI settings
- openai_api_base = os.environ.get("OPENAI_API_BASE")
- openai_api_deployment_id = os.environ.get("OPENAI_API_DEPLOYMENT_ID")
- openai_api_version = os.environ.get("OPENAI_API_VERSION")
+ # Optional DataRobot settings
+ datarobot_endpoint = os.environ.get("DATAROBOT_ENDPOINT")
+ model = os.environ.get("MODEL")
 
  print("🤖 Initializing LLM MCP Client...")
 
  # Initialize the LLM client with elicitation handler
  config = {
- "openai_api_key": openai_api_key,
- "openai_api_base": openai_api_base,
- "openai_api_deployment_id": openai_api_deployment_id,
- "openai_api_version": openai_api_version,
+ "datarobot_api_token": datarobot_api_token,
  "save_llm_responses": False,
  }
+ if datarobot_endpoint:
+ config["datarobot_endpoint"] = datarobot_endpoint
+ if model:
+ config["model"] = model
 
- llm_client = LLMMCPClient(str(config))
+ llm_client = DRLLMGatewayMCPClient(str(config))
 
  # Get MCP server URL
  mcp_server_url = get_dr_mcp_server_url()
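
After this change the interactive test runs against the DataRobot LLM Gateway: only DATAROBOT_API_TOKEN is required, while DATAROBOT_ENDPOINT and MODEL stay optional. A hypothetical driver script, assuming the module path shown in the RECORD below:

```python
import asyncio
import os

from datarobot_genai.drmcp.test_utils.test_interactive import test_mcp_interactive

# Placeholder token; in practice this comes from your .env or shell environment.
os.environ.setdefault("DATAROBOT_API_TOKEN", "<your-token>")

asyncio.run(test_mcp_interactive())
```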

datarobot_genai/drmcp/test_utils/tool_base_ete.py

@@ -17,7 +17,7 @@ from typing import Any
 
  from pydantic import BaseModel
 
- from .openai_llm_mcp_client import LLMResponse
+ from .clients.base import LLMResponse
 
 
  class ToolCallTestExpectations(BaseModel):

datarobot_genai/drmcp/test_utils/utils.py

@@ -19,7 +19,7 @@ from typing import TYPE_CHECKING
  from typing import Any
 
  if TYPE_CHECKING:
- from .openai_llm_mcp_client import LLMResponse
+ from .clients.base import LLMResponse
 
  from dotenv import load_dotenv
 

datarobot_genai/drmcp/tools/clients/gdrive.py

@@ -20,6 +20,7 @@ import logging
  import uuid
  from typing import Annotated
  from typing import Any
+ from typing import Literal
 
  import httpx
  from datarobot.auth.datarobot.exceptions import OAuthServiceClientErr
@@ -821,6 +822,96 @@ class GoogleDriveClient:
  headers={"Content-Type": f"multipart/related; boundary={boundary}"},
  )
 
+ async def manage_access(
+ self,
+ *,
+ file_id: str,
+ action: Literal["add", "update", "remove"],
+ role: Literal["reader", "commenter", "writer", "fileOrganizer", "organizer", "owner"]
+ | None = None,
+ email_address: str | None = None,
+ permission_id: str | None = None,
+ transfer_ownership: bool = False,
+ ) -> str:
+ """Manage access permissions for a Google Drive file or folder.
+
+ Adds, updates, or removes sharing permissions on an existing Google Drive
+ file or folder using the Google Drive Permissions API.
+
+ This method supports granting access to users or groups, changing access
+ roles, and revoking permissions. Ownership transfer is supported for files
+ in "My Drive" when explicitly requested.
+
+ Args:
+ file_id: The ID of the Google Drive file or folder whose permissions
+ are being managed.
+ action: The permission operation to perform.
+ role: The access role to assign or update. Valid values include
+ Required for "add" and "update" actions.
+ email_address: The email address of the user or group to grant access to.
+ Required for the "add" action.
+ permission_id: The ID of the permission to update or remove.
+ Required for "update" and "remove" actions.
+ transfer_ownership: Whether to transfer ownership of the file.
+ Only applicable when action="update" and role="owner".
+
+ Returns
+ -------
+ Permission id.
+ For "add" its newly added permission.
+ For "update"/"remove" its previous permission.
+
+ Raises
+ ------
+ GoogleDriveError: If the permission operation fails (invalid arguments,
+ insufficient permissions, resource not found, ownership transfer
+ not allowed, rate limited, etc.).
+ """
+ if not file_id.strip():
+ raise GoogleDriveError("Argument validation error: 'file_id' cannot be empty.")
+
+ if action == "add" and not email_address:
+ raise GoogleDriveError("'email_address' is required for action 'add'.")
+
+ if action in ("update", "remove") and not permission_id:
+ raise GoogleDriveError("'permission_id' is required for action 'update' or 'remove'.")
+
+ if action != "remove" and not role:
+ raise GoogleDriveError("'role' is required for action 'add' or 'update'.")
+
+ if action == "add":
+ response = await self._client.post(
+ url=f"/{file_id}/permissions",
+ json={
+ "type": "user",
+ "role": role,
+ "emailAddress": email_address,
+ },
+ params={"sendNotificationEmail": False, "supportsAllDrives": True},
+ )
+
+ elif action == "update":
+ response = await self._client.patch(
+ url=f"/{file_id}/permissions/{permission_id}",
+ json={"role": role},
+ params={"transferOwnership": transfer_ownership, "supportsAllDrives": True},
+ )
+
+ elif action == "remove":
+ response = await self._client.delete(url=f"/{file_id}/permissions/{permission_id}")
+
+ else:
+ raise GoogleDriveError(f"Invalid action '{action}'")
+
+ if response.status_code not in (200, 201, 204):
+ raise GoogleDriveError(f"Drive API error {response.status_code}: {response.text}")
+
+ if action == "add":
+ return response.json()["id"]
+
+ # Cannot be null here because of above validators
+ return permission_id # type: ignore
+
  async def __aenter__(self) -> "GoogleDriveClient":
  """Async context manager entry."""
  return self
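
A usage sketch for the new GoogleDriveClient.manage_access API shown above (the access token, file id, and email are placeholders; the constructor signature follows the gdrive tool code later in this diff):

```python
from datarobot_genai.drmcp.tools.clients.gdrive import GoogleDriveClient


async def share_file(access_token: str) -> str:
    # The client is an async context manager, as shown by __aenter__ above.
    async with GoogleDriveClient(access_token) as client:
        # Grant read access; for action="add" the newly created permission id is returned.
        return await client.manage_access(
            file_id="SomeFileId",
            action="add",
            role="reader",
            email_address="dummy@user.com",
        )

# Drive it with e.g. asyncio.run(share_file("<oauth-access-token>")).
```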

datarobot_genai/drmcp/tools/gdrive/tools.py

@@ -16,6 +16,7 @@
 
  import logging
  from typing import Annotated
+ from typing import Literal
 
  from fastmcp.exceptions import ToolError
  from fastmcp.tools.tool import ToolResult
@@ -33,7 +34,9 @@ from datarobot_genai.drmcp.tools.clients.gdrive import get_gdrive_access_token
  logger = logging.getLogger(__name__)
 
 
- @dr_mcp_tool(tags={"google", "gdrive", "list", "search", "files", "find", "contents"})
+ @dr_mcp_tool(
+ tags={"google", "gdrive", "list", "search", "files", "find", "contents"}, enabled=False
+ )
  async def gdrive_find_contents(
  *,
  page_size: Annotated[
@@ -345,3 +348,99 @@ async def gdrive_update_metadata(
  content=f"Successfully updated file '{updated_file.name}': {changes_description}.",
  structured_content=updated_file.as_flat_dict(),
  )
+
+
+ @dr_mcp_tool(tags={"google", "gdrive", "manage", "access", "acl"})
+ async def gdrive_manage_access(
+ *,
+ file_id: Annotated[str, "The ID of the file or folder."],
+ action: Annotated[Literal["add", "update", "remove"], "The operation to perform."],
+ role: Annotated[
+ Literal["reader", "commenter", "writer", "fileOrganizer", "organizer", "owner"] | None,
+ "The access level.",
+ ] = None,
+ email_address: Annotated[
+ str | None, "The email of the user or group (required for 'add')."
+ ] = None,
+ permission_id: Annotated[
+ str | None, "The specific permission ID (required for 'update' or 'remove')."
+ ] = None,
+ transfer_ownership: Annotated[
+ bool, "Whether to transfer ownership (only for 'update' to 'owner' role)."
+ ] = False,
+ ) -> ToolResult:
+ """
+ Consolidated tool for sharing files and managing permissions.
+ Pushes all logic to the Google Drive API permissions resource (create, update, delete).
+
+ Usage:
+ - Add role: gdrive_manage_access(
+ file_id="SomeFileId",
+ action="add",
+ role="reader",
+ email_address="dummy@user.com"
+ )
+ - Update role: gdrive_manage_access(
+ file_id="SomeFileId",
+ action="update",
+ role="reader",
+ permission_id="SomePermissionId"
+ )
+ - Remove permission: gdrive_manage_access(
+ file_id="SomeFileId",
+ action="remove",
+ permission_id="SomePermissionId"
+ )
+ """
+ if not file_id or not file_id.strip():
+ raise ToolError("Argument validation error: 'file_id' cannot be empty.")
+
+ if action == "add" and not email_address:
+ raise ToolError("'email_address' is required for action 'add'.")
+
+ if action in ("update", "remove") and not permission_id:
+ raise ToolError("'permission_id' is required for action 'update' or 'remove'.")
+
+ if action != "remove" and not role:
+ raise ToolError("'role' is required for action 'add' or 'update'.")
+
+ access_token = await get_gdrive_access_token()
+ if isinstance(access_token, ToolError):
+ raise access_token
+
+ try:
+ async with GoogleDriveClient(access_token) as client:
+ permission_id = await client.manage_access(
+ file_id=file_id,
+ action=action,
+ role=role,
+ email_address=email_address,
+ permission_id=permission_id,
+ transfer_ownership=transfer_ownership,
+ )
+ except GoogleDriveError as e:
+ logger.error(f"Google Drive permission operation failed: {e}")
+ raise ToolError(str(e))
+ except Exception as e:
+ logger.error(f"Unexpected error changing permissions for Google Drive file {file_id}: {e}")
+ raise ToolError(
+ f"Unexpected error changing permissions for Google Drive file {file_id}: {str(e)}"
+ )
+
+ # Build response
+ structured_content = {"affectedFileId": file_id}
+ if action == "add":
+ content = (
+ f"Successfully added role '{role}' for '{email_address}' for gdrive file '{file_id}'. "
+ f"New permission id '{permission_id}'."
+ )
+ structured_content["newPermissionId"] = permission_id
+ elif action == "update":
+ content = (
+ f"Successfully updated role '{role}' (permission '{permission_id}') "
+ f"for gdrive file '{file_id}'."
+ )
+ else: # action == "remove":
+ content = f"Successfully removed permission '{permission_id}' for gdrive file '{file_id}'."
+
+ return ToolResult(content=content, structured_content=structured_content)

datarobot_genai/drmcp/tools/predictive/data.py

@@ -35,9 +35,9 @@ async def upload_dataset_to_ai_catalog(
  ) -> ToolError | ToolResult:
  """Upload a dataset to the DataRobot AI Catalog / Data Registry."""
  if not file_path and not file_url:
- return ToolError("Either file_path or file_url must be provided.")
+ raise ToolError("Either file_path or file_url must be provided.")
  if file_path and file_url:
- return ToolError("Please provide either file_path or file_url, not both.")
+ raise ToolError("Please provide either file_path or file_url, not both.")
 
  # Get client
  client = get_sdk_client()
@@ -47,17 +47,17 @@ async def upload_dataset_to_ai_catalog(
  # Does file exist?
  if not os.path.exists(file_path):
  logger.error("File not found: %s", file_path)
- return ToolError(f"File not found: {file_path}")
+ raise ToolError(f"File not found: {file_path}")
  catalog_item = client.Dataset.create_from_file(file_path)
  else:
  # Does URL exist?
  if file_url is None or not is_valid_url(file_url):
  logger.error("Invalid file URL: %s", file_url)
- return ToolError(f"Invalid file URL: {file_url}")
+ raise ToolError(f"Invalid file URL: {file_url}")
  catalog_item = client.Dataset.create_from_url(file_url)
 
  if not catalog_item:
- return ToolError("Failed to upload dataset.")
+ raise ToolError("Failed to upload dataset.")
 
  return ToolResult(
  content=f"Successfully uploaded dataset: {catalog_item.id}",

datarobot_genai/drmcp/tools/predictive/model.py

@@ -14,9 +14,12 @@
 
  import json
  import logging
+ from typing import Annotated
  from typing import Any
 
  from datarobot.models.model import Model
+ from fastmcp.exceptions import ToolError
+ from fastmcp.tools.tool import ToolResult
 
  from datarobot_genai.drmcp.core.clients import get_sdk_client
  from datarobot_genai.drmcp.core.mcp_instance import dr_mcp_tool
@@ -50,33 +53,25 @@ class ModelEncoder(json.JSONEncoder):
  return super().default(obj)
 
 
- @dr_mcp_tool(tags={"model", "management", "info"})
- async def get_best_model(project_id: str, metric: str | None = None) -> str:
- """
- Get the best model for a DataRobot project, optionally by a specific metric.
+ @dr_mcp_tool(tags={"predictive", "model", "read", "management", "info"})
+ async def get_best_model(
+ *,
+ project_id: Annotated[str, "The DataRobot project ID"] | None = None,
+ metric: Annotated[str, "The metric to use for best model selection (e.g., 'AUC', 'LogLoss')"]
+ | None = None,
+ ) -> ToolError | ToolResult:
+ """Get the best model for a DataRobot project, optionally by a specific metric."""
+ if not project_id:
+ raise ToolError("Project ID must be provided")
 
- Args:
- project_id: The ID of the DataRobot project.
- metric: (Optional) The metric to use for best model selection (e.g., 'AUC', 'LogLoss').
-
- Returns
- -------
- A formatted string describing the best model.
-
- Raises
- ------
- Exception: If project not found or no models exist in the project.
- """
  client = get_sdk_client()
  project = client.Project.get(project_id)
  if not project:
- logger.error(f"Project with ID {project_id} not found")
- raise Exception(f"Project with ID {project_id} not found.")
+ raise ToolError(f"Project with ID {project_id} not found.")
 
  leaderboard = project.get_models()
  if not leaderboard:
- logger.info(f"No models found for project {project_id}")
- raise Exception("No models found for this project.")
+ raise ToolError("No models found for this project.")
 
  if metric:
  reverse_sort = metric.upper() in [
@@ -98,51 +93,91 @@ async def get_best_model(project_id: str, metric: str | None = None) -> str:
  best_model = leaderboard[0]
  logger.info(f"Found best model {best_model.id} for project {project_id}")
 
- # Format the response as a human-readable string
  metric_info = ""
+ metric_value = None
+
  if metric and best_model.metrics and metric in best_model.metrics:
  metric_value = best_model.metrics[metric].get("validation")
  if metric_value is not None:
  metric_info = f" with {metric}: {metric_value:.2f}"
 
- return f"Best model: {best_model.model_type}{metric_info}"
-
-
- @dr_mcp_tool(tags={"model", "prediction", "scoring"})
- async def score_dataset_with_model(project_id: str, model_id: str, dataset_url: str) -> str:
- """
- Score a dataset using a specific DataRobot model.
+ # Include full metrics in the response
+ best_model_dict = model_to_dict(best_model)
+ best_model_dict["metric"] = metric
+ best_model_dict["metric_value"] = metric_value
+
+ # Format metrics for human-readable content
+ metrics_text = ""
+ if best_model.metrics:
+ metrics_list = []
+ for metric_name, metric_data in best_model.metrics.items():
+ if isinstance(metric_data, dict) and "validation" in metric_data:
+ val = metric_data["validation"]
+ if val is not None:
+ metrics_list.append(f"{metric_name}: {val:.4f}")
+ if metrics_list:
+ metrics_text = "\nPerformance metrics:\n" + "\n".join(f" - {m}" for m in metrics_list)
+
+ return ToolResult(
+ content=f"Best model: {best_model.model_type}{metric_info}{metrics_text}",
+ structured_content={
+ "project_id": project_id,
+ "best_model": best_model_dict,
+ },
+ )
+
+
+ @dr_mcp_tool(tags={"predictive", "model", "read", "scoring", "dataset"})
+ async def score_dataset_with_model(
+ *,
+ project_id: Annotated[str, "The DataRobot project ID"] | None = None,
+ model_id: Annotated[str, "The DataRobot model ID"] | None = None,
+ dataset_url: Annotated[str, "The dataset URL"] | None = None,
+ ) -> ToolError | ToolResult:
+ """Score a dataset using a specific DataRobot model."""
+ if not project_id:
+ raise ToolError("Project ID must be provided")
+ if not model_id:
+ raise ToolError("Model ID must be provided")
+ if not dataset_url:
+ raise ToolError("Dataset URL must be provided")
 
- Args:
- project_id: The ID of the DataRobot project.
- model_id: The ID of the DataRobot model to use for scoring.
- dataset_url: The URL to the dataset to score (must be accessible to DataRobot).
-
- Returns
- -------
- A string summary of the scoring job or a meaningful error message.
- """
  client = get_sdk_client()
  project = client.Project.get(project_id)
  model = client.Model.get(project, model_id)
  job = model.score(dataset_url)
- logger.info(f"Started scoring job {job.id} for model {model_id}")
- return f"Scoring job started: {job.id}"
-
 
- @dr_mcp_tool(tags={"model", "management", "list"})
- async def list_models(project_id: str) -> str:
- """
- List all models in a project.
+ return ToolResult(
+ content=f"Scoring job started: {job.id}",
+ structured_content={
+ "scoring_job_id": job.id,
+ "project_id": project_id,
+ "model_id": model_id,
+ "dataset_url": dataset_url,
+ },
+ )
+
+
+ @dr_mcp_tool(tags={"predictive", "model", "read", "management", "list"})
+ async def list_models(
+ *,
+ project_id: Annotated[str, "The DataRobot project ID"] | None = None,
+ ) -> ToolError | ToolResult:
+ """List all models in a project."""
+ if not project_id:
+ raise ToolError("Project ID must be provided")
 
- Args:
- project_id: The ID of the DataRobot project.
-
- Returns
- -------
- A string summary of the models in the project.
- """
  client = get_sdk_client()
  project = client.Project.get(project_id)
  models = project.get_models()
- return json.dumps(models, indent=2, cls=ModelEncoder)
+
+ return ToolResult(
+ content=(
+ f"Found {len(models)} models in project {project_id}, here are the details:\n"
+ f"{json.dumps(models, indent=2, cls=ModelEncoder)}"
+ ),
+ structured_content={
+ "project_id": project_id,
+ "models": [model_to_dict(model) for model in models],
+ },
+ )
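
A worked example of the new metrics formatting in get_best_model, using the same loop as the hunk above with a made-up metrics dict shaped like DataRobot's Model.metrics:

```python
metrics = {
    "AUC": {"validation": 0.9312, "crossValidation": None},
    "LogLoss": {"validation": 0.2471},
    "Gini Norm": {"validation": None},  # skipped: no validation value
}

metrics_list = []
for metric_name, metric_data in metrics.items():
    if isinstance(metric_data, dict) and "validation" in metric_data:
        val = metric_data["validation"]
        if val is not None:
            metrics_list.append(f"{metric_name}: {val:.4f}")

metrics_text = "\nPerformance metrics:\n" + "\n".join(f" - {m}" for m in metrics_list)
print(metrics_text)
# (blank line)
# Performance metrics:
#  - AUC: 0.9312
#  - LogLoss: 0.2471
```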

datarobot_genai/drmcp/tools/predictive/project.py

@@ -54,9 +54,9 @@ async def get_project_dataset_by_name(
  The dataset ID and the dataset type (source or prediction) as a string, or an error message.
  """
  if not project_id:
- return ToolError("Project ID is required.")
+ raise ToolError("Project ID is required.")
  if not dataset_name:
- return ToolError("Dataset name is required.")
+ raise ToolError("Dataset name is required.")
 
  client = get_sdk_client()
  project = client.Project.get(project_id)

datarobot_genai/drmcp/tools/predictive/training.py

@@ -63,7 +63,7 @@ async def analyze_dataset(
  ) -> ToolError | ToolResult:
  """Analyze a dataset to understand its structure and potential use cases."""
  if not dataset_id:
- return ToolError("Dataset ID must be provided")
+ raise ToolError("Dataset ID must be provided")
 
  client = get_sdk_client()
  dataset = client.Dataset.get(dataset_id)
@@ -116,7 +116,7 @@ async def suggest_use_cases(
  ) -> ToolError | ToolResult:
  """Analyze a dataset and suggest potential machine learning use cases."""
  if not dataset_id:
- return ToolError("Dataset ID must be provided")
+ raise ToolError("Dataset ID must be provided")
 
  client = get_sdk_client()
  dataset = client.Dataset.get(dataset_id)
@@ -148,7 +148,7 @@ async def get_exploratory_insights(
  ) -> ToolError | ToolResult:
  """Generate exploratory data insights for a dataset."""
  if not dataset_id:
- return ToolError("Dataset ID must be provided")
+ raise ToolError("Dataset ID must be provided")
 
  client = get_sdk_client()
  dataset = client.Dataset.get(dataset_id)
@@ -481,9 +481,9 @@ async def start_autopilot(
 
  if not project_id:
  if not dataset_url and not dataset_id:
- return ToolError("Either dataset_url or dataset_id must be provided")
+ raise ToolError("Either dataset_url or dataset_id must be provided")
  if dataset_url and dataset_id:
- return ToolError("Please provide either dataset_url or dataset_id, not both")
+ raise ToolError("Please provide either dataset_url or dataset_id, not both")
 
  if dataset_url:
  dataset = client.Dataset.create_from_url(dataset_url)
@@ -497,7 +497,7 @@ async def start_autopilot(
  project = client.Project.get(project_id)
 
  if not target:
- return ToolError("Target variable must be specified")
+ raise ToolError("Target variable must be specified")
 
  try:
  # Start modeling
@@ -517,7 +517,7 @@ async def start_autopilot(
  )
 
  except Exception as e:
- return ToolError(
+ raise ToolError(
  content=json.dumps(
  {
  "error": f"Failed to start Autopilot: {str(e)}",
@@ -546,9 +546,9 @@ async def get_model_roc_curve(
  ) -> ToolError | ToolResult:
  """Get detailed ROC curve for a specific model."""
  if not project_id:
- return ToolError("Project ID must be provided")
+ raise ToolError("Project ID must be provided")
  if not model_id:
- return ToolError("Model ID must be provided")
+ raise ToolError("Model ID must be provided")
 
  client = get_sdk_client()
  project = client.Project.get(project_id)
@@ -587,7 +587,7 @@ async def get_model_roc_curve(
  structured_content={"data": roc_data},
  )
  except Exception as e:
- return ToolError(f"Failed to get ROC curve: {str(e)}")
+ raise ToolError(f"Failed to get ROC curve: {str(e)}")
 
 
  @dr_mcp_tool(tags={"predictive", "training", "read", "model", "evaluation"})
@@ -598,9 +598,9 @@ async def get_model_feature_impact(
  ) -> ToolError | ToolResult:
  """Get detailed feature impact for a specific model."""
  if not project_id:
- return ToolError("Project ID must be provided")
+ raise ToolError("Project ID must be provided")
  if not model_id:
- return ToolError("Model ID must be provided")
+ raise ToolError("Model ID must be provided")
 
  client = get_sdk_client()
  project = client.Project.get(project_id)
@@ -631,9 +631,9 @@ async def get_model_lift_chart(
  ) -> ToolError | ToolResult:
  """Get detailed lift chart for a specific model."""
  if not project_id:
- return ToolError("Project ID must be provided")
+ raise ToolError("Project ID must be provided")
  if not model_id:
- return ToolError("Model ID must be provided")
+ raise ToolError("Model ID must be provided")
 
  client = get_sdk_client()
  project = client.Project.get(project_id)

{datarobot_genai-0.2.30.dist-info → datarobot_genai-0.2.32.dist-info}/METADATA

@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: datarobot-genai
- Version: 0.2.30
+ Version: 0.2.32
  Summary: Generic helpers for GenAI
  Project-URL: Homepage, https://github.com/datarobot-oss/datarobot-genai
  Author: DataRobot, Inc.

{datarobot_genai-0.2.30.dist-info → datarobot_genai-0.2.32.dist-info}/RECORD

@@ -22,7 +22,7 @@ datarobot_genai/crewai/agent.py,sha256=vp8_2LExpeLls7Fpzo0R6ud5I6Ryfu3n3oVTN4Yyi
  datarobot_genai/crewai/base.py,sha256=JLljEN7sj8zaH8OamYoevFBZzza5BjZ4f0CGHRp2jUU,6447
  datarobot_genai/crewai/events.py,sha256=K67bO1zwPrxmppz2wh8dFGNbVebyWGXAMD7oodFE2sQ,5462
  datarobot_genai/crewai/mcp.py,sha256=AJTrs-8KdiRSjRECfBT1lJOsszWMoFoN9NIa1p5_wsM,2115
- datarobot_genai/drmcp/__init__.py,sha256=JE83bfpGU7v77VzrDdlb0l8seM5OwUsUbaQErJ2eisc,2983
+ datarobot_genai/drmcp/__init__.py,sha256=WyKW7p77kS63EsmuW8cnZpqgcxCzIv1TmRPo2qo-Z8A,2988
  datarobot_genai/drmcp/server.py,sha256=KE4kjS5f9bfdYftG14HBHrfvxDfCD4pwCXePfvl1OvU,724
  datarobot_genai/drmcp/core/__init__.py,sha256=y4yapzp3KnFMzSR6HlNDS4uSuyNT7I1iPBvaCLsS0sU,577
  datarobot_genai/drmcp/core/auth.py,sha256=E-5wrGbBFEBlD5377g6Exddrc7HsazamwX8tWr2RLXY,5815
@@ -33,8 +33,8 @@ datarobot_genai/drmcp/core/constants.py,sha256=lUwoW_PTrbaBGqRJifKqCn3EoFacoEgdO
  datarobot_genai/drmcp/core/credentials.py,sha256=PYEUDNMVw1BoMzZKLkPVTypNkVevEPtmk3scKnE-zYg,6706
  datarobot_genai/drmcp/core/dr_mcp_server.py,sha256=czcjbwhZAeW9EtG_Bys0GARPOuQulstkiU7FG48Q9bg,14118
  datarobot_genai/drmcp/core/dr_mcp_server_logo.py,sha256=hib-nfR1SNTW6CnpFsFCkL9H_OMwa4YYyinV7VNOuLk,4708
- datarobot_genai/drmcp/core/exceptions.py,sha256=eqsGI-lxybgvWL5w4BFhbm3XzH1eU5tetwjnhJxelpc,905
- datarobot_genai/drmcp/core/logging.py,sha256=Y_hig4eBWiXGaVV7B_3wBcaYVRNH4ydptbEQhrP9-mY,3414
+ datarobot_genai/drmcp/core/exceptions.py,sha256=9zoNh5ph6QihWIYuw37ljZ73_iUfy38YVYyFSnEwivc,839
+ datarobot_genai/drmcp/core/logging.py,sha256=rnUkws0vIDy_uLevwNj-wgA9uijW1Go774JPCrG0Yfw,3423
  datarobot_genai/drmcp/core/mcp_instance.py,sha256=nt4gOlAQklMcqmohRIKovYcyhgLdb08NHMo28DBYmOk,18362
  datarobot_genai/drmcp/core/routes.py,sha256=dqE2M0UzAyyN9vQjlyTjYW4rpju3LT039po5weuO__I,17936
  datarobot_genai/drmcp/core/routes_utils.py,sha256=vSseXWlplMSnRgoJgtP_rHxWSAVYcx_tpTv4lyTpQoc,944
@@ -70,35 +70,39 @@ datarobot_genai/drmcp/test_utils/elicitation_test_tool.py,sha256=UVKwy39nl3XcVAh
  datarobot_genai/drmcp/test_utils/integration_mcp_server.py,sha256=YSk19tbaka_0ziqi7LoXie4SJs-cvi9-H00Go0ZtQWE,3575
  datarobot_genai/drmcp/test_utils/mcp_utils_ete.py,sha256=46rH0fYYmUj7ygf968iRbdSp5u95v23BEw3Ts_c431Y,4788
  datarobot_genai/drmcp/test_utils/mcp_utils_integration.py,sha256=sHA_BWtpgIAFp9IXiNkUeBartBMjLAauqkV9bYtCr-g,3874
- datarobot_genai/drmcp/test_utils/openai_llm_mcp_client.py,sha256=YgyqHK09MB-PBaqT34heqvmvYYFtLpzzSJt7xuTJmDg,11224
- datarobot_genai/drmcp/test_utils/test_interactive.py,sha256=guXvR8q2H6VUdmvIjEJcElQJCC6lQ-oTrzbD2EkHeCs,8025
- datarobot_genai/drmcp/test_utils/tool_base_ete.py,sha256=3yMfOsz3LdHYywuE5BhdJDpTUowx37HsFSsMdBTxA80,9337
- datarobot_genai/drmcp/test_utils/utils.py,sha256=esGKFv8aO31-Qg3owayeWp32BYe1CdYOEutjjdbweCw,3048
+ datarobot_genai/drmcp/test_utils/test_interactive.py,sha256=KAScFT65GUkOxuiiBcjli8HHvV1NusVN01nOib3xVCc,7939
+ datarobot_genai/drmcp/test_utils/tool_base_ete.py,sha256=2RvVmwHYczl7F6qZHkKYiI77IoL-PaMT3y59t0aQtTE,9328
+ datarobot_genai/drmcp/test_utils/utils.py,sha256=JF2W9J4Q8pCqro7dj_bHObHNP7dfybDXesTLFOUsIVM,3039
+ datarobot_genai/drmcp/test_utils/clients/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ datarobot_genai/drmcp/test_utils/clients/anthropic.py,sha256=qFhLvLZHMpZa2tZwI8pZQaaeG1lsM56VaONt6a9VU8c,2333
+ datarobot_genai/drmcp/test_utils/clients/base.py,sha256=WoPdddYmmXGylEuKRtKHPfprcHMjbHqPB9PwzWmORV4,10637
+ datarobot_genai/drmcp/test_utils/clients/dr_gateway.py,sha256=qlx0WxEOtTkxt9PiCxgWAp02k5jyUgXcKb9AwCGw6cw,2150
+ datarobot_genai/drmcp/test_utils/clients/openai.py,sha256=tyIibvjtFp7u2BoHJqwIRlHn9UPtysKOgStoA9SZUYs,2566
  datarobot_genai/drmcp/tools/__init__.py,sha256=0kq9vMkF7EBsS6lkEdiLibmUrghTQqosHbZ5k-V9a5g,578
  datarobot_genai/drmcp/tools/clients/__init__.py,sha256=0kq9vMkF7EBsS6lkEdiLibmUrghTQqosHbZ5k-V9a5g,578
  datarobot_genai/drmcp/tools/clients/atlassian.py,sha256=__M_uz7FrcbKCYRzeMn24DCEYD6OmFx_LuywHCxgXsA,6472
  datarobot_genai/drmcp/tools/clients/confluence.py,sha256=h_G0By_kDnJeWDT_d-IREsaZ5-0xB5GoLXOqblYP5MA,20706
- datarobot_genai/drmcp/tools/clients/gdrive.py,sha256=Sonc8g52z8gDeaIGxh2GkoWYbtveCBGfDU0z3m4iSRU,29292
+ datarobot_genai/drmcp/tools/clients/gdrive.py,sha256=RK4IISpYb99aK6WgDthesDoglaZxwGpG_PPAAe6xsVM,33064
  datarobot_genai/drmcp/tools/clients/jira.py,sha256=Rm91JAyrNIqxu66-9rU1YqoRXVnWbEy-Ahvy6f6HlVg,9823
  datarobot_genai/drmcp/tools/clients/microsoft_graph.py,sha256=PASGThDPE8zkBZqach8lurJL1y47DWUPLwvf9N6uLGM,19234
  datarobot_genai/drmcp/tools/clients/s3.py,sha256=GmwzvurFdNfvxOooA8g5S4osRysHYU0S9ypg_177Glg,953
  datarobot_genai/drmcp/tools/confluence/__init__.py,sha256=0kq9vMkF7EBsS6lkEdiLibmUrghTQqosHbZ5k-V9a5g,578
  datarobot_genai/drmcp/tools/confluence/tools.py,sha256=_-ws65WLK8KZP_mKkf4yJ7ZunR8qdyoiMwHQX47MSMw,12362
  datarobot_genai/drmcp/tools/gdrive/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- datarobot_genai/drmcp/tools/gdrive/tools.py,sha256=jSKz0TuqAdHFitUZ-BW8NDl31Aq4VGpWa0yWSksLi00,13887
+ datarobot_genai/drmcp/tools/gdrive/tools.py,sha256=7bNrp7E3opKwsBDYfLIOsOGfPXW-Ae9KvcimEzetR0A,17631
  datarobot_genai/drmcp/tools/jira/__init__.py,sha256=0kq9vMkF7EBsS6lkEdiLibmUrghTQqosHbZ5k-V9a5g,578
  datarobot_genai/drmcp/tools/jira/tools.py,sha256=dfkqTU2HH-7n44hX80ODFacKq0p0LOchFcZtIIKFNMM,9687
  datarobot_genai/drmcp/tools/microsoft_graph/__init__.py,sha256=CuOaMt1AJo7cHx_GuhO3s_aqxZas_wlDsoBorBsvbeU,577
  datarobot_genai/drmcp/tools/microsoft_graph/tools.py,sha256=zJ-UA1TMhPOYcExvgWv0YBjDsSIDPA-U1SEbBrVfAc8,7744
  datarobot_genai/drmcp/tools/predictive/__init__.py,sha256=WuOHlNNEpEmcF7gVnhckruJRKU2qtmJLE3E7zoCGLDo,1030
- datarobot_genai/drmcp/tools/predictive/data.py,sha256=sSFAmO6x0DSuolw8urhMaOj5PwfUH29oc2mEOZI3YU4,4631
+ datarobot_genai/drmcp/tools/predictive/data.py,sha256=VbGs8ERP8vNFtTTryGhI61JItNVaJsx1gxpRX1ZFZcg,4626
  datarobot_genai/drmcp/tools/predictive/deployment.py,sha256=lm02Ayuo11L1hP41fgi3QpR1Eyty-Wc16rM0c8SgliM,3277
  datarobot_genai/drmcp/tools/predictive/deployment_info.py,sha256=BGEF_dmbxOBJR0n1Tt9TO2-iNTQSBTr-oQUyaxLZ0ZI,15297
- datarobot_genai/drmcp/tools/predictive/model.py,sha256=Yih5-KedJ-1yupPLXCJsCXOdyWWi9pRvgapXDlgXWJA,4891
+ datarobot_genai/drmcp/tools/predictive/model.py,sha256=BVxOMHh3--liwBU4VB1OWRrqkhJ4y_Rq053f7y94TF8,6276
  datarobot_genai/drmcp/tools/predictive/predict.py,sha256=Qoob2_t2crfWtyPzkXMRz2ITZumnczU6Dq4C7q9RBMI,9370
  datarobot_genai/drmcp/tools/predictive/predict_realtime.py,sha256=urq6rPyZFsAP-bPyclSNzrkvb6FTamdlFau8q0IWWJ0,13472
- datarobot_genai/drmcp/tools/predictive/project.py,sha256=xC52UdYvuFeNZC7Y5MfXcvzTL70WwAacQXESr6rqN6s,3255
- datarobot_genai/drmcp/tools/predictive/training.py,sha256=LzMxbBT8wxKYDrRlVElfmTUrzpmGvwrR-mTGf6YUnIA,23998
+ datarobot_genai/drmcp/tools/predictive/project.py,sha256=Mzf7rQogBV6h1-MWQYTwtDHOsMWfjOyyJpSYmmvNNuc,3253
+ datarobot_genai/drmcp/tools/predictive/training.py,sha256=WWzzGibYMSvI8kqHnvav6qNIVjoe1EG4RyiYa3XhFYA,23984
  datarobot_genai/langgraph/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  datarobot_genai/langgraph/agent.py,sha256=DRnywmS9KDywyChtuIZZwNKbJs8BpC259EG_kxYbiQ8,15828
  datarobot_genai/langgraph/mcp.py,sha256=iA2_j46mZAaNaL7ntXT-LW6C-NMJkzr3VfKDDfe7mh8,2851
@@ -113,9 +117,9 @@ datarobot_genai/nat/datarobot_llm_clients.py,sha256=-_q_KlKOVQecIYJd8YRiYnS4ZNaz
  datarobot_genai/nat/datarobot_llm_providers.py,sha256=aDoQcTeGI-odqydPXEX9OGGNFbzAtpqzTvHHEkmJuEQ,4963
  datarobot_genai/nat/datarobot_mcp_client.py,sha256=jL8sXb8g4gvt0VYgB2tfMGsMjpB1GV2XIbN0iv_LxVU,10701
  datarobot_genai/nat/helpers.py,sha256=Q7E3ADZdtFfS8E6OQPyw2wgA6laQ58N3bhLj5CBWwJs,3265
- datarobot_genai-0.2.30.dist-info/METADATA,sha256=OfWIH55TShkE_jPNSKFx04ZceCEYP-qFCHyr31sHD6U,6301
- datarobot_genai-0.2.30.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
- datarobot_genai-0.2.30.dist-info/entry_points.txt,sha256=jEW3WxDZ8XIK9-ISmTyt5DbmBb047rFlzQuhY09rGrM,284
- datarobot_genai-0.2.30.dist-info/licenses/AUTHORS,sha256=isJGUXdjq1U7XZ_B_9AH8Qf0u4eX0XyQifJZ_Sxm4sA,80
- datarobot_genai-0.2.30.dist-info/licenses/LICENSE,sha256=U2_VkLIktQoa60Nf6Tbt7E4RMlfhFSjWjcJJfVC-YCE,11341
- datarobot_genai-0.2.30.dist-info/RECORD,,
+ datarobot_genai-0.2.32.dist-info/METADATA,sha256=I5_bDUsP1rsNCYVIXvZjYYb91UwhA7N51E_Y0EN-fSc,6301
+ datarobot_genai-0.2.32.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
+ datarobot_genai-0.2.32.dist-info/entry_points.txt,sha256=jEW3WxDZ8XIK9-ISmTyt5DbmBb047rFlzQuhY09rGrM,284
+ datarobot_genai-0.2.32.dist-info/licenses/AUTHORS,sha256=isJGUXdjq1U7XZ_B_9AH8Qf0u4eX0XyQifJZ_Sxm4sA,80
+ datarobot_genai-0.2.32.dist-info/licenses/LICENSE,sha256=U2_VkLIktQoa60Nf6Tbt7E4RMlfhFSjWjcJJfVC-YCE,11341
+ datarobot_genai-0.2.32.dist-info/RECORD,,