datarobot-genai 0.2.39__py3-none-any.whl → 0.3.1__py3-none-any.whl
- datarobot_genai/core/agents/__init__.py +1 -1
- datarobot_genai/core/agents/base.py +5 -2
- datarobot_genai/core/chat/responses.py +6 -1
- datarobot_genai/core/utils/auth.py +188 -31
- datarobot_genai/crewai/__init__.py +1 -4
- datarobot_genai/crewai/agent.py +150 -17
- datarobot_genai/crewai/events.py +11 -4
- datarobot_genai/drmcp/__init__.py +4 -2
- datarobot_genai/drmcp/core/config.py +21 -1
- datarobot_genai/drmcp/core/mcp_instance.py +5 -49
- datarobot_genai/drmcp/core/routes.py +108 -13
- datarobot_genai/drmcp/core/tool_config.py +16 -0
- datarobot_genai/drmcp/core/utils.py +110 -0
- datarobot_genai/drmcp/test_utils/tool_base_ete.py +41 -26
- datarobot_genai/drmcp/tools/clients/gdrive.py +2 -0
- datarobot_genai/drmcp/tools/clients/microsoft_graph.py +96 -0
- datarobot_genai/drmcp/tools/clients/perplexity.py +173 -0
- datarobot_genai/drmcp/tools/clients/tavily.py +199 -0
- datarobot_genai/drmcp/tools/confluence/tools.py +0 -5
- datarobot_genai/drmcp/tools/gdrive/tools.py +12 -59
- datarobot_genai/drmcp/tools/jira/tools.py +4 -8
- datarobot_genai/drmcp/tools/microsoft_graph/tools.py +135 -19
- datarobot_genai/drmcp/tools/perplexity/__init__.py +0 -0
- datarobot_genai/drmcp/tools/perplexity/tools.py +117 -0
- datarobot_genai/drmcp/tools/predictive/data.py +1 -9
- datarobot_genai/drmcp/tools/predictive/deployment.py +0 -8
- datarobot_genai/drmcp/tools/predictive/deployment_info.py +0 -19
- datarobot_genai/drmcp/tools/predictive/model.py +0 -21
- datarobot_genai/drmcp/tools/predictive/predict_realtime.py +3 -0
- datarobot_genai/drmcp/tools/predictive/project.py +3 -19
- datarobot_genai/drmcp/tools/predictive/training.py +1 -19
- datarobot_genai/drmcp/tools/tavily/__init__.py +13 -0
- datarobot_genai/drmcp/tools/tavily/tools.py +141 -0
- datarobot_genai/langgraph/agent.py +10 -2
- datarobot_genai/llama_index/__init__.py +1 -1
- datarobot_genai/llama_index/agent.py +284 -5
- datarobot_genai/nat/agent.py +17 -6
- {datarobot_genai-0.2.39.dist-info → datarobot_genai-0.3.1.dist-info}/METADATA +3 -1
- {datarobot_genai-0.2.39.dist-info → datarobot_genai-0.3.1.dist-info}/RECORD +43 -40
- datarobot_genai/crewai/base.py +0 -159
- datarobot_genai/drmcp/core/tool_filter.py +0 -117
- datarobot_genai/llama_index/base.py +0 -299
- {datarobot_genai-0.2.39.dist-info → datarobot_genai-0.3.1.dist-info}/WHEEL +0 -0
- {datarobot_genai-0.2.39.dist-info → datarobot_genai-0.3.1.dist-info}/entry_points.txt +0 -0
- {datarobot_genai-0.2.39.dist-info → datarobot_genai-0.3.1.dist-info}/licenses/AUTHORS +0 -0
- {datarobot_genai-0.2.39.dist-info → datarobot_genai-0.3.1.dist-info}/licenses/LICENSE +0 -0
--- /dev/null
+++ b/datarobot_genai/drmcp/tools/clients/perplexity.py
@@ -0,0 +1,173 @@
+# Copyright 2025 DataRobot, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+from typing import Any
+from typing import Literal
+
+from fastmcp.exceptions import ToolError
+from fastmcp.server.dependencies import get_http_headers
+from perplexity import AsyncPerplexity
+from perplexity.types import search_create_response
+from pydantic import BaseModel
+from pydantic import ConfigDict
+
+logger = logging.getLogger(__name__)
+
+MAX_QUERIES: int = 5
+MAX_RESULTS: int = 20
+MAX_TOKENS_PER_PAGE: int = 8192
+MAX_SEARCH_DOMAIN_FILTER: int = 20
+
+MAX_RESULTS_DEFAULT: int = 10
+MAX_TOKENS_PER_PAGE_DEFAULT: int = 2048
+
+
+async def get_perplexity_access_token() -> str | ToolError:
+    """
+    Get Perplexity API key from HTTP headers.
+
+    As of writing, Perplexity does not support OAuth;
+    only the API-key authorization flow is available.
+
+    Returns
+    -------
+    Access token string on success, ToolError on failure
+
+    Example:
+    ```python
+    token = await get_perplexity_access_token()
+    if isinstance(token, ToolError):
+        # Handle error
+        return token
+    # Use token
+    ```
+    """
+    try:
+        headers = get_http_headers()
+
+        if api_key := headers.get("x-perplexity-api-key"):
+            return api_key
+
+        logger.warning("Perplexity API key not found in headers.")
+        return ToolError(
+            "Perplexity API key not found in headers. "
+            "Please provide it via 'x-perplexity-api-key' header."
+        )
+    except Exception as e:
+        logger.error(f"Unexpected error obtaining Perplexity API key: {e}.", exc_info=e)
+        return ToolError("An unexpected error occurred while obtaining Perplexity API key.")
+
+
+class PerplexityError(Exception):
+    """Exception for Perplexity API errors."""
+
+    def __init__(self, message: str) -> None:
+        super().__init__(message)
+
+
+class PerplexitySearchResult(BaseModel):
+    snippet: str
+    title: str
+    url: str
+    date: str | None = None
+    last_updated: str | None = None
+
+    model_config = ConfigDict(populate_by_name=True)
+
+    @classmethod
+    def from_perplexity_sdk(cls, result: search_create_response.Result) -> "PerplexitySearchResult":
+        """Create a PerplexitySearchResult from Perplexity SDK response data."""
+        return cls(**result.model_dump())
+
+    def as_flat_dict(self) -> dict[str, Any]:
+        """Return a flat dictionary representation of the search result."""
+        return self.model_dump(by_alias=True)
+
+
+class PerplexityClient:
+    """Client for interacting with the Perplexity API.
+    A simple wrapper around the Perplexity Python SDK.
+    """
+
+    def __init__(self, access_token: str) -> None:
+        self._client = AsyncPerplexity(api_key=access_token)
+
+    async def search(
+        self,
+        query: str | list[str],
+        search_domain_filter: list[str] | None = None,
+        recency: Literal["hour", "day", "week", "month", "year"] | None = None,
+        max_results: int = MAX_RESULTS_DEFAULT,
+        max_tokens_per_page: int = MAX_TOKENS_PER_PAGE_DEFAULT,
+    ) -> list[PerplexitySearchResult]:
+        """
+        Search using Perplexity.
+
+        Args:
+            query: Query to filter results.
+            search_domain_filter: Up to 20 domains/URLs to allowlist or denylist.
+            recency: Filter results by time period.
+            max_results: Number of ranked results to return.
+            max_tokens_per_page: Context extraction cap per page.
+
+        Returns
+        -------
+        List of Perplexity search results.
+        """
+        if not query:
+            raise PerplexityError("Error: query cannot be empty.")
+        if query and isinstance(query, str) and not query.strip():
+            raise PerplexityError("Error: query cannot be empty.")
+        if query and isinstance(query, list) and len(query) > MAX_QUERIES:
+            raise PerplexityError(f"Error: query list cannot be bigger than {MAX_QUERIES}.")
+        if query and isinstance(query, list) and not all(q.strip() for q in query):
+            raise PerplexityError("Error: query cannot contain empty str.")
+        if search_domain_filter and len(search_domain_filter) > MAX_SEARCH_DOMAIN_FILTER:
+            raise PerplexityError("Error: maximum number of search domain filters is 20.")
+        if max_results <= 0:
+            raise PerplexityError("Error: max_results must be greater than 0.")
+        if max_results > MAX_RESULTS:
+            raise PerplexityError("Error: max_results must be smaller than or equal to 20.")
+        if max_tokens_per_page <= 0:
+            raise PerplexityError("Error: max_tokens_per_page must be greater than 0.")
+        if max_tokens_per_page > MAX_TOKENS_PER_PAGE:
+            raise PerplexityError(
+                "Error: max_tokens_per_page must be smaller than or equal to 8192."
+            )
+
+        max_results = min(max_results, MAX_RESULTS)
+        max_tokens_per_page = min(max_tokens_per_page, MAX_TOKENS_PER_PAGE)
+
+        search_result = await self._client.search.create(
+            query=query,
+            search_domain_filter=search_domain_filter,
+            search_recency_filter=recency,
+            max_results=max_results,
+            max_tokens_per_page=max_tokens_per_page,
+        )
+
+        return [
+            PerplexitySearchResult.from_perplexity_sdk(result) for result in search_result.results
+        ]
+
+    async def __aenter__(self) -> "PerplexityClient":
+        """Async context manager entry."""
+        return self
+
+    async def __aexit__(
+        self, exc_type: type[BaseException] | None, exc_val: BaseException | None, exc_tb: Any
+    ) -> None:
+        """Async context manager exit."""
+        await self._client.close()
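For orientation, a minimal usage sketch of the new client, based only on the code above; it is not part of the diff, and the literal "PPLX_API_KEY" is a placeholder (inside the MCP server the key is read from the 'x-perplexity-api-key' header via get_perplexity_access_token() instead):

```python
import asyncio

from datarobot_genai.drmcp.tools.clients.perplexity import PerplexityClient


async def main() -> None:
    # Placeholder key; the MCP tools obtain it from the request headers.
    async with PerplexityClient(access_token="PPLX_API_KEY") as client:
        results = await client.search(
            query="DataRobot GenAI 0.3.1 release",
            recency="month",
            max_results=5,
        )
    for result in results:
        print(result.title, result.url)


asyncio.run(main())
```

Note that exiting the context manager closes the underlying AsyncPerplexity client.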
--- /dev/null
+++ b/datarobot_genai/drmcp/tools/clients/tavily.py
@@ -0,0 +1,199 @@
+# Copyright 2025 DataRobot, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Tavily API Client and utilities for API key authentication."""
+
+import logging
+from typing import Any
+from typing import Literal
+
+from fastmcp.exceptions import ToolError
+from fastmcp.server.dependencies import get_http_headers
+from pydantic import BaseModel
+from pydantic import ConfigDict
+from tavily import AsyncTavilyClient
+
+logger = logging.getLogger(__name__)
+
+MAX_RESULTS: int = 20
+MAX_CHUNKS_PER_SOURCE: int = 3
+
+MAX_RESULTS_DEFAULT: int = 5
+CHUNKS_PER_SOURCE_DEFAULT: int = 1
+
+
+async def get_tavily_access_token() -> str:
+    """
+    Get Tavily API key from HTTP headers.
+
+    Returns
+    -------
+    API key string
+
+    Raises
+    ------
+    ToolError: If API key is not found in headers
+    """
+    headers = get_http_headers()
+
+    api_key = headers.get("x-tavily-api-key")
+    if api_key:
+        return api_key
+
+    logger.warning("Tavily API key not found in headers")
+    raise ToolError(
+        "Tavily API key not found in headers. Please provide it via 'x-tavily-api-key' header."
+    )
+
+
+class TavilySearchResult(BaseModel):
+    """A single search result from Tavily API."""
+
+    title: str
+    url: str
+    content: str
+    score: float
+
+    model_config = ConfigDict(populate_by_name=True)
+
+    @classmethod
+    def from_tavily_sdk(cls, result: dict[str, Any]) -> "TavilySearchResult":
+        """Create a TavilySearchResult from Tavily SDK response data."""
+        return cls(
+            title=result.get("title", ""),
+            url=result.get("url", ""),
+            content=result.get("content", ""),
+            score=result.get("score", 0.0),
+        )
+
+    def as_flat_dict(self) -> dict[str, Any]:
+        """Return a flat dictionary representation of the search result."""
+        return self.model_dump(by_alias=True)
+
+
+class TavilyImage(BaseModel):
+    """An image result from Tavily API."""
+
+    url: str
+    description: str | None = None
+
+    model_config = ConfigDict(populate_by_name=True)
+
+    @classmethod
+    def from_tavily_sdk(cls, image: dict[str, Any] | str) -> "TavilyImage":
+        """Create a TavilyImage from Tavily SDK response data."""
+        if isinstance(image, str):
+            return cls(url=image)
+        return cls(
+            url=image.get("url", ""),
+            description=image.get("description"),
+        )
+
+
+class TavilyClient:
+    """Client for interacting with Tavily Search API.
+
+    This is a wrapper around the official tavily-python SDK.
+    """
+
+    def __init__(self, api_key: str) -> None:
+        self._client = AsyncTavilyClient(api_key=api_key)
+
+    async def search(
+        self,
+        query: str,
+        *,
+        topic: Literal["general", "news", "finance"] = "general",
+        search_depth: Literal["basic", "advanced"] = "basic",
+        max_results: int = MAX_RESULTS_DEFAULT,
+        time_range: Literal["day", "week", "month", "year"] | None = None,
+        include_images: bool = False,
+        include_image_descriptions: bool = False,
+        chunks_per_source: int = CHUNKS_PER_SOURCE_DEFAULT,
+        include_answer: bool = False,
+    ) -> dict[str, Any]:
+        """
+        Perform a web search using Tavily API.
+
+        Args:
+            query: The search query to execute.
+            topic: The category of search ("general", "news", or "finance").
+            search_depth: The depth of search ("basic" or "advanced").
+            max_results: Maximum number of results to return (1-20).
+            time_range: Time range filter ("day", "week", "month", "year").
+            include_images: Whether to include images in results.
+            include_image_descriptions: Whether to include image descriptions.
+            chunks_per_source: Maximum content snippets per URL (1-3).
+            include_answer: Whether to include an AI-generated answer.
+
+        Returns
+        -------
+        Dict with search results from Tavily API.
+
+        Raises
+        ------
+        ValueError: If validation fails.
+        TavilyInvalidAPIKeyError: If the API key is invalid.
+        TavilyUsageLimitExceededError: If usage limit is exceeded.
+        TavilyForbiddenError: If access is forbidden.
+        TavilyBadRequestError: If the request is malformed.
+        """
+        # Validate inputs
+        if not query:
+            raise ValueError("query cannot be empty.")
+        if isinstance(query, str) and not query.strip():
+            raise ValueError("query cannot be empty.")
+        if max_results <= 0:
+            raise ValueError("max_results must be greater than 0.")
+        if max_results > MAX_RESULTS:
+            raise ValueError(f"max_results must be smaller than or equal to {MAX_RESULTS}.")
+        if chunks_per_source <= 0:
+            raise ValueError("chunks_per_source must be greater than 0.")
+        if chunks_per_source > MAX_CHUNKS_PER_SOURCE:
+            raise ValueError(
+                f"chunks_per_source must be smaller than or equal to {MAX_CHUNKS_PER_SOURCE}."
+            )
+
+        # Clamp values to valid ranges
+        max_results = min(max_results, MAX_RESULTS)
+        chunks_per_source = min(chunks_per_source, MAX_CHUNKS_PER_SOURCE)
+
+        # Build search parameters
+        search_kwargs: dict[str, Any] = {
+            "query": query,
+            "topic": topic,
+            "search_depth": search_depth,
+            "max_results": max_results,
+            "include_images": include_images,
+            "include_image_descriptions": include_image_descriptions,
+            "chunks_per_source": chunks_per_source,
+            "include_answer": include_answer,
+        }
+
+        if time_range:
+            search_kwargs["time_range"] = time_range
+
+        return await self._client.search(**search_kwargs)
+
+    async def __aenter__(self) -> "TavilyClient":
+        """Async context manager entry."""
+        return self
+
+    async def __aexit__(
+        self, exc_type: type[BaseException] | None, exc_val: BaseException | None, exc_tb: Any
+    ) -> None:
+        """Async context manager exit."""
+        # AsyncTavilyClient doesn't have a close method, but we keep the context manager
+        # pattern for consistency with other clients
+        pass
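A matching sketch for the Tavily client, again based only on the code above and not part of the diff; "TAVILY_API_KEY" is a placeholder for the key that the server normally reads from the 'x-tavily-api-key' header. Unlike the Perplexity wrapper, search() returns the raw response dict, so hits are pulled from its "results" key here:

```python
import asyncio

from datarobot_genai.drmcp.tools.clients.tavily import TavilyClient
from datarobot_genai.drmcp.tools.clients.tavily import TavilySearchResult


async def main() -> None:
    # Placeholder key; the MCP tools obtain it from the request headers.
    async with TavilyClient(api_key="TAVILY_API_KEY") as client:
        response = await client.search(
            "DataRobot GenAI 0.3.1 release",
            topic="news",
            max_results=5,
        )
    # The raw Tavily payload keeps individual hits under "results".
    for item in response.get("results", []):
        print(TavilySearchResult.from_tavily_sdk(item).as_flat_dict())


asyncio.run(main())
```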
--- a/datarobot_genai/drmcp/tools/confluence/tools.py
+++ b/datarobot_genai/drmcp/tools/confluence/tools.py
@@ -67,7 +67,6 @@ async def confluence_get_page(
         page_response = await client.get_page_by_title(page_id_or_title, space_key)
 
         return ToolResult(
-            content=f"Successfully retrieved page '{page_response.title}'.",
             structured_content=page_response.as_flat_dict(),
         )
 
@@ -115,7 +114,6 @@ async def confluence_create_page(
     )
 
     return ToolResult(
-        content=f"New page '{title}' created successfully in space '{space_key}'.",
         structured_content={"new_page_id": page_response.page_id, "title": page_response.title},
     )
 
@@ -151,7 +149,6 @@ async def confluence_add_comment(
     )
 
     return ToolResult(
-        content=f"Comment added successfully to page ID {page_id}.",
         structured_content={
             "comment_id": comment_response.comment_id,
             "page_id": page_id,
@@ -212,7 +209,6 @@ async def confluence_search(
 
     n = len(results)
     return ToolResult(
-        content=f"Successfully executed CQL query and retrieved {n} result(s).",
         structured_content={"data": data, "count": n},
     )
 
@@ -267,7 +263,6 @@ async def confluence_update_page(
     )
 
     return ToolResult(
-        content=f"Page ID {page_id} updated successfully to version {page_response.version}.",
         structured_content={
             "updated_page_id": page_response.page_id,
             "new_version": page_response.version,
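The confluence hunks above, and the gdrive and jira hunks that follow, all make the same change: the human-readable content argument is dropped so that ToolResult carries only structured_content. A before/after sketch of the pattern (illustrative only; result_before and result_after are hypothetical names, while the ToolResult keywords are the ones the diff itself uses):

```python
from fastmcp.tools.tool import ToolResult


# 0.2.39 style: a prose summary duplicating the structured payload.
def result_before(issue_key: str) -> ToolResult:
    return ToolResult(
        content=f"Successfully created issue '{issue_key}'.",
        structured_content={"newIssueKey": issue_key},
    )


# 0.3.1 style: the machine-readable payload only.
def result_after(issue_key: str) -> ToolResult:
    return ToolResult(structured_content={"newIssueKey": issue_key})
```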
--- a/datarobot_genai/drmcp/tools/gdrive/tools.py
+++ b/datarobot_genai/drmcp/tools/gdrive/tools.py
@@ -22,7 +22,6 @@ from fastmcp.exceptions import ToolError
 from fastmcp.tools.tool import ToolResult
 
 from datarobot_genai.drmcp.core.mcp_instance import dr_mcp_tool
-from datarobot_genai.drmcp.tools.clients.gdrive import GOOGLE_DRIVE_FOLDER_MIME
 from datarobot_genai.drmcp.tools.clients.gdrive import LIMIT
 from datarobot_genai.drmcp.tools.clients.gdrive import MAX_PAGE_SIZE
 from datarobot_genai.drmcp.tools.clients.gdrive import SUPPORTED_FIELDS
@@ -33,9 +32,7 @@ from datarobot_genai.drmcp.tools.clients.gdrive import get_gdrive_access_token
 logger = logging.getLogger(__name__)
 
 
-@dr_mcp_tool(
-    tags={"google", "gdrive", "list", "search", "files", "find", "contents"}, enabled=False
-)
+@dr_mcp_tool(tags={"google", "gdrive", "list", "search", "files", "find", "contents"})
 async def gdrive_find_contents(
     *,
     page_size: Annotated[
@@ -66,13 +63,15 @@ async def gdrive_find_contents(
 ) -> ToolResult:
     """
     Search or list files in the user's Google Drive with pagination and filtering support.
-    Use this tool to discover file names and IDs for use with other tools.
+    Use this tool to discover GDrive file names and IDs for use with other tools.
 
     Limit must be bigger than or equal to page size and it must be multiplication of page size.
-
-
-
-    page size =
+
+    Examples
+    --------
+    - page size = 10 limit = 50
+    - page size = 3 limit = 3
+    - page size = 12 limit = 36
     """
     access_token = await get_gdrive_access_token()
     if isinstance(access_token, ToolError):
@@ -90,13 +89,8 @@ async def gdrive_find_contents(
 
     filtered_fields = set(fields).intersection(SUPPORTED_FIELDS) if fields else SUPPORTED_FIELDS
     number_of_files = len(data.files)
-    next_page_info = (
-        f"Next page token needed to fetch more data: {data.next_page_token}"
-        if data.next_page_token
-        else "There're no more pages."
-    )
+
     return ToolResult(
-        content=f"Successfully listed {number_of_files} files. {next_page_info}",
         structured_content={
             "files": [
                 file.model_dump(by_alias=True, include=filtered_fields) for file in data.files
@@ -119,8 +113,7 @@ async def gdrive_read_content(
     ] = None,
 ) -> ToolResult:
     """
-    Retrieve the content of a specific file by its ID.
-    automatically exported to LLM-readable formats (Push-Down).
+    Retrieve the content of a specific Google drive file by its ID.
 
     Usage:
     - Basic: gdrive_read_content(file_id="1ABC123def456")
@@ -150,15 +143,7 @@ async def gdrive_read_content(
     async with GoogleDriveClient(access_token) as client:
         file_content = await client.read_file_content(file_id, target_format)
 
-        export_info = ""
-        if file_content.was_exported:
-            export_info = f" (exported from {file_content.original_mime_type})"
-
         return ToolResult(
-            content=(
-                f"Successfully retrieved content of '{file_content.name}' "
-                f"({file_content.mime_type}){export_info}."
-            ),
             structured_content=file_content.as_flat_dict(),
         )
 
@@ -230,13 +215,7 @@ async def gdrive_create_file(
         initial_content=initial_content,
     )
 
-    file_type = "folder" if mime_type == GOOGLE_DRIVE_FOLDER_MIME else "file"
-    content_info = ""
-    if initial_content and mime_type != GOOGLE_DRIVE_FOLDER_MIME:
-        content_info = " with initial content"
-
     return ToolResult(
-        content=f"Successfully created {file_type} '{created_file.name}'{content_info}.",
         structured_content=created_file.as_flat_dict(),
     )
 
@@ -297,27 +276,12 @@ async def gdrive_update_metadata(
         trashed=trash,
     )
 
-    changes: list[str] = []
-    if new_name is not None:
-        changes.append(f"renamed to '{new_name}'")
-    if starred is True:
-        changes.append("starred")
-    elif starred is False:
-        changes.append("unstarred")
-    if trash is True:
-        changes.append("moved to trash")
-    elif trash is False:
-        changes.append("restored from trash")
-
-    changes_description = ", ".join(changes)
-
     return ToolResult(
-        content=f"Successfully updated file '{updated_file.name}': {changes_description}.",
         structured_content=updated_file.as_flat_dict(),
     )
 
 
-@dr_mcp_tool(tags={"google", "gdrive", "manage", "access", "acl"})
+@dr_mcp_tool(tags={"google", "gdrive", "manage", "access", "acl"}, enabled=False)
 async def gdrive_manage_access(
     *,
     file_id: Annotated[str, "The ID of the file or folder."],
@@ -388,17 +352,6 @@ async def gdrive_manage_access(
     # Build response
     structured_content = {"affectedFileId": file_id}
     if action == "add":
-        content = (
-            f"Successfully added role '{role}' for '{email_address}' for gdrive file '{file_id}'. "
-            f"New permission id '{permission_id}'."
-        )
         structured_content["newPermissionId"] = permission_id
-    elif action == "update":
-        content = (
-            f"Successfully updated role '{role}' (permission '{permission_id}') "
-            f"for gdrive file '{file_id}'."
-        )
-    else:  # action == "remove":
-        content = f"Successfully removed permission '{permission_id}' for gdrive file '{file_id}'."
 
-    return ToolResult(
+    return ToolResult(structured_content=structured_content)
--- a/datarobot_genai/drmcp/tools/jira/tools.py
+++ b/datarobot_genai/drmcp/tools/jira/tools.py
@@ -53,10 +53,11 @@ async def jira_search_issues(
     async with JiraClient(access_token) as client:
         issues = await client.search_jira_issues(jql_query=jql_query, max_results=max_results)
 
-        n = len(issues)
         return ToolResult(
-
-
+            structured_content={
+                "data": [issue.as_flat_dict() for issue in issues],
+                "count": len(issues),
+            },
         )
 
 
@@ -76,7 +77,6 @@ async def jira_get_issue(
         issue = await client.get_jira_issue(issue_key)
 
         return ToolResult(
-            content=f"Successfully retrieved details for issue '{issue_key}'.",
             structured_content=issue.as_flat_dict(),
         )
 
@@ -121,7 +121,6 @@ async def jira_create_issue(
     )
 
     return ToolResult(
-        content=f"Successfully created issue '{issue_key}'.",
         structured_content={"newIssueKey": issue_key, "projectKey": project_key},
     )
 
@@ -174,9 +173,7 @@ async def jira_update_issue(
         issue_key=issue_key, fields=fields_to_update
     )
 
-    updated_fields_str = ",".join(updated_fields)
     return ToolResult(
-        content=f"Successfully updated issue '{issue_key}'. Fields modified: {updated_fields_str}.",
         structured_content={"updatedIssueKey": issue_key, "fields": updated_fields},
     )
 
@@ -216,7 +213,6 @@ async def jira_transition_issue(
         await client.transition_jira_issue(issue_key=issue_key, transition_id=transition_id)
 
         return ToolResult(
-            content=f"Successfully transitioned issue '{issue_key}' to status '{transition_name}'.",
             structured_content={
                 "transitionedIssueKey": issue_key,
                 "newStatusName": transition_name,