datarobot-genai 0.2.13__py3-none-any.whl → 0.2.15__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- datarobot_genai/drmcp/tools/clients/confluence.py +113 -4
- datarobot_genai/drmcp/tools/clients/jira.py +75 -2
- datarobot_genai/drmcp/tools/confluence/tools.py +61 -2
- datarobot_genai/drmcp/tools/jira/tools.py +108 -0
- datarobot_genai/nat/agent.py +20 -7
- datarobot_genai/nat/helpers.py +87 -0
- {datarobot_genai-0.2.13.dist-info → datarobot_genai-0.2.15.dist-info}/METADATA +1 -1
- {datarobot_genai-0.2.13.dist-info → datarobot_genai-0.2.15.dist-info}/RECORD +12 -11
- {datarobot_genai-0.2.13.dist-info → datarobot_genai-0.2.15.dist-info}/WHEEL +0 -0
- {datarobot_genai-0.2.13.dist-info → datarobot_genai-0.2.15.dist-info}/entry_points.txt +0 -0
- {datarobot_genai-0.2.13.dist-info → datarobot_genai-0.2.15.dist-info}/licenses/AUTHORS +0 -0
- {datarobot_genai-0.2.13.dist-info → datarobot_genai-0.2.15.dist-info}/licenses/LICENSE +0 -0

datarobot_genai/drmcp/tools/clients/confluence.py
CHANGED

@@ -31,6 +31,14 @@ from .atlassian import get_atlassian_cloud_id
 logger = logging.getLogger(__name__)
 
 
+class ConfluenceError(Exception):
+    """Exception for Confluence API errors."""
+
+    def __init__(self, message: str, status_code: int | None = None) -> None:
+        super().__init__(message)
+        self.status_code = status_code
+
+
 class ConfluencePage(BaseModel):
     """Pydantic model for Confluence page."""
 
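The new ConfluenceError carries the HTTP status alongside the message, so callers can branch on status_code instead of string-matching error text. A minimal sketch of catching it around the title lookup changed later in this diff (the token handling and page names are placeholders):

from datarobot_genai.drmcp.tools.clients.confluence import ConfluenceClient
from datarobot_genai.drmcp.tools.clients.confluence import ConfluenceError


async def find_page(access_token: str) -> None:
    async with ConfluenceClient(access_token) as client:
        try:
            page = await client.get_page_by_title("Runbook", "PROJ")
        except ConfluenceError as e:
            if e.status_code == 404:
                print(f"Page missing, could create it instead: {e}")  # recoverable
            else:
                raise
        else:
            print(f"Found page: {page.title}")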
@@ -133,7 +141,7 @@ class ConfluenceClient:
 
         Raises
         ------
-
+        ConfluenceError: If page is not found
         httpx.HTTPStatusError: If the API request fails
         """
         cloud_id = await self._get_cloud_id()
@@ -142,7 +150,7 @@ class ConfluenceClient:
         response = await self._client.get(url, params={"expand": self.EXPAND_FIELDS})
 
         if response.status_code == HTTPStatus.NOT_FOUND:
-            raise
+            raise ConfluenceError(f"Page with ID '{page_id}' not found", status_code=404)
 
         response.raise_for_status()
         return self._parse_response(response.json())
@@ -161,7 +169,7 @@
 
         Raises
         ------
-
+        ConfluenceError: If the page is not found
         httpx.HTTPStatusError: If the API request fails
         """
         cloud_id = await self._get_cloud_id()
@@ -181,10 +189,111 @@
         results = data.get("results", [])
 
         if not results:
-            raise
+            raise ConfluenceError(
+                f"Page with title '{title}' not found in space '{space_key}'", status_code=404
+            )
 
         return self._parse_response(results[0])
 
+    def _extract_error_message(self, response: httpx.Response) -> str:
+        """Extract error message from Confluence API error response."""
+        try:
+            error_data = response.json()
+            # Confluence API returns errors in different formats
+            if "message" in error_data:
+                return error_data["message"]
+            if "errorMessages" in error_data and error_data["errorMessages"]:
+                return "; ".join(error_data["errorMessages"])
+            if "errors" in error_data:
+                errors = error_data["errors"]
+                if isinstance(errors, list):
+                    return "; ".join(str(e) for e in errors)
+                if isinstance(errors, dict):
+                    return "; ".join(f"{k}: {v}" for k, v in errors.items())
+        except Exception:
+            pass
+        return response.text or "Unknown error"
+
+    async def create_page(
+        self,
+        space_key: str,
+        title: str,
+        body_content: str,
+        parent_id: int | None = None,
+    ) -> ConfluencePage:
+        """
+        Create a new Confluence page in a specified space.
+
+        Args:
+            space_key: The key of the Confluence space where the page should live
+            title: The title of the new page
+            body_content: The content in Confluence Storage Format (XML) or raw text
+            parent_id: Optional ID of the parent page for creating a child page
+
+        Returns
+        -------
+        ConfluencePage with the created page data
+
+        Raises
+        ------
+        ConfluenceError: If space not found, parent page not found, duplicate title,
+            permission denied, or invalid content
+        httpx.HTTPStatusError: If the API request fails with unexpected status
+        """
+        cloud_id = await self._get_cloud_id()
+        url = f"{ATLASSIAN_API_BASE}/ex/confluence/{cloud_id}/wiki/rest/api/content"
+
+        payload: dict[str, Any] = {
+            "type": "page",
+            "title": title,
+            "space": {"key": space_key},
+            "body": {
+                "storage": {
+                    "value": body_content,
+                    "representation": "storage",
+                }
+            },
+        }
+
+        if parent_id is not None:
+            payload["ancestors"] = [{"id": parent_id}]
+
+        response = await self._client.post(url, json=payload)
+
+        if response.status_code == HTTPStatus.NOT_FOUND:
+            error_msg = self._extract_error_message(response)
+            if parent_id is not None and "ancestor" in error_msg.lower():
+                raise ConfluenceError(
+                    f"Parent page with ID '{parent_id}' not found", status_code=404
+                )
+            raise ConfluenceError(
+                f"Space '{space_key}' not found or resource unavailable: {error_msg}",
+                status_code=404,
+            )
+
+        if response.status_code == HTTPStatus.CONFLICT:
+            raise ConfluenceError(
+                f"A page with title '{title}' already exists in space '{space_key}'",
+                status_code=409,
+            )
+
+        if response.status_code == HTTPStatus.FORBIDDEN:
+            raise ConfluenceError(
+                f"Permission denied: you don't have access to create pages in space '{space_key}'",
+                status_code=403,
+            )
+
+        if response.status_code == HTTPStatus.BAD_REQUEST:
+            error_msg = self._extract_error_message(response)
+            raise ConfluenceError(f"Invalid request: {error_msg}", status_code=400)
+
+        if response.status_code == HTTPStatus.TOO_MANY_REQUESTS:
+            raise ConfluenceError("Rate limit exceeded. Please try again later.", status_code=429)
+
+        response.raise_for_status()
+
+        return self._parse_response(response.json())
+
     async def __aenter__(self) -> "ConfluenceClient":
         """Async context manager entry."""
         return self
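With create_page in place, the whole status-code ladder above collapses into one except clause for callers. A short sketch of the happy path and the duplicate-title case, assuming a valid Atlassian access token (the token, space, and content values are made up):

import asyncio

from datarobot_genai.drmcp.tools.clients.confluence import ConfluenceClient
from datarobot_genai.drmcp.tools.clients.confluence import ConfluenceError


async def main() -> None:
    async with ConfluenceClient("<access-token>") as client:
        try:
            page = await client.create_page(
                space_key="PROJ",
                title="Release notes",
                body_content="<p>Initial draft</p>",
            )
            print(f"Created page {page.title}")
        except ConfluenceError as e:
            # 409 = duplicate title, 404 = missing space/parent, 403 = no permission
            print(f"Create failed ({e.status_code}): {e}")


asyncio.run(main())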
datarobot_genai/drmcp/tools/clients/jira.py
CHANGED

@@ -107,10 +107,10 @@ class JiraClient:
         self._cloud_id = await get_atlassian_cloud_id(self._client, service_type="jira")
         return self._cloud_id
 
-    async def _get_full_url(self,
+    async def _get_full_url(self, path: str) -> str:
         """Return URL for Jira API."""
         cloud_id = await self._get_cloud_id()
-        return f"{ATLASSIAN_API_BASE}/ex/jira/{cloud_id}/rest/api/3/{
+        return f"{ATLASSIAN_API_BASE}/ex/jira/{cloud_id}/rest/api/3/{path}"
 
     async def get_jira_issue(self, issue_key: str) -> Issue:
         """
@@ -209,6 +209,79 @@ class JiraClient:
         jsoned = response.json()
         return jsoned["key"]
 
+    async def update_jira_issue(self, issue_key: str, fields: dict[str, Any]) -> list[str]:
+        """
+        Update Jira issue.
+
+        Args:
+            issue_key: The key of the Jira issue, e.g., 'PROJ-123'
+            fields: A dictionary of field names and their new values,
+                e.g., {'description': 'New content'}
+
+        Returns
+        -------
+        List of updated fields
+
+        Raises
+        ------
+        httpx.HTTPStatusError: If the API request fails
+        """
+        url = await self._get_full_url(f"issue/{issue_key}")
+        payload = {"fields": fields}
+
+        response = await self._client.put(url, json=payload)
+
+        response.raise_for_status()
+        return list(fields.keys())
+
+    async def get_available_jira_transitions(self, issue_key: str) -> dict[str, str]:
+        """
+        Get Available Jira Transitions.
+
+        Args:
+            issue_key: The key of the Jira issue, e.g., 'PROJ-123'
+
+        Returns
+        -------
+        Dictionary where key is the transition name and value is the transition ID
+
+        Raises
+        ------
+        httpx.HTTPStatusError: If the API request fails
+        """
+        url = await self._get_full_url(f"issue/{issue_key}/transitions")
+        response = await self._client.get(url)
+        response.raise_for_status()
+        jsoned = response.json()
+        transitions = {
+            transition["name"]: transition["id"] for transition in jsoned.get("transitions", [])
+        }
+        return transitions
+
+    async def transition_jira_issue(self, issue_key: str, transition_id: str) -> None:
+        """
+        Transition Jira issue.
+
+        Args:
+            issue_key: The key of the Jira issue, e.g., 'PROJ-123'
+            transition_id: ID of the target transition, e.g., '123'.
+                Can be obtained from `get_available_jira_transitions`.
+
+        Returns
+        -------
+        Nothing
+
+        Raises
+        ------
+        httpx.HTTPStatusError: If the API request fails
+        """
+        url = await self._get_full_url(f"issue/{issue_key}")
+        payload = {"transition": {"id": transition_id}}
+
+        response = await self._client.post(url, json=payload)
+
+        response.raise_for_status()
+
     async def __aenter__(self) -> "JiraClient":
         """Async context manager entry."""
         return self
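The two transition methods are designed to be used together: resolve the human-readable status name to an ID, then post the ID. A minimal sketch, assuming a valid access token (the issue key and status name are illustrative):

from datarobot_genai.drmcp.tools.clients.jira import JiraClient


async def move_to_in_progress(access_token: str, issue_key: str) -> None:
    async with JiraClient(access_token) as client:
        # Maps transition name -> transition ID for this issue's current workflow state
        transitions = await client.get_available_jira_transitions(issue_key)
        await client.transition_jira_issue(issue_key, transitions["In Progress"])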
datarobot_genai/drmcp/tools/confluence/tools.py
CHANGED

@@ -23,6 +23,7 @@ from fastmcp.tools.tool import ToolResult
 from datarobot_genai.drmcp.core.mcp_instance import dr_mcp_tool
 from datarobot_genai.drmcp.tools.clients.atlassian import get_atlassian_access_token
 from datarobot_genai.drmcp.tools.clients.confluence import ConfluenceClient
+from datarobot_genai.drmcp.tools.clients.confluence import ConfluenceError
 
 logger = logging.getLogger(__name__)
 
@@ -65,8 +66,8 @@ async def confluence_get_page(
                     "'space_key' is required when identifying a page by title."
                 )
             page_response = await client.get_page_by_title(page_id_or_title, space_key)
-    except
-        logger.error(f"
+    except ConfluenceError as e:
+        logger.error(f"Confluence error getting page: {e}")
         raise ToolError(str(e))
     except Exception as e:
         logger.error(f"Unexpected error getting Confluence page: {e}")
@@ -79,3 +80,61 @@ async def confluence_get_page(
         content=f"Successfully retrieved page '{page_response.title}'.",
         structured_content=page_response.as_flat_dict(),
     )
+
+
+@dr_mcp_tool(tags={"confluence", "write", "create", "page"})
+async def confluence_create_page(
+    *,
+    space_key: Annotated[str, "The key of the Confluence space where the new page should live."],
+    title: Annotated[str, "The title of the new page."],
+    body_content: Annotated[
+        str,
+        "The content of the page, typically in Confluence Storage Format (XML) or raw text.",
+    ],
+    parent_id: Annotated[
+        int | None,
+        "The ID of the parent page, used to create a child page.",
+    ] = None,
+) -> ToolResult:
+    """Create a new documentation page in a specified Confluence space.
+
+    Use this tool to create new Confluence pages with content in storage format.
+    The page will be created at the root level of the space unless a parent_id
+    is provided, in which case it will be created as a child page.
+
+    Usage:
+    - Root page: space_key="PROJ", title="New Page", body_content="<p>Content</p>"
+    - Child page: space_key="PROJ", title="Sub Page", body_content="<p>Content</p>",
+      parent_id=123456
+    """
+    if not all([space_key, title, body_content]):
+        raise ToolError(
+            "Argument validation error: space_key, title, and body_content are required fields."
+        )
+
+    access_token = await get_atlassian_access_token()
+    if isinstance(access_token, ToolError):
+        raise access_token
+
+    try:
+        async with ConfluenceClient(access_token) as client:
+            page_response = await client.create_page(
+                space_key=space_key,
+                title=title,
+                body_content=body_content,
+                parent_id=parent_id,
+            )
+    except ConfluenceError as e:
+        logger.error(f"Confluence error creating page: {e}")
+        raise ToolError(str(e))
+    except Exception as e:
+        logger.error(f"Unexpected error creating Confluence page: {e}")
+        raise ToolError(
+            f"An unexpected error occurred while creating Confluence page "
+            f"'{title}' in space '{space_key}': {str(e)}"
+        )
+
+    return ToolResult(
+        content=f"New page '{title}' created successfully in space '{space_key}'.",
+        structured_content={"new_page_id": page_response.page_id, "title": page_response.title},
+    )
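Because the tool's parameters are keyword-only, an MCP client exercising the child-page path from the docstring would send arguments shaped like this (values are illustrative):

# Illustrative arguments for a confluence_create_page call (child-page case)
arguments = {
    "space_key": "PROJ",
    "title": "Sub Page",
    "body_content": "<p>Content</p>",
    "parent_id": 123456,
}
# On success the tool's structured content carries the new page's ID and title:
# {"new_page_id": "...", "title": "Sub Page"}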
datarobot_genai/drmcp/tools/jira/tools.py
CHANGED

@@ -14,6 +14,7 @@
 
 import logging
 from typing import Annotated
+from typing import Any
 
 from fastmcp.exceptions import ToolError
 from fastmcp.tools.tool import ToolResult
@@ -99,3 +100,110 @@ async def jira_create_issue(
         content=f"Successfully created issue '{issue_key}'.",
         structured_content={"newIssueKey": issue_key, "projectKey": project_key},
     )
+
+
+@dr_mcp_tool(tags={"jira", "update", "edit", "issue"})
+async def jira_update_issue(
+    *,
+    issue_key: Annotated[str, "The key (ID) of the Jira issue to update, e.g., 'PROJ-123'."],
+    fields_to_update: Annotated[
+        dict[str, Any],
+        "A dictionary of field names and their new values (e.g., {'summary': 'New content'}).",
+    ],
+) -> ToolResult:
+    """
+    Modify descriptive fields or custom fields on an existing Jira issue using its key.
+    If you want to update issue status you should use the `jira_transition_issue` tool instead.
+
+    Some fields need a very specific schema to allow an update.
+    You should follow the Jira REST API guidance.
+    A good example is the description field:
+    "description": {
+        "type": "text",
+        "version": 1,
+        "text": [
+            {
+                "type": "paragraph",
+                "content": [
+                    {
+                        "type": "text",
+                        "text": "[HERE YOU PUT REAL DESCRIPTION]"
+                    }
+                ]
+            }
+        ]
+    }
+    """
+    if not issue_key:
+        raise ToolError("Argument validation error: 'issue_key' cannot be empty.")
+    if not fields_to_update or not isinstance(fields_to_update, dict):
+        raise ToolError(
+            "Argument validation error: 'fields_to_update' must be a non-empty dictionary."
+        )
+
+    access_token = await get_atlassian_access_token()
+    if isinstance(access_token, ToolError):
+        raise access_token
+
+    try:
+        async with JiraClient(access_token) as client:
+            updated_fields = await client.update_jira_issue(
+                issue_key=issue_key, fields=fields_to_update
+            )
+    except Exception as e:
+        logger.error(f"Unexpected error while updating Jira issue: {e}")
+        raise ToolError(f"An unexpected error occurred while updating Jira issue: {str(e)}")
+
+    updated_fields_str = ",".join(updated_fields)
+    return ToolResult(
+        content=f"Successfully updated issue '{issue_key}'. Fields modified: {updated_fields_str}.",
+        structured_content={"updatedIssueKey": issue_key, "fields": updated_fields},
+    )
+
+
+@dr_mcp_tool(tags={"jira", "update", "transition", "issue"})
+async def jira_transition_issue(
+    *,
+    issue_key: Annotated[str, "The key (ID) of the Jira issue to transition, e.g. 'PROJ-123'."],
+    transition_name: Annotated[
+        str, "The exact name of the target status/transition (e.g., 'In Progress')."
+    ],
+) -> ToolResult:
+    """
+    Move a Jira issue through its defined workflow to a new status.
+    This leverages Jira's workflow engine directly.
+    """
+    if not all([issue_key, transition_name]):
+        raise ToolError("Argument validation error: issue_key and transition name/ID are required.")
+
+    access_token = await get_atlassian_access_token()
+    if isinstance(access_token, ToolError):
+        raise access_token
+
+    async with JiraClient(access_token) as client:
+        available_transitions = await client.get_available_jira_transitions(issue_key=issue_key)
+
+    try:
+        transition_id = available_transitions[transition_name]
+    except KeyError:
+        available_transitions_str = ",".join(available_transitions)
+        raise ToolError(
+            f"Unexpected transition name `{transition_name}`. "
+            f"Possible values are {available_transitions_str}."
+        )
+
+    try:
+        async with JiraClient(access_token) as client:
+            await client.transition_jira_issue(issue_key=issue_key, transition_id=transition_id)
+    except Exception as e:
+        logger.error(f"Unexpected error while transitioning Jira issue: {e}")
+        raise ToolError(f"An unexpected error occurred while transitioning Jira issue: {str(e)}")
+
+    return ToolResult(
+        content=f"Successfully transitioned issue '{issue_key}' to status '{transition_name}'.",
+        structured_content={
+            "transitionedIssueKey": issue_key,
+            "newStatusName": transition_name,
+            "newStatusId": transition_id,
+        },
+    )
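Combining a plain field with the description schema quoted in the docstring above, a jira_update_issue call might pass a payload like this (the values are illustrative; the description structure is copied verbatim from the docstring):

# Illustrative fields_to_update payload for jira_update_issue
fields_to_update = {
    "summary": "Tighten Confluence client error handling",
    "description": {
        "type": "text",
        "version": 1,
        "text": [
            {
                "type": "paragraph",
                "content": [{"type": "text", "text": "Updated description body"}],
            }
        ],
    },
}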
datarobot_genai/nat/agent.py
CHANGED

@@ -21,7 +21,6 @@ from nat.data_models.api_server import ChatRequest
 from nat.data_models.api_server import ChatResponse
 from nat.data_models.intermediate_step import IntermediateStep
 from nat.data_models.intermediate_step import IntermediateStepType
-from nat.runtime.loader import load_workflow
 from nat.utils.type_utils import StrPath
 from openai.types.chat import CompletionCreateParams
 from ragas import MultiTurnSample
@@ -34,6 +33,8 @@ from datarobot_genai.core.agents.base import InvokeReturn
 from datarobot_genai.core.agents.base import UsageMetrics
 from datarobot_genai.core.agents.base import extract_user_prompt_content
 from datarobot_genai.core.agents.base import is_streaming
+from datarobot_genai.core.mcp.common import MCPConfig
+from datarobot_genai.nat.helpers import load_workflow
 
 logger = logging.getLogger(__name__)
 
@@ -166,17 +167,24 @@ class NatAgent(BaseAgent[None]):
         # Print commands may need flush=True to ensure they are displayed in real-time.
         print("Running agent with user prompt:", chat_request.messages[0].content, flush=True)
 
+        mcp_config = MCPConfig(
+            authorization_context=self.authorization_context,
+            forwarded_headers=self.forwarded_headers,
+        )
+        server_config = mcp_config.server_config
+        headers = server_config["headers"] if server_config else None
+
         if is_streaming(completion_create_params):
 
             async def stream_generator() -> AsyncGenerator[
                 tuple[str, MultiTurnSample | None, UsageMetrics], None
             ]:
-
+                default_usage_metrics: UsageMetrics = {
                     "completion_tokens": 0,
                     "prompt_tokens": 0,
                     "total_tokens": 0,
                 }
-                async with load_workflow(self.workflow_path) as workflow:
+                async with load_workflow(self.workflow_path, headers=headers) as workflow:
                     async with workflow.run(chat_request) as runner:
                         intermediate_future = pull_intermediate_structured()
                         async for result in runner.result_stream():
@@ -188,7 +196,7 @@
                             yield (
                                 result_text,
                                 None,
-
+                                default_usage_metrics,
                             )
 
                         steps = await intermediate_future
@@ -197,6 +205,11 @@
                             for step in steps
                             if step.event_type == IntermediateStepType.LLM_END
                         ]
+                        usage_metrics: UsageMetrics = {
+                            "completion_tokens": 0,
+                            "prompt_tokens": 0,
+                            "total_tokens": 0,
+                        }
                         for step in llm_end_steps:
                             if step.usage_info:
                                 token_usage = step.usage_info.token_usage
@@ -210,7 +223,7 @@
             return stream_generator()
 
         # Create and invoke the NAT (Nemo Agent Toolkit) Agentic Workflow with the inputs
-        result, steps = await self.run_nat_workflow(self.workflow_path, chat_request)
+        result, steps = await self.run_nat_workflow(self.workflow_path, chat_request, headers)
 
         llm_end_steps = [step for step in steps if step.event_type == IntermediateStepType.LLM_END]
         usage_metrics: UsageMetrics = {
@@ -234,7 +247,7 @@
         return result_text, pipeline_interactions, usage_metrics
 
     async def run_nat_workflow(
-        self, workflow_path: StrPath, chat_request: ChatRequest
+        self, workflow_path: StrPath, chat_request: ChatRequest, headers: dict[str, str] | None
     ) -> tuple[ChatResponse | str, list[IntermediateStep]]:
         """Run the NAT workflow with the provided config file and input string.
 
@@ -247,7 +260,7 @@
             ChatResponse | str: The result from the NAT workflow
             list[IntermediateStep]: The list of intermediate steps
         """
-        async with load_workflow(workflow_path) as workflow:
+        async with load_workflow(workflow_path, headers=headers) as workflow:
            async with workflow.run(chat_request) as runner:
                intermediate_future = pull_intermediate_structured()
                runner_outputs = await runner.result()
datarobot_genai/nat/helpers.py
ADDED

@@ -0,0 +1,87 @@
+# Copyright 2025 DataRobot, Inc. and its affiliates.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from collections.abc import AsyncGenerator
+from contextlib import asynccontextmanager
+
+from nat.builder.workflow import Workflow
+from nat.builder.workflow_builder import WorkflowBuilder
+from nat.data_models.config import Config
+from nat.runtime.loader import PluginTypes
+from nat.runtime.loader import discover_and_register_plugins
+from nat.runtime.session import SessionManager
+from nat.utils.data_models.schema_validator import validate_schema
+from nat.utils.io.yaml_tools import yaml_load
+from nat.utils.type_utils import StrPath
+
+
+def load_config(config_file: StrPath, headers: dict[str, str] | None = None) -> Config:
+    """
+    Load a NAT configuration file with injected headers. It ensures that all plugins are
+    loaded and then validates the configuration file against the Config schema.
+
+    Parameters
+    ----------
+    config_file : StrPath
+        The path to the configuration file
+
+    Returns
+    -------
+    Config
+        The validated Config object
+    """
+    # Ensure all of the plugins are loaded
+    discover_and_register_plugins(PluginTypes.CONFIG_OBJECT)
+
+    config_yaml = yaml_load(config_file)
+
+    add_headers_to_datarobot_mcp_auth(config_yaml, headers)
+
+    # Validate configuration adheres to NAT schemas
+    validated_nat_config = validate_schema(config_yaml, Config)
+
+    return validated_nat_config
+
+
+def add_headers_to_datarobot_mcp_auth(config_yaml: dict, headers: dict[str, str] | None) -> None:
+    if headers:
+        if authentication := config_yaml.get("authentication"):
+            for auth_name in authentication:
+                auth_config = authentication[auth_name]
+                if auth_config.get("_type") == "datarobot_mcp_auth":
+                    auth_config["headers"] = headers
+
+
+@asynccontextmanager
+async def load_workflow(
+    config_file: StrPath, max_concurrency: int = -1, headers: dict[str, str] | None = None
+) -> AsyncGenerator[Workflow, None]:
+    """
+    Load the NAT configuration file and create a Runner object. This is the primary entry point for
+    running NAT workflows with injected headers.
+
+    Parameters
+    ----------
+    config_file : StrPath
+        The path to the configuration file
+    max_concurrency : int, optional
+        The maximum number of parallel workflow invocations to support. Specifying 0 or -1 will
+        allow an unlimited count, by default -1
+    """
+    # Load the config object
+    config = load_config(config_file, headers=headers)
+
+    # Must yield the workflow function otherwise it cleans up
+    async with WorkflowBuilder.from_config(config=config) as workflow:
+        yield SessionManager(await workflow.build(), max_concurrency=max_concurrency)
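add_headers_to_datarobot_mcp_auth mutates the parsed config in place, and only for authentication providers whose _type is datarobot_mcp_auth. A small sketch of that behavior on a hand-built config dict (the provider names, the api_key type, and the header value are made up):

from datarobot_genai.nat.helpers import add_headers_to_datarobot_mcp_auth

config_yaml = {
    "authentication": {
        "datarobot": {"_type": "datarobot_mcp_auth"},
        "other": {"_type": "api_key"},  # hypothetical second provider
    }
}
add_headers_to_datarobot_mcp_auth(config_yaml, {"Authorization": "Bearer <token>"})

# Only the datarobot_mcp_auth provider picks up the forwarded headers
assert config_yaml["authentication"]["datarobot"]["headers"] == {"Authorization": "Bearer <token>"}
assert "headers" not in config_yaml["authentication"]["other"]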
{datarobot_genai-0.2.13.dist-info → datarobot_genai-0.2.15.dist-info}/RECORD
CHANGED

@@ -76,13 +76,13 @@ datarobot_genai/drmcp/test_utils/utils.py,sha256=esGKFv8aO31-Qg3owayeWp32BYe1CdY
 datarobot_genai/drmcp/tools/__init__.py,sha256=0kq9vMkF7EBsS6lkEdiLibmUrghTQqosHbZ5k-V9a5g,578
 datarobot_genai/drmcp/tools/clients/__init__.py,sha256=0kq9vMkF7EBsS6lkEdiLibmUrghTQqosHbZ5k-V9a5g,578
 datarobot_genai/drmcp/tools/clients/atlassian.py,sha256=__M_uz7FrcbKCYRzeMn24DCEYD6OmFx_LuywHCxgXsA,6472
-datarobot_genai/drmcp/tools/clients/confluence.py,sha256=
-datarobot_genai/drmcp/tools/clients/jira.py,sha256=
+datarobot_genai/drmcp/tools/clients/confluence.py,sha256=DF6TIGJfR3Lh-D_x66cDNkvOTS8gxL6bVhHRtcP0LKw,10493
+datarobot_genai/drmcp/tools/clients/jira.py,sha256=bL7dL3TSdxoE940iVzpNGbSA6ehpatFw-dmseV9HYgM,8751
 datarobot_genai/drmcp/tools/clients/s3.py,sha256=GmwzvurFdNfvxOooA8g5S4osRysHYU0S9ypg_177Glg,953
 datarobot_genai/drmcp/tools/confluence/__init__.py,sha256=0kq9vMkF7EBsS6lkEdiLibmUrghTQqosHbZ5k-V9a5g,578
-datarobot_genai/drmcp/tools/confluence/tools.py,sha256=
+datarobot_genai/drmcp/tools/confluence/tools.py,sha256=iqX7CR57WCXsQxHQCsfPL_Q78QjN9YZv3uIQbTMfYAg,5459
 datarobot_genai/drmcp/tools/jira/__init__.py,sha256=0kq9vMkF7EBsS6lkEdiLibmUrghTQqosHbZ5k-V9a5g,578
-datarobot_genai/drmcp/tools/jira/tools.py,sha256=
+datarobot_genai/drmcp/tools/jira/tools.py,sha256=Q34JHOuTU7N2RKH4buUQP_E5cM-LIY2o-UQwj9RFMts,8200
 datarobot_genai/drmcp/tools/predictive/__init__.py,sha256=WuOHlNNEpEmcF7gVnhckruJRKU2qtmJLE3E7zoCGLDo,1030
 datarobot_genai/drmcp/tools/predictive/data.py,sha256=k4EJxJrl8DYVGVfJ0DM4YTfnZlC_K3OUHZ0eRUzfluI,3165
 datarobot_genai/drmcp/tools/predictive/deployment.py,sha256=lm02Ayuo11L1hP41fgi3QpR1Eyty-Wc16rM0c8SgliM,3277
@@ -100,14 +100,15 @@ datarobot_genai/llama_index/agent.py,sha256=V6ZsD9GcBDJS-RJo1tJtIHhyW69_78gM6_fO
 datarobot_genai/llama_index/base.py,sha256=ovcQQtC-djD_hcLrWdn93jg23AmD6NBEj7xtw4a6K6c,14481
 datarobot_genai/llama_index/mcp.py,sha256=leXqF1C4zhuYEKFwNEfZHY4dsUuGZk3W7KArY-zxVL8,2645
 datarobot_genai/nat/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-datarobot_genai/nat/agent.py,sha256=
+datarobot_genai/nat/agent.py,sha256=DuGrgqt1FzvAE-cRH_P3LTFUlwuClvbVurdwA-RsbuY,11177
 datarobot_genai/nat/datarobot_auth_provider.py,sha256=Z4NSsrHxK8hUeiqtK_lryHsUuZC74ziNo_FHbsZgtiM,4230
 datarobot_genai/nat/datarobot_llm_clients.py,sha256=Yu208Ed_p_4P3HdpuM7fYnKcXtimORHpKlWVPyijpU8,11356
 datarobot_genai/nat/datarobot_llm_providers.py,sha256=aDoQcTeGI-odqydPXEX9OGGNFbzAtpqzTvHHEkmJuEQ,4963
 datarobot_genai/nat/datarobot_mcp_client.py,sha256=35FzilxNp4VqwBYI0NsOc91-xZm1C-AzWqrOdDy962A,9612
-datarobot_genai
-datarobot_genai-0.2.
-datarobot_genai-0.2.
-datarobot_genai-0.2.
-datarobot_genai-0.2.
-datarobot_genai-0.2.
+datarobot_genai/nat/helpers.py,sha256=Q7E3ADZdtFfS8E6OQPyw2wgA6laQ58N3bhLj5CBWwJs,3265
+datarobot_genai-0.2.15.dist-info/METADATA,sha256=gMptTChyeXtNjX4UhXtmoHfrsnTEG-vtG3dVwZYfW68,6301
+datarobot_genai-0.2.15.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
+datarobot_genai-0.2.15.dist-info/entry_points.txt,sha256=jEW3WxDZ8XIK9-ISmTyt5DbmBb047rFlzQuhY09rGrM,284
+datarobot_genai-0.2.15.dist-info/licenses/AUTHORS,sha256=isJGUXdjq1U7XZ_B_9AH8Qf0u4eX0XyQifJZ_Sxm4sA,80
+datarobot_genai-0.2.15.dist-info/licenses/LICENSE,sha256=U2_VkLIktQoa60Nf6Tbt7E4RMlfhFSjWjcJJfVC-YCE,11341
+datarobot_genai-0.2.15.dist-info/RECORD,,
The remaining dist-info files (WHEEL, entry_points.txt, licenses/AUTHORS, licenses/LICENSE) are unchanged between 0.2.13 and 0.2.15.