datarobot-genai 0.2.37__py3-none-any.whl → 0.2.39__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
datarobot_genai/drmcp/tools/clients/microsoft_graph.py
@@ -16,6 +16,7 @@
 
 import logging
 from typing import Any
+from typing import Literal
 from urllib.parse import quote
 
 import httpx
@@ -541,6 +542,50 @@ class MicrosoftGraphClient:
 
        return MicrosoftGraphError(error_msg)
 
+    async def share_item(
+        self,
+        file_id: str,
+        document_library_id: str,
+        recipient_emails: list[str],
+        role: Literal["read", "write"],
+        send_invitation: bool,
+    ) -> None:
+        """
+        Share a SharePoint / OneDrive item using the Microsoft Graph API.
+        Under the hood, all SharePoint/OneDrive resources in the MS Graph API
+        are treated as 'driveItem'.
+
+        Args:
+            file_id: The ID of the file or folder to share.
+            document_library_id: The ID of the document library containing the item.
+            recipient_emails: A list of email addresses to invite.
+            role: The role to assign.
+            send_invitation: Flag determining if recipients should be notified.
+
+        Returns
+        -------
+        None
+
+        Raises
+        ------
+        MicrosoftGraphError: If sharing fails.
+        """
+        graph_url = f"{GRAPH_API_BASE}/drives/{document_library_id}/items/{file_id}/invite"
+
+        payload = {
+            "recipients": [{"email": email} for email in recipient_emails],
+            "requireSignIn": True,
+            "sendInvitation": send_invitation,
+            "roles": [role],
+        }
+
+        response = await self._client.post(url=graph_url, json=payload)
+
+        if response.status_code not in (200, 201):
+            raise MicrosoftGraphError(
+                f"Microsoft Graph API error {response.status_code}: {response.text}"
+            )
+
    async def __aenter__(self) -> "MicrosoftGraphClient":
        """Async context manager entry."""
        return self
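
For orientation, a minimal sketch of calling the new client method directly; the token and IDs below are hypothetical placeholders, and the example assumes the `MicrosoftGraphClient` constructor accepts `access_token` as shown elsewhere in this diff:

    import asyncio

    from datarobot_genai.drmcp.tools.clients.microsoft_graph import MicrosoftGraphClient

    async def main() -> None:
        # Placeholder token and IDs; real values come from the Graph OAuth flow.
        async with MicrosoftGraphClient(access_token="<token>") as client:
            await client.share_item(
                file_id="01ABC...",              # hypothetical driveItem ID
                document_library_id="b!xyz...",  # hypothetical drive ID
                recipient_emails=["user@example.com"],
                role="read",
                send_invitation=False,
            )

    asyncio.run(main())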
datarobot_genai/drmcp/tools/confluence/tools.py
@@ -55,26 +55,16 @@ async def confluence_get_page(
     if isinstance(access_token, ToolError):
         raise access_token
 
-    try:
-        async with ConfluenceClient(access_token) as client:
-            if page_id_or_title.isdigit():
-                page_response = await client.get_page_by_id(page_id_or_title)
-            else:
-                if not space_key:
-                    raise ToolError(
-                        "Argument validation error: "
-                        "'space_key' is required when identifying a page by title."
-                    )
-                page_response = await client.get_page_by_title(page_id_or_title, space_key)
-    except ConfluenceError as e:
-        logger.error(f"Confluence error getting page: {e}")
-        raise ToolError(str(e))
-    except Exception as e:
-        logger.error(f"Unexpected error getting Confluence page: {e}")
-        raise ToolError(
-            f"An unexpected error occurred while getting Confluence page "
-            f"'{page_id_or_title}': {str(e)}"
-        )
+    async with ConfluenceClient(access_token) as client:
+        if page_id_or_title.isdigit():
+            page_response = await client.get_page_by_id(page_id_or_title)
+        else:
+            if not space_key:
+                raise ToolError(
+                    "Argument validation error: "
+                    "'space_key' is required when identifying a page by title."
+                )
+            page_response = await client.get_page_by_title(page_id_or_title, space_key)
 
     return ToolResult(
         content=f"Successfully retrieved page '{page_response.title}'.",
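
A pattern that runs through this release: the Confluence, Google Drive, Jira, and Microsoft Graph tools below all drop their per-call `try`/`except` wrappers, so client exceptions such as `ConfluenceError` now propagate out of the tool instead of being logged and re-raised as `ToolError`. A minimal sketch of a call site under that assumption (the handler and import paths are illustrative, and it presumes the decorated tool remains directly awaitable):

    from fastmcp.exceptions import ToolError

    from datarobot_genai.drmcp.tools.clients.confluence import ConfluenceError
    from datarobot_genai.drmcp.tools.confluence.tools import confluence_get_page

    async def fetch_page(page_id: str):
        try:
            # With the wrappers gone, ConfluenceError surfaces here directly
            # rather than arriving pre-wrapped as a ToolError.
            return await confluence_get_page(page_id_or_title=page_id)
        except (ConfluenceError, ToolError) as exc:
            print(f"Confluence call failed: {exc}")
            return None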
@@ -116,22 +106,12 @@ async def confluence_create_page(
     if isinstance(access_token, ToolError):
         raise access_token
 
-    try:
-        async with ConfluenceClient(access_token) as client:
-            page_response = await client.create_page(
-                space_key=space_key,
-                title=title,
-                body_content=body_content,
-                parent_id=parent_id,
-            )
-    except ConfluenceError as e:
-        logger.error(f"Confluence error creating page: {e}")
-        raise ToolError(str(e))
-    except Exception as e:
-        logger.error(f"Unexpected error creating Confluence page: {e}")
-        raise ToolError(
-            f"An unexpected error occurred while creating Confluence page "
-            f"'{title}' in space '{space_key}': {str(e)}"
+    async with ConfluenceClient(access_token) as client:
+        page_response = await client.create_page(
+            space_key=space_key,
+            title=title,
+            body_content=body_content,
+            parent_id=parent_id,
         )
 
     return ToolResult(
@@ -164,19 +144,10 @@ async def confluence_add_comment(
     if isinstance(access_token, ToolError):
         raise access_token
 
-    try:
-        async with ConfluenceClient(access_token) as client:
-            comment_response = await client.add_comment(
-                page_id=page_id,
-                comment_body=comment_body,
-            )
-    except ConfluenceError as e:
-        logger.error(f"Confluence error adding comment: {e}")
-        raise ToolError(str(e))
-    except Exception as e:
-        logger.error(f"Unexpected error adding comment to Confluence page: {e}")
-        raise ToolError(
-            f"An unexpected error occurred while adding comment to page '{page_id}': {str(e)}"
+    async with ConfluenceClient(access_token) as client:
+        comment_response = await client.add_comment(
+            page_id=page_id,
+            comment_body=comment_body,
         )
 
     return ToolResult(
@@ -220,32 +191,24 @@ async def confluence_search(
     if isinstance(access_token, ToolError):
         raise access_token
 
-    try:
-        async with ConfluenceClient(access_token) as client:
-            results = await client.search_confluence_content(
-                cql_query=cql_query, max_results=max_results
-            )
-
-            # If include_body is True, fetch full content for each page
-            if include_body and results:
-                data = []
-                for result in results:
-                    flat = result.as_flat_dict()
-                    try:
-                        page = await client.get_page_by_id(result.id)
-                        flat["body"] = page.body
-                    except ConfluenceError:
-                        flat["body"] = None  # Keep excerpt if page fetch fails
-                    data.append(flat)
-            else:
-                data = [result.as_flat_dict() for result in results]
-
-    except ConfluenceError as e:
-        logger.error(f"Confluence error searching content: {e}")
-        raise ToolError(str(e))
-    except Exception as e:
-        logger.error(f"Unexpected error searching Confluence content: {e}")
-        raise ToolError(f"An unexpected error occurred while searching Confluence: {str(e)}")
+    async with ConfluenceClient(access_token) as client:
+        results = await client.search_confluence_content(
+            cql_query=cql_query, max_results=max_results
+        )
+
+        # If include_body is True, fetch full content for each page
+        if include_body and results:
+            data = []
+            for result in results:
+                flat = result.as_flat_dict()
+                try:
+                    page = await client.get_page_by_id(result.id)
+                    flat["body"] = page.body
+                except ConfluenceError:
+                    flat["body"] = None  # Keep excerpt if page fetch fails
+                data.append(flat)
+        else:
+            data = [result.as_flat_dict() for result in results]
 
     n = len(results)
     return ToolResult(
@@ -296,20 +259,11 @@ async def confluence_update_page(
     if isinstance(access_token, ToolError):
         raise access_token
 
-    try:
-        async with ConfluenceClient(access_token) as client:
-            page_response = await client.update_page(
-                page_id=page_id,
-                new_body_content=new_body_content,
-                version_number=version_number,
-            )
-    except ConfluenceError as e:
-        logger.error(f"Confluence error updating page: {e}")
-        raise ToolError(str(e))
-    except Exception as e:
-        logger.error(f"Unexpected error updating Confluence page: {e}")
-        raise ToolError(
-            f"An unexpected error occurred while updating Confluence page '{page_id}': {str(e)}"
+    async with ConfluenceClient(access_token) as client:
+        page_response = await client.update_page(
+            page_id=page_id,
+            new_body_content=new_body_content,
+            version_number=version_number,
         )
 
     return ToolResult(
datarobot_genai/drmcp/tools/gdrive/tools.py
@@ -28,7 +28,6 @@ from datarobot_genai.drmcp.tools.clients.gdrive import MAX_PAGE_SIZE
 from datarobot_genai.drmcp.tools.clients.gdrive import SUPPORTED_FIELDS
 from datarobot_genai.drmcp.tools.clients.gdrive import SUPPORTED_FIELDS_STR
 from datarobot_genai.drmcp.tools.clients.gdrive import GoogleDriveClient
-from datarobot_genai.drmcp.tools.clients.gdrive import GoogleDriveError
 from datarobot_genai.drmcp.tools.clients.gdrive import get_gdrive_access_token
 
 logger = logging.getLogger(__name__)
@@ -79,22 +78,15 @@ async def gdrive_find_contents(
     if isinstance(access_token, ToolError):
         raise access_token
 
-    try:
-        async with GoogleDriveClient(access_token) as client:
-            data = await client.list_files(
-                page_size=page_size,
-                page_token=page_token,
-                query=query,
-                limit=limit,
-                folder_id=folder_id,
-                recursive=recursive,
-            )
-    except GoogleDriveError as e:
-        logger.error(f"Google Drive error listing files: {e}")
-        raise ToolError(str(e))
-    except Exception as e:
-        logger.error(f"Unexpected error listing Google Drive files: {e}")
-        raise ToolError(f"An unexpected error occurred while listing Google Drive files: {str(e)}")
+    async with GoogleDriveClient(access_token) as client:
+        data = await client.list_files(
+            page_size=page_size,
+            page_token=page_token,
+            query=query,
+            limit=limit,
+            folder_id=folder_id,
+            recursive=recursive,
+        )
 
     filtered_fields = set(fields).intersection(SUPPORTED_FIELDS) if fields else SUPPORTED_FIELDS
     number_of_files = len(data.files)
@@ -155,17 +147,8 @@ async def gdrive_read_content(
     if isinstance(access_token, ToolError):
         raise access_token
 
-    try:
-        async with GoogleDriveClient(access_token) as client:
-            file_content = await client.read_file_content(file_id, target_format)
-    except GoogleDriveError as e:
-        logger.error(f"Google Drive error reading file content: {e}")
-        raise ToolError(str(e))
-    except Exception as e:
-        logger.error(f"Unexpected error reading Google Drive file content: {e}")
-        raise ToolError(
-            f"An unexpected error occurred while reading Google Drive file content: {str(e)}"
-        )
+    async with GoogleDriveClient(access_token) as client:
+        file_content = await client.read_file_content(file_id, target_format)
 
     export_info = ""
     if file_content.was_exported:
@@ -239,20 +222,13 @@ async def gdrive_create_file(
     if isinstance(access_token, ToolError):
         raise access_token
 
-    try:
-        async with GoogleDriveClient(access_token) as client:
-            created_file = await client.create_file(
-                name=name,
-                mime_type=mime_type,
-                parent_id=parent_id,
-                initial_content=initial_content,
-            )
-    except GoogleDriveError as e:
-        logger.error(f"Google Drive error creating file: {e}")
-        raise ToolError(str(e))
-    except Exception as e:
-        logger.error(f"Unexpected error creating Google Drive file: {e}")
-        raise ToolError(f"An unexpected error occurred while creating Google Drive file: {str(e)}")
+    async with GoogleDriveClient(access_token) as client:
+        created_file = await client.create_file(
+            name=name,
+            mime_type=mime_type,
+            parent_id=parent_id,
+            initial_content=initial_content,
+        )
 
     file_type = "folder" if mime_type == GOOGLE_DRIVE_FOLDER_MIME else "file"
     content_info = ""
@@ -313,21 +289,12 @@ async def gdrive_update_metadata(
     if isinstance(access_token, ToolError):
         raise access_token
 
-    try:
-        async with GoogleDriveClient(access_token) as client:
-            updated_file = await client.update_file_metadata(
-                file_id=file_id,
-                new_name=new_name,
-                starred=starred,
-                trashed=trash,
-            )
-    except GoogleDriveError as e:
-        logger.error(f"Google Drive error updating file metadata: {e}")
-        raise ToolError(str(e))
-    except Exception as e:
-        logger.error(f"Unexpected error updating Google Drive file metadata: {e}")
-        raise ToolError(
-            f"An unexpected error occurred while updating Google Drive file metadata: {str(e)}"
+    async with GoogleDriveClient(access_token) as client:
+        updated_file = await client.update_file_metadata(
+            file_id=file_id,
+            new_name=new_name,
+            starred=starred,
+            trashed=trash,
         )
 
     changes: list[str] = []
@@ -408,23 +375,14 @@ async def gdrive_manage_access(
     if isinstance(access_token, ToolError):
         raise access_token
 
-    try:
-        async with GoogleDriveClient(access_token) as client:
-            permission_id = await client.manage_access(
-                file_id=file_id,
-                action=action,
-                role=role,
-                email_address=email_address,
-                permission_id=permission_id,
-                transfer_ownership=transfer_ownership,
-            )
-    except GoogleDriveError as e:
-        logger.error(f"Google Drive permission operation failed: {e}")
-        raise ToolError(str(e))
-    except Exception as e:
-        logger.error(f"Unexpected error changing permissions for Google Drive file {file_id}: {e}")
-        raise ToolError(
-            f"Unexpected error changing permissions for Google Drive file {file_id}: {str(e)}"
+    async with GoogleDriveClient(access_token) as client:
+        permission_id = await client.manage_access(
+            file_id=file_id,
+            action=action,
+            role=role,
+            email_address=email_address,
+            permission_id=permission_id,
+            transfer_ownership=transfer_ownership,
        )
 
     # Build response
datarobot_genai/drmcp/tools/jira/tools.py
@@ -72,14 +72,8 @@ async def jira_get_issue(
     if isinstance(access_token, ToolError):
         raise access_token
 
-    try:
-        async with JiraClient(access_token) as client:
-            issue = await client.get_jira_issue(issue_key)
-    except Exception as e:
-        logger.error(f"Unexpected error while getting Jira issue: {e}")
-        raise ToolError(
-            f"An unexpected error occurred while getting Jira issue '{issue_key}': {str(e)}"
-        )
+    async with JiraClient(access_token) as client:
+        issue = await client.get_jira_issue(issue_key)
 
     return ToolResult(
         content=f"Successfully retrieved details for issue '{issue_key}'.",
@@ -118,17 +112,13 @@ async def jira_create_issue(
             f"Unexpected issue type `{issue_type}`. Possible values are {possible_issue_types}."
         )
 
-    try:
-        async with JiraClient(access_token) as client:
-            issue_key = await client.create_jira_issue(
-                project_key=project_key,
-                summary=summary,
-                issue_type_id=issue_type_id,
-                description=description,
-            )
-    except Exception as e:
-        logger.error(f"Unexpected error while creating Jira issue: {e}")
-        raise ToolError(f"An unexpected error occurred while creating Jira issue: {str(e)}")
+    async with JiraClient(access_token) as client:
+        issue_key = await client.create_jira_issue(
+            project_key=project_key,
+            summary=summary,
+            issue_type_id=issue_type_id,
+            description=description,
+        )
 
     return ToolResult(
         content=f"Successfully created issue '{issue_key}'.",
@@ -179,14 +169,10 @@ async def jira_update_issue(
     if isinstance(access_token, ToolError):
         raise access_token
 
-    try:
-        async with JiraClient(access_token) as client:
-            updated_fields = await client.update_jira_issue(
-                issue_key=issue_key, fields=fields_to_update
-            )
-    except Exception as e:
-        logger.error(f"Unexpected error while updating Jira issue: {e}")
-        raise ToolError(f"An unexpected error occurred while updating Jira issue: {str(e)}")
+    async with JiraClient(access_token) as client:
+        updated_fields = await client.update_jira_issue(
+            issue_key=issue_key, fields=fields_to_update
+        )
 
     updated_fields_str = ",".join(updated_fields)
     return ToolResult(
@@ -226,12 +212,8 @@ async def jira_transition_issue(
             f"Possible values are {available_transitions_str}."
         )
 
-    try:
-        async with JiraClient(access_token) as client:
-            await client.transition_jira_issue(issue_key=issue_key, transition_id=transition_id)
-    except Exception as e:
-        logger.error(f"Unexpected error while transitioning Jira issue: {e}")
-        raise ToolError(f"An unexpected error occurred while transitioning Jira issue: {str(e)}")
+    async with JiraClient(access_token) as client:
+        await client.transition_jira_issue(issue_key=issue_key, transition_id=transition_id)
 
     return ToolResult(
         content=f"Successfully transitioned issue '{issue_key}' to status '{transition_name}'.",
datarobot_genai/drmcp/tools/microsoft_graph/tools.py
@@ -16,13 +16,13 @@
 
 import logging
 from typing import Annotated
+from typing import Literal
 
 from fastmcp.exceptions import ToolError
 from fastmcp.tools.tool import ToolResult
 
 from datarobot_genai.drmcp.core.mcp_instance import dr_mcp_tool
 from datarobot_genai.drmcp.tools.clients.microsoft_graph import MicrosoftGraphClient
-from datarobot_genai.drmcp.tools.clients.microsoft_graph import MicrosoftGraphError
 from datarobot_genai.drmcp.tools.clients.microsoft_graph import get_microsoft_graph_access_token
 from datarobot_genai.drmcp.tools.clients.microsoft_graph import validate_site_url
 
@@ -142,25 +142,16 @@ async def microsoft_graph_search_content(
     if isinstance(access_token, ToolError):
         raise access_token
 
-    try:
-        async with MicrosoftGraphClient(access_token=access_token, site_url=site_url) as client:
-            items = await client.search_content(
-                search_query=search_query,
-                site_id=site_id,
-                from_offset=from_offset,
-                size=size,
-                entity_types=entity_types,
-                filters=filters,
-                include_hidden_content=include_hidden_content,
-                region=region,
-            )
-    except MicrosoftGraphError as e:
-        logger.error(f"Microsoft Graph error searching content: {e}")
-        raise ToolError(str(e))
-    except Exception as e:
-        logger.error(f"Unexpected error searching Microsoft Graph content: {e}", exc_info=True)
-        raise ToolError(
-            f"An unexpected error occurred while searching Microsoft Graph content: {str(e)}"
+    async with MicrosoftGraphClient(access_token=access_token, site_url=site_url) as client:
+        items = await client.search_content(
+            search_query=search_query,
+            site_id=site_id,
+            from_offset=from_offset,
+            size=size,
+            entity_types=entity_types,
+            filters=filters,
+            include_hidden_content=include_hidden_content,
+            region=region,
        )
 
     results = []
@@ -198,6 +189,68 @@ async def microsoft_graph_search_content(
     )
 
 
+@dr_mcp_tool(tags={"microsoft", "graph api", "sharepoint", "onedrive", "share"}, enabled=False)
+async def microsoft_graph_share_item(
+    *,
+    file_id: Annotated[str, "The ID of the file or folder to share."],
+    document_library_id: Annotated[str, "The ID of the document library containing the item."],
+    recipient_emails: Annotated[list[str], "A list of email addresses to invite."],
+    role: Annotated[Literal["read", "write"], "The role to assign: 'read' or 'write'."] = "read",
+    send_invitation: Annotated[
+        bool, "Flag determining if recipients should be notified. Default False"
+    ] = False,
+) -> ToolResult | ToolError:
+    """
+    Share a SharePoint or OneDrive file or folder with one or more users.
+    It works with internal users or existing guest users in the tenant.
+    It does NOT create new guest accounts and does NOT use the tenant-level
+    /invitations endpoint.
+
+    The Microsoft Graph API treats OneDrive and SharePoint resources as driveItem.
+
+    API Reference:
+    - DriveItem Resource Type: https://learn.microsoft.com/en-us/graph/api/resources/driveitem
+    - API Documentation: https://learn.microsoft.com/en-us/graph/api/driveitem-invite
+    """
+    if not file_id or not file_id.strip():
+        raise ToolError("Argument validation error: 'file_id' cannot be empty.")
+
+    if not document_library_id or not document_library_id.strip():
+        raise ToolError("Argument validation error: 'document_library_id' cannot be empty.")
+
+    if not recipient_emails:
+        raise ToolError("Argument validation error: you must provide at least one 'recipient'.")
+
+    access_token = await get_microsoft_graph_access_token()
+    if isinstance(access_token, ToolError):
+        raise access_token
+
+    async with MicrosoftGraphClient(access_token=access_token) as client:
+        await client.share_item(
+            file_id=file_id,
+            document_library_id=document_library_id,
+            recipient_emails=recipient_emails,
+            role=role,
+            send_invitation=send_invitation,
+        )
+
+    n = len(recipient_emails)
+    return ToolResult(
+        content=(
+            f"Successfully shared file {file_id} "
+            f"from document library {document_library_id} "
+            f"with {n} recipients with '{role}' role."
+        ),
+        structured_content={
+            "fileId": file_id,
+            "documentLibraryId": document_library_id,
+            "recipientEmails": recipient_emails,
+            "n": n,
+            "role": role,
+        },
+    )
+
+
 @dr_mcp_tool(
     tags={
         "microsoft",
datarobot_genai/drmcp/tools/predictive/deployment_info.py
@@ -19,9 +19,13 @@ import json
 import logging
 from datetime import datetime
 from datetime import timedelta
+from typing import Annotated
 from typing import Any
 
 import pandas as pd
+from fastmcp.exceptions import ToolError
+from fastmcp.tools.tool import ToolResult
+from mcp.types import TextContent
 
 from datarobot_genai.drmcp.core.clients import get_sdk_client
 from datarobot_genai.drmcp.core.mcp_instance import dr_mcp_tool
@@ -29,40 +33,18 @@ from datarobot_genai.drmcp.core.mcp_instance import dr_mcp_tool
 logger = logging.getLogger(__name__)
 
 
-@dr_mcp_tool(tags={"deployment", "info", "metadata"})
-async def get_deployment_info(deployment_id: str) -> str:
+@dr_mcp_tool(tags={"predictive", "deployment", "read", "info", "metadata"})
+async def get_deployment_info(
+    *,
+    deployment_id: Annotated[str, "The ID of the DataRobot deployment"] | None = None,
+) -> ToolError | ToolResult:
     """
     Retrieve information about the deployment, including the list of
     features needed to make predictions on this deployment.
-
-    Args:
-        deployment_id: The ID of the DataRobot deployment
-
-    Returns
-    -------
-    JSON string containing model and feature information. For DataRobot native
-    models this will return full model information; for custom models it will
-    likely just return the features and total_features values.
-
-    - model_type: Type of model
-    - target: Name of the target feature
-    - target_type: Type of the target feature
-    - features: List of features with their importance and type
-    - total_features: Total number of features
-    - time_series_config: Time series configuration if applicable
-
-    For features:
-    - feature_name: Name of the feature
-    - ``name`` : str, feature name
-    - ``feature_type`` : str, feature type
-    - ``importance`` : float, numeric measure of the relationship strength between
-      the feature and target (independent of model or other features)
-    - ``date_format`` : str or None, the date format string for how this feature was
-      interpreted, null if not a date feature, compatible with
-      https://docs.python.org/2/library/time.html#time.strftime.
-    - ``known_in_advance`` : bool, whether the feature was selected as known in advance in
-      a time series model, false for non-time series models.
     """
+    if not deployment_id:
+        raise ToolError("Deployment ID must be provided")
+
     client = get_sdk_client()
     deployment = client.Deployment.get(deployment_id)
@@ -112,40 +94,34 @@ async def get_deployment_info(deployment_id: str) -> str:
             "series_id_columns": partition.multiseries_id_columns or [],
         }
 
-    return json.dumps(result, indent=2)
+    return ToolResult(
+        content=json.dumps(result, indent=2),
+        structured_content=result,
+    )
 
 
-@dr_mcp_tool(tags={"deployment", "template", "data"})
-async def generate_prediction_data_template(deployment_id: str, n_rows: int = 1) -> str:
-    """
-    Generate a template CSV with the correct structure for making predictions.
-
-    This creates a template with:
-    - All required feature columns in the correct order
-    - Sample values based on feature types
-    - Comments explaining each feature
-    - When using this tool, always consider feature importance. For features with high
-      importance, try to infer or ask for a reasonable value, using frequent values or
-      domain knowledge if available. For less important features, you may leave them blank.
-    - If frequent values are available for a feature, they will be used as sample values;
-      otherwise, blank fields will be used.
-    Please note that using frequent values in your predictions data can influence the
-    prediction; think of it as sending in the average value for the feature. If you don't
-    want this effect on your predictions, leave the field blank in your predictions dataset.
-
-    Args:
-        deployment_id: The ID of the DataRobot deployment
-        n_rows: Number of template rows to generate (default 1)
-
-    Returns
-    -------
-    CSV template string with sample data ready for predictions
-    """
+@dr_mcp_tool(tags={"predictive", "deployment", "read", "template", "data"})
+async def generate_prediction_data_template(
+    *,
+    deployment_id: Annotated[str, "The ID of the DataRobot deployment"] | None = None,
+    n_rows: Annotated[int, "Number of template rows to generate"] = 1,
+) -> ToolError | ToolResult:
+    """Generate a template CSV with the correct structure for making predictions."""
+    if not deployment_id:
+        raise ToolError("Deployment ID must be provided")
+    if n_rows is None or n_rows <= 0:
+        n_rows = 1
+
     # Get feature information
-    features_json = await get_deployment_features(deployment_id)
+    features_result = await get_deployment_features(deployment_id=deployment_id)
     # Add error handling for empty or error responses
+    # Extract text content from ToolResult
+    if features_result.content and isinstance(features_result.content[0], TextContent):
+        features_json = features_result.content[0].text
+    else:
+        features_json = str(features_result.content)
     if not features_json or features_json.strip().startswith("Error"):
-        return f"Error: {features_json}"
+        raise ToolError(f"Error with feature information: {features_json}")
     features_info = json.loads(features_json)
 
     # Create template data
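
The "extract the text payload from a ToolResult" idiom above is repeated verbatim in three tools in this file; a small helper along these lines could factor it out (a sketch only; `extract_text` is a hypothetical name, not part of the package):

    from fastmcp.tools.tool import ToolResult
    from mcp.types import TextContent

    def extract_text(result: ToolResult) -> str:
        """Return the first text block of a ToolResult, falling back to str()."""
        if result.content and isinstance(result.content[0], TextContent):
            return result.content[0].text
        return str(result.content)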
@@ -218,49 +194,55 @@ async def generate_prediction_data_template(deployment_id: str, n_rows: int = 1)
     result += f"# Total Features: {features_info['total_features']}\n"
     result += df.to_csv(index=False)
 
-    return str(result)
+    # Build structured content with template data and metadata
+    structured_content = {
+        "deployment_id": deployment_id,
+        "model_type": features_info["model_type"],
+        "target": features_info["target"],
+        "target_type": features_info["target_type"],
+        "total_features": features_info["total_features"],
+        "template_data": df.to_dict("records"),  # Convert DataFrame to list of dicts
+    }
+
+    if "time_series_config" in features_info:
+        structured_content["time_series_config"] = features_info["time_series_config"]
+
+    return ToolResult(
+        content=str(result),
+        structured_content=structured_content,
+    )
 
 
-@dr_mcp_tool(tags={"deployment", "validation", "data"})
+@dr_mcp_tool(tags={"predictive", "deployment", "read", "validation", "data"})
 async def validate_prediction_data(
-    deployment_id: str,
-    file_path: str | None = None,
-    csv_string: str | None = None,
-) -> str:
-    """
-    Validate if a CSV file is suitable for making predictions with a deployment.
-
-    Checks:
-    - All required features are present
-    - Feature types match expectations
-    - Missing values (null, empty string, or blank fields) are allowed and will not cause errors
-    - No critical issues that would prevent predictions
-
-    Args:
-        deployment_id: The ID of the DataRobot deployment
-        file_path: Path to the CSV file to validate (optional if csv_string is provided)
-        csv_string: CSV data as a string (optional, used if file_path is not provided)
-
-    Returns
-    -------
-    Validation report including any errors, warnings, and suggestions
-    """
+    *,
+    deployment_id: Annotated[str, "The ID of the DataRobot deployment"] | None = None,
+    file_path: Annotated[
+        str, "Path to the CSV file to validate (optional if csv_string is provided)"
+    ]
+    | None = None,
+    csv_string: Annotated[str, "CSV data as a string (optional, used if file_path is not provided)"]
+    | None = None,
+) -> ToolError | ToolResult:
+    """Validate if a CSV file is suitable for making predictions with a deployment."""
     # Load the data
     if csv_string is not None:
         df = pd.read_csv(io.StringIO(csv_string))
     elif file_path is not None:
         df = pd.read_csv(file_path)
     else:
-        return json.dumps(
-            {
-                "status": "error",
-                "error": "Must provide either file_path or csv_string.",
-            },
-            indent=2,
-        )
+        raise ToolError("Must provide either file_path or csv_string.")
+
+    if not deployment_id:
+        raise ToolError("Deployment ID must be provided")
 
     # Get deployment features
-    features_json = await get_deployment_features(deployment_id)
+    features_result = await get_deployment_features(deployment_id=deployment_id)
+    # Extract text content from ToolResult
+    if features_result.content and isinstance(features_result.content[0], TextContent):
+        features_json = features_result.content[0].text
+    else:
+        features_json = str(features_result.content)
     features_info = json.loads(features_json)
 
     validation_report: dict[str, Any] = {
@@ -359,22 +341,29 @@ async def validate_prediction_data(
         "model_type": features_info["model_type"],
     }
 
-    return json.dumps(validation_report, indent=2)
-
-
-@dr_mcp_tool(tags={"deployment", "features", "info"})
-async def get_deployment_features(deployment_id: str) -> str:
-    """
-    Retrieve only the features list for a deployment, as JSON string.
-    Args:
-        deployment_id: The ID of the DataRobot deployment
-    Returns:
-        JSON string containing only the features list and time series config if present.
-    """
-    info_json = await get_deployment_info(deployment_id)
+    return ToolResult(
+        content=json.dumps(validation_report, indent=2),
+        structured_content=validation_report,
+    )
+
+
+@dr_mcp_tool(tags={"predictive", "deployment", "read", "features", "info"})
+async def get_deployment_features(
+    *,
+    deployment_id: Annotated[str, "The ID of the DataRobot deployment"] | None = None,
+) -> ToolError | ToolResult:
+    """Retrieve only the features list for a deployment, as JSON string."""
+    if not deployment_id:
+        raise ToolError("Deployment ID must be provided")
+
+    info_result = await get_deployment_info(deployment_id=deployment_id)
+    # Extract text content from ToolResult
+    if info_result.content and isinstance(info_result.content[0], TextContent):
+        info_json = info_result.content[0].text
+    else:
+        info_json = str(info_result.content)
     if not info_json.strip().startswith("{"):
-        # Return a default error JSON
-        return json.dumps({"features": [], "total_features": 0, "error": info_json}, indent=2)
+        raise ToolError(f"Error with deployment info: {info_json}")
     info = json.loads(info_json)
     # Only keep features, time_series_config, and total_features
     result = {
@@ -389,4 +378,8 @@ async def get_deployment_features(deployment_id: str) -> str:
         result["target"] = info["target"]
     if "target_type" in info:
         result["target_type"] = info["target_type"]
-    return json.dumps(result, indent=2)
+
+    return ToolResult(
+        content=json.dumps(result, indent=2),
+        structured_content=result,
+    )
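
These predictive tools now return a `ToolResult` whose `structured_content` mirrors the JSON they previously returned as a plain string, so callers can read fields without re-parsing. A sketch of a consumer under that assumption, with "<deployment-id>" as a placeholder and assuming the decorated tool remains directly awaitable:

    import asyncio

    from datarobot_genai.drmcp.tools.predictive.deployment_info import get_deployment_features

    async def main() -> None:
        result = await get_deployment_features(deployment_id="<deployment-id>")
        features = (result.structured_content or {}).get("features", [])
        print(f"deployment exposes {len(features)} features")

    asyncio.run(main())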
datarobot_genai-0.2.39.dist-info/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: datarobot-genai
-Version: 0.2.37
+Version: 0.2.39
 Summary: Generic helpers for GenAI
 Project-URL: Homepage, https://github.com/datarobot-oss/datarobot-genai
 Author: DataRobot, Inc.
datarobot_genai-0.2.39.dist-info/RECORD
@@ -84,20 +84,20 @@ datarobot_genai/drmcp/tools/clients/atlassian.py,sha256=__M_uz7FrcbKCYRzeMn24DCE
 datarobot_genai/drmcp/tools/clients/confluence.py,sha256=h_G0By_kDnJeWDT_d-IREsaZ5-0xB5GoLXOqblYP5MA,20706
 datarobot_genai/drmcp/tools/clients/gdrive.py,sha256=RK4IISpYb99aK6WgDthesDoglaZxwGpG_PPAAe6xsVM,33064
 datarobot_genai/drmcp/tools/clients/jira.py,sha256=Rm91JAyrNIqxu66-9rU1YqoRXVnWbEy-Ahvy6f6HlVg,9823
-datarobot_genai/drmcp/tools/clients/microsoft_graph.py,sha256=-g0EhaBVElKbujaO2cHdgc86hwFEkkyEyZVAw8pq7yM,24468
+datarobot_genai/drmcp/tools/clients/microsoft_graph.py,sha256=xh3J3QvBpsk-jO5tPIikn2AR1q5Fp3Z3_zdfb-C6UKE,25970
 datarobot_genai/drmcp/tools/clients/s3.py,sha256=GmwzvurFdNfvxOooA8g5S4osRysHYU0S9ypg_177Glg,953
 datarobot_genai/drmcp/tools/confluence/__init__.py,sha256=0kq9vMkF7EBsS6lkEdiLibmUrghTQqosHbZ5k-V9a5g,578
-datarobot_genai/drmcp/tools/confluence/tools.py,sha256=_-ws65WLK8KZP_mKkf4yJ7ZunR8qdyoiMwHQX47MSMw,12362
+datarobot_genai/drmcp/tools/confluence/tools.py,sha256=tbZxpSkMqFWSz8HxCnjFuJ0JL06RD5t-B1alxxKnMl4,10314
 datarobot_genai/drmcp/tools/gdrive/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-datarobot_genai/drmcp/tools/gdrive/tools.py,sha256=7bNrp7E3opKwsBDYfLIOsOGfPXW-Ae9KvcimEzetR0A,17631
+datarobot_genai/drmcp/tools/gdrive/tools.py,sha256=38XxGXQHZcoX06HQDFf0-BFmpNxNG4wdvijaoqIM4ho,15572
 datarobot_genai/drmcp/tools/jira/__init__.py,sha256=0kq9vMkF7EBsS6lkEdiLibmUrghTQqosHbZ5k-V9a5g,578
-datarobot_genai/drmcp/tools/jira/tools.py,sha256=dfkqTU2HH-7n44hX80ODFacKq0p0LOchFcZtIIKFNMM,9687
+datarobot_genai/drmcp/tools/jira/tools.py,sha256=qEQyyri2bPYpphDR9SVxon1_gTmBO2bPuP9nOLLYdk0,8775
 datarobot_genai/drmcp/tools/microsoft_graph/__init__.py,sha256=CuOaMt1AJo7cHx_GuhO3s_aqxZas_wlDsoBorBsvbeU,577
-datarobot_genai/drmcp/tools/microsoft_graph/tools.py,sha256=cNctozv_4lRC5Kva3D2j4taZfeQHDE6LTAjcmeQXwWA,10446
+datarobot_genai/drmcp/tools/microsoft_graph/tools.py,sha256=iQnmj-AEIVpGBVjlg2ogwDeHdQlTexdTQzX-DYQbFfQ,12479
 datarobot_genai/drmcp/tools/predictive/__init__.py,sha256=WuOHlNNEpEmcF7gVnhckruJRKU2qtmJLE3E7zoCGLDo,1030
 datarobot_genai/drmcp/tools/predictive/data.py,sha256=VbGs8ERP8vNFtTTryGhI61JItNVaJsx1gxpRX1ZFZcg,4626
 datarobot_genai/drmcp/tools/predictive/deployment.py,sha256=Pc6lz9V2JOw3Ufw-SsGAhMKf6-YhvbjGoNLRFOIcSSY,3670
-datarobot_genai/drmcp/tools/predictive/deployment_info.py,sha256=BGEF_dmbxOBJR0n1Tt9TO2-iNTQSBTr-oQUyaxLZ0ZI,15297
+datarobot_genai/drmcp/tools/predictive/deployment_info.py,sha256=I9YxznndDOq0H2QgIjkb5O5sX8S6GTbYmXtRRYjEzOw,14778
 datarobot_genai/drmcp/tools/predictive/model.py,sha256=BVxOMHh3--liwBU4VB1OWRrqkhJ4y_Rq053f7y94TF8,6276
 datarobot_genai/drmcp/tools/predictive/predict.py,sha256=Qoob2_t2crfWtyPzkXMRz2ITZumnczU6Dq4C7q9RBMI,9370
 datarobot_genai/drmcp/tools/predictive/predict_realtime.py,sha256=urq6rPyZFsAP-bPyclSNzrkvb6FTamdlFau8q0IWWJ0,13472
@@ -117,9 +117,9 @@ datarobot_genai/nat/datarobot_llm_clients.py,sha256=-_q_KlKOVQecIYJd8YRiYnS4ZNaz
 datarobot_genai/nat/datarobot_llm_providers.py,sha256=aDoQcTeGI-odqydPXEX9OGGNFbzAtpqzTvHHEkmJuEQ,4963
 datarobot_genai/nat/datarobot_mcp_client.py,sha256=jL8sXb8g4gvt0VYgB2tfMGsMjpB1GV2XIbN0iv_LxVU,10701
 datarobot_genai/nat/helpers.py,sha256=Q7E3ADZdtFfS8E6OQPyw2wgA6laQ58N3bhLj5CBWwJs,3265
-datarobot_genai-0.2.37.dist-info/METADATA,sha256=JFxYZKfbfrbePIywe1WQ1ZfjQ5W9HAq_GNYAuXxTcB8,6301
-datarobot_genai-0.2.37.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
-datarobot_genai-0.2.37.dist-info/entry_points.txt,sha256=jEW3WxDZ8XIK9-ISmTyt5DbmBb047rFlzQuhY09rGrM,284
-datarobot_genai-0.2.37.dist-info/licenses/AUTHORS,sha256=isJGUXdjq1U7XZ_B_9AH8Qf0u4eX0XyQifJZ_Sxm4sA,80
-datarobot_genai-0.2.37.dist-info/licenses/LICENSE,sha256=U2_VkLIktQoa60Nf6Tbt7E4RMlfhFSjWjcJJfVC-YCE,11341
-datarobot_genai-0.2.37.dist-info/RECORD,,
+datarobot_genai-0.2.39.dist-info/METADATA,sha256=V8b_XudaiugsXFgEhkrtEpEr0UaSHVHLL6xPV578UGc,6301
+datarobot_genai-0.2.39.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
+datarobot_genai-0.2.39.dist-info/entry_points.txt,sha256=jEW3WxDZ8XIK9-ISmTyt5DbmBb047rFlzQuhY09rGrM,284
+datarobot_genai-0.2.39.dist-info/licenses/AUTHORS,sha256=isJGUXdjq1U7XZ_B_9AH8Qf0u4eX0XyQifJZ_Sxm4sA,80
+datarobot_genai-0.2.39.dist-info/licenses/LICENSE,sha256=U2_VkLIktQoa60Nf6Tbt7E4RMlfhFSjWjcJJfVC-YCE,11341
+datarobot_genai-0.2.39.dist-info/RECORD,,