datarobot-genai 0.2.26__py3-none-any.whl → 0.2.34__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (32)
  1. datarobot_genai/core/cli/agent_kernel.py +4 -1
  2. datarobot_genai/drmcp/__init__.py +2 -2
  3. datarobot_genai/drmcp/core/config.py +121 -83
  4. datarobot_genai/drmcp/core/exceptions.py +0 -4
  5. datarobot_genai/drmcp/core/logging.py +2 -2
  6. datarobot_genai/drmcp/core/tool_config.py +17 -9
  7. datarobot_genai/drmcp/test_utils/clients/__init__.py +0 -0
  8. datarobot_genai/drmcp/test_utils/clients/anthropic.py +68 -0
  9. datarobot_genai/drmcp/test_utils/{openai_llm_mcp_client.py → clients/base.py} +38 -40
  10. datarobot_genai/drmcp/test_utils/clients/dr_gateway.py +58 -0
  11. datarobot_genai/drmcp/test_utils/clients/openai.py +68 -0
  12. datarobot_genai/drmcp/test_utils/mcp_utils_ete.py +20 -0
  13. datarobot_genai/drmcp/test_utils/test_interactive.py +16 -16
  14. datarobot_genai/drmcp/test_utils/tool_base_ete.py +69 -2
  15. datarobot_genai/drmcp/test_utils/utils.py +1 -1
  16. datarobot_genai/drmcp/tools/clients/gdrive.py +314 -1
  17. datarobot_genai/drmcp/tools/clients/microsoft_graph.py +479 -0
  18. datarobot_genai/drmcp/tools/gdrive/tools.py +273 -4
  19. datarobot_genai/drmcp/tools/microsoft_graph/__init__.py +13 -0
  20. datarobot_genai/drmcp/tools/microsoft_graph/tools.py +198 -0
  21. datarobot_genai/drmcp/tools/predictive/data.py +16 -8
  22. datarobot_genai/drmcp/tools/predictive/model.py +87 -52
  23. datarobot_genai/drmcp/tools/predictive/project.py +2 -2
  24. datarobot_genai/drmcp/tools/predictive/training.py +15 -14
  25. datarobot_genai/nat/datarobot_llm_clients.py +90 -54
  26. datarobot_genai/nat/datarobot_mcp_client.py +47 -15
  27. {datarobot_genai-0.2.26.dist-info → datarobot_genai-0.2.34.dist-info}/METADATA +1 -1
  28. {datarobot_genai-0.2.26.dist-info → datarobot_genai-0.2.34.dist-info}/RECORD +32 -25
  29. {datarobot_genai-0.2.26.dist-info → datarobot_genai-0.2.34.dist-info}/WHEEL +0 -0
  30. {datarobot_genai-0.2.26.dist-info → datarobot_genai-0.2.34.dist-info}/entry_points.txt +0 -0
  31. {datarobot_genai-0.2.26.dist-info → datarobot_genai-0.2.34.dist-info}/licenses/AUTHORS +0 -0
  32. {datarobot_genai-0.2.26.dist-info → datarobot_genai-0.2.34.dist-info}/licenses/LICENSE +0 -0
@@ -68,6 +68,26 @@ def get_openai_llm_client_config() -> dict[str, str]:
     return config


+def get_dr_llm_gateway_client_config() -> dict[str, str]:
+    """Get DataRobot LLM Gateway client configuration."""
+    datarobot_api_token = os.environ.get("DATAROBOT_API_TOKEN")
+    datarobot_endpoint = os.environ.get("DATAROBOT_ENDPOINT")
+    save_llm_responses = os.environ.get("SAVE_LLM_RESPONSES", "false").lower() == "true"
+
+    if not datarobot_api_token:
+        raise ValueError("Missing required environment variable: DATAROBOT_API_TOKEN")
+
+    config: dict[str, str] = {
+        "datarobot_api_token": datarobot_api_token,
+        "save_llm_responses": str(save_llm_responses),
+    }
+
+    if datarobot_endpoint:
+        config["datarobot_endpoint"] = datarobot_endpoint
+
+    return config
+
+
 def get_headers() -> dict[str, str]:
     # When the MCP server is deployed in DataRobot, we have to include the API token in headers for
     # authentication.
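The helper added above builds its configuration purely from environment variables. A minimal sketch of calling it, assuming it lands in datarobot_genai/drmcp/test_utils/mcp_utils_ete.py (the +20-line file in the list above); the token and endpoint values are placeholders:

    import os

    # Placeholder credentials purely for illustration.
    os.environ["DATAROBOT_API_TOKEN"] = "<your-api-token>"
    os.environ["DATAROBOT_ENDPOINT"] = "https://app.datarobot.com/api/v2"

    from datarobot_genai.drmcp.test_utils.mcp_utils_ete import get_dr_llm_gateway_client_config

    config = get_dr_llm_gateway_client_config()
    # {'datarobot_api_token': '<your-api-token>', 'save_llm_responses': 'False',
    #  'datarobot_endpoint': 'https://app.datarobot.com/api/v2'}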
@@ -40,40 +40,40 @@ from mcp.types import ElicitResult

 from datarobot_genai.drmcp import get_dr_mcp_server_url
 from datarobot_genai.drmcp import get_headers
-from datarobot_genai.drmcp.test_utils.openai_llm_mcp_client import LLMMCPClient
-from datarobot_genai.drmcp.test_utils.openai_llm_mcp_client import LLMResponse
-from datarobot_genai.drmcp.test_utils.openai_llm_mcp_client import ToolCall
+from datarobot_genai.drmcp.test_utils.clients.base import LLMResponse
+from datarobot_genai.drmcp.test_utils.clients.base import ToolCall
+from datarobot_genai.drmcp.test_utils.clients.dr_gateway import DRLLMGatewayMCPClient

 # Re-export for backwards compatibility
-__all__ = ["LLMMCPClient", "LLMResponse", "ToolCall", "test_mcp_interactive"]
+__all__ = ["DRLLMGatewayMCPClient", "LLMResponse", "ToolCall", "test_mcp_interactive"]


 async def test_mcp_interactive() -> None:
     """Test the MCP server interactively with LLM agent."""
     # Check for required environment variables
-    openai_api_key = os.environ.get("OPENAI_API_KEY")
-    if not openai_api_key:
-        print("❌ Error: OPENAI_API_KEY environment variable is required")
+    datarobot_api_token = os.environ.get("DATAROBOT_API_TOKEN")
+    if not datarobot_api_token:
+        print("❌ Error: DATAROBOT_API_TOKEN environment variable is required")
         print("Please set it in your .env file or export it")
         return

-    # Optional Azure OpenAI settings
-    openai_api_base = os.environ.get("OPENAI_API_BASE")
-    openai_api_deployment_id = os.environ.get("OPENAI_API_DEPLOYMENT_ID")
-    openai_api_version = os.environ.get("OPENAI_API_VERSION")
+    # Optional DataRobot settings
+    datarobot_endpoint = os.environ.get("DATAROBOT_ENDPOINT")
+    model = os.environ.get("MODEL")

     print("🤖 Initializing LLM MCP Client...")

     # Initialize the LLM client with elicitation handler
     config = {
-        "openai_api_key": openai_api_key,
-        "openai_api_base": openai_api_base,
-        "openai_api_deployment_id": openai_api_deployment_id,
-        "openai_api_version": openai_api_version,
+        "datarobot_api_token": datarobot_api_token,
         "save_llm_responses": False,
     }
+    if datarobot_endpoint:
+        config["datarobot_endpoint"] = datarobot_endpoint
+    if model:
+        config["model"] = model

-    llm_client = LLMMCPClient(str(config))
+    llm_client = DRLLMGatewayMCPClient(str(config))

     # Get MCP server URL
     mcp_server_url = get_dr_mcp_server_url()
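With this change the interactive test authenticates against the DataRobot LLM Gateway rather than OpenAI. A sketch of invoking it locally, assuming the module path from the file list above; the token and model identifier are placeholders:

    import asyncio
    import os

    os.environ.setdefault("DATAROBOT_API_TOKEN", "<your-api-token>")
    os.environ.setdefault("MODEL", "azure/gpt-4o")  # hypothetical model identifier

    from datarobot_genai.drmcp.test_utils.test_interactive import test_mcp_interactive

    asyncio.run(test_mcp_interactive())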
@@ -17,7 +17,7 @@ from typing import Any

 from pydantic import BaseModel

-from .openai_llm_mcp_client import LLMResponse
+from .clients.base import LLMResponse


 class ToolCallTestExpectations(BaseModel):
@@ -39,6 +39,54 @@ class ETETestExpectations(BaseModel):
     SHOULD_NOT_BE_EMPTY = "SHOULD_NOT_BE_EMPTY"


+def _extract_structured_content(tool_result: str) -> Any:
+    r"""
+    Extract and parse structured content from tool result string.
+
+    Tool results are formatted as:
+    "Content: {content}\nStructured content: {structured_content}"
+
+    Structured content can be:
+    1. A JSON object with a "result" key: {"result": "..."} or {"result": "{...}"}
+    2. A direct JSON object: {"key": "value", ...}
+    3. Empty or missing
+
+    Args:
+        tool_result: The tool result string
+
+    Returns
+    -------
+    Parsed structured content, or None if not available
+    """
+    # Early returns for invalid inputs
+    if not tool_result or "Structured content: " not in tool_result:
+        return None
+
+    structured_part = tool_result.split("Structured content: ", 1)[1].strip()
+    # Parse JSON, return None on failure or empty structured_part
+    if not structured_part:
+        return None
+    try:
+        structured_data = json.loads(structured_part)
+    except json.JSONDecodeError:
+        return None
+
+    # If structured data has a "result" key, extract and parse that
+    if isinstance(structured_data, dict) and "result" in structured_data:
+        result_value = structured_data["result"]
+        # If result is a JSON string (starts with { or [), try to parse it
+        if isinstance(result_value, str) and result_value.strip().startswith(("{", "[")):
+            try:
+                parsed_result = json.loads(result_value)
+            except json.JSONDecodeError:
+                parsed_result = result_value  # Return string as-is if parsing fails
+            return parsed_result
+        return result_value  # Return result value directly
+
+    # If it's a direct JSON object (not wrapped in {"result": ...}), return it as-is
+    return structured_data
+
+
 def _check_dict_has_keys(
     expected: dict[str, Any],
     actual: dict[str, Any] | list[dict[str, Any]],
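A quick illustration of how the helper added above behaves on the documented "Content: ...\nStructured content: ..." format, using the function from this diff; the input strings are invented for the example, and note that a JSON string nested under "result" is parsed a second time:

    raw = 'Content: done\nStructured content: {"result": "{\\"project_id\\": \\"abc123\\"}"}'
    _extract_structured_content(raw)
    # -> {'project_id': 'abc123'}

    _extract_structured_content("Content: done\nStructured content: ")
    # -> None (an empty structured part falls back to None)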
@@ -130,7 +178,26 @@ class ToolBaseE2E:
                    f"result, but got: {response.tool_results[i]}"
                )
            else:
-                actual_result = json.loads(response.tool_results[i])
+                actual_result = _extract_structured_content(response.tool_results[i])
+                if actual_result is None:
+                    # Fallback: try to parse the entire tool result as JSON
+                    try:
+                        actual_result = json.loads(response.tool_results[i])
+                    except json.JSONDecodeError:
+                        # If that fails, try to extract content part
+                        if "Content: " in response.tool_results[i]:
+                            content_part = response.tool_results[i].split("Content: ", 1)[1]
+                            if "\nStructured content: " in content_part:
+                                content_part = content_part.split(
+                                    "\nStructured content: ", 1
+                                )[0]
+                            try:
+                                actual_result = json.loads(content_part.strip())
+                            except json.JSONDecodeError:
+                                raise AssertionError(
+                                    f"Could not parse tool result for "
+                                    f"{tool_call.tool_name}: {response.tool_results[i]}"
+                                )
                 assert _check_dict_has_keys(expected_result, actual_result), (
                     f"Should have called {tool_call.tool_name} tool with the correct "
                     f"result structure, but got: {response.tool_results[i]}"
@@ -19,7 +19,7 @@ from typing import TYPE_CHECKING
 from typing import Any

 if TYPE_CHECKING:
-    from .openai_llm_mcp_client import LLMResponse
+    from .clients.base import LLMResponse

 from dotenv import load_dotenv

@@ -15,9 +15,12 @@
 """Google Drive API Client and utilities for OAuth."""

 import io
+import json
 import logging
+import uuid
 from typing import Annotated
 from typing import Any
+from typing import Literal

 import httpx
 from datarobot.auth.datarobot.exceptions import OAuthServiceClientErr
@@ -31,7 +34,17 @@ from datarobot_genai.drmcp.core.auth import get_access_token

 logger = logging.getLogger(__name__)

-SUPPORTED_FIELDS = {"id", "name", "size", "mimeType", "webViewLink", "createdTime", "modifiedTime"}
+SUPPORTED_FIELDS = {
+    "id",
+    "name",
+    "size",
+    "mimeType",
+    "webViewLink",
+    "createdTime",
+    "modifiedTime",
+    "starred",
+    "trashed",
+}
 SUPPORTED_FIELDS_STR = ",".join(SUPPORTED_FIELDS)
 DEFAULT_FIELDS = f"nextPageToken,files({SUPPORTED_FIELDS_STR})"
 GOOGLE_DRIVE_FOLDER_MIME = "application/vnd.google-apps.folder"
@@ -45,6 +58,12 @@ GOOGLE_WORKSPACE_EXPORT_MIMES: dict[str, str] = {
     "application/vnd.google-apps.presentation": "text/plain",
 }

+# MIME type mappings for content conversion during upload to Google Workspace formats
+UPLOAD_CONTENT_TYPES: dict[str, str] = {
+    "application/vnd.google-apps.document": "text/plain",
+    "application/vnd.google-apps.spreadsheet": "text/csv",
+}
+

 BINARY_MIME_PREFIXES = (
     "image/",
@@ -111,6 +130,8 @@ class GoogleDriveFile(BaseModel):
     web_view_link: Annotated[str | None, Field(alias="webViewLink")] = None
     created_time: Annotated[str | None, Field(alias="createdTime")] = None
     modified_time: Annotated[str | None, Field(alias="modifiedTime")] = None
+    starred: bool | None = None
+    trashed: bool | None = None

     model_config = ConfigDict(populate_by_name=True)

@@ -125,8 +146,31 @@ class GoogleDriveFile(BaseModel):
             web_view_link=data.get("webViewLink"),
             created_time=data.get("createdTime"),
             modified_time=data.get("modifiedTime"),
+            starred=data.get("starred"),
+            trashed=data.get("trashed"),
         )

+    def as_flat_dict(self) -> dict[str, Any]:
+        """Return a flat dictionary representation of the file."""
+        result: dict[str, Any] = {
+            "id": self.id,
+            "name": self.name,
+            "mimeType": self.mime_type,
+        }
+        if self.size is not None:
+            result["size"] = self.size
+        if self.web_view_link is not None:
+            result["webViewLink"] = self.web_view_link
+        if self.created_time is not None:
+            result["createdTime"] = self.created_time
+        if self.modified_time is not None:
+            result["modifiedTime"] = self.modified_time
+        if self.starred is not None:
+            result["starred"] = self.starred
+        if self.trashed is not None:
+            result["trashed"] = self.trashed
+        return result
+


 class PaginatedResult(BaseModel):
@@ -432,6 +476,66 @@ class GoogleDriveClient:
         response.raise_for_status()
         return GoogleDriveFile.from_api_response(response.json())

+    async def update_file_metadata(
+        self,
+        file_id: str,
+        new_name: str | None = None,
+        starred: bool | None = None,
+        trashed: bool | None = None,
+    ) -> GoogleDriveFile:
+        """Update file metadata in Google Drive.
+
+        Args:
+            file_id: The ID of the file to update.
+            new_name: A new name to rename the file. Must not be empty or whitespace.
+            starred: Set to True to star the file or False to unstar it.
+            trashed: Set to True to trash the file or False to restore it.
+
+        Returns
+        -------
+        GoogleDriveFile with updated metadata.
+
+        Raises
+        ------
+        GoogleDriveError: If no update fields are provided, file is not found,
+            access is denied, or the request is invalid.
+        """
+        if new_name is None and starred is None and trashed is None:
+            raise GoogleDriveError(
+                "At least one of new_name, starred, or trashed must be provided."
+            )
+
+        if new_name is not None and not new_name.strip():
+            raise GoogleDriveError("new_name cannot be empty or whitespace.")
+
+        body: dict[str, Any] = {}
+        if new_name is not None:
+            body["name"] = new_name
+        if starred is not None:
+            body["starred"] = starred
+        if trashed is not None:
+            body["trashed"] = trashed
+
+        response = await self._client.patch(
+            f"/{file_id}",
+            json=body,
+            params={"fields": SUPPORTED_FIELDS_STR, "supportsAllDrives": "true"},
+        )
+
+        if response.status_code == 404:
+            raise GoogleDriveError(f"File with ID '{file_id}' not found.")
+        if response.status_code == 403:
+            raise GoogleDriveError(
+                f"Permission denied: you don't have permission to update file '{file_id}'."
+            )
+        if response.status_code == 400:
+            raise GoogleDriveError("Bad request: invalid parameters for file update.")
+        if response.status_code == 429:
+            raise GoogleDriveError("Rate limit exceeded. Please try again later.")
+
+        response.raise_for_status()
+        return GoogleDriveFile.from_api_response(response.json())
+

     async def _export_workspace_file(self, file_id: str, export_mime_type: str) -> str:
         """Export a Google Workspace file to the specified format.
@@ -599,6 +703,215 @@ class GoogleDriveClient:
             web_view_link=file_metadata.web_view_link,
         )

+    async def create_file(
+        self,
+        name: str,
+        mime_type: str,
+        parent_id: str | None = None,
+        initial_content: str | None = None,
+    ) -> GoogleDriveFile:
+        """Create a new file or folder in Google Drive.
+
+        Creates a new file with the specified name and MIME type. Optionally places
+        it in a specific folder and populates it with initial content.
+
+        For Google Workspace files (Docs, Sheets), the Drive API automatically
+        converts plain text content to the appropriate format.
+
+        Args:
+            name: The name for the new file or folder.
+            mime_type: The MIME type of the file (e.g., 'text/plain',
+                'application/vnd.google-apps.document',
+                'application/vnd.google-apps.folder').
+            parent_id: Optional ID of the parent folder. If not specified,
+                the file is created in the root of the user's Drive.
+            initial_content: Optional text content to populate the file.
+                Ignored for folders.
+
+        Returns
+        -------
+        GoogleDriveFile with the created file's metadata.
+
+        Raises
+        ------
+        GoogleDriveError: If file creation fails (permission denied,
+            parent not found, rate limited, etc.).
+        """
+        metadata: dict[str, Any] = {
+            "name": name,
+            "mimeType": mime_type,
+        }
+        if parent_id:
+            metadata["parents"] = [parent_id]
+
+        if mime_type == GOOGLE_DRIVE_FOLDER_MIME or not initial_content:
+            response = await self._client.post(
+                "/",
+                json=metadata,
+                params={"fields": SUPPORTED_FIELDS_STR, "supportsAllDrives": "true"},
+            )
+        else:
+            response = await self._create_file_with_content(
+                metadata=metadata,
+                content=initial_content,
+                target_mime_type=mime_type,
+            )
+
+        if response.status_code == 404:
+            raise GoogleDriveError(
+                f"Parent folder with ID '{parent_id}' not found."
+                if parent_id
+                else "Resource not found."
+            )
+        if response.status_code == 403:
+            raise GoogleDriveError(
+                "Permission denied: you don't have permission to create files in this location."
+            )
+        if response.status_code == 400:
+            raise GoogleDriveError(
+                f"Bad request: invalid parameters for file creation. "
+                f"Check that the MIME type '{mime_type}' is valid."
+            )
+        if response.status_code == 429:
+            raise GoogleDriveError("Rate limit exceeded. Please try again later.")
+
+        response.raise_for_status()
+        return GoogleDriveFile.from_api_response(response.json())
+
+    async def _create_file_with_content(
+        self,
+        metadata: dict[str, Any],
+        content: str,
+        target_mime_type: str,
+    ) -> httpx.Response:
+        """Create a file with content using multipart upload.
+
+        Args:
+            metadata: File metadata dictionary.
+            content: Text content for the file.
+            target_mime_type: The target MIME type for the file.
+
+        Returns
+        -------
+        The HTTP response from the upload.
+        """
+        content_type = UPLOAD_CONTENT_TYPES.get(target_mime_type, "text/plain")
+        boundary = f"===gdrive_boundary_{uuid.uuid4().hex}==="
+        body_parts = [
+            f"--{boundary}",
+            "Content-Type: application/json; charset=UTF-8",
+            "",
+            json.dumps(metadata),
+            f"--{boundary}",
+            f"Content-Type: {content_type}",
+            "",
+            content,
+            f"--{boundary}--",
+        ]
+        body = "\r\n".join(body_parts)
+
+        upload_url = "https://www.googleapis.com/upload/drive/v3/files"
+        return await self._client.post(
+            upload_url,
+            content=body.encode("utf-8"),
+            params={
+                "uploadType": "multipart",
+                "fields": SUPPORTED_FIELDS_STR,
+                "supportsAllDrives": "true",
+            },
+            headers={"Content-Type": f"multipart/related; boundary={boundary}"},
+        )
+
+    async def manage_access(
+        self,
+        *,
+        file_id: str,
+        action: Literal["add", "update", "remove"],
+        role: Literal["reader", "commenter", "writer", "fileOrganizer", "organizer", "owner"]
+        | None = None,
+        email_address: str | None = None,
+        permission_id: str | None = None,
+        transfer_ownership: bool = False,
+    ) -> str:
+        """Manage access permissions for a Google Drive file or folder.
+
+        Adds, updates, or removes sharing permissions on an existing Google Drive
+        file or folder using the Google Drive Permissions API.
+
+        This method supports granting access to users or groups, changing access
+        roles, and revoking permissions. Ownership transfer is supported for files
+        in "My Drive" when explicitly requested.
+
+        Args:
+            file_id: The ID of the Google Drive file or folder whose permissions
+                are being managed.
+            action: The permission operation to perform.
+            role: The access role to assign or update. Valid values include
+                "reader", "commenter", "writer", "fileOrganizer", "organizer",
+                and "owner". Required for "add" and "update" actions.
+            email_address: The email address of the user or group to grant access to.
+                Required for the "add" action.
+            permission_id: The ID of the permission to update or remove.
+                Required for "update" and "remove" actions.
+            transfer_ownership: Whether to transfer ownership of the file.
+                Only applicable when action="update" and role="owner".
+
+        Returns
+        -------
+        The permission ID: for "add", the ID of the newly created permission;
+        for "update"/"remove", the permission_id that was passed in.
+
+        Raises
+        ------
+        GoogleDriveError: If the permission operation fails (invalid arguments,
+            insufficient permissions, resource not found, ownership transfer
+            not allowed, rate limited, etc.).
+        """
+        if not file_id.strip():
+            raise GoogleDriveError("Argument validation error: 'file_id' cannot be empty.")
+
+        if action == "add" and not email_address:
+            raise GoogleDriveError("'email_address' is required for action 'add'.")
+
+        if action in ("update", "remove") and not permission_id:
+            raise GoogleDriveError("'permission_id' is required for action 'update' or 'remove'.")
+
+        if action != "remove" and not role:
+            raise GoogleDriveError("'role' is required for action 'add' or 'update'.")
+
+        if action == "add":
+            response = await self._client.post(
+                url=f"/{file_id}/permissions",
+                json={
+                    "type": "user",
+                    "role": role,
+                    "emailAddress": email_address,
+                },
+                params={"sendNotificationEmail": False, "supportsAllDrives": True},
+            )
+
+        elif action == "update":
+            response = await self._client.patch(
+                url=f"/{file_id}/permissions/{permission_id}",
+                json={"role": role},
+                params={"transferOwnership": transfer_ownership, "supportsAllDrives": True},
+            )
+
+        elif action == "remove":
+            response = await self._client.delete(url=f"/{file_id}/permissions/{permission_id}")
+
+        else:
+            raise GoogleDriveError(f"Invalid action '{action}'")
+
+        if response.status_code not in (200, 201, 204):
+            raise GoogleDriveError(f"Drive API error {response.status_code}: {response.text}")
+
+        if action == "add":
+            return response.json()["id"]
+
+        # Cannot be null here because of above validators
+        return permission_id  # type: ignore
+
     async def __aenter__(self) -> "GoogleDriveClient":
         """Async context manager entry."""
         return self
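A combined sketch of the new create and sharing APIs above, again assuming an authenticated GoogleDriveClient; the document name, content, and email address are placeholders:

    async def create_and_share(client: GoogleDriveClient) -> None:
        doc = await client.create_file(
            name="Meeting notes",
            mime_type="application/vnd.google-apps.document",
            initial_content="Agenda:\n- roadmap\n- hiring",
        )
        permission_id = await client.manage_access(
            file_id=doc.id,
            action="add",
            role="writer",
            email_address="teammate@example.com",
        )
        print(doc.id, permission_id)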