datarobot-genai 0.2.20__py3-none-any.whl → 0.2.22__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
datarobot_genai/drmcp/tools/clients/confluence.py

@@ -30,6 +30,9 @@ from .atlassian import get_atlassian_cloud_id
 
 logger = logging.getLogger(__name__)
 
+# Search expand fields for CQL search - content.space gives us space.key directly
+SEARCH_EXPAND_FIELDS = "content.space"
+
 
 class ConfluenceError(Exception):
     """Exception for Confluence API errors."""
@@ -75,6 +78,32 @@ class ConfluenceComment(BaseModel):
         }
 
 
+class ContentSearchResult(BaseModel):
+    """Pydantic model for Confluence search result item."""
+
+    id: str
+    title: str
+    type: str
+    space_key: str = ""
+    space_name: str = ""
+    excerpt: str = ""
+    last_modified: str | None = None
+    url: str = ""
+
+    def as_flat_dict(self) -> dict[str, Any]:
+        """Return a flat dictionary representation of the search result."""
+        return {
+            "id": self.id,
+            "title": self.title,
+            "type": self.type,
+            "spaceKey": self.space_key,
+            "spaceName": self.space_name,
+            "excerpt": self.excerpt,
+            "lastModified": self.last_modified,
+            "url": self.url,
+        }
+
+
 class ConfluenceClient:
     """
     Client for interacting with Confluence API using OAuth access token.
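
The new ContentSearchResult model keeps snake_case attributes internally and converts to camelCase keys only at the serialization boundary via as_flat_dict. A minimal sketch of that round trip; the field names and defaults come from the diff above, while the sample values are hypothetical:

    from datarobot_genai.drmcp.tools.clients.confluence import ContentSearchResult

    # Hypothetical sample values; only the field names come from the diff.
    result = ContentSearchResult(
        id="12345",
        title="Release notes",
        type="page",
        space_key="DOC",
        excerpt="highlights from the release",
    )
    flat = result.as_flat_dict()
    assert flat["spaceKey"] == "DOC"      # snake_case attribute -> camelCase key
    assert flat["lastModified"] is None   # unset optional field stays None
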
@@ -382,6 +411,76 @@ class ConfluenceClient:
 
         return self._parse_comment_response(response.json(), page_id)
 
+    async def search_confluence_content(
+        self, cql_query: str, max_results: int
+    ) -> list[ContentSearchResult]:
+        """
+        Search Confluence content using CQL (Confluence Query Language).
+
+        Args:
+            cql_query: CQL query
+            max_results: Maximum number of results to return
+
+        Returns
+        -------
+        List of Confluence content search results
+
+        Raises
+        ------
+        ConfluenceError: If the API request fails (400, 403, 429)
+        """
+        cloud_id = await self._get_cloud_id()
+        url = f"{ATLASSIAN_API_BASE}/ex/confluence/{cloud_id}/wiki/rest/api/search"
+
+        response = await self._client.get(
+            url,
+            params={
+                "cql": cql_query,
+                "limit": max_results,
+                "expand": SEARCH_EXPAND_FIELDS,
+            },
+        )
+
+        if response.status_code == HTTPStatus.BAD_REQUEST:
+            error_msg = self._extract_error_message(response)
+            raise ConfluenceError(f"Invalid CQL query: {error_msg}", status_code=400)
+
+        if response.status_code == HTTPStatus.FORBIDDEN:
+            raise ConfluenceError(
+                "Permission denied: you don't have access to search this content",
+                status_code=403,
+            )
+
+        if response.status_code == HTTPStatus.TOO_MANY_REQUESTS:
+            raise ConfluenceError("Rate limit exceeded. Please try again later.", status_code=429)
+
+        response.raise_for_status()
+        raw_results = response.json().get("results", [])
+        results = [ContentSearchResult(**self._parse_search_item(item)) for item in raw_results]
+        return results
+
+    def _parse_search_item(self, item: dict) -> dict:
+        """Parse a raw search API response item into a model-compatible dict."""
+        content = item.get("content", item)
+        links = content.get("_links", {})
+        base_url = links.get("base", "")
+        webui = links.get("webui", "")
+        url = f"{base_url}{webui}" if base_url and webui else webui
+
+        # Get space from content.space (requires expand=content.space)
+        content_space = content.get("space", {})
+
+        return {
+            "id": str(content.get("id", "")),
+            "title": content.get("title", ""),
+            "type": content.get("type", "page"),
+            "space_key": content_space.get("key", ""),
+            "space_name": content_space.get("name", ""),
+            "excerpt": item.get("excerpt", ""),
+            "last_modified": item.get("lastModified"),
+            "url": url,
+        }
+
     async def __aenter__(self) -> "ConfluenceClient":
        """Async context manager entry."""
        return self
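
Taken together, these additions push filtering down into the Confluence search endpoint: the client sends the CQL string and limit as query params, asks the API to inline each result's space via expand=content.space, and flattens the response client-side. A minimal usage sketch; ConfluenceClient and the method signature come from the diff, and "token" is a placeholder for a real Atlassian OAuth access token:

    import asyncio

    from datarobot_genai.drmcp.tools.clients.confluence import ConfluenceClient

    async def main() -> None:
        async with ConfluenceClient("token") as client:
            results = await client.search_confluence_content(
                cql_query="type=page and space=DOC", max_results=5
            )
            for r in results:
                # space_key is populated because the request sent expand=content.space
                print(r.space_key, r.title, r.url)

    asyncio.run(main())
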
datarobot_genai/drmcp/tools/clients/gdrive.py

@@ -29,7 +29,10 @@ from datarobot_genai.drmcp.core.auth import get_access_token
 
 logger = logging.getLogger(__name__)
 
-DEFAULT_FIELDS = "nextPageToken,files(id,name,size,mimeType,webViewLink,createdTime,modifiedTime)"
+SUPPORTED_FIELDS = {"id", "name", "size", "mimeType", "webViewLink", "createdTime", "modifiedTime"}
+SUPPORTED_FIELDS_STR = ",".join(SUPPORTED_FIELDS)
+DEFAULT_FIELDS = f"nextPageToken,files({SUPPORTED_FIELDS_STR})"
+GOOGLE_DRIVE_FOLDER_MIME = "application/vnd.google-apps.folder"
 DEFAULT_ORDER = "modifiedTime desc"
 MAX_PAGE_SIZE = 100
 LIMIT = 500
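
One consequence of deriving the string from a set: Python set iteration order varies between interpreter runs (string hashes are randomized), so SUPPORTED_FIELDS_STR, and therefore DEFAULT_FIELDS, can list the fields in a different order from run to run. That is harmless for the Drive API, whose fields parameter is order-insensitive, but worth knowing if a test ever compares the string to a literal. A quick illustration:

    SUPPORTED_FIELDS = {"id", "name", "size", "mimeType", "webViewLink", "createdTime", "modifiedTime"}
    SUPPORTED_FIELDS_STR = ",".join(SUPPORTED_FIELDS)
    DEFAULT_FIELDS = f"nextPageToken,files({SUPPORTED_FIELDS_STR})"

    # The set of fields is stable even though their textual order is not:
    assert set(SUPPORTED_FIELDS_STR.split(",")) == SUPPORTED_FIELDS
    # e.g. DEFAULT_FIELDS may be "nextPageToken,files(size,id,mimeType,...)" in one
    # run and "nextPageToken,files(name,webViewLink,...)" in another.
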
@@ -129,6 +132,8 @@ class GoogleDriveClient:
         limit: int,
         page_token: str | None = None,
         query: str | None = None,
+        folder_id: str | None = None,
+        recursive: bool = False,
     ) -> PaginatedResult:
         """
         List files from Google Drive.
@@ -143,6 +148,10 @@
             If not provided it'll list all authorized user files.
             If the query doesn't contain operators (contains, =, etc.), it will be treated as
             a name search: "name contains '{query}'".
+        folder_id: The ID of a specific folder to list or search within.
+            If omitted, searches the entire Drive.
+        recursive: If True, searches all subfolders.
+            If False and folder_id is provided, only lists immediate children.
 
         Returns
         -------
@@ -159,26 +168,85 @@
 
         page_size = min(page_size, MAX_PAGE_SIZE)
         limit = min(limit, LIMIT)
-        fetched = 0
+        formatted_query = self._build_query(query, folder_id)
+
+        if not recursive or not folder_id:
+            files, next_token = await self._fetch_paginated(
+                page_size=page_size,
+                limit=limit,
+                page_token=page_token,
+                query=formatted_query,
+            )
+            return PaginatedResult(files=files, next_page_token=next_token)
 
-        formatted_query = self._get_formatted_query(query)
+        files = await self._fetch_recursive(
+            root_folder_id=folder_id,
+            base_query=query,
+            page_size=page_size,
+            limit=limit,
+        )
 
+        return PaginatedResult(files=files, next_page_token=page_token)
+
+    async def _fetch_paginated(
+        self,
+        page_size: int,
+        limit: int,
+        page_token: str | None,
+        query: str | None,
+    ) -> tuple[list[GoogleDriveFile], str | None]:
+        fetched = 0
         files: list[GoogleDriveFile] = []
+        next_page_token = page_token
 
         while fetched < limit:
             data = await self._list_files(
                 page_size=page_size,
-                page_token=page_token,
-                query=formatted_query,
+                page_token=next_page_token,
+                query=query,
             )
+
             files.extend(data.files)
             fetched += len(data.files)
-            page_token = data.next_page_token
+            next_page_token = data.next_page_token
 
-            if not page_token:
+            if not next_page_token:
                 break
 
-        return PaginatedResult(files=files, next_page_token=page_token)
+        return files, next_page_token
+
+    async def _fetch_recursive(
+        self,
+        root_folder_id: str,
+        base_query: str | None,
+        page_size: int,
+        limit: int,
+    ) -> list[GoogleDriveFile]:
+        collected: list[GoogleDriveFile] = []
+        folders_to_visit: list[str] = [root_folder_id]
+
+        while folders_to_visit and len(collected) < limit:
+            current_folder = folders_to_visit.pop(0)
+
+            query = self._build_query(base_query, current_folder)
+
+            files, _ = await self._fetch_paginated(
+                page_size=page_size,
+                limit=limit - len(collected),
+                page_token=None,
+                query=query,
+            )
+
+            for file in files:
+                collected.append(file)
+
+                if file.mime_type == GOOGLE_DRIVE_FOLDER_MIME:
+                    folders_to_visit.append(file.id)
+
+                if len(collected) >= limit:
+                    break
+
+        return collected
 
     async def _list_files(
         self,
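
The recursive path is a breadth-first traversal: folders_to_visit acts as a FIFO queue seeded with the root folder, each visited folder's children are fetched with an "in parents" query, and any child that is itself a folder is enqueued for a later visit. The pattern in isolation, as a synchronous sketch with a hypothetical list_children callable standing in for the paginated Drive fetch:

    from collections import deque

    FOLDER_MIME = "application/vnd.google-apps.folder"

    def walk_breadth_first(root_id: str, list_children, limit: int) -> list[dict]:
        """list_children is a hypothetical stand-in for the 'in parents' fetch."""
        collected: list[dict] = []
        queue = deque([root_id])  # deque gives O(1) pops; the diff uses list.pop(0)
        while queue and len(collected) < limit:
            for child in list_children(queue.popleft()):
                collected.append(child)
                if child["mimeType"] == FOLDER_MIME:
                    queue.append(child["id"])  # descend into this subfolder later
                if len(collected) >= limit:
                    break
        return collected

Note that _fetch_recursive also shrinks the per-folder limit to limit - len(collected), so the global cap holds even though each folder is fetched independently.
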
@@ -207,6 +275,45 @@ class GoogleDriveClient:
         next_page_token = data.get("nextPageToken")
         return PaginatedResult(files=files, next_page_token=next_page_token)
 
+    def _build_query(self, query: str | None, folder_id: str | None) -> str | None:
+        """Build Google Drive API query.
+
+        Args:
+            query: Optional search query string (e.g., "name contains 'report'").
+                If the query doesn't contain operators (contains, =, etc.), it will be treated as
+                a name search: "name contains '{query}'".
+            folder_id: Optional folder id.
+                If provided, it narrows the query to search/list only in the given folder.
+
+        Returns
+        -------
+        Correctly built query (if provided)
+        """
+        base_query = self._get_formatted_query(query)
+
+        if base_query:
+            # Case #1 -- Query provided and already contains "in parents" (gdrive "folder id")
+            if "in parents" in base_query and folder_id:
+                logger.debug(
+                    "In-parents (parent folder) already used in query. "
+                    "Omitting folder_id argument. "
+                    f"Query: {base_query} | FolderId: {folder_id}"
+                )
+                return base_query
+            # Case #2 -- Query provided without "in parents" and folder id provided.
+            elif folder_id:
+                return f"{base_query} and '{folder_id}' in parents"
+            # Case #3 -- Query provided without "in parents" and no folder id.
+            else:
+                return base_query
+
+        # Case #4 -- Base query is null but folder id provided
+        if folder_id:
+            return f"'{folder_id}' in parents"
+
+        # Case #5 -- Neither query nor folder provided
+        return None
+
     @staticmethod
     def _get_formatted_query(query: str | None) -> str | None:
         """Get formatted Google Drive API query.
datarobot_genai/drmcp/tools/confluence/tools.py

@@ -186,3 +186,69 @@ async def confluence_add_comment(
             "page_id": page_id,
         },
     )
+
+
+@dr_mcp_tool(tags={"confluence", "search", "content"})
+async def confluence_search(
+    *,
+    cql_query: Annotated[
+        str,
+        "The CQL (Confluence Query Language) string used to filter content, "
+        "e.g., 'type=page and space=DOC'.",
+    ],
+    max_results: Annotated[int, "Maximum number of content items to return. Default is 10."] = 10,
+    include_body: Annotated[
+        bool,
+        "If True, fetch full page body content for each result (slower, "
+        "makes additional API calls). Default is False, which returns only excerpts.",
+    ] = False,
+) -> ToolResult:
+    """
+    Search Confluence pages and content efficiently using a CQL query string.
+    This pushes the search logic down to the Confluence API.
+
+    Refer to the Confluence documentation for advanced searching using CQL:
+    https://developer.atlassian.com/cloud/confluence/advanced-searching-using-cql/
+    """
+    if not cql_query:
+        raise ToolError("Argument validation error: 'cql_query' cannot be empty.")
+
+    if max_results < 1 or max_results > 100:
+        raise ToolError("Argument validation error: 'max_results' must be between 1 and 100.")
+
+    access_token = await get_atlassian_access_token()
+    if isinstance(access_token, ToolError):
+        raise access_token
+
+    try:
+        async with ConfluenceClient(access_token) as client:
+            results = await client.search_confluence_content(
+                cql_query=cql_query, max_results=max_results
+            )
+
+            # If include_body is True, fetch full content for each page
+            if include_body and results:
+                data = []
+                for result in results:
+                    flat = result.as_flat_dict()
+                    try:
+                        page = await client.get_page_by_id(result.id)
+                        flat["body"] = page.body
+                    except ConfluenceError:
+                        flat["body"] = None  # Keep excerpt if page fetch fails
+                    data.append(flat)
+            else:
+                data = [result.as_flat_dict() for result in results]
+
+    except ConfluenceError as e:
+        logger.error(f"Confluence error searching content: {e}")
+        raise ToolError(str(e))
+    except Exception as e:
+        logger.error(f"Unexpected error searching Confluence content: {e}")
+        raise ToolError(f"An unexpected error occurred while searching Confluence: {str(e)}")
+
+    n = len(results)
+    return ToolResult(
+        content=f"Successfully executed CQL query and retrieved {n} result(s).",
+        structured_content={"data": data, "count": n},
+    )
datarobot_genai/drmcp/tools/gdrive/tools.py

@@ -23,6 +23,8 @@ from fastmcp.tools.tool import ToolResult
 from datarobot_genai.drmcp.core.mcp_instance import dr_mcp_tool
 from datarobot_genai.drmcp.tools.clients.gdrive import LIMIT
 from datarobot_genai.drmcp.tools.clients.gdrive import MAX_PAGE_SIZE
+from datarobot_genai.drmcp.tools.clients.gdrive import SUPPORTED_FIELDS
+from datarobot_genai.drmcp.tools.clients.gdrive import SUPPORTED_FIELDS_STR
 from datarobot_genai.drmcp.tools.clients.gdrive import GoogleDriveClient
 from datarobot_genai.drmcp.tools.clients.gdrive import GoogleDriveError
 from datarobot_genai.drmcp.tools.clients.gdrive import get_gdrive_access_token
@@ -30,8 +32,8 @@ from datarobot_genai.drmcp.tools.clients.gdrive import get_gdrive_access_token
 logger = logging.getLogger(__name__)
 
 
-@dr_mcp_tool(tags={"google", "gdrive", "list", "files"})
-async def google_drive_list_files(
+@dr_mcp_tool(tags={"google", "gdrive", "list", "search", "files", "find", "contents"})
+async def gdrive_find_contents(
     *,
     page_size: Annotated[
         int, f"Maximum number of files to return per page (max {MAX_PAGE_SIZE})."
@@ -43,9 +45,24 @@
     query: Annotated[
         str | None, "Optional filter to narrow results (e.g., 'trashed = false')."
     ] = None,
+    folder_id: Annotated[
+        str | None,
+        "The ID of a specific folder to list or search within. "
+        "If omitted, searches the entire Drive.",
+    ] = None,
+    recursive: Annotated[
+        bool,
+        "If True, searches all subfolders. "
+        "If False and folder_id is provided, only lists immediate children.",
+    ] = False,
+    fields: Annotated[
+        list[str] | None,
+        "Optional list of metadata fields to include. Ex. id, name, mimeType. "
+        f"Default = {SUPPORTED_FIELDS_STR}",
+    ] = None,
 ) -> ToolResult | ToolError:
     """
-    List files in the user's Google Drive with pagination and filtering support.
+    Search or list files in the user's Google Drive with pagination and filtering support.
     Use this tool to discover file names and IDs for use with other tools.
 
     Limit must be bigger than or equal to page size, and it must be a multiple of page size.
@@ -61,7 +78,12 @@
     try:
         async with GoogleDriveClient(access_token) as client:
             data = await client.list_files(
-                page_size=page_size, page_token=page_token, query=query, limit=limit
+                page_size=page_size,
+                page_token=page_token,
+                query=query,
+                limit=limit,
+                folder_id=folder_id,
+                recursive=recursive,
             )
     except GoogleDriveError as e:
         logger.error(f"Google Drive error listing files: {e}")
@@ -70,6 +92,7 @@
         logger.error(f"Unexpected error listing Google Drive files: {e}")
         raise ToolError(f"An unexpected error occurred while listing Google Drive files: {str(e)}")
 
+    filtered_fields = set(fields).intersection(SUPPORTED_FIELDS) if fields else SUPPORTED_FIELDS
     number_of_files = len(data.files)
     next_page_info = (
         f"Next page token needed to fetch more data: {data.next_page_token}"
@@ -80,7 +103,7 @@
         content=f"Successfully listed {number_of_files} files. {next_page_info}",
         structured_content={
             "files": [
-                file.model_dump(by_alias=True, include={"id", "name"}) for file in data.files
+                file.model_dump(by_alias=True, include=filtered_fields) for file in data.files
             ],
             "count": number_of_files,
             "nextPageToken": data.next_page_token,
datarobot_genai-0.2.22.dist-info/METADATA

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: datarobot-genai
-Version: 0.2.20
+Version: 0.2.22
 Summary: Generic helpers for GenAI
 Project-URL: Homepage, https://github.com/datarobot-oss/datarobot-genai
 Author: DataRobot, Inc.

datarobot_genai-0.2.22.dist-info/RECORD

@@ -78,14 +78,14 @@ datarobot_genai/drmcp/test_utils/utils.py,sha256=esGKFv8aO31-Qg3owayeWp32BYe1CdY
 datarobot_genai/drmcp/tools/__init__.py,sha256=0kq9vMkF7EBsS6lkEdiLibmUrghTQqosHbZ5k-V9a5g,578
 datarobot_genai/drmcp/tools/clients/__init__.py,sha256=0kq9vMkF7EBsS6lkEdiLibmUrghTQqosHbZ5k-V9a5g,578
 datarobot_genai/drmcp/tools/clients/atlassian.py,sha256=__M_uz7FrcbKCYRzeMn24DCEYD6OmFx_LuywHCxgXsA,6472
-datarobot_genai/drmcp/tools/clients/confluence.py,sha256=gDzy8t5t3b1mwEr-CuZ5BwXXQ52AXke8J_Ra7i_8T1g,13692
-datarobot_genai/drmcp/tools/clients/gdrive.py,sha256=QmNTmJdSqYO5Y5Vnp3roNZiNNJeocBVjF9UcSzcjgRY,8635
+datarobot_genai/drmcp/tools/clients/confluence.py,sha256=YS5XsKd-jK5Yg0rgwOcC76v9e8fDJgUZIW5B9kcq5B0,17101
+datarobot_genai/drmcp/tools/clients/gdrive.py,sha256=gRtWWCENHcmLepKQbS7qsF4R6vbQQK1Ru-EqfeUbldY,12550
 datarobot_genai/drmcp/tools/clients/jira.py,sha256=Rm91JAyrNIqxu66-9rU1YqoRXVnWbEy-Ahvy6f6HlVg,9823
 datarobot_genai/drmcp/tools/clients/s3.py,sha256=GmwzvurFdNfvxOooA8g5S4osRysHYU0S9ypg_177Glg,953
 datarobot_genai/drmcp/tools/confluence/__init__.py,sha256=0kq9vMkF7EBsS6lkEdiLibmUrghTQqosHbZ5k-V9a5g,578
-datarobot_genai/drmcp/tools/confluence/tools.py,sha256=jSF7yXGFqqlMcavkRIY4HbMxb7tCeunA2ST41wa2vGI,7219
+datarobot_genai/drmcp/tools/confluence/tools.py,sha256=ySwABe8osAzky3BO3lRaF6UHnXQgaurkmvM0iHFfL30,9849
 datarobot_genai/drmcp/tools/gdrive/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-datarobot_genai/drmcp/tools/gdrive/tools.py,sha256=wmCUSaCWqepdlOIApA8tZ-grPYSV7wZKoer6uRy26Qg,3459
+datarobot_genai/drmcp/tools/gdrive/tools.py,sha256=BP5tcpciuijakmXTjEgS6CySg5TUBAmlKYPkTgpVZbc,4406
 datarobot_genai/drmcp/tools/jira/__init__.py,sha256=0kq9vMkF7EBsS6lkEdiLibmUrghTQqosHbZ5k-V9a5g,578
 datarobot_genai/drmcp/tools/jira/tools.py,sha256=dfkqTU2HH-7n44hX80ODFacKq0p0LOchFcZtIIKFNMM,9687
 datarobot_genai/drmcp/tools/predictive/__init__.py,sha256=WuOHlNNEpEmcF7gVnhckruJRKU2qtmJLE3E7zoCGLDo,1030
@@ -111,9 +111,9 @@ datarobot_genai/nat/datarobot_llm_clients.py,sha256=Yu208Ed_p_4P3HdpuM7fYnKcXtim
 datarobot_genai/nat/datarobot_llm_providers.py,sha256=aDoQcTeGI-odqydPXEX9OGGNFbzAtpqzTvHHEkmJuEQ,4963
 datarobot_genai/nat/datarobot_mcp_client.py,sha256=35FzilxNp4VqwBYI0NsOc91-xZm1C-AzWqrOdDy962A,9612
 datarobot_genai/nat/helpers.py,sha256=Q7E3ADZdtFfS8E6OQPyw2wgA6laQ58N3bhLj5CBWwJs,3265
-datarobot_genai-0.2.20.dist-info/METADATA,sha256=iWXFNkplo7YmQoa5bCbVPhhv-KzDIHsotkbKf-bEbEk,6301
-datarobot_genai-0.2.20.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
-datarobot_genai-0.2.20.dist-info/entry_points.txt,sha256=jEW3WxDZ8XIK9-ISmTyt5DbmBb047rFlzQuhY09rGrM,284
-datarobot_genai-0.2.20.dist-info/licenses/AUTHORS,sha256=isJGUXdjq1U7XZ_B_9AH8Qf0u4eX0XyQifJZ_Sxm4sA,80
-datarobot_genai-0.2.20.dist-info/licenses/LICENSE,sha256=U2_VkLIktQoa60Nf6Tbt7E4RMlfhFSjWjcJJfVC-YCE,11341
-datarobot_genai-0.2.20.dist-info/RECORD,,
+datarobot_genai-0.2.22.dist-info/METADATA,sha256=Jc7FEPYKQq7DI3dpk2qbMSYJolHyrBeO-A_9i7IsFFM,6301
+datarobot_genai-0.2.22.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
+datarobot_genai-0.2.22.dist-info/entry_points.txt,sha256=jEW3WxDZ8XIK9-ISmTyt5DbmBb047rFlzQuhY09rGrM,284
+datarobot_genai-0.2.22.dist-info/licenses/AUTHORS,sha256=isJGUXdjq1U7XZ_B_9AH8Qf0u4eX0XyQifJZ_Sxm4sA,80
+datarobot_genai-0.2.22.dist-info/licenses/LICENSE,sha256=U2_VkLIktQoa60Nf6Tbt7E4RMlfhFSjWjcJJfVC-YCE,11341
+datarobot_genai-0.2.22.dist-info/RECORD,,