datarobot-genai 0.2.37__py3-none-any.whl → 0.3.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (46)
  1. datarobot_genai/core/agents/__init__.py +1 -1
  2. datarobot_genai/core/agents/base.py +5 -2
  3. datarobot_genai/core/chat/responses.py +6 -1
  4. datarobot_genai/core/utils/auth.py +188 -31
  5. datarobot_genai/crewai/__init__.py +1 -4
  6. datarobot_genai/crewai/agent.py +150 -17
  7. datarobot_genai/crewai/events.py +11 -4
  8. datarobot_genai/drmcp/__init__.py +4 -2
  9. datarobot_genai/drmcp/core/config.py +21 -1
  10. datarobot_genai/drmcp/core/mcp_instance.py +5 -49
  11. datarobot_genai/drmcp/core/routes.py +108 -13
  12. datarobot_genai/drmcp/core/tool_config.py +16 -0
  13. datarobot_genai/drmcp/core/utils.py +110 -0
  14. datarobot_genai/drmcp/test_utils/tool_base_ete.py +41 -26
  15. datarobot_genai/drmcp/tools/clients/gdrive.py +2 -0
  16. datarobot_genai/drmcp/tools/clients/microsoft_graph.py +141 -0
  17. datarobot_genai/drmcp/tools/clients/perplexity.py +173 -0
  18. datarobot_genai/drmcp/tools/clients/tavily.py +199 -0
  19. datarobot_genai/drmcp/tools/confluence/tools.py +43 -94
  20. datarobot_genai/drmcp/tools/gdrive/tools.py +44 -133
  21. datarobot_genai/drmcp/tools/jira/tools.py +19 -41
  22. datarobot_genai/drmcp/tools/microsoft_graph/tools.py +201 -32
  23. datarobot_genai/drmcp/tools/perplexity/__init__.py +0 -0
  24. datarobot_genai/drmcp/tools/perplexity/tools.py +117 -0
  25. datarobot_genai/drmcp/tools/predictive/data.py +1 -9
  26. datarobot_genai/drmcp/tools/predictive/deployment.py +0 -8
  27. datarobot_genai/drmcp/tools/predictive/deployment_info.py +91 -117
  28. datarobot_genai/drmcp/tools/predictive/model.py +0 -21
  29. datarobot_genai/drmcp/tools/predictive/predict_realtime.py +3 -0
  30. datarobot_genai/drmcp/tools/predictive/project.py +3 -19
  31. datarobot_genai/drmcp/tools/predictive/training.py +1 -19
  32. datarobot_genai/drmcp/tools/tavily/__init__.py +13 -0
  33. datarobot_genai/drmcp/tools/tavily/tools.py +141 -0
  34. datarobot_genai/langgraph/agent.py +10 -2
  35. datarobot_genai/llama_index/__init__.py +1 -1
  36. datarobot_genai/llama_index/agent.py +284 -5
  37. datarobot_genai/nat/agent.py +17 -6
  38. {datarobot_genai-0.2.37.dist-info → datarobot_genai-0.3.1.dist-info}/METADATA +3 -1
  39. {datarobot_genai-0.2.37.dist-info → datarobot_genai-0.3.1.dist-info}/RECORD +43 -40
  40. datarobot_genai/crewai/base.py +0 -159
  41. datarobot_genai/drmcp/core/tool_filter.py +0 -117
  42. datarobot_genai/llama_index/base.py +0 -299
  43. {datarobot_genai-0.2.37.dist-info → datarobot_genai-0.3.1.dist-info}/WHEEL +0 -0
  44. {datarobot_genai-0.2.37.dist-info → datarobot_genai-0.3.1.dist-info}/entry_points.txt +0 -0
  45. {datarobot_genai-0.2.37.dist-info → datarobot_genai-0.3.1.dist-info}/licenses/AUTHORS +0 -0
  46. {datarobot_genai-0.2.37.dist-info → datarobot_genai-0.3.1.dist-info}/licenses/LICENSE +0 -0
datarobot_genai/drmcp/tools/clients/perplexity.py
@@ -0,0 +1,173 @@
+ # Copyright 2025 DataRobot, Inc.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ import logging
+ from typing import Any
+ from typing import Literal
+
+ from fastmcp.exceptions import ToolError
+ from fastmcp.server.dependencies import get_http_headers
+ from perplexity import AsyncPerplexity
+ from perplexity.types import search_create_response
+ from pydantic import BaseModel
+ from pydantic import ConfigDict
+
+ logger = logging.getLogger(__name__)
+
+ MAX_QUERIES: int = 5
+ MAX_RESULTS: int = 20
+ MAX_TOKENS_PER_PAGE: int = 8192
+ MAX_SEARCH_DOMAIN_FILTER: int = 20
+
+ MAX_RESULTS_DEFAULT: int = 10
+ MAX_TOKENS_PER_PAGE_DEFAULT: int = 2048
+
+
+ async def get_perplexity_access_token() -> str | ToolError:
+     """
+     Get the Perplexity API key from HTTP headers.
+
+     At the time of writing, Perplexity does not support OAuth;
+     only an API-key authorized flow is available.
+
+     Returns
+     -------
+     Access token string on success, ToolError on failure
+
+     Example:
+     ```python
+     token = await get_perplexity_access_token()
+     if isinstance(token, ToolError):
+         # Handle error
+         return token
+     # Use token
+     ```
+     """
+     try:
+         headers = get_http_headers()
+
+         if api_key := headers.get("x-perplexity-api-key"):
+             return api_key
+
+         logger.warning("Perplexity API key not found in headers.")
+         return ToolError(
+             "Perplexity API key not found in headers. "
+             "Please provide it via 'x-perplexity-api-key' header."
+         )
+     except Exception as e:
+         logger.error(f"Unexpected error obtaining Perplexity API key: {e}.", exc_info=e)
+         return ToolError("An unexpected error occurred while obtaining the Perplexity API key.")
+
+
+ class PerplexityError(Exception):
+     """Exception for Perplexity API errors."""
+
+     def __init__(self, message: str) -> None:
+         super().__init__(message)
+
+
+ class PerplexitySearchResult(BaseModel):
+     snippet: str
+     title: str
+     url: str
+     date: str | None = None
+     last_updated: str | None = None
+
+     model_config = ConfigDict(populate_by_name=True)
+
+     @classmethod
+     def from_perplexity_sdk(cls, result: search_create_response.Result) -> "PerplexitySearchResult":
+         """Create a PerplexitySearchResult from Perplexity SDK response data."""
+         return cls(**result.model_dump())
+
+     def as_flat_dict(self) -> dict[str, Any]:
+         """Return a flat dictionary representation of the search result."""
+         return self.model_dump(by_alias=True)
+
+
+ class PerplexityClient:
+     """Client for interacting with the Perplexity API.
+
+     It is a simple wrapper around the Perplexity Python SDK.
+     """
+
+     def __init__(self, access_token: str) -> None:
+         self._client = AsyncPerplexity(api_key=access_token)
+
+     async def search(
+         self,
+         query: str | list[str],
+         search_domain_filter: list[str] | None = None,
+         recency: Literal["hour", "day", "week", "month", "year"] | None = None,
+         max_results: int = MAX_RESULTS_DEFAULT,
+         max_tokens_per_page: int = MAX_TOKENS_PER_PAGE_DEFAULT,
+     ) -> list[PerplexitySearchResult]:
+         """
+         Search using Perplexity.
+
+         Args:
+             query: A single query string or a list of up to 5 queries.
+             search_domain_filter: Up to 20 domains/URLs to allowlist or denylist.
+             recency: Filter results by time period.
+             max_results: Number of ranked results to return.
+             max_tokens_per_page: Context extraction cap per page.
+
+         Returns
+         -------
+         List of Perplexity search results.
+         """
+         if not query:
+             raise PerplexityError("Error: query cannot be empty.")
+         if query and isinstance(query, str) and not query.strip():
+             raise PerplexityError("Error: query cannot be empty.")
+         if query and isinstance(query, list) and len(query) > MAX_QUERIES:
+             raise PerplexityError(f"Error: query list cannot be bigger than {MAX_QUERIES}.")
+         if query and isinstance(query, list) and not all(q.strip() for q in query):
+             raise PerplexityError("Error: query cannot contain empty str.")
+         if search_domain_filter and len(search_domain_filter) > MAX_SEARCH_DOMAIN_FILTER:
+             raise PerplexityError("Error: maximum number of search domain filters is 20.")
+         if max_results <= 0:
+             raise PerplexityError("Error: max_results must be greater than 0.")
+         if max_results > MAX_RESULTS:
+             raise PerplexityError("Error: max_results must be smaller than or equal to 20.")
+         if max_tokens_per_page <= 0:
+             raise PerplexityError("Error: max_tokens_per_page must be greater than 0.")
+         if max_tokens_per_page > MAX_TOKENS_PER_PAGE:
+             raise PerplexityError(
+                 "Error: max_tokens_per_page must be smaller than or equal to 8192."
+             )
+
+         max_results = min(max_results, MAX_RESULTS)
+         max_tokens_per_page = min(max_tokens_per_page, MAX_TOKENS_PER_PAGE)
+
+         search_result = await self._client.search.create(
+             query=query,
+             search_domain_filter=search_domain_filter,
+             search_recency_filter=recency,
+             max_results=max_results,
+             max_tokens_per_page=max_tokens_per_page,
+         )
+
+         return [
+             PerplexitySearchResult.from_perplexity_sdk(result) for result in search_result.results
+         ]
+
+     async def __aenter__(self) -> "PerplexityClient":
+         """Async context manager entry."""
+         return self
+
+     async def __aexit__(
+         self, exc_type: type[BaseException] | None, exc_val: BaseException | None, exc_tb: Any
+     ) -> None:
+         """Async context manager exit."""
+         await self._client.close()
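
For orientation, a minimal usage sketch of the new `PerplexityClient` follows (not part of the diff). It assumes the module path shown in the file list above and an API key supplied directly instead of being read from the `x-perplexity-api-key` header; the query and printed fields are illustrative.

```python
# Hypothetical usage sketch of the PerplexityClient added in this release.
import asyncio

from datarobot_genai.drmcp.tools.clients.perplexity import PerplexityClient


async def main() -> None:
    # The access token would normally come from get_perplexity_access_token();
    # a literal key is used here purely for illustration.
    async with PerplexityClient(access_token="YOUR_PERPLEXITY_API_KEY") as client:
        results = await client.search(
            query="DataRobot GenAI release notes",
            recency="month",  # one of: hour, day, week, month, year
            max_results=5,  # validated against MAX_RESULTS (20)
        )
        for result in results:
            print(result.title, result.url)


asyncio.run(main())
```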
datarobot_genai/drmcp/tools/clients/tavily.py
@@ -0,0 +1,199 @@
+ # Copyright 2025 DataRobot, Inc.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ """Tavily API Client and utilities for API key authentication."""
+
+ import logging
+ from typing import Any
+ from typing import Literal
+
+ from fastmcp.exceptions import ToolError
+ from fastmcp.server.dependencies import get_http_headers
+ from pydantic import BaseModel
+ from pydantic import ConfigDict
+ from tavily import AsyncTavilyClient
+
+ logger = logging.getLogger(__name__)
+
+ MAX_RESULTS: int = 20
+ MAX_CHUNKS_PER_SOURCE: int = 3
+
+ MAX_RESULTS_DEFAULT: int = 5
+ CHUNKS_PER_SOURCE_DEFAULT: int = 1
+
+
+ async def get_tavily_access_token() -> str:
+     """
+     Get Tavily API key from HTTP headers.
+
+     Returns
+     -------
+     API key string
+
+     Raises
+     ------
+     ToolError: If API key is not found in headers
+     """
+     headers = get_http_headers()
+
+     api_key = headers.get("x-tavily-api-key")
+     if api_key:
+         return api_key
+
+     logger.warning("Tavily API key not found in headers")
+     raise ToolError(
+         "Tavily API key not found in headers. Please provide it via 'x-tavily-api-key' header."
+     )
+
+
+ class TavilySearchResult(BaseModel):
+     """A single search result from Tavily API."""
+
+     title: str
+     url: str
+     content: str
+     score: float
+
+     model_config = ConfigDict(populate_by_name=True)
+
+     @classmethod
+     def from_tavily_sdk(cls, result: dict[str, Any]) -> "TavilySearchResult":
+         """Create a TavilySearchResult from Tavily SDK response data."""
+         return cls(
+             title=result.get("title", ""),
+             url=result.get("url", ""),
+             content=result.get("content", ""),
+             score=result.get("score", 0.0),
+         )
+
+     def as_flat_dict(self) -> dict[str, Any]:
+         """Return a flat dictionary representation of the search result."""
+         return self.model_dump(by_alias=True)
+
+
+ class TavilyImage(BaseModel):
+     """An image result from Tavily API."""
+
+     url: str
+     description: str | None = None
+
+     model_config = ConfigDict(populate_by_name=True)
+
+     @classmethod
+     def from_tavily_sdk(cls, image: dict[str, Any] | str) -> "TavilyImage":
+         """Create a TavilyImage from Tavily SDK response data."""
+         if isinstance(image, str):
+             return cls(url=image)
+         return cls(
+             url=image.get("url", ""),
+             description=image.get("description"),
+         )
+
+
+ class TavilyClient:
+     """Client for interacting with Tavily Search API.
+
+     This is a wrapper around the official tavily-python SDK.
+     """
+
+     def __init__(self, api_key: str) -> None:
+         self._client = AsyncTavilyClient(api_key=api_key)
+
+     async def search(
+         self,
+         query: str,
+         *,
+         topic: Literal["general", "news", "finance"] = "general",
+         search_depth: Literal["basic", "advanced"] = "basic",
+         max_results: int = MAX_RESULTS_DEFAULT,
+         time_range: Literal["day", "week", "month", "year"] | None = None,
+         include_images: bool = False,
+         include_image_descriptions: bool = False,
+         chunks_per_source: int = CHUNKS_PER_SOURCE_DEFAULT,
+         include_answer: bool = False,
+     ) -> dict[str, Any]:
+         """
+         Perform a web search using Tavily API.
+
+         Args:
+             query: The search query to execute.
+             topic: The category of search ("general", "news", or "finance").
+             search_depth: The depth of search ("basic" or "advanced").
+             max_results: Maximum number of results to return (1-20).
+             time_range: Time range filter ("day", "week", "month", "year").
+             include_images: Whether to include images in results.
+             include_image_descriptions: Whether to include image descriptions.
+             chunks_per_source: Maximum content snippets per URL (1-3).
+             include_answer: Whether to include an AI-generated answer.
+
+         Returns
+         -------
+         Dict with search results from Tavily API.
+
+         Raises
+         ------
+         ValueError: If validation fails.
+         TavilyInvalidAPIKeyError: If the API key is invalid.
+         TavilyUsageLimitExceededError: If usage limit is exceeded.
+         TavilyForbiddenError: If access is forbidden.
+         TavilyBadRequestError: If the request is malformed.
+         """
+         # Validate inputs
+         if not query:
+             raise ValueError("query cannot be empty.")
+         if isinstance(query, str) and not query.strip():
+             raise ValueError("query cannot be empty.")
+         if max_results <= 0:
+             raise ValueError("max_results must be greater than 0.")
+         if max_results > MAX_RESULTS:
+             raise ValueError(f"max_results must be smaller than or equal to {MAX_RESULTS}.")
+         if chunks_per_source <= 0:
+             raise ValueError("chunks_per_source must be greater than 0.")
+         if chunks_per_source > MAX_CHUNKS_PER_SOURCE:
+             raise ValueError(
+                 f"chunks_per_source must be smaller than or equal to {MAX_CHUNKS_PER_SOURCE}."
+             )
+
+         # Clamp values to valid ranges
+         max_results = min(max_results, MAX_RESULTS)
+         chunks_per_source = min(chunks_per_source, MAX_CHUNKS_PER_SOURCE)
+
+         # Build search parameters
+         search_kwargs: dict[str, Any] = {
+             "query": query,
+             "topic": topic,
+             "search_depth": search_depth,
+             "max_results": max_results,
+             "include_images": include_images,
+             "include_image_descriptions": include_image_descriptions,
+             "chunks_per_source": chunks_per_source,
+             "include_answer": include_answer,
+         }
+
+         if time_range:
+             search_kwargs["time_range"] = time_range
+
+         return await self._client.search(**search_kwargs)
+
+     async def __aenter__(self) -> "TavilyClient":
+         """Async context manager entry."""
+         return self
+
+     async def __aexit__(
+         self, exc_type: type[BaseException] | None, exc_val: BaseException | None, exc_tb: Any
+     ) -> None:
+         """Async context manager exit."""
+         # AsyncTavilyClient doesn't have a close method, but we keep the context manager
+         # pattern for consistency with other clients
+         pass
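
Similarly, a minimal sketch of how the new `TavilyClient` might be used (not part of the diff). It assumes the module path from the file list above, a directly supplied API key, and that the raw SDK response exposes a `results` list of dicts, as `TavilySearchResult.from_tavily_sdk` implies.

```python
# Hypothetical usage sketch of the TavilyClient added in this release.
import asyncio

from datarobot_genai.drmcp.tools.clients.tavily import TavilyClient, TavilySearchResult


async def main() -> None:
    # A literal key is used for illustration; the MCP tools resolve it from headers.
    async with TavilyClient(api_key="YOUR_TAVILY_API_KEY") as client:
        response = await client.search(
            "latest DataRobot release",
            topic="news",
            max_results=3,  # validated against MAX_RESULTS (20)
            include_answer=True,
        )
        # Assumed response shape: a dict with a "results" list of plain dicts.
        for raw in response.get("results", []):
            result = TavilySearchResult.from_tavily_sdk(raw)
            print(f"{result.score:.2f}  {result.title}  {result.url}")


asyncio.run(main())
```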
datarobot_genai/drmcp/tools/confluence/tools.py
@@ -55,29 +55,18 @@ async def confluence_get_page(
      if isinstance(access_token, ToolError):
          raise access_token

-     try:
-         async with ConfluenceClient(access_token) as client:
-             if page_id_or_title.isdigit():
-                 page_response = await client.get_page_by_id(page_id_or_title)
-             else:
-                 if not space_key:
-                     raise ToolError(
-                         "Argument validation error: "
-                         "'space_key' is required when identifying a page by title."
-                     )
-                 page_response = await client.get_page_by_title(page_id_or_title, space_key)
-     except ConfluenceError as e:
-         logger.error(f"Confluence error getting page: {e}")
-         raise ToolError(str(e))
-     except Exception as e:
-         logger.error(f"Unexpected error getting Confluence page: {e}")
-         raise ToolError(
-             f"An unexpected error occurred while getting Confluence page "
-             f"'{page_id_or_title}': {str(e)}"
-         )
+     async with ConfluenceClient(access_token) as client:
+         if page_id_or_title.isdigit():
+             page_response = await client.get_page_by_id(page_id_or_title)
+         else:
+             if not space_key:
+                 raise ToolError(
+                     "Argument validation error: "
+                     "'space_key' is required when identifying a page by title."
+                 )
+             page_response = await client.get_page_by_title(page_id_or_title, space_key)

      return ToolResult(
-         content=f"Successfully retrieved page '{page_response.title}'.",
          structured_content=page_response.as_flat_dict(),
      )

@@ -116,26 +105,15 @@ async def confluence_create_page(
      if isinstance(access_token, ToolError):
          raise access_token

-     try:
-         async with ConfluenceClient(access_token) as client:
-             page_response = await client.create_page(
-                 space_key=space_key,
-                 title=title,
-                 body_content=body_content,
-                 parent_id=parent_id,
-             )
-     except ConfluenceError as e:
-         logger.error(f"Confluence error creating page: {e}")
-         raise ToolError(str(e))
-     except Exception as e:
-         logger.error(f"Unexpected error creating Confluence page: {e}")
-         raise ToolError(
-             f"An unexpected error occurred while creating Confluence page "
-             f"'{title}' in space '{space_key}': {str(e)}"
+     async with ConfluenceClient(access_token) as client:
+         page_response = await client.create_page(
+             space_key=space_key,
+             title=title,
+             body_content=body_content,
+             parent_id=parent_id,
          )

      return ToolResult(
-         content=f"New page '{title}' created successfully in space '{space_key}'.",
          structured_content={"new_page_id": page_response.page_id, "title": page_response.title},
      )

@@ -164,23 +142,13 @@ async def confluence_add_comment(
      if isinstance(access_token, ToolError):
          raise access_token

-     try:
-         async with ConfluenceClient(access_token) as client:
-             comment_response = await client.add_comment(
-                 page_id=page_id,
-                 comment_body=comment_body,
-             )
-     except ConfluenceError as e:
-         logger.error(f"Confluence error adding comment: {e}")
-         raise ToolError(str(e))
-     except Exception as e:
-         logger.error(f"Unexpected error adding comment to Confluence page: {e}")
-         raise ToolError(
-             f"An unexpected error occurred while adding comment to page '{page_id}': {str(e)}"
+     async with ConfluenceClient(access_token) as client:
+         comment_response = await client.add_comment(
+             page_id=page_id,
+             comment_body=comment_body,
          )

      return ToolResult(
-         content=f"Comment added successfully to page ID {page_id}.",
          structured_content={
              "comment_id": comment_response.comment_id,
              "page_id": page_id,
@@ -220,36 +188,27 @@ async def confluence_search(
      if isinstance(access_token, ToolError):
          raise access_token

-     try:
-         async with ConfluenceClient(access_token) as client:
-             results = await client.search_confluence_content(
-                 cql_query=cql_query, max_results=max_results
-             )
-
-             # If include_body is True, fetch full content for each page
-             if include_body and results:
-                 data = []
-                 for result in results:
-                     flat = result.as_flat_dict()
-                     try:
-                         page = await client.get_page_by_id(result.id)
-                         flat["body"] = page.body
-                     except ConfluenceError:
-                         flat["body"] = None  # Keep excerpt if page fetch fails
-                     data.append(flat)
-             else:
-                 data = [result.as_flat_dict() for result in results]
-
-     except ConfluenceError as e:
-         logger.error(f"Confluence error searching content: {e}")
-         raise ToolError(str(e))
-     except Exception as e:
-         logger.error(f"Unexpected error searching Confluence content: {e}")
-         raise ToolError(f"An unexpected error occurred while searching Confluence: {str(e)}")
+     async with ConfluenceClient(access_token) as client:
+         results = await client.search_confluence_content(
+             cql_query=cql_query, max_results=max_results
+         )
+
+         # If include_body is True, fetch full content for each page
+         if include_body and results:
+             data = []
+             for result in results:
+                 flat = result.as_flat_dict()
+                 try:
+                     page = await client.get_page_by_id(result.id)
+                     flat["body"] = page.body
+                 except ConfluenceError:
+                     flat["body"] = None  # Keep excerpt if page fetch fails
+                 data.append(flat)
+         else:
+             data = [result.as_flat_dict() for result in results]

      n = len(results)
      return ToolResult(
-         content=f"Successfully executed CQL query and retrieved {n} result(s).",
          structured_content={"data": data, "count": n},
      )

@@ -296,24 +255,14 @@ async def confluence_update_page(
      if isinstance(access_token, ToolError):
          raise access_token

-     try:
-         async with ConfluenceClient(access_token) as client:
-             page_response = await client.update_page(
-                 page_id=page_id,
-                 new_body_content=new_body_content,
-                 version_number=version_number,
-             )
-     except ConfluenceError as e:
-         logger.error(f"Confluence error updating page: {e}")
-         raise ToolError(str(e))
-     except Exception as e:
-         logger.error(f"Unexpected error updating Confluence page: {e}")
-         raise ToolError(
-             f"An unexpected error occurred while updating Confluence page '{page_id}': {str(e)}"
+     async with ConfluenceClient(access_token) as client:
+         page_response = await client.update_page(
+             page_id=page_id,
+             new_body_content=new_body_content,
+             version_number=version_number,
          )

      return ToolResult(
-         content=f"Page ID {page_id} updated successfully to version {page_response.version}.",
          structured_content={
              "updated_page_id": page_response.page_id,
              "new_version": page_response.version,