rootly-mcp-server 2.0.15__py3-none-any.whl → 2.1.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- rootly_mcp_server/__init__.py +9 -5
- rootly_mcp_server/__main__.py +44 -29
- rootly_mcp_server/client.py +98 -44
- rootly_mcp_server/data/__init__.py +1 -1
- rootly_mcp_server/exceptions.py +148 -0
- rootly_mcp_server/monitoring.py +378 -0
- rootly_mcp_server/pagination.py +98 -0
- rootly_mcp_server/security.py +404 -0
- rootly_mcp_server/server.py +877 -464
- rootly_mcp_server/smart_utils.py +294 -209
- rootly_mcp_server/utils.py +48 -33
- rootly_mcp_server/validators.py +147 -0
- {rootly_mcp_server-2.0.15.dist-info → rootly_mcp_server-2.1.1.dist-info}/METADATA +66 -13
- rootly_mcp_server-2.1.1.dist-info/RECORD +18 -0
- {rootly_mcp_server-2.0.15.dist-info → rootly_mcp_server-2.1.1.dist-info}/WHEEL +1 -1
- rootly_mcp_server-2.0.15.dist-info/RECORD +0 -13
- {rootly_mcp_server-2.0.15.dist-info → rootly_mcp_server-2.1.1.dist-info}/entry_points.txt +0 -0
- {rootly_mcp_server-2.0.15.dist-info → rootly_mcp_server-2.1.1.dist-info}/licenses/LICENSE +0 -0
rootly_mcp_server/server.py
CHANGED
@@ -6,115 +6,263 @@ the Rootly API's OpenAPI (Swagger) specification using FastMCP's OpenAPI integra
 """
 
 import json
-import os
 import logging
+import os
 from copy import deepcopy
 from pathlib import Path
-import
-import httpx
-from typing import Any, Dict, List, Optional, Annotated
+from typing import Annotated, Any
 
+import httpx
+import requests
 from fastmcp import FastMCP
-
 from pydantic import Field
 
+from .smart_utils import SolutionExtractor, TextSimilarityAnalyzer
 from .utils import sanitize_parameters_in_spec
-from .smart_utils import TextSimilarityAnalyzer, SolutionExtractor
 
 # Set up logger
 logger = logging.getLogger(__name__)
 
 
+def strip_heavy_nested_data(data: dict[str, Any]) -> dict[str, Any]:
+    """
+    Strip heavy nested relationship data from incident responses to reduce payload size.
+    Removes embedded user objects, roles, permissions, schedules, etc.
+    """
+    if not isinstance(data, dict):
+        return data
+
+    if "data" in data and isinstance(data["data"], list):
+        # Process list of incidents
+        for incident in data["data"]:
+            if "attributes" in incident:
+                attrs = incident["attributes"]
+                # Strip heavy embedded user objects
+                for user_field in [
+                    "user",
+                    "started_by",
+                    "mitigated_by",
+                    "resolved_by",
+                    "closed_by",
+                    "cancelled_by",
+                    "in_triage_by",
+                ]:
+                    if user_field in attrs and isinstance(attrs[user_field], dict):
+                        user_data = attrs[user_field].get("data", {})
+                        if "attributes" in user_data:
+                            # Keep only basic user info
+                            attrs[user_field] = {
+                                "data": {
+                                    "id": user_data.get("id"),
+                                    "type": user_data.get("type"),
+                                    "attributes": {
+                                        "name": user_data.get("attributes", {}).get("name"),
+                                        "email": user_data.get("attributes", {}).get("email"),
+                                    },
+                                }
+                            }
+
+                # Strip heavy severity object, keep only essential info
+                if "severity" in attrs and isinstance(attrs["severity"], dict):
+                    sev_data = attrs["severity"].get("data", {})
+                    if sev_data and "attributes" in sev_data:
+                        # Simplify to just name and slug
+                        attrs["severity"] = {
+                            "name": sev_data.get("attributes", {}).get("name"),
+                            "slug": sev_data.get("attributes", {}).get("slug"),
+                        }
+                    elif not sev_data:
+                        # Severity is null/empty
+                        attrs["severity"] = None
+
+                # Remove heavy integration fields (50+ fields with IDs/URLs)
+                integration_fields = [
+                    "zoom_meeting_start_url",
+                    "zoom_meeting_global_dial_in_numbers",
+                    "shortcut_story_id",
+                    "shortcut_story_url",
+                    "shortcut_task_id",
+                    "shortcut_task_url",
+                    "asana_task_id",
+                    "asana_task_url",
+                    "github_issue_id",
+                    "github_issue_url",
+                    "gitlab_issue_id",
+                    "gitlab_issue_url",
+                    "google_meeting_id",
+                    "trello_card_id",
+                    "trello_card_url",
+                    "linear_issue_id",
+                    "linear_issue_url",
+                    "zendesk_ticket_id",
+                    "zendesk_ticket_url",
+                    "motion_task_id",
+                    "motion_task_url",
+                    "clickup_task_id",
+                    "clickup_task_url",
+                    "slack_channel_deep_link",
+                    "service_now_incident_id",
+                    "service_now_incident_key",
+                    "service_now_incident_url",
+                    "opsgenie_incident_id",
+                    "opsgenie_incident_url",
+                    "opsgenie_alert_id",
+                    "opsgenie_alert_url",
+                    "victor_ops_incident_id",
+                    "victor_ops_incident_url",
+                    "pagerduty_incident_id",
+                    "pagerduty_incident_number",
+                    "pagerduty_incident_url",
+                    "mattermost_channel_id",
+                    "mattermost_channel_name",
+                    "mattermost_channel_url",
+                    "confluence_page_id",
+                    "quip_page_id",
+                    "quip_page_url",
+                    "airtable_base_key",
+                    "airtable_table_name",
+                    "airtable_record_id",
+                    "airtable_record_url",
+                    "google_drive_id",
+                    "google_drive_parent_id",
+                    "google_drive_url",
+                    "sharepoint_page_id",
+                    "sharepoint_page_url",
+                    "datadog_notebook_id",
+                    "datadog_notebook_url",
+                    "freshservice_ticket_id",
+                    "freshservice_ticket_url",
+                    "freshservice_task_id",
+                    "freshservice_task_url",
+                    "zoom_meeting_password",
+                    "zoom_meeting_pstn_password",
+                    "zoom_meeting_h323_password",
+                    "labels",
+                    "slack_last_message_ts",
+                ]
+                for field in integration_fields:
+                    attrs.pop(field, None)
+
+                # Remove heavy relationships data
+                if "relationships" in incident:
+                    rels = incident["relationships"]
+                    # Keep only counts for heavy relationships, remove the actual data
+                    for rel_key in [
+                        "events",
+                        "action_items",
+                        "subscribers",
+                        "roles",
+                        "slack_messages",
+                        "alerts",
+                    ]:
+                        if (
+                            rel_key in rels
+                            and isinstance(rels[rel_key], dict)
+                            and "data" in rels[rel_key]
+                        ):
+                            # Replace with just count
+                            rels[rel_key] = {"count": len(rels[rel_key]["data"])}
+
+    return data
+
+
 class MCPError:
     """Enhanced error handling for MCP protocol compliance."""
-
+
     @staticmethod
-    def protocol_error(code: int, message: str, data:
+    def protocol_error(code: int, message: str, data: dict | None = None):
         """Create a JSON-RPC protocol-level error response."""
-        error_response = {
-            "jsonrpc": "2.0",
-            "error": {
-                "code": code,
-                "message": message
-            }
-        }
+        error_response = {"jsonrpc": "2.0", "error": {"code": code, "message": message}}
         if data:
             error_response["error"]["data"] = data
         return error_response
-
+
     @staticmethod
-    def tool_error(
+    def tool_error(
+        error_message: str, error_type: str = "execution_error", details: dict | None = None
+    ):
         """Create a tool-level error response (returned as successful tool result)."""
-        error_response = {
-            "error": True,
-            "error_type": error_type,
-            "message": error_message
-        }
+        error_response = {"error": True, "error_type": error_type, "message": error_message}
         if details:
             error_response["details"] = details
         return error_response
-
+
     @staticmethod
     def categorize_error(exception: Exception) -> tuple[str, str]:
         """Categorize an exception into error type and appropriate message."""
        error_str = str(exception)
        exception_type = type(exception).__name__
-
+
         # Authentication/Authorization errors
-        if any(
+        if any(
+            keyword in error_str.lower()
+            for keyword in ["401", "unauthorized", "authentication", "token", "forbidden"]
+        ):
             return "authentication_error", f"Authentication failed: {error_str}"
-
-        # Network/Connection errors
-        if any(
+
+        # Network/Connection errors
+        if any(
+            keyword in exception_type.lower() for keyword in ["connection", "timeout", "network"]
+        ):
             return "network_error", f"Network error: {error_str}"
-
+
         # HTTP errors
         if "40" in error_str[:10]:  # 4xx client errors
             return "client_error", f"Client error: {error_str}"
         elif "50" in error_str[:10]:  # 5xx server errors
             return "server_error", f"Server error: {error_str}"
-
+
         # Validation errors
-        if any(
+        if any(
+            keyword in exception_type.lower() for keyword in ["validation", "pydantic", "field"]
+        ):
             return "validation_error", f"Input validation error: {error_str}"
-
+
         # Generic execution errors
         return "execution_error", f"Tool execution error: {error_str}"
 
+
 # Default Swagger URL
 SWAGGER_URL = "https://rootly-heroku.s3.amazonaws.com/swagger/v1/swagger.json"
 
+
 # Default allowed API paths
 def _generate_recommendation(solution_data: dict) -> str:
     """Generate a high-level recommendation based on solution analysis."""
     solutions = solution_data.get("solutions", [])
     avg_time = solution_data.get("average_resolution_time")
-
+
     if not solutions:
         return "No similar incidents found. This may be a novel issue requiring escalation."
-
+
     recommendation_parts = []
-
+
     # Time expectation
     if avg_time:
         if avg_time < 1:
             recommendation_parts.append("Similar incidents typically resolve quickly (< 1 hour).")
         elif avg_time > 4:
-            recommendation_parts.append(
-
+            recommendation_parts.append(
+                "Similar incidents typically require more time (> 4 hours)."
+            )
+
     # Top solution
     if solutions:
         top_solution = solutions[0]
         if top_solution.get("suggested_actions"):
             actions = top_solution["suggested_actions"][:2]  # Top 2 actions
             recommendation_parts.append(f"Consider trying: {', '.join(actions)}")
-
+
     # Pattern insights
     patterns = solution_data.get("common_patterns", [])
     if patterns:
         recommendation_parts.append(f"Common patterns: {patterns[0]}")
-
-    return
+
+    return (
+        " ".join(recommendation_parts)
+        if recommendation_parts
+        else "Review similar incidents above for resolution guidance."
+    )
 
 
 # Default allowed API paths
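The new `strip_heavy_nested_data` helper is the centerpiece of this release's payload trimming. Its effect is easiest to see on a toy payload; the sketch below re-implements just the field-dropping and relationship-collapsing steps on invented sample data (field names match the stripped fields above, but the payload itself is made up):

```python
# Illustrative only: a minimal payload shaped like a Rootly JSON:API incident list.
payload = {
    "data": [
        {
            "id": "42",
            "attributes": {"title": "DB latency spike", "zoom_meeting_password": "secret"},
            "relationships": {"events": {"data": [{"id": "1"}, {"id": "2"}, {"id": "3"}]}},
        }
    ]
}

for incident in payload["data"]:
    # Integration fields are popped outright...
    incident["attributes"].pop("zoom_meeting_password", None)
    # ...and heavy relationship arrays collapse to a bare count.
    for key, value in incident.get("relationships", {}).items():
        if isinstance(value, dict) and "data" in value:
            incident["relationships"][key] = {"count": len(value["data"])}

print(payload["data"][0]["relationships"])  # {'events': {'count': 3}}
```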
@@ -175,7 +323,12 @@ DEFAULT_ALLOWED_PATHS = [
 class AuthenticatedHTTPXClient:
     """An HTTPX client wrapper that handles Rootly API authentication and parameter transformation."""
 
-    def __init__(
+    def __init__(
+        self,
+        base_url: str = "https://api.rootly.com",
+        hosted: bool = False,
+        parameter_mapping: dict[str, str] | None = None,
+    ):
         self._base_url = base_url
         self.hosted = hosted
         self._api_token = None
@@ -184,10 +337,10 @@ class AuthenticatedHTTPXClient:
         if not self.hosted:
             self._api_token = self._get_api_token()
 
-        # Create the HTTPX client
+        # Create the HTTPX client
         headers = {
-            "Content-Type": "application/vnd.api+json",
-            "Accept": "application/vnd.api+json"
+            "Content-Type": "application/vnd.api+json",
+            "Accept": "application/vnd.api+json",
             # Let httpx handle Accept-Encoding automatically with all supported formats
         }
         if self._api_token:
@@ -199,10 +352,10 @@ class AuthenticatedHTTPXClient:
             timeout=30.0,
             follow_redirects=True,
             # Ensure proper handling of compressed responses
-            limits=httpx.Limits(max_keepalive_connections=5, max_connections=10)
+            limits=httpx.Limits(max_keepalive_connections=5, max_connections=10),
         )
 
-    def _get_api_token(self) ->
+    def _get_api_token(self) -> str | None:
         """Get the API token from environment variables."""
         api_token = os.getenv("ROOTLY_API_TOKEN")
         if not api_token:
@@ -210,7 +363,7 @@ class AuthenticatedHTTPXClient:
             return None
         return api_token
 
-    def _transform_params(self, params:
+    def _transform_params(self, params: dict[str, Any] | None) -> dict[str, Any] | None:
         """Transform sanitized parameter names back to original names."""
         if not params or not self.parameter_mapping:
             return params
@@ -227,31 +380,31 @@ class AuthenticatedHTTPXClient:
     async def request(self, method: str, url: str, **kwargs):
         """Override request to transform parameters."""
         # Transform query parameters
-        if
-            kwargs[
+        if "params" in kwargs:
+            kwargs["params"] = self._transform_params(kwargs["params"])
 
         # Call the underlying client's request method and let it handle everything
         return await self.client.request(method, url, **kwargs)
 
     async def get(self, url: str, **kwargs):
         """Proxy to request with GET method."""
-        return await self.request(
+        return await self.request("GET", url, **kwargs)
 
     async def post(self, url: str, **kwargs):
         """Proxy to request with POST method."""
-        return await self.request(
+        return await self.request("POST", url, **kwargs)
 
     async def put(self, url: str, **kwargs):
         """Proxy to request with PUT method."""
-        return await self.request(
+        return await self.request("PUT", url, **kwargs)
 
     async def patch(self, url: str, **kwargs):
         """Proxy to request with PATCH method."""
-        return await self.request(
+        return await self.request("PATCH", url, **kwargs)
 
     async def delete(self, url: str, **kwargs):
         """Proxy to request with DELETE method."""
-        return await self.request(
+        return await self.request("DELETE", url, **kwargs)
 
     async def __aenter__(self):
         return self
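The wrapper's core trick is mapping MCP-safe parameter names back to the bracketed names the Rootly API expects. A simplified stand-in for `_transform_params` (the mapping entries here are illustrative; the real table is produced by `sanitize_parameters_in_spec`):

```python
# Hypothetical mapping; the real one is derived from the OpenAPI spec.
parameter_mapping = {"filter_status": "filter[status]", "page_size": "page[size]"}

def transform_params(params: dict | None) -> dict | None:
    """Rewrite sanitized parameter names back to their original bracketed form."""
    if not params:
        return params
    return {parameter_mapping.get(key, key): value for key, value in params.items()}

print(transform_params({"filter_status": "resolved", "query": "db"}))
# {'filter[status]': 'resolved', 'query': 'db'}
```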
@@ -261,26 +414,26 @@ class AuthenticatedHTTPXClient:
 
     def __getattr__(self, name):
         # Delegate all other attributes to the underlying client, except for request methods
-        if name in [
+        if name in ["request", "get", "post", "put", "patch", "delete"]:
             # Use our overridden methods instead
             return getattr(self, name)
         return getattr(self.client, name)
-
-    @property
+
+    @property
     def base_url(self):
         return self._base_url
-
+
     @property
     def headers(self):
         return self.client.headers
 
 
 def create_rootly_mcp_server(
-    swagger_path:
+    swagger_path: str | None = None,
     name: str = "Rootly",
-    allowed_paths:
+    allowed_paths: list[str] | None = None,
     hosted: bool = False,
-    base_url:
+    base_url: str | None = None,
 ) -> FastMCP:
     """
     Create a Rootly MCP Server using FastMCP's OpenAPI integration.
@@ -301,8 +454,7 @@ def create_rootly_mcp_server(
 
     # Add /v1 prefix to paths if not present
     allowed_paths_v1 = [
-        f"/v1{path}" if not path.startswith("/v1") else path
-        for path in allowed_paths
+        f"/v1{path}" if not path.startswith("/v1") else path for path in allowed_paths
     ]
 
     logger.info(f"Creating Rootly MCP Server with allowed paths: {allowed_paths_v1}")
@@ -317,7 +469,9 @@ def create_rootly_mcp_server(
 
     # Sanitize all parameter names in the filtered spec to be MCP-compliant
     parameter_mapping = sanitize_parameters_in_spec(filtered_spec)
-    logger.info(
+    logger.info(
+        f"Sanitized parameter names for MCP compatibility (mapped {len(parameter_mapping)} parameters)"
+    )
 
     # Determine the base URL
     if base_url is None:
@@ -328,27 +482,29 @@ def create_rootly_mcp_server(
     # Create the authenticated HTTP client with parameter mapping
 
     http_client = AuthenticatedHTTPXClient(
-        base_url=base_url,
-        hosted=hosted,
-        parameter_mapping=parameter_mapping
+        base_url=base_url, hosted=hosted, parameter_mapping=parameter_mapping
     )
 
     # Create the MCP server using OpenAPI integration
     # By default, all routes become tools which is what we want
+    # NOTE: We pass http_client (the wrapper) instead of http_client.client (the inner httpx client)
+    # so that parameter transformation (e.g., filter_status -> filter[status]) is applied.
+    # The wrapper implements the same interface as httpx.AsyncClient (duck typing).
     mcp = FastMCP.from_openapi(
         openapi_spec=filtered_spec,
-        client=http_client
+        client=http_client,  # type: ignore[arg-type]
         name=name,
         timeout=30.0,
         tags={"rootly", "incident-management"},
     )
-
+
     @mcp.custom_route("/healthz", methods=["GET"])
     @mcp.custom_route("/health", methods=["GET"])
     async def health_check(request):
         from starlette.responses import PlainTextResponse
+
         return PlainTextResponse("OK")
-
+
     # Add some custom tools for enhanced functionality
 
     @mcp.tool()
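The NOTE added above `FastMCP.from_openapi` documents why the wrapper, not the inner httpx client, is passed in. A sketch of the interface contract being relied on (this `Protocol` is our own illustration, not a FastMCP type):

```python
from typing import Any, Protocol

class AsyncRequestClient(Protocol):
    """The duck-typed surface AuthenticatedHTTPXClient shares with httpx.AsyncClient."""

    async def request(self, method: str, url: str, **kwargs: Any) -> Any: ...
    async def get(self, url: str, **kwargs: Any) -> Any: ...
    async def post(self, url: str, **kwargs: Any) -> Any: ...
```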
@@ -363,12 +519,14 @@ def create_rootly_mcp_server(
                 summary = operation.get("summary", "")
                 description = operation.get("description", "")
 
-                endpoints.append(
-
-
-
-
-
+                endpoints.append(
+                    {
+                        "path": path,
+                        "method": method.upper(),
+                        "summary": summary,
+                        "description": description,
+                    }
+                )
 
         return endpoints
 
@@ -378,6 +536,7 @@ def create_rootly_mcp_server(
         if hosted:
             try:
                 from fastmcp.server.dependencies import get_http_headers
+
                 request_headers = get_http_headers()
                 auth_header = request_headers.get("authorization", "")
                 if auth_header:
@@ -385,18 +544,33 @@ def create_rootly_mcp_server(
                     if "headers" not in kwargs:
                         kwargs["headers"] = {}
                     kwargs["headers"]["Authorization"] = auth_header
-            except Exception:
-
-
+            except Exception:  # nosec B110
+                # Intentionally broad exception handling: fallback to default client behavior
+                # if token extraction fails for any reason (missing env var, invalid format, etc.)
+                pass
+
         # Use our custom client with proper error handling instead of bypassing it
         return await http_client.request(method, url, **kwargs)
 
     @mcp.tool()
     async def search_incidents(
-        query: Annotated[
-
-
-
+        query: Annotated[
+            str, Field(description="Search query to filter incidents by title/summary")
+        ] = "",
+        page_size: Annotated[
+            int, Field(description="Number of results per page (max: 20)", ge=1, le=20)
+        ] = 10,
+        page_number: Annotated[
+            int, Field(description="Page number to retrieve (use 0 for all pages)", ge=0)
+        ] = 1,
+        max_results: Annotated[
+            int,
+            Field(
+                description="Maximum total results when fetching all pages (ignored if page_number > 0)",
+                ge=1,
+                le=10,
+            ),
+        ] = 5,
     ) -> dict:
         """
         Search incidents with flexible pagination control.
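The rewritten signature moves every constraint into `Annotated[..., Field(...)]`, which pydantic enforces before the tool body runs. A standalone illustration of the same pattern (the model name is invented):

```python
from typing import Annotated

from pydantic import BaseModel, Field, ValidationError

class SearchParams(BaseModel):
    # Same constraint style as the tool's page_size parameter.
    page_size: Annotated[int, Field(ge=1, le=20)] = 10

try:
    SearchParams(page_size=50)  # exceeds le=20, so validation fails
except ValidationError as exc:
    print(exc.errors()[0]["loc"])  # ('page_size',)
```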
@@ -410,7 +584,7 @@ def create_rootly_mcp_server(
             "page[size]": page_size,  # Use requested page size (already limited to max 20)
             "page[number]": page_number,
             "include": "",
-            "fields[incidents]": "id,title,summary,status,
+            "fields[incidents]": "id,title,summary,status,created_at,updated_at,url,started_at",
         }
         if query:
             params["filter[search]"] = query
@@ -418,7 +592,7 @@ def create_rootly_mcp_server(
         try:
             response = await make_authenticated_request("GET", "/v1/incidents", params=params)
             response.raise_for_status()
-            return response.json()
+            return strip_heavy_nested_data(response.json())
         except Exception as e:
             error_type, error_message = MCPError.categorize_error(e)
             return MCPError.tool_error(error_message, error_type)
@@ -435,13 +609,15 @@ def create_rootly_mcp_server(
                 "page[size]": effective_page_size,
                 "page[number]": current_page,
                 "include": "",
-                "fields[incidents]": "id,title,summary,status,
+                "fields[incidents]": "id,title,summary,status,created_at,updated_at,url,started_at",
             }
             if query:
                 params["filter[search]"] = query
 
             try:
-                response = await make_authenticated_request(
+                response = await make_authenticated_request(
+                    "GET", "/v1/incidents", params=params
+                )
                 response.raise_for_status()
                 response_data = response.json()
 
@@ -450,19 +626,19 @@ def create_rootly_mcp_server(
                 if not incidents:
                     # No more incidents available
                     break
-
+
                 # Check if we got fewer incidents than requested (last page)
                 if len(incidents) < effective_page_size:
                     all_incidents.extend(incidents)
                     break
-
+
                 all_incidents.extend(incidents)
 
                 # Check metadata if available
                 meta = response_data.get("meta", {})
                 current_page_meta = meta.get("current_page", current_page)
                 total_pages = meta.get("total_pages")
-
+
                 # If we have reliable metadata, use it
                 if total_pages and current_page_meta >= total_pages:
                     break
@@ -473,7 +649,11 @@ def create_rootly_mcp_server(
 
             except Exception as e:
                 # Re-raise authentication or critical errors for immediate handling
-                if
+                if (
+                    "401" in str(e)
+                    or "Unauthorized" in str(e)
+                    or "authentication" in str(e).lower()
+                ):
                     error_type, error_message = MCPError.categorize_error(e)
                     return MCPError.tool_error(error_message, error_type)
                 # For other errors, break loop and return partial results
@@ -483,16 +663,18 @@ def create_rootly_mcp_server(
         if len(all_incidents) > max_results:
             all_incidents = all_incidents[:max_results]
 
-        return
-
-
-        "
-
-
-
-
+        return strip_heavy_nested_data(
+            {
+                "data": all_incidents,
+                "meta": {
+                    "total_fetched": len(all_incidents),
+                    "max_results": max_results,
+                    "query": query,
+                    "pages_fetched": current_page - 1,
+                    "page_size": effective_page_size,
+                },
             }
-
+        )
         except Exception as e:
             error_type, error_message = MCPError.categorize_error(e)
             return MCPError.tool_error(error_message, error_type)
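The `page_number=0` path added here walks pages until a short or empty page appears, capped by `max_results`. The termination logic, distilled into a minimal sketch (`fetch_page` is a stand-in for the authenticated `/v1/incidents` call, not a real helper):

```python
async def fetch_all(fetch_page, page_size: int, max_results: int) -> list:
    """Sketch of the loop's exit conditions; not the server's actual code."""
    results: list = []
    page = 1
    while len(results) < max_results:
        incidents = await fetch_page(page, page_size)
        if not incidents:
            break  # empty page: nothing left
        results.extend(incidents)
        if len(incidents) < page_size:
            break  # short page: this was the last one
        page += 1
    return results[:max_results]
```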
@@ -505,24 +687,37 @@ def create_rootly_mcp_server(
     async def find_related_incidents(
         incident_id: str = "",
         incident_description: str = "",
-        similarity_threshold: Annotated[
-
-
+        similarity_threshold: Annotated[
+            float, Field(description="Minimum similarity score (0.0-1.0)", ge=0.0, le=1.0)
+        ] = 0.15,
+        max_results: Annotated[
+            int, Field(description="Maximum number of related incidents to return", ge=1, le=20)
+        ] = 5,
+        status_filter: Annotated[
+            str,
+            Field(
+                description="Filter incidents by status (empty for all, 'resolved', 'investigating', etc.)"
+            ),
+        ] = "",
     ) -> dict:
         """Find similar incidents to help with context and resolution strategies. Provide either incident_id OR incident_description (e.g., 'website is down', 'database timeout errors'). Use status_filter to limit to specific incident statuses or leave empty for all incidents."""
         try:
             target_incident = {}
-
+
             if incident_id:
                 # Get the target incident details by ID
-                target_response = await make_authenticated_request(
+                target_response = await make_authenticated_request(
+                    "GET", f"/v1/incidents/{incident_id}"
+                )
                 target_response.raise_for_status()
-                target_incident_data =
-
-
+                target_incident_data = strip_heavy_nested_data(
+                    {"data": [target_response.json().get("data", {})]}
+                )
+                target_incident = target_incident_data.get("data", [{}])[0]
+
                 if not target_incident:
                     return MCPError.tool_error("Incident not found", "not_found")
-
+
             elif incident_description:
                 # Create synthetic incident for analysis from descriptive text
                 target_incident = {
@@ -530,101 +725,124 @@ def create_rootly_mcp_server(
                     "attributes": {
                         "title": incident_description,
                         "summary": incident_description,
-                        "description": incident_description
-                    }
+                        "description": incident_description,
+                    },
                 }
             else:
-                return MCPError.tool_error(
-
+                return MCPError.tool_error(
+                    "Must provide either incident_id or incident_description", "validation_error"
+                )
+
             # Get historical incidents for comparison
             params = {
                 "page[size]": 100,  # Get more incidents for better matching
                 "page[number]": 1,
-                "include": ""
+                "include": "",
+                "fields[incidents]": "id,title,summary,status,created_at,url",
             }
-
+
             # Only add status filter if specified
             if status_filter:
                 params["filter[status]"] = status_filter
-
-            historical_response = await make_authenticated_request(
+
+            historical_response = await make_authenticated_request(
+                "GET", "/v1/incidents", params=params
+            )
             historical_response.raise_for_status()
-            historical_data = historical_response.json()
+            historical_data = strip_heavy_nested_data(historical_response.json())
             historical_incidents = historical_data.get("data", [])
-
+
             # Filter out the target incident itself if it exists
             if incident_id:
-                historical_incidents = [
-
+                historical_incidents = [
+                    inc for inc in historical_incidents if str(inc.get("id")) != str(incident_id)
+                ]
+
             if not historical_incidents:
                 return {
                     "related_incidents": [],
                     "message": "No historical incidents found for comparison",
                     "target_incident": {
                         "id": incident_id or "synthetic",
-                        "title": target_incident.get("attributes", {}).get(
-
+                        "title": target_incident.get("attributes", {}).get(
+                            "title", incident_description
+                        ),
+                    },
                 }
-
+
             # Calculate similarities
-            similar_incidents = similarity_analyzer.calculate_similarity(
-
+            similar_incidents = similarity_analyzer.calculate_similarity(
+                historical_incidents, target_incident
+            )
+
             # Filter by threshold and limit results
             filtered_incidents = [
-                inc for inc in similar_incidents
-                if inc.similarity_score >= similarity_threshold
+                inc for inc in similar_incidents if inc.similarity_score >= similarity_threshold
             ][:max_results]
-
+
             # Format response
             related_incidents = []
             for incident in filtered_incidents:
-                related_incidents.append(
-
-
-
-
-
-
-
-
-
+                related_incidents.append(
+                    {
+                        "incident_id": incident.incident_id,
+                        "title": incident.title,
+                        "similarity_score": round(incident.similarity_score, 3),
+                        "matched_services": incident.matched_services,
+                        "matched_keywords": incident.matched_keywords,
+                        "resolution_summary": incident.resolution_summary,
+                        "resolution_time_hours": incident.resolution_time_hours,
+                    }
+                )
+
             return {
                 "target_incident": {
                     "id": incident_id or "synthetic",
-                    "title": target_incident.get("attributes", {}).get(
+                    "title": target_incident.get("attributes", {}).get(
+                        "title", incident_description
+                    ),
                 },
                 "related_incidents": related_incidents,
                 "total_found": len(filtered_incidents),
                 "similarity_threshold": similarity_threshold,
-                "analysis_summary": f"Found {len(filtered_incidents)} similar incidents out of {len(historical_incidents)} historical incidents"
+                "analysis_summary": f"Found {len(filtered_incidents)} similar incidents out of {len(historical_incidents)} historical incidents",
             }
-
+
         except Exception as e:
             error_type, error_message = MCPError.categorize_error(e)
-            return MCPError.tool_error(
+            return MCPError.tool_error(
+                f"Failed to find related incidents: {error_message}", error_type
+            )
 
     @mcp.tool()
     async def suggest_solutions(
         incident_id: str = "",
         incident_title: str = "",
         incident_description: str = "",
-        max_solutions: Annotated[
-
+        max_solutions: Annotated[
+            int, Field(description="Maximum number of solution suggestions", ge=1, le=10)
+        ] = 3,
+        status_filter: Annotated[
+            str,
+            Field(
+                description="Filter incidents by status (default 'resolved', empty for all, 'investigating', etc.)"
+            ),
+        ] = "resolved",
     ) -> dict:
         """Suggest solutions based on similar incidents. Provide either incident_id OR title/description. Defaults to resolved incidents for solution mining, but can search all statuses."""
         try:
             target_incident = {}
-
+
             if incident_id:
                 # Get incident details by ID
                 response = await make_authenticated_request("GET", f"/v1/incidents/{incident_id}")
                 response.raise_for_status()
-                incident_data = response.json()
-                target_incident = incident_data.get("data", {})
-
+                incident_data = strip_heavy_nested_data({"data": [response.json().get("data", {})]})
+                target_incident = incident_data.get("data", [{}])[0]
+
                 if not target_incident:
                     return MCPError.tool_error("Incident not found", "not_found")
-
+
             elif incident_title or incident_description:
                 # Create synthetic incident for analysis
                 target_incident = {
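The threshold-then-truncate step above is the whole ranking policy of `find_related_incidents`. In isolation, with invented scores and a stand-in result type:

```python
from dataclasses import dataclass

@dataclass
class Match:  # stand-in for the analyzer's result objects
    incident_id: str
    similarity_score: float

matches = [Match("a", 0.92), Match("b", 0.40), Match("c", 0.10)]
similarity_threshold, max_results = 0.15, 5
kept = [m for m in matches if m.similarity_score >= similarity_threshold][:max_results]
print([m.incident_id for m in kept])  # ['a', 'b'] — 'c' falls below the threshold
```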
@@ -632,83 +850,117 @@ def create_rootly_mcp_server(
                     "attributes": {
                         "title": incident_title,
                         "summary": incident_description,
-                        "description": incident_description
-                    }
+                        "description": incident_description,
+                    },
                 }
             else:
-                return MCPError.tool_error(
-
+                return MCPError.tool_error(
+                    "Must provide either incident_id or incident_title/description",
+                    "validation_error",
+                )
+
             # Get incidents for solution mining
             params = {
                 "page[size]": 150,  # Get more incidents for better solution matching
                 "page[number]": 1,
-                "include": ""
+                "include": "",
             }
-
+
             # Only add status filter if specified
             if status_filter:
                 params["filter[status]"] = status_filter
-
-            historical_response = await make_authenticated_request(
+
+            historical_response = await make_authenticated_request(
+                "GET", "/v1/incidents", params=params
+            )
             historical_response.raise_for_status()
-            historical_data = historical_response.json()
+            historical_data = strip_heavy_nested_data(historical_response.json())
             historical_incidents = historical_data.get("data", [])
-
+
             # Filter out target incident if it exists
             if incident_id:
-                historical_incidents = [
-
+                historical_incidents = [
+                    inc for inc in historical_incidents if str(inc.get("id")) != str(incident_id)
+                ]
+
             if not historical_incidents:
                 status_msg = f" with status '{status_filter}'" if status_filter else ""
                 return {
                     "solutions": [],
-                    "message": f"No historical incidents found{status_msg} for solution mining"
+                    "message": f"No historical incidents found{status_msg} for solution mining",
                 }
-
+
             # Find similar incidents
-            similar_incidents = similarity_analyzer.calculate_similarity(
-
+            similar_incidents = similarity_analyzer.calculate_similarity(
+                historical_incidents, target_incident
+            )
+
             # Filter to reasonably similar incidents (lower threshold for solution suggestions)
-            relevant_incidents = [inc for inc in similar_incidents if inc.similarity_score >= 0.2][
-
+            relevant_incidents = [inc for inc in similar_incidents if inc.similarity_score >= 0.2][
+                : max_solutions * 2
+            ]
+
             if not relevant_incidents:
                 return {
                     "solutions": [],
                     "message": "No sufficiently similar incidents found for solution suggestions",
-                    "suggestion": "This appears to be a unique incident. Consider escalating or consulting documentation."
+                    "suggestion": "This appears to be a unique incident. Consider escalating or consulting documentation.",
                 }
-
+
             # Extract solutions
             solution_data = solution_extractor.extract_solutions(relevant_incidents)
-
+
             # Format response
             return {
                 "target_incident": {
                     "id": incident_id or "synthetic",
                     "title": target_incident.get("attributes", {}).get("title", incident_title),
-                    "description": target_incident.get("attributes", {}).get(
+                    "description": target_incident.get("attributes", {}).get(
+                        "summary", incident_description
+                    ),
                 },
                 "solutions": solution_data["solutions"][:max_solutions],
                 "insights": {
                     "common_patterns": solution_data["common_patterns"],
                     "average_resolution_time_hours": solution_data["average_resolution_time"],
-                    "total_similar_incidents": solution_data["total_similar_incidents"]
+                    "total_similar_incidents": solution_data["total_similar_incidents"],
                 },
-                "recommendation": _generate_recommendation(solution_data)
+                "recommendation": _generate_recommendation(solution_data),
             }
-
+
         except Exception as e:
             error_type, error_message = MCPError.categorize_error(e)
             return MCPError.tool_error(f"Failed to suggest solutions: {error_message}", error_type)
 
     @mcp.tool()
     async def get_oncall_shift_metrics(
-        start_date: Annotated[
-
-
-
-
-
+        start_date: Annotated[
+            str,
+            Field(
+                description="Start date for metrics (ISO 8601 format, e.g., '2025-10-01' or '2025-10-01T00:00:00Z')"
+            ),
+        ],
+        end_date: Annotated[
+            str,
+            Field(
+                description="End date for metrics (ISO 8601 format, e.g., '2025-10-31' or '2025-10-31T23:59:59Z')"
+            ),
+        ],
+        user_ids: Annotated[
+            str, Field(description="Comma-separated list of user IDs to filter by (optional)")
+        ] = "",
+        schedule_ids: Annotated[
+            str, Field(description="Comma-separated list of schedule IDs to filter by (optional)")
+        ] = "",
+        team_ids: Annotated[
+            str,
+            Field(
+                description="Comma-separated list of team IDs to filter by (requires querying schedules first)"
+            ),
+        ] = "",
+        group_by: Annotated[
+            str, Field(description="Group results by: 'user', 'schedule', 'team', or 'none'")
+        ] = "user",
     ) -> dict:
         """
         Get on-call shift metrics for a specified time period. Returns shift counts, total hours,
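`suggest_solutions` deliberately over-fetches candidates (twice `max_solutions`, at a looser 0.2 threshold) so the extractor has slack to discard incidents without usable resolutions. The slicing step alone, with invented scores:

```python
scores = [0.9, 0.8, 0.5, 0.3, 0.25, 0.1]
max_solutions = 2
candidates = [s for s in scores if s >= 0.2][: max_solutions * 2]
print(candidates)  # [0.9, 0.8, 0.5, 0.3] — the extractor then keeps the best 2
```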
@@ -720,21 +972,25 @@ def create_rootly_mcp_server(
         - Specific team: team_ids='team-1' (will query schedules for that team first)
         """
         try:
-            from datetime import datetime, timedelta
             from collections import defaultdict
-            from
+            from datetime import datetime, timedelta
+            from typing import Any
 
             # Build query parameters
-            params:
+            params: dict[str, Any] = {
                 "from": start_date,
                 "to": end_date,
             }
 
             # Fetch schedules (schedules don't have team relationship, they have owner_group_ids)
-            schedules_response = await make_authenticated_request(
+            schedules_response = await make_authenticated_request(
+                "GET", "/v1/schedules", params={"page[size]": 100}
+            )
 
             if schedules_response is None:
-                return MCPError.tool_error(
+                return MCPError.tool_error(
+                    "Failed to get schedules: API request returned None", "execution_error"
+                )
 
             schedules_response.raise_for_status()
             schedules_data = schedules_response.json()
@@ -750,7 +1006,9 @@ def create_rootly_mcp_server(
             # Fetch all teams
             teams_map = {}
             if team_ids_set:
-                teams_response = await make_authenticated_request(
+                teams_response = await make_authenticated_request(
+                    "GET", "/v1/teams", params={"page[size]": 100}
+                )
                 if teams_response and teams_response.status_code == 200:
                     teams_data = teams_response.json()
                     for team in teams_data.get("data", []):
@@ -771,7 +1029,7 @@ def create_rootly_mcp_server(
                         schedule_to_team_map[schedule_id] = {
                             "team_id": team_id,
                             "team_name": team_name,
-                            "schedule_name": schedule_name
+                            "schedule_name": schedule_name,
                         }
 
             # Handle team filtering (requires multi-step query)
@@ -802,23 +1060,39 @@ def create_rootly_mcp_server(
 
             # Query shifts
             try:
-                shifts_response = await make_authenticated_request(
+                shifts_response = await make_authenticated_request(
+                    "GET", "/v1/shifts", params=params
+                )
 
                 if shifts_response is None:
-                    return MCPError.tool_error(
+                    return MCPError.tool_error(
+                        "Failed to get shifts: API request returned None", "execution_error"
+                    )
 
                 shifts_response.raise_for_status()
                 shifts_data = shifts_response.json()
 
                 if shifts_data is None:
-                    return MCPError.tool_error(
+                    return MCPError.tool_error(
+                        "Failed to get shifts: API returned null/empty response",
+                        "execution_error",
+                        details={"status": shifts_response.status_code},
+                    )
 
                 shifts = shifts_data.get("data", [])
                 included = shifts_data.get("included", [])
             except AttributeError as e:
-                return MCPError.tool_error(
+                return MCPError.tool_error(
+                    f"Failed to get shifts: Response object error - {str(e)}",
+                    "execution_error",
+                    details={"params": params},
+                )
             except Exception as e:
-                return MCPError.tool_error(
+                return MCPError.tool_error(
+                    f"Failed to get shifts: {str(e)}",
+                    "execution_error",
+                    details={"params": params, "error_type": type(e).__name__},
+                )
 
             # Build lookup maps for included resources
             users_map = {}
@@ -830,19 +1104,21 @@ def create_rootly_mcp_server(
                     on_call_roles_map[resource.get("id")] = resource
 
             # Calculate metrics
-            metrics:
-
-
-
-
-
-
-
-
-
-
-
+            metrics: dict[str, dict[str, Any]] = defaultdict(
+                lambda: {
+                    "shift_count": 0,
+                    "total_hours": 0.0,
+                    "override_count": 0,
+                    "regular_count": 0,
+                    "primary_count": 0,
+                    "secondary_count": 0,
+                    "primary_hours": 0.0,
+                    "secondary_hours": 0.0,
+                    "unknown_role_count": 0,
+                    "unique_days": set(),
+                    "shifts": [],
+                }
+            )
 
             for shift in shifts:
                 attrs = shift.get("attributes", {})
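The metrics table is a `defaultdict` whose factory returns a fresh stats dict per grouping key, so the per-shift loop never has to test for first-seen keys. The pattern in miniature (shift rows are invented):

```python
from collections import defaultdict

metrics = defaultdict(lambda: {"shift_count": 0, "total_hours": 0.0, "unique_days": set()})

for user, hours, day in [("u1", 8.0, "2025-10-01"), ("u1", 4.0, "2025-10-02")]:
    metrics[user]["shift_count"] += 1
    metrics[user]["total_hours"] += hours
    metrics[user]["unique_days"].add(day)

print(metrics["u1"]["shift_count"], metrics["u1"]["total_hours"])  # 2 12.0
```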
@@ -937,19 +1213,21 @@ def create_rootly_mcp_server(
                 # Track unique days
                 metrics[key]["unique_days"].update(shift_days)
 
-                metrics[key]["shifts"].append(
-
-
-
-
-
-
-
-
-
-
-
+                metrics[key]["shifts"].append(
+                    {
+                        "shift_id": shift.get("id"),
+                        "starts_at": starts_at,
+                        "ends_at": ends_at,
+                        "duration_hours": round(duration_hours, 2),
+                        "is_override": is_override,
+                        "schedule_id": schedule_id,
+                        "user_id": user_id,
+                        "user_name": user_name,
+                        "user_email": user_email,
+                        "role_name": role_name,
+                        "is_primary": is_primary,
+                    }
+                )
 
             # Format results
             results = []
@@ -1023,10 +1301,7 @@ def create_rootly_mcp_server(
             results.sort(key=lambda x: x["shift_count"], reverse=True)
 
             return {
-                "period": {
-                    "start_date": start_date,
-                    "end_date": end_date
-                },
+                "period": {"start_date": start_date, "end_date": end_date},
                 "total_shifts": len(shifts),
                 "grouped_by": group_by,
                 "metrics": results,
@@ -1034,12 +1309,13 @@ def create_rootly_mcp_server(
                     "total_hours": round(sum(m["total_hours"] for m in results), 2),
                     "total_regular_shifts": sum(m["regular_shifts"] for m in results),
                     "total_override_shifts": sum(m["override_shifts"] for m in results),
-                    "unique_people": len(results) if group_by == "user" else None
-                }
+                    "unique_people": len(results) if group_by == "user" else None,
+                },
             }
 
         except Exception as e:
             import traceback
+
             error_type, error_message = MCPError.categorize_error(e)
             return MCPError.tool_error(
                 f"Failed to get on-call shift metrics: {error_message}",
@@ -1048,17 +1324,37 @@ def create_rootly_mcp_server(
                     "params": {"start_date": start_date, "end_date": end_date},
                     "exception_type": type(e).__name__,
                     "exception_str": str(e),
-                    "traceback": traceback.format_exc()
-                }
+                    "traceback": traceback.format_exc(),
+                },
             )
 
     @mcp.tool()
     async def get_oncall_handoff_summary(
-        team_ids: Annotated[
-
-
-
-
+        team_ids: Annotated[
+            str,
+            Field(description="Comma-separated list of team IDs to filter schedules (optional)"),
+        ] = "",
+        schedule_ids: Annotated[
+            str, Field(description="Comma-separated list of schedule IDs (optional)")
+        ] = "",
+        timezone: Annotated[
+            str,
+            Field(
+                description="Timezone to use for display and filtering (e.g., 'America/Los_Angeles', 'Europe/London', 'Asia/Tokyo'). IMPORTANT: If user mentions a city, location, or region (e.g., 'Toronto', 'APAC', 'my time'), infer the appropriate IANA timezone. Defaults to UTC if not specified."
+            ),
+        ] = "UTC",
+        filter_by_region: Annotated[
+            bool,
+            Field(
+                description="If True, only show on-call for people whose shifts are during business hours (9am-5pm) in the specified timezone. Defaults to False."
+            ),
+        ] = False,
+        include_incidents: Annotated[
+            bool,
+            Field(
+                description="If True, fetch incidents for each shift (slower). If False, only show on-call info (faster). Defaults to False for better performance."
+            ),
+        ] = False,
    ) -> dict:
         """
         Get current on-call handoff summary. Shows who's currently on-call and who's next.
@@ -1109,15 +1405,19 @@ def create_rootly_mcp_server(
             max_pages = 5  # Schedules shouldn't have many pages
 
             while page <= max_pages:
-                schedules_response = await make_authenticated_request(
+                schedules_response = await make_authenticated_request(
+                    "GET", "/v1/schedules", params={"page[size]": 100, "page[number]": page}
+                )
                 if not schedules_response:
-                    return MCPError.tool_error(
+                    return MCPError.tool_error(
+                        "Failed to fetch schedules - no response from API", "execution_error"
+                    )
 
                 if schedules_response.status_code != 200:
                     return MCPError.tool_error(
                         f"Failed to fetch schedules - API returned status {schedules_response.status_code}",
                         "execution_error",
-                        details={"status_code": schedules_response.status_code}
+                        details={"status_code": schedules_response.status_code},
                     )
 
                 schedules_data = schedules_response.json()
@@ -1145,7 +1445,9 @@ def create_rootly_mcp_server(
 
             teams_map = {}
             if team_ids_set:
-                teams_response = await make_authenticated_request(
+                teams_response = await make_authenticated_request(
+                    "GET", "/v1/teams", params={"page[size]": 100}
+                )
                 if teams_response and teams_response.status_code == 200:
                     teams_data = teams_response.json()
                     for team in teams_data.get("data", []):
@@ -1153,8 +1455,14 @@ def create_rootly_mcp_server(
 
             # Filter schedules
             target_schedules = []
-            team_filter =
-
+            team_filter = (
+                [tid.strip() for tid in team_ids.split(",") if tid.strip()] if team_ids else []
+            )
+            schedule_filter = (
+                [sid.strip() for sid in schedule_ids.split(",") if sid.strip()]
+                if schedule_ids
+                else []
+            )
 
             for schedule in all_schedules:
                 schedule_id = schedule.get("id")
@@ -1192,8 +1500,8 @@ def create_rootly_mcp_server(
                         "filter[starts_at][gte]": (now - timedelta(days=1)).isoformat(),
                         "filter[starts_at][lte]": (now + timedelta(days=7)).isoformat(),
                         "include": "user,on_call_role",
-                        "page[size]": 50
-                    }
+                        "page[size]": 50,
+                    },
                 )
 
                 if not shifts_response:
@@ -1216,7 +1524,9 @@ def create_rootly_mcp_server(
                 current_shift = None
                 next_shift = None
 
-                for shift in sorted(
+                for shift in sorted(
+                    shifts, key=lambda s: s.get("attributes", {}).get("starts_at", "")
+                ):
                     attrs = shift.get("attributes", {})
                     starts_at_str = attrs.get("starts_at")
                     ends_at_str = attrs.get("ends_at")
@@ -1244,21 +1554,23 @@ def create_rootly_mcp_server(
                     "schedule_name": schedule_name,
                     "team_name": team_name,
                     "current_oncall": None,
-                    "next_oncall": None
+                    "next_oncall": None,
                 }
 
                 if current_shift:
                     current_attrs = current_shift.get("attributes", {})
                     current_rels = current_shift.get("relationships", {})
-                    user_data =
+                    user_data = current_rels.get("user", {}).get("data") or {}
                     user_id = user_data.get("id")
-                    role_data =
+                    role_data = current_rels.get("on_call_role", {}).get("data") or {}
                     role_id = role_data.get("id")
 
                     user_name = "Unknown"
                     if user_id and user_id in users_map:
                         user_attrs = users_map[user_id].get("attributes", {})
-                        user_name = user_attrs.get("full_name") or user_attrs.get(
+                        user_name = user_attrs.get("full_name") or user_attrs.get(
+                            "email", "Unknown"
+                        )
 
                     role_name = "Unknown Role"
                     if role_id and role_id in roles_map:
@@ -1271,21 +1583,23 @@ def create_rootly_mcp_server(
                         "role": role_name,
                         "starts_at": convert_to_timezone(current_attrs.get("starts_at")),
                         "ends_at": convert_to_timezone(current_attrs.get("ends_at")),
-                        "is_override": current_attrs.get("is_override", False)
+                        "is_override": current_attrs.get("is_override", False),
                     }
 
                 if next_shift:
                     next_attrs = next_shift.get("attributes", {})
                     next_rels = next_shift.get("relationships", {})
-                    user_data =
+                    user_data = next_rels.get("user", {}).get("data") or {}
                     user_id = user_data.get("id")
-                    role_data =
+                    role_data = next_rels.get("on_call_role", {}).get("data") or {}
                     role_id = role_data.get("id")
 
                     user_name = "Unknown"
                     if user_id and user_id in users_map:
                         user_attrs = users_map[user_id].get("attributes", {})
-                        user_name = user_attrs.get("full_name") or user_attrs.get(
+                        user_name = user_attrs.get("full_name") or user_attrs.get(
+                            "email", "Unknown"
+                        )
 
                     role_name = "Unknown Role"
                     if role_id and role_id in roles_map:
@@ -1298,7 +1612,7 @@ def create_rootly_mcp_server(
                         "role": role_name,
                         "starts_at": convert_to_timezone(next_attrs.get("starts_at")),
                         "ends_at": convert_to_timezone(next_attrs.get("ends_at")),
-                        "is_override": next_attrs.get("is_override", False)
+                        "is_override": next_attrs.get("is_override", False),
                     }
 
                 handoff_data.append(schedule_info)
@@ -1310,8 +1624,12 @@ def create_rootly_mcp_server(
             business_end_hour = 17
 
             # Create datetime objects for today's business hours in target timezone
-            today_business_start = now.replace(
-
+            today_business_start = now.replace(
+                hour=business_start_hour, minute=0, second=0, microsecond=0
+            )
+            today_business_end = now.replace(
+                hour=business_end_hour, minute=0, second=0, microsecond=0
+            )
 
             # Filter schedules where current shift overlaps with business hours
             filtered_data = []
@@ -1324,12 +1642,19 @@ def create_rootly_mcp_server(
|
|
|
1324
1642
|
|
|
1325
1643
|
if shift_start_str and shift_end_str:
|
|
1326
1644
|
try:
|
|
1327
|
-
shift_start = datetime.fromisoformat(
|
|
1328
|
-
|
|
1645
|
+
shift_start = datetime.fromisoformat(
|
|
1646
|
+
shift_start_str.replace("Z", "+00:00")
|
|
1647
|
+
)
|
|
1648
|
+
shift_end = datetime.fromisoformat(
|
|
1649
|
+
shift_end_str.replace("Z", "+00:00")
|
|
1650
|
+
)
|
|
1329
1651
|
|
|
1330
1652
|
# Check if shift overlaps with today's business hours
|
|
1331
1653
|
# Shift overlaps if: shift_start < business_end AND shift_end > business_start
|
|
1332
|
-
if
|
|
1654
|
+
if (
|
|
1655
|
+
shift_start < today_business_end
|
|
1656
|
+
and shift_end > today_business_start
|
|
1657
|
+
):
|
|
1333
1658
|
filtered_data.append(schedule_info)
|
|
1334
1659
|
except (ValueError, AttributeError):
|
|
1335
1660
|
# Skip if we can't parse times
|
|
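Note on the hunk above: the reformatted condition is the classic interval-overlap test, where two ranges overlap exactly when each one starts before the other ends. A minimal self-contained sketch of the same check (the helper name and sample values are illustrative, not part of the package):

from datetime import datetime, timezone

def overlaps(a_start, a_end, b_start, b_end):
    # Two intervals overlap exactly when each one starts before the other ends.
    return a_start < b_end and a_end > b_start

day = datetime(2025, 10, 1, tzinfo=timezone.utc)
business_start = day.replace(hour=9)
business_end = day.replace(hour=17)
shift_start = day.replace(hour=7)
shift_end = day.replace(hour=12)
print(overlaps(shift_start, shift_end, business_start, business_end))  # True: 07:00-12:00 crosses 09:00-17:00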
@@ -1351,10 +1676,12 @@ def create_rootly_mcp_server(
                         schedule_ids="",
                         severity="",
                         status="",
-                        tags=""
+                        tags="",
                     )
 
-                    schedule_info["shift_incidents"] = incidents_result if incidents_result.get("success") else None
+                    schedule_info["shift_incidents"] = (
+                        incidents_result if incidents_result.get("success") else None
+                    )
                 else:
                     schedule_info["shift_incidents"] = None
             else:
@@ -1369,18 +1696,21 @@ def create_rootly_mcp_server(
             "schedules": handoff_data,
             "summary": {
                 "total_schedules": len(handoff_data),
-                "schedules_with_current_oncall": sum(1 for s in handoff_data if s["current_oncall"]),
+                "schedules_with_current_oncall": sum(
+                    1 for s in handoff_data if s["current_oncall"]
+                ),
                 "schedules_with_next_oncall": sum(1 for s in handoff_data if s["next_oncall"]),
                 "total_incidents": sum(
                     s.get("shift_incidents", {}).get("summary", {}).get("total_incidents", 0)
                     for s in handoff_data
                     if s.get("shift_incidents")
-                )
-            }
+                ),
+            },
         }
 
     except Exception as e:
         import traceback
+
         error_type, error_message = MCPError.categorize_error(e)
         return MCPError.tool_error(
             f"Failed to get on-call handoff summary: {error_message}",
@@ -1388,8 +1718,8 @@ def create_rootly_mcp_server(
             details={
                 "exception_type": type(e).__name__,
                 "exception_str": str(e),
-                "traceback": traceback.format_exc()
-            }
+                "traceback": traceback.format_exc(),
+            },
         )
 
     async def _fetch_shift_incidents_internal(
@@ -1398,7 +1728,7 @@ def create_rootly_mcp_server(
         schedule_ids: str = "",
        severity: str = "",
        status: str = "",
-        tags: str = ""
+        tags: str = "",
    ) -> dict:
        """Internal helper to fetch incidents - used by both get_shift_incidents and get_oncall_handoff_summary."""
        try:
@@ -1408,10 +1738,7 @@ def create_rootly_mcp_server(
            # Fetch incidents that:
            # 1. Were created during the shift (created_at in range)
            # 2. OR are currently active/unresolved (started but not resolved yet)
-            params = {
-                "page[size]": 100,
-                "sort": "-created_at"
-            }
+            params = {"page[size]": 100, "sort": "-created_at"}
 
            # Get incidents created during shift OR still active
            # We'll fetch all incidents and filter in-memory for active ones
@@ -1438,16 +1765,23 @@ def create_rootly_mcp_server(
 
            while page <= max_pages:
                params["page[number]"] = page
-                incidents_response = await make_authenticated_request("GET", "/v1/incidents", params=params)
+                incidents_response = await make_authenticated_request(
+                    "GET", "/v1/incidents", params=params
+                )
 
                if not incidents_response:
-                    return MCPError.tool_error("Failed to fetch incidents - no response from API", "execution_error")
+                    return MCPError.tool_error(
+                        "Failed to fetch incidents - no response from API", "execution_error"
+                    )
 
                if incidents_response.status_code != 200:
                    return MCPError.tool_error(
                        f"Failed to fetch incidents - API returned status {incidents_response.status_code}",
                        "execution_error",
-                        details={"status_code": incidents_response.status_code, "time_range": f"{start_time} to {end_time}"},
+                        details={
+                            "status_code": incidents_response.status_code,
+                            "time_range": f"{start_time} to {end_time}",
+                        },
                    )
 
                incidents_data = incidents_response.json()
@@ -1471,6 +1805,7 @@ def create_rootly_mcp_server(
            # 1. Created during shift (created_at between start_time and end_time)
            # 2. Currently active (started but not resolved, regardless of when created)
            from datetime import timezone as dt_timezone
+
            shift_start_dt = datetime.fromisoformat(start_time.replace("Z", "+00:00"))
            shift_end_dt = datetime.fromisoformat(end_time.replace("Z", "+00:00"))
            now_dt = datetime.now(dt_timezone.utc)
@@ -1488,9 +1823,21 @@ def create_rootly_mcp_server(
 
                # Parse timestamps
                try:
-                    created_dt = datetime.fromisoformat(created_at.replace("Z", "+00:00")) if created_at else None
-                    started_dt = datetime.fromisoformat(started_at.replace("Z", "+00:00")) if started_at else None
-                    resolved_dt = datetime.fromisoformat(resolved_at.replace("Z", "+00:00")) if resolved_at else None
+                    created_dt = (
+                        datetime.fromisoformat(created_at.replace("Z", "+00:00"))
+                        if created_at
+                        else None
+                    )
+                    started_dt = (
+                        datetime.fromisoformat(started_at.replace("Z", "+00:00"))
+                        if started_at
+                        else None
+                    )
+                    resolved_dt = (
+                        datetime.fromisoformat(resolved_at.replace("Z", "+00:00"))
+                        if resolved_at
+                        else None
+                    )
                except (ValueError, AttributeError):
                    continue  # Skip if we can't parse dates
 
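The timestamp parsing above leans on the replace("Z", "+00:00") idiom that recurs throughout this diff: datetime.fromisoformat only accepts a trailing "Z" from Python 3.11 onward, so rewriting it as an explicit UTC offset keeps older interpreters working. A standalone sketch of the same idea (the helper name is illustrative):

from datetime import datetime

def parse_api_timestamp(value):
    # API timestamps end in "Z"; swapping in "+00:00" works on all
    # supported Python versions, not just 3.11+.
    if not value:
        return None
    return datetime.fromisoformat(value.replace("Z", "+00:00"))

print(parse_api_timestamp("2025-10-01T00:00:00Z"))  # 2025-10-01 00:00:00+00:00
print(parse_api_timestamp(None))                    # None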
@@ -1551,24 +1898,28 @@ def create_rootly_mcp_server(
                 if attrs.get("mitigation"):
                     narrative_parts.append(f"Resolution: {attrs.get('mitigation')}")
                 elif attrs.get("action_items_count") and attrs.get("action_items_count") > 0:
-                    narrative_parts.append(f"Action items created: {attrs.get('action_items_count')}")
+                    narrative_parts.append(
+                        f"Action items created: {attrs.get('action_items_count')}"
+                    )
 
                 narrative = " | ".join(narrative_parts)
 
-                incidents_summary.append({
-                    "incident_id": incident_id,
-                    "title": attrs.get("title", "Untitled Incident"),
-                    "severity": attrs.get("severity"),
-                    "status": attrs.get("status"),
-                    "started_at": started_at,
-                    "resolved_at": resolved_at,
-                    "duration_minutes": duration_minutes,
-                    "summary": attrs.get("summary"),
-                    "impact": attrs.get("customer_impact_summary"),
-                    "mitigation": attrs.get("mitigation"),
-                    "narrative": narrative,
-                    "incident_url": attrs.get("incident_url")
-                })
+                incidents_summary.append(
+                    {
+                        "incident_id": incident_id,
+                        "title": attrs.get("title", "Untitled Incident"),
+                        "severity": attrs.get("severity"),
+                        "status": attrs.get("status"),
+                        "started_at": started_at,
+                        "resolved_at": resolved_at,
+                        "duration_minutes": duration_minutes,
+                        "summary": attrs.get("summary"),
+                        "impact": attrs.get("customer_impact_summary"),
+                        "mitigation": attrs.get("mitigation"),
+                        "narrative": narrative,
+                        "incident_url": attrs.get("incident_url"),
+                    }
+                )
 
             # Group by severity
             by_severity = {}
@@ -1584,28 +1935,28 @@ def create_rootly_mcp_server(
             ongoing_count = total_incidents - resolved_count
 
             avg_resolution_time = None
-            durations = [inc["duration_minutes"] for inc in incidents_summary if inc["duration_minutes"]]
+            durations = [
+                inc["duration_minutes"] for inc in incidents_summary if inc["duration_minutes"]
+            ]
             if durations:
                 avg_resolution_time = int(sum(durations) / len(durations))
 
             return {
                 "success": True,
-                "period": {
-                    "start_time": start_time,
-                    "end_time": end_time
-                },
+                "period": {"start_time": start_time, "end_time": end_time},
                 "summary": {
                     "total_incidents": total_incidents,
                     "resolved": resolved_count,
                     "ongoing": ongoing_count,
                     "average_resolution_minutes": avg_resolution_time,
-                    "by_severity": {k: len(v) for k, v in by_severity.items()}
+                    "by_severity": {k: len(v) for k, v in by_severity.items()},
                 },
-                "incidents": incidents_summary
+                "incidents": incidents_summary,
             }
 
         except Exception as e:
             import traceback
+
             error_type, error_message = MCPError.categorize_error(e)
             return MCPError.tool_error(
                 f"Failed to get shift incidents: {error_message}",
@@ -1614,18 +1965,44 @@ def create_rootly_mcp_server(
                 "params": {"start_time": start_time, "end_time": end_time},
                 "exception_type": type(e).__name__,
                 "exception_str": str(e),
-                "traceback": traceback.format_exc()
-            }
+                "traceback": traceback.format_exc(),
+            },
         )
 
     @mcp.tool()
     async def get_shift_incidents(
-        start_time: Annotated[str, Field(description="Start time for incident search (ISO 8601 format, e.g., '2025-10-01T00:00:00Z')")],
-        end_time: Annotated[str, Field(description="End time for incident search (ISO 8601 format, e.g., '2025-10-01T23:59:59Z')")],
-        schedule_ids: Annotated[str, Field(description="Comma-separated list of schedule IDs to filter incidents (optional)")] = "",
-        severity: Annotated[str, Field(description="Filter by severity: 'critical', 'high', 'medium', 'low' (optional)")] = "",
-        status: Annotated[str, Field(description="Filter by status: 'started', 'detected', 'acknowledged', 'investigating', 'identified', 'monitoring', 'resolved', 'cancelled' (optional)")] = "",
-        tags: Annotated[str, Field(description="Comma-separated list of tag slugs to filter incidents (optional)")] = "",
+        start_time: Annotated[
+            str,
+            Field(
+                description="Start time for incident search (ISO 8601 format, e.g., '2025-10-01T00:00:00Z')"
+            ),
+        ],
+        end_time: Annotated[
+            str,
+            Field(
+                description="End time for incident search (ISO 8601 format, e.g., '2025-10-01T23:59:59Z')"
+            ),
+        ],
+        schedule_ids: Annotated[
+            str,
+            Field(
+                description="Comma-separated list of schedule IDs to filter incidents (optional)"
+            ),
+        ] = "",
+        severity: Annotated[
+            str,
+            Field(description="Filter by severity: 'critical', 'high', 'medium', 'low' (optional)"),
+        ] = "",
+        status: Annotated[
+            str,
+            Field(
+                description="Filter by status: 'started', 'detected', 'acknowledged', 'investigating', 'identified', 'monitoring', 'resolved', 'cancelled' (optional)"
+            ),
+        ] = "",
+        tags: Annotated[
+            str,
+            Field(description="Comma-separated list of tag slugs to filter incidents (optional)"),
+        ] = "",
     ) -> dict:
         """
         Get incidents and alerts that occurred during a specific shift or time period.
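The expanded signature above is the typing.Annotated plus pydantic Field pattern that FastMCP reads to attach human-readable descriptions to each tool parameter. A minimal sketch of the same pattern in isolation (the server name and tool below are illustrative, not part of the package):

from typing import Annotated
from pydantic import Field
from fastmcp import FastMCP

mcp = FastMCP("demo")

@mcp.tool()
async def echo_window(
    start_time: Annotated[str, Field(description="ISO 8601 start, e.g. '2025-10-01T00:00:00Z'")],
    end_time: Annotated[str, Field(description="ISO 8601 end, e.g. '2025-10-01T23:59:59Z'")],
    tags: Annotated[str, Field(description="Comma-separated tag slugs (optional)")] = "",
) -> dict:
    # The Annotated metadata is surfaced in the tool's generated JSON schema.
    return {"start_time": start_time, "end_time": end_time, "tags": tags}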
@@ -1638,7 +2015,9 @@ def create_rootly_mcp_server(
 
         Returns incident details including severity, status, duration, and basic summary.
         """
-        return await _fetch_shift_incidents_internal(start_time, end_time, schedule_ids, severity, status, tags)
+        return await _fetch_shift_incidents_internal(
+            start_time, end_time, schedule_ids, severity, status, tags
+        )
 
     # Add MCP resources for incidents and teams
     @mcp.resource("incident://{incident_id}")
@@ -1647,26 +2026,26 @@ def create_rootly_mcp_server(
         try:
             response = await make_authenticated_request("GET", f"/v1/incidents/{incident_id}")
             response.raise_for_status()
-            incident_data = response.json()
+            incident_data = strip_heavy_nested_data({"data": [response.json().get("data", {})]})
 
             # Format incident data as readable text
-            incident = incident_data.get("data", {})
+            incident = incident_data.get("data", [{}])[0]
             attributes = incident.get("attributes", {})
 
             text_content = f"""Incident #{incident_id}
-Title: {attributes.get('title', 'N/A')}
-Status: {attributes.get('status', 'N/A')}
-Severity: {attributes.get('severity', 'N/A')}
-Created: {attributes.get('created_at', 'N/A')}
-Updated: {attributes.get('updated_at', 'N/A')}
-Summary: {attributes.get('summary', 'N/A')}
-URL: {attributes.get('url', 'N/A')}"""
+Title: {attributes.get("title", "N/A")}
+Status: {attributes.get("status", "N/A")}
+Severity: {attributes.get("severity", "N/A")}
+Created: {attributes.get("created_at", "N/A")}
+Updated: {attributes.get("updated_at", "N/A")}
+Summary: {attributes.get("summary", "N/A")}
+URL: {attributes.get("url", "N/A")}"""
 
             return {
                 "uri": f"incident://{incident_id}",
                 "name": f"Incident #{incident_id}",
                 "text": text_content,
-                "mimeType": "text/plain"
+                "mimeType": "text/plain",
             }
         except Exception as e:
             error_type, error_message = MCPError.categorize_error(e)
@@ -1674,7 +2053,7 @@ URL: {attributes.get('url', 'N/A')}"""
                 "uri": f"incident://{incident_id}",
                 "name": f"Incident #{incident_id} (Error)",
                 "text": f"Error ({error_type}): {error_message}",
-                "mimeType": "text/plain"
+                "mimeType": "text/plain",
             }
 
     @mcp.resource("team://{team_id}")
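Note the wrapping trick in the incident resource above: strip_heavy_nested_data (added at the top of this file in this release) expects a JSON:API payload whose "data" key holds a list, so the single-incident response is wrapped in a one-element list and unwrapped again with [0]. A sketch of that call shape, assuming the function from this diff is in scope and using a made-up payload:

# Hypothetical single-incident payload, shaped like a JSON:API response.
raw = {"data": {"id": "42", "type": "incidents", "attributes": {"title": "DB down"}}}

# Wrap the single record in a list so the list-oriented stripper can process it,
# then take the first (and only) element back out.
stripped = strip_heavy_nested_data({"data": [raw.get("data", {})]})
incident = stripped.get("data", [{}])[0]
print(incident["attributes"]["title"])  # DB down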
@@ -1684,23 +2063,23 @@
             response = await make_authenticated_request("GET", f"/v1/teams/{team_id}")
             response.raise_for_status()
             team_data = response.json()
 
             # Format team data as readable text
             team = team_data.get("data", {})
             attributes = team.get("attributes", {})
 
             text_content = f"""Team #{team_id}
-Name: {attributes.get('name', 'N/A')}
-Color: {attributes.get('color', 'N/A')}
-Slug: {attributes.get('slug', 'N/A')}
-Created: {attributes.get('created_at', 'N/A')}
-Updated: {attributes.get('updated_at', 'N/A')}"""
+Name: {attributes.get("name", "N/A")}
+Color: {attributes.get("color", "N/A")}
+Slug: {attributes.get("slug", "N/A")}
+Created: {attributes.get("created_at", "N/A")}
+Updated: {attributes.get("updated_at", "N/A")}"""
 
             return {
                 "uri": f"team://{team_id}",
                 "name": f"Team: {attributes.get('name', team_id)}",
                 "text": text_content,
-                "mimeType": "text/plain"
+                "mimeType": "text/plain",
             }
         except Exception as e:
             error_type, error_message = MCPError.categorize_error(e)
@@ -1708,50 +2087,56 @@ Updated: {attributes.get('updated_at', 'N/A')}"""
                 "uri": f"team://{team_id}",
                 "name": f"Team #{team_id} (Error)",
                 "text": f"Error ({error_type}): {error_message}",
-                "mimeType": "text/plain"
+                "mimeType": "text/plain",
             }
 
     @mcp.resource("rootly://incidents")
     async def list_incidents_resource():
         """List recent incidents as an MCP resource for quick reference."""
         try:
-            response = await make_authenticated_request(
+            response = await make_authenticated_request(
+                "GET",
+                "/v1/incidents",
+                params={
+                    "page[size]": 10,
+                    "page[number]": 1,
+                    "include": "",
+                    "fields[incidents]": "id,title,status",
+                },
+            )
             response.raise_for_status()
-            data = response.json()
+            data = strip_heavy_nested_data(response.json())
 
             incidents = data.get("data", [])
             text_lines = ["Recent Incidents:\n"]
 
             for incident in incidents:
                 attrs = incident.get("attributes", {})
-                text_lines.append(
+                text_lines.append(
+                    f"• #{incident.get('id', 'N/A')} - {attrs.get('title', 'N/A')} [{attrs.get('status', 'N/A')}]"
+                )
+
             return {
                 "uri": "rootly://incidents",
                 "name": "Recent Incidents",
                 "text": "\n".join(text_lines),
-                "mimeType": "text/plain"
+                "mimeType": "text/plain",
             }
         except Exception as e:
             error_type, error_message = MCPError.categorize_error(e)
             return {
                 "uri": "rootly://incidents",
                 "name": "Recent Incidents (Error)",
                 "text": f"Error ({error_type}): {error_message}",
-                "mimeType": "text/plain"
+                "mimeType": "text/plain",
             }
 
-
     # Log server creation (tool count will be shown when tools are accessed)
     logger.info("Created Rootly MCP Server successfully")
     return mcp
 
 
-def _load_swagger_spec(swagger_path: Optional[str] = None) -> Dict[str, Any]:
+def _load_swagger_spec(swagger_path: str | None = None) -> dict[str, Any]:
     """
     Load the Swagger specification from a file or URL.
 
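The signature rewrites in this region (Optional[str] to str | None, Dict/List to the dict/list builtins) are the modern annotation spellings from PEP 604 and PEP 585, usable on Python 3.10 and newer (builtin generics from 3.9). Both spellings behave identically at runtime, as this small comparison shows:

from typing import Any, Dict, Optional

def old_style(swagger_path: Optional[str] = None) -> Dict[str, Any]:
    return {"path": swagger_path}

def new_style(swagger_path: str | None = None) -> dict[str, Any]:
    return {"path": swagger_path}

# Same behavior; only the annotation syntax differs.
assert old_style() == new_style()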
@@ -1766,7 +2151,7 @@ def _load_swagger_spec(swagger_path: Optional[str] = None) -> Dict[str, Any]:
         logger.info(f"Using provided Swagger path: {swagger_path}")
         if not os.path.isfile(swagger_path):
             raise FileNotFoundError(f"Swagger file not found at {swagger_path}")
-        with open(swagger_path,
+        with open(swagger_path, encoding="utf-8") as f:
             return json.load(f)
     else:
         # First, check in the package data directory
@@ -1774,7 +2159,7 @@ def _load_swagger_spec(swagger_path: Optional[str] = None) -> Dict[str, Any]:
             package_data_path = Path(__file__).parent / "data" / "swagger.json"
             if package_data_path.is_file():
                 logger.info(f"Found Swagger file in package data: {package_data_path}")
-                with open(package_data_path,
+                with open(package_data_path, encoding="utf-8") as f:
                     return json.load(f)
         except Exception as e:
             logger.debug(f"Could not load Swagger file from package data: {e}")
@@ -1787,7 +2172,7 @@ def _load_swagger_spec(swagger_path: Optional[str] = None) -> Dict[str, Any]:
         local_swagger_path = current_dir / "swagger.json"
         if local_swagger_path.is_file():
             logger.info(f"Found Swagger file at {local_swagger_path}")
-            with open(local_swagger_path,
+            with open(local_swagger_path, encoding="utf-8") as f:
                 return json.load(f)
 
         # Check parent directories
@@ -1795,7 +2180,7 @@ def _load_swagger_spec(swagger_path: Optional[str] = None) -> Dict[str, Any]:
             parent_swagger_path = parent / "swagger.json"
             if parent_swagger_path.is_file():
                 logger.info(f"Found Swagger file at {parent_swagger_path}")
-                with open(parent_swagger_path,
+                with open(parent_swagger_path, encoding="utf-8") as f:
                     return json.load(f)
 
         # If the file wasn't found, fetch it from the URL and save it
@@ -1815,7 +2200,7 @@ def _load_swagger_spec(swagger_path: Optional[str] = None) -> Dict[str, Any]:
     return swagger_spec
 
 
-def _fetch_swagger_from_url(url: str = SWAGGER_URL) -> Dict[str, Any]:
+def _fetch_swagger_from_url(url: str = SWAGGER_URL) -> dict[str, Any]:
     """
     Fetch the Swagger specification from the specified URL.
 
@@ -1827,7 +2212,7 @@ def _fetch_swagger_from_url(url: str = SWAGGER_URL) -> Dict[str, Any]:
     """
     logger.info(f"Fetching Swagger specification from {url}")
     try:
-        response = requests.get(url)
+        response = requests.get(url, timeout=30)
         response.raise_for_status()
         return response.json()
     except requests.RequestException as e:
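The timeout=30 added above matters because requests.get has no default timeout, so a stalled host could block the fetch forever. A sketch of the hardened fetch in isolation (the function name and the commented-out URL are placeholders):

import requests

def fetch_json(url: str) -> dict:
    # Without an explicit timeout, a stalled server would block this call indefinitely.
    response = requests.get(url, timeout=30)
    response.raise_for_status()
    return response.json()

# fetch_json("https://example.com/swagger.json")  # placeholder URL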
@@ -1838,7 +2223,7 @@ def _fetch_swagger_from_url(url: str = SWAGGER_URL) -> Dict[str, Any]:
         raise Exception(f"Failed to parse Swagger specification: {e}")
 
 
-def _filter_openapi_spec(spec: Dict[str, Any], allowed_paths: List[str]) -> Dict[str, Any]:
+def _filter_openapi_spec(spec: dict[str, Any], allowed_paths: list[str]) -> dict[str, Any]:
     """
     Filter an OpenAPI specification to only include specified paths and clean up schema references.
 
@@ -1855,9 +2240,7 @@ def _filter_openapi_spec(spec: Dict[str, Any], allowed_paths: List[str]) -> Dict
     # Filter paths
     original_paths = filtered_spec.get("paths", {})
     filtered_paths = {
-        path: path_item
-        for path, path_item in original_paths.items()
-        if path in allowed_paths
+        path: path_item for path, path_item in original_paths.items() if path in allowed_paths
     }
 
     filtered_spec["paths"] = filtered_paths
@@ -1873,7 +2256,7 @@ def _filter_openapi_spec(spec: Dict[str, Any], allowed_paths: List[str]) -> Dict
         if "requestBody" in operation:
             request_body = operation["requestBody"]
             if "content" in request_body:
-                for content_type, content_info in request_body["content"].items():
+                for _content_type, content_info in request_body["content"].items():
                     if "schema" in content_info:
                         schema = content_info["schema"]
                         # Remove problematic $ref references
@@ -1882,20 +2265,20 @@ def _filter_openapi_spec(spec: Dict[str, Any], allowed_paths: List[str]) -> Dict
                         content_info["schema"] = {
                             "type": "object",
                             "description": "Request parameters for this endpoint",
-                            "additionalProperties": True
+                            "additionalProperties": True,
                         }
 
         # Remove response schemas to avoid validation issues
         # FastMCP will still return the data, just without strict validation
         if "responses" in operation:
-            for status_code, response in operation["responses"].items():
+            for _status_code, response in operation["responses"].items():
                 if "content" in response:
-                    for content_type, content_info in response["content"].items():
+                    for _content_type, content_info in response["content"].items():
                         if "schema" in content_info:
                             # Replace with a simple schema that accepts any response
                             content_info["schema"] = {
                                 "type": "object",
-                                "additionalProperties": True
+                                "additionalProperties": True,
                             }
 
         # Clean parameter schemas (parameter names are already sanitized)
@@ -1907,135 +2290,155 @@ def _filter_openapi_spec(spec: Dict[str, Any], allowed_paths: List[str]) -> Dict
                     # Replace with a simple string schema
                     param["schema"] = {
                         "type": "string",
-                        "description": param.get("description", "Parameter value")
+                        "description": param.get("description", "Parameter value"),
                     }
 
         # Add/modify pagination limits to alerts and incident-related endpoints to prevent infinite loops
         if method.lower() == "get" and ("alerts" in path.lower() or "incident" in path.lower()):
             if "parameters" not in operation:
                 operation["parameters"] = []
 
             # Find existing pagination parameters and update them with limits
             page_size_param = None
             page_number_param = None
 
             for param in operation["parameters"]:
                 if param.get("name") == "page[size]":
                     page_size_param = param
                 elif param.get("name") == "page[number]":
                     page_number_param = param
 
             # Update or add page[size] parameter with limits
             if page_size_param:
                 # Update existing parameter with limits
                 if "schema" not in page_size_param:
                     page_size_param["schema"] = {}
-                page_size_param["schema"].update({
-                    "type": "integer",
-                    "default": 10,
-                    "minimum": 1,
-                    "maximum": 20,
-                    "description": "Number of results per page (max: 20)"
-                })
-            else:
-                # Add new parameter
-                operation["parameters"].append({
-                    "name": "page[size]",
-                    "in": "query",
-                    "required": False,
-                    "schema": {
-                        "type": "integer",
-                        "default": 10,
-                        "minimum": 1,
-                        "maximum": 20,
-                        "description": "Number of results per page (max: 20)"
-                    }
-                })
+                page_size_param["schema"].update(
+                    {
+                        "type": "integer",
+                        "default": 10,
+                        "minimum": 1,
+                        "maximum": 20,
+                        "description": "Number of results per page (max: 20)",
+                    }
+                )
+            else:
+                # Add new parameter
+                operation["parameters"].append(
+                    {
+                        "name": "page[size]",
+                        "in": "query",
+                        "required": False,
+                        "schema": {
+                            "type": "integer",
+                            "default": 10,
+                            "minimum": 1,
+                            "maximum": 20,
+                            "description": "Number of results per page (max: 20)",
+                        },
+                    }
+                )
 
             # Update or add page[number] parameter with defaults
             if page_number_param:
                 # Update existing parameter
                 if "schema" not in page_number_param:
                     page_number_param["schema"] = {}
-                page_number_param["schema"].update({
-                    "type": "integer",
-                    "default": 1,
-                    "minimum": 1,
-                    "description": "Page number to retrieve"
-                })
-            else:
-                # Add new parameter
-                operation["parameters"].append({
-                    "name": "page[number]",
-                    "in": "query",
-                    "required": False,
-                    "schema": {
-                        "type": "integer",
-                        "default": 1,
-                        "minimum": 1,
-                        "description": "Page number to retrieve"
-                    }
-                })
+                page_number_param["schema"].update(
+                    {
+                        "type": "integer",
+                        "default": 1,
+                        "minimum": 1,
+                        "description": "Page number to retrieve",
+                    }
+                )
+            else:
+                # Add new parameter
+                operation["parameters"].append(
+                    {
+                        "name": "page[number]",
+                        "in": "query",
+                        "required": False,
+                        "schema": {
+                            "type": "integer",
+                            "default": 1,
+                            "minimum": 1,
+                            "description": "Page number to retrieve",
+                        },
+                    }
+                )
 
             # Add sparse fieldsets for alerts endpoints to reduce payload size
             if "alert" in path.lower():
                 # Add fields[alerts] parameter with essential fields only - make it required with default
-                operation["parameters"].append({
-                    "name": "fields[alerts]",
-                    "in": "query",
-                    "required": True,
-                    "schema": {
-                        "type": "string",
-                        "default": "id,summary,status,started_at,ended_at,short_id,alert_urgency_id,source,noise",
-                        "description": "Comma-separated list of alert fields to include (reduces payload size)"
-                    }
-                })
+                operation["parameters"].append(
+                    {
+                        "name": "fields[alerts]",
+                        "in": "query",
+                        "required": True,
+                        "schema": {
+                            "type": "string",
+                            "default": "id,summary,status,started_at,ended_at,short_id,alert_urgency_id,source,noise",
+                            "description": "Comma-separated list of alert fields to include (reduces payload size)",
+                        },
+                    }
+                )
 
             # Add include parameter for alerts endpoints to minimize relationships
             if "alert" in path.lower():
                 # Check if include parameter already exists
-                include_param_exists = any(param.get("name") == "include" for param in operation["parameters"])
+                include_param_exists = any(
+                    param.get("name") == "include" for param in operation["parameters"]
+                )
                 if not include_param_exists:
-                    operation["parameters"].append({
-                        "name": "include",
-                        "in": "query",
-                        "required": True,
-                        "schema": {
-                            "type": "string",
-                            "default": "",
-                            "description": "Related resources to include (empty for minimal payload)"
-                        }
-                    })
+                    operation["parameters"].append(
+                        {
+                            "name": "include",
+                            "in": "query",
+                            "required": True,
+                            "schema": {
+                                "type": "string",
+                                "default": "",
+                                "description": "Related resources to include (empty for minimal payload)",
+                            },
+                        }
+                    )
 
             # Add sparse fieldsets for incidents endpoints to reduce payload size
             if "incident" in path.lower():
                 # Add fields[incidents] parameter with essential fields only - make it required with default
-                operation["parameters"].append({
-                    "name": "fields[incidents]",
-                    "in": "query",
-                    "required": True,
-                    "schema": {
-                        "type": "string",
-                        "default": "id,title,summary,status,severity,created_at,updated_at,url,started_at",
-                        "description": "Comma-separated list of incident fields to include (reduces payload size)"
-                    }
-                })
+                operation["parameters"].append(
+                    {
+                        "name": "fields[incidents]",
+                        "in": "query",
+                        "required": True,
+                        "schema": {
+                            "type": "string",
+                            "default": "id,title,summary,status,severity,created_at,updated_at,url,started_at",
+                            "description": "Comma-separated list of incident fields to include (reduces payload size)",
+                        },
+                    }
+                )
 
             # Add include parameter for incidents endpoints to minimize relationships
             if "incident" in path.lower():
                 # Check if include parameter already exists
-                include_param_exists = any(param.get("name") == "include" for param in operation["parameters"])
+                include_param_exists = any(
+                    param.get("name") == "include" for param in operation["parameters"]
+                )
                 if not include_param_exists:
-                    operation["parameters"].append({
-                        "name": "include",
-                        "in": "query",
-                        "required": True,
-                        "schema": {
-                            "type": "string",
-                            "default": "",
-                            "description": "Related resources to include (empty for minimal payload)"
-                        }
-                    })
+                    operation["parameters"].append(
+                        {
+                            "name": "include",
+                            "in": "query",
+                            "required": True,
+                            "schema": {
+                                "type": "string",
+                                "default": "",
+                                "description": "Related resources to include (empty for minimal payload)",
+                            },
+                        }
+                    )
 
         # Also clean up any remaining broken references in components
         if "components" in filtered_spec and "schemas" in filtered_spec["components"]:
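The large hunk above is an update-or-append pattern over OpenAPI operation dicts: for each matching GET operation it either patches an existing page[size] parameter's schema in place or appends a fresh parameter. A condensed sketch of the same pattern on a bare operation fragment (the spec dict here is illustrative):

operation = {"parameters": [{"name": "page[size]", "in": "query"}]}

limits = {"type": "integer", "default": 10, "minimum": 1, "maximum": 20}
existing = next((p for p in operation["parameters"] if p.get("name") == "page[size]"), None)

if existing is not None:
    # Patch the existing parameter's schema in place, creating it if absent.
    existing.setdefault("schema", {}).update(limits)
else:
    # Otherwise append a fresh query parameter carrying the capped schema.
    operation["parameters"].append(
        {"name": "page[size]", "in": "query", "required": False, "schema": dict(limits)}
    )

print(operation["parameters"][0]["schema"]["maximum"])  # 20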
@@ -2053,20 +2456,29 @@ def _filter_openapi_spec(spec: Dict[str, Any], allowed_paths: List[str]) -> Dict
     # Clean up any operation-level references to removed schemas
     removed_schemas = set()
     if "components" in filtered_spec and "schemas" in filtered_spec["components"]:
-        removed_schemas = {
-            "new_workflow", "update_workflow", "workflow", "workflow_task", "workflow_response", "workflow_list", "new_workflow_task", "update_workflow_task", "workflow_task_response", "workflow_task_list"
-        }
+        removed_schemas = {
+            "new_workflow",
+            "update_workflow",
+            "workflow",
+            "workflow_task",
+            "workflow_response",
+            "workflow_list",
+            "new_workflow_task",
+            "update_workflow_task",
+            "workflow_task_response",
+            "workflow_task_list",
+        }
 
     for path, path_item in filtered_spec.get("paths", {}).items():
         for method, operation in path_item.items():
             if method.lower() not in ["get", "post", "put", "delete", "patch"]:
                 continue
 
             # Clean request body references
             if "requestBody" in operation:
                 request_body = operation["requestBody"]
                 if "content" in request_body:
-                    for content_type, content_info in request_body["content"].items():
+                    for _content_type, content_info in request_body["content"].items():
                         if "schema" in content_info and "$ref" in content_info["schema"]:
                             ref_path = content_info["schema"]["$ref"]
                             schema_name = ref_path.split("/")[-1]
@@ -2075,15 +2487,17 @@ def _filter_openapi_spec(spec: Dict[str, Any], allowed_paths: List[str]) -> Dict
                             content_info["schema"] = {
                                 "type": "object",
                                 "description": "Request data for this endpoint",
-                                "additionalProperties": True
+                                "additionalProperties": True,
                             }
-                            logger.debug(
+                            logger.debug(
+                                f"Cleaned broken reference in {method.upper()} {path} request body: {ref_path}"
+                            )
+
+            # Clean response references
             if "responses" in operation:
-                for status_code, response in operation["responses"].items():
+                for _status_code, response in operation["responses"].items():
                     if "content" in response:
-                        for content_type, content_info in response["content"].items():
+                        for _content_type, content_info in response["content"].items():
                             if "schema" in content_info and "$ref" in content_info["schema"]:
                                 ref_path = content_info["schema"]["$ref"]
                                 schema_name = ref_path.split("/")[-1]
@@ -2092,14 +2506,16 @@ def _filter_openapi_spec(spec: Dict[str, Any], allowed_paths: List[str]) -> Dict
                                 content_info["schema"] = {
                                     "type": "object",
                                     "description": "Response data from this endpoint",
-                                    "additionalProperties": True
+                                    "additionalProperties": True,
                                 }
-                                logger.debug(f"Cleaned broken reference in {method.upper()} {path} response: {ref_path}")
+                                logger.debug(
+                                    f"Cleaned broken reference in {method.upper()} {path} response: {ref_path}"
+                                )
 
     return filtered_spec
 
 
-def _has_broken_references(schema_def: Dict[str, Any]) -> bool:
+def _has_broken_references(schema_def: dict[str, Any]) -> bool:
     """Check if a schema definition has broken references."""
     if "$ref" in schema_def:
         ref_path = schema_def["$ref"]
@@ -2107,7 +2523,7 @@ def _has_broken_references(schema_def: Dict[str, Any]) -> bool:
     broken_refs = [
         "incident_trigger_params",
         "new_workflow",
         "update_workflow",
         "workflow",
         "new_workflow_task",
         "update_workflow_task",
@@ -2118,18 +2534,18 @@ def _has_broken_references(schema_def: Dict[str, Any]) -> bool:
         "workflow_list",
         "workflow_custom_field_selection_response",
         "workflow_custom_field_selection_list",
         "workflow_form_field_condition_response",
         "workflow_form_field_condition_list",
         "workflow_group_response",
         "workflow_group_list",
         "workflow_run_response",
-        "workflow_runs_list"
+        "workflow_runs_list",
     ]
     if any(broken_ref in ref_path for broken_ref in broken_refs):
         return True
 
     # Recursively check nested schemas
-    for key, value in schema_def.items():
+    for _key, value in schema_def.items():
         if isinstance(value, dict):
             if _has_broken_references(value):
                 return True
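_has_broken_references walks a schema dict recursively, looking for $ref strings that point at the known-bad workflow schemas (and, per the _key rename above, it ignores the dict keys themselves). A self-contained sketch of the same recursive walk over nested dicts and lists, with the blocklist abbreviated for the example:

BROKEN = {"new_workflow", "workflow_task_response"}  # abbreviated list for the sketch

def has_broken_ref(node) -> bool:
    if isinstance(node, dict):
        ref = node.get("$ref", "")
        # Flag any reference whose path mentions a removed schema name.
        if isinstance(ref, str) and any(name in ref for name in BROKEN):
            return True
        return any(has_broken_ref(v) for v in node.values())
    if isinstance(node, list):
        return any(has_broken_ref(item) for item in node)
    return False

schema = {"properties": {"task": {"$ref": "#/components/schemas/workflow_task_response"}}}
print(has_broken_ref(schema))  # True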
@@ -2151,10 +2567,10 @@ class RootlyMCPServer(FastMCP):
 
     def __init__(
         self,
-        swagger_path: Optional[str] = None,
+        swagger_path: str | None = None,
         name: str = "Rootly",
         default_page_size: int = 10,
-        allowed_paths: Optional[List[str]] = None,
+        allowed_paths: list[str] | None = None,
         hosted: bool = False,
         *args,
         **kwargs,
@@ -2165,10 +2581,7 @@ class RootlyMCPServer(FastMCP):
 
         # Create the server using the new function
         server = create_rootly_mcp_server(
-            swagger_path=swagger_path,
-            name=name,
-            allowed_paths=allowed_paths,
-            hosted=hosted
+            swagger_path=swagger_path, name=name, allowed_paths=allowed_paths, hosted=hosted
         )
 
         # Copy the server's state to this instance
@@ -2177,5 +2590,5 @@ class RootlyMCPServer(FastMCP):
         # Tools will be accessed via async methods when needed
         self._server = server
         self._tools = {}  # Placeholder - tools should be accessed via async methods
-        self._resources = getattr(server, '_resources', {})
-        self._prompts = getattr(server, '_prompts', {})
+        self._resources = getattr(server, "_resources", {})
+        self._prompts = getattr(server, "_prompts", {})