rootly-mcp-server 2.0.14__py3-none-any.whl → 2.1.0__py3-none-any.whl
This diff shows the contents of two publicly released package versions as they appear in their public registry. It is provided for informational purposes only and reflects the changes between those published versions.
- rootly_mcp_server/__init__.py +9 -5
- rootly_mcp_server/__main__.py +44 -29
- rootly_mcp_server/client.py +98 -44
- rootly_mcp_server/data/__init__.py +1 -1
- rootly_mcp_server/exceptions.py +148 -0
- rootly_mcp_server/monitoring.py +378 -0
- rootly_mcp_server/pagination.py +98 -0
- rootly_mcp_server/security.py +404 -0
- rootly_mcp_server/server.py +1864 -343
- rootly_mcp_server/smart_utils.py +294 -209
- rootly_mcp_server/texttest.json +3178 -0
- rootly_mcp_server/utils.py +48 -33
- rootly_mcp_server/validators.py +147 -0
- {rootly_mcp_server-2.0.14.dist-info → rootly_mcp_server-2.1.0.dist-info}/METADATA +180 -50
- rootly_mcp_server-2.1.0.dist-info/RECORD +18 -0
- {rootly_mcp_server-2.0.14.dist-info → rootly_mcp_server-2.1.0.dist-info}/WHEEL +1 -1
- rootly_mcp_server-2.0.14.dist-info/RECORD +0 -12
- {rootly_mcp_server-2.0.14.dist-info → rootly_mcp_server-2.1.0.dist-info}/entry_points.txt +0 -0
- {rootly_mcp_server-2.0.14.dist-info → rootly_mcp_server-2.1.0.dist-info}/licenses/LICENSE +0 -0
rootly_mcp_server/server.py
CHANGED
@@ -6,115 +6,385 @@ the Rootly API's OpenAPI (Swagger) specification using FastMCP's OpenAPI integra
 """
 
 import json
-import os
 import logging
+import os
 from copy import deepcopy
 from pathlib import Path
-import
-import httpx
-from typing import Any, Dict, List, Optional, Annotated
+from typing import Annotated, Any
 
+import httpx
+import requests
 from fastmcp import FastMCP
-
 from pydantic import Field
 
+from .smart_utils import SolutionExtractor, TextSimilarityAnalyzer
 from .utils import sanitize_parameters_in_spec
-from .smart_utils import TextSimilarityAnalyzer, SolutionExtractor
 
 # Set up logger
 logger = logging.getLogger(__name__)
 
 
+def strip_heavy_nested_data(data: dict[str, Any]) -> dict[str, Any]:
+    """
+    Strip heavy nested relationship data from incident responses to reduce payload size.
+    Removes embedded user objects, roles, permissions, schedules, etc.
+    """
+    if not isinstance(data, dict):
+        return data
+
+    if "data" in data and isinstance(data["data"], list):
+        # Process list of incidents
+        for incident in data["data"]:
+            if "attributes" in incident:
+                attrs = incident["attributes"]
+                # Strip heavy embedded user objects
+                for user_field in [
+                    "user",
+                    "started_by",
+                    "mitigated_by",
+                    "resolved_by",
+                    "closed_by",
+                    "cancelled_by",
+                    "in_triage_by",
+                ]:
+                    if user_field in attrs and isinstance(attrs[user_field], dict):
+                        user_data = attrs[user_field].get("data", {})
+                        if "attributes" in user_data:
+                            # Keep only basic user info
+                            attrs[user_field] = {
+                                "data": {
+                                    "id": user_data.get("id"),
+                                    "type": user_data.get("type"),
+                                    "attributes": {
+                                        "name": user_data.get("attributes", {}).get("name"),
+                                        "email": user_data.get("attributes", {}).get("email"),
+                                    },
+                                }
+                            }
+
+                # Strip heavy severity object, keep only essential info
+                if "severity" in attrs and isinstance(attrs["severity"], dict):
+                    sev_data = attrs["severity"].get("data", {})
+                    if sev_data and "attributes" in sev_data:
+                        # Simplify to just name and slug
+                        attrs["severity"] = {
+                            "name": sev_data.get("attributes", {}).get("name"),
+                            "slug": sev_data.get("attributes", {}).get("slug"),
+                        }
+                    elif not sev_data:
+                        # Severity is null/empty
+                        attrs["severity"] = None
+
+                # Remove heavy integration fields (50+ fields with IDs/URLs)
+                integration_fields = [
+                    "zoom_meeting_start_url",
+                    "zoom_meeting_global_dial_in_numbers",
+                    "shortcut_story_id",
+                    "shortcut_story_url",
+                    "shortcut_task_id",
+                    "shortcut_task_url",
+                    "asana_task_id",
+                    "asana_task_url",
+                    "github_issue_id",
+                    "github_issue_url",
+                    "gitlab_issue_id",
+                    "gitlab_issue_url",
+                    "google_meeting_id",
+                    "trello_card_id",
+                    "trello_card_url",
+                    "linear_issue_id",
+                    "linear_issue_url",
+                    "zendesk_ticket_id",
+                    "zendesk_ticket_url",
+                    "motion_task_id",
+                    "motion_task_url",
+                    "clickup_task_id",
+                    "clickup_task_url",
+                    "slack_channel_deep_link",
+                    "service_now_incident_id",
+                    "service_now_incident_key",
+                    "service_now_incident_url",
+                    "opsgenie_incident_id",
+                    "opsgenie_incident_url",
+                    "opsgenie_alert_id",
+                    "opsgenie_alert_url",
+                    "victor_ops_incident_id",
+                    "victor_ops_incident_url",
+                    "pagerduty_incident_id",
+                    "pagerduty_incident_number",
+                    "pagerduty_incident_url",
+                    "mattermost_channel_id",
+                    "mattermost_channel_name",
+                    "mattermost_channel_url",
+                    "confluence_page_id",
+                    "quip_page_id",
+                    "quip_page_url",
+                    "airtable_base_key",
+                    "airtable_table_name",
+                    "airtable_record_id",
+                    "airtable_record_url",
+                    "google_drive_id",
+                    "google_drive_parent_id",
+                    "google_drive_url",
+                    "sharepoint_page_id",
+                    "sharepoint_page_url",
+                    "datadog_notebook_id",
+                    "datadog_notebook_url",
+                    "freshservice_ticket_id",
+                    "freshservice_ticket_url",
+                    "freshservice_task_id",
+                    "freshservice_task_url",
+                    "zoom_meeting_password",
+                    "zoom_meeting_pstn_password",
+                    "zoom_meeting_h323_password",
+                    "labels",
+                    "slack_last_message_ts",
+                ]
+                for field in integration_fields:
+                    attrs.pop(field, None)
+
+            # Remove heavy relationships data
+            if "relationships" in incident:
+                rels = incident["relationships"]
+                # Keep only counts for heavy relationships, remove the actual data
+                for rel_key in [
+                    "events",
+                    "action_items",
+                    "subscribers",
+                    "roles",
+                    "slack_messages",
+                    "alerts",
+                ]:
+                    if (
+                        rel_key in rels
+                        and isinstance(rels[rel_key], dict)
+                        and "data" in rels[rel_key]
+                    ):
+                        # Replace with just count
+                        rels[rel_key] = {"count": len(rels[rel_key]["data"])}
+
+    # Process "included" section (common in shifts/alerts with user data)
+    if "included" in data and isinstance(data["included"], list):
+        for item in data["included"]:
+            if item.get("type") == "users":
+                # Keep only essential user fields
+                if "attributes" in item:
+                    attrs = item["attributes"]
+                    keep_fields = {"name", "email", "phone", "time_zone", "full_name"}
+                    item["attributes"] = {k: v for k, v in attrs.items() if k in keep_fields}
+                # Strip heavy relationships
+                if "relationships" in item:
+                    for rel_key in [
+                        "schedules",
+                        "notification_rules",
+                        "teams",
+                        "devices",
+                        "email_addresses",
+                        "phone_numbers",
+                    ]:
+                        if rel_key in item["relationships"]:
+                            rel_data = item["relationships"][rel_key]
+                            if isinstance(rel_data, dict) and "data" in rel_data:
+                                data_list = rel_data.get("data", [])
+                                if isinstance(data_list, list):
+                                    item["relationships"][rel_key] = {"count": len(data_list)}
+
+    # Process alerts in data list
+    if "data" in data and isinstance(data["data"], list):
+        for item in data["data"]:
+            if item.get("type") == "alerts":
+                # Strip heavy attributes from alerts
+                if "attributes" in item:
+                    attrs = item["attributes"]
+                    # Remove heavy fields - raw data, embedded objects, integration fields
+                    heavy_fields = [
+                        "data",  # Raw alert payload from source - very large
+                        "labels",
+                        "external_url",
+                        "pagerduty_incident_id",
+                        "pagerduty_incident_url",
+                        "opsgenie_alert_id",
+                        "opsgenie_alert_url",
+                        "deduplication_key",
+                    ]
+                    for field in heavy_fields:
+                        attrs.pop(field, None)
+
+                    # Simplify embedded objects to just IDs/counts
+                    # groups - keep only group_ids
+                    if "groups" in attrs:
+                        attrs.pop("groups", None)
+                    # environments - keep only environment_ids
+                    if "environments" in attrs:
+                        attrs.pop("environments", None)
+                    # services - keep only service_ids
+                    if "services" in attrs:
+                        attrs.pop("services", None)
+                    # incidents - embedded incident objects
+                    if "incidents" in attrs:
+                        attrs.pop("incidents", None)
+                    # responders - embedded responder objects
+                    if "responders" in attrs:
+                        attrs.pop("responders", None)
+                    # notified_users - embedded user objects
+                    if "notified_users" in attrs:
+                        attrs.pop("notified_users", None)
+                    # alerting_targets - embedded target objects
+                    if "alerting_targets" in attrs:
+                        attrs.pop("alerting_targets", None)
+                    # alert_urgency - keep only alert_urgency_id
+                    if "alert_urgency" in attrs:
+                        attrs.pop("alert_urgency", None)
+                    # alert_field_values - embedded custom field values
+                    if "alert_field_values" in attrs:
+                        attrs.pop("alert_field_values", None)
+
+                # Strip heavy relationships
+                if "relationships" in item:
+                    rels = item["relationships"]
+                    for rel_key in ["events", "subscribers", "alerts"]:
+                        if (
+                            rel_key in rels
+                            and isinstance(rels[rel_key], dict)
+                            and "data" in rels[rel_key]
+                        ):
+                            data_list = rels[rel_key].get("data", [])
+                            if isinstance(data_list, list):
+                                rels[rel_key] = {"count": len(data_list)}
+
+    return data
+
+
+class ProcessedResponse:
+    """Wrapper around httpx.Response that processes JSON to reduce payload size."""
+
+    def __init__(self, response: httpx.Response):
+        self._response = response
+        self._processed_json = None
+
+    def json(self, **kwargs):
+        """Parse JSON and strip heavy nested data."""
+        if self._processed_json is None:
+            raw_data = self._response.json(**kwargs)
+            self._processed_json = strip_heavy_nested_data(raw_data)
+        return self._processed_json
+
+    def __getattr__(self, name):
+        """Delegate all other attributes to the wrapped response."""
+        return getattr(self._response, name)
+
+
+class ResponseProcessingClient(httpx.AsyncClient):
+    """AsyncClient subclass that wraps responses to reduce payload size.
+
+    This is necessary because FastMCP.from_openapi() uses the client directly,
+    bypassing any wrapper class. By subclassing httpx.AsyncClient, we ensure
+    all responses go through our processing.
+    """
+
+    async def request(self, method, url, **kwargs):
+        """Override request to wrap response with ProcessedResponse."""
+        response = await super().request(method, url, **kwargs)
+        return ProcessedResponse(response)
+
+
 class MCPError:
     """Enhanced error handling for MCP protocol compliance."""
-
+
     @staticmethod
-    def protocol_error(code: int, message: str, data:
+    def protocol_error(code: int, message: str, data: dict | None = None):
         """Create a JSON-RPC protocol-level error response."""
-        error_response = {
-            "jsonrpc": "2.0",
-            "error": {
-                "code": code,
-                "message": message
-            }
-        }
+        error_response = {"jsonrpc": "2.0", "error": {"code": code, "message": message}}
        if data:
            error_response["error"]["data"] = data
        return error_response
-
+
    @staticmethod
-    def tool_error(
+    def tool_error(
+        error_message: str, error_type: str = "execution_error", details: dict | None = None
+    ):
        """Create a tool-level error response (returned as successful tool result)."""
-        error_response = {
-            "error": True,
-            "error_type": error_type,
-            "message": error_message
-        }
+        error_response = {"error": True, "error_type": error_type, "message": error_message}
        if details:
            error_response["details"] = details
        return error_response
-
+
    @staticmethod
    def categorize_error(exception: Exception) -> tuple[str, str]:
        """Categorize an exception into error type and appropriate message."""
        error_str = str(exception)
        exception_type = type(exception).__name__
-
+
        # Authentication/Authorization errors
-        if any(
+        if any(
+            keyword in error_str.lower()
+            for keyword in ["401", "unauthorized", "authentication", "token", "forbidden"]
+        ):
            return "authentication_error", f"Authentication failed: {error_str}"
-
-        # Network/Connection errors
-        if any(
+
+        # Network/Connection errors
+        if any(
+            keyword in exception_type.lower() for keyword in ["connection", "timeout", "network"]
+        ):
            return "network_error", f"Network error: {error_str}"
-
+
        # HTTP errors
        if "40" in error_str[:10]:  # 4xx client errors
            return "client_error", f"Client error: {error_str}"
        elif "50" in error_str[:10]:  # 5xx server errors
            return "server_error", f"Server error: {error_str}"
-
+
        # Validation errors
-        if any(
+        if any(
+            keyword in exception_type.lower() for keyword in ["validation", "pydantic", "field"]
+        ):
            return "validation_error", f"Input validation error: {error_str}"
-
+
        # Generic execution errors
        return "execution_error", f"Tool execution error: {error_str}"
 
+
 # Default Swagger URL
 SWAGGER_URL = "https://rootly-heroku.s3.amazonaws.com/swagger/v1/swagger.json"
 
+
 # Default allowed API paths
 def _generate_recommendation(solution_data: dict) -> str:
     """Generate a high-level recommendation based on solution analysis."""
     solutions = solution_data.get("solutions", [])
     avg_time = solution_data.get("average_resolution_time")
-
+
     if not solutions:
         return "No similar incidents found. This may be a novel issue requiring escalation."
-
+
     recommendation_parts = []
-
+
     # Time expectation
     if avg_time:
         if avg_time < 1:
             recommendation_parts.append("Similar incidents typically resolve quickly (< 1 hour).")
         elif avg_time > 4:
-            recommendation_parts.append(
-
+            recommendation_parts.append(
+                "Similar incidents typically require more time (> 4 hours)."
+            )
+
     # Top solution
     if solutions:
         top_solution = solutions[0]
         if top_solution.get("suggested_actions"):
             actions = top_solution["suggested_actions"][:2]  # Top 2 actions
             recommendation_parts.append(f"Consider trying: {', '.join(actions)}")
-
+
     # Pattern insights
     patterns = solution_data.get("common_patterns", [])
     if patterns:
         recommendation_parts.append(f"Common patterns: {patterns[0]}")
-
-    return
+
+    return (
+        " ".join(recommendation_parts)
+        if recommendation_parts
+        else "Review similar incidents above for resolution guidance."
+    )
 
 
 # Default allowed API paths
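The new `ProcessedResponse` / `ResponseProcessingClient` pair above strips heavy payload data lazily: the raw body is only parsed and reduced the first time `json()` is called, and everything else is delegated to the wrapped response. Below is a minimal, self-contained sketch of that wrapping pattern; `FakeResponse` and `strip_example` are illustrative stand-ins, not part of the package.

```python
from typing import Any


def strip_example(data: dict[str, Any]) -> dict[str, Any]:
    """Drop a heavy key and collapse a relationship list to a count (illustrative only)."""
    if not isinstance(data, dict):
        return data
    data.pop("raw_payload", None)
    rels = data.get("relationships", {})
    if isinstance(rels.get("events"), list):
        rels["events"] = {"count": len(rels["events"])}
    return data


class FakeResponse:
    """Stand-in for httpx.Response so the sketch runs without a network call."""

    def __init__(self, payload: dict[str, Any]):
        self._payload = payload
        self.status_code = 200

    def json(self) -> dict[str, Any]:
        return self._payload


class ProcessedResponseSketch:
    """Wraps a response; json() is parsed and stripped once, then cached."""

    def __init__(self, response):
        self._response = response
        self._processed = None

    def json(self, **kwargs):
        if self._processed is None:
            self._processed = strip_example(self._response.json(**kwargs))
        return self._processed

    def __getattr__(self, name):
        # Anything other than json() falls through to the wrapped response.
        return getattr(self._response, name)


resp = ProcessedResponseSketch(
    FakeResponse({"raw_payload": "x" * 10_000, "relationships": {"events": [1, 2, 3]}})
)
print(resp.status_code)  # delegated attribute: 200
print(resp.json())       # {'relationships': {'events': {'count': 3}}}
```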
@@ -153,13 +423,34 @@ DEFAULT_ALLOWED_PATHS = [
     # Status pages
     "/status_pages",
     "/status_pages/{status_page_id}",
+    # On-call schedules and shifts
+    "/schedules",
+    "/schedules/{schedule_id}",
+    "/schedules/{schedule_id}/shifts",
+    "/shifts",
+    "/schedule_rotations/{schedule_rotation_id}",
+    "/schedule_rotations/{schedule_rotation_id}/schedule_rotation_users",
+    "/schedule_rotations/{schedule_rotation_id}/schedule_rotation_active_days",
+    # On-call overrides
+    "/schedules/{schedule_id}/override_shifts",
+    "/override_shifts/{override_shift_id}",
+    # On-call shadows and roles
+    "/schedules/{schedule_id}/on_call_shadows",
+    "/on_call_shadows/{on_call_shadow_id}",
+    "/on_call_roles",
+    "/on_call_roles/{on_call_role_id}",
 ]
 
 
 class AuthenticatedHTTPXClient:
     """An HTTPX client wrapper that handles Rootly API authentication and parameter transformation."""
 
-    def __init__(
+    def __init__(
+        self,
+        base_url: str = "https://api.rootly.com",
+        hosted: bool = False,
+        parameter_mapping: dict[str, str] | None = None,
+    ):
         self._base_url = base_url
         self.hosted = hosted
         self._api_token = None
@@ -168,25 +459,25 @@ class AuthenticatedHTTPXClient:
         if not self.hosted:
             self._api_token = self._get_api_token()
 
-        # Create the HTTPX client
+        # Create the HTTPX client
         headers = {
-            "Content-Type": "application/vnd.api+json",
-            "Accept": "application/vnd.api+json"
+            "Content-Type": "application/vnd.api+json",
+            "Accept": "application/vnd.api+json",
             # Let httpx handle Accept-Encoding automatically with all supported formats
         }
         if self._api_token:
             headers["Authorization"] = f"Bearer {self._api_token}"
 
-        self.client =
+        self.client = ResponseProcessingClient(
             base_url=base_url,
             headers=headers,
             timeout=30.0,
             follow_redirects=True,
             # Ensure proper handling of compressed responses
-            limits=httpx.Limits(max_keepalive_connections=5, max_connections=10)
+            limits=httpx.Limits(max_keepalive_connections=5, max_connections=10),
         )
 
-    def _get_api_token(self) ->
+    def _get_api_token(self) -> str | None:
         """Get the API token from environment variables."""
         api_token = os.getenv("ROOTLY_API_TOKEN")
         if not api_token:
@@ -194,7 +485,7 @@ class AuthenticatedHTTPXClient:
             return None
         return api_token
 
-    def _transform_params(self, params:
+    def _transform_params(self, params: dict[str, Any] | None) -> dict[str, Any] | None:
         """Transform sanitized parameter names back to original names."""
         if not params or not self.parameter_mapping:
             return params
@@ -209,33 +500,36 @@ class AuthenticatedHTTPXClient:
         return transformed
 
     async def request(self, method: str, url: str, **kwargs):
-        """Override request to transform parameters."""
+        """Override request to transform parameters and wrap response for payload reduction."""
         # Transform query parameters
-        if
-            kwargs[
+        if "params" in kwargs:
+            kwargs["params"] = self._transform_params(kwargs["params"])
 
-        # Call the underlying client's request method
-
+        # Call the underlying client's request method
+        response = await self.client.request(method, url, **kwargs)
+
+        # Wrap response to process JSON and reduce payload size
+        return ProcessedResponse(response)
 
     async def get(self, url: str, **kwargs):
         """Proxy to request with GET method."""
-        return await self.request(
+        return await self.request("GET", url, **kwargs)
 
     async def post(self, url: str, **kwargs):
         """Proxy to request with POST method."""
-        return await self.request(
+        return await self.request("POST", url, **kwargs)
 
     async def put(self, url: str, **kwargs):
         """Proxy to request with PUT method."""
-        return await self.request(
+        return await self.request("PUT", url, **kwargs)
 
     async def patch(self, url: str, **kwargs):
         """Proxy to request with PATCH method."""
-        return await self.request(
+        return await self.request("PATCH", url, **kwargs)
 
     async def delete(self, url: str, **kwargs):
         """Proxy to request with DELETE method."""
-        return await self.request(
+        return await self.request("DELETE", url, **kwargs)
 
     async def __aenter__(self):
         return self
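Every HTTP verb on the wrapper now funnels through the single `request()` override, so parameter renaming and response wrapping happen in one place. A minimal sketch of that funnel, with a stand-in class (`VerbFunnelSketch`) and a fake return value instead of a real HTTP call:

```python
import asyncio
from typing import Any


class VerbFunnelSketch:
    """Illustrative stand-in for the wrapper client, not the Rootly client itself."""

    def __init__(self, parameter_mapping: dict[str, str] | None = None):
        self.parameter_mapping = parameter_mapping or {}

    def _transform_params(self, params: dict[str, Any] | None) -> dict[str, Any] | None:
        if not params or not self.parameter_mapping:
            return params
        # Map sanitized names (e.g. "page_size") back to the API's names (e.g. "page[size]").
        return {self.parameter_mapping.get(k, k): v for k, v in params.items()}

    async def request(self, method: str, url: str, **kwargs):
        if "params" in kwargs:
            kwargs["params"] = self._transform_params(kwargs["params"])
        return {"method": method, "url": url, **kwargs}  # stand-in for the real HTTP call

    async def get(self, url: str, **kwargs):
        return await self.request("GET", url, **kwargs)

    async def post(self, url: str, **kwargs):
        return await self.request("POST", url, **kwargs)


client = VerbFunnelSketch({"page_size": "page[size]"})
print(asyncio.run(client.get("/v1/incidents", params={"page_size": 10})))
# {'method': 'GET', 'url': '/v1/incidents', 'params': {'page[size]': 10}}
```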
@@ -245,26 +539,26 @@ class AuthenticatedHTTPXClient:
 
     def __getattr__(self, name):
         # Delegate all other attributes to the underlying client, except for request methods
-        if name in [
+        if name in ["request", "get", "post", "put", "patch", "delete"]:
             # Use our overridden methods instead
             return getattr(self, name)
         return getattr(self.client, name)
-
-    @property
+
+    @property
     def base_url(self):
         return self._base_url
-
+
     @property
     def headers(self):
         return self.client.headers
 
 
 def create_rootly_mcp_server(
-    swagger_path:
+    swagger_path: str | None = None,
     name: str = "Rootly",
-    allowed_paths:
+    allowed_paths: list[str] | None = None,
     hosted: bool = False,
-    base_url:
+    base_url: str | None = None,
 ) -> FastMCP:
     """
     Create a Rootly MCP Server using FastMCP's OpenAPI integration.
@@ -285,8 +579,7 @@ def create_rootly_mcp_server(
 
     # Add /v1 prefix to paths if not present
     allowed_paths_v1 = [
-        f"/v1{path}" if not path.startswith("/v1") else path
-        for path in allowed_paths
+        f"/v1{path}" if not path.startswith("/v1") else path for path in allowed_paths
     ]
 
     logger.info(f"Creating Rootly MCP Server with allowed paths: {allowed_paths_v1}")
@@ -301,7 +594,9 @@ def create_rootly_mcp_server(
 
     # Sanitize all parameter names in the filtered spec to be MCP-compliant
     parameter_mapping = sanitize_parameters_in_spec(filtered_spec)
-    logger.info(
+    logger.info(
+        f"Sanitized parameter names for MCP compatibility (mapped {len(parameter_mapping)} parameters)"
+    )
 
     # Determine the base URL
     if base_url is None:
@@ -312,9 +607,7 @@ def create_rootly_mcp_server(
     # Create the authenticated HTTP client with parameter mapping
 
     http_client = AuthenticatedHTTPXClient(
-        base_url=base_url,
-        hosted=hosted,
-        parameter_mapping=parameter_mapping
+        base_url=base_url, hosted=hosted, parameter_mapping=parameter_mapping
     )
 
     # Create the MCP server using OpenAPI integration
@@ -326,13 +619,14 @@ def create_rootly_mcp_server(
         timeout=30.0,
         tags={"rootly", "incident-management"},
     )
-
+
     @mcp.custom_route("/healthz", methods=["GET"])
     @mcp.custom_route("/health", methods=["GET"])
     async def health_check(request):
         from starlette.responses import PlainTextResponse
+
         return PlainTextResponse("OK")
-
+
     # Add some custom tools for enhanced functionality
 
     @mcp.tool()
@@ -347,12 +641,14 @@ def create_rootly_mcp_server(
                 summary = operation.get("summary", "")
                 description = operation.get("description", "")
 
-                endpoints.append(
-
-
-
-
-
+                endpoints.append(
+                    {
+                        "path": path,
+                        "method": method.upper(),
+                        "summary": summary,
+                        "description": description,
+                    }
+                )
 
         return endpoints
 
@@ -362,6 +658,7 @@ def create_rootly_mcp_server(
         if hosted:
             try:
                 from fastmcp.server.dependencies import get_http_headers
+
                 request_headers = get_http_headers()
                 auth_header = request_headers.get("authorization", "")
                 if auth_header:
@@ -369,18 +666,33 @@ def create_rootly_mcp_server(
                     if "headers" not in kwargs:
                         kwargs["headers"] = {}
                     kwargs["headers"]["Authorization"] = auth_header
-            except Exception:
-
-
+            except Exception:  # nosec B110
+                # Intentionally broad exception handling: fallback to default client behavior
+                # if token extraction fails for any reason (missing env var, invalid format, etc.)
+                pass
+
         # Use our custom client with proper error handling instead of bypassing it
         return await http_client.request(method, url, **kwargs)
 
     @mcp.tool()
     async def search_incidents(
-        query: Annotated[
-
-
-
+        query: Annotated[
+            str, Field(description="Search query to filter incidents by title/summary")
+        ] = "",
+        page_size: Annotated[
+            int, Field(description="Number of results per page (max: 20)", ge=1, le=20)
+        ] = 10,
+        page_number: Annotated[
+            int, Field(description="Page number to retrieve (use 0 for all pages)", ge=0)
+        ] = 1,
+        max_results: Annotated[
+            int,
+            Field(
+                description="Maximum total results when fetching all pages (ignored if page_number > 0)",
+                ge=1,
+                le=10,
+            ),
+        ] = 5,
     ) -> dict:
         """
         Search incidents with flexible pagination control.
@@ -394,7 +706,7 @@ def create_rootly_mcp_server(
                 "page[size]": page_size,  # Use requested page size (already limited to max 20)
                 "page[number]": page_number,
                 "include": "",
-                "fields[incidents]": "id,title,summary,status,
+                "fields[incidents]": "id,title,summary,status,created_at,updated_at,url,started_at",
             }
             if query:
                 params["filter[search]"] = query
@@ -402,7 +714,7 @@ def create_rootly_mcp_server(
             try:
                 response = await make_authenticated_request("GET", "/v1/incidents", params=params)
                 response.raise_for_status()
-                return response.json()
+                return strip_heavy_nested_data(response.json())
             except Exception as e:
                 error_type, error_message = MCPError.categorize_error(e)
                 return MCPError.tool_error(error_message, error_type)
@@ -419,13 +731,15 @@ def create_rootly_mcp_server(
                     "page[size]": effective_page_size,
                     "page[number]": current_page,
                     "include": "",
-                    "fields[incidents]": "id,title,summary,status,
+                    "fields[incidents]": "id,title,summary,status,created_at,updated_at,url,started_at",
                 }
                 if query:
                     params["filter[search]"] = query
 
                 try:
-                    response = await make_authenticated_request(
+                    response = await make_authenticated_request(
+                        "GET", "/v1/incidents", params=params
+                    )
                     response.raise_for_status()
                     response_data = response.json()
 
@@ -434,19 +748,19 @@ def create_rootly_mcp_server(
                     if not incidents:
                         # No more incidents available
                         break
-
+
                     # Check if we got fewer incidents than requested (last page)
                     if len(incidents) < effective_page_size:
                         all_incidents.extend(incidents)
                         break
-
+
                     all_incidents.extend(incidents)
 
                     # Check metadata if available
                     meta = response_data.get("meta", {})
                     current_page_meta = meta.get("current_page", current_page)
                     total_pages = meta.get("total_pages")
-
+
                     # If we have reliable metadata, use it
                     if total_pages and current_page_meta >= total_pages:
                         break
@@ -457,7 +771,11 @@ def create_rootly_mcp_server(
 
                 except Exception as e:
                     # Re-raise authentication or critical errors for immediate handling
-                    if
+                    if (
+                        "401" in str(e)
+                        or "Unauthorized" in str(e)
+                        or "authentication" in str(e).lower()
+                    ):
                         error_type, error_message = MCPError.categorize_error(e)
                         return MCPError.tool_error(error_message, error_type)
                     # For other errors, break loop and return partial results
@@ -467,16 +785,18 @@ def create_rootly_mcp_server(
             if len(all_incidents) > max_results:
                 all_incidents = all_incidents[:max_results]
 
-            return
-
-
-                "
-
-
-
-
+            return strip_heavy_nested_data(
+                {
+                    "data": all_incidents,
+                    "meta": {
+                        "total_fetched": len(all_incidents),
+                        "max_results": max_results,
+                        "query": query,
+                        "pages_fetched": current_page - 1,
+                        "page_size": effective_page_size,
+                    },
                 }
-
+            )
         except Exception as e:
             error_type, error_message = MCPError.categorize_error(e)
             return MCPError.tool_error(error_message, error_type)
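When `page_number` is 0, the updated `search_incidents` keeps requesting pages until it sees a short page or reaches `max_results`, then returns the aggregate with its own `meta` block. A small runnable sketch of that loop; `fetch_page` is a stub standing in for the Rootly API:

```python
def fetch_page(page_number: int, page_size: int) -> list[dict]:
    """Stub API: pretend the backend holds 12 incidents."""
    dataset = [{"id": i} for i in range(1, 13)]
    start = (page_number - 1) * page_size
    return dataset[start:start + page_size]


def fetch_all(page_size: int = 5, max_results: int = 8) -> dict:
    all_incidents: list[dict] = []
    current_page = 1
    while len(all_incidents) < max_results:
        incidents = fetch_page(current_page, page_size)
        if not incidents:
            break  # no more incidents available
        all_incidents.extend(incidents)
        if len(incidents) < page_size:
            break  # a short page means we just read the last page
        current_page += 1
    return {
        "data": all_incidents[:max_results],
        "meta": {
            "total_fetched": min(len(all_incidents), max_results),
            "pages_fetched": current_page - 1,
            "page_size": page_size,
        },
    }


print(fetch_all()["meta"])  # {'total_fetched': 8, 'pages_fetched': 2, 'page_size': 5}
```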
@@ -487,103 +807,164 @@ def create_rootly_mcp_server(
 
     @mcp.tool()
     async def find_related_incidents(
-        incident_id: str,
-
-
+        incident_id: str = "",
+        incident_description: str = "",
+        similarity_threshold: Annotated[
+            float, Field(description="Minimum similarity score (0.0-1.0)", ge=0.0, le=1.0)
+        ] = 0.15,
+        max_results: Annotated[
+            int, Field(description="Maximum number of related incidents to return", ge=1, le=20)
+        ] = 5,
+        status_filter: Annotated[
+            str,
+            Field(
+                description="Filter incidents by status (empty for all, 'resolved', 'investigating', etc.)"
+            ),
+        ] = "",
     ) -> dict:
-        """Find
+        """Find similar incidents to help with context and resolution strategies. Provide either incident_id OR incident_description (e.g., 'website is down', 'database timeout errors'). Use status_filter to limit to specific incident statuses or leave empty for all incidents."""
         try:
-
-
-
-
-
-
-
-
-
-
-
+            target_incident = {}
+
+            if incident_id:
+                # Get the target incident details by ID
+                target_response = await make_authenticated_request(
+                    "GET", f"/v1/incidents/{incident_id}"
+                )
+                target_response.raise_for_status()
+                target_incident_data = strip_heavy_nested_data(
+                    {"data": [target_response.json().get("data", {})]}
+                )
+                target_incident = target_incident_data.get("data", [{}])[0]
+
+                if not target_incident:
+                    return MCPError.tool_error("Incident not found", "not_found")
+
+            elif incident_description:
+                # Create synthetic incident for analysis from descriptive text
+                target_incident = {
+                    "id": "synthetic",
+                    "attributes": {
+                        "title": incident_description,
+                        "summary": incident_description,
+                        "description": incident_description,
+                    },
+                }
+            else:
+                return MCPError.tool_error(
+                    "Must provide either incident_id or incident_description", "validation_error"
+                )
+
+            # Get historical incidents for comparison
+            params = {
                 "page[size]": 100,  # Get more incidents for better matching
                 "page[number]": 1,
-                "
-                "
-            }
+                "include": "",
+                "fields[incidents]": "id,title,summary,status,created_at,url",
+            }
+
+            # Only add status filter if specified
+            if status_filter:
+                params["filter[status]"] = status_filter
+
+            historical_response = await make_authenticated_request(
+                "GET", "/v1/incidents", params=params
+            )
             historical_response.raise_for_status()
-            historical_data = historical_response.json()
+            historical_data = strip_heavy_nested_data(historical_response.json())
             historical_incidents = historical_data.get("data", [])
-
-            # Filter out the target incident itself
-
-
+
+            # Filter out the target incident itself if it exists
+            if incident_id:
+                historical_incidents = [
+                    inc for inc in historical_incidents if str(inc.get("id")) != str(incident_id)
+                ]
+
             if not historical_incidents:
                 return {
                     "related_incidents": [],
                     "message": "No historical incidents found for comparison",
                     "target_incident": {
-                        "id": incident_id,
-                        "title": target_incident.get("attributes", {}).get(
-
+                        "id": incident_id or "synthetic",
+                        "title": target_incident.get("attributes", {}).get(
+                            "title", incident_description
+                        ),
+                    },
                 }
-
+
             # Calculate similarities
-            similar_incidents = similarity_analyzer.calculate_similarity(
-
+            similar_incidents = similarity_analyzer.calculate_similarity(
+                historical_incidents, target_incident
+            )
+
             # Filter by threshold and limit results
             filtered_incidents = [
-                inc for inc in similar_incidents
-                if inc.similarity_score >= similarity_threshold
+                inc for inc in similar_incidents if inc.similarity_score >= similarity_threshold
             ][:max_results]
-
+
             # Format response
             related_incidents = []
             for incident in filtered_incidents:
-                related_incidents.append(
-
-
-
-
-
-
-
-
-
+                related_incidents.append(
+                    {
+                        "incident_id": incident.incident_id,
+                        "title": incident.title,
+                        "similarity_score": round(incident.similarity_score, 3),
+                        "matched_services": incident.matched_services,
+                        "matched_keywords": incident.matched_keywords,
+                        "resolution_summary": incident.resolution_summary,
+                        "resolution_time_hours": incident.resolution_time_hours,
+                    }
+                )
+
             return {
                 "target_incident": {
-                    "id": incident_id,
-                    "title": target_incident.get("attributes", {}).get(
+                    "id": incident_id or "synthetic",
+                    "title": target_incident.get("attributes", {}).get(
+                        "title", incident_description
+                    ),
                 },
                 "related_incidents": related_incidents,
                 "total_found": len(filtered_incidents),
                 "similarity_threshold": similarity_threshold,
-                "analysis_summary": f"Found {len(filtered_incidents)} similar incidents out of {len(historical_incidents)} historical incidents"
+                "analysis_summary": f"Found {len(filtered_incidents)} similar incidents out of {len(historical_incidents)} historical incidents",
             }
-
+
         except Exception as e:
             error_type, error_message = MCPError.categorize_error(e)
-            return MCPError.tool_error(
+            return MCPError.tool_error(
+                f"Failed to find related incidents: {error_message}", error_type
+            )
 
     @mcp.tool()
     async def suggest_solutions(
         incident_id: str = "",
         incident_title: str = "",
         incident_description: str = "",
-        max_solutions: Annotated[
+        max_solutions: Annotated[
+            int, Field(description="Maximum number of solution suggestions", ge=1, le=10)
+        ] = 3,
+        status_filter: Annotated[
+            str,
+            Field(
+                description="Filter incidents by status (default 'resolved', empty for all, 'investigating', etc.)"
+            ),
+        ] = "resolved",
     ) -> dict:
-        """Suggest solutions based on similar
+        """Suggest solutions based on similar incidents. Provide either incident_id OR title/description. Defaults to resolved incidents for solution mining, but can search all statuses."""
         try:
             target_incident = {}
-
+
             if incident_id:
                 # Get incident details by ID
                 response = await make_authenticated_request("GET", f"/v1/incidents/{incident_id}")
                 response.raise_for_status()
-                incident_data = response.json()
-                target_incident = incident_data.get("data", {})
-
+                incident_data = strip_heavy_nested_data({"data": [response.json().get("data", {})]})
+                target_incident = incident_data.get("data", [{}])[0]
+
                 if not target_incident:
                     return MCPError.tool_error("Incident not found", "not_found")
-
+
             elif incident_title or incident_description:
                 # Create synthetic incident for analysis
                 target_incident = {
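Both `find_related_incidents` and `suggest_solutions` score historical incidents against the target (real or synthetic), keep only those above a similarity threshold, and cap the result count. A runnable sketch of that threshold-then-cap step; `SimilarIncident` and the explicit sort are stand-ins for the result type and ordering produced by `smart_utils`, not the package's own API:

```python
from dataclasses import dataclass


@dataclass
class SimilarIncident:
    incident_id: str
    title: str
    similarity_score: float


def filter_similar(
    incidents: list[SimilarIncident], threshold: float = 0.15, max_results: int = 5
) -> list[SimilarIncident]:
    # Rank by score, drop anything below the threshold, then cap the list.
    ranked = sorted(incidents, key=lambda inc: inc.similarity_score, reverse=True)
    return [inc for inc in ranked if inc.similarity_score >= threshold][:max_results]


candidates = [
    SimilarIncident("1", "DB timeouts", 0.42),
    SimilarIncident("2", "Website down", 0.10),
    SimilarIncident("3", "API latency", 0.21),
]
print([inc.incident_id for inc in filter_similar(candidates)])  # ['1', '3']
```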
@@ -591,69 +972,1175 @@ def create_rootly_mcp_server(
                     "attributes": {
                         "title": incident_title,
                         "summary": incident_description,
-                        "description": incident_description
-                    }
+                        "description": incident_description,
+                    },
                 }
             else:
-                return MCPError.tool_error(
-
-
-
+                return MCPError.tool_error(
+                    "Must provide either incident_id or incident_title/description",
+                    "validation_error",
+                )
+
+            # Get incidents for solution mining
+            params = {
                 "page[size]": 150,  # Get more incidents for better solution matching
                 "page[number]": 1,
-                "
-
-
+                "include": "",
+            }
+
+            # Only add status filter if specified
+            if status_filter:
+                params["filter[status]"] = status_filter
+
+            historical_response = await make_authenticated_request(
+                "GET", "/v1/incidents", params=params
+            )
             historical_response.raise_for_status()
-            historical_data = historical_response.json()
+            historical_data = strip_heavy_nested_data(historical_response.json())
             historical_incidents = historical_data.get("data", [])
-
+
             # Filter out target incident if it exists
             if incident_id:
-                historical_incidents = [
-
+                historical_incidents = [
+                    inc for inc in historical_incidents if str(inc.get("id")) != str(incident_id)
+                ]
+
             if not historical_incidents:
+                status_msg = f" with status '{status_filter}'" if status_filter else ""
                 return {
                     "solutions": [],
-                    "message": "No historical
+                    "message": f"No historical incidents found{status_msg} for solution mining",
                 }
-
+
             # Find similar incidents
-            similar_incidents = similarity_analyzer.calculate_similarity(
-
+            similar_incidents = similarity_analyzer.calculate_similarity(
+                historical_incidents, target_incident
+            )
+
             # Filter to reasonably similar incidents (lower threshold for solution suggestions)
-            relevant_incidents = [inc for inc in similar_incidents if inc.similarity_score >= 0.2][
-
+            relevant_incidents = [inc for inc in similar_incidents if inc.similarity_score >= 0.2][
+                : max_solutions * 2
+            ]
+
             if not relevant_incidents:
                 return {
                     "solutions": [],
                     "message": "No sufficiently similar incidents found for solution suggestions",
-                    "suggestion": "This appears to be a unique incident. Consider escalating or consulting documentation."
+                    "suggestion": "This appears to be a unique incident. Consider escalating or consulting documentation.",
                 }
-
+
             # Extract solutions
             solution_data = solution_extractor.extract_solutions(relevant_incidents)
-
+
             # Format response
             return {
                 "target_incident": {
                     "id": incident_id or "synthetic",
                     "title": target_incident.get("attributes", {}).get("title", incident_title),
-                    "description": target_incident.get("attributes", {}).get(
+                    "description": target_incident.get("attributes", {}).get(
+                        "summary", incident_description
+                    ),
                 },
                 "solutions": solution_data["solutions"][:max_solutions],
                 "insights": {
                     "common_patterns": solution_data["common_patterns"],
                     "average_resolution_time_hours": solution_data["average_resolution_time"],
-                    "total_similar_incidents": solution_data["total_similar_incidents"]
+                    "total_similar_incidents": solution_data["total_similar_incidents"],
                 },
-                "recommendation": _generate_recommendation(solution_data)
+                "recommendation": _generate_recommendation(solution_data),
             }
-
+
         except Exception as e:
             error_type, error_message = MCPError.categorize_error(e)
             return MCPError.tool_error(f"Failed to suggest solutions: {error_message}", error_type)
 
+    @mcp.tool()
+    async def get_oncall_shift_metrics(
+        start_date: Annotated[
+            str,
+            Field(
+                description="Start date for metrics (ISO 8601 format, e.g., '2025-10-01' or '2025-10-01T00:00:00Z')"
+            ),
+        ],
+        end_date: Annotated[
+            str,
+            Field(
+                description="End date for metrics (ISO 8601 format, e.g., '2025-10-31' or '2025-10-31T23:59:59Z')"
+            ),
+        ],
+        user_ids: Annotated[
+            str, Field(description="Comma-separated list of user IDs to filter by (optional)")
+        ] = "",
+        schedule_ids: Annotated[
+            str, Field(description="Comma-separated list of schedule IDs to filter by (optional)")
+        ] = "",
+        team_ids: Annotated[
+            str,
+            Field(
+                description="Comma-separated list of team IDs to filter by (requires querying schedules first)"
+            ),
+        ] = "",
+        group_by: Annotated[
+            str, Field(description="Group results by: 'user', 'schedule', 'team', or 'none'")
+        ] = "user",
+    ) -> dict:
+        """
+        Get on-call shift metrics for a specified time period. Returns shift counts, total hours,
+        and other statistics grouped by user, schedule, or team.
+
+        Examples:
+        - Monthly report: start_date='2025-10-01', end_date='2025-10-31'
+        - Specific user: start_date='2025-10-01', end_date='2025-10-31', user_ids='123,456'
+        - Specific team: team_ids='team-1' (will query schedules for that team first)
+        """
+        try:
+            from collections import defaultdict
+            from datetime import datetime, timedelta
+            from typing import Any
+
+            # Build query parameters
+            params: dict[str, Any] = {
+                "from": start_date,
+                "to": end_date,
+            }
+
+            # Fetch schedules (schedules don't have team relationship, they have owner_group_ids)
+            schedules_response = await make_authenticated_request(
+                "GET", "/v1/schedules", params={"page[size]": 100}
+            )
+
+            if schedules_response is None:
+                return MCPError.tool_error(
+                    "Failed to get schedules: API request returned None", "execution_error"
+                )
+
+            schedules_response.raise_for_status()
+            schedules_data = schedules_response.json()
+
+            all_schedules = schedules_data.get("data", [])
+
+            # Collect all unique team IDs from schedules' owner_group_ids
+            team_ids_set = set()
+            for schedule in all_schedules:
+                owner_group_ids = schedule.get("attributes", {}).get("owner_group_ids", [])
+                team_ids_set.update(owner_group_ids)
+
+            # Fetch all teams
+            teams_map = {}
+            if team_ids_set:
+                teams_response = await make_authenticated_request(
+                    "GET", "/v1/teams", params={"page[size]": 100}
+                )
+                if teams_response and teams_response.status_code == 200:
+                    teams_data = teams_response.json()
+                    for team in teams_data.get("data", []):
+                        teams_map[team.get("id")] = team
+
+            # Build schedule -> team mapping
+            schedule_to_team_map = {}
+            for schedule in all_schedules:
+                schedule_id = schedule.get("id")
+                schedule_name = schedule.get("attributes", {}).get("name", "Unknown")
+                owner_group_ids = schedule.get("attributes", {}).get("owner_group_ids", [])
+
+                # Use the first owner group as the primary team
+                if owner_group_ids:
+                    team_id = owner_group_ids[0]
+                    team_attrs = teams_map.get(team_id, {}).get("attributes", {})
+                    team_name = team_attrs.get("name", "Unknown Team")
+                    schedule_to_team_map[schedule_id] = {
+                        "team_id": team_id,
+                        "team_name": team_name,
+                        "schedule_name": schedule_name,
+                    }
+
+            # Handle team filtering (requires multi-step query)
+            target_schedule_ids = []
+            if team_ids:
+                team_id_list = [tid.strip() for tid in team_ids.split(",") if tid.strip()]
+
+                # Filter schedules by team
+                for schedule_id, team_info in schedule_to_team_map.items():
+                    if str(team_info["team_id"]) in team_id_list:
+                        target_schedule_ids.append(schedule_id)
+
+            # Apply schedule filtering
+            if schedule_ids:
+                schedule_id_list = [sid.strip() for sid in schedule_ids.split(",") if sid.strip()]
+                target_schedule_ids.extend(schedule_id_list)
+
+            if target_schedule_ids:
+                params["schedule_ids[]"] = target_schedule_ids
+
+            # Apply user filtering
+            if user_ids:
+                user_id_list = [uid.strip() for uid in user_ids.split(",") if uid.strip()]
+                params["user_ids[]"] = user_id_list
+
+            # Include relationships for richer data
+            params["include"] = "user,shift_override,on_call_role,schedule_rotation"
+
+            # Query shifts
+            try:
+                shifts_response = await make_authenticated_request(
+                    "GET", "/v1/shifts", params=params
+                )
+
+                if shifts_response is None:
+                    return MCPError.tool_error(
+                        "Failed to get shifts: API request returned None", "execution_error"
+                    )
+
+                shifts_response.raise_for_status()
+                shifts_data = shifts_response.json()
+
+                if shifts_data is None:
+                    return MCPError.tool_error(
+                        "Failed to get shifts: API returned null/empty response",
+                        "execution_error",
+                        details={"status": shifts_response.status_code},
+                    )
+
+                shifts = shifts_data.get("data", [])
+                included = shifts_data.get("included", [])
+            except AttributeError as e:
+                return MCPError.tool_error(
+                    f"Failed to get shifts: Response object error - {str(e)}",
+                    "execution_error",
+                    details={"params": params},
+                )
+            except Exception as e:
+                return MCPError.tool_error(
+                    f"Failed to get shifts: {str(e)}",
+                    "execution_error",
+                    details={"params": params, "error_type": type(e).__name__},
+                )
+
+            # Build lookup maps for included resources
+            users_map = {}
+            on_call_roles_map = {}
+            for resource in included:
+                if resource.get("type") == "users":
+                    users_map[resource.get("id")] = resource
+                elif resource.get("type") == "on_call_roles":
+                    on_call_roles_map[resource.get("id")] = resource
+
+            # Calculate metrics
+            metrics: dict[str, dict[str, Any]] = defaultdict(
+                lambda: {
+                    "shift_count": 0,
+                    "total_hours": 0.0,
+                    "override_count": 0,
+                    "regular_count": 0,
+                    "primary_count": 0,
+                    "secondary_count": 0,
+                    "primary_hours": 0.0,
+                    "secondary_hours": 0.0,
+                    "unknown_role_count": 0,
+                    "unique_days": set(),
+                    "shifts": [],
+                }
+            )
+
+            for shift in shifts:
+                attrs = shift.get("attributes", {})
+                relationships = shift.get("relationships", {})
+
+                # Parse timestamps
+                starts_at = attrs.get("starts_at")
+                ends_at = attrs.get("ends_at")
+                is_override = attrs.get("is_override", False)
+                schedule_id = attrs.get("schedule_id")
+
+                # Calculate shift duration in hours and track unique days
+                duration_hours = 0.0
+                shift_days = set()
+                if starts_at and ends_at:
+                    try:
+                        start_dt = datetime.fromisoformat(starts_at.replace("Z", "+00:00"))
+                        end_dt = datetime.fromisoformat(ends_at.replace("Z", "+00:00"))
+                        duration_hours = (end_dt - start_dt).total_seconds() / 3600
+
+                        # Track all unique calendar days this shift spans
+                        shift_start_date = start_dt.date()
+                        shift_end_date = end_dt.date()
+                        while shift_start_date <= shift_end_date:
+                            shift_days.add(shift_start_date)
+                            shift_start_date += timedelta(days=1)
+                    except (ValueError, AttributeError):
+                        pass
+
+                # Get user info
+                user_rel = relationships.get("user", {}).get("data") or {}
+                user_id = user_rel.get("id")
+                user_name = "Unknown"
+                user_email = ""
+
+                if user_id and user_id in users_map:
+                    user_attrs = users_map[user_id].get("attributes", {})
+                    user_name = user_attrs.get("full_name") or user_attrs.get("email", "Unknown")
+                    user_email = user_attrs.get("email", "")
+
+                # Get on-call role info (primary vs secondary)
+                role_rel = relationships.get("on_call_role", {}).get("data") or {}
+                role_id = role_rel.get("id")
+                role_name = "unknown"
+                is_primary = False
+
+                if role_id and role_id in on_call_roles_map:
+                    role_attrs = on_call_roles_map[role_id].get("attributes", {})
+                    role_name = role_attrs.get("name", "").lower()
+                    # Typically primary roles contain "primary" and secondary contain "secondary"
+                    # Common patterns: "Primary", "Secondary", "L1", "L2", etc.
+                    is_primary = "primary" in role_name or role_name == "l1" or role_name == "p1"
+
+                # Determine grouping key
+                if group_by == "user":
+                    key = f"{user_id}|{user_name}"
+                elif group_by == "schedule":
+                    schedule_info = schedule_to_team_map.get(schedule_id, {})
+                    schedule_name = schedule_info.get("schedule_name", f"schedule_{schedule_id}")
+                    key = f"{schedule_id}|{schedule_name}"
+                elif group_by == "team":
+                    team_info = schedule_to_team_map.get(schedule_id, {})
+                    if team_info:
+                        team_id = team_info["team_id"]
+                        team_name = team_info["team_name"]
+                        key = f"{team_id}|{team_name}"
+                    else:
+                        key = "unknown_team|Unknown Team"
+                else:
+                    key = "all"
+
+                # Update metrics
+                metrics[key]["shift_count"] += 1
+                metrics[key]["total_hours"] += duration_hours
+
+                if is_override:
+                    metrics[key]["override_count"] += 1
+                else:
+                    metrics[key]["regular_count"] += 1
+
+                # Track primary vs secondary
+                if role_id:
+                    if is_primary:
+                        metrics[key]["primary_count"] += 1
+                        metrics[key]["primary_hours"] += duration_hours
+                    else:
+                        metrics[key]["secondary_count"] += 1
+                        metrics[key]["secondary_hours"] += duration_hours
+                else:
+                    metrics[key]["unknown_role_count"] += 1
+
+                # Track unique days
+                metrics[key]["unique_days"].update(shift_days)
+
+                metrics[key]["shifts"].append(
+                    {
+                        "shift_id": shift.get("id"),
+                        "starts_at": starts_at,
+                        "ends_at": ends_at,
+                        "duration_hours": round(duration_hours, 2),
+                        "is_override": is_override,
+                        "schedule_id": schedule_id,
+                        "user_id": user_id,
+                        "user_name": user_name,
+                        "user_email": user_email,
+                        "role_name": role_name,
+                        "is_primary": is_primary,
+                    }
+                )
+
+            # Format results
+            results = []
+            for key, data in metrics.items():
+                if group_by == "user":
+                    user_id, user_name = key.split("|", 1)
+                    result = {
+                        "user_id": user_id,
+                        "user_name": user_name,
+                        "shift_count": data["shift_count"],
+                        "days_on_call": len(data["unique_days"]),
+                        "total_hours": round(data["total_hours"], 2),
+                        "regular_shifts": data["regular_count"],
+                        "override_shifts": data["override_count"],
+                        "primary_shifts": data["primary_count"],
+                        "secondary_shifts": data["secondary_count"],
+                        "primary_hours": round(data["primary_hours"], 2),
+                        "secondary_hours": round(data["secondary_hours"], 2),
+                        "unknown_role_shifts": data["unknown_role_count"],
+                    }
+                elif group_by == "schedule":
+                    schedule_id, schedule_name = key.split("|", 1)
+                    result = {
+                        "schedule_id": schedule_id,
+                        "schedule_name": schedule_name,
+                        "shift_count": data["shift_count"],
+                        "days_on_call": len(data["unique_days"]),
+                        "total_hours": round(data["total_hours"], 2),
+                        "regular_shifts": data["regular_count"],
+                        "override_shifts": data["override_count"],
+                        "primary_shifts": data["primary_count"],
+                        "secondary_shifts": data["secondary_count"],
+                        "primary_hours": round(data["primary_hours"], 2),
+                        "secondary_hours": round(data["secondary_hours"], 2),
+                        "unknown_role_shifts": data["unknown_role_count"],
+                    }
+                elif group_by == "team":
+                    team_id, team_name = key.split("|", 1)
+                    result = {
+                        "team_id": team_id,
+                        "team_name": team_name,
+                        "shift_count": data["shift_count"],
+                        "days_on_call": len(data["unique_days"]),
+                        "total_hours": round(data["total_hours"], 2),
+                        "regular_shifts": data["regular_count"],
1398
|
+
"override_shifts": data["override_count"],
|
|
1399
|
+
"primary_shifts": data["primary_count"],
|
|
1400
|
+
"secondary_shifts": data["secondary_count"],
|
|
1401
|
+
"primary_hours": round(data["primary_hours"], 2),
|
|
1402
|
+
"secondary_hours": round(data["secondary_hours"], 2),
|
|
1403
|
+
"unknown_role_shifts": data["unknown_role_count"],
|
|
1404
|
+
}
|
|
1405
|
+
else:
|
|
1406
|
+
result = {
|
|
1407
|
+
"group_key": key,
|
|
1408
|
+
"shift_count": data["shift_count"],
|
|
1409
|
+
"days_on_call": len(data["unique_days"]),
|
|
1410
|
+
"total_hours": round(data["total_hours"], 2),
|
|
1411
|
+
"regular_shifts": data["regular_count"],
|
|
1412
|
+
"override_shifts": data["override_count"],
|
|
1413
|
+
"primary_shifts": data["primary_count"],
|
|
1414
|
+
"secondary_shifts": data["secondary_count"],
|
|
1415
|
+
"primary_hours": round(data["primary_hours"], 2),
|
|
1416
|
+
"secondary_hours": round(data["secondary_hours"], 2),
|
|
1417
|
+
"unknown_role_shifts": data["unknown_role_count"],
|
|
1418
|
+
}
|
|
1419
|
+
|
|
1420
|
+
results.append(result)
|
|
1421
|
+
|
|
1422
|
+
# Sort by shift count descending
|
|
1423
|
+
results.sort(key=lambda x: x["shift_count"], reverse=True)
|
|
1424
|
+
|
|
1425
|
+
return {
|
|
1426
|
+
"period": {"start_date": start_date, "end_date": end_date},
|
|
1427
|
+
"total_shifts": len(shifts),
|
|
1428
|
+
"grouped_by": group_by,
|
|
1429
|
+
"metrics": results,
|
|
1430
|
+
"summary": {
|
|
1431
|
+
"total_hours": round(sum(m["total_hours"] for m in results), 2),
|
|
1432
|
+
"total_regular_shifts": sum(m["regular_shifts"] for m in results),
|
|
1433
|
+
"total_override_shifts": sum(m["override_shifts"] for m in results),
|
|
1434
|
+
"unique_people": len(results) if group_by == "user" else None,
|
|
1435
|
+
},
|
|
1436
|
+
}
|
|
1437
|
+
|
|
1438
|
+
except Exception as e:
|
|
1439
|
+
import traceback
|
|
1440
|
+
|
|
1441
|
+
error_type, error_message = MCPError.categorize_error(e)
|
|
1442
|
+
return MCPError.tool_error(
|
|
1443
|
+
f"Failed to get on-call shift metrics: {error_message}",
|
|
1444
|
+
error_type,
|
|
1445
|
+
details={
|
|
1446
|
+
"params": {"start_date": start_date, "end_date": end_date},
|
|
1447
|
+
"exception_type": type(e).__name__,
|
|
1448
|
+
"exception_str": str(e),
|
|
1449
|
+
"traceback": traceback.format_exc(),
|
|
1450
|
+
},
|
|
1451
|
+
)
|
|
1452
|
+
|
|
1453
|
+
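As an illustrative aside (not part of the diff): the aggregation added above folds each shift into a defaultdict of counters keyed by user, schedule, or team. A minimal standalone sketch of that pattern, using made-up shift dicts rather than real /v1/shifts payloads, looks like this:

from collections import defaultdict
from datetime import datetime

# Hypothetical, simplified shifts; the real tool reads these from the Rootly API.
shifts = [
    {"user": "alice", "starts_at": "2025-10-01T00:00:00+00:00", "ends_at": "2025-10-01T08:00:00+00:00"},
    {"user": "bob", "starts_at": "2025-10-01T08:00:00+00:00", "ends_at": "2025-10-01T16:00:00+00:00"},
    {"user": "alice", "starts_at": "2025-10-02T00:00:00+00:00", "ends_at": "2025-10-02T08:00:00+00:00"},
]

metrics = defaultdict(lambda: {"shift_count": 0, "total_hours": 0.0, "unique_days": set()})
for shift in shifts:
    start = datetime.fromisoformat(shift["starts_at"])
    end = datetime.fromisoformat(shift["ends_at"])
    entry = metrics[shift["user"]]
    entry["shift_count"] += 1                                  # one more shift for this key
    entry["total_hours"] += (end - start).total_seconds() / 3600
    entry["unique_days"].add(start.date())                     # distinct calendar days on call

for user, data in metrics.items():
    print(user, data["shift_count"], round(data["total_hours"], 2), len(data["unique_days"]))
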
+    @mcp.tool()
+    async def get_oncall_handoff_summary(
+        team_ids: Annotated[
+            str,
+            Field(description="Comma-separated list of team IDs to filter schedules (optional)"),
+        ] = "",
+        schedule_ids: Annotated[
+            str, Field(description="Comma-separated list of schedule IDs (optional)")
+        ] = "",
+        timezone: Annotated[
+            str,
+            Field(
+                description="Timezone to use for display and filtering (e.g., 'America/Los_Angeles', 'Europe/London', 'Asia/Tokyo'). IMPORTANT: If user mentions a city, location, or region (e.g., 'Toronto', 'APAC', 'my time'), infer the appropriate IANA timezone. Defaults to UTC if not specified."
+            ),
+        ] = "UTC",
+        filter_by_region: Annotated[
+            bool,
+            Field(
+                description="If True, only show on-call for people whose shifts are during business hours (9am-5pm) in the specified timezone. Defaults to False."
+            ),
+        ] = False,
+        include_incidents: Annotated[
+            bool,
+            Field(
+                description="If True, fetch incidents for each shift (slower). If False, only show on-call info (faster). Defaults to False for better performance."
+            ),
+        ] = False,
+    ) -> dict:
+        """
+        Get current on-call handoff summary. Shows who's currently on-call and who's next.
+        Optionally fetch incidents (set include_incidents=True, but slower).
+
+        Timezone handling: If user mentions their location/timezone, infer it (e.g., "Toronto" → "America/Toronto",
+        "my time" → ask clarifying question or use a common timezone).
+
+        Regional filtering: Use timezone + filter_by_region=True to see only people on-call
+        during business hours in that region (e.g., timezone='Asia/Tokyo', filter_by_region=True
+        shows only APAC on-call during APAC business hours).
+
+        Performance: By default, incidents are NOT fetched for faster response. Set include_incidents=True
+        to fetch incidents for each shift (slower, may timeout with many schedules).
+
+        Useful for:
+        - Quick on-call status checks
+        - Daily handoff meetings
+        - Regional on-call status (APAC, EU, Americas)
+        - Team coordination across timezones
+        """
+        try:
+            from datetime import datetime, timedelta
+            from zoneinfo import ZoneInfo
+
+            # Validate and set timezone
+            try:
+                tz = ZoneInfo(timezone)
+            except Exception:
+                tz = ZoneInfo("UTC")  # Fallback to UTC if invalid timezone
+
+            now = datetime.now(tz)
+
+            def convert_to_timezone(iso_string: str) -> str:
+                """Convert ISO timestamp to target timezone."""
+                if not iso_string:
+                    return iso_string
+                try:
+                    dt = datetime.fromisoformat(iso_string.replace("Z", "+00:00"))
+                    dt_converted = dt.astimezone(tz)
+                    return dt_converted.isoformat()
+                except (ValueError, AttributeError):
+                    return iso_string  # Return original if conversion fails
+
+            # Fetch schedules with team info (with pagination)
+            all_schedules = []
+            page = 1
+            max_pages = 5  # Schedules shouldn't have many pages
+
+            while page <= max_pages:
+                schedules_response = await make_authenticated_request(
+                    "GET", "/v1/schedules", params={"page[size]": 100, "page[number]": page}
+                )
+                if not schedules_response:
+                    return MCPError.tool_error(
+                        "Failed to fetch schedules - no response from API", "execution_error"
+                    )
+
+                if schedules_response.status_code != 200:
+                    return MCPError.tool_error(
+                        f"Failed to fetch schedules - API returned status {schedules_response.status_code}",
+                        "execution_error",
+                        details={"status_code": schedules_response.status_code},
+                    )
+
+                schedules_data = schedules_response.json()
+                page_schedules = schedules_data.get("data", [])
+
+                if not page_schedules:
+                    break
+
+                all_schedules.extend(page_schedules)
+
+                # Check if there are more pages
+                meta = schedules_data.get("meta", {})
+                total_pages = meta.get("total_pages", 1)
+
+                if page >= total_pages:
+                    break
+
+                page += 1
+
+            # Build team mapping
+            team_ids_set = set()
+            for schedule in all_schedules:
+                owner_group_ids = schedule.get("attributes", {}).get("owner_group_ids", [])
+                team_ids_set.update(owner_group_ids)
+
+            teams_map = {}
+            if team_ids_set:
+                teams_response = await make_authenticated_request(
+                    "GET", "/v1/teams", params={"page[size]": 100}
+                )
+                if teams_response and teams_response.status_code == 200:
+                    teams_data = teams_response.json()
+                    for team in teams_data.get("data", []):
+                        teams_map[team.get("id")] = team
+
+            # Filter schedules
+            target_schedules = []
+            team_filter = (
+                [tid.strip() for tid in team_ids.split(",") if tid.strip()] if team_ids else []
+            )
+            schedule_filter = (
+                [sid.strip() for sid in schedule_ids.split(",") if sid.strip()]
+                if schedule_ids
+                else []
+            )
+
+            for schedule in all_schedules:
+                schedule_id = schedule.get("id")
+                owner_group_ids = schedule.get("attributes", {}).get("owner_group_ids", [])
+
+                # Apply filters
+                if schedule_filter and schedule_id not in schedule_filter:
+                    continue
+                if team_filter and not any(str(tgid) in team_filter for tgid in owner_group_ids):
+                    continue
+
+                target_schedules.append(schedule)
+
+            # Get current and upcoming shifts for each schedule
+            handoff_data = []
+            for schedule in target_schedules:
+                schedule_id = schedule.get("id")
+                schedule_attrs = schedule.get("attributes", {})
+                schedule_name = schedule_attrs.get("name", "Unknown Schedule")
+                owner_group_ids = schedule_attrs.get("owner_group_ids", [])
+
+                # Get team info
+                team_name = "No Team"
+                if owner_group_ids:
+                    team_id = owner_group_ids[0]
+                    team_attrs = teams_map.get(team_id, {}).get("attributes", {})
+                    team_name = team_attrs.get("name", "Unknown Team")
+
+                # Query shifts for this schedule
+                shifts_response = await make_authenticated_request(
+                    "GET",
+                    "/v1/shifts",
+                    params={
+                        "schedule_ids[]": [schedule_id],
+                        "filter[starts_at][gte]": (now - timedelta(days=1)).isoformat(),
+                        "filter[starts_at][lte]": (now + timedelta(days=7)).isoformat(),
+                        "include": "user,on_call_role",
+                        "page[size]": 50,
+                    },
+                )
+
+                if not shifts_response:
+                    continue
+
+                shifts_data = shifts_response.json()
+                shifts = shifts_data.get("data", [])
+                included = shifts_data.get("included", [])
+
+                # Build user and role maps
+                users_map = {}
+                roles_map = {}
+                for resource in included:
+                    if resource.get("type") == "users":
+                        users_map[resource.get("id")] = resource
+                    elif resource.get("type") == "on_call_roles":
+                        roles_map[resource.get("id")] = resource
+
+                # Find current and next shifts
+                current_shift = None
+                next_shift = None
+
+                for shift in sorted(
+                    shifts, key=lambda s: s.get("attributes", {}).get("starts_at", "")
+                ):
+                    attrs = shift.get("attributes", {})
+                    starts_at_str = attrs.get("starts_at")
+                    ends_at_str = attrs.get("ends_at")
+
+                    if not starts_at_str or not ends_at_str:
+                        continue
+
+                    try:
+                        starts_at = datetime.fromisoformat(starts_at_str.replace("Z", "+00:00"))
+                        ends_at = datetime.fromisoformat(ends_at_str.replace("Z", "+00:00"))
+
+                        # Current shift: ongoing now
+                        if starts_at <= now <= ends_at:
+                            current_shift = shift
+                        # Next shift: starts after now and no current shift found yet
+                        elif starts_at > now and not next_shift:
+                            next_shift = shift
+
+                    except (ValueError, AttributeError):
+                        continue
+
+                # Build response for this schedule
+                schedule_info = {
+                    "schedule_id": schedule_id,
+                    "schedule_name": schedule_name,
+                    "team_name": team_name,
+                    "current_oncall": None,
+                    "next_oncall": None,
+                }
+
+                if current_shift:
+                    current_attrs = current_shift.get("attributes", {})
+                    current_rels = current_shift.get("relationships", {})
+                    user_data = current_rels.get("user", {}).get("data") or {}
+                    user_id = user_data.get("id")
+                    role_data = current_rels.get("on_call_role", {}).get("data") or {}
+                    role_id = role_data.get("id")
+
+                    user_name = "Unknown"
+                    if user_id and user_id in users_map:
+                        user_attrs = users_map[user_id].get("attributes", {})
+                        user_name = user_attrs.get("full_name") or user_attrs.get(
+                            "email", "Unknown"
+                        )
+
+                    role_name = "Unknown Role"
+                    if role_id and role_id in roles_map:
+                        role_attrs = roles_map[role_id].get("attributes", {})
+                        role_name = role_attrs.get("name", "Unknown Role")
+
+                    schedule_info["current_oncall"] = {
+                        "user_name": user_name,
+                        "user_id": user_id,
+                        "role": role_name,
+                        "starts_at": convert_to_timezone(current_attrs.get("starts_at")),
+                        "ends_at": convert_to_timezone(current_attrs.get("ends_at")),
+                        "is_override": current_attrs.get("is_override", False),
+                    }
+
+                if next_shift:
+                    next_attrs = next_shift.get("attributes", {})
+                    next_rels = next_shift.get("relationships", {})
+                    user_data = next_rels.get("user", {}).get("data") or {}
+                    user_id = user_data.get("id")
+                    role_data = next_rels.get("on_call_role", {}).get("data") or {}
+                    role_id = role_data.get("id")
+
+                    user_name = "Unknown"
+                    if user_id and user_id in users_map:
+                        user_attrs = users_map[user_id].get("attributes", {})
+                        user_name = user_attrs.get("full_name") or user_attrs.get(
+                            "email", "Unknown"
+                        )
+
+                    role_name = "Unknown Role"
+                    if role_id and role_id in roles_map:
+                        role_attrs = roles_map[role_id].get("attributes", {})
+                        role_name = role_attrs.get("name", "Unknown Role")
+
+                    schedule_info["next_oncall"] = {
+                        "user_name": user_name,
+                        "user_id": user_id,
+                        "role": role_name,
+                        "starts_at": convert_to_timezone(next_attrs.get("starts_at")),
+                        "ends_at": convert_to_timezone(next_attrs.get("ends_at")),
+                        "is_override": next_attrs.get("is_override", False),
+                    }
+
+                handoff_data.append(schedule_info)
+
+            # Filter by region if requested
+            if filter_by_region:
+                # Define business hours (9am-5pm) in the target timezone
+                business_start_hour = 9
+                business_end_hour = 17
+
+                # Create datetime objects for today's business hours in target timezone
+                today_business_start = now.replace(
+                    hour=business_start_hour, minute=0, second=0, microsecond=0
+                )
+                today_business_end = now.replace(
+                    hour=business_end_hour, minute=0, second=0, microsecond=0
+                )
+
+                # Filter schedules where current shift overlaps with business hours
+                filtered_data = []
+                for schedule_info in handoff_data:
+                    current_oncall = schedule_info.get("current_oncall")
+                    if current_oncall:
+                        # Parse shift times (already in target timezone)
+                        shift_start_str = current_oncall.get("starts_at")
+                        shift_end_str = current_oncall.get("ends_at")
+
+                        if shift_start_str and shift_end_str:
+                            try:
+                                shift_start = datetime.fromisoformat(
+                                    shift_start_str.replace("Z", "+00:00")
+                                )
+                                shift_end = datetime.fromisoformat(
+                                    shift_end_str.replace("Z", "+00:00")
+                                )
+
+                                # Check if shift overlaps with today's business hours
+                                # Shift overlaps if: shift_start < business_end AND shift_end > business_start
+                                if (
+                                    shift_start < today_business_end
+                                    and shift_end > today_business_start
+                                ):
+                                    filtered_data.append(schedule_info)
+                            except (ValueError, AttributeError):
+                                # Skip if we can't parse times
+                                continue
+
+                handoff_data = filtered_data
+
+            # Fetch incidents for each current shift (only if requested)
+            if include_incidents:
+                for schedule_info in handoff_data:
+                    current_oncall = schedule_info.get("current_oncall")
+                    if current_oncall:
+                        shift_start = current_oncall["starts_at"]
+                        shift_end = current_oncall["ends_at"]
+
+                        incidents_result = await _fetch_shift_incidents_internal(
+                            start_time=shift_start,
+                            end_time=shift_end,
+                            schedule_ids="",
+                            severity="",
+                            status="",
+                            tags="",
+                        )
+
+                        schedule_info["shift_incidents"] = (
+                            incidents_result if incidents_result.get("success") else None
+                        )
+                    else:
+                        schedule_info["shift_incidents"] = None
+            else:
+                # Skip incident fetching for better performance
+                for schedule_info in handoff_data:
+                    schedule_info["shift_incidents"] = None
+
+            return {
+                "success": True,
+                "timestamp": now.isoformat(),
+                "timezone": timezone,
+                "schedules": handoff_data,
+                "summary": {
+                    "total_schedules": len(handoff_data),
+                    "schedules_with_current_oncall": sum(
+                        1 for s in handoff_data if s["current_oncall"]
+                    ),
+                    "schedules_with_next_oncall": sum(1 for s in handoff_data if s["next_oncall"]),
+                    "total_incidents": sum(
+                        s.get("shift_incidents", {}).get("summary", {}).get("total_incidents", 0)
+                        for s in handoff_data
+                        if s.get("shift_incidents")
+                    ),
+                },
+            }
+
+        except Exception as e:
+            import traceback
+
+            error_type, error_message = MCPError.categorize_error(e)
+            return MCPError.tool_error(
+                f"Failed to get on-call handoff summary: {error_message}",
+                error_type,
+                details={
+                    "exception_type": type(e).__name__,
+                    "exception_str": str(e),
+                    "traceback": traceback.format_exc(),
+                },
+            )
+
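A small sketch (illustrative only, with made-up timestamps) of the two ideas the handoff tool above combines: falling back to UTC when the requested timezone name is invalid, and treating a shift as "in region" when it overlaps the local 9am-5pm window. Two intervals overlap exactly when each one starts before the other ends.

from datetime import datetime, timedelta
from zoneinfo import ZoneInfo

def safe_zone(name: str) -> ZoneInfo:
    # Fall back to UTC when the IANA name is unknown, mirroring the tool's behaviour.
    try:
        return ZoneInfo(name)
    except Exception:
        return ZoneInfo("UTC")

tz = safe_zone("Asia/Tokyo")
now = datetime.now(tz)
business_start = now.replace(hour=9, minute=0, second=0, microsecond=0)
business_end = now.replace(hour=17, minute=0, second=0, microsecond=0)

# Hypothetical shift covering the local afternoon.
shift_start = now.replace(hour=12, minute=0, second=0, microsecond=0)
shift_end = shift_start + timedelta(hours=8)

overlaps = shift_start < business_end and shift_end > business_start
print(overlaps)
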
+    async def _fetch_shift_incidents_internal(
+        start_time: str,
+        end_time: str,
+        schedule_ids: str = "",
+        severity: str = "",
+        status: str = "",
+        tags: str = "",
+    ) -> dict:
+        """Internal helper to fetch incidents - used by both get_shift_incidents and get_oncall_handoff_summary."""
+        try:
+            from datetime import datetime
+
+            # Build query parameters
+            # Fetch incidents that:
+            # 1. Were created during the shift (created_at in range)
+            # 2. OR are currently active/unresolved (started but not resolved yet)
+            params = {"page[size]": 100, "sort": "-created_at"}
+
+            # Get incidents created during shift OR still active
+            # We'll fetch all incidents and filter in-memory for active ones
+            params["filter[started_at][lte]"] = end_time  # Started before shift ended
+
+            # Add severity filter if provided
+            if severity:
+                params["filter[severity]"] = severity.lower()
+
+            # Add status filter if provided
+            if status:
+                params["filter[status]"] = status.lower()
+
+            # Add tags filter if provided
+            if tags:
+                tag_list = [t.strip() for t in tags.split(",") if t.strip()]
+                if tag_list:
+                    params["filter[tags][]"] = tag_list
+
+            # Query incidents with pagination
+            all_incidents = []
+            page = 1
+            max_pages = 10  # Safety limit to prevent infinite loops
+
+            while page <= max_pages:
+                params["page[number]"] = page
+                incidents_response = await make_authenticated_request(
+                    "GET", "/v1/incidents", params=params
+                )
+
+                if not incidents_response:
+                    return MCPError.tool_error(
+                        "Failed to fetch incidents - no response from API", "execution_error"
+                    )
+
+                if incidents_response.status_code != 200:
+                    return MCPError.tool_error(
+                        f"Failed to fetch incidents - API returned status {incidents_response.status_code}",
+                        "execution_error",
+                        details={
+                            "status_code": incidents_response.status_code,
+                            "time_range": f"{start_time} to {end_time}",
+                        },
+                    )
+
+                incidents_data = incidents_response.json()
+                page_incidents = incidents_data.get("data", [])
+
+                if not page_incidents:
+                    break  # No more data
+
+                all_incidents.extend(page_incidents)
+
+                # Check if there are more pages
+                meta = incidents_data.get("meta", {})
+                total_pages = meta.get("total_pages", 1)
+
+                if page >= total_pages:
+                    break  # Reached the last page
+
+                page += 1
+
+            # Filter incidents to include:
+            # 1. Created during shift (created_at between start_time and end_time)
+            # 2. Currently active (started but not resolved, regardless of when created)
+            from datetime import timezone as dt_timezone
+
+            shift_start_dt = datetime.fromisoformat(start_time.replace("Z", "+00:00"))
+            shift_end_dt = datetime.fromisoformat(end_time.replace("Z", "+00:00"))
+            now_dt = datetime.now(dt_timezone.utc)
+
+            # Format incidents for handoff summary
+            incidents_summary = []
+            for incident in all_incidents:
+                incident_id = incident.get("id")
+                attrs = incident.get("attributes", {})
+
+                # Check if incident is relevant to this shift
+                created_at = attrs.get("created_at")
+                started_at = attrs.get("started_at")
+                resolved_at = attrs.get("resolved_at")
+
+                # Parse timestamps
+                try:
+                    created_dt = (
+                        datetime.fromisoformat(created_at.replace("Z", "+00:00"))
+                        if created_at
+                        else None
+                    )
+                    started_dt = (
+                        datetime.fromisoformat(started_at.replace("Z", "+00:00"))
+                        if started_at
+                        else None
+                    )
+                    resolved_dt = (
+                        datetime.fromisoformat(resolved_at.replace("Z", "+00:00"))
+                        if resolved_at
+                        else None
+                    )
+                except (ValueError, AttributeError):
+                    continue  # Skip if we can't parse dates
+
+                # Include incident if:
+                # 1. Created during shift
+                # 2. Started during shift
+                # 3. Resolved during shift
+                # 4. Currently active (not resolved and started before now)
+                include_incident = False
+
+                if created_dt and shift_start_dt <= created_dt <= shift_end_dt:
+                    include_incident = True  # Created during shift
+
+                if started_dt and shift_start_dt <= started_dt <= shift_end_dt:
+                    include_incident = True  # Started during shift
+
+                if resolved_dt and shift_start_dt <= resolved_dt <= shift_end_dt:
+                    include_incident = True  # Resolved during shift
+
+                if not resolved_dt and started_dt and started_dt <= now_dt:
+                    include_incident = True  # Currently active
+
+                if not include_incident:
+                    continue
+
+                # Calculate duration if resolved
+                duration_minutes = None
+                if started_dt and resolved_dt:
+                    duration_minutes = int((resolved_dt - started_dt).total_seconds() / 60)
+
+                # Build narrative summary
+                narrative_parts = []
+
+                # What happened
+                title = attrs.get("title", "Untitled Incident")
+                severity = attrs.get("severity", "unknown")
+                narrative_parts.append(f"[{severity.upper()}] {title}")
+
+                # When and duration
+                if started_at:
+                    narrative_parts.append(f"Started at {started_at}")
+                if resolved_at:
+                    narrative_parts.append(f"Resolved at {resolved_at}")
+                    if duration_minutes:
+                        narrative_parts.append(f"Duration: {duration_minutes} minutes")
+                elif attrs.get("status"):
+                    narrative_parts.append(f"Status: {attrs.get('status')}")
+
+                # What was the issue
+                if attrs.get("summary"):
+                    narrative_parts.append(f"Details: {attrs.get('summary')}")
+
+                # Impact
+                if attrs.get("customer_impact_summary"):
+                    narrative_parts.append(f"Impact: {attrs.get('customer_impact_summary')}")
+
+                # Resolution (if available)
+                if attrs.get("mitigation"):
+                    narrative_parts.append(f"Resolution: {attrs.get('mitigation')}")
+                elif attrs.get("action_items_count") and attrs.get("action_items_count") > 0:
+                    narrative_parts.append(
+                        f"Action items created: {attrs.get('action_items_count')}"
+                    )
+
+                narrative = " | ".join(narrative_parts)
+
+                incidents_summary.append(
+                    {
+                        "incident_id": incident_id,
+                        "title": attrs.get("title", "Untitled Incident"),
+                        "severity": attrs.get("severity"),
+                        "status": attrs.get("status"),
+                        "started_at": started_at,
+                        "resolved_at": resolved_at,
+                        "duration_minutes": duration_minutes,
+                        "summary": attrs.get("summary"),
+                        "impact": attrs.get("customer_impact_summary"),
+                        "mitigation": attrs.get("mitigation"),
+                        "narrative": narrative,
+                        "incident_url": attrs.get("incident_url"),
+                    }
+                )
+
+            # Group by severity
+            by_severity = {}
+            for inc in incidents_summary:
+                sev = inc["severity"] or "unknown"
+                if sev not in by_severity:
+                    by_severity[sev] = []
+                by_severity[sev].append(inc)
+
+            # Calculate statistics
+            total_incidents = len(incidents_summary)
+            resolved_count = sum(1 for inc in incidents_summary if inc["resolved_at"])
+            ongoing_count = total_incidents - resolved_count
+
+            avg_resolution_time = None
+            durations = [
+                inc["duration_minutes"] for inc in incidents_summary if inc["duration_minutes"]
+            ]
+            if durations:
+                avg_resolution_time = int(sum(durations) / len(durations))
+
+            return {
+                "success": True,
+                "period": {"start_time": start_time, "end_time": end_time},
+                "summary": {
+                    "total_incidents": total_incidents,
+                    "resolved": resolved_count,
+                    "ongoing": ongoing_count,
+                    "average_resolution_minutes": avg_resolution_time,
+                    "by_severity": {k: len(v) for k, v in by_severity.items()},
+                },
+                "incidents": incidents_summary,
+            }
+
+        except Exception as e:
+            import traceback
+
+            error_type, error_message = MCPError.categorize_error(e)
+            return MCPError.tool_error(
+                f"Failed to get shift incidents: {error_message}",
+                error_type,
+                details={
+                    "params": {"start_time": start_time, "end_time": end_time},
+                    "exception_type": type(e).__name__,
+                    "exception_str": str(e),
+                    "traceback": traceback.format_exc(),
+                },
+            )
+
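Sketching the relevance test the helper above applies to each incident (hedged: the parameter names below are simplified stand-ins for the parsed created/started/resolved datetimes):

from datetime import datetime, timezone

def incident_relevant(created, started, resolved, shift_start, shift_end) -> bool:
    """True if the incident touches the shift window or is still open."""
    now = datetime.now(timezone.utc)
    if created and shift_start <= created <= shift_end:
        return True  # created during the shift
    if started and shift_start <= started <= shift_end:
        return True  # started during the shift
    if resolved and shift_start <= resolved <= shift_end:
        return True  # resolved during the shift
    return bool(started and not resolved and started <= now)  # still active
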
+    @mcp.tool()
+    async def get_shift_incidents(
+        start_time: Annotated[
+            str,
+            Field(
+                description="Start time for incident search (ISO 8601 format, e.g., '2025-10-01T00:00:00Z')"
+            ),
+        ],
+        end_time: Annotated[
+            str,
+            Field(
+                description="End time for incident search (ISO 8601 format, e.g., '2025-10-01T23:59:59Z')"
+            ),
+        ],
+        schedule_ids: Annotated[
+            str,
+            Field(
+                description="Comma-separated list of schedule IDs to filter incidents (optional)"
+            ),
+        ] = "",
+        severity: Annotated[
+            str,
+            Field(description="Filter by severity: 'critical', 'high', 'medium', 'low' (optional)"),
+        ] = "",
+        status: Annotated[
+            str,
+            Field(
+                description="Filter by status: 'started', 'detected', 'acknowledged', 'investigating', 'identified', 'monitoring', 'resolved', 'cancelled' (optional)"
+            ),
+        ] = "",
+        tags: Annotated[
+            str,
+            Field(description="Comma-separated list of tag slugs to filter incidents (optional)"),
+        ] = "",
+    ) -> dict:
+        """
+        Get incidents and alerts that occurred during a specific shift or time period.
+
+        Useful for:
+        - Shift handoff summaries showing what happened during the shift
+        - Post-shift debriefs and reporting
+        - Incident analysis by time period
+        - Understanding team workload during specific shifts
+
+        Returns incident details including severity, status, duration, and basic summary.
+        """
+        return await _fetch_shift_incidents_internal(
+            start_time, end_time, schedule_ids, severity, status, tags
+        )
+
     # Add MCP resources for incidents and teams
     @mcp.resource("incident://{incident_id}")
     async def get_incident_resource(incident_id: str):
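Both tools above page through JSON:API collections the same way: request page[size]/page[number], stop on an empty page or when meta.total_pages is reached, and cap the number of pages. A generic, hedged sketch of that loop (the `request` argument is a stand-in for the server's make_authenticated_request helper):

async def fetch_all(request, path: str, page_size: int = 100, max_pages: int = 10) -> list:
    """Collect every page of a JSON:API collection, bounded by max_pages."""
    items, page = [], 1
    while page <= max_pages:
        response = await request("GET", path, params={"page[size]": page_size, "page[number]": page})
        if not response or response.status_code != 200:
            break
        payload = response.json()
        data = payload.get("data", [])
        if not data:
            break  # no more data
        items.extend(data)
        if page >= payload.get("meta", {}).get("total_pages", 1):
            break  # reached the last page
        page += 1
    return items
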
@@ -661,26 +2148,26 @@ def create_rootly_mcp_server(
         try:
             response = await make_authenticated_request("GET", f"/v1/incidents/{incident_id}")
             response.raise_for_status()
-            incident_data = response.json()
-
+            incident_data = strip_heavy_nested_data({"data": [response.json().get("data", {})]})
+
             # Format incident data as readable text
-            incident = incident_data.get("data", {})
+            incident = incident_data.get("data", [{}])[0]
             attributes = incident.get("attributes", {})
-
+
             text_content = f"""Incident #{incident_id}
-Title: {attributes.get(
-Status: {attributes.get(
-Severity: {attributes.get(
-Created: {attributes.get(
-Updated: {attributes.get(
-Summary: {attributes.get(
-URL: {attributes.get(
-
+Title: {attributes.get("title", "N/A")}
+Status: {attributes.get("status", "N/A")}
+Severity: {attributes.get("severity", "N/A")}
+Created: {attributes.get("created_at", "N/A")}
+Updated: {attributes.get("updated_at", "N/A")}
+Summary: {attributes.get("summary", "N/A")}
+URL: {attributes.get("url", "N/A")}"""
+
             return {
                 "uri": f"incident://{incident_id}",
                 "name": f"Incident #{incident_id}",
                 "text": text_content,
-                "mimeType": "text/plain"
+                "mimeType": "text/plain",
             }
         except Exception as e:
             error_type, error_message = MCPError.categorize_error(e)
@@ -688,7 +2175,7 @@ URL: {attributes.get('url', 'N/A')}"""
                 "uri": f"incident://{incident_id}",
                 "name": f"Incident #{incident_id} (Error)",
                 "text": f"Error ({error_type}): {error_message}",
-                "mimeType": "text/plain"
+                "mimeType": "text/plain",
             }

     @mcp.resource("team://{team_id}")
@@ -698,23 +2185,23 @@ URL: {attributes.get('url', 'N/A')}"""
             response = await make_authenticated_request("GET", f"/v1/teams/{team_id}")
             response.raise_for_status()
             team_data = response.json()
-
+
             # Format team data as readable text
             team = team_data.get("data", {})
             attributes = team.get("attributes", {})
-
+
             text_content = f"""Team #{team_id}
-Name: {attributes.get(
-Color: {attributes.get(
-Slug: {attributes.get(
-Created: {attributes.get(
-Updated: {attributes.get(
-
+Name: {attributes.get("name", "N/A")}
+Color: {attributes.get("color", "N/A")}
+Slug: {attributes.get("slug", "N/A")}
+Created: {attributes.get("created_at", "N/A")}
+Updated: {attributes.get("updated_at", "N/A")}"""
+
             return {
                 "uri": f"team://{team_id}",
                 "name": f"Team: {attributes.get('name', team_id)}",
                 "text": text_content,
-                "mimeType": "text/plain"
+                "mimeType": "text/plain",
             }
         except Exception as e:
             error_type, error_message = MCPError.categorize_error(e)
@@ -722,50 +2209,56 @@ Updated: {attributes.get('updated_at', 'N/A')}"""
                 "uri": f"team://{team_id}",
                 "name": f"Team #{team_id} (Error)",
                 "text": f"Error ({error_type}): {error_message}",
-                "mimeType": "text/plain"
+                "mimeType": "text/plain",
             }

     @mcp.resource("rootly://incidents")
     async def list_incidents_resource():
         """List recent incidents as an MCP resource for quick reference."""
         try:
-            response = await make_authenticated_request(
-                "
-                "
-
-
+            response = await make_authenticated_request(
+                "GET",
+                "/v1/incidents",
+                params={
+                    "page[size]": 10,
+                    "page[number]": 1,
+                    "include": "",
+                    "fields[incidents]": "id,title,status",
+                },
+            )
             response.raise_for_status()
-            data = response.json()
-
+            data = strip_heavy_nested_data(response.json())
+
             incidents = data.get("data", [])
             text_lines = ["Recent Incidents:\n"]
-
+
             for incident in incidents:
                 attrs = incident.get("attributes", {})
-                text_lines.append(
-
+                text_lines.append(
+                    f"• #{incident.get('id', 'N/A')} - {attrs.get('title', 'N/A')} [{attrs.get('status', 'N/A')}]"
+                )
+
             return {
                 "uri": "rootly://incidents",
                 "name": "Recent Incidents",
                 "text": "\n".join(text_lines),
-                "mimeType": "text/plain"
+                "mimeType": "text/plain",
             }
         except Exception as e:
             error_type, error_message = MCPError.categorize_error(e)
             return {
-            "uri": "rootly://incidents",
+                "uri": "rootly://incidents",
                 "name": "Recent Incidents (Error)",
                 "text": f"Error ({error_type}): {error_message}",
-                "mimeType": "text/plain"
+                "mimeType": "text/plain",
             }

-
     # Log server creation (tool count will be shown when tools are accessed)
     logger.info("Created Rootly MCP Server successfully")
     return mcp


-def _load_swagger_spec(swagger_path:
+def _load_swagger_spec(swagger_path: str | None = None) -> dict[str, Any]:
     """
     Load the Swagger specification from a file or URL.

@@ -780,7 +2273,7 @@ def _load_swagger_spec(swagger_path: Optional[str] = None) -> Dict[str, Any]:
         logger.info(f"Using provided Swagger path: {swagger_path}")
         if not os.path.isfile(swagger_path):
             raise FileNotFoundError(f"Swagger file not found at {swagger_path}")
-        with open(swagger_path,
+        with open(swagger_path, encoding="utf-8") as f:
             return json.load(f)
     else:
         # First, check in the package data directory
@@ -788,7 +2281,7 @@ def _load_swagger_spec(swagger_path: Optional[str] = None) -> Dict[str, Any]:
             package_data_path = Path(__file__).parent / "data" / "swagger.json"
             if package_data_path.is_file():
                 logger.info(f"Found Swagger file in package data: {package_data_path}")
-                with open(package_data_path,
+                with open(package_data_path, encoding="utf-8") as f:
                     return json.load(f)
         except Exception as e:
             logger.debug(f"Could not load Swagger file from package data: {e}")
@@ -801,7 +2294,7 @@ def _load_swagger_spec(swagger_path: Optional[str] = None) -> Dict[str, Any]:
         local_swagger_path = current_dir / "swagger.json"
         if local_swagger_path.is_file():
             logger.info(f"Found Swagger file at {local_swagger_path}")
-            with open(local_swagger_path,
+            with open(local_swagger_path, encoding="utf-8") as f:
                 return json.load(f)

         # Check parent directories
@@ -809,7 +2302,7 @@ def _load_swagger_spec(swagger_path: Optional[str] = None) -> Dict[str, Any]:
             parent_swagger_path = parent / "swagger.json"
             if parent_swagger_path.is_file():
                 logger.info(f"Found Swagger file at {parent_swagger_path}")
-                with open(parent_swagger_path,
+                with open(parent_swagger_path, encoding="utf-8") as f:
                     return json.load(f)

         # If the file wasn't found, fetch it from the URL and save it
@@ -829,7 +2322,7 @@ def _load_swagger_spec(swagger_path: Optional[str] = None) -> Dict[str, Any]:
     return swagger_spec


-def _fetch_swagger_from_url(url: str = SWAGGER_URL) ->
+def _fetch_swagger_from_url(url: str = SWAGGER_URL) -> dict[str, Any]:
     """
     Fetch the Swagger specification from the specified URL.

@@ -841,7 +2334,7 @@ def _fetch_swagger_from_url(url: str = SWAGGER_URL) -> Dict[str, Any]:
     """
     logger.info(f"Fetching Swagger specification from {url}")
     try:
-        response = requests.get(url)
+        response = requests.get(url, timeout=30)
         response.raise_for_status()
         return response.json()
     except requests.RequestException as e:
@@ -852,7 +2345,7 @@ def _fetch_swagger_from_url(url: str = SWAGGER_URL) -> Dict[str, Any]:
         raise Exception(f"Failed to parse Swagger specification: {e}")


-def _filter_openapi_spec(spec:
+def _filter_openapi_spec(spec: dict[str, Any], allowed_paths: list[str]) -> dict[str, Any]:
     """
     Filter an OpenAPI specification to only include specified paths and clean up schema references.

@@ -869,9 +2362,7 @@ def _filter_openapi_spec(spec: Dict[str, Any], allowed_paths: List[str]) -> Dict
     # Filter paths
     original_paths = filtered_spec.get("paths", {})
     filtered_paths = {
-        path: path_item
-        for path, path_item in original_paths.items()
-        if path in allowed_paths
+        path: path_item for path, path_item in original_paths.items() if path in allowed_paths
     }

     filtered_spec["paths"] = filtered_paths
@@ -887,7 +2378,7 @@ def _filter_openapi_spec(spec: Dict[str, Any], allowed_paths: List[str]) -> Dict
             if "requestBody" in operation:
                 request_body = operation["requestBody"]
                 if "content" in request_body:
-                    for
+                    for _content_type, content_info in request_body["content"].items():
                         if "schema" in content_info:
                             schema = content_info["schema"]
                             # Remove problematic $ref references
@@ -896,20 +2387,20 @@ def _filter_openapi_spec(spec: Dict[str, Any], allowed_paths: List[str]) -> Dict
                             content_info["schema"] = {
                                 "type": "object",
                                 "description": "Request parameters for this endpoint",
-                                "additionalProperties": True
+                                "additionalProperties": True,
                             }

             # Remove response schemas to avoid validation issues
             # FastMCP will still return the data, just without strict validation
             if "responses" in operation:
-                for
+                for _status_code, response in operation["responses"].items():
                     if "content" in response:
-                        for
+                        for _content_type, content_info in response["content"].items():
                             if "schema" in content_info:
                                 # Replace with a simple schema that accepts any response
                                 content_info["schema"] = {
                                     "type": "object",
-                                    "additionalProperties": True
+                                    "additionalProperties": True,
                                 }

             # Clean parameter schemas (parameter names are already sanitized)
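To illustrate the schema clean-up above (a sketch over a hypothetical operation dict, not the library's API): broken or over-strict response schemas are swapped for a permissive object so FastMCP can still return the payload without strict validation.

def relax_content_schemas(operation: dict) -> None:
    """Replace every response schema with a permissive object schema."""
    for response in operation.get("responses", {}).values():
        for content_info in response.get("content", {}).values():
            if "schema" in content_info:
                content_info["schema"] = {"type": "object", "additionalProperties": True}

op = {"responses": {"200": {"content": {"application/json": {"schema": {"$ref": "#/components/schemas/workflow"}}}}}}
relax_content_schemas(op)
print(op["responses"]["200"]["content"]["application/json"]["schema"])
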
@@ -921,135 +2412,155 @@ def _filter_openapi_spec(spec: Dict[str, Any], allowed_paths: List[str]) -> Dict
                     # Replace with a simple string schema
                     param["schema"] = {
                         "type": "string",
-                        "description": param.get("description", "Parameter value")
+                        "description": param.get("description", "Parameter value"),
                     }

             # Add/modify pagination limits to alerts and incident-related endpoints to prevent infinite loops
             if method.lower() == "get" and ("alerts" in path.lower() or "incident" in path.lower()):
                 if "parameters" not in operation:
                     operation["parameters"] = []
-
+
                 # Find existing pagination parameters and update them with limits
                 page_size_param = None
                 page_number_param = None
-
+
                 for param in operation["parameters"]:
                     if param.get("name") == "page[size]":
                         page_size_param = param
                     elif param.get("name") == "page[number]":
                         page_number_param = param
-
+
                 # Update or add page[size] parameter with limits
                 if page_size_param:
                     # Update existing parameter with limits
                     if "schema" not in page_size_param:
                         page_size_param["schema"] = {}
-                    page_size_param["schema"].update(
-
-                        "default": 10,
-                        "minimum": 1,
-                        "maximum": 20,
-                        "description": "Number of results per page (max: 20)"
-                    })
-                else:
-                    # Add new parameter
-                    operation["parameters"].append({
-                        "name": "page[size]",
-                        "in": "query",
-                        "required": False,
-                        "schema": {
+                    page_size_param["schema"].update(
+                        {
                             "type": "integer",
                             "default": 10,
                             "minimum": 1,
                             "maximum": 20,
-                            "description": "Number of results per page (max: 20)"
+                            "description": "Number of results per page (max: 20)",
                         }
-
-
+                    )
+                else:
+                    # Add new parameter
+                    operation["parameters"].append(
+                        {
+                            "name": "page[size]",
+                            "in": "query",
+                            "required": False,
+                            "schema": {
+                                "type": "integer",
+                                "default": 10,
+                                "minimum": 1,
+                                "maximum": 20,
+                                "description": "Number of results per page (max: 20)",
+                            },
+                        }
+                    )
+
                 # Update or add page[number] parameter with defaults
                 if page_number_param:
-                    # Update existing parameter
+                    # Update existing parameter
                     if "schema" not in page_number_param:
                         page_number_param["schema"] = {}
-                    page_number_param["schema"].update(
-
-                        "default": 1,
-                        "minimum": 1,
-                        "description": "Page number to retrieve"
-                    })
-                else:
-                    # Add new parameter
-                    operation["parameters"].append({
-                        "name": "page[number]",
-                        "in": "query",
-                        "required": False,
-                        "schema": {
+                    page_number_param["schema"].update(
+                        {
                             "type": "integer",
                             "default": 1,
                             "minimum": 1,
-                            "description": "Page number to retrieve"
+                            "description": "Page number to retrieve",
                         }
-
-
+                    )
+                else:
+                    # Add new parameter
+                    operation["parameters"].append(
+                        {
+                            "name": "page[number]",
+                            "in": "query",
+                            "required": False,
+                            "schema": {
+                                "type": "integer",
+                                "default": 1,
+                                "minimum": 1,
+                                "description": "Page number to retrieve",
+                            },
+                        }
+                    )
+
             # Add sparse fieldsets for alerts endpoints to reduce payload size
             if "alert" in path.lower():
                 # Add fields[alerts] parameter with essential fields only - make it required with default
-                operation["parameters"].append(
-
-
-
-
-                "
-
-
+                operation["parameters"].append(
+                    {
+                        "name": "fields[alerts]",
+                        "in": "query",
+                        "required": True,
+                        "schema": {
+                            "type": "string",
+                            "default": "id,summary,status,started_at,ended_at,short_id,alert_urgency_id,source,noise",
+                            "description": "Comma-separated list of alert fields to include (reduces payload size)",
+                        },
                     }
-
-
+                )
+
             # Add include parameter for alerts endpoints to minimize relationships
             if "alert" in path.lower():
                 # Check if include parameter already exists
-                include_param_exists = any(
+                include_param_exists = any(
+                    param.get("name") == "include" for param in operation["parameters"]
+                )
                 if not include_param_exists:
-                    operation["parameters"].append(
-
-
-
-
-                    "
-
-
+                    operation["parameters"].append(
+                        {
+                            "name": "include",
+                            "in": "query",
+                            "required": True,
+                            "schema": {
+                                "type": "string",
+                                "default": "",
+                                "description": "Related resources to include (empty for minimal payload)",
+                            },
                         }
-
-
+                    )
+
             # Add sparse fieldsets for incidents endpoints to reduce payload size
             if "incident" in path.lower():
                 # Add fields[incidents] parameter with essential fields only - make it required with default
-                operation["parameters"].append(
-
-
-
-
-                "
-
-
+                operation["parameters"].append(
+                    {
+                        "name": "fields[incidents]",
+                        "in": "query",
+                        "required": True,
+                        "schema": {
+                            "type": "string",
+                            "default": "id,title,summary,status,severity,created_at,updated_at,url,started_at",
+                            "description": "Comma-separated list of incident fields to include (reduces payload size)",
+                        },
                     }
-
-
+                )
+
             # Add include parameter for incidents endpoints to minimize relationships
             if "incident" in path.lower():
                 # Check if include parameter already exists
-                include_param_exists = any(
+                include_param_exists = any(
+                    param.get("name") == "include" for param in operation["parameters"]
+                )
                 if not include_param_exists:
-                    operation["parameters"].append(
-
-
-
-
-                    "
-
-
+                    operation["parameters"].append(
+                        {
+                            "name": "include",
+                            "in": "query",
+                            "required": True,
+                            "schema": {
+                                "type": "string",
+                                "default": "",
+                                "description": "Related resources to include (empty for minimal payload)",
+                            },
                         }
-
+                    )

     # Also clean up any remaining broken references in components
     if "components" in filtered_spec and "schemas" in filtered_spec["components"]:
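The net effect of the defaults injected above is that the generated incident tools send requests shaped roughly like the sketch below. The field list is copied from the defaults in the hunk; the base URL and token handling are assumptions for illustration only.

import httpx

params = {
    "page[size]": 10,
    "page[number]": 1,
    "include": "",  # no relationships, minimal payload
    "fields[incidents]": "id,title,summary,status,severity,created_at,updated_at,url,started_at",
}
response = httpx.get(
    "https://api.rootly.com/v1/incidents",  # assumed endpoint for this sketch
    params=params,
    headers={"Authorization": "Bearer <ROOTLY_API_TOKEN>"},
    timeout=30,
)
print(response.status_code)
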
@@ -1067,20 +2578,29 @@ def _filter_openapi_spec(spec: Dict[str, Any], allowed_paths: List[str]) -> Dict
|
|
|
1067
2578
|
# Clean up any operation-level references to removed schemas
|
|
1068
2579
|
removed_schemas = set()
|
|
1069
2580
|
if "components" in filtered_spec and "schemas" in filtered_spec["components"]:
|
|
1070
|
-
removed_schemas = {
|
|
1071
|
-
|
|
1072
|
-
|
|
1073
|
-
|
|
2581
|
+
removed_schemas = {
|
|
2582
|
+
"new_workflow",
|
|
2583
|
+
"update_workflow",
|
|
2584
|
+
"workflow",
|
|
2585
|
+
"workflow_task",
|
|
2586
|
+
"workflow_response",
|
|
2587
|
+
"workflow_list",
|
|
2588
|
+
"new_workflow_task",
|
|
2589
|
+
"update_workflow_task",
|
|
2590
|
+
"workflow_task_response",
|
|
2591
|
+
"workflow_task_list",
|
|
2592
|
+
}
|
|
2593
|
+
|
|
1074
2594
|
for path, path_item in filtered_spec.get("paths", {}).items():
|
|
1075
2595
|
for method, operation in path_item.items():
|
|
1076
2596
|
if method.lower() not in ["get", "post", "put", "delete", "patch"]:
|
|
1077
2597
|
continue
|
|
1078
|
-
|
|
2598
|
+
|
|
1079
2599
|
# Clean request body references
|
|
1080
2600
|
if "requestBody" in operation:
|
|
1081
2601
|
request_body = operation["requestBody"]
|
|
1082
2602
|
if "content" in request_body:
|
|
1083
|
-
for
|
|
2603
|
+
for _content_type, content_info in request_body["content"].items():
|
|
1084
2604
|
if "schema" in content_info and "$ref" in content_info["schema"]:
|
|
1085
2605
|
ref_path = content_info["schema"]["$ref"]
|
|
1086
2606
|
schema_name = ref_path.split("/")[-1]
|
|
@@ -1089,15 +2609,17 @@ def _filter_openapi_spec(spec: Dict[str, Any], allowed_paths: List[str]) -> Dict
                                 content_info["schema"] = {
                                     "type": "object",
                                     "description": "Request data for this endpoint",
-                                    "additionalProperties": True
+                                    "additionalProperties": True,
                                 }
-                                logger.debug(
-
-
+                                logger.debug(
+                                    f"Cleaned broken reference in {method.upper()} {path} request body: {ref_path}"
+                                )
+
+            # Clean response references
             if "responses" in operation:
-                for status_code, response in operation["responses"].items():
+                for _status_code, response in operation["responses"].items():
                     if "content" in response:
-                        for content_type, content_info in response["content"].items():
+                        for _content_type, content_info in response["content"].items():
                             if "schema" in content_info and "$ref" in content_info["schema"]:
                                 ref_path = content_info["schema"]["$ref"]
                                 schema_name = ref_path.split("/")[-1]
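The cleanup above swaps any request-body schema whose $ref points at a removed workflow schema for a permissive placeholder, and the next hunk does the same for responses. Roughly, the first dict below becomes the second (a sketch of the transformation, not code from the package):

# Before: a $ref into a schema the filter removed from components.
broken = {"$ref": "#/components/schemas/new_workflow"}

# After: the permissive object schema substituted by the cleanup pass.
replacement = {
    "type": "object",
    "description": "Request data for this endpoint",
    "additionalProperties": True,
}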
@@ -1106,14 +2628,16 @@ def _filter_openapi_spec(spec: Dict[str, Any], allowed_paths: List[str]) -> Dict
                                     content_info["schema"] = {
                                         "type": "object",
                                         "description": "Response data from this endpoint",
-                                        "additionalProperties": True
+                                        "additionalProperties": True,
                                     }
-                                    logger.debug(
+                                    logger.debug(
+                                        f"Cleaned broken reference in {method.upper()} {path} response: {ref_path}"
+                                    )

     return filtered_spec


-def _has_broken_references(schema_def: Dict[str, Any]) -> bool:
+def _has_broken_references(schema_def: dict[str, Any]) -> bool:
     """Check if a schema definition has broken references."""
     if "$ref" in schema_def:
         ref_path = schema_def["$ref"]
@@ -1121,7 +2645,7 @@ def _has_broken_references(schema_def: Dict[str, Any]) -> bool:
         broken_refs = [
             "incident_trigger_params",
             "new_workflow",
-            "update_workflow",
+            "update_workflow",
             "workflow",
             "new_workflow_task",
             "update_workflow_task",
@@ -1132,18 +2656,18 @@ def _has_broken_references(schema_def: Dict[str, Any]) -> bool:
             "workflow_list",
             "workflow_custom_field_selection_response",
             "workflow_custom_field_selection_list",
-            "workflow_form_field_condition_response",
+            "workflow_form_field_condition_response",
             "workflow_form_field_condition_list",
             "workflow_group_response",
             "workflow_group_list",
             "workflow_run_response",
-            "workflow_runs_list"
+            "workflow_runs_list",
         ]
         if any(broken_ref in ref_path for broken_ref in broken_refs):
             return True

     # Recursively check nested schemas
-    for key, value in schema_def.items():
+    for _key, value in schema_def.items():
         if isinstance(value, dict):
             if _has_broken_references(value):
                 return True
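As a quick illustration of the recursion above, _has_broken_references flags a schema even when the offending $ref sits several levels deep (the example schema is made up for demonstration):

from rootly_mcp_server.server import _has_broken_references

nested_schema = {
    "type": "object",
    "properties": {
        "run": {"$ref": "#/components/schemas/workflow_run_response"},
    },
}

# The top level has no $ref, but the nested dict does, and its target matches broken_refs.
assert _has_broken_references(nested_schema)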
@@ -1165,10 +2689,10 @@ class RootlyMCPServer(FastMCP):

     def __init__(
         self,
-        swagger_path: Optional[str] = None,
+        swagger_path: str | None = None,
         name: str = "Rootly",
         default_page_size: int = 10,
-        allowed_paths: Optional[List[str]] = None,
+        allowed_paths: list[str] | None = None,
         hosted: bool = False,
         *args,
         **kwargs,
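With the modernized annotations above, constructing the wrapper is unchanged; only the accepted types are now spelled as PEP 604 unions. A minimal sketch (the allowed_paths values are placeholders, and a Rootly API token is presumably still required at runtime):

from rootly_mcp_server.server import RootlyMCPServer

server = RootlyMCPServer(
    swagger_path=None,                        # let the server locate the OpenAPI spec itself
    name="Rootly",
    default_page_size=10,
    allowed_paths=["/incidents", "/alerts"],  # placeholder paths for illustration
    hosted=False,
)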
@@ -1179,10 +2703,7 @@ class RootlyMCPServer(FastMCP):

         # Create the server using the new function
         server = create_rootly_mcp_server(
-            swagger_path=swagger_path,
-            name=name,
-            allowed_paths=allowed_paths,
-            hosted=hosted
+            swagger_path=swagger_path, name=name, allowed_paths=allowed_paths, hosted=hosted
         )

         # Copy the server's state to this instance
@@ -1191,5 +2712,5 @@ class RootlyMCPServer(FastMCP):
         # Tools will be accessed via async methods when needed
         self._server = server
         self._tools = {}  # Placeholder - tools should be accessed via async methods
-        self._resources = getattr(server,
-        self._prompts = getattr(server,
+        self._resources = getattr(server, "_resources", {})
+        self._prompts = getattr(server, "_prompts", {})