rootly-mcp-server 2.0.15__py3-none-any.whl → 2.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -6,115 +6,385 @@ the Rootly API's OpenAPI (Swagger) specification using FastMCP's OpenAPI integra
  """
 
  import json
- import os
  import logging
+ import os
  from copy import deepcopy
  from pathlib import Path
- import requests
- import httpx
- from typing import Any, Dict, List, Optional, Annotated
+ from typing import Annotated, Any
 
+ import httpx
+ import requests
  from fastmcp import FastMCP
-
  from pydantic import Field
 
+ from .smart_utils import SolutionExtractor, TextSimilarityAnalyzer
  from .utils import sanitize_parameters_in_spec
- from .smart_utils import TextSimilarityAnalyzer, SolutionExtractor
 
  # Set up logger
  logger = logging.getLogger(__name__)
 
 
+ def strip_heavy_nested_data(data: dict[str, Any]) -> dict[str, Any]:
+     """
+     Strip heavy nested relationship data from incident responses to reduce payload size.
+     Removes embedded user objects, roles, permissions, schedules, etc.
+     """
+     if not isinstance(data, dict):
+         return data
+
+     if "data" in data and isinstance(data["data"], list):
+         # Process list of incidents
+         for incident in data["data"]:
+             if "attributes" in incident:
+                 attrs = incident["attributes"]
+                 # Strip heavy embedded user objects
+                 for user_field in [
+                     "user",
+                     "started_by",
+                     "mitigated_by",
+                     "resolved_by",
+                     "closed_by",
+                     "cancelled_by",
+                     "in_triage_by",
+                 ]:
+                     if user_field in attrs and isinstance(attrs[user_field], dict):
+                         user_data = attrs[user_field].get("data", {})
+                         if "attributes" in user_data:
+                             # Keep only basic user info
+                             attrs[user_field] = {
+                                 "data": {
+                                     "id": user_data.get("id"),
+                                     "type": user_data.get("type"),
+                                     "attributes": {
+                                         "name": user_data.get("attributes", {}).get("name"),
+                                         "email": user_data.get("attributes", {}).get("email"),
+                                     },
+                                 }
+                             }
+
+                 # Strip heavy severity object, keep only essential info
+                 if "severity" in attrs and isinstance(attrs["severity"], dict):
+                     sev_data = attrs["severity"].get("data", {})
+                     if sev_data and "attributes" in sev_data:
+                         # Simplify to just name and slug
+                         attrs["severity"] = {
+                             "name": sev_data.get("attributes", {}).get("name"),
+                             "slug": sev_data.get("attributes", {}).get("slug"),
+                         }
+                     elif not sev_data:
+                         # Severity is null/empty
+                         attrs["severity"] = None
+
+                 # Remove heavy integration fields (50+ fields with IDs/URLs)
+                 integration_fields = [
+                     "zoom_meeting_start_url",
+                     "zoom_meeting_global_dial_in_numbers",
+                     "shortcut_story_id",
+                     "shortcut_story_url",
+                     "shortcut_task_id",
+                     "shortcut_task_url",
+                     "asana_task_id",
+                     "asana_task_url",
+                     "github_issue_id",
+                     "github_issue_url",
+                     "gitlab_issue_id",
+                     "gitlab_issue_url",
+                     "google_meeting_id",
+                     "trello_card_id",
+                     "trello_card_url",
+                     "linear_issue_id",
+                     "linear_issue_url",
+                     "zendesk_ticket_id",
+                     "zendesk_ticket_url",
+                     "motion_task_id",
+                     "motion_task_url",
+                     "clickup_task_id",
+                     "clickup_task_url",
+                     "slack_channel_deep_link",
+                     "service_now_incident_id",
+                     "service_now_incident_key",
+                     "service_now_incident_url",
+                     "opsgenie_incident_id",
+                     "opsgenie_incident_url",
+                     "opsgenie_alert_id",
+                     "opsgenie_alert_url",
+                     "victor_ops_incident_id",
+                     "victor_ops_incident_url",
+                     "pagerduty_incident_id",
+                     "pagerduty_incident_number",
+                     "pagerduty_incident_url",
+                     "mattermost_channel_id",
+                     "mattermost_channel_name",
+                     "mattermost_channel_url",
+                     "confluence_page_id",
+                     "quip_page_id",
+                     "quip_page_url",
+                     "airtable_base_key",
+                     "airtable_table_name",
+                     "airtable_record_id",
+                     "airtable_record_url",
+                     "google_drive_id",
+                     "google_drive_parent_id",
+                     "google_drive_url",
+                     "sharepoint_page_id",
+                     "sharepoint_page_url",
+                     "datadog_notebook_id",
+                     "datadog_notebook_url",
+                     "freshservice_ticket_id",
+                     "freshservice_ticket_url",
+                     "freshservice_task_id",
+                     "freshservice_task_url",
+                     "zoom_meeting_password",
+                     "zoom_meeting_pstn_password",
+                     "zoom_meeting_h323_password",
+                     "labels",
+                     "slack_last_message_ts",
+                 ]
+                 for field in integration_fields:
+                     attrs.pop(field, None)
+
+             # Remove heavy relationships data
+             if "relationships" in incident:
+                 rels = incident["relationships"]
+                 # Keep only counts for heavy relationships, remove the actual data
+                 for rel_key in [
+                     "events",
+                     "action_items",
+                     "subscribers",
+                     "roles",
+                     "slack_messages",
+                     "alerts",
+                 ]:
+                     if (
+                         rel_key in rels
+                         and isinstance(rels[rel_key], dict)
+                         and "data" in rels[rel_key]
+                     ):
+                         # Replace with just count
+                         rels[rel_key] = {"count": len(rels[rel_key]["data"])}
+
+     # Process "included" section (common in shifts/alerts with user data)
+     if "included" in data and isinstance(data["included"], list):
+         for item in data["included"]:
+             if item.get("type") == "users":
+                 # Keep only essential user fields
+                 if "attributes" in item:
+                     attrs = item["attributes"]
+                     keep_fields = {"name", "email", "phone", "time_zone", "full_name"}
+                     item["attributes"] = {k: v for k, v in attrs.items() if k in keep_fields}
+                 # Strip heavy relationships
+                 if "relationships" in item:
+                     for rel_key in [
+                         "schedules",
+                         "notification_rules",
+                         "teams",
+                         "devices",
+                         "email_addresses",
+                         "phone_numbers",
+                     ]:
+                         if rel_key in item["relationships"]:
+                             rel_data = item["relationships"][rel_key]
+                             if isinstance(rel_data, dict) and "data" in rel_data:
+                                 data_list = rel_data.get("data", [])
+                                 if isinstance(data_list, list):
+                                     item["relationships"][rel_key] = {"count": len(data_list)}
+
+     # Process alerts in data list
+     if "data" in data and isinstance(data["data"], list):
+         for item in data["data"]:
+             if item.get("type") == "alerts":
+                 # Strip heavy attributes from alerts
+                 if "attributes" in item:
+                     attrs = item["attributes"]
+                     # Remove heavy fields - raw data, embedded objects, integration fields
+                     heavy_fields = [
+                         "data",  # Raw alert payload from source - very large
+                         "labels",
+                         "external_url",
+                         "pagerduty_incident_id",
+                         "pagerduty_incident_url",
+                         "opsgenie_alert_id",
+                         "opsgenie_alert_url",
+                         "deduplication_key",
+                     ]
+                     for field in heavy_fields:
+                         attrs.pop(field, None)
+
+                     # Simplify embedded objects to just IDs/counts
+                     # groups - keep only group_ids
+                     if "groups" in attrs:
+                         attrs.pop("groups", None)
+                     # environments - keep only environment_ids
+                     if "environments" in attrs:
+                         attrs.pop("environments", None)
+                     # services - keep only service_ids
+                     if "services" in attrs:
+                         attrs.pop("services", None)
+                     # incidents - embedded incident objects
+                     if "incidents" in attrs:
+                         attrs.pop("incidents", None)
+                     # responders - embedded responder objects
+                     if "responders" in attrs:
+                         attrs.pop("responders", None)
+                     # notified_users - embedded user objects
+                     if "notified_users" in attrs:
+                         attrs.pop("notified_users", None)
+                     # alerting_targets - embedded target objects
+                     if "alerting_targets" in attrs:
+                         attrs.pop("alerting_targets", None)
+                     # alert_urgency - keep only alert_urgency_id
+                     if "alert_urgency" in attrs:
+                         attrs.pop("alert_urgency", None)
+                     # alert_field_values - embedded custom field values
+                     if "alert_field_values" in attrs:
+                         attrs.pop("alert_field_values", None)
+
+                 # Strip heavy relationships
+                 if "relationships" in item:
+                     rels = item["relationships"]
+                     for rel_key in ["events", "subscribers", "alerts"]:
+                         if (
+                             rel_key in rels
+                             and isinstance(rels[rel_key], dict)
+                             and "data" in rels[rel_key]
+                         ):
+                             data_list = rels[rel_key].get("data", [])
+                             if isinstance(data_list, list):
+                                 rels[rel_key] = {"count": len(data_list)}
+
+     return data
+
+
+ class ProcessedResponse:
+     """Wrapper around httpx.Response that processes JSON to reduce payload size."""
+
+     def __init__(self, response: httpx.Response):
+         self._response = response
+         self._processed_json = None
+
+     def json(self, **kwargs):
+         """Parse JSON and strip heavy nested data."""
+         if self._processed_json is None:
+             raw_data = self._response.json(**kwargs)
+             self._processed_json = strip_heavy_nested_data(raw_data)
+         return self._processed_json
+
+     def __getattr__(self, name):
+         """Delegate all other attributes to the wrapped response."""
+         return getattr(self._response, name)
+
+
+ class ResponseProcessingClient(httpx.AsyncClient):
+     """AsyncClient subclass that wraps responses to reduce payload size.
+
+     This is necessary because FastMCP.from_openapi() uses the client directly,
+     bypassing any wrapper class. By subclassing httpx.AsyncClient, we ensure
+     all responses go through our processing.
+     """
+
+     async def request(self, method, url, **kwargs):
+         """Override request to wrap response with ProcessedResponse."""
+         response = await super().request(method, url, **kwargs)
+         return ProcessedResponse(response)
+
+
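The three additions above compose: strip_heavy_nested_data does the trimming, ProcessedResponse applies it lazily on .json(), and ResponseProcessingClient guarantees the wrapping even when FastMCP drives the client directly. A minimal sketch of the trimming behavior (the import path rootly_mcp_server.server is an assumption about the package layout, not something this diff shows):

```python
from rootly_mcp_server.server import strip_heavy_nested_data  # assumed module path

payload = {
    "data": [
        {
            "id": "42",
            "type": "incidents",
            "attributes": {
                "title": "DB latency spike",
                "zoom_meeting_start_url": "https://zoom.example/start",  # integration field, dropped
            },
            "relationships": {
                "events": {"data": [{"id": "1"}, {"id": "2"}]},  # collapsed to a count
            },
        }
    ]
}

trimmed = strip_heavy_nested_data(payload)
assert "zoom_meeting_start_url" not in trimmed["data"][0]["attributes"]
assert trimmed["data"][0]["relationships"]["events"] == {"count": 2}
```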
  class MCPError:
      """Enhanced error handling for MCP protocol compliance."""
-
+
      @staticmethod
-     def protocol_error(code: int, message: str, data: Optional[Dict] = None):
+     def protocol_error(code: int, message: str, data: dict | None = None):
          """Create a JSON-RPC protocol-level error response."""
-         error_response = {
-             "jsonrpc": "2.0",
-             "error": {
-                 "code": code,
-                 "message": message
-             }
-         }
+         error_response = {"jsonrpc": "2.0", "error": {"code": code, "message": message}}
          if data:
              error_response["error"]["data"] = data
          return error_response
-
+
      @staticmethod
-     def tool_error(error_message: str, error_type: str = "execution_error", details: Optional[Dict] = None):
+     def tool_error(
+         error_message: str, error_type: str = "execution_error", details: dict | None = None
+     ):
          """Create a tool-level error response (returned as successful tool result)."""
-         error_response = {
-             "error": True,
-             "error_type": error_type,
-             "message": error_message
-         }
+         error_response = {"error": True, "error_type": error_type, "message": error_message}
          if details:
              error_response["details"] = details
          return error_response
-
+
      @staticmethod
      def categorize_error(exception: Exception) -> tuple[str, str]:
          """Categorize an exception into error type and appropriate message."""
          error_str = str(exception)
          exception_type = type(exception).__name__
-
+
          # Authentication/Authorization errors
-         if any(keyword in error_str.lower() for keyword in ["401", "unauthorized", "authentication", "token", "forbidden"]):
+         if any(
+             keyword in error_str.lower()
+             for keyword in ["401", "unauthorized", "authentication", "token", "forbidden"]
+         ):
              return "authentication_error", f"Authentication failed: {error_str}"
-
-         # Network/Connection errors
-         if any(keyword in exception_type.lower() for keyword in ["connection", "timeout", "network"]):
+
+         # Network/Connection errors
+         if any(
+             keyword in exception_type.lower() for keyword in ["connection", "timeout", "network"]
+         ):
              return "network_error", f"Network error: {error_str}"
-
+
          # HTTP errors
          if "40" in error_str[:10]:  # 4xx client errors
              return "client_error", f"Client error: {error_str}"
          elif "50" in error_str[:10]:  # 5xx server errors
              return "server_error", f"Server error: {error_str}"
-
+
          # Validation errors
-         if any(keyword in exception_type.lower() for keyword in ["validation", "pydantic", "field"]):
+         if any(
+             keyword in exception_type.lower() for keyword in ["validation", "pydantic", "field"]
+         ):
              return "validation_error", f"Input validation error: {error_str}"
-
+
          # Generic execution errors
          return "execution_error", f"Tool execution error: {error_str}"
 
+
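For reference, the two error shapes the reformatted helpers produce (values follow directly from the code in this hunk):

```python
err = MCPError.tool_error("boom", "execution_error", details={"hint": "retry"})
# -> {"error": True, "error_type": "execution_error", "message": "boom",
#     "details": {"hint": "retry"}}

# Categorization keys off the exception text and the exception type name.
MCPError.categorize_error(Exception("401 Unauthorized"))
# -> ("authentication_error", "Authentication failed: 401 Unauthorized")
```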
  # Default Swagger URL
  SWAGGER_URL = "https://rootly-heroku.s3.amazonaws.com/swagger/v1/swagger.json"
 
+
  # Default allowed API paths
  def _generate_recommendation(solution_data: dict) -> str:
      """Generate a high-level recommendation based on solution analysis."""
      solutions = solution_data.get("solutions", [])
      avg_time = solution_data.get("average_resolution_time")
-
+
      if not solutions:
          return "No similar incidents found. This may be a novel issue requiring escalation."
-
+
      recommendation_parts = []
-
+
      # Time expectation
      if avg_time:
          if avg_time < 1:
              recommendation_parts.append("Similar incidents typically resolve quickly (< 1 hour).")
          elif avg_time > 4:
-             recommendation_parts.append("Similar incidents typically require more time (> 4 hours).")
-
+             recommendation_parts.append(
+                 "Similar incidents typically require more time (> 4 hours)."
+             )
+
      # Top solution
      if solutions:
          top_solution = solutions[0]
          if top_solution.get("suggested_actions"):
              actions = top_solution["suggested_actions"][:2]  # Top 2 actions
              recommendation_parts.append(f"Consider trying: {', '.join(actions)}")
-
+
      # Pattern insights
      patterns = solution_data.get("common_patterns", [])
      if patterns:
          recommendation_parts.append(f"Common patterns: {patterns[0]}")
-
-     return " ".join(recommendation_parts) if recommendation_parts else "Review similar incidents above for resolution guidance."
+
+     return (
+         " ".join(recommendation_parts)
+         if recommendation_parts
+         else "Review similar incidents above for resolution guidance."
+     )
 
 
  # Default allowed API paths
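A quick illustration of what _generate_recommendation assembles for a typical solution_data dict (input shape inferred from the accessors above):

```python
solution_data = {
    "solutions": [{"suggested_actions": ["restart worker", "flush cache"]}],
    "average_resolution_time": 0.5,
    "common_patterns": ["stale cache after deploy"],
}
_generate_recommendation(solution_data)
# -> "Similar incidents typically resolve quickly (< 1 hour). Consider trying:
#     restart worker, flush cache Common patterns: stale cache after deploy"
```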
@@ -175,7 +445,12 @@ DEFAULT_ALLOWED_PATHS = [
  class AuthenticatedHTTPXClient:
      """An HTTPX client wrapper that handles Rootly API authentication and parameter transformation."""
 
-     def __init__(self, base_url: str = "https://api.rootly.com", hosted: bool = False, parameter_mapping: Optional[Dict[str, str]] = None):
+     def __init__(
+         self,
+         base_url: str = "https://api.rootly.com",
+         hosted: bool = False,
+         parameter_mapping: dict[str, str] | None = None,
+     ):
          self._base_url = base_url
          self.hosted = hosted
          self._api_token = None
@@ -184,25 +459,25 @@ class AuthenticatedHTTPXClient:
          if not self.hosted:
              self._api_token = self._get_api_token()
 
-         # Create the HTTPX client
+         # Create the HTTPX client
          headers = {
-             "Content-Type": "application/vnd.api+json",
-             "Accept": "application/vnd.api+json"
+             "Content-Type": "application/vnd.api+json",
+             "Accept": "application/vnd.api+json",
              # Let httpx handle Accept-Encoding automatically with all supported formats
          }
          if self._api_token:
              headers["Authorization"] = f"Bearer {self._api_token}"
 
-         self.client = httpx.AsyncClient(
+         self.client = ResponseProcessingClient(
              base_url=base_url,
              headers=headers,
              timeout=30.0,
              follow_redirects=True,
              # Ensure proper handling of compressed responses
-             limits=httpx.Limits(max_keepalive_connections=5, max_connections=10)
+             limits=httpx.Limits(max_keepalive_connections=5, max_connections=10),
          )
 
-     def _get_api_token(self) -> Optional[str]:
+     def _get_api_token(self) -> str | None:
          """Get the API token from environment variables."""
          api_token = os.getenv("ROOTLY_API_TOKEN")
          if not api_token:
@@ -210,7 +485,7 @@ class AuthenticatedHTTPXClient:
              return None
          return api_token
 
-     def _transform_params(self, params: Optional[Dict[str, Any]]) -> Optional[Dict[str, Any]]:
+     def _transform_params(self, params: dict[str, Any] | None) -> dict[str, Any] | None:
          """Transform sanitized parameter names back to original names."""
          if not params or not self.parameter_mapping:
              return params
@@ -225,33 +500,36 @@ class AuthenticatedHTTPXClient:
          return transformed
 
      async def request(self, method: str, url: str, **kwargs):
-         """Override request to transform parameters."""
+         """Override request to transform parameters and wrap response for payload reduction."""
          # Transform query parameters
-         if 'params' in kwargs:
-             kwargs['params'] = self._transform_params(kwargs['params'])
+         if "params" in kwargs:
+             kwargs["params"] = self._transform_params(kwargs["params"])
 
-         # Call the underlying client's request method and let it handle everything
-         return await self.client.request(method, url, **kwargs)
+         # Call the underlying client's request method
+         response = await self.client.request(method, url, **kwargs)
+
+         # Wrap response to process JSON and reduce payload size
+         return ProcessedResponse(response)
 
      async def get(self, url: str, **kwargs):
          """Proxy to request with GET method."""
-         return await self.request('GET', url, **kwargs)
+         return await self.request("GET", url, **kwargs)
 
      async def post(self, url: str, **kwargs):
          """Proxy to request with POST method."""
-         return await self.request('POST', url, **kwargs)
+         return await self.request("POST", url, **kwargs)
 
      async def put(self, url: str, **kwargs):
          """Proxy to request with PUT method."""
-         return await self.request('PUT', url, **kwargs)
+         return await self.request("PUT", url, **kwargs)
 
      async def patch(self, url: str, **kwargs):
          """Proxy to request with PATCH method."""
-         return await self.request('PATCH', url, **kwargs)
+         return await self.request("PATCH", url, **kwargs)
 
      async def delete(self, url: str, **kwargs):
          """Proxy to request with DELETE method."""
-         return await self.request('DELETE', url, **kwargs)
+         return await self.request("DELETE", url, **kwargs)
 
      async def __aenter__(self):
          return self
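Taken together with the earlier hunks, the 2.1.0 client looks like this from a caller's perspective (a sketch, not part of the diff; the token value is a placeholder):

```python
import asyncio
import os

os.environ["ROOTLY_API_TOKEN"] = "rootly-***"  # placeholder; read by _get_api_token()

async def main() -> None:
    client = AuthenticatedHTTPXClient(base_url="https://api.rootly.com")
    resp = await client.get("/v1/incidents", params={"page[size]": 5})
    resp.raise_for_status()  # delegated through __getattr__ to the real httpx.Response
    data = resp.json()       # parsed once, then trimmed by strip_heavy_nested_data
    print(len(data.get("data", [])))

asyncio.run(main())
```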
@@ -261,26 +539,26 @@ class AuthenticatedHTTPXClient:
 
      def __getattr__(self, name):
          # Delegate all other attributes to the underlying client, except for request methods
-         if name in ['request', 'get', 'post', 'put', 'patch', 'delete']:
+         if name in ["request", "get", "post", "put", "patch", "delete"]:
              # Use our overridden methods instead
              return getattr(self, name)
          return getattr(self.client, name)
-
-     @property
+
+     @property
      def base_url(self):
          return self._base_url
-
+
      @property
      def headers(self):
          return self.client.headers
 
 
  def create_rootly_mcp_server(
-     swagger_path: Optional[str] = None,
+     swagger_path: str | None = None,
      name: str = "Rootly",
-     allowed_paths: Optional[List[str]] = None,
+     allowed_paths: list[str] | None = None,
      hosted: bool = False,
-     base_url: Optional[str] = None,
+     base_url: str | None = None,
  ) -> FastMCP:
      """
      Create a Rootly MCP Server using FastMCP's OpenAPI integration.
@@ -301,8 +579,7 @@ def create_rootly_mcp_server(
 
      # Add /v1 prefix to paths if not present
      allowed_paths_v1 = [
-         f"/v1{path}" if not path.startswith("/v1") else path
-         for path in allowed_paths
+         f"/v1{path}" if not path.startswith("/v1") else path for path in allowed_paths
      ]
 
      logger.info(f"Creating Rootly MCP Server with allowed paths: {allowed_paths_v1}")
@@ -317,7 +594,9 @@ def create_rootly_mcp_server(
 
      # Sanitize all parameter names in the filtered spec to be MCP-compliant
      parameter_mapping = sanitize_parameters_in_spec(filtered_spec)
-     logger.info(f"Sanitized parameter names for MCP compatibility (mapped {len(parameter_mapping)} parameters)")
+     logger.info(
+         f"Sanitized parameter names for MCP compatibility (mapped {len(parameter_mapping)} parameters)"
+     )
 
      # Determine the base URL
      if base_url is None:
@@ -328,9 +607,7 @@ def create_rootly_mcp_server(
      # Create the authenticated HTTP client with parameter mapping
 
      http_client = AuthenticatedHTTPXClient(
-         base_url=base_url,
-         hosted=hosted,
-         parameter_mapping=parameter_mapping
+         base_url=base_url, hosted=hosted, parameter_mapping=parameter_mapping
      )
 
      # Create the MCP server using OpenAPI integration
@@ -342,13 +619,14 @@ def create_rootly_mcp_server(
          timeout=30.0,
          tags={"rootly", "incident-management"},
      )
-
+
      @mcp.custom_route("/healthz", methods=["GET"])
      @mcp.custom_route("/health", methods=["GET"])
      async def health_check(request):
          from starlette.responses import PlainTextResponse
+
          return PlainTextResponse("OK")
-
+
      # Add some custom tools for enhanced functionality
 
      @mcp.tool()
@@ -363,12 +641,14 @@ def create_rootly_mcp_server(
                  summary = operation.get("summary", "")
                  description = operation.get("description", "")
 
-                 endpoints.append({
-                     "path": path,
-                     "method": method.upper(),
-                     "summary": summary,
-                     "description": description,
-                 })
+                 endpoints.append(
+                     {
+                         "path": path,
+                         "method": method.upper(),
+                         "summary": summary,
+                         "description": description,
+                     }
+                 )
 
          return endpoints
 
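Each entry the reformatted list_endpoints block emits has this shape (values here are illustrative; the real ones depend on the filtered Swagger spec):

```python
entry = {
    "path": "/v1/incidents",
    "method": "GET",
    "summary": "List incidents",  # from the operation object, may be empty
    "description": "",
}
```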
@@ -378,6 +658,7 @@ def create_rootly_mcp_server(
          if hosted:
              try:
                  from fastmcp.server.dependencies import get_http_headers
+
                  request_headers = get_http_headers()
                  auth_header = request_headers.get("authorization", "")
                  if auth_header:
@@ -385,18 +666,33 @@ def create_rootly_mcp_server(
                      if "headers" not in kwargs:
                          kwargs["headers"] = {}
                      kwargs["headers"]["Authorization"] = auth_header
-             except Exception:
-                 pass # Fallback to default client behavior
-
+             except Exception:  # nosec B110
+                 # Intentionally broad exception handling: fallback to default client behavior
+                 # if token extraction fails for any reason (missing env var, invalid format, etc.)
+                 pass
+
          # Use our custom client with proper error handling instead of bypassing it
          return await http_client.request(method, url, **kwargs)
 
      @mcp.tool()
      async def search_incidents(
-         query: Annotated[str, Field(description="Search query to filter incidents by title/summary")] = "",
-         page_size: Annotated[int, Field(description="Number of results per page (max: 20)", ge=1, le=20)] = 10,
-         page_number: Annotated[int, Field(description="Page number to retrieve (use 0 for all pages)", ge=0)] = 1,
-         max_results: Annotated[int, Field(description="Maximum total results when fetching all pages (ignored if page_number > 0)", ge=1, le=10)] = 5,
+         query: Annotated[
+             str, Field(description="Search query to filter incidents by title/summary")
+         ] = "",
+         page_size: Annotated[
+             int, Field(description="Number of results per page (max: 20)", ge=1, le=20)
+         ] = 10,
+         page_number: Annotated[
+             int, Field(description="Page number to retrieve (use 0 for all pages)", ge=0)
+         ] = 1,
+         max_results: Annotated[
+             int,
+             Field(
+                 description="Maximum total results when fetching all pages (ignored if page_number > 0)",
+                 ge=1,
+                 le=10,
+             ),
+         ] = 5,
      ) -> dict:
          """
          Search incidents with flexible pagination control.
@@ -410,7 +706,7 @@ def create_rootly_mcp_server(
                  "page[size]": page_size,  # Use requested page size (already limited to max 20)
                  "page[number]": page_number,
                  "include": "",
-                 "fields[incidents]": "id,title,summary,status,severity,created_at,updated_at,url,started_at",
+                 "fields[incidents]": "id,title,summary,status,created_at,updated_at,url,started_at",
              }
              if query:
                  params["filter[search]"] = query
@@ -418,7 +714,7 @@ def create_rootly_mcp_server(
              try:
                  response = await make_authenticated_request("GET", "/v1/incidents", params=params)
                  response.raise_for_status()
-                 return response.json()
+                 return strip_heavy_nested_data(response.json())
              except Exception as e:
                  error_type, error_message = MCPError.categorize_error(e)
                  return MCPError.tool_error(error_message, error_type)
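The sparse-fieldset change above (dropping severity from fields[incidents]) rides on JSON:API-style query parameters; with httpx they serialize like this (a sketch using httpx's own encoding):

```python
import httpx

params = {
    "page[size]": 10,
    "page[number]": 1,
    "include": "",
    "fields[incidents]": "id,title,summary,status,created_at,updated_at,url,started_at",
    "filter[search]": "database",
}
print(httpx.URL("https://api.rootly.com/v1/incidents", params=params))
# brackets and commas are percent-encoded, e.g. page%5Bsize%5D=10&filter%5Bsearch%5D=database
```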
@@ -435,13 +731,15 @@ def create_rootly_mcp_server(
                      "page[size]": effective_page_size,
                      "page[number]": current_page,
                      "include": "",
-                     "fields[incidents]": "id,title,summary,status,severity,created_at,updated_at,url,started_at",
+                     "fields[incidents]": "id,title,summary,status,created_at,updated_at,url,started_at",
                  }
                  if query:
                      params["filter[search]"] = query
 
                  try:
-                     response = await make_authenticated_request("GET", "/v1/incidents", params=params)
+                     response = await make_authenticated_request(
+                         "GET", "/v1/incidents", params=params
+                     )
                      response.raise_for_status()
                      response_data = response.json()
 
@@ -450,19 +748,19 @@ def create_rootly_mcp_server(
                      if not incidents:
                          # No more incidents available
                          break
-
+
                      # Check if we got fewer incidents than requested (last page)
                      if len(incidents) < effective_page_size:
                          all_incidents.extend(incidents)
                          break
-
+
                      all_incidents.extend(incidents)
 
                      # Check metadata if available
                      meta = response_data.get("meta", {})
                      current_page_meta = meta.get("current_page", current_page)
                      total_pages = meta.get("total_pages")
-
+
                      # If we have reliable metadata, use it
                      if total_pages and current_page_meta >= total_pages:
                          break
@@ -473,7 +771,11 @@ def create_rootly_mcp_server(
 
                  except Exception as e:
                      # Re-raise authentication or critical errors for immediate handling
-                     if "401" in str(e) or "Unauthorized" in str(e) or "authentication" in str(e).lower():
+                     if (
+                         "401" in str(e)
+                         or "Unauthorized" in str(e)
+                         or "authentication" in str(e).lower()
+                     ):
                          error_type, error_message = MCPError.categorize_error(e)
                          return MCPError.tool_error(error_message, error_type)
                      # For other errors, break loop and return partial results
@@ -483,16 +785,18 @@ def create_rootly_mcp_server(
              if len(all_incidents) > max_results:
                  all_incidents = all_incidents[:max_results]
 
-             return {
-                 "data": all_incidents,
-                 "meta": {
-                     "total_fetched": len(all_incidents),
-                     "max_results": max_results,
-                     "query": query,
-                     "pages_fetched": current_page - 1,
-                     "page_size": effective_page_size
+             return strip_heavy_nested_data(
+                 {
+                     "data": all_incidents,
+                     "meta": {
+                         "total_fetched": len(all_incidents),
+                         "max_results": max_results,
+                         "query": query,
+                         "pages_fetched": current_page - 1,
+                         "page_size": effective_page_size,
+                     },
                  }
-             }
+             )
          except Exception as e:
              error_type, error_message = MCPError.categorize_error(e)
              return MCPError.tool_error(error_message, error_type)
@@ -505,24 +809,37 @@ def create_rootly_mcp_server(
      async def find_related_incidents(
          incident_id: str = "",
          incident_description: str = "",
-         similarity_threshold: Annotated[float, Field(description="Minimum similarity score (0.0-1.0)", ge=0.0, le=1.0)] = 0.15,
-         max_results: Annotated[int, Field(description="Maximum number of related incidents to return", ge=1, le=20)] = 5,
-         status_filter: Annotated[str, Field(description="Filter incidents by status (empty for all, 'resolved', 'investigating', etc.)")] = ""
+         similarity_threshold: Annotated[
+             float, Field(description="Minimum similarity score (0.0-1.0)", ge=0.0, le=1.0)
+         ] = 0.15,
+         max_results: Annotated[
+             int, Field(description="Maximum number of related incidents to return", ge=1, le=20)
+         ] = 5,
+         status_filter: Annotated[
+             str,
+             Field(
+                 description="Filter incidents by status (empty for all, 'resolved', 'investigating', etc.)"
+             ),
+         ] = "",
      ) -> dict:
          """Find similar incidents to help with context and resolution strategies. Provide either incident_id OR incident_description (e.g., 'website is down', 'database timeout errors'). Use status_filter to limit to specific incident statuses or leave empty for all incidents."""
          try:
              target_incident = {}
-
+
              if incident_id:
                  # Get the target incident details by ID
-                 target_response = await make_authenticated_request("GET", f"/v1/incidents/{incident_id}")
+                 target_response = await make_authenticated_request(
+                     "GET", f"/v1/incidents/{incident_id}"
+                 )
                  target_response.raise_for_status()
-                 target_incident_data = target_response.json()
-                 target_incident = target_incident_data.get("data", {})
-
+                 target_incident_data = strip_heavy_nested_data(
+                     {"data": [target_response.json().get("data", {})]}
+                 )
+                 target_incident = target_incident_data.get("data", [{}])[0]
+
                  if not target_incident:
                      return MCPError.tool_error("Incident not found", "not_found")
-
+
              elif incident_description:
                  # Create synthetic incident for analysis from descriptive text
                  target_incident = {
@@ -530,101 +847,124 @@ def create_rootly_mcp_server(
                      "attributes": {
                          "title": incident_description,
                          "summary": incident_description,
-                         "description": incident_description
-                     }
+                         "description": incident_description,
+                     },
                  }
              else:
-                 return MCPError.tool_error("Must provide either incident_id or incident_description", "validation_error")
-
+                 return MCPError.tool_error(
+                     "Must provide either incident_id or incident_description", "validation_error"
+                 )
+
              # Get historical incidents for comparison
              params = {
                  "page[size]": 100,  # Get more incidents for better matching
                  "page[number]": 1,
-                 "include": ""
+                 "include": "",
+                 "fields[incidents]": "id,title,summary,status,created_at,url",
              }
-
+
              # Only add status filter if specified
              if status_filter:
                  params["filter[status]"] = status_filter
-
-             historical_response = await make_authenticated_request("GET", "/v1/incidents", params=params)
+
+             historical_response = await make_authenticated_request(
                 "GET", "/v1/incidents", params=params
+             )
              historical_response.raise_for_status()
-             historical_data = historical_response.json()
+             historical_data = strip_heavy_nested_data(historical_response.json())
              historical_incidents = historical_data.get("data", [])
-
+
              # Filter out the target incident itself if it exists
              if incident_id:
-                 historical_incidents = [inc for inc in historical_incidents if str(inc.get('id')) != str(incident_id)]
-
+                 historical_incidents = [
+                     inc for inc in historical_incidents if str(inc.get("id")) != str(incident_id)
+                 ]
+
              if not historical_incidents:
                  return {
                      "related_incidents": [],
                      "message": "No historical incidents found for comparison",
                      "target_incident": {
                          "id": incident_id or "synthetic",
-                         "title": target_incident.get("attributes", {}).get("title", incident_description)
-                     }
+                         "title": target_incident.get("attributes", {}).get(
+                             "title", incident_description
+                         ),
+                     },
                  }
-
+
              # Calculate similarities
-             similar_incidents = similarity_analyzer.calculate_similarity(historical_incidents, target_incident)
-
+             similar_incidents = similarity_analyzer.calculate_similarity(
+                 historical_incidents, target_incident
+             )
+
              # Filter by threshold and limit results
              filtered_incidents = [
-                 inc for inc in similar_incidents
-                 if inc.similarity_score >= similarity_threshold
+                 inc for inc in similar_incidents if inc.similarity_score >= similarity_threshold
              ][:max_results]
-
+
              # Format response
              related_incidents = []
              for incident in filtered_incidents:
-                 related_incidents.append({
-                     "incident_id": incident.incident_id,
-                     "title": incident.title,
-                     "similarity_score": round(incident.similarity_score, 3),
-                     "matched_services": incident.matched_services,
-                     "matched_keywords": incident.matched_keywords,
-                     "resolution_summary": incident.resolution_summary,
-                     "resolution_time_hours": incident.resolution_time_hours
-                 })
-
+                 related_incidents.append(
+                     {
+                         "incident_id": incident.incident_id,
+                         "title": incident.title,
+                         "similarity_score": round(incident.similarity_score, 3),
+                         "matched_services": incident.matched_services,
+                         "matched_keywords": incident.matched_keywords,
+                         "resolution_summary": incident.resolution_summary,
+                         "resolution_time_hours": incident.resolution_time_hours,
+                     }
+                 )
+
              return {
                  "target_incident": {
                      "id": incident_id or "synthetic",
-                     "title": target_incident.get("attributes", {}).get("title", incident_description)
+                     "title": target_incident.get("attributes", {}).get(
+                         "title", incident_description
+                     ),
                  },
                  "related_incidents": related_incidents,
                  "total_found": len(filtered_incidents),
                  "similarity_threshold": similarity_threshold,
-                 "analysis_summary": f"Found {len(filtered_incidents)} similar incidents out of {len(historical_incidents)} historical incidents"
+                 "analysis_summary": f"Found {len(filtered_incidents)} similar incidents out of {len(historical_incidents)} historical incidents",
              }
-
+
          except Exception as e:
              error_type, error_message = MCPError.categorize_error(e)
-             return MCPError.tool_error(f"Failed to find related incidents: {error_message}", error_type)
+             return MCPError.tool_error(
+                 f"Failed to find related incidents: {error_message}", error_type
+             )
 
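Note the wrap/unwrap pattern introduced above: strip_heavy_nested_data only trims when "data" holds a list, so the single-incident response is wrapped in a one-element list, trimmed, and unwrapped again. In isolation:

```python
raw = {"data": {"id": "42", "type": "incidents", "attributes": {"title": "API 500s"}}}
trimmed = strip_heavy_nested_data({"data": [raw.get("data", {})]})
target_incident = trimmed.get("data", [{}])[0]
# -> {"id": "42", "type": "incidents", "attributes": {"title": "API 500s"}}
```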
      @mcp.tool()
      async def suggest_solutions(
          incident_id: str = "",
          incident_title: str = "",
          incident_description: str = "",
-         max_solutions: Annotated[int, Field(description="Maximum number of solution suggestions", ge=1, le=10)] = 3,
-         status_filter: Annotated[str, Field(description="Filter incidents by status (default 'resolved', empty for all, 'investigating', etc.)")] = "resolved"
+         max_solutions: Annotated[
+             int, Field(description="Maximum number of solution suggestions", ge=1, le=10)
+         ] = 3,
+         status_filter: Annotated[
+             str,
+             Field(
+                 description="Filter incidents by status (default 'resolved', empty for all, 'investigating', etc.)"
+             ),
+         ] = "resolved",
      ) -> dict:
          """Suggest solutions based on similar incidents. Provide either incident_id OR title/description. Defaults to resolved incidents for solution mining, but can search all statuses."""
          try:
              target_incident = {}
-
+
              if incident_id:
                  # Get incident details by ID
                  response = await make_authenticated_request("GET", f"/v1/incidents/{incident_id}")
                  response.raise_for_status()
-                 incident_data = response.json()
-                 target_incident = incident_data.get("data", {})
-
+                 incident_data = strip_heavy_nested_data({"data": [response.json().get("data", {})]})
+                 target_incident = incident_data.get("data", [{}])[0]
+
                  if not target_incident:
                      return MCPError.tool_error("Incident not found", "not_found")
-
+
              elif incident_title or incident_description:
                  # Create synthetic incident for analysis
                  target_incident = {
@@ -632,83 +972,117 @@ def create_rootly_mcp_server(
                      "attributes": {
                          "title": incident_title,
                          "summary": incident_description,
-                         "description": incident_description
-                     }
+                         "description": incident_description,
+                     },
                  }
              else:
-                 return MCPError.tool_error("Must provide either incident_id or incident_title/description", "validation_error")
-
+                 return MCPError.tool_error(
+                     "Must provide either incident_id or incident_title/description",
+                     "validation_error",
+                 )
+
              # Get incidents for solution mining
              params = {
                  "page[size]": 150,  # Get more incidents for better solution matching
                  "page[number]": 1,
-                 "include": ""
+                 "include": "",
              }
-
+
              # Only add status filter if specified
              if status_filter:
                  params["filter[status]"] = status_filter
-
-             historical_response = await make_authenticated_request("GET", "/v1/incidents", params=params)
+
+             historical_response = await make_authenticated_request(
                 "GET", "/v1/incidents", params=params
+             )
              historical_response.raise_for_status()
-             historical_data = historical_response.json()
+             historical_data = strip_heavy_nested_data(historical_response.json())
              historical_incidents = historical_data.get("data", [])
-
+
              # Filter out target incident if it exists
              if incident_id:
-                 historical_incidents = [inc for inc in historical_incidents if str(inc.get('id')) != str(incident_id)]
-
+                 historical_incidents = [
+                     inc for inc in historical_incidents if str(inc.get("id")) != str(incident_id)
+                 ]
+
              if not historical_incidents:
                  status_msg = f" with status '{status_filter}'" if status_filter else ""
                  return {
                      "solutions": [],
-                     "message": f"No historical incidents found{status_msg} for solution mining"
+                     "message": f"No historical incidents found{status_msg} for solution mining",
                  }
-
+
              # Find similar incidents
-             similar_incidents = similarity_analyzer.calculate_similarity(historical_incidents, target_incident)
-
+             similar_incidents = similarity_analyzer.calculate_similarity(
+                 historical_incidents, target_incident
+             )
+
              # Filter to reasonably similar incidents (lower threshold for solution suggestions)
-             relevant_incidents = [inc for inc in similar_incidents if inc.similarity_score >= 0.2][:max_solutions * 2]
-
+             relevant_incidents = [inc for inc in similar_incidents if inc.similarity_score >= 0.2][
+                 : max_solutions * 2
+             ]
+
              if not relevant_incidents:
                  return {
                      "solutions": [],
                      "message": "No sufficiently similar incidents found for solution suggestions",
-                     "suggestion": "This appears to be a unique incident. Consider escalating or consulting documentation."
+                     "suggestion": "This appears to be a unique incident. Consider escalating or consulting documentation.",
                  }
-
+
              # Extract solutions
              solution_data = solution_extractor.extract_solutions(relevant_incidents)
-
+
              # Format response
              return {
                  "target_incident": {
                      "id": incident_id or "synthetic",
                      "title": target_incident.get("attributes", {}).get("title", incident_title),
-                     "description": target_incident.get("attributes", {}).get("summary", incident_description)
+                     "description": target_incident.get("attributes", {}).get(
+                         "summary", incident_description
+                     ),
                  },
                  "solutions": solution_data["solutions"][:max_solutions],
                  "insights": {
                      "common_patterns": solution_data["common_patterns"],
                      "average_resolution_time_hours": solution_data["average_resolution_time"],
-                     "total_similar_incidents": solution_data["total_similar_incidents"]
+                     "total_similar_incidents": solution_data["total_similar_incidents"],
                  },
-                 "recommendation": _generate_recommendation(solution_data)
+                 "recommendation": _generate_recommendation(solution_data),
              }
-
+
          except Exception as e:
              error_type, error_message = MCPError.categorize_error(e)
              return MCPError.tool_error(f"Failed to suggest solutions: {error_message}", error_type)
 
      @mcp.tool()
      async def get_oncall_shift_metrics(
-         start_date: Annotated[str, Field(description="Start date for metrics (ISO 8601 format, e.g., '2025-10-01' or '2025-10-01T00:00:00Z')")],
-         end_date: Annotated[str, Field(description="End date for metrics (ISO 8601 format, e.g., '2025-10-31' or '2025-10-31T23:59:59Z')")],
-         user_ids: Annotated[str, Field(description="Comma-separated list of user IDs to filter by (optional)")] = "",
-         schedule_ids: Annotated[str, Field(description="Comma-separated list of schedule IDs to filter by (optional)")] = "",
-         team_ids: Annotated[str, Field(description="Comma-separated list of team IDs to filter by (requires querying schedules first)")] = "",
-         group_by: Annotated[str, Field(description="Group results by: 'user', 'schedule', 'team', or 'none'")] = "user"
+         start_date: Annotated[
+             str,
+             Field(
+                 description="Start date for metrics (ISO 8601 format, e.g., '2025-10-01' or '2025-10-01T00:00:00Z')"
+             ),
+         ],
+         end_date: Annotated[
+             str,
+             Field(
+                 description="End date for metrics (ISO 8601 format, e.g., '2025-10-31' or '2025-10-31T23:59:59Z')"
+             ),
+         ],
+         user_ids: Annotated[
+             str, Field(description="Comma-separated list of user IDs to filter by (optional)")
+         ] = "",
+         schedule_ids: Annotated[
+             str, Field(description="Comma-separated list of schedule IDs to filter by (optional)")
+         ] = "",
+         team_ids: Annotated[
+             str,
+             Field(
+                 description="Comma-separated list of team IDs to filter by (requires querying schedules first)"
+             ),
+         ] = "",
+         group_by: Annotated[
+             str, Field(description="Group results by: 'user', 'schedule', 'team', or 'none'")
+         ] = "user",
      ) -> dict:
          """
          Get on-call shift metrics for a specified time period. Returns shift counts, total hours,
@@ -720,21 +1094,25 @@ def create_rootly_mcp_server(
          - Specific team: team_ids='team-1' (will query schedules for that team first)
          """
          try:
-             from datetime import datetime, timedelta
              from collections import defaultdict
-             from typing import Any, Dict
+             from datetime import datetime, timedelta
+             from typing import Any
 
              # Build query parameters
-             params: Dict[str, Any] = {
+             params: dict[str, Any] = {
                  "from": start_date,
                  "to": end_date,
              }
 
              # Fetch schedules (schedules don't have team relationship, they have owner_group_ids)
-             schedules_response = await make_authenticated_request("GET", "/v1/schedules", params={"page[size]": 100})
+             schedules_response = await make_authenticated_request(
                 "GET", "/v1/schedules", params={"page[size]": 100}
+             )
 
              if schedules_response is None:
-                 return MCPError.tool_error("Failed to get schedules: API request returned None", "execution_error")
+                 return MCPError.tool_error(
+                     "Failed to get schedules: API request returned None", "execution_error"
+                 )
 
              schedules_response.raise_for_status()
              schedules_data = schedules_response.json()
@@ -750,7 +1128,9 @@ def create_rootly_mcp_server(
              # Fetch all teams
              teams_map = {}
              if team_ids_set:
-                 teams_response = await make_authenticated_request("GET", "/v1/teams", params={"page[size]": 100})
+                 teams_response = await make_authenticated_request(
                     "GET", "/v1/teams", params={"page[size]": 100}
+                 )
                  if teams_response and teams_response.status_code == 200:
                      teams_data = teams_response.json()
                      for team in teams_data.get("data", []):
@@ -771,7 +1151,7 @@ def create_rootly_mcp_server(
                  schedule_to_team_map[schedule_id] = {
                      "team_id": team_id,
                      "team_name": team_name,
-                     "schedule_name": schedule_name
+                     "schedule_name": schedule_name,
                  }
 
              # Handle team filtering (requires multi-step query)
@@ -802,23 +1182,39 @@ def create_rootly_mcp_server(
 
              # Query shifts
              try:
-                 shifts_response = await make_authenticated_request("GET", "/v1/shifts", params=params)
+                 shifts_response = await make_authenticated_request(
                     "GET", "/v1/shifts", params=params
+                 )
 
                  if shifts_response is None:
-                     return MCPError.tool_error("Failed to get shifts: API request returned None", "execution_error")
+                     return MCPError.tool_error(
+                         "Failed to get shifts: API request returned None", "execution_error"
+                     )
 
                  shifts_response.raise_for_status()
                  shifts_data = shifts_response.json()
 
                  if shifts_data is None:
-                     return MCPError.tool_error("Failed to get shifts: API returned null/empty response", "execution_error", details={"status": shifts_response.status_code})
+                     return MCPError.tool_error(
+                         "Failed to get shifts: API returned null/empty response",
+                         "execution_error",
+                         details={"status": shifts_response.status_code},
+                     )
 
                  shifts = shifts_data.get("data", [])
                  included = shifts_data.get("included", [])
              except AttributeError as e:
-                 return MCPError.tool_error(f"Failed to get shifts: Response object error - {str(e)}", "execution_error", details={"params": params})
+                 return MCPError.tool_error(
+                     f"Failed to get shifts: Response object error - {str(e)}",
+                     "execution_error",
+                     details={"params": params},
+                 )
              except Exception as e:
-                 return MCPError.tool_error(f"Failed to get shifts: {str(e)}", "execution_error", details={"params": params, "error_type": type(e).__name__})
+                 return MCPError.tool_error(
+                     f"Failed to get shifts: {str(e)}",
+                     "execution_error",
+                     details={"params": params, "error_type": type(e).__name__},
+                 )
 
              # Build lookup maps for included resources
              users_map = {}
@@ -830,19 +1226,21 @@ def create_rootly_mcp_server(
                      on_call_roles_map[resource.get("id")] = resource
 
              # Calculate metrics
-             metrics: Dict[str, Dict[str, Any]] = defaultdict(lambda: {
-                 "shift_count": 0,
-                 "total_hours": 0.0,
-                 "override_count": 0,
-                 "regular_count": 0,
-                 "primary_count": 0,
-                 "secondary_count": 0,
-                 "primary_hours": 0.0,
-                 "secondary_hours": 0.0,
-                 "unknown_role_count": 0,
-                 "unique_days": set(),
-                 "shifts": []
-             })
+             metrics: dict[str, dict[str, Any]] = defaultdict(
+                 lambda: {
+                     "shift_count": 0,
+                     "total_hours": 0.0,
+                     "override_count": 0,
+                     "regular_count": 0,
+                     "primary_count": 0,
+                     "secondary_count": 0,
+                     "primary_hours": 0.0,
+                     "secondary_hours": 0.0,
+                     "unknown_role_count": 0,
+                     "unique_days": set(),
+                     "shifts": [],
+                 }
+             )
 
              for shift in shifts:
                  attrs = shift.get("attributes", {})
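The reformatted metrics initializer keeps the same aggregation pattern: defaultdict with a dict-factory lambda lets the shift loop accumulate per-key counters without first-use checks. Reduced to its core:

```python
from collections import defaultdict
from typing import Any

metrics: dict[str, dict[str, Any]] = defaultdict(
    lambda: {"shift_count": 0, "total_hours": 0.0, "unique_days": set()}
)
metrics["user-1"]["shift_count"] += 1  # first access creates the per-key dict
metrics["user-1"]["total_hours"] += 8.0
metrics["user-1"]["unique_days"].add("2025-10-01")
```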
@@ -937,19 +1335,21 @@ def create_rootly_mcp_server(
937
1335
  # Track unique days
938
1336
  metrics[key]["unique_days"].update(shift_days)
939
1337
 
940
- metrics[key]["shifts"].append({
941
- "shift_id": shift.get("id"),
942
- "starts_at": starts_at,
943
- "ends_at": ends_at,
944
- "duration_hours": round(duration_hours, 2),
945
- "is_override": is_override,
946
- "schedule_id": schedule_id,
947
- "user_id": user_id,
948
- "user_name": user_name,
949
- "user_email": user_email,
950
- "role_name": role_name,
951
- "is_primary": is_primary
952
- })
1338
+ metrics[key]["shifts"].append(
1339
+ {
1340
+ "shift_id": shift.get("id"),
1341
+ "starts_at": starts_at,
1342
+ "ends_at": ends_at,
1343
+ "duration_hours": round(duration_hours, 2),
1344
+ "is_override": is_override,
1345
+ "schedule_id": schedule_id,
1346
+ "user_id": user_id,
1347
+ "user_name": user_name,
1348
+ "user_email": user_email,
1349
+ "role_name": role_name,
1350
+ "is_primary": is_primary,
1351
+ }
1352
+ )
953
1353
 
954
1354
  # Format results
955
1355
  results = []
@@ -1023,10 +1423,7 @@ def create_rootly_mcp_server(
1023
1423
  results.sort(key=lambda x: x["shift_count"], reverse=True)
1024
1424
 
1025
1425
  return {
1026
- "period": {
1027
- "start_date": start_date,
1028
- "end_date": end_date
1029
- },
1426
+ "period": {"start_date": start_date, "end_date": end_date},
1030
1427
  "total_shifts": len(shifts),
1031
1428
  "grouped_by": group_by,
1032
1429
  "metrics": results,
@@ -1034,12 +1431,13 @@ def create_rootly_mcp_server(
1034
1431
  "total_hours": round(sum(m["total_hours"] for m in results), 2),
1035
1432
  "total_regular_shifts": sum(m["regular_shifts"] for m in results),
1036
1433
  "total_override_shifts": sum(m["override_shifts"] for m in results),
1037
- "unique_people": len(results) if group_by == "user" else None
1038
- }
1434
+ "unique_people": len(results) if group_by == "user" else None,
1435
+ },
1039
1436
  }
1040
1437
 
1041
1438
  except Exception as e:
1042
1439
  import traceback
1440
+
1043
1441
  error_type, error_message = MCPError.categorize_error(e)
1044
1442
  return MCPError.tool_error(
1045
1443
  f"Failed to get on-call shift metrics: {error_message}",
@@ -1048,17 +1446,37 @@ def create_rootly_mcp_server(
1048
1446
  "params": {"start_date": start_date, "end_date": end_date},
1049
1447
  "exception_type": type(e).__name__,
1050
1448
  "exception_str": str(e),
1051
- "traceback": traceback.format_exc()
1052
- }
1449
+ "traceback": traceback.format_exc(),
1450
+ },
1053
1451
  )
1054
1452
 
1055
1453
  @mcp.tool()
1056
1454
  async def get_oncall_handoff_summary(
1057
- team_ids: Annotated[str, Field(description="Comma-separated list of team IDs to filter schedules (optional)")] = "",
1058
- schedule_ids: Annotated[str, Field(description="Comma-separated list of schedule IDs (optional)")] = "",
1059
- timezone: Annotated[str, Field(description="Timezone to use for display and filtering (e.g., 'America/Los_Angeles', 'Europe/London', 'Asia/Tokyo'). IMPORTANT: If user mentions a city, location, or region (e.g., 'Toronto', 'APAC', 'my time'), infer the appropriate IANA timezone. Defaults to UTC if not specified.")] = "UTC",
1060
- filter_by_region: Annotated[bool, Field(description="If True, only show on-call for people whose shifts are during business hours (9am-5pm) in the specified timezone. Defaults to False.")] = False,
1061
- include_incidents: Annotated[bool, Field(description="If True, fetch incidents for each shift (slower). If False, only show on-call info (faster). Defaults to False for better performance.")] = False
1455
+ team_ids: Annotated[
1456
+ str,
1457
+ Field(description="Comma-separated list of team IDs to filter schedules (optional)"),
1458
+ ] = "",
1459
+ schedule_ids: Annotated[
1460
+ str, Field(description="Comma-separated list of schedule IDs (optional)")
1461
+ ] = "",
1462
+ timezone: Annotated[
1463
+ str,
1464
+ Field(
1465
+ description="Timezone to use for display and filtering (e.g., 'America/Los_Angeles', 'Europe/London', 'Asia/Tokyo'). IMPORTANT: If user mentions a city, location, or region (e.g., 'Toronto', 'APAC', 'my time'), infer the appropriate IANA timezone. Defaults to UTC if not specified."
1466
+ ),
1467
+ ] = "UTC",
1468
+ filter_by_region: Annotated[
1469
+ bool,
1470
+ Field(
1471
+ description="If True, only show on-call for people whose shifts are during business hours (9am-5pm) in the specified timezone. Defaults to False."
1472
+ ),
1473
+ ] = False,
1474
+ include_incidents: Annotated[
1475
+ bool,
1476
+ Field(
1477
+ description="If True, fetch incidents for each shift (slower). If False, only show on-call info (faster). Defaults to False for better performance."
1478
+ ),
1479
+ ] = False,
1062
1480
  ) -> dict:
1063
1481
  """
1064
1482
  Get current on-call handoff summary. Shows who's currently on-call and who's next.
@@ -1109,15 +1527,19 @@ def create_rootly_mcp_server(
1109
1527
  max_pages = 5 # Schedules shouldn't have many pages
1110
1528
 
1111
1529
  while page <= max_pages:
1112
- schedules_response = await make_authenticated_request("GET", "/v1/schedules", params={"page[size]": 100, "page[number]": page})
1530
+ schedules_response = await make_authenticated_request(
1531
+ "GET", "/v1/schedules", params={"page[size]": 100, "page[number]": page}
1532
+ )
1113
1533
  if not schedules_response:
1114
- return MCPError.tool_error("Failed to fetch schedules - no response from API", "execution_error")
1534
+ return MCPError.tool_error(
1535
+ "Failed to fetch schedules - no response from API", "execution_error"
1536
+ )
1115
1537
 
1116
1538
  if schedules_response.status_code != 200:
1117
1539
  return MCPError.tool_error(
1118
1540
  f"Failed to fetch schedules - API returned status {schedules_response.status_code}",
1119
1541
  "execution_error",
1120
- details={"status_code": schedules_response.status_code}
1542
+ details={"status_code": schedules_response.status_code},
1121
1543
  )
1122
1544
 
1123
1545
  schedules_data = schedules_response.json()
@@ -1145,7 +1567,9 @@ def create_rootly_mcp_server(
1145
1567
 
1146
1568
  teams_map = {}
1147
1569
  if team_ids_set:
1148
- teams_response = await make_authenticated_request("GET", "/v1/teams", params={"page[size]": 100})
1570
+ teams_response = await make_authenticated_request(
1571
+ "GET", "/v1/teams", params={"page[size]": 100}
1572
+ )
1149
1573
  if teams_response and teams_response.status_code == 200:
1150
1574
  teams_data = teams_response.json()
1151
1575
  for team in teams_data.get("data", []):
@@ -1153,8 +1577,14 @@ def create_rootly_mcp_server(
1153
1577
 
1154
1578
  # Filter schedules
1155
1579
  target_schedules = []
1156
- team_filter = [tid.strip() for tid in team_ids.split(",") if tid.strip()] if team_ids else []
1157
- schedule_filter = [sid.strip() for sid in schedule_ids.split(",") if sid.strip()] if schedule_ids else []
1580
+ team_filter = (
1581
+ [tid.strip() for tid in team_ids.split(",") if tid.strip()] if team_ids else []
1582
+ )
1583
+ schedule_filter = (
1584
+ [sid.strip() for sid in schedule_ids.split(",") if sid.strip()]
1585
+ if schedule_ids
1586
+ else []
1587
+ )
1158
1588
 
1159
1589
  for schedule in all_schedules:
1160
1590
  schedule_id = schedule.get("id")
@@ -1192,8 +1622,8 @@ def create_rootly_mcp_server(
  "filter[starts_at][gte]": (now - timedelta(days=1)).isoformat(),
  "filter[starts_at][lte]": (now + timedelta(days=7)).isoformat(),
  "include": "user,on_call_role",
- "page[size]": 50
- }
+ "page[size]": 50,
+ },
  )
 
  if not shifts_response:
@@ -1216,7 +1646,9 @@ def create_rootly_mcp_server(
  current_shift = None
  next_shift = None
 
- for shift in sorted(shifts, key=lambda s: s.get("attributes", {}).get("starts_at", "")):
+ for shift in sorted(
+ shifts, key=lambda s: s.get("attributes", {}).get("starts_at", "")
+ ):
  attrs = shift.get("attributes", {})
  starts_at_str = attrs.get("starts_at")
  ends_at_str = attrs.get("ends_at")
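
The loop body that classifies each shift is elided between hunks. A hedged sketch of that selection logic, under the assumption that it keys off the shift interval (names mirror the diff; this is not a verbatim excerpt):

    from datetime import datetime

    def classify_shifts(shifts: list[dict], now: datetime):
        current_shift, next_shift = None, None
        for shift in sorted(shifts, key=lambda s: s.get("attributes", {}).get("starts_at", "")):
            attrs = shift.get("attributes", {})
            starts = datetime.fromisoformat(attrs["starts_at"].replace("Z", "+00:00"))
            ends = datetime.fromisoformat(attrs["ends_at"].replace("Z", "+00:00"))
            if starts <= now < ends:
                current_shift = shift  # the shift covering "now"
            elif starts > now and next_shift is None:
                next_shift = shift  # first shift starting after "now"
        return current_shift, next_shift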
@@ -1244,21 +1676,23 @@ def create_rootly_mcp_server(
  "schedule_name": schedule_name,
  "team_name": team_name,
  "current_oncall": None,
- "next_oncall": None
+ "next_oncall": None,
  }
 
  if current_shift:
  current_attrs = current_shift.get("attributes", {})
  current_rels = current_shift.get("relationships", {})
- user_data = (current_rels.get("user", {}).get("data") or {})
+ user_data = current_rels.get("user", {}).get("data") or {}
  user_id = user_data.get("id")
- role_data = (current_rels.get("on_call_role", {}).get("data") or {})
+ role_data = current_rels.get("on_call_role", {}).get("data") or {}
  role_id = role_data.get("id")
 
  user_name = "Unknown"
  if user_id and user_id in users_map:
  user_attrs = users_map[user_id].get("attributes", {})
- user_name = user_attrs.get("full_name") or user_attrs.get("email", "Unknown")
+ user_name = user_attrs.get("full_name") or user_attrs.get(
+ "email", "Unknown"
+ )
 
  role_name = "Unknown Role"
  if role_id and role_id in roles_map:
@@ -1271,21 +1705,23 @@ def create_rootly_mcp_server(
  "role": role_name,
  "starts_at": convert_to_timezone(current_attrs.get("starts_at")),
  "ends_at": convert_to_timezone(current_attrs.get("ends_at")),
- "is_override": current_attrs.get("is_override", False)
+ "is_override": current_attrs.get("is_override", False),
  }
 
  if next_shift:
  next_attrs = next_shift.get("attributes", {})
  next_rels = next_shift.get("relationships", {})
- user_data = (next_rels.get("user", {}).get("data") or {})
+ user_data = next_rels.get("user", {}).get("data") or {}
  user_id = user_data.get("id")
- role_data = (next_rels.get("on_call_role", {}).get("data") or {})
+ role_data = next_rels.get("on_call_role", {}).get("data") or {}
  role_id = role_data.get("id")
 
  user_name = "Unknown"
  if user_id and user_id in users_map:
  user_attrs = users_map[user_id].get("attributes", {})
- user_name = user_attrs.get("full_name") or user_attrs.get("email", "Unknown")
+ user_name = user_attrs.get("full_name") or user_attrs.get(
+ "email", "Unknown"
+ )
 
  role_name = "Unknown Role"
  if role_id and role_id in roles_map:
@@ -1298,7 +1734,7 @@ def create_rootly_mcp_server(
  "role": role_name,
  "starts_at": convert_to_timezone(next_attrs.get("starts_at")),
  "ends_at": convert_to_timezone(next_attrs.get("ends_at")),
- "is_override": next_attrs.get("is_override", False)
+ "is_override": next_attrs.get("is_override", False),
  }
 
  handoff_data.append(schedule_info)
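
Assembled from the two branches above, one handoff_data entry looks roughly like this (illustrative values; the "user" key is an assumption, since the dict construction around it is elided from the hunk):

    {
        "schedule_name": "Primary - Platform",
        "team_name": "Platform",
        "current_oncall": {
            "user": "Ada Lovelace",
            "role": "Primary",
            "starts_at": "2025-10-01T09:00:00-04:00",
            "ends_at": "2025-10-02T09:00:00-04:00",
            "is_override": False,
        },
        "next_oncall": {...},  # same shape as current_oncall
    }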
@@ -1310,8 +1746,12 @@ def create_rootly_mcp_server(
  business_end_hour = 17
 
  # Create datetime objects for today's business hours in target timezone
- today_business_start = now.replace(hour=business_start_hour, minute=0, second=0, microsecond=0)
- today_business_end = now.replace(hour=business_end_hour, minute=0, second=0, microsecond=0)
+ today_business_start = now.replace(
+ hour=business_start_hour, minute=0, second=0, microsecond=0
+ )
+ today_business_end = now.replace(
+ hour=business_end_hour, minute=0, second=0, microsecond=0
+ )
 
  # Filter schedules where current shift overlaps with business hours
  filtered_data = []
@@ -1324,12 +1764,19 @@ def create_rootly_mcp_server(
 
  if shift_start_str and shift_end_str:
  try:
- shift_start = datetime.fromisoformat(shift_start_str.replace("Z", "+00:00"))
- shift_end = datetime.fromisoformat(shift_end_str.replace("Z", "+00:00"))
+ shift_start = datetime.fromisoformat(
+ shift_start_str.replace("Z", "+00:00")
+ )
+ shift_end = datetime.fromisoformat(
+ shift_end_str.replace("Z", "+00:00")
+ )
 
  # Check if shift overlaps with today's business hours
  # Shift overlaps if: shift_start < business_end AND shift_end > business_start
- if shift_start < today_business_end and shift_end > today_business_start:
+ if (
+ shift_start < today_business_end
+ and shift_end > today_business_start
+ ):
  filtered_data.append(schedule_info)
  except (ValueError, AttributeError):
  # Skip if we can't parse times
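
The comment above states the standard interval test: two intervals overlap iff each starts before the other ends. A small worked example:

    from datetime import datetime, timezone

    biz_start = datetime(2025, 10, 1, 9, 0, tzinfo=timezone.utc)
    biz_end = datetime(2025, 10, 1, 17, 0, tzinfo=timezone.utc)
    # A 16:00-to-midnight shift overlaps business hours by one hour (16:00-17:00)
    shift_start = datetime(2025, 10, 1, 16, 0, tzinfo=timezone.utc)
    shift_end = datetime(2025, 10, 2, 0, 0, tzinfo=timezone.utc)
    assert shift_start < biz_end and shift_end > biz_start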
@@ -1351,10 +1798,12 @@ def create_rootly_mcp_server(
  schedule_ids="",
  severity="",
  status="",
- tags=""
+ tags="",
  )
 
- schedule_info["shift_incidents"] = incidents_result if incidents_result.get("success") else None
+ schedule_info["shift_incidents"] = (
+ incidents_result if incidents_result.get("success") else None
+ )
  else:
  schedule_info["shift_incidents"] = None
  else:
@@ -1369,18 +1818,21 @@ def create_rootly_mcp_server(
  "schedules": handoff_data,
  "summary": {
  "total_schedules": len(handoff_data),
- "schedules_with_current_oncall": sum(1 for s in handoff_data if s["current_oncall"]),
+ "schedules_with_current_oncall": sum(
+ 1 for s in handoff_data if s["current_oncall"]
+ ),
  "schedules_with_next_oncall": sum(1 for s in handoff_data if s["next_oncall"]),
  "total_incidents": sum(
  s.get("shift_incidents", {}).get("summary", {}).get("total_incidents", 0)
  for s in handoff_data
  if s.get("shift_incidents")
- )
- }
+ ),
+ },
  }
 
  except Exception as e:
  import traceback
+
  error_type, error_message = MCPError.categorize_error(e)
  return MCPError.tool_error(
  f"Failed to get on-call handoff summary: {error_message}",
@@ -1388,8 +1840,8 @@ def create_rootly_mcp_server(
  details={
  "exception_type": type(e).__name__,
  "exception_str": str(e),
- "traceback": traceback.format_exc()
- }
+ "traceback": traceback.format_exc(),
+ },
  )
 
  async def _fetch_shift_incidents_internal(
@@ -1398,7 +1850,7 @@ def create_rootly_mcp_server(
  schedule_ids: str = "",
  severity: str = "",
  status: str = "",
- tags: str = ""
+ tags: str = "",
  ) -> dict:
  """Internal helper to fetch incidents - used by both get_shift_incidents and get_oncall_handoff_summary."""
  try:
@@ -1408,10 +1860,7 @@ def create_rootly_mcp_server(
  # Fetch incidents that:
  # 1. Were created during the shift (created_at in range)
  # 2. OR are currently active/unresolved (started but not resolved yet)
- params = {
- "page[size]": 100,
- "sort": "-created_at"
- }
+ params = {"page[size]": 100, "sort": "-created_at"}
 
  # Get incidents created during shift OR still active
  # We'll fetch all incidents and filter in-memory for active ones
@@ -1438,16 +1887,23 @@ def create_rootly_mcp_server(
 
  while page <= max_pages:
  params["page[number]"] = page
- incidents_response = await make_authenticated_request("GET", "/v1/incidents", params=params)
+ incidents_response = await make_authenticated_request(
+ "GET", "/v1/incidents", params=params
+ )
 
  if not incidents_response:
- return MCPError.tool_error("Failed to fetch incidents - no response from API", "execution_error")
+ return MCPError.tool_error(
+ "Failed to fetch incidents - no response from API", "execution_error"
+ )
 
  if incidents_response.status_code != 200:
  return MCPError.tool_error(
  f"Failed to fetch incidents - API returned status {incidents_response.status_code}",
  "execution_error",
- details={"status_code": incidents_response.status_code, "time_range": f"{start_time} to {end_time}"}
+ details={
+ "status_code": incidents_response.status_code,
+ "time_range": f"{start_time} to {end_time}",
+ },
  )
 
  incidents_data = incidents_response.json()
@@ -1471,6 +1927,7 @@ def create_rootly_mcp_server(
  # 1. Created during shift (created_at between start_time and end_time)
  # 2. Currently active (started but not resolved, regardless of when created)
  from datetime import timezone as dt_timezone
+
  shift_start_dt = datetime.fromisoformat(start_time.replace("Z", "+00:00"))
  shift_end_dt = datetime.fromisoformat(end_time.replace("Z", "+00:00"))
  now_dt = datetime.now(dt_timezone.utc)
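
The replace("Z", "+00:00") shim appears here and throughout because datetime.fromisoformat only accepts a trailing "Z" from Python 3.11 onward; the substitution keeps parsing portable to older interpreters:

    from datetime import datetime

    ts = "2025-10-01T00:00:00Z"
    dt = datetime.fromisoformat(ts.replace("Z", "+00:00"))  # works on 3.7+
    assert dt.utcoffset().total_seconds() == 0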
@@ -1488,9 +1945,21 @@ def create_rootly_mcp_server(
 
  # Parse timestamps
  try:
- created_dt = datetime.fromisoformat(created_at.replace("Z", "+00:00")) if created_at else None
- started_dt = datetime.fromisoformat(started_at.replace("Z", "+00:00")) if started_at else None
- resolved_dt = datetime.fromisoformat(resolved_at.replace("Z", "+00:00")) if resolved_at else None
+ created_dt = (
+ datetime.fromisoformat(created_at.replace("Z", "+00:00"))
+ if created_at
+ else None
+ )
+ started_dt = (
+ datetime.fromisoformat(started_at.replace("Z", "+00:00"))
+ if started_at
+ else None
+ )
+ resolved_dt = (
+ datetime.fromisoformat(resolved_at.replace("Z", "+00:00"))
+ if resolved_at
+ else None
+ )
  except (ValueError, AttributeError):
  continue  # Skip if we can't parse dates
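
The filter the two numbered comments above describe reduces to a small predicate; a hedged sketch (names mirror the diff, but the function itself is an illustration, not the shipped code):

    def is_relevant(created_dt, started_dt, resolved_dt, shift_start_dt, shift_end_dt) -> bool:
        created_in_shift = created_dt is not None and shift_start_dt <= created_dt <= shift_end_dt
        still_active = started_dt is not None and resolved_dt is None
        return created_in_shift or still_active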
 
@@ -1551,24 +2020,28 @@ def create_rootly_mcp_server(
  if attrs.get("mitigation"):
  narrative_parts.append(f"Resolution: {attrs.get('mitigation')}")
  elif attrs.get("action_items_count") and attrs.get("action_items_count") > 0:
- narrative_parts.append(f"Action items created: {attrs.get('action_items_count')}")
+ narrative_parts.append(
+ f"Action items created: {attrs.get('action_items_count')}"
+ )
 
  narrative = " | ".join(narrative_parts)
 
- incidents_summary.append({
- "incident_id": incident_id,
- "title": attrs.get("title", "Untitled Incident"),
- "severity": attrs.get("severity"),
- "status": attrs.get("status"),
- "started_at": started_at,
- "resolved_at": resolved_at,
- "duration_minutes": duration_minutes,
- "summary": attrs.get("summary"),
- "impact": attrs.get("customer_impact_summary"),
- "mitigation": attrs.get("mitigation"),
- "narrative": narrative,
- "incident_url": attrs.get("incident_url")
- })
+ incidents_summary.append(
+ {
+ "incident_id": incident_id,
+ "title": attrs.get("title", "Untitled Incident"),
+ "severity": attrs.get("severity"),
+ "status": attrs.get("status"),
+ "started_at": started_at,
+ "resolved_at": resolved_at,
+ "duration_minutes": duration_minutes,
+ "summary": attrs.get("summary"),
+ "impact": attrs.get("customer_impact_summary"),
+ "mitigation": attrs.get("mitigation"),
+ "narrative": narrative,
+ "incident_url": attrs.get("incident_url"),
+ }
+ )
 
  # Group by severity
  by_severity = {}
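
The grouping loop itself falls outside this hunk; the conventional shape of a "group by severity" step over incidents_summary would be (an assumption, shown only to make the later by_severity summary legible):

    for inc in incidents_summary:
        by_severity.setdefault(inc["severity"] or "unknown", []).append(inc)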
@@ -1584,28 +2057,28 @@ def create_rootly_mcp_server(
  ongoing_count = total_incidents - resolved_count
 
  avg_resolution_time = None
- durations = [inc["duration_minutes"] for inc in incidents_summary if inc["duration_minutes"]]
+ durations = [
+ inc["duration_minutes"] for inc in incidents_summary if inc["duration_minutes"]
+ ]
  if durations:
  avg_resolution_time = int(sum(durations) / len(durations))
 
  return {
  "success": True,
- "period": {
- "start_time": start_time,
- "end_time": end_time
- },
+ "period": {"start_time": start_time, "end_time": end_time},
  "summary": {
  "total_incidents": total_incidents,
  "resolved": resolved_count,
  "ongoing": ongoing_count,
  "average_resolution_minutes": avg_resolution_time,
- "by_severity": {k: len(v) for k, v in by_severity.items()}
+ "by_severity": {k: len(v) for k, v in by_severity.items()},
  },
- "incidents": incidents_summary
+ "incidents": incidents_summary,
  }
 
  except Exception as e:
  import traceback
+
  error_type, error_message = MCPError.categorize_error(e)
  return MCPError.tool_error(
  f"Failed to get shift incidents: {error_message}",
@@ -1614,18 +2087,44 @@ def create_rootly_mcp_server(
  "params": {"start_time": start_time, "end_time": end_time},
  "exception_type": type(e).__name__,
  "exception_str": str(e),
- "traceback": traceback.format_exc()
- }
+ "traceback": traceback.format_exc(),
+ },
  )
 
  @mcp.tool()
  async def get_shift_incidents(
- start_time: Annotated[str, Field(description="Start time for incident search (ISO 8601 format, e.g., '2025-10-01T00:00:00Z')")],
- end_time: Annotated[str, Field(description="End time for incident search (ISO 8601 format, e.g., '2025-10-01T23:59:59Z')")],
- schedule_ids: Annotated[str, Field(description="Comma-separated list of schedule IDs to filter incidents (optional)")] = "",
- severity: Annotated[str, Field(description="Filter by severity: 'critical', 'high', 'medium', 'low' (optional)")] = "",
- status: Annotated[str, Field(description="Filter by status: 'started', 'detected', 'acknowledged', 'investigating', 'identified', 'monitoring', 'resolved', 'cancelled' (optional)")] = "",
- tags: Annotated[str, Field(description="Comma-separated list of tag slugs to filter incidents (optional)")] = ""
+ start_time: Annotated[
+ str,
+ Field(
+ description="Start time for incident search (ISO 8601 format, e.g., '2025-10-01T00:00:00Z')"
+ ),
+ ],
+ end_time: Annotated[
+ str,
+ Field(
+ description="End time for incident search (ISO 8601 format, e.g., '2025-10-01T23:59:59Z')"
+ ),
+ ],
+ schedule_ids: Annotated[
+ str,
+ Field(
+ description="Comma-separated list of schedule IDs to filter incidents (optional)"
+ ),
+ ] = "",
+ severity: Annotated[
+ str,
+ Field(description="Filter by severity: 'critical', 'high', 'medium', 'low' (optional)"),
+ ] = "",
+ status: Annotated[
+ str,
+ Field(
+ description="Filter by status: 'started', 'detected', 'acknowledged', 'investigating', 'identified', 'monitoring', 'resolved', 'cancelled' (optional)"
+ ),
+ ] = "",
+ tags: Annotated[
+ str,
+ Field(description="Comma-separated list of tag slugs to filter incidents (optional)"),
+ ] = "",
  ) -> dict:
  """
  Get incidents and alerts that occurred during a specific shift or time period.
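
A hedged usage sketch for this tool (argument values are invented; client is a fastmcp Client as in the earlier example):

    result = await client.call_tool(
        "get_shift_incidents",
        {
            "start_time": "2025-10-01T00:00:00Z",
            "end_time": "2025-10-01T23:59:59Z",
            "severity": "critical",
            "status": "resolved",
        },
    )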
@@ -1638,7 +2137,9 @@ def create_rootly_mcp_server(
 
  Returns incident details including severity, status, duration, and basic summary.
  """
- return await _fetch_shift_incidents_internal(start_time, end_time, schedule_ids, severity, status, tags)
+ return await _fetch_shift_incidents_internal(
+ start_time, end_time, schedule_ids, severity, status, tags
+ )
 
  # Add MCP resources for incidents and teams
  @mcp.resource("incident://{incident_id}")
@@ -1647,26 +2148,26 @@ def create_rootly_mcp_server(
  try:
  response = await make_authenticated_request("GET", f"/v1/incidents/{incident_id}")
  response.raise_for_status()
- incident_data = response.json()
-
+ incident_data = strip_heavy_nested_data({"data": [response.json().get("data", {})]})
+
  # Format incident data as readable text
- incident = incident_data.get("data", {})
+ incident = incident_data.get("data", [{}])[0]
  attributes = incident.get("attributes", {})
-
+
  text_content = f"""Incident #{incident_id}
- Title: {attributes.get('title', 'N/A')}
- Status: {attributes.get('status', 'N/A')}
- Severity: {attributes.get('severity', 'N/A')}
- Created: {attributes.get('created_at', 'N/A')}
- Updated: {attributes.get('updated_at', 'N/A')}
- Summary: {attributes.get('summary', 'N/A')}
- URL: {attributes.get('url', 'N/A')}"""
-
+ Title: {attributes.get("title", "N/A")}
+ Status: {attributes.get("status", "N/A")}
+ Severity: {attributes.get("severity", "N/A")}
+ Created: {attributes.get("created_at", "N/A")}
+ Updated: {attributes.get("updated_at", "N/A")}
+ Summary: {attributes.get("summary", "N/A")}
+ URL: {attributes.get("url", "N/A")}"""
+
  return {
  "uri": f"incident://{incident_id}",
  "name": f"Incident #{incident_id}",
  "text": text_content,
- "mimeType": "text/plain"
+ "mimeType": "text/plain",
  }
  except Exception as e:
  error_type, error_message = MCPError.categorize_error(e)
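
strip_heavy_nested_data expects a list under "data", so the single-incident resource above wraps the object in a one-element list and unwraps it afterwards — a minimal sketch of that pattern:

    single = {"data": {"id": "42", "type": "incidents", "attributes": {}}}  # illustrative payload
    stripped = strip_heavy_nested_data({"data": [single.get("data", {})]})
    incident = stripped.get("data", [{}])[0]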
@@ -1674,7 +2175,7 @@ URL: {attributes.get('url', 'N/A')}"""
  "uri": f"incident://{incident_id}",
  "name": f"Incident #{incident_id} (Error)",
  "text": f"Error ({error_type}): {error_message}",
- "mimeType": "text/plain"
+ "mimeType": "text/plain",
  }
 
  @mcp.resource("team://{team_id}")
@@ -1684,23 +2185,23 @@ URL: {attributes.get('url', 'N/A')}"""
  response = await make_authenticated_request("GET", f"/v1/teams/{team_id}")
  response.raise_for_status()
  team_data = response.json()
-
+
  # Format team data as readable text
  team = team_data.get("data", {})
  attributes = team.get("attributes", {})
-
+
  text_content = f"""Team #{team_id}
- Name: {attributes.get('name', 'N/A')}
- Color: {attributes.get('color', 'N/A')}
- Slug: {attributes.get('slug', 'N/A')}
- Created: {attributes.get('created_at', 'N/A')}
- Updated: {attributes.get('updated_at', 'N/A')}"""
-
+ Name: {attributes.get("name", "N/A")}
+ Color: {attributes.get("color", "N/A")}
+ Slug: {attributes.get("slug", "N/A")}
+ Created: {attributes.get("created_at", "N/A")}
+ Updated: {attributes.get("updated_at", "N/A")}"""
+
  return {
  "uri": f"team://{team_id}",
  "name": f"Team: {attributes.get('name', team_id)}",
  "text": text_content,
- "mimeType": "text/plain"
+ "mimeType": "text/plain",
  }
  except Exception as e:
  error_type, error_message = MCPError.categorize_error(e)
@@ -1708,50 +2209,56 @@ Updated: {attributes.get('updated_at', 'N/A')}"""
  "uri": f"team://{team_id}",
  "name": f"Team #{team_id} (Error)",
  "text": f"Error ({error_type}): {error_message}",
- "mimeType": "text/plain"
+ "mimeType": "text/plain",
  }
 
  @mcp.resource("rootly://incidents")
  async def list_incidents_resource():
  """List recent incidents as an MCP resource for quick reference."""
  try:
- response = await make_authenticated_request("GET", "/v1/incidents", params={
- "page[size]": 10,
- "page[number]": 1,
- "include": ""
- })
+ response = await make_authenticated_request(
+ "GET",
+ "/v1/incidents",
+ params={
+ "page[size]": 10,
+ "page[number]": 1,
+ "include": "",
+ "fields[incidents]": "id,title,status",
+ },
+ )
  response.raise_for_status()
- data = response.json()
-
+ data = strip_heavy_nested_data(response.json())
+
  incidents = data.get("data", [])
  text_lines = ["Recent Incidents:\n"]
-
+
  for incident in incidents:
  attrs = incident.get("attributes", {})
- text_lines.append(f"• #{incident.get('id', 'N/A')} - {attrs.get('title', 'N/A')} [{attrs.get('status', 'N/A')}]")
-
+ text_lines.append(
+ f"• #{incident.get('id', 'N/A')} - {attrs.get('title', 'N/A')} [{attrs.get('status', 'N/A')}]"
+ )
+
  return {
  "uri": "rootly://incidents",
  "name": "Recent Incidents",
  "text": "\n".join(text_lines),
- "mimeType": "text/plain"
+ "mimeType": "text/plain",
  }
  except Exception as e:
  error_type, error_message = MCPError.categorize_error(e)
  return {
- "uri": "rootly://incidents",
+ "uri": "rootly://incidents",
  "name": "Recent Incidents (Error)",
  "text": f"Error ({error_type}): {error_message}",
- "mimeType": "text/plain"
+ "mimeType": "text/plain",
  }
 
-
  # Log server creation (tool count will be shown when tools are accessed)
  logger.info("Created Rootly MCP Server successfully")
  return mcp
 
 
- def _load_swagger_spec(swagger_path: Optional[str] = None) -> Dict[str, Any]:
+ def _load_swagger_spec(swagger_path: str | None = None) -> dict[str, Any]:
  """
  Load the Swagger specification from a file or URL.
 
@@ -1766,7 +2273,7 @@ def _load_swagger_spec(swagger_path: Optional[str] = None) -> Dict[str, Any]:
  logger.info(f"Using provided Swagger path: {swagger_path}")
  if not os.path.isfile(swagger_path):
  raise FileNotFoundError(f"Swagger file not found at {swagger_path}")
- with open(swagger_path, "r", encoding="utf-8") as f:
+ with open(swagger_path, encoding="utf-8") as f:
  return json.load(f)
  else:
  # First, check in the package data directory
@@ -1774,7 +2281,7 @@ def _load_swagger_spec(swagger_path: Optional[str] = None) -> Dict[str, Any]:
  package_data_path = Path(__file__).parent / "data" / "swagger.json"
  if package_data_path.is_file():
  logger.info(f"Found Swagger file in package data: {package_data_path}")
- with open(package_data_path, "r", encoding="utf-8") as f:
+ with open(package_data_path, encoding="utf-8") as f:
  return json.load(f)
  except Exception as e:
  logger.debug(f"Could not load Swagger file from package data: {e}")
@@ -1787,7 +2294,7 @@ def _load_swagger_spec(swagger_path: Optional[str] = None) -> Dict[str, Any]:
  local_swagger_path = current_dir / "swagger.json"
  if local_swagger_path.is_file():
  logger.info(f"Found Swagger file at {local_swagger_path}")
- with open(local_swagger_path, "r", encoding="utf-8") as f:
+ with open(local_swagger_path, encoding="utf-8") as f:
  return json.load(f)
 
  # Check parent directories
@@ -1795,7 +2302,7 @@ def _load_swagger_spec(swagger_path: Optional[str] = None) -> Dict[str, Any]:
  parent_swagger_path = parent / "swagger.json"
  if parent_swagger_path.is_file():
  logger.info(f"Found Swagger file at {parent_swagger_path}")
- with open(parent_swagger_path, "r", encoding="utf-8") as f:
+ with open(parent_swagger_path, encoding="utf-8") as f:
  return json.load(f)
 
  # If the file wasn't found, fetch it from the URL and save it
@@ -1815,7 +2322,7 @@ def _load_swagger_spec(swagger_path: Optional[str] = None) -> Dict[str, Any]:
  return swagger_spec
 
 
- def _fetch_swagger_from_url(url: str = SWAGGER_URL) -> Dict[str, Any]:
+ def _fetch_swagger_from_url(url: str = SWAGGER_URL) -> dict[str, Any]:
  """
  Fetch the Swagger specification from the specified URL.
 
@@ -1827,7 +2334,7 @@ def _fetch_swagger_from_url(url: str = SWAGGER_URL) -> Dict[str, Any]:
  """
  logger.info(f"Fetching Swagger specification from {url}")
  try:
- response = requests.get(url)
+ response = requests.get(url, timeout=30)
  response.raise_for_status()
  return response.json()
  except requests.RequestException as e:
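
Note that requests' timeout is applied per socket operation (connect, then each read), not as a cap on total download time; a (connect, read) tuple gives finer control if that distinction matters — a sketch with a placeholder URL:

    import requests

    # 5s to establish the connection, 30s per read
    response = requests.get("https://example.com/swagger.json", timeout=(5, 30))
    response.raise_for_status()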
@@ -1838,7 +2345,7 @@ def _fetch_swagger_from_url(url: str = SWAGGER_URL) -> Dict[str, Any]:
  raise Exception(f"Failed to parse Swagger specification: {e}")
 
 
- def _filter_openapi_spec(spec: Dict[str, Any], allowed_paths: List[str]) -> Dict[str, Any]:
+ def _filter_openapi_spec(spec: dict[str, Any], allowed_paths: list[str]) -> dict[str, Any]:
  """
  Filter an OpenAPI specification to only include specified paths and clean up schema references.
 
@@ -1855,9 +2362,7 @@ def _filter_openapi_spec(spec: Dict[str, Any], allowed_paths: List[str]) -> Dict
  # Filter paths
  original_paths = filtered_spec.get("paths", {})
  filtered_paths = {
- path: path_item
- for path, path_item in original_paths.items()
- if path in allowed_paths
+ path: path_item for path, path_item in original_paths.items() if path in allowed_paths
  }
 
  filtered_spec["paths"] = filtered_paths
@@ -1873,7 +2378,7 @@ def _filter_openapi_spec(spec: Dict[str, Any], allowed_paths: List[str]) -> Dict
  if "requestBody" in operation:
  request_body = operation["requestBody"]
  if "content" in request_body:
- for content_type, content_info in request_body["content"].items():
+ for _content_type, content_info in request_body["content"].items():
  if "schema" in content_info:
  schema = content_info["schema"]
  # Remove problematic $ref references
@@ -1882,20 +2387,20 @@ def _filter_openapi_spec(spec: Dict[str, Any], allowed_paths: List[str]) -> Dict
  content_info["schema"] = {
  "type": "object",
  "description": "Request parameters for this endpoint",
- "additionalProperties": True
+ "additionalProperties": True,
  }
 
  # Remove response schemas to avoid validation issues
  # FastMCP will still return the data, just without strict validation
  if "responses" in operation:
- for status_code, response in operation["responses"].items():
+ for _status_code, response in operation["responses"].items():
  if "content" in response:
- for content_type, content_info in response["content"].items():
+ for _content_type, content_info in response["content"].items():
  if "schema" in content_info:
  # Replace with a simple schema that accepts any response
  content_info["schema"] = {
  "type": "object",
- "additionalProperties": True
+ "additionalProperties": True,
  }
 
  # Clean parameter schemas (parameter names are already sanitized)
@@ -1907,135 +2412,155 @@ def _filter_openapi_spec(spec: Dict[str, Any], allowed_paths: List[str]) -> Dict
  # Replace with a simple string schema
  param["schema"] = {
  "type": "string",
- "description": param.get("description", "Parameter value")
+ "description": param.get("description", "Parameter value"),
  }
 
  # Add/modify pagination limits to alerts and incident-related endpoints to prevent infinite loops
  if method.lower() == "get" and ("alerts" in path.lower() or "incident" in path.lower()):
  if "parameters" not in operation:
  operation["parameters"] = []
-
+
  # Find existing pagination parameters and update them with limits
  page_size_param = None
  page_number_param = None
-
+
  for param in operation["parameters"]:
  if param.get("name") == "page[size]":
  page_size_param = param
  elif param.get("name") == "page[number]":
  page_number_param = param
-
+
  # Update or add page[size] parameter with limits
  if page_size_param:
  # Update existing parameter with limits
  if "schema" not in page_size_param:
  page_size_param["schema"] = {}
- page_size_param["schema"].update({
- "type": "integer",
- "default": 10,
- "minimum": 1,
- "maximum": 20,
- "description": "Number of results per page (max: 20)"
- })
- else:
- # Add new parameter
- operation["parameters"].append({
- "name": "page[size]",
- "in": "query",
- "required": False,
- "schema": {
+ page_size_param["schema"].update(
+ {
  "type": "integer",
  "default": 10,
  "minimum": 1,
  "maximum": 20,
- "description": "Number of results per page (max: 20)"
+ "description": "Number of results per page (max: 20)",
  }
- })
-
+ )
+ else:
+ # Add new parameter
+ operation["parameters"].append(
+ {
+ "name": "page[size]",
+ "in": "query",
+ "required": False,
+ "schema": {
+ "type": "integer",
+ "default": 10,
+ "minimum": 1,
+ "maximum": 20,
+ "description": "Number of results per page (max: 20)",
+ },
+ }
+ )
+
  # Update or add page[number] parameter with defaults
  if page_number_param:
- # Update existing parameter
+ # Update existing parameter
  if "schema" not in page_number_param:
  page_number_param["schema"] = {}
- page_number_param["schema"].update({
- "type": "integer",
- "default": 1,
- "minimum": 1,
- "description": "Page number to retrieve"
- })
- else:
- # Add new parameter
- operation["parameters"].append({
- "name": "page[number]",
- "in": "query",
- "required": False,
- "schema": {
+ page_number_param["schema"].update(
+ {
  "type": "integer",
  "default": 1,
  "minimum": 1,
- "description": "Page number to retrieve"
+ "description": "Page number to retrieve",
+ }
+ )
+ else:
+ # Add new parameter
+ operation["parameters"].append(
+ {
+ "name": "page[number]",
+ "in": "query",
+ "required": False,
+ "schema": {
+ "type": "integer",
+ "default": 1,
+ "minimum": 1,
+ "description": "Page number to retrieve",
+ },
  }
- })
-
+ )
+
  # Add sparse fieldsets for alerts endpoints to reduce payload size
  if "alert" in path.lower():
  # Add fields[alerts] parameter with essential fields only - make it required with default
- operation["parameters"].append({
- "name": "fields[alerts]",
- "in": "query",
- "required": True,
- "schema": {
- "type": "string",
- "default": "id,summary,status,started_at,ended_at,short_id,alert_urgency_id,source,noise",
- "description": "Comma-separated list of alert fields to include (reduces payload size)"
+ operation["parameters"].append(
+ {
+ "name": "fields[alerts]",
+ "in": "query",
+ "required": True,
+ "schema": {
+ "type": "string",
+ "default": "id,summary,status,started_at,ended_at,short_id,alert_urgency_id,source,noise",
+ "description": "Comma-separated list of alert fields to include (reduces payload size)",
+ },
  }
- })
-
+ )
+
  # Add include parameter for alerts endpoints to minimize relationships
  if "alert" in path.lower():
  # Check if include parameter already exists
- include_param_exists = any(param.get("name") == "include" for param in operation["parameters"])
+ include_param_exists = any(
+ param.get("name") == "include" for param in operation["parameters"]
+ )
  if not include_param_exists:
- operation["parameters"].append({
- "name": "include",
- "in": "query",
- "required": True,
- "schema": {
- "type": "string",
- "default": "",
- "description": "Related resources to include (empty for minimal payload)"
+ operation["parameters"].append(
+ {
+ "name": "include",
+ "in": "query",
+ "required": True,
+ "schema": {
+ "type": "string",
+ "default": "",
+ "description": "Related resources to include (empty for minimal payload)",
+ },
  }
- })
-
+ )
+
  # Add sparse fieldsets for incidents endpoints to reduce payload size
  if "incident" in path.lower():
  # Add fields[incidents] parameter with essential fields only - make it required with default
- operation["parameters"].append({
- "name": "fields[incidents]",
- "in": "query",
- "required": True,
- "schema": {
- "type": "string",
- "default": "id,title,summary,status,severity,created_at,updated_at,url,started_at",
- "description": "Comma-separated list of incident fields to include (reduces payload size)"
+ operation["parameters"].append(
+ {
+ "name": "fields[incidents]",
+ "in": "query",
+ "required": True,
+ "schema": {
+ "type": "string",
+ "default": "id,title,summary,status,severity,created_at,updated_at,url,started_at",
+ "description": "Comma-separated list of incident fields to include (reduces payload size)",
+ },
  }
- })
-
+ )
+
  # Add include parameter for incidents endpoints to minimize relationships
  if "incident" in path.lower():
  # Check if include parameter already exists
- include_param_exists = any(param.get("name") == "include" for param in operation["parameters"])
+ include_param_exists = any(
+ param.get("name") == "include" for param in operation["parameters"]
+ )
  if not include_param_exists:
- operation["parameters"].append({
- "name": "include",
- "in": "query",
- "required": True,
- "schema": {
- "type": "string",
- "default": "",
- "description": "Related resources to include (empty for minimal payload)"
+ operation["parameters"].append(
+ {
+ "name": "include",
+ "in": "query",
+ "required": True,
+ "schema": {
+ "type": "string",
+ "default": "",
+ "description": "Related resources to include (empty for minimal payload)",
+ },
  }
- })
+ )
 
  # Also clean up any remaining broken references in components
  if "components" in filtered_spec and "schemas" in filtered_spec["components"]:
@@ -2053,20 +2578,29 @@ def _filter_openapi_spec(spec: Dict[str, Any], allowed_paths: List[str]) -> Dict
  # Clean up any operation-level references to removed schemas
  removed_schemas = set()
  if "components" in filtered_spec and "schemas" in filtered_spec["components"]:
- removed_schemas = {"new_workflow", "update_workflow", "workflow", "workflow_task",
- "workflow_response", "workflow_list", "new_workflow_task",
- "update_workflow_task", "workflow_task_response", "workflow_task_list"}
-
+ removed_schemas = {
+ "new_workflow",
+ "update_workflow",
+ "workflow",
+ "workflow_task",
+ "workflow_response",
+ "workflow_list",
+ "new_workflow_task",
+ "update_workflow_task",
+ "workflow_task_response",
+ "workflow_task_list",
+ }
+
  for path, path_item in filtered_spec.get("paths", {}).items():
  for method, operation in path_item.items():
  if method.lower() not in ["get", "post", "put", "delete", "patch"]:
  continue
-
+
  # Clean request body references
  if "requestBody" in operation:
  request_body = operation["requestBody"]
  if "content" in request_body:
- for content_type, content_info in request_body["content"].items():
+ for _content_type, content_info in request_body["content"].items():
  if "schema" in content_info and "$ref" in content_info["schema"]:
  ref_path = content_info["schema"]["$ref"]
  schema_name = ref_path.split("/")[-1]
@@ -2075,15 +2609,17 @@ def _filter_openapi_spec(spec: Dict[str, Any], allowed_paths: List[str]) -> Dict
  content_info["schema"] = {
  "type": "object",
  "description": "Request data for this endpoint",
- "additionalProperties": True
+ "additionalProperties": True,
  }
- logger.debug(f"Cleaned broken reference in {method.upper()} {path} request body: {ref_path}")
-
- # Clean response references
+ logger.debug(
+ f"Cleaned broken reference in {method.upper()} {path} request body: {ref_path}"
+ )
+
+ # Clean response references
  if "responses" in operation:
- for status_code, response in operation["responses"].items():
+ for _status_code, response in operation["responses"].items():
  if "content" in response:
- for content_type, content_info in response["content"].items():
+ for _content_type, content_info in response["content"].items():
  if "schema" in content_info and "$ref" in content_info["schema"]:
  ref_path = content_info["schema"]["$ref"]
  schema_name = ref_path.split("/")[-1]
@@ -2092,14 +2628,16 @@ def _filter_openapi_spec(spec: Dict[str, Any], allowed_paths: List[str]) -> Dict
  content_info["schema"] = {
  "type": "object",
  "description": "Response data from this endpoint",
- "additionalProperties": True
+ "additionalProperties": True,
  }
- logger.debug(f"Cleaned broken reference in {method.upper()} {path} response: {ref_path}")
+ logger.debug(
+ f"Cleaned broken reference in {method.upper()} {path} response: {ref_path}"
+ )
 
  return filtered_spec
 
 
- def _has_broken_references(schema_def: Dict[str, Any]) -> bool:
+ def _has_broken_references(schema_def: dict[str, Any]) -> bool:
  """Check if a schema definition has broken references."""
  if "$ref" in schema_def:
  ref_path = schema_def["$ref"]
@@ -2107,7 +2645,7 @@ def _has_broken_references(schema_def: Dict[str, Any]) -> bool:
  broken_refs = [
  "incident_trigger_params",
  "new_workflow",
- "update_workflow",
+ "update_workflow",
  "workflow",
  "new_workflow_task",
  "update_workflow_task",
@@ -2118,18 +2656,18 @@ def _has_broken_references(schema_def: Dict[str, Any]) -> bool:
  "workflow_list",
  "workflow_custom_field_selection_response",
  "workflow_custom_field_selection_list",
- "workflow_form_field_condition_response",
+ "workflow_form_field_condition_response",
  "workflow_form_field_condition_list",
  "workflow_group_response",
  "workflow_group_list",
  "workflow_run_response",
- "workflow_runs_list"
+ "workflow_runs_list",
  ]
  if any(broken_ref in ref_path for broken_ref in broken_refs):
  return True
 
  # Recursively check nested schemas
- for key, value in schema_def.items():
+ for _key, value in schema_def.items():
  if isinstance(value, dict):
  if _has_broken_references(value):
  return True
@@ -2151,10 +2689,10 @@ class RootlyMCPServer(FastMCP):
 
  def __init__(
  self,
- swagger_path: Optional[str] = None,
+ swagger_path: str | None = None,
  name: str = "Rootly",
  default_page_size: int = 10,
- allowed_paths: Optional[List[str]] = None,
+ allowed_paths: list[str] | None = None,
  hosted: bool = False,
  *args,
  **kwargs,
@@ -2165,10 +2703,7 @@ class RootlyMCPServer(FastMCP):
 
  # Create the server using the new function
  server = create_rootly_mcp_server(
- swagger_path=swagger_path,
- name=name,
- allowed_paths=allowed_paths,
- hosted=hosted
+ swagger_path=swagger_path, name=name, allowed_paths=allowed_paths, hosted=hosted
  )
 
  # Copy the server's state to this instance
@@ -2177,5 +2712,5 @@ class RootlyMCPServer(FastMCP):
  # Tools will be accessed via async methods when needed
  self._server = server
  self._tools = {} # Placeholder - tools should be accessed via async methods
- self._resources = getattr(server, '_resources', {})
- self._prompts = getattr(server, '_prompts', {})
+ self._resources = getattr(server, "_resources", {})
+ self._prompts = getattr(server, "_prompts", {})
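
A hedged usage sketch for the wrapper class above (the PEP 604 unions in the new signature require Python 3.10+; module path and argument values are assumptions):

    from rootly_mcp_server.server import RootlyMCPServer

    server = RootlyMCPServer(
        name="Rootly",
        allowed_paths=["/v1/incidents", "/v1/schedules"],
        hosted=False,
    )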